From 28bdd028436839f820bacee15c0fca96d49734e1 Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Mon, 25 Mar 2024 02:43:35 +0000 Subject: [PATCH 01/23] Initial OpenCL things --- .gitignore | 2 + Cargo.toml | 20 +- README-crates.io.md | 1 - README-quick-start.md | 101 +- RELEASES.md | 1308 +++++++++---------- benches/append.rs | 9 +- benches/bench1.rs | 303 ++--- benches/chunks.rs | 15 +- benches/construct.rs | 12 +- benches/gemv_gemm.rs | 18 +- benches/higher-order.rs | 24 +- benches/iter.rs | 105 +- benches/numeric.rs | 3 +- benches/par_rayon.rs | 51 +- benches/to_shape.rs | 33 +- benches/zip.rs | 33 +- examples/axis_ops.rs | 3 +- examples/bounds_check_elim.rs | 21 +- examples/column_standardize.rs | 3 +- examples/convo.rs | 6 +- examples/life.rs | 15 +- examples/rollaxis.rs | 3 +- examples/sort-axis.rs | 36 +- examples/type_conversion.rs | 3 +- examples/zip_many.rs | 3 +- misc/axis_iter.svg | 1589 ++++++++++++------------ misc/split_at.svg | 1543 +++++++++++------------ ndarray-rand/README.md | 6 +- ndarray-rand/RELEASES.md | 56 +- ndarray-rand/benches/bench.rs | 9 +- ndarray-rand/src/lib.rs | 21 +- ndarray-rand/tests/tests.rs | 24 +- rustfmt.toml | 2 +- src/aliases.rs | 24 +- src/argument_traits.rs | 27 +- src/array_approx.rs | 3 +- src/array_serde.rs | 69 +- src/arrayformat.rs | 81 +- src/arraytraits.rs | 84 +- src/data_repr.rs | 320 ++++- src/data_traits.rs | 240 ++-- src/dimension/axes.rs | 45 +- src/dimension/axis.rs | 6 +- src/dimension/broadcast.rs | 12 +- src/dimension/conversion.rs | 27 +- src/dimension/dim.rs | 24 +- src/dimension/dimension_trait.rs | 282 ++--- src/dimension/dynindeximpl.rs | 117 +- src/dimension/mod.rs | 138 +- src/dimension/ndindex.rs | 114 +- src/dimension/ops.rs | 9 +- src/dimension/remove_axis.rs | 15 +- src/dimension/reshape.rs | 3 +- src/dimension/sequence.rs | 33 +- src/error.rs | 42 +- src/extension/nonnull.rs | 6 +- src/free_functions.rs | 48 +- src/geomspace.rs | 36 +- src/impl_1d.rs | 3 +- src/impl_2d.rs | 21 +- 
src/impl_clone.rs | 9 +- src/impl_constructors.rs | 36 +- src/impl_cow.rs | 18 +- src/impl_dyn.rs | 6 +- src/impl_internal_constructors.rs | 6 +- src/impl_methods.rs | 268 ++-- src/impl_ops.rs | 27 +- src/impl_owned_array.rs | 54 +- src/impl_raw_views.rs | 48 +- src/impl_special_element_types.rs | 3 +- src/impl_views/constructors.rs | 33 +- src/impl_views/conversions.rs | 78 +- src/impl_views/indexing.rs | 21 +- src/impl_views/splitting.rs | 15 +- src/indexes.rs | 78 +- src/iterators/chunks.rs | 30 +- src/iterators/into_iter.rs | 30 +- src/iterators/lanes.rs | 24 +- src/iterators/mod.rs | 453 +++---- src/iterators/windows.rs | 18 +- src/itertools.rs | 3 +- src/layout/layoutfmt.rs | 6 +- src/layout/mod.rs | 51 +- src/lib.rs | 64 +- src/linalg/impl_linalg.rs | 69 +- src/linspace.rs | 18 +- src/logspace.rs | 27 +- src/low_level_util.rs | 12 +- src/math_cell.rs | 57 +- src/numeric/impl_float_maths.rs | 6 +- src/numeric/impl_numeric.rs | 18 +- src/numeric_util.rs | 6 +- src/order.rs | 21 +- src/parallel/impl_par_methods.rs | 3 +- src/parallel/into_impls.rs | 12 +- src/parallel/mod.rs | 3 +- src/parallel/par.rs | 21 +- src/parallel/send_producer.rs | 48 +- src/partial.rs | 27 +- src/shape_builder.rs | 78 +- src/slice.rs | 132 +- src/split_at.rs | 18 +- src/zip/mod.rs | 60 +- src/zip/ndproducer.rs | 189 +-- tests/append.rs | 54 +- tests/array-construct.rs | 90 +- tests/array.rs | 447 +++---- tests/assign.rs | 51 +- tests/azip.rs | 84 +- tests/broadcast.rs | 15 +- tests/clone.rs | 3 +- tests/complex.rs | 6 +- tests/dimension.rs | 48 +- tests/format.rs | 6 +- tests/higher_order_f.rs | 3 +- tests/indices.rs | 3 +- tests/into-ixdyn.rs | 6 +- tests/iterator_chunks.rs | 18 +- tests/iterators.rs | 162 +-- tests/ix0.rs | 12 +- tests/ixdyn.rs | 33 +- tests/numeric.rs | 69 +- tests/oper.rs | 93 +- tests/par_azip.rs | 15 +- tests/par_rayon.rs | 18 +- tests/par_zip.rs | 24 +- tests/raw_views.rs | 30 +- tests/reshape.rs | 45 +- tests/s.rs | 3 +- tests/stacking.rs | 6 +- tests/views.rs 
| 3 +- tests/windows.rs | 57 +- tests/zst.rs | 6 +- xtest-blas/tests/oper.rs | 60 +- xtest-numeric/tests/accuracy.rs | 33 +- xtest-serialization/tests/serialize.rs | 15 +- 136 files changed, 4644 insertions(+), 6288 deletions(-) diff --git a/.gitignore b/.gitignore index 1e7caa9ea..ffb44634d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,4 @@ Cargo.lock target/ + +.DS_Store diff --git a/Cargo.toml b/Cargo.toml index ae9e33f06..4332daf4a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,8 +5,8 @@ version = "0.15.6" edition = "2018" rust-version = "1.57" authors = [ - "Ulrik Sverdrup \"bluss\"", - "Jim Turner" + "Ulrik Sverdrup \"bluss\"", + "Jim Turner" ] license = "MIT OR Apache-2.0" readme = "README-crates.io.md" @@ -31,17 +31,21 @@ num-integer = { version = "0.1.39", default-features = false } num-traits = { version = "0.2", default-features = false } num-complex = { version = "0.4", default-features = false } +# Use via the `opencl` crate feature! +#hasty_ = { version = "0.2", optional = true, package = "hasty", default-features = false } +hasty_ = { path = "../../hasty_dev/hasty", optional = true, package = "hasty", default-features = false } + # Use via the `rayon` crate feature! rayon_ = { version = "1.0.3", optional = true, package = "rayon" } -approx = { version = "0.4", optional = true , default-features = false } -approx-0_5 = { package = "approx", version = "0.5", optional = true , default-features = false } +approx = { version = "0.4", optional = true, default-features = false } +approx-0_5 = { package = "approx", version = "0.5", optional = true, default-features = false } # Use via the `blas` crate feature! 
cblas-sys = { version = "0.1.4", optional = true, default-features = false } libc = { version = "0.2.82", optional = true } -matrixmultiply = { version = "0.3.2", default-features = false, features=["cgemm"] } +matrixmultiply = { version = "0.3.2", default-features = false, features = ["cgemm"] } serde = { version = "1.0", optional = true, default-features = false, features = ["alloc"] } rawpointer = { version = "0.2" } @@ -55,9 +59,13 @@ itertools = { version = "0.10.0", default-features = false, features = ["use_std [features] default = ["std"] +# Enable OpenCL backend +opencl = ["hasty_/opencl"] +cuda = ["hasty_/cuda"] + # Enable blas usage # See README for more instructions -blas = ["cblas-sys", "libc"] +blas = ["cblas-sys", "libc", "hasty_/blas"] # Old name for the serde feature serde-1 = ["serde"] diff --git a/README-crates.io.md b/README-crates.io.md index 3c6b09054..79e6cd521 100644 --- a/README-crates.io.md +++ b/README-crates.io.md @@ -1,4 +1,3 @@ - `ndarray` implements an *n*-dimensional container for general elements and for numerics. diff --git a/README-quick-start.md b/README-quick-start.md index ad13acc72..693386169 100644 --- a/README-quick-start.md +++ b/README-quick-start.md @@ -1,13 +1,15 @@ # Quickstart tutorial -If you are familiar with Python Numpy, do check out this [For Numpy User Doc](https://docs.rs/ndarray/0.13.0/ndarray/doc/ndarray_for_numpy_users/index.html) -after you go through this tutorial. +If you are familiar with Python Numpy, do check out +this [For Numpy User Doc](https://docs.rs/ndarray/0.13.0/ndarray/doc/ndarray_for_numpy_users/index.html) +after you go through this tutorial. You can use [play.integer32.com](https://play.integer32.com/) to immediately try out the examples. 
## The Basics -You can create your first 2x3 floating-point ndarray as such: +You can create your first 2x3 floating-point ndarray as such: + ```rust use ndarray::prelude::*; @@ -24,7 +26,9 @@ fn main() { println!("{:?}", a); } ``` + This code will create a simple array, then print it to stdout as such: + ``` [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], shape=[2, 3], strides=[3, 1], layout=C (0x1), const ndim=2 @@ -34,7 +38,9 @@ This code will create a simple array, then print it to stdout as such: ### Element type and dimensionality -Now let's create more arrays. A common operation on matrices is to create a matrix full of 0's of certain dimensions. Let's try to do that with dimensions (3, 2, 4) using the `Array::zeros` function: +Now let's create more arrays. A common operation on matrices is to create a matrix full of 0's of certain dimensions. +Let's try to do that with dimensions (3, 2, 4) using the `Array::zeros` function: + ```rust use ndarray::prelude::*; use ndarray::Array; @@ -43,13 +49,17 @@ fn main() { println!("{:?}", a); } ``` + Unfortunately, this code does not compile. + ``` | let a = Array::zeros((3, 2, 4).f()); | - ^^^^^^^^^^^^ cannot infer type for type parameter `A` ``` -Indeed, note that the compiler needs to infer the element type and dimensionality from context only. In this -case the compiler does not have enough information. To fix the code, we can explicitly give the element type through turbofish syntax, and let it infer the dimensionality type: + +Indeed, note that the compiler needs to infer the element type and dimensionality from context only. In this +case the compiler does not have enough information. 
To fix the code, we can explicitly give the element type through +turbofish syntax, and let it infer the dimensionality type: ```rust use ndarray::prelude::*; @@ -59,7 +69,9 @@ fn main() { println!("{:?}", a); } ``` + This code now compiles to what we wanted: + ``` [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], @@ -71,11 +83,15 @@ This code now compiles to what we wanted: [0.0, 0.0, 0.0, 0.0]]], shape=[3, 2, 4], strides=[1, 3, 6], layout=F (0x2), const ndim=3 ``` -We could also specify its dimensionality explicitly `Array::::zeros(...)`, with`Ix3` standing for 3D array type. Phew! We achieved type safety. If you tried changing the code above to `Array::::zeros((3, 2, 4, 5).f());`, which is not of dimension 3 anymore, Rust's type system would gracefully prevent you from compiling the code. +We could also specify its dimensionality explicitly `Array::::zeros(...)`, with`Ix3` standing for 3D array +type. Phew! We achieved type safety. If you tried changing the code above +to `Array::::zeros((3, 2, 4, 5).f());`, which is not of dimension 3 anymore, Rust's type system would +gracefully prevent you from compiling the code. ### Creating arrays with different initial values and/or different types -The [`from_elem`](http://docs.rs/ndarray/latest/ndarray/struct.ArrayBase.html#method.from_elem) method allows initializing an array of given dimension to a specific value of any type: +The [`from_elem`](http://docs.rs/ndarray/latest/ndarray/struct.ArrayBase.html#method.from_elem) method allows +initializing an array of given dimension to a specific value of any type: ```rust use ndarray::{Array, Ix3}; @@ -86,7 +102,9 @@ fn main() { ``` ### Some common array initializing helper functions + `linspace` - Create a 1-D array with 11 elements with values 0., …, 5. 
+ ```rust use ndarray::prelude::*; use ndarray::{Array, Ix3}; @@ -95,16 +113,21 @@ fn main() { println!("{:?}", a); } ``` + The output is: + ``` [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0], shape=[11], strides=[1], layout=C | F (0x3), const ndim=1 ``` -Common array initializing methods include [`range`](https://docs.rs/ndarray/0.13.0/ndarray/struct.ArrayBase.html#method.range), [`logspace`](https://docs.rs/ndarray/0.13.0/ndarray/struct.ArrayBase.html#method.logspace), [`eye`](https://docs.rs/ndarray/0.13.0/ndarray/struct.ArrayBase.html#method.eye), [`ones`](https://docs.rs/ndarray/0.13.0/ndarray/struct.ArrayBase.html#method.ones)... +Common array initializing methods +include [`range`](https://docs.rs/ndarray/0.13.0/ndarray/struct.ArrayBase.html#method.range), [`logspace`](https://docs.rs/ndarray/0.13.0/ndarray/struct.ArrayBase.html#method.logspace), [`eye`](https://docs.rs/ndarray/0.13.0/ndarray/struct.ArrayBase.html#method.eye), [`ones`](https://docs.rs/ndarray/0.13.0/ndarray/struct.ArrayBase.html#method.ones)... ## Basic operations -Basic operations on arrays are all element-wise; you need to use specific methods for operations such as matrix multiplication (see later section). +Basic operations on arrays are all element-wise; you need to use specific methods for operations such as matrix +multiplication (see later section). + ```rust use ndarray::prelude::*; use ndarray::Array; @@ -123,20 +146,20 @@ fn main() { } ``` - Note that (for any binary operator `@`): + * `&A @ &A` produces a new `Array` * `B @ A` consumes `B`, updates it with the result, and returns it * `B @ &A` consumes `B`, updates it with the result, and returns it * `C @= &A` performs an arithmetic operation in place -Try removing all the `&` sign in front of `a` and `b` in the last example: it will not compile anymore because of those rules. +Try removing all the `&` sign in front of `a` and `b` in the last example: it will not compile anymore because of those +rules. 
For more info checkout https://docs.rs/ndarray/latest/ndarray/struct.ArrayBase.html#arithmetic-operations - - -Some operations have `_axis` appended to the function name: they generally take in a parameter of type `Axis` as one of their inputs, +Some operations have `_axis` appended to the function name: they generally take in a parameter of type `Axis` as one of +their inputs, such as `sum_axis`: ```rust @@ -176,7 +199,9 @@ fn main() { println!("{}", a.t().dot(&b.t())); // [4, 1] x [1, 4] -> [4, 4] } ``` + The output is: + ``` a shape [1, 4] b shape [4] @@ -189,6 +214,7 @@ b shape after reshape [4, 1] ``` ## Indexing, Slicing and Iterating + One-dimensional arrays can be indexed, sliced and iterated over, much like `numpy` arrays ```rust @@ -215,7 +241,9 @@ fn main() { } } ``` + The output is: + ``` [0, 1, 8, 27, 64, 125, 216, 343, 512, 729] 8 @@ -225,9 +253,11 @@ The output is: 9.999999999999998, 1, 9.999999999999998, 3, 9.999999999999998, 4.999999999999999, 5.999999999999999, 6.999999999999999, 7.999999999999999, 8.999999999999998, ``` -For more info about iteration see [Loops, Producers, and Iterators](https://docs.rs/ndarray/0.13.0/ndarray/struct.ArrayBase.html#loops-producers-and-iterators) +For more info about iteration +see [Loops, Producers, and Iterators](https://docs.rs/ndarray/0.13.0/ndarray/struct.ArrayBase.html#loops-producers-and-iterators) + +Let's try a iterating over a 3D array with elements of type `isize`. This is how you index it: -Let's try a iterating over a 3D array with elements of type `isize`. This is how you index it: ```rust use ndarray::prelude::*; @@ -261,7 +291,9 @@ fn main() { } } ``` + The output is: + ``` a -> [[[0, 1, 2], @@ -295,6 +327,7 @@ row: [[100, 101, 102], ## Shape Manipulation ### Changing the shape of an array + The shape of an array can be changed with the `into_shape_with_order` or `to_shape` method. 
````rust @@ -323,7 +356,9 @@ fn main() { println!("c = \n{:?}", c); } ```` + The output is: + ``` a = [[3.0, 7.0, 3.0, 4.0], @@ -370,7 +405,9 @@ fn main() { println!("concatenate, axis 1:\n{:?}\n", concatenate![Axis(1), a, b]); } ``` + The output is: + ``` stack, axis 0: [[[3.0, 7.0, 8.0], @@ -409,6 +446,7 @@ concatenate, axis 1: ### Splitting one array into several smaller ones More to see here [ArrayView::split_at](https://docs.rs/ndarray/latest/ndarray/type.ArrayView.html#method.split_at) + ```rust use ndarray::prelude::*; use ndarray::Axis; @@ -431,7 +469,9 @@ fn main() { println!("s2 = \n{}\n", s2); } ``` + The output is: + ``` Split a from Axis(0), at index 1: s1 = @@ -450,9 +490,12 @@ s2 = ``` ## Copies and Views + ### View, Ref or Shallow Copy -Rust has ownership, so we cannot simply update an element of an array while we have a shared view of it. This brings guarantees & helps having more robust code. +Rust has ownership, so we cannot simply update an element of an array while we have a shared view of it. This brings +guarantees & helps having more robust code. + ```rust use ndarray::prelude::*; use ndarray::{Array, Axis}; @@ -482,7 +525,9 @@ fn main() { println!("s2 = \n{}\n", s2); } ``` + The output is: + ``` a = [[0, 1, 2, 3], @@ -511,8 +556,10 @@ s2 = ``` ### Deep Copy + As the usual way in Rust, a `clone()` call will make a copy of your array: + ```rust use ndarray::prelude::*; use ndarray::Array; @@ -534,6 +581,7 @@ fn main() { ``` The output is: + ``` a = [[0, 1], @@ -553,13 +601,16 @@ b clone of a = [2, 3]] ``` -Notice that using `clone()` (or cloning) an `Array` type also copies the array's elements. It creates an independently owned array of the same type. +Notice that using `clone()` (or cloning) an `Array` type also copies the array's elements. It creates an independently +owned array of the same type. 
-Cloning an `ArrayView` does not clone or copy the underlying elements - it only clones the view reference (as it happens in Rust when cloning a `&` reference). +Cloning an `ArrayView` does not clone or copy the underlying elements - it only clones the view reference (as it happens +in Rust when cloning a `&` reference). ## Broadcasting -Arrays support limited broadcasting, where arithmetic operations with array operands of different sizes can be carried out by repeating the elements of the smaller dimension array. +Arrays support limited broadcasting, where arithmetic operations with array operands of different sizes can be carried +out by repeating the elements of the smaller dimension array. ```rust use ndarray::prelude::*; @@ -585,9 +636,11 @@ fn main() { } ``` -See [.broadcast()](https://docs.rs/ndarray/latest/ndarray/struct.ArrayBase.html#method.broadcast) for a more detailed description. +See [.broadcast()](https://docs.rs/ndarray/latest/ndarray/struct.ArrayBase.html#method.broadcast) for a more detailed +description. And here is a short example of it: + ```rust use ndarray::prelude::*; @@ -602,7 +655,9 @@ fn main() { println!("a is broadcased to 3x2x2 = \n{}", b); } ``` + The output is: + ``` shape of a is [2, 2] a is broadcased to 3x2x2 = @@ -617,6 +672,8 @@ a is broadcased to 3x2x2 = ``` ## Want to learn more? + Please checkout these docs for more information + * [`ArrayBase` doc page](https://docs.rs/ndarray/latest/ndarray/struct.ArrayBase.html) * [`ndarray` for `numpy` user doc page](https://docs.rs/ndarray/latest/ndarray/doc/ndarray_for_numpy_users/index.html) diff --git a/RELEASES.md b/RELEASES.md index 364166718..07de2436f 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -26,7 +26,6 @@ Other changes https://github.com/rust-ndarray/ndarray/pull/1191 - Version 0.15.5 (2022-07-30) =========================== @@ -45,7 +44,6 @@ Other changes https://github.com/rust-ndarray/ndarray/pull/1134
https://github.com/rust-ndarray/ndarray/pull/1164 - Version 0.15.4 (2021-11-23) =========================== @@ -164,7 +162,6 @@ Other changes https://github.com/rust-ndarray/ndarray/pull/1114 - Version 0.15.3 (2021-06-05) =========================== @@ -215,7 +212,6 @@ Other changes https://github.com/rust-ndarray/ndarray/pull/1009 - Version 0.15.2 (2021-05-17 🇳🇴) ================================ @@ -225,8 +221,8 @@ New features - New methods for growing/appending to owned `Array`s. These methods allow building an array efficiently chunk by chunk. By [@bluss]. - - `.push_row()`, `.push_column()` - - `.push(axis, array)`, `.append(axis, array)` + - `.push_row()`, `.push_column()` + - `.push(axis, array)`, `.append(axis, array)` `stack`, `concatenate` and `.select()` now support all `Clone`-able elements as a result. @@ -306,7 +302,6 @@ Other changes https://github.com/rust-ndarray/ndarray/pull/1004 - Version 0.15.1 (2021-03-29) =========================== @@ -334,7 +329,6 @@ Other changes https://github.com/rust-ndarray/ndarray/pull/955
https://github.com/rust-ndarray/ndarray/pull/959 - Version 0.15.0 (2021-03-25) =========================== @@ -436,22 +430,22 @@ API changes - Changes to the slicing-related types and macro by [@jturner314] and [@bluss]: - - Remove the `Dimension::SliceArg` associated type, and add a new `SliceArg` - trait for this purpose. - - Change the return type of the `s![]` macro to an owned `SliceInfo` rather - than a reference. - - Replace the `SliceOrIndex` enum with `SliceInfoElem`, which has an - additional `NewAxis` variant and does not have a `step_by` method. - - Change the type parameters of `SliceInfo` in order to support the `NewAxis` - functionality and remove some tricky `unsafe` code. - - Mark the `SliceInfo::new` method as `unsafe`. The new implementations of - `TryFrom` can be used as a safe alternative. - - Remove the `AsRef> for SliceInfo` - implementation. Add the similar `From<&'a SliceInfo> for - SliceInfo<&'a [SliceInfoElem], Din, Dout>` conversion as an alternative. - - Change the *expr* `;` *step* case in the `s![]` macro to error at compile - time if an unsupported type for *expr* is used, instead of panicking at - runtime. + - Remove the `Dimension::SliceArg` associated type, and add a new `SliceArg` + trait for this purpose. + - Change the return type of the `s![]` macro to an owned `SliceInfo` rather + than a reference. + - Replace the `SliceOrIndex` enum with `SliceInfoElem`, which has an + additional `NewAxis` variant and does not have a `step_by` method. + - Change the type parameters of `SliceInfo` in order to support the `NewAxis` + functionality and remove some tricky `unsafe` code. + - Mark the `SliceInfo::new` method as `unsafe`. The new implementations of + `TryFrom` can be used as a safe alternative. + - Remove the `AsRef> for SliceInfo` + implementation. Add the similar `From<&'a SliceInfo> for + SliceInfo<&'a [SliceInfoElem], Din, Dout>` conversion as an alternative. 
+ - Change the *expr* `;` *step* case in the `s![]` macro to error at compile + time if an unsupported type for *expr* is used, instead of panicking at + runtime. https://github.com/rust-ndarray/ndarray/pull/570
https://github.com/rust-ndarray/ndarray/pull/940
@@ -460,10 +454,10 @@ API changes - Removed already deprecated methods by [@bluss]: - - Remove deprecated `.all_close()` - use approx feature and methods like `.abs_diff_eq` instead - - Mark `.scalar_sum()` as deprecated - use `.sum()` instead - - Remove deprecated `DataClone` - use `Data + RawDataClone` instead - - Remove deprecated `ArrayView::into_slice` - use `to_slice()` instead. + - Remove deprecated `.all_close()` - use approx feature and methods like `.abs_diff_eq` instead + - Mark `.scalar_sum()` as deprecated - use `.sum()` instead + - Remove deprecated `DataClone` - use `Data + RawDataClone` instead + - Remove deprecated `ArrayView::into_slice` - use `to_slice()` instead. https://github.com/rust-ndarray/ndarray/pull/874 @@ -474,10 +468,10 @@ API changes - Renamed `Zip` methods by [@bluss] and [@SparrowLii]: - - `apply` -> `for_each` - - `apply_collect` -> `map_collect` - - `apply_collect_into` -> `map_collect_into` - - (`par_` prefixed methods renamed accordingly) + - `apply` -> `for_each` + - `apply_collect` -> `map_collect` + - `apply_collect_into` -> `map_collect_into` + - (`par_` prefixed methods renamed accordingly) https://github.com/rust-ndarray/ndarray/pull/894
https://github.com/rust-ndarray/ndarray/pull/904
@@ -492,10 +486,10 @@ API changes - Renamed methods (old names are now deprecated) by [@bluss] and [@jturner314] - - `genrows/_mut` -> `rows/_mut` - - `gencolumns/_mut` -> `columns/_mut` - - `stack_new_axis` -> `stack` (the new name already existed) - - `visit` -> `for_each` + - `genrows/_mut` -> `rows/_mut` + - `gencolumns/_mut` -> `columns/_mut` + - `stack_new_axis` -> `stack` (the new name already existed) + - `visit` -> `for_each` https://github.com/rust-ndarray/ndarray/pull/872
https://github.com/rust-ndarray/ndarray/pull/937
@@ -506,7 +500,7 @@ API changes https://github.com/rust-ndarray/ndarray/pull/888
https://github.com/rust-ndarray/ndarray/pull/938
- + - Updated `num-complex` dependency to 0.4.0 by [@bluss] https://github.com/rust-ndarray/ndarray/pull/952 @@ -574,7 +568,6 @@ Other changes https://github.com/rust-ndarray/ndarray/pull/887 - Version 0.14.0 (2020-11-28) =========================== @@ -606,7 +599,7 @@ API changes - The **old function** `stack` has been renamed to `concatenate`. A new function `stack` with numpy-like semantics have taken its place. Old usages of `stack` should change to use `concatenate`. - + `concatenate` produces an array with the same number of axes as the inputs. `stack` produces an array that has one more axis than the inputs. @@ -620,24 +613,23 @@ API changes - Remove deprecated items: - - RcArray (deprecated alias for ArcArray) - - Removed `subview_inplace` use `collapse_axis` - - Removed `subview_mut` use `index_axis_mut` - - Removed `into_subview` use `index_axis_move` - - Removed `subview` use `index_axis` - - Removed `slice_inplace` use `slice_collapse` + - RcArray (deprecated alias for ArcArray) + - Removed `subview_inplace` use `collapse_axis` + - Removed `subview_mut` use `index_axis_mut` + - Removed `into_subview` use `index_axis_move` + - Removed `subview` use `index_axis` + - Removed `slice_inplace` use `slice_collapse` - Undeprecated `remove_axis` because its replacement is hard to find out on your own. 
- Update public external dependencies to new versions by [@Eijebong] and [@bluss] - - num-complex 0.3 - - approx 0.4 (optional) - - blas-src 0.6.1 and openblas-src 0.9.0 (optional) + - num-complex 0.3 + - approx 0.4 (optional) + - blas-src 0.6.1 and openblas-src 0.9.0 (optional) https://github.com/rust-ndarray/ndarray/pull/810 - https://github.com/rust-ndarray/ndarray/pull/851 - + https://github.com/rust-ndarray/ndarray/pull/851 Other changes ------------- @@ -703,203 +695,205 @@ Other changes https://github.com/rust-ndarray/ndarray/pull/802 - Release management by [@bluss] - Version 0.13.0 (2019-09-23) =========================== New features ------------ - - `ndarray-parallel` is merged into `ndarray`. Use the `rayon` feature-flag to get access to parallel iterators and - other parallelized methods. - ([#563](https://github.com/rust-ndarray/ndarray/pull/563/files) by [@bluss]) - - Add `logspace` and `geomspace` constructors - ([#617](https://github.com/rust-ndarray/ndarray/pull/617) by [@JP-Ellis]) - - Implement approx traits for `ArrayBase`. They can be enabled using the `approx` feature-flag. 
- ([#581](https://github.com/rust-ndarray/ndarray/pull/581) by [@jturner314]) - - Add `mean` method - ([#580](https://github.com/rust-ndarray/ndarray/pull/580) by [@LukeMathWalker]) - - Add `Zip::all` to check if all elements satisfy a predicate - ([#615](https://github.com/rust-ndarray/ndarray/pull/615) by [@mneumann]) - - Add `RawArrayView` and `RawArrayViewMut` types and `RawData`, `RawDataMut`, and `RawDataClone` traits - ([#496](https://github.com/rust-ndarray/ndarray/pull/496) by [@jturner314]) - - Add `CowArray`, `C`lone `o`n `write` array - ([#632](https://github.com/rust-ndarray/ndarray/pull/632) by [@jturner314] and [@andrei-papou]) - - Add `as_standard_layout` to `ArrayBase`: it takes an array by reference and returns a `CoWArray` in standard layout - ([#616](https://github.com/rust-ndarray/ndarray/pull/616) by [@jturner314] and [@andrei-papou]) - - Add `Array2::from_diag` method to create 2D arrays from a diagonal - ([#673](https://github.com/rust-ndarray/ndarray/pull/673) by [@rth]) - - Add `fold` method to `Zip` - ([#684](https://github.com/rust-ndarray/ndarray/pull/684) by [@jturner314]) - - Add `split_at` method to `AxisChunksIter/Mut` - ([#691](https://github.com/rust-ndarray/ndarray/pull/691) by [@jturner314]) - - Implement parallel iteration for `AxisChunksIter/Mut` - ([#639](https://github.com/rust-ndarray/ndarray/pull/639) by [@nitsky]) - - Add `into_scalar` method to `ArrayView0` and `ArrayViewMut0` - ([#700](https://github.com/rust-ndarray/ndarray/pull/700) by [@LukeMathWalker]) - - Add `accumulate_axis_inplace` method to `ArrayBase` - ([#611](https://github.com/rust-ndarray/ndarray/pull/611) by [@jturner314] and [@bluss]) - - Add the `array!`, `azip!`, and `s!` macros to `ndarray::prelude` - ([#517](https://github.com/rust-ndarray/ndarray/pull/517) by [@jturner314]) +- `ndarray-parallel` is merged into `ndarray`. Use the `rayon` feature-flag to get access to parallel iterators and + other parallelized methods. 
+ ([#563](https://github.com/rust-ndarray/ndarray/pull/563/files) by [@bluss]) +- Add `logspace` and `geomspace` constructors + ([#617](https://github.com/rust-ndarray/ndarray/pull/617) by [@JP-Ellis]) +- Implement approx traits for `ArrayBase`. They can be enabled using the `approx` feature-flag. + ([#581](https://github.com/rust-ndarray/ndarray/pull/581) by [@jturner314]) +- Add `mean` method + ([#580](https://github.com/rust-ndarray/ndarray/pull/580) by [@LukeMathWalker]) +- Add `Zip::all` to check if all elements satisfy a predicate + ([#615](https://github.com/rust-ndarray/ndarray/pull/615) by [@mneumann]) +- Add `RawArrayView` and `RawArrayViewMut` types and `RawData`, `RawDataMut`, and `RawDataClone` traits + ([#496](https://github.com/rust-ndarray/ndarray/pull/496) by [@jturner314]) +- Add `CowArray`, `C`lone `o`n `write` array + ([#632](https://github.com/rust-ndarray/ndarray/pull/632) by [@jturner314] and [@andrei-papou]) +- Add `as_standard_layout` to `ArrayBase`: it takes an array by reference and returns a `CoWArray` in standard layout + ([#616](https://github.com/rust-ndarray/ndarray/pull/616) by [@jturner314] and [@andrei-papou]) +- Add `Array2::from_diag` method to create 2D arrays from a diagonal + ([#673](https://github.com/rust-ndarray/ndarray/pull/673) by [@rth]) +- Add `fold` method to `Zip` + ([#684](https://github.com/rust-ndarray/ndarray/pull/684) by [@jturner314]) +- Add `split_at` method to `AxisChunksIter/Mut` + ([#691](https://github.com/rust-ndarray/ndarray/pull/691) by [@jturner314]) +- Implement parallel iteration for `AxisChunksIter/Mut` + ([#639](https://github.com/rust-ndarray/ndarray/pull/639) by [@nitsky]) +- Add `into_scalar` method to `ArrayView0` and `ArrayViewMut0` + ([#700](https://github.com/rust-ndarray/ndarray/pull/700) by [@LukeMathWalker]) +- Add `accumulate_axis_inplace` method to `ArrayBase` + ([#611](https://github.com/rust-ndarray/ndarray/pull/611) by [@jturner314] and [@bluss]) +- Add the `array!`, `azip!`, and 
`s!` macros to `ndarray::prelude` + ([#517](https://github.com/rust-ndarray/ndarray/pull/517) by [@jturner314]) Enhancements ------------ - - Improve performance for matrix multiplications when using the pure-Rust backend thanks to `matrix-multiply:v0.2` - (leverage SIMD instructions on x86-64 with runtime feature detection) - ([#556](https://github.com/rust-ndarray/ndarray/pull/556) by [@bluss]) - - Improve performance of `fold` for iterators - ([#574](https://github.com/rust-ndarray/ndarray/pull/574) by [@jturner314]) - - Improve performance of `nth_back` for iterators - ([#686](https://github.com/rust-ndarray/ndarray/pull/686) by [@jturner314]) - - Improve performance of iterators for 1-d arrays - ([#614](https://github.com/rust-ndarray/ndarray/pull/614) by [@andrei-papou]) - - Improve formatting for large arrays - ([#606](https://github.com/rust-ndarray/ndarray/pull/606) by [@andrei-papou] and [@LukeMathWalker], - [#633](https://github.com/rust-ndarray/ndarray/pull/633) and [#707](https://github.com/rust-ndarray/ndarray/pull/707) by [@jturner314], - and [#713](https://github.com/rust-ndarray/ndarray/pull/713) by [@bluss]) - - Arithmetic operations between arrays with different element types are now allowed when there is a scalar equivalent - ([#588](https://github.com/rust-ndarray/ndarray/pull/588) by [@jturner314]) - - `.map_axis/_mut` won't panic on 0-length `axis` - ([#579](https://github.com/rust-ndarray/ndarray/pull/612) by [@andrei-papou]) - - Various documentation improvements (by [@jturner314], [@JP-Ellis], [@LukeMathWalker], [@bluss]) + +- Improve performance for matrix multiplications when using the pure-Rust backend thanks to `matrix-multiply:v0.2` + (leverage SIMD instructions on x86-64 with runtime feature detection) + ([#556](https://github.com/rust-ndarray/ndarray/pull/556) by [@bluss]) +- Improve performance of `fold` for iterators + ([#574](https://github.com/rust-ndarray/ndarray/pull/574) by [@jturner314]) +- Improve performance of `nth_back` 
for iterators + ([#686](https://github.com/rust-ndarray/ndarray/pull/686) by [@jturner314]) +- Improve performance of iterators for 1-d arrays + ([#614](https://github.com/rust-ndarray/ndarray/pull/614) by [@andrei-papou]) +- Improve formatting for large arrays + ([#606](https://github.com/rust-ndarray/ndarray/pull/606) by [@andrei-papou] and [@LukeMathWalker], + [#633](https://github.com/rust-ndarray/ndarray/pull/633) and [#707](https://github.com/rust-ndarray/ndarray/pull/707) + by [@jturner314], + and [#713](https://github.com/rust-ndarray/ndarray/pull/713) by [@bluss]) +- Arithmetic operations between arrays with different element types are now allowed when there is a scalar equivalent + ([#588](https://github.com/rust-ndarray/ndarray/pull/588) by [@jturner314]) +- `.map_axis/_mut` won't panic on 0-length `axis` + ([#612](https://github.com/rust-ndarray/ndarray/pull/612) by [@andrei-papou]) +- Various documentation improvements (by [@jturner314], [@JP-Ellis], [@LukeMathWalker], [@bluss]) API changes ----------- - - The `into_slice` method on ArrayView is deprecated and renamed to `to_slice` - ([#646](https://github.com/rust-ndarray/ndarray/pull/646) by [@max-sixty]) - - `RcArray` is deprecated in favour of `ArcArray` - ([#560](https://github.com/rust-ndarray/ndarray/pull/560) by [@bluss]) - - `into_slice` is renamed to `to_slice`.
`serde` is the recommended feature-flag for serialization - ([#557](https://github.com/rust-ndarray/ndarray/pull/557) by [@bluss]) - - `rows`/`cols` are renamed to `nrows`/`ncols`. `rows`/`cols` are now deprecated - ([#701](https://github.com/rust-ndarray/ndarray/pull/701) by [@bluss]) - - The usage of the `azip!` macro has changed to be more similar to `for` loops - ([#626](https://github.com/rust-ndarray/ndarray/pull/626) by [@jturner314]) - - For `var_axis` and `std_axis`, the constraints on `ddof` and the trait bounds on `A` have been made more strict - ([#515](https://github.com/rust-ndarray/ndarray/pull/515) by [@jturner314]) - - For `mean_axis`, the constraints on `A` have changed - ([#518](https://github.com/rust-ndarray/ndarray/pull/518) by [@jturner314]) - - `DataClone` is deprecated in favor of using `Data + RawDataClone` - ([#496](https://github.com/rust-ndarray/ndarray/pull/496) by [@jturner314]) - - The `Dimension::Pattern` associated type now has more trait bounds - ([#634](https://github.com/rust-ndarray/ndarray/pull/634) by [@termoshtt]) - - `Axis::index()` now takes `self` instead of `&self` - ([#642](https://github.com/rust-ndarray/ndarray/pull/642) by [@max-sixty]) - - The bounds on the implementation of `Hash` for `Dim` have changed - ([#642](https://github.com/rust-ndarray/ndarray/pull/642) by [@max-sixty]) + +- The `into_slice` method on ArrayView is deprecated and renamed to `to_slice` + ([#646](https://github.com/rust-ndarray/ndarray/pull/646) by [@max-sixty]) +- `RcArray` is deprecated in favour of `ArcArray` + ([#560](https://github.com/rust-ndarray/ndarray/pull/560) by [@bluss]) +- `into_slice` is renamed to `to_slice`. 
`into_slice` is now deprecated + ([#646](https://github.com/rust-ndarray/ndarray/pull/646) by [@max-sixty]) +- `from_vec` is deprecated in favour of using the `From` to convert a `Vec` into an `Array` + ([#648](https://github.com/rust-ndarray/ndarray/pull/648) by [@max-sixty]) +- `mean_axis` returns `Option<A>` instead of `A`, to avoid panicking when invoked on a 0-length axis + ([#580](https://github.com/rust-ndarray/ndarray/pull/580) by [@LukeMathWalker]) +- Remove `rustc-serialize` feature-flag. `serde` is the recommended feature-flag for serialization + ([#557](https://github.com/rust-ndarray/ndarray/pull/557) by [@bluss]) +- `rows`/`cols` are renamed to `nrows`/`ncols`. `rows`/`cols` are now deprecated + ([#701](https://github.com/rust-ndarray/ndarray/pull/701) by [@bluss]) +- The usage of the `azip!` macro has changed to be more similar to `for` loops + ([#626](https://github.com/rust-ndarray/ndarray/pull/626) by [@jturner314]) +- For `var_axis` and `std_axis`, the constraints on `ddof` and the trait bounds on `A` have been made more strict + ([#515](https://github.com/rust-ndarray/ndarray/pull/515) by [@jturner314]) +- For `mean_axis`, the constraints on `A` have changed + ([#518](https://github.com/rust-ndarray/ndarray/pull/518) by [@jturner314]) +- `DataClone` is deprecated in favor of using `Data + RawDataClone` + ([#496](https://github.com/rust-ndarray/ndarray/pull/496) by [@jturner314]) +- The `Dimension::Pattern` associated type now has more trait bounds + ([#634](https://github.com/rust-ndarray/ndarray/pull/634) by [@termoshtt]) +- `Axis::index()` now takes `self` instead of `&self` + ([#642](https://github.com/rust-ndarray/ndarray/pull/642) by [@max-sixty]) +- The bounds on the implementation of `Hash` for `Dim` have changed + ([#642](https://github.com/rust-ndarray/ndarray/pull/642) by [@max-sixty]) Bug fixes --------- - - Prevent overflow when computing strides in `do_slice` - ([#575](https://github.com/rust-ndarray/ndarray/pull/575) by [@jturner314])
- - Fix issue with BLAS matrix-vector multiplication for array with only 1 non-trivial dimension - ([#585](https://github.com/rust-ndarray/ndarray/pull/585) by [@sebasv]) - - Fix offset computation to avoid UB/panic when slicing in some edge cases - ([#636](https://github.com/rust-ndarray/ndarray/pull/636) by [@jturner314]) - - Fix issues with axis iterators - ([#669](https://github.com/rust-ndarray/ndarray/pull/669) by [@jturner314]) - - Fix handling of empty input to `s!` macro - ([#714](https://github.com/rust-ndarray/ndarray/pull/714) by [@bluss] and [#715](https://github.com/rust-ndarray/ndarray/pull/715) by [@jturner314]) + +- Prevent overflow when computing strides in `do_slice` + ([#575](https://github.com/rust-ndarray/ndarray/pull/575) by [@jturner314]) +- Fix issue with BLAS matrix-vector multiplication for array with only 1 non-trivial dimension + ([#585](https://github.com/rust-ndarray/ndarray/pull/585) by [@sebasv]) +- Fix offset computation to avoid UB/panic when slicing in some edge cases + ([#636](https://github.com/rust-ndarray/ndarray/pull/636) by [@jturner314]) +- Fix issues with axis iterators + ([#669](https://github.com/rust-ndarray/ndarray/pull/669) by [@jturner314]) +- Fix handling of empty input to `s!` macro + ([#714](https://github.com/rust-ndarray/ndarray/pull/714) by [@bluss] + and [#715](https://github.com/rust-ndarray/ndarray/pull/715) by [@jturner314]) Other changes ------------- - - Various improvements to `ndarray`'s CI pipeline (`clippy`, `cargo fmt`, etc. by [@max-sixty] and [@termoshtt]) - - Bump minimum required Rust version to 1.37. +- Various improvements to `ndarray`'s CI pipeline (`clippy`, `cargo fmt`, etc. by [@max-sixty] and [@termoshtt]) +- Bump minimum required Rust version to 1.37. Version 0.12.1 (2018-11-21) =========================== - - Add `std_axis` method for computing standard deviation by @LukeMathWalker. - - Add `product` method for computing product of elements in an array by @sebasv. 
- - Add `first` and `first_mut` methods for getting the first element of an array. - - Add `into_scalar` method for converting an `Array0` into its element. - - Add `insert_axis_inplace` and `index_axis_inplace` methods for inserting and - removing axes in dynamic-dimensional (`IxDyn`) arrays without taking ownership. - - Add `stride_of` method for getting the stride of an axis. - - Add public `ndim` and `zeros` methods to `Dimension` trait. - - Rename `scalar_sum` to `sum`, `subview` to `index_axis`, - `subview_mut` to `index_axis_mut`, `subview_inplace` to - `collapse_axis`, `into_subview` to `index_axis_move`, and - `slice_inplace` to `slice_collapse` (deprecating the old names, - except for `scalar_sum` which will be in 0.13). - - Deprecate `remove_axis` and fix soundness hole when removing a zero-length axis. - - Implement `Clone` for `LanesIter`. - - Implement `Debug`, `Copy`, and `Clone` for `FoldWhile`. - - Relax constraints on `sum_axis`, `mean_axis`, and `into_owned`. - - Add number of dimensions (and whether it's const or dynamic) to array `Debug` format. - - Allow merging axes with `merge_axes` when either axis length is ≤ 1. - - Clarify and check more precise safety requirements for constructing arrays. - This fixes undefined behavior in some edge cases. - (See [#543](https://github.com/rust-ndarray/ndarray/pull/543).) - - Fix `is_standard_layout` in some edge cases. - (See [#543](https://github.com/rust-ndarray/ndarray/pull/543).) - - Fix chunk sizes in `axis_chunks_iter` and `axis_chunks_iter_mut` when - the stride is zero or the array element type is zero-sized by @bluss. - - Improve documentation by @jturner314, @bluss, and @paulkernfeld. - - Improve element iterators with implementations of `Iterator::rfold`. - - Miscellaneous internal implementation improvements by @jturner314 and @bluss. - +- Add `std_axis` method for computing standard deviation by @LukeMathWalker. 
+- Add `product` method for computing product of elements in an array by @sebasv. +- Add `first` and `first_mut` methods for getting the first element of an array. +- Add `into_scalar` method for converting an `Array0` into its element. +- Add `insert_axis_inplace` and `index_axis_inplace` methods for inserting and + removing axes in dynamic-dimensional (`IxDyn`) arrays without taking ownership. +- Add `stride_of` method for getting the stride of an axis. +- Add public `ndim` and `zeros` methods to `Dimension` trait. +- Rename `scalar_sum` to `sum`, `subview` to `index_axis`, + `subview_mut` to `index_axis_mut`, `subview_inplace` to + `collapse_axis`, `into_subview` to `index_axis_move`, and + `slice_inplace` to `slice_collapse` (deprecating the old names, + except for `scalar_sum` which will be in 0.13). +- Deprecate `remove_axis` and fix soundness hole when removing a zero-length axis. +- Implement `Clone` for `LanesIter`. +- Implement `Debug`, `Copy`, and `Clone` for `FoldWhile`. +- Relax constraints on `sum_axis`, `mean_axis`, and `into_owned`. +- Add number of dimensions (and whether it's const or dynamic) to array `Debug` format. +- Allow merging axes with `merge_axes` when either axis length is ≤ 1. +- Clarify and check more precise safety requirements for constructing arrays. + This fixes undefined behavior in some edge cases. + (See [#543](https://github.com/rust-ndarray/ndarray/pull/543).) +- Fix `is_standard_layout` in some edge cases. + (See [#543](https://github.com/rust-ndarray/ndarray/pull/543).) +- Fix chunk sizes in `axis_chunks_iter` and `axis_chunks_iter_mut` when + the stride is zero or the array element type is zero-sized by @bluss. +- Improve documentation by @jturner314, @bluss, and @paulkernfeld. +- Improve element iterators with implementations of `Iterator::rfold`. +- Miscellaneous internal implementation improvements by @jturner314 and @bluss. 
Version 0.12.0 (2018-09-01) =========================== - - Add `var_axis` method for computing variance by @LukeMathWalker. - - Add `map_mut` and `map_axis_mut` methods (mutable variants of `map` and `map_axis`) by @LukeMathWalker. - - Add support for 128-bit integer scalars (`i128` and `u128`). - - Add support for slicing with inclusive ranges (`start..=end` and `..=end`). - - Relax constraint on closure from `Fn` to `FnMut` for `mapv`, `mapv_into`, `map_inplace` and `mapv_inplace`. - - Implement `TrustedIterator` for `IterMut`. - - Bump `num-traits` and `num-complex` to version `0.2`. - - Bump `blas-src` to version `0.2`. - - Bump minimum required Rust version to 1.27. - - Additional contributors to this release: @ExpHP, @jturner314, @alexbool, @messense, @danmack, @nbro +- Add `var_axis` method for computing variance by @LukeMathWalker. +- Add `map_mut` and `map_axis_mut` methods (mutable variants of `map` and `map_axis`) by @LukeMathWalker. +- Add support for 128-bit integer scalars (`i128` and `u128`). +- Add support for slicing with inclusive ranges (`start..=end` and `..=end`). +- Relax constraint on closure from `Fn` to `FnMut` for `mapv`, `mapv_into`, `map_inplace` and `mapv_inplace`. +- Implement `TrustedIterator` for `IterMut`. +- Bump `num-traits` and `num-complex` to version `0.2`. +- Bump `blas-src` to version `0.2`. +- Bump minimum required Rust version to 1.27. +- Additional contributors to this release: @ExpHP, @jturner314, @alexbool, @messense, @danmack, @nbro Version 0.11.2 (2018-03-21) =========================== - - New documentation; @jturner314 has written a large “ndarray for NumPy users” - document, which we include in rustdoc. - [Read it here](https://docs.rs/ndarray/0.11/ndarray/doc/ndarray_for_numpy_users/) - a useful quick guide for any user, and in particular if you are familiar - with numpy. - - Add `ArcArray`. 
`RcArray` has become `ArcArray`; it is now using thread - safe reference counting just like `Arc`; this means that shared ownership - arrays are now `Send/Sync` if the corresponding element type is `Send +- New documentation; @jturner314 has written a large “ndarray for NumPy users” + document, which we include in rustdoc. + [Read it here](https://docs.rs/ndarray/0.11/ndarray/doc/ndarray_for_numpy_users/) + a useful quick guide for any user, and in particular if you are familiar + with numpy. +- Add `ArcArray`. `RcArray` has become `ArcArray`; it is now using thread + safe reference counting just like `Arc`; this means that shared ownership + arrays are now `Send/Sync` if the corresponding element type is `Send + Sync`. - - Add array method `.permute_axes()` by @jturner314 - - Add array constructor `Array::ones` by @ehsanmok - - Add the method `.reborrow()` to `ArrayView/Mut`, which can be used - to shorten the lifetime of an array view; in a reference-like type this - normally happens implicitly but for technical reasons the views have - an invariant lifetime parameter. - - Fix an issue with type inference, the dimensionality of an array - should not infer correctly in more cases when using slicing. By @jturner314. - +- Add array method `.permute_axes()` by @jturner314 +- Add array constructor `Array::ones` by @ehsanmok +- Add the method `.reborrow()` to `ArrayView/Mut`, which can be used + to shorten the lifetime of an array view; in a reference-like type this + normally happens implicitly but for technical reasons the views have + an invariant lifetime parameter. +- Fix an issue with type inference, the dimensionality of an array + should now infer correctly in more cases when using slicing. By @jturner314.
Version 0.11.1 (2018-01-21) =========================== - - Dimension types (`Ix1, Ix2, .., IxDyn`) now implement `Hash` by - @jturner314 - - Blas integration can now use *gemv* for matrix-vector multiplication also - when the matrix is f-order by @maciejkula - - Encapsulated `unsafe` code blocks in the `s![]` macro are now exempted - from the `unsafe_code` lint by @jturner314 +- Dimension types (`Ix1, Ix2, .., IxDyn`) now implement `Hash` by + @jturner314 +- Blas integration can now use *gemv* for matrix-vector multiplication also + when the matrix is f-order by @maciejkula +- Encapsulated `unsafe` code blocks in the `s![]` macro are now exempted + from the `unsafe_code` lint by @jturner314 Version 0.11.0 (2017-12-29) =========================== [Release announcement](https://jim.turner.link/pages/ndarray-0.11/) - - Allow combined slicing and subviews in a single operation by @jturner314 and - @bluss +- Allow combined slicing and subviews in a single operation by @jturner314 and + @bluss * Add support for individual indices (to indicate subviews) to the `s![]` macro, and change the return type to @@ -909,725 +903,763 @@ Version 0.11.0 (2017-12-29) * Replace the `Si` type with `SliceOrIndex`. * Add a new `Slice` type that is similar to the old `Si` type. - - Add support for more index types (e.g. `usize`) to the `s![]` macro by - @jturner314 - - Rename `.islice()` to `.slice_inplace()` by @jturner314 - - Rename `.isubview()` to `.subview_inplace()` by @jturner314 - - Add `.slice_move()`, `.slice_axis()`, `.slice_axis_mut()`, and - `.slice_axis_inplace()` methods by @jturner314 - - Add `Dimension::NDIM` associated constant by @jturner314 - - Change trait bounds for arithmetic ops between an array (by value) and - a reference to an array or array view (“array1 (op) &array2”); before, - an `ArrayViewMut` was supported on the left hand side, now, the left - hand side must not be a view. 
- ([#380](https://github.com/rust-ndarray/ndarray/pull/380)) by @jturner314 - - Remove deprecated methods (`.whole_chunks()`, `.whole_chunks_mut()`, - `.sum()`, and `.mean()`; replaced by `.exact_chunks()`, - `.exact_chunks_mut()`, `.sum_axis()`, and `.mean_axis()`, - respectively) by @bluss - - Updated to the latest blas (optional) dependencies. See instructions in the - README. - - Minimum required Rust version is 1.22. - +- Add support for more index types (e.g. `usize`) to the `s![]` macro by + @jturner314 +- Rename `.islice()` to `.slice_inplace()` by @jturner314 +- Rename `.isubview()` to `.subview_inplace()` by @jturner314 +- Add `.slice_move()`, `.slice_axis()`, `.slice_axis_mut()`, and + `.slice_axis_inplace()` methods by @jturner314 +- Add `Dimension::NDIM` associated constant by @jturner314 +- Change trait bounds for arithmetic ops between an array (by value) and + a reference to an array or array view (“array1 (op) &array2”); before, + an `ArrayViewMut` was supported on the left hand side, now, the left + hand side must not be a view. + ([#380](https://github.com/rust-ndarray/ndarray/pull/380)) by @jturner314 +- Remove deprecated methods (`.whole_chunks()`, `.whole_chunks_mut()`, + `.sum()`, and `.mean()`; replaced by `.exact_chunks()`, + `.exact_chunks_mut()`, `.sum_axis()`, and `.mean_axis()`, + respectively) by @bluss +- Updated to the latest blas (optional) dependencies. See instructions in the + README. +- Minimum required Rust version is 1.22. 
Earlier releases ================ - 0.10.13 - - Add an extension trait for longer-life indexing methods for array views - (`IndexLonger`) by @termoshtt and @bluss - - The `a.dot(b)` method now supports a vector times matrix multiplication - by @jturner314 - - More general `.into_owned()` method by @jturner314 + - Add an extension trait for longer-life indexing methods for array views + (`IndexLonger`) by @termoshtt and @bluss + - The `a.dot(b)` method now supports a vector times matrix multiplication + by @jturner314 + - More general `.into_owned()` method by @jturner314 - 0.10.12 - - Implement serde serialization for `IxDyn`, so that arrays and array views - using it are serializable as well. + - Implement serde serialization for `IxDyn`, so that arrays and array views + using it are serializable as well. - 0.10.11 - - Add method `.uswap(a, b)` for unchecked swap by @jturner314 - - Bump private dependencies (itertools 0.7) + - Add method `.uswap(a, b)` for unchecked swap by @jturner314 + - Bump private dependencies (itertools 0.7) - 0.10.10 - - Fix crash with zero size arrays in the fallback matrix multiplication code - (#365) by @jturner314 + - Fix crash with zero size arrays in the fallback matrix multiplication code + (#365) by @jturner314 - 0.10.9 - - Fix crash in `Array::from_shape_fn` when creating an f-order array - with zero elements (#361) by @jturner314 + - Fix crash in `Array::from_shape_fn` when creating an f-order array + with zero elements (#361) by @jturner314 - 0.10.8 - - Add method `.insert_axis()` to arrays and array views by @jturner314 + - Add method `.insert_axis()` to arrays and array views by @jturner314 - 0.10.7 - - Add method `.is_empty()` to arrays and array views by @iamed2 - - Support optional trailing commas in the `array![]` macro by Alex Burka - - Added an example of permuting/sorting along an axis to the sources + - Add method `.is_empty()` to arrays and array views by @iamed2 + - Support optional trailing commas in the `array![]` 
macro by Alex Burka + - Added an example of permuting/sorting along an axis to the sources - 0.10.6 - - Tweak the implementation for (bounds checked) indexing of arrays - ([] operator). The new code will have the optimizer elide the bounds checks - in more situations. + - Tweak the implementation for (bounds checked) indexing of arrays + ([] operator). The new code will have the optimizer elide the bounds checks + in more situations. - 0.10.5 - - Add method `.into_dimensionality::<D>()` for dimensionality conversion - (From `IxDyn` to fixed size and back). - - New names `.sum_axis` and `.mean_axis` for sum and mean functions. - Old names deprecated to make room for scalar-returning methods, making - a proper convention. - - Fix deserialization using ron (#345) by @Libbum + - Add method `.into_dimensionality::<D>()` for dimensionality conversion + (From `IxDyn` to fixed size and back). + - New names `.sum_axis` and `.mean_axis` for sum and mean functions. + Old names deprecated to make room for scalar-returning methods, making + a proper convention. + - Fix deserialization using ron (#345) by @Libbum - 0.10.4 - - Fix unused mut warnings in `azip!()` macro - - Fix bug #340 by @lloydmeta; uses blas gemm for more memory layouts - of column matrices. Only relevant if using blas. + - Fix unused mut warnings in `azip!()` macro + - Fix bug #340 by @lloydmeta; uses blas gemm for more memory layouts + of column matrices. Only relevant if using blas. - 0.10.3 - - Fix docs.rs doc build + - Fix docs.rs doc build - 0.10.2 - - Support trailing commas in the `s![]` macro - - Some documentation improvements for the introduction, for `azip!()` and - other places. - - Added two more examples in the source + - Support trailing commas in the `s![]` macro + - Some documentation improvements for the introduction, for `azip!()` and + other places. + - Added two more examples in the source - 0.10.1 - - Add method `.into_dyn()` to convert to a dynamic dimensionality array - or array view.
By @bobogei81123 - - Edit docs for the fact that type alias pages now show methods. - See the doc pages for `Array` and `ArrayView` and the other aliases. - - Edit docs for `Zip` + - Add method `.into_dyn()` to convert to a dynamic dimensionality array + or array view. By @bobogei81123 + - Edit docs for the fact that type alias pages now show methods. + See the doc pages for `Array` and `ArrayView` and the other aliases. + - Edit docs for `Zip` - 0.10.0 - - Upgrade to Serde 1.0. Crate feature name is `serde-1`. - - Require Rust 1.18. The `pub(crate)` feature is that important. + - Upgrade to Serde 1.0. Crate feature name is `serde-1`. + - Require Rust 1.18. The `pub(crate)` feature is that important. - 0.9.1 - - Fix `Array::from_shape_fn` to give correct indices for f-order shapes - - Fix `Array::from_shape_fn` to panic correctly on shape size overflow + - Fix `Array::from_shape_fn` to give correct indices for f-order shapes + - Fix `Array::from_shape_fn` to panic correctly on shape size overflow - 0.9.0 [Release Announcement](https://bluss.github.io//rust/2017/04/09/ndarray-0.9/) - - Add `Zip::indexed` - - New methods `genrows/_mut, gencolumns/_mut, lanes/_mut` that - return iterable producers (producer means `Zip` compatible). - - New method `.windows()` by @Robbepop, returns an iterable producer - - New function `general_mat_vec_mul` (with fast default and blas acceleration) - - `Zip::apply` and `fold_while` now take `self` as the first argument - - `indices/_of` now return iterable producers (not iterator) - - No allocation for short `IxDyn`. - - Remove `Ix, Ixs` from the prelude - - Remove deprecated `Axis::axis` method (use `.index()`) - - Rename `.whole_chunks` to `.exact_chunks`. - - Remove `.inner_iter` in favour of the new `.genrows()` method. 
- - Iterators and similar structs are now scoped under `ndarray::iter` - - `IntoNdProducer` now has the `Item` associated type - - Owned array storage types are now encapsulated in newtypes - - `FoldWhile` got the method `is_done`. - - Arrays now implement formatting trait `Binary` if elements do - - Internal changes. `NdProducer` generalized. `Dimension` gets - the `Smaller` type parameter. Internal traits have the private marker now. - - `#` (alternate) in formatting does nothing now. - - Require Rust 1.15 + - Add `Zip::indexed` + - New methods `genrows/_mut, gencolumns/_mut, lanes/_mut` that + return iterable producers (producer means `Zip` compatible). + - New method `.windows()` by @Robbepop, returns an iterable producer + - New function `general_mat_vec_mul` (with fast default and blas acceleration) + - `Zip::apply` and `fold_while` now take `self` as the first argument + - `indices/_of` now return iterable producers (not iterator) + - No allocation for short `IxDyn`. + - Remove `Ix, Ixs` from the prelude + - Remove deprecated `Axis::axis` method (use `.index()`) + - Rename `.whole_chunks` to `.exact_chunks`. + - Remove `.inner_iter` in favour of the new `.genrows()` method. + - Iterators and similar structs are now scoped under `ndarray::iter` + - `IntoNdProducer` now has the `Item` associated type + - Owned array storage types are now encapsulated in newtypes + - `FoldWhile` got the method `is_done`. + - Arrays now implement formatting trait `Binary` if elements do + - Internal changes. `NdProducer` generalized. `Dimension` gets + the `Smaller` type parameter. Internal traits have the private marker now. + - `#` (alternate) in formatting does nothing now. 
+ - Require Rust 1.15 - 0.8.4 - - Use `Zip` in `.all_close()` (performance improvement) - - Use `#[inline]` on a function used for higher dimensional checked - indexing (performance improvement for arrays of ndim >= 3) - - `.subview()` has a more elaborate panic message + - Use `Zip` in `.all_close()` (performance improvement) + - Use `#[inline]` on a function used for higher dimensional checked + indexing (performance improvement for arrays of ndim >= 3) + - `.subview()` has a more elaborate panic message - 0.8.3 - - Fix a bug in `Zip` / `NdProducer` if an array of at least 3 dimensions - was contig but not c- nor f-contig. - - `WholeChunksIter/Mut` now impl `Send/Sync` as appropriate - - Misc cleanup and using dimension-reducing versions of inner_iter - internally. Remove a special case in `zip_mut_with` that only made it - slower (1D not-contig arrays). + - Fix a bug in `Zip` / `NdProducer` if an array of at least 3 dimensions + was contig but not c- nor f-contig. + - `WholeChunksIter/Mut` now impl `Send/Sync` as appropriate + - Misc cleanup and using dimension-reducing versions of inner_iter + internally. Remove a special case in `zip_mut_with` that only made it + slower (1D not-contig arrays). - 0.8.2 - - Add more documentation and an example for dynamic dimensions: see - [`IxDyn`](https://docs.rs/ndarray/0.8.2/ndarray/type.IxDyn.html). - `IxDyn` will have a representation change next incompatible - version. Use it as a type alias for best forward compatibility. - - Add iterable and producer `.whole_chunks_mut(size)`. - - Fix a bug in `whole_chunks`: it didn't check the dimensionality of the - requested chunk size properly (an `IxDyn`-only bug). - - Improve performance of `zip_mut_with` (and thus all binary operators) for - block slices of row major arrays. - - `AxisChunksIter` creation sped up and it implements `Clone`. - - Dimension mismatch in `Zip` has a better panic message. 
+ - Add more documentation and an example for dynamic dimensions: see + [`IxDyn`](https://docs.rs/ndarray/0.8.2/ndarray/type.IxDyn.html). + `IxDyn` will have a representation change next incompatible + version. Use it as a type alias for best forward compatibility. + - Add iterable and producer `.whole_chunks_mut(size)`. + - Fix a bug in `whole_chunks`: it didn't check the dimensionality of the + requested chunk size properly (an `IxDyn`-only bug). + - Improve performance of `zip_mut_with` (and thus all binary operators) for + block slices of row major arrays. + - `AxisChunksIter` creation sped up and it implements `Clone`. + - Dimension mismatch in `Zip` has a better panic message. - 0.8.1 - - Add `Zip` and macro `azip!()` which implement lock step function - application across elements from one up to six arrays (or in general - producers) + - Add `Zip` and macro `azip!()` which implement lock step function + application across elements from one up to six arrays (or in general + producers) - + Apart from array views, axis iterators and the whole chunks iterable are - also producers + + Apart from array views, axis iterators and the whole chunks iterable are + also producers - - Add constructor `Array::uninitialized` - - Add iterable and producer `.whole_chunks(size)` - - Implement a prettier `Debug` for `Si`. - - Fix `Array::default` so that it panics as documented if the size of the - array would wrap around integer type limits. - - Output more verbose panics for errors when slicing arrays (only in debug - mode). + - Add constructor `Array::uninitialized` + - Add iterable and producer `.whole_chunks(size)` + - Implement a prettier `Debug` for `Si`. + - Fix `Array::default` so that it panics as documented if the size of the + array would wrap around integer type limits. + - Output more verbose panics for errors when slicing arrays (only in debug + mode). 
- 0.8.0 - - Update serde dependency to 0.9 - - Remove deprecated type alias `OwnedArray` (use `Array`) - - Remove deprecated `.assign_scalar()` (use `fill`) + - Update serde dependency to 0.9 + - Remove deprecated type alias `OwnedArray` (use `Array`) + - Remove deprecated `.assign_scalar()` (use `fill`) - 0.7.3 - - Add macro `array![]` for creating one-, two-, or three-dimensional arrays - (with ownership semantics like `vec![]`) - - `Array` now implements `Clone::clone_from()` specifically, so that its - allocation is (possibly) reused. - - Add `.to_vec()` for one-dimensional arrays - - Add `RcArray::into_owned(self) -> Array`. - - Add crate categories + - Add macro `array![]` for creating one-, two-, or three-dimensional arrays + (with ownership semantics like `vec![]`) + - `Array` now implements `Clone::clone_from()` specifically, so that its + allocation is (possibly) reused. + - Add `.to_vec()` for one-dimensional arrays + - Add `RcArray::into_owned(self) -> Array`. + - Add crate categories - 0.7.2 - - Add array methods `.remove_axis()`, `.merge_axes()` and `.invert_axis()` - - Rename `Axis`’ accessor `axis` to `index`, old name is deprecated. + - Add array methods `.remove_axis()`, `.merge_axes()` and `.invert_axis()` + - Rename `Axis`’ accessor `axis` to `index`, old name is deprecated. - 0.7.1 - - Fix two bugs in `Array::clone()`; it did not support zero-size elements - like `()`, and for some negatively strided arrays it did not update the - first element offset correctly. - - Add `.axes()` which is an iterator over the axes of an array, yielding - its index, length and stride. - - Add method `.max_stride_axis()`. + - Fix two bugs in `Array::clone()`; it did not support zero-size elements + like `()`, and for some negatively strided arrays it did not update the + first element offset correctly. + - Add `.axes()` which is an iterator over the axes of an array, yielding + its index, length and stride. + - Add method `.max_stride_axis()`. 
- 0.6.10 - - Fix two bugs in `Array::clone()`; it did not support zero-size elements - like `()`, and for some negatively strided arrays it did not update the - first element offset correctly. + - Fix two bugs in `Array::clone()`; it did not support zero-size elements + like `()`, and for some negatively strided arrays it did not update the + first element offset correctly. - 0.7.0 - - Big overhaul of dimensions: Add type `Dim` with aliases - `Ix1, Ix2, Ix3, ...` etc for specific dimensionalities. - Instead of `Ix` for dimension use `Ix1`, instead of `(Ix, Ix)` use - `Ix2`, and so on. - - The dimension type `Dim` supports indexing and arithmetic. See - `Dimension` trait for new methods and inherited traits. - - Constructors and methods that take tuples for array sizes, like `Array::zeros,` - `Array::from_shape_vec`, `.into_shape()` and so on will continue to work - with tuples. - - The array method `.raw_dim()` returns the shape description - `D` as it is. `.dim()` continues to return the dimension as a tuple. - - Renamed iterators for consistency (each iterator is named for the - method that creates it, for example `.iter()` returns `Iter`). - - The index iterator is now created with free functions `indices` or - `indices_of`. - - Expanded the `ndarray::prelude` module with the dimensionality-specific - type aliases, and some other items - - `LinalgScalar` and related features no longer need to use `Any` for - static type dispatch. - - Serialization with `serde` now supports binary encoders like bincode - and others. - - `.assign_scalar()` was deprecated and replaced by `.fill()`, which - takes an element by value. - - Require Rust 1.13 + - Big overhaul of dimensions: Add type `Dim` with aliases + `Ix1, Ix2, Ix3, ...` etc for specific dimensionalities. + Instead of `Ix` for dimension use `Ix1`, instead of `(Ix, Ix)` use + `Ix2`, and so on. + - The dimension type `Dim` supports indexing and arithmetic. See + `Dimension` trait for new methods and inherited traits. 
+ - Constructors and methods that take tuples for array sizes, like `Array::zeros,` + `Array::from_shape_vec`, `.into_shape()` and so on will continue to work + with tuples. + - The array method `.raw_dim()` returns the shape description + `D` as it is. `.dim()` continues to return the dimension as a tuple. + - Renamed iterators for consistency (each iterator is named for the + method that creates it, for example `.iter()` returns `Iter`). + - The index iterator is now created with free functions `indices` or + `indices_of`. + - Expanded the `ndarray::prelude` module with the dimensionality-specific + type aliases, and some other items + - `LinalgScalar` and related features no longer need to use `Any` for + static type dispatch. + - Serialization with `serde` now supports binary encoders like bincode + and others. + - `.assign_scalar()` was deprecated and replaced by `.fill()`, which + takes an element by value. + - Require Rust 1.13 - 0.6.9 - - Implement `ExactSizeIterator` for the indexed iterators + - Implement `ExactSizeIterator` for the indexed iterators - 0.6.8 - - Fix a bug in a partially consumed elements iterator's `.fold()`. - (**Note** that users are recommended to not use the elements iterator, - but the higher level functions which are the maps, folds and other methods - of the array types themselves.) + - Fix a bug in a partially consumed elements iterator's `.fold()`. + (**Note** that users are recommended to not use the elements iterator, + but the higher level functions which are the maps, folds and other methods + of the array types themselves.) - 0.6.7 - - Improve performance of a lot of basic operations for arrays where - the innermost dimension is not contiguous (`.fold(), .map(), - .to_owned()`, arithmetic operations with scalars). - - Require Rust 1.11 + - Improve performance of a lot of basic operations for arrays where + the innermost dimension is not contiguous (`.fold(), .map(), + .to_owned()`, arithmetic operations with scalars). 
+ - Require Rust 1.11 - 0.6.6 - - Add dimensionality specific type aliases: `Array0, Array1, Array2, ...` - and so on (there are many), also `Ix0, Ix1, Ix2, ...`. - - Add constructor `Array::from_shape_fn(D, |D| -> A)`. - - Improve performance of `Array::default`, and `.fold()` for noncontiguous - array iterators. + - Add dimensionality specific type aliases: `Array0, Array1, Array2, ...` + and so on (there are many), also `Ix0, Ix1, Ix2, ...`. + - Add constructor `Array::from_shape_fn(D, |D| -> A)`. + - Improve performance of `Array::default`, and `.fold()` for noncontiguous + array iterators. - 0.6.5 - - Add method `.into_raw_vec()` to turn an `Array` into the its - underlying element storage vector, in whatever element order it is using. + - Add method `.into_raw_vec()` to turn an `Array` into the its + underlying element storage vector, in whatever element order it is using. - 0.6.4 - - Add method `.map_axis()` which is used to flatten an array along - one axis by mapping it to a scalar. + - Add method `.map_axis()` which is used to flatten an array along + one axis by mapping it to a scalar. - 0.6.3 - - Work around compilation issues in nightly (issue #217) - - Add `Default` implementations for owned arrays + - Work around compilation issues in nightly (issue #217) + - Add `Default` implementations for owned arrays - 0.6.2 - - Add serialization support for serde 0.8, under the crate feature name `serde` + - Add serialization support for serde 0.8, under the crate feature name `serde` - 0.6.1 - - Add `unsafe` array view constructors `ArrayView::from_shape_ptr` - for read-only and read-write array views. These make it easier to - create views from raw pointers. + - Add `unsafe` array view constructors `ArrayView::from_shape_ptr` + for read-only and read-write array views. These make it easier to + create views from raw pointers. - 0.6.0 - - Rename `OwnedArray` to `Array`. The old name is deprecated. - - Remove deprecated constructor methods. 
Use zeros, from_elem, from_shape_vec - or from_shape_vec_unchecked instead. - - Remove deprecated in place arithmetic methods like iadd et.c. Use += et.c. - instead. - - Remove deprecated method mat_mul, use dot instead. - - Require Rust 1.9 + - Rename `OwnedArray` to `Array`. The old name is deprecated. + - Remove deprecated constructor methods. Use zeros, from_elem, from_shape_vec + or from_shape_vec_unchecked instead. + - Remove deprecated in place arithmetic methods like iadd et.c. Use += et.c. + instead. + - Remove deprecated method mat_mul, use dot instead. + - Require Rust 1.9 - 0.5.2 - - Use num-traits, num-complex instead of num. + - Use num-traits, num-complex instead of num. - 0.5.1 - - Fix theoretical well-formedness issue with Data trait + - Fix theoretical well-formedness issue with Data trait - 0.5.0 - - Require Rust 1.8 and enable +=, -=, and the other assign operators. - All `iadd, iadd_scalar` and similar methods are now deprecated. - - ndarray now has a prelude: `use ndarray::prelude::*;`. - - Constructors from_elem, zeros, from_shape_vec now all support passing a custom - memory layout. A lot of specific constructors were deprecated. - - Add method `.select(Axis, &[Ix]) -> OwnedArray`, to create an array - from a non-contiguous pick of subviews along an axis. - - Rename `.mat_mul()` to just `.dot()` and add a function `general_mat_mul` - for matrix multiplication with scaling into an existing array. - - **Change .fold() to use arbitrary order.** - - See below for more details + - Require Rust 1.8 and enable +=, -=, and the other assign operators. + All `iadd, iadd_scalar` and similar methods are now deprecated. + - ndarray now has a prelude: `use ndarray::prelude::*;`. + - Constructors from_elem, zeros, from_shape_vec now all support passing a custom + memory layout. A lot of specific constructors were deprecated. + - Add method `.select(Axis, &[Ix]) -> OwnedArray`, to create an array + from a non-contiguous pick of subviews along an axis. 
+ - Rename `.mat_mul()` to just `.dot()` and add a function `general_mat_mul` + for matrix multiplication with scaling into an existing array. + - **Change .fold() to use arbitrary order.** + - See below for more details - 0.5.0-alpha.2 - - Fix a namespace bug in the stack![] macro. - - Add method .select() that can pick an arbitrary set of rows (for example) - into a new array. + - Fix a namespace bug in the stack![] macro. + - Add method .select() that can pick an arbitrary set of rows (for example) + into a new array. - 0.4.9 - - Fix a namespace bug in the stack![] macro. - - Add deprecation messages to .iadd() and similar methods (use += instead). + - Fix a namespace bug in the stack![] macro. + - Add deprecation messages to .iadd() and similar methods (use += instead). - 0.5.0-alpha.1 - - Add .swap(i, j) for swapping two elements - - Add a prelude module `use ndarray::prelude::*;` - - Add ndarray::linalg::general_mat_mul which computes *C ← α A B + β C*, - i.e matrix multiplication into an existing array, with optional scaling. - - Add .fold_axis(Axis, folder) - - Implement .into_shape() for f-order arrays + - Add .swap(i, j) for swapping two elements + - Add a prelude module `use ndarray::prelude::*;` + - Add ndarray::linalg::general_mat_mul which computes *C ← α A B + β C*, + i.e matrix multiplication into an existing array, with optional scaling. + - Add .fold_axis(Axis, folder) + - Implement .into_shape() for f-order arrays - 0.5.0-alpha.0 - - Requires Rust 1.8. Compound assignment operators are now enabled by default. - - Rename `.mat_mul()` to `.dot()`. The same method name now handles - dot product and matrix multiplication. - - Remove deprecated items: raw_data, raw_data_mut, allclose, zeros, Array. - Docs for 0.4. lists the replacements. - - Remove deprecated crate features: rblas, assign_ops - - A few consuming arithmetic ops with ArrayViewMut were removed (this - was missed in the last version). 
- - **Change .fold() to use arbitrary order.** Its specification and - implementation has changed, to pick the most appropriate element traversal - order depending on memory layout. + - Requires Rust 1.8. Compound assignment operators are now enabled by default. + - Rename `.mat_mul()` to `.dot()`. The same method name now handles + dot product and matrix multiplication. + - Remove deprecated items: raw_data, raw_data_mut, allclose, zeros, Array. + Docs for 0.4. lists the replacements. + - Remove deprecated crate features: rblas, assign_ops + - A few consuming arithmetic ops with ArrayViewMut were removed (this + was missed in the last version). + - **Change .fold() to use arbitrary order.** Its specification and + implementation has changed, to pick the most appropriate element traversal + order depending on memory layout. - 0.4.8 - - Fix an error in `.dot()` when using BLAS and arrays with negative stride. + - Fix an error in `.dot()` when using BLAS and arrays with negative stride. - 0.4.7 - - Add dependency matrixmultiply to handle matrix multiplication - for floating point elements. It supports matrices of general stride - and is a great improvement for performance. See PR #175. + - Add dependency matrixmultiply to handle matrix multiplication + for floating point elements. It supports matrices of general stride + and is a great improvement for performance. See PR #175. - 0.4.6 - - Fix bug with crate feature blas; it would not compute matrix - multiplication correctly for arrays with negative or zero stride. - - Update blas-sys version (optional dependency). + - Fix bug with crate feature blas; it would not compute matrix + multiplication correctly for arrays with negative or zero stride. + - Update blas-sys version (optional dependency). - 0.4.5 - - Add `.all_close()` which replaces the now deprecated `.allclose()`. - The new method has a stricter protocol: it panics if the array - shapes are not compatible. We don't want errors to pass silently. 
- - Add a new illustration to the doc for `.axis_iter()`. - - Rename `OuterIter, OuterIterMut` to `AxisIter, AxisIterMut`. - The old name is now deprecated. + - Add `.all_close()` which replaces the now deprecated `.allclose()`. + The new method has a stricter protocol: it panics if the array + shapes are not compatible. We don't want errors to pass silently. + - Add a new illustration to the doc for `.axis_iter()`. + - Rename `OuterIter, OuterIterMut` to `AxisIter, AxisIterMut`. + The old name is now deprecated. - 0.4.4 - - Add mapping methods `.mapv(), .mapv_into(), .map_inplace(),` - `.mapv_inplace(), .visit()`. The `mapv` versions - have the transformation function receive the element by value (hence *v*). - - Add method `.scaled_add()` (a.k.a axpy) and constructor `from_vec_dim_f`. - - Add 2d array methods `.rows(), .cols()`. - - Deprecate method `.fold()` because it dictates a specific visit order. + - Add mapping methods `.mapv(), .mapv_into(), .map_inplace(),` + `.mapv_inplace(), .visit()`. The `mapv` versions + have the transformation function receive the element by value (hence *v*). + - Add method `.scaled_add()` (a.k.a axpy) and constructor `from_vec_dim_f`. + - Add 2d array methods `.rows(), .cols()`. + - Deprecate method `.fold()` because it dictates a specific visit order. - 0.4.3 - - Add array method `.t()` as a shorthand to create a transposed view. - - Fix `mat_mul` so that it accepts arguments of different array kind - - Fix a bug in `mat_mul` when using BLAS and multiplying with a column - matrix (#154) + - Add array method `.t()` as a shorthand to create a transposed view. + - Fix `mat_mul` so that it accepts arguments of different array kind + - Fix a bug in `mat_mul` when using BLAS and multiplying with a column + matrix (#154) - 0.4.2 - - Add new BLAS integration used by matrix multiplication - (selected with crate feature `blas`). Uses pluggable backend. - - Deprecate module `ndarray::blas` and crate feature `rblas`. 
This module - was moved to the crate `ndarray-rblas`. - - Add array methods `as_slice_memory_order, as_slice_memory_order_mut, as_ptr, - as_mut_ptr`. - - Deprecate `raw_data, raw_data_mut`. - - Add `Send + Sync` to `NdFloat`. - - Arrays now show shape & stride in their debug formatter. - - Fix a bug where `from_vec_dim_stride` did not accept arrays with unitary axes. - - Performance improvements for contiguous arrays in non-c order when using - methods `to_owned, map, scalar_sum, assign_scalar`, - and arithmetic operations between array and scalar. - - Some methods now return arrays in the same memory order of the input - if the input is contiguous: `to_owned, map, mat_mul` (matrix multiplication - only if both inputs are the same memory order), and arithmetic operations - that allocate a new result. - - Slight performance improvements in `dot, mat_mul` due to more efficient - glue code for calling BLAS. - - Performance improvements in `.assign_scalar`. + - Add new BLAS integration used by matrix multiplication + (selected with crate feature `blas`). Uses pluggable backend. + - Deprecate module `ndarray::blas` and crate feature `rblas`. This module + was moved to the crate `ndarray-rblas`. + - Add array methods `as_slice_memory_order, as_slice_memory_order_mut, as_ptr, + as_mut_ptr`. + - Deprecate `raw_data, raw_data_mut`. + - Add `Send + Sync` to `NdFloat`. + - Arrays now show shape & stride in their debug formatter. + - Fix a bug where `from_vec_dim_stride` did not accept arrays with unitary axes. + - Performance improvements for contiguous arrays in non-c order when using + methods `to_owned, map, scalar_sum, assign_scalar`, + and arithmetic operations between array and scalar. + - Some methods now return arrays in the same memory order of the input + if the input is contiguous: `to_owned, map, mat_mul` (matrix multiplication + only if both inputs are the same memory order), and arithmetic operations + that allocate a new result. 
+ - Slight performance improvements in `dot, mat_mul` due to more efficient + glue code for calling BLAS. + - Performance improvements in `.assign_scalar`. - 0.4.1 - - Mark iterators `Send + Sync` when possible. + - Mark iterators `Send + Sync` when possible. - **0.4.0** [Release Announcement](http://bluss.github.io/rust/2016/03/06/ndarray-0.4/) - - New array splitting via `.split_at(Axis, Ix)` and `.axis_chunks_iter()` - - Added traits `NdFloat`, `AsArray` and `From for ArrayView` which - improve generic programming. - - Array constructors panic when attempting to create an array whose element - count overflows `usize`. (Would be a debug assertion for overflow before.) - - Performance improvements for `.map()`. - - Added `stack` and macro `stack![axis, arrays..]` to concatenate arrays. - - Added constructor `OwnedArray::range(start, end, step)`. - - The type alias `Array` was renamed to `RcArray` (and the old name deprecated). - - Binary operators are not defined when consuming a mutable array view as - the left hand side argument anymore. - - Remove methods and items deprecated since 0.3 or earlier; deprecated methods - have notes about replacements in 0.3 docs. - - See below for full changelog through alphas. + - New array splitting via `.split_at(Axis, Ix)` and `.axis_chunks_iter()` + - Added traits `NdFloat`, `AsArray` and `From for ArrayView` which + improve generic programming. + - Array constructors panic when attempting to create an array whose element + count overflows `usize`. (Would be a debug assertion for overflow before.) + - Performance improvements for `.map()`. + - Added `stack` and macro `stack![axis, arrays..]` to concatenate arrays. + - Added constructor `OwnedArray::range(start, end, step)`. + - The type alias `Array` was renamed to `RcArray` (and the old name deprecated). + - Binary operators are not defined when consuming a mutable array view as + the left hand side argument anymore. 
+ - Remove methods and items deprecated since 0.3 or earlier; deprecated methods + have notes about replacements in 0.3 docs. + - See below for full changelog through alphas. - 0.4.0-alpha.8 - - In debug mode, indexing an array out of bounds now has a detailed - message about index and shape. (In release mode it does not.) - - Enable assign_ops feature automatically when it is supported (Rust 1.8 beta - or later). - - Add trait `NdFloat` which makes it easy to be generic over `f32, f64`. - - Add `From` implementations that convert slices or references to arrays - into array views. This replaces `from_slice` from a previous alpha. - - Add `AsArray` trait, which is simply based on those `From` implementations. - - Improve `.map()` so that it can autovectorize. - - Use `Axis` argument in `RemoveAxis` too. - - Require `DataOwned` in the raw data methods. - - Merged error types into a single `ShapeError`, which uses no allocated data. + - In debug mode, indexing an array out of bounds now has a detailed + message about index and shape. (In release mode it does not.) + - Enable assign_ops feature automatically when it is supported (Rust 1.8 beta + or later). + - Add trait `NdFloat` which makes it easy to be generic over `f32, f64`. + - Add `From` implementations that convert slices or references to arrays + into array views. This replaces `from_slice` from a previous alpha. + - Add `AsArray` trait, which is simply based on those `From` implementations. + - Improve `.map()` so that it can autovectorize. + - Use `Axis` argument in `RemoveAxis` too. + - Require `DataOwned` in the raw data methods. + - Merged error types into a single `ShapeError`, which uses no allocated data. - 0.4.0-alpha.7 - - Fix too strict lifetime bound in arithmetic operations like `&a @ &b`. - - Rename trait Scalar to ScalarOperand (and improve its docs). - - Implement <<= and >>= for arrays. + - Fix too strict lifetime bound in arithmetic operations like `&a @ &b`. 
+ - Rename trait Scalar to ScalarOperand (and improve its docs). + - Implement <<= and >>= for arrays. - 0.4.0-alpha.6 - - All axis arguments must now be wrapped in newtype `Axis`. - - Add method `.split_at(Axis, Ix)` to read-only and read-write array views. - - Add constructors `ArrayView{,Mut}::from_slice` and array view methods - are now visible in the docs. + - All axis arguments must now be wrapped in newtype `Axis`. + - Add method `.split_at(Axis, Ix)` to read-only and read-write array views. + - Add constructors `ArrayView{,Mut}::from_slice` and array view methods + are now visible in the docs. - 0.4.0-alpha.5 - - Use new trait `LinalgScalar` for operations where we want type-based specialization. - This shrinks the set of types that allow dot product, matrix multiply, mean. - - Use BLAS acceleration transparently in `.dot()` (this is the first step). - - Only OwnedArray and RcArray and not ArrayViewMut can now be used as consumed - left hand operand for arithmetic operators. [See arithmetic operations docs!]( - https://docs.rs/ndarray/0.4.0-alpha.5/ndarray/struct.ArrayBase.html#arithmetic-operations) - - Remove deprecated module `linalg` (it was already mostly empty) - - Deprecate free function `zeros` in favour of static method `zeros`. + - Use new trait `LinalgScalar` for operations where we want type-based specialization. + This shrinks the set of types that allow dot product, matrix multiply, mean. + - Use BLAS acceleration transparently in `.dot()` (this is the first step). + - Only OwnedArray and RcArray and not ArrayViewMut can now be used as consumed + left hand operand for arithmetic operators. [See arithmetic operations docs!]( + https://docs.rs/ndarray/0.4.0-alpha.5/ndarray/struct.ArrayBase.html#arithmetic-operations) + - Remove deprecated module `linalg` (it was already mostly empty) + - Deprecate free function `zeros` in favour of static method `zeros`. - 0.4.0-alpha.4 - - Rename `Array` to `RcArray`. Old name is deprecated. 
- - Add methods `OuterIter::split_at`, `OuterIterMut::split_at` - - Change `arr0, arr1, arr2, arr3` to return `OwnedArray`. - Add `rcarr1, rcarr2, rcarr3` that return `RcArray`. + - Rename `Array` to `RcArray`. Old name is deprecated. + - Add methods `OuterIter::split_at`, `OuterIterMut::split_at` + - Change `arr0, arr1, arr2, arr3` to return `OwnedArray`. + Add `rcarr1, rcarr2, rcarr3` that return `RcArray`. - 0.4.0-alpha.3 - - Improve arithmetic operations where the RHS is a broadcast 0-dimensional - array. - - Add read-only and read-write array views to the `rblas` integration. - Added methods `AsBlas::{blas_view_checked, blas_view_mut_checked, bv, bvm}`. - - Use hash_slice in `Hash` impl for arrays. + - Improve arithmetic operations where the RHS is a broadcast 0-dimensional + array. + - Add read-only and read-write array views to the `rblas` integration. + Added methods `AsBlas::{blas_view_checked, blas_view_mut_checked, bv, bvm}`. + - Use hash_slice in `Hash` impl for arrays. - 0.4.0-alpha.2 - - Add `ArrayBase::reversed_axes` which transposes an array. + - Add `ArrayBase::reversed_axes` which transposes an array. - 0.4.0-alpha.1 - - Add checked and unchecked constructor methods for creating arrays - from a vector and explicit dimension and stride, or with - fortran (column major) memory order (marked `f`): - - + `ArrayBase::from_vec_dim`, `from_vec_dim_stride`, - `from_vec_dim_stride_unchecked`, - + `from_vec_dim_unchecked_f`, `from_elem_f`, `zeros_f` - + View constructors `ArrayView::from_slice_dim_stride`, - `ArrayViewMut::from_slice_dim_stride`. - + Rename old `ArrayBase::from_vec_dim` to `from_vec_dim_unchecked`. - - - Check better for wraparound when computing the number of elements in a shape; - this adds error cases that **panic** in `from_elem`, `zeros` etc, - however *the new check will only ever panic in cases that would - trigger debug assertions for overflow in the previous versions*!. 
- - Add an array chunks iterator `.axis_chunks_iter()` and mutable version; - it allows traversing the array in for example chunks of *n* rows at a time. - - Remove methods and items deprecated since 0.3 or earlier; deprecated methods - have notes about replacements in 0.3 docs. + - Add checked and unchecked constructor methods for creating arrays + from a vector and explicit dimension and stride, or with + fortran (column major) memory order (marked `f`): + + + `ArrayBase::from_vec_dim`, `from_vec_dim_stride`, + `from_vec_dim_stride_unchecked`, + + `from_vec_dim_unchecked_f`, `from_elem_f`, `zeros_f` + + View constructors `ArrayView::from_slice_dim_stride`, + `ArrayViewMut::from_slice_dim_stride`. + + Rename old `ArrayBase::from_vec_dim` to `from_vec_dim_unchecked`. + + - Check better for wraparound when computing the number of elements in a shape; + this adds error cases that **panic** in `from_elem`, `zeros` etc, + however *the new check will only ever panic in cases that would + trigger debug assertions for overflow in the previous versions*!. + - Add an array chunks iterator `.axis_chunks_iter()` and mutable version; + it allows traversing the array in for example chunks of *n* rows at a time. + - Remove methods and items deprecated since 0.3 or earlier; deprecated methods + have notes about replacements in 0.3 docs. - 0.3.1 - - Add `.row_mut()`, `.column_mut()` - - Add `.axis_iter()`, `.axis_iter_mut()` + - Add `.row_mut()`, `.column_mut()` + - Add `.axis_iter()`, `.axis_iter_mut()` - **0.3.0** - - Second round of API & consistency update is done - - 0.3.0 highlight: **Index type** `Ix` **changed to** `usize`. - - 0.3.0 highlight: Operator overloading for scalar and array arithmetic. - - 0.3.0 highlight: Indexing with `a[[i, j, k]]` syntax. - - Add `ArrayBase::eye(n)` - - See below for more info + - Second round of API & consistency update is done + - 0.3.0 highlight: **Index type** `Ix` **changed to** `usize`. 
+ - 0.3.0 highlight: Operator overloading for scalar and array arithmetic. + - 0.3.0 highlight: Indexing with `a[[i, j, k]]` syntax. + - Add `ArrayBase::eye(n)` + - See below for more info - 0.3.0-alpha.4 - - Shrink array view structs by removing their redundant slice field (see #45). - Changed the definition of the view `type` aliases. - - `.mat_mul()` and `.mat_mul_col()` now return `OwnedArray`. - Use `.into_shared()` if you need an `Array`. - - impl ExactSizeIterator where possible for iterators. - - impl DoubleEndedIterator for `.outer_iter()` (and _mut). + - Shrink array view structs by removing their redundant slice field (see #45). + Changed the definition of the view `type` aliases. + - `.mat_mul()` and `.mat_mul_col()` now return `OwnedArray`. + Use `.into_shared()` if you need an `Array`. + - impl ExactSizeIterator where possible for iterators. + - impl DoubleEndedIterator for `.outer_iter()` (and _mut). - 0.3.0-alpha.3 - - `.subview()` changed to return an array view, also added `into_subview()`. - - Add `.outer_iter()` and `.outer_iter_mut()` for iteration along the - greatest axis of the array. Views also implement `into_outer_iter()` for - “lifetime preserving” iterators. + - `.subview()` changed to return an array view, also added `into_subview()`. + - Add `.outer_iter()` and `.outer_iter_mut()` for iteration along the + greatest axis of the array. Views also implement `into_outer_iter()` for + “lifetime preserving” iterators. - 0.3.0-alpha.2 - - Improve the strided last dimension case in `zip_mut_with` slightly - (affects all binary operations). - - Add `.row(i), .column(i)` for 2D arrays. - - Deprecate `.row_iter(), .col_iter()`. - - Add method `.dot()` for computing the dot product between two 1D arrays. + - Improve the strided last dimension case in `zip_mut_with` slightly + (affects all binary operations). + - Add `.row(i), .column(i)` for 2D arrays. + - Deprecate `.row_iter(), .col_iter()`. 
+ - Add method `.dot()` for computing the dot product between two 1D arrays. - 0.3.0-alpha.1 - - **Index type** `Ix` **changed to** `usize` (#9). Gives better iterator codegen - and 64-bit size arrays. - - Support scalar operands with arithmetic operators. - - Change `.slice()` and `.diag()` to return array views, add `.into_diag()`. - - Add ability to use fixed size arrays for array indexing, enabling syntax - like `a[[i, j]]` for indexing. - - Add `.ndim()` + - **Index type** `Ix` **changed to** `usize` (#9). Gives better iterator codegen + and 64-bit size arrays. + - Support scalar operands with arithmetic operators. + - Change `.slice()` and `.diag()` to return array views, add `.into_diag()`. + - Add ability to use fixed size arrays for array indexing, enabling syntax + like `a[[i, j]]` for indexing. + - Add `.ndim()` - **0.2.0** - - First chapter of API and performance evolution is done \\o/ - - 0.2.0 highlight: Vectorized (efficient) arithmetic operations - - 0.2.0 highlight: Easier slicing using `s![]` - - 0.2.0 highlight: Nicer API using views - - 0.2.0 highlight: Bridging to BLAS functions. - - See below for more info + - First chapter of API and performance evolution is done \\o/ + - 0.2.0 highlight: Vectorized (efficient) arithmetic operations + - 0.2.0 highlight: Easier slicing using `s![]` + - 0.2.0 highlight: Nicer API using views + - 0.2.0 highlight: Bridging to BLAS functions. + - See below for more info - 0.2.0-alpha.9 - - Support strided matrices in `rblas` bridge, and fix a bug with - non square matrices. - - Deprecated all of module `linalg`. + - Support strided matrices in `rblas` bridge, and fix a bug with + non square matrices. + - Deprecated all of module `linalg`. - 0.2.0-alpha.8 - - **Note:** PACKAGE NAME CHANGED TO `ndarray`. Having package != crate ran - into many quirks of various tools. Changing the package name is easier for - everyone involved! 
- - Optimized `scalar_sum()` so that it will vectorize for the floating point - element case too. + - **Note:** PACKAGE NAME CHANGED TO `ndarray`. Having package != crate ran + into many quirks of various tools. Changing the package name is easier for + everyone involved! + - Optimized `scalar_sum()` so that it will vectorize for the floating point + element case too. - 0.2.0-alpha.7 - - Optimized arithmetic operations! + - Optimized arithmetic operations! - - For c-contiguous arrays or arrays with c-contiguous lowest dimension - they optimize very well, and can vectorize! + - For c-contiguous arrays or arrays with c-contiguous lowest dimension + they optimize very well, and can vectorize! - - Add `.inner_iter()`, `.inner_iter_mut()` - - Add `.fold()`, `.zip_mut_with()` - - Add `.scalar_sum()` - - Add example `examples/life.rs` + - Add `.inner_iter()`, `.inner_iter_mut()` + - Add `.fold()`, `.zip_mut_with()` + - Add `.scalar_sum()` + - Add example `examples/life.rs` - 0.2.0-alpha.6 - - Add `#[deprecated]` attributes (enabled with new enough nightly) - - Add `ArrayBase::linspace`, deprecate constructor `range`. + - Add `#[deprecated]` attributes (enabled with new enough nightly) + - Add `ArrayBase::linspace`, deprecate constructor `range`. - 0.2.0-alpha.5 - - Add `s![...]`, a slice argument macro. - - Add `aview_mut1()`, `zeros()` - - Add `.diag_mut()` and deprecate `.diag_iter_mut()`, `.sub_iter_mut()` - - Add `.uget()`, `.uget_mut()` for unchecked indexing and deprecate the - old names. - - Improve `ArrayBase::from_elem` - - Removed `SliceRange`, replaced by `From` impls for `Si`. + - Add `s![...]`, a slice argument macro. + - Add `aview_mut1()`, `zeros()` + - Add `.diag_mut()` and deprecate `.diag_iter_mut()`, `.sub_iter_mut()` + - Add `.uget()`, `.uget_mut()` for unchecked indexing and deprecate the + old names. + - Improve `ArrayBase::from_elem` + - Removed `SliceRange`, replaced by `From` impls for `Si`. 
- 0.2.0-alpha.4 - - Slicing methods like `.slice()` now take a fixed size array of `Si` - as the slice description. This allows more type checking to verify that the - number of axes is correct. - - Add experimental `rblas` integration. - - Add `into_shape()` which allows reshaping any array or view kind. + - Slicing methods like `.slice()` now take a fixed size array of `Si` + as the slice description. This allows more type checking to verify that the + number of axes is correct. + - Add experimental `rblas` integration. + - Add `into_shape()` which allows reshaping any array or view kind. - 0.2.0-alpha.3 - - Add and edit a lot of documentation + - Add and edit a lot of documentation - 0.2.0-alpha.2 - - Improve performance for iterators when the array data is in the default - memory layout. The iterator then wraps the default slice iterator and - loops will autovectorize. - - Remove method `.indexed()` on iterators. Changed `Indexed` and added - `ÌndexedMut`. - - Added `.as_slice(), .as_mut_slice()` - - Support rustc-serialize + - Improve performance for iterators when the array data is in the default + memory layout. The iterator then wraps the default slice iterator and + loops will autovectorize. + - Remove method `.indexed()` on iterators. Changed `Indexed` and added + `ÌndexedMut`. + - Added `.as_slice(), .as_mut_slice()` + - Support rustc-serialize - 0.2.0-alpha - - Alpha release! - - Introduce `ArrayBase`, `OwnedArray`, `ArrayView`, `ArrayViewMut` - - All arithmetic operations should accept any array type - - `Array` continues to refer to the default reference counted copy on write - array - - Add `.view()`, `.view_mut()`, `.to_owned()`, `.into_shared()` - - Add `.slice_mut()`, `.subview_mut()` - - Some operations now return `OwnedArray`: + - Alpha release! 
+ - Introduce `ArrayBase`, `OwnedArray`, `ArrayView`, `ArrayViewMut` + - All arithmetic operations should accept any array type + - `Array` continues to refer to the default reference counted copy on write + array + - Add `.view()`, `.view_mut()`, `.to_owned()`, `.into_shared()` + - Add `.slice_mut()`, `.subview_mut()` + - Some operations now return `OwnedArray`: - - `.map()` - - `.sum()` - - `.mean()` + - `.map()` + - `.sum()` + - `.mean()` - - Add `get`, `get_mut` to replace the now deprecated `at`, `at_mut`. - - Fix bug in assign_scalar + - Add `get`, `get_mut` to replace the now deprecated `at`, `at_mut`. + - Fix bug in assign_scalar - 0.1.1 - - Add Array::default - - Fix bug in raw_data_mut + - Add Array::default + - Fix bug in raw_data_mut - 0.1.0 - - First release on crates.io - - Starting point for evolution to come - + - First release on crates.io + - Starting point for evolution to come [@adamreichold]: https://github.com/adamreichold + [@aganders3]: https://github.com/aganders3 + [@bluss]: https://github.com/bluss + [@jturner314]: https://github.com/jturner314 + [@LukeMathWalker]: https://github.com/LukeMathWalker + [@acj]: https://github.com/acj + [@adamreichold]: https://github.com/adamreichold + [@atouchet]: https://github.com/atouchet + [@andrei-papou]: https://github.com/andrei-papou + [@benkay]: https://github.com/benkay + [@cassiersg]: https://github.com/cassiersg + [@chohner]: https://github.com/chohner + [@dam5h]: https://github.com/dam5h + [@ethanhs]: https://github.com/ethanhs + [@d-dorazio]: https://github.com/d-dorazio + [@Eijebong]: https://github.com/Eijebong + [@HyeokSuLee]: https://github.com/HyeokSuLee + [@insideoutclub]: https://github.com/insideoutclub + [@JP-Ellis]: https://github.com/JP-Ellis + [@jimblandy]: https://github.com/jimblandy + [@LeSeulArtichaut]: https://github.com/LeSeulArtichaut + [@lifuyang]: https://github.com/liufuyang + [@kdubovikov]: https://github.com/kdubovikov + [@makotokato]: https://github.com/makotokato + 
[@max-sixty]: https://github.com/max-sixty + [@mneumann]: https://github.com/mneumann + [@mockersf]: https://github.com/mockersf + [@nilgoyette]: https://github.com/nilgoyette + [@nitsky]: https://github.com/nitsky + [@Rikorose]: https://github.com/Rikorose + [@rth]: https://github.com/rth + [@sebasv]: https://github.com/sebasv + [@SparrowLii]: https://github.com/SparrowLii + [@steffahn]: https://github.com/steffahn + [@stokhos]: https://github.com/stokhos + [@termoshtt]: https://github.com/termoshtt + [@TheLortex]: https://github.com/TheLortex + [@viniciusd]: https://github.com/viniciusd + [@VasanthakumarV]: https://github.com/VasanthakumarV + [@xd009642]: https://github.com/xd009642 + [@Zuse64]: https://github.com/Zuse64 diff --git a/benches/append.rs b/benches/append.rs index a37df256f..b9ca99c62 100644 --- a/benches/append.rs +++ b/benches/append.rs @@ -6,24 +6,21 @@ use test::Bencher; use ndarray::prelude::*; #[bench] -fn select_axis0(bench: &mut Bencher) -{ +fn select_axis0(bench: &mut Bencher) { let a = Array::::zeros((256, 256)); let selectable = vec![0, 1, 2, 0, 1, 3, 0, 4, 16, 32, 128, 147, 149, 220, 221, 255, 221, 0, 1]; bench.iter(|| a.select(Axis(0), &selectable)); } #[bench] -fn select_axis1(bench: &mut Bencher) -{ +fn select_axis1(bench: &mut Bencher) { let a = Array::::zeros((256, 256)); let selectable = vec![0, 1, 2, 0, 1, 3, 0, 4, 16, 32, 128, 147, 149, 220, 221, 255, 221, 0, 1]; bench.iter(|| a.select(Axis(1), &selectable)); } #[bench] -fn select_1d(bench: &mut Bencher) -{ +fn select_1d(bench: &mut Bencher) { let a = Array::::zeros(1024); let mut selectable = (0..a.len()).step_by(17).collect::>(); selectable.extend(selectable.clone().iter().rev()); diff --git a/benches/bench1.rs b/benches/bench1.rs index 33185844a..fb7f799d6 100644 --- a/benches/bench1.rs +++ b/benches/bench1.rs @@ -16,8 +16,7 @@ use ndarray::{Ix1, Ix2, Ix3, Ix5, IxDyn}; use test::black_box; #[bench] -fn iter_sum_1d_regular(bench: &mut test::Bencher) -{ +fn 
iter_sum_1d_regular(bench: &mut test::Bencher) { let a = Array::::zeros(64 * 64); let a = black_box(a); bench.iter(|| { @@ -30,8 +29,7 @@ fn iter_sum_1d_regular(bench: &mut test::Bencher) } #[bench] -fn iter_sum_1d_raw(bench: &mut test::Bencher) -{ +fn iter_sum_1d_raw(bench: &mut test::Bencher) { // this is autovectorized to death (= great performance) let a = Array::::zeros(64 * 64); let a = black_box(a); @@ -45,8 +43,7 @@ fn iter_sum_1d_raw(bench: &mut test::Bencher) } #[bench] -fn iter_sum_2d_regular(bench: &mut test::Bencher) -{ +fn iter_sum_2d_regular(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let a = black_box(a); bench.iter(|| { @@ -59,8 +56,7 @@ fn iter_sum_2d_regular(bench: &mut test::Bencher) } #[bench] -fn iter_sum_2d_by_row(bench: &mut test::Bencher) -{ +fn iter_sum_2d_by_row(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let a = black_box(a); bench.iter(|| { @@ -75,8 +71,7 @@ fn iter_sum_2d_by_row(bench: &mut test::Bencher) } #[bench] -fn iter_sum_2d_raw(bench: &mut test::Bencher) -{ +fn iter_sum_2d_raw(bench: &mut test::Bencher) { // this is autovectorized to death (= great performance) let a = Array::::zeros((64, 64)); let a = black_box(a); @@ -90,8 +85,7 @@ fn iter_sum_2d_raw(bench: &mut test::Bencher) } #[bench] -fn iter_sum_2d_cutout(bench: &mut test::Bencher) -{ +fn iter_sum_2d_cutout(bench: &mut test::Bencher) { let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -105,8 +99,7 @@ fn iter_sum_2d_cutout(bench: &mut test::Bencher) } #[bench] -fn iter_sum_2d_cutout_by_row(bench: &mut test::Bencher) -{ +fn iter_sum_2d_cutout_by_row(bench: &mut test::Bencher) { let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -122,8 +115,7 @@ fn iter_sum_2d_cutout_by_row(bench: &mut test::Bencher) } #[bench] -fn iter_sum_2d_cutout_outer_iter(bench: &mut test::Bencher) -{ +fn iter_sum_2d_cutout_outer_iter(bench: &mut test::Bencher) { let a = 
Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -139,8 +131,7 @@ fn iter_sum_2d_cutout_outer_iter(bench: &mut test::Bencher) } #[bench] -fn iter_sum_2d_transpose_regular(bench: &mut test::Bencher) -{ +fn iter_sum_2d_transpose_regular(bench: &mut test::Bencher) { let mut a = Array::::zeros((64, 64)); a.swap_axes(0, 1); let a = black_box(a); @@ -154,8 +145,7 @@ fn iter_sum_2d_transpose_regular(bench: &mut test::Bencher) } #[bench] -fn iter_sum_2d_transpose_by_row(bench: &mut test::Bencher) -{ +fn iter_sum_2d_transpose_by_row(bench: &mut test::Bencher) { let mut a = Array::::zeros((64, 64)); a.swap_axes(0, 1); let a = black_box(a); @@ -171,16 +161,14 @@ fn iter_sum_2d_transpose_by_row(bench: &mut test::Bencher) } #[bench] -fn sum_2d_regular(bench: &mut test::Bencher) -{ +fn sum_2d_regular(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let a = black_box(a); bench.iter(|| a.sum()); } #[bench] -fn sum_2d_cutout(bench: &mut test::Bencher) -{ +fn sum_2d_cutout(bench: &mut test::Bencher) { let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -188,16 +176,14 @@ fn sum_2d_cutout(bench: &mut test::Bencher) } #[bench] -fn sum_2d_float(bench: &mut test::Bencher) -{ +fn sum_2d_float(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let a = black_box(a.view()); bench.iter(|| a.sum()); } #[bench] -fn sum_2d_float_cutout(bench: &mut test::Bencher) -{ +fn sum_2d_float_cutout(bench: &mut test::Bencher) { let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -205,8 +191,7 @@ fn sum_2d_float_cutout(bench: &mut test::Bencher) } #[bench] -fn sum_2d_float_t_cutout(bench: &mut test::Bencher) -{ +fn sum_2d_float_t_cutout(bench: &mut test::Bencher) { let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]).reversed_axes(); let a = black_box(av); @@ -214,15 +199,13 @@ fn sum_2d_float_t_cutout(bench: &mut test::Bencher) } #[bench] 
-fn fold_sum_i32_2d_regular(bench: &mut test::Bencher) -{ +fn fold_sum_i32_2d_regular(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); bench.iter(|| a.fold(0, |acc, &x| acc + x)); } #[bench] -fn fold_sum_i32_2d_cutout(bench: &mut test::Bencher) -{ +fn fold_sum_i32_2d_cutout(bench: &mut test::Bencher) { let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -230,8 +213,7 @@ fn fold_sum_i32_2d_cutout(bench: &mut test::Bencher) } #[bench] -fn fold_sum_i32_2d_stride(bench: &mut test::Bencher) -{ +fn fold_sum_i32_2d_stride(bench: &mut test::Bencher) { let a = Array::::zeros((64, 128)); let av = a.slice(s![.., ..;2]); let a = black_box(av); @@ -239,16 +221,14 @@ fn fold_sum_i32_2d_stride(bench: &mut test::Bencher) } #[bench] -fn fold_sum_i32_2d_transpose(bench: &mut test::Bencher) -{ +fn fold_sum_i32_2d_transpose(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let a = a.t(); bench.iter(|| a.fold(0, |acc, &x| acc + x)); } #[bench] -fn fold_sum_i32_2d_cutout_transpose(bench: &mut test::Bencher) -{ +fn fold_sum_i32_2d_cutout_transpose(bench: &mut test::Bencher) { let a = Array::::zeros((66, 66)); let mut av = a.slice(s![1..-1, 1..-1]); av.swap_axes(0, 1); @@ -259,8 +239,7 @@ fn fold_sum_i32_2d_cutout_transpose(bench: &mut test::Bencher) const ADD2DSZ: usize = 64; #[bench] -fn add_2d_regular(bench: &mut test::Bencher) -{ +fn add_2d_regular(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = b.view(); @@ -270,8 +249,7 @@ fn add_2d_regular(bench: &mut test::Bencher) } #[bench] -fn add_2d_zip(bench: &mut test::Bencher) -{ +fn add_2d_zip(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| { @@ -280,16 +258,14 @@ fn add_2d_zip(bench: &mut test::Bencher) } #[bench] -fn add_2d_alloc_plus(bench: &mut test::Bencher) -{ +fn add_2d_alloc_plus(bench: &mut 
test::Bencher) { let a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| &a + &b); } #[bench] -fn add_2d_alloc_zip_uninit(bench: &mut test::Bencher) -{ +fn add_2d_alloc_zip_uninit(bench: &mut test::Bencher) { let a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| unsafe { @@ -302,44 +278,38 @@ fn add_2d_alloc_zip_uninit(bench: &mut test::Bencher) } #[bench] -fn add_2d_alloc_zip_collect(bench: &mut test::Bencher) -{ +fn add_2d_alloc_zip_collect(bench: &mut test::Bencher) { let a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| Zip::from(&a).and(&b).map_collect(|&x, &y| x + y)); } #[bench] -fn vec_string_collect(bench: &mut test::Bencher) -{ +fn vec_string_collect(bench: &mut test::Bencher) { let v = vec![""; 10240]; bench.iter(|| v.iter().map(|s| s.to_owned()).collect::>()); } #[bench] -fn array_string_collect(bench: &mut test::Bencher) -{ +fn array_string_collect(bench: &mut test::Bencher) { let v = Array::from(vec![""; 10240]); bench.iter(|| Zip::from(&v).map_collect(|s| s.to_owned())); } #[bench] -fn vec_f64_collect(bench: &mut test::Bencher) -{ +fn vec_f64_collect(bench: &mut test::Bencher) { let v = vec![1.; 10240]; bench.iter(|| v.iter().map(|s| s + 1.).collect::>()); } #[bench] -fn array_f64_collect(bench: &mut test::Bencher) -{ +fn array_f64_collect(bench: &mut test::Bencher) { let v = Array::from(vec![1.; 10240]); bench.iter(|| Zip::from(&v).map_collect(|s| s + 1.)); } #[bench] -fn add_2d_assign_ops(bench: &mut test::Bencher) -{ +fn add_2d_assign_ops(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = b.view(); @@ -351,8 +321,7 @@ fn add_2d_assign_ops(bench: &mut test::Bencher) } #[bench] -fn add_2d_cutout(bench: &mut test::Bencher) -{ +fn add_2d_cutout(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ + 2, ADD2DSZ + 2)); let 
mut acut = a.slice_mut(s![1..-1, 1..-1]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -363,8 +332,7 @@ fn add_2d_cutout(bench: &mut test::Bencher) } #[bench] -fn add_2d_zip_cutout(bench: &mut test::Bencher) -{ +fn add_2d_zip_cutout(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ + 2, ADD2DSZ + 2)); let mut acut = a.slice_mut(s![1..-1, 1..-1]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -375,8 +343,7 @@ fn add_2d_zip_cutout(bench: &mut test::Bencher) #[bench] #[allow(clippy::identity_op)] -fn add_2d_cutouts_by_4(bench: &mut test::Bencher) -{ +fn add_2d_cutouts_by_4(bench: &mut test::Bencher) { let mut a = Array::::zeros((64 * 1, 64 * 1)); let b = Array::::zeros((64 * 1, 64 * 1)); let chunksz = (4, 4); @@ -389,8 +356,7 @@ fn add_2d_cutouts_by_4(bench: &mut test::Bencher) #[bench] #[allow(clippy::identity_op)] -fn add_2d_cutouts_by_16(bench: &mut test::Bencher) -{ +fn add_2d_cutouts_by_16(bench: &mut test::Bencher) { let mut a = Array::::zeros((64 * 1, 64 * 1)); let b = Array::::zeros((64 * 1, 64 * 1)); let chunksz = (16, 16); @@ -403,8 +369,7 @@ fn add_2d_cutouts_by_16(bench: &mut test::Bencher) #[bench] #[allow(clippy::identity_op)] -fn add_2d_cutouts_by_32(bench: &mut test::Bencher) -{ +fn add_2d_cutouts_by_32(bench: &mut test::Bencher) { let mut a = Array::::zeros((64 * 1, 64 * 1)); let b = Array::::zeros((64 * 1, 64 * 1)); let chunksz = (32, 32); @@ -416,8 +381,7 @@ fn add_2d_cutouts_by_32(bench: &mut test::Bencher) } #[bench] -fn add_2d_broadcast_1_to_2(bench: &mut test::Bencher) -{ +fn add_2d_broadcast_1_to_2(bench: &mut test::Bencher) { let mut a = Array2::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array1::::zeros(ADD2DSZ); let bv = b.view(); @@ -427,8 +391,7 @@ fn add_2d_broadcast_1_to_2(bench: &mut test::Bencher) } #[bench] -fn add_2d_broadcast_0_to_2(bench: &mut test::Bencher) -{ +fn add_2d_broadcast_0_to_2(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros(()); let bv = b.view(); @@ -438,55 
+401,48 @@ fn add_2d_broadcast_0_to_2(bench: &mut test::Bencher) } #[bench] -fn scalar_toowned(bench: &mut test::Bencher) -{ +fn scalar_toowned(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); bench.iter(|| a.to_owned()); } #[bench] -fn scalar_add_1(bench: &mut test::Bencher) -{ +fn scalar_add_1(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let n = 1.; bench.iter(|| &a + n); } #[bench] -fn scalar_add_2(bench: &mut test::Bencher) -{ +fn scalar_add_2(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let n = 1.; bench.iter(|| n + &a); } #[bench] -fn scalar_add_strided_1(bench: &mut test::Bencher) -{ +fn scalar_add_strided_1(bench: &mut test::Bencher) { let a = Array::from_shape_fn((64, 64 * 2), |(i, j)| (i * 64 + j) as f32).slice_move(s![.., ..;2]); let n = 1.; bench.iter(|| &a + n); } #[bench] -fn scalar_add_strided_2(bench: &mut test::Bencher) -{ +fn scalar_add_strided_2(bench: &mut test::Bencher) { let a = Array::from_shape_fn((64, 64 * 2), |(i, j)| (i * 64 + j) as f32).slice_move(s![.., ..;2]); let n = 1.; bench.iter(|| n + &a); } #[bench] -fn scalar_sub_1(bench: &mut test::Bencher) -{ +fn scalar_sub_1(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let n = 1.; bench.iter(|| &a - n); } #[bench] -fn scalar_sub_2(bench: &mut test::Bencher) -{ +fn scalar_sub_2(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let n = 1.; bench.iter(|| n - &a); @@ -494,8 +450,7 @@ fn scalar_sub_2(bench: &mut test::Bencher) // This is for comparison with add_2d_broadcast_0_to_2 #[bench] -fn add_2d_0_to_2_iadd_scalar(bench: &mut test::Bencher) -{ +fn add_2d_0_to_2_iadd_scalar(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let n = black_box(0); bench.iter(|| { @@ -504,8 +459,7 @@ fn add_2d_0_to_2_iadd_scalar(bench: &mut test::Bencher) } #[bench] -fn add_2d_strided(bench: &mut test::Bencher) -{ +fn add_2d_strided(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ * 
2)); let mut a = a.slice_mut(s![.., ..;2]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -516,8 +470,7 @@ fn add_2d_strided(bench: &mut test::Bencher) } #[bench] -fn add_2d_regular_dyn(bench: &mut test::Bencher) -{ +fn add_2d_regular_dyn(bench: &mut test::Bencher) { let mut a = Array::::zeros(&[ADD2DSZ, ADD2DSZ][..]); let b = Array::::zeros(&[ADD2DSZ, ADD2DSZ][..]); let bv = b.view(); @@ -527,8 +480,7 @@ fn add_2d_regular_dyn(bench: &mut test::Bencher) } #[bench] -fn add_2d_strided_dyn(bench: &mut test::Bencher) -{ +fn add_2d_strided_dyn(bench: &mut test::Bencher) { let mut a = Array::::zeros(&[ADD2DSZ, ADD2DSZ * 2][..]); let mut a = a.slice_mut(s![.., ..;2]); let b = Array::::zeros(&[ADD2DSZ, ADD2DSZ][..]); @@ -539,8 +491,7 @@ fn add_2d_strided_dyn(bench: &mut test::Bencher) } #[bench] -fn add_2d_zip_strided(bench: &mut test::Bencher) -{ +fn add_2d_zip_strided(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ * 2)); let mut a = a.slice_mut(s![.., ..;2]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -550,8 +501,7 @@ fn add_2d_zip_strided(bench: &mut test::Bencher) } #[bench] -fn add_2d_one_transposed(bench: &mut test::Bencher) -{ +fn add_2d_one_transposed(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -561,8 +511,7 @@ fn add_2d_one_transposed(bench: &mut test::Bencher) } #[bench] -fn add_2d_zip_one_transposed(bench: &mut test::Bencher) -{ +fn add_2d_zip_one_transposed(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -572,8 +521,7 @@ fn add_2d_zip_one_transposed(bench: &mut test::Bencher) } #[bench] -fn add_2d_both_transposed(bench: &mut test::Bencher) -{ +fn add_2d_both_transposed(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -584,8 +532,7 @@ fn 
add_2d_both_transposed(bench: &mut test::Bencher) } #[bench] -fn add_2d_zip_both_transposed(bench: &mut test::Bencher) -{ +fn add_2d_zip_both_transposed(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -596,8 +543,7 @@ fn add_2d_zip_both_transposed(bench: &mut test::Bencher) } #[bench] -fn add_2d_f32_regular(bench: &mut test::Bencher) -{ +fn add_2d_f32_regular(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = b.view(); @@ -609,8 +555,7 @@ fn add_2d_f32_regular(bench: &mut test::Bencher) const ADD3DSZ: usize = 16; #[bench] -fn add_3d_strided(bench: &mut test::Bencher) -{ +fn add_3d_strided(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD3DSZ, ADD3DSZ, ADD3DSZ * 2)); let mut a = a.slice_mut(s![.., .., ..;2]); let b = Array::::zeros(a.dim()); @@ -621,8 +566,7 @@ fn add_3d_strided(bench: &mut test::Bencher) } #[bench] -fn add_3d_strided_dyn(bench: &mut test::Bencher) -{ +fn add_3d_strided_dyn(bench: &mut test::Bencher) { let mut a = Array::::zeros(&[ADD3DSZ, ADD3DSZ, ADD3DSZ * 2][..]); let mut a = a.slice_mut(s![.., .., ..;2]); let b = Array::::zeros(a.dim()); @@ -635,8 +579,7 @@ fn add_3d_strided_dyn(bench: &mut test::Bencher) const ADD1D_SIZE: usize = 64 * 64; #[bench] -fn add_1d_regular(bench: &mut test::Bencher) -{ +fn add_1d_regular(bench: &mut test::Bencher) { let mut a = Array::::zeros(ADD1D_SIZE); let b = Array::::zeros(a.dim()); bench.iter(|| { @@ -645,8 +588,7 @@ fn add_1d_regular(bench: &mut test::Bencher) } #[bench] -fn add_1d_strided(bench: &mut test::Bencher) -{ +fn add_1d_strided(bench: &mut test::Bencher) { let mut a = Array::::zeros(ADD1D_SIZE * 2); let mut av = a.slice_mut(s![..;2]); let b = Array::::zeros(av.dim()); @@ -656,8 +598,7 @@ fn add_1d_strided(bench: &mut test::Bencher) } #[bench] -fn iadd_scalar_2d_regular(bench: &mut test::Bencher) -{ +fn 
iadd_scalar_2d_regular(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| { a += 1.; @@ -665,8 +606,7 @@ fn iadd_scalar_2d_regular(bench: &mut test::Bencher) } #[bench] -fn iadd_scalar_2d_strided(bench: &mut test::Bencher) -{ +fn iadd_scalar_2d_strided(bench: &mut test::Bencher) { let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ * 2)); let mut a = a.slice_mut(s![.., ..;2]); bench.iter(|| { @@ -675,8 +615,7 @@ fn iadd_scalar_2d_strided(bench: &mut test::Bencher) } #[bench] -fn iadd_scalar_2d_regular_dyn(bench: &mut test::Bencher) -{ +fn iadd_scalar_2d_regular_dyn(bench: &mut test::Bencher) { let mut a = Array::::zeros(vec![ADD2DSZ, ADD2DSZ]); bench.iter(|| { a += 1.; @@ -684,8 +623,7 @@ fn iadd_scalar_2d_regular_dyn(bench: &mut test::Bencher) } #[bench] -fn iadd_scalar_2d_strided_dyn(bench: &mut test::Bencher) -{ +fn iadd_scalar_2d_strided_dyn(bench: &mut test::Bencher) { let mut a = Array::::zeros(vec![ADD2DSZ, ADD2DSZ * 2]); let mut a = a.slice_mut(s![.., ..;2]); bench.iter(|| { @@ -694,8 +632,7 @@ fn iadd_scalar_2d_strided_dyn(bench: &mut test::Bencher) } #[bench] -fn scaled_add_2d_f32_regular(bench: &mut test::Bencher) -{ +fn scaled_add_2d_f32_regular(bench: &mut test::Bencher) { let mut av = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = Array::::zeros((ADD2DSZ, ADD2DSZ)); let scalar = std::f32::consts::PI; @@ -705,8 +642,7 @@ fn scaled_add_2d_f32_regular(bench: &mut test::Bencher) } #[bench] -fn assign_scalar_2d_corder(bench: &mut test::Bencher) -{ +fn assign_scalar_2d_corder(bench: &mut test::Bencher) { let a = Array::zeros((ADD2DSZ, ADD2DSZ)); let mut a = black_box(a); let s = 3.; @@ -714,8 +650,7 @@ fn assign_scalar_2d_corder(bench: &mut test::Bencher) } #[bench] -fn assign_scalar_2d_cutout(bench: &mut test::Bencher) -{ +fn assign_scalar_2d_cutout(bench: &mut test::Bencher) { let mut a = Array::zeros((66, 66)); let a = a.slice_mut(s![1..-1, 1..-1]); let mut a = black_box(a); @@ -724,8 +659,7 @@ fn 
assign_scalar_2d_cutout(bench: &mut test::Bencher) } #[bench] -fn assign_scalar_2d_forder(bench: &mut test::Bencher) -{ +fn assign_scalar_2d_forder(bench: &mut test::Bencher) { let mut a = Array::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut a = black_box(a); @@ -734,16 +668,14 @@ fn assign_scalar_2d_forder(bench: &mut test::Bencher) } #[bench] -fn assign_zero_2d_corder(bench: &mut test::Bencher) -{ +fn assign_zero_2d_corder(bench: &mut test::Bencher) { let a = Array::zeros((ADD2DSZ, ADD2DSZ)); let mut a = black_box(a); bench.iter(|| a.fill(0.)) } #[bench] -fn assign_zero_2d_cutout(bench: &mut test::Bencher) -{ +fn assign_zero_2d_cutout(bench: &mut test::Bencher) { let mut a = Array::zeros((66, 66)); let a = a.slice_mut(s![1..-1, 1..-1]); let mut a = black_box(a); @@ -751,8 +683,7 @@ fn assign_zero_2d_cutout(bench: &mut test::Bencher) } #[bench] -fn assign_zero_2d_forder(bench: &mut test::Bencher) -{ +fn assign_zero_2d_forder(bench: &mut test::Bencher) { let mut a = Array::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut a = black_box(a); @@ -760,8 +691,7 @@ fn assign_zero_2d_forder(bench: &mut test::Bencher) } #[bench] -fn bench_iter_diag(bench: &mut test::Bencher) -{ +fn bench_iter_diag(bench: &mut test::Bencher) { let a = Array::::zeros((1024, 1024)); bench.iter(|| { for elt in a.diag() { @@ -771,8 +701,7 @@ fn bench_iter_diag(bench: &mut test::Bencher) } #[bench] -fn bench_row_iter(bench: &mut test::Bencher) -{ +fn bench_row_iter(bench: &mut test::Bencher) { let a = Array::::zeros((1024, 1024)); let it = a.row(17); bench.iter(|| { @@ -783,8 +712,7 @@ fn bench_row_iter(bench: &mut test::Bencher) } #[bench] -fn bench_col_iter(bench: &mut test::Bencher) -{ +fn bench_col_iter(bench: &mut test::Bencher) { let a = Array::::zeros((1024, 1024)); let it = a.column(17); bench.iter(|| { @@ -854,8 +782,7 @@ mat_mul! 
{mat_mul_i32, i32, } #[bench] -fn create_iter_4d(bench: &mut test::Bencher) -{ +fn create_iter_4d(bench: &mut test::Bencher) { let mut a = Array::from_elem((4, 5, 3, 2), 1.0); a.swap_axes(0, 1); a.swap_axes(2, 1); @@ -865,94 +792,82 @@ fn create_iter_4d(bench: &mut test::Bencher) } #[bench] -fn bench_to_owned_n(bench: &mut test::Bencher) -{ +fn bench_to_owned_n(bench: &mut test::Bencher) { let a = Array::::zeros((32, 32)); bench.iter(|| a.to_owned()); } #[bench] -fn bench_to_owned_t(bench: &mut test::Bencher) -{ +fn bench_to_owned_t(bench: &mut test::Bencher) { let mut a = Array::::zeros((32, 32)); a.swap_axes(0, 1); bench.iter(|| a.to_owned()); } #[bench] -fn bench_to_owned_strided(bench: &mut test::Bencher) -{ +fn bench_to_owned_strided(bench: &mut test::Bencher) { let a = Array::::zeros((32, 64)); let a = a.slice(s![.., ..;2]); bench.iter(|| a.to_owned()); } #[bench] -fn equality_i32(bench: &mut test::Bencher) -{ +fn equality_i32(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let b = Array::::zeros((64, 64)); bench.iter(|| a == b); } #[bench] -fn equality_f32(bench: &mut test::Bencher) -{ +fn equality_f32(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let b = Array::::zeros((64, 64)); bench.iter(|| a == b); } #[bench] -fn equality_f32_mixorder(bench: &mut test::Bencher) -{ +fn equality_f32_mixorder(bench: &mut test::Bencher) { let a = Array::::zeros((64, 64)); let b = Array::::zeros((64, 64).f()); bench.iter(|| a == b); } #[bench] -fn dot_f32_16(bench: &mut test::Bencher) -{ +fn dot_f32_16(bench: &mut test::Bencher) { let a = Array::::zeros(16); let b = Array::::zeros(16); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_20(bench: &mut test::Bencher) -{ +fn dot_f32_20(bench: &mut test::Bencher) { let a = Array::::zeros(20); let b = Array::::zeros(20); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_32(bench: &mut test::Bencher) -{ +fn dot_f32_32(bench: &mut test::Bencher) { let a = Array::::zeros(32); let b = Array::::zeros(32); 
bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_256(bench: &mut test::Bencher) -{ +fn dot_f32_256(bench: &mut test::Bencher) { let a = Array::::zeros(256); let b = Array::::zeros(256); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_1024(bench: &mut test::Bencher) -{ +fn dot_f32_1024(bench: &mut test::Bencher) { let av = Array::::zeros(1024); let bv = Array::::zeros(1024); bench.iter(|| av.dot(&bv)); } #[bench] -fn dot_f32_10e6(bench: &mut test::Bencher) -{ +fn dot_f32_10e6(bench: &mut test::Bencher) { let n = 1_000_000; let av = Array::::zeros(n); let bv = Array::::zeros(n); @@ -960,8 +875,7 @@ fn dot_f32_10e6(bench: &mut test::Bencher) } #[bench] -fn dot_extended(bench: &mut test::Bencher) -{ +fn dot_extended(bench: &mut test::Bencher) { let m = 10; let n = 33; let k = 10; @@ -982,8 +896,7 @@ fn dot_extended(bench: &mut test::Bencher) const MEAN_SUM_N: usize = 127; -fn range_mat(m: Ix, n: Ix) -> Array2 -{ +fn range_mat(m: Ix, n: Ix) -> Array2 { assert!(m * n != 0); Array::linspace(0., (m * n - 1) as f32, m * n) .into_shape_with_order((m, n)) @@ -991,100 +904,87 @@ fn range_mat(m: Ix, n: Ix) -> Array2 } #[bench] -fn mean_axis0(bench: &mut test::Bencher) -{ +fn mean_axis0(bench: &mut test::Bencher) { let a = range_mat(MEAN_SUM_N, MEAN_SUM_N); bench.iter(|| a.mean_axis(Axis(0))); } #[bench] -fn mean_axis1(bench: &mut test::Bencher) -{ +fn mean_axis1(bench: &mut test::Bencher) { let a = range_mat(MEAN_SUM_N, MEAN_SUM_N); bench.iter(|| a.mean_axis(Axis(1))); } #[bench] -fn sum_axis0(bench: &mut test::Bencher) -{ +fn sum_axis0(bench: &mut test::Bencher) { let a = range_mat(MEAN_SUM_N, MEAN_SUM_N); bench.iter(|| a.sum_axis(Axis(0))); } #[bench] -fn sum_axis1(bench: &mut test::Bencher) -{ +fn sum_axis1(bench: &mut test::Bencher) { let a = range_mat(MEAN_SUM_N, MEAN_SUM_N); bench.iter(|| a.sum_axis(Axis(1))); } #[bench] -fn into_dimensionality_ix1_ok(bench: &mut test::Bencher) -{ +fn into_dimensionality_ix1_ok(bench: &mut test::Bencher) { let a = 
Array::::zeros(Ix1(10)); let a = a.view(); bench.iter(|| a.into_dimensionality::()); } #[bench] -fn into_dimensionality_ix3_ok(bench: &mut test::Bencher) -{ +fn into_dimensionality_ix3_ok(bench: &mut test::Bencher) { let a = Array::::zeros(Ix3(10, 10, 10)); let a = a.view(); bench.iter(|| a.into_dimensionality::()); } #[bench] -fn into_dimensionality_ix3_err(bench: &mut test::Bencher) -{ +fn into_dimensionality_ix3_err(bench: &mut test::Bencher) { let a = Array::::zeros(Ix3(10, 10, 10)); let a = a.view(); bench.iter(|| a.into_dimensionality::()); } #[bench] -fn into_dimensionality_dyn_to_ix3(bench: &mut test::Bencher) -{ +fn into_dimensionality_dyn_to_ix3(bench: &mut test::Bencher) { let a = Array::::zeros(IxDyn(&[10, 10, 10])); let a = a.view(); bench.iter(|| a.clone().into_dimensionality::()); } #[bench] -fn into_dimensionality_dyn_to_dyn(bench: &mut test::Bencher) -{ +fn into_dimensionality_dyn_to_dyn(bench: &mut test::Bencher) { let a = Array::::zeros(IxDyn(&[10, 10, 10])); let a = a.view(); bench.iter(|| a.clone().into_dimensionality::()); } #[bench] -fn into_dyn_ix3(bench: &mut test::Bencher) -{ +fn into_dyn_ix3(bench: &mut test::Bencher) { let a = Array::::zeros(Ix3(10, 10, 10)); let a = a.view(); bench.iter(|| a.into_dyn()); } #[bench] -fn into_dyn_ix5(bench: &mut test::Bencher) -{ +fn into_dyn_ix5(bench: &mut test::Bencher) { let a = Array::::zeros(Ix5(2, 2, 2, 2, 2)); let a = a.view(); bench.iter(|| a.into_dyn()); } #[bench] -fn into_dyn_dyn(bench: &mut test::Bencher) -{ +fn into_dyn_dyn(bench: &mut test::Bencher) { let a = Array::::zeros(IxDyn(&[10, 10, 10])); let a = a.view(); bench.iter(|| a.clone().into_dyn()); } #[bench] -fn broadcast_same_dim(bench: &mut test::Bencher) -{ +fn broadcast_same_dim(bench: &mut test::Bencher) { let s = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; let s = Array4::from_shape_vec((2, 2, 3, 2), s.to_vec()).unwrap(); let a = s.slice(s![.., ..;-1, ..;2, ..]); @@ -1093,8 +993,7 @@ fn 
broadcast_same_dim(bench: &mut test::Bencher) } #[bench] -fn broadcast_one_side(bench: &mut test::Bencher) -{ +fn broadcast_one_side(bench: &mut test::Bencher) { let s = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; let s2 = [1, 2, 3, 4, 5, 6]; let a = Array4::from_shape_vec((4, 1, 3, 2), s.to_vec()).unwrap(); diff --git a/benches/chunks.rs b/benches/chunks.rs index 46780492f..5ea9ba466 100644 --- a/benches/chunks.rs +++ b/benches/chunks.rs @@ -7,8 +7,7 @@ use ndarray::prelude::*; use ndarray::NdProducer; #[bench] -fn chunk2x2_iter_sum(bench: &mut Bencher) -{ +fn chunk2x2_iter_sum(bench: &mut Bencher) { let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -20,8 +19,7 @@ fn chunk2x2_iter_sum(bench: &mut Bencher) } #[bench] -fn chunk2x2_sum(bench: &mut Bencher) -{ +fn chunk2x2_sum(bench: &mut Bencher) { let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -33,8 +31,7 @@ fn chunk2x2_sum(bench: &mut Bencher) } #[bench] -fn chunk2x2_sum_get1(bench: &mut Bencher) -{ +fn chunk2x2_sum_get1(bench: &mut Bencher) { let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -49,8 +46,7 @@ fn chunk2x2_sum_get1(bench: &mut Bencher) } #[bench] -fn chunk2x2_sum_uget1(bench: &mut Bencher) -{ +fn chunk2x2_sum_uget1(bench: &mut Bencher) { let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -68,8 +64,7 @@ fn chunk2x2_sum_uget1(bench: &mut Bencher) #[bench] #[allow(clippy::identity_op)] -fn chunk2x2_sum_get2(bench: &mut Bencher) -{ +fn chunk2x2_sum_get2(bench: &mut Bencher) { let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::::zeros(a.exact_chunks(chunksz).raw_dim()); diff --git a/benches/construct.rs b/benches/construct.rs index 
278174388..8d323b4cd 100644 --- a/benches/construct.rs +++ b/benches/construct.rs @@ -8,20 +8,17 @@ use test::Bencher; use ndarray::prelude::*; #[bench] -fn default_f64(bench: &mut Bencher) -{ +fn default_f64(bench: &mut Bencher) { bench.iter(|| Array::::default((128, 128))) } #[bench] -fn zeros_f64(bench: &mut Bencher) -{ +fn zeros_f64(bench: &mut Bencher) { bench.iter(|| Array::::zeros((128, 128))) } #[bench] -fn map_regular(bench: &mut test::Bencher) -{ +fn map_regular(bench: &mut test::Bencher) { let a = Array::linspace(0., 127., 128) .into_shape_with_order((8, 16)) .unwrap(); @@ -29,8 +26,7 @@ fn map_regular(bench: &mut test::Bencher) } #[bench] -fn map_stride(bench: &mut test::Bencher) -{ +fn map_stride(bench: &mut test::Bencher) { let a = Array::linspace(0., 127., 256) .into_shape_with_order((8, 32)) .unwrap(); diff --git a/benches/gemv_gemm.rs b/benches/gemv_gemm.rs index 2d1642623..9dbd9a538 100644 --- a/benches/gemv_gemm.rs +++ b/benches/gemv_gemm.rs @@ -16,8 +16,7 @@ use ndarray::linalg::general_mat_vec_mul; use ndarray::LinalgScalar; #[bench] -fn gemv_64_64c(bench: &mut Bencher) -{ +fn gemv_64_64c(bench: &mut Bencher) { let a = Array::zeros((64, 64)); let (m, n) = a.dim(); let x = Array::zeros(n); @@ -28,8 +27,7 @@ fn gemv_64_64c(bench: &mut Bencher) } #[bench] -fn gemv_64_64f(bench: &mut Bencher) -{ +fn gemv_64_64f(bench: &mut Bencher) { let a = Array::zeros((64, 64).f()); let (m, n) = a.dim(); let x = Array::zeros(n); @@ -40,8 +38,7 @@ fn gemv_64_64f(bench: &mut Bencher) } #[bench] -fn gemv_64_32(bench: &mut Bencher) -{ +fn gemv_64_32(bench: &mut Bencher) { let a = Array::zeros((64, 32)); let (m, n) = a.dim(); let x = Array::zeros(n); @@ -52,20 +49,17 @@ fn gemv_64_32(bench: &mut Bencher) } #[bench] -fn cgemm_100(bench: &mut Bencher) -{ +fn cgemm_100(bench: &mut Bencher) { cgemm_bench::(100, bench); } #[bench] -fn zgemm_100(bench: &mut Bencher) -{ +fn zgemm_100(bench: &mut Bencher) { cgemm_bench::(100, bench); } fn cgemm_bench(size: usize, bench: &mut 
Bencher) -where A: LinalgScalar + Float -{ +where A: LinalgScalar + Float { let (m, k, n) = (size, size, size); let a = Array::, _>::zeros((m, k)); diff --git a/benches/higher-order.rs b/benches/higher-order.rs index 9cc3bd961..0a629fef3 100644 --- a/benches/higher-order.rs +++ b/benches/higher-order.rs @@ -13,22 +13,19 @@ const X: usize = 64; const Y: usize = 16; #[bench] -fn map_regular(bench: &mut Bencher) -{ +fn map_regular(bench: &mut Bencher) { let a = Array::linspace(0., 127., N) .into_shape_with_order((X, Y)) .unwrap(); bench.iter(|| a.map(|&x| 2. * x)); } -pub fn double_array(mut a: ArrayViewMut2<'_, f64>) -{ +pub fn double_array(mut a: ArrayViewMut2<'_, f64>) { a *= 2.0; } #[bench] -fn map_stride_double_f64(bench: &mut Bencher) -{ +fn map_stride_double_f64(bench: &mut Bencher) { let mut a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); @@ -39,8 +36,7 @@ fn map_stride_double_f64(bench: &mut Bencher) } #[bench] -fn map_stride_f64(bench: &mut Bencher) -{ +fn map_stride_f64(bench: &mut Bencher) { let a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); @@ -49,8 +45,7 @@ fn map_stride_f64(bench: &mut Bencher) } #[bench] -fn map_stride_u32(bench: &mut Bencher) -{ +fn map_stride_u32(bench: &mut Bencher) { let a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); @@ -60,8 +55,7 @@ fn map_stride_u32(bench: &mut Bencher) } #[bench] -fn fold_axis(bench: &mut Bencher) -{ +fn fold_axis(bench: &mut Bencher) { let a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); @@ -72,8 +66,7 @@ const MA: usize = 64; const MASZ: usize = MA * MA; #[bench] -fn map_axis_0(bench: &mut Bencher) -{ +fn map_axis_0(bench: &mut Bencher) { let a = Array::from_iter(0..MASZ as i32) .into_shape_with_order([MA, MA]) .unwrap(); @@ -81,8 +74,7 @@ fn map_axis_0(bench: &mut Bencher) } #[bench] -fn map_axis_1(bench: &mut Bencher) -{ +fn map_axis_1(bench: &mut Bencher) { let a = 
Array::from_iter(0..MASZ as i32) .into_shape_with_order([MA, MA]) .unwrap(); diff --git a/benches/iter.rs b/benches/iter.rs index 77f511745..422310103 100644 --- a/benches/iter.rs +++ b/benches/iter.rs @@ -13,15 +13,13 @@ use ndarray::Slice; use ndarray::{FoldWhile, Zip}; #[bench] -fn iter_sum_2d_regular(bench: &mut Bencher) -{ +fn iter_sum_2d_regular(bench: &mut Bencher) { let a = Array::::zeros((64, 64)); bench.iter(|| a.iter().sum::()); } #[bench] -fn iter_sum_2d_cutout(bench: &mut Bencher) -{ +fn iter_sum_2d_cutout(bench: &mut Bencher) { let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = av; @@ -29,8 +27,7 @@ fn iter_sum_2d_cutout(bench: &mut Bencher) } #[bench] -fn iter_all_2d_cutout(bench: &mut Bencher) -{ +fn iter_all_2d_cutout(bench: &mut Bencher) { let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = av; @@ -38,16 +35,14 @@ fn iter_all_2d_cutout(bench: &mut Bencher) } #[bench] -fn iter_sum_2d_transpose(bench: &mut Bencher) -{ +fn iter_sum_2d_transpose(bench: &mut Bencher) { let a = Array::::zeros((66, 66)); let a = a.t(); bench.iter(|| a.iter().sum::()); } #[bench] -fn iter_filter_sum_2d_u32(bench: &mut Bencher) -{ +fn iter_filter_sum_2d_u32(bench: &mut Bencher) { let a = Array::linspace(0., 1., 256) .into_shape_with_order((16, 16)) .unwrap(); @@ -56,8 +51,7 @@ fn iter_filter_sum_2d_u32(bench: &mut Bencher) } #[bench] -fn iter_filter_sum_2d_f32(bench: &mut Bencher) -{ +fn iter_filter_sum_2d_f32(bench: &mut Bencher) { let a = Array::linspace(0., 1., 256) .into_shape_with_order((16, 16)) .unwrap(); @@ -66,8 +60,7 @@ fn iter_filter_sum_2d_f32(bench: &mut Bencher) } #[bench] -fn iter_filter_sum_2d_stride_u32(bench: &mut Bencher) -{ +fn iter_filter_sum_2d_stride_u32(bench: &mut Bencher) { let a = Array::linspace(0., 1., 256) .into_shape_with_order((16, 16)) .unwrap(); @@ -77,8 +70,7 @@ fn iter_filter_sum_2d_stride_u32(bench: &mut Bencher) } #[bench] -fn iter_filter_sum_2d_stride_f32(bench: &mut Bencher) -{ +fn 
iter_filter_sum_2d_stride_f32(bench: &mut Bencher) { let a = Array::linspace(0., 1., 256) .into_shape_with_order((16, 16)) .unwrap(); @@ -88,8 +80,7 @@ fn iter_filter_sum_2d_stride_f32(bench: &mut Bencher) } #[bench] -fn iter_rev_step_by_contiguous(bench: &mut Bencher) -{ +fn iter_rev_step_by_contiguous(bench: &mut Bencher) { let a = Array::linspace(0., 1., 512); bench.iter(|| { a.iter().rev().step_by(2).for_each(|x| { @@ -99,8 +90,7 @@ fn iter_rev_step_by_contiguous(bench: &mut Bencher) } #[bench] -fn iter_rev_step_by_discontiguous(bench: &mut Bencher) -{ +fn iter_rev_step_by_discontiguous(bench: &mut Bencher) { let mut a = Array::linspace(0., 1., 1024); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); bench.iter(|| { @@ -113,8 +103,7 @@ fn iter_rev_step_by_discontiguous(bench: &mut Bencher) const ZIPSZ: usize = 10_000; #[bench] -fn sum_3_std_zip1(bench: &mut Bencher) -{ +fn sum_3_std_zip1(bench: &mut Bencher) { let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -126,8 +115,7 @@ fn sum_3_std_zip1(bench: &mut Bencher) } #[bench] -fn sum_3_std_zip2(bench: &mut Bencher) -{ +fn sum_3_std_zip2(bench: &mut Bencher) { let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -140,8 +128,7 @@ fn sum_3_std_zip2(bench: &mut Bencher) } #[bench] -fn sum_3_std_zip3(bench: &mut Bencher) -{ +fn sum_3_std_zip3(bench: &mut Bencher) { let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -155,8 +142,7 @@ fn sum_3_std_zip3(bench: &mut Bencher) } #[bench] -fn vector_sum_3_std_zip(bench: &mut Bencher) -{ +fn vector_sum_3_std_zip(bench: &mut Bencher) { let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -168,8 +154,7 @@ fn vector_sum_3_std_zip(bench: &mut Bencher) } #[bench] -fn sum_3_azip(bench: &mut Bencher) -{ +fn sum_3_azip(bench: &mut Bencher) { let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -183,8 +168,7 @@ fn sum_3_azip(bench: &mut Bencher) } #[bench] -fn 
sum_3_azip_fold(bench: &mut Bencher) -{ +fn sum_3_azip_fold(bench: &mut Bencher) { let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -198,8 +182,7 @@ fn sum_3_azip_fold(bench: &mut Bencher) } #[bench] -fn vector_sum_3_azip(bench: &mut Bencher) -{ +fn vector_sum_3_azip(bench: &mut Bencher) { let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -210,8 +193,7 @@ fn vector_sum_3_azip(bench: &mut Bencher) }); } -fn vector_sum3_unchecked(a: &[f64], b: &[f64], c: &mut [f64]) -{ +fn vector_sum3_unchecked(a: &[f64], b: &[f64], c: &mut [f64]) { for i in 0..c.len() { unsafe { *c.get_unchecked_mut(i) += *a.get_unchecked(i) + *b.get_unchecked(i); @@ -220,8 +202,7 @@ fn vector_sum3_unchecked(a: &[f64], b: &[f64], c: &mut [f64]) } #[bench] -fn vector_sum_3_zip_unchecked(bench: &mut Bencher) -{ +fn vector_sum_3_zip_unchecked(bench: &mut Bencher) { let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -231,8 +212,7 @@ fn vector_sum_3_zip_unchecked(bench: &mut Bencher) } #[bench] -fn vector_sum_3_zip_unchecked_manual(bench: &mut Bencher) -{ +fn vector_sum_3_zip_unchecked_manual(bench: &mut Bencher) { let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -252,8 +232,7 @@ const ISZ: usize = 16; const I2DSZ: usize = 64; #[bench] -fn indexed_iter_1d_ix1(bench: &mut Bencher) -{ +fn indexed_iter_1d_ix1(bench: &mut Bencher) { let mut a = Array::::zeros(I2DSZ * I2DSZ); for (i, elt) in a.indexed_iter_mut() { *elt = i as _; @@ -268,8 +247,7 @@ fn indexed_iter_1d_ix1(bench: &mut Bencher) } #[bench] -fn indexed_zip_1d_ix1(bench: &mut Bencher) -{ +fn indexed_zip_1d_ix1(bench: &mut Bencher) { let mut a = Array::::zeros(I2DSZ * I2DSZ); for (i, elt) in a.indexed_iter_mut() { *elt = i as _; @@ -284,8 +262,7 @@ fn indexed_zip_1d_ix1(bench: &mut Bencher) } #[bench] -fn indexed_iter_2d_ix2(bench: &mut Bencher) -{ +fn indexed_iter_2d_ix2(bench: &mut Bencher) { let mut a = Array::::zeros((I2DSZ, 
I2DSZ)); for ((i, j), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j) as _; @@ -299,8 +276,7 @@ fn indexed_iter_2d_ix2(bench: &mut Bencher) }) } #[bench] -fn indexed_zip_2d_ix2(bench: &mut Bencher) -{ +fn indexed_zip_2d_ix2(bench: &mut Bencher) { let mut a = Array::::zeros((I2DSZ, I2DSZ)); for ((i, j), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j) as _; @@ -315,8 +291,7 @@ fn indexed_zip_2d_ix2(bench: &mut Bencher) } #[bench] -fn indexed_iter_3d_ix3(bench: &mut Bencher) -{ +fn indexed_iter_3d_ix3(bench: &mut Bencher) { let mut a = Array::::zeros((ISZ, ISZ, ISZ)); for ((i, j, k), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j + 10000 * k) as _; @@ -331,8 +306,7 @@ fn indexed_iter_3d_ix3(bench: &mut Bencher) } #[bench] -fn indexed_zip_3d_ix3(bench: &mut Bencher) -{ +fn indexed_zip_3d_ix3(bench: &mut Bencher) { let mut a = Array::::zeros((ISZ, ISZ, ISZ)); for ((i, j, k), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j + 10000 * k) as _; @@ -347,8 +321,7 @@ fn indexed_zip_3d_ix3(bench: &mut Bencher) } #[bench] -fn indexed_iter_3d_dyn(bench: &mut Bencher) -{ +fn indexed_iter_3d_dyn(bench: &mut Bencher) { let mut a = Array::::zeros((ISZ, ISZ, ISZ)); for ((i, j, k), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j + 10000 * k) as _; @@ -364,31 +337,27 @@ fn indexed_iter_3d_dyn(bench: &mut Bencher) } #[bench] -fn iter_sum_1d_strided_fold(bench: &mut Bencher) -{ +fn iter_sum_1d_strided_fold(bench: &mut Bencher) { let mut a = Array::::ones(10240); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); bench.iter(|| a.iter().sum::()); } #[bench] -fn iter_sum_1d_strided_rfold(bench: &mut Bencher) -{ +fn iter_sum_1d_strided_rfold(bench: &mut Bencher) { let mut a = Array::::ones(10240); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); bench.iter(|| a.iter().rfold(0, |acc, &x| acc + x)); } #[bench] -fn iter_axis_iter_sum(bench: &mut Bencher) -{ +fn iter_axis_iter_sum(bench: &mut Bencher) { let a = Array::::zeros((64, 64)); bench.iter(|| 
a.axis_iter(Axis(0)).map(|plane| plane.sum()).sum::()); } #[bench] -fn iter_axis_chunks_1_iter_sum(bench: &mut Bencher) -{ +fn iter_axis_chunks_1_iter_sum(bench: &mut Bencher) { let a = Array::::zeros((64, 64)); bench.iter(|| { a.axis_chunks_iter(Axis(0), 1) @@ -398,8 +367,7 @@ fn iter_axis_chunks_1_iter_sum(bench: &mut Bencher) } #[bench] -fn iter_axis_chunks_5_iter_sum(bench: &mut Bencher) -{ +fn iter_axis_chunks_5_iter_sum(bench: &mut Bencher) { let a = Array::::zeros((64, 64)); bench.iter(|| { a.axis_chunks_iter(Axis(0), 5) @@ -408,24 +376,21 @@ fn iter_axis_chunks_5_iter_sum(bench: &mut Bencher) }); } -pub fn zip_mut_with(data: &Array3, out: &mut Array3) -{ +pub fn zip_mut_with(data: &Array3, out: &mut Array3) { out.zip_mut_with(&data, |o, &i| { *o = i; }); } #[bench] -fn zip_mut_with_cc(b: &mut Bencher) -{ +fn zip_mut_with_cc(b: &mut Bencher) { let data: Array3 = Array3::zeros((ISZ, ISZ, ISZ)); let mut out = Array3::zeros(data.dim()); b.iter(|| zip_mut_with(&data, &mut out)); } #[bench] -fn zip_mut_with_ff(b: &mut Bencher) -{ +fn zip_mut_with_ff(b: &mut Bencher) { let data: Array3 = Array3::zeros((ISZ, ISZ, ISZ).f()); let mut out = Array3::zeros(data.dim().f()); b.iter(|| zip_mut_with(&data, &mut out)); diff --git a/benches/numeric.rs b/benches/numeric.rs index e2ffa1b84..d9b9187ff 100644 --- a/benches/numeric.rs +++ b/benches/numeric.rs @@ -10,8 +10,7 @@ const X: usize = 64; const Y: usize = 16; #[bench] -fn clip(bench: &mut Bencher) -{ +fn clip(bench: &mut Bencher) { let mut a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); diff --git a/benches/par_rayon.rs b/benches/par_rayon.rs index 1301ae75a..91113b50c 100644 --- a/benches/par_rayon.rs +++ b/benches/par_rayon.rs @@ -12,8 +12,7 @@ use ndarray::Zip; const EXP_N: usize = 256; const ADDN: usize = 512; -fn set_threads() -{ +fn set_threads() { // Consider setting a fixed number of threads here, for example to avoid // oversubscribing on hyperthreaded cores. 
// let n = 4; @@ -21,8 +20,7 @@ fn set_threads() } #[bench] -fn map_exp_regular(bench: &mut Bencher) -{ +fn map_exp_regular(bench: &mut Bencher) { let mut a = Array2::::zeros((EXP_N, EXP_N)); a.swap_axes(0, 1); bench.iter(|| { @@ -31,8 +29,7 @@ fn map_exp_regular(bench: &mut Bencher) } #[bench] -fn rayon_exp_regular(bench: &mut Bencher) -{ +fn rayon_exp_regular(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((EXP_N, EXP_N)); a.swap_axes(0, 1); @@ -44,22 +41,19 @@ fn rayon_exp_regular(bench: &mut Bencher) const FASTEXP: usize = EXP_N; #[inline] -fn fastexp(x: f64) -> f64 -{ +fn fastexp(x: f64) -> f64 { let x = 1. + x / 1024.; x.powi(1024) } #[bench] -fn map_fastexp_regular(bench: &mut Bencher) -{ +fn map_fastexp_regular(bench: &mut Bencher) { let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| a.mapv_inplace(|x| fastexp(x))); } #[bench] -fn rayon_fastexp_regular(bench: &mut Bencher) -{ +fn rayon_fastexp_regular(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { @@ -68,16 +62,14 @@ fn rayon_fastexp_regular(bench: &mut Bencher) } #[bench] -fn map_fastexp_cut(bench: &mut Bencher) -{ +fn map_fastexp_cut(bench: &mut Bencher) { let mut a = Array2::::zeros((FASTEXP, FASTEXP)); let mut a = a.slice_mut(s![.., ..-1]); bench.iter(|| a.mapv_inplace(|x| fastexp(x))); } #[bench] -fn rayon_fastexp_cut(bench: &mut Bencher) -{ +fn rayon_fastexp_cut(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); let mut a = a.slice_mut(s![.., ..-1]); @@ -87,8 +79,7 @@ fn rayon_fastexp_cut(bench: &mut Bencher) } #[bench] -fn map_fastexp_by_axis(bench: &mut Bencher) -{ +fn map_fastexp_by_axis(bench: &mut Bencher) { let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { for mut sheet in a.axis_iter_mut(Axis(0)) { @@ -98,8 +89,7 @@ fn map_fastexp_by_axis(bench: &mut Bencher) } #[bench] -fn rayon_fastexp_by_axis(bench: &mut Bencher) -{ +fn rayon_fastexp_by_axis(bench: &mut 
Bencher) { set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { @@ -110,8 +100,7 @@ fn rayon_fastexp_by_axis(bench: &mut Bencher) } #[bench] -fn rayon_fastexp_zip(bench: &mut Bencher) -{ +fn rayon_fastexp_zip(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { @@ -122,8 +111,7 @@ fn rayon_fastexp_zip(bench: &mut Bencher) } #[bench] -fn add(bench: &mut Bencher) -{ +fn add(bench: &mut Bencher) { let mut a = Array2::::zeros((ADDN, ADDN)); let b = Array2::::zeros((ADDN, ADDN)); let c = Array2::::zeros((ADDN, ADDN)); @@ -136,8 +124,7 @@ fn add(bench: &mut Bencher) } #[bench] -fn rayon_add(bench: &mut Bencher) -{ +fn rayon_add(bench: &mut Bencher) { set_threads(); let mut a = Array2::::zeros((ADDN, ADDN)); let b = Array2::::zeros((ADDN, ADDN)); @@ -154,29 +141,25 @@ const COLL_STRING_N: usize = 64; const COLL_F64_N: usize = 128; #[bench] -fn vec_string_collect(bench: &mut test::Bencher) -{ +fn vec_string_collect(bench: &mut test::Bencher) { let v = vec![""; COLL_STRING_N * COLL_STRING_N]; bench.iter(|| v.iter().map(|s| s.to_owned()).collect::>()); } #[bench] -fn array_string_collect(bench: &mut test::Bencher) -{ +fn array_string_collect(bench: &mut test::Bencher) { let v = Array::from_elem((COLL_STRING_N, COLL_STRING_N), ""); bench.iter(|| Zip::from(&v).par_map_collect(|s| s.to_owned())); } #[bench] -fn vec_f64_collect(bench: &mut test::Bencher) -{ +fn vec_f64_collect(bench: &mut test::Bencher) { let v = vec![1.; COLL_F64_N * COLL_F64_N]; bench.iter(|| v.iter().map(|s| s + 1.).collect::>()); } #[bench] -fn array_f64_collect(bench: &mut test::Bencher) -{ +fn array_f64_collect(bench: &mut test::Bencher) { let v = Array::from_elem((COLL_F64_N, COLL_F64_N), 1.); bench.iter(|| Zip::from(&v).par_map_collect(|s| s + 1.)); } diff --git a/benches/to_shape.rs b/benches/to_shape.rs index f056a9852..7c9f9144e 100644 --- a/benches/to_shape.rs +++ b/benches/to_shape.rs @@ -7,88 +7,77 @@ use 
ndarray::prelude::*; use ndarray::Order; #[bench] -fn to_shape2_1(bench: &mut Bencher) -{ +fn to_shape2_1(bench: &mut Bencher) { let a = Array::::zeros((4, 5)); let view = a.view(); bench.iter(|| view.to_shape(4 * 5).unwrap()); } #[bench] -fn to_shape2_2_same(bench: &mut Bencher) -{ +fn to_shape2_2_same(bench: &mut Bencher) { let a = Array::::zeros((4, 5)); let view = a.view(); bench.iter(|| view.to_shape((4, 5)).unwrap()); } #[bench] -fn to_shape2_2_flip(bench: &mut Bencher) -{ +fn to_shape2_2_flip(bench: &mut Bencher) { let a = Array::::zeros((4, 5)); let view = a.view(); bench.iter(|| view.to_shape((5, 4)).unwrap()); } #[bench] -fn to_shape2_3(bench: &mut Bencher) -{ +fn to_shape2_3(bench: &mut Bencher) { let a = Array::::zeros((4, 5)); let view = a.view(); bench.iter(|| view.to_shape((2, 5, 2)).unwrap()); } #[bench] -fn to_shape3_1(bench: &mut Bencher) -{ +fn to_shape3_1(bench: &mut Bencher) { let a = Array::::zeros((3, 4, 5)); let view = a.view(); bench.iter(|| view.to_shape(3 * 4 * 5).unwrap()); } #[bench] -fn to_shape3_2_order(bench: &mut Bencher) -{ +fn to_shape3_2_order(bench: &mut Bencher) { let a = Array::::zeros((3, 4, 5)); let view = a.view(); bench.iter(|| view.to_shape((12, 5)).unwrap()); } #[bench] -fn to_shape3_2_outoforder(bench: &mut Bencher) -{ +fn to_shape3_2_outoforder(bench: &mut Bencher) { let a = Array::::zeros((3, 4, 5)); let view = a.view(); bench.iter(|| view.to_shape((4, 15)).unwrap()); } #[bench] -fn to_shape3_3c(bench: &mut Bencher) -{ +fn to_shape3_3c(bench: &mut Bencher) { let a = Array::::zeros((3, 4, 5)); let view = a.view(); bench.iter(|| view.to_shape((3, 4, 5)).unwrap()); } #[bench] -fn to_shape3_3f(bench: &mut Bencher) -{ +fn to_shape3_3f(bench: &mut Bencher) { let a = Array::::zeros((3, 4, 5).f()); let view = a.view(); bench.iter(|| view.to_shape(((3, 4, 5), Order::F)).unwrap()); } #[bench] -fn to_shape3_4c(bench: &mut Bencher) -{ +fn to_shape3_4c(bench: &mut Bencher) { let a = Array::::zeros((3, 4, 5)); let view = a.view(); 
bench.iter(|| view.to_shape(((2, 3, 2, 5), Order::C)).unwrap()); } #[bench] -fn to_shape3_4f(bench: &mut Bencher) -{ +fn to_shape3_4f(bench: &mut Bencher) { let a = Array::::zeros((3, 4, 5).f()); let view = a.view(); bench.iter(|| view.to_shape(((2, 3, 2, 5), Order::F)).unwrap()); diff --git a/benches/zip.rs b/benches/zip.rs index 461497310..1194e450f 100644 --- a/benches/zip.rs +++ b/benches/zip.rs @@ -33,8 +33,7 @@ where z22.for_each(f); } -pub fn zip_indexed(data: &Array3, out: &mut Array3) -{ +pub fn zip_indexed(data: &Array3, out: &mut Array3) { Zip::indexed(data).and(out).for_each(|idx, &i, o| { let _ = black_box(idx); *o = i; @@ -45,56 +44,49 @@ pub fn zip_indexed(data: &Array3, out: &mut Array3) const SZ3: (usize, usize, usize) = (100, 110, 100); #[bench] -fn zip_cc(b: &mut Bencher) -{ +fn zip_cc(b: &mut Bencher) { let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim()); b.iter(|| zip_copy(&data, &mut out)); } #[bench] -fn zip_cf(b: &mut Bencher) -{ +fn zip_cf(b: &mut Bencher) { let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim().f()); b.iter(|| zip_copy(&data, &mut out)); } #[bench] -fn zip_fc(b: &mut Bencher) -{ +fn zip_fc(b: &mut Bencher) { let data: Array3 = Array3::zeros(SZ3.f()); let mut out = Array3::zeros(data.dim()); b.iter(|| zip_copy(&data, &mut out)); } #[bench] -fn zip_ff(b: &mut Bencher) -{ +fn zip_ff(b: &mut Bencher) { let data: Array3 = Array3::zeros(SZ3.f()); let mut out = Array3::zeros(data.dim().f()); b.iter(|| zip_copy(&data, &mut out)); } #[bench] -fn zip_indexed_cc(b: &mut Bencher) -{ +fn zip_indexed_cc(b: &mut Bencher) { let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim()); b.iter(|| zip_indexed(&data, &mut out)); } #[bench] -fn zip_indexed_ff(b: &mut Bencher) -{ +fn zip_indexed_ff(b: &mut Bencher) { let data: Array3 = Array3::zeros(SZ3.f()); let mut out = Array3::zeros(data.dim().f()); b.iter(|| zip_indexed(&data, &mut out)); } #[bench] -fn slice_zip_cc(b: 
&mut Bencher) -{ +fn slice_zip_cc(b: &mut Bencher) { let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim()); let data = data.slice(s![1.., 1.., 1..]); @@ -103,8 +95,7 @@ fn slice_zip_cc(b: &mut Bencher) } #[bench] -fn slice_zip_ff(b: &mut Bencher) -{ +fn slice_zip_ff(b: &mut Bencher) { let data: Array3 = Array3::zeros(SZ3.f()); let mut out = Array3::zeros(data.dim().f()); let data = data.slice(s![1.., 1.., 1..]); @@ -113,8 +104,7 @@ fn slice_zip_ff(b: &mut Bencher) } #[bench] -fn slice_split_zip_cc(b: &mut Bencher) -{ +fn slice_split_zip_cc(b: &mut Bencher) { let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim()); let data = data.slice(s![1.., 1.., 1..]); @@ -123,8 +113,7 @@ fn slice_split_zip_cc(b: &mut Bencher) } #[bench] -fn slice_split_zip_ff(b: &mut Bencher) -{ +fn slice_split_zip_ff(b: &mut Bencher) { let data: Array3 = Array3::zeros(SZ3.f()); let mut out = Array3::zeros(data.dim().f()); let data = data.slice(s![1.., 1.., 1..]); diff --git a/examples/axis_ops.rs b/examples/axis_ops.rs index 3a54a52fb..384ca6fab 100644 --- a/examples/axis_ops.rs +++ b/examples/axis_ops.rs @@ -55,8 +55,7 @@ where Ok(()) } -fn main() -{ +fn main() { let mut a = Array::::zeros((2, 3, 4)); for (i, elt) in (0..).zip(&mut a) { *elt = i; diff --git a/examples/bounds_check_elim.rs b/examples/bounds_check_elim.rs index e6b57c719..05dd52c48 100644 --- a/examples/bounds_check_elim.rs +++ b/examples/bounds_check_elim.rs @@ -35,8 +35,7 @@ pub fn testvec_as_slice(a: &Vec) -> f64 { */ #[no_mangle] -pub fn test1d_single(a: &Array1, i: usize) -> f64 -{ +pub fn test1d_single(a: &Array1, i: usize) -> f64 { if i < a.len() { a[i] } else { @@ -45,8 +44,7 @@ pub fn test1d_single(a: &Array1, i: usize) -> f64 } #[no_mangle] -pub fn test1d_single_mut(a: &mut Array1, i: usize) -> f64 -{ +pub fn test1d_single_mut(a: &mut Array1, i: usize) -> f64 { if i < a.len() { *&mut a[i] } else { @@ -55,8 +53,7 @@ pub fn test1d_single_mut(a: &mut Array1, i: usize) -> 
f64 } #[no_mangle] -pub fn test1d_len_of(a: &Array1) -> f64 -{ +pub fn test1d_len_of(a: &Array1) -> f64 { let a = &*a; let mut sum = 0.; for i in 0..a.len_of(Axis(0)) { @@ -66,8 +63,7 @@ pub fn test1d_len_of(a: &Array1) -> f64 } #[no_mangle] -pub fn test1d_range(a: &Array1) -> f64 -{ +pub fn test1d_range(a: &Array1) -> f64 { let mut sum = 0.; for i in 0..a.len() { sum += a[i]; @@ -76,8 +72,7 @@ pub fn test1d_range(a: &Array1) -> f64 } #[no_mangle] -pub fn test1d_while(a: &Array1) -> f64 -{ +pub fn test1d_while(a: &Array1) -> f64 { let mut sum = 0.; let mut i = 0; while i < a.len() { @@ -88,8 +83,7 @@ pub fn test1d_while(a: &Array1) -> f64 } #[no_mangle] -pub fn test2d_ranges(a: &Array2) -> f64 -{ +pub fn test2d_ranges(a: &Array2) -> f64 { let mut sum = 0.; for i in 0..a.nrows() { for j in 0..a.ncols() { @@ -100,8 +94,7 @@ pub fn test2d_ranges(a: &Array2) -> f64 } #[no_mangle] -pub fn test2d_whiles(a: &Array2) -> f64 -{ +pub fn test2d_whiles(a: &Array2) -> f64 { let mut sum = 0.; let mut i = 0; while i < a.nrows() { diff --git a/examples/column_standardize.rs b/examples/column_standardize.rs index 329ad2ccb..6a1840f03 100644 --- a/examples/column_standardize.rs +++ b/examples/column_standardize.rs @@ -2,8 +2,7 @@ use ndarray::prelude::*; #[cfg(feature = "std")] -fn main() -{ +fn main() { // This example recreates the following from python/numpy // counts -= np.mean(counts, axis=0) // counts /= np.std(counts, axis=0) diff --git a/examples/convo.rs b/examples/convo.rs index a59795e12..f26ab6a50 100644 --- a/examples/convo.rs +++ b/examples/convo.rs @@ -15,8 +15,7 @@ type Kernel3x3 = [[A; 3]; 3]; #[inline(never)] #[cfg(feature = "std")] fn conv_3x3(a: &ArrayView2<'_, F>, out: &mut ArrayViewMut2<'_, F>, kernel: &Kernel3x3) -where F: Float -{ +where F: Float { let (n, m) = a.dim(); let (np, mp) = out.dim(); if n < 3 || m < 3 { @@ -42,8 +41,7 @@ where F: Float } #[cfg(feature = "std")] -fn main() -{ +fn main() { let n = 16; let mut a = Array::zeros((n, n)); // make a 
circle diff --git a/examples/life.rs b/examples/life.rs index 7db384678..8b722186c 100644 --- a/examples/life.rs +++ b/examples/life.rs @@ -10,8 +10,7 @@ const N: usize = 100; type Board = Array2; -fn parse(x: &[u8]) -> Board -{ +fn parse(x: &[u8]) -> Board { // make a border of 0 cells let mut map = Board::from_elem(((N + 2), (N + 2)), 0); let a = Array::from_iter(x.iter().filter_map(|&b| match b { @@ -31,8 +30,7 @@ fn parse(x: &[u8]) -> Board // 3 neighbors: birth // otherwise: death -fn iterate(z: &mut Board, scratch: &mut Board) -{ +fn iterate(z: &mut Board, scratch: &mut Board) { // compute number of neighbors let mut neigh = scratch.view_mut(); neigh.fill(0); @@ -55,8 +53,7 @@ fn iterate(z: &mut Board, scratch: &mut Board) zv.zip_mut_with(&neigh, |y, &n| *y = ((n == 3) || (n == 2 && *y > 0)) as u8); } -fn turn_on_corners(z: &mut Board) -{ +fn turn_on_corners(z: &mut Board) { let n = z.nrows(); let m = z.ncols(); z[[1, 1]] = 1; @@ -65,8 +62,7 @@ fn turn_on_corners(z: &mut Board) z[[n - 2, m - 2]] = 1; } -fn render(a: &Board) -{ +fn render(a: &Board) { for row in a.rows() { for &x in row { if x > 0 { @@ -79,8 +75,7 @@ fn render(a: &Board) } } -fn main() -{ +fn main() { let mut a = parse(INPUT); let mut scratch = Board::zeros((N, N)); let steps = 100; diff --git a/examples/rollaxis.rs b/examples/rollaxis.rs index 82c381297..8efdd0ce0 100644 --- a/examples/rollaxis.rs +++ b/examples/rollaxis.rs @@ -22,8 +22,7 @@ where a } -fn main() -{ +fn main() { let mut data = array![ [[-1., 0., -2.], [1., 7., -3.]], [[1., 0., -3.], [1., 7., 5.]], diff --git a/examples/sort-axis.rs b/examples/sort-axis.rs index 17ce52e3a..ff4e804da 100644 --- a/examples/sort-axis.rs +++ b/examples/sort-axis.rs @@ -12,16 +12,13 @@ use std::ptr::copy_nonoverlapping; // Type invariant: Each index appears exactly once #[derive(Clone, Debug)] -pub struct Permutation -{ +pub struct Permutation { indices: Vec, } -impl Permutation -{ +impl Permutation { /// Checks if the permutation is correct - pub 
fn from_indices(v: Vec) -> Result - { + pub fn from_indices(v: Vec) -> Result { let perm = Permutation { indices: v }; if perm.correct() { Ok(perm) @@ -30,8 +27,7 @@ impl Permutation } } - fn correct(&self) -> bool - { + fn correct(&self) -> bool { let axis_len = self.indices.len(); let mut seen = vec![false; axis_len]; for &i in &self.indices { @@ -49,16 +45,14 @@ impl Permutation } } -pub trait SortArray -{ +pub trait SortArray { /// ***Panics*** if `axis` is out of bounds. fn identity(&self, axis: Axis) -> Permutation; fn sort_axis_by(&self, axis: Axis, less_than: F) -> Permutation where F: FnMut(usize, usize) -> bool; } -pub trait PermuteArray -{ +pub trait PermuteArray { type Elem; type Dim; fn permute_axis(self, axis: Axis, perm: &Permutation) -> Array @@ -72,16 +66,14 @@ where S: Data, D: Dimension, { - fn identity(&self, axis: Axis) -> Permutation - { + fn identity(&self, axis: Axis) -> Permutation { Permutation { indices: (0..self.len_of(axis)).collect(), } } fn sort_axis_by(&self, axis: Axis, mut less_than: F) -> Permutation - where F: FnMut(usize, usize) -> bool - { + where F: FnMut(usize, usize) -> bool { let mut perm = self.identity(axis); perm.indices.sort_by(move |&a, &b| { if less_than(a, b) { @@ -103,8 +95,7 @@ where D: Dimension type Dim = D; fn permute_axis(self, axis: Axis, perm: &Permutation) -> Array - where D: RemoveAxis - { + where D: RemoveAxis { let axis_len = self.len_of(axis); let axis_stride = self.stride_of(axis); assert_eq!(axis_len, perm.indices.len()); @@ -167,8 +158,7 @@ where D: Dimension } #[cfg(feature = "std")] -fn main() -{ +fn main() { let a = Array::linspace(0., 63., 64) .into_shape_with_order((8, 8)) .unwrap(); @@ -188,12 +178,10 @@ fn main() fn main() {} #[cfg(test)] -mod tests -{ +mod tests { use super::*; #[test] - fn test_permute_axis() - { + fn test_permute_axis() { let a = array![ [107998.96, 1.], [107999.08, 2.], diff --git a/examples/type_conversion.rs b/examples/type_conversion.rs index a419af740..7bec2542f 100644 
--- a/examples/type_conversion.rs +++ b/examples/type_conversion.rs @@ -7,8 +7,7 @@ use approx::assert_abs_diff_eq; use ndarray::prelude::*; #[cfg(feature = "approx")] -fn main() -{ +fn main() { // Converting an array from one datatype to another is implemented with the // `ArrayBase::mapv()` function. We pass a closure that is applied to each // element independently. This allows for more control and flexiblity in diff --git a/examples/zip_many.rs b/examples/zip_many.rs index 57d66a956..9b649a278 100644 --- a/examples/zip_many.rs +++ b/examples/zip_many.rs @@ -5,8 +5,7 @@ use ndarray::prelude::*; use ndarray::Zip; -fn main() -{ +fn main() { let n = 6; let mut a = Array::::zeros((n, n)); diff --git a/misc/axis_iter.svg b/misc/axis_iter.svg index ea4157b0e..9394d110d 100644 --- a/misc/axis_iter.svg +++ b/misc/axis_iter.svg @@ -2,840 +2,841 @@ - - - - - - - - - - - - - - - - image/svg+xml - - - - - - + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + xmlns:xlink="http://www.w3.org/1999/xlink" + xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" + xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" + width="274.07394mm" + height="131.17035mm" + viewBox="0 0 971.12814 464.77682" + id="svg6730" + version="1.1" + inkscape:version="0.91 r13725" + sodipodi:docname="axis_iter.svg"> + + + + + + + + + + + + + + + + image/svg+xml + + + + + - - - - - - - - - - - - - - - - - - - - - - - + inkscape:label="Layer 1" + inkscape:groupmode="layer" + id="layer1" + style="opacity:1" + transform="translate(-7.6612808,-76.544459)"> + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Axis(0) - Axis(1) - Axis(2) - 
.axis_iter(Axis(2)) - Input shape: (3, 4, 5)Output shapes: (3, 4) - - - - - - - Input shape: (3, 4, 5) + Output shapes: (3, 4) + + + + + + + 0 - 1 - 2 - 3 - 4 + + - - diff --git a/misc/split_at.svg b/misc/split_at.svg index 57c429546..67e7439b2 100644 --- a/misc/split_at.svg +++ b/misc/split_at.svg @@ -2,796 +2,797 @@ - - - - - - - - - - - - - - - - image/svg+xml - - - - - - + xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + xmlns:xlink="http://www.w3.org/1999/xlink" + xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" + xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" + width="189.43846mm" + height="171.03487mm" + viewBox="0 0 671.23864 606.02905" + id="svg6730" + version="1.1" + inkscape:version="0.91 r13725" + sodipodi:docname="split_at.svg"> + + + + + + + + + + + + + + + + image/svg+xml + + + + + - - - - - - - - - - - - - - - - - - - - - - - + inkscape:label="Layer 1" + inkscape:groupmode="layer" + id="layer1" + style="opacity:1" + transform="translate(-7.6612808,-76.544459)"> + + + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Axis(0) - Axis(1) - Axis(2) - .split_at(Axis(2), 2) - Input shape: (3, 5, 5)Output shapes: (3, 5, 2) and (3, 5, 3) - - + id="tspan8614-6">Input shape: (3, 5, 5) + Output shapes: (3, 5, 2) and (3, 5, 3) + + diff --git a/ndarray-rand/README.md b/ndarray-rand/README.md index 0109e9732..e7f42d95a 100644 --- a/ndarray-rand/README.md +++ b/ndarray-rand/README.md @@ -28,8 +28,8 @@ Dependencies ``ndarray-rand`` depends on ``rand``. 
-[`rand`](https://docs.rs/rand/) and [`rand-distr`](https://docs.rs/rand_distr/) are -re-exported as sub-modules, `ndarray_rand::rand` and `ndarray_rand::rand_distr` respectively. +[`rand`](https://docs.rs/rand/) and [`rand-distr`](https://docs.rs/rand_distr/) are +re-exported as sub-modules, `ndarray_rand::rand` and `ndarray_rand::rand_distr` respectively. Please rely on these submodules for guaranteed version compatibility. If you want to use a random number generator or distribution from another crate @@ -41,7 +41,7 @@ necessary trait). Recent changes ============== -Check _[RELEASES.md](https://github.com/rust-ndarray/ndarray/blob/master/ndarray-rand/RELEASES.md)_ to see +Check _[RELEASES.md](https://github.com/rust-ndarray/ndarray/blob/master/ndarray-rand/RELEASES.md)_ to see the changes introduced in previous releases. diff --git a/ndarray-rand/RELEASES.md b/ndarray-rand/RELEASES.md index 8d7586fbe..1557e1ddc 100644 --- a/ndarray-rand/RELEASES.md +++ b/ndarray-rand/RELEASES.md @@ -3,73 +3,73 @@ Recent Changes - 0.14.0 - - Require ndarray 0.15 - - Require rand 0.8 (unchanged from previous version) - - The F32 wrapper is now deprecated, it's redundant + - Require ndarray 0.15 + - Require rand 0.8 (unchanged from previous version) + - The F32 wrapper is now deprecated, it's redundant - 0.13.0 - - Require ndarray 0.14 (unchanged from previous version) - - Require rand 0.8 - - Require rand_distr 0.4 - - Fix methods `sample_axis` and `sample_axis_using` so that they can be used on array views too. + - Require ndarray 0.14 (unchanged from previous version) + - Require rand 0.8 + - Require rand_distr 0.4 + - Fix methods `sample_axis` and `sample_axis_using` so that they can be used on array views too. 
- 0.12.0 - - Require ndarray 0.14 - - Require rand 0.7 (unchanged from previous version) - - Require rand_distr 0.3 + - Require ndarray 0.14 + - Require rand 0.7 (unchanged from previous version) + - Require rand_distr 0.3 - 0.11.0 - - Require ndarray 0.13 - - Require rand 0.7 (unchanged from previous version) + - Require ndarray 0.13 + - Require rand 0.7 (unchanged from previous version) - 0.10.0 - - Require `rand` 0.7 - - Require Rust 1.32 or later - - Re-export `rand` as a submodule, `ndarray_rand::rand` - - Re-export `rand-distr` as a submodule, `ndarray_rand::rand_distr` + - Require `rand` 0.7 + - Require Rust 1.32 or later + - Re-export `rand` as a submodule, `ndarray_rand::rand` + - Re-export `rand-distr` as a submodule, `ndarray_rand::rand_distr` - 0.9.0 - - Require rand 0.6 + - Require rand 0.6 - 0.8.0 - - Require ndarray 0.12 - - Require rand 0.5 + - Require ndarray 0.12 + - Require rand 0.5 - 0.7.0 - - Require ndarray 0.11 - - Require rand 0.4 + - Require ndarray 0.11 + - Require rand 0.4 - 0.6.1 - - Clean up implementation of ``Array::random`` by @v-shmyhlo + - Clean up implementation of ``Array::random`` by @v-shmyhlo - 0.6.0 - - Require ndarray 0.10.0 + - Require ndarray 0.10.0 - 0.5.0 - - Require ndarray 0.9 + - Require ndarray 0.9 - 0.4.0 - - Require ndarray 0.8 + - Require ndarray 0.8 - 0.3.0 - - Require ndarray 0.7 + - Require ndarray 0.7 - 0.2.0 - - Require ndarray 0.6 + - Require ndarray 0.6 - 0.1.0 - - Initial release + - Initial release diff --git a/ndarray-rand/benches/bench.rs b/ndarray-rand/benches/bench.rs index 0e5eb2ff7..b58d80a88 100644 --- a/ndarray-rand/benches/bench.rs +++ b/ndarray-rand/benches/bench.rs @@ -10,22 +10,19 @@ use rand_distr::Uniform; use test::Bencher; #[bench] -fn uniform_f32(b: &mut Bencher) -{ +fn uniform_f32(b: &mut Bencher) { let m = 100; b.iter(|| Array::random((m, m), Uniform::new(-1f32, 1.))); } #[bench] -fn norm_f32(b: &mut Bencher) -{ +fn norm_f32(b: &mut Bencher) { let m = 100; b.iter(|| Array::random((m, 
m), Normal::new(0f32, 1.).unwrap())); } #[bench] -fn norm_f64(b: &mut Bencher) -{ +fn norm_f64(b: &mut Bencher) { let m = 100; b.iter(|| Array::random((m, m), Normal::new(0f64, 1.).unwrap())); } diff --git a/ndarray-rand/src/lib.rs b/ndarray-rand/src/lib.rs index 027198538..57124f3a7 100644 --- a/ndarray-rand/src/lib.rs +++ b/ndarray-rand/src/lib.rs @@ -40,14 +40,12 @@ use ndarray::{ArrayBase, Data, DataOwned, Dimension, RawData}; use quickcheck::{Arbitrary, Gen}; /// `rand`, re-exported for convenience and version-compatibility. -pub mod rand -{ +pub mod rand { pub use rand::*; } /// `rand-distr`, re-exported for convenience and version-compatibility. -pub mod rand_distr -{ +pub mod rand_distr { pub use rand_distr::*; } @@ -286,18 +284,15 @@ where /// [`sample_axis`]: RandomExt::sample_axis /// [`sample_axis_using`]: RandomExt::sample_axis_using #[derive(Debug, Clone)] -pub enum SamplingStrategy -{ +pub enum SamplingStrategy { WithReplacement, WithoutReplacement, } // `Arbitrary` enables `quickcheck` to generate random `SamplingStrategy` values for testing. 
#[cfg(feature = "quickcheck")] -impl Arbitrary for SamplingStrategy -{ - fn arbitrary(g: &mut Gen) -> Self - { +impl Arbitrary for SamplingStrategy { + fn arbitrary(g: &mut Gen) -> Self { if bool::arbitrary(g) { SamplingStrategy::WithReplacement } else { @@ -306,8 +301,7 @@ impl Arbitrary for SamplingStrategy } } -fn get_rng() -> SmallRng -{ +fn get_rng() -> SmallRng { SmallRng::from_rng(thread_rng()).expect("create SmallRng from thread_rng failed") } @@ -334,8 +328,7 @@ pub struct F32(pub S); impl Distribution for F32 where S: Distribution { - fn sample(&self, rng: &mut R) -> f32 - { + fn sample(&self, rng: &mut R) -> f32 { self.0.sample(rng) as f32 } } diff --git a/ndarray-rand/tests/tests.rs b/ndarray-rand/tests/tests.rs index 2db040310..a0e1584ad 100644 --- a/ndarray-rand/tests/tests.rs +++ b/ndarray-rand/tests/tests.rs @@ -8,8 +8,7 @@ use ndarray_rand::{RandomExt, SamplingStrategy}; use quickcheck::{quickcheck, TestResult}; #[test] -fn test_dim() -{ +fn test_dim() { let (mm, nn) = (5, 5); for m in 0..mm { for n in 0..nn { @@ -23,8 +22,7 @@ fn test_dim() } #[test] -fn test_dim_f() -{ +fn test_dim_f() { let (mm, nn) = (5, 5); for m in 0..mm { for n in 0..nn { @@ -38,8 +36,7 @@ fn test_dim_f() } #[test] -fn sample_axis_on_view() -{ +fn sample_axis_on_view() { let m = 5; let a = Array::random((m, 4), Uniform::new(0., 2.)); let _samples = a @@ -49,8 +46,7 @@ fn sample_axis_on_view() #[test] #[should_panic] -fn oversampling_without_replacement_should_panic() -{ +fn oversampling_without_replacement_should_panic() { let m = 5; let a = Array::random((m, 4), Uniform::new(0., 2.)); let _samples = a.sample_axis(Axis(0), m + 1, SamplingStrategy::WithoutReplacement); @@ -115,8 +111,7 @@ quickcheck! 
{ } } -fn sampling_works(a: &Array2, strategy: SamplingStrategy, axis: Axis, n_samples: usize) -> bool -{ +fn sampling_works(a: &Array2, strategy: SamplingStrategy, axis: Axis, n_samples: usize) -> bool { let samples = a.sample_axis(axis, n_samples, strategy); samples .axis_iter(axis) @@ -124,15 +119,13 @@ fn sampling_works(a: &Array2, strategy: SamplingStrategy, axis: Axis, n_sam } // Check if, when sliced along `axis`, there is at least one lane in `a` equal to `b` -fn is_subset(a: &Array2, b: &ArrayView1, axis: Axis) -> bool -{ +fn is_subset(a: &Array2, b: &ArrayView1, axis: Axis) -> bool { a.axis_iter(axis).any(|lane| &lane == b) } #[test] #[should_panic] -fn sampling_without_replacement_from_a_zero_length_axis_should_panic() -{ +fn sampling_without_replacement_from_a_zero_length_axis_should_panic() { let n = 5; let a = Array::random((0, n), Uniform::new(0., 2.)); let _samples = a.sample_axis(Axis(0), 1, SamplingStrategy::WithoutReplacement); @@ -140,8 +133,7 @@ fn sampling_without_replacement_from_a_zero_length_axis_should_panic() #[test] #[should_panic] -fn sampling_with_replacement_from_a_zero_length_axis_should_panic() -{ +fn sampling_with_replacement_from_a_zero_length_axis_should_panic() { let n = 5; let a = Array::random((0, n), Uniform::new(0., 2.)); let _samples = a.sample_axis(Axis(0), 1, SamplingStrategy::WithReplacement); diff --git a/rustfmt.toml b/rustfmt.toml index f3e376ccc..f0eb0349a 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -3,7 +3,7 @@ array_width = 100 chain_width = 60 fn_call_width = 100 max_width = 120 -brace_style = "AlwaysNextLine" +# brace_style = "AlwaysNextLine" control_brace_style = "AlwaysSameLine" fn_params_layout = "Compressed" # ? 
format_macro_bodies = false diff --git a/src/aliases.rs b/src/aliases.rs index 5df0c95ec..9a8ea8f2c 100644 --- a/src/aliases.rs +++ b/src/aliases.rs @@ -7,58 +7,50 @@ use crate::{ArcArray, Array, ArrayView, ArrayViewMut, Ix, IxDynImpl}; /// Create a zero-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix0() -> Ix0 -{ +pub const fn Ix0() -> Ix0 { Dim::new([]) } /// Create a one-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix1(i0: Ix) -> Ix1 -{ +pub const fn Ix1(i0: Ix) -> Ix1 { Dim::new([i0]) } /// Create a two-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix2(i0: Ix, i1: Ix) -> Ix2 -{ +pub const fn Ix2(i0: Ix, i1: Ix) -> Ix2 { Dim::new([i0, i1]) } /// Create a three-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix3(i0: Ix, i1: Ix, i2: Ix) -> Ix3 -{ +pub const fn Ix3(i0: Ix, i1: Ix, i2: Ix) -> Ix3 { Dim::new([i0, i1, i2]) } /// Create a four-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix4(i0: Ix, i1: Ix, i2: Ix, i3: Ix) -> Ix4 -{ +pub const fn Ix4(i0: Ix, i1: Ix, i2: Ix, i3: Ix) -> Ix4 { Dim::new([i0, i1, i2, i3]) } /// Create a five-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix5(i0: Ix, i1: Ix, i2: Ix, i3: Ix, i4: Ix) -> Ix5 -{ +pub const fn Ix5(i0: Ix, i1: Ix, i2: Ix, i3: Ix, i4: Ix) -> Ix5 { Dim::new([i0, i1, i2, i3, i4]) } /// Create a six-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix6(i0: Ix, i1: Ix, i2: Ix, i3: Ix, i4: Ix, i5: Ix) -> Ix6 -{ +pub const fn Ix6(i0: Ix, i1: Ix, i2: Ix, i3: Ix, i4: Ix, i5: Ix) -> Ix6 { Dim::new([i0, i1, i2, i3, i4, i5]) } /// Create a dynamic-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub fn IxDyn(ix: &[Ix]) -> IxDyn -{ +pub fn IxDyn(ix: &[Ix]) -> IxDyn { Dim(ix) } diff --git a/src/argument_traits.rs b/src/argument_traits.rs index de8ac7f99..82d4869a9 100644 --- a/src/argument_traits.rs +++ 
b/src/argument_traits.rs @@ -4,45 +4,36 @@ use std::mem::MaybeUninit; use crate::math_cell::MathCell; /// A producer element that can be assigned to once -pub trait AssignElem -{ +pub trait AssignElem { /// Assign the value `input` to the element that self represents. fn assign_elem(self, input: T); } /// Assignable element, simply `*self = input`. -impl<'a, T> AssignElem for &'a mut T -{ - fn assign_elem(self, input: T) - { +impl<'a, T> AssignElem for &'a mut T { + fn assign_elem(self, input: T) { *self = input; } } /// Assignable element, simply `self.set(input)`. -impl<'a, T> AssignElem for &'a Cell -{ - fn assign_elem(self, input: T) - { +impl<'a, T> AssignElem for &'a Cell { + fn assign_elem(self, input: T) { self.set(input); } } /// Assignable element, simply `self.set(input)`. -impl<'a, T> AssignElem for &'a MathCell -{ - fn assign_elem(self, input: T) - { +impl<'a, T> AssignElem for &'a MathCell { + fn assign_elem(self, input: T) { self.set(input); } } /// Assignable element, the item in the MaybeUninit is overwritten (prior value, if any, is not /// read or dropped). -impl<'a, T> AssignElem for &'a mut MaybeUninit -{ - fn assign_elem(self, input: T) - { +impl<'a, T> AssignElem for &'a mut MaybeUninit { + fn assign_elem(self, input: T) { *self = MaybeUninit::new(input); } } diff --git a/src/array_approx.rs b/src/array_approx.rs index 286a1146c..4ad5ef201 100644 --- a/src/array_approx.rs +++ b/src/array_approx.rs @@ -1,6 +1,5 @@ #[cfg(feature = "approx")] -mod approx_methods -{ +mod approx_methods { use crate::imp_prelude::*; impl ArrayBase diff --git a/src/array_serde.rs b/src/array_serde.rs index 31b613d4c..aff268a51 100644 --- a/src/array_serde.rs +++ b/src/array_serde.rs @@ -24,8 +24,7 @@ use crate::IntoDimension; /// Verifies that the version of the deserialized array matches the current /// `ARRAY_FORMAT_VERSION`. 
pub fn verify_version(v: u8) -> Result<(), E> -where E: de::Error -{ +where E: de::Error { if v != ARRAY_FORMAT_VERSION { let err_msg = format!("unknown array version: {}", v); Err(de::Error::custom(err_msg)) @@ -39,8 +38,7 @@ impl Serialize for Dim where I: Serialize { fn serialize(&self, serializer: Se) -> Result - where Se: Serializer - { + where Se: Serializer { self.ix().serialize(serializer) } } @@ -50,28 +48,23 @@ impl<'de, I> Deserialize<'de> for Dim where I: Deserialize<'de> { fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> - { + where D: Deserializer<'de> { I::deserialize(deserializer).map(Dim::new) } } /// **Requires crate feature `"serde"`** -impl Serialize for IxDyn -{ +impl Serialize for IxDyn { fn serialize(&self, serializer: Se) -> Result - where Se: Serializer - { + where Se: Serializer { self.ix().serialize(serializer) } } /// **Requires crate feature `"serde"`** -impl<'de> Deserialize<'de> for IxDyn -{ +impl<'de> Deserialize<'de> for IxDyn { fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> - { + where D: Deserializer<'de> { let v = Vec::::deserialize(deserializer)?; Ok(v.into_dimension()) } @@ -85,8 +78,7 @@ where S: Data, { fn serialize(&self, serializer: Se) -> Result - where Se: Serializer - { + where Se: Serializer { let mut state = serializer.serialize_struct("Array", 3)?; state.serialize_field("v", &ARRAY_FORMAT_VERSION)?; state.serialize_field("dim", &self.raw_dim())?; @@ -104,8 +96,7 @@ where D: Dimension + Serialize, { fn serialize(&self, serializer: S) -> Result - where S: Serializer - { + where S: Serializer { let iter = &self.0; let mut seq = serializer.serialize_seq(Some(iter.len()))?; for elt in iter.clone() { @@ -115,23 +106,19 @@ where } } -struct ArrayVisitor -{ +struct ArrayVisitor { _marker_a: PhantomData, _marker_b: PhantomData, } -enum ArrayField -{ +enum ArrayField { Version, Dim, Data, } -impl ArrayVisitor -{ - pub fn new() -> Self - { +impl ArrayVisitor { + pub fn new() -> Self 
{ ArrayVisitor { _marker_a: PhantomData, _marker_b: PhantomData, @@ -149,31 +136,25 @@ where S: DataOwned, { fn deserialize(deserializer: D) -> Result, D::Error> - where D: Deserializer<'de> - { + where D: Deserializer<'de> { deserializer.deserialize_struct("Array", ARRAY_FIELDS, ArrayVisitor::new()) } } -impl<'de> Deserialize<'de> for ArrayField -{ +impl<'de> Deserialize<'de> for ArrayField { fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> - { + where D: Deserializer<'de> { struct ArrayFieldVisitor; - impl<'de> Visitor<'de> for ArrayFieldVisitor - { + impl<'de> Visitor<'de> for ArrayFieldVisitor { type Value = ArrayField; - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result - { + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str(r#""v", "dim", or "data""#) } fn visit_str(self, value: &str) -> Result - where E: de::Error - { + where E: de::Error { match value { "v" => Ok(ArrayField::Version), "dim" => Ok(ArrayField::Dim), @@ -183,8 +164,7 @@ impl<'de> Deserialize<'de> for ArrayField } fn visit_bytes(self, value: &[u8]) -> Result - where E: de::Error - { + where E: de::Error { match value { b"v" => Ok(ArrayField::Version), b"dim" => Ok(ArrayField::Dim), @@ -206,14 +186,12 @@ where { type Value = ArrayBase; - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result - { + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("ndarray representation") } fn visit_seq(self, mut visitor: V) -> Result, V::Error> - where V: SeqAccess<'de> - { + where V: SeqAccess<'de> { let v: u8 = match visitor.next_element()? 
{ Some(value) => value, None => { @@ -245,8 +223,7 @@ where } fn visit_map(self, mut visitor: V) -> Result, V::Error> - where V: MapAccess<'de> - { + where V: MapAccess<'de> { let mut v: Option = None; let mut data: Option> = None; let mut dim: Option = None; diff --git a/src/arrayformat.rs b/src/arrayformat.rs index 202805604..b71bb4509 100644 --- a/src/arrayformat.rs +++ b/src/arrayformat.rs @@ -29,17 +29,14 @@ const AXIS_2D_OVERFLOW_LIMIT: usize = 22; const ELLIPSIS: &str = "..."; #[derive(Clone, Debug)] -struct FormatOptions -{ +struct FormatOptions { axis_collapse_limit: usize, axis_collapse_limit_next_last: usize, axis_collapse_limit_last: usize, } -impl FormatOptions -{ - pub(crate) fn default_for_array(nelem: usize, no_limit: bool) -> Self - { +impl FormatOptions { + pub(crate) fn default_for_array(nelem: usize, no_limit: bool) -> Self { let default = Self { axis_collapse_limit: AXIS_LIMIT_STACKED, axis_collapse_limit_next_last: AXIS_LIMIT_COL, @@ -48,8 +45,7 @@ impl FormatOptions default.set_no_limit(no_limit || nelem < ARRAY_MANY_ELEMENT_LIMIT) } - fn set_no_limit(mut self, no_limit: bool) -> Self - { + fn set_no_limit(mut self, no_limit: bool) -> Self { if no_limit { self.axis_collapse_limit = std::usize::MAX; self.axis_collapse_limit_next_last = std::usize::MAX; @@ -60,8 +56,7 @@ impl FormatOptions /// Axis length collapse limit before ellipsizing, where `axis_rindex` is /// the index of the axis from the back. 
- pub(crate) fn collapse_limit(&self, axis_rindex: usize) -> usize - { + pub(crate) fn collapse_limit(&self, axis_rindex: usize) -> usize { match axis_rindex { 0 => self.axis_collapse_limit_last, 1 => self.axis_collapse_limit_next_last, @@ -85,8 +80,7 @@ impl FormatOptions fn format_with_overflow( f: &mut fmt::Formatter<'_>, length: usize, limit: usize, separator: &str, ellipsis: &str, fmt_elem: &mut dyn FnMut(&mut fmt::Formatter, usize) -> fmt::Result, -) -> fmt::Result -{ +) -> fmt::Result { if length == 0 { // no-op } else if length <= limit { @@ -175,8 +169,7 @@ where impl fmt::Display for ArrayBase where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result - { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } @@ -189,8 +182,7 @@ where S: Data impl fmt::Debug for ArrayBase where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result - { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt)?; @@ -217,8 +209,7 @@ where S: Data impl fmt::LowerExp for ArrayBase where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result - { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } @@ -231,8 +222,7 @@ where S: Data impl fmt::UpperExp for ArrayBase where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result - { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } @@ -244,8 +234,7 @@ where S: Data impl fmt::LowerHex for ArrayBase where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result - { + fn 
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } @@ -258,16 +247,14 @@ where S: Data impl fmt::Binary for ArrayBase where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result - { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } } #[cfg(test)] -mod formatting_with_omit -{ +mod formatting_with_omit { #[cfg(not(feature = "std"))] use alloc::string::String; #[cfg(not(feature = "std"))] @@ -277,8 +264,7 @@ mod formatting_with_omit use super::*; use crate::prelude::*; - fn assert_str_eq(expected: &str, actual: &str) - { + fn assert_str_eq(expected: &str, actual: &str) { // use assert to avoid printing the strings twice on failure assert!( expected == actual, @@ -288,8 +274,7 @@ mod formatting_with_omit ); } - fn ellipsize(limit: usize, sep: &str, elements: impl IntoIterator) -> String - { + fn ellipsize(limit: usize, sep: &str, elements: impl IntoIterator) -> String { let elements = elements.into_iter().collect::>(); let edge = limit / 2; if elements.len() <= limit { @@ -307,8 +292,7 @@ mod formatting_with_omit } #[test] - fn empty_arrays() - { + fn empty_arrays() { let a: Array2 = arr2(&[[], []]); let actual = format!("{}", a); let expected = "[[]]"; @@ -316,8 +300,7 @@ mod formatting_with_omit } #[test] - fn zero_length_axes() - { + fn zero_length_axes() { let a = Array3::::zeros((3, 0, 4)); let actual = format!("{}", a); let expected = "[[[]]]"; @@ -325,8 +308,7 @@ mod formatting_with_omit } #[test] - fn dim_0() - { + fn dim_0() { let element = 12; let a = arr0(element); let actual = format!("{}", a); @@ -335,8 +317,7 @@ mod formatting_with_omit } #[test] - fn dim_1() - { + fn dim_1() { let overflow: usize = 2; let a = Array1::from_elem(ARRAY_MANY_ELEMENT_LIMIT + overflow, 1); let actual = 
format!("{}", a); @@ -345,8 +326,7 @@ mod formatting_with_omit } #[test] - fn dim_1_alternate() - { + fn dim_1_alternate() { let overflow: usize = 2; let a = Array1::from_elem(ARRAY_MANY_ELEMENT_LIMIT + overflow, 1); let actual = format!("{:#}", a); @@ -355,8 +335,7 @@ mod formatting_with_omit } #[test] - fn dim_2_last_axis_overflow() - { + fn dim_2_last_axis_overflow() { let overflow: usize = 2; let a = Array2::from_elem((AXIS_2D_OVERFLOW_LIMIT, AXIS_2D_OVERFLOW_LIMIT + overflow), 1); let actual = format!("{}", a); @@ -376,8 +355,7 @@ mod formatting_with_omit } #[test] - fn dim_2_non_last_axis_overflow() - { + fn dim_2_non_last_axis_overflow() { let a = Array2::from_elem((ARRAY_MANY_ELEMENT_LIMIT / 10, 10), 1); let actual = format!("{}", a); let row = format!("{}", a.row(0)); @@ -389,8 +367,7 @@ mod formatting_with_omit } #[test] - fn dim_2_non_last_axis_overflow_alternate() - { + fn dim_2_non_last_axis_overflow_alternate() { let a = Array2::from_elem((AXIS_LIMIT_COL * 4, 6), 1); let actual = format!("{:#}", a); let row = format!("{}", a.row(0)); @@ -399,8 +376,7 @@ mod formatting_with_omit } #[test] - fn dim_2_multi_directional_overflow() - { + fn dim_2_multi_directional_overflow() { let overflow: usize = 2; let a = Array2::from_elem((AXIS_2D_OVERFLOW_LIMIT + overflow, AXIS_2D_OVERFLOW_LIMIT + overflow), 1); let actual = format!("{}", a); @@ -413,8 +389,7 @@ mod formatting_with_omit } #[test] - fn dim_2_multi_directional_overflow_alternate() - { + fn dim_2_multi_directional_overflow_alternate() { let overflow: usize = 2; let a = Array2::from_elem((AXIS_2D_OVERFLOW_LIMIT + overflow, AXIS_2D_OVERFLOW_LIMIT + overflow), 1); let actual = format!("{:#}", a); @@ -424,8 +399,7 @@ mod formatting_with_omit } #[test] - fn dim_3_overflow_most() - { + fn dim_3_overflow_most() { let a = Array3::from_shape_fn((AXIS_LIMIT_STACKED + 1, AXIS_LIMIT_COL, AXIS_LIMIT_ROW + 1), |(i, j, k)| { 1000. + (100. * ((i as f64).sqrt() + (j as f64).sin() + k as f64)).round() / 100. 
}); @@ -508,8 +482,7 @@ mod formatting_with_omit } #[test] - fn dim_4_overflow_outer() - { + fn dim_4_overflow_outer() { let a = Array4::from_shape_fn((10, 10, 3, 3), |(i, j, k, l)| i + j + k + l); let actual = format!("{:2}", a); // Generated using NumPy with: diff --git a/src/arraytraits.rs b/src/arraytraits.rs index ea0b380ed..00250aada 100644 --- a/src/arraytraits.rs +++ b/src/arraytraits.rs @@ -27,8 +27,7 @@ use crate::{ #[cold] #[inline(never)] -pub(crate) fn array_out_of_bounds() -> ! -{ +pub(crate) fn array_out_of_bounds() -> ! { panic!("ndarray: index out of bounds"); } @@ -53,8 +52,7 @@ where { type Output = S::Elem; #[inline] - fn index(&self, index: I) -> &S::Elem - { + fn index(&self, index: I) -> &S::Elem { debug_bounds_check!(self, index); unsafe { &*self.ptr.as_ptr().offset( @@ -76,8 +74,7 @@ where S: DataMut, { #[inline] - fn index_mut(&mut self, index: I) -> &mut S::Elem - { + fn index_mut(&mut self, index: I) -> &mut S::Elem { debug_bounds_check!(self, index); unsafe { &mut *self.as_mut_ptr().offset( @@ -98,8 +95,7 @@ where S2: Data, D: Dimension, { - fn eq(&self, rhs: &ArrayBase) -> bool - { + fn eq(&self, rhs: &ArrayBase) -> bool { if self.shape() != rhs.shape() { return false; } @@ -131,8 +127,7 @@ where S2: Data, D: Dimension, { - fn eq(&self, rhs: &&ArrayBase) -> bool - { + fn eq(&self, rhs: &&ArrayBase) -> bool { *self == **rhs } } @@ -147,8 +142,7 @@ where S2: Data, D: Dimension, { - fn eq(&self, rhs: &ArrayBase) -> bool - { + fn eq(&self, rhs: &ArrayBase) -> bool { **self == *rhs } } @@ -167,8 +161,7 @@ where S: DataOwned /// Create a one-dimensional array from a boxed slice (no copying needed). /// /// **Panics** if the length is greater than `isize::MAX`. 
- fn from(b: Box<[A]>) -> Self - { + fn from(b: Box<[A]>) -> Self { Self::from_vec(b.into_vec()) } } @@ -185,8 +178,7 @@ where S: DataOwned /// /// let array = Array::from(vec![1., 2., 3., 4.]); /// ``` - fn from(v: Vec) -> Self - { + fn from(v: Vec) -> Self { Self::from_vec(v) } } @@ -206,8 +198,7 @@ where S: DataOwned /// assert!(array == arr1(&[0, 1, 4, 9, 16])) /// ``` fn from_iter(iterable: I) -> ArrayBase - where I: IntoIterator - { + where I: IntoIterator { Self::from_iter(iterable) } } @@ -220,8 +211,7 @@ where type Item = &'a S::Elem; type IntoIter = Iter<'a, S::Elem, D>; - fn into_iter(self) -> Self::IntoIter - { + fn into_iter(self) -> Self::IntoIter { self.iter() } } @@ -234,8 +224,7 @@ where type Item = &'a mut S::Elem; type IntoIter = IterMut<'a, S::Elem, D>; - fn into_iter(self) -> Self::IntoIter - { + fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } @@ -246,8 +235,7 @@ where D: Dimension type Item = &'a A; type IntoIter = Iter<'a, A, D>; - fn into_iter(self) -> Self::IntoIter - { + fn into_iter(self) -> Self::IntoIter { self.into_iter_() } } @@ -258,8 +246,7 @@ where D: Dimension type Item = &'a mut A; type IntoIter = IterMut<'a, A, D>; - fn into_iter(self) -> Self::IntoIter - { + fn into_iter(self) -> Self::IntoIter { self.into_iter_() } } @@ -271,8 +258,7 @@ where S::Elem: hash::Hash, { // Note: elements are hashed in the logical order - fn hash(&self, state: &mut H) - { + fn hash(&self, state: &mut H) { self.shape().hash(state); if let Some(self_s) = self.as_slice() { hash::Hash::hash_slice(self_s, state); @@ -327,8 +313,7 @@ where Slice: AsRef<[A]> /// Create a one-dimensional read-only array view of the data in `slice`. /// /// **Panics** if the slice length is greater than `isize::MAX`. 
- fn from(slice: &'a Slice) -> Self - { + fn from(slice: &'a Slice) -> Self { aview1(slice.as_ref()) } } @@ -338,11 +323,9 @@ where Slice: AsRef<[A]> /// **Panics** if the product of non-zero axis lengths overflows `isize` (This can only occur if A /// is zero-sized because slices cannot contain more than `isize::MAX` number of bytes). /// **Panics** if N == 0 and the number of rows is greater than isize::MAX. -impl<'a, A, const M: usize, const N: usize> From<&'a [[A; N]; M]> for ArrayView<'a, A, Ix2> -{ +impl<'a, A, const M: usize, const N: usize> From<&'a [[A; N]; M]> for ArrayView<'a, A, Ix2> { /// Create a two-dimensional read-only array view of the data in `slice` - fn from(xs: &'a [[A; N]; M]) -> Self - { + fn from(xs: &'a [[A; N]; M]) -> Self { Self::from(&xs[..]) } } @@ -352,11 +335,9 @@ impl<'a, A, const M: usize, const N: usize> From<&'a [[A; N]; M]> for ArrayView< /// **Panics** if the product of non-zero axis lengths overflows `isize`. (This /// can only occur if A is zero-sized or if `N` is zero, because slices cannot /// contain more than `isize::MAX` number of bytes.) -impl<'a, A, const N: usize> From<&'a [[A; N]]> for ArrayView<'a, A, Ix2> -{ +impl<'a, A, const N: usize> From<&'a [[A; N]]> for ArrayView<'a, A, Ix2> { /// Create a two-dimensional read-only array view of the data in `slice` - fn from(xs: &'a [[A; N]]) -> Self - { + fn from(xs: &'a [[A; N]]) -> Self { aview2(xs) } } @@ -368,8 +349,7 @@ where D: Dimension, { /// Create a read-only array view of the array. - fn from(array: &'a ArrayBase) -> Self - { + fn from(array: &'a ArrayBase) -> Self { array.view() } } @@ -381,8 +361,7 @@ where Slice: AsMut<[A]> /// Create a one-dimensional read-write array view of the data in `slice`. /// /// **Panics** if the slice length is greater than `isize::MAX`. 
- fn from(slice: &'a mut Slice) -> Self - { + fn from(slice: &'a mut Slice) -> Self { let xs = slice.as_mut(); if mem::size_of::() == 0 { assert!( @@ -399,11 +378,9 @@ where Slice: AsMut<[A]> /// **Panics** if the product of non-zero axis lengths overflows `isize` (This can only occur if A /// is zero-sized because slices cannot contain more than `isize::MAX` number of bytes). /// **Panics** if N == 0 and the number of rows is greater than isize::MAX. -impl<'a, A, const M: usize, const N: usize> From<&'a mut [[A; N]; M]> for ArrayViewMut<'a, A, Ix2> -{ +impl<'a, A, const M: usize, const N: usize> From<&'a mut [[A; N]; M]> for ArrayViewMut<'a, A, Ix2> { /// Create a two-dimensional read-write array view of the data in `slice` - fn from(xs: &'a mut [[A; N]; M]) -> Self - { + fn from(xs: &'a mut [[A; N]; M]) -> Self { Self::from(&mut xs[..]) } } @@ -413,11 +390,9 @@ impl<'a, A, const M: usize, const N: usize> From<&'a mut [[A; N]; M]> for ArrayV /// **Panics** if the product of non-zero axis lengths overflows `isize`. (This /// can only occur if `A` is zero-sized or if `N` is zero, because slices /// cannot contain more than `isize::MAX` number of bytes.) -impl<'a, A, const N: usize> From<&'a mut [[A; N]]> for ArrayViewMut<'a, A, Ix2> -{ +impl<'a, A, const N: usize> From<&'a mut [[A; N]]> for ArrayViewMut<'a, A, Ix2> { /// Create a two-dimensional read-write array view of the data in `slice` - fn from(xs: &'a mut [[A; N]]) -> Self - { + fn from(xs: &'a mut [[A; N]]) -> Self { let cols = N; let rows = xs.len(); let dim = Ix2(rows, cols); @@ -446,8 +421,7 @@ where D: Dimension, { /// Create a read-write array view of the array. 
- fn from(array: &'a mut ArrayBase) -> Self - { + fn from(array: &'a mut ArrayBase) -> Self { array.view_mut() } } @@ -455,8 +429,7 @@ where impl From> for ArcArray where D: Dimension { - fn from(arr: Array) -> ArcArray - { + fn from(arr: Array) -> ArcArray { arr.into_shared() } } @@ -513,8 +486,7 @@ where { // NOTE: We can implement Default for non-zero dimensional array views by // using an empty slice, however we need a trait for nonzero Dimension. - fn default() -> Self - { + fn default() -> Self { ArrayBase::default(D::default()) } } diff --git a/src/data_repr.rs b/src/data_repr.rs index c64cbcfcf..1d0d8d2bd 100644 --- a/src/data_repr.rs +++ b/src/data_repr.rs @@ -1,15 +1,27 @@ +#![warn(clippy::pedantic, clippy::nursery)] + use crate::extension::nonnull; #[cfg(not(feature = "std"))] use alloc::borrow::ToOwned; use alloc::slice; #[cfg(not(feature = "std"))] use alloc::vec::Vec; +use std::ffi::c_void; use std::mem; use std::mem::ManuallyDrop; use std::ptr::NonNull; use rawpointer::PointerExt; +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum Device { + Host, + #[cfg(feature = "opencl")] + OpenCL, + #[cfg(feature = "cuda")] + CUDA, +} + /// Array's representation. /// /// *Don’t use this type directly—use the type alias @@ -20,68 +32,214 @@ use rawpointer::PointerExt; // transmutable A -> B. #[derive(Debug)] #[repr(C)] -pub struct OwnedRepr -{ +pub struct OwnedRepr { ptr: NonNull, len: usize, capacity: usize, + device: Device, } -impl OwnedRepr -{ - pub(crate) fn from(v: Vec) -> Self - { +impl OwnedRepr { + pub(crate) fn from(v: Vec) -> Self { let mut v = ManuallyDrop::new(v); let len = v.len(); let capacity = v.capacity(); let ptr = nonnull::nonnull_from_vec_data(&mut v); - Self { ptr, len, capacity } + let device = Device::Host; + Self { + ptr, + len, + capacity, + device, + } + } + + /// Move this storage object to a specified device. 
+ pub(crate) fn copy_to_device(self, device: Device) -> Option { + // println!("Copying to {device:?}"); + // let mut self_ = ManuallyDrop::new(self); + // self_.device = device; + + let len = self.len; + let capacity = self.capacity; + + match (self.device, device) { + (Device::Host, Device::Host) => { + todo!() + } + + #[cfg(feature = "opencl")] + (Device::Host, Device::OpenCL) => { + let bytes = std::mem::size_of::() * capacity; + + unsafe { + if let Ok(buffer) = + hasty_::opencl::opencl_allocate(bytes, hasty_::opencl::OpenCLMemoryType::ReadWrite) + { + // println!("Allocated OpenCL Buffer"); + if let Ok(_) = + hasty_::opencl::opencl_write(buffer, self.ptr.as_ptr() as *const std::ffi::c_void, bytes) + { + // println!("Wrote to OpenCL Buffer"); + + Some(Self { + ptr: NonNull::new(buffer as *mut A)?, + len, + capacity, + device, + }) + } else { + // println!("Failed to write to OpenCL Buffer"); + None + } + } else { + // println!("Failed to allocate OpenCL Buffer"); + None + } + } + } + + #[cfg(feature = "opencl")] + (Device::OpenCL, Device::Host) => { + let bytes = std::mem::size_of::() * capacity; + + unsafe { + let mut data = ManuallyDrop::new(Vec::::with_capacity(self.capacity)); + data.set_len(self.len); + if let Ok(_) = hasty_::opencl::opencl_read( + data.as_mut_ptr() as *mut std::ffi::c_void, + self.ptr.as_ptr() as *mut c_void, + bytes, + ) { + Some(Self { + ptr: nonnull::nonnull_from_vec_data(&mut data), + len, + capacity, + device, + }) + } else { + None + } + } + } + + #[cfg(feature = "opencl")] + (Device::OpenCL, Device::OpenCL) => { + todo!(); + } + + _ => { + panic!("Not Implemented") + } + } } - pub(crate) fn into_vec(self) -> Vec - { + /// Drop the object and free the memory + pub(crate) unsafe fn drop_impl(&mut self) -> Vec { + let capacity = self.capacity; + let len = self.len; + self.len = 0; + self.capacity = 0; + let ptr = self.ptr.as_ptr(); + + match self.device { + Device::Host => unsafe { + // println!("Dropping Host pointer"); + 
Vec::from_raw_parts(ptr, len, capacity) + }, + + #[cfg(feature = "opencl")] + Device::OpenCL => { + // Free `ptr` + // println!("Freeing OpenCL pointer"); + + hasty_::opencl::opencl_free(ptr as *mut c_void); + + // Should be optimised out, since nothing is allocated + Vec::new() + } + + #[cfg(feature = "cuda")] + Device::CUDA => { + // Free `ptr` + println!("Freeing CUDA pointer"); + Vec::new() + } + } + } + + /// Convert `self` into a [Vec]. + /// + /// # Panics + /// Will panic if the underlying memory is not allocated on + /// the host device. + pub(crate) fn into_vec(self) -> Vec { + // Creating a Vec requires the data to be on the host device + assert_eq!(self.device, Device::Host); ManuallyDrop::new(self).take_as_vec() } - pub(crate) fn as_slice(&self) -> &[A] - { + /// Get a slice representation of `self`. + /// + /// # Panics + /// Will panic if the underlying memory is not allocated + /// on the host device. + pub(crate) fn as_slice(&self) -> &[A] { + // Cannot create a slice of a device pointer + assert_eq!(self.device, Device::Host); unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } } - pub(crate) fn len(&self) -> usize - { + pub(crate) fn len(&self) -> usize { self.len } - pub(crate) fn as_ptr(&self) -> *const A - { + /// Extract the raw underlying pointer from this object. + /// + /// ## Safety + /// The pointer **may not necessarily point to the host**. + /// Using a non-host pointer on the host will almost certainly + /// cause a segmentation-fault. + pub(crate) fn as_ptr(&self) -> *const A { self.ptr.as_ptr() } - pub(crate) fn as_ptr_mut(&self) -> *mut A - { + /// Extract the raw underlying pointer from this object as mut + /// + /// ## Safety + /// The pointer **may not necessarily point to the host**. + /// Using a non-host pointer on the host will almost certainly + /// cause a segmentation-fault. 
+ pub(crate) fn as_ptr_mut(&self) -> *mut A { self.ptr.as_ptr() } - pub(crate) fn as_nonnull_mut(&mut self) -> NonNull - { + /// Return underlying [NonNull] ptr. + /// + /// ## Safety + /// The pointer **may not necessarily point to the host**. + /// Using a non-host pointer on the host will almost certainly + /// cause a segmentation-fault. + pub(crate) fn as_nonnull_mut(&mut self) -> NonNull { self.ptr } /// Return end pointer - pub(crate) fn as_end_nonnull(&self) -> NonNull - { + /// + /// ## Safety + /// The pointer **may not necessarily point to the host**. + /// Using a non-host pointer on the host will almost certainly + /// cause a segmentation-fault. + pub(crate) fn as_end_nonnull(&self) -> NonNull { unsafe { self.ptr.add(self.len) } } /// Reserve `additional` elements; return the new pointer /// /// ## Safety - /// /// Note that existing pointers into the data are invalidated #[must_use = "must use new pointer to update existing pointers"] - pub(crate) fn reserve(&mut self, additional: usize) -> NonNull - { + pub(crate) fn reserve(&mut self, additional: usize) -> NonNull { self.modify_as_vec(|mut v| { v.reserve(additional); v @@ -92,17 +250,17 @@ impl OwnedRepr /// Set the valid length of the data /// /// ## Safety - /// /// The first `new_len` elements of the data should be valid. - pub(crate) unsafe fn set_len(&mut self, new_len: usize) - { + pub(crate) unsafe fn set_len(&mut self, new_len: usize) { debug_assert!(new_len <= self.capacity); self.len = new_len; } - /// Return the length (number of elements in total) - pub(crate) fn release_all_elements(&mut self) -> usize - { + /// Return the length (number of elements in total) and set + /// the internal length to zero. + /// + /// todo: Is this valid/safe? Mark as unsafe? 
+ pub(crate) fn release_all_elements(&mut self) -> usize { let ret = self.len; self.len = 0; ret @@ -111,11 +269,9 @@ impl OwnedRepr /// Cast self into equivalent repr of other element type /// /// ## Safety - /// /// Caller must ensure the two types have the same representation. /// **Panics** if sizes don't match (which is not a sufficient check). - pub(crate) unsafe fn data_subst(self) -> OwnedRepr - { + pub(crate) unsafe fn data_subst(self) -> OwnedRepr { // necessary but not sufficient check assert_eq!(mem::size_of::(), mem::size_of::()); let self_ = ManuallyDrop::new(self); @@ -123,17 +279,23 @@ impl OwnedRepr ptr: self_.ptr.cast::(), len: self_.len, capacity: self_.capacity, + device: self_.device, } } - fn modify_as_vec(&mut self, f: impl FnOnce(Vec) -> Vec) - { + /// Apply a `f(Vec) -> Vec` to this storage object and update `self`. + fn modify_as_vec(&mut self, f: impl FnOnce(Vec) -> Vec) { let v = self.take_as_vec(); *self = Self::from(f(v)); } - fn take_as_vec(&mut self) -> Vec - { + /// Take `self` as a `Vec` object. This invalidates `self`. + /// + /// # Panics + /// Will panic if the underlying memory is not allocated + /// on the host device. 
+ fn take_as_vec(&mut self) -> Vec { + assert_eq!(self.device, Device::Host); let capacity = self.capacity; let len = self.len; self.len = 0; @@ -145,30 +307,78 @@ impl OwnedRepr impl Clone for OwnedRepr where A: Clone { - fn clone(&self) -> Self - { - Self::from(self.as_slice().to_owned()) + fn clone(&self) -> Self { + match self.device { + Device::Host => Self::from(self.as_slice().to_owned()), + + #[cfg(feature = "opencl")] + Device::OpenCL => { + println!("Performing OpenCL Clone"); + // todo: OpenCL clone + Self::from(self.as_slice().to_owned()) + } + + #[cfg(feature = "cuda")] + Device::CUDA => { + println!("Performing CUDA Clone"); + // todo: CUDA clone + Self::from(self.as_slice().to_owned()) + } + } } - fn clone_from(&mut self, other: &Self) - { - let mut v = self.take_as_vec(); - let other = other.as_slice(); + fn clone_from(&mut self, other: &Self) { + match self.device { + Device::Host => { + let mut v = self.take_as_vec(); + let other = other.as_slice(); - if v.len() > other.len() { - v.truncate(other.len()); + if v.len() > other.len() { + v.truncate(other.len()); + } + let (front, back) = other.split_at(v.len()); + v.clone_from_slice(front); + v.extend_from_slice(back); + *self = Self::from(v); + } + + #[cfg(feature = "opencl")] + Device::OpenCL => { + println!("Performing OpenCL Clone From"); + // todo: OpenCL clone from + let mut v = self.take_as_vec(); + let other = other.as_slice(); + + if v.len() > other.len() { + v.truncate(other.len()); + } + let (front, back) = other.split_at(v.len()); + v.clone_from_slice(front); + v.extend_from_slice(back); + *self = Self::from(v); + } + + #[cfg(feature = "cuda")] + Device::CUDA => { + println!("Performing CUDA Clone From"); + // todo: CUDA clone from + let mut v = self.take_as_vec(); + let other = other.as_slice(); + + if v.len() > other.len() { + v.truncate(other.len()); + } + let (front, back) = other.split_at(v.len()); + v.clone_from_slice(front); + v.extend_from_slice(back); + *self = Self::from(v); + } 
} - let (front, back) = other.split_at(v.len()); - v.clone_from_slice(front); - v.extend_from_slice(back); - *self = Self::from(v); } } -impl Drop for OwnedRepr -{ - fn drop(&mut self) - { +impl Drop for OwnedRepr { + fn drop(&mut self) { if self.capacity > 0 { // correct because: If the elements don't need dropping, an // empty Vec is ok. Only the Vec's allocation needs dropping. @@ -183,7 +393,7 @@ impl Drop for OwnedRepr self.len = 0; } // drop as a Vec. - self.take_as_vec(); + unsafe { self.drop_impl() }; } } } diff --git a/src/data_traits.rs b/src/data_traits.rs index a2784b8d3..42b01ed0e 100644 --- a/src/data_traits.rs +++ b/src/data_traits.rs @@ -29,8 +29,7 @@ use crate::{ArcArray, Array, ArrayBase, CowRepr, Dimension, OwnedArcRepr, OwnedR /// Traits in Rust can serve many different roles. This trait is public because /// it is used as a bound on public methods. #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait RawData: Sized -{ +pub unsafe trait RawData: Sized { /// The array element type. type Elem; @@ -51,8 +50,7 @@ pub unsafe trait RawData: Sized /// /// ***Internal trait, see `RawData`.*** #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait RawDataMut: RawData -{ +pub unsafe trait RawDataMut: RawData { /// If possible, ensures that the array has unique access to its data. /// /// The implementer must ensure that if the input is contiguous, then the @@ -80,15 +78,13 @@ pub unsafe trait RawDataMut: RawData /// /// ***Internal trait, see `RawData`.*** #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait RawDataClone: RawData -{ +pub unsafe trait RawDataClone: RawData { #[doc(hidden)] /// Unsafe because, `ptr` must point inside the current storage. 
unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull); #[doc(hidden)] - unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull - { + unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull { let (data, ptr) = other.clone_with_ptr(ptr); *self = data; ptr @@ -101,8 +97,7 @@ pub unsafe trait RawDataClone: RawData /// /// ***Internal trait, see `RawData`.*** #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait Data: RawData -{ +pub unsafe trait Data: RawData { /// Converts the array to a uniquely owned array, cloning elements if necessary. #[doc(hidden)] #[allow(clippy::wrong_self_convention)] @@ -144,8 +139,7 @@ pub unsafe trait Data: RawData // the data is unique. You are also guaranteeing that `try_is_unique` always // returns `Some(_)`. #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait DataMut: Data + RawDataMut -{ +pub unsafe trait DataMut: Data + RawDataMut { /// Ensures that the array has unique access to its data. #[doc(hidden)] #[inline] @@ -161,60 +155,50 @@ pub unsafe trait DataMut: Data + RawDataMut #[doc(hidden)] #[inline] #[allow(clippy::wrong_self_convention)] // mut needed for Arc types - fn is_unique(&mut self) -> bool - { + fn is_unique(&mut self) -> bool { self.try_is_unique().unwrap() } } -unsafe impl RawData for RawViewRepr<*const A> -{ +unsafe impl RawData for RawViewRepr<*const A> { type Elem = A; #[inline] - fn _data_slice(&self) -> Option<&[A]> - { + fn _data_slice(&self) -> Option<&[A]> { None } #[inline(always)] - fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool - { + fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool { true } private_impl! 
{} } -unsafe impl RawDataClone for RawViewRepr<*const A> -{ - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) - { +unsafe impl RawDataClone for RawViewRepr<*const A> { + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { (*self, ptr) } } -unsafe impl RawData for RawViewRepr<*mut A> -{ +unsafe impl RawData for RawViewRepr<*mut A> { type Elem = A; #[inline] - fn _data_slice(&self) -> Option<&[A]> - { + fn _data_slice(&self) -> Option<&[A]> { None } #[inline(always)] - fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool - { + fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool { true } private_impl! {} } -unsafe impl RawDataMut for RawViewRepr<*mut A> -{ +unsafe impl RawDataMut for RawViewRepr<*mut A> { #[inline] fn try_ensure_unique(_: &mut ArrayBase) where @@ -224,30 +208,24 @@ unsafe impl RawDataMut for RawViewRepr<*mut A> } #[inline] - fn try_is_unique(&mut self) -> Option - { + fn try_is_unique(&mut self) -> Option { None } } -unsafe impl RawDataClone for RawViewRepr<*mut A> -{ - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) - { +unsafe impl RawDataClone for RawViewRepr<*mut A> { + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { (*self, ptr) } } -unsafe impl RawData for OwnedArcRepr -{ +unsafe impl RawData for OwnedArcRepr { type Elem = A; - fn _data_slice(&self) -> Option<&[A]> - { + fn _data_slice(&self) -> Option<&[A]> { Some(self.0.as_slice()) } - fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool - { + fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool { self.0._is_pointer_inbounds(self_ptr) } @@ -285,14 +263,12 @@ where A: Clone } } - fn try_is_unique(&mut self) -> Option - { + fn try_is_unique(&mut self) -> Option { Some(Arc::get_mut(&mut self.0).is_some()) } } -unsafe impl Data for OwnedArcRepr -{ +unsafe impl Data for OwnedArcRepr { fn into_owned(mut self_: ArrayBase) -> Array where A: Clone, @@ -305,8 +281,7 @@ unsafe impl 
Data for OwnedArcRepr } fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> - where D: Dimension - { + where D: Dimension { match Arc::try_unwrap(self_.data.0) { Ok(owned_data) => unsafe { // Safe because the data is equivalent. @@ -334,26 +309,21 @@ unsafe impl Data for OwnedArcRepr unsafe impl DataMut for OwnedArcRepr where A: Clone {} -unsafe impl RawDataClone for OwnedArcRepr -{ - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) - { +unsafe impl RawDataClone for OwnedArcRepr { + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { // pointer is preserved (self.clone(), ptr) } } -unsafe impl RawData for OwnedRepr -{ +unsafe impl RawData for OwnedRepr { type Elem = A; - fn _data_slice(&self) -> Option<&[A]> - { + fn _data_slice(&self) -> Option<&[A]> { Some(self.as_slice()) } - fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool - { + fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool { let slc = self.as_slice(); let ptr = slc.as_ptr() as *mut A; let end = unsafe { ptr.add(slc.len()) }; @@ -363,8 +333,7 @@ unsafe impl RawData for OwnedRepr private_impl! 
{} } -unsafe impl RawDataMut for OwnedRepr -{ +unsafe impl RawDataMut for OwnedRepr { #[inline] fn try_ensure_unique(_: &mut ArrayBase) where @@ -374,14 +343,12 @@ unsafe impl RawDataMut for OwnedRepr } #[inline] - fn try_is_unique(&mut self) -> Option - { + fn try_is_unique(&mut self) -> Option { Some(true) } } -unsafe impl Data for OwnedRepr -{ +unsafe impl Data for OwnedRepr { #[inline] fn into_owned(self_: ArrayBase) -> Array where @@ -393,8 +360,7 @@ unsafe impl Data for OwnedRepr #[inline] fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> - where D: Dimension - { + where D: Dimension { Ok(self_) } } @@ -404,8 +370,7 @@ unsafe impl DataMut for OwnedRepr {} unsafe impl RawDataClone for OwnedRepr where A: Clone { - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) - { + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { let mut u = self.clone(); let mut new_ptr = u.as_nonnull_mut(); if size_of::() != 0 { @@ -415,8 +380,7 @@ where A: Clone (u, new_ptr) } - unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull - { + unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull { let our_off = if size_of::() != 0 { (ptr.as_ptr() as isize - other.as_ptr() as isize) / mem::size_of::() as isize } else { @@ -427,27 +391,23 @@ where A: Clone } } -unsafe impl<'a, A> RawData for ViewRepr<&'a A> -{ +unsafe impl<'a, A> RawData for ViewRepr<&'a A> { type Elem = A; #[inline] - fn _data_slice(&self) -> Option<&[A]> - { + fn _data_slice(&self) -> Option<&[A]> { None } #[inline(always)] - fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool - { + fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool { true } private_impl! 
{} } -unsafe impl<'a, A> Data for ViewRepr<&'a A> -{ +unsafe impl<'a, A> Data for ViewRepr<&'a A> { fn into_owned(self_: ArrayBase) -> Array where Self::Elem: Clone, @@ -457,41 +417,34 @@ unsafe impl<'a, A> Data for ViewRepr<&'a A> } fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> - where D: Dimension - { + where D: Dimension { Err(self_) } } -unsafe impl<'a, A> RawDataClone for ViewRepr<&'a A> -{ - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) - { +unsafe impl<'a, A> RawDataClone for ViewRepr<&'a A> { + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { (*self, ptr) } } -unsafe impl<'a, A> RawData for ViewRepr<&'a mut A> -{ +unsafe impl<'a, A> RawData for ViewRepr<&'a mut A> { type Elem = A; #[inline] - fn _data_slice(&self) -> Option<&[A]> - { + fn _data_slice(&self) -> Option<&[A]> { None } #[inline(always)] - fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool - { + fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool { true } private_impl! {} } -unsafe impl<'a, A> RawDataMut for ViewRepr<&'a mut A> -{ +unsafe impl<'a, A> RawDataMut for ViewRepr<&'a mut A> { #[inline] fn try_ensure_unique(_: &mut ArrayBase) where @@ -501,14 +454,12 @@ unsafe impl<'a, A> RawDataMut for ViewRepr<&'a mut A> } #[inline] - fn try_is_unique(&mut self) -> Option - { + fn try_is_unique(&mut self) -> Option { Some(true) } } -unsafe impl<'a, A> Data for ViewRepr<&'a mut A> -{ +unsafe impl<'a, A> Data for ViewRepr<&'a mut A> { fn into_owned(self_: ArrayBase) -> Array where Self::Elem: Clone, @@ -518,8 +469,7 @@ unsafe impl<'a, A> Data for ViewRepr<&'a mut A> } fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> - where D: Dimension - { + where D: Dimension { Err(self_) } } @@ -539,8 +489,7 @@ unsafe impl<'a, A> DataMut for ViewRepr<&'a mut A> {} // unsharing storage before mutating it. 
The initially allocated storage must be mutable so // that it can be mutated directly - through .raw_view_mut_unchecked() - for initialization. #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait DataOwned: Data -{ +pub unsafe trait DataOwned: Data { /// Corresponding owned data with MaybeUninit elements type MaybeUninit: DataOwned> + RawDataSubst; #[doc(hidden)] @@ -563,42 +512,34 @@ pub unsafe trait DataShared: Clone + Data + RawDataClone {} unsafe impl DataShared for OwnedArcRepr {} unsafe impl<'a, A> DataShared for ViewRepr<&'a A> {} -unsafe impl DataOwned for OwnedRepr -{ +unsafe impl DataOwned for OwnedRepr { type MaybeUninit = OwnedRepr>; - fn new(elements: Vec) -> Self - { + fn new(elements: Vec) -> Self { OwnedRepr::from(elements) } - fn into_shared(self) -> OwnedArcRepr - { + fn into_shared(self) -> OwnedArcRepr { OwnedArcRepr(Arc::new(self)) } } -unsafe impl DataOwned for OwnedArcRepr -{ +unsafe impl DataOwned for OwnedArcRepr { type MaybeUninit = OwnedArcRepr>; - fn new(elements: Vec) -> Self - { + fn new(elements: Vec) -> Self { OwnedArcRepr(Arc::new(OwnedRepr::from(elements))) } - fn into_shared(self) -> OwnedArcRepr - { + fn into_shared(self) -> OwnedArcRepr { self } } -unsafe impl<'a, A> RawData for CowRepr<'a, A> -{ +unsafe impl<'a, A> RawData for CowRepr<'a, A> { type Elem = A; - fn _data_slice(&self) -> Option<&[A]> - { + fn _data_slice(&self) -> Option<&[A]> { #[allow(deprecated)] match self { CowRepr::View(view) => view._data_slice(), @@ -607,8 +548,7 @@ unsafe impl<'a, A> RawData for CowRepr<'a, A> } #[inline] - fn _is_pointer_inbounds(&self, ptr: *const Self::Elem) -> bool - { + fn _is_pointer_inbounds(&self, ptr: *const Self::Elem) -> bool { match self { CowRepr::View(view) => view._is_pointer_inbounds(ptr), CowRepr::Owned(data) => data._is_pointer_inbounds(ptr), @@ -640,8 +580,7 @@ where A: Clone } #[inline] - fn try_is_unique(&mut self) -> Option - { + fn try_is_unique(&mut self) -> Option { 
Some(self.is_owned()) } } @@ -649,8 +588,7 @@ where A: Clone unsafe impl<'a, A> RawDataClone for CowRepr<'a, A> where A: Clone { - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) - { + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { match self { CowRepr::View(view) => { let (new_view, ptr) = view.clone_with_ptr(ptr); @@ -663,8 +601,7 @@ where A: Clone } } - unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull - { + unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull { match (&mut *self, other) { (CowRepr::View(self_), CowRepr::View(other)) => self_.clone_from_with_ptr(other, ptr), (CowRepr::Owned(self_), CowRepr::Owned(other)) => self_.clone_from_with_ptr(other, ptr), @@ -682,8 +619,7 @@ where A: Clone } } -unsafe impl<'a, A> Data for CowRepr<'a, A> -{ +unsafe impl<'a, A> Data for CowRepr<'a, A> { #[inline] fn into_owned(self_: ArrayBase, D>) -> Array where @@ -700,8 +636,7 @@ unsafe impl<'a, A> Data for CowRepr<'a, A> } fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> - where D: Dimension - { + where D: Dimension { match self_.data { CowRepr::View(_) => Err(self_), CowRepr::Owned(data) => unsafe { @@ -720,8 +655,7 @@ unsafe impl<'a, A> DataMut for CowRepr<'a, A> where A: Clone {} /// keeping the same kind of storage. /// /// For example, `RawDataSubst` can map the type `OwnedRepr` to `OwnedRepr`. 
-pub trait RawDataSubst: RawData -{ +pub trait RawDataSubst: RawData { /// The resulting array storage of the same kind but substituted element type type Output: RawData; @@ -734,72 +668,58 @@ pub trait RawDataSubst: RawData unsafe fn data_subst(self) -> Self::Output; } -impl RawDataSubst for OwnedRepr -{ +impl RawDataSubst for OwnedRepr { type Output = OwnedRepr; - unsafe fn data_subst(self) -> Self::Output - { + unsafe fn data_subst(self) -> Self::Output { self.data_subst() } } -impl RawDataSubst for OwnedArcRepr -{ +impl RawDataSubst for OwnedArcRepr { type Output = OwnedArcRepr; - unsafe fn data_subst(self) -> Self::Output - { + unsafe fn data_subst(self) -> Self::Output { OwnedArcRepr(Arc::from_raw(Arc::into_raw(self.0) as *const OwnedRepr)) } } -impl RawDataSubst for RawViewRepr<*const A> -{ +impl RawDataSubst for RawViewRepr<*const A> { type Output = RawViewRepr<*const B>; - unsafe fn data_subst(self) -> Self::Output - { + unsafe fn data_subst(self) -> Self::Output { RawViewRepr::new() } } -impl RawDataSubst for RawViewRepr<*mut A> -{ +impl RawDataSubst for RawViewRepr<*mut A> { type Output = RawViewRepr<*mut B>; - unsafe fn data_subst(self) -> Self::Output - { + unsafe fn data_subst(self) -> Self::Output { RawViewRepr::new() } } -impl<'a, A: 'a, B: 'a> RawDataSubst for ViewRepr<&'a A> -{ +impl<'a, A: 'a, B: 'a> RawDataSubst for ViewRepr<&'a A> { type Output = ViewRepr<&'a B>; - unsafe fn data_subst(self) -> Self::Output - { + unsafe fn data_subst(self) -> Self::Output { ViewRepr::new() } } -impl<'a, A: 'a, B: 'a> RawDataSubst for ViewRepr<&'a mut A> -{ +impl<'a, A: 'a, B: 'a> RawDataSubst for ViewRepr<&'a mut A> { type Output = ViewRepr<&'a mut B>; - unsafe fn data_subst(self) -> Self::Output - { + unsafe fn data_subst(self) -> Self::Output { ViewRepr::new() } } -impl<'a, A: 'a, B: 'a> RawDataSubst for CowRepr<'a, A> -{ +impl<'a, A: 'a, B: 'a> RawDataSubst for CowRepr<'a, A> { type Output = CowRepr<'a, B>; - unsafe fn data_subst(self) -> Self::Output - { + 
unsafe fn data_subst(self) -> Self::Output { match self { CowRepr::View(view) => CowRepr::View(view.data_subst()), CowRepr::Owned(owned) => CowRepr::Owned(owned.data_subst()), diff --git a/src/dimension/axes.rs b/src/dimension/axes.rs index 925b257a7..f83a38e34 100644 --- a/src/dimension/axes.rs +++ b/src/dimension/axes.rs @@ -2,8 +2,7 @@ use crate::{Axis, Dimension, Ix, Ixs}; /// Create a new Axes iterator pub(crate) fn axes_of<'a, D>(d: &'a D, strides: &'a D) -> Axes<'a, D> -where D: Dimension -{ +where D: Dimension { Axes { dim: d, strides, @@ -37,8 +36,7 @@ where D: Dimension /// assert_eq!(largest_axis.len, 5); /// ``` #[derive(Debug)] -pub struct Axes<'a, D> -{ +pub struct Axes<'a, D> { dim: &'a D, strides: &'a D, start: usize, @@ -47,8 +45,7 @@ pub struct Axes<'a, D> /// Description of the axis, its length and its stride. #[derive(Debug)] -pub struct AxisDescription -{ +pub struct AxisDescription { /// Axis identifier (index) pub axis: Axis, /// Length in count of elements of the current axis @@ -62,27 +59,23 @@ copy_and_clone!(AxisDescription); // AxisDescription can't really be empty // https://github.com/rust-ndarray/ndarray/pull/642#discussion_r296051702 #[allow(clippy::len_without_is_empty)] -impl AxisDescription -{ +impl AxisDescription { /// Return axis #[deprecated(note = "Use .axis field instead", since = "0.15.0")] #[inline(always)] - pub fn axis(self) -> Axis - { + pub fn axis(self) -> Axis { self.axis } /// Return length #[deprecated(note = "Use .len field instead", since = "0.15.0")] #[inline(always)] - pub fn len(self) -> Ix - { + pub fn len(self) -> Ix { self.len } /// Return stride #[deprecated(note = "Use .stride field instead", since = "0.15.0")] #[inline(always)] - pub fn stride(self) -> Ixs - { + pub fn stride(self) -> Ixs { self.stride } } @@ -95,8 +88,7 @@ where D: Dimension /// Description of the axis, its length and its stride. 
type Item = AxisDescription; - fn next(&mut self) -> Option - { + fn next(&mut self) -> Option { if self.start < self.end { let i = self.start.post_inc(); Some(AxisDescription { @@ -110,8 +102,7 @@ where D: Dimension } fn fold(self, init: B, f: F) -> B - where F: FnMut(B, AxisDescription) -> B - { + where F: FnMut(B, AxisDescription) -> B { (self.start..self.end) .map(move |i| AxisDescription { axis: Axis(i), @@ -121,8 +112,7 @@ where D: Dimension .fold(init, f) } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { let len = self.end - self.start; (len, Some(len)) } @@ -131,8 +121,7 @@ where D: Dimension impl<'a, D> DoubleEndedIterator for Axes<'a, D> where D: Dimension { - fn next_back(&mut self) -> Option - { + fn next_back(&mut self) -> Option { if self.start < self.end { let i = self.end.pre_dec(); Some(AxisDescription { @@ -146,24 +135,20 @@ where D: Dimension } } -trait IncOps: Copy -{ +trait IncOps: Copy { fn post_inc(&mut self) -> Self; fn pre_dec(&mut self) -> Self; } -impl IncOps for usize -{ +impl IncOps for usize { #[inline(always)] - fn post_inc(&mut self) -> Self - { + fn post_inc(&mut self) -> Self { let x = *self; *self += 1; x } #[inline(always)] - fn pre_dec(&mut self) -> Self - { + fn pre_dec(&mut self) -> Self { *self -= 1; *self } diff --git a/src/dimension/axis.rs b/src/dimension/axis.rs index 8c896f6b7..611c62b31 100644 --- a/src/dimension/axis.rs +++ b/src/dimension/axis.rs @@ -26,12 +26,10 @@ #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Axis(pub usize); -impl Axis -{ +impl Axis { /// Return the index of the axis. 
#[inline(always)] - pub fn index(self) -> usize - { + pub fn index(self) -> usize { self.0 } } diff --git a/src/dimension/broadcast.rs b/src/dimension/broadcast.rs index d277cfea2..b2aa886a9 100644 --- a/src/dimension/broadcast.rs +++ b/src/dimension/broadcast.rs @@ -34,8 +34,7 @@ where Ok(out) } -pub trait DimMax -{ +pub trait DimMax { /// The resulting dimension type after broadcasting. type Output: Dimension; } @@ -43,8 +42,7 @@ pub trait DimMax /// Dimensions of the same type remain unchanged when co_broadcast. /// So you can directly use D as the resulting type. /// (Instead of >::BroadcastOutput) -impl DimMax for D -{ +impl DimMax for D { type Output = D; } @@ -91,14 +89,12 @@ impl_broadcast_distinct_fixed!(Ix6, IxDyn); #[cfg(test)] #[cfg(feature = "std")] -mod tests -{ +mod tests { use super::co_broadcast; use crate::{Dim, DimMax, Dimension, ErrorKind, Ix0, IxDynImpl, ShapeError}; #[test] - fn test_broadcast_shape() - { + fn test_broadcast_shape() { fn test_co(d1: &D1, d2: &D2, r: Result<>::Output, ShapeError>) where D1: Dimension + DimMax, diff --git a/src/dimension/conversion.rs b/src/dimension/conversion.rs index 0cf2e1296..d4ca00810 100644 --- a/src/dimension/conversion.rs +++ b/src/dimension/conversion.rs @@ -40,18 +40,15 @@ macro_rules! index_item { } /// Argument conversion a dimension. 
-pub trait IntoDimension -{ +pub trait IntoDimension { type Dim: Dimension; fn into_dimension(self) -> Self::Dim; } -impl IntoDimension for Ix -{ +impl IntoDimension for Ix { type Dim = Ix1; #[inline(always)] - fn into_dimension(self) -> Ix1 - { + fn into_dimension(self) -> Ix1 { Ix1(self) } } @@ -61,34 +58,28 @@ where D: Dimension { type Dim = D; #[inline(always)] - fn into_dimension(self) -> Self - { + fn into_dimension(self) -> Self { self } } -impl IntoDimension for IxDynImpl -{ +impl IntoDimension for IxDynImpl { type Dim = IxDyn; #[inline(always)] - fn into_dimension(self) -> Self::Dim - { + fn into_dimension(self) -> Self::Dim { Dim::new(self) } } -impl IntoDimension for Vec -{ +impl IntoDimension for Vec { type Dim = IxDyn; #[inline(always)] - fn into_dimension(self) -> Self::Dim - { + fn into_dimension(self) -> Self::Dim { Dim::new(IxDynImpl::from(self)) } } -pub trait Convert -{ +pub trait Convert { type To; fn convert(self) -> Self::To; } diff --git a/src/dimension/dim.rs b/src/dimension/dim.rs index 96e433bb3..ffc6ccbbd 100644 --- a/src/dimension/dim.rs +++ b/src/dimension/dim.rs @@ -35,26 +35,21 @@ use std::fmt; /// assert_eq!(array.raw_dim(), Dim([3, 2])); /// ``` #[derive(Copy, Clone, PartialEq, Eq, Hash, Default)] -pub struct Dim -{ +pub struct Dim { index: I, } -impl Dim -{ +impl Dim { /// Private constructor and accessors for Dim - pub(crate) const fn new(index: I) -> Dim - { + pub(crate) const fn new(index: I) -> Dim { Dim { index } } #[inline(always)] - pub(crate) fn ix(&self) -> &I - { + pub(crate) fn ix(&self) -> &I { &self.index } #[inline(always)] - pub(crate) fn ixm(&mut self) -> &mut I - { + pub(crate) fn ixm(&mut self) -> &mut I { &mut self.index } } @@ -62,16 +57,14 @@ impl Dim /// Create a new dimension value. 
#[allow(non_snake_case)] pub fn Dim(index: T) -> T::Dim -where T: IntoDimension -{ +where T: IntoDimension { index.into_dimension() } impl PartialEq for Dim where I: PartialEq { - fn eq(&self, rhs: &I) -> bool - { + fn eq(&self, rhs: &I) -> bool { self.index == *rhs } } @@ -79,8 +72,7 @@ where I: PartialEq impl fmt::Debug for Dim where I: fmt::Debug { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result - { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?}", self.index) } } diff --git a/src/dimension/dimension_trait.rs b/src/dimension/dimension_trait.rs index 3544a7f3c..4f3b82e52 100644 --- a/src/dimension/dimension_trait.rs +++ b/src/dimension/dimension_trait.rs @@ -83,14 +83,12 @@ pub trait Dimension: fn into_pattern(self) -> Self::Pattern; /// Compute the size of the dimension (number of elements) - fn size(&self) -> usize - { + fn size(&self) -> usize { self.slice().iter().product() } /// Compute the size while checking for overflow. - fn size_checked(&self) -> Option - { + fn size_checked(&self) -> Option { self.slice() .iter() .try_fold(1_usize, |s, &a| s.checked_mul(a)) @@ -103,20 +101,17 @@ pub trait Dimension: fn slice_mut(&mut self) -> &mut [Ix]; /// Borrow as a read-only array view. - fn as_array_view(&self) -> ArrayView1<'_, Ix> - { + fn as_array_view(&self) -> ArrayView1<'_, Ix> { ArrayView1::from(self.slice()) } /// Borrow as a read-write array view. - fn as_array_view_mut(&mut self) -> ArrayViewMut1<'_, Ix> - { + fn as_array_view_mut(&mut self) -> ArrayViewMut1<'_, Ix> { ArrayViewMut1::from(self.slice_mut()) } #[doc(hidden)] - fn equal(&self, rhs: &Self) -> bool - { + fn equal(&self, rhs: &Self) -> bool { self.slice() == rhs.slice() } @@ -125,8 +120,7 @@ pub trait Dimension: /// If the array is non-empty, the strides result in contiguous layout; if /// the array is empty, the strides are all zeros. 
#[doc(hidden)] - fn default_strides(&self) -> Self - { + fn default_strides(&self) -> Self { // Compute default array strides // Shape (a, b, c) => Give strides (b * c, c, 1) let mut strides = Self::zeros(self.ndim()); @@ -151,8 +145,7 @@ pub trait Dimension: /// If the array is non-empty, the strides result in contiguous layout; if /// the array is empty, the strides are all zeros. #[doc(hidden)] - fn fortran_strides(&self) -> Self - { + fn fortran_strides(&self) -> Self { // Compute fortran array strides // Shape (a, b, c) => Give strides (1, a, a * b) let mut strides = Self::zeros(self.ndim()); @@ -182,8 +175,7 @@ pub trait Dimension: #[doc(hidden)] #[inline] - fn first_index(&self) -> Option - { + fn first_index(&self) -> Option { for ax in self.slice().iter() { if *ax == 0 { return None; @@ -197,8 +189,7 @@ pub trait Dimension: /// or None if there are no more. // FIXME: use &Self for index or even &mut? #[inline] - fn next_for(&self, index: Self) -> Option - { + fn next_for(&self, index: Self) -> Option { let mut index = index; let mut done = false; for (&dim, ix) in zip(self.slice(), index.slice_mut()).rev() { @@ -223,8 +214,7 @@ pub trait Dimension: /// /// Next in f-order #[inline] - fn next_for_f(&self, index: &mut Self) -> bool - { + fn next_for_f(&self, index: &mut Self) -> bool { let mut end_iteration = true; for (&dim, ix) in zip(self.slice(), index.slice_mut()) { *ix += 1; @@ -247,8 +237,7 @@ pub trait Dimension: /// Note: Returns `false` if any of the ndims don't match. #[doc(hidden)] fn strides_equivalent(&self, strides1: &Self, strides2: &D) -> bool - where D: Dimension - { + where D: Dimension { let shape_ndim = self.ndim(); shape_ndim == strides1.ndim() && shape_ndim == strides2.ndim() @@ -258,8 +247,7 @@ pub trait Dimension: #[doc(hidden)] /// Return stride offset for index. 
- fn stride_offset(index: &Self, strides: &Self) -> isize - { + fn stride_offset(index: &Self, strides: &Self) -> isize { let mut offset = 0; for (&i, &s) in izip!(index.slice(), strides.slice()) { offset += stride_offset(i, s); @@ -269,14 +257,12 @@ pub trait Dimension: #[doc(hidden)] /// Return stride offset for this dimension and index. - fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option - { + fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option { stride_offset_checked(self.slice(), strides.slice(), index.slice()) } #[doc(hidden)] - fn last_elem(&self) -> usize - { + fn last_elem(&self) -> usize { if self.ndim() == 0 { 0 } else { @@ -285,15 +271,13 @@ pub trait Dimension: } #[doc(hidden)] - fn set_last_elem(&mut self, i: usize) - { + fn set_last_elem(&mut self, i: usize) { let nd = self.ndim(); self.slice_mut()[nd - 1] = i; } #[doc(hidden)] - fn is_contiguous(dim: &Self, strides: &Self) -> bool - { + fn is_contiguous(dim: &Self, strides: &Self) -> bool { let defaults = dim.default_strides(); if strides.equal(&defaults) { return true; @@ -325,8 +309,7 @@ pub trait Dimension: /// /// Assumes that no stride value appears twice. 
#[doc(hidden)] - fn _fastest_varying_stride_order(&self) -> Self - { + fn _fastest_varying_stride_order(&self) -> Self { let mut indices = self.clone(); for (i, elt) in enumerate(indices.slice_mut()) { *elt = i; @@ -341,8 +324,7 @@ pub trait Dimension: /// Compute the minimum stride axis (absolute value), under the constraint /// that the length of the axis is > 1; #[doc(hidden)] - fn min_stride_axis(&self, strides: &Self) -> Axis - { + fn min_stride_axis(&self, strides: &Self) -> Axis { let n = match self.ndim() { 0 => panic!("min_stride_axis: Array must have ndim > 0"), 1 => return Axis(0), @@ -357,8 +339,7 @@ pub trait Dimension: /// Compute the maximum stride axis (absolute value), under the constraint /// that the length of the axis is > 1; #[doc(hidden)] - fn max_stride_axis(&self, strides: &Self) -> Axis - { + fn max_stride_axis(&self, strides: &Self) -> Axis { match self.ndim() { 0 => panic!("max_stride_axis: Array must have ndim > 0"), 1 => return Axis(0), @@ -371,14 +352,12 @@ pub trait Dimension: } /// Convert the dimensional into a dynamic dimensional (IxDyn). - fn into_dyn(self) -> IxDyn - { + fn into_dyn(self) -> IxDyn { IxDyn(self.slice()) } #[doc(hidden)] - fn from_dimension(d: &D2) -> Option - { + fn from_dimension(d: &D2) -> Option { let mut s = Self::default(); if s.ndim() == d.ndim() { for i in 0..d.ndim() { @@ -414,91 +393,76 @@ macro_rules! 
impl_insert_axis_array( ); ); -impl Dimension for Dim<[Ix; 0]> -{ +impl Dimension for Dim<[Ix; 0]> { const NDIM: Option = Some(0); type Pattern = (); type Smaller = Self; type Larger = Ix1; // empty product is 1 -> size is 1 #[inline] - fn ndim(&self) -> usize - { + fn ndim(&self) -> usize { 0 } #[inline] - fn slice(&self) -> &[Ix] - { + fn slice(&self) -> &[Ix] { &[] } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] - { + fn slice_mut(&mut self) -> &mut [Ix] { &mut [] } #[inline] - fn _fastest_varying_stride_order(&self) -> Self - { + fn _fastest_varying_stride_order(&self) -> Self { Ix0() } #[inline] fn into_pattern(self) -> Self::Pattern {} #[inline] - fn zeros(ndim: usize) -> Self - { + fn zeros(ndim: usize) -> Self { assert_eq!(ndim, 0); Self::default() } #[inline] - fn next_for(&self, _index: Self) -> Option - { + fn next_for(&self, _index: Self) -> Option { None } impl_insert_axis_array!(0); #[inline] - fn try_remove_axis(&self, _ignore: Axis) -> Self::Smaller - { + fn try_remove_axis(&self, _ignore: Axis) -> Self::Smaller { *self } private_impl! 
{} } -impl Dimension for Dim<[Ix; 1]> -{ +impl Dimension for Dim<[Ix; 1]> { const NDIM: Option = Some(1); type Pattern = Ix; type Smaller = Ix0; type Larger = Ix2; #[inline] - fn ndim(&self) -> usize - { + fn ndim(&self) -> usize { 1 } #[inline] - fn slice(&self) -> &[Ix] - { + fn slice(&self) -> &[Ix] { self.ix() } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] - { + fn slice_mut(&mut self) -> &mut [Ix] { self.ixm() } #[inline] - fn into_pattern(self) -> Self::Pattern - { + fn into_pattern(self) -> Self::Pattern { get!(&self, 0) } #[inline] - fn zeros(ndim: usize) -> Self - { + fn zeros(ndim: usize) -> Self { assert_eq!(ndim, 1); Self::default() } #[inline] - fn next_for(&self, mut index: Self) -> Option - { + fn next_for(&self, mut index: Self) -> Option { getm!(index, 0) += 1; if get!(&index, 0) < get!(self, 0) { Some(index) @@ -508,25 +472,21 @@ impl Dimension for Dim<[Ix; 1]> } #[inline] - fn equal(&self, rhs: &Self) -> bool - { + fn equal(&self, rhs: &Self) -> bool { get!(self, 0) == get!(rhs, 0) } #[inline] - fn size(&self) -> usize - { + fn size(&self) -> usize { get!(self, 0) } #[inline] - fn size_checked(&self) -> Option - { + fn size_checked(&self) -> Option { Some(get!(self, 0)) } #[inline] - fn default_strides(&self) -> Self - { + fn default_strides(&self) -> Self { if get!(self, 0) == 0 { Ix1(0) } else { @@ -535,26 +495,22 @@ impl Dimension for Dim<[Ix; 1]> } #[inline] - fn _fastest_varying_stride_order(&self) -> Self - { + fn _fastest_varying_stride_order(&self) -> Self { Ix1(0) } #[inline(always)] - fn min_stride_axis(&self, _: &Self) -> Axis - { + fn min_stride_axis(&self, _: &Self) -> Axis { Axis(0) } #[inline(always)] - fn max_stride_axis(&self, _: &Self) -> Axis - { + fn max_stride_axis(&self, _: &Self) -> Axis { Axis(0) } #[inline] - fn first_index(&self) -> Option - { + fn first_index(&self) -> Option { if get!(self, 0) != 0 { Some(Ix1(0)) } else { @@ -564,15 +520,13 @@ impl Dimension for Dim<[Ix; 1]> /// Self is an index, return the stride 
offset #[inline(always)] - fn stride_offset(index: &Self, stride: &Self) -> isize - { + fn stride_offset(index: &Self, stride: &Self) -> isize { stride_offset(get!(index, 0), get!(stride, 0)) } /// Return stride offset for this dimension and index. #[inline] - fn stride_offset_checked(&self, stride: &Self, index: &Self) -> Option - { + fn stride_offset_checked(&self, stride: &Self, index: &Self) -> Option { if get!(index, 0) < get!(self, 0) { Some(stride_offset(get!(index, 0), get!(stride, 0))) } else { @@ -581,13 +535,11 @@ impl Dimension for Dim<[Ix; 1]> } impl_insert_axis_array!(1); #[inline] - fn try_remove_axis(&self, axis: Axis) -> Self::Smaller - { + fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { self.remove_axis(axis) } - fn from_dimension(d: &D2) -> Option - { + fn from_dimension(d: &D2) -> Option { if 1 == d.ndim() { Some(Ix1(d[0])) } else { @@ -597,41 +549,34 @@ impl Dimension for Dim<[Ix; 1]> private_impl! {} } -impl Dimension for Dim<[Ix; 2]> -{ +impl Dimension for Dim<[Ix; 2]> { const NDIM: Option = Some(2); type Pattern = (Ix, Ix); type Smaller = Ix1; type Larger = Ix3; #[inline] - fn ndim(&self) -> usize - { + fn ndim(&self) -> usize { 2 } #[inline] - fn into_pattern(self) -> Self::Pattern - { + fn into_pattern(self) -> Self::Pattern { self.ix().convert() } #[inline] - fn slice(&self) -> &[Ix] - { + fn slice(&self) -> &[Ix] { self.ix() } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] - { + fn slice_mut(&mut self) -> &mut [Ix] { self.ixm() } #[inline] - fn zeros(ndim: usize) -> Self - { + fn zeros(ndim: usize) -> Self { assert_eq!(ndim, 2); Self::default() } #[inline] - fn next_for(&self, index: Self) -> Option - { + fn next_for(&self, index: Self) -> Option { let mut i = get!(&index, 0); let mut j = get!(&index, 1); let imax = get!(self, 0); @@ -648,40 +593,34 @@ impl Dimension for Dim<[Ix; 2]> } #[inline] - fn equal(&self, rhs: &Self) -> bool - { + fn equal(&self, rhs: &Self) -> bool { get!(self, 0) == get!(rhs, 0) && get!(self, 1) == 
get!(rhs, 1) } #[inline] - fn size(&self) -> usize - { + fn size(&self) -> usize { get!(self, 0) * get!(self, 1) } #[inline] - fn size_checked(&self) -> Option - { + fn size_checked(&self) -> Option { let m = get!(self, 0); let n = get!(self, 1); m.checked_mul(n) } #[inline] - fn last_elem(&self) -> usize - { + fn last_elem(&self) -> usize { get!(self, 1) } #[inline] - fn set_last_elem(&mut self, i: usize) - { + fn set_last_elem(&mut self, i: usize) { getm!(self, 1) = i; } #[inline] - fn default_strides(&self) -> Self - { + fn default_strides(&self) -> Self { let m = get!(self, 0); let n = get!(self, 1); if m == 0 || n == 0 { @@ -691,8 +630,7 @@ impl Dimension for Dim<[Ix; 2]> } } #[inline] - fn fortran_strides(&self) -> Self - { + fn fortran_strides(&self) -> Self { let m = get!(self, 0); let n = get!(self, 1); if m == 0 || n == 0 { @@ -703,8 +641,7 @@ impl Dimension for Dim<[Ix; 2]> } #[inline] - fn _fastest_varying_stride_order(&self) -> Self - { + fn _fastest_varying_stride_order(&self) -> Self { if (get!(self, 0) as Ixs).abs() <= (get!(self, 1) as Ixs).abs() { Ix2(0, 1) } else { @@ -713,8 +650,7 @@ impl Dimension for Dim<[Ix; 2]> } #[inline] - fn min_stride_axis(&self, strides: &Self) -> Axis - { + fn min_stride_axis(&self, strides: &Self) -> Axis { let s = get!(strides, 0) as Ixs; let t = get!(strides, 1) as Ixs; if s.abs() < t.abs() { @@ -725,8 +661,7 @@ impl Dimension for Dim<[Ix; 2]> } #[inline] - fn first_index(&self) -> Option - { + fn first_index(&self) -> Option { let m = get!(self, 0); let n = get!(self, 1); if m != 0 && n != 0 { @@ -738,8 +673,7 @@ impl Dimension for Dim<[Ix; 2]> /// Self is an index, return the stride offset #[inline(always)] - fn stride_offset(index: &Self, strides: &Self) -> isize - { + fn stride_offset(index: &Self, strides: &Self) -> isize { let i = get!(index, 0); let j = get!(index, 1); let s = get!(strides, 0); @@ -749,8 +683,7 @@ impl Dimension for Dim<[Ix; 2]> /// Return stride offset for this dimension and index. 
#[inline] - fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option - { + fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option { let m = get!(self, 0); let n = get!(self, 1); let i = get!(index, 0); @@ -765,43 +698,36 @@ impl Dimension for Dim<[Ix; 2]> } impl_insert_axis_array!(2); #[inline] - fn try_remove_axis(&self, axis: Axis) -> Self::Smaller - { + fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { self.remove_axis(axis) } private_impl! {} } -impl Dimension for Dim<[Ix; 3]> -{ +impl Dimension for Dim<[Ix; 3]> { const NDIM: Option = Some(3); type Pattern = (Ix, Ix, Ix); type Smaller = Ix2; type Larger = Ix4; #[inline] - fn ndim(&self) -> usize - { + fn ndim(&self) -> usize { 3 } #[inline] - fn into_pattern(self) -> Self::Pattern - { + fn into_pattern(self) -> Self::Pattern { self.ix().convert() } #[inline] - fn slice(&self) -> &[Ix] - { + fn slice(&self) -> &[Ix] { self.ix() } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] - { + fn slice_mut(&mut self) -> &mut [Ix] { self.ixm() } #[inline] - fn size(&self) -> usize - { + fn size(&self) -> usize { let m = get!(self, 0); let n = get!(self, 1); let o = get!(self, 2); @@ -809,15 +735,13 @@ impl Dimension for Dim<[Ix; 3]> } #[inline] - fn zeros(ndim: usize) -> Self - { + fn zeros(ndim: usize) -> Self { assert_eq!(ndim, 3); Self::default() } #[inline] - fn next_for(&self, index: Self) -> Option - { + fn next_for(&self, index: Self) -> Option { let mut i = get!(&index, 0); let mut j = get!(&index, 1); let mut k = get!(&index, 2); @@ -841,8 +765,7 @@ impl Dimension for Dim<[Ix; 3]> /// Self is an index, return the stride offset #[inline] - fn stride_offset(index: &Self, strides: &Self) -> isize - { + fn stride_offset(index: &Self, strides: &Self) -> isize { let i = get!(index, 0); let j = get!(index, 1); let k = get!(index, 2); @@ -854,8 +777,7 @@ impl Dimension for Dim<[Ix; 3]> /// Return stride offset for this dimension and index. 
#[inline] - fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option - { + fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option { let m = get!(self, 0); let n = get!(self, 1); let l = get!(self, 2); @@ -873,8 +795,7 @@ impl Dimension for Dim<[Ix; 3]> } #[inline] - fn _fastest_varying_stride_order(&self) -> Self - { + fn _fastest_varying_stride_order(&self) -> Self { let mut stride = *self; let mut order = Ix3(0, 1, 2); macro_rules! swap { @@ -896,8 +817,7 @@ impl Dimension for Dim<[Ix; 3]> } impl_insert_axis_array!(3); #[inline] - fn try_remove_axis(&self, axis: Axis) -> Self::Smaller - { + fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { self.remove_axis(axis) } private_impl! {} @@ -954,49 +874,41 @@ large_dim!(6, Ix6, (Ix, Ix, Ix, Ix, Ix, Ix), IxDyn, { /// IxDyn is a "dynamic" index, pretty hard to use when indexing, /// and memory wasteful, but it allows an arbitrary and dynamic number of axes. -impl Dimension for IxDyn -{ +impl Dimension for IxDyn { const NDIM: Option = None; type Pattern = Self; type Smaller = Self; type Larger = Self; #[inline] - fn ndim(&self) -> usize - { + fn ndim(&self) -> usize { self.ix().len() } #[inline] - fn slice(&self) -> &[Ix] - { + fn slice(&self) -> &[Ix] { self.ix() } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] - { + fn slice_mut(&mut self) -> &mut [Ix] { self.ixm() } #[inline] - fn into_pattern(self) -> Self::Pattern - { + fn into_pattern(self) -> Self::Pattern { self } #[inline] - fn zeros(ndim: usize) -> Self - { + fn zeros(ndim: usize) -> Self { IxDyn::zeros(ndim) } #[inline] - fn insert_axis(&self, axis: Axis) -> Self::Larger - { + fn insert_axis(&self, axis: Axis) -> Self::Larger { debug_assert!(axis.index() <= self.ndim()); Dim::new(self.ix().insert(axis.index())) } #[inline] - fn try_remove_axis(&self, axis: Axis) -> Self::Smaller - { + fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { if self.ndim() > 0 { self.remove_axis(axis) } else { @@ -1004,32 +916,26 @@ impl 
Dimension for IxDyn } } - fn from_dimension(d: &D2) -> Option - { + fn from_dimension(d: &D2) -> Option { Some(IxDyn(d.slice())) } - fn into_dyn(self) -> IxDyn - { + fn into_dyn(self) -> IxDyn { self } private_impl! {} } -impl Index for Dim -{ +impl Index for Dim { type Output = >::Output; - fn index(&self, index: usize) -> &Self::Output - { + fn index(&self, index: usize) -> &Self::Output { &self.ix()[index] } } -impl IndexMut for Dim -{ - fn index_mut(&mut self, index: usize) -> &mut Self::Output - { +impl IndexMut for Dim { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { &mut self.ixm()[index] } } diff --git a/src/dimension/dynindeximpl.rs b/src/dimension/dynindeximpl.rs index 60aeacd80..e25fa7717 100644 --- a/src/dimension/dynindeximpl.rs +++ b/src/dimension/dynindeximpl.rs @@ -10,17 +10,14 @@ const CAP: usize = 4; /// T is usize or isize #[derive(Debug)] -enum IxDynRepr -{ +enum IxDynRepr { Inline(u32, [T; CAP]), Alloc(Box<[T]>), } -impl Deref for IxDynRepr -{ +impl Deref for IxDynRepr { type Target = [T]; - fn deref(&self) -> &[T] - { + fn deref(&self) -> &[T] { match *self { IxDynRepr::Inline(len, ref ar) => { debug_assert!(len as usize <= ar.len()); @@ -31,10 +28,8 @@ impl Deref for IxDynRepr } } -impl DerefMut for IxDynRepr -{ - fn deref_mut(&mut self) -> &mut [T] - { +impl DerefMut for IxDynRepr { + fn deref_mut(&mut self) -> &mut [T] { match *self { IxDynRepr::Inline(len, ref mut ar) => { debug_assert!(len as usize <= ar.len()); @@ -46,20 +41,16 @@ impl DerefMut for IxDynRepr } /// The default is equivalent to `Self::from(&[0])`. 
-impl Default for IxDynRepr -{ - fn default() -> Self - { +impl Default for IxDynRepr { + fn default() -> Self { Self::copy_from(&[0]) } } use num_traits::Zero; -impl IxDynRepr -{ - pub fn copy_from(x: &[T]) -> Self - { +impl IxDynRepr { + pub fn copy_from(x: &[T]) -> Self { if x.len() <= CAP { let mut arr = [T::zero(); CAP]; arr[..x.len()].copy_from_slice(x); @@ -70,11 +61,9 @@ impl IxDynRepr } } -impl IxDynRepr -{ +impl IxDynRepr { // make an Inline or Alloc version as appropriate - fn from_vec_auto(v: Vec) -> Self - { + fn from_vec_auto(v: Vec) -> Self { if v.len() <= CAP { Self::copy_from(&v) } else { @@ -83,23 +72,18 @@ impl IxDynRepr } } -impl IxDynRepr -{ - fn from_vec(v: Vec) -> Self - { +impl IxDynRepr { + fn from_vec(v: Vec) -> Self { IxDynRepr::Alloc(v.into_boxed_slice()) } - fn from(x: &[T]) -> Self - { + fn from(x: &[T]) -> Self { Self::from_vec(x.to_vec()) } } -impl Clone for IxDynRepr -{ - fn clone(&self) -> Self - { +impl Clone for IxDynRepr { + fn clone(&self) -> Self { match *self { IxDynRepr::Inline(len, arr) => IxDynRepr::Inline(len, arr), _ => Self::from(&self[..]), @@ -109,10 +93,8 @@ impl Clone for IxDynRepr impl Eq for IxDynRepr {} -impl PartialEq for IxDynRepr -{ - fn eq(&self, rhs: &Self) -> bool - { +impl PartialEq for IxDynRepr { + fn eq(&self, rhs: &Self) -> bool { match (self, rhs) { (&IxDynRepr::Inline(slen, ref sarr), &IxDynRepr::Inline(rlen, ref rarr)) => slen == rlen @@ -124,10 +106,8 @@ impl PartialEq for IxDynRepr } } -impl Hash for IxDynRepr -{ - fn hash(&self, state: &mut H) - { +impl Hash for IxDynRepr { + fn hash(&self, state: &mut H) { Hash::hash(&self[..], state) } } @@ -140,10 +120,8 @@ impl Hash for IxDynRepr #[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] pub struct IxDynImpl(IxDynRepr); -impl IxDynImpl -{ - pub(crate) fn insert(&self, i: usize) -> Self - { +impl IxDynImpl { + pub(crate) fn insert(&self, i: usize) -> Self { let len = self.len(); debug_assert!(i <= len); IxDynImpl(if len < CAP { @@ -160,8 +138,7 @@ 
impl IxDynImpl }) } - fn remove(&self, i: usize) -> Self - { + fn remove(&self, i: usize) -> Self { IxDynImpl(match self.0 { IxDynRepr::Inline(0, _) => IxDynRepr::Inline(0, [0; CAP]), IxDynRepr::Inline(1, _) => IxDynRepr::Inline(0, [0; CAP]), @@ -182,20 +159,16 @@ impl IxDynImpl } } -impl<'a> From<&'a [Ix]> for IxDynImpl -{ +impl<'a> From<&'a [Ix]> for IxDynImpl { #[inline] - fn from(ix: &'a [Ix]) -> Self - { + fn from(ix: &'a [Ix]) -> Self { IxDynImpl(IxDynRepr::copy_from(ix)) } } -impl From> for IxDynImpl -{ +impl From> for IxDynImpl { #[inline] - fn from(ix: Vec) -> Self - { + fn from(ix: Vec) -> Self { IxDynImpl(IxDynRepr::from_vec_auto(ix)) } } @@ -204,8 +177,7 @@ impl Index for IxDynImpl where [Ix]: Index { type Output = <[Ix] as Index>::Output; - fn index(&self, index: J) -> &Self::Output - { + fn index(&self, index: J) -> &Self::Output { &self.0[index] } } @@ -213,57 +185,46 @@ where [Ix]: Index impl IndexMut for IxDynImpl where [Ix]: IndexMut { - fn index_mut(&mut self, index: J) -> &mut Self::Output - { + fn index_mut(&mut self, index: J) -> &mut Self::Output { &mut self.0[index] } } -impl Deref for IxDynImpl -{ +impl Deref for IxDynImpl { type Target = [Ix]; #[inline] - fn deref(&self) -> &[Ix] - { + fn deref(&self) -> &[Ix] { &self.0 } } -impl DerefMut for IxDynImpl -{ +impl DerefMut for IxDynImpl { #[inline] - fn deref_mut(&mut self) -> &mut [Ix] - { + fn deref_mut(&mut self) -> &mut [Ix] { &mut self.0 } } -impl<'a> IntoIterator for &'a IxDynImpl -{ +impl<'a> IntoIterator for &'a IxDynImpl { type Item = &'a Ix; type IntoIter = <&'a [Ix] as IntoIterator>::IntoIter; #[inline] - fn into_iter(self) -> Self::IntoIter - { + fn into_iter(self) -> Self::IntoIter { self[..].iter() } } -impl RemoveAxis for Dim -{ - fn remove_axis(&self, axis: Axis) -> Self - { +impl RemoveAxis for Dim { + fn remove_axis(&self, axis: Axis) -> Self { debug_assert!(axis.index() < self.ndim()); Dim::new(self.ix().remove(axis.index())) } } -impl IxDyn -{ +impl IxDyn { /// Create a 
new dimension value with `n` axes, all zeros #[inline] - pub fn zeros(n: usize) -> IxDyn - { + pub fn zeros(n: usize) -> IxDyn { const ZEROS: &[usize] = &[0; 4]; if n <= ZEROS.len() { Dim(&ZEROS[..n]) diff --git a/src/dimension/mod.rs b/src/dimension/mod.rs index e1563613e..4d1349b57 100644 --- a/src/dimension/mod.rs +++ b/src/dimension/mod.rs @@ -46,8 +46,7 @@ mod sequence; /// Calculate offset from `Ix` stride converting sign properly #[inline(always)] -pub fn stride_offset(n: Ix, stride: Ix) -> isize -{ +pub fn stride_offset(n: Ix, stride: Ix) -> isize { (n as isize) * (stride as Ixs) } @@ -56,8 +55,7 @@ pub fn stride_offset(n: Ix, stride: Ix) -> isize /// There is overlap if, when iterating through the dimensions in order of /// increasing stride, the current stride is less than or equal to the maximum /// possible offset along the preceding axes. (Axes of length ≤1 are ignored.) -pub fn dim_stride_overlap(dim: &D, strides: &D) -> bool -{ +pub fn dim_stride_overlap(dim: &D, strides: &D) -> bool { let order = strides._fastest_varying_stride_order(); let mut sum_prev_offsets = 0; for &index in order.slice() { @@ -86,8 +84,7 @@ pub fn dim_stride_overlap(dim: &D, strides: &D) -> bool /// are met to construct an array from the data buffer, `dim`, and `strides`. /// (The data buffer being a slice or `Vec` guarantees that it contains no more /// than `isize::MAX` bytes.) -pub fn size_of_shape_checked(dim: &D) -> Result -{ +pub fn size_of_shape_checked(dim: &D) -> Result { let size_nonzero = dim .slice() .iter() @@ -127,8 +124,7 @@ pub fn size_of_shape_checked(dim: &D) -> Result /// accessible by moving along all axes does not exceed `isize::MAX`. 
pub(crate) fn can_index_slice_with_strides( data: &[A], dim: &D, strides: &Strides, -) -> Result<(), ShapeError> -{ +) -> Result<(), ShapeError> { if let Strides::Custom(strides) = strides { can_index_slice(data, dim, strides) } else { @@ -136,8 +132,7 @@ pub(crate) fn can_index_slice_with_strides( } } -pub(crate) fn can_index_slice_not_custom(data_len: usize, dim: &D) -> Result<(), ShapeError> -{ +pub(crate) fn can_index_slice_not_custom(data_len: usize, dim: &D) -> Result<(), ShapeError> { // Condition 1. let len = size_of_shape_checked(dim)?; // Condition 2. @@ -162,14 +157,12 @@ pub(crate) fn can_index_slice_not_custom(data_len: usize, dim: &D) /// also implies that the length of any individual axis does not exceed /// `isize::MAX`.) pub fn max_abs_offset_check_overflow(dim: &D, strides: &D) -> Result -where D: Dimension -{ +where D: Dimension { max_abs_offset_check_overflow_impl(mem::size_of::(), dim, strides) } fn max_abs_offset_check_overflow_impl(elem_size: usize, dim: &D, strides: &D) -> Result -where D: Dimension -{ +where D: Dimension { // Condition 1. if dim.ndim() != strides.ndim() { return Err(from_kind(ErrorKind::IncompatibleLayout)); @@ -240,8 +233,7 @@ where D: Dimension /// allocation. (In other words, the pointer to the first element of the array /// must be computed using `offset_from_low_addr_ptr_to_logical_ptr` so that /// negative strides are correctly handled.) -pub(crate) fn can_index_slice(data: &[A], dim: &D, strides: &D) -> Result<(), ShapeError> -{ +pub(crate) fn can_index_slice(data: &[A], dim: &D, strides: &D) -> Result<(), ShapeError> { // Check conditions 1 and 2 and calculate `max_offset`. 
let max_offset = max_abs_offset_check_overflow::(dim, strides)?; can_index_slice_impl(max_offset, data.len(), dim, strides) @@ -249,8 +241,7 @@ pub(crate) fn can_index_slice(data: &[A], dim: &D, strides: &D) fn can_index_slice_impl( max_offset: usize, data_len: usize, dim: &D, strides: &D, -) -> Result<(), ShapeError> -{ +) -> Result<(), ShapeError> { // Check condition 3. let is_empty = dim.slice().iter().any(|&d| d == 0); if is_empty && max_offset > data_len { @@ -270,8 +261,7 @@ fn can_index_slice_impl( /// Stride offset checked general version (slices) #[inline] -pub fn stride_offset_checked(dim: &[Ix], strides: &[Ix], index: &[Ix]) -> Option -{ +pub fn stride_offset_checked(dim: &[Ix], strides: &[Ix], index: &[Ix]) -> Option { if index.len() != dim.len() { return None; } @@ -287,8 +277,7 @@ pub fn stride_offset_checked(dim: &[Ix], strides: &[Ix], index: &[Ix]) -> Option /// Checks if strides are non-negative. pub fn strides_non_negative(strides: &D) -> Result<(), ShapeError> -where D: Dimension -{ +where D: Dimension { for &stride in strides.slice() { if (stride as isize) < 0 { return Err(from_kind(ErrorKind::Unsupported)); @@ -298,8 +287,7 @@ where D: Dimension } /// Implementation-specific extensions to `Dimension` -pub trait DimensionExt -{ +pub trait DimensionExt { // note: many extensions go in the main trait if they need to be special- // cased per dimension /// Get the dimension at `axis`. 
@@ -319,29 +307,24 @@ impl DimensionExt for D where D: Dimension { #[inline] - fn axis(&self, axis: Axis) -> Ix - { + fn axis(&self, axis: Axis) -> Ix { self[axis.index()] } #[inline] - fn set_axis(&mut self, axis: Axis, value: Ix) - { + fn set_axis(&mut self, axis: Axis, value: Ix) { self[axis.index()] = value; } } -impl DimensionExt for [Ix] -{ +impl DimensionExt for [Ix] { #[inline] - fn axis(&self, axis: Axis) -> Ix - { + fn axis(&self, axis: Axis) -> Ix { self[axis.index()] } #[inline] - fn set_axis(&mut self, axis: Axis, value: Ix) - { + fn set_axis(&mut self, axis: Axis, value: Ix) { self[axis.index()] = value; } } @@ -352,8 +335,7 @@ impl DimensionExt for [Ix] /// **Panics** if `index` is larger than the size of the axis #[track_caller] // FIXME: Move to Dimension trait -pub fn do_collapse_axis(dims: &mut D, strides: &D, axis: usize, index: usize) -> isize -{ +pub fn do_collapse_axis(dims: &mut D, strides: &D, axis: usize, index: usize) -> isize { let dim = dims.slice()[axis]; let stride = strides.slice()[axis]; ndassert!( @@ -370,8 +352,7 @@ pub fn do_collapse_axis(dims: &mut D, strides: &D, axis: usize, in /// Compute the equivalent unsigned index given the axis length and signed index. #[inline] -pub fn abs_index(len: Ix, index: Ixs) -> Ix -{ +pub fn abs_index(len: Ix, index: Ixs) -> Ix { if index < 0 { len - (-index as Ix) } else { @@ -385,8 +366,7 @@ pub fn abs_index(len: Ix, index: Ixs) -> Ix /// /// **Panics** if stride is 0 or if any index is out of bounds. #[track_caller] -fn to_abs_slice(axis_len: usize, slice: Slice) -> (usize, usize, isize) -{ +fn to_abs_slice(axis_len: usize, slice: Slice) -> (usize, usize, isize) { let Slice { start, end, step } = slice; let start = abs_index(axis_len, start); let mut end = abs_index(axis_len, end.unwrap_or(axis_len as isize)); @@ -411,8 +391,7 @@ fn to_abs_slice(axis_len: usize, slice: Slice) -> (usize, usize, isize) /// Returns the offset from the lowest-address element to the logically first /// element. 
-pub fn offset_from_low_addr_ptr_to_logical_ptr(dim: &D, strides: &D) -> usize -{ +pub fn offset_from_low_addr_ptr_to_logical_ptr(dim: &D, strides: &D) -> usize { let offset = izip!(dim.slice(), strides.slice()).fold(0, |_offset, (&d, &s)| { let s = s as isize; if s < 0 && d > 1 { @@ -429,8 +408,7 @@ pub fn offset_from_low_addr_ptr_to_logical_ptr(dim: &D, strides: & /// /// **Panics** if stride is 0 or if any index is out of bounds. #[track_caller] -pub fn do_slice(dim: &mut usize, stride: &mut usize, slice: Slice) -> isize -{ +pub fn do_slice(dim: &mut usize, stride: &mut usize, slice: Slice) -> isize { let (start, end, step) = to_abs_slice(*dim, slice); let m = end - start; @@ -483,8 +461,7 @@ pub fn do_slice(dim: &mut usize, stride: &mut usize, slice: Slice) -> isize /// nonnegative. /// /// See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm -fn extended_gcd(a: isize, b: isize) -> (isize, (isize, isize)) -{ +fn extended_gcd(a: isize, b: isize) -> (isize, (isize, isize)) { if a == 0 { (b.abs(), (0, b.signum())) } else if b == 0 { @@ -520,8 +497,7 @@ fn extended_gcd(a: isize, b: isize) -> (isize, (isize, isize)) /// /// See https://en.wikipedia.org/wiki/Diophantine_equation#One_equation /// and https://math.stackexchange.com/questions/1656120#1656138 -fn solve_linear_diophantine_eq(a: isize, b: isize, c: isize) -> Option<(isize, isize)> -{ +fn solve_linear_diophantine_eq(a: isize, b: isize, c: isize) -> Option<(isize, isize)> { debug_assert_ne!(a, 0); debug_assert_ne!(b, 0); let (g, (u, _)) = extended_gcd(a, b); @@ -539,8 +515,7 @@ fn solve_linear_diophantine_eq(a: isize, b: isize, c: isize) -> Option<(isize, i /// consecutive elements (the sign is irrelevant). /// /// **Note** `step1` and `step2` must be nonzero. 
-fn arith_seq_intersect((min1, max1, step1): (isize, isize, isize), (min2, max2, step2): (isize, isize, isize)) -> bool -{ +fn arith_seq_intersect((min1, max1, step1): (isize, isize, isize), (min2, max2, step2): (isize, isize, isize)) -> bool { debug_assert!(max1 >= min1); debug_assert!(max2 >= min2); debug_assert_eq!((max1 - min1) % step1, 0); @@ -596,8 +571,7 @@ fn arith_seq_intersect((min1, max1, step1): (isize, isize, isize), (min2, max2, /// Returns the minimum and maximum values of the indices (inclusive). /// /// If the slice is empty, then returns `None`, otherwise returns `Some((min, max))`. -fn slice_min_max(axis_len: usize, slice: Slice) -> Option<(usize, usize)> -{ +fn slice_min_max(axis_len: usize, slice: Slice) -> Option<(usize, usize)> { let (start, end, step) = to_abs_slice(axis_len, slice); if start == end { None @@ -609,8 +583,7 @@ fn slice_min_max(axis_len: usize, slice: Slice) -> Option<(usize, usize)> } /// Returns `true` iff the slices intersect. -pub fn slices_intersect(dim: &D, indices1: impl SliceArg, indices2: impl SliceArg) -> bool -{ +pub fn slices_intersect(dim: &D, indices1: impl SliceArg, indices2: impl SliceArg) -> bool { debug_assert_eq!(indices1.in_ndim(), indices2.in_ndim()); for (&axis_len, &si1, &si2) in izip!( dim.slice(), @@ -667,8 +640,7 @@ pub fn slices_intersect(dim: &D, indices1: impl SliceArg, indic true } -pub(crate) fn is_layout_c(dim: &D, strides: &D) -> bool -{ +pub(crate) fn is_layout_c(dim: &D, strides: &D) -> bool { if let Some(1) = D::NDIM { return strides[0] == 1 || dim[0] <= 1; } @@ -693,8 +665,7 @@ pub(crate) fn is_layout_c(dim: &D, strides: &D) -> bool true } -pub(crate) fn is_layout_f(dim: &D, strides: &D) -> bool -{ +pub(crate) fn is_layout_f(dim: &D, strides: &D) -> bool { if let Some(1) = D::NDIM { return strides[0] == 1 || dim[0] <= 1; } @@ -720,8 +691,7 @@ pub(crate) fn is_layout_f(dim: &D, strides: &D) -> bool } pub fn merge_axes(dim: &mut D, strides: &mut D, take: Axis, into: Axis) -> bool -where D: 
Dimension -{ +where D: Dimension { let into_len = dim.axis(into); let into_stride = strides.axis(into) as isize; let take_len = dim.axis(take); @@ -748,8 +718,7 @@ where D: Dimension /// Move the axis which has the smallest absolute stride and a length /// greater than one to be the last axis. pub fn move_min_stride_axis_to_last(dim: &mut D, strides: &mut D) -where D: Dimension -{ +where D: Dimension { debug_assert_eq!(dim.ndim(), strides.ndim()); match dim.ndim() { 0 | 1 => {} @@ -772,8 +741,7 @@ where D: Dimension } #[cfg(test)] -mod test -{ +mod test { use super::{ arith_seq_intersect, can_index_slice, @@ -792,8 +760,7 @@ mod test use quickcheck::{quickcheck, TestResult}; #[test] - fn slice_indexing_uncommon_strides() - { + fn slice_indexing_uncommon_strides() { let v: alloc::vec::Vec<_> = (0..12).collect(); let dim = (2, 3, 2).into_dimension(); let strides = (1, 2, 6).into_dimension(); @@ -807,8 +774,7 @@ mod test } #[test] - fn overlapping_strides_dim() - { + fn overlapping_strides_dim() { let dim = (2, 3, 2).into_dimension(); let strides = (5, 2, 1).into_dimension(); assert!(super::dim_stride_overlap(&dim, &strides)); @@ -830,8 +796,7 @@ mod test } #[test] - fn max_abs_offset_check_overflow_examples() - { + fn max_abs_offset_check_overflow_examples() { let dim = (1, ::std::isize::MAX as usize, 1).into_dimension(); let strides = (1, 1, 1).into_dimension(); max_abs_offset_check_overflow::(&dim, &strides).unwrap(); @@ -847,15 +812,13 @@ mod test } #[test] - fn can_index_slice_ix0() - { + fn can_index_slice_ix0() { can_index_slice::(&[1], &Ix0(), &Ix0()).unwrap(); can_index_slice::(&[], &Ix0(), &Ix0()).unwrap_err(); } #[test] - fn can_index_slice_ix1() - { + fn can_index_slice_ix1() { can_index_slice::(&[], &Ix1(0), &Ix1(0)).unwrap(); can_index_slice::(&[], &Ix1(0), &Ix1(1)).unwrap(); can_index_slice::(&[], &Ix1(1), &Ix1(0)).unwrap_err(); @@ -870,8 +833,7 @@ mod test } #[test] - fn can_index_slice_ix2() - { + fn can_index_slice_ix2() { can_index_slice::(&[], 
&Ix2(0, 0), &Ix2(0, 0)).unwrap(); can_index_slice::(&[], &Ix2(0, 0), &Ix2(2, 1)).unwrap(); can_index_slice::(&[], &Ix2(0, 1), &Ix2(0, 0)).unwrap(); @@ -886,8 +848,7 @@ mod test } #[test] - fn can_index_slice_ix3() - { + fn can_index_slice_ix3() { can_index_slice::(&[], &Ix3(0, 0, 1), &Ix3(2, 1, 3)).unwrap(); can_index_slice::(&[], &Ix3(1, 1, 1), &Ix3(2, 1, 3)).unwrap_err(); can_index_slice::(&[1], &Ix3(1, 1, 1), &Ix3(2, 1, 3)).unwrap(); @@ -896,8 +857,7 @@ mod test } #[test] - fn can_index_slice_zero_size_elem() - { + fn can_index_slice_zero_size_elem() { can_index_slice::<(), _>(&[], &Ix1(0), &Ix1(1)).unwrap(); can_index_slice::<(), _>(&[()], &Ix1(1), &Ix1(1)).unwrap(); can_index_slice::<(), _>(&[(), ()], &Ix1(2), &Ix1(1)).unwrap(); @@ -947,8 +907,7 @@ mod test } #[test] - fn extended_gcd_zero() - { + fn extended_gcd_zero() { assert_eq!(extended_gcd(0, 0), (0, (0, 0))); assert_eq!(extended_gcd(0, 5), (5, (0, 1))); assert_eq!(extended_gcd(5, 0), (5, (1, 0))); @@ -1038,8 +997,7 @@ mod test } #[test] - fn slice_min_max_empty() - { + fn slice_min_max_empty() { assert_eq!(slice_min_max(0, Slice::new(0, None, 3)), None); assert_eq!(slice_min_max(10, Slice::new(1, Some(1), 3)), None); assert_eq!(slice_min_max(10, Slice::new(-1, Some(-1), 3)), None); @@ -1048,8 +1006,7 @@ mod test } #[test] - fn slice_min_max_pos_step() - { + fn slice_min_max_pos_step() { assert_eq!(slice_min_max(10, Slice::new(1, Some(8), 3)), Some((1, 7))); assert_eq!(slice_min_max(10, Slice::new(1, Some(9), 3)), Some((1, 7))); assert_eq!(slice_min_max(10, Slice::new(-9, Some(8), 3)), Some((1, 7))); @@ -1065,8 +1022,7 @@ mod test } #[test] - fn slice_min_max_neg_step() - { + fn slice_min_max_neg_step() { assert_eq!(slice_min_max(10, Slice::new(1, Some(8), -3)), Some((1, 7))); assert_eq!(slice_min_max(10, Slice::new(2, Some(8), -3)), Some((4, 7))); assert_eq!(slice_min_max(10, Slice::new(-9, Some(8), -3)), Some((1, 7))); @@ -1088,8 +1044,7 @@ mod test } #[test] - fn slices_intersect_true() - { + fn 
slices_intersect_true() { assert!(slices_intersect( &Dim([4, 5]), s![NewAxis, .., NewAxis, ..], @@ -1114,8 +1069,7 @@ mod test } #[test] - fn slices_intersect_false() - { + fn slices_intersect_false() { assert!(!slices_intersect( &Dim([4, 5]), s![..;2, ..], diff --git a/src/dimension/ndindex.rs b/src/dimension/ndindex.rs index e27e68c99..5792f1a22 100644 --- a/src/dimension/ndindex.rs +++ b/src/dimension/ndindex.rs @@ -17,8 +17,7 @@ use crate::{Dim, Dimension, IntoDimension, Ix, Ix0, Ix1, Ix2, Ix3, Ix4, Ix5, Ix6 /// assert_eq!(a[(1, 1)], 4); /// ``` #[allow(clippy::missing_safety_doc)] // TODO: Add doc -pub unsafe trait NdIndex: Debug -{ +pub unsafe trait NdIndex: Debug { #[doc(hidden)] fn index_checked(&self, dim: &E, strides: &E) -> Option; #[doc(hidden)] @@ -28,116 +27,93 @@ pub unsafe trait NdIndex: Debug unsafe impl NdIndex for D where D: Dimension { - fn index_checked(&self, dim: &D, strides: &D) -> Option - { + fn index_checked(&self, dim: &D, strides: &D) -> Option { dim.stride_offset_checked(strides, self) } - fn index_unchecked(&self, strides: &D) -> isize - { + fn index_unchecked(&self, strides: &D) -> isize { D::stride_offset(self, strides) } } -unsafe impl NdIndex for () -{ +unsafe impl NdIndex for () { #[inline] - fn index_checked(&self, dim: &Ix0, strides: &Ix0) -> Option - { + fn index_checked(&self, dim: &Ix0, strides: &Ix0) -> Option { dim.stride_offset_checked(strides, &Ix0()) } #[inline(always)] - fn index_unchecked(&self, _strides: &Ix0) -> isize - { + fn index_unchecked(&self, _strides: &Ix0) -> isize { 0 } } -unsafe impl NdIndex for (Ix, Ix) -{ +unsafe impl NdIndex for (Ix, Ix) { #[inline] - fn index_checked(&self, dim: &Ix2, strides: &Ix2) -> Option - { + fn index_checked(&self, dim: &Ix2, strides: &Ix2) -> Option { dim.stride_offset_checked(strides, &Ix2(self.0, self.1)) } #[inline] - fn index_unchecked(&self, strides: &Ix2) -> isize - { + fn index_unchecked(&self, strides: &Ix2) -> isize { stride_offset(self.0, get!(strides, 0)) + 
stride_offset(self.1, get!(strides, 1)) } } -unsafe impl NdIndex for (Ix, Ix, Ix) -{ +unsafe impl NdIndex for (Ix, Ix, Ix) { #[inline] - fn index_checked(&self, dim: &Ix3, strides: &Ix3) -> Option - { + fn index_checked(&self, dim: &Ix3, strides: &Ix3) -> Option { dim.stride_offset_checked(strides, &self.into_dimension()) } #[inline] - fn index_unchecked(&self, strides: &Ix3) -> isize - { + fn index_unchecked(&self, strides: &Ix3) -> isize { stride_offset(self.0, get!(strides, 0)) + stride_offset(self.1, get!(strides, 1)) + stride_offset(self.2, get!(strides, 2)) } } -unsafe impl NdIndex for (Ix, Ix, Ix, Ix) -{ +unsafe impl NdIndex for (Ix, Ix, Ix, Ix) { #[inline] - fn index_checked(&self, dim: &Ix4, strides: &Ix4) -> Option - { + fn index_checked(&self, dim: &Ix4, strides: &Ix4) -> Option { dim.stride_offset_checked(strides, &self.into_dimension()) } #[inline] - fn index_unchecked(&self, strides: &Ix4) -> isize - { + fn index_unchecked(&self, strides: &Ix4) -> isize { zip(strides.ix(), self.into_dimension().ix()) .map(|(&s, &i)| stride_offset(i, s)) .sum() } } -unsafe impl NdIndex for (Ix, Ix, Ix, Ix, Ix) -{ +unsafe impl NdIndex for (Ix, Ix, Ix, Ix, Ix) { #[inline] - fn index_checked(&self, dim: &Ix5, strides: &Ix5) -> Option - { + fn index_checked(&self, dim: &Ix5, strides: &Ix5) -> Option { dim.stride_offset_checked(strides, &self.into_dimension()) } #[inline] - fn index_unchecked(&self, strides: &Ix5) -> isize - { + fn index_unchecked(&self, strides: &Ix5) -> isize { zip(strides.ix(), self.into_dimension().ix()) .map(|(&s, &i)| stride_offset(i, s)) .sum() } } -unsafe impl NdIndex for Ix -{ +unsafe impl NdIndex for Ix { #[inline] - fn index_checked(&self, dim: &Ix1, strides: &Ix1) -> Option - { + fn index_checked(&self, dim: &Ix1, strides: &Ix1) -> Option { dim.stride_offset_checked(strides, &Ix1(*self)) } #[inline(always)] - fn index_unchecked(&self, strides: &Ix1) -> isize - { + fn index_unchecked(&self, strides: &Ix1) -> isize { stride_offset(*self, 
get!(strides, 0)) } } -unsafe impl NdIndex for Ix -{ +unsafe impl NdIndex for Ix { #[inline] - fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option - { + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { debug_assert_eq!(dim.ndim(), 1); stride_offset_checked(dim.ix(), strides.ix(), &[*self]) } #[inline(always)] - fn index_unchecked(&self, strides: &IxDyn) -> isize - { + fn index_unchecked(&self, strides: &IxDyn) -> isize { debug_assert_eq!(strides.ndim(), 1); stride_offset(*self, get!(strides, 0)) } @@ -176,11 +152,9 @@ ndindex_with_array! { } // implement NdIndex for Dim<[Ix; 2]> and so on -unsafe impl NdIndex for Dim<[Ix; N]> -{ +unsafe impl NdIndex for Dim<[Ix; N]> { #[inline] - fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option - { + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { debug_assert_eq!( strides.ndim(), N, @@ -192,8 +166,7 @@ unsafe impl NdIndex for Dim<[Ix; N]> } #[inline] - fn index_unchecked(&self, strides: &IxDyn) -> isize - { + fn index_unchecked(&self, strides: &IxDyn) -> isize { debug_assert_eq!( strides.ndim(), N, @@ -208,11 +181,9 @@ unsafe impl NdIndex for Dim<[Ix; N]> } // implement NdIndex for [Ix; 2] and so on -unsafe impl NdIndex for [Ix; N] -{ +unsafe impl NdIndex for [Ix; N] { #[inline] - fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option - { + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { debug_assert_eq!( strides.ndim(), N, @@ -224,8 +195,7 @@ unsafe impl NdIndex for [Ix; N] } #[inline] - fn index_unchecked(&self, strides: &IxDyn) -> isize - { + fn index_unchecked(&self, strides: &IxDyn) -> isize { debug_assert_eq!( strides.ndim(), N, @@ -239,35 +209,27 @@ unsafe impl NdIndex for [Ix; N] } } -impl<'a> IntoDimension for &'a [Ix] -{ +impl<'a> IntoDimension for &'a [Ix] { type Dim = IxDyn; - fn into_dimension(self) -> Self::Dim - { + fn into_dimension(self) -> Self::Dim { Dim(IxDynImpl::from(self)) } } -unsafe impl<'a> NdIndex for &'a IxDyn -{ - 
fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option - { +unsafe impl<'a> NdIndex for &'a IxDyn { + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { (**self).index_checked(dim, strides) } - fn index_unchecked(&self, strides: &IxDyn) -> isize - { + fn index_unchecked(&self, strides: &IxDyn) -> isize { (**self).index_unchecked(strides) } } -unsafe impl<'a> NdIndex for &'a [Ix] -{ - fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option - { +unsafe impl<'a> NdIndex for &'a [Ix] { + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { stride_offset_checked(dim.ix(), strides.ix(), self) } - fn index_unchecked(&self, strides: &IxDyn) -> isize - { + fn index_unchecked(&self, strides: &IxDyn) -> isize { zip(strides.ix(), *self) .map(|(&s, &i)| stride_offset(i, s)) .sum() diff --git a/src/dimension/ops.rs b/src/dimension/ops.rs index 1365ab488..dd23216f6 100644 --- a/src/dimension/ops.rs +++ b/src/dimension/ops.rs @@ -1,8 +1,7 @@ use crate::imp_prelude::*; /// Adds the two dimensions at compile time. -pub trait DimAdd -{ +pub trait DimAdd { /// The sum of the two dimensions. type Output: Dimension; } @@ -28,8 +27,7 @@ macro_rules! impl_dimadd_const_out_dyn { }; } -impl DimAdd for Ix0 -{ +impl DimAdd for Ix0 { type Output = D; } @@ -87,7 +85,6 @@ impl_dimadd_const_out_dyn!(6, 5); impl_dimadd_const_out_dyn!(6, 6); impl_dimadd_const_out_dyn!(6, IxDyn); -impl DimAdd for IxDyn -{ +impl DimAdd for IxDyn { type Output = IxDyn; } diff --git a/src/dimension/remove_axis.rs b/src/dimension/remove_axis.rs index cbb039fc5..da366ae17 100644 --- a/src/dimension/remove_axis.rs +++ b/src/dimension/remove_axis.rs @@ -12,26 +12,21 @@ use crate::{Axis, Dim, Dimension, Ix, Ix0, Ix1}; /// /// `RemoveAxis` defines a larger-than relation for array shapes: /// removing one axis from *Self* gives smaller dimension *Smaller*. 
-pub trait RemoveAxis: Dimension -{ +pub trait RemoveAxis: Dimension { fn remove_axis(&self, axis: Axis) -> Self::Smaller; } -impl RemoveAxis for Dim<[Ix; 1]> -{ +impl RemoveAxis for Dim<[Ix; 1]> { #[inline] - fn remove_axis(&self, axis: Axis) -> Ix0 - { + fn remove_axis(&self, axis: Axis) -> Ix0 { debug_assert!(axis.index() < self.ndim()); Ix0() } } -impl RemoveAxis for Dim<[Ix; 2]> -{ +impl RemoveAxis for Dim<[Ix; 2]> { #[inline] - fn remove_axis(&self, axis: Axis) -> Ix1 - { + fn remove_axis(&self, axis: Axis) -> Ix1 { let axis = axis.index(); debug_assert!(axis < self.ndim()); if axis == 0 { diff --git a/src/dimension/reshape.rs b/src/dimension/reshape.rs index 52d9e719a..99ab66d8f 100644 --- a/src/dimension/reshape.rs +++ b/src/dimension/reshape.rs @@ -146,8 +146,7 @@ where #[cfg(feature = "std")] #[test] -fn test_reshape() -{ +fn test_reshape() { use crate::Dim; macro_rules! test_reshape { diff --git a/src/dimension/sequence.rs b/src/dimension/sequence.rs index ed3605d57..c407ece51 100644 --- a/src/dimension/sequence.rs +++ b/src/dimension/sequence.rs @@ -12,8 +12,7 @@ where D: Dimension type Output = usize; #[inline] - fn index(&self, index: usize) -> &usize - { + fn index(&self, index: usize) -> &usize { &self.0[index] } } @@ -24,8 +23,7 @@ where D: Dimension type Output = usize; #[inline] - fn index(&self, index: usize) -> &usize - { + fn index(&self, index: usize) -> &usize { &self.0[index] } } @@ -34,8 +32,7 @@ impl IndexMut for Forward<&mut D> where D: Dimension { #[inline] - fn index_mut(&mut self, index: usize) -> &mut usize - { + fn index_mut(&mut self, index: usize) -> &mut usize { &mut self.0[index] } } @@ -46,8 +43,7 @@ where D: Dimension type Output = usize; #[inline] - fn index(&self, index: usize) -> &usize - { + fn index(&self, index: usize) -> &usize { &self.0[self.len() - index - 1] } } @@ -58,8 +54,7 @@ where D: Dimension type Output = usize; #[inline] - fn index(&self, index: usize) -> &usize - { + fn index(&self, index: usize) -> &usize { 
&self.0[self.len() - index - 1] } } @@ -68,16 +63,14 @@ impl IndexMut for Reverse<&mut D> where D: Dimension { #[inline] - fn index_mut(&mut self, index: usize) -> &mut usize - { + fn index_mut(&mut self, index: usize) -> &mut usize { let len = self.len(); &mut self.0[len - index - 1] } } /// Indexable sequence with length -pub(in crate::dimension) trait Sequence: Index -{ +pub(in crate::dimension) trait Sequence: Index { fn len(&self) -> usize; } @@ -88,8 +81,7 @@ impl Sequence for Forward<&D> where D: Dimension { #[inline] - fn len(&self) -> usize - { + fn len(&self) -> usize { self.0.ndim() } } @@ -98,8 +90,7 @@ impl Sequence for Forward<&mut D> where D: Dimension { #[inline] - fn len(&self) -> usize - { + fn len(&self) -> usize { self.0.ndim() } } @@ -110,8 +101,7 @@ impl Sequence for Reverse<&D> where D: Dimension { #[inline] - fn len(&self) -> usize - { + fn len(&self) -> usize { self.0.ndim() } } @@ -120,8 +110,7 @@ impl Sequence for Reverse<&mut D> where D: Dimension { #[inline] - fn len(&self) -> usize - { + fn len(&self) -> usize { self.0.ndim() } } diff --git a/src/error.rs b/src/error.rs index eb7395ad8..c45496142 100644 --- a/src/error.rs +++ b/src/error.rs @@ -12,24 +12,20 @@ use std::fmt; /// An error related to array shape or layout. #[derive(Clone)] -pub struct ShapeError -{ +pub struct ShapeError { // we want to be able to change this representation later repr: ErrorKind, } -impl ShapeError -{ +impl ShapeError { /// Return the `ErrorKind` of this error. #[inline] - pub fn kind(&self) -> ErrorKind - { + pub fn kind(&self) -> ErrorKind { self.repr } /// Create a new `ShapeError` - pub fn from_kind(error: ErrorKind) -> Self - { + pub fn from_kind(error: ErrorKind) -> Self { from_kind(error) } } @@ -40,8 +36,7 @@ impl ShapeError /// is not guaranteed. 
#[non_exhaustive] #[derive(Copy, Clone, Debug)] -pub enum ErrorKind -{ +pub enum ErrorKind { /// incompatible shape IncompatibleShape = 1, /// incompatible memory layout @@ -57,25 +52,20 @@ pub enum ErrorKind } #[inline(always)] -pub fn from_kind(k: ErrorKind) -> ShapeError -{ +pub fn from_kind(k: ErrorKind) -> ShapeError { ShapeError { repr: k } } -impl PartialEq for ErrorKind -{ +impl PartialEq for ErrorKind { #[inline(always)] - fn eq(&self, rhs: &Self) -> bool - { + fn eq(&self, rhs: &Self) -> bool { *self as u8 == *rhs as u8 } } -impl PartialEq for ShapeError -{ +impl PartialEq for ShapeError { #[inline(always)] - fn eq(&self, rhs: &Self) -> bool - { + fn eq(&self, rhs: &Self) -> bool { self.repr == rhs.repr } } @@ -83,10 +73,8 @@ impl PartialEq for ShapeError #[cfg(feature = "std")] impl Error for ShapeError {} -impl fmt::Display for ShapeError -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result - { +impl fmt::Display for ShapeError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let description = match self.kind() { ErrorKind::IncompatibleShape => "incompatible shapes", ErrorKind::IncompatibleLayout => "incompatible memory layout", @@ -99,10 +87,8 @@ impl fmt::Display for ShapeError } } -impl fmt::Debug for ShapeError -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result - { +impl fmt::Debug for ShapeError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self) } } diff --git a/src/extension/nonnull.rs b/src/extension/nonnull.rs index 08f80927e..043abee13 100644 --- a/src/extension/nonnull.rs +++ b/src/extension/nonnull.rs @@ -3,8 +3,7 @@ use alloc::vec::Vec; use std::ptr::NonNull; /// Return a NonNull pointer to the vector's data -pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull -{ +pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull { // this pointer is guaranteed to be non-null unsafe { NonNull::new_unchecked(v.as_mut_ptr()) } } @@ -15,8 +14,7 @@ pub(crate) fn 
nonnull_from_vec_data(v: &mut Vec) -> NonNull /// This is checked with a debug assertion, and will panic if this is not true, /// but treat this as an unconditional conversion. #[inline] -pub(crate) unsafe fn nonnull_debug_checked_from_ptr(ptr: *mut T) -> NonNull -{ +pub(crate) unsafe fn nonnull_debug_checked_from_ptr(ptr: *mut T) -> NonNull { debug_assert!(!ptr.is_null()); NonNull::new_unchecked(ptr) } diff --git a/src/free_functions.rs b/src/free_functions.rs index 3adf2d8f3..971e89ee2 100644 --- a/src/free_functions.rs +++ b/src/free_functions.rs @@ -51,26 +51,22 @@ macro_rules! array { } /// Create a zero-dimensional array with the element `x`. -pub fn arr0(x: A) -> Array0 -{ +pub fn arr0(x: A) -> Array0 { unsafe { ArrayBase::from_shape_vec_unchecked((), vec![x]) } } /// Create a one-dimensional array with elements from `xs`. -pub fn arr1(xs: &[A]) -> Array1 -{ +pub fn arr1(xs: &[A]) -> Array1 { ArrayBase::from(xs.to_vec()) } /// Create a one-dimensional array with elements from `xs`. -pub fn rcarr1(xs: &[A]) -> ArcArray1 -{ +pub fn rcarr1(xs: &[A]) -> ArcArray1 { arr1(xs).into_shared() } /// Create a zero-dimensional array view borrowing `x`. -pub const fn aview0(x: &A) -> ArrayView0<'_, A> -{ +pub const fn aview0(x: &A) -> ArrayView0<'_, A> { ArrayBase { data: ViewRepr::new(), // Safe because references are always non-null. 
@@ -101,8 +97,7 @@ pub const fn aview0(x: &A) -> ArrayView0<'_, A> /// /// assert_eq!(C.sum(), 6.); /// ``` -pub const fn aview1(xs: &[A]) -> ArrayView1<'_, A> -{ +pub const fn aview1(xs: &[A]) -> ArrayView1<'_, A> { if size_of::() == 0 { assert!( xs.len() <= isize::MAX as usize, @@ -136,8 +131,7 @@ pub const fn aview1(xs: &[A]) -> ArrayView1<'_, A> /// const C: ArrayView2<'static, f64> = aview2(&[[1., 2., 3.], [4., 5., 6.]]); /// assert_eq!(C.sum(), 21.); /// ``` -pub const fn aview2(xs: &[[A; N]]) -> ArrayView2<'_, A> -{ +pub const fn aview2(xs: &[[A; N]]) -> ArrayView2<'_, A> { let cols = N; let rows = xs.len(); if size_of::() == 0 { @@ -185,8 +179,7 @@ pub const fn aview2(xs: &[[A; N]]) -> ArrayView2<'_, A> /// } /// assert_eq!(&data[..10], [5, 0, 0, 5, 0, 0, 5, 0, 0, 5]); /// ``` -pub fn aview_mut1(xs: &mut [A]) -> ArrayViewMut1<'_, A> -{ +pub fn aview_mut1(xs: &mut [A]) -> ArrayViewMut1<'_, A> { ArrayViewMut::from(xs) } @@ -212,8 +205,7 @@ pub fn aview_mut1(xs: &mut [A]) -> ArrayViewMut1<'_, A> /// // look at the start of the result /// assert_eq!(&data[..3], [[1., -1.], [1., -1.], [1., -1.]]); /// ``` -pub fn aview_mut2(xs: &mut [[A; N]]) -> ArrayViewMut2<'_, A> -{ +pub fn aview_mut2(xs: &mut [[A; N]]) -> ArrayViewMut2<'_, A> { ArrayViewMut2::from(xs) } @@ -228,18 +220,15 @@ pub fn aview_mut2(xs: &mut [[A; N]]) -> ArrayViewMut2<'_, A> /// a.shape() == [2, 3] /// ); /// ``` -pub fn arr2(xs: &[[A; N]]) -> Array2 -{ +pub fn arr2(xs: &[[A; N]]) -> Array2 { Array2::from(xs.to_vec()) } -impl From> for Array2 -{ +impl From> for Array2 { /// Converts the `Vec` of arrays to an owned 2-D array. /// /// **Panics** if the product of non-zero axis lengths overflows `isize`. 
- fn from(mut xs: Vec<[A; N]>) -> Self - { + fn from(mut xs: Vec<[A; N]>) -> Self { let dim = Ix2(xs.len(), N); let ptr = xs.as_mut_ptr(); let cap = xs.capacity(); @@ -262,13 +251,11 @@ impl From> for Array2 } } -impl From> for Array3 -{ +impl From> for Array3 { /// Converts the `Vec` of arrays to an owned 3-D array. /// /// **Panics** if the product of non-zero axis lengths overflows `isize`. - fn from(mut xs: Vec<[[A; M]; N]>) -> Self - { + fn from(mut xs: Vec<[[A; M]; N]>) -> Self { let dim = Ix3(xs.len(), N, M); let ptr = xs.as_mut_ptr(); let cap = xs.capacity(); @@ -293,8 +280,7 @@ impl From> for Array3 /// Create a two-dimensional array with elements from `xs`. /// -pub fn rcarr2(xs: &[[A; N]]) -> ArcArray2 -{ +pub fn rcarr2(xs: &[[A; N]]) -> ArcArray2 { arr2(xs).into_shared() } @@ -315,13 +301,11 @@ pub fn rcarr2(xs: &[[A; N]]) -> ArcArray2 /// a.shape() == [3, 2, 2] /// ); /// ``` -pub fn arr3(xs: &[[[A; M]; N]]) -> Array3 -{ +pub fn arr3(xs: &[[[A; M]; N]]) -> Array3 { Array3::from(xs.to_vec()) } /// Create a three-dimensional array with elements from `xs`. -pub fn rcarr3(xs: &[[[A; M]; N]]) -> ArcArray -{ +pub fn rcarr3(xs: &[[[A; M]; N]]) -> ArcArray { arr3(xs).into_shared() } diff --git a/src/geomspace.rs b/src/geomspace.rs index 0ac91f529..23ee073c3 100644 --- a/src/geomspace.rs +++ b/src/geomspace.rs @@ -11,8 +11,7 @@ use num_traits::Float; /// An iterator of a sequence of geometrically spaced floats. /// /// Iterator element type is `F`. 
-pub struct Geomspace -{ +pub struct Geomspace { sign: F, start: F, step: F, @@ -26,8 +25,7 @@ where F: Float type Item = F; #[inline] - fn next(&mut self) -> Option - { + fn next(&mut self) -> Option { if self.index >= self.len { None } else { @@ -40,8 +38,7 @@ where F: Float } #[inline] - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { let n = self.len - self.index; (n, Some(n)) } @@ -51,8 +48,7 @@ impl DoubleEndedIterator for Geomspace where F: Float { #[inline] - fn next_back(&mut self) -> Option - { + fn next_back(&mut self) -> Option { if self.index >= self.len { None } else { @@ -82,8 +78,7 @@ impl ExactSizeIterator for Geomspace where Geomspace: Iterator {} /// **Panics** if converting `n - 1` to type `F` fails. #[inline] pub fn geomspace(a: F, b: F, n: usize) -> Option> -where F: Float -{ +where F: Float { if a == F::zero() || b == F::zero() || a.is_sign_negative() != b.is_sign_negative() { return None; } @@ -105,14 +100,12 @@ where F: Float } #[cfg(test)] -mod tests -{ +mod tests { use super::geomspace; #[test] #[cfg(feature = "approx")] - fn valid() - { + fn valid() { use crate::{arr1, Array1}; use approx::assert_abs_diff_eq; @@ -130,8 +123,7 @@ mod tests } #[test] - fn iter_forward() - { + fn iter_forward() { let mut iter = geomspace(1.0f64, 1e3, 4).unwrap(); assert!(iter.size_hint() == (4, Some(4))); @@ -146,8 +138,7 @@ mod tests } #[test] - fn iter_backward() - { + fn iter_backward() { let mut iter = geomspace(1.0f64, 1e3, 4).unwrap(); assert!(iter.size_hint() == (4, Some(4))); @@ -162,20 +153,17 @@ mod tests } #[test] - fn zero_lower() - { + fn zero_lower() { assert!(geomspace(0.0, 1.0, 4).is_none()); } #[test] - fn zero_upper() - { + fn zero_upper() { assert!(geomspace(1.0, 0.0, 4).is_none()); } #[test] - fn zero_included() - { + fn zero_included() { assert!(geomspace(-1.0, 1.0, 4).is_none()); } } diff --git a/src/impl_1d.rs b/src/impl_1d.rs index e49fdd731..18ad72869 100644 --- a/src/impl_1d.rs +++ 
b/src/impl_1d.rs @@ -34,8 +34,7 @@ where S: RawData /// Rotate the elements of the array by 1 element towards the front; /// the former first element becomes the last. pub(crate) fn rotate1_front(&mut self) - where S: DataMut - { + where S: DataMut { // use swapping to keep all elements initialized (as required by owned storage) let mut lane_iter = self.iter_mut(); let mut dst = if let Some(dst) = lane_iter.next() { dst } else { return }; diff --git a/src/impl_2d.rs b/src/impl_2d.rs index c2e9725ac..8f5c96eea 100644 --- a/src/impl_2d.rs +++ b/src/impl_2d.rs @@ -24,8 +24,7 @@ where S: RawData /// ``` #[track_caller] pub fn row(&self, index: Ix) -> ArrayView1<'_, A> - where S: Data - { + where S: Data { self.index_axis(Axis(0), index) } @@ -41,8 +40,7 @@ where S: RawData /// ``` #[track_caller] pub fn row_mut(&mut self, index: Ix) -> ArrayViewMut1<'_, A> - where S: DataMut - { + where S: DataMut { self.index_axis_mut(Axis(0), index) } @@ -63,8 +61,7 @@ where S: RawData /// // get length of any particular axis with .len_of() /// assert_eq!(m, array.len_of(Axis(0))); /// ``` - pub fn nrows(&self) -> usize - { + pub fn nrows(&self) -> usize { self.len_of(Axis(0)) } @@ -79,8 +76,7 @@ where S: RawData /// ``` #[track_caller] pub fn column(&self, index: Ix) -> ArrayView1<'_, A> - where S: Data - { + where S: Data { self.index_axis(Axis(1), index) } @@ -96,8 +92,7 @@ where S: RawData /// ``` #[track_caller] pub fn column_mut(&mut self, index: Ix) -> ArrayViewMut1<'_, A> - where S: DataMut - { + where S: DataMut { self.index_axis_mut(Axis(1), index) } @@ -118,8 +113,7 @@ where S: RawData /// // get length of any particular axis with .len_of() /// assert_eq!(n, array.len_of(Axis(1))); /// ``` - pub fn ncols(&self) -> usize - { + pub fn ncols(&self) -> usize { self.len_of(Axis(1)) } @@ -138,8 +132,7 @@ where S: RawData /// let array = array![[1., 2., 5.], [3., 4., 6.]]; /// assert!(!array.is_square()); /// ``` - pub fn is_square(&self) -> bool - { + pub fn is_square(&self) -> 
bool { let (m, n) = self.dim(); m == n } diff --git a/src/impl_clone.rs b/src/impl_clone.rs index d65f6c338..e2e111a12 100644 --- a/src/impl_clone.rs +++ b/src/impl_clone.rs @@ -9,10 +9,8 @@ use crate::imp_prelude::*; use crate::RawDataClone; -impl Clone for ArrayBase -{ - fn clone(&self) -> ArrayBase - { +impl Clone for ArrayBase { + fn clone(&self) -> ArrayBase { // safe because `clone_with_ptr` promises to provide equivalent data and ptr unsafe { let (data, ptr) = self.data.clone_with_ptr(self.ptr); @@ -28,8 +26,7 @@ impl Clone for ArrayBase /// `Array` implements `.clone_from()` to reuse an array's existing /// allocation. Semantically equivalent to `*self = other.clone()`, but /// potentially more efficient. - fn clone_from(&mut self, other: &Self) - { + fn clone_from(&mut self, other: &Self) { unsafe { self.ptr = self.data.clone_from_with_ptr(&other.data, other.ptr); self.dim.clone_from(&other.dim); diff --git a/src/impl_constructors.rs b/src/impl_constructors.rs index e5f19a837..4229666b3 100644 --- a/src/impl_constructors.rs +++ b/src/impl_constructors.rs @@ -54,8 +54,7 @@ where S: DataOwned /// /// let array = Array::from_vec(vec![1., 2., 3., 4.]); /// ``` - pub fn from_vec(v: Vec) -> Self - { + pub fn from_vec(v: Vec) -> Self { if mem::size_of::() == 0 { assert!( v.len() <= isize::MAX as usize, @@ -75,8 +74,7 @@ where S: DataOwned /// let array = Array::from_iter(0..10); /// ``` #[allow(clippy::should_implement_trait)] - pub fn from_iter>(iterable: I) -> Self - { + pub fn from_iter>(iterable: I) -> Self { Self::from_vec(iterable.into_iter().collect()) } @@ -99,8 +97,7 @@ where S: DataOwned /// ``` #[cfg(feature = "std")] pub fn linspace(start: A, end: A, n: usize) -> Self - where A: Float - { + where A: Float { Self::from(to_vec(linspace::linspace(start, end, n))) } @@ -117,8 +114,7 @@ where S: DataOwned /// ``` #[cfg(feature = "std")] pub fn range(start: A, end: A, step: A) -> Self - where A: Float - { + where A: Float { 
Self::from(to_vec(linspace::range(start, end, step))) } @@ -145,8 +141,7 @@ where S: DataOwned /// ``` #[cfg(feature = "std")] pub fn logspace(base: A, start: A, end: A, n: usize) -> Self - where A: Float - { + where A: Float { Self::from(to_vec(logspace::logspace(base, start, end, n))) } @@ -179,8 +174,7 @@ where S: DataOwned /// ``` #[cfg(feature = "std")] pub fn geomspace(start: A, end: A, n: usize) -> Option - where A: Float - { + where A: Float { Some(Self::from(to_vec(geomspace::geomspace(start, end, n)?))) } } @@ -455,14 +449,12 @@ where /// ); /// ``` pub fn from_shape_vec(shape: Sh, v: Vec) -> Result - where Sh: Into> - { + where Sh: Into> { // eliminate the type parameter Sh as soon as possible Self::from_shape_vec_impl(shape.into(), v) } - fn from_shape_vec_impl(shape: StrideShape, v: Vec) -> Result - { + fn from_shape_vec_impl(shape: StrideShape, v: Vec) -> Result { let dim = shape.dim; let is_custom = shape.strides.is_custom(); dimension::can_index_slice_with_strides(&v, &dim, &shape.strides)?; @@ -498,16 +490,14 @@ where /// 5. The strides must not allow any element to be referenced by two different /// indices. 
pub unsafe fn from_shape_vec_unchecked(shape: Sh, v: Vec) -> Self - where Sh: Into> - { + where Sh: Into> { let shape = shape.into(); let dim = shape.dim; let strides = shape.strides.strides_for_dim(&dim); Self::from_vec_dim_stride_unchecked(dim, strides, v) } - unsafe fn from_vec_dim_stride_unchecked(dim: D, strides: D, mut v: Vec) -> Self - { + unsafe fn from_vec_dim_stride_unchecked(dim: D, strides: D, mut v: Vec) -> Self { // debug check for issues that indicates wrong use of this constructor debug_assert!(dimension::can_index_slice(&v, &dim, &strides).is_ok()); @@ -580,8 +570,7 @@ where /// # let _ = shift_by_two; /// ``` pub fn uninit(shape: Sh) -> ArrayBase - where Sh: ShapeBuilder - { + where Sh: ShapeBuilder { unsafe { let shape = shape.into_shape_with_order(); let size = size_of_shape_checked_unwrap!(&shape.dim); @@ -676,8 +665,7 @@ where /// This method has been renamed to `uninit` #[deprecated(note = "Renamed to `uninit`", since = "0.15.0")] pub fn maybe_uninit(shape: Sh) -> Self - where Sh: ShapeBuilder - { + where Sh: ShapeBuilder { unsafe { let shape = shape.into_shape_with_order(); let size = size_of_shape_checked_unwrap!(&shape.dim); diff --git a/src/impl_cow.rs b/src/impl_cow.rs index f064ce7bd..a04b04b7f 100644 --- a/src/impl_cow.rs +++ b/src/impl_cow.rs @@ -15,14 +15,12 @@ impl<'a, A, D> CowArray<'a, A, D> where D: Dimension { /// Returns `true` iff the array is the view (borrowed) variant. - pub fn is_view(&self) -> bool - { + pub fn is_view(&self) -> bool { self.data.is_view() } /// Returns `true` iff the array is the owned variant. 
- pub fn is_owned(&self) -> bool - { + pub fn is_owned(&self) -> bool { self.data.is_owned() } } @@ -30,8 +28,7 @@ where D: Dimension impl<'a, A, D> From> for CowArray<'a, A, D> where D: Dimension { - fn from(view: ArrayView<'a, A, D>) -> CowArray<'a, A, D> - { + fn from(view: ArrayView<'a, A, D>) -> CowArray<'a, A, D> { // safe because equivalent data unsafe { ArrayBase::from_data_ptr(CowRepr::View(view.data), view.ptr).with_strides_dim(view.strides, view.dim) } } @@ -40,8 +37,7 @@ where D: Dimension impl<'a, A, D> From> for CowArray<'a, A, D> where D: Dimension { - fn from(array: Array) -> CowArray<'a, A, D> - { + fn from(array: Array) -> CowArray<'a, A, D> { // safe because equivalent data unsafe { ArrayBase::from_data_ptr(CowRepr::Owned(array.data), array.ptr).with_strides_dim(array.strides, array.dim) @@ -63,8 +59,7 @@ where Slice: AsRef<[A]> /// assert!(array.is_view()); /// assert_eq!(array, array![1., 2., 3., 4.]); /// ``` - fn from(slice: &'a Slice) -> Self - { + fn from(slice: &'a Slice) -> Self { Self::from(ArrayView1::from(slice)) } } @@ -75,8 +70,7 @@ where D: Dimension, { /// Create a read-only clone-on-write view of the array. 
- fn from(array: &'a ArrayBase) -> Self - { + fn from(array: &'a ArrayBase) -> Self { Self::from(array.view()) } } diff --git a/src/impl_dyn.rs b/src/impl_dyn.rs index 836234cec..4dc911a7e 100644 --- a/src/impl_dyn.rs +++ b/src/impl_dyn.rs @@ -29,8 +29,7 @@ where S: Data /// assert_eq!(a.shape(), &[2, 1, 3]); /// ``` #[track_caller] - pub fn insert_axis_inplace(&mut self, axis: Axis) - { + pub fn insert_axis_inplace(&mut self, axis: Axis) { assert!(axis.index() <= self.ndim()); self.dim = self.dim.insert_axis(axis); self.strides = self.strides.insert_axis(axis); @@ -52,8 +51,7 @@ where S: Data /// assert_eq!(a.shape(), &[2]); /// ``` #[track_caller] - pub fn index_axis_inplace(&mut self, axis: Axis, index: usize) - { + pub fn index_axis_inplace(&mut self, axis: Axis, index: usize) { self.collapse_axis(axis, index); self.dim = self.dim.remove_axis(axis); self.strides = self.strides.remove_axis(axis); diff --git a/src/impl_internal_constructors.rs b/src/impl_internal_constructors.rs index ebb2e26e0..ad0462040 100644 --- a/src/impl_internal_constructors.rs +++ b/src/impl_internal_constructors.rs @@ -22,8 +22,7 @@ where S: RawData /// The caller must ensure that the data storage and pointer is valid. /// /// See ArrayView::from_shape_ptr for general pointer validity documentation. - pub(crate) unsafe fn from_data_ptr(data: S, ptr: NonNull) -> Self - { + pub(crate) unsafe fn from_data_ptr(data: S, ptr: NonNull) -> Self { let array = ArrayBase { data, ptr, @@ -51,8 +50,7 @@ where /// The caller needs to ensure that the new strides and dimensions are correct /// for the array data. 
pub(crate) unsafe fn with_strides_dim(self, strides: E, dim: E) -> ArrayBase - where E: Dimension - { + where E: Dimension { debug_assert_eq!(strides.ndim(), dim.ndim()); ArrayBase { data: self.data, diff --git a/src/impl_methods.rs b/src/impl_methods.rs index d1250ec28..e9f9a01ec 100644 --- a/src/impl_methods.rs +++ b/src/impl_methods.rs @@ -16,6 +16,7 @@ use std::mem::{size_of, ManuallyDrop}; use crate::imp_prelude::*; use crate::argument_traits::AssignElem; +use crate::data_repr::{Device, OwnedRepr}; use crate::dimension; use crate::dimension::broadcast::co_broadcast; use crate::dimension::reshape_dim; @@ -66,8 +67,7 @@ where D: Dimension, { /// Return the total number of elements in the array. - pub fn len(&self) -> usize - { + pub fn len(&self) -> usize { self.dim.size() } @@ -78,28 +78,24 @@ where /// /// ***Panics*** if the axis is out of bounds. #[track_caller] - pub fn len_of(&self, axis: Axis) -> usize - { + pub fn len_of(&self, axis: Axis) -> usize { self.dim[axis.index()] } /// Return whether the array has any elements - pub fn is_empty(&self) -> bool - { + pub fn is_empty(&self) -> bool { self.len() == 0 } /// Return the number of dimensions (axes) in the array - pub fn ndim(&self) -> usize - { + pub fn ndim(&self) -> usize { self.dim.ndim() } /// Return the shape of the array in its “pattern” form, /// an integer in the one-dimensional case, tuple in the n-dimensional cases /// and so on. - pub fn dim(&self) -> D::Pattern - { + pub fn dim(&self) -> D::Pattern { self.dim.clone().into_pattern() } @@ -117,8 +113,7 @@ where /// // Create an array of zeros that's the same shape and dimensionality as `a`. 
/// let b = Array::::zeros(a.raw_dim()); /// ``` - pub fn raw_dim(&self) -> D - { + pub fn raw_dim(&self) -> D { self.dim.clone() } @@ -146,14 +141,12 @@ where /// let c = Array::zeros(a.raw_dim()); /// assert_eq!(a, c); /// ``` - pub fn shape(&self) -> &[usize] - { + pub fn shape(&self) -> &[usize] { self.dim.slice() } /// Return the strides of the array as a slice. - pub fn strides(&self) -> &[isize] - { + pub fn strides(&self) -> &[isize] { let s = self.strides.slice(); // reinterpret unsigned integer as signed unsafe { slice::from_raw_parts(s.as_ptr() as *const _, s.len()) } @@ -166,24 +159,21 @@ where /// /// ***Panics*** if the axis is out of bounds. #[track_caller] - pub fn stride_of(&self, axis: Axis) -> isize - { + pub fn stride_of(&self, axis: Axis) -> isize { // strides are reinterpreted as isize self.strides[axis.index()] as isize } /// Return a read-only view of the array pub fn view(&self) -> ArrayView<'_, A, D> - where S: Data - { + where S: Data { debug_assert!(self.pointer_is_inbounds()); unsafe { ArrayView::new(self.ptr, self.dim.clone(), self.strides.clone()) } } /// Return a read-write view of the array pub fn view_mut(&mut self) -> ArrayViewMut<'_, A, D> - where S: DataMut - { + where S: DataMut { self.ensure_unique(); unsafe { ArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) } } @@ -196,8 +186,7 @@ where /// The view acts "as if" the elements are temporarily in cells, and elements /// can be changed through shared references using the regular cell methods. pub fn cell_view(&mut self) -> ArrayView<'_, MathCell, D> - where S: DataMut - { + where S: DataMut { self.view_mut().into_cell_view() } @@ -285,16 +274,14 @@ where /// assert_eq!(unique, array![[1., 2.], [3., 4.]]); /// ``` pub fn try_into_owned_nocopy(self) -> Result, Self> - where S: Data - { + where S: Data { S::try_into_owned_nocopy(self) } /// Turn the array into a shared ownership (copy on write) array, /// without any copying. 
pub fn into_shared(self) -> ArcArray - where S: DataOwned - { + where S: DataOwned { let data = self.data.into_shared(); // safe because: equivalent unmoved data, ptr and dims remain valid unsafe { ArrayBase::from_data_ptr(data, self.ptr).with_strides_dim(self.strides, self.dim) } @@ -316,8 +303,7 @@ where /// assert_eq!(b.first(), None); /// ``` pub fn first(&self) -> Option<&A> - where S: Data - { + where S: Data { if self.is_empty() { None } else { @@ -341,8 +327,7 @@ where /// assert_eq!(b.first_mut(), None); /// ``` pub fn first_mut(&mut self) -> Option<&mut A> - where S: DataMut - { + where S: DataMut { if self.is_empty() { None } else { @@ -366,8 +351,7 @@ where /// assert_eq!(b.last(), None); /// ``` pub fn last(&self) -> Option<&A> - where S: Data - { + where S: Data { if self.is_empty() { None } else { @@ -395,8 +379,7 @@ where /// assert_eq!(b.last_mut(), None); /// ``` pub fn last_mut(&mut self) -> Option<&mut A> - where S: DataMut - { + where S: DataMut { if self.is_empty() { None } else { @@ -415,8 +398,7 @@ where /// /// Iterator element type is `&A`. pub fn iter(&self) -> Iter<'_, A, D> - where S: Data - { + where S: Data { debug_assert!(self.pointer_is_inbounds()); self.view().into_iter_() } @@ -428,8 +410,7 @@ where /// /// Iterator element type is `&mut A`. pub fn iter_mut(&mut self) -> IterMut<'_, A, D> - where S: DataMut - { + where S: DataMut { self.view_mut().into_iter_() } @@ -442,8 +423,7 @@ where /// /// See also [`Zip::indexed`] pub fn indexed_iter(&self) -> IndexedIter<'_, A, D> - where S: Data - { + where S: Data { IndexedIter::new(self.view().into_elements_base()) } @@ -454,8 +434,7 @@ where /// /// Iterator element type is `(D::Pattern, &mut A)`. pub fn indexed_iter_mut(&mut self) -> IndexedIterMut<'_, A, D> - where S: DataMut - { + where S: DataMut { IndexedIterMut::new(self.view_mut().into_elements_base()) } @@ -532,8 +511,7 @@ where /// (**Panics** if `D` is `IxDyn` and `info` does not match the number of array axes.) 
#[track_caller] pub fn slice_move(mut self, info: I) -> ArrayBase - where I: SliceArg - { + where I: SliceArg { assert_eq!( info.in_ndim(), self.ndim(), @@ -601,8 +579,7 @@ where /// - if `D` is `IxDyn` and `info` does not match the number of array axes #[track_caller] pub fn slice_collapse(&mut self, info: I) - where I: SliceArg - { + where I: SliceArg { assert_eq!( info.in_ndim(), self.ndim(), @@ -631,8 +608,7 @@ where #[track_caller] #[must_use = "slice_axis returns an array view with the sliced result"] pub fn slice_axis(&self, axis: Axis, indices: Slice) -> ArrayView<'_, A, D> - where S: Data - { + where S: Data { let mut view = self.view(); view.slice_axis_inplace(axis, indices); view @@ -645,8 +621,7 @@ where #[track_caller] #[must_use = "slice_axis_mut returns an array view with the sliced result"] pub fn slice_axis_mut(&mut self, axis: Axis, indices: Slice) -> ArrayViewMut<'_, A, D> - where S: DataMut - { + where S: DataMut { let mut view_mut = self.view_mut(); view_mut.slice_axis_inplace(axis, indices); view_mut @@ -657,8 +632,7 @@ where /// **Panics** if an index is out of bounds or step size is zero.
/// **Panics** if `axis` is out of bounds. #[track_caller] - pub fn slice_axis_inplace(&mut self, axis: Axis, indices: Slice) - { + pub fn slice_axis_inplace(&mut self, axis: Axis, indices: Slice) { let offset = do_slice(&mut self.dim.slice_mut()[axis.index()], &mut self.strides.slice_mut()[axis.index()], indices); unsafe { @@ -672,8 +646,7 @@ where /// **Panics** if an index is out of bounds or step size is zero.
/// **Panics** if `axis` is out of bounds. #[must_use = "slice_axis_move returns an array with the sliced result"] - pub fn slice_axis_move(mut self, axis: Axis, indices: Slice) -> Self - { + pub fn slice_axis_move(mut self, axis: Axis, indices: Slice) -> Self { self.slice_axis_inplace(axis, indices); self } @@ -723,8 +696,7 @@ where /// **Panics** if an index is out of bounds or step size is zero. #[track_caller] pub fn slice_each_axis_inplace(&mut self, mut f: F) - where F: FnMut(AxisDescription) -> Slice - { + where F: FnMut(AxisDescription) -> Slice { for ax in 0..self.ndim() { self.slice_axis_inplace( Axis(ax), @@ -777,8 +749,7 @@ where /// assert_eq!(unsafe { *p }, 2.); /// ``` pub fn get_ptr(&self, index: I) -> Option<*const A> - where I: NdIndex - { + where I: NdIndex { let ptr = self.ptr; index .index_checked(&self.dim, &self.strides) @@ -926,8 +897,7 @@ where // `get` for zero-dimensional arrays // panics if dimension is not zero. otherwise an element is always present. fn get_0d(&self) -> &A - where S: Data - { + where S: Data { assert!(self.ndim() == 0); unsafe { &*self.as_ptr() } } @@ -1002,8 +972,7 @@ where /// **Panics** if `axis` or `index` is out of bounds. #[track_caller] pub fn index_axis_move(mut self, axis: Axis, index: usize) -> ArrayBase - where D: RemoveAxis - { + where D: RemoveAxis { self.collapse_axis(axis, index); let dim = self.dim.remove_axis(axis); let strides = self.strides.remove_axis(axis); @@ -1015,8 +984,7 @@ where /// /// **Panics** if `axis` or `index` is out of bounds. 
#[track_caller] - pub fn collapse_axis(&mut self, axis: Axis, index: usize) - { + pub fn collapse_axis(&mut self, axis: Axis, index: usize) { let offset = dimension::do_collapse_axis(&mut self.dim, &self.strides, axis.index(), index); self.ptr = unsafe { self.ptr.offset(offset) }; debug_assert!(self.pointer_is_inbounds()); @@ -1109,8 +1077,7 @@ where /// } /// ``` pub fn rows(&self) -> Lanes<'_, A, D::Smaller> - where S: Data - { + where S: Data { let mut n = self.ndim(); if n == 0 { n += 1; @@ -1120,8 +1087,7 @@ where #[deprecated(note = "Renamed to .rows()", since = "0.15.0")] pub fn genrows(&self) -> Lanes<'_, A, D::Smaller> - where S: Data - { + where S: Data { self.rows() } @@ -1130,8 +1096,7 @@ where /// /// Iterator element is `ArrayView1
` (1D read-write array view). pub fn rows_mut(&mut self) -> LanesMut<'_, A, D::Smaller> - where S: DataMut - { + where S: DataMut { let mut n = self.ndim(); if n == 0 { n += 1; @@ -1141,8 +1106,7 @@ where #[deprecated(note = "Renamed to .rows_mut()", since = "0.15.0")] pub fn genrows_mut(&mut self) -> LanesMut<'_, A, D::Smaller> - where S: DataMut - { + where S: DataMut { self.rows_mut() } @@ -1173,8 +1137,7 @@ where /// } /// ``` pub fn columns(&self) -> Lanes<'_, A, D::Smaller> - where S: Data - { + where S: Data { Lanes::new(self.view(), Axis(0)) } @@ -1184,8 +1147,7 @@ where /// Renamed to `.columns()` #[deprecated(note = "Renamed to .columns()", since = "0.15.0")] pub fn gencolumns(&self) -> Lanes<'_, A, D::Smaller> - where S: Data - { + where S: Data { self.columns() } @@ -1194,8 +1156,7 @@ where /// /// Iterator element is `ArrayView1` (1D read-write array view). pub fn columns_mut(&mut self) -> LanesMut<'_, A, D::Smaller> - where S: DataMut - { + where S: DataMut { LanesMut::new(self.view_mut(), Axis(0)) } @@ -1205,8 +1166,7 @@ where /// Renamed to `.columns_mut()` #[deprecated(note = "Renamed to .columns_mut()", since = "0.15.0")] pub fn gencolumns_mut(&mut self) -> LanesMut<'_, A, D::Smaller> - where S: DataMut - { + where S: DataMut { self.columns_mut() } @@ -1239,8 +1199,7 @@ where /// assert_eq!(inner2.into_iter().next().unwrap(), aview1(&[0, 1, 2])); /// ``` pub fn lanes(&self, axis: Axis) -> Lanes<'_, A, D::Smaller> - where S: Data - { + where S: Data { Lanes::new(self.view(), axis) } @@ -1249,8 +1208,7 @@ where /// /// Iterator element is `ArrayViewMut1` (1D read-write array view). 
pub fn lanes_mut(&mut self, axis: Axis) -> LanesMut<'_, A, D::Smaller> - where S: DataMut - { + where S: DataMut { LanesMut::new(self.view_mut(), axis) } @@ -1352,8 +1310,7 @@ where /// ``` #[track_caller] pub fn axis_chunks_iter(&self, axis: Axis, size: usize) -> AxisChunksIter<'_, A, D> - where S: Data - { + where S: Data { AxisChunksIter::new(self.view(), axis, size) } @@ -1365,8 +1322,7 @@ where /// **Panics** if `axis` is out of bounds or if `size` is zero. #[track_caller] pub fn axis_chunks_iter_mut(&mut self, axis: Axis, size: usize) -> AxisChunksIterMut<'_, A, D> - where S: DataMut - { + where S: DataMut { AxisChunksIterMut::new(self.view_mut(), axis, size) } @@ -1521,8 +1477,7 @@ where /// } /// ``` pub fn axis_windows(&self, axis: Axis, window_size: usize) -> Windows<'_, A, D> - where S: Data - { + where S: Data { let axis_index = axis.index(); ndassert!( @@ -1543,8 +1498,7 @@ where } // Return (length, stride) for diagonal - fn diag_params(&self) -> (Ix, Ixs) - { + fn diag_params(&self) -> (Ix, Ixs) { /* empty shape has len 1 */ let len = self.dim.slice().iter().cloned().min().unwrap_or(1); let stride = self.strides().iter().sum(); @@ -1556,21 +1510,18 @@ where /// The diagonal is simply the sequence indexed by *(0, 0, .., 0)*, /// *(1, 1, ..., 1)* etc as long as all axes have elements. pub fn diag(&self) -> ArrayView1<'_, A> - where S: Data - { + where S: Data { self.view().into_diag() } /// Return a read-write view over the diagonal elements of the array. pub fn diag_mut(&mut self) -> ArrayViewMut1<'_, A> - where S: DataMut - { + where S: DataMut { self.view_mut().into_diag() } /// Return the diagonal as a one-dimensional array. 
- pub fn into_diag(self) -> ArrayBase - { + pub fn into_diag(self) -> ArrayBase { let (len, stride) = self.diag_params(); // safe because new len stride allows access to a subset of the current elements unsafe { self.with_strides_dim(Ix1(stride as Ix), Ix1(len)) } @@ -1582,8 +1533,7 @@ where /// /// This method is mostly only useful with unsafe code. fn try_ensure_unique(&mut self) - where S: RawDataMut - { + where S: RawDataMut { debug_assert!(self.pointer_is_inbounds()); S::try_ensure_unique(self); debug_assert!(self.pointer_is_inbounds()); @@ -1593,8 +1543,7 @@ where /// /// This method is mostly only useful with unsafe code. fn ensure_unique(&mut self) - where S: DataMut - { + where S: DataMut { debug_assert!(self.pointer_is_inbounds()); S::ensure_unique(self); debug_assert!(self.pointer_is_inbounds()); @@ -1605,14 +1554,12 @@ where /// /// Return `false` otherwise, i.e. the array is possibly not /// contiguous in memory, it has custom strides, etc. - pub fn is_standard_layout(&self) -> bool - { + pub fn is_standard_layout(&self) -> bool { dimension::is_layout_c(&self.dim, &self.strides) } /// Return true if the array is known to be contiguous. - pub(crate) fn is_contiguous(&self) -> bool - { + pub(crate) fn is_contiguous(&self) -> bool { D::is_contiguous(&self.dim, &self.strides) } @@ -1668,8 +1615,7 @@ where /// /// where *d* is `self.ndim()`. #[inline(always)] - pub fn as_ptr(&self) -> *const A - { + pub fn as_ptr(&self) -> *const A { self.ptr.as_ptr() as *const A } @@ -1685,16 +1631,14 @@ where /// the data may change the strides. #[inline(always)] pub fn as_mut_ptr(&mut self) -> *mut A - where S: RawDataMut - { + where S: RawDataMut { self.try_ensure_unique(); // for ArcArray self.ptr.as_ptr() } /// Return a raw view of the array. 
#[inline] - pub fn raw_view(&self) -> RawArrayView - { + pub fn raw_view(&self) -> RawArrayView { unsafe { RawArrayView::new(self.ptr, self.dim.clone(), self.strides.clone()) } } @@ -1704,8 +1648,7 @@ where /// data is guaranteed to be uniquely held on return. #[inline] pub fn raw_view_mut(&mut self) -> RawArrayViewMut - where S: RawDataMut - { + where S: RawDataMut { self.try_ensure_unique(); // for ArcArray unsafe { RawArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) } } @@ -1715,8 +1658,7 @@ where /// Safety: The caller must ensure that the owned array is unshared when this is called #[inline] pub(crate) unsafe fn raw_view_mut_unchecked(&mut self) -> RawArrayViewMut - where S: DataOwned - { + where S: DataOwned { RawArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) } @@ -1726,8 +1668,7 @@ where /// If this function returns `Some(_)`, then the element order in the slice /// corresponds to the logical order of the array’s elements. pub fn as_slice(&self) -> Option<&[A]> - where S: Data - { + where S: Data { if self.is_standard_layout() { unsafe { Some(slice::from_raw_parts(self.ptr.as_ptr(), self.len())) } } else { @@ -1738,8 +1679,7 @@ where /// Return the array’s data as a slice, if it is contiguous and in standard order. /// Return `None` otherwise. pub fn as_slice_mut(&mut self) -> Option<&mut [A]> - where S: DataMut - { + where S: DataMut { if self.is_standard_layout() { self.ensure_unique(); unsafe { Some(slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len())) } @@ -1754,8 +1694,7 @@ where /// If this function returns `Some(_)`, then the elements in the slice /// have whatever order the elements have in memory. 
pub fn as_slice_memory_order(&self) -> Option<&[A]> - where S: Data - { + where S: Data { if self.is_contiguous() { let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides); unsafe { Some(slice::from_raw_parts(self.ptr.sub(offset).as_ptr(), self.len())) } @@ -1771,16 +1710,14 @@ where /// method unshares the data if necessary, but it preserves the existing /// strides. pub fn as_slice_memory_order_mut(&mut self) -> Option<&mut [A]> - where S: DataMut - { + where S: DataMut { self.try_as_slice_memory_order_mut().ok() } /// Return the array’s data as a slice if it is contiguous, otherwise /// return `self` in the `Err` variant. pub(crate) fn try_as_slice_memory_order_mut(&mut self) -> Result<&mut [A], &mut Self> - where S: DataMut - { + where S: DataMut { if self.is_contiguous() { self.ensure_unique(); let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides); @@ -1940,15 +1877,13 @@ where /// ); /// ``` pub fn into_shape_with_order(self, shape: E) -> Result, ShapeError> - where E: ShapeArg - { + where E: ShapeArg { let (shape, order) = shape.into_shape_and_order(); self.into_shape_with_order_impl(shape, order.unwrap_or(Order::RowMajor)) } fn into_shape_with_order_impl(self, shape: E, order: Order) -> Result, ShapeError> - where E: Dimension - { + where E: Dimension { let shape = shape.into_dimension(); if size_of_shape_checked(&shape) != Ok(self.dim.size()) { return Err(error::incompatible_shapes(&self.dim, &shape)); @@ -1992,8 +1927,7 @@ where /// ``` #[deprecated = "Use `.into_shape_with_order()` or `.to_shape()`"] pub fn into_shape(self, shape: E) -> Result, ShapeError> - where E: IntoDimension - { + where E: IntoDimension { let shape = shape.into_dimension(); if size_of_shape_checked(&shape) != Ok(self.dim.size()) { return Err(error::incompatible_shapes(&self.dim, &shape)); @@ -2139,8 +2073,7 @@ where /// let array: ArrayD = arr2(&[[1, 2], /// [3, 4]]).into_dyn(); /// ``` - pub fn into_dyn(self) -> ArrayBase - { + pub 
fn into_dyn(self) -> ArrayBase { // safe because new dims equivalent unsafe { ArrayBase::from_data_ptr(self.data, self.ptr).with_strides_dim(self.strides.into_dyn(), self.dim.into_dyn()) @@ -2164,8 +2097,7 @@ where /// assert!(array.into_dimensionality::().is_ok()); /// ``` pub fn into_dimensionality(self) -> Result, ShapeError> - where D2: Dimension - { + where D2: Dimension { unsafe { if D::NDIM == D2::NDIM { // safe because D == D2 @@ -2227,8 +2159,7 @@ where /// /// **Note:** Cannot be used for mutable iterators, since repeating /// elements would create aliasing pointers. - fn upcast(to: &D, from: &E, stride: &E) -> Option - { + fn upcast(to: &D, from: &E, stride: &E) -> Option { // Make sure the product of non-zero axis lengths does not exceed // `isize::MAX`. This is the only safety check we need to perform // because all the other constraints of `ArrayBase` are guaranteed @@ -2334,8 +2265,7 @@ where /// ); /// ``` #[track_caller] - pub fn swap_axes(&mut self, ax: usize, bx: usize) - { + pub fn swap_axes(&mut self, ax: usize, bx: usize) { self.dim.slice_mut().swap(ax, bx); self.strides.slice_mut().swap(ax, bx); } @@ -2364,8 +2294,7 @@ where /// ``` #[track_caller] pub fn permuted_axes(self, axes: T) -> ArrayBase - where T: IntoDimension - { + where T: IntoDimension { let axes = axes.into_dimension(); // Ensure that each axis is used exactly once. let mut usage_counts = D::zeros(self.ndim()); @@ -2394,8 +2323,7 @@ where /// /// Transposition reverses the order of the axes (dimensions and strides) /// while retaining the same data. - pub fn reversed_axes(mut self) -> ArrayBase - { + pub fn reversed_axes(mut self) -> ArrayBase { self.dim.slice_mut().reverse(); self.strides.slice_mut().reverse(); self @@ -2407,14 +2335,12 @@ where /// /// See also the more general methods `.reversed_axes()` and `.swap_axes()`. 
pub fn t(&self) -> ArrayView<'_, A, D> - where S: Data - { + where S: Data { self.view().reversed_axes() } /// Return an iterator over the length and stride of each axis. - pub fn axes(&self) -> Axes<'_, D> - { + pub fn axes(&self) -> Axes<'_, D> { axes_of(&self.dim, &self.strides) } @@ -2427,8 +2353,7 @@ where /// Return the axis with the greatest stride (by absolute value), /// preferring axes with len > 1. - pub fn max_stride_axis(&self) -> Axis - { + pub fn max_stride_axis(&self) -> Axis { self.dim.max_stride_axis(&self.strides) } @@ -2436,8 +2361,7 @@ where /// /// ***Panics*** if the axis is out of bounds. #[track_caller] - pub fn invert_axis(&mut self, axis: Axis) - { + pub fn invert_axis(&mut self, axis: Axis) { unsafe { let s = self.strides.axis(axis) as Ixs; let m = self.dim.axis(axis); @@ -2484,8 +2408,7 @@ where /// /// ***Panics*** if an axis is out of bounds. #[track_caller] - pub fn merge_axes(&mut self, take: Axis, into: Axis) -> bool - { + pub fn merge_axes(&mut self, take: Axis, into: Axis) -> bool { merge_axes(&mut self.dim, &mut self.strides, take, into) } @@ -2511,8 +2434,7 @@ where /// /// ***Panics*** if the axis is out of bounds. #[track_caller] - pub fn insert_axis(self, axis: Axis) -> ArrayBase - { + pub fn insert_axis(self, axis: Axis) -> ArrayBase { assert!(axis.index() <= self.ndim()); // safe because a new axis of length one does not affect memory layout unsafe { @@ -2530,13 +2452,11 @@ where /// **Panics** if the axis is out of bounds or its length is zero. #[track_caller] pub fn remove_axis(self, axis: Axis) -> ArrayBase - where D: RemoveAxis - { + where D: RemoveAxis { self.index_axis_move(axis, 0) } - pub(crate) fn pointer_is_inbounds(&self) -> bool - { + pub(crate) fn pointer_is_inbounds(&self) -> bool { self.data._is_pointer_inbounds(self.as_ptr()) } @@ -2979,8 +2899,7 @@ where /// ***Panics*** if `axis` is out of bounds
/// ***Panics*** if not `index < self.len_of(axis)`. pub fn remove_index(&mut self, axis: Axis, index: usize) - where S: DataOwned + DataMut - { + where S: DataOwned + DataMut { assert!(index < self.len_of(axis), "index {} must be less than length of Axis({})", index, axis.index()); let (_, mut tail) = self.view_mut().split_at(axis, index); @@ -3055,8 +2974,7 @@ where /// **Panics** if the size of A and B are different. #[track_caller] #[inline] -unsafe fn unlimited_transmute(data: A) -> B -{ +unsafe fn unlimited_transmute(data: A) -> B { // safe when sizes are equal and caller guarantees that representations are equal assert_eq!(size_of::
(), size_of::()); let old_data = ManuallyDrop::new(data); @@ -3064,3 +2982,21 @@ unsafe fn unlimited_transmute(data: A) -> B } type DimMaxOf = >::Output; + +impl ArrayBase, D> +where A: std::fmt::Debug +{ + pub fn copy_to_device(self, device: Device) -> Option { + let dim = self.dim; + let strides = self.strides; + let data = self.data.copy_to_device(device)?; + let ptr = std::ptr::NonNull::new(data.as_ptr() as *mut A).unwrap(); + + Some(Self { + data, + ptr, + dim, + strides, + }) + } +} diff --git a/src/impl_ops.rs b/src/impl_ops.rs index 46ea18a7c..8d02364d1 100644 --- a/src/impl_ops.rs +++ b/src/impl_ops.rs @@ -288,25 +288,21 @@ impl<'a, S, D> $trt<&'a ArrayBase> for $scalar ); } -mod arithmetic_ops -{ +mod arithmetic_ops { use super::*; use crate::imp_prelude::*; use std::ops::*; - fn clone_opf(f: impl Fn(A, B) -> C) -> impl FnMut(&A, &B) -> C - { + fn clone_opf(f: impl Fn(A, B) -> C) -> impl FnMut(&A, &B) -> C { move |x, y| f(x.clone(), y.clone()) } - fn clone_iopf(f: impl Fn(A, B) -> A) -> impl FnMut(&mut A, &B) - { + fn clone_iopf(f: impl Fn(A, B) -> A) -> impl FnMut(&mut A, &B) { move |x, y| *x = f(x.clone(), y.clone()) } - fn clone_iopf_rev(f: impl Fn(A, B) -> B) -> impl FnMut(&mut B, &A) - { + fn clone_iopf_rev(f: impl Fn(A, B) -> B) -> impl FnMut(&mut B, &A) { move |x, y| *x = f(y.clone(), x.clone()) } @@ -382,8 +378,7 @@ mod arithmetic_ops { type Output = Self; /// Perform an elementwise negation of `self` and return the result. - fn neg(mut self) -> Self - { + fn neg(mut self) -> Self { self.map_inplace(|elt| { *elt = -elt.clone(); }); @@ -400,8 +395,7 @@ mod arithmetic_ops type Output = Array; /// Perform an elementwise negation of reference `self` and return the /// result as a new `Array`. - fn neg(self) -> Array - { + fn neg(self) -> Array { self.map(Neg::neg) } } @@ -414,8 +408,7 @@ mod arithmetic_ops { type Output = Self; /// Perform an elementwise unary not of `self` and return the result. 
- fn not(mut self) -> Self - { + fn not(mut self) -> Self { self.map_inplace(|elt| { *elt = !elt.clone(); }); @@ -432,15 +425,13 @@ mod arithmetic_ops type Output = Array; /// Perform an elementwise unary not of reference `self` and return the /// result as a new `Array`. - fn not(self) -> Array - { + fn not(self) -> Array { self.map(Not::not) } } } -mod assign_ops -{ +mod assign_ops { use super::*; use crate::imp_prelude::*; diff --git a/src/impl_owned_array.rs b/src/impl_owned_array.rs index 53be9e48c..ebe76fd99 100644 --- a/src/impl_owned_array.rs +++ b/src/impl_owned_array.rs @@ -17,8 +17,7 @@ use crate::Zip; /// Methods specific to `Array0`. /// /// ***See also all methods for [`ArrayBase`]*** -impl Array -{ +impl Array { /// Returns the single element in the array without cloning it. /// /// ``` @@ -32,8 +31,7 @@ impl Array /// let scalar: Foo = array.into_scalar(); /// assert_eq!(scalar, Foo); /// ``` - pub fn into_scalar(self) -> A - { + pub fn into_scalar(self) -> A { let size = mem::size_of::(); if size == 0 { // Any index in the `Vec` is fine since all elements are identical. @@ -64,8 +62,7 @@ where D: Dimension /// /// If the array is in standard memory layout, the logical element order /// of the array (`.iter()` order) and of the returned vector will be the same. - pub fn into_raw_vec(self) -> Vec - { + pub fn into_raw_vec(self) -> Vec { self.data.into_vec() } } @@ -73,8 +70,7 @@ where D: Dimension /// Methods specific to `Array2`. /// /// ***See also all methods for [`ArrayBase`]*** -impl Array -{ +impl Array { /// Append a row to an array /// /// The elements from `row` are cloned and added as a new row in the array. 
@@ -115,8 +111,7 @@ impl Array /// [-1., -2., -3., -4.]]); /// ``` pub fn push_row(&mut self, row: ArrayView) -> Result<(), ShapeError> - where A: Clone - { + where A: Clone { self.append(Axis(0), row.insert_axis(Axis(0))) } @@ -160,8 +155,7 @@ impl Array /// [2., -2.]]); /// ``` pub fn push_column(&mut self, column: ArrayView) -> Result<(), ShapeError> - where A: Clone - { + where A: Clone { self.append(Axis(1), column.insert_axis(Axis(1))) } } @@ -203,8 +197,7 @@ where D: Dimension } } - fn move_into_needs_drop(mut self, new_array: ArrayViewMut) - { + fn move_into_needs_drop(mut self, new_array: ArrayViewMut) { // Simple case where `A` has a destructor: just swap values between self and new_array. // Afterwards, `self` drops full of initialized values and dropping works as usual. // This avoids moving out of owned values in `self` while at the same time managing @@ -249,8 +242,7 @@ where D: Dimension self.move_into_impl(new_array.into()) } - fn move_into_impl(mut self, new_array: ArrayViewMut, D>) - { + fn move_into_impl(mut self, new_array: ArrayViewMut, D>) { unsafe { // Safety: copy_to_nonoverlapping cannot panic let guard = AbortIfPanic(&"move_into: moving out of owned value"); @@ -275,8 +267,7 @@ where D: Dimension /// # Safety /// /// This is a panic critical section since `self` is already moved-from. 
- fn drop_unreachable_elements(mut self) -> OwnedRepr - { + fn drop_unreachable_elements(mut self) -> OwnedRepr { let self_len = self.len(); // "deconstruct" self; the owned repr releases ownership of all elements and we @@ -296,8 +287,7 @@ where D: Dimension #[inline(never)] #[cold] - fn drop_unreachable_elements_slow(mut self) -> OwnedRepr - { + fn drop_unreachable_elements_slow(mut self) -> OwnedRepr { // "deconstruct" self; the owned repr releases ownership of all elements and we // carry on with raw view methods let data_len = self.data.len(); @@ -318,8 +308,7 @@ where D: Dimension /// Create an empty array with an all-zeros shape /// /// ***Panics*** if D is zero-dimensional, because it can't be empty - pub(crate) fn empty() -> Array - { + pub(crate) fn empty() -> Array { assert_ne!(D::NDIM, Some(0)); let ndim = D::NDIM.unwrap_or(1); Array::from_shape_simple_fn(D::zeros(ndim), || unreachable!()) @@ -327,8 +316,7 @@ where D: Dimension /// Create new_array with the right layout for appending to `growing_axis` #[cold] - fn change_to_contig_append_layout(&mut self, growing_axis: Axis) - { + fn change_to_contig_append_layout(&mut self, growing_axis: Axis) { let ndim = self.ndim(); let mut dim = self.raw_dim(); @@ -627,16 +615,13 @@ where D: Dimension // on scope exit (panic or loop finish). This "indirect" way to // write the length is used to help the compiler, the len store to self.data may // otherwise be mistaken to alias with other stores in the loop. - struct SetLenOnDrop<'a, A: 'a> - { + struct SetLenOnDrop<'a, A: 'a> { len: usize, data: &'a mut OwnedRepr, } - impl Drop for SetLenOnDrop<'_, A> - { - fn drop(&mut self) - { + impl Drop for SetLenOnDrop<'_, A> { + fn drop(&mut self) { unsafe { self.data.set_len(self.len); } @@ -678,8 +663,7 @@ where D: Dimension /// This is an internal function for use by move_into and IntoIter only, safety invariants may need /// to be upheld across the calls from those implementations. 
pub(crate) unsafe fn drop_unreachable_raw(mut self_: RawArrayViewMut, data_ptr: *mut A, data_len: usize) -where D: Dimension -{ +where D: Dimension { let self_len = self_.len(); for i in 0..self_.ndim() { @@ -761,8 +745,7 @@ where } fn sort_axes1_impl(adim: &mut D, astrides: &mut D) -where D: Dimension -{ +where D: Dimension { debug_assert!(adim.ndim() > 1); debug_assert_eq!(adim.ndim(), astrides.ndim()); // bubble sort axes @@ -801,8 +784,7 @@ where } fn sort_axes2_impl(adim: &mut D, astrides: &mut D, bdim: &mut D, bstrides: &mut D) -where D: Dimension -{ +where D: Dimension { debug_assert!(adim.ndim() > 1); debug_assert_eq!(adim.ndim(), bdim.ndim()); // bubble sort axes diff --git a/src/impl_raw_views.rs b/src/impl_raw_views.rs index aeee75cb2..237a94898 100644 --- a/src/impl_raw_views.rs +++ b/src/impl_raw_views.rs @@ -16,13 +16,11 @@ where D: Dimension /// Unsafe because caller is responsible for ensuring that the array will /// meet all of the invariants of the `ArrayBase` type. #[inline] - pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self - { + pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self { RawArrayView::from_data_ptr(RawViewRepr::new(), ptr).with_strides_dim(strides, dim) } - unsafe fn new_(ptr: *const A, dim: D, strides: D) -> Self - { + unsafe fn new_(ptr: *const A, dim: D, strides: D) -> Self { Self::new(nonnull_debug_checked_from_ptr(ptr as *mut A), dim, strides) } @@ -67,8 +65,7 @@ where D: Dimension /// /// [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub unsafe fn from_shape_ptr(shape: Sh, ptr: *const A) -> Self - where Sh: Into> - { + where Sh: Into> { let shape = shape.into(); let dim = shape.dim; if cfg!(debug_assertions) { @@ -93,8 +90,7 @@ where D: Dimension /// data is valid, ensure that the pointer is aligned, and choose the /// correct lifetime. 
#[inline] - pub unsafe fn deref_into_view<'a>(self) -> ArrayView<'a, A, D> - { + pub unsafe fn deref_into_view<'a>(self) -> ArrayView<'a, A, D> { debug_assert!( is_aligned(self.ptr.as_ptr()), "The pointer must be aligned." @@ -107,8 +103,7 @@ where D: Dimension /// /// **Panics** if `axis` or `index` is out of bounds. #[track_caller] - pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) - { + pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { assert!(index <= self.len_of(axis)); let left_ptr = self.ptr.as_ptr(); let right_ptr = if index == self.len_of(axis) { @@ -142,8 +137,7 @@ where D: Dimension /// casts are safe, access through the produced raw view is only possible /// in an unsafe block or function. #[track_caller] - pub fn cast(self) -> RawArrayView - { + pub fn cast(self) -> RawArrayView { assert_eq!( mem::size_of::(), mem::size_of::(), @@ -159,8 +153,7 @@ where D: Dimension { /// Splits the view into views of the real and imaginary components of the /// elements. - pub fn split_complex(self) -> Complex> - { + pub fn split_complex(self) -> Complex> { // Check that the size and alignment of `Complex` are as expected. // These assertions should always pass, for arbitrary `T`. assert_eq!( @@ -229,13 +222,11 @@ where D: Dimension /// Unsafe because caller is responsible for ensuring that the array will /// meet all of the invariants of the `ArrayBase` type. 
#[inline] - pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self - { + pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self { RawArrayViewMut::from_data_ptr(RawViewRepr::new(), ptr).with_strides_dim(strides, dim) } - unsafe fn new_(ptr: *mut A, dim: D, strides: D) -> Self - { + unsafe fn new_(ptr: *mut A, dim: D, strides: D) -> Self { Self::new(nonnull_debug_checked_from_ptr(ptr), dim, strides) } @@ -280,8 +271,7 @@ where D: Dimension /// /// [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub unsafe fn from_shape_ptr(shape: Sh, ptr: *mut A) -> Self - where Sh: Into> - { + where Sh: Into> { let shape = shape.into(); let dim = shape.dim; if cfg!(debug_assertions) { @@ -299,8 +289,7 @@ where D: Dimension /// Converts to a non-mutable `RawArrayView`. #[inline] - pub(crate) fn into_raw_view(self) -> RawArrayView - { + pub(crate) fn into_raw_view(self) -> RawArrayView { unsafe { RawArrayView::new(self.ptr, self.dim, self.strides) } } @@ -313,8 +302,7 @@ where D: Dimension /// data is valid, ensure that the pointer is aligned, and choose the /// correct lifetime. #[inline] - pub unsafe fn deref_into_view<'a>(self) -> ArrayView<'a, A, D> - { + pub unsafe fn deref_into_view<'a>(self) -> ArrayView<'a, A, D> { debug_assert!( is_aligned(self.ptr.as_ptr()), "The pointer must be aligned." @@ -331,8 +319,7 @@ where D: Dimension /// data is valid, ensure that the pointer is aligned, and choose the /// correct lifetime. #[inline] - pub unsafe fn deref_into_view_mut<'a>(self) -> ArrayViewMut<'a, A, D> - { + pub unsafe fn deref_into_view_mut<'a>(self) -> ArrayViewMut<'a, A, D> { debug_assert!( is_aligned(self.ptr.as_ptr()), "The pointer must be aligned." @@ -345,8 +332,7 @@ where D: Dimension /// /// **Panics** if `axis` or `index` is out of bounds. 
#[track_caller] - pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) - { + pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { let (left, right) = self.into_raw_view().split_at(axis, index); unsafe { (Self::new(left.ptr, left.dim, left.strides), Self::new(right.ptr, right.dim, right.strides)) } } @@ -362,8 +348,7 @@ where D: Dimension /// casts are safe, access through the produced raw view is only possible /// in an unsafe block or function. #[track_caller] - pub fn cast(self) -> RawArrayViewMut - { + pub fn cast(self) -> RawArrayViewMut { assert_eq!( mem::size_of::(), mem::size_of::(), @@ -379,8 +364,7 @@ where D: Dimension { /// Splits the view into views of the real and imaginary components of the /// elements. - pub fn split_complex(self) -> Complex> - { + pub fn split_complex(self) -> Complex> { let Complex { re, im } = self.into_raw_view().split_complex(); unsafe { Complex { diff --git a/src/impl_special_element_types.rs b/src/impl_special_element_types.rs index e430b20bc..65e878963 100644 --- a/src/impl_special_element_types.rs +++ b/src/impl_special_element_types.rs @@ -31,8 +31,7 @@ where /// Note that for owned and shared ownership arrays, the promise must include all of the /// array's storage; it is for example possible to slice these in place, but that must /// only be done after all elements have been initialized. 
- pub unsafe fn assume_init(self) -> ArrayBase<>::Output, D> - { + pub unsafe fn assume_init(self) -> ArrayBase<>::Output, D> { let ArrayBase { data, ptr, diff --git a/src/impl_views/constructors.rs b/src/impl_views/constructors.rs index 33c7b15be..136ba0ece 100644 --- a/src/impl_views/constructors.rs +++ b/src/impl_views/constructors.rs @@ -45,14 +45,12 @@ where D: Dimension /// assert!(a.strides() == &[1, 4, 2]); /// ``` pub fn from_shape(shape: Sh, xs: &'a [A]) -> Result - where Sh: Into> - { + where Sh: Into> { // eliminate the type parameter Sh as soon as possible Self::from_shape_impl(shape.into(), xs) } - fn from_shape_impl(shape: StrideShape, xs: &'a [A]) -> Result - { + fn from_shape_impl(shape: StrideShape, xs: &'a [A]) -> Result { let dim = shape.dim; dimension::can_index_slice_with_strides(xs, &dim, &shape.strides)?; let strides = shape.strides.strides_for_dim(&dim); @@ -111,8 +109,7 @@ where D: Dimension /// /// [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub unsafe fn from_shape_ptr(shape: Sh, ptr: *const A) -> Self - where Sh: Into> - { + where Sh: Into> { RawArrayView::from_shape_ptr(shape, ptr).deref_into_view() } } @@ -147,14 +144,12 @@ where D: Dimension /// assert!(a.strides() == &[1, 4, 2]); /// ``` pub fn from_shape(shape: Sh, xs: &'a mut [A]) -> Result - where Sh: Into> - { + where Sh: Into> { // eliminate the type parameter Sh as soon as possible Self::from_shape_impl(shape.into(), xs) } - fn from_shape_impl(shape: StrideShape, xs: &'a mut [A]) -> Result - { + fn from_shape_impl(shape: StrideShape, xs: &'a mut [A]) -> Result { let dim = shape.dim; dimension::can_index_slice_with_strides(xs, &dim, &shape.strides)?; let strides = shape.strides.strides_for_dim(&dim); @@ -213,16 +208,14 @@ where D: Dimension /// /// [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub unsafe fn from_shape_ptr(shape: Sh, ptr: *mut A) -> Self - where Sh: Into> - { + where Sh: 
Into> { RawArrayViewMut::from_shape_ptr(shape, ptr).deref_into_view_mut() } /// Convert the view into an `ArrayViewMut<'b, A, D>` where `'b` is a lifetime /// outlived by `'a'`. pub fn reborrow<'b>(self) -> ArrayViewMut<'b, A, D> - where 'a: 'b - { + where 'a: 'b { unsafe { ArrayViewMut::new(self.ptr, self.dim, self.strides) } } } @@ -235,8 +228,7 @@ where D: Dimension /// /// Unsafe because: `ptr` must be valid for the given dimension and strides. #[inline(always)] - pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self - { + pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self { if cfg!(debug_assertions) { assert!(is_aligned(ptr.as_ptr()), "The pointer must be aligned."); dimension::max_abs_offset_check_overflow::(&dim, &strides).unwrap(); @@ -246,8 +238,7 @@ where D: Dimension /// Unsafe because: `ptr` must be valid for the given dimension and strides. #[inline] - pub(crate) unsafe fn new_(ptr: *const A, dim: D, strides: D) -> Self - { + pub(crate) unsafe fn new_(ptr: *const A, dim: D, strides: D) -> Self { Self::new(nonnull_debug_checked_from_ptr(ptr as *mut A), dim, strides) } } @@ -259,8 +250,7 @@ where D: Dimension /// /// Unsafe because: `ptr` must be valid for the given dimension and strides. #[inline(always)] - pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self - { + pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self { if cfg!(debug_assertions) { assert!(is_aligned(ptr.as_ptr()), "The pointer must be aligned."); dimension::max_abs_offset_check_overflow::(&dim, &strides).unwrap(); @@ -272,8 +262,7 @@ where D: Dimension /// /// Unsafe because: `ptr` must be valid for the given dimension and strides. 
#[inline(always)] - pub(crate) unsafe fn new_(ptr: *mut A, dim: D, strides: D) -> Self - { + pub(crate) unsafe fn new_(ptr: *mut A, dim: D, strides: D) -> Self { Self::new(nonnull_debug_checked_from_ptr(ptr), dim, strides) } } diff --git a/src/impl_views/conversions.rs b/src/impl_views/conversions.rs index f545ebdd0..a364f3e70 100644 --- a/src/impl_views/conversions.rs +++ b/src/impl_views/conversions.rs @@ -26,8 +26,7 @@ where D: Dimension /// Convert the view into an `ArrayView<'b, A, D>` where `'b` is a lifetime /// outlived by `'a'`. pub fn reborrow<'b>(self) -> ArrayView<'b, A, D> - where 'a: 'b - { + where 'a: 'b { unsafe { ArrayView::new(self.ptr, self.dim, self.strides) } } @@ -36,8 +35,7 @@ where D: Dimension /// /// Note that while the method is similar to [`ArrayBase::as_slice()`], this method transfers /// the view's lifetime to the slice, so it is a bit more powerful. - pub fn to_slice(&self) -> Option<&'a [A]> - { + pub fn to_slice(&self) -> Option<&'a [A]> { if self.is_standard_layout() { unsafe { Some(slice::from_raw_parts(self.ptr.as_ptr(), self.len())) } } else { @@ -51,8 +49,7 @@ where D: Dimension /// Note that while the method is similar to /// [`ArrayBase::as_slice_memory_order()`], this method transfers the view's /// lifetime to the slice, so it is a bit more powerful. - pub fn to_slice_memory_order(&self) -> Option<&'a [A]> - { + pub fn to_slice_memory_order(&self) -> Option<&'a [A]> { if self.is_contiguous() { let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides); unsafe { Some(slice::from_raw_parts(self.ptr.sub(offset).as_ptr(), self.len())) } @@ -62,8 +59,7 @@ where D: Dimension } /// Converts to a raw array view. - pub(crate) fn into_raw_view(self) -> RawArrayView - { + pub(crate) fn into_raw_view(self) -> RawArrayView { unsafe { RawArrayView::new(self.ptr, self.dim, self.strides) } } } @@ -71,8 +67,7 @@ where D: Dimension /// Methods specific to `ArrayView0`. 
/// /// ***See also all methods for [`ArrayView`] and [`ArrayBase`]*** -impl<'a, A> ArrayView<'a, A, Ix0> -{ +impl<'a, A> ArrayView<'a, A, Ix0> { /// Consume the view and return a reference to the single element in the array. /// /// The lifetime of the returned reference matches the lifetime of the data @@ -90,8 +85,7 @@ impl<'a, A> ArrayView<'a, A, Ix0> /// let scalar: &Foo = view.into_scalar(); /// assert_eq!(scalar, &Foo); /// ``` - pub fn into_scalar(self) -> &'a A - { + pub fn into_scalar(self) -> &'a A { self.index(Ix0()) } } @@ -99,8 +93,7 @@ impl<'a, A> ArrayView<'a, A, Ix0> /// Methods specific to `ArrayViewMut0`. /// /// ***See also all methods for [`ArrayViewMut`] and [`ArrayBase`]*** -impl<'a, A> ArrayViewMut<'a, A, Ix0> -{ +impl<'a, A> ArrayViewMut<'a, A, Ix0> { /// Consume the mutable view and return a mutable reference to the single element in the array. /// /// The lifetime of the returned reference matches the lifetime of the data @@ -116,8 +109,7 @@ impl<'a, A> ArrayViewMut<'a, A, Ix0> /// assert_eq!(scalar, &7.); /// assert_eq!(array[()], 7.); /// ``` - pub fn into_scalar(self) -> &'a mut A - { + pub fn into_scalar(self) -> &'a mut A { self.index(Ix0()) } } @@ -131,8 +123,7 @@ where D: Dimension /// /// Note that while this is similar to [`ArrayBase::as_slice_mut()`], this method transfers the /// view's lifetime to the slice. - pub fn into_slice(self) -> Option<&'a mut [A]> - { + pub fn into_slice(self) -> Option<&'a mut [A]> { self.try_into_slice().ok() } @@ -142,8 +133,7 @@ where D: Dimension /// Note that while this is similar to /// [`ArrayBase::as_slice_memory_order_mut()`], this method transfers the /// view's lifetime to the slice. 
- pub fn into_slice_memory_order(self) -> Option<&'a mut [A]> - { + pub fn into_slice_memory_order(self) -> Option<&'a mut [A]> { self.try_into_slice_memory_order().ok() } @@ -153,8 +143,7 @@ where D: Dimension /// /// The view acts "as if" the elements are temporarily in cells, and elements /// can be changed through shared references using the regular cell methods. - pub fn into_cell_view(self) -> ArrayView<'a, MathCell, D> - { + pub fn into_cell_view(self) -> ArrayView<'a, MathCell, D> { // safety: valid because // A and MathCell have the same representation // &'a mut T is interchangeable with &'a Cell -- see method Cell::from_mut in std @@ -178,8 +167,7 @@ where D: Dimension /// This method allows writing uninitialized data into the view, which could leave any /// original array that we borrow from in an inconsistent state. This is not allowed /// when using the resulting array view. - pub(crate) unsafe fn into_maybe_uninit(self) -> ArrayViewMut<'a, MaybeUninit, D> - { + pub(crate) unsafe fn into_maybe_uninit(self) -> ArrayViewMut<'a, MaybeUninit, D> { // Safe because: A and MaybeUninit have the same representation; // and we can go from initialized to (maybe) not unconditionally in terms of // representation. 
However, the user must be careful to not write uninit elements @@ -195,8 +183,7 @@ impl RawArrayView where D: Dimension { #[inline] - pub(crate) fn into_base_iter(self) -> Baseiter - { + pub(crate) fn into_base_iter(self) -> Baseiter { unsafe { Baseiter::new(self.ptr.as_ptr(), self.dim, self.strides) } } } @@ -205,8 +192,7 @@ impl RawArrayViewMut where D: Dimension { #[inline] - pub(crate) fn into_base_iter(self) -> Baseiter - { + pub(crate) fn into_base_iter(self) -> Baseiter { unsafe { Baseiter::new(self.ptr.as_ptr(), self.dim, self.strides) } } } @@ -216,19 +202,16 @@ impl<'a, A, D> ArrayView<'a, A, D> where D: Dimension { #[inline] - pub(crate) fn into_base_iter(self) -> Baseiter - { + pub(crate) fn into_base_iter(self) -> Baseiter { unsafe { Baseiter::new(self.ptr.as_ptr(), self.dim, self.strides) } } #[inline] - pub(crate) fn into_elements_base(self) -> ElementsBase<'a, A, D> - { + pub(crate) fn into_elements_base(self) -> ElementsBase<'a, A, D> { ElementsBase::new(self) } - pub(crate) fn into_iter_(self) -> Iter<'a, A, D> - { + pub(crate) fn into_iter_(self) -> Iter<'a, A, D> { Iter::new(self) } @@ -236,8 +219,7 @@ where D: Dimension #[doc(hidden)] // not official #[deprecated(note = "This method will be replaced.")] pub fn into_outer_iter(self) -> iter::AxisIter<'a, A, D::Smaller> - where D: RemoveAxis - { + where D: RemoveAxis { AxisIter::new(self, Axis(0)) } } @@ -246,33 +228,28 @@ impl<'a, A, D> ArrayViewMut<'a, A, D> where D: Dimension { // Convert into a read-only view - pub(crate) fn into_view(self) -> ArrayView<'a, A, D> - { + pub(crate) fn into_view(self) -> ArrayView<'a, A, D> { unsafe { ArrayView::new(self.ptr, self.dim, self.strides) } } /// Converts to a mutable raw array view. 
- pub(crate) fn into_raw_view_mut(self) -> RawArrayViewMut - { + pub(crate) fn into_raw_view_mut(self) -> RawArrayViewMut { unsafe { RawArrayViewMut::new(self.ptr, self.dim, self.strides) } } #[inline] - pub(crate) fn into_base_iter(self) -> Baseiter - { + pub(crate) fn into_base_iter(self) -> Baseiter { unsafe { Baseiter::new(self.ptr.as_ptr(), self.dim, self.strides) } } #[inline] - pub(crate) fn into_elements_base(self) -> ElementsBaseMut<'a, A, D> - { + pub(crate) fn into_elements_base(self) -> ElementsBaseMut<'a, A, D> { ElementsBaseMut::new(self) } /// Return the array’s data as a slice, if it is contiguous and in standard order. /// Otherwise return self in the Err branch of the result. - pub(crate) fn try_into_slice(self) -> Result<&'a mut [A], Self> - { + pub(crate) fn try_into_slice(self) -> Result<&'a mut [A], Self> { if self.is_standard_layout() { unsafe { Ok(slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len())) } } else { @@ -282,8 +259,7 @@ where D: Dimension /// Return the array’s data as a slice, if it is contiguous. /// Otherwise return self in the Err branch of the result. 
- fn try_into_slice_memory_order(self) -> Result<&'a mut [A], Self> - { + fn try_into_slice_memory_order(self) -> Result<&'a mut [A], Self> { if self.is_contiguous() { let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides); unsafe { Ok(slice::from_raw_parts_mut(self.ptr.sub(offset).as_ptr(), self.len())) } @@ -292,8 +268,7 @@ where D: Dimension } } - pub(crate) fn into_iter_(self) -> IterMut<'a, A, D> - { + pub(crate) fn into_iter_(self) -> IterMut<'a, A, D> { IterMut::new(self) } @@ -301,8 +276,7 @@ where D: Dimension #[doc(hidden)] // not official #[deprecated(note = "This method will be replaced.")] pub fn into_outer_iter(self) -> iter::AxisIterMut<'a, A, D::Smaller> - where D: RemoveAxis - { + where D: RemoveAxis { AxisIterMut::new(self, Axis(0)) } } diff --git a/src/impl_views/indexing.rs b/src/impl_views/indexing.rs index 2b72c2142..3494b91db 100644 --- a/src/impl_views/indexing.rs +++ b/src/impl_views/indexing.rs @@ -46,8 +46,7 @@ use crate::NdIndex; /// assert_eq!(long_life_ref, &0.); /// /// ``` -pub trait IndexLonger -{ +pub trait IndexLonger { /// The type of the reference to the element that is produced, including /// its lifetime. type Output; @@ -120,14 +119,12 @@ where /// /// **Panics** if index is out of bounds. #[track_caller] - fn index(self, index: I) -> &'a A - { + fn index(self, index: I) -> &'a A { debug_bounds_check!(self, index); unsafe { &*self.get_ptr(index).unwrap_or_else(|| array_out_of_bounds()) } } - fn get(self, index: I) -> Option<&'a A> - { + fn get(self, index: I) -> Option<&'a A> { unsafe { self.get_ptr(index).map(|ptr| &*ptr) } } @@ -142,8 +139,7 @@ where /// [1]: ArrayBase::uget /// /// **Note:** only unchecked for non-debug builds of ndarray. - unsafe fn uget(self, index: I) -> &'a A - { + unsafe fn uget(self, index: I) -> &'a A { debug_bounds_check!(self, index); &*self.as_ptr().offset(index.index_unchecked(&self.strides)) } @@ -169,8 +165,7 @@ where /// /// **Panics** if index is out of bounds. 
#[track_caller] - fn index(mut self, index: I) -> &'a mut A - { + fn index(mut self, index: I) -> &'a mut A { debug_bounds_check!(self, index); unsafe { match self.get_mut_ptr(index) { @@ -188,8 +183,7 @@ where /// /// [1]: ArrayBase::get_mut /// - fn get(mut self, index: I) -> Option<&'a mut A> - { + fn get(mut self, index: I) -> Option<&'a mut A> { debug_bounds_check!(self, index); unsafe { match self.get_mut_ptr(index) { @@ -208,8 +202,7 @@ where /// [1]: ArrayBase::uget_mut /// /// **Note:** only unchecked for non-debug builds of ndarray. - unsafe fn uget(mut self, index: I) -> &'a mut A - { + unsafe fn uget(mut self, index: I) -> &'a mut A { debug_bounds_check!(self, index); &mut *self .as_mut_ptr() diff --git a/src/impl_views/splitting.rs b/src/impl_views/splitting.rs index e26900984..f2f4e8f82 100644 --- a/src/impl_views/splitting.rs +++ b/src/impl_views/splitting.rs @@ -88,8 +88,7 @@ where D: Dimension /// along Axis(1) /// ``` #[track_caller] - pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) - { + pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { unsafe { let (left, right) = self.into_raw_view().split_at(axis, index); (left.deref_into_view(), right.deref_into_view()) @@ -116,8 +115,7 @@ where D: Dimension /// assert_eq!(re, array![[1., 3.], [5., 7.], [9., 11.]]); /// assert_eq!(im, array![[2., 4.], [6., 8.], [10., 12.]]); /// ``` - pub fn split_complex(self) -> Complex> - { + pub fn split_complex(self) -> Complex> { unsafe { let Complex { re, im } = self.into_raw_view().split_complex(); Complex { @@ -137,8 +135,7 @@ where D: Dimension /// /// **Panics** if `axis` or `index` is out of bounds. 
#[track_caller] - pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) - { + pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { unsafe { let (left, right) = self.into_raw_view_mut().split_at(axis, index); (left.deref_into_view_mut(), right.deref_into_view_mut()) @@ -164,8 +161,7 @@ where D: Dimension /// * if `D` is `IxDyn` and `info` does not match the number of array axes #[track_caller] pub fn multi_slice_move(self, info: M) -> M::Output - where M: MultiSliceArg<'a, A, D> - { + where M: MultiSliceArg<'a, A, D> { info.multi_slice_move(self) } } @@ -196,8 +192,7 @@ where D: Dimension /// assert_eq!(arr[[0, 1]], Complex64::new(13., 4.)); /// assert_eq!(arr[[2, 0]], Complex64::new(9., 14.)); /// ``` - pub fn split_complex(self) -> Complex> - { + pub fn split_complex(self) -> Complex> { unsafe { let Complex { re, im } = self.into_raw_view_mut().split_complex(); Complex { diff --git a/src/indexes.rs b/src/indexes.rs index 0fa2b50fb..368303840 100644 --- a/src/indexes.rs +++ b/src/indexes.rs @@ -18,8 +18,7 @@ use crate::{ArrayBase, Data}; /// /// Iterator element type is `D`. #[derive(Clone)] -pub struct IndicesIter -{ +pub struct IndicesIter { dim: D, index: Option, } @@ -29,8 +28,7 @@ pub struct IndicesIter /// *Note:* prefer higher order methods, arithmetic operations and /// non-indexed iteration before using indices. 
pub fn indices(shape: E) -> Indices -where E: IntoDimension -{ +where E: IntoDimension { let dim = shape.into_dimension(); Indices { start: E::Dim::zeros(dim.ndim()), @@ -55,8 +53,7 @@ where D: Dimension { type Item = D::Pattern; #[inline] - fn next(&mut self) -> Option - { + fn next(&mut self) -> Option { let index = match self.index { None => return None, Some(ref ix) => ix.clone(), @@ -65,8 +62,7 @@ where D: Dimension Some(index.into_pattern()) } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { let l = match self.index { None => 0, Some(ref ix) => { @@ -84,8 +80,7 @@ where D: Dimension } fn fold(self, init: B, mut f: F) -> B - where F: FnMut(B, D::Pattern) -> B - { + where F: FnMut(B, D::Pattern) -> B { let IndicesIter { mut index, dim } = self; let ndim = dim.ndim(); if ndim == 0 { @@ -116,8 +111,7 @@ where D: Dimension { type Item = D::Pattern; type IntoIter = IndicesIter; - fn into_iter(self) -> Self::IntoIter - { + fn into_iter(self) -> Self::IntoIter { let sz = self.dim.size(); let index = if sz != 0 { Some(self.start) } else { None }; IndicesIter { index, dim: self.dim } @@ -136,8 +130,7 @@ where D: Dimension } #[derive(Copy, Clone, Debug)] -pub struct IndexPtr -{ +pub struct IndexPtr { index: D, } @@ -147,8 +140,7 @@ where D: Dimension + Copy // stride: The axis to increment type Stride = usize; - unsafe fn stride_offset(mut self, stride: Self::Stride, index: usize) -> Self - { + unsafe fn stride_offset(mut self, stride: Self::Stride, index: usize) -> Self { self.index[stride] += index; self } @@ -169,8 +161,7 @@ where D: Dimension + Copy // [0, 0, 0].stride_offset(1, 10) => [0, 10, 0] axis 1 is incremented by 10. // // .as_ref() converts the Ptr value to an Item. For example [0, 10, 0] => (0, 10, 0) -impl NdProducer for Indices -{ +impl NdProducer for Indices { type Item = D::Pattern; type Dim = D; type Ptr = IndexPtr; @@ -178,23 +169,19 @@ impl NdProducer for Indices private_impl! 
{} - fn raw_dim(&self) -> Self::Dim - { + fn raw_dim(&self) -> Self::Dim { self.dim } - fn equal_dim(&self, dim: &Self::Dim) -> bool - { + fn equal_dim(&self, dim: &Self::Dim) -> bool { self.dim.equal(dim) } - fn as_ptr(&self) -> Self::Ptr - { + fn as_ptr(&self) -> Self::Ptr { IndexPtr { index: self.start } } - fn layout(&self) -> Layout - { + fn layout(&self) -> Layout { if self.dim.ndim() <= 1 { Layout::one_dimensional() } else { @@ -202,31 +189,26 @@ impl NdProducer for Indices } } - unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item - { + unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item { ptr.index.into_pattern() } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr - { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr { let mut index = *i; index += &self.start; IndexPtr { index } } - fn stride_of(&self, axis: Axis) -> Self::Stride - { + fn stride_of(&self, axis: Axis) -> Self::Stride { axis.index() } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride - { + fn contiguous_stride(&self) -> Self::Stride { 0 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) - { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { let start_a = self.start; let mut start_b = start_a; let (a, b) = self.dim.split_at(axis, index); @@ -239,16 +221,14 @@ impl NdProducer for Indices /// /// Iterator element type is `D`. 
#[derive(Clone)] -pub struct IndicesIterF -{ +pub struct IndicesIterF { dim: D, index: D, has_remaining: bool, } pub fn indices_iter_f(shape: E) -> IndicesIterF -where E: IntoDimension -{ +where E: IntoDimension { let dim = shape.into_dimension(); let zero = E::Dim::zeros(dim.ndim()); IndicesIterF { @@ -263,8 +243,7 @@ where D: Dimension { type Item = D::Pattern; #[inline] - fn next(&mut self) -> Option - { + fn next(&mut self) -> Option { if !self.has_remaining { None } else { @@ -274,8 +253,7 @@ where D: Dimension } } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { if !self.has_remaining { return (0, Some(0)); } @@ -294,14 +272,12 @@ where D: Dimension impl ExactSizeIterator for IndicesIterF where D: Dimension {} #[cfg(test)] -mod tests -{ +mod tests { use super::indices; use super::indices_iter_f; #[test] - fn test_indices_iter_c_size_hint() - { + fn test_indices_iter_c_size_hint() { let dim = (3, 4); let mut it = indices(dim).into_iter(); let mut len = dim.0 * dim.1; @@ -314,8 +290,7 @@ mod tests } #[test] - fn test_indices_iter_c_fold() - { + fn test_indices_iter_c_fold() { macro_rules! run_test { ($dim:expr) => { for num_consume in 0..3 { @@ -343,8 +318,7 @@ mod tests } #[test] - fn test_indices_iter_f_size_hint() - { + fn test_indices_iter_f_size_hint() { let dim = (3, 4); let mut it = indices_iter_f(dim); let mut len = dim.0 * dim.1; diff --git a/src/iterators/chunks.rs b/src/iterators/chunks.rs index 465428968..2be5092f1 100644 --- a/src/iterators/chunks.rs +++ b/src/iterators/chunks.rs @@ -30,22 +30,19 @@ impl_ndproducer! { /// See [`.exact_chunks()`](ArrayBase::exact_chunks) for more /// information. //#[derive(Debug)] -pub struct ExactChunks<'a, A, D> -{ +pub struct ExactChunks<'a, A, D> { base: RawArrayView, life: PhantomData<&'a A>, chunk: D, inner_strides: D, } -impl<'a, A, D: Dimension> ExactChunks<'a, A, D> -{ +impl<'a, A, D: Dimension> ExactChunks<'a, A, D> { /// Creates a new exact chunks producer. 
/// /// **Panics** if any chunk dimension is zero pub(crate) fn new(a: ArrayView<'a, A, D>, chunk: E) -> Self - where E: IntoDimension - { + where E: IntoDimension { let mut a = a.into_raw_view(); let chunk = chunk.into_dimension(); ndassert!( @@ -80,8 +77,7 @@ where { type Item = ::Item; type IntoIter = ExactChunksIter<'a, A, D>; - fn into_iter(self) -> Self::IntoIter - { + fn into_iter(self) -> Self::IntoIter { ExactChunksIter { iter: self.base.into_base_iter(), life: self.life, @@ -95,8 +91,7 @@ where /// /// See [`.exact_chunks()`](ArrayBase::exact_chunks) for more /// information. -pub struct ExactChunksIter<'a, A, D> -{ +pub struct ExactChunksIter<'a, A, D> { iter: Baseiter, life: PhantomData<&'a A>, chunk: D, @@ -129,22 +124,19 @@ impl_ndproducer! { /// See [`.exact_chunks_mut()`](ArrayBase::exact_chunks_mut) /// for more information. //#[derive(Debug)] -pub struct ExactChunksMut<'a, A, D> -{ +pub struct ExactChunksMut<'a, A, D> { base: RawArrayViewMut, life: PhantomData<&'a mut A>, chunk: D, inner_strides: D, } -impl<'a, A, D: Dimension> ExactChunksMut<'a, A, D> -{ +impl<'a, A, D: Dimension> ExactChunksMut<'a, A, D> { /// Creates a new exact chunks producer. /// /// **Panics** if any chunk dimension is zero pub(crate) fn new(a: ArrayViewMut<'a, A, D>, chunk: E) -> Self - where E: IntoDimension - { + where E: IntoDimension { let mut a = a.into_raw_view_mut(); let chunk = chunk.into_dimension(); ndassert!( @@ -179,8 +171,7 @@ where { type Item = ::Item; type IntoIter = ExactChunksIterMut<'a, A, D>; - fn into_iter(self) -> Self::IntoIter - { + fn into_iter(self) -> Self::IntoIter { ExactChunksIterMut { iter: self.base.into_base_iter(), life: self.life, @@ -239,8 +230,7 @@ impl_iterator! { /// /// See [`.exact_chunks_mut()`](ArrayBase::exact_chunks_mut) /// for more information. 
-pub struct ExactChunksIterMut<'a, A, D> -{ +pub struct ExactChunksIterMut<'a, A, D> { iter: Baseiter, life: PhantomData<&'a mut A>, chunk: D, diff --git a/src/iterators/into_iter.rs b/src/iterators/into_iter.rs index fcc2e4b8c..a07c8042d 100644 --- a/src/iterators/into_iter.rs +++ b/src/iterators/into_iter.rs @@ -33,8 +33,7 @@ impl IntoIter where D: Dimension { /// Create a new by-value iterator that consumes `array` - pub(crate) fn new(mut array: Array) -> Self - { + pub(crate) fn new(mut array: Array) -> Self { unsafe { let array_head_ptr = array.ptr; let ptr = array.as_mut_ptr(); @@ -55,26 +54,21 @@ where D: Dimension } } -impl Iterator for IntoIter -{ +impl Iterator for IntoIter { type Item = A; #[inline] - fn next(&mut self) -> Option - { + fn next(&mut self) -> Option { self.inner.next().map(|p| unsafe { p.read() }) } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } } -impl ExactSizeIterator for IntoIter -{ - fn len(&self) -> usize - { +impl ExactSizeIterator for IntoIter { + fn len(&self) -> usize { self.inner.len() } } @@ -82,8 +76,7 @@ impl ExactSizeIterator for IntoIter impl Drop for IntoIter where D: Dimension { - fn drop(&mut self) - { + fn drop(&mut self) { if !self.has_unreachable_elements || mem::size_of::() == 0 || !mem::needs_drop::() { return; } @@ -107,8 +100,7 @@ where D: Dimension type Item = A; type IntoIter = IntoIter; - fn into_iter(self) -> Self::IntoIter - { + fn into_iter(self) -> Self::IntoIter { IntoIter::new(self) } } @@ -121,8 +113,7 @@ where type Item = A; type IntoIter = IntoIter; - fn into_iter(self) -> Self::IntoIter - { + fn into_iter(self) -> Self::IntoIter { IntoIter::new(self.into_owned()) } } @@ -135,8 +126,7 @@ where type Item = A; type IntoIter = IntoIter; - fn into_iter(self) -> Self::IntoIter - { + fn into_iter(self) -> Self::IntoIter { IntoIter::new(self.into_owned()) } } diff --git a/src/iterators/lanes.rs b/src/iterators/lanes.rs index 
11c83d002..0eefa05c4 100644 --- a/src/iterators/lanes.rs +++ b/src/iterators/lanes.rs @@ -25,18 +25,15 @@ impl_ndproducer! { /// See [`.lanes()`](ArrayBase::lanes) /// for more information. -pub struct Lanes<'a, A, D> -{ +pub struct Lanes<'a, A, D> { base: ArrayView<'a, A, D>, inner_len: Ix, inner_stride: Ixs, } -impl<'a, A, D: Dimension> Lanes<'a, A, D> -{ +impl<'a, A, D: Dimension> Lanes<'a, A, D> { pub(crate) fn new(v: ArrayView<'a, A, Di>, axis: Axis) -> Self - where Di: Dimension - { + where Di: Dimension { let ndim = v.ndim(); let len; let stride; @@ -81,8 +78,7 @@ where D: Dimension { type Item = ::Item; type IntoIter = LanesIter<'a, A, D>; - fn into_iter(self) -> Self::IntoIter - { + fn into_iter(self) -> Self::IntoIter { LanesIter { iter: self.base.into_base_iter(), inner_len: self.inner_len, @@ -94,18 +90,15 @@ where D: Dimension /// See [`.lanes_mut()`](ArrayBase::lanes_mut) /// for more information. -pub struct LanesMut<'a, A, D> -{ +pub struct LanesMut<'a, A, D> { base: ArrayViewMut<'a, A, D>, inner_len: Ix, inner_stride: Ixs, } -impl<'a, A, D: Dimension> LanesMut<'a, A, D> -{ +impl<'a, A, D: Dimension> LanesMut<'a, A, D> { pub(crate) fn new(v: ArrayViewMut<'a, A, Di>, axis: Axis) -> Self - where Di: Dimension - { + where Di: Dimension { let ndim = v.ndim(); let len; let stride; @@ -132,8 +125,7 @@ where D: Dimension { type Item = ::Item; type IntoIter = LanesIterMut<'a, A, D>; - fn into_iter(self) -> Self::IntoIter - { + fn into_iter(self) -> Self::IntoIter { LanesIterMut { iter: self.base.into_base_iter(), inner_len: self.inner_len, diff --git a/src/iterators/mod.rs b/src/iterators/mod.rs index 4851b2827..9dedddd79 100644 --- a/src/iterators/mod.rs +++ b/src/iterators/mod.rs @@ -36,22 +36,19 @@ use std::slice::{self, Iter as SliceIter, IterMut as SliceIterMut}; /// /// Iterator element type is `*mut A`. 
#[derive(Debug)] -pub struct Baseiter -{ +pub struct Baseiter { ptr: *mut A, dim: D, strides: D, index: Option, } -impl Baseiter -{ +impl Baseiter { /// Creating a Baseiter is unsafe because shape and stride parameters need /// to be correct to avoid performing an unsafe pointer offset while /// iterating. #[inline] - pub unsafe fn new(ptr: *mut A, len: D, stride: D) -> Baseiter - { + pub unsafe fn new(ptr: *mut A, len: D, stride: D) -> Baseiter { Baseiter { ptr, index: len.first_index(), @@ -61,13 +58,11 @@ impl Baseiter } } -impl Iterator for Baseiter -{ +impl Iterator for Baseiter { type Item = *mut A; #[inline] - fn next(&mut self) -> Option<*mut A> - { + fn next(&mut self) -> Option<*mut A> { let index = match self.index { None => return None, Some(ref ix) => ix.clone(), @@ -77,15 +72,13 @@ impl Iterator for Baseiter unsafe { Some(self.ptr.offset(offset)) } } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { let len = self.len(); (len, Some(len)) } fn fold(mut self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, *mut A) -> Acc - { + where G: FnMut(Acc, *mut A) -> Acc { let ndim = self.dim.ndim(); debug_assert_ne!(ndim, 0); let mut accum = init; @@ -110,10 +103,8 @@ impl Iterator for Baseiter } } -impl ExactSizeIterator for Baseiter -{ - fn len(&self) -> usize - { +impl ExactSizeIterator for Baseiter { + fn len(&self) -> usize { match self.index { None => 0, Some(ref ix) => { @@ -130,11 +121,9 @@ impl ExactSizeIterator for Baseiter } } -impl DoubleEndedIterator for Baseiter -{ +impl DoubleEndedIterator for Baseiter { #[inline] - fn next_back(&mut self) -> Option<*mut A> - { + fn next_back(&mut self) -> Option<*mut A> { let index = match self.index { None => return None, Some(ix) => ix, @@ -148,8 +137,7 @@ impl DoubleEndedIterator for Baseiter unsafe { Some(self.ptr.offset(offset)) } } - fn nth_back(&mut self, n: usize) -> Option<*mut A> - { + fn nth_back(&mut self, n: usize) -> Option<*mut A> { let index = self.index?; 
let len = self.dim[0] - index[0]; if n < len { @@ -166,8 +154,7 @@ impl DoubleEndedIterator for Baseiter } fn rfold(mut self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, *mut A) -> Acc - { + where G: FnMut(Acc, *mut A) -> Acc { let mut accum = init; if let Some(index) = self.index { let elem_index = index[0]; @@ -209,10 +196,8 @@ clone_bounds!( } ); -impl<'a, A, D: Dimension> ElementsBase<'a, A, D> -{ - pub fn new(v: ArrayView<'a, A, D>) -> Self - { +impl<'a, A, D: Dimension> ElementsBase<'a, A, D> { + pub fn new(v: ArrayView<'a, A, D>) -> Self { ElementsBase { inner: v.into_base_iter(), life: PhantomData, @@ -220,38 +205,31 @@ impl<'a, A, D: Dimension> ElementsBase<'a, A, D> } } -impl<'a, A, D: Dimension> Iterator for ElementsBase<'a, A, D> -{ +impl<'a, A, D: Dimension> Iterator for ElementsBase<'a, A, D> { type Item = &'a A; #[inline] - fn next(&mut self) -> Option<&'a A> - { + fn next(&mut self) -> Option<&'a A> { self.inner.next().map(|p| unsafe { &*p }) } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } fn fold(self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc - { + where G: FnMut(Acc, Self::Item) -> Acc { unsafe { self.inner.fold(init, move |acc, ptr| g(acc, &*ptr)) } } } -impl<'a, A> DoubleEndedIterator for ElementsBase<'a, A, Ix1> -{ +impl<'a, A> DoubleEndedIterator for ElementsBase<'a, A, Ix1> { #[inline] - fn next_back(&mut self) -> Option<&'a A> - { + fn next_back(&mut self) -> Option<&'a A> { self.inner.next_back().map(|p| unsafe { &*p }) } fn rfold(self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc - { + where G: FnMut(Acc, Self::Item) -> Acc { unsafe { self.inner.rfold(init, move |acc, ptr| g(acc, &*ptr)) } } } @@ -259,8 +237,7 @@ impl<'a, A> DoubleEndedIterator for ElementsBase<'a, A, Ix1> impl<'a, A, D> ExactSizeIterator for ElementsBase<'a, A, D> where D: Dimension { - fn len(&self) -> usize - { + fn len(&self) -> usize { 
self.inner.len() } } @@ -295,8 +272,7 @@ clone_bounds!( impl<'a, A, D> Iter<'a, A, D> where D: Dimension { - pub(crate) fn new(self_: ArrayView<'a, A, D>) -> Self - { + pub(crate) fn new(self_: ArrayView<'a, A, D>) -> Self { Iter { inner: if let Some(slc) = self_.to_slice() { ElementsRepr::Slice(slc.iter()) @@ -310,8 +286,7 @@ where D: Dimension impl<'a, A, D> IterMut<'a, A, D> where D: Dimension { - pub(crate) fn new(self_: ArrayViewMut<'a, A, D>) -> Self - { + pub(crate) fn new(self_: ArrayViewMut<'a, A, D>) -> Self { IterMut { inner: match self_.try_into_slice() { Ok(x) => ElementsRepr::Slice(x.iter_mut()), @@ -322,8 +297,7 @@ where D: Dimension } #[derive(Clone, Debug)] -pub enum ElementsRepr -{ +pub enum ElementsRepr { Slice(S), Counted(C), } @@ -334,15 +308,13 @@ pub enum ElementsRepr /// /// See [`.iter()`](ArrayBase::iter) for more information. #[derive(Debug)] -pub struct Iter<'a, A, D> -{ +pub struct Iter<'a, A, D> { inner: ElementsRepr, ElementsBase<'a, A, D>>, } /// Counted read only iterator #[derive(Debug)] -pub struct ElementsBase<'a, A, D> -{ +pub struct ElementsBase<'a, A, D> { inner: Baseiter, life: PhantomData<&'a A>, } @@ -353,8 +325,7 @@ pub struct ElementsBase<'a, A, D> /// /// See [`.iter_mut()`](ArrayBase::iter_mut) for more information. #[derive(Debug)] -pub struct IterMut<'a, A, D> -{ +pub struct IterMut<'a, A, D> { inner: ElementsRepr, ElementsBaseMut<'a, A, D>>, } @@ -362,16 +333,13 @@ pub struct IterMut<'a, A, D> /// /// Iterator element type is `&'a mut A`. 
#[derive(Debug)] -pub struct ElementsBaseMut<'a, A, D> -{ +pub struct ElementsBaseMut<'a, A, D> { inner: Baseiter, life: PhantomData<&'a mut A>, } -impl<'a, A, D: Dimension> ElementsBaseMut<'a, A, D> -{ - pub fn new(v: ArrayViewMut<'a, A, D>) -> Self - { +impl<'a, A, D: Dimension> ElementsBaseMut<'a, A, D> { + pub fn new(v: ArrayViewMut<'a, A, D>) -> Self { ElementsBaseMut { inner: v.into_base_iter(), life: PhantomData, @@ -392,8 +360,7 @@ pub struct IndexedIterMut<'a, A, D>(ElementsBaseMut<'a, A, D>); impl<'a, A, D> IndexedIter<'a, A, D> where D: Dimension { - pub(crate) fn new(x: ElementsBase<'a, A, D>) -> Self - { + pub(crate) fn new(x: ElementsBase<'a, A, D>) -> Self { IndexedIter(x) } } @@ -401,100 +368,82 @@ where D: Dimension impl<'a, A, D> IndexedIterMut<'a, A, D> where D: Dimension { - pub(crate) fn new(x: ElementsBaseMut<'a, A, D>) -> Self - { + pub(crate) fn new(x: ElementsBaseMut<'a, A, D>) -> Self { IndexedIterMut(x) } } -impl<'a, A, D: Dimension> Iterator for Iter<'a, A, D> -{ +impl<'a, A, D: Dimension> Iterator for Iter<'a, A, D> { type Item = &'a A; #[inline] - fn next(&mut self) -> Option<&'a A> - { + fn next(&mut self) -> Option<&'a A> { either_mut!(self.inner, iter => iter.next()) } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { either!(self.inner, ref iter => iter.size_hint()) } fn fold(self, init: Acc, g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc - { + where G: FnMut(Acc, Self::Item) -> Acc { either!(self.inner, iter => iter.fold(init, g)) } - fn nth(&mut self, n: usize) -> Option - { + fn nth(&mut self, n: usize) -> Option { either_mut!(self.inner, iter => iter.nth(n)) } fn collect(self) -> B - where B: FromIterator - { + where B: FromIterator { either!(self.inner, iter => iter.collect()) } fn all(&mut self, f: F) -> bool - where F: FnMut(Self::Item) -> bool - { + where F: FnMut(Self::Item) -> bool { either_mut!(self.inner, iter => iter.all(f)) } fn any(&mut self, f: F) -> bool - where F: 
FnMut(Self::Item) -> bool - { + where F: FnMut(Self::Item) -> bool { either_mut!(self.inner, iter => iter.any(f)) } fn find

(&mut self, predicate: P) -> Option - where P: FnMut(&Self::Item) -> bool - { + where P: FnMut(&Self::Item) -> bool { either_mut!(self.inner, iter => iter.find(predicate)) } fn find_map(&mut self, f: F) -> Option - where F: FnMut(Self::Item) -> Option - { + where F: FnMut(Self::Item) -> Option { either_mut!(self.inner, iter => iter.find_map(f)) } - fn count(self) -> usize - { + fn count(self) -> usize { either!(self.inner, iter => iter.count()) } - fn last(self) -> Option - { + fn last(self) -> Option { either!(self.inner, iter => iter.last()) } fn position

(&mut self, predicate: P) -> Option - where P: FnMut(Self::Item) -> bool - { + where P: FnMut(Self::Item) -> bool { either_mut!(self.inner, iter => iter.position(predicate)) } } -impl<'a, A> DoubleEndedIterator for Iter<'a, A, Ix1> -{ +impl<'a, A> DoubleEndedIterator for Iter<'a, A, Ix1> { #[inline] - fn next_back(&mut self) -> Option<&'a A> - { + fn next_back(&mut self) -> Option<&'a A> { either_mut!(self.inner, iter => iter.next_back()) } - fn nth_back(&mut self, n: usize) -> Option<&'a A> - { + fn nth_back(&mut self, n: usize) -> Option<&'a A> { either_mut!(self.inner, iter => iter.nth_back(n)) } fn rfold(self, init: Acc, g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc - { + where G: FnMut(Acc, Self::Item) -> Acc { either!(self.inner, iter => iter.rfold(init, g)) } } @@ -502,18 +451,15 @@ impl<'a, A> DoubleEndedIterator for Iter<'a, A, Ix1> impl<'a, A, D> ExactSizeIterator for Iter<'a, A, D> where D: Dimension { - fn len(&self) -> usize - { + fn len(&self) -> usize { either!(self.inner, ref iter => iter.len()) } } -impl<'a, A, D: Dimension> Iterator for IndexedIter<'a, A, D> -{ +impl<'a, A, D: Dimension> Iterator for IndexedIter<'a, A, D> { type Item = (D::Pattern, &'a A); #[inline] - fn next(&mut self) -> Option - { + fn next(&mut self) -> Option { let index = match self.0.inner.index { None => return None, Some(ref ix) => ix.clone(), @@ -524,8 +470,7 @@ impl<'a, A, D: Dimension> Iterator for IndexedIter<'a, A, D> } } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { self.0.size_hint() } } @@ -533,100 +478,82 @@ impl<'a, A, D: Dimension> Iterator for IndexedIter<'a, A, D> impl<'a, A, D> ExactSizeIterator for IndexedIter<'a, A, D> where D: Dimension { - fn len(&self) -> usize - { + fn len(&self) -> usize { self.0.inner.len() } } -impl<'a, A, D: Dimension> Iterator for IterMut<'a, A, D> -{ +impl<'a, A, D: Dimension> Iterator for IterMut<'a, A, D> { type Item = &'a mut A; #[inline] - fn next(&mut self) -> Option<&'a mut 
A> - { + fn next(&mut self) -> Option<&'a mut A> { either_mut!(self.inner, iter => iter.next()) } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { either!(self.inner, ref iter => iter.size_hint()) } fn fold(self, init: Acc, g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc - { + where G: FnMut(Acc, Self::Item) -> Acc { either!(self.inner, iter => iter.fold(init, g)) } - fn nth(&mut self, n: usize) -> Option - { + fn nth(&mut self, n: usize) -> Option { either_mut!(self.inner, iter => iter.nth(n)) } fn collect(self) -> B - where B: FromIterator - { + where B: FromIterator { either!(self.inner, iter => iter.collect()) } fn all(&mut self, f: F) -> bool - where F: FnMut(Self::Item) -> bool - { + where F: FnMut(Self::Item) -> bool { either_mut!(self.inner, iter => iter.all(f)) } fn any(&mut self, f: F) -> bool - where F: FnMut(Self::Item) -> bool - { + where F: FnMut(Self::Item) -> bool { either_mut!(self.inner, iter => iter.any(f)) } fn find

(&mut self, predicate: P) -> Option - where P: FnMut(&Self::Item) -> bool - { + where P: FnMut(&Self::Item) -> bool { either_mut!(self.inner, iter => iter.find(predicate)) } fn find_map(&mut self, f: F) -> Option - where F: FnMut(Self::Item) -> Option - { + where F: FnMut(Self::Item) -> Option { either_mut!(self.inner, iter => iter.find_map(f)) } - fn count(self) -> usize - { + fn count(self) -> usize { either!(self.inner, iter => iter.count()) } - fn last(self) -> Option - { + fn last(self) -> Option { either!(self.inner, iter => iter.last()) } fn position

(&mut self, predicate: P) -> Option - where P: FnMut(Self::Item) -> bool - { + where P: FnMut(Self::Item) -> bool { either_mut!(self.inner, iter => iter.position(predicate)) } } -impl<'a, A> DoubleEndedIterator for IterMut<'a, A, Ix1> -{ +impl<'a, A> DoubleEndedIterator for IterMut<'a, A, Ix1> { #[inline] - fn next_back(&mut self) -> Option<&'a mut A> - { + fn next_back(&mut self) -> Option<&'a mut A> { either_mut!(self.inner, iter => iter.next_back()) } - fn nth_back(&mut self, n: usize) -> Option<&'a mut A> - { + fn nth_back(&mut self, n: usize) -> Option<&'a mut A> { either_mut!(self.inner, iter => iter.nth_back(n)) } fn rfold(self, init: Acc, g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc - { + where G: FnMut(Acc, Self::Item) -> Acc { either!(self.inner, iter => iter.rfold(init, g)) } } @@ -634,44 +561,36 @@ impl<'a, A> DoubleEndedIterator for IterMut<'a, A, Ix1> impl<'a, A, D> ExactSizeIterator for IterMut<'a, A, D> where D: Dimension { - fn len(&self) -> usize - { + fn len(&self) -> usize { either!(self.inner, ref iter => iter.len()) } } -impl<'a, A, D: Dimension> Iterator for ElementsBaseMut<'a, A, D> -{ +impl<'a, A, D: Dimension> Iterator for ElementsBaseMut<'a, A, D> { type Item = &'a mut A; #[inline] - fn next(&mut self) -> Option<&'a mut A> - { + fn next(&mut self) -> Option<&'a mut A> { self.inner.next().map(|p| unsafe { &mut *p }) } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } fn fold(self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc - { + where G: FnMut(Acc, Self::Item) -> Acc { unsafe { self.inner.fold(init, move |acc, ptr| g(acc, &mut *ptr)) } } } -impl<'a, A> DoubleEndedIterator for ElementsBaseMut<'a, A, Ix1> -{ +impl<'a, A> DoubleEndedIterator for ElementsBaseMut<'a, A, Ix1> { #[inline] - fn next_back(&mut self) -> Option<&'a mut A> - { + fn next_back(&mut self) -> Option<&'a mut A> { self.inner.next_back().map(|p| unsafe { &mut *p }) } fn 
rfold(self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc - { + where G: FnMut(Acc, Self::Item) -> Acc { unsafe { self.inner.rfold(init, move |acc, ptr| g(acc, &mut *ptr)) } } } @@ -679,18 +598,15 @@ impl<'a, A> DoubleEndedIterator for ElementsBaseMut<'a, A, Ix1> impl<'a, A, D> ExactSizeIterator for ElementsBaseMut<'a, A, D> where D: Dimension { - fn len(&self) -> usize - { + fn len(&self) -> usize { self.inner.len() } } -impl<'a, A, D: Dimension> Iterator for IndexedIterMut<'a, A, D> -{ +impl<'a, A, D: Dimension> Iterator for IndexedIterMut<'a, A, D> { type Item = (D::Pattern, &'a mut A); #[inline] - fn next(&mut self) -> Option - { + fn next(&mut self) -> Option { let index = match self.0.inner.index { None => return None, Some(ref ix) => ix.clone(), @@ -701,8 +617,7 @@ impl<'a, A, D: Dimension> Iterator for IndexedIterMut<'a, A, D> } } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { self.0.size_hint() } } @@ -710,8 +625,7 @@ impl<'a, A, D: Dimension> Iterator for IndexedIterMut<'a, A, D> impl<'a, A, D> ExactSizeIterator for IndexedIterMut<'a, A, D> where D: Dimension { - fn len(&self) -> usize - { + fn len(&self) -> usize { self.0.inner.len() } } @@ -720,8 +634,7 @@ where D: Dimension /// each lane along that axis. /// /// See [`.lanes()`](ArrayBase::lanes) for more information. 
-pub struct LanesIter<'a, A, D> -{ +pub struct LanesIter<'a, A, D> { inner_len: Ix, inner_stride: Ixs, iter: Baseiter, @@ -744,15 +657,13 @@ impl<'a, A, D> Iterator for LanesIter<'a, A, D> where D: Dimension { type Item = ArrayView<'a, A, Ix1>; - fn next(&mut self) -> Option - { + fn next(&mut self) -> Option { self.iter .next() .map(|ptr| unsafe { ArrayView::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) }) } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } } @@ -760,16 +671,13 @@ where D: Dimension impl<'a, A, D> ExactSizeIterator for LanesIter<'a, A, D> where D: Dimension { - fn len(&self) -> usize - { + fn len(&self) -> usize { self.iter.len() } } -impl<'a, A> DoubleEndedIterator for LanesIter<'a, A, Ix1> -{ - fn next_back(&mut self) -> Option - { +impl<'a, A> DoubleEndedIterator for LanesIter<'a, A, Ix1> { + fn next_back(&mut self) -> Option { self.iter .next_back() .map(|ptr| unsafe { ArrayView::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) }) @@ -784,8 +692,7 @@ impl<'a, A> DoubleEndedIterator for LanesIter<'a, A, Ix1> /// /// See [`.lanes_mut()`](ArrayBase::lanes_mut) /// for more information. 
-pub struct LanesIterMut<'a, A, D> -{ +pub struct LanesIterMut<'a, A, D> { inner_len: Ix, inner_stride: Ixs, iter: Baseiter, @@ -796,15 +703,13 @@ impl<'a, A, D> Iterator for LanesIterMut<'a, A, D> where D: Dimension { type Item = ArrayViewMut<'a, A, Ix1>; - fn next(&mut self) -> Option - { + fn next(&mut self) -> Option { self.iter .next() .map(|ptr| unsafe { ArrayViewMut::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) }) } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } } @@ -812,16 +717,13 @@ where D: Dimension impl<'a, A, D> ExactSizeIterator for LanesIterMut<'a, A, D> where D: Dimension { - fn len(&self) -> usize - { + fn len(&self) -> usize { self.iter.len() } } -impl<'a, A> DoubleEndedIterator for LanesIterMut<'a, A, Ix1> -{ - fn next_back(&mut self) -> Option - { +impl<'a, A> DoubleEndedIterator for LanesIterMut<'a, A, Ix1> { + fn next_back(&mut self) -> Option { self.iter .next_back() .map(|ptr| unsafe { ArrayViewMut::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) }) @@ -829,8 +731,7 @@ impl<'a, A> DoubleEndedIterator for LanesIterMut<'a, A, Ix1> } #[derive(Debug)] -pub struct AxisIterCore -{ +pub struct AxisIterCore { /// Index along the axis of the value of `.next()`, relative to the start /// of the axis. index: Ix, @@ -861,8 +762,7 @@ clone_bounds!( } ); -impl AxisIterCore -{ +impl AxisIterCore { /// Constructs a new iterator over the specified axis. fn new(v: ArrayBase, axis: Axis) -> Self where @@ -880,8 +780,7 @@ impl AxisIterCore } #[inline] - unsafe fn offset(&self, index: usize) -> *mut A - { + unsafe fn offset(&self, index: usize) -> *mut A { debug_assert!( index < self.end, "index={}, end={}, stride={}", @@ -900,8 +799,7 @@ impl AxisIterCore /// **Panics** if `index` is strictly greater than the iterator's remaining /// length. 
#[track_caller] - fn split_at(self, index: usize) -> (Self, Self) - { + fn split_at(self, index: usize) -> (Self, Self) { assert!(index <= self.len()); let mid = self.index + index; let left = AxisIterCore { @@ -925,16 +823,14 @@ impl AxisIterCore /// Does the same thing as `.next()` but also returns the index of the item /// relative to the start of the axis. - fn next_with_index(&mut self) -> Option<(usize, *mut A)> - { + fn next_with_index(&mut self) -> Option<(usize, *mut A)> { let index = self.index; self.next().map(|ptr| (index, ptr)) } /// Does the same thing as `.next_back()` but also returns the index of the /// item relative to the start of the axis. - fn next_back_with_index(&mut self) -> Option<(usize, *mut A)> - { + fn next_back_with_index(&mut self) -> Option<(usize, *mut A)> { self.next_back().map(|ptr| (self.end, ptr)) } } @@ -944,8 +840,7 @@ where D: Dimension { type Item = *mut A; - fn next(&mut self) -> Option - { + fn next(&mut self) -> Option { if self.index >= self.end { None } else { @@ -955,8 +850,7 @@ where D: Dimension } } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { let len = self.len(); (len, Some(len)) } @@ -965,8 +859,7 @@ where D: Dimension impl DoubleEndedIterator for AxisIterCore where D: Dimension { - fn next_back(&mut self) -> Option - { + fn next_back(&mut self) -> Option { if self.index >= self.end { None } else { @@ -980,8 +873,7 @@ where D: Dimension impl ExactSizeIterator for AxisIterCore where D: Dimension { - fn len(&self) -> usize - { + fn len(&self) -> usize { self.end - self.index } } @@ -1001,8 +893,7 @@ where D: Dimension /// or [`.axis_iter()`](ArrayBase::axis_iter) /// for more information. 
#[derive(Debug)] -pub struct AxisIter<'a, A, D> -{ +pub struct AxisIter<'a, A, D> { iter: AxisIterCore, life: PhantomData<&'a A>, } @@ -1017,12 +908,10 @@ clone_bounds!( } ); -impl<'a, A, D: Dimension> AxisIter<'a, A, D> -{ +impl<'a, A, D: Dimension> AxisIter<'a, A, D> { /// Creates a new iterator over the specified axis. pub(crate) fn new(v: ArrayView<'a, A, Di>, axis: Axis) -> Self - where Di: RemoveAxis - { + where Di: RemoveAxis { AxisIter { iter: AxisIterCore::new(v, axis), life: PhantomData, @@ -1037,8 +926,7 @@ impl<'a, A, D: Dimension> AxisIter<'a, A, D> /// **Panics** if `index` is strictly greater than the iterator's remaining /// length. #[track_caller] - pub fn split_at(self, index: usize) -> (Self, Self) - { + pub fn split_at(self, index: usize) -> (Self, Self) { let (left, right) = self.iter.split_at(index); ( AxisIter { @@ -1058,13 +946,11 @@ where D: Dimension { type Item = ArrayView<'a, A, D>; - fn next(&mut self) -> Option - { + fn next(&mut self) -> Option { self.iter.next().map(|ptr| unsafe { self.as_ref(ptr) }) } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } } @@ -1072,8 +958,7 @@ where D: Dimension impl<'a, A, D> DoubleEndedIterator for AxisIter<'a, A, D> where D: Dimension { - fn next_back(&mut self) -> Option - { + fn next_back(&mut self) -> Option { self.iter.next_back().map(|ptr| unsafe { self.as_ref(ptr) }) } } @@ -1081,8 +966,7 @@ where D: Dimension impl<'a, A, D> ExactSizeIterator for AxisIter<'a, A, D> where D: Dimension { - fn len(&self) -> usize - { + fn len(&self) -> usize { self.iter.len() } } @@ -1101,18 +985,15 @@ where D: Dimension /// See [`.outer_iter_mut()`](ArrayBase::outer_iter_mut) /// or [`.axis_iter_mut()`](ArrayBase::axis_iter_mut) /// for more information. 
-pub struct AxisIterMut<'a, A, D> -{ +pub struct AxisIterMut<'a, A, D> { iter: AxisIterCore, life: PhantomData<&'a mut A>, } -impl<'a, A, D: Dimension> AxisIterMut<'a, A, D> -{ +impl<'a, A, D: Dimension> AxisIterMut<'a, A, D> { /// Creates a new iterator over the specified axis. pub(crate) fn new(v: ArrayViewMut<'a, A, Di>, axis: Axis) -> Self - where Di: RemoveAxis - { + where Di: RemoveAxis { AxisIterMut { iter: AxisIterCore::new(v, axis), life: PhantomData, @@ -1127,8 +1008,7 @@ impl<'a, A, D: Dimension> AxisIterMut<'a, A, D> /// **Panics** if `index` is strictly greater than the iterator's remaining /// length. #[track_caller] - pub fn split_at(self, index: usize) -> (Self, Self) - { + pub fn split_at(self, index: usize) -> (Self, Self) { let (left, right) = self.iter.split_at(index); ( AxisIterMut { @@ -1148,13 +1028,11 @@ where D: Dimension { type Item = ArrayViewMut<'a, A, D>; - fn next(&mut self) -> Option - { + fn next(&mut self) -> Option { self.iter.next().map(|ptr| unsafe { self.as_ref(ptr) }) } - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } } @@ -1162,8 +1040,7 @@ where D: Dimension impl<'a, A, D> DoubleEndedIterator for AxisIterMut<'a, A, D> where D: Dimension { - fn next_back(&mut self) -> Option - { + fn next_back(&mut self) -> Option { self.iter.next_back().map(|ptr| unsafe { self.as_ref(ptr) }) } } @@ -1171,31 +1048,26 @@ where D: Dimension impl<'a, A, D> ExactSizeIterator for AxisIterMut<'a, A, D> where D: Dimension { - fn len(&self) -> usize - { + fn len(&self) -> usize { self.iter.len() } } -impl<'a, A, D: Dimension> NdProducer for AxisIter<'a, A, D> -{ +impl<'a, A, D: Dimension> NdProducer for AxisIter<'a, A, D> { type Item = ::Item; type Dim = Ix1; type Ptr = *mut A; type Stride = isize; - fn layout(&self) -> crate::Layout - { + fn layout(&self) -> crate::Layout { crate::Layout::one_dimensional() } - fn raw_dim(&self) -> Self::Dim - { + fn raw_dim(&self) -> Self::Dim { 
Ix1(self.len()) } - fn as_ptr(&self) -> Self::Ptr - { + fn as_ptr(&self) -> Self::Ptr { if self.len() > 0 { // `self.iter.index` is guaranteed to be in-bounds if any of the // iterator remains (i.e. if `self.len() > 0`). @@ -1208,53 +1080,44 @@ impl<'a, A, D: Dimension> NdProducer for AxisIter<'a, A, D> } } - fn contiguous_stride(&self) -> isize - { + fn contiguous_stride(&self) -> isize { self.iter.stride } - unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item - { + unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item { ArrayView::new_(ptr, self.iter.inner_dim.clone(), self.iter.inner_strides.clone()) } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr - { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr { self.iter.offset(self.iter.index + i[0]) } - fn stride_of(&self, _axis: Axis) -> isize - { + fn stride_of(&self, _axis: Axis) -> isize { self.contiguous_stride() } - fn split_at(self, _axis: Axis, index: usize) -> (Self, Self) - { + fn split_at(self, _axis: Axis, index: usize) -> (Self, Self) { self.split_at(index) } private_impl! {} } -impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> -{ +impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> { type Item = ::Item; type Dim = Ix1; type Ptr = *mut A; type Stride = isize; - fn layout(&self) -> crate::Layout - { + fn layout(&self) -> crate::Layout { crate::Layout::one_dimensional() } - fn raw_dim(&self) -> Self::Dim - { + fn raw_dim(&self) -> Self::Dim { Ix1(self.len()) } - fn as_ptr(&self) -> Self::Ptr - { + fn as_ptr(&self) -> Self::Ptr { if self.len() > 0 { // `self.iter.index` is guaranteed to be in-bounds if any of the // iterator remains (i.e. if `self.len() > 0`). 
@@ -1267,28 +1130,23 @@ impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> } } - fn contiguous_stride(&self) -> isize - { + fn contiguous_stride(&self) -> isize { self.iter.stride } - unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item - { + unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item { ArrayViewMut::new_(ptr, self.iter.inner_dim.clone(), self.iter.inner_strides.clone()) } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr - { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr { self.iter.offset(self.iter.index + i[0]) } - fn stride_of(&self, _axis: Axis) -> isize - { + fn stride_of(&self, _axis: Axis) -> isize { self.contiguous_stride() } - fn split_at(self, _axis: Axis, index: usize) -> (Self, Self) - { + fn split_at(self, _axis: Axis, index: usize) -> (Self, Self) { self.split_at(index) } @@ -1305,8 +1163,7 @@ impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> /// Iterator element type is `ArrayView<'a, A, D>`. /// /// See [`.axis_chunks_iter()`](ArrayBase::axis_chunks_iter) for more information. -pub struct AxisChunksIter<'a, A, D> -{ +pub struct AxisChunksIter<'a, A, D> { iter: AxisIterCore, /// Index of the partial chunk (the chunk smaller than the specified chunk /// size due to the axis length not being evenly divisible). If the axis @@ -1339,9 +1196,9 @@ clone_bounds!( /// /// **Panics** if `size == 0`. 
#[track_caller] -fn chunk_iter_parts(v: ArrayView<'_, A, D>, axis: Axis, size: usize) - -> (AxisIterCore, usize, D) -{ +fn chunk_iter_parts( + v: ArrayView<'_, A, D>, axis: Axis, size: usize, +) -> (AxisIterCore, usize, D) { assert_ne!(size, 0, "Chunk size must be nonzero."); let axis_len = v.len_of(axis); let n_whole_chunks = axis_len / size; @@ -1378,10 +1235,8 @@ fn chunk_iter_parts(v: ArrayView<'_, A, D>, axis: Axis, size: u (iter, partial_chunk_index, partial_chunk_dim) } -impl<'a, A, D: Dimension> AxisChunksIter<'a, A, D> -{ - pub(crate) fn new(v: ArrayView<'a, A, D>, axis: Axis, size: usize) -> Self - { +impl<'a, A, D: Dimension> AxisChunksIter<'a, A, D> { + pub(crate) fn new(v: ArrayView<'a, A, D>, axis: Axis, size: usize) -> Self { let (iter, partial_chunk_index, partial_chunk_dim) = chunk_iter_parts(v, axis, size); AxisChunksIter { iter, @@ -1488,18 +1343,15 @@ macro_rules! chunk_iter_impl { /// /// See [`.axis_chunks_iter_mut()`](ArrayBase::axis_chunks_iter_mut) /// for more information. 
-pub struct AxisChunksIterMut<'a, A, D> -{ +pub struct AxisChunksIterMut<'a, A, D> { iter: AxisIterCore, partial_chunk_index: usize, partial_chunk_dim: D, life: PhantomData<&'a mut A>, } -impl<'a, A, D: Dimension> AxisChunksIterMut<'a, A, D> -{ - pub(crate) fn new(v: ArrayViewMut<'a, A, D>, axis: Axis, size: usize) -> Self - { +impl<'a, A, D: Dimension> AxisChunksIterMut<'a, A, D> { + pub(crate) fn new(v: ArrayViewMut<'a, A, D>, axis: Axis, size: usize) -> Self { let (iter, partial_chunk_index, partial_chunk_dim) = chunk_iter_parts(v.into_view(), axis, size); AxisChunksIterMut { iter, @@ -1559,8 +1411,7 @@ unsafe impl TrustedIterator for IntoIter where D: Dimension {} /// Like Iterator::collect, but only for trusted length iterators pub fn to_vec(iter: I) -> Vec -where I: TrustedIterator + ExactSizeIterator -{ +where I: TrustedIterator + ExactSizeIterator { to_vec_mapped(iter, |x| x) } diff --git a/src/iterators/windows.rs b/src/iterators/windows.rs index ec1afb634..050071450 100644 --- a/src/iterators/windows.rs +++ b/src/iterators/windows.rs @@ -11,19 +11,16 @@ use crate::Slice; /// /// See [`.windows()`](ArrayBase::windows) for more /// information. 
-pub struct Windows<'a, A, D> -{ +pub struct Windows<'a, A, D> { base: RawArrayView, life: PhantomData<&'a A>, window: D, strides: D, } -impl<'a, A, D: Dimension> Windows<'a, A, D> -{ +impl<'a, A, D: Dimension> Windows<'a, A, D> { pub(crate) fn new(a: ArrayView<'a, A, D>, window_size: E) -> Self - where E: IntoDimension - { + where E: IntoDimension { let window = window_size.into_dimension(); let ndim = window.ndim(); @@ -34,8 +31,7 @@ impl<'a, A, D: Dimension> Windows<'a, A, D> } pub(crate) fn new_with_stride(a: ArrayView<'a, A, D>, window_size: E, axis_strides: E) -> Self - where E: IntoDimension - { + where E: IntoDimension { let window = window_size.into_dimension(); let strides = axis_strides.into_dimension(); @@ -112,8 +108,7 @@ where { type Item = ::Item; type IntoIter = WindowsIter<'a, A, D>; - fn into_iter(self) -> Self::IntoIter - { + fn into_iter(self) -> Self::IntoIter { WindowsIter { iter: self.base.into_base_iter(), life: self.life, @@ -127,8 +122,7 @@ where /// /// See [`.windows()`](ArrayBase::windows) for more /// information. -pub struct WindowsIter<'a, A, D> -{ +pub struct WindowsIter<'a, A, D> { iter: Baseiter, life: PhantomData<&'a A>, window: D, diff --git a/src/itertools.rs b/src/itertools.rs index d3562e687..ccfb852be 100644 --- a/src/itertools.rs +++ b/src/itertools.rs @@ -23,8 +23,7 @@ use std::iter; /// } /// ``` pub(crate) fn enumerate(iterable: I) -> iter::Enumerate -where I: IntoIterator -{ +where I: IntoIterator { iterable.into_iter().enumerate() } diff --git a/src/layout/layoutfmt.rs b/src/layout/layoutfmt.rs index f20f0caaa..3d7fad00a 100644 --- a/src/layout/layoutfmt.rs +++ b/src/layout/layoutfmt.rs @@ -12,10 +12,8 @@ const LAYOUT_NAMES: &[&str] = &["C", "F", "c", "f"]; use std::fmt; -impl fmt::Debug for Layout -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result - { +impl fmt::Debug for Layout { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if self.0 == 0 { write!(f, "Custom")? 
} else { diff --git a/src/layout/mod.rs b/src/layout/mod.rs index 026688d63..d3c9e5fcb 100644 --- a/src/layout/mod.rs +++ b/src/layout/mod.rs @@ -8,82 +8,70 @@ mod layoutfmt; #[derive(Copy, Clone)] pub struct Layout(u32); -impl Layout -{ +impl Layout { pub(crate) const CORDER: u32 = 0b01; pub(crate) const FORDER: u32 = 0b10; pub(crate) const CPREFER: u32 = 0b0100; pub(crate) const FPREFER: u32 = 0b1000; #[inline(always)] - pub(crate) fn is(self, flag: u32) -> bool - { + pub(crate) fn is(self, flag: u32) -> bool { self.0 & flag != 0 } /// Return layout common to both inputs #[inline(always)] - pub(crate) fn intersect(self, other: Layout) -> Layout - { + pub(crate) fn intersect(self, other: Layout) -> Layout { Layout(self.0 & other.0) } /// Return a layout that simultaneously "is" what both of the inputs are #[inline(always)] - pub(crate) fn also(self, other: Layout) -> Layout - { + pub(crate) fn also(self, other: Layout) -> Layout { Layout(self.0 | other.0) } #[inline(always)] - pub(crate) fn one_dimensional() -> Layout - { + pub(crate) fn one_dimensional() -> Layout { Layout::c().also(Layout::f()) } #[inline(always)] - pub(crate) fn c() -> Layout - { + pub(crate) fn c() -> Layout { Layout(Layout::CORDER | Layout::CPREFER) } #[inline(always)] - pub(crate) fn f() -> Layout - { + pub(crate) fn f() -> Layout { Layout(Layout::FORDER | Layout::FPREFER) } #[inline(always)] - pub(crate) fn cpref() -> Layout - { + pub(crate) fn cpref() -> Layout { Layout(Layout::CPREFER) } #[inline(always)] - pub(crate) fn fpref() -> Layout - { + pub(crate) fn fpref() -> Layout { Layout(Layout::FPREFER) } #[inline(always)] - pub(crate) fn none() -> Layout - { + pub(crate) fn none() -> Layout { Layout(0) } /// A simple "score" method which scores positive for preferring C-order, negative for F-order /// Subject to change when we can describe other layouts #[inline] - pub(crate) fn tendency(self) -> i32 - { + pub(crate) fn tendency(self) -> i32 { (self.is(Layout::CORDER) as i32 - 
self.is(Layout::FORDER) as i32) + (self.is(Layout::CPREFER) as i32 - self.is(Layout::FPREFER) as i32) } } #[cfg(test)] -mod tests -{ +mod tests { use super::*; use crate::imp_prelude::*; use crate::NdProducer; @@ -117,8 +105,7 @@ mod tests } #[test] - fn contig_layouts() - { + fn contig_layouts() { let a = M::zeros((5, 5)); let b = M::zeros((5, 5).f()); let ac = a.view().layout(); @@ -130,8 +117,7 @@ mod tests } #[test] - fn contig_cf_layouts() - { + fn contig_cf_layouts() { let a = M::zeros((5, 1)); let b = M::zeros((1, 5).f()); assert_layouts!(a, CORDER, CPREFER, FORDER, FPREFER); @@ -159,8 +145,7 @@ mod tests } #[test] - fn stride_layouts() - { + fn stride_layouts() { let a = M::zeros((5, 5)); { @@ -187,8 +172,7 @@ mod tests } #[test] - fn no_layouts() - { + fn no_layouts() { let a = M::zeros((5, 5)); let b = M::zeros((5, 5).f()); @@ -216,8 +200,7 @@ mod tests } #[test] - fn skip_layouts() - { + fn skip_layouts() { let a = M::zeros((5, 5)); { let v1 = a.slice(s![..;2, ..]).layout(); diff --git a/src/lib.rs b/src/lib.rs index 37af0adfe..83d62991d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,4 @@ +#![feature(non_null_convenience)] // Copyright 2014-2020 bluss and ndarray developers. 
// // Licensed under the Apache License, Version 2.0 (()) +/// // Ok::<(), ndarray::ShapeError>(()) /// ``` /// /// If neither of these options works for you, and you really need to convert @@ -1153,7 +1154,7 @@ pub type Ixs = isize; /// [[1, 2, 3], [4, 5, 6]], /// [[7, 8, 9], [10, 11, 12]], /// ]); -/// # Ok::<(), ndarray::ShapeError>(()) +/// // Ok::<(), ndarray::ShapeError>(()) /// ``` /// /// Note that this implementation assumes that the nested `Vec`s are all the @@ -1436,10 +1437,8 @@ pub use data_repr::OwnedRepr; #[derive(Debug)] pub struct OwnedArcRepr(Arc>); -impl Clone for OwnedArcRepr -{ - fn clone(&self) -> Self - { +impl Clone for OwnedArcRepr { + fn clone(&self) -> Self { OwnedArcRepr(self.0.clone()) } } @@ -1450,16 +1449,13 @@ impl Clone for OwnedArcRepr /// [`RawArrayView`] / [`RawArrayViewMut`] for the array type!* #[derive(Copy, Clone)] // This is just a marker type, to carry the mutability and element type. -pub struct RawViewRepr -{ +pub struct RawViewRepr { ptr: PhantomData, } -impl RawViewRepr -{ +impl RawViewRepr { #[inline(always)] - const fn new() -> Self - { + const fn new() -> Self { RawViewRepr { ptr: PhantomData } } } @@ -1470,16 +1466,13 @@ impl RawViewRepr /// [`ArrayView`] / [`ArrayViewMut`] for the array type!* #[derive(Copy, Clone)] // This is just a marker type, to carry the lifetime parameter. -pub struct ViewRepr -{ +pub struct ViewRepr { life: PhantomData, } -impl ViewRepr -{ +impl ViewRepr { #[inline(always)] - const fn new() -> Self - { + const fn new() -> Self { ViewRepr { life: PhantomData } } } @@ -1488,19 +1481,16 @@ impl ViewRepr /// /// *Don't use this type directly—use the type alias /// [`CowArray`] for the array type!* -pub enum CowRepr<'a, A> -{ +pub enum CowRepr<'a, A> { /// Borrowed data. View(ViewRepr<&'a A>), /// Owned data. Owned(OwnedRepr), } -impl<'a, A> CowRepr<'a, A> -{ +impl<'a, A> CowRepr<'a, A> { /// Returns `true` iff the data is the `View` variant. 
- pub fn is_view(&self) -> bool - { + pub fn is_view(&self) -> bool { match self { CowRepr::View(_) => true, CowRepr::Owned(_) => false, @@ -1508,8 +1498,7 @@ impl<'a, A> CowRepr<'a, A> } /// Returns `true` iff the data is the `Owned` variant. - pub fn is_owned(&self) -> bool - { + pub fn is_owned(&self) -> bool { match self { CowRepr::View(_) => false, CowRepr::Owned(_) => true, @@ -1537,8 +1526,7 @@ where { #[inline] fn broadcast_unwrap(&self, dim: E) -> ArrayView<'_, A, E> - where E: Dimension - { + where E: Dimension { #[cold] #[inline(never)] fn broadcast_panic(from: &D, to: &E) -> ! @@ -1563,8 +1551,7 @@ where // (Checked in debug assertions). #[inline] fn broadcast_assume(&self, dim: E) -> ArrayView<'_, A, E> - where E: Dimension - { + where E: Dimension { let dim = dim.into_dimension(); debug_assert_eq!(self.shape(), dim.slice()); let ptr = self.ptr; @@ -1574,8 +1561,7 @@ where } /// Remove array axis `axis` and return the result. - fn try_remove_axis(self, axis: Axis) -> ArrayBase - { + fn try_remove_axis(self, axis: Axis) -> ArrayBase { let d = self.dim.try_remove_axis(axis); let s = self.strides.try_remove_axis(axis); // safe because new dimension, strides allow access to a subset of old data @@ -1613,7 +1599,13 @@ mod impl_raw_views; mod impl_cow; /// Returns `true` if the pointer is aligned. -pub(crate) fn is_aligned(ptr: *const T) -> bool -{ +pub(crate) fn is_aligned(ptr: *const T) -> bool { (ptr as usize) % ::std::mem::align_of::() == 0 } + +pub fn configure() { + #[cfg(feature = "opencl")] + unsafe { + hasty_::opencl::configure_opencl(); + } +} diff --git a/src/linalg/impl_linalg.rs b/src/linalg/impl_linalg.rs index bcfcba94e..67d8db6e6 100644 --- a/src/linalg/impl_linalg.rs +++ b/src/linalg/impl_linalg.rs @@ -66,8 +66,7 @@ where S: Data /// layout allows. 
#[track_caller] pub fn dot(&self, rhs: &Rhs) -> >::Output - where Self: Dot - { + where Self: Dot { Dot::dot(self, rhs) } @@ -145,8 +144,7 @@ where S: Data /// which agrees with our pointer for non-negative strides, but /// is at the opposite end for negative strides. #[cfg(feature = "blas")] -unsafe fn blas_1d_params(ptr: *const A, len: usize, stride: isize) -> (*const A, blas_index, blas_index) -{ +unsafe fn blas_1d_params(ptr: *const A, len: usize, stride: isize) -> (*const A, blas_index, blas_index) { // [x x x x] // ^--ptr // stride = -1 @@ -163,8 +161,7 @@ unsafe fn blas_1d_params(ptr: *const A, len: usize, stride: isize) -> (*const /// /// For two-dimensional arrays, the dot method computes the matrix /// multiplication. -pub trait Dot -{ +pub trait Dot { /// The result of the operation. /// /// For two-dimensional arrays: a rectangular array. @@ -189,8 +186,7 @@ where /// *Note:* If enabled, uses blas `dot` for elements of `f32, f64` when memory /// layout allows. #[track_caller] - fn dot(&self, rhs: &ArrayBase) -> A - { + fn dot(&self, rhs: &ArrayBase) -> A { self.dot_impl(rhs) } } @@ -213,8 +209,7 @@ where /// /// **Panics** if shapes are incompatible. #[track_caller] - fn dot(&self, rhs: &ArrayBase) -> Array - { + fn dot(&self, rhs: &ArrayBase) -> Array { rhs.t().dot(self) } } @@ -253,8 +248,7 @@ where S: Data /// ``` #[track_caller] pub fn dot(&self, rhs: &Rhs) -> >::Output - where Self: Dot - { + where Self: Dot { Dot::dot(self, rhs) } } @@ -266,8 +260,7 @@ where A: LinalgScalar, { type Output = Array2; - fn dot(&self, b: &ArrayBase) -> Array2 - { + fn dot(&self, b: &ArrayBase) -> Array2 { let a = self.view(); let b = b.view(); let ((m, k), (k2, n)) = (a.dim(), b.dim()); @@ -293,8 +286,7 @@ where /// Assumes that `m` and `n` are ≤ `isize::MAX`. #[cold] #[inline(never)] -fn dot_shape_error(m: usize, k: usize, k2: usize, n: usize) -> ! -{ +fn dot_shape_error(m: usize, k: usize, k2: usize, n: usize) -> ! 
{ match m.checked_mul(n) { Some(len) if len <= ::std::isize::MAX as usize => {} _ => panic!("ndarray: shape {} × {} overflows isize", m, n), @@ -307,8 +299,7 @@ fn dot_shape_error(m: usize, k: usize, k2: usize, n: usize) -> ! #[cold] #[inline(never)] -fn general_dot_shape_error(m: usize, k: usize, k2: usize, n: usize, c1: usize, c2: usize) -> ! -{ +fn general_dot_shape_error(m: usize, k: usize, k2: usize, n: usize, c1: usize, c2: usize) -> ! { panic!("ndarray: inputs {} × {}, {} × {}, and output {} × {} are not compatible for matrix multiplication", m, k, k2, n, c1, c2); } @@ -330,8 +321,7 @@ where { type Output = Array; #[track_caller] - fn dot(&self, rhs: &ArrayBase) -> Array - { + fn dot(&self, rhs: &ArrayBase) -> Array { let ((m, a), n) = (self.dim(), rhs.dim()); if a != n { dot_shape_error(m, a, n, 1); @@ -497,8 +487,7 @@ fn mat_mul_impl( /// C ← α A B + β C fn mat_mul_general( alpha: A, lhs: &ArrayView2<'_, A>, rhs: &ArrayView2<'_, A>, beta: A, c: &mut ArrayViewMut2<'_, A>, -) where A: LinalgScalar -{ +) where A: LinalgScalar { let ((m, k), (_, n)) = (lhs.dim(), rhs.dim()); // common parameters for gemm @@ -791,16 +780,14 @@ where #[inline(always)] /// Return `true` if `A` and `B` are the same type -fn same_type() -> bool -{ +fn same_type() -> bool { TypeId::of::() == TypeId::of::() } // Read pointer to type `A` as type `B`. 
// // **Panics** if `A` and `B` are not the same type -fn cast_as(a: &A) -> B -{ +fn cast_as(a: &A) -> B { assert!(same_type::(), "expect type {} and {} to match", std::any::type_name::(), std::any::type_name::()); unsafe { ::std::ptr::read(a as *const _ as *const B) } @@ -808,8 +795,7 @@ fn cast_as(a: &A) -> B /// Return the complex in the form of an array [re, im] #[inline] -fn complex_array(z: Complex) -> [A; 2] -{ +fn complex_array(z: Complex) -> [A; 2] { [z.re, z.im] } @@ -834,8 +820,7 @@ where } #[cfg(feature = "blas")] -enum MemoryOrder -{ +enum MemoryOrder { C, F, } @@ -867,8 +852,7 @@ where } #[cfg(feature = "blas")] -fn is_blas_2d(dim: &Ix2, stride: &Ix2, order: MemoryOrder) -> bool -{ +fn is_blas_2d(dim: &Ix2, stride: &Ix2, order: MemoryOrder) -> bool { let (m, n) = dim.into_pattern(); let s0 = stride[0] as isize; let s1 = stride[1] as isize; @@ -911,37 +895,32 @@ where #[cfg(test)] #[cfg(feature = "blas")] -mod blas_tests -{ +mod blas_tests { use super::*; #[test] - fn blas_row_major_2d_normal_matrix() - { + fn blas_row_major_2d_normal_matrix() { let m: Array2 = Array2::zeros((3, 5)); assert!(blas_row_major_2d::(&m)); assert!(!blas_column_major_2d::(&m)); } #[test] - fn blas_row_major_2d_row_matrix() - { + fn blas_row_major_2d_row_matrix() { let m: Array2 = Array2::zeros((1, 5)); assert!(blas_row_major_2d::(&m)); assert!(blas_column_major_2d::(&m)); } #[test] - fn blas_row_major_2d_column_matrix() - { + fn blas_row_major_2d_column_matrix() { let m: Array2 = Array2::zeros((5, 1)); assert!(blas_row_major_2d::(&m)); assert!(blas_column_major_2d::(&m)); } #[test] - fn blas_row_major_2d_transposed_row_matrix() - { + fn blas_row_major_2d_transposed_row_matrix() { let m: Array2 = Array2::zeros((1, 5)); let m_t = m.t(); assert!(blas_row_major_2d::(&m_t)); @@ -949,8 +928,7 @@ mod blas_tests } #[test] - fn blas_row_major_2d_transposed_column_matrix() - { + fn blas_row_major_2d_transposed_column_matrix() { let m: Array2 = Array2::zeros((5, 1)); let m_t = m.t(); 
assert!(blas_row_major_2d::(&m_t)); @@ -958,8 +936,7 @@ mod blas_tests } #[test] - fn blas_column_major_2d_normal_matrix() - { + fn blas_column_major_2d_normal_matrix() { let m: Array2 = Array2::zeros((3, 5).f()); assert!(!blas_row_major_2d::(&m)); assert!(blas_column_major_2d::(&m)); diff --git a/src/linspace.rs b/src/linspace.rs index 411c480db..ec03d5b7a 100644 --- a/src/linspace.rs +++ b/src/linspace.rs @@ -11,8 +11,7 @@ use num_traits::Float; /// An iterator of a sequence of evenly spaced floats. /// /// Iterator element type is `F`. -pub struct Linspace -{ +pub struct Linspace { start: F, step: F, index: usize, @@ -25,8 +24,7 @@ where F: Float type Item = F; #[inline] - fn next(&mut self) -> Option - { + fn next(&mut self) -> Option { if self.index >= self.len { None } else { @@ -38,8 +36,7 @@ where F: Float } #[inline] - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { let n = self.len - self.index; (n, Some(n)) } @@ -49,8 +46,7 @@ impl DoubleEndedIterator for Linspace where F: Float { #[inline] - fn next_back(&mut self) -> Option - { + fn next_back(&mut self) -> Option { if self.index >= self.len { None } else { @@ -74,8 +70,7 @@ impl ExactSizeIterator for Linspace where Linspace: Iterator {} /// **Panics** if converting `n - 1` to type `F` fails. #[inline] pub fn linspace(a: F, b: F, n: usize) -> Linspace -where F: Float -{ +where F: Float { let step = if n > 1 { let num_steps = F::from(n - 1).expect("Converting number of steps to `A` must not fail."); (b - a) / num_steps @@ -101,8 +96,7 @@ where F: Float /// **Panics** if converting `((b - a) / step).ceil()` to type `F` fails. 
#[inline] pub fn range(a: F, b: F, step: F) -> Linspace -where F: Float -{ +where F: Float { let len = b - a; let steps = F::ceil(len / step); Linspace { diff --git a/src/logspace.rs b/src/logspace.rs index 6f8de885d..ee67d09c7 100644 --- a/src/logspace.rs +++ b/src/logspace.rs @@ -11,8 +11,7 @@ use num_traits::Float; /// An iterator of a sequence of logarithmically spaced number. /// /// Iterator element type is `F`. -pub struct Logspace -{ +pub struct Logspace { sign: F, base: F, start: F, @@ -27,8 +26,7 @@ where F: Float type Item = F; #[inline] - fn next(&mut self) -> Option - { + fn next(&mut self) -> Option { if self.index >= self.len { None } else { @@ -41,8 +39,7 @@ where F: Float } #[inline] - fn size_hint(&self) -> (usize, Option) - { + fn size_hint(&self) -> (usize, Option) { let n = self.len - self.index; (n, Some(n)) } @@ -52,8 +49,7 @@ impl DoubleEndedIterator for Logspace where F: Float { #[inline] - fn next_back(&mut self) -> Option - { + fn next_back(&mut self) -> Option { if self.index >= self.len { None } else { @@ -80,8 +76,7 @@ impl ExactSizeIterator for Logspace where Logspace: Iterator {} /// **Panics** if converting `n - 1` to type `F` fails. 
#[inline] pub fn logspace(base: F, a: F, b: F, n: usize) -> Logspace -where F: Float -{ +where F: Float { let step = if n > 1 { let num_steps = F::from(n - 1).expect("Converting number of steps to `A` must not fail."); (b - a) / num_steps @@ -99,14 +94,12 @@ where F: Float } #[cfg(test)] -mod tests -{ +mod tests { use super::logspace; #[test] #[cfg(feature = "approx")] - fn valid() - { + fn valid() { use crate::{arr1, Array1}; use approx::assert_abs_diff_eq; @@ -124,8 +117,7 @@ mod tests } #[test] - fn iter_forward() - { + fn iter_forward() { let mut iter = logspace(10.0f64, 0.0, 3.0, 4); assert!(iter.size_hint() == (4, Some(4))); @@ -140,8 +132,7 @@ mod tests } #[test] - fn iter_backward() - { + fn iter_backward() { let mut iter = logspace(10.0f64, 0.0, 3.0, 4); assert!(iter.size_hint() == (4, Some(4))); diff --git a/src/low_level_util.rs b/src/low_level_util.rs index 5a615a187..e75554889 100644 --- a/src/low_level_util.rs +++ b/src/low_level_util.rs @@ -13,22 +13,18 @@ #[must_use] pub(crate) struct AbortIfPanic(pub(crate) &'static &'static str); -impl AbortIfPanic -{ +impl AbortIfPanic { /// Defuse the AbortIfPanic guard. This *must* be done when finished. #[inline] - pub(crate) fn defuse(self) - { + pub(crate) fn defuse(self) { std::mem::forget(self); } } -impl Drop for AbortIfPanic -{ +impl Drop for AbortIfPanic { // The compiler should be able to remove this, if it can see through that there // is no panic in the code section. 
- fn drop(&mut self) - { + fn drop(&mut self) { #[cfg(feature = "std")] { eprintln!("ndarray: panic in no-panic section, aborting: {}", self.0); diff --git a/src/math_cell.rs b/src/math_cell.rs index 6ed1ed71f..c68926250 100644 --- a/src/math_cell.rs +++ b/src/math_cell.rs @@ -13,43 +13,35 @@ use std::ops::{Deref, DerefMut}; #[derive(Default)] pub struct MathCell(Cell); -impl MathCell -{ +impl MathCell { /// Create a new cell with the given value #[inline(always)] - pub const fn new(value: T) -> Self - { + pub const fn new(value: T) -> Self { MathCell(Cell::new(value)) } /// Return the inner value - pub fn into_inner(self) -> T - { + pub fn into_inner(self) -> T { Cell::into_inner(self.0) } /// Swap value with another cell - pub fn swap(&self, other: &Self) - { + pub fn swap(&self, other: &Self) { Cell::swap(&self.0, &other.0) } } -impl Deref for MathCell -{ +impl Deref for MathCell { type Target = Cell; #[inline(always)] - fn deref(&self) -> &Self::Target - { + fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for MathCell -{ +impl DerefMut for MathCell { #[inline(always)] - fn deref_mut(&mut self) -> &mut Self::Target - { + fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } @@ -57,8 +49,7 @@ impl DerefMut for MathCell impl Clone for MathCell where T: Copy { - fn clone(&self) -> Self - { + fn clone(&self) -> Self { MathCell::new(self.get()) } } @@ -66,8 +57,7 @@ where T: Copy impl PartialEq for MathCell where T: Copy + PartialEq { - fn eq(&self, rhs: &Self) -> bool - { + fn eq(&self, rhs: &Self) -> bool { self.get() == rhs.get() } } @@ -77,25 +67,20 @@ impl Eq for MathCell where T: Copy + Eq {} impl PartialOrd for MathCell where T: Copy + PartialOrd { - fn partial_cmp(&self, rhs: &Self) -> Option - { + fn partial_cmp(&self, rhs: &Self) -> Option { self.get().partial_cmp(&rhs.get()) } - fn lt(&self, rhs: &Self) -> bool - { + fn lt(&self, rhs: &Self) -> bool { self.get().lt(&rhs.get()) } - fn le(&self, rhs: &Self) -> bool - { + fn le(&self, 
rhs: &Self) -> bool { self.get().le(&rhs.get()) } - fn gt(&self, rhs: &Self) -> bool - { + fn gt(&self, rhs: &Self) -> bool { self.get().gt(&rhs.get()) } - fn ge(&self, rhs: &Self) -> bool - { + fn ge(&self, rhs: &Self) -> bool { self.get().ge(&rhs.get()) } } @@ -103,8 +88,7 @@ where T: Copy + PartialOrd impl Ord for MathCell where T: Copy + Ord { - fn cmp(&self, rhs: &Self) -> Ordering - { + fn cmp(&self, rhs: &Self) -> Ordering { self.get().cmp(&rhs.get()) } } @@ -112,20 +96,17 @@ where T: Copy + Ord impl fmt::Debug for MathCell where T: Copy + fmt::Debug { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result - { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.get().fmt(f) } } #[cfg(test)] -mod tests -{ +mod tests { use super::MathCell; #[test] - fn test_basic() - { + fn test_basic() { let c = &MathCell::new(0); c.set(1); assert_eq!(c.get(), 1); diff --git a/src/numeric/impl_float_maths.rs b/src/numeric/impl_float_maths.rs index 54fed49c2..4b3208800 100644 --- a/src/numeric/impl_float_maths.rs +++ b/src/numeric/impl_float_maths.rs @@ -137,8 +137,7 @@ where /// Square (two powers) of each element. #[must_use = "method returns a new array and does not mutate the original value"] - pub fn pow2(&self) -> Array - { + pub fn pow2(&self) -> Array { self.mapv(|v: A| v * v) } } @@ -162,8 +161,7 @@ where /// # Panics /// /// Panics if `!(min <= max)`. 
- pub fn clamp(&self, min: A, max: A) -> Array - { + pub fn clamp(&self, min: A, max: A) -> Array { assert!(min <= max, "min must be less than or equal to max"); self.mapv(|a| num_traits::clamp(a, min.clone(), max.clone())) } diff --git a/src/numeric/impl_numeric.rs b/src/numeric/impl_numeric.rs index ca6f24bbe..6caad239f 100644 --- a/src/numeric/impl_numeric.rs +++ b/src/numeric/impl_numeric.rs @@ -30,8 +30,7 @@ where /// assert_eq!(a.sum(), 10.); /// ``` pub fn sum(&self) -> A - where A: Clone + Add + num_traits::Zero - { + where A: Clone + Add + num_traits::Zero { if let Some(slc) = self.as_slice_memory_order() { return numeric_util::unrolled_fold(slc, A::zero, A::add); } @@ -51,8 +50,7 @@ where /// *This method has been renamed to `.sum()`* #[deprecated(note = "renamed to `sum`", since = "0.15.0")] pub fn scalar_sum(&self) -> A - where A: Clone + Add + num_traits::Zero - { + where A: Clone + Add + num_traits::Zero { self.sum() } @@ -70,8 +68,7 @@ where /// /// [arithmetic mean]: https://en.wikipedia.org/wiki/Arithmetic_mean pub fn mean(&self) -> Option - where A: Clone + FromPrimitive + Add + Div + Zero - { + where A: Clone + FromPrimitive + Add + Div + Zero { let n_elements = self.len(); if n_elements == 0 { None @@ -91,8 +88,7 @@ where /// assert_eq!(a.product(), 24.); /// ``` pub fn product(&self) -> A - where A: Clone + Mul + num_traits::One - { + where A: Clone + Mul + num_traits::One { if let Some(slc) = self.as_slice_memory_order() { return numeric_util::unrolled_fold(slc, A::one, A::mul); } @@ -149,8 +145,7 @@ where #[track_caller] #[cfg(feature = "std")] pub fn var(&self, ddof: A) -> A - where A: Float + FromPrimitive - { + where A: Float + FromPrimitive { let zero = A::from_usize(0).expect("Converting 0 to `A` must not fail."); let n = A::from_usize(self.len()).expect("Converting length to `A` must not fail."); assert!( @@ -214,8 +209,7 @@ where #[track_caller] #[cfg(feature = "std")] pub fn std(&self, ddof: A) -> A - where A: Float + FromPrimitive - 
{ + where A: Float + FromPrimitive { self.var(ddof).sqrt() } diff --git a/src/numeric_util.rs b/src/numeric_util.rs index 9d5ce66c5..1ed6d75a5 100644 --- a/src/numeric_util.rs +++ b/src/numeric_util.rs @@ -54,8 +54,7 @@ where /// /// `xs` and `ys` must be the same length pub fn unrolled_dot(xs: &[A], ys: &[A]) -> A -where A: LinalgScalar -{ +where A: LinalgScalar { debug_assert_eq!(xs.len(), ys.len()); // eightfold unrolled so that floating point can be vectorized // (even with strict floating point accuracy semantics) @@ -96,8 +95,7 @@ where A: LinalgScalar /// /// `xs` and `ys` must be the same length pub fn unrolled_eq(xs: &[A], ys: &[B]) -> bool -where A: PartialEq -{ +where A: PartialEq { debug_assert_eq!(xs.len(), ys.len()); // eightfold unrolled for performance (this is not done by llvm automatically) let len = cmp::min(xs.len(), ys.len()); diff --git a/src/order.rs b/src/order.rs index a52a32e2c..4ab8c84e8 100644 --- a/src/order.rs +++ b/src/order.rs @@ -30,16 +30,14 @@ /// or "Fortran" order. 
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[non_exhaustive] -pub enum Order -{ +pub enum Order { /// Row major or "C" order RowMajor, /// Column major or "F" order ColumnMajor, } -impl Order -{ +impl Order { /// "C" is an alias for row major ordering pub const C: Order = Order::RowMajor; @@ -48,8 +46,7 @@ impl Order /// Return true if input is Order::RowMajor, false otherwise #[inline] - pub fn is_row_major(self) -> bool - { + pub fn is_row_major(self) -> bool { match self { Order::RowMajor => true, Order::ColumnMajor => false, @@ -58,15 +55,13 @@ impl Order /// Return true if input is Order::ColumnMajor, false otherwise #[inline] - pub fn is_column_major(self) -> bool - { + pub fn is_column_major(self) -> bool { !self.is_row_major() } /// Return Order::RowMajor if the input is true, Order::ColumnMajor otherwise #[inline] - pub fn row_major(row_major: bool) -> Order - { + pub fn row_major(row_major: bool) -> Order { if row_major { Order::RowMajor } else { @@ -76,15 +71,13 @@ impl Order /// Return Order::ColumnMajor if the input is true, Order::RowMajor otherwise #[inline] - pub fn column_major(column_major: bool) -> Order - { + pub fn column_major(column_major: bool) -> Order { Self::row_major(!column_major) } /// Return the transpose: row major becomes column major and vice versa. #[inline] - pub fn transpose(self) -> Order - { + pub fn transpose(self) -> Order { match self { Order::RowMajor => Order::ColumnMajor, Order::ColumnMajor => Order::RowMajor, diff --git a/src/parallel/impl_par_methods.rs b/src/parallel/impl_par_methods.rs index b3fbdedc8..7bb513f21 100644 --- a/src/parallel/impl_par_methods.rs +++ b/src/parallel/impl_par_methods.rs @@ -22,8 +22,7 @@ where /// /// Elements are visited in arbitrary order. 
pub fn par_map_inplace(&mut self, f: F) - where F: Fn(&mut A) + Sync + Send - { + where F: Fn(&mut A) + Sync + Send { self.view_mut().into_par_iter().for_each(f) } diff --git a/src/parallel/into_impls.rs b/src/parallel/into_impls.rs index 75bded7de..c1a5388fd 100644 --- a/src/parallel/into_impls.rs +++ b/src/parallel/into_impls.rs @@ -11,8 +11,7 @@ where { type Item = &'a A; type Iter = Parallel>; - fn into_par_iter(self) -> Self::Iter - { + fn into_par_iter(self) -> Self::Iter { self.view().into_par_iter() } } @@ -26,8 +25,7 @@ where { type Item = &'a A; type Iter = Parallel>; - fn into_par_iter(self) -> Self::Iter - { + fn into_par_iter(self) -> Self::Iter { self.view().into_par_iter() } } @@ -40,8 +38,7 @@ where { type Item = &'a mut A; type Iter = Parallel>; - fn into_par_iter(self) -> Self::Iter - { + fn into_par_iter(self) -> Self::Iter { self.view_mut().into_par_iter() } } @@ -55,8 +52,7 @@ where { type Item = &'a mut A; type Iter = Parallel>; - fn into_par_iter(self) -> Self::Iter - { + fn into_par_iter(self) -> Self::Iter { self.view_mut().into_par_iter() } } diff --git a/src/parallel/mod.rs b/src/parallel/mod.rs index 0c84baa91..e5a41ac02 100644 --- a/src/parallel/mod.rs +++ b/src/parallel/mod.rs @@ -123,8 +123,7 @@ use crate::iter::{AxisChunksIter, AxisChunksIterMut, AxisIter, AxisIterMut}; use crate::{ArcArray, Array, ArrayBase, ArrayView, ArrayViewMut, Zip}; /// Into- traits for creating parallelized iterators and/or using [`par_azip!`] -pub mod prelude -{ +pub mod prelude { #[doc(no_inline)] pub use rayon::prelude::{ IndexedParallelIterator, diff --git a/src/parallel/par.rs b/src/parallel/par.rs index b59af4c8e..5caef372a 100644 --- a/src/parallel/par.rs +++ b/src/parallel/par.rs @@ -19,8 +19,7 @@ use crate::{ArrayView, ArrayViewMut}; /// Parallel iterator wrapper. #[derive(Copy, Clone, Debug)] -pub struct Parallel -{ +pub struct Parallel { iter: I, min_len: usize, } @@ -317,8 +316,7 @@ where D: Dimension /// to begin with. 
/// /// ***Panics*** if `min_len` is zero. - pub fn with_min_len(self, min_len: usize) -> Self - { + pub fn with_min_len(self, min_len: usize) -> Self { assert_ne!(min_len, 0, "Minimum number of elements must at least be one to avoid splitting off empty tasks."); Self { min_len, ..self } @@ -327,8 +325,7 @@ where D: Dimension /// A parallel iterator (unindexed) that produces the splits of the array /// or producer `P`. -pub(crate) struct ParallelSplits

-{ +pub(crate) struct ParallelSplits

{ pub(crate) iter: P, pub(crate) max_splits: usize, } @@ -339,13 +336,11 @@ where P: SplitPreference + Send type Item = P; fn drive_unindexed(self, consumer: C) -> C::Result - where C: UnindexedConsumer - { + where C: UnindexedConsumer { bridge_unindexed(self, consumer) } - fn opt_len(&self) -> Option - { + fn opt_len(&self) -> Option { None } } @@ -355,8 +350,7 @@ where P: SplitPreference + Send { type Item = P; - fn split(self) -> (Self, Option) - { + fn split(self) -> (Self, Option) { if self.max_splits == 0 || !self.iter.can_split() { return (self, None); } @@ -374,8 +368,7 @@ where P: SplitPreference + Send } fn fold_with(self, folder: Fold) -> Fold - where Fold: Folder - { + where Fold: Folder { folder.consume(self.iter) } } diff --git a/src/parallel/send_producer.rs b/src/parallel/send_producer.rs index ecfb77af0..23d6cd475 100644 --- a/src/parallel/send_producer.rs +++ b/src/parallel/send_producer.rs @@ -4,35 +4,28 @@ use std::ops::{Deref, DerefMut}; /// An NdProducer that is unconditionally `Send`. #[repr(transparent)] -pub(crate) struct SendProducer -{ +pub(crate) struct SendProducer { inner: T, } -impl SendProducer -{ +impl SendProducer { /// Create an unconditionally `Send` ndproducer from the producer - pub(crate) unsafe fn new(producer: T) -> Self - { + pub(crate) unsafe fn new(producer: T) -> Self { Self { inner: producer } } } unsafe impl

Send for SendProducer

{} -impl

Deref for SendProducer

-{ +impl

Deref for SendProducer

{ type Target = P; - fn deref(&self) -> &P - { + fn deref(&self) -> &P { &self.inner } } -impl

DerefMut for SendProducer

-{ - fn deref_mut(&mut self) -> &mut P - { +impl

DerefMut for SendProducer

{ + fn deref_mut(&mut self) -> &mut P { &mut self.inner } } @@ -48,55 +41,46 @@ where P: NdProducer private_impl! {} #[inline(always)] - fn raw_dim(&self) -> Self::Dim - { + fn raw_dim(&self) -> Self::Dim { self.inner.raw_dim() } #[inline(always)] - fn equal_dim(&self, dim: &Self::Dim) -> bool - { + fn equal_dim(&self, dim: &Self::Dim) -> bool { self.inner.equal_dim(dim) } #[inline(always)] - fn as_ptr(&self) -> Self::Ptr - { + fn as_ptr(&self) -> Self::Ptr { self.inner.as_ptr() } #[inline(always)] - fn layout(&self) -> Layout - { + fn layout(&self) -> Layout { self.inner.layout() } #[inline(always)] - unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item - { + unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item { self.inner.as_ref(ptr) } #[inline(always)] - unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr - { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr { self.inner.uget_ptr(i) } #[inline(always)] - fn stride_of(&self, axis: Axis) -> Self::Stride - { + fn stride_of(&self, axis: Axis) -> Self::Stride { self.inner.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride - { + fn contiguous_stride(&self) -> Self::Stride { self.inner.contiguous_stride() } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) - { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { let (a, b) = self.inner.split_at(axis, index); (Self { inner: a }, Self { inner: b }) } diff --git a/src/partial.rs b/src/partial.rs index 99aba75a8..6054b6ba8 100644 --- a/src/partial.rs +++ b/src/partial.rs @@ -12,16 +12,14 @@ use std::ptr; /// it is the owner of the elements, but not the allocation, /// and will drop the elements on drop. 
#[must_use] -pub(crate) struct Partial -{ +pub(crate) struct Partial { /// Data pointer ptr: *mut T, /// Current length pub(crate) len: usize, } -impl Partial -{ +impl Partial { /// Create an empty partial for this data pointer /// /// ## Safety @@ -31,14 +29,12 @@ impl Partial /// the `len` elements following it valid. /// /// The Partial has an accessible length field which must only be modified in trusted code. - pub(crate) unsafe fn new(ptr: *mut T) -> Self - { + pub(crate) unsafe fn new(ptr: *mut T) -> Self { Self { ptr, len: 0 } } #[cfg(feature = "rayon")] - pub(crate) fn stub() -> Self - { + pub(crate) fn stub() -> Self { Self { len: 0, ptr: ptr::null_mut(), @@ -46,14 +42,12 @@ impl Partial } #[cfg(feature = "rayon")] - pub(crate) fn is_stub(&self) -> bool - { + pub(crate) fn is_stub(&self) -> bool { self.ptr.is_null() } /// Release Partial's ownership of the written elements, and return the current length - pub(crate) fn release_ownership(mut self) -> usize - { + pub(crate) fn release_ownership(mut self) -> usize { let ret = self.len; self.len = 0; ret @@ -62,8 +56,7 @@ impl Partial #[cfg(feature = "rayon")] /// Merge if they are in order (left to right) and contiguous. /// Skips merge if T does not need drop. - pub(crate) fn try_merge(mut left: Self, right: Self) -> Self - { + pub(crate) fn try_merge(mut left: Self, right: Self) -> Self { if !std::mem::needs_drop::() { return left; } @@ -84,10 +77,8 @@ impl Partial unsafe impl Send for Partial where T: Send {} -impl Drop for Partial -{ - fn drop(&mut self) - { +impl Drop for Partial { + fn drop(&mut self) { if !self.ptr.is_null() { unsafe { ptr::drop_in_place(alloc::slice::from_raw_parts_mut(self.ptr, self.len)); diff --git a/src/shape_builder.rs b/src/shape_builder.rs index 8b25f71e7..f373e0e4f 100644 --- a/src/shape_builder.rs +++ b/src/shape_builder.rs @@ -6,8 +6,7 @@ use crate::Dimension; /// /// Either c- or f- memory ordered (*c* a.k.a *row major* is the default). 
#[derive(Copy, Clone, Debug)] -pub struct Shape -{ +pub struct Shape { /// Shape (axis lengths) pub(crate) dim: D, /// Strides can only be C or F here @@ -17,18 +16,15 @@ pub struct Shape #[derive(Copy, Clone, Debug)] pub(crate) enum Contiguous {} -impl Shape -{ - pub(crate) fn is_c(&self) -> bool - { +impl Shape { + pub(crate) fn is_c(&self) -> bool { matches!(self.strides, Strides::C) } } /// An array shape of n dimensions in c-order, f-order or custom strides. #[derive(Copy, Clone, Debug)] -pub struct StrideShape -{ +pub struct StrideShape { pub(crate) dim: D, pub(crate) strides: Strides, } @@ -37,21 +33,18 @@ impl StrideShape where D: Dimension { /// Return a reference to the dimension - pub fn raw_dim(&self) -> &D - { + pub fn raw_dim(&self) -> &D { &self.dim } /// Return the size of the shape in number of elements - pub fn size(&self) -> usize - { + pub fn size(&self) -> usize { self.dim.size() } } /// Stride description #[derive(Copy, Clone, Debug)] -pub(crate) enum Strides -{ +pub(crate) enum Strides { /// Row-major ("C"-order) C, /// Column-major ("F"-order) @@ -60,12 +53,10 @@ pub(crate) enum Strides Custom(D), } -impl Strides -{ +impl Strides { /// Return strides for `dim` (computed from dimension if c/f, else return the custom stride) pub(crate) fn strides_for_dim(self, dim: &D) -> D - where D: Dimension - { + where D: Dimension { match self { Strides::C => dim.default_strides(), Strides::F => dim.fortran_strides(), @@ -82,8 +73,7 @@ impl Strides } } - pub(crate) fn is_custom(&self) -> bool - { + pub(crate) fn is_custom(&self) -> bool { matches!(*self, Strides::Custom(_)) } } @@ -93,8 +83,7 @@ impl Strides /// /// This trait is used together with array constructor methods like /// `Array::from_shape_vec`. -pub trait ShapeBuilder -{ +pub trait ShapeBuilder { type Dim: Dimension; type Strides; @@ -108,8 +97,7 @@ impl From for Shape where D: Dimension { /// Create a `Shape` from `dimension`, using the default memory layout. 
- fn from(dimension: D) -> Shape - { + fn from(dimension: D) -> Shape { dimension.into_shape_with_order() } } @@ -119,8 +107,7 @@ where D: Dimension, T: ShapeBuilder, { - fn from(value: T) -> Self - { + fn from(value: T) -> Self { let shape = value.into_shape_with_order(); let st = if shape.is_c() { Strides::C } else { Strides::F }; StrideShape { @@ -135,23 +122,19 @@ where T: IntoDimension { type Dim = T::Dim; type Strides = T; - fn into_shape_with_order(self) -> Shape - { + fn into_shape_with_order(self) -> Shape { Shape { dim: self.into_dimension(), strides: Strides::C, } } - fn f(self) -> Shape - { + fn f(self) -> Shape { self.set_f(true) } - fn set_f(self, is_f: bool) -> Shape - { + fn set_f(self, is_f: bool) -> Shape { self.into_shape_with_order().set_f(is_f) } - fn strides(self, st: T) -> StrideShape - { + fn strides(self, st: T) -> StrideShape { self.into_shape_with_order().strides(st.into_dimension()) } } @@ -162,24 +145,20 @@ where D: Dimension type Dim = D; type Strides = D; - fn into_shape_with_order(self) -> Shape - { + fn into_shape_with_order(self) -> Shape { self } - fn f(self) -> Self - { + fn f(self) -> Self { self.set_f(true) } - fn set_f(mut self, is_f: bool) -> Self - { + fn set_f(mut self, is_f: bool) -> Self { self.strides = if !is_f { Strides::C } else { Strides::F }; self } - fn strides(self, st: D) -> StrideShape - { + fn strides(self, st: D) -> StrideShape { StrideShape { dim: self.dim, strides: Strides::Custom(st), @@ -191,13 +170,11 @@ impl Shape where D: Dimension { /// Return a reference to the dimension - pub fn raw_dim(&self) -> &D - { + pub fn raw_dim(&self) -> &D { &self.dim } /// Return the size of the shape in number of elements - pub fn size(&self) -> usize - { + pub fn size(&self) -> usize { self.dim.size() } } @@ -210,8 +187,7 @@ where D: Dimension /// (optionally) an ordering argument. /// /// See for example [`.to_shape()`](crate::ArrayBase::to_shape). 
-pub trait ShapeArg -{ +pub trait ShapeArg { type Dim: Dimension; fn into_shape_and_order(self) -> (Self::Dim, Option); } @@ -221,8 +197,7 @@ where T: IntoDimension { type Dim = T::Dim; - fn into_shape_and_order(self) -> (Self::Dim, Option) - { + fn into_shape_and_order(self) -> (Self::Dim, Option) { (self.into_dimension(), None) } } @@ -232,8 +207,7 @@ where T: IntoDimension { type Dim = T::Dim; - fn into_shape_and_order(self) -> (Self::Dim, Option) - { + fn into_shape_and_order(self) -> (Self::Dim, Option) { (self.0.into_dimension(), Some(self.1)) } } diff --git a/src/slice.rs b/src/slice.rs index 9e6acc449..8d6fc03d8 100644 --- a/src/slice.rs +++ b/src/slice.rs @@ -36,8 +36,7 @@ use std::ops::{Deref, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, Rang /// reverse order. It can also be created with `Slice::from(a..).step_by(-1)`. /// The Python equivalent is `[a::-1]`. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub struct Slice -{ +pub struct Slice { /// start index; negative are counted from the back of the axis pub start: isize, /// end index; negative are counted from the back of the axis; when not present @@ -47,8 +46,7 @@ pub struct Slice pub step: isize, } -impl Slice -{ +impl Slice { /// Create a new `Slice` with the given extents. /// /// See also the `From` impls, converting from ranges; for example @@ -56,8 +54,7 @@ impl Slice /// /// `step` must be nonzero. /// (This method checks with a debug assertion that `step` is not zero.) - pub fn new(start: isize, end: Option, step: isize) -> Slice - { + pub fn new(start: isize, end: Option, step: isize) -> Slice { debug_assert_ne!(step, 0, "Slice::new: step must be nonzero"); Slice { start, end, step } } @@ -68,8 +65,7 @@ impl Slice /// `step` must be nonzero. /// (This method checks with a debug assertion that `step` is not zero.) 
#[inline] - pub fn step_by(self, step: isize) -> Self - { + pub fn step_by(self, step: isize) -> Self { debug_assert_ne!(step, 0, "Slice::step_by: step must be nonzero"); Slice { step: self.step * step, @@ -113,13 +109,11 @@ pub struct NewAxis; /// with `SliceInfoElem::from(NewAxis)`. The Python equivalent is /// `[np.newaxis]`. The macro equivalent is `s![NewAxis]`. #[derive(Debug, PartialEq, Eq, Hash)] -pub enum SliceInfoElem -{ +pub enum SliceInfoElem { /// A range with step size. `end` is an exclusive index. Negative `start` /// or `end` indexes are counted from the back of the axis. If `end` is /// `None`, the slice extends to the end of the axis. - Slice - { + Slice { /// start index; negative are counted from the back of the axis start: isize, /// end index; negative are counted from the back of the axis; when not present @@ -136,31 +130,25 @@ pub enum SliceInfoElem copy_and_clone! {SliceInfoElem} -impl SliceInfoElem -{ +impl SliceInfoElem { /// Returns `true` if `self` is a `Slice` value. - pub fn is_slice(&self) -> bool - { + pub fn is_slice(&self) -> bool { matches!(self, SliceInfoElem::Slice { .. }) } /// Returns `true` if `self` is an `Index` value. - pub fn is_index(&self) -> bool - { + pub fn is_index(&self) -> bool { matches!(self, SliceInfoElem::Index(_)) } /// Returns `true` if `self` is a `NewAxis` value. 
- pub fn is_new_axis(&self) -> bool - { + pub fn is_new_axis(&self) -> bool { matches!(self, SliceInfoElem::NewAxis) } } -impl fmt::Display for SliceInfoElem -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result - { +impl fmt::Display for SliceInfoElem { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { SliceInfoElem::Index(index) => write!(f, "{}", index)?, SliceInfoElem::Slice { start, end, step } => { @@ -248,11 +236,9 @@ impl_slice_variant_from_range!(SliceInfoElem, SliceInfoElem::Slice, isize); impl_slice_variant_from_range!(SliceInfoElem, SliceInfoElem::Slice, usize); impl_slice_variant_from_range!(SliceInfoElem, SliceInfoElem::Slice, i32); -impl From for Slice -{ +impl From for Slice { #[inline] - fn from(_: RangeFull) -> Slice - { + fn from(_: RangeFull) -> Slice { Slice { start: 0, end: None, @@ -261,11 +247,9 @@ impl From for Slice } } -impl From for SliceInfoElem -{ +impl From for SliceInfoElem { #[inline] - fn from(_: RangeFull) -> SliceInfoElem - { + fn from(_: RangeFull) -> SliceInfoElem { SliceInfoElem::Slice { start: 0, end: None, @@ -274,11 +258,9 @@ impl From for SliceInfoElem } } -impl From for SliceInfoElem -{ +impl From for SliceInfoElem { #[inline] - fn from(s: Slice) -> SliceInfoElem - { + fn from(s: Slice) -> SliceInfoElem { SliceInfoElem::Slice { start: s.start, end: s.end, @@ -301,11 +283,9 @@ impl_sliceinfoelem_from_index!(isize); impl_sliceinfoelem_from_index!(usize); impl_sliceinfoelem_from_index!(i32); -impl From for SliceInfoElem -{ +impl From for SliceInfoElem { #[inline] - fn from(_: NewAxis) -> SliceInfoElem - { + fn from(_: NewAxis) -> SliceInfoElem { SliceInfoElem::NewAxis } } @@ -317,8 +297,7 @@ impl From for SliceInfoElem /// consistent with the `&[SliceInfoElem]` returned by `self.as_ref()` and that /// `self.as_ref()` always returns the same value when called multiple times. 
#[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait SliceArg: AsRef<[SliceInfoElem]> -{ +pub unsafe trait SliceArg: AsRef<[SliceInfoElem]> { /// Dimensionality of the output array. type OutDim: Dimension; @@ -338,13 +317,11 @@ where { type OutDim = T::OutDim; - fn in_ndim(&self) -> usize - { + fn in_ndim(&self) -> usize { T::in_ndim(self) } - fn out_ndim(&self) -> usize - { + fn out_ndim(&self) -> usize { T::out_ndim(self) } @@ -388,30 +365,25 @@ where { type OutDim = Dout; - fn in_ndim(&self) -> usize - { + fn in_ndim(&self) -> usize { self.in_ndim() } - fn out_ndim(&self) -> usize - { + fn out_ndim(&self) -> usize { self.out_ndim() } private_impl! {} } -unsafe impl SliceArg for [SliceInfoElem] -{ +unsafe impl SliceArg for [SliceInfoElem] { type OutDim = IxDyn; - fn in_ndim(&self) -> usize - { + fn in_ndim(&self) -> usize { self.iter().filter(|s| !s.is_new_axis()).count() } - fn out_ndim(&self) -> usize - { + fn out_ndim(&self) -> usize { self.iter().filter(|s| !s.is_index()).count() } @@ -429,8 +401,7 @@ unsafe impl SliceArg for [SliceInfoElem] /// /// [`.slice()`]: crate::ArrayBase::slice #[derive(Debug)] -pub struct SliceInfo -{ +pub struct SliceInfo { in_dim: PhantomData, out_dim: PhantomData, indices: T, @@ -442,8 +413,7 @@ where Dout: Dimension, { type Target = T; - fn deref(&self) -> &Self::Target - { + fn deref(&self) -> &Self::Target { &self.indices } } @@ -484,8 +454,7 @@ where #[doc(hidden)] pub unsafe fn new_unchecked( indices: T, in_dim: PhantomData, out_dim: PhantomData, - ) -> SliceInfo - { + ) -> SliceInfo { if cfg!(debug_assertions) { check_dims_for_sliceinfo::(indices.as_ref()) .expect("`Din` and `Dout` must be consistent with `indices`."); @@ -507,8 +476,7 @@ where /// /// The caller must ensure `indices.as_ref()` always returns the same value /// when called multiple times. 
- pub unsafe fn new(indices: T) -> Result, ShapeError> - { + pub unsafe fn new(indices: T) -> Result, ShapeError> { check_dims_for_sliceinfo::(indices.as_ref())?; Ok(SliceInfo { in_dim: PhantomData, @@ -523,8 +491,7 @@ where /// If `Din` is a fixed-size dimension type, then this is equivalent to /// `Din::NDIM.unwrap()`. Otherwise, the value is calculated by iterating /// over the `SliceInfoElem` elements. - pub fn in_ndim(&self) -> usize - { + pub fn in_ndim(&self) -> usize { if let Some(ndim) = Din::NDIM { ndim } else { @@ -539,8 +506,7 @@ where /// If `Dout` is a fixed-size dimension type, then this is equivalent to /// `Dout::NDIM.unwrap()`. Otherwise, the value is calculated by iterating /// over the `SliceInfoElem` elements. - pub fn out_ndim(&self) -> usize - { + pub fn out_ndim(&self) -> usize { if let Some(ndim) = Dout::NDIM { ndim } else { @@ -556,8 +522,7 @@ where { type Error = ShapeError; - fn try_from(indices: &'a [SliceInfoElem]) -> Result, ShapeError> - { + fn try_from(indices: &'a [SliceInfoElem]) -> Result, ShapeError> { unsafe { // This is okay because `&[SliceInfoElem]` always returns the same // value for `.as_ref()`. @@ -573,8 +538,7 @@ where { type Error = ShapeError; - fn try_from(indices: Vec) -> Result, Din, Dout>, ShapeError> - { + fn try_from(indices: Vec) -> Result, Din, Dout>, ShapeError> { unsafe { // This is okay because `Vec` always returns the same value for // `.as_ref()`. 
@@ -621,8 +585,7 @@ where Din: Dimension, Dout: Dimension, { - fn as_ref(&self) -> &[SliceInfoElem] - { + fn as_ref(&self) -> &[SliceInfoElem] { self.indices.as_ref() } } @@ -633,8 +596,7 @@ where Din: Dimension, Dout: Dimension, { - fn from(info: &'a SliceInfo) -> SliceInfo<&'a [SliceInfoElem], Din, Dout> - { + fn from(info: &'a SliceInfo) -> SliceInfo<&'a [SliceInfoElem], Din, Dout> { SliceInfo { in_dim: info.in_dim, out_dim: info.out_dim, @@ -657,8 +619,7 @@ where Din: Dimension, Dout: Dimension, { - fn clone(&self) -> Self - { + fn clone(&self) -> Self { SliceInfo { in_dim: PhantomData, out_dim: PhantomData, @@ -669,22 +630,19 @@ where /// Trait for determining dimensionality of input and output for [`s!`] macro. #[doc(hidden)] -pub trait SliceNextDim -{ +pub trait SliceNextDim { /// Number of dimensions that this slicing argument consumes in the input array. type InDim: Dimension; /// Number of dimensions that this slicing argument produces in the output array. type OutDim: Dimension; fn next_in_dim(&self, _: PhantomData) -> PhantomData<>::Output> - where D: Dimension + DimAdd - { + where D: Dimension + DimAdd { PhantomData } fn next_out_dim(&self, _: PhantomData) -> PhantomData<>::Output> - where D: Dimension + DimAdd - { + where D: Dimension + DimAdd { PhantomData } } @@ -946,8 +904,7 @@ where { type Output = (ArrayViewMut<'a, A, I0::OutDim>,); - fn multi_slice_move(&self, view: ArrayViewMut<'a, A, D>) -> Self::Output - { + fn multi_slice_move(&self, view: ArrayViewMut<'a, A, D>) -> Self::Output { (view.slice_move(&self.0),) } @@ -1009,8 +966,7 @@ where { type Output = T::Output; - fn multi_slice_move(&self, view: ArrayViewMut<'a, A, D>) -> Self::Output - { + fn multi_slice_move(&self, view: ArrayViewMut<'a, A, D>) -> Self::Output { T::multi_slice_move(self, view) } diff --git a/src/split_at.rs b/src/split_at.rs index 4af1403c0..67c46d06f 100644 --- a/src/split_at.rs +++ b/src/split_at.rs @@ -1,20 +1,17 @@ use crate::imp_prelude::*; /// Arrays and similar 
that can be split along an axis -pub(crate) trait SplitAt -{ +pub(crate) trait SplitAt { fn split_at(self, axis: Axis, index: usize) -> (Self, Self) where Self: Sized; } -pub(crate) trait SplitPreference: SplitAt -{ +pub(crate) trait SplitPreference: SplitAt { #[allow(dead_code)] // used only when Rayon support is enabled fn can_split(&self) -> bool; fn split_preference(&self) -> (Axis, usize); fn split(self) -> (Self, Self) - where Self: Sized - { + where Self: Sized { let (axis, index) = self.split_preference(); self.split_at(axis, index) } @@ -23,8 +20,7 @@ pub(crate) trait SplitPreference: SplitAt impl SplitAt for D where D: Dimension { - fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) - { + fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { let mut d1 = self; let mut d2 = d1.clone(); let i = axis.index(); @@ -38,8 +34,7 @@ where D: Dimension impl<'a, A, D> SplitAt for ArrayViewMut<'a, A, D> where D: Dimension { - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) - { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { self.split_at(axis, index) } } @@ -47,8 +42,7 @@ where D: Dimension impl SplitAt for RawArrayViewMut where D: Dimension { - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) - { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { self.split_at(axis, index) } } diff --git a/src/zip/mod.rs b/src/zip/mod.rs index aadc93032..82edb3d61 100644 --- a/src/zip/mod.rs +++ b/src/zip/mod.rs @@ -51,8 +51,7 @@ where E: IntoDimension } /// Compute `Layout` hints for array shape dim, strides -fn array_layout(dim: &D, strides: &D) -> Layout -{ +fn array_layout(dim: &D, strides: &D) -> Layout { let n = dim.ndim(); if dimension::is_layout_c(dim, strides) { // effectively one-dimensional => C and F layout compatible @@ -81,8 +80,7 @@ where S: RawData, D: Dimension, { - pub(crate) fn layout_impl(&self) -> Layout - { + pub(crate) fn layout_impl(&self) -> Layout { array_layout(&self.dim, &self.strides) } } @@ 
-93,8 +91,7 @@ where D: Dimension, { type Output = ArrayView<'a, A, E::Dim>; - fn broadcast_unwrap(self, shape: E) -> Self::Output - { + fn broadcast_unwrap(self, shape: E) -> Self::Output { #[allow(clippy::needless_borrow)] let res: ArrayView<'_, A, E::Dim> = (&self).broadcast_unwrap(shape.into_dimension()); unsafe { ArrayView::new(res.ptr, res.dim, res.strides) } @@ -102,8 +99,7 @@ where private_impl! {} } -trait ZippableTuple: Sized -{ +trait ZippableTuple: Sized { type Item; type Ptr: OffsetTuple + Copy; type Dim: Dimension; @@ -192,8 +188,7 @@ trait ZippableTuple: Sized /// ``` #[derive(Debug, Clone)] #[must_use = "zipping producers is lazy and does nothing unless consumed"] -pub struct Zip -{ +pub struct Zip { parts: Parts, dimension: D, layout: Layout, @@ -212,8 +207,7 @@ where /// The Zip will take the exact dimension of `p` and all inputs /// must have the same dimensions (or be broadcast to them). pub fn from(p: IP) -> Self - where IP: IntoNdProducer - { + where IP: IntoNdProducer { let array = p.into_producer(); let dim = array.raw_dim(); let layout = array.layout(); @@ -237,8 +231,7 @@ where /// /// *Note:* Indexed zip has overhead. pub fn indexed(p: IP) -> Self - where IP: IntoNdProducer - { + where IP: IntoNdProducer { let array = p.into_producer(); let dim = array.raw_dim(); Zip::from(indices(dim)).and(array) @@ -263,8 +256,7 @@ impl Zip where D: Dimension { /// Return a the number of element tuples in the Zip - pub fn size(&self) -> usize - { + pub fn size(&self) -> usize { self.dimension.size() } @@ -272,21 +264,18 @@ where D: Dimension /// /// ***Panics*** if `axis` is out of bounds. 
#[track_caller] - fn len_of(&self, axis: Axis) -> usize - { + fn len_of(&self, axis: Axis) -> usize { self.dimension[axis.index()] } - fn prefer_f(&self) -> bool - { + fn prefer_f(&self) -> bool { !self.layout.is(Layout::CORDER) && (self.layout.is(Layout::FORDER) || self.layout_tendency < 0) } /// Return an *approximation* to the max stride axis; if /// component arrays disagree, there may be no choice better than the /// others. - fn max_stride_axis(&self) -> Axis - { + fn max_stride_axis(&self) -> Axis { let i = if self.prefer_f() { self.dimension .slice() @@ -426,8 +415,7 @@ where D: Dimension } #[cfg(feature = "rayon")] - pub(crate) fn uninitialized_for_current_layout(&self) -> Array, D> - { + pub(crate) fn uninitialized_for_current_layout(&self) -> Array, D> { let is_f = self.prefer_f(); Array::uninit(self.dimension.clone().set_f(is_f)) } @@ -442,8 +430,7 @@ where /// Debug assert traversal order is like c (including 1D case) // Method placement: only used for binary Zip at the moment. #[inline] - pub(crate) fn debug_assert_c_order(self) -> Self - { + pub(crate) fn debug_assert_c_order(self) -> Self { debug_assert!(self.layout.is(Layout::CORDER) || self.layout_tendency >= 0 || self.dimension.slice().iter().filter(|&&d| d > 1).count() <= 1, "Assertion failed: traversal is not c-order or 1D for \ @@ -468,17 +455,14 @@ impl Offset for *mut T { } */ -trait OffsetTuple -{ +trait OffsetTuple { type Args; unsafe fn stride_offset(self, stride: Self::Args, index: usize) -> Self; } -impl OffsetTuple for *mut T -{ +impl OffsetTuple for *mut T { type Args = isize; - unsafe fn stride_offset(self, stride: Self::Args, index: usize) -> Self - { + unsafe fn stride_offset(self, stride: Self::Args, index: usize) -> Self { self.offset(index as isize * stride) } } @@ -952,27 +936,23 @@ map_impl! { /// Value controlling the execution of `.fold_while` on `Zip`. 
#[derive(Debug, Copy, Clone)] -pub enum FoldWhile -{ +pub enum FoldWhile { /// Continue folding with this value Continue(T), /// Fold is complete and will return this value Done(T), } -impl FoldWhile -{ +impl FoldWhile { /// Return the inner value - pub fn into_inner(self) -> T - { + pub fn into_inner(self) -> T { match self { FoldWhile::Continue(x) | FoldWhile::Done(x) => x, } } /// Return true if it is `Done`, false if `Continue` - pub fn is_done(&self) -> bool - { + pub fn is_done(&self) -> bool { match *self { FoldWhile::Continue(_) => false, FoldWhile::Done(_) => true, diff --git a/src/zip/ndproducer.rs b/src/zip/ndproducer.rs index 1d1b3391b..b69626bcf 100644 --- a/src/zip/ndproducer.rs +++ b/src/zip/ndproducer.rs @@ -9,8 +9,7 @@ use alloc::vec::Vec; /// Slices and vectors can be used (equivalent to 1-dimensional array views). /// /// This trait is like `IntoIterator` for `NdProducers` instead of iterators. -pub trait IntoNdProducer -{ +pub trait IntoNdProducer { /// The element produced per iteration. type Item; /// Dimension type of the producer @@ -26,8 +25,7 @@ where P: NdProducer type Item = P::Item; type Dim = P::Dim; type Output = Self; - fn into_producer(self) -> Self::Output - { + fn into_producer(self) -> Self::Output { self } } @@ -52,8 +50,7 @@ where P: NdProducer /// *producing* multidimensional items). /// /// See also [`IntoNdProducer`] -pub trait NdProducer -{ +pub trait NdProducer { /// The element produced per iteration. type Item; // Internal use / Pointee type @@ -76,8 +73,7 @@ pub trait NdProducer /// Return the shape of the producer. fn raw_dim(&self) -> Self::Dim; #[doc(hidden)] - fn equal_dim(&self, dim: &Self::Dim) -> bool - { + fn equal_dim(&self, dim: &Self::Dim) -> bool { self.raw_dim() == *dim } #[doc(hidden)] @@ -97,28 +93,23 @@ pub trait NdProducer private_decl! {} } -pub trait Offset: Copy -{ +pub trait Offset: Copy { type Stride: Copy; unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self; private_decl! 
{} } -impl Offset for *const T -{ +impl Offset for *const T { type Stride = isize; - unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self - { + unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self { self.offset(s * (index as isize)) } private_impl! {} } -impl Offset for *mut T -{ +impl Offset for *mut T { type Stride = isize; - unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self - { + unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self { self.offset(s * (index as isize)) } private_impl! {} @@ -134,8 +125,7 @@ where type Item = &'a A; type Dim = D; type Output = ArrayView<'a, A, D>; - fn into_producer(self) -> Self::Output - { + fn into_producer(self) -> Self::Output { self.view() } } @@ -150,86 +140,72 @@ where type Item = &'a mut A; type Dim = D; type Output = ArrayViewMut<'a, A, D>; - fn into_producer(self) -> Self::Output - { + fn into_producer(self) -> Self::Output { self.view_mut() } } /// A slice is a one-dimensional producer -impl<'a, A: 'a> IntoNdProducer for &'a [A] -{ +impl<'a, A: 'a> IntoNdProducer for &'a [A] { type Item = ::Item; type Dim = Ix1; type Output = ArrayView1<'a, A>; - fn into_producer(self) -> Self::Output - { + fn into_producer(self) -> Self::Output { <_>::from(self) } } /// A mutable slice is a mutable one-dimensional producer -impl<'a, A: 'a> IntoNdProducer for &'a mut [A] -{ +impl<'a, A: 'a> IntoNdProducer for &'a mut [A] { type Item = ::Item; type Dim = Ix1; type Output = ArrayViewMut1<'a, A>; - fn into_producer(self) -> Self::Output - { + fn into_producer(self) -> Self::Output { <_>::from(self) } } /// A one-dimensional array is a one-dimensional producer -impl<'a, A: 'a, const N: usize> IntoNdProducer for &'a [A; N] -{ +impl<'a, A: 'a, const N: usize> IntoNdProducer for &'a [A; N] { type Item = ::Item; type Dim = Ix1; type Output = ArrayView1<'a, A>; - fn into_producer(self) -> Self::Output - { + fn into_producer(self) -> Self::Output { <_>::from(self) } } /// A 
mutable one-dimensional array is a mutable one-dimensional producer -impl<'a, A: 'a, const N: usize> IntoNdProducer for &'a mut [A; N] -{ +impl<'a, A: 'a, const N: usize> IntoNdProducer for &'a mut [A; N] { type Item = ::Item; type Dim = Ix1; type Output = ArrayViewMut1<'a, A>; - fn into_producer(self) -> Self::Output - { + fn into_producer(self) -> Self::Output { <_>::from(self) } } /// A Vec is a one-dimensional producer -impl<'a, A: 'a> IntoNdProducer for &'a Vec -{ +impl<'a, A: 'a> IntoNdProducer for &'a Vec { type Item = ::Item; type Dim = Ix1; type Output = ArrayView1<'a, A>; - fn into_producer(self) -> Self::Output - { + fn into_producer(self) -> Self::Output { <_>::from(self) } } /// A mutable Vec is a mutable one-dimensional producer -impl<'a, A: 'a> IntoNdProducer for &'a mut Vec -{ +impl<'a, A: 'a> IntoNdProducer for &'a mut Vec { type Item = ::Item; type Dim = Ix1; type Output = ArrayViewMut1<'a, A>; - fn into_producer(self) -> Self::Output - { + fn into_producer(self) -> Self::Output { <_>::from(self) } } -impl<'a, A, D: Dimension> NdProducer for ArrayView<'a, A, D> -{ +impl<'a, A, D: Dimension> NdProducer for ArrayView<'a, A, D> { type Item = &'a A; type Dim = D; type Ptr = *mut A; @@ -237,55 +213,45 @@ impl<'a, A, D: Dimension> NdProducer for ArrayView<'a, A, D> private_impl! 
{} - fn raw_dim(&self) -> Self::Dim - { + fn raw_dim(&self) -> Self::Dim { self.raw_dim() } - fn equal_dim(&self, dim: &Self::Dim) -> bool - { + fn equal_dim(&self, dim: &Self::Dim) -> bool { self.dim.equal(dim) } - fn as_ptr(&self) -> *mut A - { + fn as_ptr(&self) -> *mut A { self.as_ptr() as _ } - fn layout(&self) -> Layout - { + fn layout(&self) -> Layout { self.layout_impl() } - unsafe fn as_ref(&self, ptr: *mut A) -> Self::Item - { + unsafe fn as_ref(&self, ptr: *mut A) -> Self::Item { &*ptr } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A - { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A { self.ptr.as_ptr().offset(i.index_unchecked(&self.strides)) } - fn stride_of(&self, axis: Axis) -> isize - { + fn stride_of(&self, axis: Axis) -> isize { self.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride - { + fn contiguous_stride(&self) -> Self::Stride { 1 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) - { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { self.split_at(axis, index) } } -impl<'a, A, D: Dimension> NdProducer for ArrayViewMut<'a, A, D> -{ +impl<'a, A, D: Dimension> NdProducer for ArrayViewMut<'a, A, D> { type Item = &'a mut A; type Dim = D; type Ptr = *mut A; @@ -293,55 +259,45 @@ impl<'a, A, D: Dimension> NdProducer for ArrayViewMut<'a, A, D> private_impl! 
{} - fn raw_dim(&self) -> Self::Dim - { + fn raw_dim(&self) -> Self::Dim { self.raw_dim() } - fn equal_dim(&self, dim: &Self::Dim) -> bool - { + fn equal_dim(&self, dim: &Self::Dim) -> bool { self.dim.equal(dim) } - fn as_ptr(&self) -> *mut A - { + fn as_ptr(&self) -> *mut A { self.as_ptr() as _ } - fn layout(&self) -> Layout - { + fn layout(&self) -> Layout { self.layout_impl() } - unsafe fn as_ref(&self, ptr: *mut A) -> Self::Item - { + unsafe fn as_ref(&self, ptr: *mut A) -> Self::Item { &mut *ptr } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A - { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A { self.ptr.as_ptr().offset(i.index_unchecked(&self.strides)) } - fn stride_of(&self, axis: Axis) -> isize - { + fn stride_of(&self, axis: Axis) -> isize { self.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride - { + fn contiguous_stride(&self) -> Self::Stride { 1 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) - { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { self.split_at(axis, index) } } -impl NdProducer for RawArrayView -{ +impl NdProducer for RawArrayView { type Item = *const A; type Dim = D; type Ptr = *const A; @@ -349,55 +305,45 @@ impl NdProducer for RawArrayView private_impl! 
{} - fn raw_dim(&self) -> Self::Dim - { + fn raw_dim(&self) -> Self::Dim { self.raw_dim() } - fn equal_dim(&self, dim: &Self::Dim) -> bool - { + fn equal_dim(&self, dim: &Self::Dim) -> bool { self.dim.equal(dim) } - fn as_ptr(&self) -> *const A - { + fn as_ptr(&self) -> *const A { self.as_ptr() } - fn layout(&self) -> Layout - { + fn layout(&self) -> Layout { self.layout_impl() } - unsafe fn as_ref(&self, ptr: *const A) -> *const A - { + unsafe fn as_ref(&self, ptr: *const A) -> *const A { ptr } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> *const A - { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> *const A { self.ptr.as_ptr().offset(i.index_unchecked(&self.strides)) } - fn stride_of(&self, axis: Axis) -> isize - { + fn stride_of(&self, axis: Axis) -> isize { self.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride - { + fn contiguous_stride(&self) -> Self::Stride { 1 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) - { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { self.split_at(axis, index) } } -impl NdProducer for RawArrayViewMut -{ +impl NdProducer for RawArrayViewMut { type Item = *mut A; type Dim = D; type Ptr = *mut A; @@ -405,49 +351,40 @@ impl NdProducer for RawArrayViewMut private_impl! 
{} - fn raw_dim(&self) -> Self::Dim - { + fn raw_dim(&self) -> Self::Dim { self.raw_dim() } - fn equal_dim(&self, dim: &Self::Dim) -> bool - { + fn equal_dim(&self, dim: &Self::Dim) -> bool { self.dim.equal(dim) } - fn as_ptr(&self) -> *mut A - { + fn as_ptr(&self) -> *mut A { self.as_ptr() as _ } - fn layout(&self) -> Layout - { + fn layout(&self) -> Layout { self.layout_impl() } - unsafe fn as_ref(&self, ptr: *mut A) -> *mut A - { + unsafe fn as_ref(&self, ptr: *mut A) -> *mut A { ptr } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A - { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A { self.ptr.as_ptr().offset(i.index_unchecked(&self.strides)) } - fn stride_of(&self, axis: Axis) -> isize - { + fn stride_of(&self, axis: Axis) -> isize { self.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride - { + fn contiguous_stride(&self) -> Self::Stride { 1 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) - { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { self.split_at(axis, index) } } diff --git a/tests/append.rs b/tests/append.rs index cf5397de1..131a1f0a5 100644 --- a/tests/append.rs +++ b/tests/append.rs @@ -2,8 +2,7 @@ use ndarray::prelude::*; use ndarray::{ErrorKind, ShapeError}; #[test] -fn push_row() -{ +fn push_row() { let mut a = Array::zeros((0, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -25,8 +24,7 @@ fn push_row() } #[test] -fn push_row_wrong_layout() -{ +fn push_row_wrong_layout() { let mut a = Array::zeros((0, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -58,8 +56,7 @@ fn push_row_wrong_layout() } #[test] -fn push_row_neg_stride_1() -{ +fn push_row_neg_stride_1() { let mut a = Array::zeros((0, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -102,8 +99,7 @@ fn push_row_neg_stride_1() } #[test] -fn 
push_row_neg_stride_2() -{ +fn push_row_neg_stride_2() { let mut a = Array::zeros((0, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -146,8 +142,7 @@ fn push_row_neg_stride_2() } #[test] -fn push_row_error() -{ +fn push_row_error() { let mut a = Array::zeros((3, 4)); assert_eq!(a.push_row(aview1(&[1.])), @@ -165,8 +160,7 @@ fn push_row_error() } #[test] -fn push_row_existing() -{ +fn push_row_existing() { let mut a = Array::zeros((1, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -190,8 +184,7 @@ fn push_row_existing() } #[test] -fn push_row_col_len_1() -{ +fn push_row_col_len_1() { // Test appending 1 row and then cols from shape 1 x 1 let mut a = Array::zeros((1, 1)); a.push_row(aview1(&[1.])).unwrap(); // shape 2 x 1 @@ -208,8 +201,7 @@ fn push_row_col_len_1() } #[test] -fn push_column() -{ +fn push_column() { let mut a = Array::zeros((4, 0)); a.push_column(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_column(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -221,8 +213,7 @@ fn push_column() } #[test] -fn append_array1() -{ +fn append_array1() { let mut a = Array::zeros((0, 4)); a.append(Axis(0), aview2(&[[0., 1., 2., 3.]])).unwrap(); println!("{:?}", a); @@ -246,8 +237,7 @@ fn append_array1() } #[test] -fn append_array_3d() -{ +fn append_array_3d() { let mut a = Array::zeros((0, 2, 2)); a.append(Axis(0), array![[[0, 1], [2, 3]]].view()).unwrap(); println!("{:?}", a); @@ -288,8 +278,7 @@ fn append_array_3d() } #[test] -fn test_append_2d() -{ +fn test_append_2d() { // create an empty array and append let mut a = Array::zeros((0, 4)); let ones = ArrayView::from(&[1.; 12]) @@ -325,8 +314,7 @@ fn test_append_2d() } #[test] -fn test_append_middle_axis() -{ +fn test_append_middle_axis() { // ensure we can append to Axis(1) by letting it become outermost let mut a = Array::::zeros((3, 0, 2)); a.append( @@ -371,8 +359,7 @@ fn test_append_middle_axis() } #[test] -fn 
test_append_zero_size() -{ +fn test_append_zero_size() { { let mut a = Array::::zeros((0, 0)); a.append(Axis(0), aview2(&[[]])).unwrap(); @@ -393,8 +380,7 @@ fn test_append_zero_size() } #[test] -fn push_row_neg_stride_3() -{ +fn push_row_neg_stride_3() { let mut a = Array::zeros((0, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.invert_axis(Axis(1)); @@ -405,8 +391,7 @@ fn push_row_neg_stride_3() } #[test] -fn push_row_ignore_strides_length_one_axes() -{ +fn push_row_ignore_strides_length_one_axes() { let strides = &[0, 1, 10, 20]; for invert in &[vec![], vec![0], vec![1], vec![0, 1]] { for &stride0 in strides { @@ -426,23 +411,20 @@ fn push_row_ignore_strides_length_one_axes() #[test] #[should_panic(expected = "IncompatibleShape")] -fn zero_dimensional_error1() -{ +fn zero_dimensional_error1() { let mut a = Array::zeros(()).into_dyn(); a.append(Axis(0), arr0(0).into_dyn().view()).unwrap(); } #[test] #[should_panic(expected = "IncompatibleShape")] -fn zero_dimensional_error2() -{ +fn zero_dimensional_error2() { let mut a = Array::zeros(()).into_dyn(); a.push(Axis(0), arr0(0).into_dyn().view()).unwrap(); } #[test] -fn zero_dimensional_ok() -{ +fn zero_dimensional_ok() { let mut a = Array::zeros(0); let one = aview0(&1); let two = aview0(&2); diff --git a/tests/array-construct.rs b/tests/array-construct.rs index f7339dff6..8df0596b7 100644 --- a/tests/array-construct.rs +++ b/tests/array-construct.rs @@ -8,16 +8,14 @@ use ndarray::prelude::*; use ndarray::Zip; #[test] -fn test_from_shape_fn() -{ +fn test_from_shape_fn() { let step = 3.1; let h = Array::from_shape_fn((5, 5), |(i, j)| f64::sin(i as f64 / step) * f64::cos(j as f64 / step)); assert_eq!(h.shape(), &[5, 5]); } #[test] -fn test_dimension_zero() -{ +fn test_dimension_zero() { let a: Array2 = Array2::from(vec![[], [], []]); assert_eq!(vec![0.; 0], a.into_raw_vec()); let a: Array3 = Array3::from(vec![[[]], [[]], [[]]]); @@ -26,8 +24,7 @@ fn test_dimension_zero() #[test] #[cfg(feature = "approx")] -fn 
test_arc_into_owned() -{ +fn test_arc_into_owned() { use approx::assert_abs_diff_ne; let a = Array2::from_elem((5, 5), 1.).into_shared(); @@ -40,8 +37,7 @@ fn test_arc_into_owned() } #[test] -fn test_arcarray_thread_safe() -{ +fn test_arcarray_thread_safe() { fn is_send(_t: &T) {} fn is_sync(_t: &T) {} let a = Array2::from_elem((5, 5), 1.).into_shared(); @@ -53,8 +49,7 @@ fn test_arcarray_thread_safe() #[test] #[cfg(feature = "std")] #[allow(deprecated)] // uninitialized -fn test_uninit() -{ +fn test_uninit() { unsafe { let mut a = Array::::uninitialized((3, 4).f()); assert_eq!(a.dim(), (3, 4)); @@ -69,8 +64,7 @@ fn test_uninit() } #[test] -fn test_from_fn_c0() -{ +fn test_from_fn_c0() { let a = Array::from_shape_fn((), |i| i); assert_eq!(a[()], ()); assert_eq!(a.len(), 1); @@ -78,8 +72,7 @@ fn test_from_fn_c0() } #[test] -fn test_from_fn_c1() -{ +fn test_from_fn_c1() { let a = Array::from_shape_fn(28, |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -87,8 +80,7 @@ fn test_from_fn_c1() } #[test] -fn test_from_fn_c() -{ +fn test_from_fn_c() { let a = Array::from_shape_fn((4, 7), |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -96,8 +88,7 @@ fn test_from_fn_c() } #[test] -fn test_from_fn_c3() -{ +fn test_from_fn_c3() { let a = Array::from_shape_fn((4, 3, 7), |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -105,8 +96,7 @@ fn test_from_fn_c3() } #[test] -fn test_from_fn_f0() -{ +fn test_from_fn_f0() { let a = Array::from_shape_fn(().f(), |i| i); assert_eq!(a[()], ()); assert_eq!(a.len(), 1); @@ -114,8 +104,7 @@ fn test_from_fn_f0() } #[test] -fn test_from_fn_f1() -{ +fn test_from_fn_f1() { let a = Array::from_shape_fn(28.f(), |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -123,8 +112,7 @@ fn test_from_fn_f1() } #[test] -fn test_from_fn_f() -{ +fn test_from_fn_f() { let a = Array::from_shape_fn((4, 7).f(), |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -132,8 +120,7 @@ fn 
test_from_fn_f() } #[test] -fn test_from_fn_f_with_zero() -{ +fn test_from_fn_f_with_zero() { defmac!(test_from_fn_f_with_zero shape => { let a = Array::from_shape_fn(shape.f(), |i| i); assert_eq!(a.len(), 0); @@ -148,8 +135,7 @@ fn test_from_fn_f_with_zero() } #[test] -fn test_from_fn_f3() -{ +fn test_from_fn_f3() { let a = Array::from_shape_fn((4, 2, 7).f(), |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -157,8 +143,7 @@ fn test_from_fn_f3() } #[test] -fn deny_wraparound_from_vec() -{ +fn deny_wraparound_from_vec() { let five = vec![0; 5]; let five_large = Array::from_shape_vec((3, 7, 29, 36760123, 823996703), five.clone()); println!("{:?}", five_large); @@ -168,8 +153,7 @@ fn deny_wraparound_from_vec() } #[test] -fn test_ones() -{ +fn test_ones() { let mut a = Array::::zeros((2, 3, 4)); a.fill(1.0); let b = Array::::ones((2, 3, 4)); @@ -177,8 +161,7 @@ fn test_ones() } #[test] -fn test_from_shape_empty_with_neg_stride() -{ +fn test_from_shape_empty_with_neg_stride() { // Issue #998, negative strides for an axis where it doesn't matter. let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]; let v = s[..12].to_vec(); @@ -189,8 +172,7 @@ fn test_from_shape_empty_with_neg_stride() } #[test] -fn test_from_shape_with_neg_stride() -{ +fn test_from_shape_with_neg_stride() { // Issue #998, negative strides for an axis where it doesn't matter. let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]; let v = s[..12].to_vec(); @@ -202,8 +184,7 @@ fn test_from_shape_with_neg_stride() } #[test] -fn test_from_shape_2_2_2_with_neg_stride() -{ +fn test_from_shape_2_2_2_with_neg_stride() { // Issue #998, negative strides for an axis where it doesn't matter. 
let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]; let v = s[..12].to_vec(); @@ -218,16 +199,14 @@ fn test_from_shape_2_2_2_with_neg_stride() #[should_panic] #[test] -fn deny_wraparound_zeros() -{ +fn deny_wraparound_zeros() { //2^64 + 5 = 18446744073709551621 = 3×7×29×36760123×823996703 (5 distinct prime factors) let _five_large = Array::::zeros((3, 7, 29, 36760123, 823996703)); } #[should_panic] #[test] -fn deny_wraparound_reshape() -{ +fn deny_wraparound_reshape() { //2^64 + 5 = 18446744073709551621 = 3×7×29×36760123×823996703 (5 distinct prime factors) let five = Array::::zeros(5); let _five_large = five @@ -237,23 +216,20 @@ fn deny_wraparound_reshape() #[should_panic] #[test] -fn deny_wraparound_default() -{ +fn deny_wraparound_default() { let _five_large = Array::::default((3, 7, 29, 36760123, 823996703)); } #[should_panic] #[test] -fn deny_wraparound_from_shape_fn() -{ +fn deny_wraparound_from_shape_fn() { let _five_large = Array::::from_shape_fn((3, 7, 29, 36760123, 823996703), |_| 0.); } #[should_panic] #[test] #[allow(deprecated)] // uninitialized -fn deny_wraparound_uninitialized() -{ +fn deny_wraparound_uninitialized() { unsafe { let _five_large = Array::::uninitialized((3, 7, 29, 36760123, 823996703)); } @@ -261,42 +237,36 @@ fn deny_wraparound_uninitialized() #[should_panic] #[test] -fn deny_wraparound_uninit() -{ +fn deny_wraparound_uninit() { let _five_large = Array::::uninit((3, 7, 29, 36760123, 823996703)); } #[should_panic] #[test] -fn deny_slice_with_too_many_rows_to_arrayview2() -{ +fn deny_slice_with_too_many_rows_to_arrayview2() { let _view = ArrayView2::from(&[[0u8; 0]; usize::MAX][..]); } #[should_panic] #[test] -fn deny_slice_with_too_many_zero_sized_elems_to_arrayview2() -{ +fn deny_slice_with_too_many_zero_sized_elems_to_arrayview2() { let _view = ArrayView2::from(&[[(); isize::MAX as usize]; isize::MAX as usize][..]); } #[should_panic] #[test] -fn deny_slice_with_too_many_rows_to_arrayviewmut2() -{ +fn 
deny_slice_with_too_many_rows_to_arrayviewmut2() { let _view = ArrayViewMut2::from(&mut [[0u8; 0]; usize::MAX][..]); } #[should_panic] #[test] -fn deny_slice_with_too_many_zero_sized_elems_to_arrayviewmut2() -{ +fn deny_slice_with_too_many_zero_sized_elems_to_arrayviewmut2() { let _view = ArrayViewMut2::from(&mut [[(); isize::MAX as usize]; isize::MAX as usize][..]); } #[test] -fn maybe_uninit_1() -{ +fn maybe_uninit_1() { use std::mem::MaybeUninit; unsafe { diff --git a/tests/array.rs b/tests/array.rs index 3f2c38a62..ccc499b4f 100644 --- a/tests/array.rs +++ b/tests/array.rs @@ -30,8 +30,7 @@ macro_rules! assert_panics { } #[test] -fn test_matmul_arcarray() -{ +fn test_matmul_arcarray() { let mut A = ArcArray::::zeros((2, 3)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -55,21 +54,18 @@ fn test_matmul_arcarray() } #[allow(unused)] -fn arrayview_shrink_lifetime<'a, 'b: 'a>(view: ArrayView1<'b, f64>) -> ArrayView1<'a, f64> -{ +fn arrayview_shrink_lifetime<'a, 'b: 'a>(view: ArrayView1<'b, f64>) -> ArrayView1<'a, f64> { view.reborrow() } #[allow(unused)] -fn arrayviewmut_shrink_lifetime<'a, 'b: 'a>(view: ArrayViewMut1<'b, f64>) -> ArrayViewMut1<'a, f64> -{ +fn arrayviewmut_shrink_lifetime<'a, 'b: 'a>(view: ArrayViewMut1<'b, f64>) -> ArrayViewMut1<'a, f64> { view.reborrow() } #[test] #[cfg(feature = "std")] -fn test_mat_mul() -{ +fn test_mat_mul() { // smoke test, a big matrix multiplication of uneven size let (n, m) = (45, 33); let a = ArcArray::linspace(0., ((n * m) - 1) as f32, n as usize * m as usize) @@ -83,8 +79,7 @@ fn test_mat_mul() #[deny(unsafe_code)] #[test] -fn test_slice() -{ +fn test_slice() { let mut A = ArcArray::::zeros((3, 4, 5)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -99,15 +94,13 @@ fn test_slice() #[deny(unsafe_code)] #[test] -fn test_slice_ix0() -{ +fn test_slice_ix0() { let arr = arr0(5); assert_eq!(arr.slice(s![]), aview0(&5)); } #[test] -fn test_slice_edge_cases() -{ +fn test_slice_edge_cases() { let mut arr = 
Array3::::zeros((3, 4, 5)); arr.slice_collapse(s![0..0;-1, .., ..]); assert_eq!(arr.shape(), &[0, 4, 5]); @@ -117,8 +110,7 @@ fn test_slice_edge_cases() } #[test] -fn test_slice_inclusive_range() -{ +fn test_slice_inclusive_range() { let arr = array![[1, 2, 3], [4, 5, 6]]; assert_eq!(arr.slice(s![1..=1, 1..=2]), array![[5, 6]]); assert_eq!(arr.slice(s![1..=-1, -2..=2;-1]), array![[6, 5]]); @@ -132,8 +124,7 @@ fn test_slice_inclusive_range() /// `ArrayView1` and `ArrayView2`, so the compiler needs to determine which /// type is the correct result for the `.slice()` call. #[test] -fn test_slice_infer() -{ +fn test_slice_infer() { let a = array![1., 2.]; let b = array![[3., 4.], [5., 6.]]; b.slice(s![..-1, ..]).dot(&a); @@ -141,8 +132,7 @@ fn test_slice_infer() } #[test] -fn test_slice_with_many_dim() -{ +fn test_slice_with_many_dim() { let mut A = ArcArray::::zeros(&[3, 1, 4, 1, 3, 2, 1][..]); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -169,16 +159,14 @@ fn test_slice_with_many_dim() } #[test] -fn test_slice_range_variable() -{ +fn test_slice_range_variable() { let range = 1..4; let arr = array![0, 1, 2, 3, 4]; assert_eq!(arr.slice(s![range]), array![1, 2, 3]); } #[test] -fn test_slice_args_eval_range_once() -{ +fn test_slice_args_eval_range_once() { let mut eval_count = 0; { let mut range = || { @@ -192,8 +180,7 @@ fn test_slice_args_eval_range_once() } #[test] -fn test_slice_args_eval_step_once() -{ +fn test_slice_args_eval_step_once() { let mut eval_count = 0; { let mut step = || { @@ -207,8 +194,7 @@ fn test_slice_args_eval_step_once() } #[test] -fn test_slice_array_fixed() -{ +fn test_slice_array_fixed() { let mut arr = Array3::::zeros((5, 2, 5)); let info = s![1.., 1, NewAxis, ..;2]; arr.slice(info); @@ -219,8 +205,7 @@ fn test_slice_array_fixed() } #[test] -fn test_slice_dyninput_array_fixed() -{ +fn test_slice_dyninput_array_fixed() { let mut arr = Array3::::zeros((5, 2, 5)).into_dyn(); let info = s![1.., 1, NewAxis, ..;2]; arr.slice(info); @@ 
-231,8 +216,7 @@ fn test_slice_dyninput_array_fixed() } #[test] -fn test_slice_array_dyn() -{ +fn test_slice_array_dyn() { let mut arr = Array3::::zeros((5, 2, 5)); let info = SliceInfo::<_, Ix3, IxDyn>::try_from([ SliceInfoElem::from(1..), @@ -254,8 +238,7 @@ fn test_slice_array_dyn() } #[test] -fn test_slice_dyninput_array_dyn() -{ +fn test_slice_dyninput_array_dyn() { let mut arr = Array3::::zeros((5, 2, 5)).into_dyn(); let info = SliceInfo::<_, Ix3, IxDyn>::try_from([ SliceInfoElem::from(1..), @@ -277,8 +260,7 @@ fn test_slice_dyninput_array_dyn() } #[test] -fn test_slice_dyninput_vec_fixed() -{ +fn test_slice_dyninput_vec_fixed() { let mut arr = Array3::::zeros((5, 2, 5)).into_dyn(); let info = &SliceInfo::<_, Ix3, Ix3>::try_from(vec![ SliceInfoElem::from(1..), @@ -300,8 +282,7 @@ fn test_slice_dyninput_vec_fixed() } #[test] -fn test_slice_dyninput_vec_dyn() -{ +fn test_slice_dyninput_vec_dyn() { let mut arr = Array3::::zeros((5, 2, 5)).into_dyn(); let info = &SliceInfo::<_, Ix3, IxDyn>::try_from(vec![ SliceInfoElem::from(1..), @@ -323,8 +304,7 @@ fn test_slice_dyninput_vec_dyn() } #[test] -fn test_slice_with_subview_and_new_axis() -{ +fn test_slice_with_subview_and_new_axis() { let mut arr = ArcArray::::zeros((3, 5, 4)); for (i, elt) in arr.iter_mut().enumerate() { *elt = i; @@ -361,8 +341,7 @@ fn test_slice_with_subview_and_new_axis() } #[test] -fn test_slice_collapse_with_indices() -{ +fn test_slice_collapse_with_indices() { let mut arr = ArcArray::::zeros((3, 5, 4)); for (i, elt) in arr.iter_mut().enumerate() { *elt = i; @@ -401,15 +380,13 @@ fn test_slice_collapse_with_indices() #[test] #[should_panic] -fn test_slice_collapse_with_newaxis() -{ +fn test_slice_collapse_with_newaxis() { let mut arr = Array2::::zeros((2, 3)); arr.slice_collapse(s![0, 0, NewAxis]); } #[test] -fn test_multislice() -{ +fn test_multislice() { macro_rules! 
do_test { ($arr:expr, $($s:expr),*) => { { @@ -447,8 +424,7 @@ fn test_multislice() } #[test] -fn test_multislice_intersecting() -{ +fn test_multislice_intersecting() { assert_panics!({ let mut arr = Array2::::zeros((8, 6)); arr.multi_slice_mut((s![3, .., NewAxis], s![3, ..])); @@ -489,39 +465,34 @@ fn test_multislice_intersecting() #[should_panic] #[test] -fn index_out_of_bounds() -{ +fn index_out_of_bounds() { let mut a = Array::::zeros((3, 4)); a[[3, 2]] = 1; } #[should_panic] #[test] -fn slice_oob() -{ +fn slice_oob() { let a = ArcArray::::zeros((3, 4)); let _vi = a.slice(s![..10, ..]); } #[should_panic] #[test] -fn slice_axis_oob() -{ +fn slice_axis_oob() { let a = ArcArray::::zeros((3, 4)); let _vi = a.slice_axis(Axis(0), Slice::new(0, Some(10), 1)); } #[should_panic] #[test] -fn slice_wrong_dim() -{ +fn slice_wrong_dim() { let a = ArcArray::::zeros(vec![3, 4, 5]); let _vi = a.slice(s![.., ..]); } #[test] -fn test_index() -{ +fn test_index() { let mut A = ArcArray::::zeros((2, 3)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -542,8 +513,7 @@ fn test_index() } #[test] -fn test_index_arrays() -{ +fn test_index_arrays() { let a = Array1::from_iter(0..12); assert_eq!(a[1], a[[1]]); let v = a.view().into_shape_with_order((3, 4)).unwrap(); @@ -554,8 +524,7 @@ fn test_index_arrays() #[test] #[allow(clippy::assign_op_pattern)] -fn test_add() -{ +fn test_add() { let mut A = ArcArray::::zeros((2, 2)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -570,8 +539,7 @@ fn test_add() } #[test] -fn test_multidim() -{ +fn test_multidim() { let mut mat = ArcArray::zeros(2 * 3 * 4 * 5 * 6) .into_shape_with_order((2, 3, 4, 5, 6)) .unwrap(); @@ -596,8 +564,7 @@ array([[[ 7, 6], [ 9, 8]]]) */ #[test] -fn test_negative_stride_arcarray() -{ +fn test_negative_stride_arcarray() { let mut mat = ArcArray::zeros((2, 4, 2)); mat[[0, 0, 0]] = 1.0f32; for (i, elt) in mat.iter_mut().enumerate() { @@ -623,8 +590,7 @@ fn test_negative_stride_arcarray() } #[test] -fn 
test_cow() -{ +fn test_cow() { let mut mat = ArcArray::zeros((2, 2)); mat[[0, 0]] = 1; let n = mat.clone(); @@ -656,8 +622,7 @@ fn test_cow() } #[test] -fn test_cow_shrink() -{ +fn test_cow_shrink() { // A test for clone-on-write in the case that // mutation shrinks the array and gives it different strides // @@ -692,8 +657,7 @@ fn test_cow_shrink() #[test] #[cfg(feature = "std")] -fn test_sub() -{ +fn test_sub() { let mat = ArcArray::linspace(0., 15., 16) .into_shape_with_order((2, 4, 2)) .unwrap(); @@ -714,8 +678,7 @@ fn test_sub() #[should_panic] #[test] #[cfg(feature = "std")] -fn test_sub_oob_1() -{ +fn test_sub_oob_1() { let mat = ArcArray::linspace(0., 15., 16) .into_shape_with_order((2, 4, 2)) .unwrap(); @@ -724,8 +687,7 @@ fn test_sub_oob_1() #[test] #[cfg(feature = "approx")] -fn test_select() -{ +fn test_select() { use approx::assert_abs_diff_eq; // test for 2-d array @@ -748,8 +710,7 @@ fn test_select() } #[test] -fn test_select_1d() -{ +fn test_select_1d() { let x = arr1(&[0, 1, 2, 3, 4, 5, 6]); let r1 = x.select(Axis(0), &[1, 3, 4, 2, 2, 5]); assert_eq!(r1, arr1(&[1, 3, 4, 2, 2, 5])); @@ -762,8 +723,7 @@ fn test_select_1d() } #[test] -fn diag() -{ +fn diag() { let d = arr2(&[[1., 2., 3.0f32]]).into_diag(); assert_eq!(d.dim(), 1); let a = arr2(&[[1., 2., 3.0f32], [0., 0., 0.]]); @@ -780,8 +740,7 @@ fn diag() /// Note that this does not check the strides in the "merged" case! #[test] #[allow(clippy::cognitive_complexity)] -fn merge_axes() -{ +fn merge_axes() { macro_rules! 
assert_merged { ($arr:expr, $slice:expr, $take:expr, $into:expr) => { let mut v = $arr.slice($slice); @@ -869,8 +828,7 @@ fn merge_axes() } #[test] -fn swapaxes() -{ +fn swapaxes() { let mut a = arr2(&[[1., 2.], [3., 4.0f32]]); let b = arr2(&[[1., 3.], [2., 4.0f32]]); assert!(a != b); @@ -883,8 +841,7 @@ fn swapaxes() } #[test] -fn permuted_axes() -{ +fn permuted_axes() { let a = array![1].index_axis_move(Axis(0), 0); let permuted = a.view().permuted_axes([]); assert_eq!(a, permuted); @@ -920,8 +877,7 @@ fn permuted_axes() #[should_panic] #[test] -fn permuted_axes_repeated_axis() -{ +fn permuted_axes_repeated_axis() { let a = Array::from_iter(0..24) .into_shape_with_order((2, 3, 4)) .unwrap(); @@ -930,8 +886,7 @@ fn permuted_axes_repeated_axis() #[should_panic] #[test] -fn permuted_axes_missing_axis() -{ +fn permuted_axes_missing_axis() { let a = Array::from_iter(0..24) .into_shape_with_order((2, 3, 4)) .unwrap() @@ -941,8 +896,7 @@ fn permuted_axes_missing_axis() #[should_panic] #[test] -fn permuted_axes_oob() -{ +fn permuted_axes_oob() { let a = Array::from_iter(0..24) .into_shape_with_order((2, 3, 4)) .unwrap(); @@ -950,8 +904,7 @@ fn permuted_axes_oob() } #[test] -fn standard_layout() -{ +fn standard_layout() { let mut a = arr2(&[[1., 2.], [3., 4.0]]); assert!(a.is_standard_layout()); a.swap_axes(0, 1); @@ -969,8 +922,7 @@ fn standard_layout() } #[test] -fn iter_size_hint() -{ +fn iter_size_hint() { let mut a = arr2(&[[1., 2.], [3., 4.]]); { let mut it = a.iter(); @@ -1005,8 +957,7 @@ fn iter_size_hint() } #[test] -fn zero_axes() -{ +fn zero_axes() { let mut a = arr1::(&[]); for _ in a.iter() { panic!(); @@ -1024,8 +975,7 @@ fn zero_axes() } #[test] -fn equality() -{ +fn equality() { let a = arr2(&[[1., 2.], [3., 4.]]); let mut b = arr2(&[[1., 2.], [2., 4.]]); assert!(a != b); @@ -1038,8 +988,7 @@ fn equality() } #[test] -fn map1() -{ +fn map1() { let a = arr2(&[[1., 2.], [3., 4.]]); let b = a.map(|&x| (x / 3.) 
as isize); assert_eq!(b, arr2(&[[0, 0], [1, 1]])); @@ -1049,24 +998,21 @@ fn map1() } #[test] -fn mapv_into_any_same_type() -{ +fn mapv_into_any_same_type() { let a: Array = array![[1., 2., 3.], [4., 5., 6.]]; let a_plus_one: Array = array![[2., 3., 4.], [5., 6., 7.]]; assert_eq!(a.mapv_into_any(|a| a + 1.), a_plus_one); } #[test] -fn mapv_into_any_diff_types() -{ +fn mapv_into_any_diff_types() { let a: Array = array![[1., 2., 3.], [4., 5., 6.]]; let a_even: Array = array![[false, true, false], [true, false, true]]; assert_eq!(a.mapv_into_any(|a| a.round() as i32 % 2 == 0), a_even); } #[test] -fn as_slice_memory_order_mut_arcarray() -{ +fn as_slice_memory_order_mut_arcarray() { // Test that mutation breaks sharing for `ArcArray`. let a = rcarr2(&[[1., 2.], [3., 4.0f32]]); let mut b = a.clone(); @@ -1077,8 +1023,7 @@ fn as_slice_memory_order_mut_arcarray() } #[test] -fn as_slice_memory_order_mut_cowarray() -{ +fn as_slice_memory_order_mut_cowarray() { // Test that mutation breaks sharing for `CowArray`. let a = arr2(&[[1., 2.], [3., 4.0f32]]); let mut b = CowArray::from(a.view()); @@ -1089,8 +1034,7 @@ fn as_slice_memory_order_mut_cowarray() } #[test] -fn as_slice_memory_order_mut_contiguous_arcarray() -{ +fn as_slice_memory_order_mut_contiguous_arcarray() { // Test that unsharing preserves the strides in the contiguous case for `ArcArray`. let a = rcarr2(&[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]]).reversed_axes(); let mut b = a.clone().slice_move(s![.., ..2]); @@ -1100,8 +1044,7 @@ fn as_slice_memory_order_mut_contiguous_arcarray() } #[test] -fn as_slice_memory_order_mut_contiguous_cowarray() -{ +fn as_slice_memory_order_mut_contiguous_cowarray() { // Test that unsharing preserves the strides in the contiguous case for `CowArray`. 
let a = arr2(&[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]]).reversed_axes(); let mut b = CowArray::from(a.slice(s![.., ..2])); @@ -1112,8 +1055,7 @@ fn as_slice_memory_order_mut_contiguous_cowarray() } #[test] -fn to_slice_memory_order() -{ +fn to_slice_memory_order() { for shape in vec![[2, 0, 3, 5], [2, 1, 3, 5], [2, 4, 3, 5]] { let data: Vec = (0..shape.iter().product()).collect(); let mut orig = Array1::from(data.clone()) @@ -1130,8 +1072,7 @@ fn to_slice_memory_order() } #[test] -fn to_slice_memory_order_discontiguous() -{ +fn to_slice_memory_order_discontiguous() { let mut orig = Array3::::zeros([3, 2, 4]); assert!(orig .slice(s![.., 1.., ..]) @@ -1152,8 +1093,7 @@ fn to_slice_memory_order_discontiguous() } #[test] -fn array0_into_scalar() -{ +fn array0_into_scalar() { // With this kind of setup, the `Array`'s pointer is not the same as the // underlying `Vec`'s pointer. let a: Array0 = array![4, 5, 6, 7].index_axis_move(Axis(0), 2); @@ -1168,8 +1108,7 @@ fn array0_into_scalar() } #[test] -fn array_view0_into_scalar() -{ +fn array_view0_into_scalar() { // With this kind of setup, the `Array`'s pointer is not the same as the // underlying `Vec`'s pointer. let a: Array0 = array![4, 5, 6, 7].index_axis_move(Axis(0), 2); @@ -1184,8 +1123,7 @@ fn array_view0_into_scalar() } #[test] -fn array_view_mut0_into_scalar() -{ +fn array_view_mut0_into_scalar() { // With this kind of setup, the `Array`'s pointer is not the same as the // underlying `Vec`'s pointer. 
let a: Array0 = array![4, 5, 6, 7].index_axis_move(Axis(0), 2); @@ -1200,8 +1138,7 @@ fn array_view_mut0_into_scalar() } #[test] -fn owned_array1() -{ +fn owned_array1() { let mut a = Array::from(vec![1, 2, 3, 4]); for elt in a.iter_mut() { *elt = 2; @@ -1226,8 +1163,7 @@ fn owned_array1() } #[test] -fn owned_array_with_stride() -{ +fn owned_array_with_stride() { let v: Vec<_> = (0..12).collect(); let dim = (2, 3, 2); let strides = (1, 4, 2); @@ -1237,8 +1173,7 @@ fn owned_array_with_stride() } #[test] -fn owned_array_discontiguous() -{ +fn owned_array_discontiguous() { use std::iter::repeat; let v: Vec<_> = (0..12).flat_map(|x| repeat(x).take(2)).collect(); let dim = (3, 2, 2); @@ -1251,17 +1186,14 @@ fn owned_array_discontiguous() } #[test] -fn owned_array_discontiguous_drop() -{ +fn owned_array_discontiguous_drop() { use std::cell::RefCell; use std::collections::BTreeSet; use std::rc::Rc; struct InsertOnDrop(Rc>>, Option); - impl Drop for InsertOnDrop - { - fn drop(&mut self) - { + impl Drop for InsertOnDrop { + fn drop(&mut self) { let InsertOnDrop(ref set, ref mut value) = *self; set.borrow_mut().insert(value.take().expect("double drop!")); } @@ -1295,15 +1227,13 @@ macro_rules! 
assert_matches { } #[test] -fn from_vec_dim_stride_empty_1d() -{ +fn from_vec_dim_stride_empty_1d() { let empty: [f32; 0] = []; assert_matches!(Array::from_shape_vec(0.strides(1), empty.to_vec()), Ok(_)); } #[test] -fn from_vec_dim_stride_0d() -{ +fn from_vec_dim_stride_0d() { let empty: [f32; 0] = []; let one = [1.]; let two = [1., 2.]; @@ -1319,8 +1249,7 @@ fn from_vec_dim_stride_0d() } #[test] -fn from_vec_dim_stride_2d_1() -{ +fn from_vec_dim_stride_2d_1() { let two = [1., 2.]; let d = Ix2(2, 1); let s = d.default_strides(); @@ -1328,8 +1257,7 @@ fn from_vec_dim_stride_2d_1() } #[test] -fn from_vec_dim_stride_2d_2() -{ +fn from_vec_dim_stride_2d_2() { let two = [1., 2.]; let d = Ix2(1, 2); let s = d.default_strides(); @@ -1337,8 +1265,7 @@ fn from_vec_dim_stride_2d_2() } #[test] -fn from_vec_dim_stride_2d_3() -{ +fn from_vec_dim_stride_2d_3() { let a = arr3(&[[[1]], [[2]], [[3]]]); let d = a.raw_dim(); let s = d.default_strides(); @@ -1349,8 +1276,7 @@ fn from_vec_dim_stride_2d_3() } #[test] -fn from_vec_dim_stride_2d_4() -{ +fn from_vec_dim_stride_2d_4() { let a = arr3(&[[[1]], [[2]], [[3]]]); let d = a.raw_dim(); let s = d.fortran_strides(); @@ -1361,8 +1287,7 @@ fn from_vec_dim_stride_2d_4() } #[test] -fn from_vec_dim_stride_2d_5() -{ +fn from_vec_dim_stride_2d_5() { let a = arr3(&[[[1, 2, 3]]]); let d = a.raw_dim(); let s = d.fortran_strides(); @@ -1373,8 +1298,7 @@ fn from_vec_dim_stride_2d_5() } #[test] -fn from_vec_dim_stride_2d_6() -{ +fn from_vec_dim_stride_2d_6() { let a = [1., 2., 3., 4., 5., 6.]; let d = (2, 1, 1); let s = (2, 2, 1); @@ -1386,8 +1310,7 @@ fn from_vec_dim_stride_2d_6() } #[test] -fn from_vec_dim_stride_2d_7() -{ +fn from_vec_dim_stride_2d_7() { // empty arrays can have 0 strides let a: [f32; 0] = []; // [[]] shape=[4, 0], strides=[0, 1] @@ -1397,8 +1320,7 @@ fn from_vec_dim_stride_2d_7() } #[test] -fn from_vec_dim_stride_2d_8() -{ +fn from_vec_dim_stride_2d_8() { // strides of length 1 axes can be zero let a = [1.]; let d = (1, 1); 
@@ -1407,8 +1329,7 @@ fn from_vec_dim_stride_2d_8() } #[test] -fn from_vec_dim_stride_2d_rejects() -{ +fn from_vec_dim_stride_2d_rejects() { let two = [1., 2.]; let d = (2, 2); let s = (1, 0); @@ -1420,8 +1341,7 @@ fn from_vec_dim_stride_2d_rejects() } #[test] -fn views() -{ +fn views() { let a = ArcArray::from(vec![1, 2, 3, 4]) .into_shape_with_order((2, 2)) .unwrap(); @@ -1440,8 +1360,7 @@ fn views() } #[test] -fn view_mut() -{ +fn view_mut() { let mut a = ArcArray::from(vec![1, 2, 3, 4]) .into_shape_with_order((2, 2)) .unwrap(); @@ -1462,8 +1381,7 @@ fn view_mut() } #[test] -fn slice_mut() -{ +fn slice_mut() { let mut a = ArcArray::from(vec![1, 2, 3, 4]) .into_shape_with_order((2, 2)) .unwrap(); @@ -1487,8 +1405,7 @@ fn slice_mut() } #[test] -fn assign_ops() -{ +fn assign_ops() { let mut a = arr2(&[[1., 2.], [3., 4.]]); let b = arr2(&[[1., 3.], [2., 4.]]); (*&mut a.view_mut()) += &b; @@ -1506,8 +1423,7 @@ fn assign_ops() } #[test] -fn aview() -{ +fn aview() { let a = arr2(&[[1., 2., 3.], [4., 5., 6.]]); let data = [[1., 2., 3.], [4., 5., 6.]]; let b = aview2(&data); @@ -1516,8 +1432,7 @@ fn aview() } #[test] -fn aview_mut() -{ +fn aview_mut() { let mut data = [0; 16]; { let mut a = aview_mut1(&mut data).into_shape_with_order((4, 4)).unwrap(); @@ -1530,8 +1445,7 @@ fn aview_mut() } #[test] -fn transpose_view() -{ +fn transpose_view() { let a = arr2(&[[1, 2], [3, 4]]); let at = a.view().reversed_axes(); assert_eq!(at, arr2(&[[1, 3], [2, 4]])); @@ -1542,8 +1456,7 @@ fn transpose_view() } #[test] -fn transpose_view_mut() -{ +fn transpose_view_mut() { let mut a = arr2(&[[1, 2], [3, 4]]); let mut at = a.view_mut().reversed_axes(); at[[0, 1]] = 5; @@ -1557,8 +1470,7 @@ fn transpose_view_mut() #[test] #[allow(clippy::cognitive_complexity)] -fn insert_axis() -{ +fn insert_axis() { defmac!(test_insert orig, index, new => { let res = orig.insert_axis(Axis(index)); assert_eq!(res, new); @@ -1653,8 +1565,7 @@ fn insert_axis() } #[test] -fn insert_axis_f() -{ +fn 
insert_axis_f() { defmac!(test_insert_f orig, index, new => { let res = orig.insert_axis(Axis(index)); assert_eq!(res, new); @@ -1701,8 +1612,7 @@ fn insert_axis_f() } #[test] -fn insert_axis_view() -{ +fn insert_axis_view() { let a = array![[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]; assert_eq!( @@ -1720,8 +1630,7 @@ fn insert_axis_view() } #[test] -fn arithmetic_broadcast() -{ +fn arithmetic_broadcast() { let mut a = arr2(&[[1., 2.], [3., 4.]]); let b = a.clone() * aview0(&1.); assert_eq!(a, b); @@ -1780,8 +1689,7 @@ fn arithmetic_broadcast() } #[test] -fn char_array() -{ +fn char_array() { // test compilation & basics of non-numerical array let cc = ArcArray::from_iter("alphabet".chars()) .into_shape_with_order((4, 2)) @@ -1790,8 +1698,7 @@ fn char_array() } #[test] -fn scalar_ops() -{ +fn scalar_ops() { let a = Array::::zeros((5, 5)); let b = &a + 1; let c = (&a + &a + 2) - 3; @@ -1829,8 +1736,7 @@ fn scalar_ops() #[test] #[cfg(feature = "std")] -fn split_at() -{ +fn split_at() { let mut a = arr2(&[[1., 2.], [3., 4.]]); { @@ -1870,24 +1776,21 @@ fn split_at() #[test] #[should_panic] -fn deny_split_at_axis_out_of_bounds() -{ +fn deny_split_at_axis_out_of_bounds() { let a = arr2(&[[1., 2.], [3., 4.]]); a.view().split_at(Axis(2), 0); } #[test] #[should_panic] -fn deny_split_at_index_out_of_bounds() -{ +fn deny_split_at_index_out_of_bounds() { let a = arr2(&[[1., 2.], [3., 4.]]); a.view().split_at(Axis(1), 3); } #[test] #[cfg(feature = "std")] -fn test_range() -{ +fn test_range() { let a = Array::range(0., 5., 1.); assert_eq!(a.len(), 5); assert_eq!(a[0], 0.); @@ -1916,8 +1819,7 @@ fn test_range() } #[test] -fn test_f_order() -{ +fn test_f_order() { // Test that arrays are logically equal in every way, // even if the underlying memory order is different let c = arr2(&[[1, 2, 3], [4, 5, 6]]); @@ -1939,8 +1841,7 @@ fn test_f_order() } #[test] -fn to_owned_memory_order() -{ +fn to_owned_memory_order() { // check that .to_owned() makes f-contiguous arrays 
out of f-contiguous // input. let c = arr2(&[[1, 2, 3], [4, 5, 6]]); @@ -1960,8 +1861,7 @@ fn to_owned_memory_order() } #[test] -fn to_owned_neg_stride() -{ +fn to_owned_neg_stride() { let mut c = arr2(&[[1, 2, 3], [4, 5, 6]]); c.slice_collapse(s![.., ..;-1]); let co = c.to_owned(); @@ -1970,8 +1870,7 @@ fn to_owned_neg_stride() } #[test] -fn discontiguous_owned_to_owned() -{ +fn discontiguous_owned_to_owned() { let mut c = arr2(&[[1, 2, 3], [4, 5, 6]]); c.slice_collapse(s![.., ..;2]); @@ -1982,8 +1881,7 @@ fn discontiguous_owned_to_owned() } #[test] -fn map_memory_order() -{ +fn map_memory_order() { let a = arr3(&[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [0, -1, -2]]]); let mut v = a.view(); v.swap_axes(0, 1); @@ -1993,8 +1891,7 @@ fn map_memory_order() } #[test] -fn map_mut_with_unsharing() -{ +fn map_mut_with_unsharing() { // Fortran-layout `ArcArray`. let a = rcarr2(&[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]]).reversed_axes(); assert_eq!(a.shape(), &[2, 5]); @@ -2021,8 +1918,7 @@ fn map_mut_with_unsharing() } #[test] -fn test_view_from_shape() -{ +fn test_view_from_shape() { let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; let a = ArrayView::from_shape((2, 3, 2), &s).unwrap(); let mut answer = Array::from(s.to_vec()) @@ -2045,8 +1941,7 @@ fn test_view_from_shape() } #[test] -fn test_contiguous() -{ +fn test_contiguous() { let c = arr3(&[[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 7, 7]]]); assert!(c.is_standard_layout()); assert!(c.as_slice_memory_order().is_some()); @@ -2080,8 +1975,7 @@ fn test_contiguous() } #[test] -fn test_contiguous_single_element() -{ +fn test_contiguous_single_element() { assert_matches!(array![1].as_slice_memory_order(), Some(&[1])); let arr1 = array![1, 2, 3]; @@ -2096,8 +1990,7 @@ fn test_contiguous_single_element() } #[test] -fn test_contiguous_neg_strides() -{ +fn test_contiguous_neg_strides() { let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]; let a = ArrayView::from_shape((2, 3, 2).strides((1, 4, 2)), &s).unwrap(); assert_eq!( @@ 
-2155,8 +2048,7 @@ fn test_contiguous_neg_strides() } #[test] -fn test_swap() -{ +fn test_swap() { let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9]]); let b = a.clone(); @@ -2169,8 +2061,7 @@ fn test_swap() } #[test] -fn test_uswap() -{ +fn test_uswap() { let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9]]); let b = a.clone(); @@ -2183,8 +2074,7 @@ fn test_uswap() } #[test] -fn test_shape() -{ +fn test_shape() { let data = [0, 1, 2, 3, 4, 5]; let a = Array::from_shape_vec((1, 2, 3), data.to_vec()).unwrap(); let b = Array::from_shape_vec((1, 2, 3).f(), data.to_vec()).unwrap(); @@ -2198,8 +2088,7 @@ fn test_shape() } #[test] -fn test_view_from_shape_ptr() -{ +fn test_view_from_shape_ptr() { let data = [0, 1, 2, 3, 4, 5]; let view = unsafe { ArrayView::from_shape_ptr((2, 3), data.as_ptr()) }; assert_eq!(view, aview2(&[[0, 1, 2], [3, 4, 5]])); @@ -2215,8 +2104,7 @@ fn test_view_from_shape_ptr() #[should_panic(expected = "Unsupported")] #[cfg(debug_assertions)] #[test] -fn test_view_from_shape_ptr_deny_neg_strides() -{ +fn test_view_from_shape_ptr_deny_neg_strides() { let data = [0, 1, 2, 3, 4, 5]; let _view = unsafe { ArrayView::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_ptr()) }; } @@ -2224,8 +2112,7 @@ fn test_view_from_shape_ptr_deny_neg_strides() #[should_panic(expected = "Unsupported")] #[cfg(debug_assertions)] #[test] -fn test_view_mut_from_shape_ptr_deny_neg_strides() -{ +fn test_view_mut_from_shape_ptr_deny_neg_strides() { let mut data = [0, 1, 2, 3, 4, 5]; let _view = unsafe { ArrayViewMut::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_mut_ptr()) }; } @@ -2233,8 +2120,7 @@ fn test_view_mut_from_shape_ptr_deny_neg_strides() #[should_panic(expected = "Unsupported")] #[cfg(debug_assertions)] #[test] -fn test_raw_view_from_shape_ptr_deny_neg_strides() -{ +fn test_raw_view_from_shape_ptr_deny_neg_strides() { let data = [0, 1, 2, 3, 4, 5]; let _view = unsafe { RawArrayView::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), 
data.as_ptr()) }; } @@ -2242,15 +2128,13 @@ fn test_raw_view_from_shape_ptr_deny_neg_strides() #[should_panic(expected = "Unsupported")] #[cfg(debug_assertions)] #[test] -fn test_raw_view_mut_from_shape_ptr_deny_neg_strides() -{ +fn test_raw_view_mut_from_shape_ptr_deny_neg_strides() { let mut data = [0, 1, 2, 3, 4, 5]; let _view = unsafe { RawArrayViewMut::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_mut_ptr()) }; } #[test] -fn test_default() -{ +fn test_default() { let a = as Default>::default(); assert_eq!(a, aview2(&[[0.0; 0]; 0])); @@ -2261,16 +2145,14 @@ fn test_default() } #[test] -fn test_default_ixdyn() -{ +fn test_default_ixdyn() { let a = as Default>::default(); let b = >::zeros(IxDyn(&[0])); assert_eq!(a, b); } #[test] -fn test_map_axis() -{ +fn test_map_axis() { let a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); let b = a.map_axis(Axis(0), |view| view.sum()); @@ -2303,8 +2185,7 @@ fn test_map_axis() } #[test] -fn test_accumulate_axis_inplace_noop() -{ +fn test_accumulate_axis_inplace_noop() { let mut a = Array2::::zeros((0, 3)); a.accumulate_axis_inplace(Axis(0), |&prev, curr| *curr += prev); assert_eq!(a, Array2::zeros((0, 3))); @@ -2346,8 +2227,7 @@ fn test_accumulate_axis_inplace_nonstandard_layout() { } #[test] -fn test_to_vec() -{ +fn test_to_vec() { let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); a.slice_collapse(s![..;-1, ..]); @@ -2358,8 +2238,7 @@ fn test_to_vec() } #[test] -fn test_array_clone_unalias() -{ +fn test_array_clone_unalias() { let a = Array::::zeros((3, 3)); let mut b = a.clone(); b.fill(1); @@ -2368,8 +2247,7 @@ fn test_array_clone_unalias() } #[test] -fn test_array_clone_same_view() -{ +fn test_array_clone_same_view() { let mut a = Array::from_iter(0..9) .into_shape_with_order((3, 3)) .unwrap(); @@ -2379,8 +2257,7 @@ fn test_array_clone_same_view() } #[test] -fn test_array2_from_diag() -{ +fn test_array2_from_diag() { let diag = arr1(&[0, 1, 2]); let x = Array2::from_diag(&diag); 
let x_exp = arr2(&[[0, 0, 0], [0, 1, 0], [0, 0, 2]]); @@ -2394,8 +2271,7 @@ fn test_array2_from_diag() } #[test] -fn array_macros() -{ +fn array_macros() { // array let a1 = array![1, 2, 3]; assert_eq!(a1, arr1(&[1, 2, 3])); @@ -2423,8 +2299,7 @@ fn array_macros() } #[cfg(test)] -mod as_standard_layout_tests -{ +mod as_standard_layout_tests { use super::*; use ndarray::Data; use std::fmt::Debug; @@ -2443,8 +2318,7 @@ mod as_standard_layout_tests } #[test] - fn test_f_layout() - { + fn test_f_layout() { let shape = (2, 2).f(); let arr = Array::::from_shape_vec(shape, vec![1, 2, 3, 4]).unwrap(); assert!(!arr.is_standard_layout()); @@ -2452,16 +2326,14 @@ mod as_standard_layout_tests } #[test] - fn test_c_layout() - { + fn test_c_layout() { let arr = Array::::from_shape_vec((2, 2), vec![1, 2, 3, 4]).unwrap(); assert!(arr.is_standard_layout()); test_as_standard_layout_for(arr); } #[test] - fn test_f_layout_view() - { + fn test_f_layout_view() { let shape = (2, 2).f(); let arr = Array::::from_shape_vec(shape, vec![1, 2, 3, 4]).unwrap(); let arr_view = arr.view(); @@ -2470,8 +2342,7 @@ mod as_standard_layout_tests } #[test] - fn test_c_layout_view() - { + fn test_c_layout_view() { let arr = Array::::from_shape_vec((2, 2), vec![1, 2, 3, 4]).unwrap(); let arr_view = arr.view(); assert!(arr_view.is_standard_layout()); @@ -2479,16 +2350,14 @@ mod as_standard_layout_tests } #[test] - fn test_zero_dimensional_array() - { + fn test_zero_dimensional_array() { let arr_view = ArrayView1::::from(&[]); assert!(arr_view.is_standard_layout()); test_as_standard_layout_for(arr_view); } #[test] - fn test_custom_layout() - { + fn test_custom_layout() { let shape = (1, 2, 3, 2).strides((12, 1, 2, 6)); let arr_data: Vec = (0..12).collect(); let arr = Array::::from_shape_vec(shape, arr_data).unwrap(); @@ -2498,13 +2367,11 @@ mod as_standard_layout_tests } #[cfg(test)] -mod array_cow_tests -{ +mod array_cow_tests { use super::*; #[test] - fn test_is_variant() - { + fn test_is_variant() { let 
arr: Array = array![[1, 2], [3, 4]]; let arr_cow = CowArray::::from(arr.view()); assert!(arr_cow.is_view()); @@ -2514,8 +2381,7 @@ mod array_cow_tests assert!(!arr_cow.is_view()); } - fn run_with_various_layouts(mut f: impl FnMut(Array2)) - { + fn run_with_various_layouts(mut f: impl FnMut(Array2)) { for all in vec![ Array2::from_shape_vec((7, 8), (0..7 * 8).collect()).unwrap(), Array2::from_shape_vec((7, 8).f(), (0..7 * 8).collect()).unwrap(), @@ -2533,8 +2399,7 @@ mod array_cow_tests } #[test] - fn test_element_mutation() - { + fn test_element_mutation() { run_with_various_layouts(|arr: Array2| { let mut expected = arr.clone(); expected[(1, 1)] = 2; @@ -2554,8 +2419,7 @@ mod array_cow_tests } #[test] - fn test_clone() - { + fn test_clone() { run_with_various_layouts(|arr: Array2| { let arr_cow = CowArray::::from(arr.view()); let arr_cow_clone = arr_cow.clone(); @@ -2574,10 +2438,8 @@ mod array_cow_tests } #[test] - fn test_clone_from() - { - fn assert_eq_contents_and_layout(arr1: &CowArray<'_, i32, Ix2>, arr2: &CowArray<'_, i32, Ix2>) - { + fn test_clone_from() { + fn assert_eq_contents_and_layout(arr1: &CowArray<'_, i32, Ix2>, arr2: &CowArray<'_, i32, Ix2>) { assert_eq!(arr1, arr2); assert_eq!(arr1.dim(), arr2.dim()); assert_eq!(arr1.strides(), arr2.strides()); @@ -2613,8 +2475,7 @@ mod array_cow_tests } #[test] - fn test_into_owned() - { + fn test_into_owned() { run_with_various_layouts(|arr: Array2| { let before = CowArray::::from(arr.view()); let after = before.into_owned(); @@ -2630,8 +2491,7 @@ mod array_cow_tests } #[test] -fn test_remove_index() -{ +fn test_remove_index() { let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); a.remove_index(Axis(0), 1); a.remove_index(Axis(1), 2); @@ -2668,16 +2528,14 @@ fn test_remove_index() #[should_panic(expected = "must be less")] #[test] -fn test_remove_index_oob1() -{ +fn test_remove_index_oob1() { let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); a.remove_index(Axis(0), 4); } 
#[should_panic(expected = "must be less")] #[test] -fn test_remove_index_oob2() -{ +fn test_remove_index_oob2() { let mut a = array![[10], [4], [1]]; a.remove_index(Axis(1), 0); assert_eq!(a.shape(), &[3, 0]); @@ -2694,15 +2552,13 @@ fn test_remove_index_oob2() #[should_panic(expected = "index out of bounds")] #[test] -fn test_remove_index_oob3() -{ +fn test_remove_index_oob3() { let mut a = array![[10], [4], [1]]; a.remove_index(Axis(2), 0); } #[test] -fn test_split_complex_view() -{ +fn test_split_complex_view() { let a = Array3::from_shape_fn((3, 4, 5), |(i, j, k)| Complex::::new(i as f32 * j as f32, k as f32)); let Complex { re, im } = a.view().split_complex(); assert_relative_eq!(re.sum(), 90.); @@ -2710,8 +2566,7 @@ fn test_split_complex_view() } #[test] -fn test_split_complex_view_roundtrip() -{ +fn test_split_complex_view_roundtrip() { let a_re = Array3::from_shape_fn((3, 1, 5), |(i, j, _k)| i * j); let a_im = Array3::from_shape_fn((3, 1, 5), |(_i, _j, k)| k); let a = Array3::from_shape_fn((3, 1, 5), |(i, j, k)| Complex::new(a_re[[i, j, k]], a_im[[i, j, k]])); @@ -2721,8 +2576,7 @@ fn test_split_complex_view_roundtrip() } #[test] -fn test_split_complex_view_mut() -{ +fn test_split_complex_view_mut() { let eye_scalar = Array2::::eye(4); let eye_complex = Array2::>::eye(4); let mut a = Array2::>::zeros((4, 4)); @@ -2733,8 +2587,7 @@ fn test_split_complex_view_mut() } #[test] -fn test_split_complex_zerod() -{ +fn test_split_complex_zerod() { let mut a = Array0::from_elem((), Complex::new(42, 32)); let Complex { re, im } = a.view().split_complex(); assert_eq!(re.get(()), Some(&42)); @@ -2745,8 +2598,7 @@ fn test_split_complex_zerod() } #[test] -fn test_split_complex_permuted() -{ +fn test_split_complex_permuted() { let a = Array3::from_shape_fn((3, 4, 5), |(i, j, k)| Complex::new(i * k + j, k)); let permuted = a.view().permuted_axes([1, 0, 2]); let Complex { re, im } = permuted.split_complex(); @@ -2755,8 +2607,7 @@ fn test_split_complex_permuted() } #[test] 
-fn test_split_complex_invert_axis() -{ +fn test_split_complex_invert_axis() { let mut a = Array::from_shape_fn((2, 3, 2), |(i, j, k)| Complex::new(i as f64 + j as f64, i as f64 + k as f64)); a.invert_axis(Axis(1)); let cmplx = a.view().split_complex(); diff --git a/tests/assign.rs b/tests/assign.rs index 29a6b851a..8205828c2 100644 --- a/tests/assign.rs +++ b/tests/assign.rs @@ -3,8 +3,7 @@ use ndarray::prelude::*; use std::sync::atomic::{AtomicUsize, Ordering}; #[test] -fn assign() -{ +fn assign() { let mut a = arr2(&[[1., 2.], [3., 4.]]); let b = arr2(&[[1., 3.], [2., 4.]]); a.assign(&b); @@ -29,8 +28,7 @@ fn assign() } #[test] -fn assign_to() -{ +fn assign_to() { let mut a = arr2(&[[1., 2.], [3., 4.]]); let b = arr2(&[[0., 3.], [2., 0.]]); b.assign_to(&mut a); @@ -38,8 +36,7 @@ fn assign_to() } #[test] -fn move_into_copy() -{ +fn move_into_copy() { let a = arr2(&[[1., 2.], [3., 4.]]); let acopy = a.clone(); let mut b = Array::uninit(a.dim()); @@ -56,8 +53,7 @@ fn move_into_copy() } #[test] -fn move_into_owned() -{ +fn move_into_owned() { // Test various memory layouts and holes while moving String elements. for &use_f_order in &[false, true] { for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { @@ -87,8 +83,7 @@ fn move_into_owned() } #[test] -fn move_into_slicing() -{ +fn move_into_slicing() { // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes). for &use_f_order in &[false, true] { for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { @@ -122,8 +117,7 @@ fn move_into_slicing() } #[test] -fn move_into_diag() -{ +fn move_into_diag() { // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes). for &use_f_order in &[false, true] { let counter = DropCounter::default(); @@ -148,8 +142,7 @@ fn move_into_diag() } #[test] -fn move_into_0dim() -{ +fn move_into_0dim() { // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes). 
for &use_f_order in &[false, true] { let counter = DropCounter::default(); @@ -176,8 +169,7 @@ fn move_into_0dim() } #[test] -fn move_into_empty() -{ +fn move_into_empty() { // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes). for &use_f_order in &[false, true] { let counter = DropCounter::default(); @@ -203,8 +195,7 @@ fn move_into_empty() } #[test] -fn move_into() -{ +fn move_into() { // Test various memory layouts and holes while moving String elements with move_into for &use_f_order in &[false, true] { for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { @@ -235,34 +226,28 @@ fn move_into() /// This counter can create elements, and then count and verify /// the number of which have actually been dropped again. #[derive(Default)] -struct DropCounter -{ +struct DropCounter { created: AtomicUsize, dropped: AtomicUsize, } struct Element<'a>(&'a AtomicUsize); -impl DropCounter -{ - fn created(&self) -> usize - { +impl DropCounter { + fn created(&self) -> usize { self.created.load(Ordering::Relaxed) } - fn dropped(&self) -> usize - { + fn dropped(&self) -> usize { self.dropped.load(Ordering::Relaxed) } - fn element(&self) -> Element<'_> - { + fn element(&self) -> Element<'_> { self.created.fetch_add(1, Ordering::Relaxed); Element(&self.dropped) } - fn assert_drop_count(&self) - { + fn assert_drop_count(&self) { assert_eq!( self.created(), self.dropped(), @@ -273,10 +258,8 @@ impl DropCounter } } -impl<'a> Drop for Element<'a> -{ - fn drop(&mut self) - { +impl<'a> Drop for Element<'a> { + fn drop(&mut self) { self.0.fetch_add(1, Ordering::Relaxed); } } diff --git a/tests/azip.rs b/tests/azip.rs index a4bb6ffac..14a639ea0 100644 --- a/tests/azip.rs +++ b/tests/azip.rs @@ -11,8 +11,7 @@ use itertools::{assert_equal, cloned}; use std::mem::swap; #[test] -fn test_azip1() -{ +fn test_azip1() { let mut a = Array::zeros(62); let mut x = 0; azip!((a in &mut a) { *a = x; x += 1; }); @@ -20,8 +19,7 @@ fn test_azip1() } #[test] -fn 
test_azip2() -{ +fn test_azip2() { let mut a = Array::zeros((5, 7)); let b = Array::from_shape_fn(a.dim(), |(i, j)| 1. / (i + 2 * j) as f32); azip!((a in &mut a, &b in &b) *a = b); @@ -29,8 +27,7 @@ fn test_azip2() } #[test] -fn test_azip2_1() -{ +fn test_azip2_1() { let mut a = Array::zeros((5, 7)); let b = Array::from_shape_fn((5, 10), |(i, j)| 1. / (i + 2 * j) as f32); let b = b.slice(s![..;-1, 3..]); @@ -39,8 +36,7 @@ fn test_azip2_1() } #[test] -fn test_azip2_3() -{ +fn test_azip2_3() { let mut b = Array::from_shape_fn((5, 10), |(i, j)| 1. / (i + 2 * j) as f32); let mut c = Array::from_shape_fn((5, 10), |(i, j)| f32::exp((i + j) as f32)); let a = b.clone(); @@ -51,8 +47,7 @@ fn test_azip2_3() #[test] #[cfg(feature = "approx")] -fn test_zip_collect() -{ +fn test_zip_collect() { use approx::assert_abs_diff_eq; // test Zip::map_collect and that it preserves c/f layout. @@ -80,8 +75,7 @@ fn test_zip_collect() #[test] #[cfg(feature = "approx")] -fn test_zip_assign_into() -{ +fn test_zip_assign_into() { use approx::assert_abs_diff_eq; let mut a = Array::::zeros((5, 10)); @@ -95,8 +89,7 @@ fn test_zip_assign_into() #[test] #[cfg(feature = "approx")] -fn test_zip_assign_into_cell() -{ +fn test_zip_assign_into_cell() { use approx::assert_abs_diff_eq; use std::cell::Cell; @@ -111,40 +104,33 @@ fn test_zip_assign_into_cell() } #[test] -fn test_zip_collect_drop() -{ +fn test_zip_collect_drop() { use std::cell::RefCell; use std::panic; struct Recorddrop<'a>((usize, usize), &'a RefCell>); - impl<'a> Drop for Recorddrop<'a> - { - fn drop(&mut self) - { + impl<'a> Drop for Recorddrop<'a> { + fn drop(&mut self) { self.1.borrow_mut().push(self.0); } } #[derive(Copy, Clone)] - enum Config - { + enum Config { CC, CF, FF, } - impl Config - { - fn a_is_f(self) -> bool - { + impl Config { + fn a_is_f(self) -> bool { match self { Config::CC | Config::CF => false, _ => true, } } - fn b_is_f(self) -> bool - { + fn b_is_f(self) -> bool { match self { Config::CC => false, _ => true, @@ 
-190,8 +176,7 @@ fn test_zip_collect_drop() } #[test] -fn test_azip_syntax_trailing_comma() -{ +fn test_azip_syntax_trailing_comma() { let mut b = Array::::zeros((5, 5)); let mut c = Array::::ones((5, 5)); let a = b.clone(); @@ -202,8 +187,7 @@ fn test_azip_syntax_trailing_comma() #[test] #[cfg(feature = "approx")] -fn test_azip2_sum() -{ +fn test_azip2_sum() { use approx::assert_abs_diff_eq; let c = Array::from_shape_fn((5, 10), |(i, j)| f32::exp((i + j) as f32)); @@ -217,8 +201,7 @@ fn test_azip2_sum() #[test] #[cfg(feature = "approx")] -fn test_azip3_slices() -{ +fn test_azip3_slices() { use approx::assert_abs_diff_eq; let mut a = [0.; 32]; @@ -238,8 +221,7 @@ fn test_azip3_slices() #[test] #[cfg(feature = "approx")] -fn test_broadcast() -{ +fn test_broadcast() { use approx::assert_abs_diff_eq; let n = 16; @@ -264,8 +246,7 @@ fn test_broadcast() #[should_panic] #[test] -fn test_zip_dim_mismatch_1() -{ +fn test_zip_dim_mismatch_1() { let mut a = Array::zeros((5, 7)); let mut d = a.raw_dim(); d[0] += 1; @@ -277,8 +258,7 @@ fn test_zip_dim_mismatch_1() // Zip::from(A).and(B) // where A is F-contiguous and B contiguous but neither F nor C contiguous. 
#[test] -fn test_contiguous_but_not_c_or_f() -{ +fn test_contiguous_but_not_c_or_f() { let a = Array::from_iter(0..27) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -304,8 +284,7 @@ fn test_contiguous_but_not_c_or_f() } #[test] -fn test_clone() -{ +fn test_clone() { let a = Array::from_iter(0..27) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -324,8 +303,7 @@ fn test_clone() } #[test] -fn test_indices_0() -{ +fn test_indices_0() { let a1 = arr0(3); let mut count = 0; @@ -338,8 +316,7 @@ fn test_indices_0() } #[test] -fn test_indices_1() -{ +fn test_indices_1() { let mut a1 = Array::default(12); for (i, elt) in a1.indexed_iter_mut() { *elt = i; @@ -369,8 +346,7 @@ fn test_indices_1() } #[test] -fn test_indices_2() -{ +fn test_indices_2() { let mut a1 = Array::default((10, 12)); for (i, elt) in a1.indexed_iter_mut() { *elt = i; @@ -400,8 +376,7 @@ fn test_indices_2() } #[test] -fn test_indices_3() -{ +fn test_indices_3() { let mut a1 = Array::default((4, 5, 6)); for (i, elt) in a1.indexed_iter_mut() { *elt = i; @@ -431,8 +406,7 @@ fn test_indices_3() } #[test] -fn test_indices_split_1() -{ +fn test_indices_split_1() { for m in (0..4).chain(10..12) { for n in (0..4).chain(10..12) { let a1 = Array::::default((m, n)); @@ -464,8 +438,7 @@ fn test_indices_split_1() } #[test] -fn test_zip_all() -{ +fn test_zip_all() { let a = Array::::zeros(62); let b = Array::::ones(62); let mut c = Array::::ones(62); @@ -476,8 +449,7 @@ fn test_zip_all() } #[test] -fn test_zip_all_empty_array() -{ +fn test_zip_all_empty_array() { let a = Array::::zeros(0); let b = Array::::ones(0); assert_eq!(true, Zip::from(&a).and(&b).all(|&_x, &_y| true)); diff --git a/tests/broadcast.rs b/tests/broadcast.rs index 288ccb38a..82047db60 100644 --- a/tests/broadcast.rs +++ b/tests/broadcast.rs @@ -2,8 +2,7 @@ use ndarray::prelude::*; #[test] #[cfg(feature = "std")] -fn broadcast_1() -{ +fn broadcast_1() { let a_dim = Dim([2, 4, 2, 2]); let b_dim = Dim([2, 1, 2, 1]); let a = ArcArray::linspace(0., 1., 
a_dim.size()) @@ -35,8 +34,7 @@ fn broadcast_1() #[test] #[cfg(feature = "std")] -fn test_add() -{ +fn test_add() { let a_dim = Dim([2, 4, 2, 2]); let b_dim = Dim([2, 1, 2, 1]); let mut a = ArcArray::linspace(0.0, 1., a_dim.size()) @@ -53,8 +51,7 @@ fn test_add() #[test] #[should_panic] #[cfg(feature = "std")] -fn test_add_incompat() -{ +fn test_add_incompat() { let a_dim = Dim([2, 4, 2, 2]); let mut a = ArcArray::linspace(0.0, 1., a_dim.size()) .into_shape_with_order(a_dim) @@ -64,8 +61,7 @@ fn test_add_incompat() } #[test] -fn test_broadcast() -{ +fn test_broadcast() { let (_, n, k) = (16, 16, 16); let x1 = 1.; // b0 broadcast 1 -> n, k @@ -85,8 +81,7 @@ fn test_broadcast() } #[test] -fn test_broadcast_1d() -{ +fn test_broadcast_1d() { let n = 16; let x1 = 1.; // b0 broadcast 1 -> n diff --git a/tests/clone.rs b/tests/clone.rs index 4a7e50b8e..e1914ba7f 100644 --- a/tests/clone.rs +++ b/tests/clone.rs @@ -1,8 +1,7 @@ use ndarray::arr2; #[test] -fn test_clone_from() -{ +fn test_clone_from() { let a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9]]); let b = arr2(&[[7, 7, 7]]); let mut c = b.clone(); diff --git a/tests/complex.rs b/tests/complex.rs index 824e296a4..1b52b2671 100644 --- a/tests/complex.rs +++ b/tests/complex.rs @@ -3,14 +3,12 @@ use ndarray::{arr1, arr2, Axis}; use num_complex::Complex; use num_traits::Num; -fn c(re: T, im: T) -> Complex -{ +fn c(re: T, im: T) -> Complex { Complex::new(re, im) } #[test] -fn complex_mat_mul() -{ +fn complex_mat_mul() { let a = arr2(&[[c(3., 4.), c(2., 0.)], [c(0., -2.), c(3., 0.)]]); let b = (&a * c(3., 0.)).map(|c| 5. 
* c / c.norm_sqr()); println!("{:>8.2}", b); diff --git a/tests/dimension.rs b/tests/dimension.rs index 6a9207e4c..2500164a0 100644 --- a/tests/dimension.rs +++ b/tests/dimension.rs @@ -7,8 +7,7 @@ use ndarray::{arr2, ArcArray, Array, Axis, Dim, Dimension, IxDyn, RemoveAxis}; use std::hash::{Hash, Hasher}; #[test] -fn insert_axis() -{ +fn insert_axis() { assert_eq!(Dim([]).insert_axis(Axis(0)), Dim([1])); assert_eq!(Dim([3]).insert_axis(Axis(0)), Dim([1, 3])); @@ -42,8 +41,7 @@ fn insert_axis() } #[test] -fn remove_axis() -{ +fn remove_axis() { assert_eq!(Dim([3]).remove_axis(Axis(0)), Dim([])); assert_eq!(Dim([1, 2]).remove_axis(Axis(0)), Dim([2])); assert_eq!(Dim([4, 5, 6]).remove_axis(Axis(1)), Dim([4, 6])); @@ -65,8 +63,7 @@ fn remove_axis() #[test] #[allow(clippy::eq_op)] -fn dyn_dimension() -{ +fn dyn_dimension() { let a = arr2(&[[1., 2.], [3., 4.0]]) .into_shape_with_order(vec![2, 2]) .unwrap(); @@ -82,8 +79,7 @@ fn dyn_dimension() } #[test] -fn dyn_insert() -{ +fn dyn_insert() { let mut v = vec![2, 3, 4, 5]; let mut dim = Dim(v.clone()); defmac!(test_insert index => { @@ -102,8 +98,7 @@ fn dyn_insert() } #[test] -fn dyn_remove() -{ +fn dyn_remove() { let mut v = vec![1, 2, 3, 4, 5, 6, 7]; let mut dim = Dim(v.clone()); defmac!(test_remove index => { @@ -122,8 +117,7 @@ fn dyn_remove() } #[test] -fn fastest_varying_order() -{ +fn fastest_varying_order() { let strides = Dim([2, 8, 4, 1]); let order = strides._fastest_varying_stride_order(); assert_eq!(order.slice(), &[3, 0, 2, 1]); @@ -196,8 +190,7 @@ fn min_stride_axis() { */ #[test] -fn max_stride_axis() -{ +fn max_stride_axis() { let a = ArrayF32::zeros(10); assert_eq!(a.max_stride_axis(), Axis(0)); @@ -224,8 +217,7 @@ fn max_stride_axis() } #[test] -fn test_indexing() -{ +fn test_indexing() { let mut x = Dim([1, 2]); assert_eq!(x[0], 1); @@ -236,8 +228,7 @@ fn test_indexing() } #[test] -fn test_operations() -{ +fn test_operations() { let mut x = Dim([1, 2]); let mut y = Dim([1, 1]); @@ -254,10 +245,8 @@ fn 
test_operations() #[test] #[allow(clippy::cognitive_complexity)] -fn test_hash() -{ - fn calc_hash(value: &T) -> u64 - { +fn test_hash() { + fn calc_hash(value: &T) -> u64 { let mut hasher = std::collections::hash_map::DefaultHasher::new(); value.hash(&mut hasher); hasher.finish() @@ -292,10 +281,8 @@ fn test_hash() } #[test] -fn test_generic_operations() -{ - fn test_dim(d: &D) - { +fn test_generic_operations() { + fn test_dim(d: &D) { let mut x = d.clone(); x[0] += 1; assert_eq!(x[0], 3); @@ -309,10 +296,8 @@ fn test_generic_operations() } #[test] -fn test_array_view() -{ - fn test_dim(d: &D) - { +fn test_array_view() { + fn test_dim(d: &D) { assert_eq!(d.as_array_view().sum(), 7); assert_eq!(d.as_array_view().strides(), &[1]); } @@ -325,8 +310,7 @@ fn test_array_view() #[test] #[cfg(feature = "std")] #[allow(clippy::cognitive_complexity)] -fn test_all_ndindex() -{ +fn test_all_ndindex() { use ndarray::IntoDimension; macro_rules! ndindex { ($($i:expr),*) => { diff --git a/tests/format.rs b/tests/format.rs index 35909871f..4b21fe39d 100644 --- a/tests/format.rs +++ b/tests/format.rs @@ -2,8 +2,7 @@ use ndarray::prelude::*; use ndarray::rcarr1; #[test] -fn formatting() -{ +fn formatting() { let a = rcarr1::(&[1., 2., 3., 4.]); assert_eq!(format!("{}", a), "[1, 2, 3, 4]"); assert_eq!(format!("{:4}", a), "[ 1, 2, 3, 4]"); @@ -56,8 +55,7 @@ fn formatting() } #[test] -fn debug_format() -{ +fn debug_format() { let a = Array2::::zeros((3, 4)); assert_eq!( format!("{:?}", a), diff --git a/tests/higher_order_f.rs b/tests/higher_order_f.rs index 72245412f..c567eb3e0 100644 --- a/tests/higher_order_f.rs +++ b/tests/higher_order_f.rs @@ -2,8 +2,7 @@ use ndarray::prelude::*; #[test] #[should_panic] -fn test_fold_axis_oob() -{ +fn test_fold_axis_oob() { let a = arr2(&[[1., 2.], [3., 4.]]); a.fold_axis(Axis(2), 0., |x, y| x + y); } diff --git a/tests/indices.rs b/tests/indices.rs index a9414f9a7..ca6ca9887 100644 --- a/tests/indices.rs +++ b/tests/indices.rs @@ -3,8 +3,7 @@ use 
ndarray::prelude::*; use ndarray::Order; #[test] -fn test_ixdyn_index_iterate() -{ +fn test_ixdyn_index_iterate() { for &order in &[Order::C, Order::F] { let mut a = Array::zeros((2, 3, 4).set_f(order.is_column_major())); let dim = a.shape().to_vec(); diff --git a/tests/into-ixdyn.rs b/tests/into-ixdyn.rs index 6e7bf9607..ef5b75be4 100644 --- a/tests/into-ixdyn.rs +++ b/tests/into-ixdyn.rs @@ -6,14 +6,12 @@ use ndarray::prelude::*; #[test] -fn test_arr0_into_dyn() -{ +fn test_arr0_into_dyn() { assert!(arr0(1.234).into_dyn()[IxDyn(&[])] == 1.234); } #[test] -fn test_arr2_into_arrd_nonstandard_strides() -{ +fn test_arr2_into_arrd_nonstandard_strides() { let arr = Array2::from_shape_fn((12, 34).f(), |(i, j)| i * 34 + j).into_dyn(); let brr = ArrayD::from_shape_fn(vec![12, 34], |d| d[0] * 34 + d[1]); diff --git a/tests/iterator_chunks.rs b/tests/iterator_chunks.rs index 79b5403ef..04ec9cc42 100644 --- a/tests/iterator_chunks.rs +++ b/tests/iterator_chunks.rs @@ -7,8 +7,7 @@ use ndarray::prelude::*; #[test] #[cfg(feature = "std")] -fn chunks() -{ +fn chunks() { use ndarray::NdProducer; let a = >::linspace(1., 100., 10 * 10) .into_shape_with_order((10, 10)) @@ -47,15 +46,13 @@ fn chunks() #[should_panic] #[test] -fn chunks_different_size_1() -{ +fn chunks_different_size_1() { let a = Array::::zeros(vec![2, 3]); a.exact_chunks(vec![2]); } #[test] -fn chunks_ok_size() -{ +fn chunks_ok_size() { let mut a = Array::::zeros(vec![2, 3]); a.fill(1.); let mut c = 0; @@ -69,15 +66,13 @@ fn chunks_ok_size() #[should_panic] #[test] -fn chunks_different_size_2() -{ +fn chunks_different_size_2() { let a = Array::::zeros(vec![2, 3]); a.exact_chunks(vec![2, 3, 4]); } #[test] -fn chunks_mut() -{ +fn chunks_mut() { let mut a = Array::zeros((7, 8)); for (i, mut chunk) in a.exact_chunks_mut((2, 3)).into_iter().enumerate() { chunk.fill(i); @@ -97,8 +92,7 @@ fn chunks_mut() #[should_panic] #[test] -fn chunks_different_size_3() -{ +fn chunks_different_size_3() { let mut a = 
Array::::zeros(vec![2, 3]); a.exact_chunks_mut(vec![2, 3, 4]); } diff --git a/tests/iterators.rs b/tests/iterators.rs index 23175fd40..d8e6cb4a6 100644 --- a/tests/iterators.rs +++ b/tests/iterators.rs @@ -25,8 +25,7 @@ macro_rules! assert_panics { #[test] #[cfg(feature = "std")] -fn double_ended() -{ +fn double_ended() { let a = ArcArray::linspace(0., 7., 8); let mut it = a.iter().cloned(); assert_eq!(it.next(), Some(0.)); @@ -38,8 +37,7 @@ fn double_ended() } #[test] -fn double_ended_rows() -{ +fn double_ended_rows() { let a = ArcArray::from_iter(0..8).into_shape_clone((4, 2)).unwrap(); let mut row_it = a.rows().into_iter(); assert_equal(row_it.next_back().unwrap(), &[6, 7]); @@ -60,8 +58,7 @@ fn double_ended_rows() } #[test] -fn iter_size_hint() -{ +fn iter_size_hint() { // Check that the size hint is correctly computed let a = ArcArray::from_iter(0..24) .into_shape_with_order((2, 3, 4)) @@ -82,8 +79,7 @@ fn iter_size_hint() #[test] #[cfg(feature = "std")] -fn indexed() -{ +fn indexed() { let a = ArcArray::linspace(0., 7., 8); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt as usize); @@ -103,8 +99,7 @@ fn indexed() #[test] #[cfg(feature = "std")] -fn as_slice() -{ +fn as_slice() { use ndarray::Data; fn assert_slice_correct(v: &ArrayBase) @@ -161,8 +156,7 @@ fn as_slice() } #[test] -fn inner_iter() -{ +fn inner_iter() { let a = ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -193,8 +187,7 @@ fn inner_iter() } #[test] -fn inner_iter_corner_cases() -{ +fn inner_iter_corner_cases() { let a0 = ArcArray::::zeros(()); assert_equal(a0.rows(), vec![aview1(&[0])]); @@ -206,8 +199,7 @@ fn inner_iter_corner_cases() } #[test] -fn inner_iter_size_hint() -{ +fn inner_iter_size_hint() { // Check that the size hint is correctly computed let a = ArcArray::from_iter(0..24) .into_shape_with_order((2, 3, 4)) @@ -224,8 +216,7 @@ fn inner_iter_size_hint() #[allow(deprecated)] // into_outer_iter #[test] -fn outer_iter() -{ +fn 
outer_iter() { let a = ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -275,8 +266,7 @@ fn outer_iter() } #[test] -fn axis_iter() -{ +fn axis_iter() { let a = ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -293,8 +283,7 @@ fn axis_iter() } #[test] -fn axis_iter_split_at() -{ +fn axis_iter_split_at() { let a = Array::from_iter(0..5); let iter = a.axis_iter(Axis(0)); let all: Vec<_> = iter.clone().collect(); @@ -306,8 +295,7 @@ fn axis_iter_split_at() } #[test] -fn axis_iter_split_at_partially_consumed() -{ +fn axis_iter_split_at_partially_consumed() { let a = Array::from_iter(0..5); let mut iter = a.axis_iter(Axis(0)); while iter.next().is_some() { @@ -321,8 +309,7 @@ fn axis_iter_split_at_partially_consumed() } #[test] -fn axis_iter_zip() -{ +fn axis_iter_zip() { let a = Array::from_iter(0..5); let iter = a.axis_iter(Axis(0)); let mut b = Array::zeros(5); @@ -331,8 +318,7 @@ fn axis_iter_zip() } #[test] -fn axis_iter_zip_partially_consumed() -{ +fn axis_iter_zip_partially_consumed() { let a = Array::from_iter(0..5); let mut iter = a.axis_iter(Axis(0)); let mut consumed = 0; @@ -347,8 +333,7 @@ fn axis_iter_zip_partially_consumed() } #[test] -fn axis_iter_zip_partially_consumed_discontiguous() -{ +fn axis_iter_zip_partially_consumed_discontiguous() { let a = Array::from_iter(0..5); let mut iter = a.axis_iter(Axis(0)); let mut consumed = 0; @@ -364,8 +349,7 @@ fn axis_iter_zip_partially_consumed_discontiguous() } #[test] -fn outer_iter_corner_cases() -{ +fn outer_iter_corner_cases() { let a2 = ArcArray::::zeros((0, 3)); assert_equal(a2.outer_iter(), vec![aview1(&[]); 0]); @@ -375,8 +359,7 @@ fn outer_iter_corner_cases() #[allow(deprecated)] #[test] -fn outer_iter_mut() -{ +fn outer_iter_mut() { let a = ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -400,8 +383,7 @@ fn outer_iter_mut() } #[test] -fn axis_iter_mut() -{ +fn 
axis_iter_mut() { let a = ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -421,8 +403,7 @@ fn axis_iter_mut() } #[test] -fn axis_chunks_iter() -{ +fn axis_chunks_iter() { let a = ArcArray::from_iter(0..24); let a = a.into_shape_with_order((2, 6, 2)).unwrap(); @@ -460,8 +441,7 @@ fn axis_chunks_iter() } #[test] -fn axis_iter_mut_split_at() -{ +fn axis_iter_mut_split_at() { let mut a = Array::from_iter(0..5); let mut a_clone = a.clone(); let all: Vec<_> = a_clone.axis_iter_mut(Axis(0)).collect(); @@ -473,8 +453,7 @@ fn axis_iter_mut_split_at() } #[test] -fn axis_iter_mut_split_at_partially_consumed() -{ +fn axis_iter_mut_split_at_partially_consumed() { let mut a = Array::from_iter(0..5); for consumed in 1..=a.len() { for mid in 0..=(a.len() - consumed) { @@ -500,8 +479,7 @@ fn axis_iter_mut_split_at_partially_consumed() } #[test] -fn axis_iter_mut_zip() -{ +fn axis_iter_mut_zip() { let orig = Array::from_iter(0..5); let mut cloned = orig.clone(); let iter = cloned.axis_iter_mut(Axis(0)); @@ -515,8 +493,7 @@ fn axis_iter_mut_zip() } #[test] -fn axis_iter_mut_zip_partially_consumed() -{ +fn axis_iter_mut_zip_partially_consumed() { let mut a = Array::from_iter(0..5); for consumed in 1..=a.len() { let remaining = a.len() - consumed; @@ -531,8 +508,7 @@ fn axis_iter_mut_zip_partially_consumed() } #[test] -fn axis_iter_mut_zip_partially_consumed_discontiguous() -{ +fn axis_iter_mut_zip_partially_consumed_discontiguous() { let mut a = Array::from_iter(0..5); for consumed in 1..=a.len() { let remaining = a.len() - consumed; @@ -549,8 +525,7 @@ fn axis_iter_mut_zip_partially_consumed_discontiguous() #[test] #[cfg(feature = "std")] -fn axis_chunks_iter_corner_cases() -{ +fn axis_chunks_iter_corner_cases() { // examples provided by @bluss in PR #65 // these tests highlight corner cases of the axis_chunks_iter implementation // and enable checking if no pointer offsetting is out of bounds. 
However @@ -581,8 +556,7 @@ fn axis_chunks_iter_corner_cases() } #[test] -fn axis_chunks_iter_zero_stride() -{ +fn axis_chunks_iter_zero_stride() { { // stride 0 case let b = Array::from(vec![0f32; 0]) @@ -618,22 +592,19 @@ fn axis_chunks_iter_zero_stride() #[should_panic] #[test] -fn axis_chunks_iter_zero_chunk_size() -{ +fn axis_chunks_iter_zero_chunk_size() { let a = Array::from_iter(0..5); a.axis_chunks_iter(Axis(0), 0); } #[test] -fn axis_chunks_iter_zero_axis_len() -{ +fn axis_chunks_iter_zero_axis_len() { let a = Array::from_iter(0..0); assert!(a.axis_chunks_iter(Axis(0), 5).next().is_none()); } #[test] -fn axis_chunks_iter_split_at() -{ +fn axis_chunks_iter_split_at() { let mut a = Array2::::zeros((11, 3)); a.iter_mut().enumerate().for_each(|(i, elt)| *elt = i); for source in &[ @@ -660,8 +631,7 @@ fn axis_chunks_iter_split_at() } #[test] -fn axis_chunks_iter_mut() -{ +fn axis_chunks_iter_mut() { let a = ArcArray::from_iter(0..24); let mut a = a.into_shape_with_order((2, 6, 2)).unwrap(); @@ -673,22 +643,19 @@ fn axis_chunks_iter_mut() #[should_panic] #[test] -fn axis_chunks_iter_mut_zero_chunk_size() -{ +fn axis_chunks_iter_mut_zero_chunk_size() { let mut a = Array::from_iter(0..5); a.axis_chunks_iter_mut(Axis(0), 0); } #[test] -fn axis_chunks_iter_mut_zero_axis_len() -{ +fn axis_chunks_iter_mut_zero_axis_len() { let mut a = Array::from_iter(0..0); assert!(a.axis_chunks_iter_mut(Axis(0), 5).next().is_none()); } #[test] -fn outer_iter_size_hint() -{ +fn outer_iter_size_hint() { // Check that the size hint is correctly computed let a = ArcArray::from_iter(0..24) .into_shape_with_order((4, 3, 2)) @@ -723,8 +690,7 @@ fn outer_iter_size_hint() } #[test] -fn outer_iter_split_at() -{ +fn outer_iter_split_at() { let a = ArcArray::from_iter(0..30) .into_shape_with_order((5, 3, 2)) .unwrap(); @@ -748,8 +714,7 @@ fn outer_iter_split_at() #[test] #[should_panic] -fn outer_iter_split_at_panics() -{ +fn outer_iter_split_at_panics() { let a = ArcArray::from_iter(0..30) 
.into_shape_with_order((5, 3, 2)) .unwrap(); @@ -759,8 +724,7 @@ fn outer_iter_split_at_panics() } #[test] -fn outer_iter_mut_split_at() -{ +fn outer_iter_mut_split_at() { let mut a = ArcArray::from_iter(0..30) .into_shape_with_order((5, 3, 2)) .unwrap(); @@ -782,8 +746,7 @@ fn outer_iter_mut_split_at() } #[test] -fn iterators_are_send_sync() -{ +fn iterators_are_send_sync() { // When the element type is Send + Sync, then the iterators and views // are too. fn _send_sync(_: &T) {} @@ -815,8 +778,7 @@ fn iterators_are_send_sync() #[test] #[allow(clippy::unnecessary_fold)] -fn test_fold() -{ +fn test_fold() { let mut a = Array2::::default((20, 20)); a += 1; let mut iter = a.iter(); @@ -829,8 +791,7 @@ fn test_fold() } #[test] -fn nth_back_examples() -{ +fn nth_back_examples() { let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); assert_eq!(a.iter().nth_back(0), Some(&a[a.len() - 1])); @@ -843,8 +804,7 @@ fn nth_back_examples() } #[test] -fn nth_back_zero_n() -{ +fn nth_back_zero_n() { let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); let mut iter1 = a.iter(); @@ -856,8 +816,7 @@ fn nth_back_zero_n() } #[test] -fn nth_back_nonzero_n() -{ +fn nth_back_nonzero_n() { let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); let mut iter1 = a.iter(); @@ -873,8 +832,7 @@ fn nth_back_nonzero_n() } #[test] -fn nth_back_past_end() -{ +fn nth_back_past_end() { let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); let mut iter = a.iter(); @@ -883,8 +841,7 @@ fn nth_back_past_end() } #[test] -fn nth_back_partially_consumed() -{ +fn nth_back_partially_consumed() { let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); let mut iter = a.iter(); @@ -902,8 +859,7 @@ fn nth_back_partially_consumed() } #[test] -fn test_rfold() -{ +fn test_rfold() { { let mut a = 
Array1::::default(256); a += 1; @@ -949,16 +905,14 @@ fn test_rfold() } #[test] -fn test_into_iter() -{ +fn test_into_iter() { let a = Array1::from(vec![1, 2, 3, 4]); let v = a.into_iter().collect::>(); assert_eq!(v, [1, 2, 3, 4]); } #[test] -fn test_into_iter_2d() -{ +fn test_into_iter_2d() { let a = Array1::from(vec![1, 2, 3, 4]) .into_shape_with_order((2, 2)) .unwrap(); @@ -974,8 +928,7 @@ fn test_into_iter_2d() } #[test] -fn test_into_iter_sliced() -{ +fn test_into_iter_sliced() { let (m, n) = (4, 5); let drops = Cell::new(0); @@ -1019,25 +972,20 @@ fn test_into_iter_sliced() /// /// Compares equal by its "represented value". #[derive(Clone, Debug)] -struct DropCount<'a> -{ +struct DropCount<'a> { value: i32, my_drops: usize, drops: &'a Cell, } -impl PartialEq for DropCount<'_> -{ - fn eq(&self, other: &Self) -> bool - { +impl PartialEq for DropCount<'_> { + fn eq(&self, other: &Self) -> bool { self.value == other.value } } -impl<'a> DropCount<'a> -{ - fn new(value: i32, drops: &'a Cell) -> Self - { +impl<'a> DropCount<'a> { + fn new(value: i32, drops: &'a Cell) -> Self { DropCount { value, my_drops: 0, @@ -1046,10 +994,8 @@ impl<'a> DropCount<'a> } } -impl Drop for DropCount<'_> -{ - fn drop(&mut self) - { +impl Drop for DropCount<'_> { + fn drop(&mut self) { assert_eq!(self.my_drops, 0); self.my_drops += 1; self.drops.set(self.drops.get() + 1); diff --git a/tests/ix0.rs b/tests/ix0.rs index f1038556a..714d499df 100644 --- a/tests/ix0.rs +++ b/tests/ix0.rs @@ -8,8 +8,7 @@ use ndarray::Ix0; use ndarray::ShapeBuilder; #[test] -fn test_ix0() -{ +fn test_ix0() { let mut a = Array::zeros(Ix0()); assert_eq!(a[()], 0.); a[()] = 1.; @@ -28,8 +27,7 @@ fn test_ix0() } #[test] -fn test_ix0_add() -{ +fn test_ix0_add() { let mut a = Array::zeros(Ix0()); a += 1.; assert_eq!(a[()], 1.); @@ -38,8 +36,7 @@ fn test_ix0_add() } #[test] -fn test_ix0_add_add() -{ +fn test_ix0_add_add() { let mut a = Array::zeros(Ix0()); a += 1.; let mut b = Array::zeros(Ix0()); @@ -49,8 +46,7 @@ 
fn test_ix0_add_add() } #[test] -fn test_ix0_add_broad() -{ +fn test_ix0_add_broad() { let mut b = Array::from(vec![5., 6.]); let mut a = Array::zeros(Ix0()); a += 1.; diff --git a/tests/ixdyn.rs b/tests/ixdyn.rs index 05f123ba1..ba85688cf 100644 --- a/tests/ixdyn.rs +++ b/tests/ixdyn.rs @@ -10,8 +10,7 @@ use ndarray::Order; use ndarray::ShapeBuilder; #[test] -fn test_ixdyn() -{ +fn test_ixdyn() { // check that we can use fixed size arrays for indexing let mut a = Array::zeros(vec![2, 3, 4]); a[[1, 1, 1]] = 1.; @@ -20,8 +19,7 @@ fn test_ixdyn() #[should_panic] #[test] -fn test_ixdyn_wrong_dim() -{ +fn test_ixdyn_wrong_dim() { // check that we can use but it panics at runtime, if number of axes is wrong let mut a = Array::zeros(vec![2, 3, 4]); a[[1, 1, 1]] = 1.; @@ -30,8 +28,7 @@ fn test_ixdyn_wrong_dim() } #[test] -fn test_ixdyn_out_of_bounds() -{ +fn test_ixdyn_out_of_bounds() { // check that we are out of bounds let a = Array::::zeros(vec![2, 3, 4]); let res = a.get([0, 3, 0]); @@ -39,8 +36,7 @@ fn test_ixdyn_out_of_bounds() } #[test] -fn test_ixdyn_iterate() -{ +fn test_ixdyn_iterate() { for &order in &[Order::C, Order::F] { let mut a = Array::zeros((2, 3, 4).set_f(order.is_column_major())); let dim = a.shape().to_vec(); @@ -60,8 +56,7 @@ fn test_ixdyn_iterate() } #[test] -fn test_ixdyn_index_iterate() -{ +fn test_ixdyn_index_iterate() { for &order in &[Order::C, Order::F] { let mut a = Array::zeros((2, 3, 4).set_f(order.is_column_major())); let dim = a.shape().to_vec(); @@ -80,8 +75,7 @@ fn test_ixdyn_index_iterate() } #[test] -fn test_ixdyn_uget() -{ +fn test_ixdyn_uget() { // check that we are out of bounds let mut a = Array::::zeros(vec![2, 3, 4]); @@ -110,8 +104,7 @@ fn test_ixdyn_uget() } #[test] -fn test_0() -{ +fn test_0() { let mut a = Array::zeros(vec![]); let z = vec![].into_dimension(); assert_eq!(a[z.clone()], 0.); @@ -131,8 +124,7 @@ fn test_0() } #[test] -fn test_0_add() -{ +fn test_0_add() { let mut a = Array::zeros(vec![]); a += 1.; 
assert_eq!(a[[]], 1.); @@ -141,8 +133,7 @@ fn test_0_add() } #[test] -fn test_0_add_add() -{ +fn test_0_add_add() { let mut a = Array::zeros(vec![]); a += 1.; let mut b = Array::zeros(vec![]); @@ -152,8 +143,7 @@ fn test_0_add_add() } #[test] -fn test_0_add_broad() -{ +fn test_0_add_broad() { let mut b = Array::from(vec![5., 6.]); let mut a = Array::zeros(vec![]); a += 1.; @@ -164,8 +154,7 @@ fn test_0_add_broad() #[test] #[cfg(feature = "std")] -fn test_into_dimension() -{ +fn test_into_dimension() { use ndarray::{Ix0, Ix1, Ix2, IxDyn}; let a = Array::linspace(0., 41., 6 * 7) diff --git a/tests/numeric.rs b/tests/numeric.rs index 4d70d4502..6f1c52dd0 100644 --- a/tests/numeric.rs +++ b/tests/numeric.rs @@ -8,22 +8,19 @@ use ndarray::{arr0, arr1, arr2, array, aview1, Array, Array1, Array2, Array3, Ax use std::f64; #[test] -fn test_mean_with_nan_values() -{ +fn test_mean_with_nan_values() { let a = array![f64::NAN, 1.]; assert!(a.mean().unwrap().is_nan()); } #[test] -fn test_mean_with_empty_array_of_floats() -{ +fn test_mean_with_empty_array_of_floats() { let a: Array1 = array![]; assert!(a.mean().is_none()); } #[test] -fn test_mean_with_array_of_floats() -{ +fn test_mean_with_array_of_floats() { let a: Array1 = array![ 0.99889651, 0.0150731, 0.28492482, 0.83819218, 0.48413156, 0.80710412, 0.41762936, 0.22879429, 0.43997224, 0.23831807, 0.02416466, 0.6269962, 0.47420614, 0.56275487, @@ -39,8 +36,7 @@ fn test_mean_with_array_of_floats() } #[test] -fn sum_mean() -{ +fn sum_mean() { let a: Array2 = arr2(&[[1., 2.], [3., 4.]]); assert_eq!(a.sum_axis(Axis(0)), arr1(&[4., 6.])); assert_eq!(a.sum_axis(Axis(1)), arr1(&[3., 7.])); @@ -52,8 +48,7 @@ fn sum_mean() } #[test] -fn sum_mean_empty() -{ +fn sum_mean_empty() { assert_eq!(Array3::::ones((2, 0, 3)).sum(), 0.); assert_eq!(Array1::::ones(0).sum_axis(Axis(0)), arr0(0.)); assert_eq!( @@ -68,8 +63,7 @@ fn sum_mean_empty() #[test] #[cfg(feature = "std")] -fn var() -{ +fn var() { let a = array![1., -4.32, 1.14, 0.32]; 
assert_abs_diff_eq!(a.var(0.), 5.049875, epsilon = 1e-8); } @@ -77,8 +71,7 @@ fn var() #[test] #[cfg(feature = "std")] #[should_panic] -fn var_negative_ddof() -{ +fn var_negative_ddof() { let a = array![1., 2., 3.]; a.var(-1.); } @@ -86,16 +79,14 @@ fn var_negative_ddof() #[test] #[cfg(feature = "std")] #[should_panic] -fn var_too_large_ddof() -{ +fn var_too_large_ddof() { let a = array![1., 2., 3.]; a.var(4.); } #[test] #[cfg(feature = "std")] -fn var_nan_ddof() -{ +fn var_nan_ddof() { let a = Array2::::zeros((2, 3)); let v = a.var(::std::f64::NAN); assert!(v.is_nan()); @@ -103,16 +94,14 @@ fn var_nan_ddof() #[test] #[cfg(feature = "std")] -fn var_empty_arr() -{ +fn var_empty_arr() { let a: Array1 = array![]; assert!(a.var(0.0).is_nan()); } #[test] #[cfg(feature = "std")] -fn std() -{ +fn std() { let a = array![1., -4.32, 1.14, 0.32]; assert_abs_diff_eq!(a.std(0.), 2.24719, epsilon = 1e-5); } @@ -120,8 +109,7 @@ fn std() #[test] #[cfg(feature = "std")] #[should_panic] -fn std_negative_ddof() -{ +fn std_negative_ddof() { let a = array![1., 2., 3.]; a.std(-1.); } @@ -129,16 +117,14 @@ fn std_negative_ddof() #[test] #[cfg(feature = "std")] #[should_panic] -fn std_too_large_ddof() -{ +fn std_too_large_ddof() { let a = array![1., 2., 3.]; a.std(4.); } #[test] #[cfg(feature = "std")] -fn std_nan_ddof() -{ +fn std_nan_ddof() { let a = Array2::::zeros((2, 3)); let v = a.std(::std::f64::NAN); assert!(v.is_nan()); @@ -146,16 +132,14 @@ fn std_nan_ddof() #[test] #[cfg(feature = "std")] -fn std_empty_arr() -{ +fn std_empty_arr() { let a: Array1 = array![]; assert!(a.std(0.0).is_nan()); } #[test] #[cfg(feature = "approx")] -fn var_axis() -{ +fn var_axis() { use ndarray::{aview0, aview2}; let a = array![ @@ -213,8 +197,7 @@ fn var_axis() #[test] #[cfg(feature = "approx")] -fn std_axis() -{ +fn std_axis() { use ndarray::aview2; let a = array![ @@ -274,8 +257,7 @@ fn std_axis() #[test] #[should_panic] #[cfg(feature = "std")] -fn var_axis_negative_ddof() -{ +fn 
var_axis_negative_ddof() { let a = array![1., 2., 3.]; a.var_axis(Axis(0), -1.); } @@ -283,16 +265,14 @@ fn var_axis_negative_ddof() #[test] #[should_panic] #[cfg(feature = "std")] -fn var_axis_too_large_ddof() -{ +fn var_axis_too_large_ddof() { let a = array![1., 2., 3.]; a.var_axis(Axis(0), 4.); } #[test] #[cfg(feature = "std")] -fn var_axis_nan_ddof() -{ +fn var_axis_nan_ddof() { let a = Array2::::zeros((2, 3)); let v = a.var_axis(Axis(1), ::std::f64::NAN); assert_eq!(v.shape(), &[2]); @@ -301,8 +281,7 @@ fn var_axis_nan_ddof() #[test] #[cfg(feature = "std")] -fn var_axis_empty_axis() -{ +fn var_axis_empty_axis() { let a = Array2::::zeros((2, 0)); let v = a.var_axis(Axis(1), 0.); assert_eq!(v.shape(), &[2]); @@ -312,16 +291,14 @@ fn var_axis_empty_axis() #[test] #[should_panic] #[cfg(feature = "std")] -fn std_axis_bad_dof() -{ +fn std_axis_bad_dof() { let a = array![1., 2., 3.]; a.std_axis(Axis(0), 4.); } #[test] #[cfg(feature = "std")] -fn std_axis_empty_axis() -{ +fn std_axis_empty_axis() { let a = Array2::::zeros((2, 0)); let v = a.std_axis(Axis(1), 0.); assert_eq!(v.shape(), &[2]); diff --git a/tests/oper.rs b/tests/oper.rs index 294a762c6..de1347752 100644 --- a/tests/oper.rs +++ b/tests/oper.rs @@ -13,8 +13,7 @@ use num_traits::Zero; use approx::assert_abs_diff_eq; use defmac::defmac; -fn test_oper(op: &str, a: &[f32], b: &[f32], c: &[f32]) -{ +fn test_oper(op: &str, a: &[f32], b: &[f32], c: &[f32]) { let aa = CowArray::from(arr1(a)); let bb = CowArray::from(arr1(b)); let cc = CowArray::from(arr1(c)); @@ -32,8 +31,7 @@ fn test_oper(op: &str, a: &[f32], b: &[f32], c: &[f32]) } fn test_oper_arr(op: &str, mut aa: CowArray, bb: CowArray, cc: CowArray) -where D: Dimension -{ +where D: Dimension { match op { "+" => { assert_eq!(&aa + &bb, cc); @@ -69,8 +67,7 @@ where D: Dimension } #[test] -fn operations() -{ +fn operations() { test_oper("+", &[1.0, 2.0, 3.0, 4.0], &[0.0, 1.0, 2.0, 3.0], &[1.0, 3.0, 5.0, 7.0]); test_oper("-", &[1.0, 2.0, 3.0, 4.0], &[0.0, 1.0, 
2.0, 3.0], &[1.0, 1.0, 1.0, 1.0]); test_oper("*", &[1.0, 2.0, 3.0, 4.0], &[0.0, 1.0, 2.0, 3.0], &[0.0, 2.0, 6.0, 12.0]); @@ -80,8 +77,7 @@ fn operations() } #[test] -fn scalar_operations() -{ +fn scalar_operations() { let a = arr0::(1.); let b = rcarr1::(&[1., 1.]); let c = rcarr2(&[[1., 1.], [1., 1.]]); @@ -127,8 +123,7 @@ where } #[test] -fn dot_product() -{ +fn dot_product() { let a = Array::range(0., 69., 1.); let b = &a * 2. - 7.; let dot = 197846.; @@ -166,8 +161,7 @@ fn dot_product() // test that we can dot product with a broadcast array #[test] -fn dot_product_0() -{ +fn dot_product_0() { let a = Array::range(0., 69., 1.); let x = 1.5; let b = aview0(&x); @@ -187,8 +181,7 @@ fn dot_product_0() } #[test] -fn dot_product_neg_stride() -{ +fn dot_product_neg_stride() { // test that we can dot with negative stride let a = Array::range(0., 69., 1.); let b = &a * 2. - 7.; @@ -207,8 +200,7 @@ fn dot_product_neg_stride() } #[test] -fn fold_and_sum() -{ +fn fold_and_sum() { let a = Array::linspace(0., 127., 128) .into_shape_with_order((8, 16)) .unwrap(); @@ -249,8 +241,7 @@ fn fold_and_sum() } #[test] -fn product() -{ +fn product() { let a = Array::linspace(0.5, 2., 128) .into_shape_with_order((8, 16)) .unwrap(); @@ -271,28 +262,24 @@ fn product() } } -fn range_mat(m: Ix, n: Ix) -> Array2 -{ +fn range_mat(m: Ix, n: Ix) -> Array2 { Array::linspace(0., (m * n) as f32 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() } -fn range_mat64(m: Ix, n: Ix) -> Array2 -{ +fn range_mat64(m: Ix, n: Ix) -> Array2 { Array::linspace(0., (m * n) as f64 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() } #[cfg(feature = "approx")] -fn range1_mat64(m: Ix) -> Array1 -{ +fn range1_mat64(m: Ix) -> Array1 { Array::linspace(0., m as f64 - 1., m) } -fn range_i32(m: Ix, n: Ix) -> Array2 -{ +fn range_i32(m: Ix, n: Ix) -> Array2 { Array::from_iter(0..(m * n) as i32) .into_shape_with_order((m, n)) .unwrap() @@ -329,8 +316,7 @@ where } #[test] -fn mat_mul() -{ +fn mat_mul() { let (m, n, k) 
= (8, 8, 8); let a = range_mat(m, n); let b = range_mat(n, k); @@ -392,8 +378,7 @@ fn mat_mul() // Check that matrix multiplication of contiguous matrices returns a // matrix with the same order #[test] -fn mat_mul_order() -{ +fn mat_mul_order() { let (m, n, k) = (8, 8, 8); let a = range_mat(m, n); let b = range_mat(n, k); @@ -412,8 +397,7 @@ fn mat_mul_order() // test matrix multiplication shape mismatch #[test] #[should_panic] -fn mat_mul_shape_mismatch() -{ +fn mat_mul_shape_mismatch() { let (m, k, k2, n) = (8, 8, 9, 8); let a = range_mat(m, k); let b = range_mat(k2, n); @@ -423,8 +407,7 @@ fn mat_mul_shape_mismatch() // test matrix multiplication shape mismatch #[test] #[should_panic] -fn mat_mul_shape_mismatch_2() -{ +fn mat_mul_shape_mismatch_2() { let (m, k, k2, n) = (8, 8, 8, 8); let a = range_mat(m, k); let b = range_mat(k2, n); @@ -435,8 +418,7 @@ fn mat_mul_shape_mismatch_2() // Check that matrix multiplication // supports broadcast arrays. #[test] -fn mat_mul_broadcast() -{ +fn mat_mul_broadcast() { let (m, n, k) = (16, 16, 16); let a = range_mat(m, n); let x1 = 1.; @@ -455,8 +437,7 @@ fn mat_mul_broadcast() // Check that matrix multiplication supports reversed axes #[test] -fn mat_mul_rev() -{ +fn mat_mul_rev() { let (m, n, k) = (16, 16, 16); let a = range_mat(m, n); let b = range_mat(n, k); @@ -472,8 +453,7 @@ fn mat_mul_rev() // Check that matrix multiplication supports arrays with zero rows or columns #[test] -fn mat_mut_zero_len() -{ +fn mat_mut_zero_len() { defmac!(mat_mul_zero_len range_mat_fn => { for n in 0..4 { for m in 0..4 { @@ -494,8 +474,7 @@ fn mat_mut_zero_len() } #[test] -fn scaled_add() -{ +fn scaled_add() { let a = range_mat(16, 15); let mut b = range_mat(16, 15); b.mapv_inplace(f32::exp); @@ -510,8 +489,7 @@ fn scaled_add() #[cfg(feature = "approx")] #[test] -fn scaled_add_2() -{ +fn scaled_add_2() { let beta = -2.3; let sizes = vec![ (4, 4, 1, 4), @@ -548,8 +526,7 @@ fn scaled_add_2() #[cfg(feature = "approx")] #[test] -fn 
scaled_add_3() -{ +fn scaled_add_3() { use approx::assert_relative_eq; use ndarray::{Slice, SliceInfo, SliceInfoElem}; use std::convert::TryFrom; @@ -600,8 +577,7 @@ fn scaled_add_3() #[cfg(feature = "approx")] #[test] -fn gen_mat_mul() -{ +fn gen_mat_mul() { let alpha = -2.3; let beta = 3.14; let sizes = vec![ @@ -643,8 +619,7 @@ fn gen_mat_mul() // Test y = A x where A is f-order #[cfg(feature = "approx")] #[test] -fn gemm_64_1_f() -{ +fn gemm_64_1_f() { let a = range_mat64(64, 64).reversed_axes(); let (m, n) = a.dim(); // m x n times n x 1 == m x 1 @@ -656,8 +631,7 @@ fn gemm_64_1_f() } #[test] -fn gen_mat_mul_i32() -{ +fn gen_mat_mul_i32() { let alpha = -1; let beta = 2; let sizes = if cfg!(miri) { @@ -688,8 +662,7 @@ fn gen_mat_mul_i32() #[cfg(feature = "approx")] #[test] -fn gen_mat_vec_mul() -{ +fn gen_mat_vec_mul() { use approx::assert_relative_eq; use ndarray::linalg::general_mat_vec_mul; @@ -757,8 +730,7 @@ fn gen_mat_vec_mul() #[cfg(feature = "approx")] #[test] -fn vec_mat_mul() -{ +fn vec_mat_mul() { use approx::assert_relative_eq; // simple, slow, correct (hopefully) mat mul @@ -821,8 +793,7 @@ fn vec_mat_mul() } #[test] -fn kron_square_f64() -{ +fn kron_square_f64() { let a = arr2(&[[1.0, 0.0], [0.0, 1.0]]); let b = arr2(&[[0.0, 1.0], [1.0, 0.0]]); @@ -848,8 +819,7 @@ fn kron_square_f64() } #[test] -fn kron_square_i64() -{ +fn kron_square_i64() { let a = arr2(&[[1, 0], [0, 1]]); let b = arr2(&[[0, 1], [1, 0]]); @@ -865,8 +835,7 @@ fn kron_square_i64() } #[test] -fn kron_i64() -{ +fn kron_i64() { let a = arr2(&[[1, 0]]); let b = arr2(&[[0, 1], [1, 0]]); let r = arr2(&[[0, 1, 0, 0], [1, 0, 0, 0]]); diff --git a/tests/par_azip.rs b/tests/par_azip.rs index 418c21ef8..e5dc02c4e 100644 --- a/tests/par_azip.rs +++ b/tests/par_azip.rs @@ -7,8 +7,7 @@ use ndarray::prelude::*; use std::sync::atomic::{AtomicUsize, Ordering}; #[test] -fn test_par_azip1() -{ +fn test_par_azip1() { let mut a = Array::zeros(62); let b = Array::from_elem(62, 42); par_azip!((a in &mut 
a) { *a = 42 }); @@ -16,8 +15,7 @@ fn test_par_azip1() } #[test] -fn test_par_azip2() -{ +fn test_par_azip2() { let mut a = Array::zeros((5, 7)); let b = Array::from_shape_fn(a.dim(), |(i, j)| 1. / (i + 2 * j) as f32); par_azip!((a in &mut a, &b in &b, ) *a = b ); @@ -26,8 +24,7 @@ fn test_par_azip2() #[test] #[cfg(feature = "approx")] -fn test_par_azip3() -{ +fn test_par_azip3() { use approx::assert_abs_diff_eq; let mut a = [0.; 32]; @@ -47,8 +44,7 @@ fn test_par_azip3() #[should_panic] #[test] -fn test_zip_dim_mismatch_1() -{ +fn test_zip_dim_mismatch_1() { let mut a = Array::zeros((5, 7)); let mut d = a.raw_dim(); d[0] += 1; @@ -57,8 +53,7 @@ fn test_zip_dim_mismatch_1() } #[test] -fn test_indices_1() -{ +fn test_indices_1() { let mut a1 = Array::default(12); for (i, elt) in a1.indexed_iter_mut() { *elt = i; diff --git a/tests/par_rayon.rs b/tests/par_rayon.rs index 13669763f..40670c6bf 100644 --- a/tests/par_rayon.rs +++ b/tests/par_rayon.rs @@ -9,8 +9,7 @@ const CHUNK_SIZE: usize = 100; const N_CHUNKS: usize = (M + CHUNK_SIZE - 1) / CHUNK_SIZE; #[test] -fn test_axis_iter() -{ +fn test_axis_iter() { let mut a = Array2::::zeros((M, N)); for (i, mut v) in a.axis_iter_mut(Axis(0)).enumerate() { v.fill(i as _); @@ -23,8 +22,7 @@ fn test_axis_iter() #[test] #[cfg(feature = "approx")] -fn test_axis_iter_mut() -{ +fn test_axis_iter_mut() { use approx::assert_abs_diff_eq; let mut a = Array::linspace(0., 1.0f64, M * N) .into_shape_with_order((M, N)) @@ -38,8 +36,7 @@ fn test_axis_iter_mut() } #[test] -fn test_regular_iter() -{ +fn test_regular_iter() { let mut a = Array2::::zeros((M, N)); for (i, mut v) in a.axis_iter_mut(Axis(0)).enumerate() { v.fill(i as _); @@ -50,8 +47,7 @@ fn test_regular_iter() } #[test] -fn test_regular_iter_collect() -{ +fn test_regular_iter_collect() { let mut a = Array2::::zeros((M, N)); for (i, mut v) in a.axis_iter_mut(Axis(0)).enumerate() { v.fill(i as _); @@ -61,8 +57,7 @@ fn test_regular_iter_collect() } #[test] -fn 
test_axis_chunks_iter() -{ +fn test_axis_chunks_iter() { let mut a = Array2::::zeros((M, N)); for (i, mut v) in a.axis_chunks_iter_mut(Axis(0), CHUNK_SIZE).enumerate() { v.fill(i as _); @@ -79,8 +74,7 @@ fn test_axis_chunks_iter() #[test] #[cfg(feature = "approx")] -fn test_axis_chunks_iter_mut() -{ +fn test_axis_chunks_iter_mut() { use approx::assert_abs_diff_eq; let mut a = Array::linspace(0., 1.0f64, M * N) .into_shape_with_order((M, N)) diff --git a/tests/par_zip.rs b/tests/par_zip.rs index 9f10d9fd5..ec96c1bb9 100644 --- a/tests/par_zip.rs +++ b/tests/par_zip.rs @@ -8,16 +8,14 @@ const M: usize = 1024 * 10; const N: usize = 100; #[test] -fn test_zip_1() -{ +fn test_zip_1() { let mut a = Array2::::zeros((M, N)); Zip::from(&mut a).par_for_each(|x| *x = x.exp()); } #[test] -fn test_zip_index_1() -{ +fn test_zip_index_1() { let mut a = Array2::default((10, 10)); Zip::indexed(&mut a).par_for_each(|i, x| { @@ -30,8 +28,7 @@ fn test_zip_index_1() } #[test] -fn test_zip_index_2() -{ +fn test_zip_index_2() { let mut a = Array2::default((M, N)); Zip::indexed(&mut a).par_for_each(|i, x| { @@ -44,8 +41,7 @@ fn test_zip_index_2() } #[test] -fn test_zip_index_3() -{ +fn test_zip_index_3() { let mut a = Array::default((1, 2, 1, 2, 3)); Zip::indexed(&mut a).par_for_each(|i, x| { @@ -58,8 +54,7 @@ fn test_zip_index_3() } #[test] -fn test_zip_index_4() -{ +fn test_zip_index_4() { let mut a = Array2::zeros((M, N)); let mut b = Array2::zeros((M, N)); @@ -80,8 +75,7 @@ fn test_zip_index_4() #[test] #[cfg(feature = "approx")] -fn test_zip_collect() -{ +fn test_zip_collect() { use approx::assert_abs_diff_eq; // test Zip::map_collect and that it preserves c/f layout. 
@@ -109,8 +103,7 @@ fn test_zip_collect() #[test] #[cfg(feature = "approx")] -fn test_zip_small_collect() -{ +fn test_zip_small_collect() { use approx::assert_abs_diff_eq; for m in 0..32 { @@ -136,8 +129,7 @@ fn test_zip_small_collect() #[test] #[cfg(feature = "approx")] -fn test_zip_assign_into() -{ +fn test_zip_assign_into() { use approx::assert_abs_diff_eq; let mut a = Array::::zeros((M, N)); diff --git a/tests/raw_views.rs b/tests/raw_views.rs index 929e969d7..bb39547e8 100644 --- a/tests/raw_views.rs +++ b/tests/raw_views.rs @@ -4,8 +4,7 @@ use ndarray::Zip; use std::cell::Cell; #[test] -fn raw_view_cast_cell() -{ +fn raw_view_cast_cell() { // Test .cast() by creating an ArrayView> let mut a = Array::from_shape_fn((10, 5), |(i, j)| (i * j) as f32); @@ -21,8 +20,7 @@ fn raw_view_cast_cell() } #[test] -fn raw_view_cast_reinterpret() -{ +fn raw_view_cast_reinterpret() { // Test .cast() by reinterpreting u16 as [u8; 2] let a = Array::from_shape_fn((5, 5).f(), |(i, j)| (i as u16) << 8 | j as u16); let answer = a.mapv(u16::to_ne_bytes); @@ -33,8 +31,7 @@ fn raw_view_cast_reinterpret() } #[test] -fn raw_view_cast_zst() -{ +fn raw_view_cast_zst() { struct Zst; let a = Array::<(), _>::default((250, 250)); @@ -45,16 +42,14 @@ fn raw_view_cast_zst() #[test] #[should_panic] -fn raw_view_invalid_size_cast() -{ +fn raw_view_invalid_size_cast() { let data = [0i32; 16]; ArrayView::from(&data[..]).raw_view().cast::(); } #[test] #[should_panic] -fn raw_view_mut_invalid_size_cast() -{ +fn raw_view_mut_invalid_size_cast() { let mut data = [0i32; 16]; ArrayViewMut::from(&mut data[..]) .raw_view_mut() @@ -62,8 +57,7 @@ fn raw_view_mut_invalid_size_cast() } #[test] -fn raw_view_misaligned() -{ +fn raw_view_misaligned() { let data: [u16; 2] = [0x0011, 0x2233]; let ptr: *const u16 = data.as_ptr(); unsafe { @@ -75,10 +69,8 @@ fn raw_view_misaligned() #[test] #[cfg(debug_assertions)] #[should_panic = "The pointer must be aligned."] -fn raw_view_deref_into_view_misaligned() -{ - fn 
misaligned_deref(data: &[u16; 2]) -> ArrayView1<'_, u16> - { +fn raw_view_deref_into_view_misaligned() { + fn misaligned_deref(data: &[u16; 2]) -> ArrayView1<'_, u16> { let ptr: *const u16 = data.as_ptr(); unsafe { let misaligned_ptr = (ptr as *const u8).add(1) as *const u16; @@ -93,10 +85,8 @@ fn raw_view_deref_into_view_misaligned() #[test] #[cfg(debug_assertions)] #[should_panic = "Unsupported"] -fn raw_view_negative_strides() -{ - fn misaligned_deref(data: &[u16; 2]) -> ArrayView1<'_, u16> - { +fn raw_view_negative_strides() { + fn misaligned_deref(data: &[u16; 2]) -> ArrayView1<'_, u16> { let ptr: *const u16 = data.as_ptr(); unsafe { let raw_view = RawArrayView::from_shape_ptr(1.strides((-1isize) as usize), ptr); diff --git a/tests/reshape.rs b/tests/reshape.rs index a13a5c05f..533b124fd 100644 --- a/tests/reshape.rs +++ b/tests/reshape.rs @@ -5,8 +5,7 @@ use itertools::enumerate; use ndarray::Order; #[test] -fn reshape() -{ +fn reshape() { let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); let u = v.into_shape_with_order((3, 3)); @@ -22,8 +21,7 @@ fn reshape() #[test] #[should_panic(expected = "IncompatibleShape")] -fn reshape_error1() -{ +fn reshape_error1() { let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); let _u = v.into_shape_with_order((2, 5)).unwrap(); @@ -31,8 +29,7 @@ fn reshape_error1() #[test] #[should_panic(expected = "IncompatibleLayout")] -fn reshape_error2() -{ +fn reshape_error2() { let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); let mut u = v.into_shape_with_order((2, 2, 2)).unwrap(); @@ -41,8 +38,7 @@ fn reshape_error2() } #[test] -fn reshape_f() -{ +fn reshape_f() { let mut u = Array::zeros((3, 4).f()); for (i, elt) in enumerate(u.as_slice_memory_order_mut().unwrap()) { *elt = i as i32; @@ -67,8 +63,7 @@ fn reshape_f() } #[test] -fn to_shape_easy() -{ +fn to_shape_easy() { // 1D -> C -> C let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); @@ -107,8 +102,7 @@ fn to_shape_easy() } #[test] -fn 
to_shape_copy() -{ +fn to_shape_copy() { // 1D -> C -> F let v = ArrayView::from(&[1, 2, 3, 4, 5, 6, 7, 8]); let u = v.to_shape(((4, 2), Order::RowMajor)).unwrap(); @@ -131,8 +125,7 @@ fn to_shape_copy() } #[test] -fn to_shape_add_axis() -{ +fn to_shape_add_axis() { // 1D -> C -> C let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); @@ -143,8 +136,7 @@ fn to_shape_add_axis() } #[test] -fn to_shape_copy_stride() -{ +fn to_shape_copy_stride() { let v = array![[1, 2, 3, 4], [5, 6, 7, 8]]; let vs = v.slice(s![.., ..3]); let lin1 = vs.to_shape(6).unwrap(); @@ -157,8 +149,7 @@ fn to_shape_copy_stride() } #[test] -fn to_shape_zero_len() -{ +fn to_shape_zero_len() { let v = array![[1, 2, 3, 4], [5, 6, 7, 8]]; let vs = v.slice(s![.., ..0]); let lin1 = vs.to_shape(0).unwrap(); @@ -168,8 +159,7 @@ fn to_shape_zero_len() #[test] #[should_panic(expected = "IncompatibleShape")] -fn to_shape_error1() -{ +fn to_shape_error1() { let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); let _u = v.to_shape((2, 5)).unwrap(); @@ -177,8 +167,7 @@ fn to_shape_error1() #[test] #[should_panic(expected = "IncompatibleShape")] -fn to_shape_error2() -{ +fn to_shape_error2() { // overflow let data = [3, 4, 5, 6, 7, 8]; let v = aview1(&data); @@ -186,8 +175,7 @@ fn to_shape_error2() } #[test] -fn to_shape_discontig() -{ +fn to_shape_discontig() { for &create_order in &[Order::C, Order::F] { let a = Array::from_iter(0..64); let mut a1 = a.to_shape(((4, 4, 4), create_order)).unwrap(); @@ -214,8 +202,7 @@ fn to_shape_discontig() } #[test] -fn to_shape_broadcast() -{ +fn to_shape_broadcast() { for &create_order in &[Order::C, Order::F] { let a = Array::from_iter(0..64); let mut a1 = a.to_shape(((4, 4, 4), create_order)).unwrap(); @@ -242,8 +229,7 @@ fn to_shape_broadcast() } #[test] -fn into_shape_with_order() -{ +fn into_shape_with_order() { // 1D -> C -> C let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); @@ -282,8 +268,7 @@ fn into_shape_with_order() } #[test] -fn 
into_shape_clone() -{ +fn into_shape_clone() { // 1D -> C -> C { let data = [1, 2, 3, 4, 5, 6, 7, 8]; diff --git a/tests/s.rs b/tests/s.rs index edb3f071a..56eed03a1 100644 --- a/tests/s.rs +++ b/tests/s.rs @@ -5,8 +5,7 @@ use ndarray::{s, Array}; #[test] -fn test_s() -{ +fn test_s() { let a = Array::::zeros((3, 4)); let vi = a.slice(s![1.., ..;2]); assert_eq!(vi.shape(), &[2, 2]); diff --git a/tests/stacking.rs b/tests/stacking.rs index bdfe478b4..0c4e79c79 100644 --- a/tests/stacking.rs +++ b/tests/stacking.rs @@ -1,8 +1,7 @@ use ndarray::{arr2, arr3, aview1, aview2, concatenate, stack, Array2, Axis, ErrorKind, Ix1}; #[test] -fn concatenating() -{ +fn concatenating() { let a = arr2(&[[2., 2.], [3., 3.]]); let b = ndarray::concatenate(Axis(0), &[a.view(), a.view()]).unwrap(); assert_eq!(b, arr2(&[[2., 2.], [3., 3.], [2., 2.], [3., 3.]])); @@ -34,8 +33,7 @@ fn concatenating() } #[test] -fn stacking() -{ +fn stacking() { let a = arr2(&[[2., 2.], [3., 3.]]); let b = ndarray::stack(Axis(0), &[a.view(), a.view()]).unwrap(); assert_eq!(b, arr3(&[[[2., 2.], [3., 3.]], [[2., 2.], [3., 3.]]])); diff --git a/tests/views.rs b/tests/views.rs index 02970b1b7..ecef72fe8 100644 --- a/tests/views.rs +++ b/tests/views.rs @@ -2,8 +2,7 @@ use ndarray::prelude::*; use ndarray::Zip; #[test] -fn cell_view() -{ +fn cell_view() { let mut a = Array::from_shape_fn((10, 5), |(i, j)| (i * j) as f32); let answer = &a + 1.; diff --git a/tests/windows.rs b/tests/windows.rs index d8d5b699e..692e71e5a 100644 --- a/tests/windows.rs +++ b/tests/windows.rs @@ -22,8 +22,7 @@ use ndarray::{arr3, Zip}; /// Test that verifies the `Windows` iterator panics on window sizes equal to zero. #[test] #[should_panic] -fn windows_iterator_zero_size() -{ +fn windows_iterator_zero_size() { let a = Array::from_iter(10..37) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -32,8 +31,7 @@ fn windows_iterator_zero_size() /// Test that verifies that no windows are yielded on oversized window sizes. 
#[test] -fn windows_iterator_oversized() -{ +fn windows_iterator_oversized() { let a = Array::from_iter(10..37) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -43,8 +41,7 @@ fn windows_iterator_oversized() /// Simple test for iterating 1d-arrays via `Windows`. #[test] -fn windows_iterator_1d() -{ +fn windows_iterator_1d() { let a = Array::from_iter(10..20).into_shape_with_order(10).unwrap(); itertools::assert_equal(a.windows(Dim(4)), vec![ arr1(&[10, 11, 12, 13]), @@ -59,8 +56,7 @@ fn windows_iterator_1d() /// Simple test for iterating 2d-arrays via `Windows`. #[test] -fn windows_iterator_2d() -{ +fn windows_iterator_2d() { let a = Array::from_iter(10..30) .into_shape_with_order((5, 4)) .unwrap(); @@ -79,8 +75,7 @@ fn windows_iterator_2d() /// Simple test for iterating 3d-arrays via `Windows`. #[test] -fn windows_iterator_3d() -{ +fn windows_iterator_3d() { let a = Array::from_iter(10..37) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -99,8 +94,7 @@ fn windows_iterator_3d() /// Test that verifies the `Windows` iterator panics when stride has an axis equal to zero. #[test] #[should_panic] -fn windows_iterator_stride_axis_zero() -{ +fn windows_iterator_stride_axis_zero() { let a = Array::from_iter(10..37) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -109,8 +103,7 @@ fn windows_iterator_stride_axis_zero() /// Test that verifies that only first window is yielded when stride is oversized on every axis. #[test] -fn windows_iterator_only_one_valid_window_for_oversized_stride() -{ +fn windows_iterator_only_one_valid_window_for_oversized_stride() { let a = Array::from_iter(10..135) .into_shape_with_order((5, 5, 5)) .unwrap(); @@ -120,8 +113,7 @@ fn windows_iterator_only_one_valid_window_for_oversized_stride() /// Simple test for iterating 1d-arrays via `Windows` with stride. 
#[test] -fn windows_iterator_1d_with_stride() -{ +fn windows_iterator_1d_with_stride() { let a = Array::from_iter(10..20).into_shape_with_order(10).unwrap(); itertools::assert_equal(a.windows_with_stride(4, 2), vec![ arr1(&[10, 11, 12, 13]), @@ -133,8 +125,7 @@ fn windows_iterator_1d_with_stride() /// Simple test for iterating 2d-arrays via `Windows` with stride. #[test] -fn windows_iterator_2d_with_stride() -{ +fn windows_iterator_2d_with_stride() { let a = Array::from_iter(10..30) .into_shape_with_order((5, 4)) .unwrap(); @@ -150,8 +141,7 @@ fn windows_iterator_2d_with_stride() /// Simple test for iterating 3d-arrays via `Windows` with stride. #[test] -fn windows_iterator_3d_with_stride() -{ +fn windows_iterator_3d_with_stride() { let a = Array::from_iter(10..74) .into_shape_with_order((4, 4, 4)) .unwrap(); @@ -168,8 +158,7 @@ fn windows_iterator_3d_with_stride() } #[test] -fn test_window_zip() -{ +fn test_window_zip() { let a = Array::from_iter(0..64) .into_shape_with_order((4, 4, 4)) .unwrap(); @@ -194,8 +183,7 @@ fn test_window_zip() /// Test verifies that non existent Axis results in panic #[test] #[should_panic] -fn axis_windows_outofbound() -{ +fn axis_windows_outofbound() { let a = Array::from_iter(10..37) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -205,8 +193,7 @@ fn axis_windows_outofbound() /// Test verifies that zero sizes results in panic #[test] #[should_panic] -fn axis_windows_zero_size() -{ +fn axis_windows_zero_size() { let a = Array::from_iter(10..37) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -215,8 +202,7 @@ fn axis_windows_zero_size() /// Test verifies that over sized windows yield nothing #[test] -fn axis_windows_oversized() -{ +fn axis_windows_oversized() { let a = Array::from_iter(10..37) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -226,8 +212,7 @@ fn axis_windows_oversized() /// Simple test for iterating 1d-arrays via `Axis Windows`. 
#[test] -fn test_axis_windows_1d() -{ +fn test_axis_windows_1d() { let a = Array::from_iter(10..20).into_shape_with_order(10).unwrap(); itertools::assert_equal(a.axis_windows(Axis(0), 5), vec![ @@ -242,8 +227,7 @@ fn test_axis_windows_1d() /// Simple test for iterating 2d-arrays via `Axis Windows`. #[test] -fn test_axis_windows_2d() -{ +fn test_axis_windows_2d() { let a = Array::from_iter(10..30) .into_shape_with_order((5, 4)) .unwrap(); @@ -258,8 +242,7 @@ fn test_axis_windows_2d() /// Simple test for iterating 3d-arrays via `Axis Windows`. #[test] -fn test_axis_windows_3d() -{ +fn test_axis_windows_3d() { let a = Array::from_iter(0..27) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -279,8 +262,7 @@ fn test_axis_windows_3d() } #[test] -fn test_window_neg_stride() -{ +fn test_window_neg_stride() { let array = Array::from_iter(1..10) .into_shape_with_order((3, 3)) .unwrap(); @@ -310,8 +292,7 @@ fn test_window_neg_stride() } #[test] -fn test_windows_with_stride_on_inverted_axis() -{ +fn test_windows_with_stride_on_inverted_axis() { let mut array = Array::from_iter(1..17) .into_shape_with_order((4, 4)) .unwrap(); diff --git a/tests/zst.rs b/tests/zst.rs index f5f2c8e32..c3c779d2c 100644 --- a/tests/zst.rs +++ b/tests/zst.rs @@ -2,8 +2,7 @@ use ndarray::arr2; use ndarray::ArcArray; #[test] -fn test_swap() -{ +fn test_swap() { let mut a = arr2(&[[(); 3]; 3]); let b = a.clone(); @@ -17,8 +16,7 @@ fn test_swap() } #[test] -fn test() -{ +fn test() { let c = ArcArray::<(), _>::default((3, 4)); let mut d = c.clone(); for _ in d.iter_mut() {} diff --git a/xtest-blas/tests/oper.rs b/xtest-blas/tests/oper.rs index 3ed81915e..1cedc9018 100644 --- a/xtest-blas/tests/oper.rs +++ b/xtest-blas/tests/oper.rs @@ -17,8 +17,7 @@ use num_complex::Complex32; use num_complex::Complex64; #[test] -fn mat_vec_product_1d() -{ +fn mat_vec_product_1d() { let a = arr2(&[[1.], [2.]]); let b = arr1(&[1., 2.]); let ans = arr1(&[5.]); @@ -26,8 +25,7 @@ fn mat_vec_product_1d() } #[test] -fn 
mat_vec_product_1d_broadcast() -{ +fn mat_vec_product_1d_broadcast() { let a = arr2(&[[1.], [2.], [3.]]); let b = arr1(&[1.]); let b = b.broadcast(3).unwrap(); @@ -36,8 +34,7 @@ fn mat_vec_product_1d_broadcast() } #[test] -fn mat_vec_product_1d_inverted_axis() -{ +fn mat_vec_product_1d_inverted_axis() { let a = arr2(&[[1.], [2.], [3.]]); let mut b = arr1(&[1., 2., 3.]); b.invert_axis(Axis(0)); @@ -46,43 +43,37 @@ fn mat_vec_product_1d_inverted_axis() assert_eq!(a.t().dot(&b), ans); } -fn range_mat(m: Ix, n: Ix) -> Array2 -{ +fn range_mat(m: Ix, n: Ix) -> Array2 { Array::linspace(0., (m * n) as f32 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() } -fn range_mat64(m: Ix, n: Ix) -> Array2 -{ +fn range_mat64(m: Ix, n: Ix) -> Array2 { Array::linspace(0., (m * n) as f64 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() } -fn range_mat_complex(m: Ix, n: Ix) -> Array2 -{ +fn range_mat_complex(m: Ix, n: Ix) -> Array2 { Array::linspace(0., (m * n) as f32 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() .map(|&f| Complex32::new(f, 0.)) } -fn range_mat_complex64(m: Ix, n: Ix) -> Array2 -{ +fn range_mat_complex64(m: Ix, n: Ix) -> Array2 { Array::linspace(0., (m * n) as f64 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() .map(|&f| Complex64::new(f, 0.)) } -fn range1_mat64(m: Ix) -> Array1 -{ +fn range1_mat64(m: Ix) -> Array1 { Array::linspace(0., m as f64 - 1., m) } -fn range_i32(m: Ix, n: Ix) -> Array2 -{ +fn range_i32(m: Ix, n: Ix) -> Array2 { Array::from_iter(0..(m * n) as i32) .into_shape_with_order((m, n)) .unwrap() @@ -157,8 +148,7 @@ where // Check that matrix multiplication of contiguous matrices returns a // matrix with the same order #[test] -fn mat_mul_order() -{ +fn mat_mul_order() { let (m, n, k) = (50, 50, 50); let a = range_mat(m, n); let b = range_mat(n, k); @@ -177,8 +167,7 @@ fn mat_mul_order() // Check that matrix multiplication // supports broadcast arrays. 
#[test] -fn mat_mul_broadcast() -{ +fn mat_mul_broadcast() { let (m, n, k) = (16, 16, 16); let a = range_mat(m, n); let x1 = 1.; @@ -197,8 +186,7 @@ fn mat_mul_broadcast() // Check that matrix multiplication supports reversed axes #[test] -fn mat_mul_rev() -{ +fn mat_mul_rev() { let (m, n, k) = (16, 16, 16); let a = range_mat(m, n); let b = range_mat(n, k); @@ -214,8 +202,7 @@ fn mat_mul_rev() // Check that matrix multiplication supports arrays with zero rows or columns #[test] -fn mat_mut_zero_len() -{ +fn mat_mut_zero_len() { defmac!(mat_mul_zero_len range_mat_fn => { for n in 0..4 { for m in 0..4 { @@ -236,8 +223,7 @@ fn mat_mut_zero_len() } #[test] -fn gen_mat_mul() -{ +fn gen_mat_mul() { let alpha = -2.3; let beta = 3.14; let sizes = vec![ @@ -278,8 +264,7 @@ fn gen_mat_mul() // Test y = A x where A is f-order #[test] -fn gemm_64_1_f() -{ +fn gemm_64_1_f() { let a = range_mat64(64, 64).reversed_axes(); let (m, n) = a.dim(); // m x n times n x 1 == m x 1 @@ -291,8 +276,7 @@ fn gemm_64_1_f() } #[test] -fn gemm_c64_1_f() -{ +fn gemm_c64_1_f() { let a = range_mat_complex64(64, 64).reversed_axes(); let (m, n) = a.dim(); // m x n times n x 1 == m x 1 @@ -309,8 +293,7 @@ fn gemm_c64_1_f() } #[test] -fn gemm_c32_1_f() -{ +fn gemm_c32_1_f() { let a = range_mat_complex(64, 64).reversed_axes(); let (m, n) = a.dim(); // m x n times n x 1 == m x 1 @@ -327,8 +310,7 @@ fn gemm_c32_1_f() } #[test] -fn gemm_c64_actually_complex() -{ +fn gemm_c64_actually_complex() { let mut a = range_mat_complex64(4, 4); a = a.map(|&i| if i.re > 8. 
{ i.conj() } else { i }); let mut b = range_mat_complex64(4, 6); @@ -347,8 +329,7 @@ fn gemm_c64_actually_complex() } #[test] -fn gen_mat_vec_mul() -{ +fn gen_mat_vec_mul() { let alpha = -2.3; let beta = 3.14; let sizes = vec![ @@ -394,8 +375,7 @@ fn gen_mat_vec_mul() } #[test] -fn vec_mat_mul() -{ +fn vec_mat_mul() { let sizes = vec![ (4, 4), (8, 8), diff --git a/xtest-numeric/tests/accuracy.rs b/xtest-numeric/tests/accuracy.rs index e98fb3c4d..b64a71d22 100644 --- a/xtest-numeric/tests/accuracy.rs +++ b/xtest-numeric/tests/accuracy.rs @@ -23,8 +23,7 @@ use rand_distr::{Distribution, Normal, StandardNormal}; use approx::{assert_abs_diff_eq, assert_relative_eq}; fn kahan_sum(iter: impl Iterator) -> A -where A: LinalgScalar -{ +where A: LinalgScalar { let mut sum = A::zero(); let mut compensation = A::zero(); @@ -84,8 +83,7 @@ where } #[test] -fn accurate_eye_f32() -{ +fn accurate_eye_f32() { let rng = &mut SmallRng::from_entropy(); for i in 0..20 { let eye = Array::eye(i); @@ -112,8 +110,7 @@ fn accurate_eye_f32() } #[test] -fn accurate_eye_f64() -{ +fn accurate_eye_f64() { let rng = &mut SmallRng::from_entropy(); let abs_tol = 1e-15; for i in 0..20 { @@ -141,26 +138,22 @@ fn accurate_eye_f64() } #[test] -fn accurate_mul_f32_dot() -{ +fn accurate_mul_f32_dot() { accurate_mul_float_general::(1e-5, false); } #[test] -fn accurate_mul_f32_general() -{ +fn accurate_mul_f32_general() { accurate_mul_float_general::(1e-5, true); } #[test] -fn accurate_mul_f64_dot() -{ +fn accurate_mul_f64_dot() { accurate_mul_float_general::(1e-14, false); } #[test] -fn accurate_mul_f64_general() -{ +fn accurate_mul_f64_general() { accurate_mul_float_general::(1e-14, true); } @@ -170,8 +163,7 @@ fn accurate_mul_f64_general() fn random_matrix_mul( rng: &mut SmallRng, use_stride: bool, use_general: bool, generator: fn(Ix2, &mut SmallRng) -> Array2, ) -> (Array2, Array2) -where A: LinalgScalar -{ +where A: LinalgScalar { let m = rng.gen_range(15..512); let k = rng.gen_range(15..512); let n = 
rng.gen_range(15..1560); @@ -223,14 +215,12 @@ where } #[test] -fn accurate_mul_complex32() -{ +fn accurate_mul_complex32() { accurate_mul_complex_general::(1e-5); } #[test] -fn accurate_mul_complex64() -{ +fn accurate_mul_complex64() { accurate_mul_complex_general::(1e-14); } @@ -256,8 +246,7 @@ where } #[test] -fn accurate_mul_with_column_f64() -{ +fn accurate_mul_with_column_f64() { // pick a few random sizes let rng = &mut SmallRng::from_entropy(); for i in 0..10 { diff --git a/xtest-serialization/tests/serialize.rs b/xtest-serialization/tests/serialize.rs index 95e93e4fb..6e00f3af5 100644 --- a/xtest-serialization/tests/serialize.rs +++ b/xtest-serialization/tests/serialize.rs @@ -12,8 +12,7 @@ extern crate ron; use ndarray::{arr0, arr1, arr2, s, ArcArray, ArcArray2, ArrayD, IxDyn}; #[test] -fn serial_many_dim_serde() -{ +fn serial_many_dim_serde() { { let a = arr0::(2.72); let serial = serde_json::to_string(&a).unwrap(); @@ -59,8 +58,7 @@ fn serial_many_dim_serde() } #[test] -fn serial_ixdyn_serde() -{ +fn serial_ixdyn_serde() { { let a = arr0::(2.72).into_dyn(); let serial = serde_json::to_string(&a).unwrap(); @@ -99,8 +97,7 @@ fn serial_ixdyn_serde() } #[test] -fn serial_wrong_count_serde() -{ +fn serial_wrong_count_serde() { // one element too few let text = r##"{"v":1,"dim":[2,3],"data":[3,1,2.2,3.1,4]}"##; let arr = serde_json::from_str::>(text); @@ -115,8 +112,7 @@ fn serial_wrong_count_serde() } #[test] -fn serial_many_dim_serde_msgpack() -{ +fn serial_many_dim_serde_msgpack() { { let a = arr0::(2.72); @@ -180,8 +176,7 @@ fn serial_many_dim_serde_msgpack() #[test] #[cfg(feature = "ron")] -fn serial_many_dim_ron() -{ +fn serial_many_dim_ron() { use ron::de::from_str as ron_deserialize; use ron::ser::to_string as ron_serialize; From aa19194f88b1aac4021b47b84f2bf2a9b507802f Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Mon, 25 Mar 2024 14:28:54 +0000 Subject: [PATCH 02/23] Update CI --- .github/workflows/ci.yaml | 5 +++++ 1 file changed, 5 insertions(+) 
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 35f72acf4..c8c6ce1c9 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -1,6 +1,11 @@ on: + push: + branches: + - "**" pull_request: merge_group: + workflow_dispatch: + workflow_call: name: Continuous integration From 24bcf1dbd07bbd7752f75a76ac2eb932bd0abe81 Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Mon, 25 Mar 2024 15:42:43 +0000 Subject: [PATCH 03/23] Why is CI failing? --- Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4332daf4a..50b5c4dfc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,8 +32,8 @@ num-traits = { version = "0.2", default-features = false } num-complex = { version = "0.4", default-features = false } # Use via the `opencl` crate feature! -#hasty_ = { version = "0.2", optional = true, package = "hasty", default-features = false } -hasty_ = { path = "../../hasty_dev/hasty", optional = true, package = "hasty", default-features = false } +hasty_ = { version = "0.2", optional = true, package = "hasty", default-features = false } +#hasty_ = { path = "../../hasty_dev/hasty", optional = true, package = "hasty", default-features = false } # Use via the `rayon` crate feature! rayon_ = { version = "1.0.3", optional = true, package = "rayon" } From 931dde4a9a0906eb06cf4288dde2f2b81293cb8d Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Tue, 26 Mar 2024 02:04:12 +0000 Subject: [PATCH 04/23] Remove some clippy warnings and fix some bugs --- Cargo.toml | 2 +- src/data_repr.rs | 47 +++++++++++++++++++++++++++++++++++------------ src/lib.rs | 1 - 3 files changed, 36 insertions(+), 14 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 50b5c4dfc..96a069ca4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,7 +32,7 @@ num-traits = { version = "0.2", default-features = false } num-complex = { version = "0.4", default-features = false } # Use via the `opencl` crate feature! 
-hasty_ = { version = "0.2", optional = true, package = "hasty", default-features = false } + hasty_ = { version = "0.2", optional = true, package = "hasty", default-features = false } #hasty_ = { path = "../../hasty_dev/hasty", optional = true, package = "hasty", default-features = false } # Use via the `rayon` crate feature! diff --git a/src/data_repr.rs b/src/data_repr.rs index 1d0d8d2bd..240f30c4e 100644 --- a/src/data_repr.rs +++ b/src/data_repr.rs @@ -6,7 +6,6 @@ use alloc::borrow::ToOwned; use alloc::slice; #[cfg(not(feature = "std"))] use alloc::vec::Vec; -use std::ffi::c_void; use std::mem; use std::mem::ManuallyDrop; use std::ptr::NonNull; @@ -55,6 +54,7 @@ impl OwnedRepr { } /// Move this storage object to a specified device. + #[allow(clippy::unnecessary_wraps)] pub(crate) fn copy_to_device(self, device: Device) -> Option { // println!("Copying to {device:?}"); // let mut self_ = ManuallyDrop::new(self); @@ -65,12 +65,13 @@ impl OwnedRepr { match (self.device, device) { (Device::Host, Device::Host) => { - todo!() + // println!("Copying to Host"); + Some(self) } #[cfg(feature = "opencl")] (Device::Host, Device::OpenCL) => { - let bytes = std::mem::size_of::() * capacity; + let bytes = std::mem::size_of::() * self.capacity; unsafe { if let Ok(buffer) = @@ -108,7 +109,7 @@ impl OwnedRepr { data.set_len(self.len); if let Ok(_) = hasty_::opencl::opencl_read( data.as_mut_ptr() as *mut std::ffi::c_void, - self.ptr.as_ptr() as *mut c_void, + self.ptr.as_ptr() as *mut std::ffi::c_void, bytes, ) { Some(Self { @@ -128,8 +129,29 @@ impl OwnedRepr { todo!(); } - _ => { - panic!("Not Implemented") + #[cfg(feature = "cuda")] + (Device::Host, Device::CUDA) => { + todo!(); + } + + #[cfg(feature = "cuda")] + (Device::CUDA, Device::Host) => { + todo!(); + } + + #[cfg(feature = "cuda")] + (Device::CUDA, Device::CUDA) => { + todo!(); + } + + #[cfg(all(feature = "opencl", feature = "cuda"))] + (Device::OpenCL, Device::CUDA) => { + todo!(); + } + + #[cfg(all(feature = 
"opencl", feature = "cuda"))] + (Device::CUDA, Device::OpenCL) => { + todo!(); } } } @@ -153,7 +175,7 @@ impl OwnedRepr { // Free `ptr` // println!("Freeing OpenCL pointer"); - hasty_::opencl::opencl_free(ptr as *mut c_void); + hasty_::opencl::opencl_free(ptr as *mut std::ffi::c_void); // Should be optimised out, since nothing is allocated Vec::new() @@ -190,7 +212,7 @@ impl OwnedRepr { unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } } - pub(crate) fn len(&self) -> usize { + pub(crate) const fn len(&self) -> usize { self.len } @@ -200,7 +222,7 @@ impl OwnedRepr { /// The pointer **may not necessarily point to the host**. /// Using a non-host pointer on the host will almost certainly /// cause a segmentation-fault. - pub(crate) fn as_ptr(&self) -> *const A { + pub(crate) const fn as_ptr(&self) -> *const A { self.ptr.as_ptr() } @@ -210,11 +232,11 @@ impl OwnedRepr { /// The pointer **may not necessarily point to the host**. /// Using a non-host pointer on the host will almost certainly /// cause a segmentation-fault. - pub(crate) fn as_ptr_mut(&self) -> *mut A { + pub(crate) const fn as_ptr_mut(&self) -> *mut A { self.ptr.as_ptr() } - /// Return underlying [NonNull] ptr. + /// Return underlying [`NonNull`] ptr. /// /// ## Safety /// The pointer **may not necessarily point to the host**. @@ -305,7 +327,8 @@ impl OwnedRepr { } impl Clone for OwnedRepr -where A: Clone +where + A: Clone, { fn clone(&self) -> Self { match self.device { diff --git a/src/lib.rs b/src/lib.rs index 83d62991d..0e42aeb43 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,3 @@ -#![feature(non_null_convenience)] // Copyright 2014-2020 bluss and ndarray developers. 
// // Licensed under the Apache License, Version 2.0 Date: Tue, 26 Mar 2024 02:08:49 +0000 Subject: [PATCH 05/23] Fix --- src/data_repr.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/data_repr.rs b/src/data_repr.rs index 240f30c4e..311e4653b 100644 --- a/src/data_repr.rs +++ b/src/data_repr.rs @@ -60,9 +60,6 @@ impl OwnedRepr { // let mut self_ = ManuallyDrop::new(self); // self_.device = device; - let len = self.len; - let capacity = self.capacity; - match (self.device, device) { (Device::Host, Device::Host) => { // println!("Copying to Host"); @@ -71,7 +68,9 @@ impl OwnedRepr { #[cfg(feature = "opencl")] (Device::Host, Device::OpenCL) => { - let bytes = std::mem::size_of::() * self.capacity; + let len = self.len; + let capacity = self.capacity; + let bytes = std::mem::size_of::() * capacity; unsafe { if let Ok(buffer) = @@ -102,6 +101,8 @@ impl OwnedRepr { #[cfg(feature = "opencl")] (Device::OpenCL, Device::Host) => { + let len = self.len; + let capacity = self.capacity; let bytes = std::mem::size_of::() * capacity; unsafe { @@ -327,8 +328,7 @@ impl OwnedRepr { } impl Clone for OwnedRepr -where - A: Clone, +where A: Clone { fn clone(&self) -> Self { match self.device { From b03ff6fed40222c5c47fef95756c4294471a2f85 Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Tue, 26 Mar 2024 16:00:03 +0000 Subject: [PATCH 06/23] Fix tests --- src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 0e42aeb43..5490dd52a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1129,7 +1129,7 @@ pub type Ixs = isize; /// } /// let arr = Array2::from_shape_vec((nrows, ncols), data)?; /// assert_eq!(arr, array![[0, 0, 0], [1, 1, 1]]); -/// // Ok::<(), ndarray::ShapeError>(()) +/// Ok::<(), ndarray::ShapeError>(()) /// ``` /// /// If neither of these options works for you, and you really need to convert @@ -1153,7 +1153,7 @@ pub type Ixs = isize; /// [[1, 2, 3], [4, 5, 6]], /// [[7, 8, 9], [10, 11, 
12]], /// ]); -/// // Ok::<(), ndarray::ShapeError>(()) +/// Ok::<(), ndarray::ShapeError>(()) /// ``` /// /// Note that this implementation assumes that the nested `Vec`s are all the From 8dd39d1a634f6fd470e6f2a66e9b3dbf53c1aaa1 Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Tue, 26 Mar 2024 23:38:48 +0000 Subject: [PATCH 07/23] Update bench config --- Cargo.toml | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 96a069ca4..36cceffed 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,8 @@ edition = "2018" rust-version = "1.57" authors = [ "Ulrik Sverdrup \"bluss\"", - "Jim Turner" + "Jim Turner", + "Toby Davis \"Pencilcaseman\"", ] license = "MIT OR Apache-2.0" readme = "README-crates.io.md" @@ -32,7 +33,7 @@ num-traits = { version = "0.2", default-features = false } num-complex = { version = "0.4", default-features = false } # Use via the `opencl` crate feature! - hasty_ = { version = "0.2", optional = true, package = "hasty", default-features = false } +hasty_ = { version = "0.2", optional = true, package = "hasty", default-features = false } #hasty_ = { path = "../../hasty_dev/hasty", optional = true, package = "hasty", default-features = false } # Use via the `rayon` crate feature! 
@@ -45,16 +46,20 @@ approx-0_5 = { package = "approx", version = "0.5", optional = true, default-fea cblas-sys = { version = "0.1.4", optional = true, default-features = false } libc = { version = "0.2.82", optional = true } -matrixmultiply = { version = "0.3.2", default-features = false, features = ["cgemm"] } +matrixmultiply = { version = "0.3.2", default-features = false, features = [ + "cgemm", +] } -serde = { version = "1.0", optional = true, default-features = false, features = ["alloc"] } +serde = { version = "1.0", optional = true, default-features = false, features = [ + "alloc", +] } rawpointer = { version = "0.2" } [dev-dependencies] defmac = "0.2" quickcheck = { version = "1.0", default-features = false } -approx = "0.4" -itertools = { version = "0.10.0", default-features = false, features = ["use_std"] } +approx = "0.5" +itertools = { version = "0.12", default-features = false, features = ["use_std"] } [features] default = ["std"] @@ -82,9 +87,19 @@ rayon = ["rayon_", "std"] matrixmultiply-threading = ["matrixmultiply/threading"] [profile.bench] -debug = true +# debug = true +panic = "abort" +codegen-units = 1 +lto = true +opt-level = 3 +strip = true +debug = false +debug-assertions = false + + [profile.dev.package.numeric-tests] opt-level = 2 + [profile.test.package.numeric-tests] opt-level = 2 From 772bda49629614d78214069b12d3c0faf64569a7 Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Wed, 27 Mar 2024 12:55:43 +0000 Subject: [PATCH 08/23] Do not allow operations on mismatched devices --- Cargo.toml | 2 +- src/data_repr.rs | 36 +++++++++++++++++--- src/data_traits.rs | 11 +++++- src/impl_methods.rs | 14 ++++++-- src/impl_ops.rs | 83 ++++++++++++++++++++++++++------------------- 5 files changed, 104 insertions(+), 42 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 36cceffed..feec26d33 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,7 @@ num-complex = { version = "0.4", default-features = false } # Use via the `opencl` crate feature! 
hasty_ = { version = "0.2", optional = true, package = "hasty", default-features = false } -#hasty_ = { path = "../../hasty_dev/hasty", optional = true, package = "hasty", default-features = false } +# hasty_ = { path = "../../hasty_dev/hasty", optional = true, package = "hasty", default-features = false } # Use via the `rayon` crate feature! rayon_ = { version = "1.0.3", optional = true, package = "rayon" } diff --git a/src/data_repr.rs b/src/data_repr.rs index 311e4653b..781fc9350 100644 --- a/src/data_repr.rs +++ b/src/data_repr.rs @@ -53,9 +53,13 @@ impl OwnedRepr { } } + pub(crate) fn device(&self) -> Device { + self.device + } + /// Move this storage object to a specified device. #[allow(clippy::unnecessary_wraps)] - pub(crate) fn copy_to_device(self, device: Device) -> Option { + pub(crate) fn move_to_device(self, device: Device) -> Option { // println!("Copying to {device:?}"); // let mut self_ = ManuallyDrop::new(self); // self_.device = device; @@ -209,7 +213,8 @@ impl OwnedRepr { /// on the host device. pub(crate) fn as_slice(&self) -> &[A] { // Cannot create a slice of a device pointer - assert_eq!(self.device, Device::Host); + debug_assert_eq!(self.device, Device::Host, "Cannot create a slice of a device pointer"); + unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } } @@ -337,8 +342,31 @@ where A: Clone #[cfg(feature = "opencl")] Device::OpenCL => { println!("Performing OpenCL Clone"); - // todo: OpenCL clone - Self::from(self.as_slice().to_owned()) + unsafe { + // Allocate new buffer + let bytes = std::mem::size_of::() * self.len(); + + match hasty_::opencl::opencl_allocate(bytes, hasty_::opencl::OpenCLMemoryType::ReadWrite) { + Ok(buffer_ptr) => { + if let Err(err_code) = + hasty_::opencl::opencl_copy(buffer_ptr, self.as_ptr() as *const std::ffi::c_void, bytes) + { + panic!("Failed to copy to OpenCL buffer. 
Exited with status: {:?}", err_code); + } + + Self { + ptr: NonNull::new(buffer_ptr as *mut A).unwrap(), + len: self.len, + capacity: self.capacity, + device: self.device, + } + } + + Err(err_code) => { + panic!("Failed to clone OpenCL buffer. Exited with status: {:?}", err_code); + } + } + } } #[cfg(feature = "cuda")] diff --git a/src/data_traits.rs b/src/data_traits.rs index 42b01ed0e..b06163081 100644 --- a/src/data_traits.rs +++ b/src/data_traits.rs @@ -17,7 +17,7 @@ use std::mem::MaybeUninit; use std::mem::{self, size_of}; use std::ptr::NonNull; -use crate::{ArcArray, Array, ArrayBase, CowRepr, Dimension, OwnedArcRepr, OwnedRepr, RawViewRepr, ViewRepr}; +use crate::{ArcArray, Array, ArrayBase, CowRepr, Device, Dimension, OwnedArcRepr, OwnedRepr, RawViewRepr, ViewRepr}; /// Array representation trait. /// @@ -41,6 +41,11 @@ pub unsafe trait RawData: Sized { #[doc(hidden)] fn _is_pointer_inbounds(&self, ptr: *const Self::Elem) -> bool; + #[doc(hidden)] + fn _device(&self) -> Option { + None + } + private_decl! {} } @@ -330,6 +335,10 @@ unsafe impl RawData for OwnedRepr { self_ptr >= ptr && self_ptr <= end } + fn _device(&self) -> Option { + Some(self.device()) + } + private_impl! {} } diff --git a/src/impl_methods.rs b/src/impl_methods.rs index e9f9a01ec..1faa3436b 100644 --- a/src/impl_methods.rs +++ b/src/impl_methods.rs @@ -2964,6 +2964,12 @@ where f(&*prev, &mut *curr) }); } + + pub fn device(&self) -> Device { + // If a device is returned, use that. Otherwise, it's fairly safe to + // assume that the data is on the host. + self.data._device().unwrap_or(Device::Host) + } } /// Transmute from A to B. 
@@ -2986,10 +2992,14 @@ type DimMaxOf = >::Output; impl ArrayBase, D> where A: std::fmt::Debug { - pub fn copy_to_device(self, device: Device) -> Option { + // pub fn device(&self) -> Device { + // self.data.device() + // } + + pub fn move_to_device(self, device: Device) -> Option { let dim = self.dim; let strides = self.strides; - let data = self.data.copy_to_device(device)?; + let data = self.data.move_to_device(device)?; let ptr = std::ptr::NonNull::new(data.as_ptr() as *mut A).unwrap(); Some(Self { diff --git a/src/impl_ops.rs b/src/impl_ops.rs index 8d02364d1..9475ebba3 100644 --- a/src/impl_ops.rs +++ b/src/impl_ops.rs @@ -50,10 +50,18 @@ impl ScalarOperand for f64 {} impl ScalarOperand for Complex {} impl ScalarOperand for Complex {} +macro_rules! device_check_assert( + ($self:expr, $rhs:expr) => { + debug_assert_eq!($self.device(), $rhs.device(), + "Cannot perform operation on arrays on different devices. \ + Please move them to the same device first."); + } +); + macro_rules! impl_binary_op( - ($trt:ident, $operator:tt, $mth:ident, $iop:tt, $doc:expr) => ( + ($rs_trait:ident, $operator:tt, $math_op:ident, $inplace_op:tt, $docstring:expr) => ( /// Perform elementwise -#[doc=$doc] +#[doc=$docstring] /// between `self` and `rhs`, /// and return the result. /// @@ -62,9 +70,9 @@ macro_rules! impl_binary_op( /// If their shapes disagree, `self` is broadcast to their broadcast shape. /// /// **Panics** if broadcasting isn’t possible. -impl $trt> for ArrayBase +impl $rs_trait> for ArrayBase where - A: Clone + $trt, + A: Clone + $rs_trait, B: Clone, S: DataOwned + DataMut, S2: Data, @@ -73,14 +81,15 @@ where { type Output = ArrayBase>::Output>; #[track_caller] - fn $mth(self, rhs: ArrayBase) -> Self::Output + fn $math_op(self, rhs: ArrayBase) -> Self::Output { - self.$mth(&rhs) + device_check_assert!(self, rhs); + self.$math_op(&rhs) } } /// Perform elementwise -#[doc=$doc] +#[doc=$docstring] /// between `self` and reference `rhs`, /// and return the result. 
/// @@ -90,9 +99,9 @@ where /// cloning the data if needed. /// /// **Panics** if broadcasting isn’t possible. -impl<'a, A, B, S, S2, D, E> $trt<&'a ArrayBase> for ArrayBase +impl<'a, A, B, S, S2, D, E> $rs_trait<&'a ArrayBase> for ArrayBase where - A: Clone + $trt, + A: Clone + $rs_trait, B: Clone, S: DataOwned + DataMut, S2: Data, @@ -101,27 +110,29 @@ where { type Output = ArrayBase>::Output>; #[track_caller] - fn $mth(self, rhs: &ArrayBase) -> Self::Output + fn $math_op(self, rhs: &ArrayBase) -> Self::Output { + device_check_assert!(self, rhs); + if self.ndim() == rhs.ndim() && self.shape() == rhs.shape() { let mut out = self.into_dimensionality::<>::Output>().unwrap(); - out.zip_mut_with_same_shape(rhs, clone_iopf(A::$mth)); + out.zip_mut_with_same_shape(rhs, clone_iopf(A::$math_op)); out } else { let (lhs_view, rhs_view) = self.broadcast_with(&rhs).unwrap(); if lhs_view.shape() == self.shape() { let mut out = self.into_dimensionality::<>::Output>().unwrap(); - out.zip_mut_with_same_shape(&rhs_view, clone_iopf(A::$mth)); + out.zip_mut_with_same_shape(&rhs_view, clone_iopf(A::$math_op)); out } else { - Zip::from(&lhs_view).and(&rhs_view).map_collect_owned(clone_opf(A::$mth)) + Zip::from(&lhs_view).and(&rhs_view).map_collect_owned(clone_opf(A::$math_op)) } } } } /// Perform elementwise -#[doc=$doc] +#[doc=$docstring] /// between reference `self` and `rhs`, /// and return the result. /// @@ -131,9 +142,9 @@ where /// cloning the data if needed. /// /// **Panics** if broadcasting isn’t possible. 
-impl<'a, A, B, S, S2, D, E> $trt> for &'a ArrayBase +impl<'a, A, B, S, S2, D, E> $rs_trait> for &'a ArrayBase where - A: Clone + $trt, + A: Clone + $rs_trait, B: Clone, S: Data, S2: DataOwned + DataMut, @@ -142,28 +153,30 @@ where { type Output = ArrayBase>::Output>; #[track_caller] - fn $mth(self, rhs: ArrayBase) -> Self::Output - where + fn $math_op(self, rhs: ArrayBase) -> Self::Output + // where { + device_check_assert!(self, rhs); + if self.ndim() == rhs.ndim() && self.shape() == rhs.shape() { let mut out = rhs.into_dimensionality::<>::Output>().unwrap(); - out.zip_mut_with_same_shape(self, clone_iopf_rev(A::$mth)); + out.zip_mut_with_same_shape(self, clone_iopf_rev(A::$math_op)); out } else { let (rhs_view, lhs_view) = rhs.broadcast_with(self).unwrap(); if rhs_view.shape() == rhs.shape() { let mut out = rhs.into_dimensionality::<>::Output>().unwrap(); - out.zip_mut_with_same_shape(&lhs_view, clone_iopf_rev(A::$mth)); + out.zip_mut_with_same_shape(&lhs_view, clone_iopf_rev(A::$math_op)); out } else { - Zip::from(&lhs_view).and(&rhs_view).map_collect_owned(clone_opf(A::$mth)) + Zip::from(&lhs_view).and(&rhs_view).map_collect_owned(clone_opf(A::$math_op)) } } } } /// Perform elementwise -#[doc=$doc] +#[doc=$docstring] /// between references `self` and `rhs`, /// and return the result as a new `Array`. /// @@ -171,9 +184,9 @@ where /// cloning the data if needed. /// /// **Panics** if broadcasting isn’t possible. 
-impl<'a, A, B, S, S2, D, E> $trt<&'a ArrayBase> for &'a ArrayBase +impl<'a, A, B, S, S2, D, E> $rs_trait<&'a ArrayBase> for &'a ArrayBase where - A: Clone + $trt, + A: Clone + $rs_trait, B: Clone, S: Data, S2: Data, @@ -182,7 +195,9 @@ where { type Output = Array>::Output>; #[track_caller] - fn $mth(self, rhs: &'a ArrayBase) -> Self::Output { + fn $math_op(self, rhs: &'a ArrayBase) -> Self::Output { + device_check_assert!(self, rhs); + let (lhs, rhs) = if self.ndim() == rhs.ndim() && self.shape() == rhs.shape() { let lhs = self.view().into_dimensionality::<>::Output>().unwrap(); let rhs = rhs.view().into_dimensionality::<>::Output>().unwrap(); @@ -190,24 +205,24 @@ where } else { self.broadcast_with(rhs).unwrap() }; - Zip::from(lhs).and(rhs).map_collect(clone_opf(A::$mth)) + Zip::from(lhs).and(rhs).map_collect(clone_opf(A::$math_op)) } } /// Perform elementwise -#[doc=$doc] +#[doc=$docstring] /// between `self` and the scalar `x`, /// and return the result (based on `self`). /// /// `self` must be an `Array` or `ArcArray`. -impl $trt for ArrayBase - where A: Clone + $trt, +impl $rs_trait for ArrayBase + where A: Clone + $rs_trait, S: DataOwned + DataMut, D: Dimension, B: ScalarOperand, { type Output = ArrayBase; - fn $mth(mut self, x: B) -> ArrayBase { + fn $math_op(mut self, x: B) -> ArrayBase { self.map_inplace(move |elt| { *elt = elt.clone() $operator x.clone(); }); @@ -216,17 +231,17 @@ impl $trt for ArrayBase } /// Perform elementwise -#[doc=$doc] +#[doc=$docstring] /// between the reference `self` and the scalar `x`, /// and return the result as a new `Array`. 
-impl<'a, A, S, D, B> $trt for &'a ArrayBase - where A: Clone + $trt, +impl<'a, A, S, D, B> $rs_trait for &'a ArrayBase + where A: Clone + $rs_trait, S: Data, D: Dimension, B: ScalarOperand, { type Output = Array; - fn $mth(self, x: B) -> Self::Output { + fn $math_op(self, x: B) -> Self::Output { self.map(move |elt| elt.clone() $operator x.clone()) } } From 4ac3f969ea1e9efaa4932f0a8d96daff7c4ead9a Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Wed, 27 Mar 2024 13:02:10 +0000 Subject: [PATCH 09/23] oops --- Cargo.toml | 2 +- src/data_repr.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index feec26d33..f940562a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,7 +58,7 @@ rawpointer = { version = "0.2" } [dev-dependencies] defmac = "0.2" quickcheck = { version = "1.0", default-features = false } -approx = "0.5" +approx = "0.4" itertools = { version = "0.12", default-features = false, features = ["use_std"] } [features] diff --git a/src/data_repr.rs b/src/data_repr.rs index 781fc9350..f59d86f78 100644 --- a/src/data_repr.rs +++ b/src/data_repr.rs @@ -53,7 +53,7 @@ impl OwnedRepr { } } - pub(crate) fn device(&self) -> Device { + pub(crate) const fn device(&self) -> Device { self.device } From 00e39558a161117a97c6c8fef1ea87766aff86fd Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Wed, 27 Mar 2024 13:14:40 +0000 Subject: [PATCH 10/23] Apparently strip isn't allowed in 1.57.0 --- Cargo.toml | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f940562a8..4335adfef 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,15 +87,7 @@ rayon = ["rayon_", "std"] matrixmultiply-threading = ["matrixmultiply/threading"] [profile.bench] -# debug = true -panic = "abort" -codegen-units = 1 -lto = true -opt-level = 3 -strip = true -debug = false -debug-assertions = false - +debug = true [profile.dev.package.numeric-tests] opt-level = 2 From 06427665a0ff57bce9ce1c326d3fd7815d416de4 Mon Sep 17 
00:00:00 2001 From: Toby Davis Date: Wed, 27 Mar 2024 13:31:55 +0000 Subject: [PATCH 11/23] Bump MSRV --- .github/workflows/ci.yaml | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c8c6ce1c9..13c9c9e6d 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -55,7 +55,7 @@ jobs: - stable - beta - nightly - - 1.57.0 # MSRV + - 1.60.0 # MSRV name: tests/${{ matrix.rust }} steps: diff --git a/Cargo.toml b/Cargo.toml index 4335adfef..414bbea41 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "ndarray" version = "0.15.6" edition = "2018" -rust-version = "1.57" +rust-version = "1.60" authors = [ "Ulrik Sverdrup \"bluss\"", "Jim Turner", From 1b5d086898b83ac61a60347ae0ddbfd2874af81e Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Wed, 27 Mar 2024 13:38:08 +0000 Subject: [PATCH 12/23] Remove unnecessary unsafe block --- src/data_repr.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/data_repr.rs b/src/data_repr.rs index f59d86f78..ff94a6bf9 100644 --- a/src/data_repr.rs +++ b/src/data_repr.rs @@ -170,10 +170,11 @@ impl OwnedRepr { let ptr = self.ptr.as_ptr(); match self.device { - Device::Host => unsafe { + #[cfg(rust_version < "1.70.0")] + Device::Host => { // println!("Dropping Host pointer"); Vec::from_raw_parts(ptr, len, capacity) - }, + } #[cfg(feature = "opencl")] Device::OpenCL => { From f92fe0d3dfa72e049112d43604ec3a1829a4f925 Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Wed, 27 Mar 2024 13:39:13 +0000 Subject: [PATCH 13/23] Fix issue --- src/data_repr.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/data_repr.rs b/src/data_repr.rs index ff94a6bf9..10ed14f39 100644 --- a/src/data_repr.rs +++ b/src/data_repr.rs @@ -170,7 +170,6 @@ impl OwnedRepr { let ptr = self.ptr.as_ptr(); match self.device { - #[cfg(rust_version < "1.70.0")] Device::Host => { // println!("Dropping Host pointer"); 
Vec::from_raw_parts(ptr, len, capacity) From b04c5330b0af91a02145b63af0fdde879fbdc98b Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Wed, 27 Mar 2024 13:43:35 +0000 Subject: [PATCH 14/23] Why did that not work... Bump MSRV again --- .github/workflows/ci.yaml | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 13c9c9e6d..5a3964c3b 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -55,7 +55,7 @@ jobs: - stable - beta - nightly - - 1.60.0 # MSRV + - 1.63.0 # MSRV name: tests/${{ matrix.rust }} steps: diff --git a/Cargo.toml b/Cargo.toml index 414bbea41..879785f70 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "ndarray" version = "0.15.6" edition = "2018" -rust-version = "1.60" +rust-version = "1.63" authors = [ "Ulrik Sverdrup \"bluss\"", "Jim Turner", From 722c4f35921609ea6e619f5303b0aee314ae184a Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Wed, 27 Mar 2024 18:59:48 +0000 Subject: [PATCH 15/23] Update CI and tests. Decrease MSRV. 
--- .github/workflows/ci.yaml | 7 +++++++ Cargo.toml | 2 +- scripts/all-tests.sh | 9 ++++++--- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 5a3964c3b..9a332713a 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -18,6 +18,7 @@ env: jobs: clippy: runs-on: ubuntu-latest + fail-fast: false strategy: matrix: rust: @@ -34,6 +35,7 @@ jobs: format: runs-on: ubuntu-latest + fail-fast: false strategy: matrix: rust: @@ -49,6 +51,7 @@ jobs: tests: runs-on: ubuntu-latest + fail-fast: false strategy: matrix: rust: @@ -71,6 +74,7 @@ jobs: cross_test: if: ${{ github.event_name == 'merge_group' }} runs-on: ubuntu-latest + fail-fast: false strategy: matrix: include: @@ -94,6 +98,7 @@ jobs: cargo-careful: if: ${{ github.event_name == 'merge_group' }} runs-on: ubuntu-latest + fail-fast: false name: cargo-careful steps: - uses: actions/checkout@v4 @@ -109,6 +114,7 @@ jobs: docs: if: ${{ github.event_name == 'merge_group' }} runs-on: ubuntu-latest + fail-fast: false strategy: matrix: rust: @@ -131,6 +137,7 @@ jobs: - docs if: always() runs-on: ubuntu-latest + fail-fast: false steps: - name: Result run: | diff --git a/Cargo.toml b/Cargo.toml index 879785f70..414bbea41 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "ndarray" version = "0.15.6" edition = "2018" -rust-version = "1.63" +rust-version = "1.60" authors = [ "Ulrik Sverdrup \"bluss\"", "Jim Turner", diff --git a/scripts/all-tests.sh b/scripts/all-tests.sh index cbea6dba7..e0f710fda 100755 --- a/scripts/all-tests.sh +++ b/scripts/all-tests.sh @@ -6,7 +6,7 @@ set -e FEATURES=$1 CHANNEL=$2 -if [ "$CHANNEL" = "1.57.0" ]; then +if [ "$CHANNEL" = "1.60.0" ]; then cargo update --package openblas-src --precise 0.10.5 cargo update --package openblas-build --precise 0.10.5 cargo update --package once_cell --precise 1.14.0 @@ -21,8 +21,11 @@ if [ "$CHANNEL" = "1.57.0" ]; then cargo update --package serde_json --precise 
1.0.99 cargo update --package serde --precise 1.0.156 cargo update --package thiserror --precise 1.0.39 - cargo update --package quote --precise 1.0.30 - cargo update --package proc-macro2 --precise 1.0.65 + cargo update --package quote --precise 1.0.35 + cargo update --package proc-macro2 --precise 1.0.79 + cargo update --package regex --precise 1.9.6 + cargo update --package home --precise 0.5.5 + cargo update --package which --precise 4.4.2 fi cargo build --verbose --no-default-features From fdcd653e306b67c63fcc1cf06f0106e542791ce8 Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Wed, 27 Mar 2024 19:04:06 +0000 Subject: [PATCH 16/23] Update CI --- .github/workflows/ci.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 9a332713a..b18801ca6 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -20,6 +20,7 @@ jobs: runs-on: ubuntu-latest fail-fast: false strategy: + fail-fast: false matrix: rust: - beta @@ -37,6 +38,7 @@ jobs: runs-on: ubuntu-latest fail-fast: false strategy: + fail-fast: false matrix: rust: - nightly @@ -53,6 +55,7 @@ jobs: runs-on: ubuntu-latest fail-fast: false strategy: + fail-fast: false matrix: rust: - stable @@ -76,6 +79,7 @@ jobs: runs-on: ubuntu-latest fail-fast: false strategy: + fail-fast: false matrix: include: - rust: stable @@ -100,6 +104,8 @@ jobs: runs-on: ubuntu-latest fail-fast: false name: cargo-careful + strategy: + fail-fast: false steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@master @@ -116,6 +122,7 @@ jobs: runs-on: ubuntu-latest fail-fast: false strategy: + fail-fast: false matrix: rust: - stable @@ -137,7 +144,6 @@ jobs: - docs if: always() runs-on: ubuntu-latest - fail-fast: false steps: - name: Result run: | From d813d66811af49e9a9b7ca8e59383bed6d85581d Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Wed, 27 Mar 2024 19:05:09 +0000 Subject: [PATCH 17/23] Further update CI --- 
.github/workflows/ci.yaml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index b18801ca6..6c99577d4 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -18,7 +18,6 @@ env: jobs: clippy: runs-on: ubuntu-latest - fail-fast: false strategy: fail-fast: false matrix: @@ -36,7 +35,6 @@ jobs: format: runs-on: ubuntu-latest - fail-fast: false strategy: fail-fast: false matrix: @@ -53,7 +51,6 @@ jobs: tests: runs-on: ubuntu-latest - fail-fast: false strategy: fail-fast: false matrix: @@ -77,7 +74,6 @@ jobs: cross_test: if: ${{ github.event_name == 'merge_group' }} runs-on: ubuntu-latest - fail-fast: false strategy: fail-fast: false matrix: @@ -102,7 +98,6 @@ jobs: cargo-careful: if: ${{ github.event_name == 'merge_group' }} runs-on: ubuntu-latest - fail-fast: false name: cargo-careful strategy: fail-fast: false @@ -120,7 +115,6 @@ jobs: docs: if: ${{ github.event_name == 'merge_group' }} runs-on: ubuntu-latest - fail-fast: false strategy: fail-fast: false matrix: @@ -143,6 +137,8 @@ jobs: - cargo-careful - docs if: always() + strategy: + fail-fast: false runs-on: ubuntu-latest steps: - name: Result From 2253f78a56d19c5197332a554e6ba5e15ce40b77 Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Wed, 27 Mar 2024 19:10:41 +0000 Subject: [PATCH 18/23] Version matching is hard --- .github/workflows/ci.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 6c99577d4..23dde6e0d 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -58,7 +58,7 @@ jobs: - stable - beta - nightly - - 1.63.0 # MSRV + - 1.60.0 # MSRV name: tests/${{ matrix.rust }} steps: From ed26986492f58e3ca360fb69a86695a11e774cb0 Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Wed, 27 Mar 2024 19:53:50 +0000 Subject: [PATCH 19/23] Again, change MSRV --- .github/workflows/ci.yaml | 2 +- Cargo.toml | 2 +- scripts/all-tests.sh 
| 24 +++++++++++++++++++++++- 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 23dde6e0d..6c99577d4 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -58,7 +58,7 @@ jobs: - stable - beta - nightly - - 1.60.0 # MSRV + - 1.63.0 # MSRV name: tests/${{ matrix.rust }} steps: diff --git a/Cargo.toml b/Cargo.toml index 414bbea41..879785f70 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "ndarray" version = "0.15.6" edition = "2018" -rust-version = "1.60" +rust-version = "1.63" authors = [ "Ulrik Sverdrup \"bluss\"", "Jim Turner", diff --git a/scripts/all-tests.sh b/scripts/all-tests.sh index e0f710fda..569f84c36 100755 --- a/scripts/all-tests.sh +++ b/scripts/all-tests.sh @@ -6,7 +6,7 @@ set -e FEATURES=$1 CHANNEL=$2 -if [ "$CHANNEL" = "1.60.0" ]; then +if [ "$CHANNEL" = "1.63.0" ]; then cargo update --package openblas-src --precise 0.10.5 cargo update --package openblas-build --precise 0.10.5 cargo update --package once_cell --precise 1.14.0 @@ -26,19 +26,41 @@ if [ "$CHANNEL" = "1.60.0" ]; then cargo update --package regex --precise 1.9.6 cargo update --package home --precise 0.5.5 cargo update --package which --precise 4.4.2 + cargo update --package rustix --precise 0.38.23 + cargo update --package memchr --precise 2.6.2 fi cargo build --verbose --no-default-features + # Testing both dev and release profiles helps find bugs, especially in low level code cargo test --verbose --no-default-features cargo test --release --verbose --no-default-features + cargo build --verbose --features "$FEATURES" cargo test --verbose --features "$FEATURES" + +cargo build --release --verbose --features "$FEATURES" +cargo test --release --verbose --features "$FEATURES" + cargo test --manifest-path=ndarray-rand/Cargo.toml --no-default-features --verbose +cargo test --release --manifest-path=ndarray-rand/Cargo.toml --no-default-features --verbose + cargo test 
--manifest-path=ndarray-rand/Cargo.toml --features quickcheck --verbose +cargo test --release --manifest-path=ndarray-rand/Cargo.toml --features quickcheck --verbose + cargo test --manifest-path=xtest-serialization/Cargo.toml --verbose +cargo test --release --manifest-path=xtest-serialization/Cargo.toml --verbose + cargo test --manifest-path=xtest-blas/Cargo.toml --verbose --features openblas-system +cargo test --release --manifest-path=xtest-blas/Cargo.toml --verbose --features openblas-system + cargo test --examples +cargo test --release --examples + cargo test --manifest-path=xtest-numeric/Cargo.toml --verbose +cargo test --release --manifest-path=xtest-numeric/Cargo.toml --verbose + cargo test --manifest-path=xtest-numeric/Cargo.toml --verbose --features test_blas +cargo test --release --manifest-path=xtest-numeric/Cargo.toml --verbose --features test_blas + ([ "$CHANNEL" != "nightly" ] || cargo bench --no-run --verbose --features "$FEATURES") From c4541f58c90f04952715063b020cace93d7ec869 Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Sat, 30 Mar 2024 05:01:58 +0000 Subject: [PATCH 20/23] First OpenCL kernels! --- Cargo.toml | 3 +- src/arrayformat.rs | 3 +- src/data_repr.rs | 9 +++ src/data_traits.rs | 20 ++++--- src/impl_constructors.rs | 9 +++ src/impl_methods.rs | 6 +- src/impl_ops.rs | 126 ++++++++++++++++++++++++++++++++++----- src/layout/mod.rs | 14 +++++ src/lib.rs | 3 + src/opencl.rs | 44 ++++++++++++++ 10 files changed, 212 insertions(+), 25 deletions(-) create mode 100644 src/opencl.rs diff --git a/Cargo.toml b/Cargo.toml index 879785f70..ed35fc83a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,8 @@ num-complex = { version = "0.4", default-features = false } # Use via the `opencl` crate feature! 
hasty_ = { version = "0.2", optional = true, package = "hasty", default-features = false } -# hasty_ = { path = "../../hasty_dev/hasty", optional = true, package = "hasty", default-features = false } +#hasty_ = { path = "../../hasty_dev/hasty", optional = true, package = "hasty", default-features = false } +#once_cell_ = { version = "1.19", optional = true, package = "once_cell" } # Use via the `rayon` crate feature! rayon_ = { version = "1.0.3", optional = true, package = "rayon" } diff --git a/src/arrayformat.rs b/src/arrayformat.rs index b71bb4509..9c4c0fc82 100644 --- a/src/arrayformat.rs +++ b/src/arrayformat.rs @@ -5,7 +5,7 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::{ArrayBase, ArrayView, Axis, Data, Dimension, NdProducer}; +use super::{ArrayBase, ArrayView, Axis, Data, Device, Dimension, NdProducer}; use crate::aliases::{Ix1, IxDyn}; use alloc::format; use std::fmt; @@ -116,6 +116,7 @@ where { // Cast into a dynamically dimensioned view // This is required to be able to use `index_axis` for the recursive case + assert_eq!(array.device(), Device::Host, "Cannot print an array that is not on the Host."); format_array_inner(array.view().into_dyn(), f, format, fmt_opt, 0, array.ndim()) } diff --git a/src/data_repr.rs b/src/data_repr.rs index 10ed14f39..235ac3276 100644 --- a/src/data_repr.rs +++ b/src/data_repr.rs @@ -57,6 +57,15 @@ impl OwnedRepr { self.device } + pub(crate) const unsafe fn from_components(ptr: NonNull, len: usize, capacity: usize, device: Device) -> Self { + Self { + ptr, + len, + capacity, + device, + } + } + /// Move this storage object to a specified device. #[allow(clippy::unnecessary_wraps)] pub(crate) fn move_to_device(self, device: Device) -> Option { diff --git a/src/data_traits.rs b/src/data_traits.rs index b06163081..4f5c369da 100644 --- a/src/data_traits.rs +++ b/src/data_traits.rs @@ -8,15 +8,15 @@ //! 
The data (inner representation) traits for ndarray -use rawpointer::PointerExt; - use alloc::sync::Arc; #[cfg(not(feature = "std"))] use alloc::vec::Vec; -use std::mem::MaybeUninit; use std::mem::{self, size_of}; +use std::mem::MaybeUninit; use std::ptr::NonNull; +use rawpointer::PointerExt; + use crate::{ArcArray, Array, ArrayBase, CowRepr, Device, Dimension, OwnedArcRepr, OwnedRepr, RawViewRepr, ViewRepr}; /// Array representation trait. @@ -329,10 +329,16 @@ unsafe impl RawData for OwnedRepr { } fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool { - let slc = self.as_slice(); - let ptr = slc.as_ptr() as *mut A; - let end = unsafe { ptr.add(slc.len()) }; - self_ptr >= ptr && self_ptr <= end + // let slc = self.as_slice(); + // let ptr = slc.as_ptr() as *mut A; + // let end = unsafe { ptr.add(slc.len()) }; + // self_ptr >= ptr && self_ptr <= end + + // Instead of using a slice, we just get the raw pointer. This assumes that `self.len()` + // is correct, but since this is internally managed, it's safe to assume it is + let ptr = self.as_ptr(); + let end = unsafe { ptr.add(self.len()) }; + ptr <= self_ptr && self_ptr <= end } fn _device(&self) -> Option { diff --git a/src/impl_constructors.rs b/src/impl_constructors.rs index 4229666b3..d01ada8fa 100644 --- a/src/impl_constructors.rs +++ b/src/impl_constructors.rs @@ -614,6 +614,15 @@ where array } + pub(crate) const unsafe fn from_parts(data: S, ptr: std::ptr::NonNull, dim: D, strides: D) -> Self { + Self { + data, + ptr, + dim, + strides, + } + } + #[deprecated( note = "This method is hard to use correctly. Use `uninit` instead.", since = "0.15.0" diff --git a/src/impl_methods.rs b/src/impl_methods.rs index 1faa3436b..9669effc9 100644 --- a/src/impl_methods.rs +++ b/src/impl_methods.rs @@ -117,6 +117,10 @@ where self.dim.clone() } + pub fn raw_strides(&self) -> D { + self.strides.clone() + } + /// Return the shape of the array as a slice. 
/// /// Note that you probably don't want to use this to create an array of the @@ -2990,7 +2994,7 @@ unsafe fn unlimited_transmute(data: A) -> B { type DimMaxOf = >::Output; impl ArrayBase, D> -where A: std::fmt::Debug +// where A: std::fmt::Debug { // pub fn device(&self) -> Device { // self.data.device() diff --git a/src/impl_ops.rs b/src/impl_ops.rs index 9475ebba3..5f04bf0bf 100644 --- a/src/impl_ops.rs +++ b/src/impl_ops.rs @@ -6,8 +6,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::mem::ManuallyDrop; use crate::dimension::DimMax; +use crate::Device; use crate::Zip; +use crate::Layout; +use crate::OwnedRepr; use num_complex::Complex; /// Elements that can be used as direct operands in arithmetic with arrays. @@ -58,6 +62,16 @@ macro_rules! device_check_assert( } ); +// Pick the expression $a for commutative and $b for ordered binop +macro_rules! if_commutative { + (Commute { $a:expr } or { $b:expr }) => { + $a + }; + (Ordered { $a:expr } or { $b:expr }) => { + $b + }; +} + macro_rules! impl_binary_op( ($rs_trait:ident, $operator:tt, $math_op:ident, $inplace_op:tt, $docstring:expr) => ( /// Perform elementwise @@ -186,7 +200,7 @@ where /// **Panics** if broadcasting isn’t possible. 
impl<'a, A, B, S, S2, D, E> $rs_trait<&'a ArrayBase> for &'a ArrayBase where - A: Clone + $rs_trait, + A: Clone + $rs_trait + std::fmt::Debug, B: Clone, S: Data, S2: Data, @@ -205,7 +219,97 @@ where } else { self.broadcast_with(rhs).unwrap() }; - Zip::from(lhs).and(rhs).map_collect(clone_opf(A::$math_op)) + + match self.device() { + Device::Host => { + Zip::from(lhs).and(rhs).map_collect(clone_opf(A::$math_op)) + } + + #[cfg(feature = "opencl")] + Device::OpenCL => { + if lhs.raw_dim().ndim() == 0 && rhs.raw_dim().ndim() == 0 { + // println!("Scalar"); + todo!(); + } else if lhs.layout_impl().is(Layout::CORDER | Layout::FORDER) && + rhs.layout_impl().is(Layout::CORDER | Layout::FORDER) && + lhs.layout_impl().matches(rhs.layout_impl()) { + // println!("Contiguous"); + + static mut KERNEL_BUILT: bool = false; // todo: fix monomorphization issue + + let typename = match crate::opencl::rust_type_to_c_name::() { + Some(x) => x, + None => panic!("The Rust type {} is not supported by the \ + OpenCL backend", std::any::type_name::()) + }; + + let kernel_name = format!("binary_op_{}_{}", stringify!($math_op), typename); + + #[cold] + if unsafe { !KERNEL_BUILT } { + let kernel = crate::opencl::gen_contiguous_linear_kernel_3( + &kernel_name, + typename, + stringify!($operator)); + + unsafe { + hasty_::opencl::opencl_add_kernel(&kernel); + KERNEL_BUILT = true; + } + } + + unsafe { + let elements = self.len(); + let self_ptr = self.as_ptr() as *mut std::ffi::c_void; + let other_ptr = rhs.as_ptr() as *mut std::ffi::c_void; + let res_ptr = match hasty_::opencl::opencl_allocate( + elements * std::mem::size_of::(), + hasty_::opencl::OpenCLMemoryType::ReadWrite + ) { + Ok(buf) => buf, + Err(e) => panic!("Failed to allocate OpenCL buffer. 
Exited with: {:?}", e) + }; + + match hasty_::opencl::opencl_run_contiguous_linear_kernel_3( + &kernel_name, + elements, + self_ptr, + other_ptr, + res_ptr, + ) { + Ok(()) => { + use std::ptr::NonNull; + + let ptr = NonNull::new(res_ptr as *mut A).unwrap(); + let data = OwnedRepr::::from_components( + ptr, + self.len(), + self.len(), + self.device(), + ); + + Self::Output::from_parts( + data, + ptr, + >::Output::from_dimension(&self.raw_dim()).unwrap(), + >::Output::from_dimension(&self.raw_strides()).unwrap(), + ) + } + Err(e) => panic!("Failed to run OpenCL kernel '{}'. \ + Exited with code: {:?}", kernel_name, e), + } + } + } else { + println!("Strided"); + todo!(); + } + } + + #[cfg(feature = "cuda")] + Device::CUDA => { + todo!(); + } + } } } @@ -248,16 +352,6 @@ impl<'a, A, S, D, B> $rs_trait for &'a ArrayBase ); ); -// Pick the expression $a for commutative and $b for ordered binop -macro_rules! if_commutative { - (Commute { $a:expr } or { $b:expr }) => { - $a - }; - (Ordered { $a:expr } or { $b:expr }) => { - $b - }; -} - macro_rules! impl_scalar_lhs_op { // $commutative flag. Reuse the self + scalar impl if we can. // We can do this safely since these are the primitive numeric types @@ -304,10 +398,11 @@ impl<'a, S, D> $trt<&'a ArrayBase> for $scalar } mod arithmetic_ops { - use super::*; + use std::ops::*; + use crate::imp_prelude::*; - use std::ops::*; + use super::*; fn clone_opf(f: impl Fn(A, B) -> C) -> impl FnMut(&A, &B) -> C { move |x, y| f(x.clone(), y.clone()) @@ -447,9 +542,10 @@ mod arithmetic_ops { } mod assign_ops { - use super::*; use crate::imp_prelude::*; + use super::*; + macro_rules! 
impl_assign_op { ($trt:ident, $method:ident, $doc:expr) => { use std::ops::$trt; diff --git a/src/layout/mod.rs b/src/layout/mod.rs index d3c9e5fcb..9477d29c2 100644 --- a/src/layout/mod.rs +++ b/src/layout/mod.rs @@ -68,6 +68,20 @@ impl Layout { (self.is(Layout::CORDER) as i32 - self.is(Layout::FORDER) as i32) + (self.is(Layout::CPREFER) as i32 - self.is(Layout::FPREFER) as i32) } + + /// Return true if the layout order of `self` matches the layout order of `other` + /// + /// **Note**: We ignore the preference bits + #[inline(always)] + pub(crate) fn matches(self, other: Self) -> bool { + self.0 & (0b11) == other.0 & (0b11) + } + + /// Return true if this layout exactly matches the other layout + #[inline(always)] + pub(crate) fn matches_exact(self, other: Self) -> bool { + self.0 == other.0 + } } #[cfg(test)] diff --git a/src/lib.rs b/src/lib.rs index 5490dd52a..e24bcc3a3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1602,6 +1602,9 @@ pub(crate) fn is_aligned(ptr: *const T) -> bool { (ptr as usize) % ::std::mem::align_of::() == 0 } +#[cfg(feature = "opencl")] +mod opencl; + pub fn configure() { #[cfg(feature = "opencl")] unsafe { diff --git a/src/opencl.rs b/src/opencl.rs new file mode 100644 index 000000000..f9b127457 --- /dev/null +++ b/src/opencl.rs @@ -0,0 +1,44 @@ +pub(crate) fn rust_type_to_c_name() -> Option<&'static str> { + match std::any::type_name::() { + "f32" => Some("float"), + "f64" => Some("double"), + "i8" => Some("int8_t"), + "i16" => Some("int16_t"), + "i32" => Some("int32_t"), + "i64" => Some("int64_t"), + "u8" => Some("uint8_t"), + "u16" => Some("uint16_t"), + "u32" => Some("uint32_t"), + "u64" | "usize" => Some("uint64_t"), + _ => None, + } +} + +pub(crate) fn gen_contiguous_linear_kernel_3(kernel_name: &str, typename: &str, op: &str) -> String { + format!( + r#" + #ifndef NDARRAY_INCLUDE_STDINT + #define NDARRAY_INCLUDE_STDINT + + // We should probably verify that these are, in fact, correct + typedef char int8_t; + typedef short int16_t; + 
typedef int int32_t; + typedef long int64_t; + typedef unsigned char uint8_t; + typedef unsigned short uint16_t; + typedef unsigned int uint32_t; + typedef unsigned long uint64_t; + #endif // NDARRAY_INCLUDE_STDINT + + __kernel void {kernel_name}(__global const {typename} *a, __global const {typename} *b, __global {typename} *c) {{ + // Get id as 64-bit integer to avoid overflow + uint64_t i = get_global_id(0); + c[i] = a[i] {op} b[i]; + }} + "#, + kernel_name = kernel_name, + typename = typename, + op = op, + ) +} From 77bfad65a87144d3de2ad00fd08fc313676df6b0 Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Sat, 30 Mar 2024 13:40:31 +0000 Subject: [PATCH 21/23] Clean up some code and update CI --- .github/workflows/ci.yaml | 2 +- src/impl_ops.rs | 8 ++++---- src/layout/mod.rs | 10 +++++----- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 6c99577d4..7dfc3bdc1 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -13,7 +13,7 @@ env: CARGO_TERM_COLOR: always HOST: x86_64-unknown-linux-gnu FEATURES: "test docs" - RUSTFLAGS: "-D warnings" + # RUSTFLAGS: "-D warnings" # For now, we don't mind a couple warnings jobs: clippy: diff --git a/src/impl_ops.rs b/src/impl_ops.rs index 5f04bf0bf..2e2228f31 100644 --- a/src/impl_ops.rs +++ b/src/impl_ops.rs @@ -6,12 +6,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::mem::ManuallyDrop; use crate::dimension::DimMax; use crate::Device; -use crate::Zip; use crate::Layout; use crate::OwnedRepr; +use crate::Zip; use num_complex::Complex; /// Elements that can be used as direct operands in arithmetic with arrays. 
@@ -245,7 +244,6 @@ where let kernel_name = format!("binary_op_{}_{}", stringify!($math_op), typename); - #[cold] if unsafe { !KERNEL_BUILT } { let kernel = crate::opencl::gen_contiguous_linear_kernel_3( &kernel_name, @@ -253,7 +251,9 @@ where stringify!($operator)); unsafe { - hasty_::opencl::opencl_add_kernel(&kernel); + if let Err(e) = hasty_::opencl::opencl_add_kernel(&kernel) { + panic!("Failed to add OpenCL kernel. Errored with code {:?}", e); + } KERNEL_BUILT = true; } } diff --git a/src/layout/mod.rs b/src/layout/mod.rs index 9477d29c2..291c9d4ca 100644 --- a/src/layout/mod.rs +++ b/src/layout/mod.rs @@ -77,11 +77,11 @@ impl Layout { self.0 & (0b11) == other.0 & (0b11) } - /// Return true if this layout exactly matches the other layout - #[inline(always)] - pub(crate) fn matches_exact(self, other: Self) -> bool { - self.0 == other.0 - } + // /// Return true if this layout exactly matches the other layout + // #[inline(always)] + // pub(crate) fn matches_exact(self, other: Self) -> bool { + // self.0 == other.0 + // } } #[cfg(test)] From 48c5ee2901f29e264fccddbabae28866ffc8edb6 Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Sat, 30 Mar 2024 13:42:01 +0000 Subject: [PATCH 22/23] Fix include order --- src/data_traits.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/data_traits.rs b/src/data_traits.rs index 4f5c369da..fb786a604 100644 --- a/src/data_traits.rs +++ b/src/data_traits.rs @@ -11,8 +11,8 @@ use alloc::sync::Arc; #[cfg(not(feature = "std"))] use alloc::vec::Vec; -use std::mem::{self, size_of}; use std::mem::MaybeUninit; +use std::mem::{self, size_of}; use std::ptr::NonNull; use rawpointer::PointerExt; From 96aba357bbf9024f32d01c337e44ff338f482e22 Mon Sep 17 00:00:00 2001 From: Toby Davis Date: Sat, 30 Mar 2024 18:11:11 +0000 Subject: [PATCH 23/23] Revert formatting changes :( --- benches/append.rs | 9 +- benches/bench1.rs | 303 +++++++++++------ benches/chunks.rs | 15 +- benches/construct.rs | 12 +- benches/gemv_gemm.rs | 18 
+- benches/higher-order.rs | 24 +- benches/iter.rs | 105 ++++-- benches/numeric.rs | 3 +- benches/par_rayon.rs | 51 ++- benches/to_shape.rs | 33 +- benches/zip.rs | 33 +- examples/axis_ops.rs | 3 +- examples/bounds_check_elim.rs | 21 +- examples/column_standardize.rs | 3 +- examples/convo.rs | 6 +- examples/life.rs | 15 +- examples/rollaxis.rs | 3 +- examples/sort-axis.rs | 36 +- examples/type_conversion.rs | 3 +- examples/zip_many.rs | 3 +- ndarray-rand/benches/bench.rs | 9 +- ndarray-rand/src/lib.rs | 21 +- ndarray-rand/tests/tests.rs | 24 +- rustfmt.toml | 2 +- src/aliases.rs | 24 +- src/argument_traits.rs | 27 +- src/array_approx.rs | 3 +- src/array_serde.rs | 69 ++-- src/arrayformat.rs | 81 +++-- src/arraytraits.rs | 84 +++-- src/data_repr.rs | 75 ++-- src/data_traits.rs | 246 +++++++++----- src/dimension/axes.rs | 45 ++- src/dimension/axis.rs | 6 +- src/dimension/broadcast.rs | 12 +- src/dimension/conversion.rs | 27 +- src/dimension/dim.rs | 24 +- src/dimension/dimension_trait.rs | 282 ++++++++++----- src/dimension/dynindeximpl.rs | 117 ++++--- src/dimension/mod.rs | 138 +++++--- src/dimension/ndindex.rs | 114 ++++--- src/dimension/ops.rs | 9 +- src/dimension/remove_axis.rs | 15 +- src/dimension/reshape.rs | 3 +- src/dimension/sequence.rs | 33 +- src/error.rs | 42 ++- src/extension/nonnull.rs | 6 +- src/free_functions.rs | 48 ++- src/geomspace.rs | 36 +- src/impl_1d.rs | 3 +- src/impl_2d.rs | 21 +- src/impl_clone.rs | 9 +- src/impl_constructors.rs | 39 ++- src/impl_cow.rs | 18 +- src/impl_dyn.rs | 6 +- src/impl_internal_constructors.rs | 6 +- src/impl_methods.rs | 258 +++++++++----- src/impl_ops.rs | 27 +- src/impl_owned_array.rs | 54 ++- src/impl_raw_views.rs | 48 ++- src/impl_special_element_types.rs | 3 +- src/impl_views/constructors.rs | 33 +- src/impl_views/conversions.rs | 78 +++-- src/impl_views/indexing.rs | 21 +- src/impl_views/splitting.rs | 15 +- src/indexes.rs | 78 +++-- src/iterators/chunks.rs | 30 +- src/iterators/into_iter.rs | 30 +- 
src/iterators/lanes.rs | 24 +- src/iterators/mod.rs | 453 ++++++++++++++++--------- src/iterators/windows.rs | 18 +- src/itertools.rs | 3 +- src/layout/layoutfmt.rs | 6 +- src/layout/mod.rs | 54 ++- src/lib.rs | 54 ++- src/linalg/impl_linalg.rs | 69 ++-- src/linspace.rs | 18 +- src/logspace.rs | 27 +- src/low_level_util.rs | 12 +- src/math_cell.rs | 57 ++-- src/numeric/impl_float_maths.rs | 6 +- src/numeric/impl_numeric.rs | 18 +- src/numeric_util.rs | 6 +- src/opencl.rs | 6 +- src/order.rs | 21 +- src/parallel/impl_par_methods.rs | 3 +- src/parallel/into_impls.rs | 12 +- src/parallel/mod.rs | 3 +- src/parallel/par.rs | 21 +- src/parallel/send_producer.rs | 48 ++- src/partial.rs | 27 +- src/shape_builder.rs | 78 +++-- src/slice.rs | 132 ++++--- src/split_at.rs | 18 +- src/zip/mod.rs | 60 ++-- src/zip/ndproducer.rs | 189 +++++++---- tests/append.rs | 54 ++- tests/array-construct.rs | 90 +++-- tests/array.rs | 447 ++++++++++++++++-------- tests/assign.rs | 51 ++- tests/azip.rs | 84 +++-- tests/broadcast.rs | 15 +- tests/clone.rs | 3 +- tests/complex.rs | 6 +- tests/dimension.rs | 48 ++- tests/format.rs | 6 +- tests/higher_order_f.rs | 3 +- tests/indices.rs | 3 +- tests/into-ixdyn.rs | 6 +- tests/iterator_chunks.rs | 18 +- tests/iterators.rs | 162 ++++++--- tests/ix0.rs | 12 +- tests/ixdyn.rs | 33 +- tests/numeric.rs | 69 ++-- tests/oper.rs | 93 +++-- tests/par_azip.rs | 15 +- tests/par_rayon.rs | 18 +- tests/par_zip.rs | 24 +- tests/raw_views.rs | 30 +- tests/reshape.rs | 45 ++- tests/s.rs | 3 +- tests/stacking.rs | 6 +- tests/views.rs | 3 +- tests/windows.rs | 57 ++-- tests/zst.rs | 6 +- xtest-blas/tests/oper.rs | 60 ++-- xtest-numeric/tests/accuracy.rs | 33 +- xtest-serialization/tests/serialize.rs | 15 +- 128 files changed, 4038 insertions(+), 2021 deletions(-) diff --git a/benches/append.rs b/benches/append.rs index b9ca99c62..a37df256f 100644 --- a/benches/append.rs +++ b/benches/append.rs @@ -6,21 +6,24 @@ use test::Bencher; use ndarray::prelude::*; #[bench] 
-fn select_axis0(bench: &mut Bencher) { +fn select_axis0(bench: &mut Bencher) +{ let a = Array::::zeros((256, 256)); let selectable = vec![0, 1, 2, 0, 1, 3, 0, 4, 16, 32, 128, 147, 149, 220, 221, 255, 221, 0, 1]; bench.iter(|| a.select(Axis(0), &selectable)); } #[bench] -fn select_axis1(bench: &mut Bencher) { +fn select_axis1(bench: &mut Bencher) +{ let a = Array::::zeros((256, 256)); let selectable = vec![0, 1, 2, 0, 1, 3, 0, 4, 16, 32, 128, 147, 149, 220, 221, 255, 221, 0, 1]; bench.iter(|| a.select(Axis(1), &selectable)); } #[bench] -fn select_1d(bench: &mut Bencher) { +fn select_1d(bench: &mut Bencher) +{ let a = Array::::zeros(1024); let mut selectable = (0..a.len()).step_by(17).collect::>(); selectable.extend(selectable.clone().iter().rev()); diff --git a/benches/bench1.rs b/benches/bench1.rs index fb7f799d6..33185844a 100644 --- a/benches/bench1.rs +++ b/benches/bench1.rs @@ -16,7 +16,8 @@ use ndarray::{Ix1, Ix2, Ix3, Ix5, IxDyn}; use test::black_box; #[bench] -fn iter_sum_1d_regular(bench: &mut test::Bencher) { +fn iter_sum_1d_regular(bench: &mut test::Bencher) +{ let a = Array::::zeros(64 * 64); let a = black_box(a); bench.iter(|| { @@ -29,7 +30,8 @@ fn iter_sum_1d_regular(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_1d_raw(bench: &mut test::Bencher) { +fn iter_sum_1d_raw(bench: &mut test::Bencher) +{ // this is autovectorized to death (= great performance) let a = Array::::zeros(64 * 64); let a = black_box(a); @@ -43,7 +45,8 @@ fn iter_sum_1d_raw(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_2d_regular(bench: &mut test::Bencher) { +fn iter_sum_2d_regular(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let a = black_box(a); bench.iter(|| { @@ -56,7 +59,8 @@ fn iter_sum_2d_regular(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_2d_by_row(bench: &mut test::Bencher) { +fn iter_sum_2d_by_row(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let a = black_box(a); bench.iter(|| { @@ -71,7 +75,8 @@ fn 
iter_sum_2d_by_row(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_2d_raw(bench: &mut test::Bencher) { +fn iter_sum_2d_raw(bench: &mut test::Bencher) +{ // this is autovectorized to death (= great performance) let a = Array::::zeros((64, 64)); let a = black_box(a); @@ -85,7 +90,8 @@ fn iter_sum_2d_raw(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_2d_cutout(bench: &mut test::Bencher) { +fn iter_sum_2d_cutout(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -99,7 +105,8 @@ fn iter_sum_2d_cutout(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_2d_cutout_by_row(bench: &mut test::Bencher) { +fn iter_sum_2d_cutout_by_row(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -115,7 +122,8 @@ fn iter_sum_2d_cutout_by_row(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_2d_cutout_outer_iter(bench: &mut test::Bencher) { +fn iter_sum_2d_cutout_outer_iter(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -131,7 +139,8 @@ fn iter_sum_2d_cutout_outer_iter(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_2d_transpose_regular(bench: &mut test::Bencher) { +fn iter_sum_2d_transpose_regular(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((64, 64)); a.swap_axes(0, 1); let a = black_box(a); @@ -145,7 +154,8 @@ fn iter_sum_2d_transpose_regular(bench: &mut test::Bencher) { } #[bench] -fn iter_sum_2d_transpose_by_row(bench: &mut test::Bencher) { +fn iter_sum_2d_transpose_by_row(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((64, 64)); a.swap_axes(0, 1); let a = black_box(a); @@ -161,14 +171,16 @@ fn iter_sum_2d_transpose_by_row(bench: &mut test::Bencher) { } #[bench] -fn sum_2d_regular(bench: &mut test::Bencher) { +fn sum_2d_regular(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let a = black_box(a); 
bench.iter(|| a.sum()); } #[bench] -fn sum_2d_cutout(bench: &mut test::Bencher) { +fn sum_2d_cutout(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -176,14 +188,16 @@ fn sum_2d_cutout(bench: &mut test::Bencher) { } #[bench] -fn sum_2d_float(bench: &mut test::Bencher) { +fn sum_2d_float(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let a = black_box(a.view()); bench.iter(|| a.sum()); } #[bench] -fn sum_2d_float_cutout(bench: &mut test::Bencher) { +fn sum_2d_float_cutout(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -191,7 +205,8 @@ fn sum_2d_float_cutout(bench: &mut test::Bencher) { } #[bench] -fn sum_2d_float_t_cutout(bench: &mut test::Bencher) { +fn sum_2d_float_t_cutout(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]).reversed_axes(); let a = black_box(av); @@ -199,13 +214,15 @@ fn sum_2d_float_t_cutout(bench: &mut test::Bencher) { } #[bench] -fn fold_sum_i32_2d_regular(bench: &mut test::Bencher) { +fn fold_sum_i32_2d_regular(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); bench.iter(|| a.fold(0, |acc, &x| acc + x)); } #[bench] -fn fold_sum_i32_2d_cutout(bench: &mut test::Bencher) { +fn fold_sum_i32_2d_cutout(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = black_box(av); @@ -213,7 +230,8 @@ fn fold_sum_i32_2d_cutout(bench: &mut test::Bencher) { } #[bench] -fn fold_sum_i32_2d_stride(bench: &mut test::Bencher) { +fn fold_sum_i32_2d_stride(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 128)); let av = a.slice(s![.., ..;2]); let a = black_box(av); @@ -221,14 +239,16 @@ fn fold_sum_i32_2d_stride(bench: &mut test::Bencher) { } #[bench] -fn fold_sum_i32_2d_transpose(bench: &mut test::Bencher) { +fn fold_sum_i32_2d_transpose(bench: &mut test::Bencher) +{ let a = 
Array::::zeros((64, 64)); let a = a.t(); bench.iter(|| a.fold(0, |acc, &x| acc + x)); } #[bench] -fn fold_sum_i32_2d_cutout_transpose(bench: &mut test::Bencher) { +fn fold_sum_i32_2d_cutout_transpose(bench: &mut test::Bencher) +{ let a = Array::::zeros((66, 66)); let mut av = a.slice(s![1..-1, 1..-1]); av.swap_axes(0, 1); @@ -239,7 +259,8 @@ fn fold_sum_i32_2d_cutout_transpose(bench: &mut test::Bencher) { const ADD2DSZ: usize = 64; #[bench] -fn add_2d_regular(bench: &mut test::Bencher) { +fn add_2d_regular(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = b.view(); @@ -249,7 +270,8 @@ fn add_2d_regular(bench: &mut test::Bencher) { } #[bench] -fn add_2d_zip(bench: &mut test::Bencher) { +fn add_2d_zip(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| { @@ -258,14 +280,16 @@ fn add_2d_zip(bench: &mut test::Bencher) { } #[bench] -fn add_2d_alloc_plus(bench: &mut test::Bencher) { +fn add_2d_alloc_plus(bench: &mut test::Bencher) +{ let a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| &a + &b); } #[bench] -fn add_2d_alloc_zip_uninit(bench: &mut test::Bencher) { +fn add_2d_alloc_zip_uninit(bench: &mut test::Bencher) +{ let a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| unsafe { @@ -278,38 +302,44 @@ fn add_2d_alloc_zip_uninit(bench: &mut test::Bencher) { } #[bench] -fn add_2d_alloc_zip_collect(bench: &mut test::Bencher) { +fn add_2d_alloc_zip_collect(bench: &mut test::Bencher) +{ let a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| Zip::from(&a).and(&b).map_collect(|&x, &y| x + y)); } #[bench] -fn vec_string_collect(bench: &mut test::Bencher) { +fn vec_string_collect(bench: &mut test::Bencher) +{ let v = vec![""; 10240]; bench.iter(|| v.iter().map(|s| 
s.to_owned()).collect::>()); } #[bench] -fn array_string_collect(bench: &mut test::Bencher) { +fn array_string_collect(bench: &mut test::Bencher) +{ let v = Array::from(vec![""; 10240]); bench.iter(|| Zip::from(&v).map_collect(|s| s.to_owned())); } #[bench] -fn vec_f64_collect(bench: &mut test::Bencher) { +fn vec_f64_collect(bench: &mut test::Bencher) +{ let v = vec![1.; 10240]; bench.iter(|| v.iter().map(|s| s + 1.).collect::>()); } #[bench] -fn array_f64_collect(bench: &mut test::Bencher) { +fn array_f64_collect(bench: &mut test::Bencher) +{ let v = Array::from(vec![1.; 10240]); bench.iter(|| Zip::from(&v).map_collect(|s| s + 1.)); } #[bench] -fn add_2d_assign_ops(bench: &mut test::Bencher) { +fn add_2d_assign_ops(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = b.view(); @@ -321,7 +351,8 @@ fn add_2d_assign_ops(bench: &mut test::Bencher) { } #[bench] -fn add_2d_cutout(bench: &mut test::Bencher) { +fn add_2d_cutout(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ + 2, ADD2DSZ + 2)); let mut acut = a.slice_mut(s![1..-1, 1..-1]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -332,7 +363,8 @@ fn add_2d_cutout(bench: &mut test::Bencher) { } #[bench] -fn add_2d_zip_cutout(bench: &mut test::Bencher) { +fn add_2d_zip_cutout(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ + 2, ADD2DSZ + 2)); let mut acut = a.slice_mut(s![1..-1, 1..-1]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -343,7 +375,8 @@ fn add_2d_zip_cutout(bench: &mut test::Bencher) { #[bench] #[allow(clippy::identity_op)] -fn add_2d_cutouts_by_4(bench: &mut test::Bencher) { +fn add_2d_cutouts_by_4(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((64 * 1, 64 * 1)); let b = Array::::zeros((64 * 1, 64 * 1)); let chunksz = (4, 4); @@ -356,7 +389,8 @@ fn add_2d_cutouts_by_4(bench: &mut test::Bencher) { #[bench] #[allow(clippy::identity_op)] -fn add_2d_cutouts_by_16(bench: &mut 
test::Bencher) { +fn add_2d_cutouts_by_16(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((64 * 1, 64 * 1)); let b = Array::::zeros((64 * 1, 64 * 1)); let chunksz = (16, 16); @@ -369,7 +403,8 @@ fn add_2d_cutouts_by_16(bench: &mut test::Bencher) { #[bench] #[allow(clippy::identity_op)] -fn add_2d_cutouts_by_32(bench: &mut test::Bencher) { +fn add_2d_cutouts_by_32(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((64 * 1, 64 * 1)); let b = Array::::zeros((64 * 1, 64 * 1)); let chunksz = (32, 32); @@ -381,7 +416,8 @@ fn add_2d_cutouts_by_32(bench: &mut test::Bencher) { } #[bench] -fn add_2d_broadcast_1_to_2(bench: &mut test::Bencher) { +fn add_2d_broadcast_1_to_2(bench: &mut test::Bencher) +{ let mut a = Array2::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array1::::zeros(ADD2DSZ); let bv = b.view(); @@ -391,7 +427,8 @@ fn add_2d_broadcast_1_to_2(bench: &mut test::Bencher) { } #[bench] -fn add_2d_broadcast_0_to_2(bench: &mut test::Bencher) { +fn add_2d_broadcast_0_to_2(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros(()); let bv = b.view(); @@ -401,48 +438,55 @@ fn add_2d_broadcast_0_to_2(bench: &mut test::Bencher) { } #[bench] -fn scalar_toowned(bench: &mut test::Bencher) { +fn scalar_toowned(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); bench.iter(|| a.to_owned()); } #[bench] -fn scalar_add_1(bench: &mut test::Bencher) { +fn scalar_add_1(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let n = 1.; bench.iter(|| &a + n); } #[bench] -fn scalar_add_2(bench: &mut test::Bencher) { +fn scalar_add_2(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let n = 1.; bench.iter(|| n + &a); } #[bench] -fn scalar_add_strided_1(bench: &mut test::Bencher) { +fn scalar_add_strided_1(bench: &mut test::Bencher) +{ let a = Array::from_shape_fn((64, 64 * 2), |(i, j)| (i * 64 + j) as f32).slice_move(s![.., ..;2]); let n = 1.; bench.iter(|| &a + n); } #[bench] -fn 
scalar_add_strided_2(bench: &mut test::Bencher) { +fn scalar_add_strided_2(bench: &mut test::Bencher) +{ let a = Array::from_shape_fn((64, 64 * 2), |(i, j)| (i * 64 + j) as f32).slice_move(s![.., ..;2]); let n = 1.; bench.iter(|| n + &a); } #[bench] -fn scalar_sub_1(bench: &mut test::Bencher) { +fn scalar_sub_1(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let n = 1.; bench.iter(|| &a - n); } #[bench] -fn scalar_sub_2(bench: &mut test::Bencher) { +fn scalar_sub_2(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let n = 1.; bench.iter(|| n - &a); @@ -450,7 +494,8 @@ fn scalar_sub_2(bench: &mut test::Bencher) { // This is for comparison with add_2d_broadcast_0_to_2 #[bench] -fn add_2d_0_to_2_iadd_scalar(bench: &mut test::Bencher) { +fn add_2d_0_to_2_iadd_scalar(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let n = black_box(0); bench.iter(|| { @@ -459,7 +504,8 @@ fn add_2d_0_to_2_iadd_scalar(bench: &mut test::Bencher) { } #[bench] -fn add_2d_strided(bench: &mut test::Bencher) { +fn add_2d_strided(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ * 2)); let mut a = a.slice_mut(s![.., ..;2]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -470,7 +516,8 @@ fn add_2d_strided(bench: &mut test::Bencher) { } #[bench] -fn add_2d_regular_dyn(bench: &mut test::Bencher) { +fn add_2d_regular_dyn(bench: &mut test::Bencher) +{ let mut a = Array::::zeros(&[ADD2DSZ, ADD2DSZ][..]); let b = Array::::zeros(&[ADD2DSZ, ADD2DSZ][..]); let bv = b.view(); @@ -480,7 +527,8 @@ fn add_2d_regular_dyn(bench: &mut test::Bencher) { } #[bench] -fn add_2d_strided_dyn(bench: &mut test::Bencher) { +fn add_2d_strided_dyn(bench: &mut test::Bencher) +{ let mut a = Array::::zeros(&[ADD2DSZ, ADD2DSZ * 2][..]); let mut a = a.slice_mut(s![.., ..;2]); let b = Array::::zeros(&[ADD2DSZ, ADD2DSZ][..]); @@ -491,7 +539,8 @@ fn add_2d_strided_dyn(bench: &mut test::Bencher) { } #[bench] -fn add_2d_zip_strided(bench: &mut 
test::Bencher) { +fn add_2d_zip_strided(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ * 2)); let mut a = a.slice_mut(s![.., ..;2]); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -501,7 +550,8 @@ fn add_2d_zip_strided(bench: &mut test::Bencher) { } #[bench] -fn add_2d_one_transposed(bench: &mut test::Bencher) { +fn add_2d_one_transposed(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -511,7 +561,8 @@ fn add_2d_one_transposed(bench: &mut test::Bencher) { } #[bench] -fn add_2d_zip_one_transposed(bench: &mut test::Bencher) { +fn add_2d_zip_one_transposed(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -521,7 +572,8 @@ fn add_2d_zip_one_transposed(bench: &mut test::Bencher) { } #[bench] -fn add_2d_both_transposed(bench: &mut test::Bencher) { +fn add_2d_both_transposed(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -532,7 +584,8 @@ fn add_2d_both_transposed(bench: &mut test::Bencher) { } #[bench] -fn add_2d_zip_both_transposed(bench: &mut test::Bencher) { +fn add_2d_zip_both_transposed(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut b = Array::::zeros((ADD2DSZ, ADD2DSZ)); @@ -543,7 +596,8 @@ fn add_2d_zip_both_transposed(bench: &mut test::Bencher) { } #[bench] -fn add_2d_f32_regular(bench: &mut test::Bencher) { +fn add_2d_f32_regular(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); let b = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = b.view(); @@ -555,7 +609,8 @@ fn add_2d_f32_regular(bench: &mut test::Bencher) { const ADD3DSZ: usize = 16; #[bench] -fn add_3d_strided(bench: &mut test::Bencher) { +fn add_3d_strided(bench: &mut test::Bencher) +{ let mut a = 
Array::::zeros((ADD3DSZ, ADD3DSZ, ADD3DSZ * 2)); let mut a = a.slice_mut(s![.., .., ..;2]); let b = Array::::zeros(a.dim()); @@ -566,7 +621,8 @@ fn add_3d_strided(bench: &mut test::Bencher) { } #[bench] -fn add_3d_strided_dyn(bench: &mut test::Bencher) { +fn add_3d_strided_dyn(bench: &mut test::Bencher) +{ let mut a = Array::::zeros(&[ADD3DSZ, ADD3DSZ, ADD3DSZ * 2][..]); let mut a = a.slice_mut(s![.., .., ..;2]); let b = Array::::zeros(a.dim()); @@ -579,7 +635,8 @@ fn add_3d_strided_dyn(bench: &mut test::Bencher) { const ADD1D_SIZE: usize = 64 * 64; #[bench] -fn add_1d_regular(bench: &mut test::Bencher) { +fn add_1d_regular(bench: &mut test::Bencher) +{ let mut a = Array::::zeros(ADD1D_SIZE); let b = Array::::zeros(a.dim()); bench.iter(|| { @@ -588,7 +645,8 @@ fn add_1d_regular(bench: &mut test::Bencher) { } #[bench] -fn add_1d_strided(bench: &mut test::Bencher) { +fn add_1d_strided(bench: &mut test::Bencher) +{ let mut a = Array::::zeros(ADD1D_SIZE * 2); let mut av = a.slice_mut(s![..;2]); let b = Array::::zeros(av.dim()); @@ -598,7 +656,8 @@ fn add_1d_strided(bench: &mut test::Bencher) { } #[bench] -fn iadd_scalar_2d_regular(bench: &mut test::Bencher) { +fn iadd_scalar_2d_regular(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ)); bench.iter(|| { a += 1.; @@ -606,7 +665,8 @@ fn iadd_scalar_2d_regular(bench: &mut test::Bencher) { } #[bench] -fn iadd_scalar_2d_strided(bench: &mut test::Bencher) { +fn iadd_scalar_2d_strided(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((ADD2DSZ, ADD2DSZ * 2)); let mut a = a.slice_mut(s![.., ..;2]); bench.iter(|| { @@ -615,7 +675,8 @@ fn iadd_scalar_2d_strided(bench: &mut test::Bencher) { } #[bench] -fn iadd_scalar_2d_regular_dyn(bench: &mut test::Bencher) { +fn iadd_scalar_2d_regular_dyn(bench: &mut test::Bencher) +{ let mut a = Array::::zeros(vec![ADD2DSZ, ADD2DSZ]); bench.iter(|| { a += 1.; @@ -623,7 +684,8 @@ fn iadd_scalar_2d_regular_dyn(bench: &mut test::Bencher) { } #[bench] -fn 
iadd_scalar_2d_strided_dyn(bench: &mut test::Bencher) { +fn iadd_scalar_2d_strided_dyn(bench: &mut test::Bencher) +{ let mut a = Array::::zeros(vec![ADD2DSZ, ADD2DSZ * 2]); let mut a = a.slice_mut(s![.., ..;2]); bench.iter(|| { @@ -632,7 +694,8 @@ fn iadd_scalar_2d_strided_dyn(bench: &mut test::Bencher) { } #[bench] -fn scaled_add_2d_f32_regular(bench: &mut test::Bencher) { +fn scaled_add_2d_f32_regular(bench: &mut test::Bencher) +{ let mut av = Array::::zeros((ADD2DSZ, ADD2DSZ)); let bv = Array::::zeros((ADD2DSZ, ADD2DSZ)); let scalar = std::f32::consts::PI; @@ -642,7 +705,8 @@ fn scaled_add_2d_f32_regular(bench: &mut test::Bencher) { } #[bench] -fn assign_scalar_2d_corder(bench: &mut test::Bencher) { +fn assign_scalar_2d_corder(bench: &mut test::Bencher) +{ let a = Array::zeros((ADD2DSZ, ADD2DSZ)); let mut a = black_box(a); let s = 3.; @@ -650,7 +714,8 @@ fn assign_scalar_2d_corder(bench: &mut test::Bencher) { } #[bench] -fn assign_scalar_2d_cutout(bench: &mut test::Bencher) { +fn assign_scalar_2d_cutout(bench: &mut test::Bencher) +{ let mut a = Array::zeros((66, 66)); let a = a.slice_mut(s![1..-1, 1..-1]); let mut a = black_box(a); @@ -659,7 +724,8 @@ fn assign_scalar_2d_cutout(bench: &mut test::Bencher) { } #[bench] -fn assign_scalar_2d_forder(bench: &mut test::Bencher) { +fn assign_scalar_2d_forder(bench: &mut test::Bencher) +{ let mut a = Array::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut a = black_box(a); @@ -668,14 +734,16 @@ fn assign_scalar_2d_forder(bench: &mut test::Bencher) { } #[bench] -fn assign_zero_2d_corder(bench: &mut test::Bencher) { +fn assign_zero_2d_corder(bench: &mut test::Bencher) +{ let a = Array::zeros((ADD2DSZ, ADD2DSZ)); let mut a = black_box(a); bench.iter(|| a.fill(0.)) } #[bench] -fn assign_zero_2d_cutout(bench: &mut test::Bencher) { +fn assign_zero_2d_cutout(bench: &mut test::Bencher) +{ let mut a = Array::zeros((66, 66)); let a = a.slice_mut(s![1..-1, 1..-1]); let mut a = black_box(a); @@ -683,7 +751,8 @@ fn 
assign_zero_2d_cutout(bench: &mut test::Bencher) { } #[bench] -fn assign_zero_2d_forder(bench: &mut test::Bencher) { +fn assign_zero_2d_forder(bench: &mut test::Bencher) +{ let mut a = Array::zeros((ADD2DSZ, ADD2DSZ)); a.swap_axes(0, 1); let mut a = black_box(a); @@ -691,7 +760,8 @@ fn assign_zero_2d_forder(bench: &mut test::Bencher) { } #[bench] -fn bench_iter_diag(bench: &mut test::Bencher) { +fn bench_iter_diag(bench: &mut test::Bencher) +{ let a = Array::::zeros((1024, 1024)); bench.iter(|| { for elt in a.diag() { @@ -701,7 +771,8 @@ fn bench_iter_diag(bench: &mut test::Bencher) { } #[bench] -fn bench_row_iter(bench: &mut test::Bencher) { +fn bench_row_iter(bench: &mut test::Bencher) +{ let a = Array::::zeros((1024, 1024)); let it = a.row(17); bench.iter(|| { @@ -712,7 +783,8 @@ fn bench_row_iter(bench: &mut test::Bencher) { } #[bench] -fn bench_col_iter(bench: &mut test::Bencher) { +fn bench_col_iter(bench: &mut test::Bencher) +{ let a = Array::::zeros((1024, 1024)); let it = a.column(17); bench.iter(|| { @@ -782,7 +854,8 @@ mat_mul! 
{mat_mul_i32, i32, } #[bench] -fn create_iter_4d(bench: &mut test::Bencher) { +fn create_iter_4d(bench: &mut test::Bencher) +{ let mut a = Array::from_elem((4, 5, 3, 2), 1.0); a.swap_axes(0, 1); a.swap_axes(2, 1); @@ -792,82 +865,94 @@ fn create_iter_4d(bench: &mut test::Bencher) { } #[bench] -fn bench_to_owned_n(bench: &mut test::Bencher) { +fn bench_to_owned_n(bench: &mut test::Bencher) +{ let a = Array::::zeros((32, 32)); bench.iter(|| a.to_owned()); } #[bench] -fn bench_to_owned_t(bench: &mut test::Bencher) { +fn bench_to_owned_t(bench: &mut test::Bencher) +{ let mut a = Array::::zeros((32, 32)); a.swap_axes(0, 1); bench.iter(|| a.to_owned()); } #[bench] -fn bench_to_owned_strided(bench: &mut test::Bencher) { +fn bench_to_owned_strided(bench: &mut test::Bencher) +{ let a = Array::::zeros((32, 64)); let a = a.slice(s![.., ..;2]); bench.iter(|| a.to_owned()); } #[bench] -fn equality_i32(bench: &mut test::Bencher) { +fn equality_i32(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let b = Array::::zeros((64, 64)); bench.iter(|| a == b); } #[bench] -fn equality_f32(bench: &mut test::Bencher) { +fn equality_f32(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let b = Array::::zeros((64, 64)); bench.iter(|| a == b); } #[bench] -fn equality_f32_mixorder(bench: &mut test::Bencher) { +fn equality_f32_mixorder(bench: &mut test::Bencher) +{ let a = Array::::zeros((64, 64)); let b = Array::::zeros((64, 64).f()); bench.iter(|| a == b); } #[bench] -fn dot_f32_16(bench: &mut test::Bencher) { +fn dot_f32_16(bench: &mut test::Bencher) +{ let a = Array::::zeros(16); let b = Array::::zeros(16); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_20(bench: &mut test::Bencher) { +fn dot_f32_20(bench: &mut test::Bencher) +{ let a = Array::::zeros(20); let b = Array::::zeros(20); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_32(bench: &mut test::Bencher) { +fn dot_f32_32(bench: &mut test::Bencher) +{ let a = Array::::zeros(32); let b = 
Array::::zeros(32); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_256(bench: &mut test::Bencher) { +fn dot_f32_256(bench: &mut test::Bencher) +{ let a = Array::::zeros(256); let b = Array::::zeros(256); bench.iter(|| a.dot(&b)); } #[bench] -fn dot_f32_1024(bench: &mut test::Bencher) { +fn dot_f32_1024(bench: &mut test::Bencher) +{ let av = Array::::zeros(1024); let bv = Array::::zeros(1024); bench.iter(|| av.dot(&bv)); } #[bench] -fn dot_f32_10e6(bench: &mut test::Bencher) { +fn dot_f32_10e6(bench: &mut test::Bencher) +{ let n = 1_000_000; let av = Array::::zeros(n); let bv = Array::::zeros(n); @@ -875,7 +960,8 @@ fn dot_f32_10e6(bench: &mut test::Bencher) { } #[bench] -fn dot_extended(bench: &mut test::Bencher) { +fn dot_extended(bench: &mut test::Bencher) +{ let m = 10; let n = 33; let k = 10; @@ -896,7 +982,8 @@ fn dot_extended(bench: &mut test::Bencher) { const MEAN_SUM_N: usize = 127; -fn range_mat(m: Ix, n: Ix) -> Array2 { +fn range_mat(m: Ix, n: Ix) -> Array2 +{ assert!(m * n != 0); Array::linspace(0., (m * n - 1) as f32, m * n) .into_shape_with_order((m, n)) @@ -904,87 +991,100 @@ fn range_mat(m: Ix, n: Ix) -> Array2 { } #[bench] -fn mean_axis0(bench: &mut test::Bencher) { +fn mean_axis0(bench: &mut test::Bencher) +{ let a = range_mat(MEAN_SUM_N, MEAN_SUM_N); bench.iter(|| a.mean_axis(Axis(0))); } #[bench] -fn mean_axis1(bench: &mut test::Bencher) { +fn mean_axis1(bench: &mut test::Bencher) +{ let a = range_mat(MEAN_SUM_N, MEAN_SUM_N); bench.iter(|| a.mean_axis(Axis(1))); } #[bench] -fn sum_axis0(bench: &mut test::Bencher) { +fn sum_axis0(bench: &mut test::Bencher) +{ let a = range_mat(MEAN_SUM_N, MEAN_SUM_N); bench.iter(|| a.sum_axis(Axis(0))); } #[bench] -fn sum_axis1(bench: &mut test::Bencher) { +fn sum_axis1(bench: &mut test::Bencher) +{ let a = range_mat(MEAN_SUM_N, MEAN_SUM_N); bench.iter(|| a.sum_axis(Axis(1))); } #[bench] -fn into_dimensionality_ix1_ok(bench: &mut test::Bencher) { +fn into_dimensionality_ix1_ok(bench: &mut test::Bencher) +{ let a 
= Array::::zeros(Ix1(10)); let a = a.view(); bench.iter(|| a.into_dimensionality::()); } #[bench] -fn into_dimensionality_ix3_ok(bench: &mut test::Bencher) { +fn into_dimensionality_ix3_ok(bench: &mut test::Bencher) +{ let a = Array::::zeros(Ix3(10, 10, 10)); let a = a.view(); bench.iter(|| a.into_dimensionality::()); } #[bench] -fn into_dimensionality_ix3_err(bench: &mut test::Bencher) { +fn into_dimensionality_ix3_err(bench: &mut test::Bencher) +{ let a = Array::::zeros(Ix3(10, 10, 10)); let a = a.view(); bench.iter(|| a.into_dimensionality::()); } #[bench] -fn into_dimensionality_dyn_to_ix3(bench: &mut test::Bencher) { +fn into_dimensionality_dyn_to_ix3(bench: &mut test::Bencher) +{ let a = Array::::zeros(IxDyn(&[10, 10, 10])); let a = a.view(); bench.iter(|| a.clone().into_dimensionality::()); } #[bench] -fn into_dimensionality_dyn_to_dyn(bench: &mut test::Bencher) { +fn into_dimensionality_dyn_to_dyn(bench: &mut test::Bencher) +{ let a = Array::::zeros(IxDyn(&[10, 10, 10])); let a = a.view(); bench.iter(|| a.clone().into_dimensionality::()); } #[bench] -fn into_dyn_ix3(bench: &mut test::Bencher) { +fn into_dyn_ix3(bench: &mut test::Bencher) +{ let a = Array::::zeros(Ix3(10, 10, 10)); let a = a.view(); bench.iter(|| a.into_dyn()); } #[bench] -fn into_dyn_ix5(bench: &mut test::Bencher) { +fn into_dyn_ix5(bench: &mut test::Bencher) +{ let a = Array::::zeros(Ix5(2, 2, 2, 2, 2)); let a = a.view(); bench.iter(|| a.into_dyn()); } #[bench] -fn into_dyn_dyn(bench: &mut test::Bencher) { +fn into_dyn_dyn(bench: &mut test::Bencher) +{ let a = Array::::zeros(IxDyn(&[10, 10, 10])); let a = a.view(); bench.iter(|| a.clone().into_dyn()); } #[bench] -fn broadcast_same_dim(bench: &mut test::Bencher) { +fn broadcast_same_dim(bench: &mut test::Bencher) +{ let s = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; let s = Array4::from_shape_vec((2, 2, 3, 2), s.to_vec()).unwrap(); let a = s.slice(s![.., ..;-1, ..;2, ..]); @@ -993,7 +1093,8 @@ fn 
broadcast_same_dim(bench: &mut test::Bencher) { } #[bench] -fn broadcast_one_side(bench: &mut test::Bencher) { +fn broadcast_one_side(bench: &mut test::Bencher) +{ let s = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; let s2 = [1, 2, 3, 4, 5, 6]; let a = Array4::from_shape_vec((4, 1, 3, 2), s.to_vec()).unwrap(); diff --git a/benches/chunks.rs b/benches/chunks.rs index 5ea9ba466..46780492f 100644 --- a/benches/chunks.rs +++ b/benches/chunks.rs @@ -7,7 +7,8 @@ use ndarray::prelude::*; use ndarray::NdProducer; #[bench] -fn chunk2x2_iter_sum(bench: &mut Bencher) { +fn chunk2x2_iter_sum(bench: &mut Bencher) +{ let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -19,7 +20,8 @@ fn chunk2x2_iter_sum(bench: &mut Bencher) { } #[bench] -fn chunk2x2_sum(bench: &mut Bencher) { +fn chunk2x2_sum(bench: &mut Bencher) +{ let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -31,7 +33,8 @@ fn chunk2x2_sum(bench: &mut Bencher) { } #[bench] -fn chunk2x2_sum_get1(bench: &mut Bencher) { +fn chunk2x2_sum_get1(bench: &mut Bencher) +{ let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -46,7 +49,8 @@ fn chunk2x2_sum_get1(bench: &mut Bencher) { } #[bench] -fn chunk2x2_sum_uget1(bench: &mut Bencher) { +fn chunk2x2_sum_uget1(bench: &mut Bencher) +{ let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::::zeros(a.exact_chunks(chunksz).raw_dim()); @@ -64,7 +68,8 @@ fn chunk2x2_sum_uget1(bench: &mut Bencher) { #[bench] #[allow(clippy::identity_op)] -fn chunk2x2_sum_get2(bench: &mut Bencher) { +fn chunk2x2_sum_get2(bench: &mut Bencher) +{ let a = Array::::zeros((256, 256)); let chunksz = (2, 2); let mut sum = Array::::zeros(a.exact_chunks(chunksz).raw_dim()); diff --git a/benches/construct.rs b/benches/construct.rs index 
8d323b4cd..278174388 100644 --- a/benches/construct.rs +++ b/benches/construct.rs @@ -8,17 +8,20 @@ use test::Bencher; use ndarray::prelude::*; #[bench] -fn default_f64(bench: &mut Bencher) { +fn default_f64(bench: &mut Bencher) +{ bench.iter(|| Array::::default((128, 128))) } #[bench] -fn zeros_f64(bench: &mut Bencher) { +fn zeros_f64(bench: &mut Bencher) +{ bench.iter(|| Array::::zeros((128, 128))) } #[bench] -fn map_regular(bench: &mut test::Bencher) { +fn map_regular(bench: &mut test::Bencher) +{ let a = Array::linspace(0., 127., 128) .into_shape_with_order((8, 16)) .unwrap(); @@ -26,7 +29,8 @@ fn map_regular(bench: &mut test::Bencher) { } #[bench] -fn map_stride(bench: &mut test::Bencher) { +fn map_stride(bench: &mut test::Bencher) +{ let a = Array::linspace(0., 127., 256) .into_shape_with_order((8, 32)) .unwrap(); diff --git a/benches/gemv_gemm.rs b/benches/gemv_gemm.rs index 9dbd9a538..2d1642623 100644 --- a/benches/gemv_gemm.rs +++ b/benches/gemv_gemm.rs @@ -16,7 +16,8 @@ use ndarray::linalg::general_mat_vec_mul; use ndarray::LinalgScalar; #[bench] -fn gemv_64_64c(bench: &mut Bencher) { +fn gemv_64_64c(bench: &mut Bencher) +{ let a = Array::zeros((64, 64)); let (m, n) = a.dim(); let x = Array::zeros(n); @@ -27,7 +28,8 @@ fn gemv_64_64c(bench: &mut Bencher) { } #[bench] -fn gemv_64_64f(bench: &mut Bencher) { +fn gemv_64_64f(bench: &mut Bencher) +{ let a = Array::zeros((64, 64).f()); let (m, n) = a.dim(); let x = Array::zeros(n); @@ -38,7 +40,8 @@ fn gemv_64_64f(bench: &mut Bencher) { } #[bench] -fn gemv_64_32(bench: &mut Bencher) { +fn gemv_64_32(bench: &mut Bencher) +{ let a = Array::zeros((64, 32)); let (m, n) = a.dim(); let x = Array::zeros(n); @@ -49,17 +52,20 @@ fn gemv_64_32(bench: &mut Bencher) { } #[bench] -fn cgemm_100(bench: &mut Bencher) { +fn cgemm_100(bench: &mut Bencher) +{ cgemm_bench::(100, bench); } #[bench] -fn zgemm_100(bench: &mut Bencher) { +fn zgemm_100(bench: &mut Bencher) +{ cgemm_bench::(100, bench); } fn cgemm_bench(size: usize, 
bench: &mut Bencher) -where A: LinalgScalar + Float { +where A: LinalgScalar + Float +{ let (m, k, n) = (size, size, size); let a = Array::, _>::zeros((m, k)); diff --git a/benches/higher-order.rs b/benches/higher-order.rs index 0a629fef3..9cc3bd961 100644 --- a/benches/higher-order.rs +++ b/benches/higher-order.rs @@ -13,19 +13,22 @@ const X: usize = 64; const Y: usize = 16; #[bench] -fn map_regular(bench: &mut Bencher) { +fn map_regular(bench: &mut Bencher) +{ let a = Array::linspace(0., 127., N) .into_shape_with_order((X, Y)) .unwrap(); bench.iter(|| a.map(|&x| 2. * x)); } -pub fn double_array(mut a: ArrayViewMut2<'_, f64>) { +pub fn double_array(mut a: ArrayViewMut2<'_, f64>) +{ a *= 2.0; } #[bench] -fn map_stride_double_f64(bench: &mut Bencher) { +fn map_stride_double_f64(bench: &mut Bencher) +{ let mut a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); @@ -36,7 +39,8 @@ fn map_stride_double_f64(bench: &mut Bencher) { } #[bench] -fn map_stride_f64(bench: &mut Bencher) { +fn map_stride_f64(bench: &mut Bencher) +{ let a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); @@ -45,7 +49,8 @@ fn map_stride_f64(bench: &mut Bencher) { } #[bench] -fn map_stride_u32(bench: &mut Bencher) { +fn map_stride_u32(bench: &mut Bencher) +{ let a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); @@ -55,7 +60,8 @@ fn map_stride_u32(bench: &mut Bencher) { } #[bench] -fn fold_axis(bench: &mut Bencher) { +fn fold_axis(bench: &mut Bencher) +{ let a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); @@ -66,7 +72,8 @@ const MA: usize = 64; const MASZ: usize = MA * MA; #[bench] -fn map_axis_0(bench: &mut Bencher) { +fn map_axis_0(bench: &mut Bencher) +{ let a = Array::from_iter(0..MASZ as i32) .into_shape_with_order([MA, MA]) .unwrap(); @@ -74,7 +81,8 @@ fn map_axis_0(bench: &mut Bencher) { } #[bench] -fn map_axis_1(bench: &mut Bencher) { +fn map_axis_1(bench: &mut 
Bencher) +{ let a = Array::from_iter(0..MASZ as i32) .into_shape_with_order([MA, MA]) .unwrap(); diff --git a/benches/iter.rs b/benches/iter.rs index 422310103..77f511745 100644 --- a/benches/iter.rs +++ b/benches/iter.rs @@ -13,13 +13,15 @@ use ndarray::Slice; use ndarray::{FoldWhile, Zip}; #[bench] -fn iter_sum_2d_regular(bench: &mut Bencher) { +fn iter_sum_2d_regular(bench: &mut Bencher) +{ let a = Array::::zeros((64, 64)); bench.iter(|| a.iter().sum::()); } #[bench] -fn iter_sum_2d_cutout(bench: &mut Bencher) { +fn iter_sum_2d_cutout(bench: &mut Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = av; @@ -27,7 +29,8 @@ fn iter_sum_2d_cutout(bench: &mut Bencher) { } #[bench] -fn iter_all_2d_cutout(bench: &mut Bencher) { +fn iter_all_2d_cutout(bench: &mut Bencher) +{ let a = Array::::zeros((66, 66)); let av = a.slice(s![1..-1, 1..-1]); let a = av; @@ -35,14 +38,16 @@ fn iter_all_2d_cutout(bench: &mut Bencher) { } #[bench] -fn iter_sum_2d_transpose(bench: &mut Bencher) { +fn iter_sum_2d_transpose(bench: &mut Bencher) +{ let a = Array::::zeros((66, 66)); let a = a.t(); bench.iter(|| a.iter().sum::()); } #[bench] -fn iter_filter_sum_2d_u32(bench: &mut Bencher) { +fn iter_filter_sum_2d_u32(bench: &mut Bencher) +{ let a = Array::linspace(0., 1., 256) .into_shape_with_order((16, 16)) .unwrap(); @@ -51,7 +56,8 @@ fn iter_filter_sum_2d_u32(bench: &mut Bencher) { } #[bench] -fn iter_filter_sum_2d_f32(bench: &mut Bencher) { +fn iter_filter_sum_2d_f32(bench: &mut Bencher) +{ let a = Array::linspace(0., 1., 256) .into_shape_with_order((16, 16)) .unwrap(); @@ -60,7 +66,8 @@ fn iter_filter_sum_2d_f32(bench: &mut Bencher) { } #[bench] -fn iter_filter_sum_2d_stride_u32(bench: &mut Bencher) { +fn iter_filter_sum_2d_stride_u32(bench: &mut Bencher) +{ let a = Array::linspace(0., 1., 256) .into_shape_with_order((16, 16)) .unwrap(); @@ -70,7 +77,8 @@ fn iter_filter_sum_2d_stride_u32(bench: &mut Bencher) { } #[bench] -fn 
iter_filter_sum_2d_stride_f32(bench: &mut Bencher) { +fn iter_filter_sum_2d_stride_f32(bench: &mut Bencher) +{ let a = Array::linspace(0., 1., 256) .into_shape_with_order((16, 16)) .unwrap(); @@ -80,7 +88,8 @@ fn iter_filter_sum_2d_stride_f32(bench: &mut Bencher) { } #[bench] -fn iter_rev_step_by_contiguous(bench: &mut Bencher) { +fn iter_rev_step_by_contiguous(bench: &mut Bencher) +{ let a = Array::linspace(0., 1., 512); bench.iter(|| { a.iter().rev().step_by(2).for_each(|x| { @@ -90,7 +99,8 @@ fn iter_rev_step_by_contiguous(bench: &mut Bencher) { } #[bench] -fn iter_rev_step_by_discontiguous(bench: &mut Bencher) { +fn iter_rev_step_by_discontiguous(bench: &mut Bencher) +{ let mut a = Array::linspace(0., 1., 1024); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); bench.iter(|| { @@ -103,7 +113,8 @@ fn iter_rev_step_by_discontiguous(bench: &mut Bencher) { const ZIPSZ: usize = 10_000; #[bench] -fn sum_3_std_zip1(bench: &mut Bencher) { +fn sum_3_std_zip1(bench: &mut Bencher) +{ let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -115,7 +126,8 @@ fn sum_3_std_zip1(bench: &mut Bencher) { } #[bench] -fn sum_3_std_zip2(bench: &mut Bencher) { +fn sum_3_std_zip2(bench: &mut Bencher) +{ let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -128,7 +140,8 @@ fn sum_3_std_zip2(bench: &mut Bencher) { } #[bench] -fn sum_3_std_zip3(bench: &mut Bencher) { +fn sum_3_std_zip3(bench: &mut Bencher) +{ let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -142,7 +155,8 @@ fn sum_3_std_zip3(bench: &mut Bencher) { } #[bench] -fn vector_sum_3_std_zip(bench: &mut Bencher) { +fn vector_sum_3_std_zip(bench: &mut Bencher) +{ let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -154,7 +168,8 @@ fn vector_sum_3_std_zip(bench: &mut Bencher) { } #[bench] -fn sum_3_azip(bench: &mut Bencher) { +fn sum_3_azip(bench: &mut Bencher) +{ let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; 
ZIPSZ]; @@ -168,7 +183,8 @@ fn sum_3_azip(bench: &mut Bencher) { } #[bench] -fn sum_3_azip_fold(bench: &mut Bencher) { +fn sum_3_azip_fold(bench: &mut Bencher) +{ let a = vec![1; ZIPSZ]; let b = vec![1; ZIPSZ]; let c = vec![1; ZIPSZ]; @@ -182,7 +198,8 @@ fn sum_3_azip_fold(bench: &mut Bencher) { } #[bench] -fn vector_sum_3_azip(bench: &mut Bencher) { +fn vector_sum_3_azip(bench: &mut Bencher) +{ let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -193,7 +210,8 @@ fn vector_sum_3_azip(bench: &mut Bencher) { }); } -fn vector_sum3_unchecked(a: &[f64], b: &[f64], c: &mut [f64]) { +fn vector_sum3_unchecked(a: &[f64], b: &[f64], c: &mut [f64]) +{ for i in 0..c.len() { unsafe { *c.get_unchecked_mut(i) += *a.get_unchecked(i) + *b.get_unchecked(i); @@ -202,7 +220,8 @@ fn vector_sum3_unchecked(a: &[f64], b: &[f64], c: &mut [f64]) { } #[bench] -fn vector_sum_3_zip_unchecked(bench: &mut Bencher) { +fn vector_sum_3_zip_unchecked(bench: &mut Bencher) +{ let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -212,7 +231,8 @@ fn vector_sum_3_zip_unchecked(bench: &mut Bencher) { } #[bench] -fn vector_sum_3_zip_unchecked_manual(bench: &mut Bencher) { +fn vector_sum_3_zip_unchecked_manual(bench: &mut Bencher) +{ let a = vec![1.; ZIPSZ]; let b = vec![1.; ZIPSZ]; let mut c = vec![1.; ZIPSZ]; @@ -232,7 +252,8 @@ const ISZ: usize = 16; const I2DSZ: usize = 64; #[bench] -fn indexed_iter_1d_ix1(bench: &mut Bencher) { +fn indexed_iter_1d_ix1(bench: &mut Bencher) +{ let mut a = Array::::zeros(I2DSZ * I2DSZ); for (i, elt) in a.indexed_iter_mut() { *elt = i as _; @@ -247,7 +268,8 @@ fn indexed_iter_1d_ix1(bench: &mut Bencher) { } #[bench] -fn indexed_zip_1d_ix1(bench: &mut Bencher) { +fn indexed_zip_1d_ix1(bench: &mut Bencher) +{ let mut a = Array::::zeros(I2DSZ * I2DSZ); for (i, elt) in a.indexed_iter_mut() { *elt = i as _; @@ -262,7 +284,8 @@ fn indexed_zip_1d_ix1(bench: &mut Bencher) { } #[bench] -fn indexed_iter_2d_ix2(bench: &mut 
Bencher) { +fn indexed_iter_2d_ix2(bench: &mut Bencher) +{ let mut a = Array::::zeros((I2DSZ, I2DSZ)); for ((i, j), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j) as _; @@ -276,7 +299,8 @@ fn indexed_iter_2d_ix2(bench: &mut Bencher) { }) } #[bench] -fn indexed_zip_2d_ix2(bench: &mut Bencher) { +fn indexed_zip_2d_ix2(bench: &mut Bencher) +{ let mut a = Array::::zeros((I2DSZ, I2DSZ)); for ((i, j), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j) as _; @@ -291,7 +315,8 @@ fn indexed_zip_2d_ix2(bench: &mut Bencher) { } #[bench] -fn indexed_iter_3d_ix3(bench: &mut Bencher) { +fn indexed_iter_3d_ix3(bench: &mut Bencher) +{ let mut a = Array::::zeros((ISZ, ISZ, ISZ)); for ((i, j, k), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j + 10000 * k) as _; @@ -306,7 +331,8 @@ fn indexed_iter_3d_ix3(bench: &mut Bencher) { } #[bench] -fn indexed_zip_3d_ix3(bench: &mut Bencher) { +fn indexed_zip_3d_ix3(bench: &mut Bencher) +{ let mut a = Array::::zeros((ISZ, ISZ, ISZ)); for ((i, j, k), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j + 10000 * k) as _; @@ -321,7 +347,8 @@ fn indexed_zip_3d_ix3(bench: &mut Bencher) { } #[bench] -fn indexed_iter_3d_dyn(bench: &mut Bencher) { +fn indexed_iter_3d_dyn(bench: &mut Bencher) +{ let mut a = Array::::zeros((ISZ, ISZ, ISZ)); for ((i, j, k), elt) in a.indexed_iter_mut() { *elt = (i + 100 * j + 10000 * k) as _; @@ -337,27 +364,31 @@ fn indexed_iter_3d_dyn(bench: &mut Bencher) { } #[bench] -fn iter_sum_1d_strided_fold(bench: &mut Bencher) { +fn iter_sum_1d_strided_fold(bench: &mut Bencher) +{ let mut a = Array::::ones(10240); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); bench.iter(|| a.iter().sum::()); } #[bench] -fn iter_sum_1d_strided_rfold(bench: &mut Bencher) { +fn iter_sum_1d_strided_rfold(bench: &mut Bencher) +{ let mut a = Array::::ones(10240); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); bench.iter(|| a.iter().rfold(0, |acc, &x| acc + x)); } #[bench] -fn iter_axis_iter_sum(bench: &mut Bencher) { +fn 
iter_axis_iter_sum(bench: &mut Bencher) +{ let a = Array::::zeros((64, 64)); bench.iter(|| a.axis_iter(Axis(0)).map(|plane| plane.sum()).sum::()); } #[bench] -fn iter_axis_chunks_1_iter_sum(bench: &mut Bencher) { +fn iter_axis_chunks_1_iter_sum(bench: &mut Bencher) +{ let a = Array::::zeros((64, 64)); bench.iter(|| { a.axis_chunks_iter(Axis(0), 1) @@ -367,7 +398,8 @@ fn iter_axis_chunks_1_iter_sum(bench: &mut Bencher) { } #[bench] -fn iter_axis_chunks_5_iter_sum(bench: &mut Bencher) { +fn iter_axis_chunks_5_iter_sum(bench: &mut Bencher) +{ let a = Array::::zeros((64, 64)); bench.iter(|| { a.axis_chunks_iter(Axis(0), 5) @@ -376,21 +408,24 @@ fn iter_axis_chunks_5_iter_sum(bench: &mut Bencher) { }); } -pub fn zip_mut_with(data: &Array3, out: &mut Array3) { +pub fn zip_mut_with(data: &Array3, out: &mut Array3) +{ out.zip_mut_with(&data, |o, &i| { *o = i; }); } #[bench] -fn zip_mut_with_cc(b: &mut Bencher) { +fn zip_mut_with_cc(b: &mut Bencher) +{ let data: Array3 = Array3::zeros((ISZ, ISZ, ISZ)); let mut out = Array3::zeros(data.dim()); b.iter(|| zip_mut_with(&data, &mut out)); } #[bench] -fn zip_mut_with_ff(b: &mut Bencher) { +fn zip_mut_with_ff(b: &mut Bencher) +{ let data: Array3 = Array3::zeros((ISZ, ISZ, ISZ).f()); let mut out = Array3::zeros(data.dim().f()); b.iter(|| zip_mut_with(&data, &mut out)); diff --git a/benches/numeric.rs b/benches/numeric.rs index d9b9187ff..e2ffa1b84 100644 --- a/benches/numeric.rs +++ b/benches/numeric.rs @@ -10,7 +10,8 @@ const X: usize = 64; const Y: usize = 16; #[bench] -fn clip(bench: &mut Bencher) { +fn clip(bench: &mut Bencher) +{ let mut a = Array::linspace(0., 127., N * 2) .into_shape_with_order([X, Y * 2]) .unwrap(); diff --git a/benches/par_rayon.rs b/benches/par_rayon.rs index 91113b50c..1301ae75a 100644 --- a/benches/par_rayon.rs +++ b/benches/par_rayon.rs @@ -12,7 +12,8 @@ use ndarray::Zip; const EXP_N: usize = 256; const ADDN: usize = 512; -fn set_threads() { +fn set_threads() +{ // Consider setting a fixed number of 
threads here, for example to avoid // oversubscribing on hyperthreaded cores. // let n = 4; @@ -20,7 +21,8 @@ fn set_threads() { } #[bench] -fn map_exp_regular(bench: &mut Bencher) { +fn map_exp_regular(bench: &mut Bencher) +{ let mut a = Array2::::zeros((EXP_N, EXP_N)); a.swap_axes(0, 1); bench.iter(|| { @@ -29,7 +31,8 @@ fn map_exp_regular(bench: &mut Bencher) { } #[bench] -fn rayon_exp_regular(bench: &mut Bencher) { +fn rayon_exp_regular(bench: &mut Bencher) +{ set_threads(); let mut a = Array2::::zeros((EXP_N, EXP_N)); a.swap_axes(0, 1); @@ -41,19 +44,22 @@ fn rayon_exp_regular(bench: &mut Bencher) { const FASTEXP: usize = EXP_N; #[inline] -fn fastexp(x: f64) -> f64 { +fn fastexp(x: f64) -> f64 +{ let x = 1. + x / 1024.; x.powi(1024) } #[bench] -fn map_fastexp_regular(bench: &mut Bencher) { +fn map_fastexp_regular(bench: &mut Bencher) +{ let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| a.mapv_inplace(|x| fastexp(x))); } #[bench] -fn rayon_fastexp_regular(bench: &mut Bencher) { +fn rayon_fastexp_regular(bench: &mut Bencher) +{ set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { @@ -62,14 +68,16 @@ fn rayon_fastexp_regular(bench: &mut Bencher) { } #[bench] -fn map_fastexp_cut(bench: &mut Bencher) { +fn map_fastexp_cut(bench: &mut Bencher) +{ let mut a = Array2::::zeros((FASTEXP, FASTEXP)); let mut a = a.slice_mut(s![.., ..-1]); bench.iter(|| a.mapv_inplace(|x| fastexp(x))); } #[bench] -fn rayon_fastexp_cut(bench: &mut Bencher) { +fn rayon_fastexp_cut(bench: &mut Bencher) +{ set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); let mut a = a.slice_mut(s![.., ..-1]); @@ -79,7 +87,8 @@ fn rayon_fastexp_cut(bench: &mut Bencher) { } #[bench] -fn map_fastexp_by_axis(bench: &mut Bencher) { +fn map_fastexp_by_axis(bench: &mut Bencher) +{ let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { for mut sheet in a.axis_iter_mut(Axis(0)) { @@ -89,7 +98,8 @@ fn map_fastexp_by_axis(bench: &mut Bencher) { } #[bench] 
-fn rayon_fastexp_by_axis(bench: &mut Bencher) { +fn rayon_fastexp_by_axis(bench: &mut Bencher) +{ set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { @@ -100,7 +110,8 @@ fn rayon_fastexp_by_axis(bench: &mut Bencher) { } #[bench] -fn rayon_fastexp_zip(bench: &mut Bencher) { +fn rayon_fastexp_zip(bench: &mut Bencher) +{ set_threads(); let mut a = Array2::::zeros((FASTEXP, FASTEXP)); bench.iter(|| { @@ -111,7 +122,8 @@ fn rayon_fastexp_zip(bench: &mut Bencher) { } #[bench] -fn add(bench: &mut Bencher) { +fn add(bench: &mut Bencher) +{ let mut a = Array2::::zeros((ADDN, ADDN)); let b = Array2::::zeros((ADDN, ADDN)); let c = Array2::::zeros((ADDN, ADDN)); @@ -124,7 +136,8 @@ fn add(bench: &mut Bencher) { } #[bench] -fn rayon_add(bench: &mut Bencher) { +fn rayon_add(bench: &mut Bencher) +{ set_threads(); let mut a = Array2::::zeros((ADDN, ADDN)); let b = Array2::::zeros((ADDN, ADDN)); @@ -141,25 +154,29 @@ const COLL_STRING_N: usize = 64; const COLL_F64_N: usize = 128; #[bench] -fn vec_string_collect(bench: &mut test::Bencher) { +fn vec_string_collect(bench: &mut test::Bencher) +{ let v = vec![""; COLL_STRING_N * COLL_STRING_N]; bench.iter(|| v.iter().map(|s| s.to_owned()).collect::>()); } #[bench] -fn array_string_collect(bench: &mut test::Bencher) { +fn array_string_collect(bench: &mut test::Bencher) +{ let v = Array::from_elem((COLL_STRING_N, COLL_STRING_N), ""); bench.iter(|| Zip::from(&v).par_map_collect(|s| s.to_owned())); } #[bench] -fn vec_f64_collect(bench: &mut test::Bencher) { +fn vec_f64_collect(bench: &mut test::Bencher) +{ let v = vec![1.; COLL_F64_N * COLL_F64_N]; bench.iter(|| v.iter().map(|s| s + 1.).collect::>()); } #[bench] -fn array_f64_collect(bench: &mut test::Bencher) { +fn array_f64_collect(bench: &mut test::Bencher) +{ let v = Array::from_elem((COLL_F64_N, COLL_F64_N), 1.); bench.iter(|| Zip::from(&v).par_map_collect(|s| s + 1.)); } diff --git a/benches/to_shape.rs b/benches/to_shape.rs index 7c9f9144e..f056a9852 
100644 --- a/benches/to_shape.rs +++ b/benches/to_shape.rs @@ -7,77 +7,88 @@ use ndarray::prelude::*; use ndarray::Order; #[bench] -fn to_shape2_1(bench: &mut Bencher) { +fn to_shape2_1(bench: &mut Bencher) +{ let a = Array::::zeros((4, 5)); let view = a.view(); bench.iter(|| view.to_shape(4 * 5).unwrap()); } #[bench] -fn to_shape2_2_same(bench: &mut Bencher) { +fn to_shape2_2_same(bench: &mut Bencher) +{ let a = Array::::zeros((4, 5)); let view = a.view(); bench.iter(|| view.to_shape((4, 5)).unwrap()); } #[bench] -fn to_shape2_2_flip(bench: &mut Bencher) { +fn to_shape2_2_flip(bench: &mut Bencher) +{ let a = Array::::zeros((4, 5)); let view = a.view(); bench.iter(|| view.to_shape((5, 4)).unwrap()); } #[bench] -fn to_shape2_3(bench: &mut Bencher) { +fn to_shape2_3(bench: &mut Bencher) +{ let a = Array::::zeros((4, 5)); let view = a.view(); bench.iter(|| view.to_shape((2, 5, 2)).unwrap()); } #[bench] -fn to_shape3_1(bench: &mut Bencher) { +fn to_shape3_1(bench: &mut Bencher) +{ let a = Array::::zeros((3, 4, 5)); let view = a.view(); bench.iter(|| view.to_shape(3 * 4 * 5).unwrap()); } #[bench] -fn to_shape3_2_order(bench: &mut Bencher) { +fn to_shape3_2_order(bench: &mut Bencher) +{ let a = Array::::zeros((3, 4, 5)); let view = a.view(); bench.iter(|| view.to_shape((12, 5)).unwrap()); } #[bench] -fn to_shape3_2_outoforder(bench: &mut Bencher) { +fn to_shape3_2_outoforder(bench: &mut Bencher) +{ let a = Array::::zeros((3, 4, 5)); let view = a.view(); bench.iter(|| view.to_shape((4, 15)).unwrap()); } #[bench] -fn to_shape3_3c(bench: &mut Bencher) { +fn to_shape3_3c(bench: &mut Bencher) +{ let a = Array::::zeros((3, 4, 5)); let view = a.view(); bench.iter(|| view.to_shape((3, 4, 5)).unwrap()); } #[bench] -fn to_shape3_3f(bench: &mut Bencher) { +fn to_shape3_3f(bench: &mut Bencher) +{ let a = Array::::zeros((3, 4, 5).f()); let view = a.view(); bench.iter(|| view.to_shape(((3, 4, 5), Order::F)).unwrap()); } #[bench] -fn to_shape3_4c(bench: &mut Bencher) { +fn 
to_shape3_4c(bench: &mut Bencher) +{ let a = Array::::zeros((3, 4, 5)); let view = a.view(); bench.iter(|| view.to_shape(((2, 3, 2, 5), Order::C)).unwrap()); } #[bench] -fn to_shape3_4f(bench: &mut Bencher) { +fn to_shape3_4f(bench: &mut Bencher) +{ let a = Array::::zeros((3, 4, 5).f()); let view = a.view(); bench.iter(|| view.to_shape(((2, 3, 2, 5), Order::F)).unwrap()); diff --git a/benches/zip.rs b/benches/zip.rs index 1194e450f..461497310 100644 --- a/benches/zip.rs +++ b/benches/zip.rs @@ -33,7 +33,8 @@ where z22.for_each(f); } -pub fn zip_indexed(data: &Array3, out: &mut Array3) { +pub fn zip_indexed(data: &Array3, out: &mut Array3) +{ Zip::indexed(data).and(out).for_each(|idx, &i, o| { let _ = black_box(idx); *o = i; @@ -44,49 +45,56 @@ pub fn zip_indexed(data: &Array3, out: &mut Array3) { const SZ3: (usize, usize, usize) = (100, 110, 100); #[bench] -fn zip_cc(b: &mut Bencher) { +fn zip_cc(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim()); b.iter(|| zip_copy(&data, &mut out)); } #[bench] -fn zip_cf(b: &mut Bencher) { +fn zip_cf(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim().f()); b.iter(|| zip_copy(&data, &mut out)); } #[bench] -fn zip_fc(b: &mut Bencher) { +fn zip_fc(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3.f()); let mut out = Array3::zeros(data.dim()); b.iter(|| zip_copy(&data, &mut out)); } #[bench] -fn zip_ff(b: &mut Bencher) { +fn zip_ff(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3.f()); let mut out = Array3::zeros(data.dim().f()); b.iter(|| zip_copy(&data, &mut out)); } #[bench] -fn zip_indexed_cc(b: &mut Bencher) { +fn zip_indexed_cc(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim()); b.iter(|| zip_indexed(&data, &mut out)); } #[bench] -fn zip_indexed_ff(b: &mut Bencher) { +fn zip_indexed_ff(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3.f()); let mut out = 
Array3::zeros(data.dim().f()); b.iter(|| zip_indexed(&data, &mut out)); } #[bench] -fn slice_zip_cc(b: &mut Bencher) { +fn slice_zip_cc(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim()); let data = data.slice(s![1.., 1.., 1..]); @@ -95,7 +103,8 @@ fn slice_zip_cc(b: &mut Bencher) { } #[bench] -fn slice_zip_ff(b: &mut Bencher) { +fn slice_zip_ff(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3.f()); let mut out = Array3::zeros(data.dim().f()); let data = data.slice(s![1.., 1.., 1..]); @@ -104,7 +113,8 @@ fn slice_zip_ff(b: &mut Bencher) { } #[bench] -fn slice_split_zip_cc(b: &mut Bencher) { +fn slice_split_zip_cc(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3); let mut out = Array3::zeros(data.dim()); let data = data.slice(s![1.., 1.., 1..]); @@ -113,7 +123,8 @@ fn slice_split_zip_cc(b: &mut Bencher) { } #[bench] -fn slice_split_zip_ff(b: &mut Bencher) { +fn slice_split_zip_ff(b: &mut Bencher) +{ let data: Array3 = Array3::zeros(SZ3.f()); let mut out = Array3::zeros(data.dim().f()); let data = data.slice(s![1.., 1.., 1..]); diff --git a/examples/axis_ops.rs b/examples/axis_ops.rs index 384ca6fab..3a54a52fb 100644 --- a/examples/axis_ops.rs +++ b/examples/axis_ops.rs @@ -55,7 +55,8 @@ where Ok(()) } -fn main() { +fn main() +{ let mut a = Array::::zeros((2, 3, 4)); for (i, elt) in (0..).zip(&mut a) { *elt = i; diff --git a/examples/bounds_check_elim.rs b/examples/bounds_check_elim.rs index 05dd52c48..e6b57c719 100644 --- a/examples/bounds_check_elim.rs +++ b/examples/bounds_check_elim.rs @@ -35,7 +35,8 @@ pub fn testvec_as_slice(a: &Vec) -> f64 { */ #[no_mangle] -pub fn test1d_single(a: &Array1, i: usize) -> f64 { +pub fn test1d_single(a: &Array1, i: usize) -> f64 +{ if i < a.len() { a[i] } else { @@ -44,7 +45,8 @@ pub fn test1d_single(a: &Array1, i: usize) -> f64 { } #[no_mangle] -pub fn test1d_single_mut(a: &mut Array1, i: usize) -> f64 { +pub fn test1d_single_mut(a: &mut Array1, i: usize) -> f64 +{ 
if i < a.len() { *&mut a[i] } else { @@ -53,7 +55,8 @@ pub fn test1d_single_mut(a: &mut Array1, i: usize) -> f64 { } #[no_mangle] -pub fn test1d_len_of(a: &Array1) -> f64 { +pub fn test1d_len_of(a: &Array1) -> f64 +{ let a = &*a; let mut sum = 0.; for i in 0..a.len_of(Axis(0)) { @@ -63,7 +66,8 @@ pub fn test1d_len_of(a: &Array1) -> f64 { } #[no_mangle] -pub fn test1d_range(a: &Array1) -> f64 { +pub fn test1d_range(a: &Array1) -> f64 +{ let mut sum = 0.; for i in 0..a.len() { sum += a[i]; @@ -72,7 +76,8 @@ pub fn test1d_range(a: &Array1) -> f64 { } #[no_mangle] -pub fn test1d_while(a: &Array1) -> f64 { +pub fn test1d_while(a: &Array1) -> f64 +{ let mut sum = 0.; let mut i = 0; while i < a.len() { @@ -83,7 +88,8 @@ pub fn test1d_while(a: &Array1) -> f64 { } #[no_mangle] -pub fn test2d_ranges(a: &Array2) -> f64 { +pub fn test2d_ranges(a: &Array2) -> f64 +{ let mut sum = 0.; for i in 0..a.nrows() { for j in 0..a.ncols() { @@ -94,7 +100,8 @@ pub fn test2d_ranges(a: &Array2) -> f64 { } #[no_mangle] -pub fn test2d_whiles(a: &Array2) -> f64 { +pub fn test2d_whiles(a: &Array2) -> f64 +{ let mut sum = 0.; let mut i = 0; while i < a.nrows() { diff --git a/examples/column_standardize.rs b/examples/column_standardize.rs index 6a1840f03..329ad2ccb 100644 --- a/examples/column_standardize.rs +++ b/examples/column_standardize.rs @@ -2,7 +2,8 @@ use ndarray::prelude::*; #[cfg(feature = "std")] -fn main() { +fn main() +{ // This example recreates the following from python/numpy // counts -= np.mean(counts, axis=0) // counts /= np.std(counts, axis=0) diff --git a/examples/convo.rs b/examples/convo.rs index f26ab6a50..a59795e12 100644 --- a/examples/convo.rs +++ b/examples/convo.rs @@ -15,7 +15,8 @@ type Kernel3x3 = [[A; 3]; 3]; #[inline(never)] #[cfg(feature = "std")] fn conv_3x3(a: &ArrayView2<'_, F>, out: &mut ArrayViewMut2<'_, F>, kernel: &Kernel3x3) -where F: Float { +where F: Float +{ let (n, m) = a.dim(); let (np, mp) = out.dim(); if n < 3 || m < 3 { @@ -41,7 +42,8 @@ where F: 
Float { } #[cfg(feature = "std")] -fn main() { +fn main() +{ let n = 16; let mut a = Array::zeros((n, n)); // make a circle diff --git a/examples/life.rs b/examples/life.rs index 8b722186c..7db384678 100644 --- a/examples/life.rs +++ b/examples/life.rs @@ -10,7 +10,8 @@ const N: usize = 100; type Board = Array2; -fn parse(x: &[u8]) -> Board { +fn parse(x: &[u8]) -> Board +{ // make a border of 0 cells let mut map = Board::from_elem(((N + 2), (N + 2)), 0); let a = Array::from_iter(x.iter().filter_map(|&b| match b { @@ -30,7 +31,8 @@ fn parse(x: &[u8]) -> Board { // 3 neighbors: birth // otherwise: death -fn iterate(z: &mut Board, scratch: &mut Board) { +fn iterate(z: &mut Board, scratch: &mut Board) +{ // compute number of neighbors let mut neigh = scratch.view_mut(); neigh.fill(0); @@ -53,7 +55,8 @@ fn iterate(z: &mut Board, scratch: &mut Board) { zv.zip_mut_with(&neigh, |y, &n| *y = ((n == 3) || (n == 2 && *y > 0)) as u8); } -fn turn_on_corners(z: &mut Board) { +fn turn_on_corners(z: &mut Board) +{ let n = z.nrows(); let m = z.ncols(); z[[1, 1]] = 1; @@ -62,7 +65,8 @@ fn turn_on_corners(z: &mut Board) { z[[n - 2, m - 2]] = 1; } -fn render(a: &Board) { +fn render(a: &Board) +{ for row in a.rows() { for &x in row { if x > 0 { @@ -75,7 +79,8 @@ fn render(a: &Board) { } } -fn main() { +fn main() +{ let mut a = parse(INPUT); let mut scratch = Board::zeros((N, N)); let steps = 100; diff --git a/examples/rollaxis.rs b/examples/rollaxis.rs index 8efdd0ce0..82c381297 100644 --- a/examples/rollaxis.rs +++ b/examples/rollaxis.rs @@ -22,7 +22,8 @@ where a } -fn main() { +fn main() +{ let mut data = array![ [[-1., 0., -2.], [1., 7., -3.]], [[1., 0., -3.], [1., 7., 5.]], diff --git a/examples/sort-axis.rs b/examples/sort-axis.rs index ff4e804da..17ce52e3a 100644 --- a/examples/sort-axis.rs +++ b/examples/sort-axis.rs @@ -12,13 +12,16 @@ use std::ptr::copy_nonoverlapping; // Type invariant: Each index appears exactly once #[derive(Clone, Debug)] -pub struct Permutation { +pub 
struct Permutation +{ indices: Vec, } -impl Permutation { +impl Permutation +{ /// Checks if the permutation is correct - pub fn from_indices(v: Vec) -> Result { + pub fn from_indices(v: Vec) -> Result + { let perm = Permutation { indices: v }; if perm.correct() { Ok(perm) @@ -27,7 +30,8 @@ impl Permutation { } } - fn correct(&self) -> bool { + fn correct(&self) -> bool + { let axis_len = self.indices.len(); let mut seen = vec![false; axis_len]; for &i in &self.indices { @@ -45,14 +49,16 @@ impl Permutation { } } -pub trait SortArray { +pub trait SortArray +{ /// ***Panics*** if `axis` is out of bounds. fn identity(&self, axis: Axis) -> Permutation; fn sort_axis_by(&self, axis: Axis, less_than: F) -> Permutation where F: FnMut(usize, usize) -> bool; } -pub trait PermuteArray { +pub trait PermuteArray +{ type Elem; type Dim; fn permute_axis(self, axis: Axis, perm: &Permutation) -> Array @@ -66,14 +72,16 @@ where S: Data, D: Dimension, { - fn identity(&self, axis: Axis) -> Permutation { + fn identity(&self, axis: Axis) -> Permutation + { Permutation { indices: (0..self.len_of(axis)).collect(), } } fn sort_axis_by(&self, axis: Axis, mut less_than: F) -> Permutation - where F: FnMut(usize, usize) -> bool { + where F: FnMut(usize, usize) -> bool + { let mut perm = self.identity(axis); perm.indices.sort_by(move |&a, &b| { if less_than(a, b) { @@ -95,7 +103,8 @@ where D: Dimension type Dim = D; fn permute_axis(self, axis: Axis, perm: &Permutation) -> Array - where D: RemoveAxis { + where D: RemoveAxis + { let axis_len = self.len_of(axis); let axis_stride = self.stride_of(axis); assert_eq!(axis_len, perm.indices.len()); @@ -158,7 +167,8 @@ where D: Dimension } #[cfg(feature = "std")] -fn main() { +fn main() +{ let a = Array::linspace(0., 63., 64) .into_shape_with_order((8, 8)) .unwrap(); @@ -178,10 +188,12 @@ fn main() { fn main() {} #[cfg(test)] -mod tests { +mod tests +{ use super::*; #[test] - fn test_permute_axis() { + fn test_permute_axis() + { let a = array![ 
[107998.96, 1.], [107999.08, 2.], diff --git a/examples/type_conversion.rs b/examples/type_conversion.rs index 7bec2542f..a419af740 100644 --- a/examples/type_conversion.rs +++ b/examples/type_conversion.rs @@ -7,7 +7,8 @@ use approx::assert_abs_diff_eq; use ndarray::prelude::*; #[cfg(feature = "approx")] -fn main() { +fn main() +{ // Converting an array from one datatype to another is implemented with the // `ArrayBase::mapv()` function. We pass a closure that is applied to each // element independently. This allows for more control and flexiblity in diff --git a/examples/zip_many.rs b/examples/zip_many.rs index 9b649a278..57d66a956 100644 --- a/examples/zip_many.rs +++ b/examples/zip_many.rs @@ -5,7 +5,8 @@ use ndarray::prelude::*; use ndarray::Zip; -fn main() { +fn main() +{ let n = 6; let mut a = Array::::zeros((n, n)); diff --git a/ndarray-rand/benches/bench.rs b/ndarray-rand/benches/bench.rs index b58d80a88..0e5eb2ff7 100644 --- a/ndarray-rand/benches/bench.rs +++ b/ndarray-rand/benches/bench.rs @@ -10,19 +10,22 @@ use rand_distr::Uniform; use test::Bencher; #[bench] -fn uniform_f32(b: &mut Bencher) { +fn uniform_f32(b: &mut Bencher) +{ let m = 100; b.iter(|| Array::random((m, m), Uniform::new(-1f32, 1.))); } #[bench] -fn norm_f32(b: &mut Bencher) { +fn norm_f32(b: &mut Bencher) +{ let m = 100; b.iter(|| Array::random((m, m), Normal::new(0f32, 1.).unwrap())); } #[bench] -fn norm_f64(b: &mut Bencher) { +fn norm_f64(b: &mut Bencher) +{ let m = 100; b.iter(|| Array::random((m, m), Normal::new(0f64, 1.).unwrap())); } diff --git a/ndarray-rand/src/lib.rs b/ndarray-rand/src/lib.rs index 57124f3a7..027198538 100644 --- a/ndarray-rand/src/lib.rs +++ b/ndarray-rand/src/lib.rs @@ -40,12 +40,14 @@ use ndarray::{ArrayBase, Data, DataOwned, Dimension, RawData}; use quickcheck::{Arbitrary, Gen}; /// `rand`, re-exported for convenience and version-compatibility. 
-pub mod rand { +pub mod rand +{ pub use rand::*; } /// `rand-distr`, re-exported for convenience and version-compatibility. -pub mod rand_distr { +pub mod rand_distr +{ pub use rand_distr::*; } @@ -284,15 +286,18 @@ where /// [`sample_axis`]: RandomExt::sample_axis /// [`sample_axis_using`]: RandomExt::sample_axis_using #[derive(Debug, Clone)] -pub enum SamplingStrategy { +pub enum SamplingStrategy +{ WithReplacement, WithoutReplacement, } // `Arbitrary` enables `quickcheck` to generate random `SamplingStrategy` values for testing. #[cfg(feature = "quickcheck")] -impl Arbitrary for SamplingStrategy { - fn arbitrary(g: &mut Gen) -> Self { +impl Arbitrary for SamplingStrategy +{ + fn arbitrary(g: &mut Gen) -> Self + { if bool::arbitrary(g) { SamplingStrategy::WithReplacement } else { @@ -301,7 +306,8 @@ impl Arbitrary for SamplingStrategy { } } -fn get_rng() -> SmallRng { +fn get_rng() -> SmallRng +{ SmallRng::from_rng(thread_rng()).expect("create SmallRng from thread_rng failed") } @@ -328,7 +334,8 @@ pub struct F32(pub S); impl Distribution for F32 where S: Distribution { - fn sample(&self, rng: &mut R) -> f32 { + fn sample(&self, rng: &mut R) -> f32 + { self.0.sample(rng) as f32 } } diff --git a/ndarray-rand/tests/tests.rs b/ndarray-rand/tests/tests.rs index a0e1584ad..2db040310 100644 --- a/ndarray-rand/tests/tests.rs +++ b/ndarray-rand/tests/tests.rs @@ -8,7 +8,8 @@ use ndarray_rand::{RandomExt, SamplingStrategy}; use quickcheck::{quickcheck, TestResult}; #[test] -fn test_dim() { +fn test_dim() +{ let (mm, nn) = (5, 5); for m in 0..mm { for n in 0..nn { @@ -22,7 +23,8 @@ fn test_dim() { } #[test] -fn test_dim_f() { +fn test_dim_f() +{ let (mm, nn) = (5, 5); for m in 0..mm { for n in 0..nn { @@ -36,7 +38,8 @@ fn test_dim_f() { } #[test] -fn sample_axis_on_view() { +fn sample_axis_on_view() +{ let m = 5; let a = Array::random((m, 4), Uniform::new(0., 2.)); let _samples = a @@ -46,7 +49,8 @@ fn sample_axis_on_view() { #[test] #[should_panic] -fn 
oversampling_without_replacement_should_panic() { +fn oversampling_without_replacement_should_panic() +{ let m = 5; let a = Array::random((m, 4), Uniform::new(0., 2.)); let _samples = a.sample_axis(Axis(0), m + 1, SamplingStrategy::WithoutReplacement); @@ -111,7 +115,8 @@ quickcheck! { } } -fn sampling_works(a: &Array2, strategy: SamplingStrategy, axis: Axis, n_samples: usize) -> bool { +fn sampling_works(a: &Array2, strategy: SamplingStrategy, axis: Axis, n_samples: usize) -> bool +{ let samples = a.sample_axis(axis, n_samples, strategy); samples .axis_iter(axis) @@ -119,13 +124,15 @@ fn sampling_works(a: &Array2, strategy: SamplingStrategy, axis: Axis, n_sam } // Check if, when sliced along `axis`, there is at least one lane in `a` equal to `b` -fn is_subset(a: &Array2, b: &ArrayView1, axis: Axis) -> bool { +fn is_subset(a: &Array2, b: &ArrayView1, axis: Axis) -> bool +{ a.axis_iter(axis).any(|lane| &lane == b) } #[test] #[should_panic] -fn sampling_without_replacement_from_a_zero_length_axis_should_panic() { +fn sampling_without_replacement_from_a_zero_length_axis_should_panic() +{ let n = 5; let a = Array::random((0, n), Uniform::new(0., 2.)); let _samples = a.sample_axis(Axis(0), 1, SamplingStrategy::WithoutReplacement); @@ -133,7 +140,8 @@ fn sampling_without_replacement_from_a_zero_length_axis_should_panic() { #[test] #[should_panic] -fn sampling_with_replacement_from_a_zero_length_axis_should_panic() { +fn sampling_with_replacement_from_a_zero_length_axis_should_panic() +{ let n = 5; let a = Array::random((0, n), Uniform::new(0., 2.)); let _samples = a.sample_axis(Axis(0), 1, SamplingStrategy::WithReplacement); diff --git a/rustfmt.toml b/rustfmt.toml index f0eb0349a..f3e376ccc 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -3,7 +3,7 @@ array_width = 100 chain_width = 60 fn_call_width = 100 max_width = 120 -# brace_style = "AlwaysNextLine" +brace_style = "AlwaysNextLine" control_brace_style = "AlwaysSameLine" fn_params_layout = "Compressed" # ? 
format_macro_bodies = false diff --git a/src/aliases.rs b/src/aliases.rs index 9a8ea8f2c..5df0c95ec 100644 --- a/src/aliases.rs +++ b/src/aliases.rs @@ -7,50 +7,58 @@ use crate::{ArcArray, Array, ArrayView, ArrayViewMut, Ix, IxDynImpl}; /// Create a zero-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix0() -> Ix0 { +pub const fn Ix0() -> Ix0 +{ Dim::new([]) } /// Create a one-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix1(i0: Ix) -> Ix1 { +pub const fn Ix1(i0: Ix) -> Ix1 +{ Dim::new([i0]) } /// Create a two-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix2(i0: Ix, i1: Ix) -> Ix2 { +pub const fn Ix2(i0: Ix, i1: Ix) -> Ix2 +{ Dim::new([i0, i1]) } /// Create a three-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix3(i0: Ix, i1: Ix, i2: Ix) -> Ix3 { +pub const fn Ix3(i0: Ix, i1: Ix, i2: Ix) -> Ix3 +{ Dim::new([i0, i1, i2]) } /// Create a four-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix4(i0: Ix, i1: Ix, i2: Ix, i3: Ix) -> Ix4 { +pub const fn Ix4(i0: Ix, i1: Ix, i2: Ix, i3: Ix) -> Ix4 +{ Dim::new([i0, i1, i2, i3]) } /// Create a five-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix5(i0: Ix, i1: Ix, i2: Ix, i3: Ix, i4: Ix) -> Ix5 { +pub const fn Ix5(i0: Ix, i1: Ix, i2: Ix, i3: Ix, i4: Ix) -> Ix5 +{ Dim::new([i0, i1, i2, i3, i4]) } /// Create a six-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub const fn Ix6(i0: Ix, i1: Ix, i2: Ix, i3: Ix, i4: Ix, i5: Ix) -> Ix6 { +pub const fn Ix6(i0: Ix, i1: Ix, i2: Ix, i3: Ix, i4: Ix, i5: Ix) -> Ix6 +{ Dim::new([i0, i1, i2, i3, i4, i5]) } /// Create a dynamic-dimensional index #[allow(non_snake_case)] #[inline(always)] -pub fn IxDyn(ix: &[Ix]) -> IxDyn { +pub fn IxDyn(ix: &[Ix]) -> IxDyn +{ Dim(ix) } diff --git a/src/argument_traits.rs b/src/argument_traits.rs index 82d4869a9..de8ac7f99 100644 --- a/src/argument_traits.rs +++ 
b/src/argument_traits.rs @@ -4,36 +4,45 @@ use std::mem::MaybeUninit; use crate::math_cell::MathCell; /// A producer element that can be assigned to once -pub trait AssignElem { +pub trait AssignElem +{ /// Assign the value `input` to the element that self represents. fn assign_elem(self, input: T); } /// Assignable element, simply `*self = input`. -impl<'a, T> AssignElem for &'a mut T { - fn assign_elem(self, input: T) { +impl<'a, T> AssignElem for &'a mut T +{ + fn assign_elem(self, input: T) + { *self = input; } } /// Assignable element, simply `self.set(input)`. -impl<'a, T> AssignElem for &'a Cell { - fn assign_elem(self, input: T) { +impl<'a, T> AssignElem for &'a Cell +{ + fn assign_elem(self, input: T) + { self.set(input); } } /// Assignable element, simply `self.set(input)`. -impl<'a, T> AssignElem for &'a MathCell { - fn assign_elem(self, input: T) { +impl<'a, T> AssignElem for &'a MathCell +{ + fn assign_elem(self, input: T) + { self.set(input); } } /// Assignable element, the item in the MaybeUninit is overwritten (prior value, if any, is not /// read or dropped). -impl<'a, T> AssignElem for &'a mut MaybeUninit { - fn assign_elem(self, input: T) { +impl<'a, T> AssignElem for &'a mut MaybeUninit +{ + fn assign_elem(self, input: T) + { *self = MaybeUninit::new(input); } } diff --git a/src/array_approx.rs b/src/array_approx.rs index 4ad5ef201..286a1146c 100644 --- a/src/array_approx.rs +++ b/src/array_approx.rs @@ -1,5 +1,6 @@ #[cfg(feature = "approx")] -mod approx_methods { +mod approx_methods +{ use crate::imp_prelude::*; impl ArrayBase diff --git a/src/array_serde.rs b/src/array_serde.rs index aff268a51..31b613d4c 100644 --- a/src/array_serde.rs +++ b/src/array_serde.rs @@ -24,7 +24,8 @@ use crate::IntoDimension; /// Verifies that the version of the deserialized array matches the current /// `ARRAY_FORMAT_VERSION`. 
pub fn verify_version(v: u8) -> Result<(), E> -where E: de::Error { +where E: de::Error +{ if v != ARRAY_FORMAT_VERSION { let err_msg = format!("unknown array version: {}", v); Err(de::Error::custom(err_msg)) @@ -38,7 +39,8 @@ impl Serialize for Dim where I: Serialize { fn serialize(&self, serializer: Se) -> Result - where Se: Serializer { + where Se: Serializer + { self.ix().serialize(serializer) } } @@ -48,23 +50,28 @@ impl<'de, I> Deserialize<'de> for Dim where I: Deserialize<'de> { fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> { + where D: Deserializer<'de> + { I::deserialize(deserializer).map(Dim::new) } } /// **Requires crate feature `"serde"`** -impl Serialize for IxDyn { +impl Serialize for IxDyn +{ fn serialize(&self, serializer: Se) -> Result - where Se: Serializer { + where Se: Serializer + { self.ix().serialize(serializer) } } /// **Requires crate feature `"serde"`** -impl<'de> Deserialize<'de> for IxDyn { +impl<'de> Deserialize<'de> for IxDyn +{ fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> { + where D: Deserializer<'de> + { let v = Vec::::deserialize(deserializer)?; Ok(v.into_dimension()) } @@ -78,7 +85,8 @@ where S: Data, { fn serialize(&self, serializer: Se) -> Result - where Se: Serializer { + where Se: Serializer + { let mut state = serializer.serialize_struct("Array", 3)?; state.serialize_field("v", &ARRAY_FORMAT_VERSION)?; state.serialize_field("dim", &self.raw_dim())?; @@ -96,7 +104,8 @@ where D: Dimension + Serialize, { fn serialize(&self, serializer: S) -> Result - where S: Serializer { + where S: Serializer + { let iter = &self.0; let mut seq = serializer.serialize_seq(Some(iter.len()))?; for elt in iter.clone() { @@ -106,19 +115,23 @@ where } } -struct ArrayVisitor { +struct ArrayVisitor +{ _marker_a: PhantomData, _marker_b: PhantomData, } -enum ArrayField { +enum ArrayField +{ Version, Dim, Data, } -impl ArrayVisitor { - pub fn new() -> Self { +impl ArrayVisitor +{ + pub fn new() -> Self + 
{ ArrayVisitor { _marker_a: PhantomData, _marker_b: PhantomData, @@ -136,25 +149,31 @@ where S: DataOwned, { fn deserialize(deserializer: D) -> Result, D::Error> - where D: Deserializer<'de> { + where D: Deserializer<'de> + { deserializer.deserialize_struct("Array", ARRAY_FIELDS, ArrayVisitor::new()) } } -impl<'de> Deserialize<'de> for ArrayField { +impl<'de> Deserialize<'de> for ArrayField +{ fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> { + where D: Deserializer<'de> + { struct ArrayFieldVisitor; - impl<'de> Visitor<'de> for ArrayFieldVisitor { + impl<'de> Visitor<'de> for ArrayFieldVisitor + { type Value = ArrayField; - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result + { formatter.write_str(r#""v", "dim", or "data""#) } fn visit_str(self, value: &str) -> Result - where E: de::Error { + where E: de::Error + { match value { "v" => Ok(ArrayField::Version), "dim" => Ok(ArrayField::Dim), @@ -164,7 +183,8 @@ impl<'de> Deserialize<'de> for ArrayField { } fn visit_bytes(self, value: &[u8]) -> Result - where E: de::Error { + where E: de::Error + { match value { b"v" => Ok(ArrayField::Version), b"dim" => Ok(ArrayField::Dim), @@ -186,12 +206,14 @@ where { type Value = ArrayBase; - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result + { formatter.write_str("ndarray representation") } fn visit_seq(self, mut visitor: V) -> Result, V::Error> - where V: SeqAccess<'de> { + where V: SeqAccess<'de> + { let v: u8 = match visitor.next_element()? 
{ Some(value) => value, None => { @@ -223,7 +245,8 @@ where } fn visit_map(self, mut visitor: V) -> Result, V::Error> - where V: MapAccess<'de> { + where V: MapAccess<'de> + { let mut v: Option = None; let mut data: Option> = None; let mut dim: Option = None; diff --git a/src/arrayformat.rs b/src/arrayformat.rs index 9c4c0fc82..7089c48c7 100644 --- a/src/arrayformat.rs +++ b/src/arrayformat.rs @@ -29,14 +29,17 @@ const AXIS_2D_OVERFLOW_LIMIT: usize = 22; const ELLIPSIS: &str = "..."; #[derive(Clone, Debug)] -struct FormatOptions { +struct FormatOptions +{ axis_collapse_limit: usize, axis_collapse_limit_next_last: usize, axis_collapse_limit_last: usize, } -impl FormatOptions { - pub(crate) fn default_for_array(nelem: usize, no_limit: bool) -> Self { +impl FormatOptions +{ + pub(crate) fn default_for_array(nelem: usize, no_limit: bool) -> Self + { let default = Self { axis_collapse_limit: AXIS_LIMIT_STACKED, axis_collapse_limit_next_last: AXIS_LIMIT_COL, @@ -45,7 +48,8 @@ impl FormatOptions { default.set_no_limit(no_limit || nelem < ARRAY_MANY_ELEMENT_LIMIT) } - fn set_no_limit(mut self, no_limit: bool) -> Self { + fn set_no_limit(mut self, no_limit: bool) -> Self + { if no_limit { self.axis_collapse_limit = std::usize::MAX; self.axis_collapse_limit_next_last = std::usize::MAX; @@ -56,7 +60,8 @@ impl FormatOptions { /// Axis length collapse limit before ellipsizing, where `axis_rindex` is /// the index of the axis from the back. 
- pub(crate) fn collapse_limit(&self, axis_rindex: usize) -> usize { + pub(crate) fn collapse_limit(&self, axis_rindex: usize) -> usize + { match axis_rindex { 0 => self.axis_collapse_limit_last, 1 => self.axis_collapse_limit_next_last, @@ -80,7 +85,8 @@ impl FormatOptions { fn format_with_overflow( f: &mut fmt::Formatter<'_>, length: usize, limit: usize, separator: &str, ellipsis: &str, fmt_elem: &mut dyn FnMut(&mut fmt::Formatter, usize) -> fmt::Result, -) -> fmt::Result { +) -> fmt::Result +{ if length == 0 { // no-op } else if length <= limit { @@ -170,7 +176,8 @@ where impl fmt::Display for ArrayBase where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } @@ -183,7 +190,8 @@ where S: Data impl fmt::Debug for ArrayBase where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt)?; @@ -210,7 +218,8 @@ where S: Data impl fmt::LowerExp for ArrayBase where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } @@ -223,7 +232,8 @@ where S: Data impl fmt::UpperExp for ArrayBase where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } @@ -235,7 +245,8 @@ where S: Data impl fmt::LowerHex for ArrayBase where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn 
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } @@ -248,14 +259,16 @@ where S: Data impl fmt::Binary for ArrayBase where S: Data { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { let fmt_opt = FormatOptions::default_for_array(self.len(), f.alternate()); format_array(self, f, <_>::fmt, &fmt_opt) } } #[cfg(test)] -mod formatting_with_omit { +mod formatting_with_omit +{ #[cfg(not(feature = "std"))] use alloc::string::String; #[cfg(not(feature = "std"))] @@ -265,7 +278,8 @@ mod formatting_with_omit { use super::*; use crate::prelude::*; - fn assert_str_eq(expected: &str, actual: &str) { + fn assert_str_eq(expected: &str, actual: &str) + { // use assert to avoid printing the strings twice on failure assert!( expected == actual, @@ -275,7 +289,8 @@ mod formatting_with_omit { ); } - fn ellipsize(limit: usize, sep: &str, elements: impl IntoIterator) -> String { + fn ellipsize(limit: usize, sep: &str, elements: impl IntoIterator) -> String + { let elements = elements.into_iter().collect::>(); let edge = limit / 2; if elements.len() <= limit { @@ -293,7 +308,8 @@ mod formatting_with_omit { } #[test] - fn empty_arrays() { + fn empty_arrays() + { let a: Array2 = arr2(&[[], []]); let actual = format!("{}", a); let expected = "[[]]"; @@ -301,7 +317,8 @@ mod formatting_with_omit { } #[test] - fn zero_length_axes() { + fn zero_length_axes() + { let a = Array3::::zeros((3, 0, 4)); let actual = format!("{}", a); let expected = "[[[]]]"; @@ -309,7 +326,8 @@ mod formatting_with_omit { } #[test] - fn dim_0() { + fn dim_0() + { let element = 12; let a = arr0(element); let actual = format!("{}", a); @@ -318,7 +336,8 @@ mod formatting_with_omit { } #[test] - fn dim_1() { + fn dim_1() + { let overflow: usize = 2; let a = Array1::from_elem(ARRAY_MANY_ELEMENT_LIMIT + overflow, 1); let 
actual = format!("{}", a); @@ -327,7 +346,8 @@ mod formatting_with_omit { } #[test] - fn dim_1_alternate() { + fn dim_1_alternate() + { let overflow: usize = 2; let a = Array1::from_elem(ARRAY_MANY_ELEMENT_LIMIT + overflow, 1); let actual = format!("{:#}", a); @@ -336,7 +356,8 @@ mod formatting_with_omit { } #[test] - fn dim_2_last_axis_overflow() { + fn dim_2_last_axis_overflow() + { let overflow: usize = 2; let a = Array2::from_elem((AXIS_2D_OVERFLOW_LIMIT, AXIS_2D_OVERFLOW_LIMIT + overflow), 1); let actual = format!("{}", a); @@ -356,7 +377,8 @@ mod formatting_with_omit { } #[test] - fn dim_2_non_last_axis_overflow() { + fn dim_2_non_last_axis_overflow() + { let a = Array2::from_elem((ARRAY_MANY_ELEMENT_LIMIT / 10, 10), 1); let actual = format!("{}", a); let row = format!("{}", a.row(0)); @@ -368,7 +390,8 @@ mod formatting_with_omit { } #[test] - fn dim_2_non_last_axis_overflow_alternate() { + fn dim_2_non_last_axis_overflow_alternate() + { let a = Array2::from_elem((AXIS_LIMIT_COL * 4, 6), 1); let actual = format!("{:#}", a); let row = format!("{}", a.row(0)); @@ -377,7 +400,8 @@ mod formatting_with_omit { } #[test] - fn dim_2_multi_directional_overflow() { + fn dim_2_multi_directional_overflow() + { let overflow: usize = 2; let a = Array2::from_elem((AXIS_2D_OVERFLOW_LIMIT + overflow, AXIS_2D_OVERFLOW_LIMIT + overflow), 1); let actual = format!("{}", a); @@ -390,7 +414,8 @@ mod formatting_with_omit { } #[test] - fn dim_2_multi_directional_overflow_alternate() { + fn dim_2_multi_directional_overflow_alternate() + { let overflow: usize = 2; let a = Array2::from_elem((AXIS_2D_OVERFLOW_LIMIT + overflow, AXIS_2D_OVERFLOW_LIMIT + overflow), 1); let actual = format!("{:#}", a); @@ -400,7 +425,8 @@ mod formatting_with_omit { } #[test] - fn dim_3_overflow_most() { + fn dim_3_overflow_most() + { let a = Array3::from_shape_fn((AXIS_LIMIT_STACKED + 1, AXIS_LIMIT_COL, AXIS_LIMIT_ROW + 1), |(i, j, k)| { 1000. + (100. 
* ((i as f64).sqrt() + (j as f64).sin() + k as f64)).round() / 100. }); @@ -483,7 +509,8 @@ mod formatting_with_omit { } #[test] - fn dim_4_overflow_outer() { + fn dim_4_overflow_outer() + { let a = Array4::from_shape_fn((10, 10, 3, 3), |(i, j, k, l)| i + j + k + l); let actual = format!("{:2}", a); // Generated using NumPy with: diff --git a/src/arraytraits.rs b/src/arraytraits.rs index 00250aada..ea0b380ed 100644 --- a/src/arraytraits.rs +++ b/src/arraytraits.rs @@ -27,7 +27,8 @@ use crate::{ #[cold] #[inline(never)] -pub(crate) fn array_out_of_bounds() -> ! { +pub(crate) fn array_out_of_bounds() -> ! +{ panic!("ndarray: index out of bounds"); } @@ -52,7 +53,8 @@ where { type Output = S::Elem; #[inline] - fn index(&self, index: I) -> &S::Elem { + fn index(&self, index: I) -> &S::Elem + { debug_bounds_check!(self, index); unsafe { &*self.ptr.as_ptr().offset( @@ -74,7 +76,8 @@ where S: DataMut, { #[inline] - fn index_mut(&mut self, index: I) -> &mut S::Elem { + fn index_mut(&mut self, index: I) -> &mut S::Elem + { debug_bounds_check!(self, index); unsafe { &mut *self.as_mut_ptr().offset( @@ -95,7 +98,8 @@ where S2: Data, D: Dimension, { - fn eq(&self, rhs: &ArrayBase) -> bool { + fn eq(&self, rhs: &ArrayBase) -> bool + { if self.shape() != rhs.shape() { return false; } @@ -127,7 +131,8 @@ where S2: Data, D: Dimension, { - fn eq(&self, rhs: &&ArrayBase) -> bool { + fn eq(&self, rhs: &&ArrayBase) -> bool + { *self == **rhs } } @@ -142,7 +147,8 @@ where S2: Data, D: Dimension, { - fn eq(&self, rhs: &ArrayBase) -> bool { + fn eq(&self, rhs: &ArrayBase) -> bool + { **self == *rhs } } @@ -161,7 +167,8 @@ where S: DataOwned /// Create a one-dimensional array from a boxed slice (no copying needed). /// /// **Panics** if the length is greater than `isize::MAX`. 
- fn from(b: Box<[A]>) -> Self { + fn from(b: Box<[A]>) -> Self + { Self::from_vec(b.into_vec()) } } @@ -178,7 +185,8 @@ where S: DataOwned /// /// let array = Array::from(vec![1., 2., 3., 4.]); /// ``` - fn from(v: Vec) -> Self { + fn from(v: Vec) -> Self + { Self::from_vec(v) } } @@ -198,7 +206,8 @@ where S: DataOwned /// assert!(array == arr1(&[0, 1, 4, 9, 16])) /// ``` fn from_iter(iterable: I) -> ArrayBase - where I: IntoIterator { + where I: IntoIterator + { Self::from_iter(iterable) } } @@ -211,7 +220,8 @@ where type Item = &'a S::Elem; type IntoIter = Iter<'a, S::Elem, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { self.iter() } } @@ -224,7 +234,8 @@ where type Item = &'a mut S::Elem; type IntoIter = IterMut<'a, S::Elem, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { self.iter_mut() } } @@ -235,7 +246,8 @@ where D: Dimension type Item = &'a A; type IntoIter = Iter<'a, A, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { self.into_iter_() } } @@ -246,7 +258,8 @@ where D: Dimension type Item = &'a mut A; type IntoIter = IterMut<'a, A, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { self.into_iter_() } } @@ -258,7 +271,8 @@ where S::Elem: hash::Hash, { // Note: elements are hashed in the logical order - fn hash(&self, state: &mut H) { + fn hash(&self, state: &mut H) + { self.shape().hash(state); if let Some(self_s) = self.as_slice() { hash::Hash::hash_slice(self_s, state); @@ -313,7 +327,8 @@ where Slice: AsRef<[A]> /// Create a one-dimensional read-only array view of the data in `slice`. /// /// **Panics** if the slice length is greater than `isize::MAX`. 
- fn from(slice: &'a Slice) -> Self { + fn from(slice: &'a Slice) -> Self + { aview1(slice.as_ref()) } } @@ -323,9 +338,11 @@ where Slice: AsRef<[A]> /// **Panics** if the product of non-zero axis lengths overflows `isize` (This can only occur if A /// is zero-sized because slices cannot contain more than `isize::MAX` number of bytes). /// **Panics** if N == 0 and the number of rows is greater than isize::MAX. -impl<'a, A, const M: usize, const N: usize> From<&'a [[A; N]; M]> for ArrayView<'a, A, Ix2> { +impl<'a, A, const M: usize, const N: usize> From<&'a [[A; N]; M]> for ArrayView<'a, A, Ix2> +{ /// Create a two-dimensional read-only array view of the data in `slice` - fn from(xs: &'a [[A; N]; M]) -> Self { + fn from(xs: &'a [[A; N]; M]) -> Self + { Self::from(&xs[..]) } } @@ -335,9 +352,11 @@ impl<'a, A, const M: usize, const N: usize> From<&'a [[A; N]; M]> for ArrayView< /// **Panics** if the product of non-zero axis lengths overflows `isize`. (This /// can only occur if A is zero-sized or if `N` is zero, because slices cannot /// contain more than `isize::MAX` number of bytes.) -impl<'a, A, const N: usize> From<&'a [[A; N]]> for ArrayView<'a, A, Ix2> { +impl<'a, A, const N: usize> From<&'a [[A; N]]> for ArrayView<'a, A, Ix2> +{ /// Create a two-dimensional read-only array view of the data in `slice` - fn from(xs: &'a [[A; N]]) -> Self { + fn from(xs: &'a [[A; N]]) -> Self + { aview2(xs) } } @@ -349,7 +368,8 @@ where D: Dimension, { /// Create a read-only array view of the array. - fn from(array: &'a ArrayBase) -> Self { + fn from(array: &'a ArrayBase) -> Self + { array.view() } } @@ -361,7 +381,8 @@ where Slice: AsMut<[A]> /// Create a one-dimensional read-write array view of the data in `slice`. /// /// **Panics** if the slice length is greater than `isize::MAX`. 
- fn from(slice: &'a mut Slice) -> Self { + fn from(slice: &'a mut Slice) -> Self + { let xs = slice.as_mut(); if mem::size_of::() == 0 { assert!( @@ -378,9 +399,11 @@ where Slice: AsMut<[A]> /// **Panics** if the product of non-zero axis lengths overflows `isize` (This can only occur if A /// is zero-sized because slices cannot contain more than `isize::MAX` number of bytes). /// **Panics** if N == 0 and the number of rows is greater than isize::MAX. -impl<'a, A, const M: usize, const N: usize> From<&'a mut [[A; N]; M]> for ArrayViewMut<'a, A, Ix2> { +impl<'a, A, const M: usize, const N: usize> From<&'a mut [[A; N]; M]> for ArrayViewMut<'a, A, Ix2> +{ /// Create a two-dimensional read-write array view of the data in `slice` - fn from(xs: &'a mut [[A; N]; M]) -> Self { + fn from(xs: &'a mut [[A; N]; M]) -> Self + { Self::from(&mut xs[..]) } } @@ -390,9 +413,11 @@ impl<'a, A, const M: usize, const N: usize> From<&'a mut [[A; N]; M]> for ArrayV /// **Panics** if the product of non-zero axis lengths overflows `isize`. (This /// can only occur if `A` is zero-sized or if `N` is zero, because slices /// cannot contain more than `isize::MAX` number of bytes.) -impl<'a, A, const N: usize> From<&'a mut [[A; N]]> for ArrayViewMut<'a, A, Ix2> { +impl<'a, A, const N: usize> From<&'a mut [[A; N]]> for ArrayViewMut<'a, A, Ix2> +{ /// Create a two-dimensional read-write array view of the data in `slice` - fn from(xs: &'a mut [[A; N]]) -> Self { + fn from(xs: &'a mut [[A; N]]) -> Self + { let cols = N; let rows = xs.len(); let dim = Ix2(rows, cols); @@ -421,7 +446,8 @@ where D: Dimension, { /// Create a read-write array view of the array. 
- fn from(array: &'a mut ArrayBase) -> Self { + fn from(array: &'a mut ArrayBase) -> Self + { array.view_mut() } } @@ -429,7 +455,8 @@ where impl From> for ArcArray where D: Dimension { - fn from(arr: Array) -> ArcArray { + fn from(arr: Array) -> ArcArray + { arr.into_shared() } } @@ -486,7 +513,8 @@ where { // NOTE: We can implement Default for non-zero dimensional array views by // using an empty slice, however we need a trait for nonzero Dimension. - fn default() -> Self { + fn default() -> Self + { ArrayBase::default(D::default()) } } diff --git a/src/data_repr.rs b/src/data_repr.rs index 235ac3276..cb57d81f3 100644 --- a/src/data_repr.rs +++ b/src/data_repr.rs @@ -13,7 +13,8 @@ use std::ptr::NonNull; use rawpointer::PointerExt; #[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum Device { +pub enum Device +{ Host, #[cfg(feature = "opencl")] OpenCL, @@ -31,15 +32,18 @@ pub enum Device { // transmutable A -> B. #[derive(Debug)] #[repr(C)] -pub struct OwnedRepr { +pub struct OwnedRepr +{ ptr: NonNull, len: usize, capacity: usize, device: Device, } -impl OwnedRepr { - pub(crate) fn from(v: Vec) -> Self { +impl OwnedRepr +{ + pub(crate) fn from(v: Vec) -> Self + { let mut v = ManuallyDrop::new(v); let len = v.len(); let capacity = v.capacity(); @@ -53,11 +57,13 @@ impl OwnedRepr { } } - pub(crate) const fn device(&self) -> Device { + pub(crate) const fn device(&self) -> Device + { self.device } - pub(crate) const unsafe fn from_components(ptr: NonNull, len: usize, capacity: usize, device: Device) -> Self { + pub(crate) const unsafe fn from_components(ptr: NonNull, len: usize, capacity: usize, device: Device) -> Self + { Self { ptr, len, @@ -68,7 +74,8 @@ impl OwnedRepr { /// Move this storage object to a specified device. 
#[allow(clippy::unnecessary_wraps)] - pub(crate) fn move_to_device(self, device: Device) -> Option { + pub(crate) fn move_to_device(self, device: Device) -> Option + { // println!("Copying to {device:?}"); // let mut self_ = ManuallyDrop::new(self); // self_.device = device; @@ -171,7 +178,8 @@ impl OwnedRepr { } /// Drop the object and free the memory - pub(crate) unsafe fn drop_impl(&mut self) -> Vec { + pub(crate) unsafe fn drop_impl(&mut self) -> Vec + { let capacity = self.capacity; let len = self.len; self.len = 0; @@ -209,7 +217,8 @@ impl OwnedRepr { /// # Panics /// Will panic if the underlying memory is not allocated on /// the host device. - pub(crate) fn into_vec(self) -> Vec { + pub(crate) fn into_vec(self) -> Vec + { // Creating a Vec requires the data to be on the host device assert_eq!(self.device, Device::Host); ManuallyDrop::new(self).take_as_vec() @@ -220,14 +229,16 @@ impl OwnedRepr { /// # Panics /// Will panic if the underlying memory is not allocated /// on the host device. - pub(crate) fn as_slice(&self) -> &[A] { + pub(crate) fn as_slice(&self) -> &[A] + { // Cannot create a slice of a device pointer debug_assert_eq!(self.device, Device::Host, "Cannot create a slice of a device pointer"); unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) } } - pub(crate) const fn len(&self) -> usize { + pub(crate) const fn len(&self) -> usize + { self.len } @@ -237,7 +248,8 @@ impl OwnedRepr { /// The pointer **may not necessarily point to the host**. /// Using a non-host pointer on the host will almost certainly /// cause a segmentation-fault. - pub(crate) const fn as_ptr(&self) -> *const A { + pub(crate) const fn as_ptr(&self) -> *const A + { self.ptr.as_ptr() } @@ -247,7 +259,8 @@ impl OwnedRepr { /// The pointer **may not necessarily point to the host**. /// Using a non-host pointer on the host will almost certainly /// cause a segmentation-fault. 
- pub(crate) const fn as_ptr_mut(&self) -> *mut A { + pub(crate) const fn as_ptr_mut(&self) -> *mut A + { self.ptr.as_ptr() } @@ -257,7 +270,8 @@ impl OwnedRepr { /// The pointer **may not necessarily point to the host**. /// Using a non-host pointer on the host will almost certainly /// cause a segmentation-fault. - pub(crate) fn as_nonnull_mut(&mut self) -> NonNull { + pub(crate) fn as_nonnull_mut(&mut self) -> NonNull + { self.ptr } @@ -267,7 +281,8 @@ impl OwnedRepr { /// The pointer **may not necessarily point to the host**. /// Using a non-host pointer on the host will almost certainly /// cause a segmentation-fault. - pub(crate) fn as_end_nonnull(&self) -> NonNull { + pub(crate) fn as_end_nonnull(&self) -> NonNull + { unsafe { self.ptr.add(self.len) } } @@ -276,7 +291,8 @@ impl OwnedRepr { /// ## Safety /// Note that existing pointers into the data are invalidated #[must_use = "must use new pointer to update existing pointers"] - pub(crate) fn reserve(&mut self, additional: usize) -> NonNull { + pub(crate) fn reserve(&mut self, additional: usize) -> NonNull + { self.modify_as_vec(|mut v| { v.reserve(additional); v @@ -288,7 +304,8 @@ impl OwnedRepr { /// /// ## Safety /// The first `new_len` elements of the data should be valid. - pub(crate) unsafe fn set_len(&mut self, new_len: usize) { + pub(crate) unsafe fn set_len(&mut self, new_len: usize) + { debug_assert!(new_len <= self.capacity); self.len = new_len; } @@ -297,7 +314,8 @@ impl OwnedRepr { /// the internal length to zero. /// /// todo: Is this valid/safe? Mark as unsafe? - pub(crate) fn release_all_elements(&mut self) -> usize { + pub(crate) fn release_all_elements(&mut self) -> usize + { let ret = self.len; self.len = 0; ret @@ -308,7 +326,8 @@ impl OwnedRepr { /// ## Safety /// Caller must ensure the two types have the same representation. /// **Panics** if sizes don't match (which is not a sufficient check). 
- pub(crate) unsafe fn data_subst(self) -> OwnedRepr { + pub(crate) unsafe fn data_subst(self) -> OwnedRepr + { // necessary but not sufficient check assert_eq!(mem::size_of::(), mem::size_of::()); let self_ = ManuallyDrop::new(self); @@ -321,7 +340,8 @@ impl OwnedRepr { } /// Apply a `f(Vec) -> Vec` to this storage object and update `self`. - fn modify_as_vec(&mut self, f: impl FnOnce(Vec) -> Vec) { + fn modify_as_vec(&mut self, f: impl FnOnce(Vec) -> Vec) + { let v = self.take_as_vec(); *self = Self::from(f(v)); } @@ -331,7 +351,8 @@ impl OwnedRepr { /// # Panics /// Will panic if the underlying memory is not allocated /// on the host device. - fn take_as_vec(&mut self) -> Vec { + fn take_as_vec(&mut self) -> Vec + { assert_eq!(self.device, Device::Host); let capacity = self.capacity; let len = self.len; @@ -344,7 +365,8 @@ impl OwnedRepr { impl Clone for OwnedRepr where A: Clone { - fn clone(&self) -> Self { + fn clone(&self) -> Self + { match self.device { Device::Host => Self::from(self.as_slice().to_owned()), @@ -387,7 +409,8 @@ where A: Clone } } - fn clone_from(&mut self, other: &Self) { + fn clone_from(&mut self, other: &Self) + { match self.device { Device::Host => { let mut v = self.take_as_vec(); @@ -437,8 +460,10 @@ where A: Clone } } -impl Drop for OwnedRepr { - fn drop(&mut self) { +impl Drop for OwnedRepr +{ + fn drop(&mut self) + { if self.capacity > 0 { // correct because: If the elements don't need dropping, an // empty Vec is ok. Only the Vec's allocation needs dropping. diff --git a/src/data_traits.rs b/src/data_traits.rs index fb786a604..93cb7e34d 100644 --- a/src/data_traits.rs +++ b/src/data_traits.rs @@ -29,7 +29,8 @@ use crate::{ArcArray, Array, ArrayBase, CowRepr, Device, Dimension, OwnedArcRepr /// Traits in Rust can serve many different roles. This trait is public because /// it is used as a bound on public methods. 
#[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait RawData: Sized { +pub unsafe trait RawData: Sized +{ /// The array element type. type Elem; @@ -42,7 +43,8 @@ pub unsafe trait RawData: Sized { fn _is_pointer_inbounds(&self, ptr: *const Self::Elem) -> bool; #[doc(hidden)] - fn _device(&self) -> Option { + fn _device(&self) -> Option + { None } @@ -55,7 +57,8 @@ pub unsafe trait RawData: Sized { /// /// ***Internal trait, see `RawData`.*** #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait RawDataMut: RawData { +pub unsafe trait RawDataMut: RawData +{ /// If possible, ensures that the array has unique access to its data. /// /// The implementer must ensure that if the input is contiguous, then the @@ -83,13 +86,15 @@ pub unsafe trait RawDataMut: RawData { /// /// ***Internal trait, see `RawData`.*** #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait RawDataClone: RawData { +pub unsafe trait RawDataClone: RawData +{ #[doc(hidden)] /// Unsafe because, `ptr` must point inside the current storage. unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull); #[doc(hidden)] - unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull { + unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull + { let (data, ptr) = other.clone_with_ptr(ptr); *self = data; ptr @@ -102,7 +107,8 @@ pub unsafe trait RawDataClone: RawData { /// /// ***Internal trait, see `RawData`.*** #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait Data: RawData { +pub unsafe trait Data: RawData +{ /// Converts the array to a uniquely owned array, cloning elements if necessary. #[doc(hidden)] #[allow(clippy::wrong_self_convention)] @@ -144,7 +150,8 @@ pub unsafe trait Data: RawData { // the data is unique. You are also guaranteeing that `try_is_unique` always // returns `Some(_)`. 
#[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait DataMut: Data + RawDataMut { +pub unsafe trait DataMut: Data + RawDataMut +{ /// Ensures that the array has unique access to its data. #[doc(hidden)] #[inline] @@ -160,50 +167,60 @@ pub unsafe trait DataMut: Data + RawDataMut { #[doc(hidden)] #[inline] #[allow(clippy::wrong_self_convention)] // mut needed for Arc types - fn is_unique(&mut self) -> bool { + fn is_unique(&mut self) -> bool + { self.try_is_unique().unwrap() } } -unsafe impl RawData for RawViewRepr<*const A> { +unsafe impl RawData for RawViewRepr<*const A> +{ type Elem = A; #[inline] - fn _data_slice(&self) -> Option<&[A]> { + fn _data_slice(&self) -> Option<&[A]> + { None } #[inline(always)] - fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool { + fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool + { true } private_impl! {} } -unsafe impl RawDataClone for RawViewRepr<*const A> { - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { +unsafe impl RawDataClone for RawViewRepr<*const A> +{ + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) + { (*self, ptr) } } -unsafe impl RawData for RawViewRepr<*mut A> { +unsafe impl RawData for RawViewRepr<*mut A> +{ type Elem = A; #[inline] - fn _data_slice(&self) -> Option<&[A]> { + fn _data_slice(&self) -> Option<&[A]> + { None } #[inline(always)] - fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool { + fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool + { true } private_impl! 
{} } -unsafe impl RawDataMut for RawViewRepr<*mut A> { +unsafe impl RawDataMut for RawViewRepr<*mut A> +{ #[inline] fn try_ensure_unique(_: &mut ArrayBase) where @@ -213,24 +230,30 @@ unsafe impl RawDataMut for RawViewRepr<*mut A> { } #[inline] - fn try_is_unique(&mut self) -> Option { + fn try_is_unique(&mut self) -> Option + { None } } -unsafe impl RawDataClone for RawViewRepr<*mut A> { - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { +unsafe impl RawDataClone for RawViewRepr<*mut A> +{ + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) + { (*self, ptr) } } -unsafe impl RawData for OwnedArcRepr { +unsafe impl RawData for OwnedArcRepr +{ type Elem = A; - fn _data_slice(&self) -> Option<&[A]> { + fn _data_slice(&self) -> Option<&[A]> + { Some(self.0.as_slice()) } - fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool { + fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool + { self.0._is_pointer_inbounds(self_ptr) } @@ -268,12 +291,14 @@ where A: Clone } } - fn try_is_unique(&mut self) -> Option { + fn try_is_unique(&mut self) -> Option + { Some(Arc::get_mut(&mut self.0).is_some()) } } -unsafe impl Data for OwnedArcRepr { +unsafe impl Data for OwnedArcRepr +{ fn into_owned(mut self_: ArrayBase) -> Array where A: Clone, @@ -286,7 +311,8 @@ unsafe impl Data for OwnedArcRepr { } fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> - where D: Dimension { + where D: Dimension + { match Arc::try_unwrap(self_.data.0) { Ok(owned_data) => unsafe { // Safe because the data is equivalent. 
@@ -314,21 +340,26 @@ unsafe impl Data for OwnedArcRepr { unsafe impl DataMut for OwnedArcRepr where A: Clone {} -unsafe impl RawDataClone for OwnedArcRepr { - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { +unsafe impl RawDataClone for OwnedArcRepr +{ + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) + { // pointer is preserved (self.clone(), ptr) } } -unsafe impl RawData for OwnedRepr { +unsafe impl RawData for OwnedRepr +{ type Elem = A; - fn _data_slice(&self) -> Option<&[A]> { + fn _data_slice(&self) -> Option<&[A]> + { Some(self.as_slice()) } - fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool { + fn _is_pointer_inbounds(&self, self_ptr: *const Self::Elem) -> bool + { // let slc = self.as_slice(); // let ptr = slc.as_ptr() as *mut A; // let end = unsafe { ptr.add(slc.len()) }; @@ -341,14 +372,16 @@ unsafe impl RawData for OwnedRepr { ptr <= self_ptr && self_ptr <= end } - fn _device(&self) -> Option { + fn _device(&self) -> Option + { Some(self.device()) } private_impl! 
{} } -unsafe impl RawDataMut for OwnedRepr { +unsafe impl RawDataMut for OwnedRepr +{ #[inline] fn try_ensure_unique(_: &mut ArrayBase) where @@ -358,12 +391,14 @@ unsafe impl RawDataMut for OwnedRepr { } #[inline] - fn try_is_unique(&mut self) -> Option { + fn try_is_unique(&mut self) -> Option + { Some(true) } } -unsafe impl Data for OwnedRepr { +unsafe impl Data for OwnedRepr +{ #[inline] fn into_owned(self_: ArrayBase) -> Array where @@ -375,7 +410,8 @@ unsafe impl Data for OwnedRepr { #[inline] fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> - where D: Dimension { + where D: Dimension + { Ok(self_) } } @@ -385,7 +421,8 @@ unsafe impl DataMut for OwnedRepr {} unsafe impl RawDataClone for OwnedRepr where A: Clone { - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) + { let mut u = self.clone(); let mut new_ptr = u.as_nonnull_mut(); if size_of::() != 0 { @@ -395,7 +432,8 @@ where A: Clone (u, new_ptr) } - unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull { + unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull + { let our_off = if size_of::() != 0 { (ptr.as_ptr() as isize - other.as_ptr() as isize) / mem::size_of::() as isize } else { @@ -406,23 +444,27 @@ where A: Clone } } -unsafe impl<'a, A> RawData for ViewRepr<&'a A> { +unsafe impl<'a, A> RawData for ViewRepr<&'a A> +{ type Elem = A; #[inline] - fn _data_slice(&self) -> Option<&[A]> { + fn _data_slice(&self) -> Option<&[A]> + { None } #[inline(always)] - fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool { + fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool + { true } private_impl! 
{} } -unsafe impl<'a, A> Data for ViewRepr<&'a A> { +unsafe impl<'a, A> Data for ViewRepr<&'a A> +{ fn into_owned(self_: ArrayBase) -> Array where Self::Elem: Clone, @@ -432,34 +474,41 @@ unsafe impl<'a, A> Data for ViewRepr<&'a A> { } fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> - where D: Dimension { + where D: Dimension + { Err(self_) } } -unsafe impl<'a, A> RawDataClone for ViewRepr<&'a A> { - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { +unsafe impl<'a, A> RawDataClone for ViewRepr<&'a A> +{ + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) + { (*self, ptr) } } -unsafe impl<'a, A> RawData for ViewRepr<&'a mut A> { +unsafe impl<'a, A> RawData for ViewRepr<&'a mut A> +{ type Elem = A; #[inline] - fn _data_slice(&self) -> Option<&[A]> { + fn _data_slice(&self) -> Option<&[A]> + { None } #[inline(always)] - fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool { + fn _is_pointer_inbounds(&self, _ptr: *const Self::Elem) -> bool + { true } private_impl! {} } -unsafe impl<'a, A> RawDataMut for ViewRepr<&'a mut A> { +unsafe impl<'a, A> RawDataMut for ViewRepr<&'a mut A> +{ #[inline] fn try_ensure_unique(_: &mut ArrayBase) where @@ -469,12 +518,14 @@ unsafe impl<'a, A> RawDataMut for ViewRepr<&'a mut A> { } #[inline] - fn try_is_unique(&mut self) -> Option { + fn try_is_unique(&mut self) -> Option + { Some(true) } } -unsafe impl<'a, A> Data for ViewRepr<&'a mut A> { +unsafe impl<'a, A> Data for ViewRepr<&'a mut A> +{ fn into_owned(self_: ArrayBase) -> Array where Self::Elem: Clone, @@ -484,7 +535,8 @@ unsafe impl<'a, A> Data for ViewRepr<&'a mut A> { } fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> - where D: Dimension { + where D: Dimension + { Err(self_) } } @@ -504,7 +556,8 @@ unsafe impl<'a, A> DataMut for ViewRepr<&'a mut A> {} // unsharing storage before mutating it. 
The initially allocated storage must be mutable so // that it can be mutated directly - through .raw_view_mut_unchecked() - for initialization. #[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait DataOwned: Data { +pub unsafe trait DataOwned: Data +{ /// Corresponding owned data with MaybeUninit elements type MaybeUninit: DataOwned> + RawDataSubst; #[doc(hidden)] @@ -527,34 +580,42 @@ pub unsafe trait DataShared: Clone + Data + RawDataClone {} unsafe impl DataShared for OwnedArcRepr {} unsafe impl<'a, A> DataShared for ViewRepr<&'a A> {} -unsafe impl DataOwned for OwnedRepr { +unsafe impl DataOwned for OwnedRepr +{ type MaybeUninit = OwnedRepr>; - fn new(elements: Vec) -> Self { + fn new(elements: Vec) -> Self + { OwnedRepr::from(elements) } - fn into_shared(self) -> OwnedArcRepr { + fn into_shared(self) -> OwnedArcRepr + { OwnedArcRepr(Arc::new(self)) } } -unsafe impl DataOwned for OwnedArcRepr { +unsafe impl DataOwned for OwnedArcRepr +{ type MaybeUninit = OwnedArcRepr>; - fn new(elements: Vec) -> Self { + fn new(elements: Vec) -> Self + { OwnedArcRepr(Arc::new(OwnedRepr::from(elements))) } - fn into_shared(self) -> OwnedArcRepr { + fn into_shared(self) -> OwnedArcRepr + { self } } -unsafe impl<'a, A> RawData for CowRepr<'a, A> { +unsafe impl<'a, A> RawData for CowRepr<'a, A> +{ type Elem = A; - fn _data_slice(&self) -> Option<&[A]> { + fn _data_slice(&self) -> Option<&[A]> + { #[allow(deprecated)] match self { CowRepr::View(view) => view._data_slice(), @@ -563,7 +624,8 @@ unsafe impl<'a, A> RawData for CowRepr<'a, A> { } #[inline] - fn _is_pointer_inbounds(&self, ptr: *const Self::Elem) -> bool { + fn _is_pointer_inbounds(&self, ptr: *const Self::Elem) -> bool + { match self { CowRepr::View(view) => view._is_pointer_inbounds(ptr), CowRepr::Owned(data) => data._is_pointer_inbounds(ptr), @@ -595,7 +657,8 @@ where A: Clone } #[inline] - fn try_is_unique(&mut self) -> Option { + fn try_is_unique(&mut self) -> Option + { 
Some(self.is_owned()) } } @@ -603,7 +666,8 @@ where A: Clone unsafe impl<'a, A> RawDataClone for CowRepr<'a, A> where A: Clone { - unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) { + unsafe fn clone_with_ptr(&self, ptr: NonNull) -> (Self, NonNull) + { match self { CowRepr::View(view) => { let (new_view, ptr) = view.clone_with_ptr(ptr); @@ -616,7 +680,8 @@ where A: Clone } } - unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull { + unsafe fn clone_from_with_ptr(&mut self, other: &Self, ptr: NonNull) -> NonNull + { match (&mut *self, other) { (CowRepr::View(self_), CowRepr::View(other)) => self_.clone_from_with_ptr(other, ptr), (CowRepr::Owned(self_), CowRepr::Owned(other)) => self_.clone_from_with_ptr(other, ptr), @@ -634,7 +699,8 @@ where A: Clone } } -unsafe impl<'a, A> Data for CowRepr<'a, A> { +unsafe impl<'a, A> Data for CowRepr<'a, A> +{ #[inline] fn into_owned(self_: ArrayBase, D>) -> Array where @@ -651,7 +717,8 @@ unsafe impl<'a, A> Data for CowRepr<'a, A> { } fn try_into_owned_nocopy(self_: ArrayBase) -> Result, ArrayBase> - where D: Dimension { + where D: Dimension + { match self_.data { CowRepr::View(_) => Err(self_), CowRepr::Owned(data) => unsafe { @@ -670,7 +737,8 @@ unsafe impl<'a, A> DataMut for CowRepr<'a, A> where A: Clone {} /// keeping the same kind of storage. /// /// For example, `RawDataSubst` can map the type `OwnedRepr` to `OwnedRepr`. 
-pub trait RawDataSubst: RawData { +pub trait RawDataSubst: RawData +{ /// The resulting array storage of the same kind but substituted element type type Output: RawData; @@ -683,58 +751,72 @@ pub trait RawDataSubst: RawData { unsafe fn data_subst(self) -> Self::Output; } -impl RawDataSubst for OwnedRepr { +impl RawDataSubst for OwnedRepr +{ type Output = OwnedRepr; - unsafe fn data_subst(self) -> Self::Output { + unsafe fn data_subst(self) -> Self::Output + { self.data_subst() } } -impl RawDataSubst for OwnedArcRepr { +impl RawDataSubst for OwnedArcRepr +{ type Output = OwnedArcRepr; - unsafe fn data_subst(self) -> Self::Output { + unsafe fn data_subst(self) -> Self::Output + { OwnedArcRepr(Arc::from_raw(Arc::into_raw(self.0) as *const OwnedRepr)) } } -impl RawDataSubst for RawViewRepr<*const A> { +impl RawDataSubst for RawViewRepr<*const A> +{ type Output = RawViewRepr<*const B>; - unsafe fn data_subst(self) -> Self::Output { + unsafe fn data_subst(self) -> Self::Output + { RawViewRepr::new() } } -impl RawDataSubst for RawViewRepr<*mut A> { +impl RawDataSubst for RawViewRepr<*mut A> +{ type Output = RawViewRepr<*mut B>; - unsafe fn data_subst(self) -> Self::Output { + unsafe fn data_subst(self) -> Self::Output + { RawViewRepr::new() } } -impl<'a, A: 'a, B: 'a> RawDataSubst for ViewRepr<&'a A> { +impl<'a, A: 'a, B: 'a> RawDataSubst for ViewRepr<&'a A> +{ type Output = ViewRepr<&'a B>; - unsafe fn data_subst(self) -> Self::Output { + unsafe fn data_subst(self) -> Self::Output + { ViewRepr::new() } } -impl<'a, A: 'a, B: 'a> RawDataSubst for ViewRepr<&'a mut A> { +impl<'a, A: 'a, B: 'a> RawDataSubst for ViewRepr<&'a mut A> +{ type Output = ViewRepr<&'a mut B>; - unsafe fn data_subst(self) -> Self::Output { + unsafe fn data_subst(self) -> Self::Output + { ViewRepr::new() } } -impl<'a, A: 'a, B: 'a> RawDataSubst for CowRepr<'a, A> { +impl<'a, A: 'a, B: 'a> RawDataSubst for CowRepr<'a, A> +{ type Output = CowRepr<'a, B>; - unsafe fn data_subst(self) -> Self::Output { + 
unsafe fn data_subst(self) -> Self::Output + { match self { CowRepr::View(view) => CowRepr::View(view.data_subst()), CowRepr::Owned(owned) => CowRepr::Owned(owned.data_subst()), diff --git a/src/dimension/axes.rs b/src/dimension/axes.rs index f83a38e34..925b257a7 100644 --- a/src/dimension/axes.rs +++ b/src/dimension/axes.rs @@ -2,7 +2,8 @@ use crate::{Axis, Dimension, Ix, Ixs}; /// Create a new Axes iterator pub(crate) fn axes_of<'a, D>(d: &'a D, strides: &'a D) -> Axes<'a, D> -where D: Dimension { +where D: Dimension +{ Axes { dim: d, strides, @@ -36,7 +37,8 @@ where D: Dimension { /// assert_eq!(largest_axis.len, 5); /// ``` #[derive(Debug)] -pub struct Axes<'a, D> { +pub struct Axes<'a, D> +{ dim: &'a D, strides: &'a D, start: usize, @@ -45,7 +47,8 @@ pub struct Axes<'a, D> { /// Description of the axis, its length and its stride. #[derive(Debug)] -pub struct AxisDescription { +pub struct AxisDescription +{ /// Axis identifier (index) pub axis: Axis, /// Length in count of elements of the current axis @@ -59,23 +62,27 @@ copy_and_clone!(AxisDescription); // AxisDescription can't really be empty // https://github.com/rust-ndarray/ndarray/pull/642#discussion_r296051702 #[allow(clippy::len_without_is_empty)] -impl AxisDescription { +impl AxisDescription +{ /// Return axis #[deprecated(note = "Use .axis field instead", since = "0.15.0")] #[inline(always)] - pub fn axis(self) -> Axis { + pub fn axis(self) -> Axis + { self.axis } /// Return length #[deprecated(note = "Use .len field instead", since = "0.15.0")] #[inline(always)] - pub fn len(self) -> Ix { + pub fn len(self) -> Ix + { self.len } /// Return stride #[deprecated(note = "Use .stride field instead", since = "0.15.0")] #[inline(always)] - pub fn stride(self) -> Ixs { + pub fn stride(self) -> Ixs + { self.stride } } @@ -88,7 +95,8 @@ where D: Dimension /// Description of the axis, its length and its stride. 
type Item = AxisDescription; - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { if self.start < self.end { let i = self.start.post_inc(); Some(AxisDescription { @@ -102,7 +110,8 @@ where D: Dimension } fn fold(self, init: B, f: F) -> B - where F: FnMut(B, AxisDescription) -> B { + where F: FnMut(B, AxisDescription) -> B + { (self.start..self.end) .map(move |i| AxisDescription { axis: Axis(i), @@ -112,7 +121,8 @@ where D: Dimension .fold(init, f) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { let len = self.end - self.start; (len, Some(len)) } @@ -121,7 +131,8 @@ where D: Dimension impl<'a, D> DoubleEndedIterator for Axes<'a, D> where D: Dimension { - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option + { if self.start < self.end { let i = self.end.pre_dec(); Some(AxisDescription { @@ -135,20 +146,24 @@ where D: Dimension } } -trait IncOps: Copy { +trait IncOps: Copy +{ fn post_inc(&mut self) -> Self; fn pre_dec(&mut self) -> Self; } -impl IncOps for usize { +impl IncOps for usize +{ #[inline(always)] - fn post_inc(&mut self) -> Self { + fn post_inc(&mut self) -> Self + { let x = *self; *self += 1; x } #[inline(always)] - fn pre_dec(&mut self) -> Self { + fn pre_dec(&mut self) -> Self + { *self -= 1; *self } diff --git a/src/dimension/axis.rs b/src/dimension/axis.rs index 611c62b31..8c896f6b7 100644 --- a/src/dimension/axis.rs +++ b/src/dimension/axis.rs @@ -26,10 +26,12 @@ #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Axis(pub usize); -impl Axis { +impl Axis +{ /// Return the index of the axis. 
#[inline(always)] - pub fn index(self) -> usize { + pub fn index(self) -> usize + { self.0 } } diff --git a/src/dimension/broadcast.rs b/src/dimension/broadcast.rs index b2aa886a9..d277cfea2 100644 --- a/src/dimension/broadcast.rs +++ b/src/dimension/broadcast.rs @@ -34,7 +34,8 @@ where Ok(out) } -pub trait DimMax { +pub trait DimMax +{ /// The resulting dimension type after broadcasting. type Output: Dimension; } @@ -42,7 +43,8 @@ pub trait DimMax { /// Dimensions of the same type remain unchanged when co_broadcast. /// So you can directly use D as the resulting type. /// (Instead of >::BroadcastOutput) -impl DimMax for D { +impl DimMax for D +{ type Output = D; } @@ -89,12 +91,14 @@ impl_broadcast_distinct_fixed!(Ix6, IxDyn); #[cfg(test)] #[cfg(feature = "std")] -mod tests { +mod tests +{ use super::co_broadcast; use crate::{Dim, DimMax, Dimension, ErrorKind, Ix0, IxDynImpl, ShapeError}; #[test] - fn test_broadcast_shape() { + fn test_broadcast_shape() + { fn test_co(d1: &D1, d2: &D2, r: Result<>::Output, ShapeError>) where D1: Dimension + DimMax, diff --git a/src/dimension/conversion.rs b/src/dimension/conversion.rs index d4ca00810..0cf2e1296 100644 --- a/src/dimension/conversion.rs +++ b/src/dimension/conversion.rs @@ -40,15 +40,18 @@ macro_rules! index_item { } /// Argument conversion a dimension. 
-pub trait IntoDimension { +pub trait IntoDimension +{ type Dim: Dimension; fn into_dimension(self) -> Self::Dim; } -impl IntoDimension for Ix { +impl IntoDimension for Ix +{ type Dim = Ix1; #[inline(always)] - fn into_dimension(self) -> Ix1 { + fn into_dimension(self) -> Ix1 + { Ix1(self) } } @@ -58,28 +61,34 @@ where D: Dimension { type Dim = D; #[inline(always)] - fn into_dimension(self) -> Self { + fn into_dimension(self) -> Self + { self } } -impl IntoDimension for IxDynImpl { +impl IntoDimension for IxDynImpl +{ type Dim = IxDyn; #[inline(always)] - fn into_dimension(self) -> Self::Dim { + fn into_dimension(self) -> Self::Dim + { Dim::new(self) } } -impl IntoDimension for Vec { +impl IntoDimension for Vec +{ type Dim = IxDyn; #[inline(always)] - fn into_dimension(self) -> Self::Dim { + fn into_dimension(self) -> Self::Dim + { Dim::new(IxDynImpl::from(self)) } } -pub trait Convert { +pub trait Convert +{ type To; fn convert(self) -> Self::To; } diff --git a/src/dimension/dim.rs b/src/dimension/dim.rs index ffc6ccbbd..96e433bb3 100644 --- a/src/dimension/dim.rs +++ b/src/dimension/dim.rs @@ -35,21 +35,26 @@ use std::fmt; /// assert_eq!(array.raw_dim(), Dim([3, 2])); /// ``` #[derive(Copy, Clone, PartialEq, Eq, Hash, Default)] -pub struct Dim { +pub struct Dim +{ index: I, } -impl Dim { +impl Dim +{ /// Private constructor and accessors for Dim - pub(crate) const fn new(index: I) -> Dim { + pub(crate) const fn new(index: I) -> Dim + { Dim { index } } #[inline(always)] - pub(crate) fn ix(&self) -> &I { + pub(crate) fn ix(&self) -> &I + { &self.index } #[inline(always)] - pub(crate) fn ixm(&mut self) -> &mut I { + pub(crate) fn ixm(&mut self) -> &mut I + { &mut self.index } } @@ -57,14 +62,16 @@ impl Dim { /// Create a new dimension value. 
#[allow(non_snake_case)] pub fn Dim(index: T) -> T::Dim -where T: IntoDimension { +where T: IntoDimension +{ index.into_dimension() } impl PartialEq for Dim where I: PartialEq { - fn eq(&self, rhs: &I) -> bool { + fn eq(&self, rhs: &I) -> bool + { self.index == *rhs } } @@ -72,7 +79,8 @@ where I: PartialEq impl fmt::Debug for Dim where I: fmt::Debug { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { write!(f, "{:?}", self.index) } } diff --git a/src/dimension/dimension_trait.rs b/src/dimension/dimension_trait.rs index 4f3b82e52..3544a7f3c 100644 --- a/src/dimension/dimension_trait.rs +++ b/src/dimension/dimension_trait.rs @@ -83,12 +83,14 @@ pub trait Dimension: fn into_pattern(self) -> Self::Pattern; /// Compute the size of the dimension (number of elements) - fn size(&self) -> usize { + fn size(&self) -> usize + { self.slice().iter().product() } /// Compute the size while checking for overflow. - fn size_checked(&self) -> Option { + fn size_checked(&self) -> Option + { self.slice() .iter() .try_fold(1_usize, |s, &a| s.checked_mul(a)) @@ -101,17 +103,20 @@ pub trait Dimension: fn slice_mut(&mut self) -> &mut [Ix]; /// Borrow as a read-only array view. - fn as_array_view(&self) -> ArrayView1<'_, Ix> { + fn as_array_view(&self) -> ArrayView1<'_, Ix> + { ArrayView1::from(self.slice()) } /// Borrow as a read-write array view. - fn as_array_view_mut(&mut self) -> ArrayViewMut1<'_, Ix> { + fn as_array_view_mut(&mut self) -> ArrayViewMut1<'_, Ix> + { ArrayViewMut1::from(self.slice_mut()) } #[doc(hidden)] - fn equal(&self, rhs: &Self) -> bool { + fn equal(&self, rhs: &Self) -> bool + { self.slice() == rhs.slice() } @@ -120,7 +125,8 @@ pub trait Dimension: /// If the array is non-empty, the strides result in contiguous layout; if /// the array is empty, the strides are all zeros. 
#[doc(hidden)] - fn default_strides(&self) -> Self { + fn default_strides(&self) -> Self + { // Compute default array strides // Shape (a, b, c) => Give strides (b * c, c, 1) let mut strides = Self::zeros(self.ndim()); @@ -145,7 +151,8 @@ pub trait Dimension: /// If the array is non-empty, the strides result in contiguous layout; if /// the array is empty, the strides are all zeros. #[doc(hidden)] - fn fortran_strides(&self) -> Self { + fn fortran_strides(&self) -> Self + { // Compute fortran array strides // Shape (a, b, c) => Give strides (1, a, a * b) let mut strides = Self::zeros(self.ndim()); @@ -175,7 +182,8 @@ pub trait Dimension: #[doc(hidden)] #[inline] - fn first_index(&self) -> Option { + fn first_index(&self) -> Option + { for ax in self.slice().iter() { if *ax == 0 { return None; @@ -189,7 +197,8 @@ pub trait Dimension: /// or None if there are no more. // FIXME: use &Self for index or even &mut? #[inline] - fn next_for(&self, index: Self) -> Option { + fn next_for(&self, index: Self) -> Option + { let mut index = index; let mut done = false; for (&dim, ix) in zip(self.slice(), index.slice_mut()).rev() { @@ -214,7 +223,8 @@ pub trait Dimension: /// /// Next in f-order #[inline] - fn next_for_f(&self, index: &mut Self) -> bool { + fn next_for_f(&self, index: &mut Self) -> bool + { let mut end_iteration = true; for (&dim, ix) in zip(self.slice(), index.slice_mut()) { *ix += 1; @@ -237,7 +247,8 @@ pub trait Dimension: /// Note: Returns `false` if any of the ndims don't match. #[doc(hidden)] fn strides_equivalent(&self, strides1: &Self, strides2: &D) -> bool - where D: Dimension { + where D: Dimension + { let shape_ndim = self.ndim(); shape_ndim == strides1.ndim() && shape_ndim == strides2.ndim() @@ -247,7 +258,8 @@ pub trait Dimension: #[doc(hidden)] /// Return stride offset for index. 
- fn stride_offset(index: &Self, strides: &Self) -> isize { + fn stride_offset(index: &Self, strides: &Self) -> isize + { let mut offset = 0; for (&i, &s) in izip!(index.slice(), strides.slice()) { offset += stride_offset(i, s); @@ -257,12 +269,14 @@ pub trait Dimension: #[doc(hidden)] /// Return stride offset for this dimension and index. - fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option { + fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option + { stride_offset_checked(self.slice(), strides.slice(), index.slice()) } #[doc(hidden)] - fn last_elem(&self) -> usize { + fn last_elem(&self) -> usize + { if self.ndim() == 0 { 0 } else { @@ -271,13 +285,15 @@ pub trait Dimension: } #[doc(hidden)] - fn set_last_elem(&mut self, i: usize) { + fn set_last_elem(&mut self, i: usize) + { let nd = self.ndim(); self.slice_mut()[nd - 1] = i; } #[doc(hidden)] - fn is_contiguous(dim: &Self, strides: &Self) -> bool { + fn is_contiguous(dim: &Self, strides: &Self) -> bool + { let defaults = dim.default_strides(); if strides.equal(&defaults) { return true; @@ -309,7 +325,8 @@ pub trait Dimension: /// /// Assumes that no stride value appears twice. 
#[doc(hidden)] - fn _fastest_varying_stride_order(&self) -> Self { + fn _fastest_varying_stride_order(&self) -> Self + { let mut indices = self.clone(); for (i, elt) in enumerate(indices.slice_mut()) { *elt = i; @@ -324,7 +341,8 @@ pub trait Dimension: /// Compute the minimum stride axis (absolute value), under the constraint /// that the length of the axis is > 1; #[doc(hidden)] - fn min_stride_axis(&self, strides: &Self) -> Axis { + fn min_stride_axis(&self, strides: &Self) -> Axis + { let n = match self.ndim() { 0 => panic!("min_stride_axis: Array must have ndim > 0"), 1 => return Axis(0), @@ -339,7 +357,8 @@ pub trait Dimension: /// Compute the maximum stride axis (absolute value), under the constraint /// that the length of the axis is > 1; #[doc(hidden)] - fn max_stride_axis(&self, strides: &Self) -> Axis { + fn max_stride_axis(&self, strides: &Self) -> Axis + { match self.ndim() { 0 => panic!("max_stride_axis: Array must have ndim > 0"), 1 => return Axis(0), @@ -352,12 +371,14 @@ pub trait Dimension: } /// Convert the dimensional into a dynamic dimensional (IxDyn). - fn into_dyn(self) -> IxDyn { + fn into_dyn(self) -> IxDyn + { IxDyn(self.slice()) } #[doc(hidden)] - fn from_dimension(d: &D2) -> Option { + fn from_dimension(d: &D2) -> Option + { let mut s = Self::default(); if s.ndim() == d.ndim() { for i in 0..d.ndim() { @@ -393,76 +414,91 @@ macro_rules! 
impl_insert_axis_array( ); ); -impl Dimension for Dim<[Ix; 0]> { +impl Dimension for Dim<[Ix; 0]> +{ const NDIM: Option = Some(0); type Pattern = (); type Smaller = Self; type Larger = Ix1; // empty product is 1 -> size is 1 #[inline] - fn ndim(&self) -> usize { + fn ndim(&self) -> usize + { 0 } #[inline] - fn slice(&self) -> &[Ix] { + fn slice(&self) -> &[Ix] + { &[] } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { + fn slice_mut(&mut self) -> &mut [Ix] + { &mut [] } #[inline] - fn _fastest_varying_stride_order(&self) -> Self { + fn _fastest_varying_stride_order(&self) -> Self + { Ix0() } #[inline] fn into_pattern(self) -> Self::Pattern {} #[inline] - fn zeros(ndim: usize) -> Self { + fn zeros(ndim: usize) -> Self + { assert_eq!(ndim, 0); Self::default() } #[inline] - fn next_for(&self, _index: Self) -> Option { + fn next_for(&self, _index: Self) -> Option + { None } impl_insert_axis_array!(0); #[inline] - fn try_remove_axis(&self, _ignore: Axis) -> Self::Smaller { + fn try_remove_axis(&self, _ignore: Axis) -> Self::Smaller + { *self } private_impl! 
{} } -impl Dimension for Dim<[Ix; 1]> { +impl Dimension for Dim<[Ix; 1]> +{ const NDIM: Option = Some(1); type Pattern = Ix; type Smaller = Ix0; type Larger = Ix2; #[inline] - fn ndim(&self) -> usize { + fn ndim(&self) -> usize + { 1 } #[inline] - fn slice(&self) -> &[Ix] { + fn slice(&self) -> &[Ix] + { self.ix() } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { + fn slice_mut(&mut self) -> &mut [Ix] + { self.ixm() } #[inline] - fn into_pattern(self) -> Self::Pattern { + fn into_pattern(self) -> Self::Pattern + { get!(&self, 0) } #[inline] - fn zeros(ndim: usize) -> Self { + fn zeros(ndim: usize) -> Self + { assert_eq!(ndim, 1); Self::default() } #[inline] - fn next_for(&self, mut index: Self) -> Option { + fn next_for(&self, mut index: Self) -> Option + { getm!(index, 0) += 1; if get!(&index, 0) < get!(self, 0) { Some(index) @@ -472,21 +508,25 @@ impl Dimension for Dim<[Ix; 1]> { } #[inline] - fn equal(&self, rhs: &Self) -> bool { + fn equal(&self, rhs: &Self) -> bool + { get!(self, 0) == get!(rhs, 0) } #[inline] - fn size(&self) -> usize { + fn size(&self) -> usize + { get!(self, 0) } #[inline] - fn size_checked(&self) -> Option { + fn size_checked(&self) -> Option + { Some(get!(self, 0)) } #[inline] - fn default_strides(&self) -> Self { + fn default_strides(&self) -> Self + { if get!(self, 0) == 0 { Ix1(0) } else { @@ -495,22 +535,26 @@ impl Dimension for Dim<[Ix; 1]> { } #[inline] - fn _fastest_varying_stride_order(&self) -> Self { + fn _fastest_varying_stride_order(&self) -> Self + { Ix1(0) } #[inline(always)] - fn min_stride_axis(&self, _: &Self) -> Axis { + fn min_stride_axis(&self, _: &Self) -> Axis + { Axis(0) } #[inline(always)] - fn max_stride_axis(&self, _: &Self) -> Axis { + fn max_stride_axis(&self, _: &Self) -> Axis + { Axis(0) } #[inline] - fn first_index(&self) -> Option { + fn first_index(&self) -> Option + { if get!(self, 0) != 0 { Some(Ix1(0)) } else { @@ -520,13 +564,15 @@ impl Dimension for Dim<[Ix; 1]> { /// Self is an index, return the 
stride offset #[inline(always)] - fn stride_offset(index: &Self, stride: &Self) -> isize { + fn stride_offset(index: &Self, stride: &Self) -> isize + { stride_offset(get!(index, 0), get!(stride, 0)) } /// Return stride offset for this dimension and index. #[inline] - fn stride_offset_checked(&self, stride: &Self, index: &Self) -> Option { + fn stride_offset_checked(&self, stride: &Self, index: &Self) -> Option + { if get!(index, 0) < get!(self, 0) { Some(stride_offset(get!(index, 0), get!(stride, 0))) } else { @@ -535,11 +581,13 @@ impl Dimension for Dim<[Ix; 1]> { } impl_insert_axis_array!(1); #[inline] - fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { + fn try_remove_axis(&self, axis: Axis) -> Self::Smaller + { self.remove_axis(axis) } - fn from_dimension(d: &D2) -> Option { + fn from_dimension(d: &D2) -> Option + { if 1 == d.ndim() { Some(Ix1(d[0])) } else { @@ -549,34 +597,41 @@ impl Dimension for Dim<[Ix; 1]> { private_impl! {} } -impl Dimension for Dim<[Ix; 2]> { +impl Dimension for Dim<[Ix; 2]> +{ const NDIM: Option = Some(2); type Pattern = (Ix, Ix); type Smaller = Ix1; type Larger = Ix3; #[inline] - fn ndim(&self) -> usize { + fn ndim(&self) -> usize + { 2 } #[inline] - fn into_pattern(self) -> Self::Pattern { + fn into_pattern(self) -> Self::Pattern + { self.ix().convert() } #[inline] - fn slice(&self) -> &[Ix] { + fn slice(&self) -> &[Ix] + { self.ix() } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { + fn slice_mut(&mut self) -> &mut [Ix] + { self.ixm() } #[inline] - fn zeros(ndim: usize) -> Self { + fn zeros(ndim: usize) -> Self + { assert_eq!(ndim, 2); Self::default() } #[inline] - fn next_for(&self, index: Self) -> Option { + fn next_for(&self, index: Self) -> Option + { let mut i = get!(&index, 0); let mut j = get!(&index, 1); let imax = get!(self, 0); @@ -593,34 +648,40 @@ impl Dimension for Dim<[Ix; 2]> { } #[inline] - fn equal(&self, rhs: &Self) -> bool { + fn equal(&self, rhs: &Self) -> bool + { get!(self, 0) == get!(rhs, 0) && 
get!(self, 1) == get!(rhs, 1) } #[inline] - fn size(&self) -> usize { + fn size(&self) -> usize + { get!(self, 0) * get!(self, 1) } #[inline] - fn size_checked(&self) -> Option { + fn size_checked(&self) -> Option + { let m = get!(self, 0); let n = get!(self, 1); m.checked_mul(n) } #[inline] - fn last_elem(&self) -> usize { + fn last_elem(&self) -> usize + { get!(self, 1) } #[inline] - fn set_last_elem(&mut self, i: usize) { + fn set_last_elem(&mut self, i: usize) + { getm!(self, 1) = i; } #[inline] - fn default_strides(&self) -> Self { + fn default_strides(&self) -> Self + { let m = get!(self, 0); let n = get!(self, 1); if m == 0 || n == 0 { @@ -630,7 +691,8 @@ impl Dimension for Dim<[Ix; 2]> { } } #[inline] - fn fortran_strides(&self) -> Self { + fn fortran_strides(&self) -> Self + { let m = get!(self, 0); let n = get!(self, 1); if m == 0 || n == 0 { @@ -641,7 +703,8 @@ impl Dimension for Dim<[Ix; 2]> { } #[inline] - fn _fastest_varying_stride_order(&self) -> Self { + fn _fastest_varying_stride_order(&self) -> Self + { if (get!(self, 0) as Ixs).abs() <= (get!(self, 1) as Ixs).abs() { Ix2(0, 1) } else { @@ -650,7 +713,8 @@ impl Dimension for Dim<[Ix; 2]> { } #[inline] - fn min_stride_axis(&self, strides: &Self) -> Axis { + fn min_stride_axis(&self, strides: &Self) -> Axis + { let s = get!(strides, 0) as Ixs; let t = get!(strides, 1) as Ixs; if s.abs() < t.abs() { @@ -661,7 +725,8 @@ impl Dimension for Dim<[Ix; 2]> { } #[inline] - fn first_index(&self) -> Option { + fn first_index(&self) -> Option + { let m = get!(self, 0); let n = get!(self, 1); if m != 0 && n != 0 { @@ -673,7 +738,8 @@ impl Dimension for Dim<[Ix; 2]> { /// Self is an index, return the stride offset #[inline(always)] - fn stride_offset(index: &Self, strides: &Self) -> isize { + fn stride_offset(index: &Self, strides: &Self) -> isize + { let i = get!(index, 0); let j = get!(index, 1); let s = get!(strides, 0); @@ -683,7 +749,8 @@ impl Dimension for Dim<[Ix; 2]> { /// Return stride offset for this 
dimension and index. #[inline] - fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option { + fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option + { let m = get!(self, 0); let n = get!(self, 1); let i = get!(index, 0); @@ -698,36 +765,43 @@ impl Dimension for Dim<[Ix; 2]> { } impl_insert_axis_array!(2); #[inline] - fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { + fn try_remove_axis(&self, axis: Axis) -> Self::Smaller + { self.remove_axis(axis) } private_impl! {} } -impl Dimension for Dim<[Ix; 3]> { +impl Dimension for Dim<[Ix; 3]> +{ const NDIM: Option = Some(3); type Pattern = (Ix, Ix, Ix); type Smaller = Ix2; type Larger = Ix4; #[inline] - fn ndim(&self) -> usize { + fn ndim(&self) -> usize + { 3 } #[inline] - fn into_pattern(self) -> Self::Pattern { + fn into_pattern(self) -> Self::Pattern + { self.ix().convert() } #[inline] - fn slice(&self) -> &[Ix] { + fn slice(&self) -> &[Ix] + { self.ix() } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { + fn slice_mut(&mut self) -> &mut [Ix] + { self.ixm() } #[inline] - fn size(&self) -> usize { + fn size(&self) -> usize + { let m = get!(self, 0); let n = get!(self, 1); let o = get!(self, 2); @@ -735,13 +809,15 @@ impl Dimension for Dim<[Ix; 3]> { } #[inline] - fn zeros(ndim: usize) -> Self { + fn zeros(ndim: usize) -> Self + { assert_eq!(ndim, 3); Self::default() } #[inline] - fn next_for(&self, index: Self) -> Option { + fn next_for(&self, index: Self) -> Option + { let mut i = get!(&index, 0); let mut j = get!(&index, 1); let mut k = get!(&index, 2); @@ -765,7 +841,8 @@ impl Dimension for Dim<[Ix; 3]> { /// Self is an index, return the stride offset #[inline] - fn stride_offset(index: &Self, strides: &Self) -> isize { + fn stride_offset(index: &Self, strides: &Self) -> isize + { let i = get!(index, 0); let j = get!(index, 1); let k = get!(index, 2); @@ -777,7 +854,8 @@ impl Dimension for Dim<[Ix; 3]> { /// Return stride offset for this dimension and index. 
#[inline] - fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option { + fn stride_offset_checked(&self, strides: &Self, index: &Self) -> Option + { let m = get!(self, 0); let n = get!(self, 1); let l = get!(self, 2); @@ -795,7 +873,8 @@ impl Dimension for Dim<[Ix; 3]> { } #[inline] - fn _fastest_varying_stride_order(&self) -> Self { + fn _fastest_varying_stride_order(&self) -> Self + { let mut stride = *self; let mut order = Ix3(0, 1, 2); macro_rules! swap { @@ -817,7 +896,8 @@ impl Dimension for Dim<[Ix; 3]> { } impl_insert_axis_array!(3); #[inline] - fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { + fn try_remove_axis(&self, axis: Axis) -> Self::Smaller + { self.remove_axis(axis) } private_impl! {} @@ -874,41 +954,49 @@ large_dim!(6, Ix6, (Ix, Ix, Ix, Ix, Ix, Ix), IxDyn, { /// IxDyn is a "dynamic" index, pretty hard to use when indexing, /// and memory wasteful, but it allows an arbitrary and dynamic number of axes. -impl Dimension for IxDyn { +impl Dimension for IxDyn +{ const NDIM: Option = None; type Pattern = Self; type Smaller = Self; type Larger = Self; #[inline] - fn ndim(&self) -> usize { + fn ndim(&self) -> usize + { self.ix().len() } #[inline] - fn slice(&self) -> &[Ix] { + fn slice(&self) -> &[Ix] + { self.ix() } #[inline] - fn slice_mut(&mut self) -> &mut [Ix] { + fn slice_mut(&mut self) -> &mut [Ix] + { self.ixm() } #[inline] - fn into_pattern(self) -> Self::Pattern { + fn into_pattern(self) -> Self::Pattern + { self } #[inline] - fn zeros(ndim: usize) -> Self { + fn zeros(ndim: usize) -> Self + { IxDyn::zeros(ndim) } #[inline] - fn insert_axis(&self, axis: Axis) -> Self::Larger { + fn insert_axis(&self, axis: Axis) -> Self::Larger + { debug_assert!(axis.index() <= self.ndim()); Dim::new(self.ix().insert(axis.index())) } #[inline] - fn try_remove_axis(&self, axis: Axis) -> Self::Smaller { + fn try_remove_axis(&self, axis: Axis) -> Self::Smaller + { if self.ndim() > 0 { self.remove_axis(axis) } else { @@ -916,26 +1004,32 @@ 
impl Dimension for IxDyn { } } - fn from_dimension(d: &D2) -> Option { + fn from_dimension(d: &D2) -> Option + { Some(IxDyn(d.slice())) } - fn into_dyn(self) -> IxDyn { + fn into_dyn(self) -> IxDyn + { self } private_impl! {} } -impl Index for Dim { +impl Index for Dim +{ type Output = >::Output; - fn index(&self, index: usize) -> &Self::Output { + fn index(&self, index: usize) -> &Self::Output + { &self.ix()[index] } } -impl IndexMut for Dim { - fn index_mut(&mut self, index: usize) -> &mut Self::Output { +impl IndexMut for Dim +{ + fn index_mut(&mut self, index: usize) -> &mut Self::Output + { &mut self.ixm()[index] } } diff --git a/src/dimension/dynindeximpl.rs b/src/dimension/dynindeximpl.rs index e25fa7717..60aeacd80 100644 --- a/src/dimension/dynindeximpl.rs +++ b/src/dimension/dynindeximpl.rs @@ -10,14 +10,17 @@ const CAP: usize = 4; /// T is usize or isize #[derive(Debug)] -enum IxDynRepr { +enum IxDynRepr +{ Inline(u32, [T; CAP]), Alloc(Box<[T]>), } -impl Deref for IxDynRepr { +impl Deref for IxDynRepr +{ type Target = [T]; - fn deref(&self) -> &[T] { + fn deref(&self) -> &[T] + { match *self { IxDynRepr::Inline(len, ref ar) => { debug_assert!(len as usize <= ar.len()); @@ -28,8 +31,10 @@ impl Deref for IxDynRepr { } } -impl DerefMut for IxDynRepr { - fn deref_mut(&mut self) -> &mut [T] { +impl DerefMut for IxDynRepr +{ + fn deref_mut(&mut self) -> &mut [T] + { match *self { IxDynRepr::Inline(len, ref mut ar) => { debug_assert!(len as usize <= ar.len()); @@ -41,16 +46,20 @@ impl DerefMut for IxDynRepr { } /// The default is equivalent to `Self::from(&[0])`. 
-impl Default for IxDynRepr { - fn default() -> Self { +impl Default for IxDynRepr +{ + fn default() -> Self + { Self::copy_from(&[0]) } } use num_traits::Zero; -impl IxDynRepr { - pub fn copy_from(x: &[T]) -> Self { +impl IxDynRepr +{ + pub fn copy_from(x: &[T]) -> Self + { if x.len() <= CAP { let mut arr = [T::zero(); CAP]; arr[..x.len()].copy_from_slice(x); @@ -61,9 +70,11 @@ impl IxDynRepr { } } -impl IxDynRepr { +impl IxDynRepr +{ // make an Inline or Alloc version as appropriate - fn from_vec_auto(v: Vec) -> Self { + fn from_vec_auto(v: Vec) -> Self + { if v.len() <= CAP { Self::copy_from(&v) } else { @@ -72,18 +83,23 @@ impl IxDynRepr { } } -impl IxDynRepr { - fn from_vec(v: Vec) -> Self { +impl IxDynRepr +{ + fn from_vec(v: Vec) -> Self + { IxDynRepr::Alloc(v.into_boxed_slice()) } - fn from(x: &[T]) -> Self { + fn from(x: &[T]) -> Self + { Self::from_vec(x.to_vec()) } } -impl Clone for IxDynRepr { - fn clone(&self) -> Self { +impl Clone for IxDynRepr +{ + fn clone(&self) -> Self + { match *self { IxDynRepr::Inline(len, arr) => IxDynRepr::Inline(len, arr), _ => Self::from(&self[..]), @@ -93,8 +109,10 @@ impl Clone for IxDynRepr { impl Eq for IxDynRepr {} -impl PartialEq for IxDynRepr { - fn eq(&self, rhs: &Self) -> bool { +impl PartialEq for IxDynRepr +{ + fn eq(&self, rhs: &Self) -> bool + { match (self, rhs) { (&IxDynRepr::Inline(slen, ref sarr), &IxDynRepr::Inline(rlen, ref rarr)) => slen == rlen @@ -106,8 +124,10 @@ impl PartialEq for IxDynRepr { } } -impl Hash for IxDynRepr { - fn hash(&self, state: &mut H) { +impl Hash for IxDynRepr +{ + fn hash(&self, state: &mut H) + { Hash::hash(&self[..], state) } } @@ -120,8 +140,10 @@ impl Hash for IxDynRepr { #[derive(Debug, Clone, PartialEq, Eq, Hash, Default)] pub struct IxDynImpl(IxDynRepr); -impl IxDynImpl { - pub(crate) fn insert(&self, i: usize) -> Self { +impl IxDynImpl +{ + pub(crate) fn insert(&self, i: usize) -> Self + { let len = self.len(); debug_assert!(i <= len); IxDynImpl(if len < CAP { @@ -138,7 
+160,8 @@ impl IxDynImpl { }) } - fn remove(&self, i: usize) -> Self { + fn remove(&self, i: usize) -> Self + { IxDynImpl(match self.0 { IxDynRepr::Inline(0, _) => IxDynRepr::Inline(0, [0; CAP]), IxDynRepr::Inline(1, _) => IxDynRepr::Inline(0, [0; CAP]), @@ -159,16 +182,20 @@ impl IxDynImpl { } } -impl<'a> From<&'a [Ix]> for IxDynImpl { +impl<'a> From<&'a [Ix]> for IxDynImpl +{ #[inline] - fn from(ix: &'a [Ix]) -> Self { + fn from(ix: &'a [Ix]) -> Self + { IxDynImpl(IxDynRepr::copy_from(ix)) } } -impl From> for IxDynImpl { +impl From> for IxDynImpl +{ #[inline] - fn from(ix: Vec) -> Self { + fn from(ix: Vec) -> Self + { IxDynImpl(IxDynRepr::from_vec_auto(ix)) } } @@ -177,7 +204,8 @@ impl Index for IxDynImpl where [Ix]: Index { type Output = <[Ix] as Index>::Output; - fn index(&self, index: J) -> &Self::Output { + fn index(&self, index: J) -> &Self::Output + { &self.0[index] } } @@ -185,46 +213,57 @@ where [Ix]: Index impl IndexMut for IxDynImpl where [Ix]: IndexMut { - fn index_mut(&mut self, index: J) -> &mut Self::Output { + fn index_mut(&mut self, index: J) -> &mut Self::Output + { &mut self.0[index] } } -impl Deref for IxDynImpl { +impl Deref for IxDynImpl +{ type Target = [Ix]; #[inline] - fn deref(&self) -> &[Ix] { + fn deref(&self) -> &[Ix] + { &self.0 } } -impl DerefMut for IxDynImpl { +impl DerefMut for IxDynImpl +{ #[inline] - fn deref_mut(&mut self) -> &mut [Ix] { + fn deref_mut(&mut self) -> &mut [Ix] + { &mut self.0 } } -impl<'a> IntoIterator for &'a IxDynImpl { +impl<'a> IntoIterator for &'a IxDynImpl +{ type Item = &'a Ix; type IntoIter = <&'a [Ix] as IntoIterator>::IntoIter; #[inline] - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { self[..].iter() } } -impl RemoveAxis for Dim { - fn remove_axis(&self, axis: Axis) -> Self { +impl RemoveAxis for Dim +{ + fn remove_axis(&self, axis: Axis) -> Self + { debug_assert!(axis.index() < self.ndim()); Dim::new(self.ix().remove(axis.index())) } } -impl IxDyn { +impl IxDyn +{ 
/// Create a new dimension value with `n` axes, all zeros #[inline] - pub fn zeros(n: usize) -> IxDyn { + pub fn zeros(n: usize) -> IxDyn + { const ZEROS: &[usize] = &[0; 4]; if n <= ZEROS.len() { Dim(&ZEROS[..n]) diff --git a/src/dimension/mod.rs b/src/dimension/mod.rs index 4d1349b57..e1563613e 100644 --- a/src/dimension/mod.rs +++ b/src/dimension/mod.rs @@ -46,7 +46,8 @@ mod sequence; /// Calculate offset from `Ix` stride converting sign properly #[inline(always)] -pub fn stride_offset(n: Ix, stride: Ix) -> isize { +pub fn stride_offset(n: Ix, stride: Ix) -> isize +{ (n as isize) * (stride as Ixs) } @@ -55,7 +56,8 @@ pub fn stride_offset(n: Ix, stride: Ix) -> isize { /// There is overlap if, when iterating through the dimensions in order of /// increasing stride, the current stride is less than or equal to the maximum /// possible offset along the preceding axes. (Axes of length ≤1 are ignored.) -pub fn dim_stride_overlap(dim: &D, strides: &D) -> bool { +pub fn dim_stride_overlap(dim: &D, strides: &D) -> bool +{ let order = strides._fastest_varying_stride_order(); let mut sum_prev_offsets = 0; for &index in order.slice() { @@ -84,7 +86,8 @@ pub fn dim_stride_overlap(dim: &D, strides: &D) -> bool { /// are met to construct an array from the data buffer, `dim`, and `strides`. /// (The data buffer being a slice or `Vec` guarantees that it contains no more /// than `isize::MAX` bytes.) -pub fn size_of_shape_checked(dim: &D) -> Result { +pub fn size_of_shape_checked(dim: &D) -> Result +{ let size_nonzero = dim .slice() .iter() @@ -124,7 +127,8 @@ pub fn size_of_shape_checked(dim: &D) -> Result /// accessible by moving along all axes does not exceed `isize::MAX`. 
pub(crate) fn can_index_slice_with_strides( data: &[A], dim: &D, strides: &Strides, -) -> Result<(), ShapeError> { +) -> Result<(), ShapeError> +{ if let Strides::Custom(strides) = strides { can_index_slice(data, dim, strides) } else { @@ -132,7 +136,8 @@ pub(crate) fn can_index_slice_with_strides( } } -pub(crate) fn can_index_slice_not_custom(data_len: usize, dim: &D) -> Result<(), ShapeError> { +pub(crate) fn can_index_slice_not_custom(data_len: usize, dim: &D) -> Result<(), ShapeError> +{ // Condition 1. let len = size_of_shape_checked(dim)?; // Condition 2. @@ -157,12 +162,14 @@ pub(crate) fn can_index_slice_not_custom(data_len: usize, dim: &D) /// also implies that the length of any individual axis does not exceed /// `isize::MAX`.) pub fn max_abs_offset_check_overflow(dim: &D, strides: &D) -> Result -where D: Dimension { +where D: Dimension +{ max_abs_offset_check_overflow_impl(mem::size_of::(), dim, strides) } fn max_abs_offset_check_overflow_impl(elem_size: usize, dim: &D, strides: &D) -> Result -where D: Dimension { +where D: Dimension +{ // Condition 1. if dim.ndim() != strides.ndim() { return Err(from_kind(ErrorKind::IncompatibleLayout)); @@ -233,7 +240,8 @@ where D: Dimension { /// allocation. (In other words, the pointer to the first element of the array /// must be computed using `offset_from_low_addr_ptr_to_logical_ptr` so that /// negative strides are correctly handled.) -pub(crate) fn can_index_slice(data: &[A], dim: &D, strides: &D) -> Result<(), ShapeError> { +pub(crate) fn can_index_slice(data: &[A], dim: &D, strides: &D) -> Result<(), ShapeError> +{ // Check conditions 1 and 2 and calculate `max_offset`. 
let max_offset = max_abs_offset_check_overflow::(dim, strides)?; can_index_slice_impl(max_offset, data.len(), dim, strides) @@ -241,7 +249,8 @@ pub(crate) fn can_index_slice(data: &[A], dim: &D, strides: &D) fn can_index_slice_impl( max_offset: usize, data_len: usize, dim: &D, strides: &D, -) -> Result<(), ShapeError> { +) -> Result<(), ShapeError> +{ // Check condition 3. let is_empty = dim.slice().iter().any(|&d| d == 0); if is_empty && max_offset > data_len { @@ -261,7 +270,8 @@ fn can_index_slice_impl( /// Stride offset checked general version (slices) #[inline] -pub fn stride_offset_checked(dim: &[Ix], strides: &[Ix], index: &[Ix]) -> Option { +pub fn stride_offset_checked(dim: &[Ix], strides: &[Ix], index: &[Ix]) -> Option +{ if index.len() != dim.len() { return None; } @@ -277,7 +287,8 @@ pub fn stride_offset_checked(dim: &[Ix], strides: &[Ix], index: &[Ix]) -> Option /// Checks if strides are non-negative. pub fn strides_non_negative(strides: &D) -> Result<(), ShapeError> -where D: Dimension { +where D: Dimension +{ for &stride in strides.slice() { if (stride as isize) < 0 { return Err(from_kind(ErrorKind::Unsupported)); @@ -287,7 +298,8 @@ where D: Dimension { } /// Implementation-specific extensions to `Dimension` -pub trait DimensionExt { +pub trait DimensionExt +{ // note: many extensions go in the main trait if they need to be special- // cased per dimension /// Get the dimension at `axis`. 
@@ -307,24 +319,29 @@ impl DimensionExt for D where D: Dimension { #[inline] - fn axis(&self, axis: Axis) -> Ix { + fn axis(&self, axis: Axis) -> Ix + { self[axis.index()] } #[inline] - fn set_axis(&mut self, axis: Axis, value: Ix) { + fn set_axis(&mut self, axis: Axis, value: Ix) + { self[axis.index()] = value; } } -impl DimensionExt for [Ix] { +impl DimensionExt for [Ix] +{ #[inline] - fn axis(&self, axis: Axis) -> Ix { + fn axis(&self, axis: Axis) -> Ix + { self[axis.index()] } #[inline] - fn set_axis(&mut self, axis: Axis, value: Ix) { + fn set_axis(&mut self, axis: Axis, value: Ix) + { self[axis.index()] = value; } } @@ -335,7 +352,8 @@ impl DimensionExt for [Ix] { /// **Panics** if `index` is larger than the size of the axis #[track_caller] // FIXME: Move to Dimension trait -pub fn do_collapse_axis(dims: &mut D, strides: &D, axis: usize, index: usize) -> isize { +pub fn do_collapse_axis(dims: &mut D, strides: &D, axis: usize, index: usize) -> isize +{ let dim = dims.slice()[axis]; let stride = strides.slice()[axis]; ndassert!( @@ -352,7 +370,8 @@ pub fn do_collapse_axis(dims: &mut D, strides: &D, axis: usize, in /// Compute the equivalent unsigned index given the axis length and signed index. #[inline] -pub fn abs_index(len: Ix, index: Ixs) -> Ix { +pub fn abs_index(len: Ix, index: Ixs) -> Ix +{ if index < 0 { len - (-index as Ix) } else { @@ -366,7 +385,8 @@ pub fn abs_index(len: Ix, index: Ixs) -> Ix { /// /// **Panics** if stride is 0 or if any index is out of bounds. 
#[track_caller] -fn to_abs_slice(axis_len: usize, slice: Slice) -> (usize, usize, isize) { +fn to_abs_slice(axis_len: usize, slice: Slice) -> (usize, usize, isize) +{ let Slice { start, end, step } = slice; let start = abs_index(axis_len, start); let mut end = abs_index(axis_len, end.unwrap_or(axis_len as isize)); @@ -391,7 +411,8 @@ fn to_abs_slice(axis_len: usize, slice: Slice) -> (usize, usize, isize) { /// Returns the offset from the lowest-address element to the logically first /// element. -pub fn offset_from_low_addr_ptr_to_logical_ptr(dim: &D, strides: &D) -> usize { +pub fn offset_from_low_addr_ptr_to_logical_ptr(dim: &D, strides: &D) -> usize +{ let offset = izip!(dim.slice(), strides.slice()).fold(0, |_offset, (&d, &s)| { let s = s as isize; if s < 0 && d > 1 { @@ -408,7 +429,8 @@ pub fn offset_from_low_addr_ptr_to_logical_ptr(dim: &D, strides: & /// /// **Panics** if stride is 0 or if any index is out of bounds. #[track_caller] -pub fn do_slice(dim: &mut usize, stride: &mut usize, slice: Slice) -> isize { +pub fn do_slice(dim: &mut usize, stride: &mut usize, slice: Slice) -> isize +{ let (start, end, step) = to_abs_slice(*dim, slice); let m = end - start; @@ -461,7 +483,8 @@ pub fn do_slice(dim: &mut usize, stride: &mut usize, slice: Slice) -> isize { /// nonnegative. 
/// /// See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm -fn extended_gcd(a: isize, b: isize) -> (isize, (isize, isize)) { +fn extended_gcd(a: isize, b: isize) -> (isize, (isize, isize)) +{ if a == 0 { (b.abs(), (0, b.signum())) } else if b == 0 { @@ -497,7 +520,8 @@ fn extended_gcd(a: isize, b: isize) -> (isize, (isize, isize)) { /// /// See https://en.wikipedia.org/wiki/Diophantine_equation#One_equation /// and https://math.stackexchange.com/questions/1656120#1656138 -fn solve_linear_diophantine_eq(a: isize, b: isize, c: isize) -> Option<(isize, isize)> { +fn solve_linear_diophantine_eq(a: isize, b: isize, c: isize) -> Option<(isize, isize)> +{ debug_assert_ne!(a, 0); debug_assert_ne!(b, 0); let (g, (u, _)) = extended_gcd(a, b); @@ -515,7 +539,8 @@ fn solve_linear_diophantine_eq(a: isize, b: isize, c: isize) -> Option<(isize, i /// consecutive elements (the sign is irrelevant). /// /// **Note** `step1` and `step2` must be nonzero. -fn arith_seq_intersect((min1, max1, step1): (isize, isize, isize), (min2, max2, step2): (isize, isize, isize)) -> bool { +fn arith_seq_intersect((min1, max1, step1): (isize, isize, isize), (min2, max2, step2): (isize, isize, isize)) -> bool +{ debug_assert!(max1 >= min1); debug_assert!(max2 >= min2); debug_assert_eq!((max1 - min1) % step1, 0); @@ -571,7 +596,8 @@ fn arith_seq_intersect((min1, max1, step1): (isize, isize, isize), (min2, max2, /// Returns the minimum and maximum values of the indices (inclusive). /// /// If the slice is empty, then returns `None`, otherwise returns `Some((min, max))`. -fn slice_min_max(axis_len: usize, slice: Slice) -> Option<(usize, usize)> { +fn slice_min_max(axis_len: usize, slice: Slice) -> Option<(usize, usize)> +{ let (start, end, step) = to_abs_slice(axis_len, slice); if start == end { None @@ -583,7 +609,8 @@ fn slice_min_max(axis_len: usize, slice: Slice) -> Option<(usize, usize)> { } /// Returns `true` iff the slices intersect. 
-pub fn slices_intersect(dim: &D, indices1: impl SliceArg, indices2: impl SliceArg) -> bool { +pub fn slices_intersect(dim: &D, indices1: impl SliceArg, indices2: impl SliceArg) -> bool +{ debug_assert_eq!(indices1.in_ndim(), indices2.in_ndim()); for (&axis_len, &si1, &si2) in izip!( dim.slice(), @@ -640,7 +667,8 @@ pub fn slices_intersect(dim: &D, indices1: impl SliceArg, indic true } -pub(crate) fn is_layout_c(dim: &D, strides: &D) -> bool { +pub(crate) fn is_layout_c(dim: &D, strides: &D) -> bool +{ if let Some(1) = D::NDIM { return strides[0] == 1 || dim[0] <= 1; } @@ -665,7 +693,8 @@ pub(crate) fn is_layout_c(dim: &D, strides: &D) -> bool { true } -pub(crate) fn is_layout_f(dim: &D, strides: &D) -> bool { +pub(crate) fn is_layout_f(dim: &D, strides: &D) -> bool +{ if let Some(1) = D::NDIM { return strides[0] == 1 || dim[0] <= 1; } @@ -691,7 +720,8 @@ pub(crate) fn is_layout_f(dim: &D, strides: &D) -> bool { } pub fn merge_axes(dim: &mut D, strides: &mut D, take: Axis, into: Axis) -> bool -where D: Dimension { +where D: Dimension +{ let into_len = dim.axis(into); let into_stride = strides.axis(into) as isize; let take_len = dim.axis(take); @@ -718,7 +748,8 @@ where D: Dimension { /// Move the axis which has the smallest absolute stride and a length /// greater than one to be the last axis. 
pub fn move_min_stride_axis_to_last(dim: &mut D, strides: &mut D) -where D: Dimension { +where D: Dimension +{ debug_assert_eq!(dim.ndim(), strides.ndim()); match dim.ndim() { 0 | 1 => {} @@ -741,7 +772,8 @@ where D: Dimension { } #[cfg(test)] -mod test { +mod test +{ use super::{ arith_seq_intersect, can_index_slice, @@ -760,7 +792,8 @@ mod test { use quickcheck::{quickcheck, TestResult}; #[test] - fn slice_indexing_uncommon_strides() { + fn slice_indexing_uncommon_strides() + { let v: alloc::vec::Vec<_> = (0..12).collect(); let dim = (2, 3, 2).into_dimension(); let strides = (1, 2, 6).into_dimension(); @@ -774,7 +807,8 @@ mod test { } #[test] - fn overlapping_strides_dim() { + fn overlapping_strides_dim() + { let dim = (2, 3, 2).into_dimension(); let strides = (5, 2, 1).into_dimension(); assert!(super::dim_stride_overlap(&dim, &strides)); @@ -796,7 +830,8 @@ mod test { } #[test] - fn max_abs_offset_check_overflow_examples() { + fn max_abs_offset_check_overflow_examples() + { let dim = (1, ::std::isize::MAX as usize, 1).into_dimension(); let strides = (1, 1, 1).into_dimension(); max_abs_offset_check_overflow::(&dim, &strides).unwrap(); @@ -812,13 +847,15 @@ mod test { } #[test] - fn can_index_slice_ix0() { + fn can_index_slice_ix0() + { can_index_slice::(&[1], &Ix0(), &Ix0()).unwrap(); can_index_slice::(&[], &Ix0(), &Ix0()).unwrap_err(); } #[test] - fn can_index_slice_ix1() { + fn can_index_slice_ix1() + { can_index_slice::(&[], &Ix1(0), &Ix1(0)).unwrap(); can_index_slice::(&[], &Ix1(0), &Ix1(1)).unwrap(); can_index_slice::(&[], &Ix1(1), &Ix1(0)).unwrap_err(); @@ -833,7 +870,8 @@ mod test { } #[test] - fn can_index_slice_ix2() { + fn can_index_slice_ix2() + { can_index_slice::(&[], &Ix2(0, 0), &Ix2(0, 0)).unwrap(); can_index_slice::(&[], &Ix2(0, 0), &Ix2(2, 1)).unwrap(); can_index_slice::(&[], &Ix2(0, 1), &Ix2(0, 0)).unwrap(); @@ -848,7 +886,8 @@ mod test { } #[test] - fn can_index_slice_ix3() { + fn can_index_slice_ix3() + { can_index_slice::(&[], &Ix3(0, 0, 1), 
&Ix3(2, 1, 3)).unwrap(); can_index_slice::(&[], &Ix3(1, 1, 1), &Ix3(2, 1, 3)).unwrap_err(); can_index_slice::(&[1], &Ix3(1, 1, 1), &Ix3(2, 1, 3)).unwrap(); @@ -857,7 +896,8 @@ mod test { } #[test] - fn can_index_slice_zero_size_elem() { + fn can_index_slice_zero_size_elem() + { can_index_slice::<(), _>(&[], &Ix1(0), &Ix1(1)).unwrap(); can_index_slice::<(), _>(&[()], &Ix1(1), &Ix1(1)).unwrap(); can_index_slice::<(), _>(&[(), ()], &Ix1(2), &Ix1(1)).unwrap(); @@ -907,7 +947,8 @@ mod test { } #[test] - fn extended_gcd_zero() { + fn extended_gcd_zero() + { assert_eq!(extended_gcd(0, 0), (0, (0, 0))); assert_eq!(extended_gcd(0, 5), (5, (0, 1))); assert_eq!(extended_gcd(5, 0), (5, (1, 0))); @@ -997,7 +1038,8 @@ mod test { } #[test] - fn slice_min_max_empty() { + fn slice_min_max_empty() + { assert_eq!(slice_min_max(0, Slice::new(0, None, 3)), None); assert_eq!(slice_min_max(10, Slice::new(1, Some(1), 3)), None); assert_eq!(slice_min_max(10, Slice::new(-1, Some(-1), 3)), None); @@ -1006,7 +1048,8 @@ mod test { } #[test] - fn slice_min_max_pos_step() { + fn slice_min_max_pos_step() + { assert_eq!(slice_min_max(10, Slice::new(1, Some(8), 3)), Some((1, 7))); assert_eq!(slice_min_max(10, Slice::new(1, Some(9), 3)), Some((1, 7))); assert_eq!(slice_min_max(10, Slice::new(-9, Some(8), 3)), Some((1, 7))); @@ -1022,7 +1065,8 @@ mod test { } #[test] - fn slice_min_max_neg_step() { + fn slice_min_max_neg_step() + { assert_eq!(slice_min_max(10, Slice::new(1, Some(8), -3)), Some((1, 7))); assert_eq!(slice_min_max(10, Slice::new(2, Some(8), -3)), Some((4, 7))); assert_eq!(slice_min_max(10, Slice::new(-9, Some(8), -3)), Some((1, 7))); @@ -1044,7 +1088,8 @@ mod test { } #[test] - fn slices_intersect_true() { + fn slices_intersect_true() + { assert!(slices_intersect( &Dim([4, 5]), s![NewAxis, .., NewAxis, ..], @@ -1069,7 +1114,8 @@ mod test { } #[test] - fn slices_intersect_false() { + fn slices_intersect_false() + { assert!(!slices_intersect( &Dim([4, 5]), s![..;2, ..], diff --git 
a/src/dimension/ndindex.rs b/src/dimension/ndindex.rs index 5792f1a22..e27e68c99 100644 --- a/src/dimension/ndindex.rs +++ b/src/dimension/ndindex.rs @@ -17,7 +17,8 @@ use crate::{Dim, Dimension, IntoDimension, Ix, Ix0, Ix1, Ix2, Ix3, Ix4, Ix5, Ix6 /// assert_eq!(a[(1, 1)], 4); /// ``` #[allow(clippy::missing_safety_doc)] // TODO: Add doc -pub unsafe trait NdIndex: Debug { +pub unsafe trait NdIndex: Debug +{ #[doc(hidden)] fn index_checked(&self, dim: &E, strides: &E) -> Option; #[doc(hidden)] @@ -27,93 +28,116 @@ pub unsafe trait NdIndex: Debug { unsafe impl NdIndex for D where D: Dimension { - fn index_checked(&self, dim: &D, strides: &D) -> Option { + fn index_checked(&self, dim: &D, strides: &D) -> Option + { dim.stride_offset_checked(strides, self) } - fn index_unchecked(&self, strides: &D) -> isize { + fn index_unchecked(&self, strides: &D) -> isize + { D::stride_offset(self, strides) } } -unsafe impl NdIndex for () { +unsafe impl NdIndex for () +{ #[inline] - fn index_checked(&self, dim: &Ix0, strides: &Ix0) -> Option { + fn index_checked(&self, dim: &Ix0, strides: &Ix0) -> Option + { dim.stride_offset_checked(strides, &Ix0()) } #[inline(always)] - fn index_unchecked(&self, _strides: &Ix0) -> isize { + fn index_unchecked(&self, _strides: &Ix0) -> isize + { 0 } } -unsafe impl NdIndex for (Ix, Ix) { +unsafe impl NdIndex for (Ix, Ix) +{ #[inline] - fn index_checked(&self, dim: &Ix2, strides: &Ix2) -> Option { + fn index_checked(&self, dim: &Ix2, strides: &Ix2) -> Option + { dim.stride_offset_checked(strides, &Ix2(self.0, self.1)) } #[inline] - fn index_unchecked(&self, strides: &Ix2) -> isize { + fn index_unchecked(&self, strides: &Ix2) -> isize + { stride_offset(self.0, get!(strides, 0)) + stride_offset(self.1, get!(strides, 1)) } } -unsafe impl NdIndex for (Ix, Ix, Ix) { +unsafe impl NdIndex for (Ix, Ix, Ix) +{ #[inline] - fn index_checked(&self, dim: &Ix3, strides: &Ix3) -> Option { + fn index_checked(&self, dim: &Ix3, strides: &Ix3) -> Option + { 
dim.stride_offset_checked(strides, &self.into_dimension()) } #[inline] - fn index_unchecked(&self, strides: &Ix3) -> isize { + fn index_unchecked(&self, strides: &Ix3) -> isize + { stride_offset(self.0, get!(strides, 0)) + stride_offset(self.1, get!(strides, 1)) + stride_offset(self.2, get!(strides, 2)) } } -unsafe impl NdIndex for (Ix, Ix, Ix, Ix) { +unsafe impl NdIndex for (Ix, Ix, Ix, Ix) +{ #[inline] - fn index_checked(&self, dim: &Ix4, strides: &Ix4) -> Option { + fn index_checked(&self, dim: &Ix4, strides: &Ix4) -> Option + { dim.stride_offset_checked(strides, &self.into_dimension()) } #[inline] - fn index_unchecked(&self, strides: &Ix4) -> isize { + fn index_unchecked(&self, strides: &Ix4) -> isize + { zip(strides.ix(), self.into_dimension().ix()) .map(|(&s, &i)| stride_offset(i, s)) .sum() } } -unsafe impl NdIndex for (Ix, Ix, Ix, Ix, Ix) { +unsafe impl NdIndex for (Ix, Ix, Ix, Ix, Ix) +{ #[inline] - fn index_checked(&self, dim: &Ix5, strides: &Ix5) -> Option { + fn index_checked(&self, dim: &Ix5, strides: &Ix5) -> Option + { dim.stride_offset_checked(strides, &self.into_dimension()) } #[inline] - fn index_unchecked(&self, strides: &Ix5) -> isize { + fn index_unchecked(&self, strides: &Ix5) -> isize + { zip(strides.ix(), self.into_dimension().ix()) .map(|(&s, &i)| stride_offset(i, s)) .sum() } } -unsafe impl NdIndex for Ix { +unsafe impl NdIndex for Ix +{ #[inline] - fn index_checked(&self, dim: &Ix1, strides: &Ix1) -> Option { + fn index_checked(&self, dim: &Ix1, strides: &Ix1) -> Option + { dim.stride_offset_checked(strides, &Ix1(*self)) } #[inline(always)] - fn index_unchecked(&self, strides: &Ix1) -> isize { + fn index_unchecked(&self, strides: &Ix1) -> isize + { stride_offset(*self, get!(strides, 0)) } } -unsafe impl NdIndex for Ix { +unsafe impl NdIndex for Ix +{ #[inline] - fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option + { debug_assert_eq!(dim.ndim(), 1); 
stride_offset_checked(dim.ix(), strides.ix(), &[*self]) } #[inline(always)] - fn index_unchecked(&self, strides: &IxDyn) -> isize { + fn index_unchecked(&self, strides: &IxDyn) -> isize + { debug_assert_eq!(strides.ndim(), 1); stride_offset(*self, get!(strides, 0)) } @@ -152,9 +176,11 @@ ndindex_with_array! { } // implement NdIndex for Dim<[Ix; 2]> and so on -unsafe impl NdIndex for Dim<[Ix; N]> { +unsafe impl NdIndex for Dim<[Ix; N]> +{ #[inline] - fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option + { debug_assert_eq!( strides.ndim(), N, @@ -166,7 +192,8 @@ unsafe impl NdIndex for Dim<[Ix; N]> { } #[inline] - fn index_unchecked(&self, strides: &IxDyn) -> isize { + fn index_unchecked(&self, strides: &IxDyn) -> isize + { debug_assert_eq!( strides.ndim(), N, @@ -181,9 +208,11 @@ unsafe impl NdIndex for Dim<[Ix; N]> { } // implement NdIndex for [Ix; 2] and so on -unsafe impl NdIndex for [Ix; N] { +unsafe impl NdIndex for [Ix; N] +{ #[inline] - fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option + { debug_assert_eq!( strides.ndim(), N, @@ -195,7 +224,8 @@ unsafe impl NdIndex for [Ix; N] { } #[inline] - fn index_unchecked(&self, strides: &IxDyn) -> isize { + fn index_unchecked(&self, strides: &IxDyn) -> isize + { debug_assert_eq!( strides.ndim(), N, @@ -209,27 +239,35 @@ unsafe impl NdIndex for [Ix; N] { } } -impl<'a> IntoDimension for &'a [Ix] { +impl<'a> IntoDimension for &'a [Ix] +{ type Dim = IxDyn; - fn into_dimension(self) -> Self::Dim { + fn into_dimension(self) -> Self::Dim + { Dim(IxDynImpl::from(self)) } } -unsafe impl<'a> NdIndex for &'a IxDyn { - fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { +unsafe impl<'a> NdIndex for &'a IxDyn +{ + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option + { (**self).index_checked(dim, strides) } - fn index_unchecked(&self, strides: 
&IxDyn) -> isize { + fn index_unchecked(&self, strides: &IxDyn) -> isize + { (**self).index_unchecked(strides) } } -unsafe impl<'a> NdIndex for &'a [Ix] { - fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option { +unsafe impl<'a> NdIndex for &'a [Ix] +{ + fn index_checked(&self, dim: &IxDyn, strides: &IxDyn) -> Option + { stride_offset_checked(dim.ix(), strides.ix(), self) } - fn index_unchecked(&self, strides: &IxDyn) -> isize { + fn index_unchecked(&self, strides: &IxDyn) -> isize + { zip(strides.ix(), *self) .map(|(&s, &i)| stride_offset(i, s)) .sum() diff --git a/src/dimension/ops.rs b/src/dimension/ops.rs index dd23216f6..1365ab488 100644 --- a/src/dimension/ops.rs +++ b/src/dimension/ops.rs @@ -1,7 +1,8 @@ use crate::imp_prelude::*; /// Adds the two dimensions at compile time. -pub trait DimAdd { +pub trait DimAdd +{ /// The sum of the two dimensions. type Output: Dimension; } @@ -27,7 +28,8 @@ macro_rules! impl_dimadd_const_out_dyn { }; } -impl DimAdd for Ix0 { +impl DimAdd for Ix0 +{ type Output = D; } @@ -85,6 +87,7 @@ impl_dimadd_const_out_dyn!(6, 5); impl_dimadd_const_out_dyn!(6, 6); impl_dimadd_const_out_dyn!(6, IxDyn); -impl DimAdd for IxDyn { +impl DimAdd for IxDyn +{ type Output = IxDyn; } diff --git a/src/dimension/remove_axis.rs b/src/dimension/remove_axis.rs index da366ae17..cbb039fc5 100644 --- a/src/dimension/remove_axis.rs +++ b/src/dimension/remove_axis.rs @@ -12,21 +12,26 @@ use crate::{Axis, Dim, Dimension, Ix, Ix0, Ix1}; /// /// `RemoveAxis` defines a larger-than relation for array shapes: /// removing one axis from *Self* gives smaller dimension *Smaller*. 
-pub trait RemoveAxis: Dimension { +pub trait RemoveAxis: Dimension +{ fn remove_axis(&self, axis: Axis) -> Self::Smaller; } -impl RemoveAxis for Dim<[Ix; 1]> { +impl RemoveAxis for Dim<[Ix; 1]> +{ #[inline] - fn remove_axis(&self, axis: Axis) -> Ix0 { + fn remove_axis(&self, axis: Axis) -> Ix0 + { debug_assert!(axis.index() < self.ndim()); Ix0() } } -impl RemoveAxis for Dim<[Ix; 2]> { +impl RemoveAxis for Dim<[Ix; 2]> +{ #[inline] - fn remove_axis(&self, axis: Axis) -> Ix1 { + fn remove_axis(&self, axis: Axis) -> Ix1 + { let axis = axis.index(); debug_assert!(axis < self.ndim()); if axis == 0 { diff --git a/src/dimension/reshape.rs b/src/dimension/reshape.rs index 99ab66d8f..52d9e719a 100644 --- a/src/dimension/reshape.rs +++ b/src/dimension/reshape.rs @@ -146,7 +146,8 @@ where #[cfg(feature = "std")] #[test] -fn test_reshape() { +fn test_reshape() +{ use crate::Dim; macro_rules! test_reshape { diff --git a/src/dimension/sequence.rs b/src/dimension/sequence.rs index c407ece51..ed3605d57 100644 --- a/src/dimension/sequence.rs +++ b/src/dimension/sequence.rs @@ -12,7 +12,8 @@ where D: Dimension type Output = usize; #[inline] - fn index(&self, index: usize) -> &usize { + fn index(&self, index: usize) -> &usize + { &self.0[index] } } @@ -23,7 +24,8 @@ where D: Dimension type Output = usize; #[inline] - fn index(&self, index: usize) -> &usize { + fn index(&self, index: usize) -> &usize + { &self.0[index] } } @@ -32,7 +34,8 @@ impl IndexMut for Forward<&mut D> where D: Dimension { #[inline] - fn index_mut(&mut self, index: usize) -> &mut usize { + fn index_mut(&mut self, index: usize) -> &mut usize + { &mut self.0[index] } } @@ -43,7 +46,8 @@ where D: Dimension type Output = usize; #[inline] - fn index(&self, index: usize) -> &usize { + fn index(&self, index: usize) -> &usize + { &self.0[self.len() - index - 1] } } @@ -54,7 +58,8 @@ where D: Dimension type Output = usize; #[inline] - fn index(&self, index: usize) -> &usize { + fn index(&self, index: usize) -> &usize + { 
&self.0[self.len() - index - 1] } } @@ -63,14 +68,16 @@ impl IndexMut for Reverse<&mut D> where D: Dimension { #[inline] - fn index_mut(&mut self, index: usize) -> &mut usize { + fn index_mut(&mut self, index: usize) -> &mut usize + { let len = self.len(); &mut self.0[len - index - 1] } } /// Indexable sequence with length -pub(in crate::dimension) trait Sequence: Index { +pub(in crate::dimension) trait Sequence: Index +{ fn len(&self) -> usize; } @@ -81,7 +88,8 @@ impl Sequence for Forward<&D> where D: Dimension { #[inline] - fn len(&self) -> usize { + fn len(&self) -> usize + { self.0.ndim() } } @@ -90,7 +98,8 @@ impl Sequence for Forward<&mut D> where D: Dimension { #[inline] - fn len(&self) -> usize { + fn len(&self) -> usize + { self.0.ndim() } } @@ -101,7 +110,8 @@ impl Sequence for Reverse<&D> where D: Dimension { #[inline] - fn len(&self) -> usize { + fn len(&self) -> usize + { self.0.ndim() } } @@ -110,7 +120,8 @@ impl Sequence for Reverse<&mut D> where D: Dimension { #[inline] - fn len(&self) -> usize { + fn len(&self) -> usize + { self.0.ndim() } } diff --git a/src/error.rs b/src/error.rs index c45496142..eb7395ad8 100644 --- a/src/error.rs +++ b/src/error.rs @@ -12,20 +12,24 @@ use std::fmt; /// An error related to array shape or layout. #[derive(Clone)] -pub struct ShapeError { +pub struct ShapeError +{ // we want to be able to change this representation later repr: ErrorKind, } -impl ShapeError { +impl ShapeError +{ /// Return the `ErrorKind` of this error. #[inline] - pub fn kind(&self) -> ErrorKind { + pub fn kind(&self) -> ErrorKind + { self.repr } /// Create a new `ShapeError` - pub fn from_kind(error: ErrorKind) -> Self { + pub fn from_kind(error: ErrorKind) -> Self + { from_kind(error) } } @@ -36,7 +40,8 @@ impl ShapeError { /// is not guaranteed. 
#[non_exhaustive] #[derive(Copy, Clone, Debug)] -pub enum ErrorKind { +pub enum ErrorKind +{ /// incompatible shape IncompatibleShape = 1, /// incompatible memory layout @@ -52,20 +57,25 @@ pub enum ErrorKind { } #[inline(always)] -pub fn from_kind(k: ErrorKind) -> ShapeError { +pub fn from_kind(k: ErrorKind) -> ShapeError +{ ShapeError { repr: k } } -impl PartialEq for ErrorKind { +impl PartialEq for ErrorKind +{ #[inline(always)] - fn eq(&self, rhs: &Self) -> bool { + fn eq(&self, rhs: &Self) -> bool + { *self as u8 == *rhs as u8 } } -impl PartialEq for ShapeError { +impl PartialEq for ShapeError +{ #[inline(always)] - fn eq(&self, rhs: &Self) -> bool { + fn eq(&self, rhs: &Self) -> bool + { self.repr == rhs.repr } } @@ -73,8 +83,10 @@ impl PartialEq for ShapeError { #[cfg(feature = "std")] impl Error for ShapeError {} -impl fmt::Display for ShapeError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { +impl fmt::Display for ShapeError +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { let description = match self.kind() { ErrorKind::IncompatibleShape => "incompatible shapes", ErrorKind::IncompatibleLayout => "incompatible memory layout", @@ -87,8 +99,10 @@ impl fmt::Display for ShapeError { } } -impl fmt::Debug for ShapeError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { +impl fmt::Debug for ShapeError +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { write!(f, "{}", self) } } diff --git a/src/extension/nonnull.rs b/src/extension/nonnull.rs index 043abee13..08f80927e 100644 --- a/src/extension/nonnull.rs +++ b/src/extension/nonnull.rs @@ -3,7 +3,8 @@ use alloc::vec::Vec; use std::ptr::NonNull; /// Return a NonNull pointer to the vector's data -pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull { +pub(crate) fn nonnull_from_vec_data(v: &mut Vec) -> NonNull +{ // this pointer is guaranteed to be non-null unsafe { NonNull::new_unchecked(v.as_mut_ptr()) } } @@ -14,7 +15,8 @@ pub(crate) fn 
nonnull_from_vec_data(v: &mut Vec) -> NonNull { /// This is checked with a debug assertion, and will panic if this is not true, /// but treat this as an unconditional conversion. #[inline] -pub(crate) unsafe fn nonnull_debug_checked_from_ptr(ptr: *mut T) -> NonNull { +pub(crate) unsafe fn nonnull_debug_checked_from_ptr(ptr: *mut T) -> NonNull +{ debug_assert!(!ptr.is_null()); NonNull::new_unchecked(ptr) } diff --git a/src/free_functions.rs b/src/free_functions.rs index 971e89ee2..3adf2d8f3 100644 --- a/src/free_functions.rs +++ b/src/free_functions.rs @@ -51,22 +51,26 @@ macro_rules! array { } /// Create a zero-dimensional array with the element `x`. -pub fn arr0(x: A) -> Array0 { +pub fn arr0(x: A) -> Array0 +{ unsafe { ArrayBase::from_shape_vec_unchecked((), vec![x]) } } /// Create a one-dimensional array with elements from `xs`. -pub fn arr1(xs: &[A]) -> Array1 { +pub fn arr1(xs: &[A]) -> Array1 +{ ArrayBase::from(xs.to_vec()) } /// Create a one-dimensional array with elements from `xs`. -pub fn rcarr1(xs: &[A]) -> ArcArray1 { +pub fn rcarr1(xs: &[A]) -> ArcArray1 +{ arr1(xs).into_shared() } /// Create a zero-dimensional array view borrowing `x`. -pub const fn aview0(x: &A) -> ArrayView0<'_, A> { +pub const fn aview0(x: &A) -> ArrayView0<'_, A> +{ ArrayBase { data: ViewRepr::new(), // Safe because references are always non-null. 
@@ -97,7 +101,8 @@ pub const fn aview0(x: &A) -> ArrayView0<'_, A> { /// /// assert_eq!(C.sum(), 6.); /// ``` -pub const fn aview1(xs: &[A]) -> ArrayView1<'_, A> { +pub const fn aview1(xs: &[A]) -> ArrayView1<'_, A> +{ if size_of::() == 0 { assert!( xs.len() <= isize::MAX as usize, @@ -131,7 +136,8 @@ pub const fn aview1(xs: &[A]) -> ArrayView1<'_, A> { /// const C: ArrayView2<'static, f64> = aview2(&[[1., 2., 3.], [4., 5., 6.]]); /// assert_eq!(C.sum(), 21.); /// ``` -pub const fn aview2(xs: &[[A; N]]) -> ArrayView2<'_, A> { +pub const fn aview2(xs: &[[A; N]]) -> ArrayView2<'_, A> +{ let cols = N; let rows = xs.len(); if size_of::() == 0 { @@ -179,7 +185,8 @@ pub const fn aview2(xs: &[[A; N]]) -> ArrayView2<'_, A> { /// } /// assert_eq!(&data[..10], [5, 0, 0, 5, 0, 0, 5, 0, 0, 5]); /// ``` -pub fn aview_mut1(xs: &mut [A]) -> ArrayViewMut1<'_, A> { +pub fn aview_mut1(xs: &mut [A]) -> ArrayViewMut1<'_, A> +{ ArrayViewMut::from(xs) } @@ -205,7 +212,8 @@ pub fn aview_mut1(xs: &mut [A]) -> ArrayViewMut1<'_, A> { /// // look at the start of the result /// assert_eq!(&data[..3], [[1., -1.], [1., -1.], [1., -1.]]); /// ``` -pub fn aview_mut2(xs: &mut [[A; N]]) -> ArrayViewMut2<'_, A> { +pub fn aview_mut2(xs: &mut [[A; N]]) -> ArrayViewMut2<'_, A> +{ ArrayViewMut2::from(xs) } @@ -220,15 +228,18 @@ pub fn aview_mut2(xs: &mut [[A; N]]) -> ArrayViewMut2<'_, A> /// a.shape() == [2, 3] /// ); /// ``` -pub fn arr2(xs: &[[A; N]]) -> Array2 { +pub fn arr2(xs: &[[A; N]]) -> Array2 +{ Array2::from(xs.to_vec()) } -impl From> for Array2 { +impl From> for Array2 +{ /// Converts the `Vec` of arrays to an owned 2-D array. /// /// **Panics** if the product of non-zero axis lengths overflows `isize`. 
- fn from(mut xs: Vec<[A; N]>) -> Self { + fn from(mut xs: Vec<[A; N]>) -> Self + { let dim = Ix2(xs.len(), N); let ptr = xs.as_mut_ptr(); let cap = xs.capacity(); @@ -251,11 +262,13 @@ impl From> for Array2 { } } -impl From> for Array3 { +impl From> for Array3 +{ /// Converts the `Vec` of arrays to an owned 3-D array. /// /// **Panics** if the product of non-zero axis lengths overflows `isize`. - fn from(mut xs: Vec<[[A; M]; N]>) -> Self { + fn from(mut xs: Vec<[[A; M]; N]>) -> Self + { let dim = Ix3(xs.len(), N, M); let ptr = xs.as_mut_ptr(); let cap = xs.capacity(); @@ -280,7 +293,8 @@ impl From> for Array3 { /// Create a two-dimensional array with elements from `xs`. /// -pub fn rcarr2(xs: &[[A; N]]) -> ArcArray2 { +pub fn rcarr2(xs: &[[A; N]]) -> ArcArray2 +{ arr2(xs).into_shared() } @@ -301,11 +315,13 @@ pub fn rcarr2(xs: &[[A; N]]) -> ArcArray2 { /// a.shape() == [3, 2, 2] /// ); /// ``` -pub fn arr3(xs: &[[[A; M]; N]]) -> Array3 { +pub fn arr3(xs: &[[[A; M]; N]]) -> Array3 +{ Array3::from(xs.to_vec()) } /// Create a three-dimensional array with elements from `xs`. -pub fn rcarr3(xs: &[[[A; M]; N]]) -> ArcArray { +pub fn rcarr3(xs: &[[[A; M]; N]]) -> ArcArray +{ arr3(xs).into_shared() } diff --git a/src/geomspace.rs b/src/geomspace.rs index 23ee073c3..0ac91f529 100644 --- a/src/geomspace.rs +++ b/src/geomspace.rs @@ -11,7 +11,8 @@ use num_traits::Float; /// An iterator of a sequence of geometrically spaced floats. /// /// Iterator element type is `F`. 
-pub struct Geomspace { +pub struct Geomspace +{ sign: F, start: F, step: F, @@ -25,7 +26,8 @@ where F: Float type Item = F; #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { if self.index >= self.len { None } else { @@ -38,7 +40,8 @@ where F: Float } #[inline] - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { let n = self.len - self.index; (n, Some(n)) } @@ -48,7 +51,8 @@ impl DoubleEndedIterator for Geomspace where F: Float { #[inline] - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option + { if self.index >= self.len { None } else { @@ -78,7 +82,8 @@ impl ExactSizeIterator for Geomspace where Geomspace: Iterator {} /// **Panics** if converting `n - 1` to type `F` fails. #[inline] pub fn geomspace(a: F, b: F, n: usize) -> Option> -where F: Float { +where F: Float +{ if a == F::zero() || b == F::zero() || a.is_sign_negative() != b.is_sign_negative() { return None; } @@ -100,12 +105,14 @@ where F: Float { } #[cfg(test)] -mod tests { +mod tests +{ use super::geomspace; #[test] #[cfg(feature = "approx")] - fn valid() { + fn valid() + { use crate::{arr1, Array1}; use approx::assert_abs_diff_eq; @@ -123,7 +130,8 @@ mod tests { } #[test] - fn iter_forward() { + fn iter_forward() + { let mut iter = geomspace(1.0f64, 1e3, 4).unwrap(); assert!(iter.size_hint() == (4, Some(4))); @@ -138,7 +146,8 @@ mod tests { } #[test] - fn iter_backward() { + fn iter_backward() + { let mut iter = geomspace(1.0f64, 1e3, 4).unwrap(); assert!(iter.size_hint() == (4, Some(4))); @@ -153,17 +162,20 @@ mod tests { } #[test] - fn zero_lower() { + fn zero_lower() + { assert!(geomspace(0.0, 1.0, 4).is_none()); } #[test] - fn zero_upper() { + fn zero_upper() + { assert!(geomspace(1.0, 0.0, 4).is_none()); } #[test] - fn zero_included() { + fn zero_included() + { assert!(geomspace(-1.0, 1.0, 4).is_none()); } } diff --git a/src/impl_1d.rs b/src/impl_1d.rs index 18ad72869..e49fdd731 100644 --- a/src/impl_1d.rs +++ 
b/src/impl_1d.rs @@ -34,7 +34,8 @@ where S: RawData /// Rotate the elements of the array by 1 element towards the front; /// the former first element becomes the last. pub(crate) fn rotate1_front(&mut self) - where S: DataMut { + where S: DataMut + { // use swapping to keep all elements initialized (as required by owned storage) let mut lane_iter = self.iter_mut(); let mut dst = if let Some(dst) = lane_iter.next() { dst } else { return }; diff --git a/src/impl_2d.rs b/src/impl_2d.rs index 8f5c96eea..c2e9725ac 100644 --- a/src/impl_2d.rs +++ b/src/impl_2d.rs @@ -24,7 +24,8 @@ where S: RawData /// ``` #[track_caller] pub fn row(&self, index: Ix) -> ArrayView1<'_, A> - where S: Data { + where S: Data + { self.index_axis(Axis(0), index) } @@ -40,7 +41,8 @@ where S: RawData /// ``` #[track_caller] pub fn row_mut(&mut self, index: Ix) -> ArrayViewMut1<'_, A> - where S: DataMut { + where S: DataMut + { self.index_axis_mut(Axis(0), index) } @@ -61,7 +63,8 @@ where S: RawData /// // get length of any particular axis with .len_of() /// assert_eq!(m, array.len_of(Axis(0))); /// ``` - pub fn nrows(&self) -> usize { + pub fn nrows(&self) -> usize + { self.len_of(Axis(0)) } @@ -76,7 +79,8 @@ where S: RawData /// ``` #[track_caller] pub fn column(&self, index: Ix) -> ArrayView1<'_, A> - where S: Data { + where S: Data + { self.index_axis(Axis(1), index) } @@ -92,7 +96,8 @@ where S: RawData /// ``` #[track_caller] pub fn column_mut(&mut self, index: Ix) -> ArrayViewMut1<'_, A> - where S: DataMut { + where S: DataMut + { self.index_axis_mut(Axis(1), index) } @@ -113,7 +118,8 @@ where S: RawData /// // get length of any particular axis with .len_of() /// assert_eq!(n, array.len_of(Axis(1))); /// ``` - pub fn ncols(&self) -> usize { + pub fn ncols(&self) -> usize + { self.len_of(Axis(1)) } @@ -132,7 +138,8 @@ where S: RawData /// let array = array![[1., 2., 5.], [3., 4., 6.]]; /// assert!(!array.is_square()); /// ``` - pub fn is_square(&self) -> bool { + pub fn is_square(&self) -> 
bool + { let (m, n) = self.dim(); m == n } diff --git a/src/impl_clone.rs b/src/impl_clone.rs index e2e111a12..d65f6c338 100644 --- a/src/impl_clone.rs +++ b/src/impl_clone.rs @@ -9,8 +9,10 @@ use crate::imp_prelude::*; use crate::RawDataClone; -impl Clone for ArrayBase { - fn clone(&self) -> ArrayBase { +impl Clone for ArrayBase +{ + fn clone(&self) -> ArrayBase + { // safe because `clone_with_ptr` promises to provide equivalent data and ptr unsafe { let (data, ptr) = self.data.clone_with_ptr(self.ptr); @@ -26,7 +28,8 @@ impl Clone for ArrayBase { /// `Array` implements `.clone_from()` to reuse an array's existing /// allocation. Semantically equivalent to `*self = other.clone()`, but /// potentially more efficient. - fn clone_from(&mut self, other: &Self) { + fn clone_from(&mut self, other: &Self) + { unsafe { self.ptr = self.data.clone_from_with_ptr(&other.data, other.ptr); self.dim.clone_from(&other.dim); diff --git a/src/impl_constructors.rs b/src/impl_constructors.rs index d01ada8fa..cdce67e4c 100644 --- a/src/impl_constructors.rs +++ b/src/impl_constructors.rs @@ -54,7 +54,8 @@ where S: DataOwned /// /// let array = Array::from_vec(vec![1., 2., 3., 4.]); /// ``` - pub fn from_vec(v: Vec) -> Self { + pub fn from_vec(v: Vec) -> Self + { if mem::size_of::() == 0 { assert!( v.len() <= isize::MAX as usize, @@ -74,7 +75,8 @@ where S: DataOwned /// let array = Array::from_iter(0..10); /// ``` #[allow(clippy::should_implement_trait)] - pub fn from_iter>(iterable: I) -> Self { + pub fn from_iter>(iterable: I) -> Self + { Self::from_vec(iterable.into_iter().collect()) } @@ -97,7 +99,8 @@ where S: DataOwned /// ``` #[cfg(feature = "std")] pub fn linspace(start: A, end: A, n: usize) -> Self - where A: Float { + where A: Float + { Self::from(to_vec(linspace::linspace(start, end, n))) } @@ -114,7 +117,8 @@ where S: DataOwned /// ``` #[cfg(feature = "std")] pub fn range(start: A, end: A, step: A) -> Self - where A: Float { + where A: Float + { 
Self::from(to_vec(linspace::range(start, end, step))) } @@ -141,7 +145,8 @@ where S: DataOwned /// ``` #[cfg(feature = "std")] pub fn logspace(base: A, start: A, end: A, n: usize) -> Self - where A: Float { + where A: Float + { Self::from(to_vec(logspace::logspace(base, start, end, n))) } @@ -174,7 +179,8 @@ where S: DataOwned /// ``` #[cfg(feature = "std")] pub fn geomspace(start: A, end: A, n: usize) -> Option - where A: Float { + where A: Float + { Some(Self::from(to_vec(geomspace::geomspace(start, end, n)?))) } } @@ -449,12 +455,14 @@ where /// ); /// ``` pub fn from_shape_vec(shape: Sh, v: Vec) -> Result - where Sh: Into> { + where Sh: Into> + { // eliminate the type parameter Sh as soon as possible Self::from_shape_vec_impl(shape.into(), v) } - fn from_shape_vec_impl(shape: StrideShape, v: Vec) -> Result { + fn from_shape_vec_impl(shape: StrideShape, v: Vec) -> Result + { let dim = shape.dim; let is_custom = shape.strides.is_custom(); dimension::can_index_slice_with_strides(&v, &dim, &shape.strides)?; @@ -490,14 +498,16 @@ where /// 5. The strides must not allow any element to be referenced by two different /// indices. 
pub unsafe fn from_shape_vec_unchecked(shape: Sh, v: Vec) -> Self - where Sh: Into> { + where Sh: Into> + { let shape = shape.into(); let dim = shape.dim; let strides = shape.strides.strides_for_dim(&dim); Self::from_vec_dim_stride_unchecked(dim, strides, v) } - unsafe fn from_vec_dim_stride_unchecked(dim: D, strides: D, mut v: Vec) -> Self { + unsafe fn from_vec_dim_stride_unchecked(dim: D, strides: D, mut v: Vec) -> Self + { // debug check for issues that indicates wrong use of this constructor debug_assert!(dimension::can_index_slice(&v, &dim, &strides).is_ok()); @@ -570,7 +580,8 @@ where /// # let _ = shift_by_two; /// ``` pub fn uninit(shape: Sh) -> ArrayBase - where Sh: ShapeBuilder { + where Sh: ShapeBuilder + { unsafe { let shape = shape.into_shape_with_order(); let size = size_of_shape_checked_unwrap!(&shape.dim); @@ -614,7 +625,8 @@ where array } - pub(crate) const unsafe fn from_parts(data: S, ptr: std::ptr::NonNull, dim: D, strides: D) -> Self { + pub(crate) const unsafe fn from_parts(data: S, ptr: std::ptr::NonNull, dim: D, strides: D) -> Self + { Self { data, ptr, @@ -674,7 +686,8 @@ where /// This method has been renamed to `uninit` #[deprecated(note = "Renamed to `uninit`", since = "0.15.0")] pub fn maybe_uninit(shape: Sh) -> Self - where Sh: ShapeBuilder { + where Sh: ShapeBuilder + { unsafe { let shape = shape.into_shape_with_order(); let size = size_of_shape_checked_unwrap!(&shape.dim); diff --git a/src/impl_cow.rs b/src/impl_cow.rs index a04b04b7f..f064ce7bd 100644 --- a/src/impl_cow.rs +++ b/src/impl_cow.rs @@ -15,12 +15,14 @@ impl<'a, A, D> CowArray<'a, A, D> where D: Dimension { /// Returns `true` iff the array is the view (borrowed) variant. - pub fn is_view(&self) -> bool { + pub fn is_view(&self) -> bool + { self.data.is_view() } /// Returns `true` iff the array is the owned variant. 
- pub fn is_owned(&self) -> bool { + pub fn is_owned(&self) -> bool + { self.data.is_owned() } } @@ -28,7 +30,8 @@ where D: Dimension impl<'a, A, D> From> for CowArray<'a, A, D> where D: Dimension { - fn from(view: ArrayView<'a, A, D>) -> CowArray<'a, A, D> { + fn from(view: ArrayView<'a, A, D>) -> CowArray<'a, A, D> + { // safe because equivalent data unsafe { ArrayBase::from_data_ptr(CowRepr::View(view.data), view.ptr).with_strides_dim(view.strides, view.dim) } } @@ -37,7 +40,8 @@ where D: Dimension impl<'a, A, D> From> for CowArray<'a, A, D> where D: Dimension { - fn from(array: Array) -> CowArray<'a, A, D> { + fn from(array: Array) -> CowArray<'a, A, D> + { // safe because equivalent data unsafe { ArrayBase::from_data_ptr(CowRepr::Owned(array.data), array.ptr).with_strides_dim(array.strides, array.dim) @@ -59,7 +63,8 @@ where Slice: AsRef<[A]> /// assert!(array.is_view()); /// assert_eq!(array, array![1., 2., 3., 4.]); /// ``` - fn from(slice: &'a Slice) -> Self { + fn from(slice: &'a Slice) -> Self + { Self::from(ArrayView1::from(slice)) } } @@ -70,7 +75,8 @@ where D: Dimension, { /// Create a read-only clone-on-write view of the array. 
- fn from(array: &'a ArrayBase) -> Self { + fn from(array: &'a ArrayBase) -> Self + { Self::from(array.view()) } } diff --git a/src/impl_dyn.rs b/src/impl_dyn.rs index 4dc911a7e..836234cec 100644 --- a/src/impl_dyn.rs +++ b/src/impl_dyn.rs @@ -29,7 +29,8 @@ where S: Data /// assert_eq!(a.shape(), &[2, 1, 3]); /// ``` #[track_caller] - pub fn insert_axis_inplace(&mut self, axis: Axis) { + pub fn insert_axis_inplace(&mut self, axis: Axis) + { assert!(axis.index() <= self.ndim()); self.dim = self.dim.insert_axis(axis); self.strides = self.strides.insert_axis(axis); @@ -51,7 +52,8 @@ where S: Data /// assert_eq!(a.shape(), &[2]); /// ``` #[track_caller] - pub fn index_axis_inplace(&mut self, axis: Axis, index: usize) { + pub fn index_axis_inplace(&mut self, axis: Axis, index: usize) + { self.collapse_axis(axis, index); self.dim = self.dim.remove_axis(axis); self.strides = self.strides.remove_axis(axis); diff --git a/src/impl_internal_constructors.rs b/src/impl_internal_constructors.rs index ad0462040..ebb2e26e0 100644 --- a/src/impl_internal_constructors.rs +++ b/src/impl_internal_constructors.rs @@ -22,7 +22,8 @@ where S: RawData /// The caller must ensure that the data storage and pointer is valid. /// /// See ArrayView::from_shape_ptr for general pointer validity documentation. - pub(crate) unsafe fn from_data_ptr(data: S, ptr: NonNull) -> Self { + pub(crate) unsafe fn from_data_ptr(data: S, ptr: NonNull) -> Self + { let array = ArrayBase { data, ptr, @@ -50,7 +51,8 @@ where /// The caller needs to ensure that the new strides and dimensions are correct /// for the array data. 
pub(crate) unsafe fn with_strides_dim(self, strides: E, dim: E) -> ArrayBase - where E: Dimension { + where E: Dimension + { debug_assert_eq!(strides.ndim(), dim.ndim()); ArrayBase { data: self.data, diff --git a/src/impl_methods.rs b/src/impl_methods.rs index 9669effc9..17720ccbc 100644 --- a/src/impl_methods.rs +++ b/src/impl_methods.rs @@ -67,7 +67,8 @@ where D: Dimension, { /// Return the total number of elements in the array. - pub fn len(&self) -> usize { + pub fn len(&self) -> usize + { self.dim.size() } @@ -78,24 +79,28 @@ where /// /// ***Panics*** if the axis is out of bounds. #[track_caller] - pub fn len_of(&self, axis: Axis) -> usize { + pub fn len_of(&self, axis: Axis) -> usize + { self.dim[axis.index()] } /// Return whether the array has any elements - pub fn is_empty(&self) -> bool { + pub fn is_empty(&self) -> bool + { self.len() == 0 } /// Return the number of dimensions (axes) in the array - pub fn ndim(&self) -> usize { + pub fn ndim(&self) -> usize + { self.dim.ndim() } /// Return the shape of the array in its “pattern” form, /// an integer in the one-dimensional case, tuple in the n-dimensional cases /// and so on. - pub fn dim(&self) -> D::Pattern { + pub fn dim(&self) -> D::Pattern + { self.dim.clone().into_pattern() } @@ -113,11 +118,13 @@ where /// // Create an array of zeros that's the same shape and dimensionality as `a`. /// let b = Array::::zeros(a.raw_dim()); /// ``` - pub fn raw_dim(&self) -> D { + pub fn raw_dim(&self) -> D + { self.dim.clone() } - pub fn raw_strides(&self) -> D { + pub fn raw_strides(&self) -> D + { self.strides.clone() } @@ -145,12 +152,14 @@ where /// let c = Array::zeros(a.raw_dim()); /// assert_eq!(a, c); /// ``` - pub fn shape(&self) -> &[usize] { + pub fn shape(&self) -> &[usize] + { self.dim.slice() } /// Return the strides of the array as a slice. 
- pub fn strides(&self) -> &[isize] { + pub fn strides(&self) -> &[isize] + { let s = self.strides.slice(); // reinterpret unsigned integer as signed unsafe { slice::from_raw_parts(s.as_ptr() as *const _, s.len()) } @@ -163,21 +172,24 @@ where /// /// ***Panics*** if the axis is out of bounds. #[track_caller] - pub fn stride_of(&self, axis: Axis) -> isize { + pub fn stride_of(&self, axis: Axis) -> isize + { // strides are reinterpreted as isize self.strides[axis.index()] as isize } /// Return a read-only view of the array pub fn view(&self) -> ArrayView<'_, A, D> - where S: Data { + where S: Data + { debug_assert!(self.pointer_is_inbounds()); unsafe { ArrayView::new(self.ptr, self.dim.clone(), self.strides.clone()) } } /// Return a read-write view of the array pub fn view_mut(&mut self) -> ArrayViewMut<'_, A, D> - where S: DataMut { + where S: DataMut + { self.ensure_unique(); unsafe { ArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) } } @@ -190,7 +202,8 @@ where /// The view acts "as if" the elements are temporarily in cells, and elements /// can be changed through shared references using the regular cell methods. pub fn cell_view(&mut self) -> ArrayView<'_, MathCell, D> - where S: DataMut { + where S: DataMut + { self.view_mut().into_cell_view() } @@ -278,14 +291,16 @@ where /// assert_eq!(unique, array![[1., 2.], [3., 4.]]); /// ``` pub fn try_into_owned_nocopy(self) -> Result, Self> - where S: Data { + where S: Data + { S::try_into_owned_nocopy(self) } /// Turn the array into a shared ownership (copy on write) array, /// without any copying. 
pub fn into_shared(self) -> ArcArray - where S: DataOwned { + where S: DataOwned + { let data = self.data.into_shared(); // safe because: equivalent unmoved data, ptr and dims remain valid unsafe { ArrayBase::from_data_ptr(data, self.ptr).with_strides_dim(self.strides, self.dim) } @@ -307,7 +322,8 @@ where /// assert_eq!(b.first(), None); /// ``` pub fn first(&self) -> Option<&A> - where S: Data { + where S: Data + { if self.is_empty() { None } else { @@ -331,7 +347,8 @@ where /// assert_eq!(b.first_mut(), None); /// ``` pub fn first_mut(&mut self) -> Option<&mut A> - where S: DataMut { + where S: DataMut + { if self.is_empty() { None } else { @@ -355,7 +372,8 @@ where /// assert_eq!(b.last(), None); /// ``` pub fn last(&self) -> Option<&A> - where S: Data { + where S: Data + { if self.is_empty() { None } else { @@ -383,7 +401,8 @@ where /// assert_eq!(b.last_mut(), None); /// ``` pub fn last_mut(&mut self) -> Option<&mut A> - where S: DataMut { + where S: DataMut + { if self.is_empty() { None } else { @@ -402,7 +421,8 @@ where /// /// Iterator element type is `&A`. pub fn iter(&self) -> Iter<'_, A, D> - where S: Data { + where S: Data + { debug_assert!(self.pointer_is_inbounds()); self.view().into_iter_() } @@ -414,7 +434,8 @@ where /// /// Iterator element type is `&mut A`. pub fn iter_mut(&mut self) -> IterMut<'_, A, D> - where S: DataMut { + where S: DataMut + { self.view_mut().into_iter_() } @@ -427,7 +448,8 @@ where /// /// See also [`Zip::indexed`] pub fn indexed_iter(&self) -> IndexedIter<'_, A, D> - where S: Data { + where S: Data + { IndexedIter::new(self.view().into_elements_base()) } @@ -438,7 +460,8 @@ where /// /// Iterator element type is `(D::Pattern, &mut A)`. pub fn indexed_iter_mut(&mut self) -> IndexedIterMut<'_, A, D> - where S: DataMut { + where S: DataMut + { IndexedIterMut::new(self.view_mut().into_elements_base()) } @@ -515,7 +538,8 @@ where /// (**Panics** if `D` is `IxDyn` and `info` does not match the number of array axes.) 
#[track_caller] pub fn slice_move(mut self, info: I) -> ArrayBase - where I: SliceArg { + where I: SliceArg + { assert_eq!( info.in_ndim(), self.ndim(), @@ -583,7 +607,8 @@ where /// - if `D` is `IxDyn` and `info` does not match the number of array axes #[track_caller] pub fn slice_collapse(&mut self, info: I) - where I: SliceArg { + where I: SliceArg + { assert_eq!( info.in_ndim(), self.ndim(), @@ -612,7 +637,8 @@ where #[track_caller] #[must_use = "slice_axis returns an array view with the sliced result"] pub fn slice_axis(&self, axis: Axis, indices: Slice) -> ArrayView<'_, A, D> - where S: Data { + where S: Data + { let mut view = self.view(); view.slice_axis_inplace(axis, indices); view @@ -625,7 +651,8 @@ where #[track_caller] #[must_use = "slice_axis_mut returns an array view with the sliced result"] pub fn slice_axis_mut(&mut self, axis: Axis, indices: Slice) -> ArrayViewMut<'_, A, D> - where S: DataMut { + where S: DataMut + { let mut view_mut = self.view_mut(); view_mut.slice_axis_inplace(axis, indices); view_mut @@ -636,7 +663,8 @@ where /// **Panics** if an index is out of bounds or step size is zero.
/// **Panics** if `axis` is out of bounds. #[track_caller] - pub fn slice_axis_inplace(&mut self, axis: Axis, indices: Slice) { + pub fn slice_axis_inplace(&mut self, axis: Axis, indices: Slice) + { let offset = do_slice(&mut self.dim.slice_mut()[axis.index()], &mut self.strides.slice_mut()[axis.index()], indices); unsafe { @@ -650,7 +678,8 @@ where /// **Panics** if an index is out of bounds or step size is zero.
/// **Panics** if `axis` is out of bounds. #[must_use = "slice_axis_move returns an array with the sliced result"] - pub fn slice_axis_move(mut self, axis: Axis, indices: Slice) -> Self { + pub fn slice_axis_move(mut self, axis: Axis, indices: Slice) -> Self + { self.slice_axis_inplace(axis, indices); self } @@ -700,7 +729,8 @@ where /// **Panics** if an index is out of bounds or step size is zero. #[track_caller] pub fn slice_each_axis_inplace(&mut self, mut f: F) - where F: FnMut(AxisDescription) -> Slice { + where F: FnMut(AxisDescription) -> Slice + { for ax in 0..self.ndim() { self.slice_axis_inplace( Axis(ax), @@ -753,7 +783,8 @@ where /// assert_eq!(unsafe { *p }, 2.); /// ``` pub fn get_ptr(&self, index: I) -> Option<*const A> - where I: NdIndex { + where I: NdIndex + { let ptr = self.ptr; index .index_checked(&self.dim, &self.strides) @@ -901,7 +932,8 @@ where // `get` for zero-dimensional arrays // panics if dimension is not zero. otherwise an element is always present. fn get_0d(&self) -> &A - where S: Data { + where S: Data + { assert!(self.ndim() == 0); unsafe { &*self.as_ptr() } } @@ -976,7 +1008,8 @@ where /// **Panics** if `axis` or `index` is out of bounds. #[track_caller] pub fn index_axis_move(mut self, axis: Axis, index: usize) -> ArrayBase - where D: RemoveAxis { + where D: RemoveAxis + { self.collapse_axis(axis, index); let dim = self.dim.remove_axis(axis); let strides = self.strides.remove_axis(axis); @@ -988,7 +1021,8 @@ where /// /// **Panics** if `axis` or `index` is out of bounds. 
#[track_caller] - pub fn collapse_axis(&mut self, axis: Axis, index: usize) { + pub fn collapse_axis(&mut self, axis: Axis, index: usize) + { let offset = dimension::do_collapse_axis(&mut self.dim, &self.strides, axis.index(), index); self.ptr = unsafe { self.ptr.offset(offset) }; debug_assert!(self.pointer_is_inbounds()); @@ -1081,7 +1115,8 @@ where /// } /// ``` pub fn rows(&self) -> Lanes<'_, A, D::Smaller> - where S: Data { + where S: Data + { let mut n = self.ndim(); if n == 0 { n += 1; @@ -1091,7 +1126,8 @@ where #[deprecated(note = "Renamed to .rows()", since = "0.15.0")] pub fn genrows(&self) -> Lanes<'_, A, D::Smaller> - where S: Data { + where S: Data + { self.rows() } @@ -1100,7 +1136,8 @@ where /// /// Iterator element is `ArrayView1
` (1D read-write array view). pub fn rows_mut(&mut self) -> LanesMut<'_, A, D::Smaller> - where S: DataMut { + where S: DataMut + { let mut n = self.ndim(); if n == 0 { n += 1; @@ -1110,7 +1147,8 @@ where #[deprecated(note = "Renamed to .rows_mut()", since = "0.15.0")] pub fn genrows_mut(&mut self) -> LanesMut<'_, A, D::Smaller> - where S: DataMut { + where S: DataMut + { self.rows_mut() } @@ -1141,7 +1179,8 @@ where /// } /// ``` pub fn columns(&self) -> Lanes<'_, A, D::Smaller> - where S: Data { + where S: Data + { Lanes::new(self.view(), Axis(0)) } @@ -1151,7 +1190,8 @@ where /// Renamed to `.columns()` #[deprecated(note = "Renamed to .columns()", since = "0.15.0")] pub fn gencolumns(&self) -> Lanes<'_, A, D::Smaller> - where S: Data { + where S: Data + { self.columns() } @@ -1160,7 +1200,8 @@ where /// /// Iterator element is `ArrayView1` (1D read-write array view). pub fn columns_mut(&mut self) -> LanesMut<'_, A, D::Smaller> - where S: DataMut { + where S: DataMut + { LanesMut::new(self.view_mut(), Axis(0)) } @@ -1170,7 +1211,8 @@ where /// Renamed to `.columns_mut()` #[deprecated(note = "Renamed to .columns_mut()", since = "0.15.0")] pub fn gencolumns_mut(&mut self) -> LanesMut<'_, A, D::Smaller> - where S: DataMut { + where S: DataMut + { self.columns_mut() } @@ -1203,7 +1245,8 @@ where /// assert_eq!(inner2.into_iter().next().unwrap(), aview1(&[0, 1, 2])); /// ``` pub fn lanes(&self, axis: Axis) -> Lanes<'_, A, D::Smaller> - where S: Data { + where S: Data + { Lanes::new(self.view(), axis) } @@ -1212,7 +1255,8 @@ where /// /// Iterator element is `ArrayViewMut1` (1D read-write array view). 
pub fn lanes_mut(&mut self, axis: Axis) -> LanesMut<'_, A, D::Smaller> - where S: DataMut { + where S: DataMut + { LanesMut::new(self.view_mut(), axis) } @@ -1314,7 +1358,8 @@ where /// ``` #[track_caller] pub fn axis_chunks_iter(&self, axis: Axis, size: usize) -> AxisChunksIter<'_, A, D> - where S: Data { + where S: Data + { AxisChunksIter::new(self.view(), axis, size) } @@ -1326,7 +1371,8 @@ where /// **Panics** if `axis` is out of bounds or if `size` is zero. #[track_caller] pub fn axis_chunks_iter_mut(&mut self, axis: Axis, size: usize) -> AxisChunksIterMut<'_, A, D> - where S: DataMut { + where S: DataMut + { AxisChunksIterMut::new(self.view_mut(), axis, size) } @@ -1481,7 +1527,8 @@ where /// } /// ``` pub fn axis_windows(&self, axis: Axis, window_size: usize) -> Windows<'_, A, D> - where S: Data { + where S: Data + { let axis_index = axis.index(); ndassert!( @@ -1502,7 +1549,8 @@ where } // Return (length, stride) for diagonal - fn diag_params(&self) -> (Ix, Ixs) { + fn diag_params(&self) -> (Ix, Ixs) + { /* empty shape has len 1 */ let len = self.dim.slice().iter().cloned().min().unwrap_or(1); let stride = self.strides().iter().sum(); @@ -1514,18 +1562,21 @@ where /// The diagonal is simply the sequence indexed by *(0, 0, .., 0)*, /// *(1, 1, ..., 1)* etc as long as all axes have elements. pub fn diag(&self) -> ArrayView1<'_, A> - where S: Data { + where S: Data + { self.view().into_diag() } /// Return a read-write view over the diagonal elements of the array. pub fn diag_mut(&mut self) -> ArrayViewMut1<'_, A> - where S: DataMut { + where S: DataMut + { self.view_mut().into_diag() } /// Return the diagonal as a one-dimensional array. 
- pub fn into_diag(self) -> ArrayBase { + pub fn into_diag(self) -> ArrayBase + { let (len, stride) = self.diag_params(); // safe because new len stride allows access to a subset of the current elements unsafe { self.with_strides_dim(Ix1(stride as Ix), Ix1(len)) } @@ -1537,7 +1588,8 @@ where /// /// This method is mostly only useful with unsafe code. fn try_ensure_unique(&mut self) - where S: RawDataMut { + where S: RawDataMut + { debug_assert!(self.pointer_is_inbounds()); S::try_ensure_unique(self); debug_assert!(self.pointer_is_inbounds()); @@ -1547,7 +1599,8 @@ where /// /// This method is mostly only useful with unsafe code. fn ensure_unique(&mut self) - where S: DataMut { + where S: DataMut + { debug_assert!(self.pointer_is_inbounds()); S::ensure_unique(self); debug_assert!(self.pointer_is_inbounds()); @@ -1558,12 +1611,14 @@ where /// /// Return `false` otherwise, i.e. the array is possibly not /// contiguous in memory, it has custom strides, etc. - pub fn is_standard_layout(&self) -> bool { + pub fn is_standard_layout(&self) -> bool + { dimension::is_layout_c(&self.dim, &self.strides) } /// Return true if the array is known to be contiguous. - pub(crate) fn is_contiguous(&self) -> bool { + pub(crate) fn is_contiguous(&self) -> bool + { D::is_contiguous(&self.dim, &self.strides) } @@ -1619,7 +1674,8 @@ where /// /// where *d* is `self.ndim()`. #[inline(always)] - pub fn as_ptr(&self) -> *const A { + pub fn as_ptr(&self) -> *const A + { self.ptr.as_ptr() as *const A } @@ -1635,14 +1691,16 @@ where /// the data may change the strides. #[inline(always)] pub fn as_mut_ptr(&mut self) -> *mut A - where S: RawDataMut { + where S: RawDataMut + { self.try_ensure_unique(); // for ArcArray self.ptr.as_ptr() } /// Return a raw view of the array. 
#[inline] - pub fn raw_view(&self) -> RawArrayView { + pub fn raw_view(&self) -> RawArrayView + { unsafe { RawArrayView::new(self.ptr, self.dim.clone(), self.strides.clone()) } } @@ -1652,7 +1710,8 @@ where /// data is guaranteed to be uniquely held on return. #[inline] pub fn raw_view_mut(&mut self) -> RawArrayViewMut - where S: RawDataMut { + where S: RawDataMut + { self.try_ensure_unique(); // for ArcArray unsafe { RawArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) } } @@ -1662,7 +1721,8 @@ where /// Safety: The caller must ensure that the owned array is unshared when this is called #[inline] pub(crate) unsafe fn raw_view_mut_unchecked(&mut self) -> RawArrayViewMut - where S: DataOwned { + where S: DataOwned + { RawArrayViewMut::new(self.ptr, self.dim.clone(), self.strides.clone()) } @@ -1672,7 +1732,8 @@ where /// If this function returns `Some(_)`, then the element order in the slice /// corresponds to the logical order of the array’s elements. pub fn as_slice(&self) -> Option<&[A]> - where S: Data { + where S: Data + { if self.is_standard_layout() { unsafe { Some(slice::from_raw_parts(self.ptr.as_ptr(), self.len())) } } else { @@ -1683,7 +1744,8 @@ where /// Return the array’s data as a slice, if it is contiguous and in standard order. /// Return `None` otherwise. pub fn as_slice_mut(&mut self) -> Option<&mut [A]> - where S: DataMut { + where S: DataMut + { if self.is_standard_layout() { self.ensure_unique(); unsafe { Some(slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len())) } @@ -1698,7 +1760,8 @@ where /// If this function returns `Some(_)`, then the elements in the slice /// have whatever order the elements have in memory. 
pub fn as_slice_memory_order(&self) -> Option<&[A]> - where S: Data { + where S: Data + { if self.is_contiguous() { let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides); unsafe { Some(slice::from_raw_parts(self.ptr.sub(offset).as_ptr(), self.len())) } @@ -1714,14 +1777,16 @@ where /// method unshares the data if necessary, but it preserves the existing /// strides. pub fn as_slice_memory_order_mut(&mut self) -> Option<&mut [A]> - where S: DataMut { + where S: DataMut + { self.try_as_slice_memory_order_mut().ok() } /// Return the array’s data as a slice if it is contiguous, otherwise /// return `self` in the `Err` variant. pub(crate) fn try_as_slice_memory_order_mut(&mut self) -> Result<&mut [A], &mut Self> - where S: DataMut { + where S: DataMut + { if self.is_contiguous() { self.ensure_unique(); let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides); @@ -1881,13 +1946,15 @@ where /// ); /// ``` pub fn into_shape_with_order(self, shape: E) -> Result, ShapeError> - where E: ShapeArg { + where E: ShapeArg + { let (shape, order) = shape.into_shape_and_order(); self.into_shape_with_order_impl(shape, order.unwrap_or(Order::RowMajor)) } fn into_shape_with_order_impl(self, shape: E, order: Order) -> Result, ShapeError> - where E: Dimension { + where E: Dimension + { let shape = shape.into_dimension(); if size_of_shape_checked(&shape) != Ok(self.dim.size()) { return Err(error::incompatible_shapes(&self.dim, &shape)); @@ -1931,7 +1998,8 @@ where /// ``` #[deprecated = "Use `.into_shape_with_order()` or `.to_shape()`"] pub fn into_shape(self, shape: E) -> Result, ShapeError> - where E: IntoDimension { + where E: IntoDimension + { let shape = shape.into_dimension(); if size_of_shape_checked(&shape) != Ok(self.dim.size()) { return Err(error::incompatible_shapes(&self.dim, &shape)); @@ -2077,7 +2145,8 @@ where /// let array: ArrayD = arr2(&[[1, 2], /// [3, 4]]).into_dyn(); /// ``` - pub fn into_dyn(self) -> ArrayBase { + pub fn 
into_dyn(self) -> ArrayBase + { // safe because new dims equivalent unsafe { ArrayBase::from_data_ptr(self.data, self.ptr).with_strides_dim(self.strides.into_dyn(), self.dim.into_dyn()) @@ -2101,7 +2170,8 @@ where /// assert!(array.into_dimensionality::().is_ok()); /// ``` pub fn into_dimensionality(self) -> Result, ShapeError> - where D2: Dimension { + where D2: Dimension + { unsafe { if D::NDIM == D2::NDIM { // safe because D == D2 @@ -2163,7 +2233,8 @@ where /// /// **Note:** Cannot be used for mutable iterators, since repeating /// elements would create aliasing pointers. - fn upcast(to: &D, from: &E, stride: &E) -> Option { + fn upcast(to: &D, from: &E, stride: &E) -> Option + { // Make sure the product of non-zero axis lengths does not exceed // `isize::MAX`. This is the only safety check we need to perform // because all the other constraints of `ArrayBase` are guaranteed @@ -2269,7 +2340,8 @@ where /// ); /// ``` #[track_caller] - pub fn swap_axes(&mut self, ax: usize, bx: usize) { + pub fn swap_axes(&mut self, ax: usize, bx: usize) + { self.dim.slice_mut().swap(ax, bx); self.strides.slice_mut().swap(ax, bx); } @@ -2298,7 +2370,8 @@ where /// ``` #[track_caller] pub fn permuted_axes(self, axes: T) -> ArrayBase - where T: IntoDimension { + where T: IntoDimension + { let axes = axes.into_dimension(); // Ensure that each axis is used exactly once. let mut usage_counts = D::zeros(self.ndim()); @@ -2327,7 +2400,8 @@ where /// /// Transposition reverses the order of the axes (dimensions and strides) /// while retaining the same data. - pub fn reversed_axes(mut self) -> ArrayBase { + pub fn reversed_axes(mut self) -> ArrayBase + { self.dim.slice_mut().reverse(); self.strides.slice_mut().reverse(); self @@ -2339,12 +2413,14 @@ where /// /// See also the more general methods `.reversed_axes()` and `.swap_axes()`. 
pub fn t(&self) -> ArrayView<'_, A, D> - where S: Data { + where S: Data + { self.view().reversed_axes() } /// Return an iterator over the length and stride of each axis. - pub fn axes(&self) -> Axes<'_, D> { + pub fn axes(&self) -> Axes<'_, D> + { axes_of(&self.dim, &self.strides) } @@ -2357,7 +2433,8 @@ where /// Return the axis with the greatest stride (by absolute value), /// preferring axes with len > 1. - pub fn max_stride_axis(&self) -> Axis { + pub fn max_stride_axis(&self) -> Axis + { self.dim.max_stride_axis(&self.strides) } @@ -2365,7 +2442,8 @@ where /// /// ***Panics*** if the axis is out of bounds. #[track_caller] - pub fn invert_axis(&mut self, axis: Axis) { + pub fn invert_axis(&mut self, axis: Axis) + { unsafe { let s = self.strides.axis(axis) as Ixs; let m = self.dim.axis(axis); @@ -2412,7 +2490,8 @@ where /// /// ***Panics*** if an axis is out of bounds. #[track_caller] - pub fn merge_axes(&mut self, take: Axis, into: Axis) -> bool { + pub fn merge_axes(&mut self, take: Axis, into: Axis) -> bool + { merge_axes(&mut self.dim, &mut self.strides, take, into) } @@ -2438,7 +2517,8 @@ where /// /// ***Panics*** if the axis is out of bounds. #[track_caller] - pub fn insert_axis(self, axis: Axis) -> ArrayBase { + pub fn insert_axis(self, axis: Axis) -> ArrayBase + { assert!(axis.index() <= self.ndim()); // safe because a new axis of length one does not affect memory layout unsafe { @@ -2456,11 +2536,13 @@ where /// **Panics** if the axis is out of bounds or its length is zero. #[track_caller] pub fn remove_axis(self, axis: Axis) -> ArrayBase - where D: RemoveAxis { + where D: RemoveAxis + { self.index_axis_move(axis, 0) } - pub(crate) fn pointer_is_inbounds(&self) -> bool { + pub(crate) fn pointer_is_inbounds(&self) -> bool + { self.data._is_pointer_inbounds(self.as_ptr()) } @@ -2903,7 +2985,8 @@ where /// ***Panics*** if `axis` is out of bounds
/// ***Panics*** if not `index < self.len_of(axis)`. pub fn remove_index(&mut self, axis: Axis, index: usize) - where S: DataOwned + DataMut { + where S: DataOwned + DataMut + { assert!(index < self.len_of(axis), "index {} must be less than length of Axis({})", index, axis.index()); let (_, mut tail) = self.view_mut().split_at(axis, index); @@ -2969,7 +3052,8 @@ where }); } - pub fn device(&self) -> Device { + pub fn device(&self) -> Device + { // If a device is returned, use that. Otherwise, it's fairly safe to // assume that the data is on the host. self.data._device().unwrap_or(Device::Host) @@ -2984,7 +3068,8 @@ where /// **Panics** if the size of A and B are different. #[track_caller] #[inline] -unsafe fn unlimited_transmute(data: A) -> B { +unsafe fn unlimited_transmute(data: A) -> B +{ // safe when sizes are equal and caller guarantees that representations are equal assert_eq!(size_of::
(), size_of::()); let old_data = ManuallyDrop::new(data); @@ -3000,7 +3085,8 @@ impl ArrayBase, D> // self.data.device() // } - pub fn move_to_device(self, device: Device) -> Option { + pub fn move_to_device(self, device: Device) -> Option + { let dim = self.dim; let strides = self.strides; let data = self.data.move_to_device(device)?; diff --git a/src/impl_ops.rs b/src/impl_ops.rs index 2e2228f31..8d1798080 100644 --- a/src/impl_ops.rs +++ b/src/impl_ops.rs @@ -397,22 +397,26 @@ impl<'a, S, D> $trt<&'a ArrayBase> for $scalar ); } -mod arithmetic_ops { +mod arithmetic_ops +{ use std::ops::*; use crate::imp_prelude::*; use super::*; - fn clone_opf(f: impl Fn(A, B) -> C) -> impl FnMut(&A, &B) -> C { + fn clone_opf(f: impl Fn(A, B) -> C) -> impl FnMut(&A, &B) -> C + { move |x, y| f(x.clone(), y.clone()) } - fn clone_iopf(f: impl Fn(A, B) -> A) -> impl FnMut(&mut A, &B) { + fn clone_iopf(f: impl Fn(A, B) -> A) -> impl FnMut(&mut A, &B) + { move |x, y| *x = f(x.clone(), y.clone()) } - fn clone_iopf_rev(f: impl Fn(A, B) -> B) -> impl FnMut(&mut B, &A) { + fn clone_iopf_rev(f: impl Fn(A, B) -> B) -> impl FnMut(&mut B, &A) + { move |x, y| *x = f(y.clone(), x.clone()) } @@ -488,7 +492,8 @@ mod arithmetic_ops { { type Output = Self; /// Perform an elementwise negation of `self` and return the result. - fn neg(mut self) -> Self { + fn neg(mut self) -> Self + { self.map_inplace(|elt| { *elt = -elt.clone(); }); @@ -505,7 +510,8 @@ mod arithmetic_ops { type Output = Array; /// Perform an elementwise negation of reference `self` and return the /// result as a new `Array`. - fn neg(self) -> Array { + fn neg(self) -> Array + { self.map(Neg::neg) } } @@ -518,7 +524,8 @@ mod arithmetic_ops { { type Output = Self; /// Perform an elementwise unary not of `self` and return the result. 
- fn not(mut self) -> Self { + fn not(mut self) -> Self + { self.map_inplace(|elt| { *elt = !elt.clone(); }); @@ -535,13 +542,15 @@ mod arithmetic_ops { type Output = Array; /// Perform an elementwise unary not of reference `self` and return the /// result as a new `Array`. - fn not(self) -> Array { + fn not(self) -> Array + { self.map(Not::not) } } } -mod assign_ops { +mod assign_ops +{ use crate::imp_prelude::*; use super::*; diff --git a/src/impl_owned_array.rs b/src/impl_owned_array.rs index ebe76fd99..53be9e48c 100644 --- a/src/impl_owned_array.rs +++ b/src/impl_owned_array.rs @@ -17,7 +17,8 @@ use crate::Zip; /// Methods specific to `Array0`. /// /// ***See also all methods for [`ArrayBase`]*** -impl Array { +impl Array +{ /// Returns the single element in the array without cloning it. /// /// ``` @@ -31,7 +32,8 @@ impl Array { /// let scalar: Foo = array.into_scalar(); /// assert_eq!(scalar, Foo); /// ``` - pub fn into_scalar(self) -> A { + pub fn into_scalar(self) -> A + { let size = mem::size_of::(); if size == 0 { // Any index in the `Vec` is fine since all elements are identical. @@ -62,7 +64,8 @@ where D: Dimension /// /// If the array is in standard memory layout, the logical element order /// of the array (`.iter()` order) and of the returned vector will be the same. - pub fn into_raw_vec(self) -> Vec { + pub fn into_raw_vec(self) -> Vec + { self.data.into_vec() } } @@ -70,7 +73,8 @@ where D: Dimension /// Methods specific to `Array2`. /// /// ***See also all methods for [`ArrayBase`]*** -impl Array { +impl Array +{ /// Append a row to an array /// /// The elements from `row` are cloned and added as a new row in the array. 
@@ -111,7 +115,8 @@ impl Array { /// [-1., -2., -3., -4.]]); /// ``` pub fn push_row(&mut self, row: ArrayView) -> Result<(), ShapeError> - where A: Clone { + where A: Clone + { self.append(Axis(0), row.insert_axis(Axis(0))) } @@ -155,7 +160,8 @@ impl Array { /// [2., -2.]]); /// ``` pub fn push_column(&mut self, column: ArrayView) -> Result<(), ShapeError> - where A: Clone { + where A: Clone + { self.append(Axis(1), column.insert_axis(Axis(1))) } } @@ -197,7 +203,8 @@ where D: Dimension } } - fn move_into_needs_drop(mut self, new_array: ArrayViewMut) { + fn move_into_needs_drop(mut self, new_array: ArrayViewMut) + { // Simple case where `A` has a destructor: just swap values between self and new_array. // Afterwards, `self` drops full of initialized values and dropping works as usual. // This avoids moving out of owned values in `self` while at the same time managing @@ -242,7 +249,8 @@ where D: Dimension self.move_into_impl(new_array.into()) } - fn move_into_impl(mut self, new_array: ArrayViewMut, D>) { + fn move_into_impl(mut self, new_array: ArrayViewMut, D>) + { unsafe { // Safety: copy_to_nonoverlapping cannot panic let guard = AbortIfPanic(&"move_into: moving out of owned value"); @@ -267,7 +275,8 @@ where D: Dimension /// # Safety /// /// This is a panic critical section since `self` is already moved-from. 
- fn drop_unreachable_elements(mut self) -> OwnedRepr { + fn drop_unreachable_elements(mut self) -> OwnedRepr + { let self_len = self.len(); // "deconstruct" self; the owned repr releases ownership of all elements and we @@ -287,7 +296,8 @@ where D: Dimension #[inline(never)] #[cold] - fn drop_unreachable_elements_slow(mut self) -> OwnedRepr { + fn drop_unreachable_elements_slow(mut self) -> OwnedRepr + { // "deconstruct" self; the owned repr releases ownership of all elements and we // carry on with raw view methods let data_len = self.data.len(); @@ -308,7 +318,8 @@ where D: Dimension /// Create an empty array with an all-zeros shape /// /// ***Panics*** if D is zero-dimensional, because it can't be empty - pub(crate) fn empty() -> Array { + pub(crate) fn empty() -> Array + { assert_ne!(D::NDIM, Some(0)); let ndim = D::NDIM.unwrap_or(1); Array::from_shape_simple_fn(D::zeros(ndim), || unreachable!()) @@ -316,7 +327,8 @@ where D: Dimension /// Create new_array with the right layout for appending to `growing_axis` #[cold] - fn change_to_contig_append_layout(&mut self, growing_axis: Axis) { + fn change_to_contig_append_layout(&mut self, growing_axis: Axis) + { let ndim = self.ndim(); let mut dim = self.raw_dim(); @@ -615,13 +627,16 @@ where D: Dimension // on scope exit (panic or loop finish). This "indirect" way to // write the length is used to help the compiler, the len store to self.data may // otherwise be mistaken to alias with other stores in the loop. - struct SetLenOnDrop<'a, A: 'a> { + struct SetLenOnDrop<'a, A: 'a> + { len: usize, data: &'a mut OwnedRepr, } - impl Drop for SetLenOnDrop<'_, A> { - fn drop(&mut self) { + impl Drop for SetLenOnDrop<'_, A> + { + fn drop(&mut self) + { unsafe { self.data.set_len(self.len); } @@ -663,7 +678,8 @@ where D: Dimension /// This is an internal function for use by move_into and IntoIter only, safety invariants may need /// to be upheld across the calls from those implementations. 
pub(crate) unsafe fn drop_unreachable_raw(mut self_: RawArrayViewMut, data_ptr: *mut A, data_len: usize) -where D: Dimension { +where D: Dimension +{ let self_len = self_.len(); for i in 0..self_.ndim() { @@ -745,7 +761,8 @@ where } fn sort_axes1_impl(adim: &mut D, astrides: &mut D) -where D: Dimension { +where D: Dimension +{ debug_assert!(adim.ndim() > 1); debug_assert_eq!(adim.ndim(), astrides.ndim()); // bubble sort axes @@ -784,7 +801,8 @@ where } fn sort_axes2_impl(adim: &mut D, astrides: &mut D, bdim: &mut D, bstrides: &mut D) -where D: Dimension { +where D: Dimension +{ debug_assert!(adim.ndim() > 1); debug_assert_eq!(adim.ndim(), bdim.ndim()); // bubble sort axes diff --git a/src/impl_raw_views.rs b/src/impl_raw_views.rs index 237a94898..aeee75cb2 100644 --- a/src/impl_raw_views.rs +++ b/src/impl_raw_views.rs @@ -16,11 +16,13 @@ where D: Dimension /// Unsafe because caller is responsible for ensuring that the array will /// meet all of the invariants of the `ArrayBase` type. #[inline] - pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self { + pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self + { RawArrayView::from_data_ptr(RawViewRepr::new(), ptr).with_strides_dim(strides, dim) } - unsafe fn new_(ptr: *const A, dim: D, strides: D) -> Self { + unsafe fn new_(ptr: *const A, dim: D, strides: D) -> Self + { Self::new(nonnull_debug_checked_from_ptr(ptr as *mut A), dim, strides) } @@ -65,7 +67,8 @@ where D: Dimension /// /// [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub unsafe fn from_shape_ptr(shape: Sh, ptr: *const A) -> Self - where Sh: Into> { + where Sh: Into> + { let shape = shape.into(); let dim = shape.dim; if cfg!(debug_assertions) { @@ -90,7 +93,8 @@ where D: Dimension /// data is valid, ensure that the pointer is aligned, and choose the /// correct lifetime. 
#[inline] - pub unsafe fn deref_into_view<'a>(self) -> ArrayView<'a, A, D> { + pub unsafe fn deref_into_view<'a>(self) -> ArrayView<'a, A, D> + { debug_assert!( is_aligned(self.ptr.as_ptr()), "The pointer must be aligned." @@ -103,7 +107,8 @@ where D: Dimension /// /// **Panics** if `axis` or `index` is out of bounds. #[track_caller] - pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { + pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) + { assert!(index <= self.len_of(axis)); let left_ptr = self.ptr.as_ptr(); let right_ptr = if index == self.len_of(axis) { @@ -137,7 +142,8 @@ where D: Dimension /// casts are safe, access through the produced raw view is only possible /// in an unsafe block or function. #[track_caller] - pub fn cast(self) -> RawArrayView { + pub fn cast(self) -> RawArrayView + { assert_eq!( mem::size_of::(), mem::size_of::(), @@ -153,7 +159,8 @@ where D: Dimension { /// Splits the view into views of the real and imaginary components of the /// elements. - pub fn split_complex(self) -> Complex> { + pub fn split_complex(self) -> Complex> + { // Check that the size and alignment of `Complex` are as expected. // These assertions should always pass, for arbitrary `T`. assert_eq!( @@ -222,11 +229,13 @@ where D: Dimension /// Unsafe because caller is responsible for ensuring that the array will /// meet all of the invariants of the `ArrayBase` type. 
#[inline] - pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self { + pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self + { RawArrayViewMut::from_data_ptr(RawViewRepr::new(), ptr).with_strides_dim(strides, dim) } - unsafe fn new_(ptr: *mut A, dim: D, strides: D) -> Self { + unsafe fn new_(ptr: *mut A, dim: D, strides: D) -> Self + { Self::new(nonnull_debug_checked_from_ptr(ptr), dim, strides) } @@ -271,7 +280,8 @@ where D: Dimension /// /// [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub unsafe fn from_shape_ptr(shape: Sh, ptr: *mut A) -> Self - where Sh: Into> { + where Sh: Into> + { let shape = shape.into(); let dim = shape.dim; if cfg!(debug_assertions) { @@ -289,7 +299,8 @@ where D: Dimension /// Converts to a non-mutable `RawArrayView`. #[inline] - pub(crate) fn into_raw_view(self) -> RawArrayView { + pub(crate) fn into_raw_view(self) -> RawArrayView + { unsafe { RawArrayView::new(self.ptr, self.dim, self.strides) } } @@ -302,7 +313,8 @@ where D: Dimension /// data is valid, ensure that the pointer is aligned, and choose the /// correct lifetime. #[inline] - pub unsafe fn deref_into_view<'a>(self) -> ArrayView<'a, A, D> { + pub unsafe fn deref_into_view<'a>(self) -> ArrayView<'a, A, D> + { debug_assert!( is_aligned(self.ptr.as_ptr()), "The pointer must be aligned." @@ -319,7 +331,8 @@ where D: Dimension /// data is valid, ensure that the pointer is aligned, and choose the /// correct lifetime. #[inline] - pub unsafe fn deref_into_view_mut<'a>(self) -> ArrayViewMut<'a, A, D> { + pub unsafe fn deref_into_view_mut<'a>(self) -> ArrayViewMut<'a, A, D> + { debug_assert!( is_aligned(self.ptr.as_ptr()), "The pointer must be aligned." @@ -332,7 +345,8 @@ where D: Dimension /// /// **Panics** if `axis` or `index` is out of bounds. 
#[track_caller] - pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { + pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) + { let (left, right) = self.into_raw_view().split_at(axis, index); unsafe { (Self::new(left.ptr, left.dim, left.strides), Self::new(right.ptr, right.dim, right.strides)) } } @@ -348,7 +362,8 @@ where D: Dimension /// casts are safe, access through the produced raw view is only possible /// in an unsafe block or function. #[track_caller] - pub fn cast(self) -> RawArrayViewMut { + pub fn cast(self) -> RawArrayViewMut + { assert_eq!( mem::size_of::(), mem::size_of::(), @@ -364,7 +379,8 @@ where D: Dimension { /// Splits the view into views of the real and imaginary components of the /// elements. - pub fn split_complex(self) -> Complex> { + pub fn split_complex(self) -> Complex> + { let Complex { re, im } = self.into_raw_view().split_complex(); unsafe { Complex { diff --git a/src/impl_special_element_types.rs b/src/impl_special_element_types.rs index 65e878963..e430b20bc 100644 --- a/src/impl_special_element_types.rs +++ b/src/impl_special_element_types.rs @@ -31,7 +31,8 @@ where /// Note that for owned and shared ownership arrays, the promise must include all of the /// array's storage; it is for example possible to slice these in place, but that must /// only be done after all elements have been initialized. 
- pub unsafe fn assume_init(self) -> ArrayBase<>::Output, D> { + pub unsafe fn assume_init(self) -> ArrayBase<>::Output, D> + { let ArrayBase { data, ptr, diff --git a/src/impl_views/constructors.rs b/src/impl_views/constructors.rs index 136ba0ece..33c7b15be 100644 --- a/src/impl_views/constructors.rs +++ b/src/impl_views/constructors.rs @@ -45,12 +45,14 @@ where D: Dimension /// assert!(a.strides() == &[1, 4, 2]); /// ``` pub fn from_shape(shape: Sh, xs: &'a [A]) -> Result - where Sh: Into> { + where Sh: Into> + { // eliminate the type parameter Sh as soon as possible Self::from_shape_impl(shape.into(), xs) } - fn from_shape_impl(shape: StrideShape, xs: &'a [A]) -> Result { + fn from_shape_impl(shape: StrideShape, xs: &'a [A]) -> Result + { let dim = shape.dim; dimension::can_index_slice_with_strides(xs, &dim, &shape.strides)?; let strides = shape.strides.strides_for_dim(&dim); @@ -109,7 +111,8 @@ where D: Dimension /// /// [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub unsafe fn from_shape_ptr(shape: Sh, ptr: *const A) -> Self - where Sh: Into> { + where Sh: Into> + { RawArrayView::from_shape_ptr(shape, ptr).deref_into_view() } } @@ -144,12 +147,14 @@ where D: Dimension /// assert!(a.strides() == &[1, 4, 2]); /// ``` pub fn from_shape(shape: Sh, xs: &'a mut [A]) -> Result - where Sh: Into> { + where Sh: Into> + { // eliminate the type parameter Sh as soon as possible Self::from_shape_impl(shape.into(), xs) } - fn from_shape_impl(shape: StrideShape, xs: &'a mut [A]) -> Result { + fn from_shape_impl(shape: StrideShape, xs: &'a mut [A]) -> Result + { let dim = shape.dim; dimension::can_index_slice_with_strides(xs, &dim, &shape.strides)?; let strides = shape.strides.strides_for_dim(&dim); @@ -208,14 +213,16 @@ where D: Dimension /// /// [`.offset()`]: https://doc.rust-lang.org/stable/std/primitive.pointer.html#method.offset pub unsafe fn from_shape_ptr(shape: Sh, ptr: *mut A) -> Self - where Sh: Into> { + where Sh: Into> 
+ { RawArrayViewMut::from_shape_ptr(shape, ptr).deref_into_view_mut() } /// Convert the view into an `ArrayViewMut<'b, A, D>` where `'b` is a lifetime /// outlived by `'a'`. pub fn reborrow<'b>(self) -> ArrayViewMut<'b, A, D> - where 'a: 'b { + where 'a: 'b + { unsafe { ArrayViewMut::new(self.ptr, self.dim, self.strides) } } } @@ -228,7 +235,8 @@ where D: Dimension /// /// Unsafe because: `ptr` must be valid for the given dimension and strides. #[inline(always)] - pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self { + pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self + { if cfg!(debug_assertions) { assert!(is_aligned(ptr.as_ptr()), "The pointer must be aligned."); dimension::max_abs_offset_check_overflow::(&dim, &strides).unwrap(); @@ -238,7 +246,8 @@ where D: Dimension /// Unsafe because: `ptr` must be valid for the given dimension and strides. #[inline] - pub(crate) unsafe fn new_(ptr: *const A, dim: D, strides: D) -> Self { + pub(crate) unsafe fn new_(ptr: *const A, dim: D, strides: D) -> Self + { Self::new(nonnull_debug_checked_from_ptr(ptr as *mut A), dim, strides) } } @@ -250,7 +259,8 @@ where D: Dimension /// /// Unsafe because: `ptr` must be valid for the given dimension and strides. #[inline(always)] - pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self { + pub(crate) unsafe fn new(ptr: NonNull, dim: D, strides: D) -> Self + { if cfg!(debug_assertions) { assert!(is_aligned(ptr.as_ptr()), "The pointer must be aligned."); dimension::max_abs_offset_check_overflow::(&dim, &strides).unwrap(); @@ -262,7 +272,8 @@ where D: Dimension /// /// Unsafe because: `ptr` must be valid for the given dimension and strides. 
#[inline(always)] - pub(crate) unsafe fn new_(ptr: *mut A, dim: D, strides: D) -> Self { + pub(crate) unsafe fn new_(ptr: *mut A, dim: D, strides: D) -> Self + { Self::new(nonnull_debug_checked_from_ptr(ptr), dim, strides) } } diff --git a/src/impl_views/conversions.rs b/src/impl_views/conversions.rs index a364f3e70..f545ebdd0 100644 --- a/src/impl_views/conversions.rs +++ b/src/impl_views/conversions.rs @@ -26,7 +26,8 @@ where D: Dimension /// Convert the view into an `ArrayView<'b, A, D>` where `'b` is a lifetime /// outlived by `'a'`. pub fn reborrow<'b>(self) -> ArrayView<'b, A, D> - where 'a: 'b { + where 'a: 'b + { unsafe { ArrayView::new(self.ptr, self.dim, self.strides) } } @@ -35,7 +36,8 @@ where D: Dimension /// /// Note that while the method is similar to [`ArrayBase::as_slice()`], this method transfers /// the view's lifetime to the slice, so it is a bit more powerful. - pub fn to_slice(&self) -> Option<&'a [A]> { + pub fn to_slice(&self) -> Option<&'a [A]> + { if self.is_standard_layout() { unsafe { Some(slice::from_raw_parts(self.ptr.as_ptr(), self.len())) } } else { @@ -49,7 +51,8 @@ where D: Dimension /// Note that while the method is similar to /// [`ArrayBase::as_slice_memory_order()`], this method transfers the view's /// lifetime to the slice, so it is a bit more powerful. - pub fn to_slice_memory_order(&self) -> Option<&'a [A]> { + pub fn to_slice_memory_order(&self) -> Option<&'a [A]> + { if self.is_contiguous() { let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides); unsafe { Some(slice::from_raw_parts(self.ptr.sub(offset).as_ptr(), self.len())) } @@ -59,7 +62,8 @@ where D: Dimension } /// Converts to a raw array view. - pub(crate) fn into_raw_view(self) -> RawArrayView { + pub(crate) fn into_raw_view(self) -> RawArrayView + { unsafe { RawArrayView::new(self.ptr, self.dim, self.strides) } } } @@ -67,7 +71,8 @@ where D: Dimension /// Methods specific to `ArrayView0`. 
/// /// ***See also all methods for [`ArrayView`] and [`ArrayBase`]*** -impl<'a, A> ArrayView<'a, A, Ix0> { +impl<'a, A> ArrayView<'a, A, Ix0> +{ /// Consume the view and return a reference to the single element in the array. /// /// The lifetime of the returned reference matches the lifetime of the data @@ -85,7 +90,8 @@ impl<'a, A> ArrayView<'a, A, Ix0> { /// let scalar: &Foo = view.into_scalar(); /// assert_eq!(scalar, &Foo); /// ``` - pub fn into_scalar(self) -> &'a A { + pub fn into_scalar(self) -> &'a A + { self.index(Ix0()) } } @@ -93,7 +99,8 @@ impl<'a, A> ArrayView<'a, A, Ix0> { /// Methods specific to `ArrayViewMut0`. /// /// ***See also all methods for [`ArrayViewMut`] and [`ArrayBase`]*** -impl<'a, A> ArrayViewMut<'a, A, Ix0> { +impl<'a, A> ArrayViewMut<'a, A, Ix0> +{ /// Consume the mutable view and return a mutable reference to the single element in the array. /// /// The lifetime of the returned reference matches the lifetime of the data @@ -109,7 +116,8 @@ impl<'a, A> ArrayViewMut<'a, A, Ix0> { /// assert_eq!(scalar, &7.); /// assert_eq!(array[()], 7.); /// ``` - pub fn into_scalar(self) -> &'a mut A { + pub fn into_scalar(self) -> &'a mut A + { self.index(Ix0()) } } @@ -123,7 +131,8 @@ where D: Dimension /// /// Note that while this is similar to [`ArrayBase::as_slice_mut()`], this method transfers the /// view's lifetime to the slice. - pub fn into_slice(self) -> Option<&'a mut [A]> { + pub fn into_slice(self) -> Option<&'a mut [A]> + { self.try_into_slice().ok() } @@ -133,7 +142,8 @@ where D: Dimension /// Note that while this is similar to /// [`ArrayBase::as_slice_memory_order_mut()`], this method transfers the /// view's lifetime to the slice. 
- pub fn into_slice_memory_order(self) -> Option<&'a mut [A]> { + pub fn into_slice_memory_order(self) -> Option<&'a mut [A]> + { self.try_into_slice_memory_order().ok() } @@ -143,7 +153,8 @@ where D: Dimension /// /// The view acts "as if" the elements are temporarily in cells, and elements /// can be changed through shared references using the regular cell methods. - pub fn into_cell_view(self) -> ArrayView<'a, MathCell, D> { + pub fn into_cell_view(self) -> ArrayView<'a, MathCell, D> + { // safety: valid because // A and MathCell have the same representation // &'a mut T is interchangeable with &'a Cell -- see method Cell::from_mut in std @@ -167,7 +178,8 @@ where D: Dimension /// This method allows writing uninitialized data into the view, which could leave any /// original array that we borrow from in an inconsistent state. This is not allowed /// when using the resulting array view. - pub(crate) unsafe fn into_maybe_uninit(self) -> ArrayViewMut<'a, MaybeUninit, D> { + pub(crate) unsafe fn into_maybe_uninit(self) -> ArrayViewMut<'a, MaybeUninit, D> + { // Safe because: A and MaybeUninit have the same representation; // and we can go from initialized to (maybe) not unconditionally in terms of // representation. 
However, the user must be careful to not write uninit elements @@ -183,7 +195,8 @@ impl RawArrayView where D: Dimension { #[inline] - pub(crate) fn into_base_iter(self) -> Baseiter { + pub(crate) fn into_base_iter(self) -> Baseiter + { unsafe { Baseiter::new(self.ptr.as_ptr(), self.dim, self.strides) } } } @@ -192,7 +205,8 @@ impl RawArrayViewMut where D: Dimension { #[inline] - pub(crate) fn into_base_iter(self) -> Baseiter { + pub(crate) fn into_base_iter(self) -> Baseiter + { unsafe { Baseiter::new(self.ptr.as_ptr(), self.dim, self.strides) } } } @@ -202,16 +216,19 @@ impl<'a, A, D> ArrayView<'a, A, D> where D: Dimension { #[inline] - pub(crate) fn into_base_iter(self) -> Baseiter { + pub(crate) fn into_base_iter(self) -> Baseiter + { unsafe { Baseiter::new(self.ptr.as_ptr(), self.dim, self.strides) } } #[inline] - pub(crate) fn into_elements_base(self) -> ElementsBase<'a, A, D> { + pub(crate) fn into_elements_base(self) -> ElementsBase<'a, A, D> + { ElementsBase::new(self) } - pub(crate) fn into_iter_(self) -> Iter<'a, A, D> { + pub(crate) fn into_iter_(self) -> Iter<'a, A, D> + { Iter::new(self) } @@ -219,7 +236,8 @@ where D: Dimension #[doc(hidden)] // not official #[deprecated(note = "This method will be replaced.")] pub fn into_outer_iter(self) -> iter::AxisIter<'a, A, D::Smaller> - where D: RemoveAxis { + where D: RemoveAxis + { AxisIter::new(self, Axis(0)) } } @@ -228,28 +246,33 @@ impl<'a, A, D> ArrayViewMut<'a, A, D> where D: Dimension { // Convert into a read-only view - pub(crate) fn into_view(self) -> ArrayView<'a, A, D> { + pub(crate) fn into_view(self) -> ArrayView<'a, A, D> + { unsafe { ArrayView::new(self.ptr, self.dim, self.strides) } } /// Converts to a mutable raw array view. 
- pub(crate) fn into_raw_view_mut(self) -> RawArrayViewMut { + pub(crate) fn into_raw_view_mut(self) -> RawArrayViewMut + { unsafe { RawArrayViewMut::new(self.ptr, self.dim, self.strides) } } #[inline] - pub(crate) fn into_base_iter(self) -> Baseiter { + pub(crate) fn into_base_iter(self) -> Baseiter + { unsafe { Baseiter::new(self.ptr.as_ptr(), self.dim, self.strides) } } #[inline] - pub(crate) fn into_elements_base(self) -> ElementsBaseMut<'a, A, D> { + pub(crate) fn into_elements_base(self) -> ElementsBaseMut<'a, A, D> + { ElementsBaseMut::new(self) } /// Return the array’s data as a slice, if it is contiguous and in standard order. /// Otherwise return self in the Err branch of the result. - pub(crate) fn try_into_slice(self) -> Result<&'a mut [A], Self> { + pub(crate) fn try_into_slice(self) -> Result<&'a mut [A], Self> + { if self.is_standard_layout() { unsafe { Ok(slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len())) } } else { @@ -259,7 +282,8 @@ where D: Dimension /// Return the array’s data as a slice, if it is contiguous. /// Otherwise return self in the Err branch of the result. 
- fn try_into_slice_memory_order(self) -> Result<&'a mut [A], Self> { + fn try_into_slice_memory_order(self) -> Result<&'a mut [A], Self> + { if self.is_contiguous() { let offset = offset_from_low_addr_ptr_to_logical_ptr(&self.dim, &self.strides); unsafe { Ok(slice::from_raw_parts_mut(self.ptr.sub(offset).as_ptr(), self.len())) } @@ -268,7 +292,8 @@ where D: Dimension } } - pub(crate) fn into_iter_(self) -> IterMut<'a, A, D> { + pub(crate) fn into_iter_(self) -> IterMut<'a, A, D> + { IterMut::new(self) } @@ -276,7 +301,8 @@ where D: Dimension #[doc(hidden)] // not official #[deprecated(note = "This method will be replaced.")] pub fn into_outer_iter(self) -> iter::AxisIterMut<'a, A, D::Smaller> - where D: RemoveAxis { + where D: RemoveAxis + { AxisIterMut::new(self, Axis(0)) } } diff --git a/src/impl_views/indexing.rs b/src/impl_views/indexing.rs index 3494b91db..2b72c2142 100644 --- a/src/impl_views/indexing.rs +++ b/src/impl_views/indexing.rs @@ -46,7 +46,8 @@ use crate::NdIndex; /// assert_eq!(long_life_ref, &0.); /// /// ``` -pub trait IndexLonger { +pub trait IndexLonger +{ /// The type of the reference to the element that is produced, including /// its lifetime. type Output; @@ -119,12 +120,14 @@ where /// /// **Panics** if index is out of bounds. #[track_caller] - fn index(self, index: I) -> &'a A { + fn index(self, index: I) -> &'a A + { debug_bounds_check!(self, index); unsafe { &*self.get_ptr(index).unwrap_or_else(|| array_out_of_bounds()) } } - fn get(self, index: I) -> Option<&'a A> { + fn get(self, index: I) -> Option<&'a A> + { unsafe { self.get_ptr(index).map(|ptr| &*ptr) } } @@ -139,7 +142,8 @@ where /// [1]: ArrayBase::uget /// /// **Note:** only unchecked for non-debug builds of ndarray. - unsafe fn uget(self, index: I) -> &'a A { + unsafe fn uget(self, index: I) -> &'a A + { debug_bounds_check!(self, index); &*self.as_ptr().offset(index.index_unchecked(&self.strides)) } @@ -165,7 +169,8 @@ where /// /// **Panics** if index is out of bounds. 
#[track_caller] - fn index(mut self, index: I) -> &'a mut A { + fn index(mut self, index: I) -> &'a mut A + { debug_bounds_check!(self, index); unsafe { match self.get_mut_ptr(index) { @@ -183,7 +188,8 @@ where /// /// [1]: ArrayBase::get_mut /// - fn get(mut self, index: I) -> Option<&'a mut A> { + fn get(mut self, index: I) -> Option<&'a mut A> + { debug_bounds_check!(self, index); unsafe { match self.get_mut_ptr(index) { @@ -202,7 +208,8 @@ where /// [1]: ArrayBase::uget_mut /// /// **Note:** only unchecked for non-debug builds of ndarray. - unsafe fn uget(mut self, index: I) -> &'a mut A { + unsafe fn uget(mut self, index: I) -> &'a mut A + { debug_bounds_check!(self, index); &mut *self .as_mut_ptr() diff --git a/src/impl_views/splitting.rs b/src/impl_views/splitting.rs index f2f4e8f82..e26900984 100644 --- a/src/impl_views/splitting.rs +++ b/src/impl_views/splitting.rs @@ -88,7 +88,8 @@ where D: Dimension /// along Axis(1) /// ``` #[track_caller] - pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { + pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) + { unsafe { let (left, right) = self.into_raw_view().split_at(axis, index); (left.deref_into_view(), right.deref_into_view()) @@ -115,7 +116,8 @@ where D: Dimension /// assert_eq!(re, array![[1., 3.], [5., 7.], [9., 11.]]); /// assert_eq!(im, array![[2., 4.], [6., 8.], [10., 12.]]); /// ``` - pub fn split_complex(self) -> Complex> { + pub fn split_complex(self) -> Complex> + { unsafe { let Complex { re, im } = self.into_raw_view().split_complex(); Complex { @@ -135,7 +137,8 @@ where D: Dimension /// /// **Panics** if `axis` or `index` is out of bounds. 
#[track_caller] - pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { + pub fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) + { unsafe { let (left, right) = self.into_raw_view_mut().split_at(axis, index); (left.deref_into_view_mut(), right.deref_into_view_mut()) @@ -161,7 +164,8 @@ where D: Dimension /// * if `D` is `IxDyn` and `info` does not match the number of array axes #[track_caller] pub fn multi_slice_move(self, info: M) -> M::Output - where M: MultiSliceArg<'a, A, D> { + where M: MultiSliceArg<'a, A, D> + { info.multi_slice_move(self) } } @@ -192,7 +196,8 @@ where D: Dimension /// assert_eq!(arr[[0, 1]], Complex64::new(13., 4.)); /// assert_eq!(arr[[2, 0]], Complex64::new(9., 14.)); /// ``` - pub fn split_complex(self) -> Complex> { + pub fn split_complex(self) -> Complex> + { unsafe { let Complex { re, im } = self.into_raw_view_mut().split_complex(); Complex { diff --git a/src/indexes.rs b/src/indexes.rs index 368303840..0fa2b50fb 100644 --- a/src/indexes.rs +++ b/src/indexes.rs @@ -18,7 +18,8 @@ use crate::{ArrayBase, Data}; /// /// Iterator element type is `D`. #[derive(Clone)] -pub struct IndicesIter { +pub struct IndicesIter +{ dim: D, index: Option, } @@ -28,7 +29,8 @@ pub struct IndicesIter { /// *Note:* prefer higher order methods, arithmetic operations and /// non-indexed iteration before using indices. 
pub fn indices(shape: E) -> Indices -where E: IntoDimension { +where E: IntoDimension +{ let dim = shape.into_dimension(); Indices { start: E::Dim::zeros(dim.ndim()), @@ -53,7 +55,8 @@ where D: Dimension { type Item = D::Pattern; #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { let index = match self.index { None => return None, Some(ref ix) => ix.clone(), @@ -62,7 +65,8 @@ where D: Dimension Some(index.into_pattern()) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { let l = match self.index { None => 0, Some(ref ix) => { @@ -80,7 +84,8 @@ where D: Dimension } fn fold(self, init: B, mut f: F) -> B - where F: FnMut(B, D::Pattern) -> B { + where F: FnMut(B, D::Pattern) -> B + { let IndicesIter { mut index, dim } = self; let ndim = dim.ndim(); if ndim == 0 { @@ -111,7 +116,8 @@ where D: Dimension { type Item = D::Pattern; type IntoIter = IndicesIter; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { let sz = self.dim.size(); let index = if sz != 0 { Some(self.start) } else { None }; IndicesIter { index, dim: self.dim } @@ -130,7 +136,8 @@ where D: Dimension } #[derive(Copy, Clone, Debug)] -pub struct IndexPtr { +pub struct IndexPtr +{ index: D, } @@ -140,7 +147,8 @@ where D: Dimension + Copy // stride: The axis to increment type Stride = usize; - unsafe fn stride_offset(mut self, stride: Self::Stride, index: usize) -> Self { + unsafe fn stride_offset(mut self, stride: Self::Stride, index: usize) -> Self + { self.index[stride] += index; self } @@ -161,7 +169,8 @@ where D: Dimension + Copy // [0, 0, 0].stride_offset(1, 10) => [0, 10, 0] axis 1 is incremented by 10. // // .as_ref() converts the Ptr value to an Item. For example [0, 10, 0] => (0, 10, 0) -impl NdProducer for Indices { +impl NdProducer for Indices +{ type Item = D::Pattern; type Dim = D; type Ptr = IndexPtr; @@ -169,19 +178,23 @@ impl NdProducer for Indices { private_impl! 
{} - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { self.dim } - fn equal_dim(&self, dim: &Self::Dim) -> bool { + fn equal_dim(&self, dim: &Self::Dim) -> bool + { self.dim.equal(dim) } - fn as_ptr(&self) -> Self::Ptr { + fn as_ptr(&self) -> Self::Ptr + { IndexPtr { index: self.start } } - fn layout(&self) -> Layout { + fn layout(&self) -> Layout + { if self.dim.ndim() <= 1 { Layout::one_dimensional() } else { @@ -189,26 +202,31 @@ impl NdProducer for Indices { } } - unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item { + unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item + { ptr.index.into_pattern() } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr + { let mut index = *i; index += &self.start; IndexPtr { index } } - fn stride_of(&self, axis: Axis) -> Self::Stride { + fn stride_of(&self, axis: Axis) -> Self::Stride + { axis.index() } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { + fn contiguous_stride(&self) -> Self::Stride + { 0 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { let start_a = self.start; let mut start_b = start_a; let (a, b) = self.dim.split_at(axis, index); @@ -221,14 +239,16 @@ impl NdProducer for Indices { /// /// Iterator element type is `D`. 
#[derive(Clone)] -pub struct IndicesIterF { +pub struct IndicesIterF +{ dim: D, index: D, has_remaining: bool, } pub fn indices_iter_f(shape: E) -> IndicesIterF -where E: IntoDimension { +where E: IntoDimension +{ let dim = shape.into_dimension(); let zero = E::Dim::zeros(dim.ndim()); IndicesIterF { @@ -243,7 +263,8 @@ where D: Dimension { type Item = D::Pattern; #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { if !self.has_remaining { None } else { @@ -253,7 +274,8 @@ where D: Dimension } } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { if !self.has_remaining { return (0, Some(0)); } @@ -272,12 +294,14 @@ where D: Dimension impl ExactSizeIterator for IndicesIterF where D: Dimension {} #[cfg(test)] -mod tests { +mod tests +{ use super::indices; use super::indices_iter_f; #[test] - fn test_indices_iter_c_size_hint() { + fn test_indices_iter_c_size_hint() + { let dim = (3, 4); let mut it = indices(dim).into_iter(); let mut len = dim.0 * dim.1; @@ -290,7 +314,8 @@ mod tests { } #[test] - fn test_indices_iter_c_fold() { + fn test_indices_iter_c_fold() + { macro_rules! run_test { ($dim:expr) => { for num_consume in 0..3 { @@ -318,7 +343,8 @@ mod tests { } #[test] - fn test_indices_iter_f_size_hint() { + fn test_indices_iter_f_size_hint() + { let dim = (3, 4); let mut it = indices_iter_f(dim); let mut len = dim.0 * dim.1; diff --git a/src/iterators/chunks.rs b/src/iterators/chunks.rs index 2be5092f1..465428968 100644 --- a/src/iterators/chunks.rs +++ b/src/iterators/chunks.rs @@ -30,19 +30,22 @@ impl_ndproducer! { /// See [`.exact_chunks()`](ArrayBase::exact_chunks) for more /// information. //#[derive(Debug)] -pub struct ExactChunks<'a, A, D> { +pub struct ExactChunks<'a, A, D> +{ base: RawArrayView, life: PhantomData<&'a A>, chunk: D, inner_strides: D, } -impl<'a, A, D: Dimension> ExactChunks<'a, A, D> { +impl<'a, A, D: Dimension> ExactChunks<'a, A, D> +{ /// Creates a new exact chunks producer. 
/// /// **Panics** if any chunk dimension is zero pub(crate) fn new(a: ArrayView<'a, A, D>, chunk: E) -> Self - where E: IntoDimension { + where E: IntoDimension + { let mut a = a.into_raw_view(); let chunk = chunk.into_dimension(); ndassert!( @@ -77,7 +80,8 @@ where { type Item = ::Item; type IntoIter = ExactChunksIter<'a, A, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { ExactChunksIter { iter: self.base.into_base_iter(), life: self.life, @@ -91,7 +95,8 @@ where /// /// See [`.exact_chunks()`](ArrayBase::exact_chunks) for more /// information. -pub struct ExactChunksIter<'a, A, D> { +pub struct ExactChunksIter<'a, A, D> +{ iter: Baseiter, life: PhantomData<&'a A>, chunk: D, @@ -124,19 +129,22 @@ impl_ndproducer! { /// See [`.exact_chunks_mut()`](ArrayBase::exact_chunks_mut) /// for more information. //#[derive(Debug)] -pub struct ExactChunksMut<'a, A, D> { +pub struct ExactChunksMut<'a, A, D> +{ base: RawArrayViewMut, life: PhantomData<&'a mut A>, chunk: D, inner_strides: D, } -impl<'a, A, D: Dimension> ExactChunksMut<'a, A, D> { +impl<'a, A, D: Dimension> ExactChunksMut<'a, A, D> +{ /// Creates a new exact chunks producer. /// /// **Panics** if any chunk dimension is zero pub(crate) fn new(a: ArrayViewMut<'a, A, D>, chunk: E) -> Self - where E: IntoDimension { + where E: IntoDimension + { let mut a = a.into_raw_view_mut(); let chunk = chunk.into_dimension(); ndassert!( @@ -171,7 +179,8 @@ where { type Item = ::Item; type IntoIter = ExactChunksIterMut<'a, A, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { ExactChunksIterMut { iter: self.base.into_base_iter(), life: self.life, @@ -230,7 +239,8 @@ impl_iterator! { /// /// See [`.exact_chunks_mut()`](ArrayBase::exact_chunks_mut) /// for more information. 
-pub struct ExactChunksIterMut<'a, A, D> { +pub struct ExactChunksIterMut<'a, A, D> +{ iter: Baseiter, life: PhantomData<&'a mut A>, chunk: D, diff --git a/src/iterators/into_iter.rs b/src/iterators/into_iter.rs index a07c8042d..fcc2e4b8c 100644 --- a/src/iterators/into_iter.rs +++ b/src/iterators/into_iter.rs @@ -33,7 +33,8 @@ impl IntoIter where D: Dimension { /// Create a new by-value iterator that consumes `array` - pub(crate) fn new(mut array: Array) -> Self { + pub(crate) fn new(mut array: Array) -> Self + { unsafe { let array_head_ptr = array.ptr; let ptr = array.as_mut_ptr(); @@ -54,21 +55,26 @@ where D: Dimension } } -impl Iterator for IntoIter { +impl Iterator for IntoIter +{ type Item = A; #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { self.inner.next().map(|p| unsafe { p.read() }) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.inner.size_hint() } } -impl ExactSizeIterator for IntoIter { - fn len(&self) -> usize { +impl ExactSizeIterator for IntoIter +{ + fn len(&self) -> usize + { self.inner.len() } } @@ -76,7 +82,8 @@ impl ExactSizeIterator for IntoIter { impl Drop for IntoIter where D: Dimension { - fn drop(&mut self) { + fn drop(&mut self) + { if !self.has_unreachable_elements || mem::size_of::() == 0 || !mem::needs_drop::() { return; } @@ -100,7 +107,8 @@ where D: Dimension type Item = A; type IntoIter = IntoIter; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { IntoIter::new(self) } } @@ -113,7 +121,8 @@ where type Item = A; type IntoIter = IntoIter; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { IntoIter::new(self.into_owned()) } } @@ -126,7 +135,8 @@ where type Item = A; type IntoIter = IntoIter; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { IntoIter::new(self.into_owned()) } } diff --git a/src/iterators/lanes.rs b/src/iterators/lanes.rs index 
0eefa05c4..11c83d002 100644 --- a/src/iterators/lanes.rs +++ b/src/iterators/lanes.rs @@ -25,15 +25,18 @@ impl_ndproducer! { /// See [`.lanes()`](ArrayBase::lanes) /// for more information. -pub struct Lanes<'a, A, D> { +pub struct Lanes<'a, A, D> +{ base: ArrayView<'a, A, D>, inner_len: Ix, inner_stride: Ixs, } -impl<'a, A, D: Dimension> Lanes<'a, A, D> { +impl<'a, A, D: Dimension> Lanes<'a, A, D> +{ pub(crate) fn new(v: ArrayView<'a, A, Di>, axis: Axis) -> Self - where Di: Dimension { + where Di: Dimension + { let ndim = v.ndim(); let len; let stride; @@ -78,7 +81,8 @@ where D: Dimension { type Item = ::Item; type IntoIter = LanesIter<'a, A, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { LanesIter { iter: self.base.into_base_iter(), inner_len: self.inner_len, @@ -90,15 +94,18 @@ where D: Dimension /// See [`.lanes_mut()`](ArrayBase::lanes_mut) /// for more information. -pub struct LanesMut<'a, A, D> { +pub struct LanesMut<'a, A, D> +{ base: ArrayViewMut<'a, A, D>, inner_len: Ix, inner_stride: Ixs, } -impl<'a, A, D: Dimension> LanesMut<'a, A, D> { +impl<'a, A, D: Dimension> LanesMut<'a, A, D> +{ pub(crate) fn new(v: ArrayViewMut<'a, A, Di>, axis: Axis) -> Self - where Di: Dimension { + where Di: Dimension + { let ndim = v.ndim(); let len; let stride; @@ -125,7 +132,8 @@ where D: Dimension { type Item = ::Item; type IntoIter = LanesIterMut<'a, A, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { LanesIterMut { iter: self.base.into_base_iter(), inner_len: self.inner_len, diff --git a/src/iterators/mod.rs b/src/iterators/mod.rs index 9dedddd79..4851b2827 100644 --- a/src/iterators/mod.rs +++ b/src/iterators/mod.rs @@ -36,19 +36,22 @@ use std::slice::{self, Iter as SliceIter, IterMut as SliceIterMut}; /// /// Iterator element type is `*mut A`. 
#[derive(Debug)] -pub struct Baseiter { +pub struct Baseiter +{ ptr: *mut A, dim: D, strides: D, index: Option, } -impl Baseiter { +impl Baseiter +{ /// Creating a Baseiter is unsafe because shape and stride parameters need /// to be correct to avoid performing an unsafe pointer offset while /// iterating. #[inline] - pub unsafe fn new(ptr: *mut A, len: D, stride: D) -> Baseiter { + pub unsafe fn new(ptr: *mut A, len: D, stride: D) -> Baseiter + { Baseiter { ptr, index: len.first_index(), @@ -58,11 +61,13 @@ impl Baseiter { } } -impl Iterator for Baseiter { +impl Iterator for Baseiter +{ type Item = *mut A; #[inline] - fn next(&mut self) -> Option<*mut A> { + fn next(&mut self) -> Option<*mut A> + { let index = match self.index { None => return None, Some(ref ix) => ix.clone(), @@ -72,13 +77,15 @@ impl Iterator for Baseiter { unsafe { Some(self.ptr.offset(offset)) } } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { let len = self.len(); (len, Some(len)) } fn fold(mut self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, *mut A) -> Acc { + where G: FnMut(Acc, *mut A) -> Acc + { let ndim = self.dim.ndim(); debug_assert_ne!(ndim, 0); let mut accum = init; @@ -103,8 +110,10 @@ impl Iterator for Baseiter { } } -impl ExactSizeIterator for Baseiter { - fn len(&self) -> usize { +impl ExactSizeIterator for Baseiter +{ + fn len(&self) -> usize + { match self.index { None => 0, Some(ref ix) => { @@ -121,9 +130,11 @@ impl ExactSizeIterator for Baseiter { } } -impl DoubleEndedIterator for Baseiter { +impl DoubleEndedIterator for Baseiter +{ #[inline] - fn next_back(&mut self) -> Option<*mut A> { + fn next_back(&mut self) -> Option<*mut A> + { let index = match self.index { None => return None, Some(ix) => ix, @@ -137,7 +148,8 @@ impl DoubleEndedIterator for Baseiter { unsafe { Some(self.ptr.offset(offset)) } } - fn nth_back(&mut self, n: usize) -> Option<*mut A> { + fn nth_back(&mut self, n: usize) -> Option<*mut A> + { let index = 
self.index?; let len = self.dim[0] - index[0]; if n < len { @@ -154,7 +166,8 @@ impl DoubleEndedIterator for Baseiter { } fn rfold(mut self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, *mut A) -> Acc { + where G: FnMut(Acc, *mut A) -> Acc + { let mut accum = init; if let Some(index) = self.index { let elem_index = index[0]; @@ -196,8 +209,10 @@ clone_bounds!( } ); -impl<'a, A, D: Dimension> ElementsBase<'a, A, D> { - pub fn new(v: ArrayView<'a, A, D>) -> Self { +impl<'a, A, D: Dimension> ElementsBase<'a, A, D> +{ + pub fn new(v: ArrayView<'a, A, D>) -> Self + { ElementsBase { inner: v.into_base_iter(), life: PhantomData, @@ -205,31 +220,38 @@ impl<'a, A, D: Dimension> ElementsBase<'a, A, D> { } } -impl<'a, A, D: Dimension> Iterator for ElementsBase<'a, A, D> { +impl<'a, A, D: Dimension> Iterator for ElementsBase<'a, A, D> +{ type Item = &'a A; #[inline] - fn next(&mut self) -> Option<&'a A> { + fn next(&mut self) -> Option<&'a A> + { self.inner.next().map(|p| unsafe { &*p }) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.inner.size_hint() } fn fold(self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc { + where G: FnMut(Acc, Self::Item) -> Acc + { unsafe { self.inner.fold(init, move |acc, ptr| g(acc, &*ptr)) } } } -impl<'a, A> DoubleEndedIterator for ElementsBase<'a, A, Ix1> { +impl<'a, A> DoubleEndedIterator for ElementsBase<'a, A, Ix1> +{ #[inline] - fn next_back(&mut self) -> Option<&'a A> { + fn next_back(&mut self) -> Option<&'a A> + { self.inner.next_back().map(|p| unsafe { &*p }) } fn rfold(self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc { + where G: FnMut(Acc, Self::Item) -> Acc + { unsafe { self.inner.rfold(init, move |acc, ptr| g(acc, &*ptr)) } } } @@ -237,7 +259,8 @@ impl<'a, A> DoubleEndedIterator for ElementsBase<'a, A, Ix1> { impl<'a, A, D> ExactSizeIterator for ElementsBase<'a, A, D> where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> 
usize + { self.inner.len() } } @@ -272,7 +295,8 @@ clone_bounds!( impl<'a, A, D> Iter<'a, A, D> where D: Dimension { - pub(crate) fn new(self_: ArrayView<'a, A, D>) -> Self { + pub(crate) fn new(self_: ArrayView<'a, A, D>) -> Self + { Iter { inner: if let Some(slc) = self_.to_slice() { ElementsRepr::Slice(slc.iter()) @@ -286,7 +310,8 @@ where D: Dimension impl<'a, A, D> IterMut<'a, A, D> where D: Dimension { - pub(crate) fn new(self_: ArrayViewMut<'a, A, D>) -> Self { + pub(crate) fn new(self_: ArrayViewMut<'a, A, D>) -> Self + { IterMut { inner: match self_.try_into_slice() { Ok(x) => ElementsRepr::Slice(x.iter_mut()), @@ -297,7 +322,8 @@ where D: Dimension } #[derive(Clone, Debug)] -pub enum ElementsRepr { +pub enum ElementsRepr +{ Slice(S), Counted(C), } @@ -308,13 +334,15 @@ pub enum ElementsRepr { /// /// See [`.iter()`](ArrayBase::iter) for more information. #[derive(Debug)] -pub struct Iter<'a, A, D> { +pub struct Iter<'a, A, D> +{ inner: ElementsRepr, ElementsBase<'a, A, D>>, } /// Counted read only iterator #[derive(Debug)] -pub struct ElementsBase<'a, A, D> { +pub struct ElementsBase<'a, A, D> +{ inner: Baseiter, life: PhantomData<&'a A>, } @@ -325,7 +353,8 @@ pub struct ElementsBase<'a, A, D> { /// /// See [`.iter_mut()`](ArrayBase::iter_mut) for more information. #[derive(Debug)] -pub struct IterMut<'a, A, D> { +pub struct IterMut<'a, A, D> +{ inner: ElementsRepr, ElementsBaseMut<'a, A, D>>, } @@ -333,13 +362,16 @@ pub struct IterMut<'a, A, D> { /// /// Iterator element type is `&'a mut A`. 
#[derive(Debug)] -pub struct ElementsBaseMut<'a, A, D> { +pub struct ElementsBaseMut<'a, A, D> +{ inner: Baseiter, life: PhantomData<&'a mut A>, } -impl<'a, A, D: Dimension> ElementsBaseMut<'a, A, D> { - pub fn new(v: ArrayViewMut<'a, A, D>) -> Self { +impl<'a, A, D: Dimension> ElementsBaseMut<'a, A, D> +{ + pub fn new(v: ArrayViewMut<'a, A, D>) -> Self + { ElementsBaseMut { inner: v.into_base_iter(), life: PhantomData, @@ -360,7 +392,8 @@ pub struct IndexedIterMut<'a, A, D>(ElementsBaseMut<'a, A, D>); impl<'a, A, D> IndexedIter<'a, A, D> where D: Dimension { - pub(crate) fn new(x: ElementsBase<'a, A, D>) -> Self { + pub(crate) fn new(x: ElementsBase<'a, A, D>) -> Self + { IndexedIter(x) } } @@ -368,82 +401,100 @@ where D: Dimension impl<'a, A, D> IndexedIterMut<'a, A, D> where D: Dimension { - pub(crate) fn new(x: ElementsBaseMut<'a, A, D>) -> Self { + pub(crate) fn new(x: ElementsBaseMut<'a, A, D>) -> Self + { IndexedIterMut(x) } } -impl<'a, A, D: Dimension> Iterator for Iter<'a, A, D> { +impl<'a, A, D: Dimension> Iterator for Iter<'a, A, D> +{ type Item = &'a A; #[inline] - fn next(&mut self) -> Option<&'a A> { + fn next(&mut self) -> Option<&'a A> + { either_mut!(self.inner, iter => iter.next()) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { either!(self.inner, ref iter => iter.size_hint()) } fn fold(self, init: Acc, g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc { + where G: FnMut(Acc, Self::Item) -> Acc + { either!(self.inner, iter => iter.fold(init, g)) } - fn nth(&mut self, n: usize) -> Option { + fn nth(&mut self, n: usize) -> Option + { either_mut!(self.inner, iter => iter.nth(n)) } fn collect(self) -> B - where B: FromIterator { + where B: FromIterator + { either!(self.inner, iter => iter.collect()) } fn all(&mut self, f: F) -> bool - where F: FnMut(Self::Item) -> bool { + where F: FnMut(Self::Item) -> bool + { either_mut!(self.inner, iter => iter.all(f)) } fn any(&mut self, f: F) -> bool - where F: 
FnMut(Self::Item) -> bool { + where F: FnMut(Self::Item) -> bool + { either_mut!(self.inner, iter => iter.any(f)) } fn find

(&mut self, predicate: P) -> Option - where P: FnMut(&Self::Item) -> bool { + where P: FnMut(&Self::Item) -> bool + { either_mut!(self.inner, iter => iter.find(predicate)) } fn find_map(&mut self, f: F) -> Option - where F: FnMut(Self::Item) -> Option { + where F: FnMut(Self::Item) -> Option + { either_mut!(self.inner, iter => iter.find_map(f)) } - fn count(self) -> usize { + fn count(self) -> usize + { either!(self.inner, iter => iter.count()) } - fn last(self) -> Option { + fn last(self) -> Option + { either!(self.inner, iter => iter.last()) } fn position

(&mut self, predicate: P) -> Option - where P: FnMut(Self::Item) -> bool { + where P: FnMut(Self::Item) -> bool + { either_mut!(self.inner, iter => iter.position(predicate)) } } -impl<'a, A> DoubleEndedIterator for Iter<'a, A, Ix1> { +impl<'a, A> DoubleEndedIterator for Iter<'a, A, Ix1> +{ #[inline] - fn next_back(&mut self) -> Option<&'a A> { + fn next_back(&mut self) -> Option<&'a A> + { either_mut!(self.inner, iter => iter.next_back()) } - fn nth_back(&mut self, n: usize) -> Option<&'a A> { + fn nth_back(&mut self, n: usize) -> Option<&'a A> + { either_mut!(self.inner, iter => iter.nth_back(n)) } fn rfold(self, init: Acc, g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc { + where G: FnMut(Acc, Self::Item) -> Acc + { either!(self.inner, iter => iter.rfold(init, g)) } } @@ -451,15 +502,18 @@ impl<'a, A> DoubleEndedIterator for Iter<'a, A, Ix1> { impl<'a, A, D> ExactSizeIterator for Iter<'a, A, D> where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { either!(self.inner, ref iter => iter.len()) } } -impl<'a, A, D: Dimension> Iterator for IndexedIter<'a, A, D> { +impl<'a, A, D: Dimension> Iterator for IndexedIter<'a, A, D> +{ type Item = (D::Pattern, &'a A); #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { let index = match self.0.inner.index { None => return None, Some(ref ix) => ix.clone(), @@ -470,7 +524,8 @@ impl<'a, A, D: Dimension> Iterator for IndexedIter<'a, A, D> { } } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.0.size_hint() } } @@ -478,82 +533,100 @@ impl<'a, A, D: Dimension> Iterator for IndexedIter<'a, A, D> { impl<'a, A, D> ExactSizeIterator for IndexedIter<'a, A, D> where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.0.inner.len() } } -impl<'a, A, D: Dimension> Iterator for IterMut<'a, A, D> { +impl<'a, A, D: Dimension> Iterator for IterMut<'a, A, D> +{ type Item = &'a mut A; #[inline] - fn next(&mut self) -> 
Option<&'a mut A> { + fn next(&mut self) -> Option<&'a mut A> + { either_mut!(self.inner, iter => iter.next()) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { either!(self.inner, ref iter => iter.size_hint()) } fn fold(self, init: Acc, g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc { + where G: FnMut(Acc, Self::Item) -> Acc + { either!(self.inner, iter => iter.fold(init, g)) } - fn nth(&mut self, n: usize) -> Option { + fn nth(&mut self, n: usize) -> Option + { either_mut!(self.inner, iter => iter.nth(n)) } fn collect(self) -> B - where B: FromIterator { + where B: FromIterator + { either!(self.inner, iter => iter.collect()) } fn all(&mut self, f: F) -> bool - where F: FnMut(Self::Item) -> bool { + where F: FnMut(Self::Item) -> bool + { either_mut!(self.inner, iter => iter.all(f)) } fn any(&mut self, f: F) -> bool - where F: FnMut(Self::Item) -> bool { + where F: FnMut(Self::Item) -> bool + { either_mut!(self.inner, iter => iter.any(f)) } fn find

(&mut self, predicate: P) -> Option - where P: FnMut(&Self::Item) -> bool { + where P: FnMut(&Self::Item) -> bool + { either_mut!(self.inner, iter => iter.find(predicate)) } fn find_map(&mut self, f: F) -> Option - where F: FnMut(Self::Item) -> Option { + where F: FnMut(Self::Item) -> Option + { either_mut!(self.inner, iter => iter.find_map(f)) } - fn count(self) -> usize { + fn count(self) -> usize + { either!(self.inner, iter => iter.count()) } - fn last(self) -> Option { + fn last(self) -> Option + { either!(self.inner, iter => iter.last()) } fn position

(&mut self, predicate: P) -> Option - where P: FnMut(Self::Item) -> bool { + where P: FnMut(Self::Item) -> bool + { either_mut!(self.inner, iter => iter.position(predicate)) } } -impl<'a, A> DoubleEndedIterator for IterMut<'a, A, Ix1> { +impl<'a, A> DoubleEndedIterator for IterMut<'a, A, Ix1> +{ #[inline] - fn next_back(&mut self) -> Option<&'a mut A> { + fn next_back(&mut self) -> Option<&'a mut A> + { either_mut!(self.inner, iter => iter.next_back()) } - fn nth_back(&mut self, n: usize) -> Option<&'a mut A> { + fn nth_back(&mut self, n: usize) -> Option<&'a mut A> + { either_mut!(self.inner, iter => iter.nth_back(n)) } fn rfold(self, init: Acc, g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc { + where G: FnMut(Acc, Self::Item) -> Acc + { either!(self.inner, iter => iter.rfold(init, g)) } } @@ -561,36 +634,44 @@ impl<'a, A> DoubleEndedIterator for IterMut<'a, A, Ix1> { impl<'a, A, D> ExactSizeIterator for IterMut<'a, A, D> where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { either!(self.inner, ref iter => iter.len()) } } -impl<'a, A, D: Dimension> Iterator for ElementsBaseMut<'a, A, D> { +impl<'a, A, D: Dimension> Iterator for ElementsBaseMut<'a, A, D> +{ type Item = &'a mut A; #[inline] - fn next(&mut self) -> Option<&'a mut A> { + fn next(&mut self) -> Option<&'a mut A> + { self.inner.next().map(|p| unsafe { &mut *p }) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.inner.size_hint() } fn fold(self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc { + where G: FnMut(Acc, Self::Item) -> Acc + { unsafe { self.inner.fold(init, move |acc, ptr| g(acc, &mut *ptr)) } } } -impl<'a, A> DoubleEndedIterator for ElementsBaseMut<'a, A, Ix1> { +impl<'a, A> DoubleEndedIterator for ElementsBaseMut<'a, A, Ix1> +{ #[inline] - fn next_back(&mut self) -> Option<&'a mut A> { + fn next_back(&mut self) -> Option<&'a mut A> + { self.inner.next_back().map(|p| unsafe { &mut *p }) } fn 
rfold(self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc { + where G: FnMut(Acc, Self::Item) -> Acc + { unsafe { self.inner.rfold(init, move |acc, ptr| g(acc, &mut *ptr)) } } } @@ -598,15 +679,18 @@ impl<'a, A> DoubleEndedIterator for ElementsBaseMut<'a, A, Ix1> { impl<'a, A, D> ExactSizeIterator for ElementsBaseMut<'a, A, D> where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.inner.len() } } -impl<'a, A, D: Dimension> Iterator for IndexedIterMut<'a, A, D> { +impl<'a, A, D: Dimension> Iterator for IndexedIterMut<'a, A, D> +{ type Item = (D::Pattern, &'a mut A); #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { let index = match self.0.inner.index { None => return None, Some(ref ix) => ix.clone(), @@ -617,7 +701,8 @@ impl<'a, A, D: Dimension> Iterator for IndexedIterMut<'a, A, D> { } } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.0.size_hint() } } @@ -625,7 +710,8 @@ impl<'a, A, D: Dimension> Iterator for IndexedIterMut<'a, A, D> { impl<'a, A, D> ExactSizeIterator for IndexedIterMut<'a, A, D> where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.0.inner.len() } } @@ -634,7 +720,8 @@ where D: Dimension /// each lane along that axis. /// /// See [`.lanes()`](ArrayBase::lanes) for more information. 
-pub struct LanesIter<'a, A, D> { +pub struct LanesIter<'a, A, D> +{ inner_len: Ix, inner_stride: Ixs, iter: Baseiter, @@ -657,13 +744,15 @@ impl<'a, A, D> Iterator for LanesIter<'a, A, D> where D: Dimension { type Item = ArrayView<'a, A, Ix1>; - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { self.iter .next() .map(|ptr| unsafe { ArrayView::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) }) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.iter.size_hint() } } @@ -671,13 +760,16 @@ where D: Dimension impl<'a, A, D> ExactSizeIterator for LanesIter<'a, A, D> where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.iter.len() } } -impl<'a, A> DoubleEndedIterator for LanesIter<'a, A, Ix1> { - fn next_back(&mut self) -> Option { +impl<'a, A> DoubleEndedIterator for LanesIter<'a, A, Ix1> +{ + fn next_back(&mut self) -> Option + { self.iter .next_back() .map(|ptr| unsafe { ArrayView::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) }) @@ -692,7 +784,8 @@ impl<'a, A> DoubleEndedIterator for LanesIter<'a, A, Ix1> { /// /// See [`.lanes_mut()`](ArrayBase::lanes_mut) /// for more information. 
-pub struct LanesIterMut<'a, A, D> { +pub struct LanesIterMut<'a, A, D> +{ inner_len: Ix, inner_stride: Ixs, iter: Baseiter, @@ -703,13 +796,15 @@ impl<'a, A, D> Iterator for LanesIterMut<'a, A, D> where D: Dimension { type Item = ArrayViewMut<'a, A, Ix1>; - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { self.iter .next() .map(|ptr| unsafe { ArrayViewMut::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) }) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.iter.size_hint() } } @@ -717,13 +812,16 @@ where D: Dimension impl<'a, A, D> ExactSizeIterator for LanesIterMut<'a, A, D> where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.iter.len() } } -impl<'a, A> DoubleEndedIterator for LanesIterMut<'a, A, Ix1> { - fn next_back(&mut self) -> Option { +impl<'a, A> DoubleEndedIterator for LanesIterMut<'a, A, Ix1> +{ + fn next_back(&mut self) -> Option + { self.iter .next_back() .map(|ptr| unsafe { ArrayViewMut::new_(ptr, Ix1(self.inner_len), Ix1(self.inner_stride as Ix)) }) @@ -731,7 +829,8 @@ impl<'a, A> DoubleEndedIterator for LanesIterMut<'a, A, Ix1> { } #[derive(Debug)] -pub struct AxisIterCore { +pub struct AxisIterCore +{ /// Index along the axis of the value of `.next()`, relative to the start /// of the axis. index: Ix, @@ -762,7 +861,8 @@ clone_bounds!( } ); -impl AxisIterCore { +impl AxisIterCore +{ /// Constructs a new iterator over the specified axis. fn new(v: ArrayBase, axis: Axis) -> Self where @@ -780,7 +880,8 @@ impl AxisIterCore { } #[inline] - unsafe fn offset(&self, index: usize) -> *mut A { + unsafe fn offset(&self, index: usize) -> *mut A + { debug_assert!( index < self.end, "index={}, end={}, stride={}", @@ -799,7 +900,8 @@ impl AxisIterCore { /// **Panics** if `index` is strictly greater than the iterator's remaining /// length. 
#[track_caller] - fn split_at(self, index: usize) -> (Self, Self) { + fn split_at(self, index: usize) -> (Self, Self) + { assert!(index <= self.len()); let mid = self.index + index; let left = AxisIterCore { @@ -823,14 +925,16 @@ impl AxisIterCore { /// Does the same thing as `.next()` but also returns the index of the item /// relative to the start of the axis. - fn next_with_index(&mut self) -> Option<(usize, *mut A)> { + fn next_with_index(&mut self) -> Option<(usize, *mut A)> + { let index = self.index; self.next().map(|ptr| (index, ptr)) } /// Does the same thing as `.next_back()` but also returns the index of the /// item relative to the start of the axis. - fn next_back_with_index(&mut self) -> Option<(usize, *mut A)> { + fn next_back_with_index(&mut self) -> Option<(usize, *mut A)> + { self.next_back().map(|ptr| (self.end, ptr)) } } @@ -840,7 +944,8 @@ where D: Dimension { type Item = *mut A; - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { if self.index >= self.end { None } else { @@ -850,7 +955,8 @@ where D: Dimension } } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { let len = self.len(); (len, Some(len)) } @@ -859,7 +965,8 @@ where D: Dimension impl DoubleEndedIterator for AxisIterCore where D: Dimension { - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option + { if self.index >= self.end { None } else { @@ -873,7 +980,8 @@ where D: Dimension impl ExactSizeIterator for AxisIterCore where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.end - self.index } } @@ -893,7 +1001,8 @@ where D: Dimension /// or [`.axis_iter()`](ArrayBase::axis_iter) /// for more information. 
#[derive(Debug)] -pub struct AxisIter<'a, A, D> { +pub struct AxisIter<'a, A, D> +{ iter: AxisIterCore, life: PhantomData<&'a A>, } @@ -908,10 +1017,12 @@ clone_bounds!( } ); -impl<'a, A, D: Dimension> AxisIter<'a, A, D> { +impl<'a, A, D: Dimension> AxisIter<'a, A, D> +{ /// Creates a new iterator over the specified axis. pub(crate) fn new(v: ArrayView<'a, A, Di>, axis: Axis) -> Self - where Di: RemoveAxis { + where Di: RemoveAxis + { AxisIter { iter: AxisIterCore::new(v, axis), life: PhantomData, @@ -926,7 +1037,8 @@ impl<'a, A, D: Dimension> AxisIter<'a, A, D> { /// **Panics** if `index` is strictly greater than the iterator's remaining /// length. #[track_caller] - pub fn split_at(self, index: usize) -> (Self, Self) { + pub fn split_at(self, index: usize) -> (Self, Self) + { let (left, right) = self.iter.split_at(index); ( AxisIter { @@ -946,11 +1058,13 @@ where D: Dimension { type Item = ArrayView<'a, A, D>; - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { self.iter.next().map(|ptr| unsafe { self.as_ref(ptr) }) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.iter.size_hint() } } @@ -958,7 +1072,8 @@ where D: Dimension impl<'a, A, D> DoubleEndedIterator for AxisIter<'a, A, D> where D: Dimension { - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option + { self.iter.next_back().map(|ptr| unsafe { self.as_ref(ptr) }) } } @@ -966,7 +1081,8 @@ where D: Dimension impl<'a, A, D> ExactSizeIterator for AxisIter<'a, A, D> where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.iter.len() } } @@ -985,15 +1101,18 @@ where D: Dimension /// See [`.outer_iter_mut()`](ArrayBase::outer_iter_mut) /// or [`.axis_iter_mut()`](ArrayBase::axis_iter_mut) /// for more information. 
-pub struct AxisIterMut<'a, A, D> { +pub struct AxisIterMut<'a, A, D> +{ iter: AxisIterCore, life: PhantomData<&'a mut A>, } -impl<'a, A, D: Dimension> AxisIterMut<'a, A, D> { +impl<'a, A, D: Dimension> AxisIterMut<'a, A, D> +{ /// Creates a new iterator over the specified axis. pub(crate) fn new(v: ArrayViewMut<'a, A, Di>, axis: Axis) -> Self - where Di: RemoveAxis { + where Di: RemoveAxis + { AxisIterMut { iter: AxisIterCore::new(v, axis), life: PhantomData, @@ -1008,7 +1127,8 @@ impl<'a, A, D: Dimension> AxisIterMut<'a, A, D> { /// **Panics** if `index` is strictly greater than the iterator's remaining /// length. #[track_caller] - pub fn split_at(self, index: usize) -> (Self, Self) { + pub fn split_at(self, index: usize) -> (Self, Self) + { let (left, right) = self.iter.split_at(index); ( AxisIterMut { @@ -1028,11 +1148,13 @@ where D: Dimension { type Item = ArrayViewMut<'a, A, D>; - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { self.iter.next().map(|ptr| unsafe { self.as_ref(ptr) }) } - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { self.iter.size_hint() } } @@ -1040,7 +1162,8 @@ where D: Dimension impl<'a, A, D> DoubleEndedIterator for AxisIterMut<'a, A, D> where D: Dimension { - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option + { self.iter.next_back().map(|ptr| unsafe { self.as_ref(ptr) }) } } @@ -1048,26 +1171,31 @@ where D: Dimension impl<'a, A, D> ExactSizeIterator for AxisIterMut<'a, A, D> where D: Dimension { - fn len(&self) -> usize { + fn len(&self) -> usize + { self.iter.len() } } -impl<'a, A, D: Dimension> NdProducer for AxisIter<'a, A, D> { +impl<'a, A, D: Dimension> NdProducer for AxisIter<'a, A, D> +{ type Item = ::Item; type Dim = Ix1; type Ptr = *mut A; type Stride = isize; - fn layout(&self) -> crate::Layout { + fn layout(&self) -> crate::Layout + { crate::Layout::one_dimensional() } - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { 
Ix1(self.len()) } - fn as_ptr(&self) -> Self::Ptr { + fn as_ptr(&self) -> Self::Ptr + { if self.len() > 0 { // `self.iter.index` is guaranteed to be in-bounds if any of the // iterator remains (i.e. if `self.len() > 0`). @@ -1080,44 +1208,53 @@ impl<'a, A, D: Dimension> NdProducer for AxisIter<'a, A, D> { } } - fn contiguous_stride(&self) -> isize { + fn contiguous_stride(&self) -> isize + { self.iter.stride } - unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item { + unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item + { ArrayView::new_(ptr, self.iter.inner_dim.clone(), self.iter.inner_strides.clone()) } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr + { self.iter.offset(self.iter.index + i[0]) } - fn stride_of(&self, _axis: Axis) -> isize { + fn stride_of(&self, _axis: Axis) -> isize + { self.contiguous_stride() } - fn split_at(self, _axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, _axis: Axis, index: usize) -> (Self, Self) + { self.split_at(index) } private_impl! {} } -impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> { +impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> +{ type Item = ::Item; type Dim = Ix1; type Ptr = *mut A; type Stride = isize; - fn layout(&self) -> crate::Layout { + fn layout(&self) -> crate::Layout + { crate::Layout::one_dimensional() } - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { Ix1(self.len()) } - fn as_ptr(&self) -> Self::Ptr { + fn as_ptr(&self) -> Self::Ptr + { if self.len() > 0 { // `self.iter.index` is guaranteed to be in-bounds if any of the // iterator remains (i.e. if `self.len() > 0`). 
@@ -1130,23 +1267,28 @@ impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> { } } - fn contiguous_stride(&self) -> isize { + fn contiguous_stride(&self) -> isize + { self.iter.stride } - unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item { + unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item + { ArrayViewMut::new_(ptr, self.iter.inner_dim.clone(), self.iter.inner_strides.clone()) } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr + { self.iter.offset(self.iter.index + i[0]) } - fn stride_of(&self, _axis: Axis) -> isize { + fn stride_of(&self, _axis: Axis) -> isize + { self.contiguous_stride() } - fn split_at(self, _axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, _axis: Axis, index: usize) -> (Self, Self) + { self.split_at(index) } @@ -1163,7 +1305,8 @@ impl<'a, A, D: Dimension> NdProducer for AxisIterMut<'a, A, D> { /// Iterator element type is `ArrayView<'a, A, D>`. /// /// See [`.axis_chunks_iter()`](ArrayBase::axis_chunks_iter) for more information. -pub struct AxisChunksIter<'a, A, D> { +pub struct AxisChunksIter<'a, A, D> +{ iter: AxisIterCore, /// Index of the partial chunk (the chunk smaller than the specified chunk /// size due to the axis length not being evenly divisible). If the axis @@ -1196,9 +1339,9 @@ clone_bounds!( /// /// **Panics** if `size == 0`. 
#[track_caller] -fn chunk_iter_parts( - v: ArrayView<'_, A, D>, axis: Axis, size: usize, -) -> (AxisIterCore, usize, D) { +fn chunk_iter_parts(v: ArrayView<'_, A, D>, axis: Axis, size: usize) + -> (AxisIterCore, usize, D) +{ assert_ne!(size, 0, "Chunk size must be nonzero."); let axis_len = v.len_of(axis); let n_whole_chunks = axis_len / size; @@ -1235,8 +1378,10 @@ fn chunk_iter_parts( (iter, partial_chunk_index, partial_chunk_dim) } -impl<'a, A, D: Dimension> AxisChunksIter<'a, A, D> { - pub(crate) fn new(v: ArrayView<'a, A, D>, axis: Axis, size: usize) -> Self { +impl<'a, A, D: Dimension> AxisChunksIter<'a, A, D> +{ + pub(crate) fn new(v: ArrayView<'a, A, D>, axis: Axis, size: usize) -> Self + { let (iter, partial_chunk_index, partial_chunk_dim) = chunk_iter_parts(v, axis, size); AxisChunksIter { iter, @@ -1343,15 +1488,18 @@ macro_rules! chunk_iter_impl { /// /// See [`.axis_chunks_iter_mut()`](ArrayBase::axis_chunks_iter_mut) /// for more information. -pub struct AxisChunksIterMut<'a, A, D> { +pub struct AxisChunksIterMut<'a, A, D> +{ iter: AxisIterCore, partial_chunk_index: usize, partial_chunk_dim: D, life: PhantomData<&'a mut A>, } -impl<'a, A, D: Dimension> AxisChunksIterMut<'a, A, D> { - pub(crate) fn new(v: ArrayViewMut<'a, A, D>, axis: Axis, size: usize) -> Self { +impl<'a, A, D: Dimension> AxisChunksIterMut<'a, A, D> +{ + pub(crate) fn new(v: ArrayViewMut<'a, A, D>, axis: Axis, size: usize) -> Self + { let (iter, partial_chunk_index, partial_chunk_dim) = chunk_iter_parts(v.into_view(), axis, size); AxisChunksIterMut { iter, @@ -1411,7 +1559,8 @@ unsafe impl TrustedIterator for IntoIter where D: Dimension {} /// Like Iterator::collect, but only for trusted length iterators pub fn to_vec(iter: I) -> Vec -where I: TrustedIterator + ExactSizeIterator { +where I: TrustedIterator + ExactSizeIterator +{ to_vec_mapped(iter, |x| x) } diff --git a/src/iterators/windows.rs b/src/iterators/windows.rs index 050071450..ec1afb634 100644 --- a/src/iterators/windows.rs 
+++ b/src/iterators/windows.rs @@ -11,16 +11,19 @@ use crate::Slice; /// /// See [`.windows()`](ArrayBase::windows) for more /// information. -pub struct Windows<'a, A, D> { +pub struct Windows<'a, A, D> +{ base: RawArrayView, life: PhantomData<&'a A>, window: D, strides: D, } -impl<'a, A, D: Dimension> Windows<'a, A, D> { +impl<'a, A, D: Dimension> Windows<'a, A, D> +{ pub(crate) fn new(a: ArrayView<'a, A, D>, window_size: E) -> Self - where E: IntoDimension { + where E: IntoDimension + { let window = window_size.into_dimension(); let ndim = window.ndim(); @@ -31,7 +34,8 @@ impl<'a, A, D: Dimension> Windows<'a, A, D> { } pub(crate) fn new_with_stride(a: ArrayView<'a, A, D>, window_size: E, axis_strides: E) -> Self - where E: IntoDimension { + where E: IntoDimension + { let window = window_size.into_dimension(); let strides = axis_strides.into_dimension(); @@ -108,7 +112,8 @@ where { type Item = ::Item; type IntoIter = WindowsIter<'a, A, D>; - fn into_iter(self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter + { WindowsIter { iter: self.base.into_base_iter(), life: self.life, @@ -122,7 +127,8 @@ where /// /// See [`.windows()`](ArrayBase::windows) for more /// information. 
-pub struct WindowsIter<'a, A, D> { +pub struct WindowsIter<'a, A, D> +{ iter: Baseiter, life: PhantomData<&'a A>, window: D, diff --git a/src/itertools.rs b/src/itertools.rs index ccfb852be..d3562e687 100644 --- a/src/itertools.rs +++ b/src/itertools.rs @@ -23,7 +23,8 @@ use std::iter; /// } /// ``` pub(crate) fn enumerate(iterable: I) -> iter::Enumerate -where I: IntoIterator { +where I: IntoIterator +{ iterable.into_iter().enumerate() } diff --git a/src/layout/layoutfmt.rs b/src/layout/layoutfmt.rs index 3d7fad00a..f20f0caaa 100644 --- a/src/layout/layoutfmt.rs +++ b/src/layout/layoutfmt.rs @@ -12,8 +12,10 @@ const LAYOUT_NAMES: &[&str] = &["C", "F", "c", "f"]; use std::fmt; -impl fmt::Debug for Layout { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { +impl fmt::Debug for Layout +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { if self.0 == 0 { write!(f, "Custom")? } else { diff --git a/src/layout/mod.rs b/src/layout/mod.rs index 291c9d4ca..f0e759b51 100644 --- a/src/layout/mod.rs +++ b/src/layout/mod.rs @@ -8,63 +8,74 @@ mod layoutfmt; #[derive(Copy, Clone)] pub struct Layout(u32); -impl Layout { +impl Layout +{ pub(crate) const CORDER: u32 = 0b01; pub(crate) const FORDER: u32 = 0b10; pub(crate) const CPREFER: u32 = 0b0100; pub(crate) const FPREFER: u32 = 0b1000; #[inline(always)] - pub(crate) fn is(self, flag: u32) -> bool { + pub(crate) fn is(self, flag: u32) -> bool + { self.0 & flag != 0 } /// Return layout common to both inputs #[inline(always)] - pub(crate) fn intersect(self, other: Layout) -> Layout { + pub(crate) fn intersect(self, other: Layout) -> Layout + { Layout(self.0 & other.0) } /// Return a layout that simultaneously "is" what both of the inputs are #[inline(always)] - pub(crate) fn also(self, other: Layout) -> Layout { + pub(crate) fn also(self, other: Layout) -> Layout + { Layout(self.0 | other.0) } #[inline(always)] - pub(crate) fn one_dimensional() -> Layout { + pub(crate) fn one_dimensional() -> Layout + { 
Layout::c().also(Layout::f()) } #[inline(always)] - pub(crate) fn c() -> Layout { + pub(crate) fn c() -> Layout + { Layout(Layout::CORDER | Layout::CPREFER) } #[inline(always)] - pub(crate) fn f() -> Layout { + pub(crate) fn f() -> Layout + { Layout(Layout::FORDER | Layout::FPREFER) } #[inline(always)] - pub(crate) fn cpref() -> Layout { + pub(crate) fn cpref() -> Layout + { Layout(Layout::CPREFER) } #[inline(always)] - pub(crate) fn fpref() -> Layout { + pub(crate) fn fpref() -> Layout + { Layout(Layout::FPREFER) } #[inline(always)] - pub(crate) fn none() -> Layout { + pub(crate) fn none() -> Layout + { Layout(0) } /// A simple "score" method which scores positive for preferring C-order, negative for F-order /// Subject to change when we can describe other layouts #[inline] - pub(crate) fn tendency(self) -> i32 { + pub(crate) fn tendency(self) -> i32 + { (self.is(Layout::CORDER) as i32 - self.is(Layout::FORDER) as i32) + (self.is(Layout::CPREFER) as i32 - self.is(Layout::FPREFER) as i32) } @@ -73,7 +84,8 @@ impl Layout { /// /// **Note**: We ignore the preference bits #[inline(always)] - pub(crate) fn matches(self, other: Self) -> bool { + pub(crate) fn matches(self, other: Self) -> bool + { self.0 & (0b11) == other.0 & (0b11) } @@ -85,7 +97,8 @@ impl Layout { } #[cfg(test)] -mod tests { +mod tests +{ use super::*; use crate::imp_prelude::*; use crate::NdProducer; @@ -119,7 +132,8 @@ mod tests { } #[test] - fn contig_layouts() { + fn contig_layouts() + { let a = M::zeros((5, 5)); let b = M::zeros((5, 5).f()); let ac = a.view().layout(); @@ -131,7 +145,8 @@ mod tests { } #[test] - fn contig_cf_layouts() { + fn contig_cf_layouts() + { let a = M::zeros((5, 1)); let b = M::zeros((1, 5).f()); assert_layouts!(a, CORDER, CPREFER, FORDER, FPREFER); @@ -159,7 +174,8 @@ mod tests { } #[test] - fn stride_layouts() { + fn stride_layouts() + { let a = M::zeros((5, 5)); { @@ -186,7 +202,8 @@ mod tests { } #[test] - fn no_layouts() { + fn no_layouts() + { let a = M::zeros((5, 
5)); let b = M::zeros((5, 5).f()); @@ -214,7 +231,8 @@ mod tests { } #[test] - fn skip_layouts() { + fn skip_layouts() + { let a = M::zeros((5, 5)); { let v1 = a.slice(s![..;2, ..]).layout(); diff --git a/src/lib.rs b/src/lib.rs index e24bcc3a3..569e298e5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -212,7 +212,8 @@ pub use crate::data_repr::Device; pub use crate::layout::Layout; /// Implementation's prelude. Common types used everywhere. -mod imp_prelude { +mod imp_prelude +{ pub use crate::dimension::DimensionExt; pub use crate::prelude::*; pub use crate::ArcArray; @@ -1436,8 +1437,10 @@ pub use data_repr::OwnedRepr; #[derive(Debug)] pub struct OwnedArcRepr(Arc>); -impl Clone for OwnedArcRepr { - fn clone(&self) -> Self { +impl Clone for OwnedArcRepr +{ + fn clone(&self) -> Self + { OwnedArcRepr(self.0.clone()) } } @@ -1448,13 +1451,16 @@ impl Clone for OwnedArcRepr { /// [`RawArrayView`] / [`RawArrayViewMut`] for the array type!* #[derive(Copy, Clone)] // This is just a marker type, to carry the mutability and element type. -pub struct RawViewRepr { +pub struct RawViewRepr +{ ptr: PhantomData, } -impl RawViewRepr { +impl RawViewRepr +{ #[inline(always)] - const fn new() -> Self { + const fn new() -> Self + { RawViewRepr { ptr: PhantomData } } } @@ -1465,13 +1471,16 @@ impl RawViewRepr { /// [`ArrayView`] / [`ArrayViewMut`] for the array type!* #[derive(Copy, Clone)] // This is just a marker type, to carry the lifetime parameter. -pub struct ViewRepr { +pub struct ViewRepr +{ life: PhantomData, } -impl ViewRepr { +impl ViewRepr +{ #[inline(always)] - const fn new() -> Self { + const fn new() -> Self + { ViewRepr { life: PhantomData } } } @@ -1480,16 +1489,19 @@ impl ViewRepr { /// /// *Don't use this type directly—use the type alias /// [`CowArray`] for the array type!* -pub enum CowRepr<'a, A> { +pub enum CowRepr<'a, A> +{ /// Borrowed data. View(ViewRepr<&'a A>), /// Owned data. 
Owned(OwnedRepr), } -impl<'a, A> CowRepr<'a, A> { +impl<'a, A> CowRepr<'a, A> +{ /// Returns `true` iff the data is the `View` variant. - pub fn is_view(&self) -> bool { + pub fn is_view(&self) -> bool + { match self { CowRepr::View(_) => true, CowRepr::Owned(_) => false, @@ -1497,7 +1509,8 @@ impl<'a, A> CowRepr<'a, A> { } /// Returns `true` iff the data is the `Owned` variant. - pub fn is_owned(&self) -> bool { + pub fn is_owned(&self) -> bool + { match self { CowRepr::View(_) => false, CowRepr::Owned(_) => true, @@ -1525,7 +1538,8 @@ where { #[inline] fn broadcast_unwrap(&self, dim: E) -> ArrayView<'_, A, E> - where E: Dimension { + where E: Dimension + { #[cold] #[inline(never)] fn broadcast_panic(from: &D, to: &E) -> ! @@ -1550,7 +1564,8 @@ where // (Checked in debug assertions). #[inline] fn broadcast_assume(&self, dim: E) -> ArrayView<'_, A, E> - where E: Dimension { + where E: Dimension + { let dim = dim.into_dimension(); debug_assert_eq!(self.shape(), dim.slice()); let ptr = self.ptr; @@ -1560,7 +1575,8 @@ where } /// Remove array axis `axis` and return the result. - fn try_remove_axis(self, axis: Axis) -> ArrayBase { + fn try_remove_axis(self, axis: Axis) -> ArrayBase + { let d = self.dim.try_remove_axis(axis); let s = self.strides.try_remove_axis(axis); // safe because new dimension, strides allow access to a subset of old data @@ -1598,14 +1614,16 @@ mod impl_raw_views; mod impl_cow; /// Returns `true` if the pointer is aligned. -pub(crate) fn is_aligned(ptr: *const T) -> bool { +pub(crate) fn is_aligned(ptr: *const T) -> bool +{ (ptr as usize) % ::std::mem::align_of::() == 0 } #[cfg(feature = "opencl")] mod opencl; -pub fn configure() { +pub fn configure() +{ #[cfg(feature = "opencl")] unsafe { hasty_::opencl::configure_opencl(); diff --git a/src/linalg/impl_linalg.rs b/src/linalg/impl_linalg.rs index 67d8db6e6..bcfcba94e 100644 --- a/src/linalg/impl_linalg.rs +++ b/src/linalg/impl_linalg.rs @@ -66,7 +66,8 @@ where S: Data /// layout allows. 
#[track_caller] pub fn dot(&self, rhs: &Rhs) -> >::Output - where Self: Dot { + where Self: Dot + { Dot::dot(self, rhs) } @@ -144,7 +145,8 @@ where S: Data /// which agrees with our pointer for non-negative strides, but /// is at the opposite end for negative strides. #[cfg(feature = "blas")] -unsafe fn blas_1d_params(ptr: *const A, len: usize, stride: isize) -> (*const A, blas_index, blas_index) { +unsafe fn blas_1d_params(ptr: *const A, len: usize, stride: isize) -> (*const A, blas_index, blas_index) +{ // [x x x x] // ^--ptr // stride = -1 @@ -161,7 +163,8 @@ unsafe fn blas_1d_params(ptr: *const A, len: usize, stride: isize) -> (*const /// /// For two-dimensional arrays, the dot method computes the matrix /// multiplication. -pub trait Dot { +pub trait Dot +{ /// The result of the operation. /// /// For two-dimensional arrays: a rectangular array. @@ -186,7 +189,8 @@ where /// *Note:* If enabled, uses blas `dot` for elements of `f32, f64` when memory /// layout allows. #[track_caller] - fn dot(&self, rhs: &ArrayBase) -> A { + fn dot(&self, rhs: &ArrayBase) -> A + { self.dot_impl(rhs) } } @@ -209,7 +213,8 @@ where /// /// **Panics** if shapes are incompatible. #[track_caller] - fn dot(&self, rhs: &ArrayBase) -> Array { + fn dot(&self, rhs: &ArrayBase) -> Array + { rhs.t().dot(self) } } @@ -248,7 +253,8 @@ where S: Data /// ``` #[track_caller] pub fn dot(&self, rhs: &Rhs) -> >::Output - where Self: Dot { + where Self: Dot + { Dot::dot(self, rhs) } } @@ -260,7 +266,8 @@ where A: LinalgScalar, { type Output = Array2; - fn dot(&self, b: &ArrayBase) -> Array2 { + fn dot(&self, b: &ArrayBase) -> Array2 + { let a = self.view(); let b = b.view(); let ((m, k), (k2, n)) = (a.dim(), b.dim()); @@ -286,7 +293,8 @@ where /// Assumes that `m` and `n` are ≤ `isize::MAX`. #[cold] #[inline(never)] -fn dot_shape_error(m: usize, k: usize, k2: usize, n: usize) -> ! { +fn dot_shape_error(m: usize, k: usize, k2: usize, n: usize) -> ! 
+{ match m.checked_mul(n) { Some(len) if len <= ::std::isize::MAX as usize => {} _ => panic!("ndarray: shape {} × {} overflows isize", m, n), @@ -299,7 +307,8 @@ fn dot_shape_error(m: usize, k: usize, k2: usize, n: usize) -> ! { #[cold] #[inline(never)] -fn general_dot_shape_error(m: usize, k: usize, k2: usize, n: usize, c1: usize, c2: usize) -> ! { +fn general_dot_shape_error(m: usize, k: usize, k2: usize, n: usize, c1: usize, c2: usize) -> ! +{ panic!("ndarray: inputs {} × {}, {} × {}, and output {} × {} are not compatible for matrix multiplication", m, k, k2, n, c1, c2); } @@ -321,7 +330,8 @@ where { type Output = Array; #[track_caller] - fn dot(&self, rhs: &ArrayBase) -> Array { + fn dot(&self, rhs: &ArrayBase) -> Array + { let ((m, a), n) = (self.dim(), rhs.dim()); if a != n { dot_shape_error(m, a, n, 1); @@ -487,7 +497,8 @@ fn mat_mul_impl( /// C ← α A B + β C fn mat_mul_general( alpha: A, lhs: &ArrayView2<'_, A>, rhs: &ArrayView2<'_, A>, beta: A, c: &mut ArrayViewMut2<'_, A>, -) where A: LinalgScalar { +) where A: LinalgScalar +{ let ((m, k), (_, n)) = (lhs.dim(), rhs.dim()); // common parameters for gemm @@ -780,14 +791,16 @@ where #[inline(always)] /// Return `true` if `A` and `B` are the same type -fn same_type() -> bool { +fn same_type() -> bool +{ TypeId::of::() == TypeId::of::() } // Read pointer to type `A` as type `B`. 
// // **Panics** if `A` and `B` are not the same type -fn cast_as(a: &A) -> B { +fn cast_as(a: &A) -> B +{ assert!(same_type::(), "expect type {} and {} to match", std::any::type_name::(), std::any::type_name::()); unsafe { ::std::ptr::read(a as *const _ as *const B) } @@ -795,7 +808,8 @@ fn cast_as(a: &A) -> B { /// Return the complex in the form of an array [re, im] #[inline] -fn complex_array(z: Complex) -> [A; 2] { +fn complex_array(z: Complex) -> [A; 2] +{ [z.re, z.im] } @@ -820,7 +834,8 @@ where } #[cfg(feature = "blas")] -enum MemoryOrder { +enum MemoryOrder +{ C, F, } @@ -852,7 +867,8 @@ where } #[cfg(feature = "blas")] -fn is_blas_2d(dim: &Ix2, stride: &Ix2, order: MemoryOrder) -> bool { +fn is_blas_2d(dim: &Ix2, stride: &Ix2, order: MemoryOrder) -> bool +{ let (m, n) = dim.into_pattern(); let s0 = stride[0] as isize; let s1 = stride[1] as isize; @@ -895,32 +911,37 @@ where #[cfg(test)] #[cfg(feature = "blas")] -mod blas_tests { +mod blas_tests +{ use super::*; #[test] - fn blas_row_major_2d_normal_matrix() { + fn blas_row_major_2d_normal_matrix() + { let m: Array2 = Array2::zeros((3, 5)); assert!(blas_row_major_2d::(&m)); assert!(!blas_column_major_2d::(&m)); } #[test] - fn blas_row_major_2d_row_matrix() { + fn blas_row_major_2d_row_matrix() + { let m: Array2 = Array2::zeros((1, 5)); assert!(blas_row_major_2d::(&m)); assert!(blas_column_major_2d::(&m)); } #[test] - fn blas_row_major_2d_column_matrix() { + fn blas_row_major_2d_column_matrix() + { let m: Array2 = Array2::zeros((5, 1)); assert!(blas_row_major_2d::(&m)); assert!(blas_column_major_2d::(&m)); } #[test] - fn blas_row_major_2d_transposed_row_matrix() { + fn blas_row_major_2d_transposed_row_matrix() + { let m: Array2 = Array2::zeros((1, 5)); let m_t = m.t(); assert!(blas_row_major_2d::(&m_t)); @@ -928,7 +949,8 @@ mod blas_tests { } #[test] - fn blas_row_major_2d_transposed_column_matrix() { + fn blas_row_major_2d_transposed_column_matrix() + { let m: Array2 = Array2::zeros((5, 1)); let m_t = 
m.t(); assert!(blas_row_major_2d::(&m_t)); @@ -936,7 +958,8 @@ mod blas_tests { } #[test] - fn blas_column_major_2d_normal_matrix() { + fn blas_column_major_2d_normal_matrix() + { let m: Array2 = Array2::zeros((3, 5).f()); assert!(!blas_row_major_2d::(&m)); assert!(blas_column_major_2d::(&m)); diff --git a/src/linspace.rs b/src/linspace.rs index ec03d5b7a..411c480db 100644 --- a/src/linspace.rs +++ b/src/linspace.rs @@ -11,7 +11,8 @@ use num_traits::Float; /// An iterator of a sequence of evenly spaced floats. /// /// Iterator element type is `F`. -pub struct Linspace { +pub struct Linspace +{ start: F, step: F, index: usize, @@ -24,7 +25,8 @@ where F: Float type Item = F; #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { if self.index >= self.len { None } else { @@ -36,7 +38,8 @@ where F: Float } #[inline] - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { let n = self.len - self.index; (n, Some(n)) } @@ -46,7 +49,8 @@ impl DoubleEndedIterator for Linspace where F: Float { #[inline] - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option + { if self.index >= self.len { None } else { @@ -70,7 +74,8 @@ impl ExactSizeIterator for Linspace where Linspace: Iterator {} /// **Panics** if converting `n - 1` to type `F` fails. #[inline] pub fn linspace(a: F, b: F, n: usize) -> Linspace -where F: Float { +where F: Float +{ let step = if n > 1 { let num_steps = F::from(n - 1).expect("Converting number of steps to `A` must not fail."); (b - a) / num_steps @@ -96,7 +101,8 @@ where F: Float { /// **Panics** if converting `((b - a) / step).ceil()` to type `F` fails. 
#[inline] pub fn range(a: F, b: F, step: F) -> Linspace -where F: Float { +where F: Float +{ let len = b - a; let steps = F::ceil(len / step); Linspace { diff --git a/src/logspace.rs b/src/logspace.rs index ee67d09c7..6f8de885d 100644 --- a/src/logspace.rs +++ b/src/logspace.rs @@ -11,7 +11,8 @@ use num_traits::Float; /// An iterator of a sequence of logarithmically spaced number. /// /// Iterator element type is `F`. -pub struct Logspace { +pub struct Logspace +{ sign: F, base: F, start: F, @@ -26,7 +27,8 @@ where F: Float type Item = F; #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option + { if self.index >= self.len { None } else { @@ -39,7 +41,8 @@ where F: Float } #[inline] - fn size_hint(&self) -> (usize, Option) { + fn size_hint(&self) -> (usize, Option) + { let n = self.len - self.index; (n, Some(n)) } @@ -49,7 +52,8 @@ impl DoubleEndedIterator for Logspace where F: Float { #[inline] - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option + { if self.index >= self.len { None } else { @@ -76,7 +80,8 @@ impl ExactSizeIterator for Logspace where Logspace: Iterator {} /// **Panics** if converting `n - 1` to type `F` fails. 
#[inline] pub fn logspace(base: F, a: F, b: F, n: usize) -> Logspace -where F: Float { +where F: Float +{ let step = if n > 1 { let num_steps = F::from(n - 1).expect("Converting number of steps to `A` must not fail."); (b - a) / num_steps @@ -94,12 +99,14 @@ where F: Float { } #[cfg(test)] -mod tests { +mod tests +{ use super::logspace; #[test] #[cfg(feature = "approx")] - fn valid() { + fn valid() + { use crate::{arr1, Array1}; use approx::assert_abs_diff_eq; @@ -117,7 +124,8 @@ mod tests { } #[test] - fn iter_forward() { + fn iter_forward() + { let mut iter = logspace(10.0f64, 0.0, 3.0, 4); assert!(iter.size_hint() == (4, Some(4))); @@ -132,7 +140,8 @@ mod tests { } #[test] - fn iter_backward() { + fn iter_backward() + { let mut iter = logspace(10.0f64, 0.0, 3.0, 4); assert!(iter.size_hint() == (4, Some(4))); diff --git a/src/low_level_util.rs b/src/low_level_util.rs index e75554889..5a615a187 100644 --- a/src/low_level_util.rs +++ b/src/low_level_util.rs @@ -13,18 +13,22 @@ #[must_use] pub(crate) struct AbortIfPanic(pub(crate) &'static &'static str); -impl AbortIfPanic { +impl AbortIfPanic +{ /// Defuse the AbortIfPanic guard. This *must* be done when finished. #[inline] - pub(crate) fn defuse(self) { + pub(crate) fn defuse(self) + { std::mem::forget(self); } } -impl Drop for AbortIfPanic { +impl Drop for AbortIfPanic +{ // The compiler should be able to remove this, if it can see through that there // is no panic in the code section. 
- fn drop(&mut self) { + fn drop(&mut self) + { #[cfg(feature = "std")] { eprintln!("ndarray: panic in no-panic section, aborting: {}", self.0); diff --git a/src/math_cell.rs b/src/math_cell.rs index c68926250..6ed1ed71f 100644 --- a/src/math_cell.rs +++ b/src/math_cell.rs @@ -13,35 +13,43 @@ use std::ops::{Deref, DerefMut}; #[derive(Default)] pub struct MathCell(Cell); -impl MathCell { +impl MathCell +{ /// Create a new cell with the given value #[inline(always)] - pub const fn new(value: T) -> Self { + pub const fn new(value: T) -> Self + { MathCell(Cell::new(value)) } /// Return the inner value - pub fn into_inner(self) -> T { + pub fn into_inner(self) -> T + { Cell::into_inner(self.0) } /// Swap value with another cell - pub fn swap(&self, other: &Self) { + pub fn swap(&self, other: &Self) + { Cell::swap(&self.0, &other.0) } } -impl Deref for MathCell { +impl Deref for MathCell +{ type Target = Cell; #[inline(always)] - fn deref(&self) -> &Self::Target { + fn deref(&self) -> &Self::Target + { &self.0 } } -impl DerefMut for MathCell { +impl DerefMut for MathCell +{ #[inline(always)] - fn deref_mut(&mut self) -> &mut Self::Target { + fn deref_mut(&mut self) -> &mut Self::Target + { &mut self.0 } } @@ -49,7 +57,8 @@ impl DerefMut for MathCell { impl Clone for MathCell where T: Copy { - fn clone(&self) -> Self { + fn clone(&self) -> Self + { MathCell::new(self.get()) } } @@ -57,7 +66,8 @@ where T: Copy impl PartialEq for MathCell where T: Copy + PartialEq { - fn eq(&self, rhs: &Self) -> bool { + fn eq(&self, rhs: &Self) -> bool + { self.get() == rhs.get() } } @@ -67,20 +77,25 @@ impl Eq for MathCell where T: Copy + Eq {} impl PartialOrd for MathCell where T: Copy + PartialOrd { - fn partial_cmp(&self, rhs: &Self) -> Option { + fn partial_cmp(&self, rhs: &Self) -> Option + { self.get().partial_cmp(&rhs.get()) } - fn lt(&self, rhs: &Self) -> bool { + fn lt(&self, rhs: &Self) -> bool + { self.get().lt(&rhs.get()) } - fn le(&self, rhs: &Self) -> bool { + fn le(&self, 
rhs: &Self) -> bool + { self.get().le(&rhs.get()) } - fn gt(&self, rhs: &Self) -> bool { + fn gt(&self, rhs: &Self) -> bool + { self.get().gt(&rhs.get()) } - fn ge(&self, rhs: &Self) -> bool { + fn ge(&self, rhs: &Self) -> bool + { self.get().ge(&rhs.get()) } } @@ -88,7 +103,8 @@ where T: Copy + PartialOrd impl Ord for MathCell where T: Copy + Ord { - fn cmp(&self, rhs: &Self) -> Ordering { + fn cmp(&self, rhs: &Self) -> Ordering + { self.get().cmp(&rhs.get()) } } @@ -96,17 +112,20 @@ where T: Copy + Ord impl fmt::Debug for MathCell where T: Copy + fmt::Debug { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result + { self.get().fmt(f) } } #[cfg(test)] -mod tests { +mod tests +{ use super::MathCell; #[test] - fn test_basic() { + fn test_basic() + { let c = &MathCell::new(0); c.set(1); assert_eq!(c.get(), 1); diff --git a/src/numeric/impl_float_maths.rs b/src/numeric/impl_float_maths.rs index 4b3208800..54fed49c2 100644 --- a/src/numeric/impl_float_maths.rs +++ b/src/numeric/impl_float_maths.rs @@ -137,7 +137,8 @@ where /// Square (two powers) of each element. #[must_use = "method returns a new array and does not mutate the original value"] - pub fn pow2(&self) -> Array { + pub fn pow2(&self) -> Array + { self.mapv(|v: A| v * v) } } @@ -161,7 +162,8 @@ where /// # Panics /// /// Panics if `!(min <= max)`. 
- pub fn clamp(&self, min: A, max: A) -> Array { + pub fn clamp(&self, min: A, max: A) -> Array + { assert!(min <= max, "min must be less than or equal to max"); self.mapv(|a| num_traits::clamp(a, min.clone(), max.clone())) } diff --git a/src/numeric/impl_numeric.rs b/src/numeric/impl_numeric.rs index 6caad239f..ca6f24bbe 100644 --- a/src/numeric/impl_numeric.rs +++ b/src/numeric/impl_numeric.rs @@ -30,7 +30,8 @@ where /// assert_eq!(a.sum(), 10.); /// ``` pub fn sum(&self) -> A - where A: Clone + Add + num_traits::Zero { + where A: Clone + Add + num_traits::Zero + { if let Some(slc) = self.as_slice_memory_order() { return numeric_util::unrolled_fold(slc, A::zero, A::add); } @@ -50,7 +51,8 @@ where /// *This method has been renamed to `.sum()`* #[deprecated(note = "renamed to `sum`", since = "0.15.0")] pub fn scalar_sum(&self) -> A - where A: Clone + Add + num_traits::Zero { + where A: Clone + Add + num_traits::Zero + { self.sum() } @@ -68,7 +70,8 @@ where /// /// [arithmetic mean]: https://en.wikipedia.org/wiki/Arithmetic_mean pub fn mean(&self) -> Option - where A: Clone + FromPrimitive + Add + Div + Zero { + where A: Clone + FromPrimitive + Add + Div + Zero + { let n_elements = self.len(); if n_elements == 0 { None @@ -88,7 +91,8 @@ where /// assert_eq!(a.product(), 24.); /// ``` pub fn product(&self) -> A - where A: Clone + Mul + num_traits::One { + where A: Clone + Mul + num_traits::One + { if let Some(slc) = self.as_slice_memory_order() { return numeric_util::unrolled_fold(slc, A::one, A::mul); } @@ -145,7 +149,8 @@ where #[track_caller] #[cfg(feature = "std")] pub fn var(&self, ddof: A) -> A - where A: Float + FromPrimitive { + where A: Float + FromPrimitive + { let zero = A::from_usize(0).expect("Converting 0 to `A` must not fail."); let n = A::from_usize(self.len()).expect("Converting length to `A` must not fail."); assert!( @@ -209,7 +214,8 @@ where #[track_caller] #[cfg(feature = "std")] pub fn std(&self, ddof: A) -> A - where A: Float + FromPrimitive { 
+ where A: Float + FromPrimitive + { self.var(ddof).sqrt() } diff --git a/src/numeric_util.rs b/src/numeric_util.rs index 1ed6d75a5..9d5ce66c5 100644 --- a/src/numeric_util.rs +++ b/src/numeric_util.rs @@ -54,7 +54,8 @@ where /// /// `xs` and `ys` must be the same length pub fn unrolled_dot(xs: &[A], ys: &[A]) -> A -where A: LinalgScalar { +where A: LinalgScalar +{ debug_assert_eq!(xs.len(), ys.len()); // eightfold unrolled so that floating point can be vectorized // (even with strict floating point accuracy semantics) @@ -95,7 +96,8 @@ where A: LinalgScalar { /// /// `xs` and `ys` must be the same length pub fn unrolled_eq(xs: &[A], ys: &[B]) -> bool -where A: PartialEq { +where A: PartialEq +{ debug_assert_eq!(xs.len(), ys.len()); // eightfold unrolled for performance (this is not done by llvm automatically) let len = cmp::min(xs.len(), ys.len()); diff --git a/src/opencl.rs b/src/opencl.rs index f9b127457..6e5206dcc 100644 --- a/src/opencl.rs +++ b/src/opencl.rs @@ -1,4 +1,5 @@ -pub(crate) fn rust_type_to_c_name() -> Option<&'static str> { +pub(crate) fn rust_type_to_c_name() -> Option<&'static str> +{ match std::any::type_name::() { "f32" => Some("float"), "f64" => Some("double"), @@ -14,7 +15,8 @@ pub(crate) fn rust_type_to_c_name() -> Option<&'static str> { } } -pub(crate) fn gen_contiguous_linear_kernel_3(kernel_name: &str, typename: &str, op: &str) -> String { +pub(crate) fn gen_contiguous_linear_kernel_3(kernel_name: &str, typename: &str, op: &str) -> String +{ format!( r#" #ifndef NDARRAY_INCLUDE_STDINT diff --git a/src/order.rs b/src/order.rs index 4ab8c84e8..a52a32e2c 100644 --- a/src/order.rs +++ b/src/order.rs @@ -30,14 +30,16 @@ /// or "Fortran" order. 
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[non_exhaustive] -pub enum Order { +pub enum Order +{ /// Row major or "C" order RowMajor, /// Column major or "F" order ColumnMajor, } -impl Order { +impl Order +{ /// "C" is an alias for row major ordering pub const C: Order = Order::RowMajor; @@ -46,7 +48,8 @@ impl Order { /// Return true if input is Order::RowMajor, false otherwise #[inline] - pub fn is_row_major(self) -> bool { + pub fn is_row_major(self) -> bool + { match self { Order::RowMajor => true, Order::ColumnMajor => false, @@ -55,13 +58,15 @@ impl Order { /// Return true if input is Order::ColumnMajor, false otherwise #[inline] - pub fn is_column_major(self) -> bool { + pub fn is_column_major(self) -> bool + { !self.is_row_major() } /// Return Order::RowMajor if the input is true, Order::ColumnMajor otherwise #[inline] - pub fn row_major(row_major: bool) -> Order { + pub fn row_major(row_major: bool) -> Order + { if row_major { Order::RowMajor } else { @@ -71,13 +76,15 @@ impl Order { /// Return Order::ColumnMajor if the input is true, Order::RowMajor otherwise #[inline] - pub fn column_major(column_major: bool) -> Order { + pub fn column_major(column_major: bool) -> Order + { Self::row_major(!column_major) } /// Return the transpose: row major becomes column major and vice versa. #[inline] - pub fn transpose(self) -> Order { + pub fn transpose(self) -> Order + { match self { Order::RowMajor => Order::ColumnMajor, Order::ColumnMajor => Order::RowMajor, diff --git a/src/parallel/impl_par_methods.rs b/src/parallel/impl_par_methods.rs index 7bb513f21..b3fbdedc8 100644 --- a/src/parallel/impl_par_methods.rs +++ b/src/parallel/impl_par_methods.rs @@ -22,7 +22,8 @@ where /// /// Elements are visited in arbitrary order. 
pub fn par_map_inplace(&mut self, f: F) - where F: Fn(&mut A) + Sync + Send { + where F: Fn(&mut A) + Sync + Send + { self.view_mut().into_par_iter().for_each(f) } diff --git a/src/parallel/into_impls.rs b/src/parallel/into_impls.rs index c1a5388fd..75bded7de 100644 --- a/src/parallel/into_impls.rs +++ b/src/parallel/into_impls.rs @@ -11,7 +11,8 @@ where { type Item = &'a A; type Iter = Parallel>; - fn into_par_iter(self) -> Self::Iter { + fn into_par_iter(self) -> Self::Iter + { self.view().into_par_iter() } } @@ -25,7 +26,8 @@ where { type Item = &'a A; type Iter = Parallel>; - fn into_par_iter(self) -> Self::Iter { + fn into_par_iter(self) -> Self::Iter + { self.view().into_par_iter() } } @@ -38,7 +40,8 @@ where { type Item = &'a mut A; type Iter = Parallel>; - fn into_par_iter(self) -> Self::Iter { + fn into_par_iter(self) -> Self::Iter + { self.view_mut().into_par_iter() } } @@ -52,7 +55,8 @@ where { type Item = &'a mut A; type Iter = Parallel>; - fn into_par_iter(self) -> Self::Iter { + fn into_par_iter(self) -> Self::Iter + { self.view_mut().into_par_iter() } } diff --git a/src/parallel/mod.rs b/src/parallel/mod.rs index e5a41ac02..0c84baa91 100644 --- a/src/parallel/mod.rs +++ b/src/parallel/mod.rs @@ -123,7 +123,8 @@ use crate::iter::{AxisChunksIter, AxisChunksIterMut, AxisIter, AxisIterMut}; use crate::{ArcArray, Array, ArrayBase, ArrayView, ArrayViewMut, Zip}; /// Into- traits for creating parallelized iterators and/or using [`par_azip!`] -pub mod prelude { +pub mod prelude +{ #[doc(no_inline)] pub use rayon::prelude::{ IndexedParallelIterator, diff --git a/src/parallel/par.rs b/src/parallel/par.rs index 5caef372a..b59af4c8e 100644 --- a/src/parallel/par.rs +++ b/src/parallel/par.rs @@ -19,7 +19,8 @@ use crate::{ArrayView, ArrayViewMut}; /// Parallel iterator wrapper. #[derive(Copy, Clone, Debug)] -pub struct Parallel { +pub struct Parallel +{ iter: I, min_len: usize, } @@ -316,7 +317,8 @@ where D: Dimension /// to begin with. 
/// /// ***Panics*** if `min_len` is zero. - pub fn with_min_len(self, min_len: usize) -> Self { + pub fn with_min_len(self, min_len: usize) -> Self + { assert_ne!(min_len, 0, "Minimum number of elements must at least be one to avoid splitting off empty tasks."); Self { min_len, ..self } @@ -325,7 +327,8 @@ where D: Dimension /// A parallel iterator (unindexed) that produces the splits of the array /// or producer `P`. -pub(crate) struct ParallelSplits

{ +pub(crate) struct ParallelSplits

+{ pub(crate) iter: P, pub(crate) max_splits: usize, } @@ -336,11 +339,13 @@ where P: SplitPreference + Send type Item = P; fn drive_unindexed(self, consumer: C) -> C::Result - where C: UnindexedConsumer { + where C: UnindexedConsumer + { bridge_unindexed(self, consumer) } - fn opt_len(&self) -> Option { + fn opt_len(&self) -> Option + { None } } @@ -350,7 +355,8 @@ where P: SplitPreference + Send { type Item = P; - fn split(self) -> (Self, Option) { + fn split(self) -> (Self, Option) + { if self.max_splits == 0 || !self.iter.can_split() { return (self, None); } @@ -368,7 +374,8 @@ where P: SplitPreference + Send } fn fold_with(self, folder: Fold) -> Fold - where Fold: Folder { + where Fold: Folder + { folder.consume(self.iter) } } diff --git a/src/parallel/send_producer.rs b/src/parallel/send_producer.rs index 23d6cd475..ecfb77af0 100644 --- a/src/parallel/send_producer.rs +++ b/src/parallel/send_producer.rs @@ -4,28 +4,35 @@ use std::ops::{Deref, DerefMut}; /// An NdProducer that is unconditionally `Send`. #[repr(transparent)] -pub(crate) struct SendProducer { +pub(crate) struct SendProducer +{ inner: T, } -impl SendProducer { +impl SendProducer +{ /// Create an unconditionally `Send` ndproducer from the producer - pub(crate) unsafe fn new(producer: T) -> Self { + pub(crate) unsafe fn new(producer: T) -> Self + { Self { inner: producer } } } unsafe impl

Send for SendProducer

{} -impl

Deref for SendProducer

{ +impl

Deref for SendProducer

+{ type Target = P; - fn deref(&self) -> &P { + fn deref(&self) -> &P + { &self.inner } } -impl

DerefMut for SendProducer

{ - fn deref_mut(&mut self) -> &mut P { +impl

DerefMut for SendProducer

+{ + fn deref_mut(&mut self) -> &mut P + { &mut self.inner } } @@ -41,46 +48,55 @@ where P: NdProducer private_impl! {} #[inline(always)] - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { self.inner.raw_dim() } #[inline(always)] - fn equal_dim(&self, dim: &Self::Dim) -> bool { + fn equal_dim(&self, dim: &Self::Dim) -> bool + { self.inner.equal_dim(dim) } #[inline(always)] - fn as_ptr(&self) -> Self::Ptr { + fn as_ptr(&self) -> Self::Ptr + { self.inner.as_ptr() } #[inline(always)] - fn layout(&self) -> Layout { + fn layout(&self) -> Layout + { self.inner.layout() } #[inline(always)] - unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item { + unsafe fn as_ref(&self, ptr: Self::Ptr) -> Self::Item + { self.inner.as_ref(ptr) } #[inline(always)] - unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> Self::Ptr + { self.inner.uget_ptr(i) } #[inline(always)] - fn stride_of(&self, axis: Axis) -> Self::Stride { + fn stride_of(&self, axis: Axis) -> Self::Stride + { self.inner.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { + fn contiguous_stride(&self) -> Self::Stride + { self.inner.contiguous_stride() } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { let (a, b) = self.inner.split_at(axis, index); (Self { inner: a }, Self { inner: b }) } diff --git a/src/partial.rs b/src/partial.rs index 6054b6ba8..99aba75a8 100644 --- a/src/partial.rs +++ b/src/partial.rs @@ -12,14 +12,16 @@ use std::ptr; /// it is the owner of the elements, but not the allocation, /// and will drop the elements on drop. 
#[must_use] -pub(crate) struct Partial { +pub(crate) struct Partial +{ /// Data pointer ptr: *mut T, /// Current length pub(crate) len: usize, } -impl Partial { +impl Partial +{ /// Create an empty partial for this data pointer /// /// ## Safety @@ -29,12 +31,14 @@ impl Partial { /// the `len` elements following it valid. /// /// The Partial has an accessible length field which must only be modified in trusted code. - pub(crate) unsafe fn new(ptr: *mut T) -> Self { + pub(crate) unsafe fn new(ptr: *mut T) -> Self + { Self { ptr, len: 0 } } #[cfg(feature = "rayon")] - pub(crate) fn stub() -> Self { + pub(crate) fn stub() -> Self + { Self { len: 0, ptr: ptr::null_mut(), @@ -42,12 +46,14 @@ impl Partial { } #[cfg(feature = "rayon")] - pub(crate) fn is_stub(&self) -> bool { + pub(crate) fn is_stub(&self) -> bool + { self.ptr.is_null() } /// Release Partial's ownership of the written elements, and return the current length - pub(crate) fn release_ownership(mut self) -> usize { + pub(crate) fn release_ownership(mut self) -> usize + { let ret = self.len; self.len = 0; ret @@ -56,7 +62,8 @@ impl Partial { #[cfg(feature = "rayon")] /// Merge if they are in order (left to right) and contiguous. /// Skips merge if T does not need drop. - pub(crate) fn try_merge(mut left: Self, right: Self) -> Self { + pub(crate) fn try_merge(mut left: Self, right: Self) -> Self + { if !std::mem::needs_drop::() { return left; } @@ -77,8 +84,10 @@ impl Partial { unsafe impl Send for Partial where T: Send {} -impl Drop for Partial { - fn drop(&mut self) { +impl Drop for Partial +{ + fn drop(&mut self) + { if !self.ptr.is_null() { unsafe { ptr::drop_in_place(alloc::slice::from_raw_parts_mut(self.ptr, self.len)); diff --git a/src/shape_builder.rs b/src/shape_builder.rs index f373e0e4f..8b25f71e7 100644 --- a/src/shape_builder.rs +++ b/src/shape_builder.rs @@ -6,7 +6,8 @@ use crate::Dimension; /// /// Either c- or f- memory ordered (*c* a.k.a *row major* is the default). 
#[derive(Copy, Clone, Debug)] -pub struct Shape { +pub struct Shape +{ /// Shape (axis lengths) pub(crate) dim: D, /// Strides can only be C or F here @@ -16,15 +17,18 @@ pub struct Shape { #[derive(Copy, Clone, Debug)] pub(crate) enum Contiguous {} -impl Shape { - pub(crate) fn is_c(&self) -> bool { +impl Shape +{ + pub(crate) fn is_c(&self) -> bool + { matches!(self.strides, Strides::C) } } /// An array shape of n dimensions in c-order, f-order or custom strides. #[derive(Copy, Clone, Debug)] -pub struct StrideShape { +pub struct StrideShape +{ pub(crate) dim: D, pub(crate) strides: Strides, } @@ -33,18 +37,21 @@ impl StrideShape where D: Dimension { /// Return a reference to the dimension - pub fn raw_dim(&self) -> &D { + pub fn raw_dim(&self) -> &D + { &self.dim } /// Return the size of the shape in number of elements - pub fn size(&self) -> usize { + pub fn size(&self) -> usize + { self.dim.size() } } /// Stride description #[derive(Copy, Clone, Debug)] -pub(crate) enum Strides { +pub(crate) enum Strides +{ /// Row-major ("C"-order) C, /// Column-major ("F"-order) @@ -53,10 +60,12 @@ pub(crate) enum Strides { Custom(D), } -impl Strides { +impl Strides +{ /// Return strides for `dim` (computed from dimension if c/f, else return the custom stride) pub(crate) fn strides_for_dim(self, dim: &D) -> D - where D: Dimension { + where D: Dimension + { match self { Strides::C => dim.default_strides(), Strides::F => dim.fortran_strides(), @@ -73,7 +82,8 @@ impl Strides { } } - pub(crate) fn is_custom(&self) -> bool { + pub(crate) fn is_custom(&self) -> bool + { matches!(*self, Strides::Custom(_)) } } @@ -83,7 +93,8 @@ impl Strides { /// /// This trait is used together with array constructor methods like /// `Array::from_shape_vec`. -pub trait ShapeBuilder { +pub trait ShapeBuilder +{ type Dim: Dimension; type Strides; @@ -97,7 +108,8 @@ impl From for Shape where D: Dimension { /// Create a `Shape` from `dimension`, using the default memory layout. 
- fn from(dimension: D) -> Shape { + fn from(dimension: D) -> Shape + { dimension.into_shape_with_order() } } @@ -107,7 +119,8 @@ where D: Dimension, T: ShapeBuilder, { - fn from(value: T) -> Self { + fn from(value: T) -> Self + { let shape = value.into_shape_with_order(); let st = if shape.is_c() { Strides::C } else { Strides::F }; StrideShape { @@ -122,19 +135,23 @@ where T: IntoDimension { type Dim = T::Dim; type Strides = T; - fn into_shape_with_order(self) -> Shape { + fn into_shape_with_order(self) -> Shape + { Shape { dim: self.into_dimension(), strides: Strides::C, } } - fn f(self) -> Shape { + fn f(self) -> Shape + { self.set_f(true) } - fn set_f(self, is_f: bool) -> Shape { + fn set_f(self, is_f: bool) -> Shape + { self.into_shape_with_order().set_f(is_f) } - fn strides(self, st: T) -> StrideShape { + fn strides(self, st: T) -> StrideShape + { self.into_shape_with_order().strides(st.into_dimension()) } } @@ -145,20 +162,24 @@ where D: Dimension type Dim = D; type Strides = D; - fn into_shape_with_order(self) -> Shape { + fn into_shape_with_order(self) -> Shape + { self } - fn f(self) -> Self { + fn f(self) -> Self + { self.set_f(true) } - fn set_f(mut self, is_f: bool) -> Self { + fn set_f(mut self, is_f: bool) -> Self + { self.strides = if !is_f { Strides::C } else { Strides::F }; self } - fn strides(self, st: D) -> StrideShape { + fn strides(self, st: D) -> StrideShape + { StrideShape { dim: self.dim, strides: Strides::Custom(st), @@ -170,11 +191,13 @@ impl Shape where D: Dimension { /// Return a reference to the dimension - pub fn raw_dim(&self) -> &D { + pub fn raw_dim(&self) -> &D + { &self.dim } /// Return the size of the shape in number of elements - pub fn size(&self) -> usize { + pub fn size(&self) -> usize + { self.dim.size() } } @@ -187,7 +210,8 @@ where D: Dimension /// (optionally) an ordering argument. /// /// See for example [`.to_shape()`](crate::ArrayBase::to_shape). 
-pub trait ShapeArg { +pub trait ShapeArg +{ type Dim: Dimension; fn into_shape_and_order(self) -> (Self::Dim, Option); } @@ -197,7 +221,8 @@ where T: IntoDimension { type Dim = T::Dim; - fn into_shape_and_order(self) -> (Self::Dim, Option) { + fn into_shape_and_order(self) -> (Self::Dim, Option) + { (self.into_dimension(), None) } } @@ -207,7 +232,8 @@ where T: IntoDimension { type Dim = T::Dim; - fn into_shape_and_order(self) -> (Self::Dim, Option) { + fn into_shape_and_order(self) -> (Self::Dim, Option) + { (self.0.into_dimension(), Some(self.1)) } } diff --git a/src/slice.rs b/src/slice.rs index 8d6fc03d8..9e6acc449 100644 --- a/src/slice.rs +++ b/src/slice.rs @@ -36,7 +36,8 @@ use std::ops::{Deref, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, Rang /// reverse order. It can also be created with `Slice::from(a..).step_by(-1)`. /// The Python equivalent is `[a::-1]`. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub struct Slice { +pub struct Slice +{ /// start index; negative are counted from the back of the axis pub start: isize, /// end index; negative are counted from the back of the axis; when not present @@ -46,7 +47,8 @@ pub struct Slice { pub step: isize, } -impl Slice { +impl Slice +{ /// Create a new `Slice` with the given extents. /// /// See also the `From` impls, converting from ranges; for example @@ -54,7 +56,8 @@ impl Slice { /// /// `step` must be nonzero. /// (This method checks with a debug assertion that `step` is not zero.) - pub fn new(start: isize, end: Option, step: isize) -> Slice { + pub fn new(start: isize, end: Option, step: isize) -> Slice + { debug_assert_ne!(step, 0, "Slice::new: step must be nonzero"); Slice { start, end, step } } @@ -65,7 +68,8 @@ impl Slice { /// `step` must be nonzero. /// (This method checks with a debug assertion that `step` is not zero.) 
#[inline] - pub fn step_by(self, step: isize) -> Self { + pub fn step_by(self, step: isize) -> Self + { debug_assert_ne!(step, 0, "Slice::step_by: step must be nonzero"); Slice { step: self.step * step, @@ -109,11 +113,13 @@ pub struct NewAxis; /// with `SliceInfoElem::from(NewAxis)`. The Python equivalent is /// `[np.newaxis]`. The macro equivalent is `s![NewAxis]`. #[derive(Debug, PartialEq, Eq, Hash)] -pub enum SliceInfoElem { +pub enum SliceInfoElem +{ /// A range with step size. `end` is an exclusive index. Negative `start` /// or `end` indexes are counted from the back of the axis. If `end` is /// `None`, the slice extends to the end of the axis. - Slice { + Slice + { /// start index; negative are counted from the back of the axis start: isize, /// end index; negative are counted from the back of the axis; when not present @@ -130,25 +136,31 @@ pub enum SliceInfoElem { copy_and_clone! {SliceInfoElem} -impl SliceInfoElem { +impl SliceInfoElem +{ /// Returns `true` if `self` is a `Slice` value. - pub fn is_slice(&self) -> bool { + pub fn is_slice(&self) -> bool + { matches!(self, SliceInfoElem::Slice { .. }) } /// Returns `true` if `self` is an `Index` value. - pub fn is_index(&self) -> bool { + pub fn is_index(&self) -> bool + { matches!(self, SliceInfoElem::Index(_)) } /// Returns `true` if `self` is a `NewAxis` value. 
- pub fn is_new_axis(&self) -> bool { + pub fn is_new_axis(&self) -> bool + { matches!(self, SliceInfoElem::NewAxis) } } -impl fmt::Display for SliceInfoElem { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { +impl fmt::Display for SliceInfoElem +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result + { match *self { SliceInfoElem::Index(index) => write!(f, "{}", index)?, SliceInfoElem::Slice { start, end, step } => { @@ -236,9 +248,11 @@ impl_slice_variant_from_range!(SliceInfoElem, SliceInfoElem::Slice, isize); impl_slice_variant_from_range!(SliceInfoElem, SliceInfoElem::Slice, usize); impl_slice_variant_from_range!(SliceInfoElem, SliceInfoElem::Slice, i32); -impl From for Slice { +impl From for Slice +{ #[inline] - fn from(_: RangeFull) -> Slice { + fn from(_: RangeFull) -> Slice + { Slice { start: 0, end: None, @@ -247,9 +261,11 @@ impl From for Slice { } } -impl From for SliceInfoElem { +impl From for SliceInfoElem +{ #[inline] - fn from(_: RangeFull) -> SliceInfoElem { + fn from(_: RangeFull) -> SliceInfoElem + { SliceInfoElem::Slice { start: 0, end: None, @@ -258,9 +274,11 @@ impl From for SliceInfoElem { } } -impl From for SliceInfoElem { +impl From for SliceInfoElem +{ #[inline] - fn from(s: Slice) -> SliceInfoElem { + fn from(s: Slice) -> SliceInfoElem + { SliceInfoElem::Slice { start: s.start, end: s.end, @@ -283,9 +301,11 @@ impl_sliceinfoelem_from_index!(isize); impl_sliceinfoelem_from_index!(usize); impl_sliceinfoelem_from_index!(i32); -impl From for SliceInfoElem { +impl From for SliceInfoElem +{ #[inline] - fn from(_: NewAxis) -> SliceInfoElem { + fn from(_: NewAxis) -> SliceInfoElem + { SliceInfoElem::NewAxis } } @@ -297,7 +317,8 @@ impl From for SliceInfoElem { /// consistent with the `&[SliceInfoElem]` returned by `self.as_ref()` and that /// `self.as_ref()` always returns the same value when called multiple times. 
#[allow(clippy::missing_safety_doc)] // not implementable downstream -pub unsafe trait SliceArg: AsRef<[SliceInfoElem]> { +pub unsafe trait SliceArg: AsRef<[SliceInfoElem]> +{ /// Dimensionality of the output array. type OutDim: Dimension; @@ -317,11 +338,13 @@ where { type OutDim = T::OutDim; - fn in_ndim(&self) -> usize { + fn in_ndim(&self) -> usize + { T::in_ndim(self) } - fn out_ndim(&self) -> usize { + fn out_ndim(&self) -> usize + { T::out_ndim(self) } @@ -365,25 +388,30 @@ where { type OutDim = Dout; - fn in_ndim(&self) -> usize { + fn in_ndim(&self) -> usize + { self.in_ndim() } - fn out_ndim(&self) -> usize { + fn out_ndim(&self) -> usize + { self.out_ndim() } private_impl! {} } -unsafe impl SliceArg for [SliceInfoElem] { +unsafe impl SliceArg for [SliceInfoElem] +{ type OutDim = IxDyn; - fn in_ndim(&self) -> usize { + fn in_ndim(&self) -> usize + { self.iter().filter(|s| !s.is_new_axis()).count() } - fn out_ndim(&self) -> usize { + fn out_ndim(&self) -> usize + { self.iter().filter(|s| !s.is_index()).count() } @@ -401,7 +429,8 @@ unsafe impl SliceArg for [SliceInfoElem] { /// /// [`.slice()`]: crate::ArrayBase::slice #[derive(Debug)] -pub struct SliceInfo { +pub struct SliceInfo +{ in_dim: PhantomData, out_dim: PhantomData, indices: T, @@ -413,7 +442,8 @@ where Dout: Dimension, { type Target = T; - fn deref(&self) -> &Self::Target { + fn deref(&self) -> &Self::Target + { &self.indices } } @@ -454,7 +484,8 @@ where #[doc(hidden)] pub unsafe fn new_unchecked( indices: T, in_dim: PhantomData, out_dim: PhantomData, - ) -> SliceInfo { + ) -> SliceInfo + { if cfg!(debug_assertions) { check_dims_for_sliceinfo::(indices.as_ref()) .expect("`Din` and `Dout` must be consistent with `indices`."); @@ -476,7 +507,8 @@ where /// /// The caller must ensure `indices.as_ref()` always returns the same value /// when called multiple times. 
- pub unsafe fn new(indices: T) -> Result, ShapeError> { + pub unsafe fn new(indices: T) -> Result, ShapeError> + { check_dims_for_sliceinfo::(indices.as_ref())?; Ok(SliceInfo { in_dim: PhantomData, @@ -491,7 +523,8 @@ where /// If `Din` is a fixed-size dimension type, then this is equivalent to /// `Din::NDIM.unwrap()`. Otherwise, the value is calculated by iterating /// over the `SliceInfoElem` elements. - pub fn in_ndim(&self) -> usize { + pub fn in_ndim(&self) -> usize + { if let Some(ndim) = Din::NDIM { ndim } else { @@ -506,7 +539,8 @@ where /// If `Dout` is a fixed-size dimension type, then this is equivalent to /// `Dout::NDIM.unwrap()`. Otherwise, the value is calculated by iterating /// over the `SliceInfoElem` elements. - pub fn out_ndim(&self) -> usize { + pub fn out_ndim(&self) -> usize + { if let Some(ndim) = Dout::NDIM { ndim } else { @@ -522,7 +556,8 @@ where { type Error = ShapeError; - fn try_from(indices: &'a [SliceInfoElem]) -> Result, ShapeError> { + fn try_from(indices: &'a [SliceInfoElem]) -> Result, ShapeError> + { unsafe { // This is okay because `&[SliceInfoElem]` always returns the same // value for `.as_ref()`. @@ -538,7 +573,8 @@ where { type Error = ShapeError; - fn try_from(indices: Vec) -> Result, Din, Dout>, ShapeError> { + fn try_from(indices: Vec) -> Result, Din, Dout>, ShapeError> + { unsafe { // This is okay because `Vec` always returns the same value for // `.as_ref()`. 
@@ -585,7 +621,8 @@ where Din: Dimension, Dout: Dimension, { - fn as_ref(&self) -> &[SliceInfoElem] { + fn as_ref(&self) -> &[SliceInfoElem] + { self.indices.as_ref() } } @@ -596,7 +633,8 @@ where Din: Dimension, Dout: Dimension, { - fn from(info: &'a SliceInfo) -> SliceInfo<&'a [SliceInfoElem], Din, Dout> { + fn from(info: &'a SliceInfo) -> SliceInfo<&'a [SliceInfoElem], Din, Dout> + { SliceInfo { in_dim: info.in_dim, out_dim: info.out_dim, @@ -619,7 +657,8 @@ where Din: Dimension, Dout: Dimension, { - fn clone(&self) -> Self { + fn clone(&self) -> Self + { SliceInfo { in_dim: PhantomData, out_dim: PhantomData, @@ -630,19 +669,22 @@ where /// Trait for determining dimensionality of input and output for [`s!`] macro. #[doc(hidden)] -pub trait SliceNextDim { +pub trait SliceNextDim +{ /// Number of dimensions that this slicing argument consumes in the input array. type InDim: Dimension; /// Number of dimensions that this slicing argument produces in the output array. type OutDim: Dimension; fn next_in_dim(&self, _: PhantomData) -> PhantomData<>::Output> - where D: Dimension + DimAdd { + where D: Dimension + DimAdd + { PhantomData } fn next_out_dim(&self, _: PhantomData) -> PhantomData<>::Output> - where D: Dimension + DimAdd { + where D: Dimension + DimAdd + { PhantomData } } @@ -904,7 +946,8 @@ where { type Output = (ArrayViewMut<'a, A, I0::OutDim>,); - fn multi_slice_move(&self, view: ArrayViewMut<'a, A, D>) -> Self::Output { + fn multi_slice_move(&self, view: ArrayViewMut<'a, A, D>) -> Self::Output + { (view.slice_move(&self.0),) } @@ -966,7 +1009,8 @@ where { type Output = T::Output; - fn multi_slice_move(&self, view: ArrayViewMut<'a, A, D>) -> Self::Output { + fn multi_slice_move(&self, view: ArrayViewMut<'a, A, D>) -> Self::Output + { T::multi_slice_move(self, view) } diff --git a/src/split_at.rs b/src/split_at.rs index 67c46d06f..4af1403c0 100644 --- a/src/split_at.rs +++ b/src/split_at.rs @@ -1,17 +1,20 @@ use crate::imp_prelude::*; /// Arrays and similar 
that can be split along an axis -pub(crate) trait SplitAt { +pub(crate) trait SplitAt +{ fn split_at(self, axis: Axis, index: usize) -> (Self, Self) where Self: Sized; } -pub(crate) trait SplitPreference: SplitAt { +pub(crate) trait SplitPreference: SplitAt +{ #[allow(dead_code)] // used only when Rayon support is enabled fn can_split(&self) -> bool; fn split_preference(&self) -> (Axis, usize); fn split(self) -> (Self, Self) - where Self: Sized { + where Self: Sized + { let (axis, index) = self.split_preference(); self.split_at(axis, index) } @@ -20,7 +23,8 @@ pub(crate) trait SplitPreference: SplitAt { impl SplitAt for D where D: Dimension { - fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) { + fn split_at(self, axis: Axis, index: Ix) -> (Self, Self) + { let mut d1 = self; let mut d2 = d1.clone(); let i = axis.index(); @@ -34,7 +38,8 @@ where D: Dimension impl<'a, A, D> SplitAt for ArrayViewMut<'a, A, D> where D: Dimension { - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { self.split_at(axis, index) } } @@ -42,7 +47,8 @@ where D: Dimension impl SplitAt for RawArrayViewMut where D: Dimension { - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { self.split_at(axis, index) } } diff --git a/src/zip/mod.rs b/src/zip/mod.rs index 82edb3d61..aadc93032 100644 --- a/src/zip/mod.rs +++ b/src/zip/mod.rs @@ -51,7 +51,8 @@ where E: IntoDimension } /// Compute `Layout` hints for array shape dim, strides -fn array_layout(dim: &D, strides: &D) -> Layout { +fn array_layout(dim: &D, strides: &D) -> Layout +{ let n = dim.ndim(); if dimension::is_layout_c(dim, strides) { // effectively one-dimensional => C and F layout compatible @@ -80,7 +81,8 @@ where S: RawData, D: Dimension, { - pub(crate) fn layout_impl(&self) -> Layout { + pub(crate) fn layout_impl(&self) -> Layout + { array_layout(&self.dim, &self.strides) } } 
@@ -91,7 +93,8 @@ where D: Dimension, { type Output = ArrayView<'a, A, E::Dim>; - fn broadcast_unwrap(self, shape: E) -> Self::Output { + fn broadcast_unwrap(self, shape: E) -> Self::Output + { #[allow(clippy::needless_borrow)] let res: ArrayView<'_, A, E::Dim> = (&self).broadcast_unwrap(shape.into_dimension()); unsafe { ArrayView::new(res.ptr, res.dim, res.strides) } @@ -99,7 +102,8 @@ where private_impl! {} } -trait ZippableTuple: Sized { +trait ZippableTuple: Sized +{ type Item; type Ptr: OffsetTuple + Copy; type Dim: Dimension; @@ -188,7 +192,8 @@ trait ZippableTuple: Sized { /// ``` #[derive(Debug, Clone)] #[must_use = "zipping producers is lazy and does nothing unless consumed"] -pub struct Zip { +pub struct Zip +{ parts: Parts, dimension: D, layout: Layout, @@ -207,7 +212,8 @@ where /// The Zip will take the exact dimension of `p` and all inputs /// must have the same dimensions (or be broadcast to them). pub fn from(p: IP) -> Self - where IP: IntoNdProducer { + where IP: IntoNdProducer + { let array = p.into_producer(); let dim = array.raw_dim(); let layout = array.layout(); @@ -231,7 +237,8 @@ where /// /// *Note:* Indexed zip has overhead. pub fn indexed(p: IP) -> Self - where IP: IntoNdProducer { + where IP: IntoNdProducer + { let array = p.into_producer(); let dim = array.raw_dim(); Zip::from(indices(dim)).and(array) @@ -256,7 +263,8 @@ impl Zip where D: Dimension { /// Return a the number of element tuples in the Zip - pub fn size(&self) -> usize { + pub fn size(&self) -> usize + { self.dimension.size() } @@ -264,18 +272,21 @@ where D: Dimension /// /// ***Panics*** if `axis` is out of bounds. 
#[track_caller] - fn len_of(&self, axis: Axis) -> usize { + fn len_of(&self, axis: Axis) -> usize + { self.dimension[axis.index()] } - fn prefer_f(&self) -> bool { + fn prefer_f(&self) -> bool + { !self.layout.is(Layout::CORDER) && (self.layout.is(Layout::FORDER) || self.layout_tendency < 0) } /// Return an *approximation* to the max stride axis; if /// component arrays disagree, there may be no choice better than the /// others. - fn max_stride_axis(&self) -> Axis { + fn max_stride_axis(&self) -> Axis + { let i = if self.prefer_f() { self.dimension .slice() @@ -415,7 +426,8 @@ where D: Dimension } #[cfg(feature = "rayon")] - pub(crate) fn uninitialized_for_current_layout(&self) -> Array, D> { + pub(crate) fn uninitialized_for_current_layout(&self) -> Array, D> + { let is_f = self.prefer_f(); Array::uninit(self.dimension.clone().set_f(is_f)) } @@ -430,7 +442,8 @@ where /// Debug assert traversal order is like c (including 1D case) // Method placement: only used for binary Zip at the moment. #[inline] - pub(crate) fn debug_assert_c_order(self) -> Self { + pub(crate) fn debug_assert_c_order(self) -> Self + { debug_assert!(self.layout.is(Layout::CORDER) || self.layout_tendency >= 0 || self.dimension.slice().iter().filter(|&&d| d > 1).count() <= 1, "Assertion failed: traversal is not c-order or 1D for \ @@ -455,14 +468,17 @@ impl Offset for *mut T { } */ -trait OffsetTuple { +trait OffsetTuple +{ type Args; unsafe fn stride_offset(self, stride: Self::Args, index: usize) -> Self; } -impl OffsetTuple for *mut T { +impl OffsetTuple for *mut T +{ type Args = isize; - unsafe fn stride_offset(self, stride: Self::Args, index: usize) -> Self { + unsafe fn stride_offset(self, stride: Self::Args, index: usize) -> Self + { self.offset(index as isize * stride) } } @@ -936,23 +952,27 @@ map_impl! { /// Value controlling the execution of `.fold_while` on `Zip`. 
#[derive(Debug, Copy, Clone)] -pub enum FoldWhile { +pub enum FoldWhile +{ /// Continue folding with this value Continue(T), /// Fold is complete and will return this value Done(T), } -impl FoldWhile { +impl FoldWhile +{ /// Return the inner value - pub fn into_inner(self) -> T { + pub fn into_inner(self) -> T + { match self { FoldWhile::Continue(x) | FoldWhile::Done(x) => x, } } /// Return true if it is `Done`, false if `Continue` - pub fn is_done(&self) -> bool { + pub fn is_done(&self) -> bool + { match *self { FoldWhile::Continue(_) => false, FoldWhile::Done(_) => true, diff --git a/src/zip/ndproducer.rs b/src/zip/ndproducer.rs index b69626bcf..1d1b3391b 100644 --- a/src/zip/ndproducer.rs +++ b/src/zip/ndproducer.rs @@ -9,7 +9,8 @@ use alloc::vec::Vec; /// Slices and vectors can be used (equivalent to 1-dimensional array views). /// /// This trait is like `IntoIterator` for `NdProducers` instead of iterators. -pub trait IntoNdProducer { +pub trait IntoNdProducer +{ /// The element produced per iteration. type Item; /// Dimension type of the producer @@ -25,7 +26,8 @@ where P: NdProducer type Item = P::Item; type Dim = P::Dim; type Output = Self; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { self } } @@ -50,7 +52,8 @@ where P: NdProducer /// *producing* multidimensional items). /// /// See also [`IntoNdProducer`] -pub trait NdProducer { +pub trait NdProducer +{ /// The element produced per iteration. type Item; // Internal use / Pointee type @@ -73,7 +76,8 @@ pub trait NdProducer { /// Return the shape of the producer. fn raw_dim(&self) -> Self::Dim; #[doc(hidden)] - fn equal_dim(&self, dim: &Self::Dim) -> bool { + fn equal_dim(&self, dim: &Self::Dim) -> bool + { self.raw_dim() == *dim } #[doc(hidden)] @@ -93,23 +97,28 @@ pub trait NdProducer { private_decl! {} } -pub trait Offset: Copy { +pub trait Offset: Copy +{ type Stride: Copy; unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self; private_decl! 
{} } -impl Offset for *const T { +impl Offset for *const T +{ type Stride = isize; - unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self { + unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self + { self.offset(s * (index as isize)) } private_impl! {} } -impl Offset for *mut T { +impl Offset for *mut T +{ type Stride = isize; - unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self { + unsafe fn stride_offset(self, s: Self::Stride, index: usize) -> Self + { self.offset(s * (index as isize)) } private_impl! {} @@ -125,7 +134,8 @@ where type Item = &'a A; type Dim = D; type Output = ArrayView<'a, A, D>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { self.view() } } @@ -140,72 +150,86 @@ where type Item = &'a mut A; type Dim = D; type Output = ArrayViewMut<'a, A, D>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { self.view_mut() } } /// A slice is a one-dimensional producer -impl<'a, A: 'a> IntoNdProducer for &'a [A] { +impl<'a, A: 'a> IntoNdProducer for &'a [A] +{ type Item = ::Item; type Dim = Ix1; type Output = ArrayView1<'a, A>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { <_>::from(self) } } /// A mutable slice is a mutable one-dimensional producer -impl<'a, A: 'a> IntoNdProducer for &'a mut [A] { +impl<'a, A: 'a> IntoNdProducer for &'a mut [A] +{ type Item = ::Item; type Dim = Ix1; type Output = ArrayViewMut1<'a, A>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { <_>::from(self) } } /// A one-dimensional array is a one-dimensional producer -impl<'a, A: 'a, const N: usize> IntoNdProducer for &'a [A; N] { +impl<'a, A: 'a, const N: usize> IntoNdProducer for &'a [A; N] +{ type Item = ::Item; type Dim = Ix1; type Output = ArrayView1<'a, A>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { <_>::from(self) } } /// A 
mutable one-dimensional array is a mutable one-dimensional producer -impl<'a, A: 'a, const N: usize> IntoNdProducer for &'a mut [A; N] { +impl<'a, A: 'a, const N: usize> IntoNdProducer for &'a mut [A; N] +{ type Item = ::Item; type Dim = Ix1; type Output = ArrayViewMut1<'a, A>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { <_>::from(self) } } /// A Vec is a one-dimensional producer -impl<'a, A: 'a> IntoNdProducer for &'a Vec { +impl<'a, A: 'a> IntoNdProducer for &'a Vec +{ type Item = ::Item; type Dim = Ix1; type Output = ArrayView1<'a, A>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { <_>::from(self) } } /// A mutable Vec is a mutable one-dimensional producer -impl<'a, A: 'a> IntoNdProducer for &'a mut Vec { +impl<'a, A: 'a> IntoNdProducer for &'a mut Vec +{ type Item = ::Item; type Dim = Ix1; type Output = ArrayViewMut1<'a, A>; - fn into_producer(self) -> Self::Output { + fn into_producer(self) -> Self::Output + { <_>::from(self) } } -impl<'a, A, D: Dimension> NdProducer for ArrayView<'a, A, D> { +impl<'a, A, D: Dimension> NdProducer for ArrayView<'a, A, D> +{ type Item = &'a A; type Dim = D; type Ptr = *mut A; @@ -213,45 +237,55 @@ impl<'a, A, D: Dimension> NdProducer for ArrayView<'a, A, D> { private_impl! 
{} - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { self.raw_dim() } - fn equal_dim(&self, dim: &Self::Dim) -> bool { + fn equal_dim(&self, dim: &Self::Dim) -> bool + { self.dim.equal(dim) } - fn as_ptr(&self) -> *mut A { + fn as_ptr(&self) -> *mut A + { self.as_ptr() as _ } - fn layout(&self) -> Layout { + fn layout(&self) -> Layout + { self.layout_impl() } - unsafe fn as_ref(&self, ptr: *mut A) -> Self::Item { + unsafe fn as_ref(&self, ptr: *mut A) -> Self::Item + { &*ptr } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A + { self.ptr.as_ptr().offset(i.index_unchecked(&self.strides)) } - fn stride_of(&self, axis: Axis) -> isize { + fn stride_of(&self, axis: Axis) -> isize + { self.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { + fn contiguous_stride(&self) -> Self::Stride + { 1 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { self.split_at(axis, index) } } -impl<'a, A, D: Dimension> NdProducer for ArrayViewMut<'a, A, D> { +impl<'a, A, D: Dimension> NdProducer for ArrayViewMut<'a, A, D> +{ type Item = &'a mut A; type Dim = D; type Ptr = *mut A; @@ -259,45 +293,55 @@ impl<'a, A, D: Dimension> NdProducer for ArrayViewMut<'a, A, D> { private_impl! 
{} - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { self.raw_dim() } - fn equal_dim(&self, dim: &Self::Dim) -> bool { + fn equal_dim(&self, dim: &Self::Dim) -> bool + { self.dim.equal(dim) } - fn as_ptr(&self) -> *mut A { + fn as_ptr(&self) -> *mut A + { self.as_ptr() as _ } - fn layout(&self) -> Layout { + fn layout(&self) -> Layout + { self.layout_impl() } - unsafe fn as_ref(&self, ptr: *mut A) -> Self::Item { + unsafe fn as_ref(&self, ptr: *mut A) -> Self::Item + { &mut *ptr } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A + { self.ptr.as_ptr().offset(i.index_unchecked(&self.strides)) } - fn stride_of(&self, axis: Axis) -> isize { + fn stride_of(&self, axis: Axis) -> isize + { self.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { + fn contiguous_stride(&self) -> Self::Stride + { 1 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { self.split_at(axis, index) } } -impl NdProducer for RawArrayView { +impl NdProducer for RawArrayView +{ type Item = *const A; type Dim = D; type Ptr = *const A; @@ -305,45 +349,55 @@ impl NdProducer for RawArrayView { private_impl! 
{} - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { self.raw_dim() } - fn equal_dim(&self, dim: &Self::Dim) -> bool { + fn equal_dim(&self, dim: &Self::Dim) -> bool + { self.dim.equal(dim) } - fn as_ptr(&self) -> *const A { + fn as_ptr(&self) -> *const A + { self.as_ptr() } - fn layout(&self) -> Layout { + fn layout(&self) -> Layout + { self.layout_impl() } - unsafe fn as_ref(&self, ptr: *const A) -> *const A { + unsafe fn as_ref(&self, ptr: *const A) -> *const A + { ptr } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> *const A { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> *const A + { self.ptr.as_ptr().offset(i.index_unchecked(&self.strides)) } - fn stride_of(&self, axis: Axis) -> isize { + fn stride_of(&self, axis: Axis) -> isize + { self.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { + fn contiguous_stride(&self) -> Self::Stride + { 1 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { self.split_at(axis, index) } } -impl NdProducer for RawArrayViewMut { +impl NdProducer for RawArrayViewMut +{ type Item = *mut A; type Dim = D; type Ptr = *mut A; @@ -351,40 +405,49 @@ impl NdProducer for RawArrayViewMut { private_impl! 
{} - fn raw_dim(&self) -> Self::Dim { + fn raw_dim(&self) -> Self::Dim + { self.raw_dim() } - fn equal_dim(&self, dim: &Self::Dim) -> bool { + fn equal_dim(&self, dim: &Self::Dim) -> bool + { self.dim.equal(dim) } - fn as_ptr(&self) -> *mut A { + fn as_ptr(&self) -> *mut A + { self.as_ptr() as _ } - fn layout(&self) -> Layout { + fn layout(&self) -> Layout + { self.layout_impl() } - unsafe fn as_ref(&self, ptr: *mut A) -> *mut A { + unsafe fn as_ref(&self, ptr: *mut A) -> *mut A + { ptr } - unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A { + unsafe fn uget_ptr(&self, i: &Self::Dim) -> *mut A + { self.ptr.as_ptr().offset(i.index_unchecked(&self.strides)) } - fn stride_of(&self, axis: Axis) -> isize { + fn stride_of(&self, axis: Axis) -> isize + { self.stride_of(axis) } #[inline(always)] - fn contiguous_stride(&self) -> Self::Stride { + fn contiguous_stride(&self) -> Self::Stride + { 1 } - fn split_at(self, axis: Axis, index: usize) -> (Self, Self) { + fn split_at(self, axis: Axis, index: usize) -> (Self, Self) + { self.split_at(axis, index) } } diff --git a/tests/append.rs b/tests/append.rs index 131a1f0a5..cf5397de1 100644 --- a/tests/append.rs +++ b/tests/append.rs @@ -2,7 +2,8 @@ use ndarray::prelude::*; use ndarray::{ErrorKind, ShapeError}; #[test] -fn push_row() { +fn push_row() +{ let mut a = Array::zeros((0, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -24,7 +25,8 @@ fn push_row() { } #[test] -fn push_row_wrong_layout() { +fn push_row_wrong_layout() +{ let mut a = Array::zeros((0, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -56,7 +58,8 @@ fn push_row_wrong_layout() { } #[test] -fn push_row_neg_stride_1() { +fn push_row_neg_stride_1() +{ let mut a = Array::zeros((0, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -99,7 +102,8 @@ fn push_row_neg_stride_1() { } #[test] -fn 
push_row_neg_stride_2() { +fn push_row_neg_stride_2() +{ let mut a = Array::zeros((0, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -142,7 +146,8 @@ fn push_row_neg_stride_2() { } #[test] -fn push_row_error() { +fn push_row_error() +{ let mut a = Array::zeros((3, 4)); assert_eq!(a.push_row(aview1(&[1.])), @@ -160,7 +165,8 @@ fn push_row_error() { } #[test] -fn push_row_existing() { +fn push_row_existing() +{ let mut a = Array::zeros((1, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_row(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -184,7 +190,8 @@ fn push_row_existing() { } #[test] -fn push_row_col_len_1() { +fn push_row_col_len_1() +{ // Test appending 1 row and then cols from shape 1 x 1 let mut a = Array::zeros((1, 1)); a.push_row(aview1(&[1.])).unwrap(); // shape 2 x 1 @@ -201,7 +208,8 @@ fn push_row_col_len_1() { } #[test] -fn push_column() { +fn push_column() +{ let mut a = Array::zeros((4, 0)); a.push_column(aview1(&[0., 1., 2., 3.])).unwrap(); a.push_column(aview1(&[4., 5., 6., 7.])).unwrap(); @@ -213,7 +221,8 @@ fn push_column() { } #[test] -fn append_array1() { +fn append_array1() +{ let mut a = Array::zeros((0, 4)); a.append(Axis(0), aview2(&[[0., 1., 2., 3.]])).unwrap(); println!("{:?}", a); @@ -237,7 +246,8 @@ fn append_array1() { } #[test] -fn append_array_3d() { +fn append_array_3d() +{ let mut a = Array::zeros((0, 2, 2)); a.append(Axis(0), array![[[0, 1], [2, 3]]].view()).unwrap(); println!("{:?}", a); @@ -278,7 +288,8 @@ fn append_array_3d() { } #[test] -fn test_append_2d() { +fn test_append_2d() +{ // create an empty array and append let mut a = Array::zeros((0, 4)); let ones = ArrayView::from(&[1.; 12]) @@ -314,7 +325,8 @@ fn test_append_2d() { } #[test] -fn test_append_middle_axis() { +fn test_append_middle_axis() +{ // ensure we can append to Axis(1) by letting it become outermost let mut a = Array::::zeros((3, 0, 2)); a.append( @@ -359,7 +371,8 @@ fn test_append_middle_axis() { 
} #[test] -fn test_append_zero_size() { +fn test_append_zero_size() +{ { let mut a = Array::::zeros((0, 0)); a.append(Axis(0), aview2(&[[]])).unwrap(); @@ -380,7 +393,8 @@ fn test_append_zero_size() { } #[test] -fn push_row_neg_stride_3() { +fn push_row_neg_stride_3() +{ let mut a = Array::zeros((0, 4)); a.push_row(aview1(&[0., 1., 2., 3.])).unwrap(); a.invert_axis(Axis(1)); @@ -391,7 +405,8 @@ fn push_row_neg_stride_3() { } #[test] -fn push_row_ignore_strides_length_one_axes() { +fn push_row_ignore_strides_length_one_axes() +{ let strides = &[0, 1, 10, 20]; for invert in &[vec![], vec![0], vec![1], vec![0, 1]] { for &stride0 in strides { @@ -411,20 +426,23 @@ fn push_row_ignore_strides_length_one_axes() { #[test] #[should_panic(expected = "IncompatibleShape")] -fn zero_dimensional_error1() { +fn zero_dimensional_error1() +{ let mut a = Array::zeros(()).into_dyn(); a.append(Axis(0), arr0(0).into_dyn().view()).unwrap(); } #[test] #[should_panic(expected = "IncompatibleShape")] -fn zero_dimensional_error2() { +fn zero_dimensional_error2() +{ let mut a = Array::zeros(()).into_dyn(); a.push(Axis(0), arr0(0).into_dyn().view()).unwrap(); } #[test] -fn zero_dimensional_ok() { +fn zero_dimensional_ok() +{ let mut a = Array::zeros(0); let one = aview0(&1); let two = aview0(&2); diff --git a/tests/array-construct.rs b/tests/array-construct.rs index 8df0596b7..f7339dff6 100644 --- a/tests/array-construct.rs +++ b/tests/array-construct.rs @@ -8,14 +8,16 @@ use ndarray::prelude::*; use ndarray::Zip; #[test] -fn test_from_shape_fn() { +fn test_from_shape_fn() +{ let step = 3.1; let h = Array::from_shape_fn((5, 5), |(i, j)| f64::sin(i as f64 / step) * f64::cos(j as f64 / step)); assert_eq!(h.shape(), &[5, 5]); } #[test] -fn test_dimension_zero() { +fn test_dimension_zero() +{ let a: Array2 = Array2::from(vec![[], [], []]); assert_eq!(vec![0.; 0], a.into_raw_vec()); let a: Array3 = Array3::from(vec![[[]], [[]], [[]]]); @@ -24,7 +26,8 @@ fn test_dimension_zero() { #[test] 
#[cfg(feature = "approx")] -fn test_arc_into_owned() { +fn test_arc_into_owned() +{ use approx::assert_abs_diff_ne; let a = Array2::from_elem((5, 5), 1.).into_shared(); @@ -37,7 +40,8 @@ fn test_arc_into_owned() { } #[test] -fn test_arcarray_thread_safe() { +fn test_arcarray_thread_safe() +{ fn is_send(_t: &T) {} fn is_sync(_t: &T) {} let a = Array2::from_elem((5, 5), 1.).into_shared(); @@ -49,7 +53,8 @@ fn test_arcarray_thread_safe() { #[test] #[cfg(feature = "std")] #[allow(deprecated)] // uninitialized -fn test_uninit() { +fn test_uninit() +{ unsafe { let mut a = Array::::uninitialized((3, 4).f()); assert_eq!(a.dim(), (3, 4)); @@ -64,7 +69,8 @@ fn test_uninit() { } #[test] -fn test_from_fn_c0() { +fn test_from_fn_c0() +{ let a = Array::from_shape_fn((), |i| i); assert_eq!(a[()], ()); assert_eq!(a.len(), 1); @@ -72,7 +78,8 @@ fn test_from_fn_c0() { } #[test] -fn test_from_fn_c1() { +fn test_from_fn_c1() +{ let a = Array::from_shape_fn(28, |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -80,7 +87,8 @@ fn test_from_fn_c1() { } #[test] -fn test_from_fn_c() { +fn test_from_fn_c() +{ let a = Array::from_shape_fn((4, 7), |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -88,7 +96,8 @@ fn test_from_fn_c() { } #[test] -fn test_from_fn_c3() { +fn test_from_fn_c3() +{ let a = Array::from_shape_fn((4, 3, 7), |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -96,7 +105,8 @@ fn test_from_fn_c3() { } #[test] -fn test_from_fn_f0() { +fn test_from_fn_f0() +{ let a = Array::from_shape_fn(().f(), |i| i); assert_eq!(a[()], ()); assert_eq!(a.len(), 1); @@ -104,7 +114,8 @@ fn test_from_fn_f0() { } #[test] -fn test_from_fn_f1() { +fn test_from_fn_f1() +{ let a = Array::from_shape_fn(28.f(), |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -112,7 +123,8 @@ fn test_from_fn_f1() { } #[test] -fn test_from_fn_f() { +fn test_from_fn_f() +{ let a = Array::from_shape_fn((4, 7).f(), |i| i); for (i, elt) in a.indexed_iter() 
{ assert_eq!(i, *elt); @@ -120,7 +132,8 @@ fn test_from_fn_f() { } #[test] -fn test_from_fn_f_with_zero() { +fn test_from_fn_f_with_zero() +{ defmac!(test_from_fn_f_with_zero shape => { let a = Array::from_shape_fn(shape.f(), |i| i); assert_eq!(a.len(), 0); @@ -135,7 +148,8 @@ fn test_from_fn_f_with_zero() { } #[test] -fn test_from_fn_f3() { +fn test_from_fn_f3() +{ let a = Array::from_shape_fn((4, 2, 7).f(), |i| i); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt); @@ -143,7 +157,8 @@ fn test_from_fn_f3() { } #[test] -fn deny_wraparound_from_vec() { +fn deny_wraparound_from_vec() +{ let five = vec![0; 5]; let five_large = Array::from_shape_vec((3, 7, 29, 36760123, 823996703), five.clone()); println!("{:?}", five_large); @@ -153,7 +168,8 @@ fn deny_wraparound_from_vec() { } #[test] -fn test_ones() { +fn test_ones() +{ let mut a = Array::::zeros((2, 3, 4)); a.fill(1.0); let b = Array::::ones((2, 3, 4)); @@ -161,7 +177,8 @@ fn test_ones() { } #[test] -fn test_from_shape_empty_with_neg_stride() { +fn test_from_shape_empty_with_neg_stride() +{ // Issue #998, negative strides for an axis where it doesn't matter. let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]; let v = s[..12].to_vec(); @@ -172,7 +189,8 @@ fn test_from_shape_empty_with_neg_stride() { } #[test] -fn test_from_shape_with_neg_stride() { +fn test_from_shape_with_neg_stride() +{ // Issue #998, negative strides for an axis where it doesn't matter. let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]; let v = s[..12].to_vec(); @@ -184,7 +202,8 @@ fn test_from_shape_with_neg_stride() { } #[test] -fn test_from_shape_2_2_2_with_neg_stride() { +fn test_from_shape_2_2_2_with_neg_stride() +{ // Issue #998, negative strides for an axis where it doesn't matter. 
let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]; let v = s[..12].to_vec(); @@ -199,14 +218,16 @@ fn test_from_shape_2_2_2_with_neg_stride() { #[should_panic] #[test] -fn deny_wraparound_zeros() { +fn deny_wraparound_zeros() +{ //2^64 + 5 = 18446744073709551621 = 3×7×29×36760123×823996703 (5 distinct prime factors) let _five_large = Array::::zeros((3, 7, 29, 36760123, 823996703)); } #[should_panic] #[test] -fn deny_wraparound_reshape() { +fn deny_wraparound_reshape() +{ //2^64 + 5 = 18446744073709551621 = 3×7×29×36760123×823996703 (5 distinct prime factors) let five = Array::::zeros(5); let _five_large = five @@ -216,20 +237,23 @@ fn deny_wraparound_reshape() { #[should_panic] #[test] -fn deny_wraparound_default() { +fn deny_wraparound_default() +{ let _five_large = Array::::default((3, 7, 29, 36760123, 823996703)); } #[should_panic] #[test] -fn deny_wraparound_from_shape_fn() { +fn deny_wraparound_from_shape_fn() +{ let _five_large = Array::::from_shape_fn((3, 7, 29, 36760123, 823996703), |_| 0.); } #[should_panic] #[test] #[allow(deprecated)] // uninitialized -fn deny_wraparound_uninitialized() { +fn deny_wraparound_uninitialized() +{ unsafe { let _five_large = Array::::uninitialized((3, 7, 29, 36760123, 823996703)); } @@ -237,36 +261,42 @@ fn deny_wraparound_uninitialized() { #[should_panic] #[test] -fn deny_wraparound_uninit() { +fn deny_wraparound_uninit() +{ let _five_large = Array::::uninit((3, 7, 29, 36760123, 823996703)); } #[should_panic] #[test] -fn deny_slice_with_too_many_rows_to_arrayview2() { +fn deny_slice_with_too_many_rows_to_arrayview2() +{ let _view = ArrayView2::from(&[[0u8; 0]; usize::MAX][..]); } #[should_panic] #[test] -fn deny_slice_with_too_many_zero_sized_elems_to_arrayview2() { +fn deny_slice_with_too_many_zero_sized_elems_to_arrayview2() +{ let _view = ArrayView2::from(&[[(); isize::MAX as usize]; isize::MAX as usize][..]); } #[should_panic] #[test] -fn deny_slice_with_too_many_rows_to_arrayviewmut2() { +fn 
deny_slice_with_too_many_rows_to_arrayviewmut2() +{ let _view = ArrayViewMut2::from(&mut [[0u8; 0]; usize::MAX][..]); } #[should_panic] #[test] -fn deny_slice_with_too_many_zero_sized_elems_to_arrayviewmut2() { +fn deny_slice_with_too_many_zero_sized_elems_to_arrayviewmut2() +{ let _view = ArrayViewMut2::from(&mut [[(); isize::MAX as usize]; isize::MAX as usize][..]); } #[test] -fn maybe_uninit_1() { +fn maybe_uninit_1() +{ use std::mem::MaybeUninit; unsafe { diff --git a/tests/array.rs b/tests/array.rs index ccc499b4f..3f2c38a62 100644 --- a/tests/array.rs +++ b/tests/array.rs @@ -30,7 +30,8 @@ macro_rules! assert_panics { } #[test] -fn test_matmul_arcarray() { +fn test_matmul_arcarray() +{ let mut A = ArcArray::::zeros((2, 3)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -54,18 +55,21 @@ fn test_matmul_arcarray() { } #[allow(unused)] -fn arrayview_shrink_lifetime<'a, 'b: 'a>(view: ArrayView1<'b, f64>) -> ArrayView1<'a, f64> { +fn arrayview_shrink_lifetime<'a, 'b: 'a>(view: ArrayView1<'b, f64>) -> ArrayView1<'a, f64> +{ view.reborrow() } #[allow(unused)] -fn arrayviewmut_shrink_lifetime<'a, 'b: 'a>(view: ArrayViewMut1<'b, f64>) -> ArrayViewMut1<'a, f64> { +fn arrayviewmut_shrink_lifetime<'a, 'b: 'a>(view: ArrayViewMut1<'b, f64>) -> ArrayViewMut1<'a, f64> +{ view.reborrow() } #[test] #[cfg(feature = "std")] -fn test_mat_mul() { +fn test_mat_mul() +{ // smoke test, a big matrix multiplication of uneven size let (n, m) = (45, 33); let a = ArcArray::linspace(0., ((n * m) - 1) as f32, n as usize * m as usize) @@ -79,7 +83,8 @@ fn test_mat_mul() { #[deny(unsafe_code)] #[test] -fn test_slice() { +fn test_slice() +{ let mut A = ArcArray::::zeros((3, 4, 5)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -94,13 +99,15 @@ fn test_slice() { #[deny(unsafe_code)] #[test] -fn test_slice_ix0() { +fn test_slice_ix0() +{ let arr = arr0(5); assert_eq!(arr.slice(s![]), aview0(&5)); } #[test] -fn test_slice_edge_cases() { +fn test_slice_edge_cases() +{ let mut 
arr = Array3::::zeros((3, 4, 5)); arr.slice_collapse(s![0..0;-1, .., ..]); assert_eq!(arr.shape(), &[0, 4, 5]); @@ -110,7 +117,8 @@ fn test_slice_edge_cases() { } #[test] -fn test_slice_inclusive_range() { +fn test_slice_inclusive_range() +{ let arr = array![[1, 2, 3], [4, 5, 6]]; assert_eq!(arr.slice(s![1..=1, 1..=2]), array![[5, 6]]); assert_eq!(arr.slice(s![1..=-1, -2..=2;-1]), array![[6, 5]]); @@ -124,7 +132,8 @@ fn test_slice_inclusive_range() { /// `ArrayView1` and `ArrayView2`, so the compiler needs to determine which /// type is the correct result for the `.slice()` call. #[test] -fn test_slice_infer() { +fn test_slice_infer() +{ let a = array![1., 2.]; let b = array![[3., 4.], [5., 6.]]; b.slice(s![..-1, ..]).dot(&a); @@ -132,7 +141,8 @@ fn test_slice_infer() { } #[test] -fn test_slice_with_many_dim() { +fn test_slice_with_many_dim() +{ let mut A = ArcArray::::zeros(&[3, 1, 4, 1, 3, 2, 1][..]); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -159,14 +169,16 @@ fn test_slice_with_many_dim() { } #[test] -fn test_slice_range_variable() { +fn test_slice_range_variable() +{ let range = 1..4; let arr = array![0, 1, 2, 3, 4]; assert_eq!(arr.slice(s![range]), array![1, 2, 3]); } #[test] -fn test_slice_args_eval_range_once() { +fn test_slice_args_eval_range_once() +{ let mut eval_count = 0; { let mut range = || { @@ -180,7 +192,8 @@ fn test_slice_args_eval_range_once() { } #[test] -fn test_slice_args_eval_step_once() { +fn test_slice_args_eval_step_once() +{ let mut eval_count = 0; { let mut step = || { @@ -194,7 +207,8 @@ fn test_slice_args_eval_step_once() { } #[test] -fn test_slice_array_fixed() { +fn test_slice_array_fixed() +{ let mut arr = Array3::::zeros((5, 2, 5)); let info = s![1.., 1, NewAxis, ..;2]; arr.slice(info); @@ -205,7 +219,8 @@ fn test_slice_array_fixed() { } #[test] -fn test_slice_dyninput_array_fixed() { +fn test_slice_dyninput_array_fixed() +{ let mut arr = Array3::::zeros((5, 2, 5)).into_dyn(); let info = s![1.., 1, NewAxis, ..;2]; 
arr.slice(info); @@ -216,7 +231,8 @@ fn test_slice_dyninput_array_fixed() { } #[test] -fn test_slice_array_dyn() { +fn test_slice_array_dyn() +{ let mut arr = Array3::::zeros((5, 2, 5)); let info = SliceInfo::<_, Ix3, IxDyn>::try_from([ SliceInfoElem::from(1..), @@ -238,7 +254,8 @@ fn test_slice_array_dyn() { } #[test] -fn test_slice_dyninput_array_dyn() { +fn test_slice_dyninput_array_dyn() +{ let mut arr = Array3::::zeros((5, 2, 5)).into_dyn(); let info = SliceInfo::<_, Ix3, IxDyn>::try_from([ SliceInfoElem::from(1..), @@ -260,7 +277,8 @@ fn test_slice_dyninput_array_dyn() { } #[test] -fn test_slice_dyninput_vec_fixed() { +fn test_slice_dyninput_vec_fixed() +{ let mut arr = Array3::::zeros((5, 2, 5)).into_dyn(); let info = &SliceInfo::<_, Ix3, Ix3>::try_from(vec![ SliceInfoElem::from(1..), @@ -282,7 +300,8 @@ fn test_slice_dyninput_vec_fixed() { } #[test] -fn test_slice_dyninput_vec_dyn() { +fn test_slice_dyninput_vec_dyn() +{ let mut arr = Array3::::zeros((5, 2, 5)).into_dyn(); let info = &SliceInfo::<_, Ix3, IxDyn>::try_from(vec![ SliceInfoElem::from(1..), @@ -304,7 +323,8 @@ fn test_slice_dyninput_vec_dyn() { } #[test] -fn test_slice_with_subview_and_new_axis() { +fn test_slice_with_subview_and_new_axis() +{ let mut arr = ArcArray::::zeros((3, 5, 4)); for (i, elt) in arr.iter_mut().enumerate() { *elt = i; @@ -341,7 +361,8 @@ fn test_slice_with_subview_and_new_axis() { } #[test] -fn test_slice_collapse_with_indices() { +fn test_slice_collapse_with_indices() +{ let mut arr = ArcArray::::zeros((3, 5, 4)); for (i, elt) in arr.iter_mut().enumerate() { *elt = i; @@ -380,13 +401,15 @@ fn test_slice_collapse_with_indices() { #[test] #[should_panic] -fn test_slice_collapse_with_newaxis() { +fn test_slice_collapse_with_newaxis() +{ let mut arr = Array2::::zeros((2, 3)); arr.slice_collapse(s![0, 0, NewAxis]); } #[test] -fn test_multislice() { +fn test_multislice() +{ macro_rules! 
do_test { ($arr:expr, $($s:expr),*) => { { @@ -424,7 +447,8 @@ fn test_multislice() { } #[test] -fn test_multislice_intersecting() { +fn test_multislice_intersecting() +{ assert_panics!({ let mut arr = Array2::::zeros((8, 6)); arr.multi_slice_mut((s![3, .., NewAxis], s![3, ..])); @@ -465,34 +489,39 @@ fn test_multislice_intersecting() { #[should_panic] #[test] -fn index_out_of_bounds() { +fn index_out_of_bounds() +{ let mut a = Array::::zeros((3, 4)); a[[3, 2]] = 1; } #[should_panic] #[test] -fn slice_oob() { +fn slice_oob() +{ let a = ArcArray::::zeros((3, 4)); let _vi = a.slice(s![..10, ..]); } #[should_panic] #[test] -fn slice_axis_oob() { +fn slice_axis_oob() +{ let a = ArcArray::::zeros((3, 4)); let _vi = a.slice_axis(Axis(0), Slice::new(0, Some(10), 1)); } #[should_panic] #[test] -fn slice_wrong_dim() { +fn slice_wrong_dim() +{ let a = ArcArray::::zeros(vec![3, 4, 5]); let _vi = a.slice(s![.., ..]); } #[test] -fn test_index() { +fn test_index() +{ let mut A = ArcArray::::zeros((2, 3)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -513,7 +542,8 @@ fn test_index() { } #[test] -fn test_index_arrays() { +fn test_index_arrays() +{ let a = Array1::from_iter(0..12); assert_eq!(a[1], a[[1]]); let v = a.view().into_shape_with_order((3, 4)).unwrap(); @@ -524,7 +554,8 @@ fn test_index_arrays() { #[test] #[allow(clippy::assign_op_pattern)] -fn test_add() { +fn test_add() +{ let mut A = ArcArray::::zeros((2, 2)); for (i, elt) in A.iter_mut().enumerate() { *elt = i; @@ -539,7 +570,8 @@ fn test_add() { } #[test] -fn test_multidim() { +fn test_multidim() +{ let mut mat = ArcArray::zeros(2 * 3 * 4 * 5 * 6) .into_shape_with_order((2, 3, 4, 5, 6)) .unwrap(); @@ -564,7 +596,8 @@ array([[[ 7, 6], [ 9, 8]]]) */ #[test] -fn test_negative_stride_arcarray() { +fn test_negative_stride_arcarray() +{ let mut mat = ArcArray::zeros((2, 4, 2)); mat[[0, 0, 0]] = 1.0f32; for (i, elt) in mat.iter_mut().enumerate() { @@ -590,7 +623,8 @@ fn test_negative_stride_arcarray() { } #[test] 
-fn test_cow() { +fn test_cow() +{ let mut mat = ArcArray::zeros((2, 2)); mat[[0, 0]] = 1; let n = mat.clone(); @@ -622,7 +656,8 @@ fn test_cow() { } #[test] -fn test_cow_shrink() { +fn test_cow_shrink() +{ // A test for clone-on-write in the case that // mutation shrinks the array and gives it different strides // @@ -657,7 +692,8 @@ fn test_cow_shrink() { #[test] #[cfg(feature = "std")] -fn test_sub() { +fn test_sub() +{ let mat = ArcArray::linspace(0., 15., 16) .into_shape_with_order((2, 4, 2)) .unwrap(); @@ -678,7 +714,8 @@ fn test_sub() { #[should_panic] #[test] #[cfg(feature = "std")] -fn test_sub_oob_1() { +fn test_sub_oob_1() +{ let mat = ArcArray::linspace(0., 15., 16) .into_shape_with_order((2, 4, 2)) .unwrap(); @@ -687,7 +724,8 @@ fn test_sub_oob_1() { #[test] #[cfg(feature = "approx")] -fn test_select() { +fn test_select() +{ use approx::assert_abs_diff_eq; // test for 2-d array @@ -710,7 +748,8 @@ fn test_select() { } #[test] -fn test_select_1d() { +fn test_select_1d() +{ let x = arr1(&[0, 1, 2, 3, 4, 5, 6]); let r1 = x.select(Axis(0), &[1, 3, 4, 2, 2, 5]); assert_eq!(r1, arr1(&[1, 3, 4, 2, 2, 5])); @@ -723,7 +762,8 @@ fn test_select_1d() { } #[test] -fn diag() { +fn diag() +{ let d = arr2(&[[1., 2., 3.0f32]]).into_diag(); assert_eq!(d.dim(), 1); let a = arr2(&[[1., 2., 3.0f32], [0., 0., 0.]]); @@ -740,7 +780,8 @@ fn diag() { /// Note that this does not check the strides in the "merged" case! #[test] #[allow(clippy::cognitive_complexity)] -fn merge_axes() { +fn merge_axes() +{ macro_rules! 
assert_merged { ($arr:expr, $slice:expr, $take:expr, $into:expr) => { let mut v = $arr.slice($slice); @@ -828,7 +869,8 @@ fn merge_axes() { } #[test] -fn swapaxes() { +fn swapaxes() +{ let mut a = arr2(&[[1., 2.], [3., 4.0f32]]); let b = arr2(&[[1., 3.], [2., 4.0f32]]); assert!(a != b); @@ -841,7 +883,8 @@ fn swapaxes() { } #[test] -fn permuted_axes() { +fn permuted_axes() +{ let a = array![1].index_axis_move(Axis(0), 0); let permuted = a.view().permuted_axes([]); assert_eq!(a, permuted); @@ -877,7 +920,8 @@ fn permuted_axes() { #[should_panic] #[test] -fn permuted_axes_repeated_axis() { +fn permuted_axes_repeated_axis() +{ let a = Array::from_iter(0..24) .into_shape_with_order((2, 3, 4)) .unwrap(); @@ -886,7 +930,8 @@ fn permuted_axes_repeated_axis() { #[should_panic] #[test] -fn permuted_axes_missing_axis() { +fn permuted_axes_missing_axis() +{ let a = Array::from_iter(0..24) .into_shape_with_order((2, 3, 4)) .unwrap() @@ -896,7 +941,8 @@ fn permuted_axes_missing_axis() { #[should_panic] #[test] -fn permuted_axes_oob() { +fn permuted_axes_oob() +{ let a = Array::from_iter(0..24) .into_shape_with_order((2, 3, 4)) .unwrap(); @@ -904,7 +950,8 @@ fn permuted_axes_oob() { } #[test] -fn standard_layout() { +fn standard_layout() +{ let mut a = arr2(&[[1., 2.], [3., 4.0]]); assert!(a.is_standard_layout()); a.swap_axes(0, 1); @@ -922,7 +969,8 @@ fn standard_layout() { } #[test] -fn iter_size_hint() { +fn iter_size_hint() +{ let mut a = arr2(&[[1., 2.], [3., 4.]]); { let mut it = a.iter(); @@ -957,7 +1005,8 @@ fn iter_size_hint() { } #[test] -fn zero_axes() { +fn zero_axes() +{ let mut a = arr1::(&[]); for _ in a.iter() { panic!(); @@ -975,7 +1024,8 @@ fn zero_axes() { } #[test] -fn equality() { +fn equality() +{ let a = arr2(&[[1., 2.], [3., 4.]]); let mut b = arr2(&[[1., 2.], [2., 4.]]); assert!(a != b); @@ -988,7 +1038,8 @@ fn equality() { } #[test] -fn map1() { +fn map1() +{ let a = arr2(&[[1., 2.], [3., 4.]]); let b = a.map(|&x| (x / 3.) 
as isize); assert_eq!(b, arr2(&[[0, 0], [1, 1]])); @@ -998,21 +1049,24 @@ fn map1() { } #[test] -fn mapv_into_any_same_type() { +fn mapv_into_any_same_type() +{ let a: Array = array![[1., 2., 3.], [4., 5., 6.]]; let a_plus_one: Array = array![[2., 3., 4.], [5., 6., 7.]]; assert_eq!(a.mapv_into_any(|a| a + 1.), a_plus_one); } #[test] -fn mapv_into_any_diff_types() { +fn mapv_into_any_diff_types() +{ let a: Array = array![[1., 2., 3.], [4., 5., 6.]]; let a_even: Array = array![[false, true, false], [true, false, true]]; assert_eq!(a.mapv_into_any(|a| a.round() as i32 % 2 == 0), a_even); } #[test] -fn as_slice_memory_order_mut_arcarray() { +fn as_slice_memory_order_mut_arcarray() +{ // Test that mutation breaks sharing for `ArcArray`. let a = rcarr2(&[[1., 2.], [3., 4.0f32]]); let mut b = a.clone(); @@ -1023,7 +1077,8 @@ fn as_slice_memory_order_mut_arcarray() { } #[test] -fn as_slice_memory_order_mut_cowarray() { +fn as_slice_memory_order_mut_cowarray() +{ // Test that mutation breaks sharing for `CowArray`. let a = arr2(&[[1., 2.], [3., 4.0f32]]); let mut b = CowArray::from(a.view()); @@ -1034,7 +1089,8 @@ fn as_slice_memory_order_mut_cowarray() { } #[test] -fn as_slice_memory_order_mut_contiguous_arcarray() { +fn as_slice_memory_order_mut_contiguous_arcarray() +{ // Test that unsharing preserves the strides in the contiguous case for `ArcArray`. let a = rcarr2(&[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]]).reversed_axes(); let mut b = a.clone().slice_move(s![.., ..2]); @@ -1044,7 +1100,8 @@ fn as_slice_memory_order_mut_contiguous_arcarray() { } #[test] -fn as_slice_memory_order_mut_contiguous_cowarray() { +fn as_slice_memory_order_mut_contiguous_cowarray() +{ // Test that unsharing preserves the strides in the contiguous case for `CowArray`. 
let a = arr2(&[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]]).reversed_axes(); let mut b = CowArray::from(a.slice(s![.., ..2])); @@ -1055,7 +1112,8 @@ fn as_slice_memory_order_mut_contiguous_cowarray() { } #[test] -fn to_slice_memory_order() { +fn to_slice_memory_order() +{ for shape in vec![[2, 0, 3, 5], [2, 1, 3, 5], [2, 4, 3, 5]] { let data: Vec = (0..shape.iter().product()).collect(); let mut orig = Array1::from(data.clone()) @@ -1072,7 +1130,8 @@ fn to_slice_memory_order() { } #[test] -fn to_slice_memory_order_discontiguous() { +fn to_slice_memory_order_discontiguous() +{ let mut orig = Array3::::zeros([3, 2, 4]); assert!(orig .slice(s![.., 1.., ..]) @@ -1093,7 +1152,8 @@ fn to_slice_memory_order_discontiguous() { } #[test] -fn array0_into_scalar() { +fn array0_into_scalar() +{ // With this kind of setup, the `Array`'s pointer is not the same as the // underlying `Vec`'s pointer. let a: Array0 = array![4, 5, 6, 7].index_axis_move(Axis(0), 2); @@ -1108,7 +1168,8 @@ fn array0_into_scalar() { } #[test] -fn array_view0_into_scalar() { +fn array_view0_into_scalar() +{ // With this kind of setup, the `Array`'s pointer is not the same as the // underlying `Vec`'s pointer. let a: Array0 = array![4, 5, 6, 7].index_axis_move(Axis(0), 2); @@ -1123,7 +1184,8 @@ fn array_view0_into_scalar() { } #[test] -fn array_view_mut0_into_scalar() { +fn array_view_mut0_into_scalar() +{ // With this kind of setup, the `Array`'s pointer is not the same as the // underlying `Vec`'s pointer. 
let a: Array0 = array![4, 5, 6, 7].index_axis_move(Axis(0), 2); @@ -1138,7 +1200,8 @@ fn array_view_mut0_into_scalar() { } #[test] -fn owned_array1() { +fn owned_array1() +{ let mut a = Array::from(vec![1, 2, 3, 4]); for elt in a.iter_mut() { *elt = 2; @@ -1163,7 +1226,8 @@ fn owned_array1() { } #[test] -fn owned_array_with_stride() { +fn owned_array_with_stride() +{ let v: Vec<_> = (0..12).collect(); let dim = (2, 3, 2); let strides = (1, 4, 2); @@ -1173,7 +1237,8 @@ fn owned_array_with_stride() { } #[test] -fn owned_array_discontiguous() { +fn owned_array_discontiguous() +{ use std::iter::repeat; let v: Vec<_> = (0..12).flat_map(|x| repeat(x).take(2)).collect(); let dim = (3, 2, 2); @@ -1186,14 +1251,17 @@ fn owned_array_discontiguous() { } #[test] -fn owned_array_discontiguous_drop() { +fn owned_array_discontiguous_drop() +{ use std::cell::RefCell; use std::collections::BTreeSet; use std::rc::Rc; struct InsertOnDrop(Rc>>, Option); - impl Drop for InsertOnDrop { - fn drop(&mut self) { + impl Drop for InsertOnDrop + { + fn drop(&mut self) + { let InsertOnDrop(ref set, ref mut value) = *self; set.borrow_mut().insert(value.take().expect("double drop!")); } @@ -1227,13 +1295,15 @@ macro_rules! 
assert_matches { } #[test] -fn from_vec_dim_stride_empty_1d() { +fn from_vec_dim_stride_empty_1d() +{ let empty: [f32; 0] = []; assert_matches!(Array::from_shape_vec(0.strides(1), empty.to_vec()), Ok(_)); } #[test] -fn from_vec_dim_stride_0d() { +fn from_vec_dim_stride_0d() +{ let empty: [f32; 0] = []; let one = [1.]; let two = [1., 2.]; @@ -1249,7 +1319,8 @@ fn from_vec_dim_stride_0d() { } #[test] -fn from_vec_dim_stride_2d_1() { +fn from_vec_dim_stride_2d_1() +{ let two = [1., 2.]; let d = Ix2(2, 1); let s = d.default_strides(); @@ -1257,7 +1328,8 @@ fn from_vec_dim_stride_2d_1() { } #[test] -fn from_vec_dim_stride_2d_2() { +fn from_vec_dim_stride_2d_2() +{ let two = [1., 2.]; let d = Ix2(1, 2); let s = d.default_strides(); @@ -1265,7 +1337,8 @@ fn from_vec_dim_stride_2d_2() { } #[test] -fn from_vec_dim_stride_2d_3() { +fn from_vec_dim_stride_2d_3() +{ let a = arr3(&[[[1]], [[2]], [[3]]]); let d = a.raw_dim(); let s = d.default_strides(); @@ -1276,7 +1349,8 @@ fn from_vec_dim_stride_2d_3() { } #[test] -fn from_vec_dim_stride_2d_4() { +fn from_vec_dim_stride_2d_4() +{ let a = arr3(&[[[1]], [[2]], [[3]]]); let d = a.raw_dim(); let s = d.fortran_strides(); @@ -1287,7 +1361,8 @@ fn from_vec_dim_stride_2d_4() { } #[test] -fn from_vec_dim_stride_2d_5() { +fn from_vec_dim_stride_2d_5() +{ let a = arr3(&[[[1, 2, 3]]]); let d = a.raw_dim(); let s = d.fortran_strides(); @@ -1298,7 +1373,8 @@ fn from_vec_dim_stride_2d_5() { } #[test] -fn from_vec_dim_stride_2d_6() { +fn from_vec_dim_stride_2d_6() +{ let a = [1., 2., 3., 4., 5., 6.]; let d = (2, 1, 1); let s = (2, 2, 1); @@ -1310,7 +1386,8 @@ fn from_vec_dim_stride_2d_6() { } #[test] -fn from_vec_dim_stride_2d_7() { +fn from_vec_dim_stride_2d_7() +{ // empty arrays can have 0 strides let a: [f32; 0] = []; // [[]] shape=[4, 0], strides=[0, 1] @@ -1320,7 +1397,8 @@ fn from_vec_dim_stride_2d_7() { } #[test] -fn from_vec_dim_stride_2d_8() { +fn from_vec_dim_stride_2d_8() +{ // strides of length 1 axes can be zero let a = [1.]; 
let d = (1, 1); @@ -1329,7 +1407,8 @@ fn from_vec_dim_stride_2d_8() { } #[test] -fn from_vec_dim_stride_2d_rejects() { +fn from_vec_dim_stride_2d_rejects() +{ let two = [1., 2.]; let d = (2, 2); let s = (1, 0); @@ -1341,7 +1420,8 @@ fn from_vec_dim_stride_2d_rejects() { } #[test] -fn views() { +fn views() +{ let a = ArcArray::from(vec![1, 2, 3, 4]) .into_shape_with_order((2, 2)) .unwrap(); @@ -1360,7 +1440,8 @@ fn views() { } #[test] -fn view_mut() { +fn view_mut() +{ let mut a = ArcArray::from(vec![1, 2, 3, 4]) .into_shape_with_order((2, 2)) .unwrap(); @@ -1381,7 +1462,8 @@ fn view_mut() { } #[test] -fn slice_mut() { +fn slice_mut() +{ let mut a = ArcArray::from(vec![1, 2, 3, 4]) .into_shape_with_order((2, 2)) .unwrap(); @@ -1405,7 +1487,8 @@ fn slice_mut() { } #[test] -fn assign_ops() { +fn assign_ops() +{ let mut a = arr2(&[[1., 2.], [3., 4.]]); let b = arr2(&[[1., 3.], [2., 4.]]); (*&mut a.view_mut()) += &b; @@ -1423,7 +1506,8 @@ fn assign_ops() { } #[test] -fn aview() { +fn aview() +{ let a = arr2(&[[1., 2., 3.], [4., 5., 6.]]); let data = [[1., 2., 3.], [4., 5., 6.]]; let b = aview2(&data); @@ -1432,7 +1516,8 @@ fn aview() { } #[test] -fn aview_mut() { +fn aview_mut() +{ let mut data = [0; 16]; { let mut a = aview_mut1(&mut data).into_shape_with_order((4, 4)).unwrap(); @@ -1445,7 +1530,8 @@ fn aview_mut() { } #[test] -fn transpose_view() { +fn transpose_view() +{ let a = arr2(&[[1, 2], [3, 4]]); let at = a.view().reversed_axes(); assert_eq!(at, arr2(&[[1, 3], [2, 4]])); @@ -1456,7 +1542,8 @@ fn transpose_view() { } #[test] -fn transpose_view_mut() { +fn transpose_view_mut() +{ let mut a = arr2(&[[1, 2], [3, 4]]); let mut at = a.view_mut().reversed_axes(); at[[0, 1]] = 5; @@ -1470,7 +1557,8 @@ fn transpose_view_mut() { #[test] #[allow(clippy::cognitive_complexity)] -fn insert_axis() { +fn insert_axis() +{ defmac!(test_insert orig, index, new => { let res = orig.insert_axis(Axis(index)); assert_eq!(res, new); @@ -1565,7 +1653,8 @@ fn insert_axis() { } #[test] 
-fn insert_axis_f() { +fn insert_axis_f() +{ defmac!(test_insert_f orig, index, new => { let res = orig.insert_axis(Axis(index)); assert_eq!(res, new); @@ -1612,7 +1701,8 @@ fn insert_axis_f() { } #[test] -fn insert_axis_view() { +fn insert_axis_view() +{ let a = array![[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]; assert_eq!( @@ -1630,7 +1720,8 @@ fn insert_axis_view() { } #[test] -fn arithmetic_broadcast() { +fn arithmetic_broadcast() +{ let mut a = arr2(&[[1., 2.], [3., 4.]]); let b = a.clone() * aview0(&1.); assert_eq!(a, b); @@ -1689,7 +1780,8 @@ fn arithmetic_broadcast() { } #[test] -fn char_array() { +fn char_array() +{ // test compilation & basics of non-numerical array let cc = ArcArray::from_iter("alphabet".chars()) .into_shape_with_order((4, 2)) @@ -1698,7 +1790,8 @@ fn char_array() { } #[test] -fn scalar_ops() { +fn scalar_ops() +{ let a = Array::::zeros((5, 5)); let b = &a + 1; let c = (&a + &a + 2) - 3; @@ -1736,7 +1829,8 @@ fn scalar_ops() { #[test] #[cfg(feature = "std")] -fn split_at() { +fn split_at() +{ let mut a = arr2(&[[1., 2.], [3., 4.]]); { @@ -1776,21 +1870,24 @@ fn split_at() { #[test] #[should_panic] -fn deny_split_at_axis_out_of_bounds() { +fn deny_split_at_axis_out_of_bounds() +{ let a = arr2(&[[1., 2.], [3., 4.]]); a.view().split_at(Axis(2), 0); } #[test] #[should_panic] -fn deny_split_at_index_out_of_bounds() { +fn deny_split_at_index_out_of_bounds() +{ let a = arr2(&[[1., 2.], [3., 4.]]); a.view().split_at(Axis(1), 3); } #[test] #[cfg(feature = "std")] -fn test_range() { +fn test_range() +{ let a = Array::range(0., 5., 1.); assert_eq!(a.len(), 5); assert_eq!(a[0], 0.); @@ -1819,7 +1916,8 @@ fn test_range() { } #[test] -fn test_f_order() { +fn test_f_order() +{ // Test that arrays are logically equal in every way, // even if the underlying memory order is different let c = arr2(&[[1, 2, 3], [4, 5, 6]]); @@ -1841,7 +1939,8 @@ fn test_f_order() { } #[test] -fn to_owned_memory_order() { +fn to_owned_memory_order() +{ // check 
that .to_owned() makes f-contiguous arrays out of f-contiguous // input. let c = arr2(&[[1, 2, 3], [4, 5, 6]]); @@ -1861,7 +1960,8 @@ fn to_owned_memory_order() { } #[test] -fn to_owned_neg_stride() { +fn to_owned_neg_stride() +{ let mut c = arr2(&[[1, 2, 3], [4, 5, 6]]); c.slice_collapse(s![.., ..;-1]); let co = c.to_owned(); @@ -1870,7 +1970,8 @@ fn to_owned_neg_stride() { } #[test] -fn discontiguous_owned_to_owned() { +fn discontiguous_owned_to_owned() +{ let mut c = arr2(&[[1, 2, 3], [4, 5, 6]]); c.slice_collapse(s![.., ..;2]); @@ -1881,7 +1982,8 @@ fn discontiguous_owned_to_owned() { } #[test] -fn map_memory_order() { +fn map_memory_order() +{ let a = arr3(&[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [0, -1, -2]]]); let mut v = a.view(); v.swap_axes(0, 1); @@ -1891,7 +1993,8 @@ fn map_memory_order() { } #[test] -fn map_mut_with_unsharing() { +fn map_mut_with_unsharing() +{ // Fortran-layout `ArcArray`. let a = rcarr2(&[[0, 5], [1, 6], [2, 7], [3, 8], [4, 9]]).reversed_axes(); assert_eq!(a.shape(), &[2, 5]); @@ -1918,7 +2021,8 @@ fn map_mut_with_unsharing() { } #[test] -fn test_view_from_shape() { +fn test_view_from_shape() +{ let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]; let a = ArrayView::from_shape((2, 3, 2), &s).unwrap(); let mut answer = Array::from(s.to_vec()) @@ -1941,7 +2045,8 @@ fn test_view_from_shape() { } #[test] -fn test_contiguous() { +fn test_contiguous() +{ let c = arr3(&[[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 7, 7]]]); assert!(c.is_standard_layout()); assert!(c.as_slice_memory_order().is_some()); @@ -1975,7 +2080,8 @@ fn test_contiguous() { } #[test] -fn test_contiguous_single_element() { +fn test_contiguous_single_element() +{ assert_matches!(array![1].as_slice_memory_order(), Some(&[1])); let arr1 = array![1, 2, 3]; @@ -1990,7 +2096,8 @@ fn test_contiguous_single_element() { } #[test] -fn test_contiguous_neg_strides() { +fn test_contiguous_neg_strides() +{ let s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]; let a = ArrayView::from_shape((2, 
3, 2).strides((1, 4, 2)), &s).unwrap(); assert_eq!( @@ -2048,7 +2155,8 @@ fn test_contiguous_neg_strides() { } #[test] -fn test_swap() { +fn test_swap() +{ let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9]]); let b = a.clone(); @@ -2061,7 +2169,8 @@ fn test_swap() { } #[test] -fn test_uswap() { +fn test_uswap() +{ let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9]]); let b = a.clone(); @@ -2074,7 +2183,8 @@ fn test_uswap() { } #[test] -fn test_shape() { +fn test_shape() +{ let data = [0, 1, 2, 3, 4, 5]; let a = Array::from_shape_vec((1, 2, 3), data.to_vec()).unwrap(); let b = Array::from_shape_vec((1, 2, 3).f(), data.to_vec()).unwrap(); @@ -2088,7 +2198,8 @@ fn test_shape() { } #[test] -fn test_view_from_shape_ptr() { +fn test_view_from_shape_ptr() +{ let data = [0, 1, 2, 3, 4, 5]; let view = unsafe { ArrayView::from_shape_ptr((2, 3), data.as_ptr()) }; assert_eq!(view, aview2(&[[0, 1, 2], [3, 4, 5]])); @@ -2104,7 +2215,8 @@ fn test_view_from_shape_ptr() { #[should_panic(expected = "Unsupported")] #[cfg(debug_assertions)] #[test] -fn test_view_from_shape_ptr_deny_neg_strides() { +fn test_view_from_shape_ptr_deny_neg_strides() +{ let data = [0, 1, 2, 3, 4, 5]; let _view = unsafe { ArrayView::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_ptr()) }; } @@ -2112,7 +2224,8 @@ fn test_view_from_shape_ptr_deny_neg_strides() { #[should_panic(expected = "Unsupported")] #[cfg(debug_assertions)] #[test] -fn test_view_mut_from_shape_ptr_deny_neg_strides() { +fn test_view_mut_from_shape_ptr_deny_neg_strides() +{ let mut data = [0, 1, 2, 3, 4, 5]; let _view = unsafe { ArrayViewMut::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_mut_ptr()) }; } @@ -2120,7 +2233,8 @@ fn test_view_mut_from_shape_ptr_deny_neg_strides() { #[should_panic(expected = "Unsupported")] #[cfg(debug_assertions)] #[test] -fn test_raw_view_from_shape_ptr_deny_neg_strides() { +fn test_raw_view_from_shape_ptr_deny_neg_strides() +{ let data = [0, 1, 2, 3, 4, 5]; let _view = unsafe { 
RawArrayView::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_ptr()) }; } @@ -2128,13 +2242,15 @@ fn test_raw_view_from_shape_ptr_deny_neg_strides() { #[should_panic(expected = "Unsupported")] #[cfg(debug_assertions)] #[test] -fn test_raw_view_mut_from_shape_ptr_deny_neg_strides() { +fn test_raw_view_mut_from_shape_ptr_deny_neg_strides() +{ let mut data = [0, 1, 2, 3, 4, 5]; let _view = unsafe { RawArrayViewMut::from_shape_ptr((2, 3).strides((-3isize as usize, 1)), data.as_mut_ptr()) }; } #[test] -fn test_default() { +fn test_default() +{ let a = as Default>::default(); assert_eq!(a, aview2(&[[0.0; 0]; 0])); @@ -2145,14 +2261,16 @@ fn test_default() { } #[test] -fn test_default_ixdyn() { +fn test_default_ixdyn() +{ let a = as Default>::default(); let b = >::zeros(IxDyn(&[0])); assert_eq!(a, b); } #[test] -fn test_map_axis() { +fn test_map_axis() +{ let a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); let b = a.map_axis(Axis(0), |view| view.sum()); @@ -2185,7 +2303,8 @@ fn test_map_axis() { } #[test] -fn test_accumulate_axis_inplace_noop() { +fn test_accumulate_axis_inplace_noop() +{ let mut a = Array2::::zeros((0, 3)); a.accumulate_axis_inplace(Axis(0), |&prev, curr| *curr += prev); assert_eq!(a, Array2::zeros((0, 3))); @@ -2227,7 +2346,8 @@ fn test_accumulate_axis_inplace_nonstandard_layout() { } #[test] -fn test_to_vec() { +fn test_to_vec() +{ let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); a.slice_collapse(s![..;-1, ..]); @@ -2238,7 +2358,8 @@ fn test_to_vec() { } #[test] -fn test_array_clone_unalias() { +fn test_array_clone_unalias() +{ let a = Array::::zeros((3, 3)); let mut b = a.clone(); b.fill(1); @@ -2247,7 +2368,8 @@ fn test_array_clone_unalias() { } #[test] -fn test_array_clone_same_view() { +fn test_array_clone_same_view() +{ let mut a = Array::from_iter(0..9) .into_shape_with_order((3, 3)) .unwrap(); @@ -2257,7 +2379,8 @@ fn test_array_clone_same_view() { } #[test] -fn test_array2_from_diag() { +fn 
test_array2_from_diag() +{ let diag = arr1(&[0, 1, 2]); let x = Array2::from_diag(&diag); let x_exp = arr2(&[[0, 0, 0], [0, 1, 0], [0, 0, 2]]); @@ -2271,7 +2394,8 @@ fn test_array2_from_diag() { } #[test] -fn array_macros() { +fn array_macros() +{ // array let a1 = array![1, 2, 3]; assert_eq!(a1, arr1(&[1, 2, 3])); @@ -2299,7 +2423,8 @@ fn array_macros() { } #[cfg(test)] -mod as_standard_layout_tests { +mod as_standard_layout_tests +{ use super::*; use ndarray::Data; use std::fmt::Debug; @@ -2318,7 +2443,8 @@ mod as_standard_layout_tests { } #[test] - fn test_f_layout() { + fn test_f_layout() + { let shape = (2, 2).f(); let arr = Array::::from_shape_vec(shape, vec![1, 2, 3, 4]).unwrap(); assert!(!arr.is_standard_layout()); @@ -2326,14 +2452,16 @@ mod as_standard_layout_tests { } #[test] - fn test_c_layout() { + fn test_c_layout() + { let arr = Array::::from_shape_vec((2, 2), vec![1, 2, 3, 4]).unwrap(); assert!(arr.is_standard_layout()); test_as_standard_layout_for(arr); } #[test] - fn test_f_layout_view() { + fn test_f_layout_view() + { let shape = (2, 2).f(); let arr = Array::::from_shape_vec(shape, vec![1, 2, 3, 4]).unwrap(); let arr_view = arr.view(); @@ -2342,7 +2470,8 @@ mod as_standard_layout_tests { } #[test] - fn test_c_layout_view() { + fn test_c_layout_view() + { let arr = Array::::from_shape_vec((2, 2), vec![1, 2, 3, 4]).unwrap(); let arr_view = arr.view(); assert!(arr_view.is_standard_layout()); @@ -2350,14 +2479,16 @@ mod as_standard_layout_tests { } #[test] - fn test_zero_dimensional_array() { + fn test_zero_dimensional_array() + { let arr_view = ArrayView1::::from(&[]); assert!(arr_view.is_standard_layout()); test_as_standard_layout_for(arr_view); } #[test] - fn test_custom_layout() { + fn test_custom_layout() + { let shape = (1, 2, 3, 2).strides((12, 1, 2, 6)); let arr_data: Vec = (0..12).collect(); let arr = Array::::from_shape_vec(shape, arr_data).unwrap(); @@ -2367,11 +2498,13 @@ mod as_standard_layout_tests { } #[cfg(test)] -mod array_cow_tests 
{ +mod array_cow_tests +{ use super::*; #[test] - fn test_is_variant() { + fn test_is_variant() + { let arr: Array = array![[1, 2], [3, 4]]; let arr_cow = CowArray::::from(arr.view()); assert!(arr_cow.is_view()); @@ -2381,7 +2514,8 @@ mod array_cow_tests { assert!(!arr_cow.is_view()); } - fn run_with_various_layouts(mut f: impl FnMut(Array2)) { + fn run_with_various_layouts(mut f: impl FnMut(Array2)) + { for all in vec![ Array2::from_shape_vec((7, 8), (0..7 * 8).collect()).unwrap(), Array2::from_shape_vec((7, 8).f(), (0..7 * 8).collect()).unwrap(), @@ -2399,7 +2533,8 @@ mod array_cow_tests { } #[test] - fn test_element_mutation() { + fn test_element_mutation() + { run_with_various_layouts(|arr: Array2| { let mut expected = arr.clone(); expected[(1, 1)] = 2; @@ -2419,7 +2554,8 @@ mod array_cow_tests { } #[test] - fn test_clone() { + fn test_clone() + { run_with_various_layouts(|arr: Array2| { let arr_cow = CowArray::::from(arr.view()); let arr_cow_clone = arr_cow.clone(); @@ -2438,8 +2574,10 @@ mod array_cow_tests { } #[test] - fn test_clone_from() { - fn assert_eq_contents_and_layout(arr1: &CowArray<'_, i32, Ix2>, arr2: &CowArray<'_, i32, Ix2>) { + fn test_clone_from() + { + fn assert_eq_contents_and_layout(arr1: &CowArray<'_, i32, Ix2>, arr2: &CowArray<'_, i32, Ix2>) + { assert_eq!(arr1, arr2); assert_eq!(arr1.dim(), arr2.dim()); assert_eq!(arr1.strides(), arr2.strides()); @@ -2475,7 +2613,8 @@ mod array_cow_tests { } #[test] - fn test_into_owned() { + fn test_into_owned() + { run_with_various_layouts(|arr: Array2| { let before = CowArray::::from(arr.view()); let after = before.into_owned(); @@ -2491,7 +2630,8 @@ mod array_cow_tests { } #[test] -fn test_remove_index() { +fn test_remove_index() +{ let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); a.remove_index(Axis(0), 1); a.remove_index(Axis(1), 2); @@ -2528,14 +2668,16 @@ fn test_remove_index() { #[should_panic(expected = "must be less")] #[test] -fn test_remove_index_oob1() { +fn 
test_remove_index_oob1() +{ let mut a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]); a.remove_index(Axis(0), 4); } #[should_panic(expected = "must be less")] #[test] -fn test_remove_index_oob2() { +fn test_remove_index_oob2() +{ let mut a = array![[10], [4], [1]]; a.remove_index(Axis(1), 0); assert_eq!(a.shape(), &[3, 0]); @@ -2552,13 +2694,15 @@ fn test_remove_index_oob2() { #[should_panic(expected = "index out of bounds")] #[test] -fn test_remove_index_oob3() { +fn test_remove_index_oob3() +{ let mut a = array![[10], [4], [1]]; a.remove_index(Axis(2), 0); } #[test] -fn test_split_complex_view() { +fn test_split_complex_view() +{ let a = Array3::from_shape_fn((3, 4, 5), |(i, j, k)| Complex::::new(i as f32 * j as f32, k as f32)); let Complex { re, im } = a.view().split_complex(); assert_relative_eq!(re.sum(), 90.); @@ -2566,7 +2710,8 @@ fn test_split_complex_view() { } #[test] -fn test_split_complex_view_roundtrip() { +fn test_split_complex_view_roundtrip() +{ let a_re = Array3::from_shape_fn((3, 1, 5), |(i, j, _k)| i * j); let a_im = Array3::from_shape_fn((3, 1, 5), |(_i, _j, k)| k); let a = Array3::from_shape_fn((3, 1, 5), |(i, j, k)| Complex::new(a_re[[i, j, k]], a_im[[i, j, k]])); @@ -2576,7 +2721,8 @@ fn test_split_complex_view_roundtrip() { } #[test] -fn test_split_complex_view_mut() { +fn test_split_complex_view_mut() +{ let eye_scalar = Array2::::eye(4); let eye_complex = Array2::>::eye(4); let mut a = Array2::>::zeros((4, 4)); @@ -2587,7 +2733,8 @@ fn test_split_complex_view_mut() { } #[test] -fn test_split_complex_zerod() { +fn test_split_complex_zerod() +{ let mut a = Array0::from_elem((), Complex::new(42, 32)); let Complex { re, im } = a.view().split_complex(); assert_eq!(re.get(()), Some(&42)); @@ -2598,7 +2745,8 @@ fn test_split_complex_zerod() { } #[test] -fn test_split_complex_permuted() { +fn test_split_complex_permuted() +{ let a = Array3::from_shape_fn((3, 4, 5), |(i, j, k)| Complex::new(i * k + j, k)); let permuted = 
a.view().permuted_axes([1, 0, 2]); let Complex { re, im } = permuted.split_complex(); @@ -2607,7 +2755,8 @@ fn test_split_complex_permuted() { } #[test] -fn test_split_complex_invert_axis() { +fn test_split_complex_invert_axis() +{ let mut a = Array::from_shape_fn((2, 3, 2), |(i, j, k)| Complex::new(i as f64 + j as f64, i as f64 + k as f64)); a.invert_axis(Axis(1)); let cmplx = a.view().split_complex(); diff --git a/tests/assign.rs b/tests/assign.rs index 8205828c2..29a6b851a 100644 --- a/tests/assign.rs +++ b/tests/assign.rs @@ -3,7 +3,8 @@ use ndarray::prelude::*; use std::sync::atomic::{AtomicUsize, Ordering}; #[test] -fn assign() { +fn assign() +{ let mut a = arr2(&[[1., 2.], [3., 4.]]); let b = arr2(&[[1., 3.], [2., 4.]]); a.assign(&b); @@ -28,7 +29,8 @@ fn assign() { } #[test] -fn assign_to() { +fn assign_to() +{ let mut a = arr2(&[[1., 2.], [3., 4.]]); let b = arr2(&[[0., 3.], [2., 0.]]); b.assign_to(&mut a); @@ -36,7 +38,8 @@ fn assign_to() { } #[test] -fn move_into_copy() { +fn move_into_copy() +{ let a = arr2(&[[1., 2.], [3., 4.]]); let acopy = a.clone(); let mut b = Array::uninit(a.dim()); @@ -53,7 +56,8 @@ fn move_into_copy() { } #[test] -fn move_into_owned() { +fn move_into_owned() +{ // Test various memory layouts and holes while moving String elements. for &use_f_order in &[false, true] { for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { @@ -83,7 +87,8 @@ fn move_into_owned() { } #[test] -fn move_into_slicing() { +fn move_into_slicing() +{ // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes). for &use_f_order in &[false, true] { for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { @@ -117,7 +122,8 @@ fn move_into_slicing() { } #[test] -fn move_into_diag() { +fn move_into_diag() +{ // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes). 
for &use_f_order in &[false, true] { let counter = DropCounter::default(); @@ -142,7 +148,8 @@ fn move_into_diag() { } #[test] -fn move_into_0dim() { +fn move_into_0dim() +{ // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes). for &use_f_order in &[false, true] { let counter = DropCounter::default(); @@ -169,7 +176,8 @@ fn move_into_0dim() { } #[test] -fn move_into_empty() { +fn move_into_empty() +{ // Count correct number of drops when using move_into_uninit and discontiguous arrays (with holes). for &use_f_order in &[false, true] { let counter = DropCounter::default(); @@ -195,7 +203,8 @@ fn move_into_empty() { } #[test] -fn move_into() { +fn move_into() +{ // Test various memory layouts and holes while moving String elements with move_into for &use_f_order in &[false, true] { for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { @@ -226,28 +235,34 @@ fn move_into() { /// This counter can create elements, and then count and verify /// the number of which have actually been dropped again. 
#[derive(Default)] -struct DropCounter { +struct DropCounter +{ created: AtomicUsize, dropped: AtomicUsize, } struct Element<'a>(&'a AtomicUsize); -impl DropCounter { - fn created(&self) -> usize { +impl DropCounter +{ + fn created(&self) -> usize + { self.created.load(Ordering::Relaxed) } - fn dropped(&self) -> usize { + fn dropped(&self) -> usize + { self.dropped.load(Ordering::Relaxed) } - fn element(&self) -> Element<'_> { + fn element(&self) -> Element<'_> + { self.created.fetch_add(1, Ordering::Relaxed); Element(&self.dropped) } - fn assert_drop_count(&self) { + fn assert_drop_count(&self) + { assert_eq!( self.created(), self.dropped(), @@ -258,8 +273,10 @@ impl DropCounter { } } -impl<'a> Drop for Element<'a> { - fn drop(&mut self) { +impl<'a> Drop for Element<'a> +{ + fn drop(&mut self) + { self.0.fetch_add(1, Ordering::Relaxed); } } diff --git a/tests/azip.rs b/tests/azip.rs index 14a639ea0..a4bb6ffac 100644 --- a/tests/azip.rs +++ b/tests/azip.rs @@ -11,7 +11,8 @@ use itertools::{assert_equal, cloned}; use std::mem::swap; #[test] -fn test_azip1() { +fn test_azip1() +{ let mut a = Array::zeros(62); let mut x = 0; azip!((a in &mut a) { *a = x; x += 1; }); @@ -19,7 +20,8 @@ fn test_azip1() { } #[test] -fn test_azip2() { +fn test_azip2() +{ let mut a = Array::zeros((5, 7)); let b = Array::from_shape_fn(a.dim(), |(i, j)| 1. / (i + 2 * j) as f32); azip!((a in &mut a, &b in &b) *a = b); @@ -27,7 +29,8 @@ fn test_azip2() { } #[test] -fn test_azip2_1() { +fn test_azip2_1() +{ let mut a = Array::zeros((5, 7)); let b = Array::from_shape_fn((5, 10), |(i, j)| 1. / (i + 2 * j) as f32); let b = b.slice(s![..;-1, 3..]); @@ -36,7 +39,8 @@ fn test_azip2_1() { } #[test] -fn test_azip2_3() { +fn test_azip2_3() +{ let mut b = Array::from_shape_fn((5, 10), |(i, j)| 1. 
/ (i + 2 * j) as f32); let mut c = Array::from_shape_fn((5, 10), |(i, j)| f32::exp((i + j) as f32)); let a = b.clone(); @@ -47,7 +51,8 @@ fn test_azip2_3() { #[test] #[cfg(feature = "approx")] -fn test_zip_collect() { +fn test_zip_collect() +{ use approx::assert_abs_diff_eq; // test Zip::map_collect and that it preserves c/f layout. @@ -75,7 +80,8 @@ fn test_zip_collect() { #[test] #[cfg(feature = "approx")] -fn test_zip_assign_into() { +fn test_zip_assign_into() +{ use approx::assert_abs_diff_eq; let mut a = Array::::zeros((5, 10)); @@ -89,7 +95,8 @@ fn test_zip_assign_into() { #[test] #[cfg(feature = "approx")] -fn test_zip_assign_into_cell() { +fn test_zip_assign_into_cell() +{ use approx::assert_abs_diff_eq; use std::cell::Cell; @@ -104,33 +111,40 @@ fn test_zip_assign_into_cell() { } #[test] -fn test_zip_collect_drop() { +fn test_zip_collect_drop() +{ use std::cell::RefCell; use std::panic; struct Recorddrop<'a>((usize, usize), &'a RefCell>); - impl<'a> Drop for Recorddrop<'a> { - fn drop(&mut self) { + impl<'a> Drop for Recorddrop<'a> + { + fn drop(&mut self) + { self.1.borrow_mut().push(self.0); } } #[derive(Copy, Clone)] - enum Config { + enum Config + { CC, CF, FF, } - impl Config { - fn a_is_f(self) -> bool { + impl Config + { + fn a_is_f(self) -> bool + { match self { Config::CC | Config::CF => false, _ => true, } } - fn b_is_f(self) -> bool { + fn b_is_f(self) -> bool + { match self { Config::CC => false, _ => true, @@ -176,7 +190,8 @@ fn test_zip_collect_drop() { } #[test] -fn test_azip_syntax_trailing_comma() { +fn test_azip_syntax_trailing_comma() +{ let mut b = Array::::zeros((5, 5)); let mut c = Array::::ones((5, 5)); let a = b.clone(); @@ -187,7 +202,8 @@ fn test_azip_syntax_trailing_comma() { #[test] #[cfg(feature = "approx")] -fn test_azip2_sum() { +fn test_azip2_sum() +{ use approx::assert_abs_diff_eq; let c = Array::from_shape_fn((5, 10), |(i, j)| f32::exp((i + j) as f32)); @@ -201,7 +217,8 @@ fn test_azip2_sum() { #[test] #[cfg(feature = 
"approx")] -fn test_azip3_slices() { +fn test_azip3_slices() +{ use approx::assert_abs_diff_eq; let mut a = [0.; 32]; @@ -221,7 +238,8 @@ fn test_azip3_slices() { #[test] #[cfg(feature = "approx")] -fn test_broadcast() { +fn test_broadcast() +{ use approx::assert_abs_diff_eq; let n = 16; @@ -246,7 +264,8 @@ fn test_broadcast() { #[should_panic] #[test] -fn test_zip_dim_mismatch_1() { +fn test_zip_dim_mismatch_1() +{ let mut a = Array::zeros((5, 7)); let mut d = a.raw_dim(); d[0] += 1; @@ -258,7 +277,8 @@ fn test_zip_dim_mismatch_1() { // Zip::from(A).and(B) // where A is F-contiguous and B contiguous but neither F nor C contiguous. #[test] -fn test_contiguous_but_not_c_or_f() { +fn test_contiguous_but_not_c_or_f() +{ let a = Array::from_iter(0..27) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -284,7 +304,8 @@ fn test_contiguous_but_not_c_or_f() { } #[test] -fn test_clone() { +fn test_clone() +{ let a = Array::from_iter(0..27) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -303,7 +324,8 @@ fn test_clone() { } #[test] -fn test_indices_0() { +fn test_indices_0() +{ let a1 = arr0(3); let mut count = 0; @@ -316,7 +338,8 @@ fn test_indices_0() { } #[test] -fn test_indices_1() { +fn test_indices_1() +{ let mut a1 = Array::default(12); for (i, elt) in a1.indexed_iter_mut() { *elt = i; @@ -346,7 +369,8 @@ fn test_indices_1() { } #[test] -fn test_indices_2() { +fn test_indices_2() +{ let mut a1 = Array::default((10, 12)); for (i, elt) in a1.indexed_iter_mut() { *elt = i; @@ -376,7 +400,8 @@ fn test_indices_2() { } #[test] -fn test_indices_3() { +fn test_indices_3() +{ let mut a1 = Array::default((4, 5, 6)); for (i, elt) in a1.indexed_iter_mut() { *elt = i; @@ -406,7 +431,8 @@ fn test_indices_3() { } #[test] -fn test_indices_split_1() { +fn test_indices_split_1() +{ for m in (0..4).chain(10..12) { for n in (0..4).chain(10..12) { let a1 = Array::::default((m, n)); @@ -438,7 +464,8 @@ fn test_indices_split_1() { } #[test] -fn test_zip_all() { +fn test_zip_all() +{ let a = 
Array::::zeros(62); let b = Array::::ones(62); let mut c = Array::::ones(62); @@ -449,7 +476,8 @@ fn test_zip_all() { } #[test] -fn test_zip_all_empty_array() { +fn test_zip_all_empty_array() +{ let a = Array::::zeros(0); let b = Array::::ones(0); assert_eq!(true, Zip::from(&a).and(&b).all(|&_x, &_y| true)); diff --git a/tests/broadcast.rs b/tests/broadcast.rs index 82047db60..288ccb38a 100644 --- a/tests/broadcast.rs +++ b/tests/broadcast.rs @@ -2,7 +2,8 @@ use ndarray::prelude::*; #[test] #[cfg(feature = "std")] -fn broadcast_1() { +fn broadcast_1() +{ let a_dim = Dim([2, 4, 2, 2]); let b_dim = Dim([2, 1, 2, 1]); let a = ArcArray::linspace(0., 1., a_dim.size()) @@ -34,7 +35,8 @@ fn broadcast_1() { #[test] #[cfg(feature = "std")] -fn test_add() { +fn test_add() +{ let a_dim = Dim([2, 4, 2, 2]); let b_dim = Dim([2, 1, 2, 1]); let mut a = ArcArray::linspace(0.0, 1., a_dim.size()) @@ -51,7 +53,8 @@ fn test_add() { #[test] #[should_panic] #[cfg(feature = "std")] -fn test_add_incompat() { +fn test_add_incompat() +{ let a_dim = Dim([2, 4, 2, 2]); let mut a = ArcArray::linspace(0.0, 1., a_dim.size()) .into_shape_with_order(a_dim) @@ -61,7 +64,8 @@ fn test_add_incompat() { } #[test] -fn test_broadcast() { +fn test_broadcast() +{ let (_, n, k) = (16, 16, 16); let x1 = 1.; // b0 broadcast 1 -> n, k @@ -81,7 +85,8 @@ fn test_broadcast() { } #[test] -fn test_broadcast_1d() { +fn test_broadcast_1d() +{ let n = 16; let x1 = 1.; // b0 broadcast 1 -> n diff --git a/tests/clone.rs b/tests/clone.rs index e1914ba7f..4a7e50b8e 100644 --- a/tests/clone.rs +++ b/tests/clone.rs @@ -1,7 +1,8 @@ use ndarray::arr2; #[test] -fn test_clone_from() { +fn test_clone_from() +{ let a = arr2(&[[1, 2, 3], [4, 5, 6], [7, 8, 9]]); let b = arr2(&[[7, 7, 7]]); let mut c = b.clone(); diff --git a/tests/complex.rs b/tests/complex.rs index 1b52b2671..824e296a4 100644 --- a/tests/complex.rs +++ b/tests/complex.rs @@ -3,12 +3,14 @@ use ndarray::{arr1, arr2, Axis}; use num_complex::Complex; use 
num_traits::Num; -fn c(re: T, im: T) -> Complex { +fn c(re: T, im: T) -> Complex +{ Complex::new(re, im) } #[test] -fn complex_mat_mul() { +fn complex_mat_mul() +{ let a = arr2(&[[c(3., 4.), c(2., 0.)], [c(0., -2.), c(3., 0.)]]); let b = (&a * c(3., 0.)).map(|c| 5. * c / c.norm_sqr()); println!("{:>8.2}", b); diff --git a/tests/dimension.rs b/tests/dimension.rs index 2500164a0..6a9207e4c 100644 --- a/tests/dimension.rs +++ b/tests/dimension.rs @@ -7,7 +7,8 @@ use ndarray::{arr2, ArcArray, Array, Axis, Dim, Dimension, IxDyn, RemoveAxis}; use std::hash::{Hash, Hasher}; #[test] -fn insert_axis() { +fn insert_axis() +{ assert_eq!(Dim([]).insert_axis(Axis(0)), Dim([1])); assert_eq!(Dim([3]).insert_axis(Axis(0)), Dim([1, 3])); @@ -41,7 +42,8 @@ fn insert_axis() { } #[test] -fn remove_axis() { +fn remove_axis() +{ assert_eq!(Dim([3]).remove_axis(Axis(0)), Dim([])); assert_eq!(Dim([1, 2]).remove_axis(Axis(0)), Dim([2])); assert_eq!(Dim([4, 5, 6]).remove_axis(Axis(1)), Dim([4, 6])); @@ -63,7 +65,8 @@ fn remove_axis() { #[test] #[allow(clippy::eq_op)] -fn dyn_dimension() { +fn dyn_dimension() +{ let a = arr2(&[[1., 2.], [3., 4.0]]) .into_shape_with_order(vec![2, 2]) .unwrap(); @@ -79,7 +82,8 @@ fn dyn_dimension() { } #[test] -fn dyn_insert() { +fn dyn_insert() +{ let mut v = vec![2, 3, 4, 5]; let mut dim = Dim(v.clone()); defmac!(test_insert index => { @@ -98,7 +102,8 @@ fn dyn_insert() { } #[test] -fn dyn_remove() { +fn dyn_remove() +{ let mut v = vec![1, 2, 3, 4, 5, 6, 7]; let mut dim = Dim(v.clone()); defmac!(test_remove index => { @@ -117,7 +122,8 @@ fn dyn_remove() { } #[test] -fn fastest_varying_order() { +fn fastest_varying_order() +{ let strides = Dim([2, 8, 4, 1]); let order = strides._fastest_varying_stride_order(); assert_eq!(order.slice(), &[3, 0, 2, 1]); @@ -190,7 +196,8 @@ fn min_stride_axis() { */ #[test] -fn max_stride_axis() { +fn max_stride_axis() +{ let a = ArrayF32::zeros(10); assert_eq!(a.max_stride_axis(), Axis(0)); @@ -217,7 +224,8 @@ fn 
max_stride_axis() { } #[test] -fn test_indexing() { +fn test_indexing() +{ let mut x = Dim([1, 2]); assert_eq!(x[0], 1); @@ -228,7 +236,8 @@ fn test_indexing() { } #[test] -fn test_operations() { +fn test_operations() +{ let mut x = Dim([1, 2]); let mut y = Dim([1, 1]); @@ -245,8 +254,10 @@ fn test_operations() { #[test] #[allow(clippy::cognitive_complexity)] -fn test_hash() { - fn calc_hash(value: &T) -> u64 { +fn test_hash() +{ + fn calc_hash(value: &T) -> u64 + { let mut hasher = std::collections::hash_map::DefaultHasher::new(); value.hash(&mut hasher); hasher.finish() @@ -281,8 +292,10 @@ fn test_hash() { } #[test] -fn test_generic_operations() { - fn test_dim(d: &D) { +fn test_generic_operations() +{ + fn test_dim(d: &D) + { let mut x = d.clone(); x[0] += 1; assert_eq!(x[0], 3); @@ -296,8 +309,10 @@ fn test_generic_operations() { } #[test] -fn test_array_view() { - fn test_dim(d: &D) { +fn test_array_view() +{ + fn test_dim(d: &D) + { assert_eq!(d.as_array_view().sum(), 7); assert_eq!(d.as_array_view().strides(), &[1]); } @@ -310,7 +325,8 @@ fn test_array_view() { #[test] #[cfg(feature = "std")] #[allow(clippy::cognitive_complexity)] -fn test_all_ndindex() { +fn test_all_ndindex() +{ use ndarray::IntoDimension; macro_rules! 
ndindex { ($($i:expr),*) => { diff --git a/tests/format.rs b/tests/format.rs index 4b21fe39d..35909871f 100644 --- a/tests/format.rs +++ b/tests/format.rs @@ -2,7 +2,8 @@ use ndarray::prelude::*; use ndarray::rcarr1; #[test] -fn formatting() { +fn formatting() +{ let a = rcarr1::(&[1., 2., 3., 4.]); assert_eq!(format!("{}", a), "[1, 2, 3, 4]"); assert_eq!(format!("{:4}", a), "[ 1, 2, 3, 4]"); @@ -55,7 +56,8 @@ fn formatting() { } #[test] -fn debug_format() { +fn debug_format() +{ let a = Array2::::zeros((3, 4)); assert_eq!( format!("{:?}", a), diff --git a/tests/higher_order_f.rs b/tests/higher_order_f.rs index c567eb3e0..72245412f 100644 --- a/tests/higher_order_f.rs +++ b/tests/higher_order_f.rs @@ -2,7 +2,8 @@ use ndarray::prelude::*; #[test] #[should_panic] -fn test_fold_axis_oob() { +fn test_fold_axis_oob() +{ let a = arr2(&[[1., 2.], [3., 4.]]); a.fold_axis(Axis(2), 0., |x, y| x + y); } diff --git a/tests/indices.rs b/tests/indices.rs index ca6ca9887..a9414f9a7 100644 --- a/tests/indices.rs +++ b/tests/indices.rs @@ -3,7 +3,8 @@ use ndarray::prelude::*; use ndarray::Order; #[test] -fn test_ixdyn_index_iterate() { +fn test_ixdyn_index_iterate() +{ for &order in &[Order::C, Order::F] { let mut a = Array::zeros((2, 3, 4).set_f(order.is_column_major())); let dim = a.shape().to_vec(); diff --git a/tests/into-ixdyn.rs b/tests/into-ixdyn.rs index ef5b75be4..6e7bf9607 100644 --- a/tests/into-ixdyn.rs +++ b/tests/into-ixdyn.rs @@ -6,12 +6,14 @@ use ndarray::prelude::*; #[test] -fn test_arr0_into_dyn() { +fn test_arr0_into_dyn() +{ assert!(arr0(1.234).into_dyn()[IxDyn(&[])] == 1.234); } #[test] -fn test_arr2_into_arrd_nonstandard_strides() { +fn test_arr2_into_arrd_nonstandard_strides() +{ let arr = Array2::from_shape_fn((12, 34).f(), |(i, j)| i * 34 + j).into_dyn(); let brr = ArrayD::from_shape_fn(vec![12, 34], |d| d[0] * 34 + d[1]); diff --git a/tests/iterator_chunks.rs b/tests/iterator_chunks.rs index 04ec9cc42..79b5403ef 100644 --- a/tests/iterator_chunks.rs +++ 
b/tests/iterator_chunks.rs @@ -7,7 +7,8 @@ use ndarray::prelude::*; #[test] #[cfg(feature = "std")] -fn chunks() { +fn chunks() +{ use ndarray::NdProducer; let a = >::linspace(1., 100., 10 * 10) .into_shape_with_order((10, 10)) @@ -46,13 +47,15 @@ fn chunks() { #[should_panic] #[test] -fn chunks_different_size_1() { +fn chunks_different_size_1() +{ let a = Array::::zeros(vec![2, 3]); a.exact_chunks(vec![2]); } #[test] -fn chunks_ok_size() { +fn chunks_ok_size() +{ let mut a = Array::::zeros(vec![2, 3]); a.fill(1.); let mut c = 0; @@ -66,13 +69,15 @@ fn chunks_ok_size() { #[should_panic] #[test] -fn chunks_different_size_2() { +fn chunks_different_size_2() +{ let a = Array::::zeros(vec![2, 3]); a.exact_chunks(vec![2, 3, 4]); } #[test] -fn chunks_mut() { +fn chunks_mut() +{ let mut a = Array::zeros((7, 8)); for (i, mut chunk) in a.exact_chunks_mut((2, 3)).into_iter().enumerate() { chunk.fill(i); @@ -92,7 +97,8 @@ fn chunks_mut() { #[should_panic] #[test] -fn chunks_different_size_3() { +fn chunks_different_size_3() +{ let mut a = Array::::zeros(vec![2, 3]); a.exact_chunks_mut(vec![2, 3, 4]); } diff --git a/tests/iterators.rs b/tests/iterators.rs index d8e6cb4a6..23175fd40 100644 --- a/tests/iterators.rs +++ b/tests/iterators.rs @@ -25,7 +25,8 @@ macro_rules! 
assert_panics { #[test] #[cfg(feature = "std")] -fn double_ended() { +fn double_ended() +{ let a = ArcArray::linspace(0., 7., 8); let mut it = a.iter().cloned(); assert_eq!(it.next(), Some(0.)); @@ -37,7 +38,8 @@ fn double_ended() { } #[test] -fn double_ended_rows() { +fn double_ended_rows() +{ let a = ArcArray::from_iter(0..8).into_shape_clone((4, 2)).unwrap(); let mut row_it = a.rows().into_iter(); assert_equal(row_it.next_back().unwrap(), &[6, 7]); @@ -58,7 +60,8 @@ fn double_ended_rows() { } #[test] -fn iter_size_hint() { +fn iter_size_hint() +{ // Check that the size hint is correctly computed let a = ArcArray::from_iter(0..24) .into_shape_with_order((2, 3, 4)) @@ -79,7 +82,8 @@ fn iter_size_hint() { #[test] #[cfg(feature = "std")] -fn indexed() { +fn indexed() +{ let a = ArcArray::linspace(0., 7., 8); for (i, elt) in a.indexed_iter() { assert_eq!(i, *elt as usize); @@ -99,7 +103,8 @@ fn indexed() { #[test] #[cfg(feature = "std")] -fn as_slice() { +fn as_slice() +{ use ndarray::Data; fn assert_slice_correct(v: &ArrayBase) @@ -156,7 +161,8 @@ fn as_slice() { } #[test] -fn inner_iter() { +fn inner_iter() +{ let a = ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -187,7 +193,8 @@ fn inner_iter() { } #[test] -fn inner_iter_corner_cases() { +fn inner_iter_corner_cases() +{ let a0 = ArcArray::::zeros(()); assert_equal(a0.rows(), vec![aview1(&[0])]); @@ -199,7 +206,8 @@ fn inner_iter_corner_cases() { } #[test] -fn inner_iter_size_hint() { +fn inner_iter_size_hint() +{ // Check that the size hint is correctly computed let a = ArcArray::from_iter(0..24) .into_shape_with_order((2, 3, 4)) @@ -216,7 +224,8 @@ fn inner_iter_size_hint() { #[allow(deprecated)] // into_outer_iter #[test] -fn outer_iter() { +fn outer_iter() +{ let a = ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -266,7 +275,8 @@ fn outer_iter() { } #[test] -fn axis_iter() { +fn axis_iter() +{ let a = 
ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -283,7 +293,8 @@ fn axis_iter() { } #[test] -fn axis_iter_split_at() { +fn axis_iter_split_at() +{ let a = Array::from_iter(0..5); let iter = a.axis_iter(Axis(0)); let all: Vec<_> = iter.clone().collect(); @@ -295,7 +306,8 @@ fn axis_iter_split_at() { } #[test] -fn axis_iter_split_at_partially_consumed() { +fn axis_iter_split_at_partially_consumed() +{ let a = Array::from_iter(0..5); let mut iter = a.axis_iter(Axis(0)); while iter.next().is_some() { @@ -309,7 +321,8 @@ fn axis_iter_split_at_partially_consumed() { } #[test] -fn axis_iter_zip() { +fn axis_iter_zip() +{ let a = Array::from_iter(0..5); let iter = a.axis_iter(Axis(0)); let mut b = Array::zeros(5); @@ -318,7 +331,8 @@ fn axis_iter_zip() { } #[test] -fn axis_iter_zip_partially_consumed() { +fn axis_iter_zip_partially_consumed() +{ let a = Array::from_iter(0..5); let mut iter = a.axis_iter(Axis(0)); let mut consumed = 0; @@ -333,7 +347,8 @@ fn axis_iter_zip_partially_consumed() { } #[test] -fn axis_iter_zip_partially_consumed_discontiguous() { +fn axis_iter_zip_partially_consumed_discontiguous() +{ let a = Array::from_iter(0..5); let mut iter = a.axis_iter(Axis(0)); let mut consumed = 0; @@ -349,7 +364,8 @@ fn axis_iter_zip_partially_consumed_discontiguous() { } #[test] -fn outer_iter_corner_cases() { +fn outer_iter_corner_cases() +{ let a2 = ArcArray::::zeros((0, 3)); assert_equal(a2.outer_iter(), vec![aview1(&[]); 0]); @@ -359,7 +375,8 @@ fn outer_iter_corner_cases() { #[allow(deprecated)] #[test] -fn outer_iter_mut() { +fn outer_iter_mut() +{ let a = ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -383,7 +400,8 @@ fn outer_iter_mut() { } #[test] -fn axis_iter_mut() { +fn axis_iter_mut() +{ let a = ArcArray::from_iter(0..12); let a = a.into_shape_with_order((2, 3, 2)).unwrap(); // [[[0, 1], @@ -403,7 +421,8 @@ fn axis_iter_mut() { } #[test] -fn 
axis_chunks_iter() { +fn axis_chunks_iter() +{ let a = ArcArray::from_iter(0..24); let a = a.into_shape_with_order((2, 6, 2)).unwrap(); @@ -441,7 +460,8 @@ fn axis_chunks_iter() { } #[test] -fn axis_iter_mut_split_at() { +fn axis_iter_mut_split_at() +{ let mut a = Array::from_iter(0..5); let mut a_clone = a.clone(); let all: Vec<_> = a_clone.axis_iter_mut(Axis(0)).collect(); @@ -453,7 +473,8 @@ fn axis_iter_mut_split_at() { } #[test] -fn axis_iter_mut_split_at_partially_consumed() { +fn axis_iter_mut_split_at_partially_consumed() +{ let mut a = Array::from_iter(0..5); for consumed in 1..=a.len() { for mid in 0..=(a.len() - consumed) { @@ -479,7 +500,8 @@ fn axis_iter_mut_split_at_partially_consumed() { } #[test] -fn axis_iter_mut_zip() { +fn axis_iter_mut_zip() +{ let orig = Array::from_iter(0..5); let mut cloned = orig.clone(); let iter = cloned.axis_iter_mut(Axis(0)); @@ -493,7 +515,8 @@ fn axis_iter_mut_zip() { } #[test] -fn axis_iter_mut_zip_partially_consumed() { +fn axis_iter_mut_zip_partially_consumed() +{ let mut a = Array::from_iter(0..5); for consumed in 1..=a.len() { let remaining = a.len() - consumed; @@ -508,7 +531,8 @@ fn axis_iter_mut_zip_partially_consumed() { } #[test] -fn axis_iter_mut_zip_partially_consumed_discontiguous() { +fn axis_iter_mut_zip_partially_consumed_discontiguous() +{ let mut a = Array::from_iter(0..5); for consumed in 1..=a.len() { let remaining = a.len() - consumed; @@ -525,7 +549,8 @@ fn axis_iter_mut_zip_partially_consumed_discontiguous() { #[test] #[cfg(feature = "std")] -fn axis_chunks_iter_corner_cases() { +fn axis_chunks_iter_corner_cases() +{ // examples provided by @bluss in PR #65 // these tests highlight corner cases of the axis_chunks_iter implementation // and enable checking if no pointer offsetting is out of bounds. 
However @@ -556,7 +581,8 @@ fn axis_chunks_iter_corner_cases() { } #[test] -fn axis_chunks_iter_zero_stride() { +fn axis_chunks_iter_zero_stride() +{ { // stride 0 case let b = Array::from(vec![0f32; 0]) @@ -592,19 +618,22 @@ fn axis_chunks_iter_zero_stride() { #[should_panic] #[test] -fn axis_chunks_iter_zero_chunk_size() { +fn axis_chunks_iter_zero_chunk_size() +{ let a = Array::from_iter(0..5); a.axis_chunks_iter(Axis(0), 0); } #[test] -fn axis_chunks_iter_zero_axis_len() { +fn axis_chunks_iter_zero_axis_len() +{ let a = Array::from_iter(0..0); assert!(a.axis_chunks_iter(Axis(0), 5).next().is_none()); } #[test] -fn axis_chunks_iter_split_at() { +fn axis_chunks_iter_split_at() +{ let mut a = Array2::::zeros((11, 3)); a.iter_mut().enumerate().for_each(|(i, elt)| *elt = i); for source in &[ @@ -631,7 +660,8 @@ fn axis_chunks_iter_split_at() { } #[test] -fn axis_chunks_iter_mut() { +fn axis_chunks_iter_mut() +{ let a = ArcArray::from_iter(0..24); let mut a = a.into_shape_with_order((2, 6, 2)).unwrap(); @@ -643,19 +673,22 @@ fn axis_chunks_iter_mut() { #[should_panic] #[test] -fn axis_chunks_iter_mut_zero_chunk_size() { +fn axis_chunks_iter_mut_zero_chunk_size() +{ let mut a = Array::from_iter(0..5); a.axis_chunks_iter_mut(Axis(0), 0); } #[test] -fn axis_chunks_iter_mut_zero_axis_len() { +fn axis_chunks_iter_mut_zero_axis_len() +{ let mut a = Array::from_iter(0..0); assert!(a.axis_chunks_iter_mut(Axis(0), 5).next().is_none()); } #[test] -fn outer_iter_size_hint() { +fn outer_iter_size_hint() +{ // Check that the size hint is correctly computed let a = ArcArray::from_iter(0..24) .into_shape_with_order((4, 3, 2)) @@ -690,7 +723,8 @@ fn outer_iter_size_hint() { } #[test] -fn outer_iter_split_at() { +fn outer_iter_split_at() +{ let a = ArcArray::from_iter(0..30) .into_shape_with_order((5, 3, 2)) .unwrap(); @@ -714,7 +748,8 @@ fn outer_iter_split_at() { #[test] #[should_panic] -fn outer_iter_split_at_panics() { +fn outer_iter_split_at_panics() +{ let a = 
ArcArray::from_iter(0..30) .into_shape_with_order((5, 3, 2)) .unwrap(); @@ -724,7 +759,8 @@ fn outer_iter_split_at_panics() { } #[test] -fn outer_iter_mut_split_at() { +fn outer_iter_mut_split_at() +{ let mut a = ArcArray::from_iter(0..30) .into_shape_with_order((5, 3, 2)) .unwrap(); @@ -746,7 +782,8 @@ fn outer_iter_mut_split_at() { } #[test] -fn iterators_are_send_sync() { +fn iterators_are_send_sync() +{ // When the element type is Send + Sync, then the iterators and views // are too. fn _send_sync(_: &T) {} @@ -778,7 +815,8 @@ fn iterators_are_send_sync() { #[test] #[allow(clippy::unnecessary_fold)] -fn test_fold() { +fn test_fold() +{ let mut a = Array2::::default((20, 20)); a += 1; let mut iter = a.iter(); @@ -791,7 +829,8 @@ fn test_fold() { } #[test] -fn nth_back_examples() { +fn nth_back_examples() +{ let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); assert_eq!(a.iter().nth_back(0), Some(&a[a.len() - 1])); @@ -804,7 +843,8 @@ fn nth_back_examples() { } #[test] -fn nth_back_zero_n() { +fn nth_back_zero_n() +{ let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); let mut iter1 = a.iter(); @@ -816,7 +856,8 @@ fn nth_back_zero_n() { } #[test] -fn nth_back_nonzero_n() { +fn nth_back_nonzero_n() +{ let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); let mut iter1 = a.iter(); @@ -832,7 +873,8 @@ fn nth_back_nonzero_n() { } #[test] -fn nth_back_past_end() { +fn nth_back_past_end() +{ let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); let mut iter = a.iter(); @@ -841,7 +883,8 @@ fn nth_back_past_end() { } #[test] -fn nth_back_partially_consumed() { +fn nth_back_partially_consumed() +{ let mut a: Array1 = (0..256).collect(); a.slice_axis_inplace(Axis(0), Slice::new(0, None, 2)); let mut iter = a.iter(); @@ -859,7 +902,8 @@ fn nth_back_partially_consumed() { } #[test] -fn test_rfold() { +fn 
test_rfold() +{ { let mut a = Array1::::default(256); a += 1; @@ -905,14 +949,16 @@ fn test_rfold() { } #[test] -fn test_into_iter() { +fn test_into_iter() +{ let a = Array1::from(vec![1, 2, 3, 4]); let v = a.into_iter().collect::>(); assert_eq!(v, [1, 2, 3, 4]); } #[test] -fn test_into_iter_2d() { +fn test_into_iter_2d() +{ let a = Array1::from(vec![1, 2, 3, 4]) .into_shape_with_order((2, 2)) .unwrap(); @@ -928,7 +974,8 @@ fn test_into_iter_2d() { } #[test] -fn test_into_iter_sliced() { +fn test_into_iter_sliced() +{ let (m, n) = (4, 5); let drops = Cell::new(0); @@ -972,20 +1019,25 @@ fn test_into_iter_sliced() { /// /// Compares equal by its "represented value". #[derive(Clone, Debug)] -struct DropCount<'a> { +struct DropCount<'a> +{ value: i32, my_drops: usize, drops: &'a Cell, } -impl PartialEq for DropCount<'_> { - fn eq(&self, other: &Self) -> bool { +impl PartialEq for DropCount<'_> +{ + fn eq(&self, other: &Self) -> bool + { self.value == other.value } } -impl<'a> DropCount<'a> { - fn new(value: i32, drops: &'a Cell) -> Self { +impl<'a> DropCount<'a> +{ + fn new(value: i32, drops: &'a Cell) -> Self + { DropCount { value, my_drops: 0, @@ -994,8 +1046,10 @@ impl<'a> DropCount<'a> { } } -impl Drop for DropCount<'_> { - fn drop(&mut self) { +impl Drop for DropCount<'_> +{ + fn drop(&mut self) + { assert_eq!(self.my_drops, 0); self.my_drops += 1; self.drops.set(self.drops.get() + 1); diff --git a/tests/ix0.rs b/tests/ix0.rs index 714d499df..f1038556a 100644 --- a/tests/ix0.rs +++ b/tests/ix0.rs @@ -8,7 +8,8 @@ use ndarray::Ix0; use ndarray::ShapeBuilder; #[test] -fn test_ix0() { +fn test_ix0() +{ let mut a = Array::zeros(Ix0()); assert_eq!(a[()], 0.); a[()] = 1.; @@ -27,7 +28,8 @@ fn test_ix0() { } #[test] -fn test_ix0_add() { +fn test_ix0_add() +{ let mut a = Array::zeros(Ix0()); a += 1.; assert_eq!(a[()], 1.); @@ -36,7 +38,8 @@ fn test_ix0_add() { } #[test] -fn test_ix0_add_add() { +fn test_ix0_add_add() +{ let mut a = Array::zeros(Ix0()); a += 1.; let mut b 
= Array::zeros(Ix0()); @@ -46,7 +49,8 @@ fn test_ix0_add_add() { } #[test] -fn test_ix0_add_broad() { +fn test_ix0_add_broad() +{ let mut b = Array::from(vec![5., 6.]); let mut a = Array::zeros(Ix0()); a += 1.; diff --git a/tests/ixdyn.rs b/tests/ixdyn.rs index ba85688cf..05f123ba1 100644 --- a/tests/ixdyn.rs +++ b/tests/ixdyn.rs @@ -10,7 +10,8 @@ use ndarray::Order; use ndarray::ShapeBuilder; #[test] -fn test_ixdyn() { +fn test_ixdyn() +{ // check that we can use fixed size arrays for indexing let mut a = Array::zeros(vec![2, 3, 4]); a[[1, 1, 1]] = 1.; @@ -19,7 +20,8 @@ fn test_ixdyn() { #[should_panic] #[test] -fn test_ixdyn_wrong_dim() { +fn test_ixdyn_wrong_dim() +{ // check that we can use but it panics at runtime, if number of axes is wrong let mut a = Array::zeros(vec![2, 3, 4]); a[[1, 1, 1]] = 1.; @@ -28,7 +30,8 @@ fn test_ixdyn_wrong_dim() { } #[test] -fn test_ixdyn_out_of_bounds() { +fn test_ixdyn_out_of_bounds() +{ // check that we are out of bounds let a = Array::::zeros(vec![2, 3, 4]); let res = a.get([0, 3, 0]); @@ -36,7 +39,8 @@ fn test_ixdyn_out_of_bounds() { } #[test] -fn test_ixdyn_iterate() { +fn test_ixdyn_iterate() +{ for &order in &[Order::C, Order::F] { let mut a = Array::zeros((2, 3, 4).set_f(order.is_column_major())); let dim = a.shape().to_vec(); @@ -56,7 +60,8 @@ fn test_ixdyn_iterate() { } #[test] -fn test_ixdyn_index_iterate() { +fn test_ixdyn_index_iterate() +{ for &order in &[Order::C, Order::F] { let mut a = Array::zeros((2, 3, 4).set_f(order.is_column_major())); let dim = a.shape().to_vec(); @@ -75,7 +80,8 @@ fn test_ixdyn_index_iterate() { } #[test] -fn test_ixdyn_uget() { +fn test_ixdyn_uget() +{ // check that we are out of bounds let mut a = Array::::zeros(vec![2, 3, 4]); @@ -104,7 +110,8 @@ fn test_ixdyn_uget() { } #[test] -fn test_0() { +fn test_0() +{ let mut a = Array::zeros(vec![]); let z = vec![].into_dimension(); assert_eq!(a[z.clone()], 0.); @@ -124,7 +131,8 @@ fn test_0() { } #[test] -fn test_0_add() { +fn test_0_add() 
+{ let mut a = Array::zeros(vec![]); a += 1.; assert_eq!(a[[]], 1.); @@ -133,7 +141,8 @@ fn test_0_add() { } #[test] -fn test_0_add_add() { +fn test_0_add_add() +{ let mut a = Array::zeros(vec![]); a += 1.; let mut b = Array::zeros(vec![]); @@ -143,7 +152,8 @@ fn test_0_add_add() { } #[test] -fn test_0_add_broad() { +fn test_0_add_broad() +{ let mut b = Array::from(vec![5., 6.]); let mut a = Array::zeros(vec![]); a += 1.; @@ -154,7 +164,8 @@ fn test_0_add_broad() { #[test] #[cfg(feature = "std")] -fn test_into_dimension() { +fn test_into_dimension() +{ use ndarray::{Ix0, Ix1, Ix2, IxDyn}; let a = Array::linspace(0., 41., 6 * 7) diff --git a/tests/numeric.rs b/tests/numeric.rs index 6f1c52dd0..4d70d4502 100644 --- a/tests/numeric.rs +++ b/tests/numeric.rs @@ -8,19 +8,22 @@ use ndarray::{arr0, arr1, arr2, array, aview1, Array, Array1, Array2, Array3, Ax use std::f64; #[test] -fn test_mean_with_nan_values() { +fn test_mean_with_nan_values() +{ let a = array![f64::NAN, 1.]; assert!(a.mean().unwrap().is_nan()); } #[test] -fn test_mean_with_empty_array_of_floats() { +fn test_mean_with_empty_array_of_floats() +{ let a: Array1 = array![]; assert!(a.mean().is_none()); } #[test] -fn test_mean_with_array_of_floats() { +fn test_mean_with_array_of_floats() +{ let a: Array1 = array![ 0.99889651, 0.0150731, 0.28492482, 0.83819218, 0.48413156, 0.80710412, 0.41762936, 0.22879429, 0.43997224, 0.23831807, 0.02416466, 0.6269962, 0.47420614, 0.56275487, @@ -36,7 +39,8 @@ fn test_mean_with_array_of_floats() { } #[test] -fn sum_mean() { +fn sum_mean() +{ let a: Array2 = arr2(&[[1., 2.], [3., 4.]]); assert_eq!(a.sum_axis(Axis(0)), arr1(&[4., 6.])); assert_eq!(a.sum_axis(Axis(1)), arr1(&[3., 7.])); @@ -48,7 +52,8 @@ fn sum_mean() { } #[test] -fn sum_mean_empty() { +fn sum_mean_empty() +{ assert_eq!(Array3::::ones((2, 0, 3)).sum(), 0.); assert_eq!(Array1::::ones(0).sum_axis(Axis(0)), arr0(0.)); assert_eq!( @@ -63,7 +68,8 @@ fn sum_mean_empty() { #[test] #[cfg(feature = "std")] -fn var() { 
+fn var() +{ let a = array![1., -4.32, 1.14, 0.32]; assert_abs_diff_eq!(a.var(0.), 5.049875, epsilon = 1e-8); } @@ -71,7 +77,8 @@ fn var() { #[test] #[cfg(feature = "std")] #[should_panic] -fn var_negative_ddof() { +fn var_negative_ddof() +{ let a = array![1., 2., 3.]; a.var(-1.); } @@ -79,14 +86,16 @@ fn var_negative_ddof() { #[test] #[cfg(feature = "std")] #[should_panic] -fn var_too_large_ddof() { +fn var_too_large_ddof() +{ let a = array![1., 2., 3.]; a.var(4.); } #[test] #[cfg(feature = "std")] -fn var_nan_ddof() { +fn var_nan_ddof() +{ let a = Array2::::zeros((2, 3)); let v = a.var(::std::f64::NAN); assert!(v.is_nan()); @@ -94,14 +103,16 @@ fn var_nan_ddof() { #[test] #[cfg(feature = "std")] -fn var_empty_arr() { +fn var_empty_arr() +{ let a: Array1 = array![]; assert!(a.var(0.0).is_nan()); } #[test] #[cfg(feature = "std")] -fn std() { +fn std() +{ let a = array![1., -4.32, 1.14, 0.32]; assert_abs_diff_eq!(a.std(0.), 2.24719, epsilon = 1e-5); } @@ -109,7 +120,8 @@ fn std() { #[test] #[cfg(feature = "std")] #[should_panic] -fn std_negative_ddof() { +fn std_negative_ddof() +{ let a = array![1., 2., 3.]; a.std(-1.); } @@ -117,14 +129,16 @@ fn std_negative_ddof() { #[test] #[cfg(feature = "std")] #[should_panic] -fn std_too_large_ddof() { +fn std_too_large_ddof() +{ let a = array![1., 2., 3.]; a.std(4.); } #[test] #[cfg(feature = "std")] -fn std_nan_ddof() { +fn std_nan_ddof() +{ let a = Array2::::zeros((2, 3)); let v = a.std(::std::f64::NAN); assert!(v.is_nan()); @@ -132,14 +146,16 @@ fn std_nan_ddof() { #[test] #[cfg(feature = "std")] -fn std_empty_arr() { +fn std_empty_arr() +{ let a: Array1 = array![]; assert!(a.std(0.0).is_nan()); } #[test] #[cfg(feature = "approx")] -fn var_axis() { +fn var_axis() +{ use ndarray::{aview0, aview2}; let a = array![ @@ -197,7 +213,8 @@ fn var_axis() { #[test] #[cfg(feature = "approx")] -fn std_axis() { +fn std_axis() +{ use ndarray::aview2; let a = array![ @@ -257,7 +274,8 @@ fn std_axis() { #[test] #[should_panic] 
#[cfg(feature = "std")] -fn var_axis_negative_ddof() { +fn var_axis_negative_ddof() +{ let a = array![1., 2., 3.]; a.var_axis(Axis(0), -1.); } @@ -265,14 +283,16 @@ fn var_axis_negative_ddof() { #[test] #[should_panic] #[cfg(feature = "std")] -fn var_axis_too_large_ddof() { +fn var_axis_too_large_ddof() +{ let a = array![1., 2., 3.]; a.var_axis(Axis(0), 4.); } #[test] #[cfg(feature = "std")] -fn var_axis_nan_ddof() { +fn var_axis_nan_ddof() +{ let a = Array2::::zeros((2, 3)); let v = a.var_axis(Axis(1), ::std::f64::NAN); assert_eq!(v.shape(), &[2]); @@ -281,7 +301,8 @@ fn var_axis_nan_ddof() { #[test] #[cfg(feature = "std")] -fn var_axis_empty_axis() { +fn var_axis_empty_axis() +{ let a = Array2::::zeros((2, 0)); let v = a.var_axis(Axis(1), 0.); assert_eq!(v.shape(), &[2]); @@ -291,14 +312,16 @@ fn var_axis_empty_axis() { #[test] #[should_panic] #[cfg(feature = "std")] -fn std_axis_bad_dof() { +fn std_axis_bad_dof() +{ let a = array![1., 2., 3.]; a.std_axis(Axis(0), 4.); } #[test] #[cfg(feature = "std")] -fn std_axis_empty_axis() { +fn std_axis_empty_axis() +{ let a = Array2::::zeros((2, 0)); let v = a.std_axis(Axis(1), 0.); assert_eq!(v.shape(), &[2]); diff --git a/tests/oper.rs b/tests/oper.rs index de1347752..294a762c6 100644 --- a/tests/oper.rs +++ b/tests/oper.rs @@ -13,7 +13,8 @@ use num_traits::Zero; use approx::assert_abs_diff_eq; use defmac::defmac; -fn test_oper(op: &str, a: &[f32], b: &[f32], c: &[f32]) { +fn test_oper(op: &str, a: &[f32], b: &[f32], c: &[f32]) +{ let aa = CowArray::from(arr1(a)); let bb = CowArray::from(arr1(b)); let cc = CowArray::from(arr1(c)); @@ -31,7 +32,8 @@ fn test_oper(op: &str, a: &[f32], b: &[f32], c: &[f32]) { } fn test_oper_arr(op: &str, mut aa: CowArray, bb: CowArray, cc: CowArray) -where D: Dimension { +where D: Dimension +{ match op { "+" => { assert_eq!(&aa + &bb, cc); @@ -67,7 +69,8 @@ where D: Dimension { } #[test] -fn operations() { +fn operations() +{ test_oper("+", &[1.0, 2.0, 3.0, 4.0], &[0.0, 1.0, 2.0, 3.0], 
&[1.0, 3.0, 5.0, 7.0]); test_oper("-", &[1.0, 2.0, 3.0, 4.0], &[0.0, 1.0, 2.0, 3.0], &[1.0, 1.0, 1.0, 1.0]); test_oper("*", &[1.0, 2.0, 3.0, 4.0], &[0.0, 1.0, 2.0, 3.0], &[0.0, 2.0, 6.0, 12.0]); @@ -77,7 +80,8 @@ fn operations() { } #[test] -fn scalar_operations() { +fn scalar_operations() +{ let a = arr0::(1.); let b = rcarr1::(&[1., 1.]); let c = rcarr2(&[[1., 1.], [1., 1.]]); @@ -123,7 +127,8 @@ where } #[test] -fn dot_product() { +fn dot_product() +{ let a = Array::range(0., 69., 1.); let b = &a * 2. - 7.; let dot = 197846.; @@ -161,7 +166,8 @@ fn dot_product() { // test that we can dot product with a broadcast array #[test] -fn dot_product_0() { +fn dot_product_0() +{ let a = Array::range(0., 69., 1.); let x = 1.5; let b = aview0(&x); @@ -181,7 +187,8 @@ fn dot_product_0() { } #[test] -fn dot_product_neg_stride() { +fn dot_product_neg_stride() +{ // test that we can dot with negative stride let a = Array::range(0., 69., 1.); let b = &a * 2. - 7.; @@ -200,7 +207,8 @@ fn dot_product_neg_stride() { } #[test] -fn fold_and_sum() { +fn fold_and_sum() +{ let a = Array::linspace(0., 127., 128) .into_shape_with_order((8, 16)) .unwrap(); @@ -241,7 +249,8 @@ fn fold_and_sum() { } #[test] -fn product() { +fn product() +{ let a = Array::linspace(0.5, 2., 128) .into_shape_with_order((8, 16)) .unwrap(); @@ -262,24 +271,28 @@ fn product() { } } -fn range_mat(m: Ix, n: Ix) -> Array2 { +fn range_mat(m: Ix, n: Ix) -> Array2 +{ Array::linspace(0., (m * n) as f32 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() } -fn range_mat64(m: Ix, n: Ix) -> Array2 { +fn range_mat64(m: Ix, n: Ix) -> Array2 +{ Array::linspace(0., (m * n) as f64 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() } #[cfg(feature = "approx")] -fn range1_mat64(m: Ix) -> Array1 { +fn range1_mat64(m: Ix) -> Array1 +{ Array::linspace(0., m as f64 - 1., m) } -fn range_i32(m: Ix, n: Ix) -> Array2 { +fn range_i32(m: Ix, n: Ix) -> Array2 +{ Array::from_iter(0..(m * n) as i32) .into_shape_with_order((m, n)) 
.unwrap() @@ -316,7 +329,8 @@ where } #[test] -fn mat_mul() { +fn mat_mul() +{ let (m, n, k) = (8, 8, 8); let a = range_mat(m, n); let b = range_mat(n, k); @@ -378,7 +392,8 @@ fn mat_mul() { // Check that matrix multiplication of contiguous matrices returns a // matrix with the same order #[test] -fn mat_mul_order() { +fn mat_mul_order() +{ let (m, n, k) = (8, 8, 8); let a = range_mat(m, n); let b = range_mat(n, k); @@ -397,7 +412,8 @@ fn mat_mul_order() { // test matrix multiplication shape mismatch #[test] #[should_panic] -fn mat_mul_shape_mismatch() { +fn mat_mul_shape_mismatch() +{ let (m, k, k2, n) = (8, 8, 9, 8); let a = range_mat(m, k); let b = range_mat(k2, n); @@ -407,7 +423,8 @@ fn mat_mul_shape_mismatch() { // test matrix multiplication shape mismatch #[test] #[should_panic] -fn mat_mul_shape_mismatch_2() { +fn mat_mul_shape_mismatch_2() +{ let (m, k, k2, n) = (8, 8, 8, 8); let a = range_mat(m, k); let b = range_mat(k2, n); @@ -418,7 +435,8 @@ fn mat_mul_shape_mismatch_2() { // Check that matrix multiplication // supports broadcast arrays. 
#[test] -fn mat_mul_broadcast() { +fn mat_mul_broadcast() +{ let (m, n, k) = (16, 16, 16); let a = range_mat(m, n); let x1 = 1.; @@ -437,7 +455,8 @@ fn mat_mul_broadcast() { // Check that matrix multiplication supports reversed axes #[test] -fn mat_mul_rev() { +fn mat_mul_rev() +{ let (m, n, k) = (16, 16, 16); let a = range_mat(m, n); let b = range_mat(n, k); @@ -453,7 +472,8 @@ fn mat_mul_rev() { // Check that matrix multiplication supports arrays with zero rows or columns #[test] -fn mat_mut_zero_len() { +fn mat_mut_zero_len() +{ defmac!(mat_mul_zero_len range_mat_fn => { for n in 0..4 { for m in 0..4 { @@ -474,7 +494,8 @@ fn mat_mut_zero_len() { } #[test] -fn scaled_add() { +fn scaled_add() +{ let a = range_mat(16, 15); let mut b = range_mat(16, 15); b.mapv_inplace(f32::exp); @@ -489,7 +510,8 @@ fn scaled_add() { #[cfg(feature = "approx")] #[test] -fn scaled_add_2() { +fn scaled_add_2() +{ let beta = -2.3; let sizes = vec![ (4, 4, 1, 4), @@ -526,7 +548,8 @@ fn scaled_add_2() { #[cfg(feature = "approx")] #[test] -fn scaled_add_3() { +fn scaled_add_3() +{ use approx::assert_relative_eq; use ndarray::{Slice, SliceInfo, SliceInfoElem}; use std::convert::TryFrom; @@ -577,7 +600,8 @@ fn scaled_add_3() { #[cfg(feature = "approx")] #[test] -fn gen_mat_mul() { +fn gen_mat_mul() +{ let alpha = -2.3; let beta = 3.14; let sizes = vec![ @@ -619,7 +643,8 @@ fn gen_mat_mul() { // Test y = A x where A is f-order #[cfg(feature = "approx")] #[test] -fn gemm_64_1_f() { +fn gemm_64_1_f() +{ let a = range_mat64(64, 64).reversed_axes(); let (m, n) = a.dim(); // m x n times n x 1 == m x 1 @@ -631,7 +656,8 @@ fn gemm_64_1_f() { } #[test] -fn gen_mat_mul_i32() { +fn gen_mat_mul_i32() +{ let alpha = -1; let beta = 2; let sizes = if cfg!(miri) { @@ -662,7 +688,8 @@ fn gen_mat_mul_i32() { #[cfg(feature = "approx")] #[test] -fn gen_mat_vec_mul() { +fn gen_mat_vec_mul() +{ use approx::assert_relative_eq; use ndarray::linalg::general_mat_vec_mul; @@ -730,7 +757,8 @@ fn gen_mat_vec_mul() { 
#[cfg(feature = "approx")] #[test] -fn vec_mat_mul() { +fn vec_mat_mul() +{ use approx::assert_relative_eq; // simple, slow, correct (hopefully) mat mul @@ -793,7 +821,8 @@ fn vec_mat_mul() { } #[test] -fn kron_square_f64() { +fn kron_square_f64() +{ let a = arr2(&[[1.0, 0.0], [0.0, 1.0]]); let b = arr2(&[[0.0, 1.0], [1.0, 0.0]]); @@ -819,7 +848,8 @@ fn kron_square_f64() { } #[test] -fn kron_square_i64() { +fn kron_square_i64() +{ let a = arr2(&[[1, 0], [0, 1]]); let b = arr2(&[[0, 1], [1, 0]]); @@ -835,7 +865,8 @@ fn kron_square_i64() { } #[test] -fn kron_i64() { +fn kron_i64() +{ let a = arr2(&[[1, 0]]); let b = arr2(&[[0, 1], [1, 0]]); let r = arr2(&[[0, 1, 0, 0], [1, 0, 0, 0]]); diff --git a/tests/par_azip.rs b/tests/par_azip.rs index e5dc02c4e..418c21ef8 100644 --- a/tests/par_azip.rs +++ b/tests/par_azip.rs @@ -7,7 +7,8 @@ use ndarray::prelude::*; use std::sync::atomic::{AtomicUsize, Ordering}; #[test] -fn test_par_azip1() { +fn test_par_azip1() +{ let mut a = Array::zeros(62); let b = Array::from_elem(62, 42); par_azip!((a in &mut a) { *a = 42 }); @@ -15,7 +16,8 @@ fn test_par_azip1() { } #[test] -fn test_par_azip2() { +fn test_par_azip2() +{ let mut a = Array::zeros((5, 7)); let b = Array::from_shape_fn(a.dim(), |(i, j)| 1. 
/ (i + 2 * j) as f32); par_azip!((a in &mut a, &b in &b, ) *a = b ); @@ -24,7 +26,8 @@ fn test_par_azip2() { #[test] #[cfg(feature = "approx")] -fn test_par_azip3() { +fn test_par_azip3() +{ use approx::assert_abs_diff_eq; let mut a = [0.; 32]; @@ -44,7 +47,8 @@ fn test_par_azip3() { #[should_panic] #[test] -fn test_zip_dim_mismatch_1() { +fn test_zip_dim_mismatch_1() +{ let mut a = Array::zeros((5, 7)); let mut d = a.raw_dim(); d[0] += 1; @@ -53,7 +57,8 @@ fn test_zip_dim_mismatch_1() { } #[test] -fn test_indices_1() { +fn test_indices_1() +{ let mut a1 = Array::default(12); for (i, elt) in a1.indexed_iter_mut() { *elt = i; diff --git a/tests/par_rayon.rs b/tests/par_rayon.rs index 40670c6bf..13669763f 100644 --- a/tests/par_rayon.rs +++ b/tests/par_rayon.rs @@ -9,7 +9,8 @@ const CHUNK_SIZE: usize = 100; const N_CHUNKS: usize = (M + CHUNK_SIZE - 1) / CHUNK_SIZE; #[test] -fn test_axis_iter() { +fn test_axis_iter() +{ let mut a = Array2::::zeros((M, N)); for (i, mut v) in a.axis_iter_mut(Axis(0)).enumerate() { v.fill(i as _); @@ -22,7 +23,8 @@ fn test_axis_iter() { #[test] #[cfg(feature = "approx")] -fn test_axis_iter_mut() { +fn test_axis_iter_mut() +{ use approx::assert_abs_diff_eq; let mut a = Array::linspace(0., 1.0f64, M * N) .into_shape_with_order((M, N)) @@ -36,7 +38,8 @@ fn test_axis_iter_mut() { } #[test] -fn test_regular_iter() { +fn test_regular_iter() +{ let mut a = Array2::::zeros((M, N)); for (i, mut v) in a.axis_iter_mut(Axis(0)).enumerate() { v.fill(i as _); @@ -47,7 +50,8 @@ fn test_regular_iter() { } #[test] -fn test_regular_iter_collect() { +fn test_regular_iter_collect() +{ let mut a = Array2::::zeros((M, N)); for (i, mut v) in a.axis_iter_mut(Axis(0)).enumerate() { v.fill(i as _); @@ -57,7 +61,8 @@ fn test_regular_iter_collect() { } #[test] -fn test_axis_chunks_iter() { +fn test_axis_chunks_iter() +{ let mut a = Array2::::zeros((M, N)); for (i, mut v) in a.axis_chunks_iter_mut(Axis(0), CHUNK_SIZE).enumerate() { v.fill(i as _); @@ -74,7 +79,8 @@ 
fn test_axis_chunks_iter() { #[test] #[cfg(feature = "approx")] -fn test_axis_chunks_iter_mut() { +fn test_axis_chunks_iter_mut() +{ use approx::assert_abs_diff_eq; let mut a = Array::linspace(0., 1.0f64, M * N) .into_shape_with_order((M, N)) diff --git a/tests/par_zip.rs b/tests/par_zip.rs index ec96c1bb9..9f10d9fd5 100644 --- a/tests/par_zip.rs +++ b/tests/par_zip.rs @@ -8,14 +8,16 @@ const M: usize = 1024 * 10; const N: usize = 100; #[test] -fn test_zip_1() { +fn test_zip_1() +{ let mut a = Array2::::zeros((M, N)); Zip::from(&mut a).par_for_each(|x| *x = x.exp()); } #[test] -fn test_zip_index_1() { +fn test_zip_index_1() +{ let mut a = Array2::default((10, 10)); Zip::indexed(&mut a).par_for_each(|i, x| { @@ -28,7 +30,8 @@ fn test_zip_index_1() { } #[test] -fn test_zip_index_2() { +fn test_zip_index_2() +{ let mut a = Array2::default((M, N)); Zip::indexed(&mut a).par_for_each(|i, x| { @@ -41,7 +44,8 @@ fn test_zip_index_2() { } #[test] -fn test_zip_index_3() { +fn test_zip_index_3() +{ let mut a = Array::default((1, 2, 1, 2, 3)); Zip::indexed(&mut a).par_for_each(|i, x| { @@ -54,7 +58,8 @@ fn test_zip_index_3() { } #[test] -fn test_zip_index_4() { +fn test_zip_index_4() +{ let mut a = Array2::zeros((M, N)); let mut b = Array2::zeros((M, N)); @@ -75,7 +80,8 @@ fn test_zip_index_4() { #[test] #[cfg(feature = "approx")] -fn test_zip_collect() { +fn test_zip_collect() +{ use approx::assert_abs_diff_eq; // test Zip::map_collect and that it preserves c/f layout. 
@@ -103,7 +109,8 @@ fn test_zip_collect() { #[test] #[cfg(feature = "approx")] -fn test_zip_small_collect() { +fn test_zip_small_collect() +{ use approx::assert_abs_diff_eq; for m in 0..32 { @@ -129,7 +136,8 @@ fn test_zip_small_collect() { #[test] #[cfg(feature = "approx")] -fn test_zip_assign_into() { +fn test_zip_assign_into() +{ use approx::assert_abs_diff_eq; let mut a = Array::::zeros((M, N)); diff --git a/tests/raw_views.rs b/tests/raw_views.rs index bb39547e8..929e969d7 100644 --- a/tests/raw_views.rs +++ b/tests/raw_views.rs @@ -4,7 +4,8 @@ use ndarray::Zip; use std::cell::Cell; #[test] -fn raw_view_cast_cell() { +fn raw_view_cast_cell() +{ // Test .cast() by creating an ArrayView> let mut a = Array::from_shape_fn((10, 5), |(i, j)| (i * j) as f32); @@ -20,7 +21,8 @@ fn raw_view_cast_cell() { } #[test] -fn raw_view_cast_reinterpret() { +fn raw_view_cast_reinterpret() +{ // Test .cast() by reinterpreting u16 as [u8; 2] let a = Array::from_shape_fn((5, 5).f(), |(i, j)| (i as u16) << 8 | j as u16); let answer = a.mapv(u16::to_ne_bytes); @@ -31,7 +33,8 @@ fn raw_view_cast_reinterpret() { } #[test] -fn raw_view_cast_zst() { +fn raw_view_cast_zst() +{ struct Zst; let a = Array::<(), _>::default((250, 250)); @@ -42,14 +45,16 @@ fn raw_view_cast_zst() { #[test] #[should_panic] -fn raw_view_invalid_size_cast() { +fn raw_view_invalid_size_cast() +{ let data = [0i32; 16]; ArrayView::from(&data[..]).raw_view().cast::(); } #[test] #[should_panic] -fn raw_view_mut_invalid_size_cast() { +fn raw_view_mut_invalid_size_cast() +{ let mut data = [0i32; 16]; ArrayViewMut::from(&mut data[..]) .raw_view_mut() @@ -57,7 +62,8 @@ fn raw_view_mut_invalid_size_cast() { } #[test] -fn raw_view_misaligned() { +fn raw_view_misaligned() +{ let data: [u16; 2] = [0x0011, 0x2233]; let ptr: *const u16 = data.as_ptr(); unsafe { @@ -69,8 +75,10 @@ fn raw_view_misaligned() { #[test] #[cfg(debug_assertions)] #[should_panic = "The pointer must be aligned."] -fn raw_view_deref_into_view_misaligned() 
{ - fn misaligned_deref(data: &[u16; 2]) -> ArrayView1<'_, u16> { +fn raw_view_deref_into_view_misaligned() +{ + fn misaligned_deref(data: &[u16; 2]) -> ArrayView1<'_, u16> + { let ptr: *const u16 = data.as_ptr(); unsafe { let misaligned_ptr = (ptr as *const u8).add(1) as *const u16; @@ -85,8 +93,10 @@ fn raw_view_deref_into_view_misaligned() { #[test] #[cfg(debug_assertions)] #[should_panic = "Unsupported"] -fn raw_view_negative_strides() { - fn misaligned_deref(data: &[u16; 2]) -> ArrayView1<'_, u16> { +fn raw_view_negative_strides() +{ + fn misaligned_deref(data: &[u16; 2]) -> ArrayView1<'_, u16> + { let ptr: *const u16 = data.as_ptr(); unsafe { let raw_view = RawArrayView::from_shape_ptr(1.strides((-1isize) as usize), ptr); diff --git a/tests/reshape.rs b/tests/reshape.rs index 533b124fd..a13a5c05f 100644 --- a/tests/reshape.rs +++ b/tests/reshape.rs @@ -5,7 +5,8 @@ use itertools::enumerate; use ndarray::Order; #[test] -fn reshape() { +fn reshape() +{ let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); let u = v.into_shape_with_order((3, 3)); @@ -21,7 +22,8 @@ fn reshape() { #[test] #[should_panic(expected = "IncompatibleShape")] -fn reshape_error1() { +fn reshape_error1() +{ let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); let _u = v.into_shape_with_order((2, 5)).unwrap(); @@ -29,7 +31,8 @@ fn reshape_error1() { #[test] #[should_panic(expected = "IncompatibleLayout")] -fn reshape_error2() { +fn reshape_error2() +{ let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); let mut u = v.into_shape_with_order((2, 2, 2)).unwrap(); @@ -38,7 +41,8 @@ fn reshape_error2() { } #[test] -fn reshape_f() { +fn reshape_f() +{ let mut u = Array::zeros((3, 4).f()); for (i, elt) in enumerate(u.as_slice_memory_order_mut().unwrap()) { *elt = i as i32; @@ -63,7 +67,8 @@ fn reshape_f() { } #[test] -fn to_shape_easy() { +fn to_shape_easy() +{ // 1D -> C -> C let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); @@ -102,7 +107,8 @@ fn to_shape_easy() { } 
#[test] -fn to_shape_copy() { +fn to_shape_copy() +{ // 1D -> C -> F let v = ArrayView::from(&[1, 2, 3, 4, 5, 6, 7, 8]); let u = v.to_shape(((4, 2), Order::RowMajor)).unwrap(); @@ -125,7 +131,8 @@ fn to_shape_copy() { } #[test] -fn to_shape_add_axis() { +fn to_shape_add_axis() +{ // 1D -> C -> C let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); @@ -136,7 +143,8 @@ fn to_shape_add_axis() { } #[test] -fn to_shape_copy_stride() { +fn to_shape_copy_stride() +{ let v = array![[1, 2, 3, 4], [5, 6, 7, 8]]; let vs = v.slice(s![.., ..3]); let lin1 = vs.to_shape(6).unwrap(); @@ -149,7 +157,8 @@ fn to_shape_copy_stride() { } #[test] -fn to_shape_zero_len() { +fn to_shape_zero_len() +{ let v = array![[1, 2, 3, 4], [5, 6, 7, 8]]; let vs = v.slice(s![.., ..0]); let lin1 = vs.to_shape(0).unwrap(); @@ -159,7 +168,8 @@ fn to_shape_zero_len() { #[test] #[should_panic(expected = "IncompatibleShape")] -fn to_shape_error1() { +fn to_shape_error1() +{ let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); let _u = v.to_shape((2, 5)).unwrap(); @@ -167,7 +177,8 @@ fn to_shape_error1() { #[test] #[should_panic(expected = "IncompatibleShape")] -fn to_shape_error2() { +fn to_shape_error2() +{ // overflow let data = [3, 4, 5, 6, 7, 8]; let v = aview1(&data); @@ -175,7 +186,8 @@ fn to_shape_error2() { } #[test] -fn to_shape_discontig() { +fn to_shape_discontig() +{ for &create_order in &[Order::C, Order::F] { let a = Array::from_iter(0..64); let mut a1 = a.to_shape(((4, 4, 4), create_order)).unwrap(); @@ -202,7 +214,8 @@ fn to_shape_discontig() { } #[test] -fn to_shape_broadcast() { +fn to_shape_broadcast() +{ for &create_order in &[Order::C, Order::F] { let a = Array::from_iter(0..64); let mut a1 = a.to_shape(((4, 4, 4), create_order)).unwrap(); @@ -229,7 +242,8 @@ fn to_shape_broadcast() { } #[test] -fn into_shape_with_order() { +fn into_shape_with_order() +{ // 1D -> C -> C let data = [1, 2, 3, 4, 5, 6, 7, 8]; let v = aview1(&data); @@ -268,7 +282,8 @@ fn 
into_shape_with_order() { } #[test] -fn into_shape_clone() { +fn into_shape_clone() +{ // 1D -> C -> C { let data = [1, 2, 3, 4, 5, 6, 7, 8]; diff --git a/tests/s.rs b/tests/s.rs index 56eed03a1..edb3f071a 100644 --- a/tests/s.rs +++ b/tests/s.rs @@ -5,7 +5,8 @@ use ndarray::{s, Array}; #[test] -fn test_s() { +fn test_s() +{ let a = Array::::zeros((3, 4)); let vi = a.slice(s![1.., ..;2]); assert_eq!(vi.shape(), &[2, 2]); diff --git a/tests/stacking.rs b/tests/stacking.rs index 0c4e79c79..bdfe478b4 100644 --- a/tests/stacking.rs +++ b/tests/stacking.rs @@ -1,7 +1,8 @@ use ndarray::{arr2, arr3, aview1, aview2, concatenate, stack, Array2, Axis, ErrorKind, Ix1}; #[test] -fn concatenating() { +fn concatenating() +{ let a = arr2(&[[2., 2.], [3., 3.]]); let b = ndarray::concatenate(Axis(0), &[a.view(), a.view()]).unwrap(); assert_eq!(b, arr2(&[[2., 2.], [3., 3.], [2., 2.], [3., 3.]])); @@ -33,7 +34,8 @@ fn concatenating() { } #[test] -fn stacking() { +fn stacking() +{ let a = arr2(&[[2., 2.], [3., 3.]]); let b = ndarray::stack(Axis(0), &[a.view(), a.view()]).unwrap(); assert_eq!(b, arr3(&[[[2., 2.], [3., 3.]], [[2., 2.], [3., 3.]]])); diff --git a/tests/views.rs b/tests/views.rs index ecef72fe8..02970b1b7 100644 --- a/tests/views.rs +++ b/tests/views.rs @@ -2,7 +2,8 @@ use ndarray::prelude::*; use ndarray::Zip; #[test] -fn cell_view() { +fn cell_view() +{ let mut a = Array::from_shape_fn((10, 5), |(i, j)| (i * j) as f32); let answer = &a + 1.; diff --git a/tests/windows.rs b/tests/windows.rs index 692e71e5a..d8d5b699e 100644 --- a/tests/windows.rs +++ b/tests/windows.rs @@ -22,7 +22,8 @@ use ndarray::{arr3, Zip}; /// Test that verifies the `Windows` iterator panics on window sizes equal to zero. 
#[test] #[should_panic] -fn windows_iterator_zero_size() { +fn windows_iterator_zero_size() +{ let a = Array::from_iter(10..37) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -31,7 +32,8 @@ fn windows_iterator_zero_size() { /// Test that verifies that no windows are yielded on oversized window sizes. #[test] -fn windows_iterator_oversized() { +fn windows_iterator_oversized() +{ let a = Array::from_iter(10..37) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -41,7 +43,8 @@ fn windows_iterator_oversized() { /// Simple test for iterating 1d-arrays via `Windows`. #[test] -fn windows_iterator_1d() { +fn windows_iterator_1d() +{ let a = Array::from_iter(10..20).into_shape_with_order(10).unwrap(); itertools::assert_equal(a.windows(Dim(4)), vec![ arr1(&[10, 11, 12, 13]), @@ -56,7 +59,8 @@ fn windows_iterator_1d() { /// Simple test for iterating 2d-arrays via `Windows`. #[test] -fn windows_iterator_2d() { +fn windows_iterator_2d() +{ let a = Array::from_iter(10..30) .into_shape_with_order((5, 4)) .unwrap(); @@ -75,7 +79,8 @@ fn windows_iterator_2d() { /// Simple test for iterating 3d-arrays via `Windows`. #[test] -fn windows_iterator_3d() { +fn windows_iterator_3d() +{ let a = Array::from_iter(10..37) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -94,7 +99,8 @@ fn windows_iterator_3d() { /// Test that verifies the `Windows` iterator panics when stride has an axis equal to zero. #[test] #[should_panic] -fn windows_iterator_stride_axis_zero() { +fn windows_iterator_stride_axis_zero() +{ let a = Array::from_iter(10..37) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -103,7 +109,8 @@ fn windows_iterator_stride_axis_zero() { /// Test that verifies that only first window is yielded when stride is oversized on every axis. 
#[test] -fn windows_iterator_only_one_valid_window_for_oversized_stride() { +fn windows_iterator_only_one_valid_window_for_oversized_stride() +{ let a = Array::from_iter(10..135) .into_shape_with_order((5, 5, 5)) .unwrap(); @@ -113,7 +120,8 @@ fn windows_iterator_only_one_valid_window_for_oversized_stride() { /// Simple test for iterating 1d-arrays via `Windows` with stride. #[test] -fn windows_iterator_1d_with_stride() { +fn windows_iterator_1d_with_stride() +{ let a = Array::from_iter(10..20).into_shape_with_order(10).unwrap(); itertools::assert_equal(a.windows_with_stride(4, 2), vec![ arr1(&[10, 11, 12, 13]), @@ -125,7 +133,8 @@ fn windows_iterator_1d_with_stride() { /// Simple test for iterating 2d-arrays via `Windows` with stride. #[test] -fn windows_iterator_2d_with_stride() { +fn windows_iterator_2d_with_stride() +{ let a = Array::from_iter(10..30) .into_shape_with_order((5, 4)) .unwrap(); @@ -141,7 +150,8 @@ fn windows_iterator_2d_with_stride() { /// Simple test for iterating 3d-arrays via `Windows` with stride. 
#[test] -fn windows_iterator_3d_with_stride() { +fn windows_iterator_3d_with_stride() +{ let a = Array::from_iter(10..74) .into_shape_with_order((4, 4, 4)) .unwrap(); @@ -158,7 +168,8 @@ fn windows_iterator_3d_with_stride() { } #[test] -fn test_window_zip() { +fn test_window_zip() +{ let a = Array::from_iter(0..64) .into_shape_with_order((4, 4, 4)) .unwrap(); @@ -183,7 +194,8 @@ fn test_window_zip() { /// Test verifies that non existent Axis results in panic #[test] #[should_panic] -fn axis_windows_outofbound() { +fn axis_windows_outofbound() +{ let a = Array::from_iter(10..37) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -193,7 +205,8 @@ fn axis_windows_outofbound() { /// Test verifies that zero sizes results in panic #[test] #[should_panic] -fn axis_windows_zero_size() { +fn axis_windows_zero_size() +{ let a = Array::from_iter(10..37) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -202,7 +215,8 @@ fn axis_windows_zero_size() { /// Test verifies that over sized windows yield nothing #[test] -fn axis_windows_oversized() { +fn axis_windows_oversized() +{ let a = Array::from_iter(10..37) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -212,7 +226,8 @@ fn axis_windows_oversized() { /// Simple test for iterating 1d-arrays via `Axis Windows`. #[test] -fn test_axis_windows_1d() { +fn test_axis_windows_1d() +{ let a = Array::from_iter(10..20).into_shape_with_order(10).unwrap(); itertools::assert_equal(a.axis_windows(Axis(0), 5), vec![ @@ -227,7 +242,8 @@ fn test_axis_windows_1d() { /// Simple test for iterating 2d-arrays via `Axis Windows`. #[test] -fn test_axis_windows_2d() { +fn test_axis_windows_2d() +{ let a = Array::from_iter(10..30) .into_shape_with_order((5, 4)) .unwrap(); @@ -242,7 +258,8 @@ fn test_axis_windows_2d() { /// Simple test for iterating 3d-arrays via `Axis Windows`. 
#[test] -fn test_axis_windows_3d() { +fn test_axis_windows_3d() +{ let a = Array::from_iter(0..27) .into_shape_with_order((3, 3, 3)) .unwrap(); @@ -262,7 +279,8 @@ fn test_axis_windows_3d() { } #[test] -fn test_window_neg_stride() { +fn test_window_neg_stride() +{ let array = Array::from_iter(1..10) .into_shape_with_order((3, 3)) .unwrap(); @@ -292,7 +310,8 @@ fn test_window_neg_stride() { } #[test] -fn test_windows_with_stride_on_inverted_axis() { +fn test_windows_with_stride_on_inverted_axis() +{ let mut array = Array::from_iter(1..17) .into_shape_with_order((4, 4)) .unwrap(); diff --git a/tests/zst.rs b/tests/zst.rs index c3c779d2c..f5f2c8e32 100644 --- a/tests/zst.rs +++ b/tests/zst.rs @@ -2,7 +2,8 @@ use ndarray::arr2; use ndarray::ArcArray; #[test] -fn test_swap() { +fn test_swap() +{ let mut a = arr2(&[[(); 3]; 3]); let b = a.clone(); @@ -16,7 +17,8 @@ fn test_swap() { } #[test] -fn test() { +fn test() +{ let c = ArcArray::<(), _>::default((3, 4)); let mut d = c.clone(); for _ in d.iter_mut() {} diff --git a/xtest-blas/tests/oper.rs b/xtest-blas/tests/oper.rs index 1cedc9018..3ed81915e 100644 --- a/xtest-blas/tests/oper.rs +++ b/xtest-blas/tests/oper.rs @@ -17,7 +17,8 @@ use num_complex::Complex32; use num_complex::Complex64; #[test] -fn mat_vec_product_1d() { +fn mat_vec_product_1d() +{ let a = arr2(&[[1.], [2.]]); let b = arr1(&[1., 2.]); let ans = arr1(&[5.]); @@ -25,7 +26,8 @@ fn mat_vec_product_1d() { } #[test] -fn mat_vec_product_1d_broadcast() { +fn mat_vec_product_1d_broadcast() +{ let a = arr2(&[[1.], [2.], [3.]]); let b = arr1(&[1.]); let b = b.broadcast(3).unwrap(); @@ -34,7 +36,8 @@ fn mat_vec_product_1d_broadcast() { } #[test] -fn mat_vec_product_1d_inverted_axis() { +fn mat_vec_product_1d_inverted_axis() +{ let a = arr2(&[[1.], [2.], [3.]]); let mut b = arr1(&[1., 2., 3.]); b.invert_axis(Axis(0)); @@ -43,37 +46,43 @@ fn mat_vec_product_1d_inverted_axis() { assert_eq!(a.t().dot(&b), ans); } -fn range_mat(m: Ix, n: Ix) -> Array2 { +fn 
range_mat(m: Ix, n: Ix) -> Array2 +{ Array::linspace(0., (m * n) as f32 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() } -fn range_mat64(m: Ix, n: Ix) -> Array2 { +fn range_mat64(m: Ix, n: Ix) -> Array2 +{ Array::linspace(0., (m * n) as f64 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() } -fn range_mat_complex(m: Ix, n: Ix) -> Array2 { +fn range_mat_complex(m: Ix, n: Ix) -> Array2 +{ Array::linspace(0., (m * n) as f32 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() .map(|&f| Complex32::new(f, 0.)) } -fn range_mat_complex64(m: Ix, n: Ix) -> Array2 { +fn range_mat_complex64(m: Ix, n: Ix) -> Array2 +{ Array::linspace(0., (m * n) as f64 - 1., m * n) .into_shape_with_order((m, n)) .unwrap() .map(|&f| Complex64::new(f, 0.)) } -fn range1_mat64(m: Ix) -> Array1 { +fn range1_mat64(m: Ix) -> Array1 +{ Array::linspace(0., m as f64 - 1., m) } -fn range_i32(m: Ix, n: Ix) -> Array2 { +fn range_i32(m: Ix, n: Ix) -> Array2 +{ Array::from_iter(0..(m * n) as i32) .into_shape_with_order((m, n)) .unwrap() @@ -148,7 +157,8 @@ where // Check that matrix multiplication of contiguous matrices returns a // matrix with the same order #[test] -fn mat_mul_order() { +fn mat_mul_order() +{ let (m, n, k) = (50, 50, 50); let a = range_mat(m, n); let b = range_mat(n, k); @@ -167,7 +177,8 @@ fn mat_mul_order() { // Check that matrix multiplication // supports broadcast arrays. 
#[test] -fn mat_mul_broadcast() { +fn mat_mul_broadcast() +{ let (m, n, k) = (16, 16, 16); let a = range_mat(m, n); let x1 = 1.; @@ -186,7 +197,8 @@ fn mat_mul_broadcast() { // Check that matrix multiplication supports reversed axes #[test] -fn mat_mul_rev() { +fn mat_mul_rev() +{ let (m, n, k) = (16, 16, 16); let a = range_mat(m, n); let b = range_mat(n, k); @@ -202,7 +214,8 @@ fn mat_mul_rev() { // Check that matrix multiplication supports arrays with zero rows or columns #[test] -fn mat_mut_zero_len() { +fn mat_mut_zero_len() +{ defmac!(mat_mul_zero_len range_mat_fn => { for n in 0..4 { for m in 0..4 { @@ -223,7 +236,8 @@ fn mat_mut_zero_len() { } #[test] -fn gen_mat_mul() { +fn gen_mat_mul() +{ let alpha = -2.3; let beta = 3.14; let sizes = vec![ @@ -264,7 +278,8 @@ fn gen_mat_mul() { // Test y = A x where A is f-order #[test] -fn gemm_64_1_f() { +fn gemm_64_1_f() +{ let a = range_mat64(64, 64).reversed_axes(); let (m, n) = a.dim(); // m x n times n x 1 == m x 1 @@ -276,7 +291,8 @@ fn gemm_64_1_f() { } #[test] -fn gemm_c64_1_f() { +fn gemm_c64_1_f() +{ let a = range_mat_complex64(64, 64).reversed_axes(); let (m, n) = a.dim(); // m x n times n x 1 == m x 1 @@ -293,7 +309,8 @@ fn gemm_c64_1_f() { } #[test] -fn gemm_c32_1_f() { +fn gemm_c32_1_f() +{ let a = range_mat_complex(64, 64).reversed_axes(); let (m, n) = a.dim(); // m x n times n x 1 == m x 1 @@ -310,7 +327,8 @@ fn gemm_c32_1_f() { } #[test] -fn gemm_c64_actually_complex() { +fn gemm_c64_actually_complex() +{ let mut a = range_mat_complex64(4, 4); a = a.map(|&i| if i.re > 8. 
{ i.conj() } else { i }); let mut b = range_mat_complex64(4, 6); @@ -329,7 +347,8 @@ fn gemm_c64_actually_complex() { } #[test] -fn gen_mat_vec_mul() { +fn gen_mat_vec_mul() +{ let alpha = -2.3; let beta = 3.14; let sizes = vec![ @@ -375,7 +394,8 @@ fn gen_mat_vec_mul() { } #[test] -fn vec_mat_mul() { +fn vec_mat_mul() +{ let sizes = vec![ (4, 4), (8, 8), diff --git a/xtest-numeric/tests/accuracy.rs b/xtest-numeric/tests/accuracy.rs index b64a71d22..e98fb3c4d 100644 --- a/xtest-numeric/tests/accuracy.rs +++ b/xtest-numeric/tests/accuracy.rs @@ -23,7 +23,8 @@ use rand_distr::{Distribution, Normal, StandardNormal}; use approx::{assert_abs_diff_eq, assert_relative_eq}; fn kahan_sum(iter: impl Iterator) -> A -where A: LinalgScalar { +where A: LinalgScalar +{ let mut sum = A::zero(); let mut compensation = A::zero(); @@ -83,7 +84,8 @@ where } #[test] -fn accurate_eye_f32() { +fn accurate_eye_f32() +{ let rng = &mut SmallRng::from_entropy(); for i in 0..20 { let eye = Array::eye(i); @@ -110,7 +112,8 @@ fn accurate_eye_f32() { } #[test] -fn accurate_eye_f64() { +fn accurate_eye_f64() +{ let rng = &mut SmallRng::from_entropy(); let abs_tol = 1e-15; for i in 0..20 { @@ -138,22 +141,26 @@ fn accurate_eye_f64() { } #[test] -fn accurate_mul_f32_dot() { +fn accurate_mul_f32_dot() +{ accurate_mul_float_general::(1e-5, false); } #[test] -fn accurate_mul_f32_general() { +fn accurate_mul_f32_general() +{ accurate_mul_float_general::(1e-5, true); } #[test] -fn accurate_mul_f64_dot() { +fn accurate_mul_f64_dot() +{ accurate_mul_float_general::(1e-14, false); } #[test] -fn accurate_mul_f64_general() { +fn accurate_mul_f64_general() +{ accurate_mul_float_general::(1e-14, true); } @@ -163,7 +170,8 @@ fn accurate_mul_f64_general() { fn random_matrix_mul( rng: &mut SmallRng, use_stride: bool, use_general: bool, generator: fn(Ix2, &mut SmallRng) -> Array2, ) -> (Array2, Array2) -where A: LinalgScalar { +where A: LinalgScalar +{ let m = rng.gen_range(15..512); let k = 
rng.gen_range(15..512); let n = rng.gen_range(15..1560); @@ -215,12 +223,14 @@ where } #[test] -fn accurate_mul_complex32() { +fn accurate_mul_complex32() +{ accurate_mul_complex_general::(1e-5); } #[test] -fn accurate_mul_complex64() { +fn accurate_mul_complex64() +{ accurate_mul_complex_general::(1e-14); } @@ -246,7 +256,8 @@ where } #[test] -fn accurate_mul_with_column_f64() { +fn accurate_mul_with_column_f64() +{ // pick a few random sizes let rng = &mut SmallRng::from_entropy(); for i in 0..10 { diff --git a/xtest-serialization/tests/serialize.rs b/xtest-serialization/tests/serialize.rs index 6e00f3af5..95e93e4fb 100644 --- a/xtest-serialization/tests/serialize.rs +++ b/xtest-serialization/tests/serialize.rs @@ -12,7 +12,8 @@ extern crate ron; use ndarray::{arr0, arr1, arr2, s, ArcArray, ArcArray2, ArrayD, IxDyn}; #[test] -fn serial_many_dim_serde() { +fn serial_many_dim_serde() +{ { let a = arr0::(2.72); let serial = serde_json::to_string(&a).unwrap(); @@ -58,7 +59,8 @@ fn serial_many_dim_serde() { } #[test] -fn serial_ixdyn_serde() { +fn serial_ixdyn_serde() +{ { let a = arr0::(2.72).into_dyn(); let serial = serde_json::to_string(&a).unwrap(); @@ -97,7 +99,8 @@ fn serial_ixdyn_serde() { } #[test] -fn serial_wrong_count_serde() { +fn serial_wrong_count_serde() +{ // one element too few let text = r##"{"v":1,"dim":[2,3],"data":[3,1,2.2,3.1,4]}"##; let arr = serde_json::from_str::>(text); @@ -112,7 +115,8 @@ fn serial_wrong_count_serde() { } #[test] -fn serial_many_dim_serde_msgpack() { +fn serial_many_dim_serde_msgpack() +{ { let a = arr0::(2.72); @@ -176,7 +180,8 @@ fn serial_many_dim_serde_msgpack() { #[test] #[cfg(feature = "ron")] -fn serial_many_dim_ron() { +fn serial_many_dim_ron() +{ use ron::de::from_str as ron_deserialize; use ron::ser::to_string as ron_serialize;