diff --git a/benches/append.rs b/benches/append.rs new file mode 100644 index 000000000..1a911a278 --- /dev/null +++ b/benches/append.rs @@ -0,0 +1,35 @@ +#![feature(test)] + +extern crate test; +use test::Bencher; + +use ndarray::prelude::*; + +#[bench] +fn select_axis0(bench: &mut Bencher) { + let a = Array::::zeros((256, 256)); + let selectable = vec![0, 1, 2, 0, 1, 3, 0, 4, 16, 32, 128, 147, 149, 220, 221, 255, 221, 0, 1]; + bench.iter(|| { + a.select(Axis(0), &selectable) + }); +} + +#[bench] +fn select_axis1(bench: &mut Bencher) { + let a = Array::::zeros((256, 256)); + let selectable = vec![0, 1, 2, 0, 1, 3, 0, 4, 16, 32, 128, 147, 149, 220, 221, 255, 221, 0, 1]; + bench.iter(|| { + a.select(Axis(1), &selectable) + }); +} + +#[bench] +fn select_1d(bench: &mut Bencher) { + let a = Array::::zeros(1024); + let mut selectable = (0..a.len()).step_by(17).collect::>(); + selectable.extend(selectable.clone().iter().rev()); + + bench.iter(|| { + a.select(Axis(0), &selectable) + }); +} diff --git a/src/data_repr.rs b/src/data_repr.rs index b34f0a4ca..8a52f64c4 100644 --- a/src/data_repr.rs +++ b/src/data_repr.rs @@ -6,6 +6,8 @@ use alloc::borrow::ToOwned; use alloc::vec::Vec; use crate::extension::nonnull; +use rawpointer::PointerExt; + /// Array's representation. 
/// /// *Don’t use this type directly—use the type alias @@ -55,6 +57,37 @@ impl OwnedRepr { self.ptr } + /// Return end pointer + pub(crate) fn as_end_nonnull(&self) -> NonNull { + unsafe { + self.ptr.add(self.len) + } + } + + /// Reserve `additional` elements; return the new pointer + /// + /// ## Safety + /// + /// Note that existing pointers into the data are invalidated + #[must_use = "must use new pointer to update existing pointers"] + pub(crate) fn reserve(&mut self, additional: usize) -> NonNull { + self.modify_as_vec(|mut v| { + v.reserve(additional); + v + }); + self.as_nonnull_mut() + } + + /// Set the valid length of the data + /// + /// ## Safety + /// + /// The first `new_len` elements of the data should be valid. + pub(crate) unsafe fn set_len(&mut self, new_len: usize) { + debug_assert!(new_len <= self.capacity); + self.len = new_len; + } + /// Cast self into equivalent repr of other element type /// /// ## Safety @@ -72,6 +105,11 @@ impl OwnedRepr { } } + fn modify_as_vec(&mut self, f: impl FnOnce(Vec) -> Vec) { + let v = self.take_as_vec(); + *self = Self::from(f(v)); + } + fn take_as_vec(&mut self) -> Vec { let capacity = self.capacity; let len = self.len; diff --git a/src/impl_methods.rs b/src/impl_methods.rs index 03ca09d74..fccce3179 100644 --- a/src/impl_methods.rs +++ b/src/impl_methods.rs @@ -873,20 +873,39 @@ where /// ``` pub fn select(&self, axis: Axis, indices: &[Ix]) -> Array where - A: Copy, + A: Clone, S: Data, D: RemoveAxis, { - let mut subs = vec![self.view(); indices.len()]; - for (&i, sub) in zip(indices, &mut subs[..]) { - sub.collapse_axis(axis, i); - } - if subs.is_empty() { - let mut dim = self.raw_dim(); - dim.set_axis(axis, 0); - unsafe { Array::from_shape_vec_unchecked(dim, vec![]) } + if self.ndim() == 1 { + // using .len_of(axis) means that we check if `axis` is in bounds too. 
+ let axis_len = self.len_of(axis); + // bounds check the indices first + if let Some(max_index) = indices.iter().cloned().max() { + if max_index >= axis_len { + panic!("ndarray: index {} is out of bounds in array of len {}", + max_index, self.len_of(axis)); + } + } // else: indices empty is ok + let view = self.view().into_dimensionality::().unwrap(); + Array::from_iter(indices.iter().map(move |&index| { + // Safety: bounds checked indexes + unsafe { + view.uget(index).clone() + } + })).into_dimensionality::().unwrap() } else { - concatenate(axis, &subs).unwrap() + let mut subs = vec![self.view(); indices.len()]; + for (&i, sub) in zip(indices, &mut subs[..]) { + sub.collapse_axis(axis, i); + } + if subs.is_empty() { + let mut dim = self.raw_dim(); + dim.set_axis(axis, 0); + unsafe { Array::from_shape_vec_unchecked(dim, vec![]) } + } else { + concatenate(axis, &subs).unwrap() + } } } diff --git a/src/impl_owned_array.rs b/src/impl_owned_array.rs index 41eff2b11..a795a354a 100644 --- a/src/impl_owned_array.rs +++ b/src/impl_owned_array.rs @@ -1,7 +1,18 @@ use alloc::vec::Vec; +use std::mem::MaybeUninit; + +use rawpointer::PointerExt; + use crate::imp_prelude::*; +use crate::dimension; +use crate::error::{ErrorKind, ShapeError}; +use crate::iterators::Baseiter; +use crate::low_level_util::AbortIfPanic; +use crate::OwnedRepr; +use crate::Zip; + /// Methods specific to `Array0`. /// /// ***See also all methods for [`ArrayBase`]*** @@ -59,3 +70,609 @@ where self.data.into_vec() } } + +/// Methods specific to `Array2`. +/// +/// ***See also all methods for [`ArrayBase`]*** +/// +/// [`ArrayBase`]: struct.ArrayBase.html +impl Array { + /// Append a row to an array + /// + /// ***Errors*** with a shape error if the length of the row does not match the length of the + /// rows in the array.
+ /// + /// The memory layout of the `self` array matters for ensuring that the append is efficient. + /// Appending automatically changes memory layout of the array so that it is appended to + /// along the "growing axis". + /// + /// Ensure appending is efficient by, for example, appending to an empty array and then + /// always appending along the same axis. For rows, ndarray's default layout is efficient for + /// appending. + /// + /// Notice that an empty array (where it has an axis of length zero) is the simplest starting + /// point. When repeatedly appending to a single axis, the amortized average complexity of each append is O(m), where *m* is the length of + /// the row. + /// + /// ```rust + /// use ndarray::{Array, ArrayView, array}; + /// + /// // create an empty array and append + /// let mut a = Array::zeros((0, 4)); + /// a.append_row(ArrayView::from(&[ 1., 2., 3., 4.])).unwrap(); + /// a.append_row(ArrayView::from(&[-1., -2., -3., -4.])).unwrap(); + /// + /// assert_eq!( + /// a, + /// array![[ 1., 2., 3., 4.], + /// [-1., -2., -3., -4.]]); + /// ``` + pub fn append_row(&mut self, row: ArrayView) -> Result<(), ShapeError> + where + A: Clone, + { + self.append(Axis(0), row.insert_axis(Axis(0))) + } + + /// Append a column to an array + /// + /// ***Errors*** with a shape error if the length of the column does not match the length of + /// the columns in the array.
+ /// + /// The memory layout of the `self` array matters for ensuring that the append is efficient. + /// Appending automatically changes memory layout of the array so that it is appended to + /// along the "growing axis". + /// + /// Ensure appending is efficient by, for example, appending to an empty array and then + /// always appending along the same axis. For columns, column major ("F") memory layout is + /// efficient for appending. + /// + /// Notice that an empty array (where it has an axis of length zero) is the simplest starting + /// point. When repeatedly appending to a single axis, the amortized average complexity of each append is O(m), where *m* is the length of + /// the row. + /// + /// ```rust + /// use ndarray::{Array, ArrayView, array}; + /// + /// // create an empty array and append + /// let mut a = Array::zeros((2, 0)); + /// a.append_column(ArrayView::from(&[1., 2.])).unwrap(); + /// a.append_column(ArrayView::from(&[-1., -2.])).unwrap(); + /// + /// assert_eq!( + /// a, + /// array![[1., -1.], + /// [2., -2.]]); + /// ``` + pub fn append_column(&mut self, column: ArrayView) -> Result<(), ShapeError> + where + A: Clone, + { + self.append(Axis(1), column.insert_axis(Axis(1))) + } +} + +impl Array + where D: Dimension +{ + /// Move all elements from self into `new_array`, which must be of the same shape but + /// can have a different memory layout. The destination is overwritten completely. + /// + /// The destination should be a mut reference to an array or an `ArrayViewMut` with + /// `MaybeUninit
` elements (which are overwritten without dropping any existing value). + /// + /// Minor implementation note: Owned arrays like `self` may be sliced in place and own elements + /// that are not part of their active view; these are dropped at the end of this function, + /// after all elements in the "active view" are moved into `new_array`. If there is a panic in + /// drop of any such element, other elements may be leaked. + /// + /// ***Panics*** if the shapes don't agree. + pub fn move_into<'a, AM>(self, new_array: AM) + where + AM: Into, D>>, + A: 'a, + { + // Remove generic parameter P and call the implementation + self.move_into_impl(new_array.into()) + } + + fn move_into_impl(mut self, new_array: ArrayViewMut, D>) { + unsafe { + // Safety: copy_to_nonoverlapping cannot panic + let guard = AbortIfPanic(&"move_into: moving out of owned value"); + // Move all reachable elements + Zip::from(self.raw_view_mut()) + .and(new_array) + .for_each(|src, dst| { + src.copy_to_nonoverlapping(dst.as_mut_ptr(), 1); + }); + guard.defuse(); + // Drop all unreachable elements + self.drop_unreachable_elements(); + } + } + + /// This drops all "unreachable" elements in the data storage of self. + /// + /// That means those elements that are not visible in the slicing of the array. + /// *Reachable elements are assumed to already have been moved from.* + /// + /// # Safety + /// + /// This is a panic critical section since `self` is already moved-from. 
+ fn drop_unreachable_elements(mut self) -> OwnedRepr { + let self_len = self.len(); + + // "deconstruct" self; the owned repr releases ownership of all elements and we + // and carry on with raw view methods + let data_len = self.data.len(); + + let has_unreachable_elements = self_len != data_len; + if !has_unreachable_elements || std::mem::size_of::() == 0 { + unsafe { + self.data.set_len(0); + } + self.data + } else { + self.drop_unreachable_elements_slow() + } + } + + #[inline(never)] + #[cold] + fn drop_unreachable_elements_slow(mut self) -> OwnedRepr { + // "deconstruct" self; the owned repr releases ownership of all elements and we + // carry on with raw view methods + let self_len = self.len(); + let data_len = self.data.len(); + let data_ptr = self.data.as_nonnull_mut().as_ptr(); + + let mut self_; + + unsafe { + // Safety: self.data releases ownership of the elements. Any panics below this point + // will result in leaking elements instead of double drops. + self_ = self.raw_view_mut(); + self.data.set_len(0); + } + + + // uninvert axes where needed, so that stride > 0 + for i in 0..self_.ndim() { + if self_.stride_of(Axis(i)) < 0 { + self_.invert_axis(Axis(i)); + } + } + + // Sort axes to standard order, Axis(0) has biggest stride and Axis(n - 1) least stride + // Note that self_ has holes, so self_ is not C-contiguous + sort_axes_in_default_order(&mut self_); + + unsafe { + // with uninverted axes this is now the element with lowest address + let array_memory_head_ptr = self_.ptr.as_ptr(); + let data_end_ptr = data_ptr.add(data_len); + debug_assert!(data_ptr <= array_memory_head_ptr); + debug_assert!(array_memory_head_ptr <= data_end_ptr); + + // The idea is simply this: the iterator will yield the elements of self_ in + // increasing address order. + // + // The pointers produced by the iterator are those that we *do not* touch. + // The pointers *not mentioned* by the iterator are those we have to drop. 
+ // + // We have to drop elements in the range from `data_ptr` until (not including) + // `data_end_ptr`, except those that are produced by `iter`. + + // As an optimization, the innermost axis is removed if it has stride 1, because + // we then have a long stretch of contiguous elements we can skip as one. + let inner_lane_len; + if self_.ndim() > 1 && self_.strides.last_elem() == 1 { + self_.dim.slice_mut().rotate_right(1); + self_.strides.slice_mut().rotate_right(1); + inner_lane_len = self_.dim[0]; + self_.dim[0] = 1; + self_.strides[0] = 1; + } else { + inner_lane_len = 1; + } + + // iter is a raw pointer iterator traversing the array in memory order now with the + // sorted axes. + let mut iter = Baseiter::new(self_.ptr.as_ptr(), self_.dim, self_.strides); + let mut dropped_elements = 0; + + let mut last_ptr = data_ptr; + + while let Some(elem_ptr) = iter.next() { + // The interval from last_ptr up until (not including) elem_ptr + // should now be dropped. This interval may be empty, then we just skip this loop. 
+ while last_ptr != elem_ptr { + debug_assert!(last_ptr < data_end_ptr); + std::ptr::drop_in_place(last_ptr); + last_ptr = last_ptr.add(1); + dropped_elements += 1; + } + // Next interval will continue one past the current lane + last_ptr = elem_ptr.add(inner_lane_len); + } + + while last_ptr < data_end_ptr { + std::ptr::drop_in_place(last_ptr); + last_ptr = last_ptr.add(1); + dropped_elements += 1; + } + + assert_eq!(data_len, dropped_elements + self_len, + "Internal error: inconsistency in move_into"); + } + self.data + } + + /// Create an empty array with an all-zeros shape + /// + /// ***Panics*** if D is zero-dimensional, because it can't be empty + pub(crate) fn empty() -> Array { + assert_ne!(D::NDIM, Some(0)); + let ndim = D::NDIM.unwrap_or(1); + Array::from_shape_simple_fn(D::zeros(ndim), || unreachable!()) + } + + /// Create new_array with the right layout for appending to `growing_axis` + #[cold] + fn change_to_contig_append_layout(&mut self, growing_axis: Axis) { + let ndim = self.ndim(); + let mut dim = self.raw_dim(); + + // The array will be created with 0 (C) or ndim-1 (F) as the biggest stride + // axis. Rearrange the shape so that `growing_axis` is the biggest stride axis + // afterwards. + let mut new_array; + if growing_axis == Axis(ndim - 1) { + new_array = Self::uninit(dim.f()); + } else { + dim.slice_mut()[..=growing_axis.index()].rotate_right(1); + new_array = Self::uninit(dim); + new_array.dim.slice_mut()[..=growing_axis.index()].rotate_left(1); + new_array.strides.slice_mut()[..=growing_axis.index()].rotate_left(1); + } + + // self -> old_self. + // dummy array -> self. + // old_self elements are moved -> new_array. + let old_self = std::mem::replace(self, Self::empty()); + old_self.move_into(new_array.view_mut()); + + // new_array -> self. 
+ unsafe { + *self = new_array.assume_init(); + } + } + + + /// Append an array to the array + /// + /// The axis-to-append-to `axis` must be the array's "growing axis" for this operation + /// to succeed. The growing axis is the outermost or last-visited when elements are visited in + /// memory order: + /// + /// `axis` must be the growing axis of the current array, an axis with length 0 or 1. + /// + /// - This is the 0th axis for standard layout arrays + /// - This is the *n*-1 th axis for fortran layout arrays + /// - If the array is empty (the axis or any other has length 0) or if `axis` + /// has length 1, then the array can always be appended. + /// + /// ***Errors*** with a shape error if the shape of self does not match the array-to-append; + /// all axes *except* the axis along which it is being appended matter for this check. + /// + /// The memory layout of the `self` array matters for ensuring that the append is efficient. + /// Appending automatically changes memory layout of the array so that it is appended to + /// along the "growing axis". + /// + /// Ensure appending is efficient by, for example, starting from an empty array and/or always + /// appending to an array along the same axis. + /// + /// The amortized average complexity of the append, when appending along its growing axis, is + /// O(*m*) where *m* is the length of the row. + /// + /// The memory layout of the argument `array` does not matter to the same extent. 
+ /// + /// ```rust + /// use ndarray::{Array, ArrayView, array, Axis}; + /// + /// // create an empty array and append + /// let mut a = Array::zeros((0, 4)); + /// let ones = ArrayView::from(&[1.; 8]).into_shape((2, 4)).unwrap(); + /// let zeros = ArrayView::from(&[0.; 8]).into_shape((2, 4)).unwrap(); + /// a.append(Axis(0), ones).unwrap(); + /// a.append(Axis(0), zeros).unwrap(); + /// a.append(Axis(0), ones).unwrap(); + /// + /// assert_eq!( + /// a, + /// array![[1., 1., 1., 1.], + /// [1., 1., 1., 1.], + /// [0., 0., 0., 0.], + /// [0., 0., 0., 0.], + /// [1., 1., 1., 1.], + /// [1., 1., 1., 1.]]); + /// ``` + pub fn append(&mut self, axis: Axis, mut array: ArrayView) + -> Result<(), ShapeError> + where + A: Clone, + D: RemoveAxis, + { + if self.ndim() == 0 { + return Err(ShapeError::from_kind(ErrorKind::IncompatibleShape)); + } + + let current_axis_len = self.len_of(axis); + let remaining_shape = self.raw_dim().remove_axis(axis); + let array_rem_shape = array.raw_dim().remove_axis(axis); + + if remaining_shape != array_rem_shape { + return Err(ShapeError::from_kind(ErrorKind::IncompatibleShape)); + } + + let len_to_append = array.len(); + + let array_shape = array.raw_dim(); + let mut res_dim = self.raw_dim(); + res_dim[axis.index()] += array_shape[axis.index()]; + let new_len = dimension::size_of_shape_checked(&res_dim)?; + + if len_to_append == 0 { + // There are no elements to append and shapes are compatible: + // either the dimension increment is zero, or there is an existing + // zero in another axis in self. 
+ debug_assert_eq!(self.len(), new_len); + self.dim = res_dim; + return Ok(()); + } + + let self_is_empty = self.is_empty(); + let mut incompatible_layout = false; + + // array must be empty or have `axis` as the outermost (longest stride) axis + if !self_is_empty && current_axis_len > 1 { + // `axis` must be max stride axis or equal to its stride + let axis_stride = self.stride_of(axis); + if axis_stride < 0 { + incompatible_layout = true; + } else { + for ax in self.axes() { + if ax.axis == axis { + continue; + } + if ax.len > 1 && ax.stride.abs() > axis_stride { + incompatible_layout = true; + break; + } + } + } + } + + // array must be "full" (contiguous and have no exterior holes) + if self.len() != self.data.len() { + incompatible_layout = true; + } + + if incompatible_layout { + self.change_to_contig_append_layout(axis); + // safety-check parameters after remodeling + debug_assert_eq!(self_is_empty, self.is_empty()); + debug_assert_eq!(current_axis_len, self.len_of(axis)); + } + + let strides = if self_is_empty { + // recompute strides - if the array was previously empty, it could have zeros in + // strides. + // The new order is based on c/f-contig but must have `axis` as outermost axis. + if axis == Axis(self.ndim() - 1) { + // prefer f-contig when appending to the last axis + // Axis n - 1 is outermost axis + res_dim.fortran_strides() + } else { + // standard axis order except for the growing axis; + // anticipates that it's likely that `array` has standard order apart from the + // growing axis. 
+ res_dim.slice_mut()[..=axis.index()].rotate_right(1); + let mut strides = res_dim.default_strides(); + res_dim.slice_mut()[..=axis.index()].rotate_left(1); + strides.slice_mut()[..=axis.index()].rotate_left(1); + strides + } + } else if current_axis_len == 1 { + // This is the outermost/longest stride axis; so we find the max across the other axes + let new_stride = self.axes().fold(1, |acc, ax| { + if ax.axis == axis || ax.len <= 1 { + acc + } else { + let this_ax = ax.len as isize * ax.stride.abs(); + if this_ax > acc { this_ax } else { acc } + } + }); + let mut strides = self.strides.clone(); + strides[axis.index()] = new_stride as usize; + strides + } else { + self.strides.clone() + }; + + unsafe { + // grow backing storage and update head ptr + let data_to_array_offset = if std::mem::size_of::() != 0 { + self.as_ptr().offset_from(self.data.as_ptr()) + } else { + 0 + }; + debug_assert!(data_to_array_offset >= 0); + self.ptr = self.data.reserve(len_to_append).offset(data_to_array_offset); + + // clone elements from view to the array now + // + // To be robust for panics and drop the right elements, we want + // to fill the tail in memory order, so that we can drop the right elements on panic. + // + // We have: Zip::from(tail_view).and(array) + // Transform tail_view into standard order by inverting and moving its axes. + // Keep the Zip traversal unchanged by applying the same axis transformations to + // `array`. This ensures the Zip traverses the underlying memory in order. + // + // XXX It would be possible to skip this transformation if the element + // doesn't have drop. However, in the interest of code coverage, all elements + // use this code initially. 
+ + // Invert axes in tail_view by inverting strides + let mut tail_strides = strides.clone(); + if tail_strides.ndim() > 1 { + for i in 0..tail_strides.ndim() { + let s = tail_strides[i] as isize; + if s < 0 { + tail_strides.set_axis(Axis(i), -s as usize); + array.invert_axis(Axis(i)); + } + } + } + + // With > 0 strides, the current end of data is the correct base pointer for tail_view + let tail_ptr = self.data.as_end_nonnull(); + let mut tail_view = RawArrayViewMut::new(tail_ptr, array_shape, tail_strides); + + if tail_view.ndim() > 1 { + sort_axes_in_default_order_tandem(&mut tail_view, &mut array); + debug_assert!(tail_view.is_standard_layout(), + "not std layout dim: {:?}, strides: {:?}", + tail_view.shape(), tail_view.strides()); + } + + // Keep track of currently filled length of `self.data` and update it + // on scope exit (panic or loop finish). This "indirect" way to + // write the length is used to help the compiler, the len store to self.data may + // otherwise be mistaken to alias with other stores in the loop. 
+ struct SetLenOnDrop<'a, A: 'a> { + len: usize, + data: &'a mut OwnedRepr, + } + + impl Drop for SetLenOnDrop<'_, A> { + fn drop(&mut self) { + unsafe { + self.data.set_len(self.len); + } + } + } + + let mut data_length_guard = SetLenOnDrop { + len: self.data.len(), + data: &mut self.data, + }; + + + // Safety: tail_view is constructed to have the same shape as array + Zip::from(tail_view) + .and_unchecked(array) + .debug_assert_c_order() + .for_each(|to, from| { + to.write(from.clone()); + data_length_guard.len += 1; + }); + drop(data_length_guard); + + // update array dimension + self.strides = strides; + self.dim = res_dim; + } + // multiple assertions after pointer & dimension update + debug_assert_eq!(self.data.len(), self.len()); + debug_assert_eq!(self.len(), new_len); + debug_assert!(self.pointer_is_inbounds()); + + Ok(()) + } +} + +/// Sort axes to standard order, i.e Axis(0) has biggest stride and Axis(n - 1) least stride +/// +/// The axes should have stride >= 0 before calling this method. +fn sort_axes_in_default_order(a: &mut ArrayBase) +where + S: RawData, + D: Dimension, +{ + if a.ndim() <= 1 { + return; + } + sort_axes1_impl(&mut a.dim, &mut a.strides); +} + +fn sort_axes1_impl(adim: &mut D, astrides: &mut D) +where + D: Dimension, +{ + debug_assert!(adim.ndim() > 1); + debug_assert_eq!(adim.ndim(), astrides.ndim()); + // bubble sort axes + let mut changed = true; + while changed { + changed = false; + for i in 0..adim.ndim() - 1 { + let axis_i = i; + let next_axis = i + 1; + + // make sure higher stride axes sort before. 
+ debug_assert!(astrides.slice()[axis_i] as isize >= 0); + if (astrides.slice()[axis_i] as isize) < astrides.slice()[next_axis] as isize { + changed = true; + adim.slice_mut().swap(axis_i, next_axis); + astrides.slice_mut().swap(axis_i, next_axis); + } + } + } +} + + +/// Sort axes to standard order, i.e Axis(0) has biggest stride and Axis(n - 1) least stride +/// +/// Axes in a and b are sorted by the strides of `a`, and `a`'s axes should have stride >= 0 before +/// calling this method. +fn sort_axes_in_default_order_tandem(a: &mut ArrayBase, b: &mut ArrayBase) +where + S: RawData, + S2: RawData, + D: Dimension, +{ + if a.ndim() <= 1 { + return; + } + sort_axes2_impl(&mut a.dim, &mut a.strides, &mut b.dim, &mut b.strides); +} + +fn sort_axes2_impl(adim: &mut D, astrides: &mut D, bdim: &mut D, bstrides: &mut D) +where + D: Dimension, +{ + debug_assert!(adim.ndim() > 1); + debug_assert_eq!(adim.ndim(), bdim.ndim()); + // bubble sort axes + let mut changed = true; + while changed { + changed = false; + for i in 0..adim.ndim() - 1 { + let axis_i = i; + let next_axis = i + 1; + + // make sure higher stride axes sort before. 
+ debug_assert!(astrides.slice()[axis_i] as isize >= 0); + if (astrides.slice()[axis_i] as isize) < astrides.slice()[next_axis] as isize { + changed = true; + adim.slice_mut().swap(axis_i, next_axis); + astrides.slice_mut().swap(axis_i, next_axis); + bdim.slice_mut().swap(axis_i, next_axis); + bstrides.slice_mut().swap(axis_i, next_axis); + } + } + } +} diff --git a/src/layout/mod.rs b/src/layout/mod.rs index d4271dd77..e7434fbc1 100644 --- a/src/layout/mod.rs +++ b/src/layout/mod.rs @@ -58,6 +58,7 @@ impl Layout { /// A simple "score" method which scores positive for preferring C-order, negative for F-order /// Subject to change when we can describe other layouts + #[inline] pub(crate) fn tendency(self) -> i32 { (self.is(CORDER) as i32 - self.is(FORDER) as i32) + (self.is(CPREFER) as i32 - self.is(FPREFER) as i32) diff --git a/src/lib.rs b/src/lib.rs index c4a0812a3..06aca0150 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -12,6 +12,8 @@ clippy::deref_addrof, clippy::unreadable_literal, clippy::manual_map, // is not an error + clippy::while_let_on_iterator, // is not an error + clippy::from_iter_instead_of_collect, // using from_iter is good style )] #![cfg_attr(not(feature = "std"), no_std)] @@ -235,8 +237,8 @@ pub type Ixs = isize; /// An *n*-dimensional array. /// -/// The array is a general container of elements. It cannot grow or shrink, but -/// can be sliced into subsets of its data. +/// The array is a general container of elements. It cannot grow or shrink (with some exceptions), +/// but can be sliced into subsets of its data. /// The array supports arithmetic operations by applying them elementwise. /// /// In *n*-dimensional we include for example 1-dimensional rows or columns, diff --git a/src/stacking.rs b/src/stacking.rs index 500ded6af..fb05c1963 100644 --- a/src/stacking.rs +++ b/src/stacking.rs @@ -6,6 +6,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use alloc::vec::Vec; + +use crate::dimension; use crate::error::{from_kind, ErrorKind, ShapeError}; use crate::imp_prelude::*; @@ -38,7 +41,7 @@ pub fn stack( arrays: &[ArrayView], ) -> Result, ShapeError> where - A: Copy, + A: Clone, D: Dimension, D::Larger: RemoveAxis, { @@ -68,7 +71,7 @@ where /// ``` pub fn concatenate(axis: Axis, arrays: &[ArrayView]) -> Result, ShapeError> where - A: Copy, + A: Clone, D: RemoveAxis, { if arrays.is_empty() { @@ -88,24 +91,21 @@ where let stacked_dim = arrays.iter().fold(0, |acc, a| acc + a.len_of(axis)); res_dim.set_axis(axis, stacked_dim); + let new_len = dimension::size_of_shape_checked(&res_dim)?; - // we can safely use uninitialized values here because we will - // overwrite every one of them. - let mut res = Array::uninit(res_dim); + // start with empty array with precomputed capacity + // append's handling of empty arrays makes sure `axis` is ok for appending + res_dim.set_axis(axis, 0); + let mut res = unsafe { + // Safety: dimension is size 0 and vec is empty + Array::from_shape_vec_unchecked(res_dim, Vec::with_capacity(new_len)) + }; - { - let mut assign_view = res.view_mut(); - for array in arrays { - let len = array.len_of(axis); - let (front, rest) = assign_view.split_at(axis, len); - array.assign_to(front); - assign_view = rest; - } - debug_assert_eq!(assign_view.len(), 0); - } - unsafe { - Ok(res.assume_init()) + for array in arrays { + res.append(axis, array.clone())?; } + debug_assert_eq!(res.len_of(axis), stacked_dim); + Ok(res) } #[deprecated(note="Use under the name stack instead.", since="0.15.0")] @@ -138,7 +138,7 @@ pub fn stack_new_axis( arrays: &[ArrayView], ) -> Result, ShapeError> where - A: Copy, + A: Clone, D: Dimension, D::Larger: RemoveAxis, { @@ -158,24 +158,22 @@ where res_dim.set_axis(axis, arrays.len()); - // we can safely use uninitialized values here because we will - // overwrite every one of them. 
- let mut res = Array::uninit(res_dim); + let new_len = dimension::size_of_shape_checked(&res_dim)?; - res.axis_iter_mut(axis) - .zip(arrays.iter()) - .for_each(|(assign_view, array)| { - // assign_view is D::Larger::Smaller which is usually == D - // (but if D is Ix6, we have IxD != Ix6 here; differing types - // but same number of axes). - let assign_view = assign_view.into_dimensionality::() - .expect("same-dimensionality cast"); - array.assign_to(assign_view); - }); + // start with empty array with precomputed capacity + // append's handling of empty arrays makes sure `axis` is ok for appending + res_dim.set_axis(axis, 0); + let mut res = unsafe { + // Safety: dimension is size 0 and vec is empty + Array::from_shape_vec_unchecked(res_dim, Vec::with_capacity(new_len)) + }; - unsafe { - Ok(res.assume_init()) + for array in arrays { + res.append(axis, array.clone().insert_axis(axis))?; } + + debug_assert_eq!(res.len_of(axis), arrays.len()); + Ok(res) } /// Stack arrays along the new axis. diff --git a/src/zip/mod.rs b/src/zip/mod.rs index 5f9b15c5a..18d07ddfd 100644 --- a/src/zip/mod.rs +++ b/src/zip/mod.rs @@ -240,22 +240,25 @@ where } } -impl Zip +#[inline] +fn zip_dimension_check(dimension: &D, part: &P) where D: Dimension, + P: NdProducer, { - fn check

(&self, part: &P) - where - P: NdProducer, - { - ndassert!( - part.equal_dim(&self.dimension), - "Zip: Producer dimension mismatch, expected: {:?}, got: {:?}", - self.dimension, - part.raw_dim() - ); - } + ndassert!( + part.equal_dim(&dimension), + "Zip: Producer dimension mismatch, expected: {:?}, got: {:?}", + dimension, + part.raw_dim() + ); +} + +impl Zip +where + D: Dimension, +{ /// Return the number of element tuples in the Zip pub fn size(&self) -> usize { self.dimension.size() @@ -427,6 +430,26 @@ where } } +impl Zip<(P1, P2), D> +where + D: Dimension, + P1: NdProducer, + P2: NdProducer, +{ + /// Debug assert traversal order is like c (including 1D case) + // Method placement: only used for binary Zip at the moment. + #[inline] + pub(crate) fn debug_assert_c_order(self) -> Self { + debug_assert!(self.layout.is(CORDER) || self.layout_tendency >= 0 || + self.dimension.slice().iter().filter(|&&d| d > 1).count() <= 1, + "Assertion failed: traversal is not c-order or 1D for \ + layout {:?}, tendency {}, dimension {:?}", + self.layout, self.layout_tendency, self.dimension); + self + } +} + + /* trait Offset : Copy { unsafe fn offset(self, off: isize) -> Self; @@ -652,10 +675,30 @@ macro_rules! map_impl { where P: IntoNdProducer, { let part = p.into_producer(); - self.check(&part); + zip_dimension_check(&self.dimension, &part); self.build_and(part) } + /// Include the producer `p` in the Zip. + /// + /// ## Safety + /// + /// The caller must ensure that the producer's shape is equal to the Zip's shape. + /// Uses assertions when debug assertions are enabled. + #[allow(unused)] + pub(crate) unsafe fn and_unchecked

(self, p: P) -> Zip<($($p,)* P::Output, ), D> + where P: IntoNdProducer, + { + #[cfg(debug_assertions)] + { + self.and(p) + } + #[cfg(not(debug_assertions))] + { + self.build_and(p.into_producer()) + } + } + /// Include the producer `p` in the Zip, broadcasting if needed. /// /// If their shapes disagree, `rhs` is broadcast to the shape of `self`. diff --git a/tests/append.rs b/tests/append.rs new file mode 100644 index 000000000..6306b2ecf --- /dev/null +++ b/tests/append.rs @@ -0,0 +1,378 @@ + +use ndarray::prelude::*; +use ndarray::{ShapeError, ErrorKind}; + +#[test] +fn append_row() { + let mut a = Array::zeros((0, 4)); + a.append_row(aview1(&[0., 1., 2., 3.])).unwrap(); + a.append_row(aview1(&[4., 5., 6., 7.])).unwrap(); + assert_eq!(a.shape(), &[2, 4]); + + assert_eq!(a, + array![[0., 1., 2., 3.], + [4., 5., 6., 7.]]); + + assert_eq!(a.append_row(aview1(&[1.])), + Err(ShapeError::from_kind(ErrorKind::IncompatibleShape))); + assert_eq!(a.append_column(aview1(&[1.])), + Err(ShapeError::from_kind(ErrorKind::IncompatibleShape))); + assert_eq!(a.append_column(aview1(&[1., 2.])), + Ok(())); + assert_eq!(a, + array![[0., 1., 2., 3., 1.], + [4., 5., 6., 7., 2.]]); +} + +#[test] +fn append_row_wrong_layout() { + let mut a = Array::zeros((0, 4)); + a.append_row(aview1(&[0., 1., 2., 3.])).unwrap(); + a.append_row(aview1(&[4., 5., 6., 7.])).unwrap(); + assert_eq!(a.shape(), &[2, 4]); + + assert_eq!(a, + array![[0., 1., 2., 3.], + [4., 5., 6., 7.]]); + assert_eq!(a.strides(), &[4, 1]); + + // Changing the memory layout to fit the next append + let mut a2 = a.clone(); + a2.append_column(aview1(&[1., 2.])).unwrap(); + assert_eq!(a2, + array![[0., 1., 2., 3., 1.], + [4., 5., 6., 7., 2.]]); + assert_eq!(a2.strides(), &[1, 2]); + + + // Clone the array + + let mut dim = a.raw_dim(); + dim[1] = 0; + let mut b = Array::zeros(dim); + b.append(Axis(1), a.view()).unwrap(); + assert_eq!(b.append_column(aview1(&[1., 2.])), Ok(())); + assert_eq!(b, + array![[0., 1., 2., 3., 1.], + 
[4., 5., 6., 7., 2.]]); +} + +#[test] +fn append_row_neg_stride_1() { + let mut a = Array::zeros((0, 4)); + a.append_row(aview1(&[0., 1., 2., 3.])).unwrap(); + a.append_row(aview1(&[4., 5., 6., 7.])).unwrap(); + assert_eq!(a.shape(), &[2, 4]); + + assert_eq!(a, + array![[0., 1., 2., 3.], + [4., 5., 6., 7.]]); + assert_eq!(a.strides(), &[4, 1]); + + a.invert_axis(Axis(0)); + + // Changing the memory layout to fit the next append + let mut a2 = a.clone(); + println!("a = {:?}", a); + println!("a2 = {:?}", a2); + a2.append_column(aview1(&[1., 2.])).unwrap(); + assert_eq!(a2, + array![[4., 5., 6., 7., 1.], + [0., 1., 2., 3., 2.]]); + assert_eq!(a2.strides(), &[1, 2]); + + a.invert_axis(Axis(1)); + let mut a3 = a.clone(); + a3.append_row(aview1(&[4., 5., 6., 7.])).unwrap(); + assert_eq!(a3, + array![[7., 6., 5., 4.], + [3., 2., 1., 0.], + [4., 5., 6., 7.]]); + assert_eq!(a3.strides(), &[4, 1]); + + a.invert_axis(Axis(0)); + let mut a4 = a.clone(); + a4.append_row(aview1(&[4., 5., 6., 7.])).unwrap(); + assert_eq!(a4, + array![[3., 2., 1., 0.], + [7., 6., 5., 4.], + [4., 5., 6., 7.]]); + assert_eq!(a4.strides(), &[4, -1]); +} + +#[test] +fn append_row_neg_stride_2() { + let mut a = Array::zeros((0, 4)); + a.append_row(aview1(&[0., 1., 2., 3.])).unwrap(); + a.append_row(aview1(&[4., 5., 6., 7.])).unwrap(); + assert_eq!(a.shape(), &[2, 4]); + + assert_eq!(a, + array![[0., 1., 2., 3.], + [4., 5., 6., 7.]]); + assert_eq!(a.strides(), &[4, 1]); + + a.invert_axis(Axis(1)); + + // Changing the memory layout to fit the next append + let mut a2 = a.clone(); + println!("a = {:?}", a); + println!("a2 = {:?}", a2); + a2.append_column(aview1(&[1., 2.])).unwrap(); + assert_eq!(a2, + array![[3., 2., 1., 0., 1.], + [7., 6., 5., 4., 2.]]); + assert_eq!(a2.strides(), &[1, 2]); + + a.invert_axis(Axis(0)); + let mut a3 = a.clone(); + a3.append_row(aview1(&[4., 5., 6., 7.])).unwrap(); + assert_eq!(a3, + array![[7., 6., 5., 4.], + [3., 2., 1., 0.], + [4., 5., 6., 7.]]); + 
assert_eq!(a3.strides(), &[4, 1]); + + a.invert_axis(Axis(1)); + let mut a4 = a.clone(); + a4.append_row(aview1(&[4., 5., 6., 7.])).unwrap(); + assert_eq!(a4, + array![[4., 5., 6., 7.], + [0., 1., 2., 3.], + [4., 5., 6., 7.]]); + assert_eq!(a4.strides(), &[4, 1]); +} + +#[test] +fn append_row_error() { + let mut a = Array::zeros((3, 4)); + + assert_eq!(a.append_row(aview1(&[1.])), + Err(ShapeError::from_kind(ErrorKind::IncompatibleShape))); + assert_eq!(a.append_column(aview1(&[1.])), + Err(ShapeError::from_kind(ErrorKind::IncompatibleShape))); + assert_eq!(a.append_column(aview1(&[1., 2., 3.])), + Ok(())); + assert_eq!(a.t(), + array![[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.], + [1., 2., 3.]]); +} + +#[test] +fn append_row_existing() { + let mut a = Array::zeros((1, 4)); + a.append_row(aview1(&[0., 1., 2., 3.])).unwrap(); + a.append_row(aview1(&[4., 5., 6., 7.])).unwrap(); + assert_eq!(a.shape(), &[3, 4]); + + assert_eq!(a, + array![[0., 0., 0., 0.], + [0., 1., 2., 3.], + [4., 5., 6., 7.]]); + + assert_eq!(a.append_row(aview1(&[1.])), + Err(ShapeError::from_kind(ErrorKind::IncompatibleShape))); + assert_eq!(a.append_column(aview1(&[1.])), + Err(ShapeError::from_kind(ErrorKind::IncompatibleShape))); + assert_eq!(a.append_column(aview1(&[1., 2., 3.])), + Ok(())); + assert_eq!(a, + array![[0., 0., 0., 0., 1.], + [0., 1., 2., 3., 2.], + [4., 5., 6., 7., 3.]]); +} + +#[test] +fn append_row_col_len_1() { + // Test appending 1 row and then cols from shape 1 x 1 + let mut a = Array::zeros((1, 1)); + a.append_row(aview1(&[1.])).unwrap(); // shape 2 x 1 + a.append_column(aview1(&[2., 3.])).unwrap(); // shape 2 x 2 + assert_eq!(a.append_row(aview1(&[1.])), + Err(ShapeError::from_kind(ErrorKind::IncompatibleShape))); + //assert_eq!(a.append_row(aview1(&[1., 2.])), Err(ShapeError::from_kind(ErrorKind::IncompatibleLayout))); + a.append_column(aview1(&[4., 5.])).unwrap(); // shape 2 x 3 + assert_eq!(a.shape(), &[2, 3]); + + assert_eq!(a, + array![[0., 2., 4.], 
+ [1., 3., 5.]]); +} + +#[test] +fn append_column() { + let mut a = Array::zeros((4, 0)); + a.append_column(aview1(&[0., 1., 2., 3.])).unwrap(); + a.append_column(aview1(&[4., 5., 6., 7.])).unwrap(); + assert_eq!(a.shape(), &[4, 2]); + + assert_eq!(a.t(), + array![[0., 1., 2., 3.], + [4., 5., 6., 7.]]); +} + +#[test] +fn append_array1() { + let mut a = Array::zeros((0, 4)); + a.append(Axis(0), aview2(&[[0., 1., 2., 3.]])).unwrap(); + println!("{:?}", a); + a.append(Axis(0), aview2(&[[4., 5., 6., 7.]])).unwrap(); + println!("{:?}", a); + //a.append_column(aview1(&[4., 5., 6., 7.])).unwrap(); + //assert_eq!(a.shape(), &[4, 2]); + + assert_eq!(a, + array![[0., 1., 2., 3.], + [4., 5., 6., 7.]]); + + a.append(Axis(0), aview2(&[[5., 5., 4., 4.], [3., 3., 2., 2.]])).unwrap(); + println!("{:?}", a); + assert_eq!(a, + array![[0., 1., 2., 3.], + [4., 5., 6., 7.], + [5., 5., 4., 4.], + [3., 3., 2., 2.]]); +} + +#[test] +fn append_array_3d() { + let mut a = Array::zeros((0, 2, 2)); + a.append(Axis(0), array![[[0, 1], [2, 3]]].view()).unwrap(); + println!("{:?}", a); + + let aa = array![[[51, 52], [53, 54]], [[55, 56], [57, 58]]]; + let av = aa.view(); + println!("Send {:?} to append", av); + a.append(Axis(0), av.clone()).unwrap(); + + a.swap_axes(0, 1); + let aa = array![[[71, 72], [73, 74]], [[75, 76], [77, 78]]]; + let mut av = aa.view(); + av.swap_axes(0, 1); + println!("Send {:?} to append", av); + a.append(Axis(1), av.clone()).unwrap(); + println!("{:?}", a); + let aa = array![[[81, 82], [83, 84]], [[85, 86], [87, 88]]]; + let mut av = aa.view(); + av.swap_axes(0, 1); + println!("Send {:?} to append", av); + a.append(Axis(1), av).unwrap(); + println!("{:?}", a); + assert_eq!(a, + array![[[0, 1], + [51, 52], + [55, 56], + [71, 72], + [75, 76], + [81, 82], + [85, 86]], + [[2, 3], + [53, 54], + [57, 58], + [73, 74], + [77, 78], + [83, 84], + [87, 88]]]); +} + +#[test] +fn test_append_2d() { + // create an empty array and append + let mut a = Array::zeros((0, 4)); + let ones 
= ArrayView::from(&[1.; 12]).into_shape((3, 4)).unwrap(); + let zeros = ArrayView::from(&[0.; 8]).into_shape((2, 4)).unwrap(); + a.append(Axis(0), ones).unwrap(); + a.append(Axis(0), zeros).unwrap(); + a.append(Axis(0), ones).unwrap(); + println!("{:?}", a); + assert_eq!(a.shape(), &[8, 4]); + for (i, row) in a.rows().into_iter().enumerate() { + let ones = i < 3 || i >= 5; + assert!(row.iter().all(|&x| x == ones as i32 as f64), "failed on lane {}", i); + } + + let mut a = Array::zeros((0, 4)); + a = a.reversed_axes(); + let ones = ones.reversed_axes(); + let zeros = zeros.reversed_axes(); + a.append(Axis(1), ones).unwrap(); + a.append(Axis(1), zeros).unwrap(); + a.append(Axis(1), ones).unwrap(); + println!("{:?}", a); + assert_eq!(a.shape(), &[4, 8]); + + for (i, row) in a.columns().into_iter().enumerate() { + let ones = i < 3 || i >= 5; + assert!(row.iter().all(|&x| x == ones as i32 as f64), "failed on lane {}", i); + } +} + +#[test] +fn test_append_middle_axis() { + // ensure we can append to Axis(1) by letting it become outermost + let mut a = Array::::zeros((3, 0, 2)); + a.append(Axis(1), Array::from_iter(0..12).into_shape((3, 2, 2)).unwrap().view()).unwrap(); + println!("{:?}", a); + a.append(Axis(1), Array::from_iter(12..24).into_shape((3, 2, 2)).unwrap().view()).unwrap(); + println!("{:?}", a); + + // ensure we can append to Axis(1) by letting it become outermost + let mut a = Array::::zeros((3, 1, 2)); + a.append(Axis(1), Array::from_iter(0..12).into_shape((3, 2, 2)).unwrap().view()).unwrap(); + println!("{:?}", a); + a.append(Axis(1), Array::from_iter(12..24).into_shape((3, 2, 2)).unwrap().view()).unwrap(); + println!("{:?}", a); +} + +#[test] +fn test_append_zero_size() { + { + let mut a = Array::::zeros((0, 0)); + a.append(Axis(0), aview2(&[[]])).unwrap(); + a.append(Axis(0), aview2(&[[]])).unwrap(); + assert_eq!(a.len(), 0); + assert_eq!(a.shape(), &[2, 0]); + } + + { + let mut a = Array::::zeros((0, 0)); + a.append(Axis(1), 
ArrayView::from(&[]).into_shape((0, 1)).unwrap()).unwrap(); + a.append(Axis(1), ArrayView::from(&[]).into_shape((0, 1)).unwrap()).unwrap(); + assert_eq!(a.len(), 0); + assert_eq!(a.shape(), &[0, 2]); + } +} + +#[test] +fn append_row_neg_stride_3() { + let mut a = Array::zeros((0, 4)); + a.append_row(aview1(&[0., 1., 2., 3.])).unwrap(); + a.invert_axis(Axis(1)); + a.append_row(aview1(&[4., 5., 6., 7.])).unwrap(); + assert_eq!(a.shape(), &[2, 4]); + assert_eq!(a, array![[3., 2., 1., 0.], [4., 5., 6., 7.]]); + assert_eq!(a.strides(), &[4, -1]); +} + +#[test] +fn append_row_ignore_strides_length_one_axes() { + let strides = &[0, 1, 10, 20]; + for invert in &[vec![], vec![0], vec![1], vec![0, 1]] { + for &stride0 in strides { + for &stride1 in strides { + let mut a = + Array::from_shape_vec([1, 1].strides([stride0, stride1]), vec![0.]).unwrap(); + for &ax in invert { + a.invert_axis(Axis(ax)); + } + a.append_row(aview1(&[1.])).unwrap(); + assert_eq!(a.shape(), &[2, 1]); + assert_eq!(a, array![[0.], [1.]]); + assert_eq!(a.stride_of(Axis(0)), 1); + } + } + } +} diff --git a/tests/array.rs b/tests/array.rs index 38d2711aa..976824dfe 100644 --- a/tests/array.rs +++ b/tests/array.rs @@ -709,6 +709,19 @@ fn test_select() { assert_abs_diff_eq!(c, c_target); } +#[test] +fn test_select_1d() { + let x = arr1(&[0, 1, 2, 3, 4, 5, 6]); + let r1 = x.select(Axis(0), &[1, 3, 4, 2, 2, 5]); + assert_eq!(r1, arr1(&[1, 3, 4, 2, 2, 5])); + // select nothing + let r2 = x.select(Axis(0), &[]); + assert_eq!(r2, arr1(&[])); + // select nothing from empty + let r3 = r2.select(Axis(0), &[]); + assert_eq!(r3, arr1(&[])); +} + #[test] fn diag() { let d = arr2(&[[1., 2., 3.0f32]]).into_diag(); diff --git a/tests/assign.rs b/tests/assign.rs new file mode 100644 index 000000000..19156cce8 --- /dev/null +++ b/tests/assign.rs @@ -0,0 +1,237 @@ +use ndarray::prelude::*; + +use std::sync::atomic::{AtomicUsize, Ordering}; + +#[test] +fn assign() { + let mut a = arr2(&[[1., 2.], [3., 4.]]); + let b = 
arr2(&[[1., 3.], [2., 4.]]); + a.assign(&b); + assert_eq!(a, b); + + /* Test broadcasting */ + a.assign(&ArcArray::zeros(1)); + assert_eq!(a, ArcArray::zeros((2, 2))); + + /* Test other type */ + a.assign(&Array::from_elem((2, 2), 3.)); + assert_eq!(a, ArcArray::from_elem((2, 2), 3.)); + + /* Test mut view */ + let mut a = arr2(&[[1, 2], [3, 4]]); + { + let mut v = a.view_mut(); + v.slice_collapse(s![..1, ..]); + v.fill(0); + } + assert_eq!(a, arr2(&[[0, 0], [3, 4]])); +} + + +#[test] +fn assign_to() { + let mut a = arr2(&[[1., 2.], [3., 4.]]); + let b = arr2(&[[0., 3.], [2., 0.]]); + b.assign_to(&mut a); + assert_eq!(a, b); +} + +#[test] +fn move_into_copy() { + let a = arr2(&[[1., 2.], [3., 4.]]); + let acopy = a.clone(); + let mut b = Array::uninit(a.dim()); + a.move_into(b.view_mut()); + let b = unsafe { b.assume_init() }; + assert_eq!(acopy, b); + + let a = arr2(&[[1., 2.], [3., 4.]]).reversed_axes(); + let acopy = a.clone(); + let mut b = Array::uninit(a.dim()); + a.move_into(b.view_mut()); + let b = unsafe { b.assume_init() }; + assert_eq!(acopy, b); +} + +#[test] +fn move_into_owned() { + // Test various memory layouts and holes while moving String elements. + for &use_f_order in &[false, true] { + for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { // bitmask for axis to invert + for &slice in &[false, true] { + let mut a = Array::from_shape_fn((5, 4).set_f(use_f_order), + |idx| format!("{:?}", idx)); + if slice { + a.slice_collapse(s![1..-1, ..;2]); + } + + if invert_axis & 0b01 != 0 { + a.invert_axis(Axis(0)); + } + if invert_axis & 0b10 != 0 { + a.invert_axis(Axis(1)); + } + + let acopy = a.clone(); + let mut b = Array::uninit(a.dim()); + a.move_into(b.view_mut()); + let b = unsafe { b.assume_init() }; + + assert_eq!(acopy, b); + } + } + } +} + +#[test] +fn move_into_slicing() { + // Count correct number of drops when using move_into and discontiguous arrays (with holes). 
+ for &use_f_order in &[false, true] { + for &invert_axis in &[0b00, 0b01, 0b10, 0b11] { // bitmask for axis to invert + let counter = DropCounter::default(); + { + let (m, n) = (5, 4); + + let mut a = Array::from_shape_fn((m, n).set_f(use_f_order), |_idx| counter.element()); + a.slice_collapse(s![1..-1, ..;2]); + if invert_axis & 0b01 != 0 { + a.invert_axis(Axis(0)); + } + if invert_axis & 0b10 != 0 { + a.invert_axis(Axis(1)); + } + + let mut b = Array::uninit(a.dim()); + a.move_into(b.view_mut()); + let b = unsafe { b.assume_init() }; + + let total = m * n; + let dropped_1 = total - (m - 2) * (n - 2); + assert_eq!(counter.created(), total); + assert_eq!(counter.dropped(), dropped_1); + drop(b); + } + counter.assert_drop_count(); + } + } +} + +#[test] +fn move_into_diag() { + // Count correct number of drops when using move_into and discontiguous arrays (with holes). + for &use_f_order in &[false, true] { + let counter = DropCounter::default(); + { + let (m, n) = (5, 4); + + let a = Array::from_shape_fn((m, n).set_f(use_f_order), |_idx| counter.element()); + let a = a.into_diag(); + + let mut b = Array::uninit(a.dim()); + a.move_into(b.view_mut()); + let b = unsafe { b.assume_init() }; + + let total = m * n; + let dropped_1 = total - Ord::min(m, n); + assert_eq!(counter.created(), total); + assert_eq!(counter.dropped(), dropped_1); + drop(b); + } + counter.assert_drop_count(); + } +} + +#[test] +fn move_into_0dim() { + // Count correct number of drops when using move_into and discontiguous arrays (with holes). 
+ for &use_f_order in &[false, true] { + let counter = DropCounter::default(); + { + let (m, n) = (5, 4); + + // slice into a 0-dim array + let a = Array::from_shape_fn((m, n).set_f(use_f_order), |_idx| counter.element()); + let a = a.slice_move(s![2, 2]); + + assert_eq!(a.ndim(), 0); + let mut b = Array::uninit(a.dim()); + a.move_into(b.view_mut()); + let b = unsafe { b.assume_init() }; + + let total = m * n; + let dropped_1 = total - 1; + assert_eq!(counter.created(), total); + assert_eq!(counter.dropped(), dropped_1); + drop(b); + } + counter.assert_drop_count(); + } +} + +#[test] +fn move_into_empty() { + // Count correct number of drops when using move_into and discontiguous arrays (with holes). + for &use_f_order in &[false, true] { + let counter = DropCounter::default(); + { + let (m, n) = (5, 4); + + // slice into an empty array; + let a = Array::from_shape_fn((m, n).set_f(use_f_order), |_idx| counter.element()); + let a = a.slice_move(s![..0, 1..1]); + assert!(a.is_empty()); + let mut b = Array::uninit(a.dim()); + a.move_into(b.view_mut()); + let b = unsafe { b.assume_init() }; + + let total = m * n; + let dropped_1 = total; + assert_eq!(counter.created(), total); + assert_eq!(counter.dropped(), dropped_1); + drop(b); + } + counter.assert_drop_count(); + } +} + + +/// This counter can create elements, and then count and verify +/// the number of which have actually been dropped again. 
+#[derive(Default)] +struct DropCounter { + created: AtomicUsize, + dropped: AtomicUsize, +} + +struct Element<'a>(&'a AtomicUsize); + +impl DropCounter { + fn created(&self) -> usize { + self.created.load(Ordering::Relaxed) + } + + fn dropped(&self) -> usize { + self.dropped.load(Ordering::Relaxed) + } + + fn element(&self) -> Element<'_> { + self.created.fetch_add(1, Ordering::Relaxed); + Element(&self.dropped) + } + + fn assert_drop_count(&self) { + assert_eq!( + self.created(), + self.dropped(), + "Expected {} dropped elements, but found {}", + self.created(), + self.dropped() + ); + } +} + +impl<'a> Drop for Element<'a> { + fn drop(&mut self) { + self.0.fetch_add(1, Ordering::Relaxed); + } +} diff --git a/tests/higher_order_f.rs b/tests/higher_order_f.rs index 1238cc4d8..c567eb3e0 100644 --- a/tests/higher_order_f.rs +++ b/tests/higher_order_f.rs @@ -6,37 +6,3 @@ fn test_fold_axis_oob() { let a = arr2(&[[1., 2.], [3., 4.]]); a.fold_axis(Axis(2), 0., |x, y| x + y); } - -#[test] -fn assign() { - let mut a = arr2(&[[1., 2.], [3., 4.]]); - let b = arr2(&[[1., 3.], [2., 4.]]); - a.assign(&b); - assert_eq!(a, b); - - /* Test broadcasting */ - a.assign(&ArcArray::zeros(1)); - assert_eq!(a, ArcArray::zeros((2, 2))); - - /* Test other type */ - a.assign(&Array::from_elem((2, 2), 3.)); - assert_eq!(a, ArcArray::from_elem((2, 2), 3.)); - - /* Test mut view */ - let mut a = arr2(&[[1, 2], [3, 4]]); - { - let mut v = a.view_mut(); - v.slice_collapse(s![..1, ..]); - v.fill(0); - } - assert_eq!(a, arr2(&[[0, 0], [3, 4]])); -} - - -#[test] -fn assign_to() { - let mut a = arr2(&[[1., 2.], [3., 4.]]); - let b = arr2(&[[0., 3.], [2., 0.]]); - b.assign_to(&mut a); - assert_eq!(a, b); -} diff --git a/tests/stacking.rs b/tests/stacking.rs index 032525ffa..0c4e79c79 100644 --- a/tests/stacking.rs +++ b/tests/stacking.rs @@ -1,4 +1,4 @@ -use ndarray::{arr2, arr3, aview1, concatenate, stack, Array2, Axis, ErrorKind, Ix1}; +use ndarray::{arr2, arr3, aview1, aview2, concatenate, stack, 
Array2, Axis, ErrorKind, Ix1}; #[test] fn concatenating() { @@ -15,6 +15,13 @@ fn concatenating() { let d = concatenate![Axis(0), a.row(0), &[9., 9.]]; assert_eq!(d, aview1(&[2., 2., 9., 9.])); + let d = concatenate![Axis(1), a.row(0).insert_axis(Axis(1)), aview1(&[9., 9.]).insert_axis(Axis(1))]; + assert_eq!(d, aview2(&[[2., 9.], + [2., 9.]])); + + let d = concatenate![Axis(0), a.row(0).insert_axis(Axis(1)), aview1(&[9., 9.]).insert_axis(Axis(1))]; + assert_eq!(d, aview2(&[[2.], [2.], [9.], [9.]])); + let res = ndarray::concatenate(Axis(1), &[a.view(), c.view()]); assert_eq!(res.unwrap_err().kind(), ErrorKind::IncompatibleShape);