ndarray/impl_owned_array.rs

use alloc::vec::Vec;
use std::mem;
use std::mem::MaybeUninit;

use rawpointer::PointerExt;

use crate::imp_prelude::*;

use crate::dimension;
use crate::error::{ErrorKind, ShapeError};
use crate::iterators::Baseiter;
use crate::low_level_util::AbortIfPanic;
use crate::OwnedRepr;
use crate::Zip;
/// Methods specific to `Array0`.
///
/// ***See also all methods for [`ArrayBase`]***
impl<A> Array<A, Ix0> {
    /// Returns the single element in the array without cloning it.
    ///
    /// ```
    /// use ndarray::{arr0, Array0};
    ///
    /// // `Foo` doesn't implement `Clone`.
    /// #[derive(Debug, Eq, PartialEq)]
    /// struct Foo;
    ///
    /// let array: Array0<Foo> = arr0(Foo);
    /// let scalar: Foo = array.into_scalar();
    /// assert_eq!(scalar, Foo);
    /// ```
    pub fn into_scalar(self) -> A {
        let size = mem::size_of::<A>();
        if size == 0 {
            // Any index in the `Vec` is fine since all elements are identical.
            self.data.into_vec().remove(0)
        } else {
            // Find the index in the `Vec` corresponding to `self.ptr`.
            // (This is necessary because the element in the array might not be
            // the first element in the `Vec`, such as if the array was created
            // by `array![1, 2, 3, 4].slice_move(s![2])`.)
            let first = self.ptr.as_ptr() as usize;
            let base = self.data.as_ptr() as usize;
            let index = (first - base) / size;
            debug_assert_eq!((first - base) % size, 0);
            // Remove the element at the index and return it.
            self.data.into_vec().remove(index)
        }
    }
}

/// Methods specific to `Array`.
///
/// ***See also all methods for [`ArrayBase`]***
impl<A, D> Array<A, D>
where
    D: Dimension,
{
    /// Return a vector of the elements in the array, in the way they are
    /// stored internally.
    ///
    /// If the array is in standard memory layout, the logical element order
    /// of the array (`.iter()` order) and of the returned vector will be the same.
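    ///
    /// For example (an array in standard layout, so the vector matches `.iter()` order):
    ///
    /// ```
    /// use ndarray::array;
    ///
    /// let a = array![[1., 2.], [3., 4.]];
    /// assert_eq!(a.into_raw_vec(), vec![1., 2., 3., 4.]);
    /// ```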
    pub fn into_raw_vec(self) -> Vec<A> {
        self.data.into_vec()
    }
}

/// Methods specific to `Array2`.
///
/// ***See also all methods for [`ArrayBase`]***
impl<A> Array<A, Ix2> {
    /// Append a row to an array
    ///
    /// The elements from `row` are cloned and added as a new row in the array.
    ///
    /// ***Errors*** with a shape error if the length of the row does not match the length of the
    /// rows in the array.
    ///
    /// The memory layout of the `self` array matters for ensuring that the append is efficient.
    /// Appending automatically changes the memory layout of the array so that it is appended to
    /// along the "growing axis". However, if the memory layout needs adjusting, the array must
    /// reallocate and move memory.
    ///
    /// The operation leaves the existing data in place and is most efficient if one of these is
    /// true:
    ///
    /// - The axis being appended to is the longest stride axis, i.e. the array is in row major
    ///   ("C") layout.
    /// - The array has 0 or 1 rows (it is converted to row major).
    ///
    /// Ensure appending is efficient by, for example, appending to an empty array and then always
    /// pushing/appending along the same axis. For pushing rows, ndarray's default layout (C order)
    /// is efficient.
    ///
    /// When repeatedly appending to a single axis, the amortized average complexity of each
    /// append is O(*m*), where *m* is the length of the row.
    ///
    /// ```rust
    /// use ndarray::{Array, ArrayView, array};
    ///
    /// // create an empty array and append
    /// let mut a = Array::zeros((0, 4));
    /// a.push_row(ArrayView::from(&[ 1., 2., 3., 4.])).unwrap();
    /// a.push_row(ArrayView::from(&[-1., -2., -3., -4.])).unwrap();
    ///
    /// assert_eq!(
    ///     a,
    ///     array![[ 1.,  2.,  3.,  4.],
    ///            [-1., -2., -3., -4.]]);
    /// ```
    pub fn push_row(&mut self, row: ArrayView<A, Ix1>) -> Result<(), ShapeError>
    where
        A: Clone,
    {
        self.append(Axis(0), row.insert_axis(Axis(0)))
    }

    /// Append a column to an array
    ///
    /// The elements from `column` are cloned and added as a new column in the array.
    ///
    /// ***Errors*** with a shape error if the length of the column does not match the length of
    /// the columns in the array.
    ///
    /// The memory layout of the `self` array matters for ensuring that the append is efficient.
    /// Appending automatically changes the memory layout of the array so that it is appended to
    /// along the "growing axis". However, if the memory layout needs adjusting, the array must
    /// reallocate and move memory.
    ///
    /// The operation leaves the existing data in place and is most efficient if one of these is
    /// true:
    ///
    /// - The axis being appended to is the longest stride axis, i.e. the array is in column major
    ///   ("F") layout.
    /// - The array has 0 or 1 columns (it is converted to column major).
    ///
    /// Ensure appending is efficient by, for example, appending to an empty array and then always
    /// pushing/appending along the same axis. For pushing columns, column major layout (F order)
    /// is efficient.
    ///
    /// When repeatedly appending to a single axis, the amortized average complexity of each append
    /// is O(*m*), where *m* is the length of the column.
    ///
    /// ```rust
    /// use ndarray::{Array, ArrayView, array};
    ///
    /// // create an empty array and append
    /// let mut a = Array::zeros((2, 0));
    /// a.push_column(ArrayView::from(&[1., 2.])).unwrap();
    /// a.push_column(ArrayView::from(&[-1., -2.])).unwrap();
    ///
    /// assert_eq!(
    ///     a,
    ///     array![[1., -1.],
    ///            [2., -2.]]);
    /// ```
    pub fn push_column(&mut self, column: ArrayView<A, Ix1>) -> Result<(), ShapeError>
    where
        A: Clone,
    {
        self.append(Axis(1), column.insert_axis(Axis(1)))
    }
}

impl<A, D> Array<A, D>
where
    D: Dimension,
{
    /// Move all elements from self into `new_array`, which must be of the same shape but
    /// can have a different memory layout. The destination is overwritten completely.
    ///
    /// The destination should be a mut reference to an array or an `ArrayViewMut` with
    /// `A` elements.
    ///
    /// ***Panics*** if the shapes don't agree.
    ///
    /// ## Example
    ///
    /// ```
    /// use ndarray::Array;
    ///
    /// // Usage example of move_into in safe code
    /// let mut a = Array::default((10, 10));
    /// let b = Array::from_shape_fn((10, 10), |(i, j)| (i + j).to_string());
    /// b.move_into(&mut a);
    /// ```
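    ///
    /// The destination can also be a view, for example a slice of a larger array
    /// (a sketch, using a hypothetical 12×12 destination):
    ///
    /// ```
    /// use ndarray::{Array, s};
    ///
    /// let mut big = Array::default((12, 12));
    /// let small = Array::from_shape_fn((10, 10), |(i, j)| (i + j).to_string());
    /// // move the elements of `small` into the interior of `big`
    /// small.move_into(big.slice_mut(s![1..11, 1..11]));
    /// ```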
    pub fn move_into<'a, AM>(self, new_array: AM)
    where
        AM: Into<ArrayViewMut<'a, A, D>>,
        A: 'a,
    {
        // Remove the generic parameter AM and call the implementation
        let new_array = new_array.into();
        if mem::needs_drop::<A>() {
            self.move_into_needs_drop(new_array);
        } else {
            // If `A` doesn't need drop, we can overwrite the destination.
            // Safe because: move_into_uninit only writes initialized values
            unsafe {
                self.move_into_uninit(new_array.into_maybe_uninit())
            }
        }
    }

    fn move_into_needs_drop(mut self, new_array: ArrayViewMut<A, D>) {
        // Simple case where `A` has a destructor: just swap values between self and new_array.
        // Afterwards, `self` is dropped full of initialized values and dropping works as usual.
        // This avoids moving out of owned values in `self` while at the same time managing
        // the dropping of the values being overwritten in `new_array`.
        Zip::from(&mut self).and(new_array)
            .for_each(|src, dst| mem::swap(src, dst));
    }

    /// Move all elements from self into `new_array`, which must be of the same shape but
    /// can have a different memory layout. The destination is overwritten completely.
    ///
    /// The destination should be a mut reference to an array or an `ArrayViewMut` with
    /// `MaybeUninit<A>` elements (which are overwritten without dropping any existing value).
    ///
    /// Minor implementation note: Owned arrays like `self` may be sliced in place and own elements
    /// that are not part of their active view; these are dropped at the end of this function,
    /// after all elements in the "active view" are moved into `new_array`. If there is a panic in
    /// drop of any such element, other elements may be leaked.
    ///
    /// ***Panics*** if the shapes don't agree.
    ///
    /// ## Example
    ///
    /// ```
    /// use ndarray::Array;
    ///
    /// let a = Array::from_iter(0..100).into_shape((10, 10)).unwrap();
    /// let mut b = Array::uninit((10, 10));
    /// a.move_into_uninit(&mut b);
    /// unsafe {
    ///     // we can now promise we have fully initialized `b`.
    ///     let b = b.assume_init();
    /// }
    /// ```
    pub fn move_into_uninit<'a, AM>(self, new_array: AM)
    where
        AM: Into<ArrayViewMut<'a, MaybeUninit<A>, D>>,
        A: 'a,
    {
        // Remove the generic parameter AM and call the implementation
        self.move_into_impl(new_array.into())
    }

    fn move_into_impl(mut self, new_array: ArrayViewMut<MaybeUninit<A>, D>) {
        unsafe {
            // Safety: copy_to_nonoverlapping cannot panic
            let guard = AbortIfPanic(&"move_into: moving out of owned value");
            // Move all reachable elements; we move elements out of `self`,
            // and thus must not panic for the whole section until we call `self.data.set_len(0)`.
            Zip::from(self.raw_view_mut())
                .and(new_array)
                .for_each(|src, dst| {
                    src.copy_to_nonoverlapping(dst.as_mut_ptr(), 1);
                });
            guard.defuse();
            // Drop all unreachable elements
            self.drop_unreachable_elements();
        }
    }

    /// This drops all "unreachable" elements in the data storage of self.
    ///
    /// That means those elements that are not visible in the slicing of the array.
    /// *Reachable elements are assumed to already have been moved from.*
    ///
    /// # Safety
    ///
    /// This is a panic critical section since `self` is already moved-from.
    fn drop_unreachable_elements(mut self) -> OwnedRepr<A> {
        let self_len = self.len();

        // "deconstruct" self; the owned repr releases ownership of all elements and we
        // carry on with raw view methods
        let data_len = self.data.len();

        let has_unreachable_elements = self_len != data_len;
        if !has_unreachable_elements || mem::size_of::<A>() == 0 || !mem::needs_drop::<A>() {
            unsafe {
                self.data.set_len(0);
            }
            self.data
        } else {
            self.drop_unreachable_elements_slow()
        }
    }

    #[inline(never)]
    #[cold]
    fn drop_unreachable_elements_slow(mut self) -> OwnedRepr<A> {
        // "deconstruct" self; the owned repr releases ownership of all elements and we
        // carry on with raw view methods
        let data_len = self.data.len();
        let data_ptr = self.data.as_nonnull_mut().as_ptr();

        unsafe {
            // Safety: self.data releases ownership of the elements. Any panics below this point
            // will result in leaking elements instead of double drops.
            let self_ = self.raw_view_mut();
            self.data.set_len(0);

            drop_unreachable_raw(self_, data_ptr, data_len);
        }

        self.data
    }

    /// Create an empty array with an all-zeros shape
    ///
    /// ***Panics*** if D is zero-dimensional, because it can't be empty
    pub(crate) fn empty() -> Array<A, D> {
        assert_ne!(D::NDIM, Some(0));
        let ndim = D::NDIM.unwrap_or(1);
        Array::from_shape_simple_fn(D::zeros(ndim), || unreachable!())
    }

    /// Create new_array with the right layout for appending to `growing_axis`
    #[cold]
    fn change_to_contig_append_layout(&mut self, growing_axis: Axis) {
        let ndim = self.ndim();
        let mut dim = self.raw_dim();

        // The array will be created with 0 (C) or ndim-1 (F) as the biggest stride
        // axis. Rearrange the shape so that `growing_axis` is the biggest stride axis
        // afterwards.
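        //
        // Illustrative (hypothetical shapes): for ndim = 3 and growing_axis = Axis(1),
        // dim [a, b, c] is rotated to [b, a, c] before the C-layout allocation below,
        // and the new array's dim and strides are then rotated back so that Axis(1)
        // ends up with the biggest stride.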
        let mut new_array;
        if growing_axis == Axis(ndim - 1) {
            new_array = Self::uninit(dim.f());
        } else {
            dim.slice_mut()[..=growing_axis.index()].rotate_right(1);
            new_array = Self::uninit(dim);
            new_array.dim.slice_mut()[..=growing_axis.index()].rotate_left(1);
            new_array.strides.slice_mut()[..=growing_axis.index()].rotate_left(1);
        }

        // self -> old_self.
        // dummy array -> self.
        // old_self elements are moved -> new_array.
        let old_self = std::mem::replace(self, Self::empty());
        old_self.move_into_uninit(new_array.view_mut());

        // new_array -> self.
        unsafe {
            *self = new_array.assume_init();
        }
    }

    /// Append an array to the array along an axis.
    ///
    /// The elements of `array` are cloned and extend the axis `axis` in the present array;
    /// `self` will grow in size by 1 along `axis`.
    ///
    /// The array being pushed has one dimension less than the `self` array. This method is
    /// equivalent to [append](ArrayBase::append) in this way:
    /// `self.append(axis, array.insert_axis(axis))`.
    ///
    /// ***Errors*** with a shape error if the shape of self does not match the array-to-append;
    /// all axes *except* the axis along which it is being appended matter for this check:
    /// the shape of `self` with `axis` removed must be the same as the shape of `array`.
    ///
    /// The memory layout of the `self` array matters for ensuring that the append is efficient.
    /// Appending automatically changes the memory layout of the array so that it is appended to
    /// along the "growing axis". However, if the memory layout needs adjusting, the array must
    /// reallocate and move memory.
    ///
    /// The operation leaves the existing data in place and is most efficient if `axis` is a
    /// "growing axis" for the array, i.e. one of these is true:
    ///
    /// - The axis is the longest stride axis, for example the 0th axis in a C-layout or the
    ///   *n-1*th axis in an F-layout array.
    /// - The axis has length 0 or 1 (it is converted to the new growing axis).
    ///
    /// Ensure appending is efficient by, for example, starting from an empty array and/or always
    /// appending to an array along the same axis.
    ///
    /// The amortized average complexity of the append, when appending along its growing axis, is
    /// O(*m*) where *m* is the number of individual elements to append.
    ///
    /// The memory layout of the argument `array` does not matter to the same extent.
    ///
    /// ```rust
    /// use ndarray::{Array, ArrayView, array, Axis};
    ///
    /// // create an empty array and push rows to it
    /// let mut a = Array::zeros((0, 4));
    /// let ones = ArrayView::from(&[1.; 4]);
    /// let zeros = ArrayView::from(&[0.; 4]);
    /// a.push(Axis(0), ones).unwrap();
    /// a.push(Axis(0), zeros).unwrap();
    /// a.push(Axis(0), ones).unwrap();
    ///
    /// assert_eq!(
    ///     a,
    ///     array![[1., 1., 1., 1.],
    ///            [0., 0., 0., 0.],
    ///            [1., 1., 1., 1.]]);
    /// ```
    pub fn push(&mut self, axis: Axis, array: ArrayView<A, D::Smaller>)
        -> Result<(), ShapeError>
    where
        A: Clone,
        D: RemoveAxis,
    {
        // same-dimensionality conversion
        self.append(axis, array.insert_axis(axis).into_dimensionality::<D>().unwrap())
    }

    /// Append an array to the array along an axis.
    ///
    /// The elements of `array` are cloned and extend the axis `axis` in the present array;
    /// `self` will grow in size by `array.len_of(axis)` along `axis`.
    ///
    /// ***Errors*** with a shape error if the shape of self does not match the array-to-append;
    /// all axes *except* the axis along which it is being appended matter for this check:
    /// the shape of `self` with `axis` removed must be the same as the shape of `array` with
    /// `axis` removed.
    ///
    /// The memory layout of the `self` array matters for ensuring that the append is efficient.
    /// Appending automatically changes the memory layout of the array so that it is appended to
    /// along the "growing axis". However, if the memory layout needs adjusting, the array must
    /// reallocate and move memory.
    ///
    /// The operation leaves the existing data in place and is most efficient if `axis` is a
    /// "growing axis" for the array, i.e. one of these is true:
    ///
    /// - The axis is the longest stride axis, for example the 0th axis in a C-layout or the
    ///   *n-1*th axis in an F-layout array.
    /// - The axis has length 0 or 1 (it is converted to the new growing axis).
    ///
    /// Ensure appending is efficient by, for example, starting from an empty array and/or always
    /// appending to an array along the same axis.
    ///
    /// The amortized average complexity of the append, when appending along its growing axis, is
    /// O(*m*) where *m* is the number of individual elements to append.
    ///
    /// The memory layout of the argument `array` does not matter to the same extent.
    ///
    /// ```rust
    /// use ndarray::{Array, ArrayView, array, Axis};
    ///
    /// // create an empty array and append two rows at a time
    /// let mut a = Array::zeros((0, 4));
    /// let ones = ArrayView::from(&[1.; 8]).into_shape((2, 4)).unwrap();
    /// let zeros = ArrayView::from(&[0.; 8]).into_shape((2, 4)).unwrap();
    /// a.append(Axis(0), ones).unwrap();
    /// a.append(Axis(0), zeros).unwrap();
    /// a.append(Axis(0), ones).unwrap();
    ///
    /// assert_eq!(
    ///     a,
    ///     array![[1., 1., 1., 1.],
    ///            [1., 1., 1., 1.],
    ///            [0., 0., 0., 0.],
    ///            [0., 0., 0., 0.],
    ///            [1., 1., 1., 1.],
    ///            [1., 1., 1., 1.]]);
    /// ```
    pub fn append(&mut self, axis: Axis, mut array: ArrayView<A, D>)
        -> Result<(), ShapeError>
    where
        A: Clone,
        D: RemoveAxis,
    {
        if self.ndim() == 0 {
            return Err(ShapeError::from_kind(ErrorKind::IncompatibleShape));
        }

        let current_axis_len = self.len_of(axis);
        let self_dim = self.raw_dim();
        let array_dim = array.raw_dim();
        let remaining_shape = self_dim.remove_axis(axis);
        let array_rem_shape = array_dim.remove_axis(axis);

        if remaining_shape != array_rem_shape {
            return Err(ShapeError::from_kind(ErrorKind::IncompatibleShape));
        }

        let len_to_append = array.len();

        let mut res_dim = self_dim;
        res_dim[axis.index()] += array_dim[axis.index()];
        let new_len = dimension::size_of_shape_checked(&res_dim)?;

        if len_to_append == 0 {
            // There are no elements to append and shapes are compatible:
            // either the dimension increment is zero, or there is an existing
            // zero in another axis in self.
            debug_assert_eq!(self.len(), new_len);
            self.dim = res_dim;
            return Ok(());
        }

        let self_is_empty = self.is_empty();
        let mut incompatible_layout = false;

        // self must be empty or have `axis` as the outermost (longest stride) axis
        if !self_is_empty && current_axis_len > 1 {
            // `axis` must have the maximum stride (ties are allowed)
            let axis_stride = self.stride_of(axis);
            if axis_stride < 0 {
                incompatible_layout = true;
            } else {
                for ax in self.axes() {
                    if ax.axis == axis {
                        continue;
                    }
                    if ax.len > 1 && ax.stride.abs() > axis_stride {
                        incompatible_layout = true;
                        break;
                    }
                }
            }
        }

        // self must be "full" (contiguous, with no exterior holes)
        if self.len() != self.data.len() {
            incompatible_layout = true;
        }

        if incompatible_layout {
            self.change_to_contig_append_layout(axis);
            // safety-check parameters after remodeling
            debug_assert_eq!(self_is_empty, self.is_empty());
            debug_assert_eq!(current_axis_len, self.len_of(axis));
        }

        let strides = if self_is_empty {
            // recompute strides - if the array was previously empty, it could have zeros in
            // strides.
            // The new order is based on c/f-contig but must have `axis` as outermost axis.
            if axis == Axis(self.ndim() - 1) {
                // prefer f-contig when appending to the last axis
                // Axis n - 1 is outermost axis
                res_dim.fortran_strides()
            } else {
                // standard axis order except for the growing axis;
                // anticipates that it's likely that `array` has standard order apart from the
                // growing axis.
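                //
                // Illustrative (hypothetical shapes): res_dim [2, 3, 4] with axis = Axis(1)
                // is rotated to [3, 2, 4], whose default (C) strides are [8, 4, 1];
                // rotating dim and strides back gives dim [2, 3, 4] with strides [4, 8, 1],
                // so Axis(1) has the biggest stride.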
                res_dim.slice_mut()[..=axis.index()].rotate_right(1);
                let mut strides = res_dim.default_strides();
                res_dim.slice_mut()[..=axis.index()].rotate_left(1);
                strides.slice_mut()[..=axis.index()].rotate_left(1);
                strides
            }
        } else if current_axis_len == 1 {
            // This is the outermost/longest stride axis; so we find the max across the other axes
            let new_stride = self.axes().fold(1, |acc, ax| {
                if ax.axis == axis || ax.len <= 1 {
                    acc
                } else {
                    let this_ax = ax.len as isize * ax.stride.abs();
                    if this_ax > acc { this_ax } else { acc }
                }
            });
            let mut strides = self.strides.clone();
            strides[axis.index()] = new_stride as usize;
            strides
        } else {
            self.strides.clone()
        };

        unsafe {
            // grow backing storage and update head ptr
            let data_to_array_offset = if std::mem::size_of::<A>() != 0 {
                self.as_ptr().offset_from(self.data.as_ptr())
            } else {
                0
            };
            debug_assert!(data_to_array_offset >= 0);
            self.ptr = self.data.reserve(len_to_append).offset(data_to_array_offset);

            // clone elements from view to the array now
            //
            // To be robust against panics and to drop the right elements, we want
            // to fill the tail in memory order, so that on panic the filled-in prefix
            // corresponds exactly to the elements that were written.
            //
            // We have: Zip::from(tail_view).and(array)
            // Transform tail_view into standard order by inverting and moving its axes.
            // Keep the Zip traversal unchanged by applying the same axis transformations to
            // `array`. This ensures the Zip traverses the underlying memory in order.
            //
            // XXX It would be possible to skip this transformation if the element
            // doesn't have drop. However, in the interest of code coverage, all elements
            // use this code initially.

            // Invert axes in tail_view by inverting strides
            let mut tail_strides = strides.clone();
            if tail_strides.ndim() > 1 {
                for i in 0..tail_strides.ndim() {
                    let s = tail_strides[i] as isize;
                    if s < 0 {
                        tail_strides.set_axis(Axis(i), -s as usize);
                        array.invert_axis(Axis(i));
                    }
                }
            }

            // With > 0 strides, the current end of data is the correct base pointer for tail_view
            let tail_ptr = self.data.as_end_nonnull();
            let mut tail_view = RawArrayViewMut::new(tail_ptr, array_dim, tail_strides);

            if tail_view.ndim() > 1 {
                sort_axes_in_default_order_tandem(&mut tail_view, &mut array);
                debug_assert!(tail_view.is_standard_layout(),
                              "not std layout dim: {:?}, strides: {:?}",
                              tail_view.shape(), tail_view.strides());
            }

            // Keep track of the currently filled length of `self.data` and update it
            // on scope exit (panic or loop finish). This "indirect" way to
            // write the length is used to help the compiler; the len store to self.data may
            // otherwise be mistaken to alias with other stores in the loop.
            struct SetLenOnDrop<'a, A: 'a> {
                len: usize,
                data: &'a mut OwnedRepr<A>,
            }

            impl<A> Drop for SetLenOnDrop<'_, A> {
                fn drop(&mut self) {
                    unsafe {
                        self.data.set_len(self.len);
                    }
                }
            }

            let mut data_length_guard = SetLenOnDrop {
                len: self.data.len(),
                data: &mut self.data,
            };

            // Safety: tail_view is constructed to have the same shape as array
            Zip::from(tail_view)
                .and_unchecked(array)
                .debug_assert_c_order()
                .for_each(|to, from| {
                    to.write(from.clone());
                    data_length_guard.len += 1;
                });
            drop(data_length_guard);

            // update array dimension
            self.strides = strides;
            self.dim = res_dim;
        }
        // multiple assertions after pointer & dimension update
        debug_assert_eq!(self.data.len(), self.len());
        debug_assert_eq!(self.len(), new_len);
        debug_assert!(self.pointer_is_inbounds());

        Ok(())
    }
}

/// This drops all "unreachable" elements in `self_` given the data pointer and data length.
///
/// # Safety
///
/// This is an internal function for use by move_into and IntoIter only; safety invariants may need
/// to be upheld across the calls from those implementations.
pub(crate) unsafe fn drop_unreachable_raw<A, D>(
    mut self_: RawArrayViewMut<A, D>,
    data_ptr: *mut A,
    data_len: usize,
) where
    D: Dimension,
{
    let self_len = self_.len();

    for i in 0..self_.ndim() {
        if self_.stride_of(Axis(i)) < 0 {
            self_.invert_axis(Axis(i));
        }
    }
    sort_axes_in_default_order(&mut self_);
    // with uninverted axes this is now the element with lowest address
    let array_memory_head_ptr = self_.ptr.as_ptr();
    let data_end_ptr = data_ptr.add(data_len);
    debug_assert!(data_ptr <= array_memory_head_ptr);
    debug_assert!(array_memory_head_ptr <= data_end_ptr);

    // The idea is simply this: the iterator will yield the elements of self_ in
    // increasing address order.
    //
    // The pointers produced by the iterator are those that we *do not* touch.
    // The pointers *not mentioned* by the iterator are those we have to drop.
    //
    // We have to drop elements in the range from `data_ptr` until (not including)
    // `data_end_ptr`, except those that are produced by `iter`.
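    //
    // Illustrative (1-d sketch): if the backing data is [a, b, c, d, e] and the
    // view is a stride-2 slice starting at b, the iterator yields the addresses
    // of b and d, and the gaps a, c and e are dropped below.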

    // As an optimization, the innermost axis is removed if it has stride 1, because
    // we then have a long stretch of contiguous elements we can skip as one.
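    // (For example, with contiguous lanes of length k, each iterator step then
    // covers a whole lane of k elements, and only the gaps between lanes are
    // dropped element by element.)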
    let inner_lane_len;
    if self_.ndim() > 1 && self_.strides.last_elem() == 1 {
        self_.dim.slice_mut().rotate_right(1);
        self_.strides.slice_mut().rotate_right(1);
        inner_lane_len = self_.dim[0];
        self_.dim[0] = 1;
        self_.strides[0] = 1;
    } else {
        inner_lane_len = 1;
    }

    // iter is a raw pointer iterator traversing the array in memory order now with the
    // sorted axes.
    let mut iter = Baseiter::new(self_.ptr.as_ptr(), self_.dim, self_.strides);
    let mut dropped_elements = 0;

    let mut last_ptr = data_ptr;

    while let Some(elem_ptr) = iter.next() {
        // The interval from last_ptr up until (not including) elem_ptr
        // should now be dropped. This interval may be empty, then we just skip this loop.
        while last_ptr != elem_ptr {
            debug_assert!(last_ptr < data_end_ptr);
            std::ptr::drop_in_place(last_ptr);
            last_ptr = last_ptr.add(1);
            dropped_elements += 1;
        }
        // Next interval will continue one past the current lane
        last_ptr = elem_ptr.add(inner_lane_len);
    }

    while last_ptr < data_end_ptr {
        std::ptr::drop_in_place(last_ptr);
        last_ptr = last_ptr.add(1);
        dropped_elements += 1;
    }

    assert_eq!(data_len, dropped_elements + self_len,
               "Internal error: inconsistency in move_into");
}

/// Sort axes to standard order, i.e. Axis(0) has the biggest stride and Axis(n - 1) the least stride
///
/// The axes should have stride >= 0 before calling this method.
fn sort_axes_in_default_order<S, D>(a: &mut ArrayBase<S, D>)
where
    S: RawData,
    D: Dimension,
{
    if a.ndim() <= 1 {
        return;
    }
    sort_axes1_impl(&mut a.dim, &mut a.strides);
}

fn sort_axes1_impl<D>(adim: &mut D, astrides: &mut D)
where
    D: Dimension,
{
    debug_assert!(adim.ndim() > 1);
    debug_assert_eq!(adim.ndim(), astrides.ndim());
    // bubble sort axes
    let mut changed = true;
    while changed {
        changed = false;
        for i in 0..adim.ndim() - 1 {
            let axis_i = i;
            let next_axis = i + 1;

            // make sure higher stride axes sort before lower stride axes.
            debug_assert!(astrides.slice()[axis_i] as isize >= 0);
            if (astrides.slice()[axis_i] as isize) < astrides.slice()[next_axis] as isize {
                changed = true;
                adim.slice_mut().swap(axis_i, next_axis);
                astrides.slice_mut().swap(axis_i, next_axis);
            }
        }
    }
}

/// Sort axes to standard order, i.e. Axis(0) has the biggest stride and Axis(n - 1) the least stride
///
/// Axes in `a` and `b` are sorted by the strides of `a`, and `a`'s axes should have stride >= 0 before
/// calling this method.
fn sort_axes_in_default_order_tandem<S, S2, D>(a: &mut ArrayBase<S, D>, b: &mut ArrayBase<S2, D>)
where
    S: RawData,
    S2: RawData,
    D: Dimension,
{
    if a.ndim() <= 1 {
        return;
    }
    sort_axes2_impl(&mut a.dim, &mut a.strides, &mut b.dim, &mut b.strides);
}

fn sort_axes2_impl<D>(adim: &mut D, astrides: &mut D, bdim: &mut D, bstrides: &mut D)
where
    D: Dimension,
{
    debug_assert!(adim.ndim() > 1);
    debug_assert_eq!(adim.ndim(), bdim.ndim());
    // bubble sort axes
    let mut changed = true;
    while changed {
        changed = false;
        for i in 0..adim.ndim() - 1 {
            let axis_i = i;
            let next_axis = i + 1;

            // make sure higher stride axes sort before lower stride axes.
            debug_assert!(astrides.slice()[axis_i] as isize >= 0);
            if (astrides.slice()[axis_i] as isize) < astrides.slice()[next_axis] as isize {
                changed = true;
                adim.slice_mut().swap(axis_i, next_axis);
                astrides.slice_mut().swap(axis_i, next_axis);
                bdim.slice_mut().swap(axis_i, next_axis);
                bstrides.slice_mut().swap(axis_i, next_axis);
            }
        }
    }
}