1use num::{One, Zero};
2#[cfg(feature = "abomonation-serialize")]
3use std::io::{Result as IOResult, Write};
4
5use approx::{AbsDiffEq, RelativeEq, UlpsEq};
6use std::any::TypeId;
7use std::cmp::Ordering;
8use std::fmt;
9use std::hash::{Hash, Hasher};
10use std::marker::PhantomData;
11use std::mem;
12
13#[cfg(feature = "serde-serialize-no-std")]
14use serde::{Deserialize, Deserializer, Serialize, Serializer};
15
16#[cfg(feature = "abomonation-serialize")]
17use abomonation::Abomonation;
18
19use simba::scalar::{ClosedAdd, ClosedMul, ClosedSub, Field, SupersetOf};
20use simba::simd::SimdPartialOrd;
21
22use crate::base::allocator::{Allocator, SameShapeAllocator, SameShapeC, SameShapeR};
23use crate::base::constraint::{DimEq, SameNumberOfColumns, SameNumberOfRows, ShapeConstraint};
24use crate::base::dimension::{Dim, DimAdd, DimSum, IsNotStaticOne, U1, U2, U3};
25use crate::base::iter::{
26 ColumnIter, ColumnIterMut, MatrixIter, MatrixIterMut, RowIter, RowIterMut,
27};
28use crate::base::storage::{Owned, RawStorage, RawStorageMut, SameShapeStorage};
29use crate::base::{Const, DefaultAllocator, OMatrix, OVector, Scalar, Unit};
30use crate::{ArrayStorage, SMatrix, SimdComplexField, Storage, UninitMatrix};
31
32use crate::storage::IsContiguous;
33use crate::uninit::{Init, InitStatus, Uninit};
34#[cfg(any(feature = "std", feature = "alloc"))]
35use crate::{DMatrix, DVector, Dynamic, RowDVector, VecStorage};
36use std::mem::MaybeUninit;
37
/// An alias for a square matrix: same row and column dimension `D`.
pub type SquareMatrix<T, D, S> = Matrix<T, D, D, S>;

/// A column vector with `D` rows (a `D × 1` matrix).
pub type Vector<T, D, S> = Matrix<T, D, U1, S>;

/// A row vector with `D` columns (a `1 × D` matrix).
pub type RowVector<T, D, S> = Matrix<T, U1, D, S>;

/// The result type of the sum of two matrices with shapes `R1 × C1` and `R2 × C2`.
/// The shape/storage is resolved through the same-shape combination rules.
pub type MatrixSum<T, R1, C1, R2, C2> =
    Matrix<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>, SameShapeStorage<T, R1, C1, R2, C2>>;

/// The result type of the sum of two column vectors with dimensions `R1` and `R2`.
pub type VectorSum<T, R1, R2> =
    Matrix<T, SameShapeR<R1, R2>, U1, SameShapeStorage<T, R1, U1, R2, U1>>;

/// The result type of the cross product of two matrices; shape rules are the
/// same as for `MatrixSum`.
pub type MatrixCross<T, R1, C1, R2, C2> =
    Matrix<T, SameShapeR<R1, R2>, SameShapeC<C1, C2>, SameShapeStorage<T, R1, C1, R2, C2>>;
58
/// The most generic, column-major, matrix (and vector) type.
///
/// `T` is the scalar type, `R` and `C` are the row/column dimension types
/// (statically sized or dynamic), and `S` is the storage holding the components.
#[repr(C)]
#[derive(Clone, Copy)]
#[cfg_attr(
    all(not(target_os = "cuda"), feature = "cuda"),
    derive(cust::DeviceCopy)
)]
pub struct Matrix<T, R, C, S> {
    /// The storage containing all the matrix components, in column-major order.
    pub data: S,

    // NOTE(review): this field being private is important — it prevents users
    // from constructing a `Matrix` whose dimension types `R`, `C` disagree with
    // the actual dimensions reported by the storage `S`.
    _phantoms: PhantomData<(T, R, C)>,
}
200
201impl<T, R: Dim, C: Dim, S: fmt::Debug> fmt::Debug for Matrix<T, R, C, S> {
202 fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
203 self.data.fmt(formatter)
204 }
205}
206
207impl<T, R, C, S> Default for Matrix<T, R, C, S>
208where
209 T: Scalar,
210 R: Dim,
211 C: Dim,
212 S: Default,
213{
214 fn default() -> Self {
215 Matrix {
216 data: Default::default(),
217 _phantoms: PhantomData,
218 }
219 }
220}
221
#[cfg(feature = "serde-serialize-no-std")]
/// Serialization simply forwards to the storage: the dimension types are
/// recovered from the storage on deserialization.
impl<T, R, C, S> Serialize for Matrix<T, R, C, S>
where
    T: Scalar,
    R: Dim,
    C: Dim,
    S: Serialize,
{
    fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
    where
        Ser: Serializer,
    {
        self.data.serialize(serializer)
    }
}
237
#[cfg(feature = "serde-serialize-no-std")]
/// Deserialization reads the storage and wraps it; the `PhantomData` carries
/// no runtime state so nothing else needs to be restored.
impl<'de, T, R, C, S> Deserialize<'de> for Matrix<T, R, C, S>
where
    T: Scalar,
    R: Dim,
    C: Dim,
    S: Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        S::deserialize(deserializer).map(|x| Matrix {
            data: x,
            _phantoms: PhantomData,
        })
    }
}
256
#[cfg(feature = "abomonation-serialize")]
/// Abomonation support: all three operations delegate to the storage, which
/// owns every byte of the matrix's payload.
impl<T: Scalar, R: Dim, C: Dim, S: Abomonation> Abomonation for Matrix<T, R, C, S> {
    // Safety contracts are inherited verbatim from `S`'s `Abomonation` impl.
    unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {
        self.data.entomb(writer)
    }

    unsafe fn exhume<'a, 'b>(&'a mut self, bytes: &'b mut [u8]) -> Option<&'b mut [u8]> {
        self.data.exhume(bytes)
    }

    fn extent(&self) -> usize {
        self.data.extent()
    }
}
271
#[cfg(feature = "compare")]
/// Integration with the `matrixcompare` crate: exposes shape and dense access
/// so matrices can be compared by external test utilities.
impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> matrixcompare_core::Matrix<T>
    for Matrix<T, R, C, S>
{
    fn rows(&self) -> usize {
        self.nrows()
    }

    fn cols(&self) -> usize {
        self.ncols()
    }

    fn access(&self) -> matrixcompare_core::Access<'_, T> {
        matrixcompare_core::Access::Dense(self)
    }
}
288
#[cfg(feature = "compare")]
/// Dense element access for `matrixcompare`: fetches a single element by
/// (row, column) index, cloning it out of the storage.
impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> matrixcompare_core::DenseAccess<T>
    for Matrix<T, R, C, S>
{
    fn fetch_single(&self, row: usize, col: usize) -> T {
        self.index((row, col)).clone()
    }
}
297
#[cfg(feature = "bytemuck")]
// SAFETY: `Matrix` is `repr(C)` and only contains `data: S` (plus a zero-sized
// `PhantomData`), so it is zeroable whenever its storage is.
unsafe impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> bytemuck::Zeroable
    for Matrix<T, R, C, S>
where
    S: bytemuck::Zeroable,
{
}
305
#[cfg(feature = "bytemuck")]
// SAFETY: `repr(C)` with a single non-zero-sized field; `Pod` holds whenever
// the storage is `Pod` and the whole matrix is `Copy`.
unsafe impl<T: Scalar, R: Dim, C: Dim, S: RawStorage<T, R, C>> bytemuck::Pod for Matrix<T, R, C, S>
where
    S: bytemuck::Pod,
    Self: Copy,
{
}
313
#[cfg(feature = "rkyv-serialize-no-std")]
/// rkyv zero-copy (de)serialization support.
///
/// Only the `data` field is archived; the `PhantomData` field is zero-sized
/// and is reconstructed on deserialization.
mod rkyv_impl {
    use super::Matrix;
    use core::marker::PhantomData;
    use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};

    impl<T: Archive, R: Archive, C: Archive, S: Archive> Archive for Matrix<T, R, C, S> {
        type Archived = Matrix<T::Archived, R::Archived, C::Archived, S::Archived>;
        type Resolver = S::Resolver;

        fn resolve(
            &self,
            pos: usize,
            resolver: Self::Resolver,
            out: &mut core::mem::MaybeUninit<Self::Archived>,
        ) {
            // Resolve the storage in place at the archived `data` field's offset.
            self.data.resolve(
                pos + offset_of!(Self::Archived, data),
                resolver,
                project_struct!(out: Self::Archived => data),
            );
        }
    }

    impl<T: Archive, R: Archive, C: Archive, S: Serialize<_S>, _S: Fallible + ?Sized> Serialize<_S>
        for Matrix<T, R, C, S>
    {
        fn serialize(&self, serializer: &mut _S) -> Result<Self::Resolver, _S::Error> {
            self.data.serialize(serializer)
        }
    }

    impl<T: Archive, R: Archive, C: Archive, S: Archive, D: Fallible + ?Sized>
        Deserialize<Matrix<T, R, C, S>, D>
        for Matrix<T::Archived, R::Archived, C::Archived, S::Archived>
    where
        S::Archived: Deserialize<S, D>,
    {
        fn deserialize(&self, deserializer: &mut D) -> Result<Matrix<T, R, C, S>, D::Error> {
            Ok(Matrix {
                data: self.data.deserialize(deserializer)?,
                _phantoms: PhantomData,
            })
        }
    }
}
360
impl<T, R, C, S> Matrix<T, R, C, S> {
    /// Creates a new matrix with the given data without statically checking that
    /// the matrix dimension types match the storage's dimensions.
    ///
    /// # Safety
    /// The caller must guarantee that the shape reported by `data` matches the
    /// dimension types `R` and `C`; no check is performed here.
    #[inline(always)]
    pub const unsafe fn from_data_statically_unchecked(data: S) -> Matrix<T, R, C, S> {
        Matrix {
            data,
            _phantoms: PhantomData,
        }
    }
}
372
impl<T, const R: usize, const C: usize> SMatrix<T, R, C> {
    /// Creates a statically-sized matrix from its backing array storage.
    ///
    /// Usable in `const` contexts, unlike the generic `from_data`.
    #[inline(always)]
    pub const fn from_array_storage(storage: ArrayStorage<T, R, C>) -> Self {
        // SAFETY: `ArrayStorage<T, R, C>`'s shape matches `R × C` by construction.
        unsafe { Self::from_data_statically_unchecked(storage) }
    }
}
385
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T> DMatrix<T> {
    /// Creates a dynamically-sized matrix from its backing `VecStorage`.
    pub const fn from_vec_storage(storage: VecStorage<T, Dynamic, Dynamic>) -> Self {
        // SAFETY: the storage's dynamic dimensions are exactly the matrix's dimensions.
        unsafe { Self::from_data_statically_unchecked(storage) }
    }
}
400
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T> DVector<T> {
    /// Creates a dynamically-sized column vector from its backing `VecStorage`.
    pub const fn from_vec_storage(storage: VecStorage<T, Dynamic, U1>) -> Self {
        // SAFETY: the storage is `Dynamic × 1`, matching `DVector`'s shape.
        unsafe { Self::from_data_statically_unchecked(storage) }
    }
}
415
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T> RowDVector<T> {
    /// Creates a dynamically-sized row vector from its backing `VecStorage`.
    pub const fn from_vec_storage(storage: VecStorage<T, U1, Dynamic>) -> Self {
        // SAFETY: the storage is `1 × Dynamic`, matching `RowDVector`'s shape.
        unsafe { Self::from_data_statically_unchecked(storage) }
    }
}
430
impl<T, R: Dim, C: Dim> UninitMatrix<T, R, C>
where
    DefaultAllocator: Allocator<T, R, C>,
{
    /// Converts a matrix of `MaybeUninit` elements into an initialized matrix.
    ///
    /// # Safety
    /// Every element of the matrix must have been written to before this call;
    /// reading an uninitialized element afterwards is undefined behavior.
    #[inline(always)]
    pub unsafe fn assume_init(self) -> OMatrix<T, R, C> {
        OMatrix::from_data(<DefaultAllocator as Allocator<T, R, C>>::assume_init(
            self.data,
        ))
    }
}
449
impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
    /// Creates a new matrix from the given storage.
    ///
    /// This is safe (unlike `from_data_statically_unchecked`) because the
    /// `RawStorage<T, R, C>` bound ties the storage's shape to `R × C`.
    #[inline(always)]
    pub fn from_data(data: S) -> Self {
        // SAFETY: guaranteed by the `RawStorage<T, R, C>` bound on `S`.
        unsafe { Self::from_data_statically_unchecked(data) }
    }

    /// The shape of this matrix as the tuple `(number of rows, number of columns)`.
    #[inline]
    #[must_use]
    pub fn shape(&self) -> (usize, usize) {
        let (nrows, ncols) = self.shape_generic();
        (nrows.value(), ncols.value())
    }

    /// The shape of this matrix expressed as its dimension types.
    #[inline]
    #[must_use]
    pub fn shape_generic(&self) -> (R, C) {
        self.data.shape()
    }

    /// The number of rows of this matrix.
    #[inline]
    #[must_use]
    pub fn nrows(&self) -> usize {
        self.shape().0
    }

    /// The number of columns of this matrix.
    #[inline]
    #[must_use]
    pub fn ncols(&self) -> usize {
        self.shape().1
    }

    /// The row and column strides of this matrix, in number of elements.
    #[inline]
    #[must_use]
    pub fn strides(&self) -> (usize, usize) {
        let (srows, scols) = self.data.strides();
        (srows.value(), scols.value())
    }

    /// Computes the `(row, column)` coordinates of the `i`-th element of this
    /// matrix seen as a vector traversed in column-major order.
    #[inline]
    #[must_use]
    pub fn vector_to_matrix_index(&self, i: usize) -> (usize, usize) {
        let (nrows, ncols) = self.shape();

        // Special-case row/column vectors so the index maps directly without
        // a division/modulo.
        if nrows == 1 {
            (0, i)
        } else if ncols == 1 {
            (i, 0)
        } else {
            (i % nrows, i / nrows)
        }
    }

    /// Returns a raw pointer to the first component of this matrix.
    #[inline]
    #[must_use]
    pub fn as_ptr(&self) -> *const T {
        self.data.ptr()
    }

    /// Tests whether `self` and `other` are approximately equal, element-wise,
    /// using the `approx::RelativeEq` comparison.
    ///
    /// # Panics
    /// Panics if the two matrices do not have the same shape.
    #[inline]
    #[must_use]
    pub fn relative_eq<R2, C2, SB>(
        &self,
        other: &Matrix<T, R2, C2, SB>,
        eps: T::Epsilon,
        max_relative: T::Epsilon,
    ) -> bool
    where
        T: RelativeEq,
        R2: Dim,
        C2: Dim,
        SB: Storage<T, R2, C2>,
        T::Epsilon: Clone,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        assert!(self.shape() == other.shape());
        self.iter()
            .zip(other.iter())
            .all(|(a, b)| a.relative_eq(b, eps.clone(), max_relative.clone()))
    }

    /// Tests whether `self` and `other` are exactly equal, element-wise.
    ///
    /// # Panics
    /// Panics if the two matrices do not have the same shape.
    #[inline]
    #[must_use]
    #[allow(clippy::should_implement_trait)]
    pub fn eq<R2, C2, SB>(&self, other: &Matrix<T, R2, C2, SB>) -> bool
    where
        T: PartialEq,
        R2: Dim,
        C2: Dim,
        SB: RawStorage<T, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        assert!(self.shape() == other.shape());
        self.iter().zip(other.iter()).all(|(a, b)| *a == *b)
    }

    /// Moves this matrix into one that owns its data.
    #[inline]
    pub fn into_owned(self) -> OMatrix<T, R, C>
    where
        T: Scalar,
        S: Storage<T, R, C>,
        DefaultAllocator: Allocator<T, R, C>,
    {
        Matrix::from_data(self.data.into_owned())
    }

    /// Moves this matrix into one that owns its data. The actual type of the
    /// result depends on matrix storage combination rules for addition with a
    /// `R2 × C2` matrix.
    #[inline]
    pub fn into_owned_sum<R2, C2>(self) -> MatrixSum<T, R, C, R2, C2>
    where
        T: Scalar,
        S: Storage<T, R, C>,
        R2: Dim,
        C2: Dim,
        DefaultAllocator: SameShapeAllocator<T, R, C, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        if TypeId::of::<SameShapeStorage<T, R, C, R2, C2>>() == TypeId::of::<Owned<T, R, C>>() {
            // SAFETY: the `TypeId` equality check above proved the combined
            // storage type is exactly `Owned<T, R, C>`, so `transmute_copy`
            // is a no-op reinterpretation; `mem::forget` prevents the moved-out
            // value from being dropped twice.
            unsafe {
                let owned = self.into_owned();
                let res = mem::transmute_copy(&owned);
                mem::forget(owned);
                res
            }
        } else {
            self.clone_owned_sum()
        }
    }

    /// Clones this matrix into one that owns its data.
    #[inline]
    #[must_use]
    pub fn clone_owned(&self) -> OMatrix<T, R, C>
    where
        T: Scalar,
        S: Storage<T, R, C>,
        DefaultAllocator: Allocator<T, R, C>,
    {
        Matrix::from_data(self.data.clone_owned())
    }

    /// Clones this matrix into one that owns its data. The actual type of the
    /// result depends on matrix storage combination rules for addition.
    #[inline]
    #[must_use]
    pub fn clone_owned_sum<R2, C2>(&self) -> MatrixSum<T, R, C, R2, C2>
    where
        T: Scalar,
        S: Storage<T, R, C>,
        R2: Dim,
        C2: Dim,
        DefaultAllocator: SameShapeAllocator<T, R, C, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        let (nrows, ncols) = self.shape();
        let nrows: SameShapeR<R, R2> = Dim::from_usize(nrows);
        let ncols: SameShapeC<C, C2> = Dim::from_usize(ncols);

        let mut res = Matrix::uninit(nrows, ncols);

        // SAFETY: indices are in bounds by construction, and every element of
        // `res` is written before `assume_init` is called.
        unsafe {
            for j in 0..res.ncols() {
                for i in 0..res.nrows() {
                    *res.get_unchecked_mut((i, j)) =
                        MaybeUninit::new(self.get_unchecked((i, j)).clone());
                }
            }

            res.assume_init()
        }
    }

    /// Transposes `self` into `out`, where `out` may be (partially) uninitialized;
    /// `Status` selects between writing initialized or `MaybeUninit` values.
    #[inline]
    fn transpose_to_uninit<Status, R2, C2, SB>(
        &self,
        _status: Status,
        out: &mut Matrix<Status::Value, R2, C2, SB>,
    ) where
        Status: InitStatus<T>,
        T: Scalar,
        R2: Dim,
        C2: Dim,
        SB: RawStorageMut<Status::Value, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
    {
        let (nrows, ncols) = self.shape();
        assert!(
            (ncols, nrows) == out.shape(),
            "Incompatible shape for transposition."
        );

        for i in 0..nrows {
            for j in 0..ncols {
                // SAFETY: both index pairs are in bounds thanks to the shape
                // assertion above.
                unsafe {
                    Status::init(
                        out.get_unchecked_mut((j, i)),
                        self.get_unchecked((i, j)).clone(),
                    );
                }
            }
        }
    }

    /// Transposes `self` and stores the result into `out`.
    ///
    /// # Panics
    /// Panics if `out`'s shape is not the transpose of `self`'s shape.
    #[inline]
    pub fn transpose_to<R2, C2, SB>(&self, out: &mut Matrix<T, R2, C2, SB>)
    where
        T: Scalar,
        R2: Dim,
        C2: Dim,
        SB: RawStorageMut<T, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
    {
        self.transpose_to_uninit(Init, out)
    }

    /// Returns the transpose of `self` as a newly allocated matrix.
    #[inline]
    #[must_use = "Did you mean to use transpose_mut()?"]
    pub fn transpose(&self) -> OMatrix<T, C, R>
    where
        T: Scalar,
        DefaultAllocator: Allocator<T, C, R>,
    {
        let (nrows, ncols) = self.shape_generic();

        let mut res = Matrix::uninit(ncols, nrows);
        self.transpose_to_uninit(Uninit, &mut res);
        // SAFETY: `transpose_to_uninit` wrote every element of `res`.
        unsafe { res.assume_init() }
    }
}
760
761impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
763 #[inline]
765 #[must_use]
766 pub fn map<T2: Scalar, F: FnMut(T) -> T2>(&self, mut f: F) -> OMatrix<T2, R, C>
767 where
768 T: Scalar,
769 DefaultAllocator: Allocator<T2, R, C>,
770 {
771 let (nrows, ncols) = self.shape_generic();
772 let mut res = Matrix::uninit(nrows, ncols);
773
774 for j in 0..ncols.value() {
775 for i in 0..nrows.value() {
776 unsafe {
778 let a = self.data.get_unchecked(i, j).clone();
779 *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a));
780 }
781 }
782 }
783
784 unsafe { res.assume_init() }
786 }
787
788 pub fn cast<T2: Scalar>(self) -> OMatrix<T2, R, C>
798 where
799 T: Scalar,
800 OMatrix<T2, R, C>: SupersetOf<Self>,
801 DefaultAllocator: Allocator<T2, R, C>,
802 {
803 crate::convert(self)
804 }
805
806 #[inline]
814 #[must_use]
815 pub fn fold_with<T2>(
816 &self,
817 init_f: impl FnOnce(Option<&T>) -> T2,
818 f: impl FnMut(T2, &T) -> T2,
819 ) -> T2
820 where
821 T: Scalar,
822 {
823 let mut it = self.iter();
824 let init = init_f(it.next());
825 it.fold(init, f)
826 }
827
828 #[inline]
831 #[must_use]
832 pub fn map_with_location<T2: Scalar, F: FnMut(usize, usize, T) -> T2>(
833 &self,
834 mut f: F,
835 ) -> OMatrix<T2, R, C>
836 where
837 T: Scalar,
838 DefaultAllocator: Allocator<T2, R, C>,
839 {
840 let (nrows, ncols) = self.shape_generic();
841 let mut res = Matrix::uninit(nrows, ncols);
842
843 for j in 0..ncols.value() {
844 for i in 0..nrows.value() {
845 unsafe {
847 let a = self.data.get_unchecked(i, j).clone();
848 *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(i, j, a));
849 }
850 }
851 }
852
853 unsafe { res.assume_init() }
855 }
856
857 #[inline]
860 #[must_use]
861 pub fn zip_map<T2, N3, S2, F>(&self, rhs: &Matrix<T2, R, C, S2>, mut f: F) -> OMatrix<N3, R, C>
862 where
863 T: Scalar,
864 T2: Scalar,
865 N3: Scalar,
866 S2: RawStorage<T2, R, C>,
867 F: FnMut(T, T2) -> N3,
868 DefaultAllocator: Allocator<N3, R, C>,
869 {
870 let (nrows, ncols) = self.shape_generic();
871 let mut res = Matrix::uninit(nrows, ncols);
872
873 assert_eq!(
874 (nrows.value(), ncols.value()),
875 rhs.shape(),
876 "Matrix simultaneous traversal error: dimension mismatch."
877 );
878
879 for j in 0..ncols.value() {
880 for i in 0..nrows.value() {
881 unsafe {
883 let a = self.data.get_unchecked(i, j).clone();
884 let b = rhs.data.get_unchecked(i, j).clone();
885 *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b))
886 }
887 }
888 }
889
890 unsafe { res.assume_init() }
892 }
893
894 #[inline]
897 #[must_use]
898 pub fn zip_zip_map<T2, N3, N4, S2, S3, F>(
899 &self,
900 b: &Matrix<T2, R, C, S2>,
901 c: &Matrix<N3, R, C, S3>,
902 mut f: F,
903 ) -> OMatrix<N4, R, C>
904 where
905 T: Scalar,
906 T2: Scalar,
907 N3: Scalar,
908 N4: Scalar,
909 S2: RawStorage<T2, R, C>,
910 S3: RawStorage<N3, R, C>,
911 F: FnMut(T, T2, N3) -> N4,
912 DefaultAllocator: Allocator<N4, R, C>,
913 {
914 let (nrows, ncols) = self.shape_generic();
915 let mut res = Matrix::uninit(nrows, ncols);
916
917 assert_eq!(
918 (nrows.value(), ncols.value()),
919 b.shape(),
920 "Matrix simultaneous traversal error: dimension mismatch."
921 );
922 assert_eq!(
923 (nrows.value(), ncols.value()),
924 c.shape(),
925 "Matrix simultaneous traversal error: dimension mismatch."
926 );
927
928 for j in 0..ncols.value() {
929 for i in 0..nrows.value() {
930 unsafe {
932 let a = self.data.get_unchecked(i, j).clone();
933 let b = b.data.get_unchecked(i, j).clone();
934 let c = c.data.get_unchecked(i, j).clone();
935 *res.data.get_unchecked_mut(i, j) = MaybeUninit::new(f(a, b, c))
936 }
937 }
938 }
939
940 unsafe { res.assume_init() }
942 }
943
944 #[inline]
946 #[must_use]
947 pub fn fold<Acc>(&self, init: Acc, mut f: impl FnMut(Acc, T) -> Acc) -> Acc
948 where
949 T: Scalar,
950 {
951 let (nrows, ncols) = self.shape_generic();
952
953 let mut res = init;
954
955 for j in 0..ncols.value() {
956 for i in 0..nrows.value() {
957 unsafe {
959 let a = self.data.get_unchecked(i, j).clone();
960 res = f(res, a)
961 }
962 }
963 }
964
965 res
966 }
967
968 #[inline]
970 #[must_use]
971 pub fn zip_fold<T2, R2, C2, S2, Acc>(
972 &self,
973 rhs: &Matrix<T2, R2, C2, S2>,
974 init: Acc,
975 mut f: impl FnMut(Acc, T, T2) -> Acc,
976 ) -> Acc
977 where
978 T: Scalar,
979 T2: Scalar,
980 R2: Dim,
981 C2: Dim,
982 S2: RawStorage<T2, R2, C2>,
983 ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
984 {
985 let (nrows, ncols) = self.shape_generic();
986
987 let mut res = init;
988
989 assert_eq!(
990 (nrows.value(), ncols.value()),
991 rhs.shape(),
992 "Matrix simultaneous traversal error: dimension mismatch."
993 );
994
995 for j in 0..ncols.value() {
996 for i in 0..nrows.value() {
997 unsafe {
998 let a = self.data.get_unchecked(i, j).clone();
999 let b = rhs.data.get_unchecked(i, j).clone();
1000 res = f(res, a, b)
1001 }
1002 }
1003 }
1004
1005 res
1006 }
1007
1008 #[inline]
1010 pub fn apply<F: FnMut(&mut T)>(&mut self, mut f: F)
1011 where
1012 S: RawStorageMut<T, R, C>,
1013 {
1014 let (nrows, ncols) = self.shape();
1015
1016 for j in 0..ncols {
1017 for i in 0..nrows {
1018 unsafe {
1019 let e = self.data.get_unchecked_mut(i, j);
1020 f(e)
1021 }
1022 }
1023 }
1024 }
1025
1026 #[inline]
1029 pub fn zip_apply<T2, R2, C2, S2>(
1030 &mut self,
1031 rhs: &Matrix<T2, R2, C2, S2>,
1032 mut f: impl FnMut(&mut T, T2),
1033 ) where
1034 S: RawStorageMut<T, R, C>,
1035 T2: Scalar,
1036 R2: Dim,
1037 C2: Dim,
1038 S2: RawStorage<T2, R2, C2>,
1039 ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
1040 {
1041 let (nrows, ncols) = self.shape();
1042
1043 assert_eq!(
1044 (nrows, ncols),
1045 rhs.shape(),
1046 "Matrix simultaneous traversal error: dimension mismatch."
1047 );
1048
1049 for j in 0..ncols {
1050 for i in 0..nrows {
1051 unsafe {
1052 let e = self.data.get_unchecked_mut(i, j);
1053 let rhs = rhs.get_unchecked((i, j)).clone();
1054 f(e, rhs)
1055 }
1056 }
1057 }
1058 }
1059
1060 #[inline]
1063 pub fn zip_zip_apply<T2, R2, C2, S2, N3, R3, C3, S3>(
1064 &mut self,
1065 b: &Matrix<T2, R2, C2, S2>,
1066 c: &Matrix<N3, R3, C3, S3>,
1067 mut f: impl FnMut(&mut T, T2, N3),
1068 ) where
1069 S: RawStorageMut<T, R, C>,
1070 T2: Scalar,
1071 R2: Dim,
1072 C2: Dim,
1073 S2: RawStorage<T2, R2, C2>,
1074 N3: Scalar,
1075 R3: Dim,
1076 C3: Dim,
1077 S3: RawStorage<N3, R3, C3>,
1078 ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
1079 ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
1080 {
1081 let (nrows, ncols) = self.shape();
1082
1083 assert_eq!(
1084 (nrows, ncols),
1085 b.shape(),
1086 "Matrix simultaneous traversal error: dimension mismatch."
1087 );
1088 assert_eq!(
1089 (nrows, ncols),
1090 c.shape(),
1091 "Matrix simultaneous traversal error: dimension mismatch."
1092 );
1093
1094 for j in 0..ncols {
1095 for i in 0..nrows {
1096 unsafe {
1097 let e = self.data.get_unchecked_mut(i, j);
1098 let b = b.get_unchecked((i, j)).clone();
1099 let c = c.get_unchecked((i, j)).clone();
1100 f(e, b, c)
1101 }
1102 }
1103 }
1104 }
1105}
1106
/// # Iteration on components, rows, and columns
impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
    /// Iterates through this matrix's components, in column-major order.
    #[inline]
    pub fn iter(&self) -> MatrixIter<'_, T, R, C, S> {
        MatrixIter::new(&self.data)
    }

    /// Iterates through this matrix's rows.
    #[inline]
    pub fn row_iter(&self) -> RowIter<'_, T, R, C, S> {
        RowIter::new(self)
    }

    /// Iterates through this matrix's columns.
    #[inline]
    pub fn column_iter(&self) -> ColumnIter<'_, T, R, C, S> {
        ColumnIter::new(self)
    }

    /// Mutably iterates through this matrix's components, in column-major order.
    #[inline]
    pub fn iter_mut(&mut self) -> MatrixIterMut<'_, T, R, C, S>
    where
        S: RawStorageMut<T, R, C>,
    {
        MatrixIterMut::new(&mut self.data)
    }

    /// Mutably iterates through this matrix's rows.
    #[inline]
    pub fn row_iter_mut(&mut self) -> RowIterMut<'_, T, R, C, S>
    where
        S: RawStorageMut<T, R, C>,
    {
        RowIterMut::new(self)
    }

    /// Mutably iterates through this matrix's columns.
    #[inline]
    pub fn column_iter_mut(&mut self) -> ColumnIterMut<'_, T, R, C, S>
    where
        S: RawStorageMut<T, R, C>,
    {
        ColumnIterMut::new(self)
    }
}
1216
impl<T, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
    /// Returns a mutable raw pointer to the first component of this matrix.
    #[inline]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        self.data.ptr_mut()
    }

    /// Swaps two entries without bound-checking.
    ///
    /// # Safety
    /// Both `(row, col)` pairs must be within the matrix's bounds.
    #[inline]
    pub unsafe fn swap_unchecked(&mut self, row_cols1: (usize, usize), row_cols2: (usize, usize)) {
        debug_assert!(row_cols1.0 < self.nrows() && row_cols1.1 < self.ncols());
        debug_assert!(row_cols2.0 < self.nrows() && row_cols2.1 < self.ncols());
        self.data.swap_unchecked(row_cols1, row_cols2)
    }

    /// Swaps two entries.
    ///
    /// # Panics
    /// Panics if either index pair is out of bounds.
    #[inline]
    pub fn swap(&mut self, row_cols1: (usize, usize), row_cols2: (usize, usize)) {
        let (nrows, ncols) = self.shape();
        assert!(
            row_cols1.0 < nrows && row_cols1.1 < ncols,
            "Matrix elements swap index out of bounds."
        );
        assert!(
            row_cols2.0 < nrows && row_cols2.1 < ncols,
            "Matrix elements swap index out of bounds."
        );
        // SAFETY: both pairs were bounds-checked by the assertions above.
        unsafe { self.swap_unchecked(row_cols1, row_cols2) }
    }

    /// Fills this matrix with the content of a slice, interpreted in
    /// column-major order.
    ///
    /// # Panics
    /// Panics if the slice length differs from the number of matrix entries.
    #[inline]
    pub fn copy_from_slice(&mut self, slice: &[T])
    where
        T: Scalar,
    {
        let (nrows, ncols) = self.shape();

        assert!(
            nrows * ncols == slice.len(),
            "The slice must contain the same number of elements as the matrix."
        );

        for j in 0..ncols {
            for i in 0..nrows {
                // SAFETY: `i + j * nrows < nrows * ncols == slice.len()`.
                unsafe {
                    *self.get_unchecked_mut((i, j)) = slice.get_unchecked(i + j * nrows).clone();
                }
            }
        }
    }

    /// Fills this matrix with the content of another matrix of the same shape.
    ///
    /// # Panics
    /// Panics if the shapes differ.
    #[inline]
    pub fn copy_from<R2, C2, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
    where
        T: Scalar,
        R2: Dim,
        C2: Dim,
        SB: RawStorage<T, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        assert!(
            self.shape() == other.shape(),
            "Unable to copy from a matrix with a different shape."
        );

        for j in 0..self.ncols() {
            for i in 0..self.nrows() {
                // SAFETY: same shape, so (i, j) is in bounds for both matrices.
                unsafe {
                    *self.get_unchecked_mut((i, j)) = other.get_unchecked((i, j)).clone();
                }
            }
        }
    }

    /// Fills this matrix with the content of the transpose of another matrix.
    ///
    /// # Panics
    /// Panics if `other`'s shape is not the transpose of `self`'s shape.
    #[inline]
    pub fn tr_copy_from<R2, C2, SB>(&mut self, other: &Matrix<T, R2, C2, SB>)
    where
        T: Scalar,
        R2: Dim,
        C2: Dim,
        SB: RawStorage<T, R2, C2>,
        ShapeConstraint: DimEq<R, C2> + SameNumberOfColumns<C, R2>,
    {
        let (nrows, ncols) = self.shape();
        assert!(
            (ncols, nrows) == other.shape(),
            "Unable to copy from a matrix with incompatible shape."
        );

        for j in 0..ncols {
            for i in 0..nrows {
                // SAFETY: transposed shapes match, so (i, j) and (j, i) are both in bounds.
                unsafe {
                    *self.get_unchecked_mut((i, j)) = other.get_unchecked((j, i)).clone();
                }
            }
        }
    }

    /// Consuming variant of [`Self::apply`]: applies `f` to every entry and
    /// returns the modified matrix.
    #[inline]
    pub fn apply_into<F: FnMut(&mut T)>(mut self, f: F) -> Self {
        self.apply(f);
        self
    }
}
1331
impl<T, D: Dim, S: RawStorage<T, D>> Vector<T, D, S> {
    /// Gets a reference to the `i`-th element of this column vector without
    /// bound-checking, accounting for the row stride.
    ///
    /// # Safety
    /// `i` must be strictly less than the vector's length.
    #[inline]
    #[must_use]
    pub unsafe fn vget_unchecked(&self, i: usize) -> &T {
        debug_assert!(i < self.nrows(), "Vector index out of bounds.");
        let i = i * self.strides().0;
        self.data.get_unchecked_linear(i)
    }
}
1342
impl<T, D: Dim, S: RawStorageMut<T, D>> Vector<T, D, S> {
    /// Gets a mutable reference to the `i`-th element of this column vector
    /// without bound-checking, accounting for the row stride.
    ///
    /// # Safety
    /// `i` must be strictly less than the vector's length.
    #[inline]
    #[must_use]
    pub unsafe fn vget_unchecked_mut(&mut self, i: usize) -> &mut T {
        debug_assert!(i < self.nrows(), "Vector index out of bounds.");
        let i = i * self.strides().0;
        self.data.get_unchecked_linear_mut(i)
    }
}
1353
impl<T, R: Dim, C: Dim, S: RawStorage<T, R, C> + IsContiguous> Matrix<T, R, C, S> {
    /// Views this matrix's components as a slice, in column-major order.
    #[inline]
    #[must_use]
    pub fn as_slice(&self) -> &[T] {
        // SAFETY: the `IsContiguous` bound guarantees the storage is one
        // contiguous memory block.
        unsafe { self.data.as_slice_unchecked() }
    }
}
1363
impl<T, R: Dim, C: Dim, S: RawStorageMut<T, R, C> + IsContiguous> Matrix<T, R, C, S> {
    /// Views this matrix's components as a mutable slice, in column-major order.
    #[inline]
    #[must_use]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        // SAFETY: the `IsContiguous` bound guarantees the storage is one
        // contiguous memory block.
        unsafe { self.data.as_mut_slice_unchecked() }
    }
}
1373
impl<T: Scalar, D: Dim, S: RawStorageMut<T, D, D>> Matrix<T, D, D, S> {
    /// Transposes this square matrix in-place by swapping symmetric entries.
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    pub fn transpose_mut(&mut self) {
        assert!(
            self.is_square(),
            "Unable to transpose a non-square matrix in-place."
        );

        let dim = self.shape().0;

        // Swap each strictly-lower-triangular entry with its mirror; the
        // diagonal is untouched.
        for i in 1..dim {
            for j in 0..i {
                // SAFETY: i, j < dim, so both pairs are in bounds.
                unsafe { self.swap_unchecked((i, j), (j, i)) }
            }
        }
    }
}
1391
impl<T: SimdComplexField, R: Dim, C: Dim, S: RawStorage<T, R, C>> Matrix<T, R, C, S> {
    /// Conjugate-transposes `self` into `out`, where `out` may be (partially)
    /// uninitialized; `Status` selects how the destination values are written.
    #[inline]
    fn adjoint_to_uninit<Status, R2, C2, SB>(
        &self,
        _status: Status,
        out: &mut Matrix<Status::Value, R2, C2, SB>,
    ) where
        Status: InitStatus<T>,
        R2: Dim,
        C2: Dim,
        SB: RawStorageMut<Status::Value, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
    {
        let (nrows, ncols) = self.shape();
        assert!(
            (ncols, nrows) == out.shape(),
            "Incompatible shape for transpose-copy."
        );

        for i in 0..nrows {
            for j in 0..ncols {
                // SAFETY: both index pairs are in bounds thanks to the shape
                // assertion above.
                unsafe {
                    Status::init(
                        out.get_unchecked_mut((j, i)),
                        self.get_unchecked((i, j)).clone().simd_conjugate(),
                    );
                }
            }
        }
    }

    /// Computes the adjoint (conjugate-transpose) of `self` and stores it into `out`.
    ///
    /// # Panics
    /// Panics if `out`'s shape is not the transpose of `self`'s shape.
    #[inline]
    pub fn adjoint_to<R2, C2, SB>(&self, out: &mut Matrix<T, R2, C2, SB>)
    where
        R2: Dim,
        C2: Dim,
        SB: RawStorageMut<T, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
    {
        self.adjoint_to_uninit(Init, out)
    }

    /// Returns the adjoint (conjugate-transpose) of `self` as a new matrix.
    #[inline]
    #[must_use = "Did you mean to use adjoint_mut()?"]
    pub fn adjoint(&self) -> OMatrix<T, C, R>
    where
        DefaultAllocator: Allocator<T, C, R>,
    {
        let (nrows, ncols) = self.shape_generic();

        let mut res = Matrix::uninit(ncols, nrows);
        self.adjoint_to_uninit(Uninit, &mut res);

        // SAFETY: `adjoint_to_uninit` wrote every element of `res`.
        unsafe { res.assume_init() }
    }

    /// Deprecated alias for [`Self::adjoint_to`].
    #[deprecated(note = "Renamed `self.adjoint_to(out)`.")]
    #[inline]
    pub fn conjugate_transpose_to<R2, C2, SB>(&self, out: &mut Matrix<T, R2, C2, SB>)
    where
        R2: Dim,
        C2: Dim,
        SB: RawStorageMut<T, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, C2> + SameNumberOfColumns<C, R2>,
    {
        self.adjoint_to(out)
    }

    /// Deprecated alias for [`Self::adjoint`].
    #[deprecated(note = "Renamed `self.adjoint()`.")]
    #[inline]
    pub fn conjugate_transpose(&self) -> OMatrix<T, C, R>
    where
        DefaultAllocator: Allocator<T, C, R>,
    {
        self.adjoint()
    }

    /// Returns the component-wise conjugate of `self`.
    #[inline]
    #[must_use = "Did you mean to use conjugate_mut()?"]
    pub fn conjugate(&self) -> OMatrix<T, R, C>
    where
        DefaultAllocator: Allocator<T, R, C>,
    {
        self.map(|e| e.simd_conjugate())
    }

    /// Divides each component of the complex matrix `self` by the given real.
    #[inline]
    #[must_use = "Did you mean to use unscale_mut()?"]
    pub fn unscale(&self, real: T::SimdRealField) -> OMatrix<T, R, C>
    where
        DefaultAllocator: Allocator<T, R, C>,
    {
        self.map(|e| e.simd_unscale(real.clone()))
    }

    /// Multiplies each component of the complex matrix `self` by the given real.
    #[inline]
    #[must_use = "Did you mean to use scale_mut()?"]
    pub fn scale(&self, real: T::SimdRealField) -> OMatrix<T, R, C>
    where
        DefaultAllocator: Allocator<T, R, C>,
    {
        self.map(|e| e.simd_scale(real.clone()))
    }
}
1507
impl<T: SimdComplexField, R: Dim, C: Dim, S: RawStorageMut<T, R, C>> Matrix<T, R, C, S> {
    /// Conjugates each component of `self`, in place.
    #[inline]
    pub fn conjugate_mut(&mut self) {
        self.apply(|e| *e = e.clone().simd_conjugate())
    }

    /// Divides each component of `self` by the given real, in place.
    #[inline]
    pub fn unscale_mut(&mut self, real: T::SimdRealField) {
        self.apply(|e| *e = e.clone().simd_unscale(real.clone()))
    }

    /// Multiplies each component of `self` by the given real, in place.
    #[inline]
    pub fn scale_mut(&mut self, real: T::SimdRealField) {
        self.apply(|e| *e = e.clone().simd_scale(real.clone()))
    }
}
1527
impl<T: SimdComplexField, D: Dim, S: RawStorageMut<T, D, D>> Matrix<T, D, D, S> {
    /// Deprecated alias for [`Self::adjoint_mut`].
    #[deprecated(note = "Renamed to `self.adjoint_mut()`.")]
    pub fn conjugate_transform_mut(&mut self) {
        self.adjoint_mut()
    }

    /// Sets `self` to its adjoint (conjugate-transpose), in place.
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    pub fn adjoint_mut(&mut self) {
        assert!(
            self.is_square(),
            "Unable to transpose a non-square matrix in-place."
        );

        let dim = self.shape().0;

        for i in 0..dim {
            // Swap each strictly-lower-triangular entry with the conjugate of
            // its mirror (and vice versa).
            for j in 0..i {
                // SAFETY: i, j < dim, so all index pairs are in bounds.
                unsafe {
                    let ref_ij = self.get_unchecked((i, j)).clone();
                    let ref_ji = self.get_unchecked((j, i)).clone();
                    let conj_ij = ref_ij.simd_conjugate();
                    let conj_ji = ref_ji.simd_conjugate();
                    *self.get_unchecked_mut((i, j)) = conj_ji;
                    *self.get_unchecked_mut((j, i)) = conj_ij;
                }
            }

            // The diagonal is conjugated in place.
            {
                let diag = unsafe { self.get_unchecked_mut((i, i)) };
                *diag = diag.clone().simd_conjugate();
            }
        }
    }
}
1563
impl<T: Scalar, D: Dim, S: RawStorage<T, D, D>> SquareMatrix<T, D, S> {
    /// Returns the diagonal of this matrix as a newly allocated vector.
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    #[inline]
    #[must_use]
    pub fn diagonal(&self) -> OVector<T, D>
    where
        DefaultAllocator: Allocator<T, D>,
    {
        self.map_diagonal(|e| e)
    }

    /// Returns the result of applying `f` to each diagonal element, as a vector.
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    #[must_use]
    pub fn map_diagonal<T2: Scalar>(&self, mut f: impl FnMut(T) -> T2) -> OVector<T2, D>
    where
        DefaultAllocator: Allocator<T2, D>,
    {
        assert!(
            self.is_square(),
            "Unable to get the diagonal of a non-square matrix."
        );

        let dim = self.shape_generic().0;
        let mut res = Matrix::uninit(dim, Const::<1>);

        for i in 0..dim.value() {
            // SAFETY: (i, i) is in bounds since the matrix is square of
            // dimension `dim`; every element of `res` is written.
            unsafe {
                *res.vget_unchecked_mut(i) =
                    MaybeUninit::new(f(self.get_unchecked((i, i)).clone()));
            }
        }

        // SAFETY: the loop above initialized every element.
        unsafe { res.assume_init() }
    }

    /// Computes the trace of this matrix: the sum of its diagonal elements.
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    #[inline]
    #[must_use]
    pub fn trace(&self) -> T
    where
        T: Scalar + Zero + ClosedAdd,
    {
        assert!(
            self.is_square(),
            "Cannot compute the trace of non-square matrix."
        );

        let dim = self.shape_generic().0;
        let mut res = T::zero();

        for i in 0..dim.value() {
            // SAFETY: (i, i) is in bounds since the matrix is square.
            res += unsafe { self.get_unchecked((i, i)).clone() };
        }

        res
    }
}
1626
impl<T: SimdComplexField, D: Dim, S: Storage<T, D, D>> SquareMatrix<T, D, S> {
    /// The symmetric part of `self`: `0.5 * (self + self.transpose())`.
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    #[inline]
    #[must_use]
    pub fn symmetric_part(&self) -> OMatrix<T, D, D>
    where
        DefaultAllocator: Allocator<T, D, D>,
    {
        assert!(
            self.is_square(),
            "Cannot compute the symmetric part of a non-square matrix."
        );
        let mut tr = self.transpose();
        tr += self;
        tr *= crate::convert::<_, T>(0.5);
        tr
    }

    /// The hermitian part of `self`: `0.5 * (self + self.adjoint())`.
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    #[inline]
    #[must_use]
    pub fn hermitian_part(&self) -> OMatrix<T, D, D>
    where
        DefaultAllocator: Allocator<T, D, D>,
    {
        assert!(
            self.is_square(),
            "Cannot compute the hermitian part of a non-square matrix."
        );

        let mut tr = self.adjoint();
        tr += self;
        tr *= crate::convert::<_, T>(0.5);
        tr
    }
}
1663
impl<T: Scalar + Zero + One, D: DimAdd<U1> + IsNotStaticOne, S: RawStorage<T, D, D>>
    Matrix<T, D, D, S>
{
    /// Embeds this square matrix into one dimension higher: copies `self` into
    /// the top-left corner of an identity matrix of dimension `D + 1`.
    ///
    /// # Panics
    /// Panics if the matrix is not square.
    #[inline]
    #[must_use]
    pub fn to_homogeneous(&self) -> OMatrix<T, DimSum<D, U1>, DimSum<D, U1>>
    where
        DefaultAllocator: Allocator<T, DimSum<D, U1>, DimSum<D, U1>>,
    {
        assert!(
            self.is_square(),
            "Only square matrices can currently be transformed to homogeneous coordinates."
        );
        let dim = DimSum::<D, U1>::from_usize(self.nrows() + 1);
        let mut res = OMatrix::identity_generic(dim, dim);
        res.generic_slice_mut::<D, D>((0, 0), self.shape_generic())
            .copy_from(self);
        res
    }
}
1686
1687impl<T: Scalar + Zero, D: DimAdd<U1>, S: RawStorage<T, D>> Vector<T, D, S> {
1688 #[inline]
1691 #[must_use]
1692 pub fn to_homogeneous(&self) -> OVector<T, DimSum<D, U1>>
1693 where
1694 DefaultAllocator: Allocator<T, DimSum<D, U1>>,
1695 {
1696 self.push(T::zero())
1697 }
1698
1699 #[inline]
1702 pub fn from_homogeneous<SB>(v: Vector<T, DimSum<D, U1>, SB>) -> Option<OVector<T, D>>
1703 where
1704 SB: RawStorage<T, DimSum<D, U1>>,
1705 DefaultAllocator: Allocator<T, D>,
1706 {
1707 if v[v.len() - 1].is_zero() {
1708 let nrows = D::from_usize(v.len() - 1);
1709 Some(v.generic_slice((0, 0), (nrows, Const::<1>)).into_owned())
1710 } else {
1711 None
1712 }
1713 }
1714}
1715
impl<T: Scalar, D: DimAdd<U1>, S: RawStorage<T, D>> Vector<T, D, S> {
    /// Constructs a new vector of dimension `self.len() + 1` containing the components
    /// of `self` followed by `element`.
    #[inline]
    #[must_use]
    pub fn push(&self, element: T) -> OVector<T, DimSum<D, U1>>
    where
        DefaultAllocator: Allocator<T, DimSum<D, U1>>,
    {
        let len = self.len();
        let hnrows = DimSum::<D, U1>::from_usize(len + 1);
        let mut res = Matrix::uninit(hnrows, Const::<1>);
        // Copy `self` into the first `len` slots of the uninitialized vector.
        res.generic_slice_mut((0, 0), self.shape_generic())
            .zip_apply(self, |out, e| *out = MaybeUninit::new(e));
        // Write the appended component into the final slot.
        res[(len, 0)] = MaybeUninit::new(element);

        // SAFETY: all `len + 1` components were written just above.
        unsafe { res.assume_init() }
    }
}
1737
1738impl<T, R: Dim, C: Dim, S> AbsDiffEq for Matrix<T, R, C, S>
1739where
1740 T: Scalar + AbsDiffEq,
1741 S: RawStorage<T, R, C>,
1742 T::Epsilon: Clone,
1743{
1744 type Epsilon = T::Epsilon;
1745
1746 #[inline]
1747 fn default_epsilon() -> Self::Epsilon {
1748 T::default_epsilon()
1749 }
1750
1751 #[inline]
1752 fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
1753 self.iter()
1754 .zip(other.iter())
1755 .all(|(a, b)| a.abs_diff_eq(b, epsilon.clone()))
1756 }
1757}
1758
impl<T, R: Dim, C: Dim, S> RelativeEq for Matrix<T, R, C, S>
where
    T: Scalar + RelativeEq,
    S: Storage<T, R, C>,
    T::Epsilon: Clone,
{
    #[inline]
    fn default_max_relative() -> Self::Epsilon {
        T::default_max_relative()
    }

    #[inline]
    fn relative_eq(
        &self,
        other: &Self,
        epsilon: Self::Epsilon,
        max_relative: Self::Epsilon,
    ) -> bool {
        // NOTE(review): this is not infinite recursion — Rust method resolution
        // prefers an *inherent* `Matrix::relative_eq` (presumably defined earlier in
        // this file) over this trait method. Confirm the inherent method exists
        // before refactoring this call.
        self.relative_eq(other, epsilon, max_relative)
    }
}
1780
1781impl<T, R: Dim, C: Dim, S> UlpsEq for Matrix<T, R, C, S>
1782where
1783 T: Scalar + UlpsEq,
1784 S: RawStorage<T, R, C>,
1785 T::Epsilon: Clone,
1786{
1787 #[inline]
1788 fn default_max_ulps() -> u32 {
1789 T::default_max_ulps()
1790 }
1791
1792 #[inline]
1793 fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
1794 assert!(self.shape() == other.shape());
1795 self.iter()
1796 .zip(other.iter())
1797 .all(|(a, b)| a.ulps_eq(b, epsilon.clone(), max_ulps))
1798 }
1799}
1800
impl<T, R: Dim, C: Dim, S> PartialOrd for Matrix<T, R, C, S>
where
    T: Scalar + PartialOrd,
    S: RawStorage<T, R, C>,
{
    /// Componentwise partial ordering: `Some(Less)` / `Some(Greater)` only when every
    /// component pair agrees with (or is equal under) that ordering, `Some(Equal)`
    /// when all pairs are equal, and `None` on shape mismatch, any incomparable
    /// component pair, or a mix of `<` and `>` pairs.
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        // Matrices of different shapes are never comparable.
        if self.shape() != other.shape() {
            return None;
        }

        // Two empty matrices of equal shape compare equal.
        if self.nrows() == 0 || self.ncols() == 0 {
            return Some(Ordering::Equal);
        }

        // Seed the running verdict with the first component pair.
        let mut first_ord = unsafe {
            // SAFETY: the matrix was just checked to be non-empty, so linear
            // index 0 is in-bounds for both storages.
            self.data
                .get_unchecked_linear(0)
                .partial_cmp(other.data.get_unchecked_linear(0))
        };

        if let Some(first_ord) = first_ord.as_mut() {
            let mut it = self.iter().zip(other.iter());
            // Skip the first pair: it was already consumed to seed `first_ord`.
            let _ = it.next();
            for (left, right) in it {
                if let Some(ord) = left.partial_cmp(right) {
                    match ord {
                        Ordering::Equal => { /* equal pairs never change the verdict */ }
                        Ordering::Less => {
                            // A `<` pair after a `>` pair: incomparable overall.
                            if *first_ord == Ordering::Greater {
                                return None;
                            }
                            *first_ord = ord
                        }
                        Ordering::Greater => {
                            // A `>` pair after a `<` pair: incomparable overall.
                            if *first_ord == Ordering::Less {
                                return None;
                            }
                            *first_ord = ord
                        }
                    }
                } else {
                    // Any incomparable component pair makes the whole result `None`.
                    return None;
                }
            }
        }

        first_ord
    }

    /// Componentwise `<`: true iff every component of `self` is `<` its counterpart.
    ///
    /// # Panics
    /// Panics if the shapes differ.
    #[inline]
    fn lt(&self, right: &Self) -> bool {
        assert_eq!(
            self.shape(),
            right.shape(),
            "Matrix comparison error: dimensions mismatch."
        );
        self.iter().zip(right.iter()).all(|(a, b)| a.lt(b))
    }

    /// Componentwise `<=`: true iff every component of `self` is `<=` its counterpart.
    ///
    /// # Panics
    /// Panics if the shapes differ.
    #[inline]
    fn le(&self, right: &Self) -> bool {
        assert_eq!(
            self.shape(),
            right.shape(),
            "Matrix comparison error: dimensions mismatch."
        );
        self.iter().zip(right.iter()).all(|(a, b)| a.le(b))
    }

    /// Componentwise `>`: true iff every component of `self` is `>` its counterpart.
    ///
    /// # Panics
    /// Panics if the shapes differ.
    #[inline]
    fn gt(&self, right: &Self) -> bool {
        assert_eq!(
            self.shape(),
            right.shape(),
            "Matrix comparison error: dimensions mismatch."
        );
        self.iter().zip(right.iter()).all(|(a, b)| a.gt(b))
    }

    /// Componentwise `>=`: true iff every component of `self` is `>=` its counterpart.
    ///
    /// # Panics
    /// Panics if the shapes differ.
    #[inline]
    fn ge(&self, right: &Self) -> bool {
        assert_eq!(
            self.shape(),
            right.shape(),
            "Matrix comparison error: dimensions mismatch."
        );
        self.iter().zip(right.iter()).all(|(a, b)| a.ge(b))
    }
}
1892
// Matrix equality is componentwise, so when `T: Eq` the relation is a total
// equivalence and the empty `Eq` marker impl is sound.
impl<T, R: Dim, C: Dim, S> Eq for Matrix<T, R, C, S>
where
    T: Scalar + Eq,
    S: RawStorage<T, R, C>,
{
}
1899
1900impl<T, R, R2, C, C2, S, S2> PartialEq<Matrix<T, R2, C2, S2>> for Matrix<T, R, C, S>
1901where
1902 T: Scalar + PartialEq,
1903 C: Dim,
1904 C2: Dim,
1905 R: Dim,
1906 R2: Dim,
1907 S: RawStorage<T, R, C>,
1908 S2: RawStorage<T, R2, C2>,
1909{
1910 #[inline]
1911 fn eq(&self, right: &Matrix<T, R2, C2, S2>) -> bool {
1912 self.shape() == right.shape() && self.iter().zip(right.iter()).all(|(l, r)| l == r)
1913 }
1914}
1915
/// Implements a formatting trait for `Matrix` by pretty-printing the components
/// inside a box-drawing frame, right-aligned in columns of uniform width.
///
/// `$fmt_str_without_precision` formats one component when the formatter carries no
/// precision; `$fmt_str_with_precision` is used when `f.precision()` is `Some(_)`
/// and receives the precision as its second argument.
macro_rules! impl_fmt {
    ($trait: path, $fmt_str_without_precision: expr, $fmt_str_with_precision: expr) => {
        impl<T, R: Dim, C: Dim, S> $trait for Matrix<T, R, C, S>
        where
            T: Scalar + $trait,
            S: RawStorage<T, R, C>,
        {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                // Width (in chars) of one component once formatted, used to align columns.
                #[cfg(feature = "std")]
                fn val_width<T: Scalar + $trait>(val: &T, f: &mut fmt::Formatter<'_>) -> usize {
                    match f.precision() {
                        Some(precision) => format!($fmt_str_with_precision, val, precision)
                            .chars()
                            .count(),
                        None => format!($fmt_str_without_precision, val).chars().count(),
                    }
                }

                // Without `std` there is no `format!`; fall back to a fixed width.
                #[cfg(not(feature = "std"))]
                fn val_width<T: Scalar + $trait>(_: &T, _: &mut fmt::Formatter<'_>) -> usize {
                    4
                }

                let (nrows, ncols) = self.shape();

                // Degenerate matrices print as an empty bracket pair.
                if nrows == 0 || ncols == 0 {
                    return write!(f, "[ ]");
                }

                // First pass: find the widest formatted component.
                let mut max_length = 0;

                for i in 0..nrows {
                    for j in 0..ncols {
                        max_length = crate::max(max_length, val_width(&self[(i, j)], f));
                    }
                }

                let max_length_with_space = max_length + 1;

                writeln!(f)?;
                writeln!(
                    f,
                    "  ┌ {:>width$} ┐",
                    "",
                    width = max_length_with_space * ncols - 1
                )?;

                // Second pass: emit each row, padding every component to the column width.
                for i in 0..nrows {
                    write!(f, "  │")?;
                    for j in 0..ncols {
                        let number_length = val_width(&self[(i, j)], f) + 1;
                        let pad = max_length_with_space - number_length;
                        write!(f, " {:>thepad$}", "", thepad = pad)?;
                        match f.precision() {
                            Some(precision) => {
                                write!(f, $fmt_str_with_precision, (*self)[(i, j)], precision)?
                            }
                            None => write!(f, $fmt_str_without_precision, (*self)[(i, j)])?,
                        }
                    }
                    writeln!(f, " │")?;
                }

                writeln!(
                    f,
                    "  └ {:>width$} ┘",
                    "",
                    width = max_length_with_space * ncols - 1
                )?;
                writeln!(f)
            }
        }
    };
}
// Instantiate the matrix pretty-printer for every numeric formatting trait. The
// first string formats a component without precision; the second is used when the
// formatter carries a precision.
impl_fmt!(fmt::Display, "{}", "{:.1$}");
impl_fmt!(fmt::LowerExp, "{:e}", "{:.1$e}");
impl_fmt!(fmt::UpperExp, "{:E}", "{:.1$E}");
// NOTE(review): Octal/LowerHex/UpperHex use `{:1$…}` — which interprets the second
// argument as *width* — while Binary/Pointer use `{:.1$…}` (*precision*). Confirm
// whether this asymmetry is intentional before changing it.
impl_fmt!(fmt::Octal, "{:o}", "{:1$o}");
impl_fmt!(fmt::LowerHex, "{:x}", "{:1$x}");
impl_fmt!(fmt::UpperHex, "{:X}", "{:1$X}");
impl_fmt!(fmt::Binary, "{:b}", "{:.1$b}");
impl_fmt!(fmt::Pointer, "{:p}", "{:.1$p}");
1998
#[cfg(test)]
mod tests {
    /// An empty dynamic vector must print as the degenerate bracket pair.
    #[test]
    fn empty_display() {
        let vec: Vec<f64> = Vec::new();
        let dvector = crate::DVector::from_vec(vec);
        assert_eq!(format!("{}", dvector), "[ ]")
    }

    /// `LowerExp` formatting right-aligns each component inside the frame.
    #[test]
    fn lower_exp() {
        let test = crate::Matrix2::new(1e6, 2e5, 2e-5, 1.);
        assert_eq!(
            format!("{:e}", test),
            r"
  ┌           ┐
  │  1e6  2e5 │
  │ 2e-5  1e0 │
  └           ┘

"
        )
    }
}
2023
impl<T: Scalar + ClosedAdd + ClosedSub + ClosedMul, R: Dim, C: Dim, S: RawStorage<T, R, C>>
    Matrix<T, R, C, S>
{
    /// The perpendicular product of two 2D column vectors, i.e. `a.x * b.y - a.y * b.x`.
    ///
    /// # Panics
    /// Panics if the two arguments do not both have shape `(2, 1)`.
    #[inline]
    #[must_use]
    pub fn perp<R2, C2, SB>(&self, b: &Matrix<T, R2, C2, SB>) -> T
    where
        R2: Dim,
        C2: Dim,
        SB: RawStorage<T, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, U2>
            + SameNumberOfColumns<C, U1>
            + SameNumberOfRows<R2, U2>
            + SameNumberOfColumns<C2, U1>,
    {
        let shape = self.shape();
        assert_eq!(
            shape,
            b.shape(),
            "2D vector perpendicular product dimension mismatch."
        );
        assert_eq!(
            shape,
            (2, 1),
            "2D perpendicular product requires (2, 1) vectors {:?}",
            shape
        );

        // SAFETY: both operands were just asserted to have shape exactly (2, 1).
        let ax = unsafe { self.get_unchecked((0, 0)).clone() };
        let ay = unsafe { self.get_unchecked((1, 0)).clone() };
        let bx = unsafe { b.get_unchecked((0, 0)).clone() };
        let by = unsafe { b.get_unchecked((1, 0)).clone() };

        ax * by - ay * bx
    }

    /// The 3D cross product of two vectors.
    ///
    /// Both arguments must share the same shape, either `(3, 1)` (column vectors) or
    /// `(1, 3)` (row vectors); the result has that same shape.
    ///
    /// # Panics
    /// Panics if the shapes differ, or are neither `(3, 1)` nor `(1, 3)`.
    #[inline]
    #[must_use]
    pub fn cross<R2, C2, SB>(&self, b: &Matrix<T, R2, C2, SB>) -> MatrixCross<T, R, C, R2, C2>
    where
        R2: Dim,
        C2: Dim,
        SB: RawStorage<T, R2, C2>,
        DefaultAllocator: SameShapeAllocator<T, R, C, R2, C2>,
        ShapeConstraint: SameNumberOfRows<R, R2> + SameNumberOfColumns<C, C2>,
    {
        let shape = self.shape();
        assert_eq!(shape, b.shape(), "Vector cross product dimension mismatch.");
        assert!(
            shape == (3, 1) || shape == (1, 3),
            "Vector cross product dimension mismatch: must be (3, 1) or (1, 3) but found {:?}.",
            shape
        );

        if shape.0 == 3 {
            // Column-vector case: both operands have shape (3, 1).
            unsafe {
                // SAFETY: all indices below lie within the asserted (3, 1) shape, and
                // every component of `res` is written before `assume_init`.
                let mut res = Matrix::uninit(Dim::from_usize(3), Dim::from_usize(1));

                let ax = self.get_unchecked((0, 0));
                let ay = self.get_unchecked((1, 0));
                let az = self.get_unchecked((2, 0));

                let bx = b.get_unchecked((0, 0));
                let by = b.get_unchecked((1, 0));
                let bz = b.get_unchecked((2, 0));

                *res.get_unchecked_mut((0, 0)) =
                    MaybeUninit::new(ay.clone() * bz.clone() - az.clone() * by.clone());
                *res.get_unchecked_mut((1, 0)) =
                    MaybeUninit::new(az.clone() * bx.clone() - ax.clone() * bz.clone());
                *res.get_unchecked_mut((2, 0)) =
                    MaybeUninit::new(ax.clone() * by.clone() - ay.clone() * bx.clone());

                res.assume_init()
            }
        } else {
            // Row-vector case: both operands have shape (1, 3).
            unsafe {
                // SAFETY: all indices below lie within the asserted (1, 3) shape, and
                // every component of `res` is written before `assume_init`.
                let mut res = Matrix::uninit(Dim::from_usize(1), Dim::from_usize(3));

                let ax = self.get_unchecked((0, 0));
                let ay = self.get_unchecked((0, 1));
                let az = self.get_unchecked((0, 2));

                let bx = b.get_unchecked((0, 0));
                let by = b.get_unchecked((0, 1));
                let bz = b.get_unchecked((0, 2));

                *res.get_unchecked_mut((0, 0)) =
                    MaybeUninit::new(ay.clone() * bz.clone() - az.clone() * by.clone());
                *res.get_unchecked_mut((0, 1)) =
                    MaybeUninit::new(az.clone() * bx.clone() - ax.clone() * bz.clone());
                *res.get_unchecked_mut((0, 2)) =
                    MaybeUninit::new(ax.clone() * by.clone() - ay.clone() * bx.clone());

                res.assume_init()
            }
        }
    }
}
2133
2134impl<T: Scalar + Field, S: RawStorage<T, U3>> Vector<T, U3, S> {
2135 #[inline]
2137 #[must_use]
2138 pub fn cross_matrix(&self) -> OMatrix<T, U3, U3> {
2139 OMatrix::<T, U3, U3>::new(
2140 T::zero(),
2141 -self[2].clone(),
2142 self[1].clone(),
2143 self[2].clone(),
2144 T::zero(),
2145 -self[0].clone(),
2146 -self[1].clone(),
2147 self[0].clone(),
2148 T::zero(),
2149 )
2150 }
2151}
2152
2153impl<T: SimdComplexField, R: Dim, C: Dim, S: Storage<T, R, C>> Matrix<T, R, C, S> {
2154 #[inline]
2156 #[must_use]
2157 pub fn angle<R2: Dim, C2: Dim, SB>(&self, other: &Matrix<T, R2, C2, SB>) -> T::SimdRealField
2158 where
2159 SB: Storage<T, R2, C2>,
2160 ShapeConstraint: DimEq<R, R2> + DimEq<C, C2>,
2161 {
2162 let prod = self.dotc(other);
2163 let n1 = self.norm();
2164 let n2 = other.norm();
2165
2166 if n1.is_zero() || n2.is_zero() {
2167 T::SimdRealField::zero()
2168 } else {
2169 let cang = prod.simd_real() / (n1 * n2);
2170 cang.simd_clamp(-T::SimdRealField::one(), T::SimdRealField::one())
2171 .simd_acos()
2172 }
2173 }
2174}
2175
// Approximate equality for `Unit`-wrapped matrices simply delegates to the inner
// matrix; the unit-norm invariant plays no role in the comparison.
impl<T, R: Dim, C: Dim, S> AbsDiffEq for Unit<Matrix<T, R, C, S>>
where
    T: Scalar + AbsDiffEq,
    S: RawStorage<T, R, C>,
    T::Epsilon: Clone,
{
    type Epsilon = T::Epsilon;

    #[inline]
    fn default_epsilon() -> Self::Epsilon {
        T::default_epsilon()
    }

    #[inline]
    fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
        // Compare the wrapped matrices componentwise.
        self.as_ref().abs_diff_eq(other.as_ref(), epsilon)
    }
}
2194
// Relative comparison for `Unit`-wrapped matrices delegates to the inner matrix.
impl<T, R: Dim, C: Dim, S> RelativeEq for Unit<Matrix<T, R, C, S>>
where
    T: Scalar + RelativeEq,
    S: Storage<T, R, C>,
    T::Epsilon: Clone,
{
    #[inline]
    fn default_max_relative() -> Self::Epsilon {
        T::default_max_relative()
    }

    #[inline]
    fn relative_eq(
        &self,
        other: &Self,
        epsilon: Self::Epsilon,
        max_relative: Self::Epsilon,
    ) -> bool {
        // Compare the wrapped matrices componentwise.
        self.as_ref()
            .relative_eq(other.as_ref(), epsilon, max_relative)
    }
}
2217
// ULPs comparison for `Unit`-wrapped matrices delegates to the inner matrix.
impl<T, R: Dim, C: Dim, S> UlpsEq for Unit<Matrix<T, R, C, S>>
where
    T: Scalar + UlpsEq,
    S: RawStorage<T, R, C>,
    T::Epsilon: Clone,
{
    #[inline]
    fn default_max_ulps() -> u32 {
        T::default_max_ulps()
    }

    #[inline]
    fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
        // Compare the wrapped matrices componentwise.
        self.as_ref().ulps_eq(other.as_ref(), epsilon, max_ulps)
    }
}
2234
2235impl<T, R, C, S> Hash for Matrix<T, R, C, S>
2236where
2237 T: Scalar + Hash,
2238 R: Dim,
2239 C: Dim,
2240 S: RawStorage<T, R, C>,
2241{
2242 fn hash<H: Hasher>(&self, state: &mut H) {
2243 let (nrows, ncols) = self.shape();
2244 (nrows, ncols).hash(state);
2245
2246 for j in 0..ncols {
2247 for i in 0..nrows {
2248 unsafe {
2249 self.get_unchecked((i, j)).hash(state);
2250 }
2251 }
2252 }
2253 }
2254}