1use approx::{AbsDiffEq, RelativeEq, UlpsEq};
2use num::{One, Zero};
3use std::fmt;
4use std::hash;
5#[cfg(feature = "abomonation-serialize")]
6use std::io::{Result as IOResult, Write};
7
8#[cfg(feature = "serde-serialize-no-std")]
9use serde::{Deserialize, Deserializer, Serialize, Serializer};
10
11#[cfg(feature = "abomonation-serialize")]
12use abomonation::Abomonation;
13
14use crate::base::allocator::Allocator;
15use crate::base::dimension::{DimNameAdd, DimNameSum, U1};
16use crate::base::storage::Owned;
17use crate::base::{Const, DefaultAllocator, OMatrix, OVector, SVector, Scalar};
18use crate::ClosedDiv;
19use crate::ClosedMul;
20
21use crate::geometry::Point;
22
/// A scale transformation in `D`-dimensional space: one independent scale
/// factor per axis, stored as a `D`-component column vector.
#[repr(C)]
#[cfg_attr(
    all(not(target_os = "cuda"), feature = "cuda"),
    derive(cust::DeviceCopy)
)]
#[derive(Copy, Clone)]
pub struct Scale<T, const D: usize> {
    /// The scale factors: component `i` is the factor applied along axis `i`.
    pub vector: SVector<T, D>,
}
35
36impl<T: fmt::Debug, const D: usize> fmt::Debug for Scale<T, D> {
37 fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
38 self.vector.as_slice().fmt(formatter)
39 }
40}
41
42impl<T: Scalar + hash::Hash, const D: usize> hash::Hash for Scale<T, D>
43where
44 Owned<T, Const<D>>: hash::Hash,
45{
46 fn hash<H: hash::Hasher>(&self, state: &mut H) {
47 self.vector.hash(state)
48 }
49}
50
#[cfg(feature = "bytemuck")]
// SAFETY: `Scale` is `#[repr(C)]` and holds only its `vector` field, so the
// all-zero bit pattern is valid whenever it is valid for the inner `SVector`
// (guaranteed by the `bytemuck::Zeroable` bounds below).
unsafe impl<T, const D: usize> bytemuck::Zeroable for Scale<T, D>
where
    T: Scalar + bytemuck::Zeroable,
    SVector<T, D>: bytemuck::Zeroable,
{
}
58
#[cfg(feature = "bytemuck")]
// SAFETY: `Scale` is `#[repr(C)]` with a single field and no padding of its
// own, so it is plain-old-data whenever the inner `SVector` is (guaranteed by
// the `bytemuck::Pod` bounds below).
unsafe impl<T, const D: usize> bytemuck::Pod for Scale<T, D>
where
    T: Scalar + bytemuck::Pod,
    SVector<T, D>: bytemuck::Pod,
{
}
66
#[cfg(feature = "abomonation-serialize")]
impl<T, const D: usize> Abomonation for Scale<T, D>
where
    T: Scalar,
    SVector<T, D>: Abomonation,
{
    // All three methods forward to `self.vector`: a `Scale` carries no state
    // beyond its component vector.
    unsafe fn entomb<W: Write>(&self, writer: &mut W) -> IOResult<()> {
        self.vector.entomb(writer)
    }

    fn extent(&self) -> usize {
        self.vector.extent()
    }

    unsafe fn exhume<'a, 'b>(&'a mut self, bytes: &'b mut [u8]) -> Option<&'b mut [u8]> {
        self.vector.exhume(bytes)
    }
}
85
#[cfg(feature = "serde-serialize-no-std")]
impl<T: Scalar, const D: usize> Serialize for Scale<T, D>
where
    Owned<T, Const<D>>: Serialize,
{
    /// Serializes a `Scale` transparently as its component vector.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        Serialize::serialize(&self.vector, serializer)
    }
}
98
#[cfg(feature = "serde-serialize-no-std")]
impl<'a, T: Scalar, const D: usize> Deserialize<'a> for Scale<T, D>
where
    Owned<T, Const<D>>: Deserialize<'a>,
{
    /// Deserializes the component vector and wraps it into a `Scale`.
    fn deserialize<Des>(deserializer: Des) -> Result<Self, Des::Error>
    where
        Des: Deserializer<'a>,
    {
        SVector::<T, D>::deserialize(deserializer).map(Scale::from)
    }
}
113
#[cfg(feature = "rkyv-serialize-no-std")]
mod rkyv_impl {
    use super::Scale;
    use crate::base::SVector;
    use rkyv::{offset_of, project_struct, Archive, Deserialize, Fallible, Serialize};

    // The archived form of `Scale<T, D>` is `Scale<T::Archived, D>`: the
    // wrapper is preserved and only the component vector is archived.
    impl<T: Archive, const D: usize> Archive for Scale<T, D> {
        type Archived = Scale<T::Archived, D>;
        type Resolver = <SVector<T, D> as Archive>::Resolver;

        fn resolve(
            &self,
            pos: usize,
            resolver: Self::Resolver,
            out: &mut core::mem::MaybeUninit<Self::Archived>,
        ) {
            // Resolve the inner vector in place at its offset within the
            // archived struct, projecting into the uninitialized output.
            self.vector.resolve(
                pos + offset_of!(Self::Archived, vector),
                resolver,
                project_struct!(out: Self::Archived => vector),
            );
        }
    }

    // Serialization forwards to the component vector.
    impl<T: Serialize<S>, S: Fallible + ?Sized, const D: usize> Serialize<S> for Scale<T, D> {
        fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
            self.vector.serialize(serializer)
        }
    }

    // Deserialization reconstructs a `Scale<T, D>` from its archived vector.
    impl<T: Archive, _D: Fallible + ?Sized, const D: usize> Deserialize<Scale<T, D>, _D>
        for Scale<T::Archived, D>
    where
        T::Archived: Deserialize<T, _D>,
    {
        fn deserialize(&self, deserializer: &mut _D) -> Result<Scale<T, D>, _D::Error> {
            Ok(Scale {
                vector: self.vector.deserialize(deserializer)?,
            })
        }
    }
}
156
157impl<T: Scalar, const D: usize> Scale<T, D> {
158 #[inline]
177 #[must_use = "Did you mean to use try_inverse_mut()?"]
178 pub fn try_inverse(&self) -> Option<Scale<T, D>>
179 where
180 T: ClosedDiv + One + Zero,
181 {
182 for i in 0..D {
183 if self.vector[i] == T::zero() {
184 return None;
185 }
186 }
187 return Some(self.vector.map(|e| T::one() / e).into());
188 }
189
190 #[inline]
208 #[must_use]
209 pub unsafe fn inverse_unchecked(&self) -> Scale<T, D>
210 where
211 T: ClosedDiv + One,
212 {
213 return self.vector.map(|e| T::one() / e).into();
214 }
215
216 #[inline]
236 #[must_use]
237 pub fn pseudo_inverse(&self) -> Scale<T, D>
238 where
239 T: ClosedDiv + One + Zero,
240 {
241 return self
242 .vector
243 .map(|e| {
244 if e != T::zero() {
245 T::one() / e
246 } else {
247 T::zero()
248 }
249 })
250 .into();
251 }
252
253 #[inline]
272 #[must_use]
273 pub fn to_homogeneous(&self) -> OMatrix<T, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>
274 where
275 T: Zero + One + Clone,
276 Const<D>: DimNameAdd<U1>,
277 DefaultAllocator: Allocator<T, DimNameSum<Const<D>, U1>, DimNameSum<Const<D>, U1>>
278 + Allocator<T, DimNameSum<Const<D>, U1>, U1>,
279 {
280 let mut v = OVector::from_element(T::one());
285 for i in 0..D {
286 v[i] = self.vector[i].clone();
287 }
288 return OMatrix::from_diagonal(&v);
289 }
290
291 #[inline]
314 pub fn try_inverse_mut(&mut self) -> bool
315 where
316 T: ClosedDiv + One + Zero,
317 {
318 if let Some(v) = self.try_inverse() {
319 self.vector = v.vector;
320 true
321 } else {
322 false
323 }
324 }
325}
326
impl<T: Scalar + ClosedMul, const D: usize> Scale<T, D> {
    /// Applies this scale to the point `pt` by delegating to the
    /// `Scale * Point` multiplication (component-wise scaling).
    #[inline]
    #[must_use]
    pub fn transform_point(&self, pt: &Point<T, D>) -> Point<T, D> {
        self * pt
    }
}
345
346impl<T: Scalar + ClosedDiv + ClosedMul + One + Zero, const D: usize> Scale<T, D> {
347 #[inline]
362 #[must_use]
363 pub fn try_inverse_transform_point(&self, pt: &Point<T, D>) -> Option<Point<T, D>> {
364 self.try_inverse().map(|s| s * pt)
365 }
366}
367
// `Scale` equality is exact component-wise comparison, so it is a full
// equivalence relation whenever `T`'s is.
impl<T: Scalar + Eq, const D: usize> Eq for Scale<T, D> {}
369
370impl<T: Scalar + PartialEq, const D: usize> PartialEq for Scale<T, D> {
371 #[inline]
372 fn eq(&self, right: &Scale<T, D>) -> bool {
373 self.vector == right.vector
374 }
375}
376
377impl<T: Scalar + AbsDiffEq, const D: usize> AbsDiffEq for Scale<T, D>
378where
379 T::Epsilon: Clone,
380{
381 type Epsilon = T::Epsilon;
382
383 #[inline]
384 fn default_epsilon() -> Self::Epsilon {
385 T::default_epsilon()
386 }
387
388 #[inline]
389 fn abs_diff_eq(&self, other: &Self, epsilon: Self::Epsilon) -> bool {
390 self.vector.abs_diff_eq(&other.vector, epsilon)
391 }
392}
393
394impl<T: Scalar + RelativeEq, const D: usize> RelativeEq for Scale<T, D>
395where
396 T::Epsilon: Clone,
397{
398 #[inline]
399 fn default_max_relative() -> Self::Epsilon {
400 T::default_max_relative()
401 }
402
403 #[inline]
404 fn relative_eq(
405 &self,
406 other: &Self,
407 epsilon: Self::Epsilon,
408 max_relative: Self::Epsilon,
409 ) -> bool {
410 self.vector
411 .relative_eq(&other.vector, epsilon, max_relative)
412 }
413}
414
415impl<T: Scalar + UlpsEq, const D: usize> UlpsEq for Scale<T, D>
416where
417 T::Epsilon: Clone,
418{
419 #[inline]
420 fn default_max_ulps() -> u32 {
421 T::default_max_ulps()
422 }
423
424 #[inline]
425 fn ulps_eq(&self, other: &Self, epsilon: Self::Epsilon, max_ulps: u32) -> bool {
426 self.vector.ulps_eq(&other.vector, epsilon, max_ulps)
427 }
428}
429
430impl<T: Scalar + fmt::Display, const D: usize> fmt::Display for Scale<T, D> {
436 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
437 let precision = f.precision().unwrap_or(3);
438
439 writeln!(f, "Scale {{")?;
440 write!(f, "{:.*}", precision, self.vector)?;
441 writeln!(f, "}}")
442 }
443}