use super::*;

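// The struct layout is chosen at compile time: with AVX2 the whole vector is a
// single 256-bit register, otherwise it is modeled as two `u8x16` halves.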
pick! {
  if #[cfg(target_feature="avx2")] {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(32))]
    pub struct u8x32 { pub(crate) avx: m256i }
  } else {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(32))]
    pub struct u8x32 { pub(crate) a : u8x16, pub(crate) b : u8x16 }
  }
}

int_uint_consts!(u8, 32, u8x32, 256);

unsafe impl Zeroable for u8x32 {}
unsafe impl Pod for u8x32 {}

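// The element-wise `Add`/`Sub` operators below wrap on overflow; see
// `saturating_add` / `saturating_sub` further down for clamping arithmetic.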
impl Add for u8x32 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn add(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx: add_i8_m256i(self.avx,rhs.avx) }
      } else {
        Self {
          a : self.a.add(rhs.a),
          b : self.b.add(rhs.b),
        }
      }
    }
  }
}

impl Sub for u8x32 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn sub(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx: sub_i8_m256i(self.avx,rhs.avx) }
      } else {
        Self {
          a : self.a.sub(rhs.a),
          b : self.b.sub(rhs.b),
        }
      }
    }
  }
}

impl Add<u8> for u8x32 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn add(self, rhs: u8) -> Self::Output {
    self.add(Self::splat(rhs))
  }
}

impl Sub<u8> for u8x32 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn sub(self, rhs: u8) -> Self::Output {
    self.sub(Self::splat(rhs))
  }
}

impl Add<u8x32> for u8 {
  type Output = u8x32;
  #[inline]
  #[must_use]
  fn add(self, rhs: u8x32) -> Self::Output {
    u8x32::splat(self).add(rhs)
  }
}

impl Sub<u8x32> for u8 {
  type Output = u8x32;
  #[inline]
  #[must_use]
  fn sub(self, rhs: u8x32) -> Self::Output {
    u8x32::splat(self).sub(rhs)
  }
}

impl BitAnd for u8x32 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitand(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx : bitand_m256i(self.avx,rhs.avx) }
      } else {
        Self {
          a : self.a.bitand(rhs.a),
          b : self.b.bitand(rhs.b),
        }
      }
    }
  }
}

impl BitOr for u8x32 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx : bitor_m256i(self.avx,rhs.avx) }
      } else {
        Self {
          a : self.a.bitor(rhs.a),
          b : self.b.bitor(rhs.b),
        }
      }
    }
  }
}

impl BitXor for u8x32 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitxor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx : bitxor_m256i(self.avx,rhs.avx) }
      } else {
        Self {
          a : self.a.bitxor(rhs.a),
          b : self.b.bitxor(rhs.b),
        }
      }
    }
  }
}

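// Comparisons use the usual SIMD mask convention: each lane of the result is
// all ones (255) where the comparison holds and all zeros where it does not.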
impl CmpEq for u8x32 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_eq(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx : cmp_eq_mask_i8_m256i(self.avx,rhs.avx) }
      } else {
        Self {
          a : self.a.cmp_eq(rhs.a),
          b : self.b.cmp_eq(rhs.b),
        }
      }
    }
  }
}

impl u8x32 {
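  /// Builds the vector from an array; lane `i` of the result holds `array[i]`.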
  #[inline]
  #[must_use]
  pub const fn new(array: [u8; 32]) -> Self {
    unsafe { core::mem::transmute(array) }
  }
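  /// Lanewise select: where a lane of `self` is all ones the lane from `t` is
  /// taken, where it is all zeros the lane from `f` is taken. `self` should be
  /// a mask such as the output of `cmp_eq`; mixed bit patterns give
  /// platform-dependent results.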
  #[inline]
  #[must_use]
  pub fn blend(self, t: Self, f: Self) -> Self {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx: blend_varying_i8_m256i(f.avx, t.avx, self.avx) }
      } else {
        Self {
          a : self.a.blend(t.a, f.a),
          b : self.b.blend(t.b, f.b),
        }
      }
    }
  }
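  /// Lanewise unsigned maximum of the two vectors.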
  #[inline]
  #[must_use]
  pub fn max(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx: max_u8_m256i(self.avx,rhs.avx) }
      } else {
        Self {
          a : self.a.max(rhs.a),
          b : self.b.max(rhs.b),
        }
      }
    }
  }
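  /// Lanewise unsigned minimum of the two vectors.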
  #[inline]
  #[must_use]
  pub fn min(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx: min_u8_m256i(self.avx,rhs.avx) }
      } else {
        Self {
          a : self.a.min(rhs.a),
          b : self.b.min(rhs.b),
        }
      }
    }
  }

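  /// Lanewise saturating add: sums are clamped to `u8::MAX` instead of
  /// wrapping.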
  #[inline]
  #[must_use]
  pub fn saturating_add(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx: add_saturating_u8_m256i(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.saturating_add(rhs.a),
          b : self.b.saturating_add(rhs.b),
        }
      }
    }
  }
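  /// Lanewise saturating subtract: differences are clamped to `0` instead of
  /// wrapping.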
  #[inline]
  #[must_use]
  pub fn saturating_sub(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx: sub_saturating_u8_m256i(self.avx, rhs.avx) }
      } else {
        Self {
          a : self.a.saturating_sub(rhs.a),
          b : self.b.saturating_sub(rhs.b),
        }
      }
    }
  }

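  /// Collects the high bit of each lane into a 32-bit mask (lane 0 in bit 0);
  /// defers to `i8x32::move_mask`.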
  #[inline]
  #[must_use]
  pub fn move_mask(self) -> i32 {
    i8x32::move_mask(cast(self))
  }

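  /// Returns `true` if any lane has its high bit set.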
  #[inline]
  #[must_use]
  pub fn any(self) -> bool {
    i8x32::any(cast(self))
  }

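  /// Returns `true` if every lane has its high bit set.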
  #[inline]
  #[must_use]
  pub fn all(self) -> bool {
    i8x32::all(cast(self))
  }

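  /// Shuffles the bytes of each 16-byte half of `self`, using the matching
  /// half of `rhs` as indices; this casts and defers to `i8x32::swizzle_half`,
  /// so see that method for the exact index rules.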
  #[inline]
  pub fn swizzle_half(self, rhs: i8x32) -> i8x32 {
    cast(i8x32::swizzle_half(cast(self), cast(rhs)))
  }

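  /// Relaxed variant of `swizzle_half`: defers to
  /// `i8x32::swizzle_half_relaxed`, which may resolve out-of-range indices
  /// differently from platform to platform.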
  #[inline]
  pub fn swizzle_half_relaxed(self, rhs: u8x32) -> u8x32 {
    cast(i8x32::swizzle_half_relaxed(cast(self), cast(rhs)))
  }

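  /// Returns `true` if no lane has its high bit set (the negation of `any`).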
  #[inline]
  #[must_use]
  pub fn none(self) -> bool {
    !self.any()
  }

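  /// Copies the lanes out into a plain `[u8; 32]` array.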
  #[inline]
  pub fn to_array(self) -> [u8; 32] {
    cast(self)
  }

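  /// Views the vector as a `&[u8; 32]` without copying.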
  #[inline]
  pub fn as_array_ref(&self) -> &[u8; 32] {
    cast_ref(self)
  }

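  /// Views the vector as a `&mut [u8; 32]`, allowing individual lanes to be
  /// edited in place.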
  #[inline]
  pub fn as_array_mut(&mut self) -> &mut [u8; 32] {
    cast_mut(self)
  }
}
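
// A minimal sanity-check sketch for the operations above. It assumes this file
// sits in its usual crate context (so `splat`, `CmpEq`, and `cast` from
// `use super::*` are available); the module name and tests are illustrative,
// not part of the crate's real test suite.
#[cfg(test)]
mod u8x32_sketch_tests {
  use super::*;

  #[test]
  fn saturating_math_clamps_instead_of_wrapping() {
    let a = u8x32::splat(250);
    let b = u8x32::splat(10);
    // 250 + 10 saturates at the lane maximum, 10 - 250 saturates at zero.
    assert_eq!(a.saturating_add(b).to_array(), [255u8; 32]);
    assert_eq!(b.saturating_sub(a).to_array(), [0u8; 32]);
  }

  #[test]
  fn blend_selects_by_comparison_mask() {
    let x = u8x32::splat(7);
    let y = u8x32::new([0u8; 32]);
    // `cmp_eq` yields an all-ones lane wherever the lanes match, so blending
    // through that mask picks every lane from `x`.
    let mask = x.cmp_eq(u8x32::splat(7));
    assert_eq!(mask.blend(x, y).to_array(), [7u8; 32]);
  }
}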