use super::*;

pick! {
  if #[cfg(target_feature="sse2")] {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(16))]
    pub struct u8x16 { pub(crate) sse: m128i }
  } else if #[cfg(target_feature="simd128")] {
    use core::arch::wasm32::*;

    #[derive(Clone, Copy)]
    #[repr(transparent)]
    pub struct u8x16 { pub(crate) simd: v128 }

    impl Default for u8x16 {
      fn default() -> Self {
        Self::splat(0)
      }
    }

    impl PartialEq for u8x16 {
      fn eq(&self, other: &Self) -> bool {
        u8x16_all_true(u8x16_eq(self.simd, other.simd))
      }
    }

    impl Eq for u8x16 { }
  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
    use core::arch::aarch64::*;
    #[repr(C)]
    #[derive(Copy, Clone)]
    pub struct u8x16 { pub(crate) neon: uint8x16_t }

    impl Default for u8x16 {
      #[inline]
      #[must_use]
      fn default() -> Self {
        Self::splat(0)
      }
    }

    impl PartialEq for u8x16 {
      #[inline]
      #[must_use]
      fn eq(&self, other: &Self) -> bool {
        unsafe { vminvq_u8(vceqq_u8(self.neon, other.neon)) == u8::MAX }
      }
    }

    impl Eq for u8x16 { }
  } else {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(16))]
    pub struct u8x16 { pub(crate) arr: [u8;16] }
  }
}

int_uint_consts!(u8, 16, u8x16, 128);

unsafe impl Zeroable for u8x16 {}
unsafe impl Pod for u8x16 {}

impl Add for u8x16 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn add(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: add_i8_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u8x16_add(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
        unsafe { Self { neon: vaddq_u8(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].wrapping_add(rhs.arr[0]),
          self.arr[1].wrapping_add(rhs.arr[1]),
          self.arr[2].wrapping_add(rhs.arr[2]),
          self.arr[3].wrapping_add(rhs.arr[3]),
          self.arr[4].wrapping_add(rhs.arr[4]),
          self.arr[5].wrapping_add(rhs.arr[5]),
          self.arr[6].wrapping_add(rhs.arr[6]),
          self.arr[7].wrapping_add(rhs.arr[7]),
          self.arr[8].wrapping_add(rhs.arr[8]),
          self.arr[9].wrapping_add(rhs.arr[9]),
          self.arr[10].wrapping_add(rhs.arr[10]),
          self.arr[11].wrapping_add(rhs.arr[11]),
          self.arr[12].wrapping_add(rhs.arr[12]),
          self.arr[13].wrapping_add(rhs.arr[13]),
          self.arr[14].wrapping_add(rhs.arr[14]),
          self.arr[15].wrapping_add(rhs.arr[15]),
        ]}
      }
    }
  }
}

impl Sub for u8x16 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn sub(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: sub_i8_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u8x16_sub(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
        unsafe { Self { neon: vsubq_u8(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].wrapping_sub(rhs.arr[0]),
          self.arr[1].wrapping_sub(rhs.arr[1]),
          self.arr[2].wrapping_sub(rhs.arr[2]),
          self.arr[3].wrapping_sub(rhs.arr[3]),
          self.arr[4].wrapping_sub(rhs.arr[4]),
          self.arr[5].wrapping_sub(rhs.arr[5]),
          self.arr[6].wrapping_sub(rhs.arr[6]),
          self.arr[7].wrapping_sub(rhs.arr[7]),
          self.arr[8].wrapping_sub(rhs.arr[8]),
          self.arr[9].wrapping_sub(rhs.arr[9]),
          self.arr[10].wrapping_sub(rhs.arr[10]),
          self.arr[11].wrapping_sub(rhs.arr[11]),
          self.arr[12].wrapping_sub(rhs.arr[12]),
          self.arr[13].wrapping_sub(rhs.arr[13]),
          self.arr[14].wrapping_sub(rhs.arr[14]),
          self.arr[15].wrapping_sub(rhs.arr[15]),
        ]}
      }
    }
  }
}

impl Add<u8> for u8x16 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn add(self, rhs: u8) -> Self::Output {
    self.add(Self::splat(rhs))
  }
}

impl Sub<u8> for u8x16 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn sub(self, rhs: u8) -> Self::Output {
    self.sub(Self::splat(rhs))
  }
}

impl Add<u8x16> for u8 {
  type Output = u8x16;
  #[inline]
  #[must_use]
  fn add(self, rhs: u8x16) -> Self::Output {
    u8x16::splat(self).add(rhs)
  }
}

impl Sub<u8x16> for u8 {
  type Output = u8x16;
  #[inline]
  #[must_use]
  fn sub(self, rhs: u8x16) -> Self::Output {
    u8x16::splat(self).sub(rhs)
  }
}

impl BitAnd for u8x16 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitand(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitand_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_and(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
        unsafe { Self { neon: vandq_u8(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].bitand(rhs.arr[0]),
          self.arr[1].bitand(rhs.arr[1]),
          self.arr[2].bitand(rhs.arr[2]),
          self.arr[3].bitand(rhs.arr[3]),
          self.arr[4].bitand(rhs.arr[4]),
          self.arr[5].bitand(rhs.arr[5]),
          self.arr[6].bitand(rhs.arr[6]),
          self.arr[7].bitand(rhs.arr[7]),
          self.arr[8].bitand(rhs.arr[8]),
          self.arr[9].bitand(rhs.arr[9]),
          self.arr[10].bitand(rhs.arr[10]),
          self.arr[11].bitand(rhs.arr[11]),
          self.arr[12].bitand(rhs.arr[12]),
          self.arr[13].bitand(rhs.arr[13]),
          self.arr[14].bitand(rhs.arr[14]),
          self.arr[15].bitand(rhs.arr[15]),
        ]}
      }
    }
  }
}

impl BitOr for u8x16 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitor_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_or(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
        unsafe { Self { neon: vorrq_u8(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].bitor(rhs.arr[0]),
          self.arr[1].bitor(rhs.arr[1]),
          self.arr[2].bitor(rhs.arr[2]),
          self.arr[3].bitor(rhs.arr[3]),
          self.arr[4].bitor(rhs.arr[4]),
          self.arr[5].bitor(rhs.arr[5]),
          self.arr[6].bitor(rhs.arr[6]),
          self.arr[7].bitor(rhs.arr[7]),
          self.arr[8].bitor(rhs.arr[8]),
          self.arr[9].bitor(rhs.arr[9]),
          self.arr[10].bitor(rhs.arr[10]),
          self.arr[11].bitor(rhs.arr[11]),
          self.arr[12].bitor(rhs.arr[12]),
          self.arr[13].bitor(rhs.arr[13]),
          self.arr[14].bitor(rhs.arr[14]),
          self.arr[15].bitor(rhs.arr[15]),
        ]}
      }
    }
  }
}

impl BitXor for u8x16 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitxor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitxor_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_xor(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
        unsafe { Self { neon: veorq_u8(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].bitxor(rhs.arr[0]),
          self.arr[1].bitxor(rhs.arr[1]),
          self.arr[2].bitxor(rhs.arr[2]),
          self.arr[3].bitxor(rhs.arr[3]),
          self.arr[4].bitxor(rhs.arr[4]),
          self.arr[5].bitxor(rhs.arr[5]),
          self.arr[6].bitxor(rhs.arr[6]),
          self.arr[7].bitxor(rhs.arr[7]),
          self.arr[8].bitxor(rhs.arr[8]),
          self.arr[9].bitxor(rhs.arr[9]),
          self.arr[10].bitxor(rhs.arr[10]),
          self.arr[11].bitxor(rhs.arr[11]),
          self.arr[12].bitxor(rhs.arr[12]),
          self.arr[13].bitxor(rhs.arr[13]),
          self.arr[14].bitxor(rhs.arr[14]),
          self.arr[15].bitxor(rhs.arr[15]),
        ]}
      }
    }
  }
}

impl CmpEq for u8x16 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_eq(self, rhs: Self) -> Self::Output {
    Self::cmp_eq(self, rhs)
  }
}

impl u8x16 {
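  /// Builds a `u8x16` directly from an array of 16 lanes.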
  #[inline]
  #[must_use]
  pub const fn new(array: [u8; 16]) -> Self {
    unsafe { core::mem::transmute(array) }
  }
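  /// Lanewise equality comparison: lanes that compare equal become `u8::MAX`
  /// (all bits set) and lanes that differ become `0`.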
  #[inline]
  #[must_use]
  pub fn cmp_eq(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: cmp_eq_mask_i8_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u8x16_eq(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
        unsafe { Self { neon: vceqq_u8(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          if self.arr[0] == rhs.arr[0] { u8::MAX } else { 0 },
          if self.arr[1] == rhs.arr[1] { u8::MAX } else { 0 },
          if self.arr[2] == rhs.arr[2] { u8::MAX } else { 0 },
          if self.arr[3] == rhs.arr[3] { u8::MAX } else { 0 },
          if self.arr[4] == rhs.arr[4] { u8::MAX } else { 0 },
          if self.arr[5] == rhs.arr[5] { u8::MAX } else { 0 },
          if self.arr[6] == rhs.arr[6] { u8::MAX } else { 0 },
          if self.arr[7] == rhs.arr[7] { u8::MAX } else { 0 },
          if self.arr[8] == rhs.arr[8] { u8::MAX } else { 0 },
          if self.arr[9] == rhs.arr[9] { u8::MAX } else { 0 },
          if self.arr[10] == rhs.arr[10] { u8::MAX } else { 0 },
          if self.arr[11] == rhs.arr[11] { u8::MAX } else { 0 },
          if self.arr[12] == rhs.arr[12] { u8::MAX } else { 0 },
          if self.arr[13] == rhs.arr[13] { u8::MAX } else { 0 },
          if self.arr[14] == rhs.arr[14] { u8::MAX } else { 0 },
          if self.arr[15] == rhs.arr[15] { u8::MAX } else { 0 },
        ]}
      }
    }
  }
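  /// Blends between `t` and `f` lanewise, using `self` as the mask: where a
  /// mask lane is all ones the lane is taken from `t`, and where it is all
  /// zeros the lane is taken from `f`. Masks are normally produced by a
  /// comparison such as `cmp_eq`.
  ///
  /// ```
  /// use wide::*; // example assumes this module builds as part of the `wide` crate
  ///
  /// let mask = u8x16::splat(1).cmp_eq(u8x16::splat(1)); // every lane matches
  /// let picked = mask.blend(u8x16::splat(7), u8x16::splat(9));
  /// assert_eq!(picked.to_array(), [7; 16]);
  /// ```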
  #[inline]
  #[must_use]
  pub fn blend(self, t: Self, f: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse4.1")] {
        Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
        unsafe { Self { neon: vbslq_u8(self.neon, t.neon, f.neon) } }
      } else {
        generic_bit_blend(self, t, f)
      }
    }
  }
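  /// Lanewise maximum of `self` and `rhs`.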
  #[inline]
  #[must_use]
  pub fn max(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: max_u8_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u8x16_max(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
        unsafe { Self { neon: vmaxq_u8(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].max(rhs.arr[0]),
          self.arr[1].max(rhs.arr[1]),
          self.arr[2].max(rhs.arr[2]),
          self.arr[3].max(rhs.arr[3]),
          self.arr[4].max(rhs.arr[4]),
          self.arr[5].max(rhs.arr[5]),
          self.arr[6].max(rhs.arr[6]),
          self.arr[7].max(rhs.arr[7]),
          self.arr[8].max(rhs.arr[8]),
          self.arr[9].max(rhs.arr[9]),
          self.arr[10].max(rhs.arr[10]),
          self.arr[11].max(rhs.arr[11]),
          self.arr[12].max(rhs.arr[12]),
          self.arr[13].max(rhs.arr[13]),
          self.arr[14].max(rhs.arr[14]),
          self.arr[15].max(rhs.arr[15]),
        ]}
      }
    }
  }
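  /// Lanewise minimum of `self` and `rhs`.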
  #[inline]
  #[must_use]
  pub fn min(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: min_u8_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u8x16_min(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
        unsafe { Self { neon: vminq_u8(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].min(rhs.arr[0]),
          self.arr[1].min(rhs.arr[1]),
          self.arr[2].min(rhs.arr[2]),
          self.arr[3].min(rhs.arr[3]),
          self.arr[4].min(rhs.arr[4]),
          self.arr[5].min(rhs.arr[5]),
          self.arr[6].min(rhs.arr[6]),
          self.arr[7].min(rhs.arr[7]),
          self.arr[8].min(rhs.arr[8]),
          self.arr[9].min(rhs.arr[9]),
          self.arr[10].min(rhs.arr[10]),
          self.arr[11].min(rhs.arr[11]),
          self.arr[12].min(rhs.arr[12]),
          self.arr[13].min(rhs.arr[13]),
          self.arr[14].min(rhs.arr[14]),
          self.arr[15].min(rhs.arr[15]),
        ]}
      }
    }
  }
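  /// Lanewise saturating addition: each lane is `self + rhs` clamped to
  /// `u8::MAX` instead of wrapping (for example `200 + 100` gives `255`).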
  #[inline]
  #[must_use]
  pub fn saturating_add(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: add_saturating_u8_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u8x16_add_sat(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
        unsafe { Self { neon: vqaddq_u8(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].saturating_add(rhs.arr[0]),
          self.arr[1].saturating_add(rhs.arr[1]),
          self.arr[2].saturating_add(rhs.arr[2]),
          self.arr[3].saturating_add(rhs.arr[3]),
          self.arr[4].saturating_add(rhs.arr[4]),
          self.arr[5].saturating_add(rhs.arr[5]),
          self.arr[6].saturating_add(rhs.arr[6]),
          self.arr[7].saturating_add(rhs.arr[7]),
          self.arr[8].saturating_add(rhs.arr[8]),
          self.arr[9].saturating_add(rhs.arr[9]),
          self.arr[10].saturating_add(rhs.arr[10]),
          self.arr[11].saturating_add(rhs.arr[11]),
          self.arr[12].saturating_add(rhs.arr[12]),
          self.arr[13].saturating_add(rhs.arr[13]),
          self.arr[14].saturating_add(rhs.arr[14]),
          self.arr[15].saturating_add(rhs.arr[15]),
        ]}
      }
    }
  }
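  /// Lanewise saturating subtraction: each lane is `self - rhs` clamped to `0`
  /// instead of wrapping (for example `10 - 20` gives `0`).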
  #[inline]
  #[must_use]
  pub fn saturating_sub(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: sub_saturating_u8_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u8x16_sub_sat(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
        unsafe { Self { neon: vqsubq_u8(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].saturating_sub(rhs.arr[0]),
          self.arr[1].saturating_sub(rhs.arr[1]),
          self.arr[2].saturating_sub(rhs.arr[2]),
          self.arr[3].saturating_sub(rhs.arr[3]),
          self.arr[4].saturating_sub(rhs.arr[4]),
          self.arr[5].saturating_sub(rhs.arr[5]),
          self.arr[6].saturating_sub(rhs.arr[6]),
          self.arr[7].saturating_sub(rhs.arr[7]),
          self.arr[8].saturating_sub(rhs.arr[8]),
          self.arr[9].saturating_sub(rhs.arr[9]),
          self.arr[10].saturating_sub(rhs.arr[10]),
          self.arr[11].saturating_sub(rhs.arr[11]),
          self.arr[12].saturating_sub(rhs.arr[12]),
          self.arr[13].saturating_sub(rhs.arr[13]),
          self.arr[14].saturating_sub(rhs.arr[14]),
          self.arr[15].saturating_sub(rhs.arr[15]),
        ]}
      }
    }
  }
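  /// Interleaves the low eight lanes of `lhs` and `rhs`, producing
  /// `[lhs[0], rhs[0], lhs[1], rhs[1], ..., lhs[7], rhs[7]]`.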
  #[inline]
  #[must_use]
  pub fn unpack_low(lhs: u8x16, rhs: u8x16) -> u8x16 {
    pick! {
      if #[cfg(target_feature = "sse2")] {
        u8x16 { sse: unpack_low_i8_m128i(lhs.sse, rhs.sse) }
      } else if #[cfg(target_feature = "simd128")] {
        u8x16 { simd: u8x16_shuffle::<0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23>(lhs.simd, rhs.simd) }
      } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64"))] {
        let lhs = unsafe { vget_low_u8(lhs.neon) };
        let rhs = unsafe { vget_low_u8(rhs.neon) };

        let zipped = unsafe { vzip_u8(lhs, rhs) };
        u8x16 { neon: unsafe { vcombine_u8(zipped.0, zipped.1) } }
      } else {
        u8x16::new([
          lhs.as_array_ref()[0], rhs.as_array_ref()[0],
          lhs.as_array_ref()[1], rhs.as_array_ref()[1],
          lhs.as_array_ref()[2], rhs.as_array_ref()[2],
          lhs.as_array_ref()[3], rhs.as_array_ref()[3],
          lhs.as_array_ref()[4], rhs.as_array_ref()[4],
          lhs.as_array_ref()[5], rhs.as_array_ref()[5],
          lhs.as_array_ref()[6], rhs.as_array_ref()[6],
          lhs.as_array_ref()[7], rhs.as_array_ref()[7],
        ])
      }
    }
  }
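  /// Interleaves the high eight lanes of `lhs` and `rhs`, producing
  /// `[lhs[8], rhs[8], lhs[9], rhs[9], ..., lhs[15], rhs[15]]`.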
  #[inline]
  #[must_use]
  pub fn unpack_high(lhs: u8x16, rhs: u8x16) -> u8x16 {
    pick! {
      if #[cfg(target_feature = "sse2")] {
        u8x16 { sse: unpack_high_i8_m128i(lhs.sse, rhs.sse) }
      } else if #[cfg(target_feature = "simd128")] {
        u8x16 { simd: u8x16_shuffle::<8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31>(lhs.simd, rhs.simd) }
      } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64"))] {
        let lhs = unsafe { vget_high_u8(lhs.neon) };
        let rhs = unsafe { vget_high_u8(rhs.neon) };

        let zipped = unsafe { vzip_u8(lhs, rhs) };
        u8x16 { neon: unsafe { vcombine_u8(zipped.0, zipped.1) } }
      } else {
        u8x16::new([
          lhs.as_array_ref()[8], rhs.as_array_ref()[8],
          lhs.as_array_ref()[9], rhs.as_array_ref()[9],
          lhs.as_array_ref()[10], rhs.as_array_ref()[10],
          lhs.as_array_ref()[11], rhs.as_array_ref()[11],
          lhs.as_array_ref()[12], rhs.as_array_ref()[12],
          lhs.as_array_ref()[13], rhs.as_array_ref()[13],
          lhs.as_array_ref()[14], rhs.as_array_ref()[14],
          lhs.as_array_ref()[15], rhs.as_array_ref()[15],
        ])
      }
    }
  }
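  /// Narrows two `i16x8` vectors into one `u8x16`, saturating each lane to the
  /// `0..=255` range. Lanes of `lhs` become the low half of the result and
  /// lanes of `rhs` become the high half.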
  #[inline]
  #[must_use]
  pub fn narrow_i16x8(lhs: i16x8, rhs: i16x8) -> Self {
    pick! {
      if #[cfg(target_feature = "sse2")] {
        u8x16 { sse: pack_i16_to_u8_m128i(lhs.sse, rhs.sse) }
      } else if #[cfg(target_feature = "simd128")] {
        u8x16 { simd: u8x16_narrow_i16x8(lhs.simd, rhs.simd) }
      } else if #[cfg(all(target_feature = "neon", target_arch = "aarch64"))] {
        let lhs = unsafe { vqmovun_s16(lhs.neon) };
        let rhs = unsafe { vqmovun_s16(rhs.neon) };
        u8x16 { neon: unsafe { vcombine_u8(lhs, rhs) } }
      } else {
        fn clamp(a: i16) -> u8 {
          if a < u8::MIN as i16 {
            u8::MIN
          } else if a > u8::MAX as i16 {
            u8::MAX
          } else {
            a as u8
          }
        }

        Self { arr: [
          clamp(lhs.as_array_ref()[0]),
          clamp(lhs.as_array_ref()[1]),
          clamp(lhs.as_array_ref()[2]),
          clamp(lhs.as_array_ref()[3]),
          clamp(lhs.as_array_ref()[4]),
          clamp(lhs.as_array_ref()[5]),
          clamp(lhs.as_array_ref()[6]),
          clamp(lhs.as_array_ref()[7]),
          clamp(rhs.as_array_ref()[0]),
          clamp(rhs.as_array_ref()[1]),
          clamp(rhs.as_array_ref()[2]),
          clamp(rhs.as_array_ref()[3]),
          clamp(rhs.as_array_ref()[4]),
          clamp(rhs.as_array_ref()[5]),
          clamp(rhs.as_array_ref()[6]),
          clamp(rhs.as_array_ref()[7]),
        ]}
      }
    }
  }
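  /// Indexes the bytes of `self` using the lanes of `rhs` as indices, by
  /// delegating to `i8x16::swizzle` on the bit pattern of `self`.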
  #[inline]
  pub fn swizzle(self, rhs: i8x16) -> i8x16 {
    cast(i8x16::swizzle(cast(self), rhs))
  }
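  /// Like `swizzle`, but delegates to `i8x16::swizzle_relaxed`, whose handling
  /// of out-of-range indices may differ between platforms.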
  #[inline]
  pub fn swizzle_relaxed(self, rhs: u8x16) -> u8x16 {
    cast(i8x16::swizzle_relaxed(cast(self), cast(rhs)))
  }
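  /// Gathers the high bit of each lane into the low 16 bits of an `i32`, with
  /// lane 0 in bit 0.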
  #[inline]
  #[must_use]
  pub fn move_mask(self) -> i32 {
    i8x16::move_mask(cast(self))
  }
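  /// Returns `true` if any lane has its high bit set.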
  #[inline]
  #[must_use]
  pub fn any(self) -> bool {
    i8x16::any(cast(self))
  }
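  /// Returns `true` if every lane has its high bit set.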
  #[inline]
  #[must_use]
  pub fn all(self) -> bool {
    i8x16::all(cast(self))
  }
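  /// Returns `true` if no lane has its high bit set.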
  #[inline]
  #[must_use]
  pub fn none(self) -> bool {
    i8x16::none(cast(self))
  }
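  /// Copies the lanes out into a plain `[u8; 16]` array.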
  #[inline]
  pub fn to_array(self) -> [u8; 16] {
    cast(self)
  }
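  /// Views the vector as a shared reference to its 16 lanes.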
  #[inline]
  pub fn as_array_ref(&self) -> &[u8; 16] {
    cast_ref(self)
  }
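  /// Views the vector as a mutable reference to its 16 lanes.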
  #[inline]
  pub fn as_array_mut(&mut self) -> &mut [u8; 16] {
    cast_mut(self)
  }
}