use super::*;

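// `pick!` chooses the backing storage for `u16x8` at compile time: an SSE2
// `m128i`, a wasm `v128`, an AArch64 NEON `uint16x8_t`, or a plain
// `[u16; 8]` as the portable fallback. Every operation in this file repeats
// that four-way split, and all branches are written to give identical
// lanewise results.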
pick! {
  if #[cfg(target_feature="sse2")] {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(16))]
    pub struct u16x8 { pub(crate) sse: m128i }
  } else if #[cfg(target_feature="simd128")] {
    use core::arch::wasm32::*;

    #[derive(Clone, Copy)]
    #[repr(transparent)]
    pub struct u16x8 { pub(crate) simd: v128 }

    impl Default for u16x8 {
      fn default() -> Self {
        Self::splat(0)
      }
    }

    impl PartialEq for u16x8 {
      fn eq(&self, other: &Self) -> bool {
        u16x8_all_true(u16x8_eq(self.simd, other.simd))
      }
    }

    impl Eq for u16x8 { }
  } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
    use core::arch::aarch64::*;
    #[repr(C)]
    #[derive(Copy, Clone)]
    pub struct u16x8 { pub(crate) neon: uint16x8_t }

    impl Default for u16x8 {
      #[inline]
      #[must_use]
      fn default() -> Self {
        Self::splat(0)
      }
    }

    impl PartialEq for u16x8 {
      #[inline]
      #[must_use]
      fn eq(&self, other: &Self) -> bool {
        unsafe { vminvq_u16(vceqq_u16(self.neon, other.neon)) == u16::MAX }
      }
    }

    impl Eq for u16x8 { }
  } else {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(16))]
    pub struct u16x8 { pub(crate) arr: [u16;8] }
  }
}

int_uint_consts!(u16, 8, u16x8, 128);

unsafe impl Zeroable for u16x8 {}
unsafe impl Pod for u16x8 {}

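// The arithmetic operators work lanewise and wrap on overflow, matching the
// `wrapping_*` calls in the scalar fallback (the SIMD instructions wrap by
// definition). For example, `u16x8::splat(u16::MAX) + u16x8::splat(1)` is
// all zeros.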
impl Add for u16x8 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn add(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: add_i16_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u16x8_add(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vaddq_u16(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].wrapping_add(rhs.arr[0]),
          self.arr[1].wrapping_add(rhs.arr[1]),
          self.arr[2].wrapping_add(rhs.arr[2]),
          self.arr[3].wrapping_add(rhs.arr[3]),
          self.arr[4].wrapping_add(rhs.arr[4]),
          self.arr[5].wrapping_add(rhs.arr[5]),
          self.arr[6].wrapping_add(rhs.arr[6]),
          self.arr[7].wrapping_add(rhs.arr[7]),
        ]}
      }
    }
  }
}

impl Sub for u16x8 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn sub(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: sub_i16_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u16x8_sub(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vsubq_u16(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].wrapping_sub(rhs.arr[0]),
          self.arr[1].wrapping_sub(rhs.arr[1]),
          self.arr[2].wrapping_sub(rhs.arr[2]),
          self.arr[3].wrapping_sub(rhs.arr[3]),
          self.arr[4].wrapping_sub(rhs.arr[4]),
          self.arr[5].wrapping_sub(rhs.arr[5]),
          self.arr[6].wrapping_sub(rhs.arr[6]),
          self.arr[7].wrapping_sub(rhs.arr[7]),
        ]}
      }
    }
  }
}

impl Mul for u16x8 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn mul(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: mul_i16_keep_low_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u16x8_mul(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vmulq_u16(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].wrapping_mul(rhs.arr[0]),
          self.arr[1].wrapping_mul(rhs.arr[1]),
          self.arr[2].wrapping_mul(rhs.arr[2]),
          self.arr[3].wrapping_mul(rhs.arr[3]),
          self.arr[4].wrapping_mul(rhs.arr[4]),
          self.arr[5].wrapping_mul(rhs.arr[5]),
          self.arr[6].wrapping_mul(rhs.arr[6]),
          self.arr[7].wrapping_mul(rhs.arr[7]),
        ]}
      }
    }
  }
}

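// Vector-scalar forms: the scalar side is splatted across all eight lanes
// and the vector impls above do the work, so both `v + 1` and `1 + v`
// compile.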
impl Add<u16> for u16x8 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn add(self, rhs: u16) -> Self::Output {
    self.add(Self::splat(rhs))
  }
}

impl Sub<u16> for u16x8 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn sub(self, rhs: u16) -> Self::Output {
    self.sub(Self::splat(rhs))
  }
}

impl Mul<u16> for u16x8 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn mul(self, rhs: u16) -> Self::Output {
    self.mul(Self::splat(rhs))
  }
}

impl Add<u16x8> for u16 {
  type Output = u16x8;
  #[inline]
  #[must_use]
  fn add(self, rhs: u16x8) -> Self::Output {
    u16x8::splat(self).add(rhs)
  }
}

impl Sub<u16x8> for u16 {
  type Output = u16x8;
  #[inline]
  #[must_use]
  fn sub(self, rhs: u16x8) -> Self::Output {
    u16x8::splat(self).sub(rhs)
  }
}

impl Mul<u16x8> for u16 {
  type Output = u16x8;
  #[inline]
  #[must_use]
  fn mul(self, rhs: u16x8) -> Self::Output {
    u16x8::splat(self).mul(rhs)
  }
}

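// Bitwise ops are identical for any lane width, so only the scalar fallback
// needs per-lane code.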
impl BitAnd for u16x8 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitand(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitand_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_and(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vandq_u16(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].bitand(rhs.arr[0]),
          self.arr[1].bitand(rhs.arr[1]),
          self.arr[2].bitand(rhs.arr[2]),
          self.arr[3].bitand(rhs.arr[3]),
          self.arr[4].bitand(rhs.arr[4]),
          self.arr[5].bitand(rhs.arr[5]),
          self.arr[6].bitand(rhs.arr[6]),
          self.arr[7].bitand(rhs.arr[7]),
        ]}
      }
    }
  }
}

impl BitOr for u16x8 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitor_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_or(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vorrq_u16(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].bitor(rhs.arr[0]),
          self.arr[1].bitor(rhs.arr[1]),
          self.arr[2].bitor(rhs.arr[2]),
          self.arr[3].bitor(rhs.arr[3]),
          self.arr[4].bitor(rhs.arr[4]),
          self.arr[5].bitor(rhs.arr[5]),
          self.arr[6].bitor(rhs.arr[6]),
          self.arr[7].bitor(rhs.arr[7]),
        ]}
      }
    }
  }
}

impl BitXor for u16x8 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitxor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitxor_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_xor(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: veorq_u16(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].bitxor(rhs.arr[0]),
          self.arr[1].bitxor(rhs.arr[1]),
          self.arr[2].bitxor(rhs.arr[2]),
          self.arr[3].bitxor(rhs.arr[3]),
          self.arr[4].bitxor(rhs.arr[4]),
          self.arr[5].bitxor(rhs.arr[5]),
          self.arr[6].bitxor(rhs.arr[6]),
          self.arr[7].bitxor(rhs.arr[7]),
        ]}
      }
    }
  }
}

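// Shifts apply the same scalar amount to every lane. Keep the amount in
// `0..16`: the backends handle an out-of-range shift differently (SSE2
// zeroes the lanes, wasm masks the amount modulo 16, and the scalar fallback
// panics in debug builds).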
macro_rules! impl_shl_t_for_u16x8 {
  ($($shift_type:ty),+ $(,)?) => {
    $(impl Shl<$shift_type> for u16x8 {
      type Output = Self;
      #[inline]
      #[must_use]
      fn shl(self, rhs: $shift_type) -> Self::Output {
        pick! {
          if #[cfg(target_feature="sse2")] {
            let shift = cast([rhs as u64, 0]);
            Self { sse: shl_all_u16_m128i(self.sse, shift) }
          } else if #[cfg(target_feature="simd128")] {
            Self { simd: u16x8_shl(self.simd, rhs as u32) }
          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
            unsafe { Self { neon: vshlq_u16(self.neon, vmovq_n_s16(rhs as i16)) } }
          } else {
            let u = rhs as u64;
            Self { arr: [
              self.arr[0] << u,
              self.arr[1] << u,
              self.arr[2] << u,
              self.arr[3] << u,
              self.arr[4] << u,
              self.arr[5] << u,
              self.arr[6] << u,
              self.arr[7] << u,
            ]}
          }
        }
      }
    })+
  };
}
impl_shl_t_for_u16x8!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);

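// Logical (zero-fill) right shift by a uniform amount. NEON has no variable
// right-shift instruction, so it is expressed as `vshlq_u16` with a negated
// count; the same `0..16` range caveat as `Shl` applies.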
macro_rules! impl_shr_t_for_u16x8 {
  ($($shift_type:ty),+ $(,)?) => {
    $(impl Shr<$shift_type> for u16x8 {
      type Output = Self;
      #[inline]
      #[must_use]
      fn shr(self, rhs: $shift_type) -> Self::Output {
        pick! {
          if #[cfg(target_feature="sse2")] {
            let shift = cast([rhs as u64, 0]);
            Self { sse: shr_all_u16_m128i(self.sse, shift) }
          } else if #[cfg(target_feature="simd128")] {
            Self { simd: u16x8_shr(self.simd, rhs as u32) }
          } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
            unsafe { Self { neon: vshlq_u16(self.neon, vmovq_n_s16(-(rhs as i16))) } }
          } else {
            let u = rhs as u64;
            Self { arr: [
              self.arr[0] >> u,
              self.arr[1] >> u,
              self.arr[2] >> u,
              self.arr[3] >> u,
              self.arr[4] >> u,
              self.arr[5] >> u,
              self.arr[6] >> u,
              self.arr[7] >> u,
            ]}
          }
        }
      }
    })+
  };
}
impl_shr_t_for_u16x8!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);

impl u16x8 {
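  /// Puts an array into a vector: lane `i` of the result is `array[i]`. This
  /// is a bit-level conversion, so it is free at runtime.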
  #[inline]
  #[must_use]
  pub const fn new(array: [u16; 8]) -> Self {
    unsafe { core::mem::transmute(array) }
  }
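  /// Lanewise `self == rhs`, producing a mask vector: equal lanes become
  /// `u16::MAX` (all bits set) and unequal lanes become `0`. The result is
  /// ready to use with [`blend`](Self::blend).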
  #[inline]
  #[must_use]
  pub fn cmp_eq(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: cmp_eq_mask_i16_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u16x8_eq(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vceqq_u16(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          if self.arr[0] == rhs.arr[0] { u16::MAX } else { 0 },
          if self.arr[1] == rhs.arr[1] { u16::MAX } else { 0 },
          if self.arr[2] == rhs.arr[2] { u16::MAX } else { 0 },
          if self.arr[3] == rhs.arr[3] { u16::MAX } else { 0 },
          if self.arr[4] == rhs.arr[4] { u16::MAX } else { 0 },
          if self.arr[5] == rhs.arr[5] { u16::MAX } else { 0 },
          if self.arr[6] == rhs.arr[6] { u16::MAX } else { 0 },
          if self.arr[7] == rhs.arr[7] { u16::MAX } else { 0 },
        ]}
      }
    }
  }
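  /// Uses `self` as a lane mask to pick between two vectors: lanes where the
  /// mask is all ones take the lane from `t`, lanes where it is all zeros
  /// take the lane from `f`. Mask lanes that are neither all-ones nor
  /// all-zeros give backend-dependent results, so build the mask with a
  /// comparison such as [`cmp_eq`](Self::cmp_eq).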
  #[inline]
  #[must_use]
  pub fn blend(self, t: Self, f: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse4.1")] {
        Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: v128_bitselect(t.simd, f.simd, self.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vbslq_u16(self.neon, t.neon, f.neon) } }
      } else {
        generic_bit_blend(self, t, f)
      }
    }
  }
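  /// Lanewise unsigned maximum. SSE2 only offers a *signed* `i16` max, so
  /// the x86 hardware path requires SSE4.1 (`max_u16_m128i`); without it the
  /// scalar fallback is used.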
  #[inline]
  #[must_use]
  pub fn max(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse4.1")] {
        Self { sse: max_u16_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u16x8_max(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vmaxq_u16(self.neon, rhs.neon) } }
      } else {
        let arr: [u16; 8] = cast(self);
        let rhs: [u16; 8] = cast(rhs);
        cast([
          arr[0].max(rhs[0]),
          arr[1].max(rhs[1]),
          arr[2].max(rhs[2]),
          arr[3].max(rhs[3]),
          arr[4].max(rhs[4]),
          arr[5].max(rhs[5]),
          arr[6].max(rhs[6]),
          arr[7].max(rhs[7]),
        ])
      }
    }
  }
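  /// Lanewise unsigned minimum; as with [`max`](Self::max), the x86 hardware
  /// path requires SSE4.1.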
  #[inline]
  #[must_use]
  pub fn min(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse4.1")] {
        Self { sse: min_u16_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u16x8_min(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vminq_u16(self.neon, rhs.neon) } }
      } else {
        let arr: [u16; 8] = cast(self);
        let rhs: [u16; 8] = cast(rhs);
        cast([
          arr[0].min(rhs[0]),
          arr[1].min(rhs[1]),
          arr[2].min(rhs[2]),
          arr[3].min(rhs[3]),
          arr[4].min(rhs[4]),
          arr[5].min(rhs[5]),
          arr[6].min(rhs[6]),
          arr[7].min(rhs[7]),
        ])
      }
    }
  }

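  /// Lanewise saturating add: sums clamp at `u16::MAX` instead of wrapping.
  ///
  /// Example (the `wide::` path assumes this crate is the published `wide`
  /// crate):
  ///
  /// ```rust
  /// use wide::u16x8;
  /// let a = u16x8::new([65000, 1, 2, 3, 4, 5, 6, 7]);
  /// let b = u16x8::splat(1000);
  /// // 65000 + 1000 overflows u16, so that lane clamps to 65535.
  /// assert_eq!(
  ///   a.saturating_add(b).to_array(),
  ///   [65535, 1001, 1002, 1003, 1004, 1005, 1006, 1007]
  /// );
  /// ```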
  #[inline]
  #[must_use]
  pub fn saturating_add(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: add_saturating_u16_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u16x8_add_sat(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vqaddq_u16(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].saturating_add(rhs.arr[0]),
          self.arr[1].saturating_add(rhs.arr[1]),
          self.arr[2].saturating_add(rhs.arr[2]),
          self.arr[3].saturating_add(rhs.arr[3]),
          self.arr[4].saturating_add(rhs.arr[4]),
          self.arr[5].saturating_add(rhs.arr[5]),
          self.arr[6].saturating_add(rhs.arr[6]),
          self.arr[7].saturating_add(rhs.arr[7]),
        ]}
      }
    }
  }
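  /// Lanewise saturating subtract: differences clamp at `0` instead of
  /// wrapping, e.g. `splat(3).saturating_sub(splat(5))` is all zeros.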
  #[inline]
  #[must_use]
  pub fn saturating_sub(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: sub_saturating_u16_m128i(self.sse, rhs.sse) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd: u16x8_sub_sat(self.simd, rhs.simd) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))]{
        unsafe { Self { neon: vqsubq_u16(self.neon, rhs.neon) } }
      } else {
        Self { arr: [
          self.arr[0].saturating_sub(rhs.arr[0]),
          self.arr[1].saturating_sub(rhs.arr[1]),
          self.arr[2].saturating_sub(rhs.arr[2]),
          self.arr[3].saturating_sub(rhs.arr[3]),
          self.arr[4].saturating_sub(rhs.arr[4]),
          self.arr[5].saturating_sub(rhs.arr[5]),
          self.arr[6].saturating_sub(rhs.arr[6]),
          self.arr[7].saturating_sub(rhs.arr[7]),
        ]}
      }
    }
  }

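  /// Zero-extends the low eight `u8` lanes of `u` into `u16` lanes. On SSE2
  /// this is a byte unpack against a zeroed register; otherwise each lane is
  /// cast individually.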
  #[inline]
  #[must_use]
  pub fn from_u8x16_low(u: u8x16) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: unpack_low_i8_m128i(u.sse, m128i::zeroed()) }
      } else {
        let u_arr: [u8; 16] = cast(u);
        cast([
          u_arr[0] as u16,
          u_arr[1] as u16,
          u_arr[2] as u16,
          u_arr[3] as u16,
          u_arr[4] as u16,
          u_arr[5] as u16,
          u_arr[6] as u16,
          u_arr[7] as u16,
        ])
      }
    }
  }

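  /// Zero-extends the high eight `u8` lanes of `u` (indices 8..=15) into
  /// `u16` lanes.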
  #[inline]
  #[must_use]
  pub fn from_u8x16_high(u: u8x16) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: unpack_high_i8_m128i(u.sse, m128i::zeroed()) }
      } else {
        let u_arr: [u8; 16] = cast(u);
        cast([
          u_arr[8] as u16,
          u_arr[9] as u16,
          u_arr[10] as u16,
          u_arr[11] as u16,
          u_arr[12] as u16,
          u_arr[13] as u16,
          u_arr[14] as u16,
          u_arr[15] as u16,
        ])
      }
    }
  }

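  /// Multiplies corresponding lanes as full 32-bit products, returning a
  /// widened `u32x8` so no high bits are lost.
  ///
  /// Example (assuming this crate is the published `wide` crate):
  ///
  /// ```rust
  /// use wide::u16x8;
  /// let a = u16x8::splat(u16::MAX);
  /// // 65535 * 65535 = 4_294_836_225, far beyond u16::MAX.
  /// assert_eq!(a.mul_widen(a).to_array(), [4_294_836_225; 8]);
  /// ```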
  #[inline]
  #[must_use]
  pub fn mul_widen(self, rhs: Self) -> u32x8 {
    pick! {
      if #[cfg(target_feature="avx2")] {
        let a = convert_to_i32_m256i_from_u16_m128i(self.sse);
        let b = convert_to_i32_m256i_from_u16_m128i(rhs.sse);
        u32x8 { avx2: mul_i32_keep_low_m256i(a,b) }
      } else if #[cfg(target_feature="sse2")] {
        let low = mul_i16_keep_low_m128i(self.sse, rhs.sse);
        let high = mul_u16_keep_high_m128i(self.sse, rhs.sse);
        u32x8 {
          a: u32x4 { sse: unpack_low_i16_m128i(low, high) },
          b: u32x4 { sse: unpack_high_i16_m128i(low, high) }
        }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
        let lhs_low = unsafe { vget_low_u16(self.neon) };
        let rhs_low = unsafe { vget_low_u16(rhs.neon) };

        let lhs_high = unsafe { vget_high_u16(self.neon) };
        let rhs_high = unsafe { vget_high_u16(rhs.neon) };

        let low = unsafe { vmull_u16(lhs_low, rhs_low) };
        let high = unsafe { vmull_u16(lhs_high, rhs_high) };

        u32x8 { a: u32x4 { neon: low }, b: u32x4 { neon: high } }
      } else {
        let a = self.as_array_ref();
        let b = rhs.as_array_ref();
        u32x8::new([
          u32::from(a[0]) * u32::from(b[0]),
          u32::from(a[1]) * u32::from(b[1]),
          u32::from(a[2]) * u32::from(b[2]),
          u32::from(a[3]) * u32::from(b[3]),
          u32::from(a[4]) * u32::from(b[4]),
          u32::from(a[5]) * u32::from(b[5]),
          u32::from(a[6]) * u32::from(b[6]),
          u32::from(a[7]) * u32::from(b[7]),
        ])
      }
    }
  }

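  /// Multiplies corresponding lanes and keeps the *high* 16 bits of each
  /// 32-bit product, i.e. the half that plain `*` discards.
  ///
  /// Example (assuming this crate is the published `wide` crate):
  ///
  /// ```rust
  /// use wide::u16x8;
  /// let a = u16x8::splat(0x8000); // 32768
  /// let b = u16x8::splat(4);
  /// // 32768 * 4 = 0x0002_0000, whose high 16 bits are 2.
  /// assert_eq!(a.mul_keep_high(b).to_array(), [2; 8]);
  /// ```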
  #[inline]
  #[must_use]
  pub fn mul_keep_high(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: mul_u16_keep_high_m128i(self.sse, rhs.sse) }
      } else if #[cfg(all(target_feature="neon",target_arch="aarch64"))] {
        let lhs_low = unsafe { vget_low_u16(self.neon) };
        let rhs_low = unsafe { vget_low_u16(rhs.neon) };

        let lhs_high = unsafe { vget_high_u16(self.neon) };
        let rhs_high = unsafe { vget_high_u16(rhs.neon) };

        let low = unsafe { vmull_u16(lhs_low, rhs_low) };
        let high = unsafe { vmull_u16(lhs_high, rhs_high) };

        u16x8 { neon: unsafe { vuzpq_u16(vreinterpretq_u16_u32(low), vreinterpretq_u16_u32(high)).1 } }
      } else if #[cfg(target_feature="simd128")] {
        let low = u32x4_extmul_low_u16x8(self.simd, rhs.simd);
        let high = u32x4_extmul_high_u16x8(self.simd, rhs.simd);

        Self { simd: u16x8_shuffle::<1, 3, 5, 7, 9, 11, 13, 15>(low, high) }
      } else {
        u16x8::new([
          ((u32::from(rhs.as_array_ref()[0]) * u32::from(self.as_array_ref()[0])) >> 16) as u16,
          ((u32::from(rhs.as_array_ref()[1]) * u32::from(self.as_array_ref()[1])) >> 16) as u16,
          ((u32::from(rhs.as_array_ref()[2]) * u32::from(self.as_array_ref()[2])) >> 16) as u16,
          ((u32::from(rhs.as_array_ref()[3]) * u32::from(self.as_array_ref()[3])) >> 16) as u16,
          ((u32::from(rhs.as_array_ref()[4]) * u32::from(self.as_array_ref()[4])) >> 16) as u16,
          ((u32::from(rhs.as_array_ref()[5]) * u32::from(self.as_array_ref()[5])) >> 16) as u16,
          ((u32::from(rhs.as_array_ref()[6]) * u32::from(self.as_array_ref()[6])) >> 16) as u16,
          ((u32::from(rhs.as_array_ref()[7]) * u32::from(self.as_array_ref()[7])) >> 16) as u16,
        ])
      }
    }
  }

  #[inline]
  pub fn to_array(self) -> [u16; 8] {
    cast(self)
  }

  #[inline]
  pub fn as_array_ref(&self) -> &[u16; 8] {
    cast_ref(self)
  }

  #[inline]
  pub fn as_array_mut(&mut self) -> &mut [u16; 8] {
    cast_mut(self)
  }
}