1use super::*;
2
pick! {
  if #[cfg(target_feature="avx2")] {
    // With AVX2, all four lanes live in a single 256-bit register.
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(32))]
    pub struct i64x4 { pub(crate) avx2: m256i }
  } else {
    // Fallback: emulate the 256-bit vector as two 128-bit halves
    // (`a` = low two lanes, `b` = high two lanes).
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(32))]
    pub struct i64x4 { pub(crate) a : i64x2, pub(crate) b : i64x2 }
  }
}

// Generates the shared integer constants (ZERO, ONE, MAX, MIN, …) for a
// 4-lane `i64` vector that is 256 bits wide.
int_uint_consts!(i64, 4, i64x4, 256);
16
// SAFETY: `i64x4` is `repr(C)` and, in both cfg variants, is composed purely
// of plain-old-data fields with no padding at the 32-byte size, so the
// all-zero bit pattern is valid and every bit pattern is a valid value.
unsafe impl Zeroable for i64x4 {}
unsafe impl Pod for i64x4 {}
19
20impl Add for i64x4 {
21 type Output = Self;
22 #[inline]
23 #[must_use]
24 fn add(self, rhs: Self) -> Self::Output {
25 pick! {
26 if #[cfg(target_feature="avx2")] {
27 Self { avx2: add_i64_m256i(self.avx2, rhs.avx2) }
28 } else {
29 Self {
30 a : self.a.add(rhs.a),
31 b : self.b.add(rhs.b),
32 }
33 }
34 }
35 }
36}
37
38impl Sub for i64x4 {
39 type Output = Self;
40 #[inline]
41 #[must_use]
42 fn sub(self, rhs: Self) -> Self::Output {
43 pick! {
44 if #[cfg(target_feature="avx2")] {
45 Self { avx2: sub_i64_m256i(self.avx2, rhs.avx2) }
46 } else {
47 Self {
48 a : self.a.sub(rhs.a),
49 b : self.b.sub(rhs.b),
50 }
51 }
52 }
53 }
54}
55
56impl Mul for i64x4 {
57 type Output = Self;
58 #[inline]
59 #[must_use]
60 fn mul(self, rhs: Self) -> Self::Output {
61 pick! {
62 if #[cfg(target_feature="avx2")] {
63 let arr1: [i64; 4] = cast(self);
64 let arr2: [i64; 4] = cast(rhs);
65 cast([
66 arr1[0].wrapping_mul(arr2[0]),
67 arr1[1].wrapping_mul(arr2[1]),
68 arr1[2].wrapping_mul(arr2[2]),
69 arr1[3].wrapping_mul(arr2[3]),
70 ])
71 } else {
72 Self { a: self.a.mul(rhs.a), b: self.b.mul(rhs.b) }
73 }
74 }
75 }
76}
77
78impl Add<i64> for i64x4 {
79 type Output = Self;
80 #[inline]
81 #[must_use]
82 fn add(self, rhs: i64) -> Self::Output {
83 self.add(Self::splat(rhs))
84 }
85}
86
87impl Sub<i64> for i64x4 {
88 type Output = Self;
89 #[inline]
90 #[must_use]
91 fn sub(self, rhs: i64) -> Self::Output {
92 self.sub(Self::splat(rhs))
93 }
94}
95
96impl Mul<i64> for i64x4 {
97 type Output = Self;
98 #[inline]
99 #[must_use]
100 fn mul(self, rhs: i64) -> Self::Output {
101 self.mul(Self::splat(rhs))
102 }
103}
104
105impl Add<i64x4> for i64 {
106 type Output = i64x4;
107 #[inline]
108 #[must_use]
109 fn add(self, rhs: i64x4) -> Self::Output {
110 i64x4::splat(self).add(rhs)
111 }
112}
113
114impl Sub<i64x4> for i64 {
115 type Output = i64x4;
116 #[inline]
117 #[must_use]
118 fn sub(self, rhs: i64x4) -> Self::Output {
119 i64x4::splat(self).sub(rhs)
120 }
121}
122
123impl Mul<i64x4> for i64 {
124 type Output = i64x4;
125 #[inline]
126 #[must_use]
127 fn mul(self, rhs: i64x4) -> Self::Output {
128 i64x4::splat(self).mul(rhs)
129 }
130}
131
132impl BitAnd for i64x4 {
133 type Output = Self;
134 #[inline]
135 #[must_use]
136 fn bitand(self, rhs: Self) -> Self::Output {
137 pick! {
138 if #[cfg(target_feature="avx2")] {
139 Self { avx2: bitand_m256i(self.avx2, rhs.avx2) }
140 } else {
141 Self {
142 a : self.a.bitand(rhs.a),
143 b : self.b.bitand(rhs.b),
144 }
145 }
146 }
147 }
148}
149
150impl BitOr for i64x4 {
151 type Output = Self;
152 #[inline]
153 #[must_use]
154 fn bitor(self, rhs: Self) -> Self::Output {
155 pick! {
156 if #[cfg(target_feature="avx2")] {
157 Self { avx2: bitor_m256i(self.avx2, rhs.avx2) }
158 } else {
159 Self {
160 a : self.a.bitor(rhs.a),
161 b : self.b.bitor(rhs.b),
162 }
163 }
164 }
165 }
166}
167
168impl BitXor for i64x4 {
169 type Output = Self;
170 #[inline]
171 #[must_use]
172 fn bitxor(self, rhs: Self) -> Self::Output {
173 pick! {
174 if #[cfg(target_feature="avx2")] {
175 Self { avx2: bitxor_m256i(self.avx2, rhs.avx2) }
176 } else {
177 Self {
178 a : self.a.bitxor(rhs.a),
179 b : self.b.bitxor(rhs.b),
180 }
181 }
182 }
183 }
184}
185
// Shift every lane left by the same scalar amount, implemented for all the
// primitive integer types as the shift-amount type.
macro_rules! impl_shl_t_for_i64x4 {
  ($($shift_type:ty),+ $(,)?) => {
    $(impl Shl<$shift_type> for i64x4 {
      type Output = Self;
      #[inline]
      #[must_use]
      fn shl(self, rhs: $shift_type) -> Self::Output {
        pick! {
          if #[cfg(target_feature="avx2")] {
            // The intrinsic reads the shift count from the low 64 bits of a
            // vector, so pack `rhs` into lane 0 and zero the rest.
            let shift = cast([rhs as u64, 0]);
            Self { avx2: shl_all_u64_m256i(self.avx2, shift) }
          } else {
            Self {
              a : self.a.shl(rhs),
              b : self.b.shl(rhs),
            }
          }
        }
      }
    })+
  };
}
impl_shl_t_for_i64x4!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);
210
// Shift every lane right by the same scalar amount, implemented for all the
// primitive integer types as the shift-amount type.
//
// NOTE(review): the AVX2 arm uses `shr_all_u64_m256i`, whose `u64` naming
// suggests a logical (zero-fill) shift, while the fallback arm defers to
// `i64x2::shr`, which for a signed element type is presumably arithmetic
// (sign-fill). Verify that negative lanes behave identically on both paths.
macro_rules! impl_shr_t_for_i64x4 {
  ($($shift_type:ty),+ $(,)?) => {
    $(impl Shr<$shift_type> for i64x4 {
      type Output = Self;
      #[inline]
      #[must_use]
      fn shr(self, rhs: $shift_type) -> Self::Output {
        pick! {
          if #[cfg(target_feature="avx2")] {
            // The intrinsic reads the shift count from the low 64 bits of a
            // vector, so pack `rhs` into lane 0 and zero the rest.
            let shift = cast([rhs as u64, 0]);
            Self { avx2: shr_all_u64_m256i(self.avx2, shift) }
          } else {
            Self {
              a : self.a.shr(rhs),
              b : self.b.shr(rhs),
            }
          }
        }
      }
    })+
  };
}
impl_shr_t_for_i64x4!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);
235
236impl CmpEq for i64x4 {
237 type Output = Self;
238 #[inline]
239 #[must_use]
240 fn cmp_eq(self, rhs: Self) -> Self::Output {
241 pick! {
242 if #[cfg(target_feature="avx2")] {
243 Self { avx2: cmp_eq_mask_i64_m256i(self.avx2, rhs.avx2) }
244 } else {
245 Self {
246 a : self.a.cmp_eq(rhs.a),
247 b : self.b.cmp_eq(rhs.b),
248 }
249 }
250 }
251 }
252}
253
254impl CmpGt for i64x4 {
255 type Output = Self;
256 #[inline]
257 #[must_use]
258 fn cmp_gt(self, rhs: Self) -> Self::Output {
259 pick! {
260 if #[cfg(target_feature="avx2")] {
261 Self { avx2: cmp_gt_mask_i64_m256i(self.avx2, rhs.avx2) }
262 } else {
263 Self {
264 a : self.a.cmp_gt(rhs.a),
265 b : self.b.cmp_gt(rhs.b),
266 }
267 }
268 }
269 }
270}
271
impl CmpLt for i64x4 {
  type Output = Self;
  // Lane-wise signed less-than: all-ones lane where `self < rhs`,
  // all-zeros otherwise.
  #[inline]
  #[must_use]
  fn cmp_lt(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        // AVX2 only provides eq and gt masks. For any lane the gt and eq
        // masks are disjoint, so (gt ^ eq) == (gt | eq) == "greater or
        // equal"; its complement is "less than".
        Self { avx2: !(cmp_gt_mask_i64_m256i(self.avx2, rhs.avx2) ^ cmp_eq_mask_i64_m256i(self.avx2, rhs.avx2)) }
      } else {
        Self {
          a : self.a.cmp_lt(rhs.a),
          b : self.b.cmp_lt(rhs.b),
        }
      }
    }
  }
}
289
290impl i64x4 {
291 #[inline]
292 #[must_use]
293 pub const fn new(array: [i64; 4]) -> Self {
294 unsafe { core::intrinsics::transmute(array) }
295 }
296 #[inline]
297 #[must_use]
298 pub fn blend(self, t: Self, f: Self) -> Self {
299 pick! {
300 if #[cfg(target_feature="avx2")] {
301 Self { avx2: blend_varying_i8_m256i(f.avx2,t.avx2,self.avx2) }
302 } else {
303 Self {
304 a : self.a.blend(t.a, f.a),
305 b : self.b.blend(t.b, f.b),
306 }
307 }
308 }
309 }
310
311 #[inline]
312 #[must_use]
313 pub fn abs(self) -> Self {
314 pick! {
315 if #[cfg(target_feature="avx2")] {
316 let arr: [i64; 4] = cast(self);
318 cast(
319 [
320 arr[0].wrapping_abs(),
321 arr[1].wrapping_abs(),
322 arr[2].wrapping_abs(),
323 arr[3].wrapping_abs(),
324 ])
325 } else {
326 Self {
327 a : self.a.abs(),
328 b : self.b.abs(),
329 }
330 }
331 }
332 }
333
334 #[inline]
335 #[must_use]
336 pub fn unsigned_abs(self) -> u64x4 {
337 pick! {
338 if #[cfg(target_feature="avx2")] {
339 let arr: [i64; 4] = cast(self);
341 cast(
342 [
343 arr[0].unsigned_abs(),
344 arr[1].unsigned_abs(),
345 arr[2].unsigned_abs(),
346 arr[3].unsigned_abs(),
347 ])
348 } else {
349 u64x4 {
350 a : self.a.unsigned_abs(),
351 b : self.b.unsigned_abs(),
352 }
353 }
354 }
355 }
356
357 #[inline]
358 #[must_use]
359 pub fn round_float(self) -> f64x4 {
360 let arr: [i64; 4] = cast(self);
361 cast([arr[0] as f64, arr[1] as f64, arr[2] as f64, arr[3] as f64])
362 }
363
364 #[inline]
367 #[must_use]
368 pub fn move_mask(self) -> i32 {
369 pick! {
370 if #[cfg(target_feature="avx2")] {
371 move_mask_m256d(cast(self.avx2))
373 } else {
374 self.a.move_mask() | (self.b.move_mask() << 2)
375 }
376 }
377 }
378
379 #[inline]
381 #[must_use]
382 pub fn any(self) -> bool {
383 pick! {
384 if #[cfg(target_feature="avx2")] {
385 move_mask_m256d(cast(self.avx2)) != 0
386 } else {
387 (self.a | self.b).any()
388 }
389 }
390 }
391
392 #[inline]
394 #[must_use]
395 pub fn all(self) -> bool {
396 pick! {
397 if #[cfg(target_feature="avx2")] {
398 move_mask_m256d(cast(self.avx2)) == 0b1111
399 } else {
400 (self.a & self.b).all()
401 }
402 }
403 }
404
405 #[inline]
407 #[must_use]
408 pub fn none(self) -> bool {
409 !self.any()
410 }
411
412 #[inline]
413 pub fn to_array(self) -> [i64; 4] {
414 cast(self)
415 }
416
417 #[inline]
418 pub fn as_array_ref(&self) -> &[i64; 4] {
419 cast_ref(self)
420 }
421
422 #[inline]
423 pub fn as_array_mut(&mut self) -> &mut [i64; 4] {
424 cast_mut(self)
425 }
426}
427
428impl Not for i64x4 {
429 type Output = Self;
430 #[inline]
431 fn not(self) -> Self {
432 pick! {
433 if #[cfg(target_feature="avx2")] {
434 Self { avx2: self.avx2.not() }
435 } else {
436 Self {
437 a : self.a.not(),
438 b : self.b.not(),
439 }
440 }
441 }
442 }
443}