1use super::*;
2
pick! {
  if #[cfg(target_feature="avx2")] {
    // AVX2: all four u64 lanes live in a single 256-bit register.
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(32))]
    pub struct u64x4 { pub(crate) avx2: m256i }
  } else {
    // Fallback: two 128-bit halves; `a` holds lanes 0-1, `b` holds lanes 2-3
    // (repr(C) field order matches the `[u64; 4]` layout used by `cast`).
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(32))]
    pub struct u64x4 { pub(crate) a : u64x2, pub(crate) b : u64x2 }
  }
}
14
// Shared integer constants/impls for a 256-bit vector of four u64 lanes
// (see the macro definition for exactly what it generates).
int_uint_consts!(u64, 4, u64x4, 256);

// SAFETY: in both cfg variants u64x4 is repr(C) and exactly 32 bytes of
// plain integer data (one m256i, or two u64x2) with no padding, so all-zero
// bytes are valid and any byte pattern is a valid value.
unsafe impl Zeroable for u64x4 {}
unsafe impl Pod for u64x4 {}
19
impl Add for u64x4 {
  type Output = Self;
  /// Lanewise addition; wraps on overflow (SIMD integer add semantics).
  #[inline]
  #[must_use]
  fn add(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        // One 256-bit add; i64 and u64 addition are bit-identical in
        // two's complement, so the signed intrinsic is fine here.
        Self { avx2: add_i64_m256i(self.avx2, rhs.avx2) }
      } else {
        // Add each 128-bit half independently.
        Self {
          a : self.a.add(rhs.a),
          b : self.b.add(rhs.b),
        }
      }
    }
  }
}
37
impl Sub for u64x4 {
  type Output = Self;
  /// Lanewise subtraction; wraps on underflow (SIMD integer sub semantics).
  #[inline]
  #[must_use]
  fn sub(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        // i64 and u64 subtraction are bit-identical in two's complement.
        Self { avx2: sub_i64_m256i(self.avx2, rhs.avx2) }
      } else {
        // Subtract each 128-bit half independently.
        Self {
          a : self.a.sub(rhs.a),
          b : self.b.sub(rhs.b),
        }
      }
    }
  }
}
55
impl Mul for u64x4 {
  type Output = Self;
  /// Lanewise wrapping multiplication.
  #[inline]
  #[must_use]
  fn mul(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        // AVX2 has no full 64-bit lane multiply (that needs AVX-512DQ), so
        // fall back to scalar multiplies through arrays. Signed wrapping_mul
        // yields the same low 64 bits as the unsigned product.
        let arr1: [i64; 4] = cast(self);
        let arr2: [i64; 4] = cast(rhs);
        cast([
          arr1[0].wrapping_mul(arr2[0]),
          arr1[1].wrapping_mul(arr2[1]),
          arr1[2].wrapping_mul(arr2[2]),
          arr1[3].wrapping_mul(arr2[3]),
        ])
      } else {
        Self { a: self.a.mul(rhs.a), b: self.b.mul(rhs.b) }
      }
    }
  }
}
77
78impl Add<u64> for u64x4 {
79 type Output = Self;
80 #[inline]
81 #[must_use]
82 fn add(self, rhs: u64) -> Self::Output {
83 self.add(Self::splat(rhs))
84 }
85}
86
87impl Sub<u64> for u64x4 {
88 type Output = Self;
89 #[inline]
90 #[must_use]
91 fn sub(self, rhs: u64) -> Self::Output {
92 self.sub(Self::splat(rhs))
93 }
94}
95
96impl Mul<u64> for u64x4 {
97 type Output = Self;
98 #[inline]
99 #[must_use]
100 fn mul(self, rhs: u64) -> Self::Output {
101 self.mul(Self::splat(rhs))
102 }
103}
104
105impl Add<u64x4> for u64 {
106 type Output = u64x4;
107 #[inline]
108 #[must_use]
109 fn add(self, rhs: u64x4) -> Self::Output {
110 u64x4::splat(self).add(rhs)
111 }
112}
113
114impl Sub<u64x4> for u64 {
115 type Output = u64x4;
116 #[inline]
117 #[must_use]
118 fn sub(self, rhs: u64x4) -> Self::Output {
119 u64x4::splat(self).sub(rhs)
120 }
121}
122
123impl Mul<u64x4> for u64 {
124 type Output = u64x4;
125 #[inline]
126 #[must_use]
127 fn mul(self, rhs: u64x4) -> Self::Output {
128 u64x4::splat(self).mul(rhs)
129 }
130}
131
impl BitAnd for u64x4 {
  type Output = Self;
  /// Bitwise AND of all 256 bits (lane boundaries are irrelevant here).
  #[inline]
  #[must_use]
  fn bitand(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: bitand_m256i(self.avx2, rhs.avx2) }
      } else {
        Self {
          a : self.a.bitand(rhs.a),
          b : self.b.bitand(rhs.b),
        }
      }
    }
  }
}
149
impl BitOr for u64x4 {
  type Output = Self;
  /// Bitwise OR of all 256 bits (lane boundaries are irrelevant here).
  #[inline]
  #[must_use]
  fn bitor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: bitor_m256i(self.avx2, rhs.avx2) }
      } else {
        Self {
          a : self.a.bitor(rhs.a),
          b : self.b.bitor(rhs.b),
        }
      }
    }
  }
}
167
impl BitXor for u64x4 {
  type Output = Self;
  /// Bitwise XOR of all 256 bits (lane boundaries are irrelevant here).
  #[inline]
  #[must_use]
  fn bitxor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: bitxor_m256i(self.avx2, rhs.avx2) }
      } else {
        Self {
          a : self.a.bitxor(rhs.a),
          b : self.b.bitxor(rhs.b),
        }
      }
    }
  }
}
185
// Implements `Shl<$shift_type>` for u64x4: every lane is shifted left by the
// same scalar amount. NOTE(review): `rhs as u64` sign-extends negative signed
// shift amounts into huge counts; callers are expected to pass in-range
// shifts — confirm against the crate's shift conventions.
macro_rules! impl_shl_t_for_u64x4 {
  ($($shift_type:ty),+ $(,)?) => {
    $(impl Shl<$shift_type> for u64x4 {
      type Output = Self;
      #[inline]
      #[must_use]
      fn shl(self, rhs: $shift_type) -> Self::Output {
        pick! {
          if #[cfg(target_feature="avx2")] {
            // The intrinsic takes the count in the low 64 bits of a 128-bit
            // value; the high lane is zeroed.
            let shift = cast([rhs as u64, 0]);
            Self { avx2: shl_all_u64_m256i(self.avx2, shift) }
          } else {
            Self {
              a : self.a.shl(rhs),
              b : self.b.shl(rhs),
            }
          }
        }
      }
    })+
  };
}
impl_shl_t_for_u64x4!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);
210
// Implements `Shr<$shift_type>` for u64x4: a logical (zero-fill) right shift
// of every lane by the same scalar amount, as appropriate for unsigned lanes.
// NOTE(review): `rhs as u64` sign-extends negative signed shift amounts into
// huge counts; callers are expected to pass in-range shifts.
macro_rules! impl_shr_t_for_u64x4 {
  ($($shift_type:ty),+ $(,)?) => {
    $(impl Shr<$shift_type> for u64x4 {
      type Output = Self;
      #[inline]
      #[must_use]
      fn shr(self, rhs: $shift_type) -> Self::Output {
        pick! {
          if #[cfg(target_feature="avx2")] {
            // The intrinsic takes the count in the low 64 bits of a 128-bit
            // value; the high lane is zeroed.
            let shift = cast([rhs as u64, 0]);
            Self { avx2: shr_all_u64_m256i(self.avx2, shift) }
          } else {
            Self {
              a : self.a.shr(rhs),
              b : self.b.shr(rhs),
            }
          }
        }
      }
    })+
  };
}
impl_shr_t_for_u64x4!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);
235
236impl u64x4 {
237 #[inline]
238 #[must_use]
239 pub const fn new(array: [u64; 4]) -> Self {
240 unsafe { core::intrinsics::transmute(array) }
241 }
242 #[inline]
243 #[must_use]
244 pub fn cmp_eq(self, rhs: Self) -> Self {
245 pick! {
246 if #[cfg(target_feature="avx2")] {
247 Self { avx2: cmp_eq_mask_i64_m256i(self.avx2, rhs.avx2) }
248 } else {
249 Self {
250 a : self.a.cmp_eq(rhs.a),
251 b : self.b.cmp_eq(rhs.b),
252 }
253 }
254 }
255 }
256 #[inline]
257 #[must_use]
258 pub fn cmp_gt(self, rhs: Self) -> Self {
259 pick! {
260 if #[cfg(target_feature="avx2")] {
261 let highbit = u64x4::splat(1 << 63);
263 Self { avx2: cmp_gt_mask_i64_m256i((self ^ highbit).avx2, (rhs ^ highbit).avx2) }
264 } else {
265 Self {
266 a : self.a.cmp_gt(rhs.a),
267 b : self.b.cmp_gt(rhs.b),
268 }
269 }
270 }
271 }
272
273 #[inline]
274 #[must_use]
275 pub fn cmp_lt(self, rhs: Self) -> Self {
276 rhs.cmp_gt(self)
278 }
279
280 #[inline]
281 #[must_use]
282 pub fn blend(self, t: Self, f: Self) -> Self {
283 pick! {
284 if #[cfg(target_feature="avx2")] {
285 Self { avx2: blend_varying_i8_m256i(f.avx2,t.avx2,self.avx2) }
286 } else {
287 Self {
288 a : self.a.blend(t.a, f.a),
289 b : self.b.blend(t.b, f.b),
290 }
291 }
292 }
293 }
294
295 #[inline]
296 pub fn to_array(self) -> [u64; 4] {
297 cast(self)
298 }
299
300 #[inline]
301 pub fn as_array_ref(&self) -> &[u64; 4] {
302 cast_ref(self)
303 }
304
305 #[inline]
306 pub fn as_array_mut(&mut self) -> &mut [u64; 4] {
307 cast_mut(self)
308 }
309}
310
impl Not for u64x4 {
  type Output = Self;
  /// Bitwise complement of all 256 bits.
  #[inline]
  fn not(self) -> Self {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: self.avx2.not() }
      } else {
        Self {
          a : self.a.not(),
          b : self.b.not(),
        }
      }
    }
  }
}