// ethnum/int/ops.rs

//! Module `core::ops` trait implementations.
//!
//! Trait implementations for `i128` are also provided to allow notation such
//! as:
//!
//! ```
//! # use ethnum::I256;
//!
//! let a = 1 + I256::ONE;
//! let b = I256::ONE + 1;
//! dbg!(a, b);
//! ```

14use super::I256;
15use crate::intrinsics::signed::*;
16
17impl_ops! {
18    for I256 | i128 {
19        add => iadd2, iadd3, iaddc;
20        mul => imul2, imul3, imulc;
21        sub => isub2, isub3, isubc;
22
23        div => idiv2, idiv3;
24        rem => irem2, irem3;
25
26        shl => ishl2, ishl3;
27        shr => isar2, isar3;
28    }
29}
30
31impl_ops_neg! {
32    for I256 {
33        add => iadd2;
34    }
35}
36
#[cfg(test)]
mod tests {
    use super::*;
    use crate::uint::U256;
    use core::ops::*;

    /// Compile-time check that `I256` — both by value and by reference —
    /// implements the full matrix of `core::ops` traits with every expected
    /// right-hand-side operand type (`I256`, `i128`, `U256` and all primitive
    /// integer shift amounts, each by value and by reference).
    ///
    /// The function bodies are intentionally empty: if any trait
    /// implementation is missing, `assert_ops::<I256>()` fails to compile,
    /// which is the entire assertion. The `Implements` marker trait merely
    /// anchors `T` to the types under test.
    #[test]
    fn trait_implementations() {
        trait Implements {}
        impl Implements for I256 {}
        impl Implements for &'_ I256 {}

        fn assert_ops<T>()
        where
            // Owned `T` must support both the plain operators and the
            // compound-assignment (`*Assign`) forms.
            for<'a> T: Implements
                + Add<&'a i128>
                + Add<&'a I256>
                + Add<i128>
                + Add<I256>
                + AddAssign<&'a i128>
                + AddAssign<&'a I256>
                + AddAssign<i128>
                + AddAssign<I256>
                + BitAnd<&'a i128>
                + BitAnd<&'a I256>
                + BitAnd<i128>
                + BitAnd<I256>
                + BitAndAssign<&'a i128>
                + BitAndAssign<&'a I256>
                + BitAndAssign<i128>
                + BitAndAssign<I256>
                + BitOr<&'a i128>
                + BitOr<&'a I256>
                + BitOr<i128>
                + BitOr<I256>
                + BitOrAssign<&'a i128>
                + BitOrAssign<&'a I256>
                + BitOrAssign<i128>
                + BitOrAssign<I256>
                + BitXor<&'a i128>
                + BitXor<&'a I256>
                + BitXor<i128>
                + BitXor<I256>
                + BitXorAssign<&'a i128>
                + BitXorAssign<&'a I256>
                + BitXorAssign<i128>
                + BitXorAssign<I256>
                + Div<&'a i128>
                + Div<&'a I256>
                + Div<i128>
                + Div<I256>
                + DivAssign<&'a i128>
                + DivAssign<&'a I256>
                + DivAssign<i128>
                + DivAssign<I256>
                + Mul<&'a i128>
                + Mul<&'a I256>
                + Mul<i128>
                + Mul<I256>
                + MulAssign<&'a i128>
                + MulAssign<&'a I256>
                + MulAssign<i128>
                + MulAssign<I256>
                + Neg
                + Not
                + Rem<&'a i128>
                + Rem<&'a I256>
                + Rem<i128>
                + Rem<I256>
                + RemAssign<&'a i128>
                + RemAssign<&'a I256>
                + RemAssign<i128>
                + RemAssign<I256>
                // Shifts accept every primitive integer type (signed and
                // unsigned), plus `I256`/`U256`, by value and by reference.
                + Shl<&'a i128>
                + Shl<&'a i16>
                + Shl<&'a I256>
                + Shl<&'a i32>
                + Shl<&'a i64>
                + Shl<&'a i8>
                + Shl<&'a isize>
                + Shl<&'a u128>
                + Shl<&'a u16>
                + Shl<&'a U256>
                + Shl<&'a u32>
                + Shl<&'a u64>
                + Shl<&'a u8>
                + Shl<&'a usize>
                + Shl<i128>
                + Shl<i16>
                + Shl<I256>
                + Shl<i32>
                + Shl<i64>
                + Shl<i8>
                + Shl<isize>
                + Shl<u128>
                + Shl<u16>
                + Shl<U256>
                + Shl<u32>
                + Shl<u64>
                + Shl<u8>
                + Shl<usize>
                + ShlAssign<&'a i128>
                + ShlAssign<&'a i16>
                + ShlAssign<&'a I256>
                + ShlAssign<&'a i32>
                + ShlAssign<&'a i64>
                + ShlAssign<&'a i8>
                + ShlAssign<&'a isize>
                + ShlAssign<&'a u128>
                + ShlAssign<&'a u16>
                + ShlAssign<&'a U256>
                + ShlAssign<&'a u32>
                + ShlAssign<&'a u64>
                + ShlAssign<&'a u8>
                + ShlAssign<&'a usize>
                + ShlAssign<i128>
                + ShlAssign<i16>
                + ShlAssign<I256>
                + ShlAssign<i32>
                + ShlAssign<i64>
                + ShlAssign<i8>
                + ShlAssign<isize>
                + ShlAssign<u128>
                + ShlAssign<u16>
                + ShlAssign<U256>
                + ShlAssign<u32>
                + ShlAssign<u64>
                + ShlAssign<u8>
                + ShlAssign<usize>
                + Shr<&'a i128>
                + Shr<&'a i16>
                + Shr<&'a I256>
                + Shr<&'a i32>
                + Shr<&'a i64>
                + Shr<&'a i8>
                + Shr<&'a isize>
                + Shr<&'a u128>
                + Shr<&'a u16>
                + Shr<&'a U256>
                + Shr<&'a u32>
                + Shr<&'a u64>
                + Shr<&'a u8>
                + Shr<&'a usize>
                + Shr<i128>
                + Shr<i16>
                + Shr<I256>
                + Shr<i32>
                + Shr<i64>
                + Shr<i8>
                + Shr<isize>
                + Shr<u128>
                + Shr<u16>
                + Shr<U256>
                + Shr<u32>
                + Shr<u64>
                + Shr<u8>
                + Shr<usize>
                + ShrAssign<&'a i128>
                + ShrAssign<&'a i16>
                + ShrAssign<&'a I256>
                + ShrAssign<&'a i32>
                + ShrAssign<&'a i64>
                + ShrAssign<&'a i8>
                + ShrAssign<&'a isize>
                + ShrAssign<&'a u128>
                + ShrAssign<&'a u16>
                + ShrAssign<&'a U256>
                + ShrAssign<&'a u32>
                + ShrAssign<&'a u64>
                + ShrAssign<&'a u8>
                + ShrAssign<&'a usize>
                + ShrAssign<i128>
                + ShrAssign<i16>
                + ShrAssign<I256>
                + ShrAssign<i32>
                + ShrAssign<i64>
                + ShrAssign<i8>
                + ShrAssign<isize>
                + ShrAssign<u128>
                + ShrAssign<u16>
                + ShrAssign<U256>
                + ShrAssign<u32>
                + ShrAssign<u64>
                + ShrAssign<u8>
                + ShrAssign<usize>
                + Sub<&'a i128>
                + Sub<&'a I256>
                + Sub<i128>
                + Sub<I256>
                + SubAssign<&'a i128>
                + SubAssign<&'a I256>
                + SubAssign<i128>
                + SubAssign<I256>,
            // Borrowed `&T` must support the plain (non-assigning)
            // operators with the same operand matrix.
            for<'a> &'a T: Implements
                + Add<&'a i128>
                + Add<&'a I256>
                + Add<i128>
                + Add<I256>
                + BitAnd<&'a i128>
                + BitAnd<&'a I256>
                + BitAnd<i128>
                + BitAnd<I256>
                + BitOr<&'a i128>
                + BitOr<&'a I256>
                + BitOr<i128>
                + BitOr<I256>
                + BitXor<&'a i128>
                + BitXor<&'a I256>
                + BitXor<i128>
                + BitXor<I256>
                + Div<&'a i128>
                + Div<&'a I256>
                + Div<i128>
                + Div<I256>
                + Mul<&'a i128>
                + Mul<&'a I256>
                + Mul<i128>
                + Mul<I256>
                + Neg
                + Not
                + Rem<&'a i128>
                + Rem<&'a I256>
                + Rem<i128>
                + Rem<I256>
                + Shl<&'a i128>
                + Shl<&'a i16>
                + Shl<&'a I256>
                + Shl<&'a i32>
                + Shl<&'a i64>
                + Shl<&'a i8>
                + Shl<&'a isize>
                + Shl<&'a u128>
                + Shl<&'a u16>
                + Shl<&'a U256>
                + Shl<&'a u32>
                + Shl<&'a u64>
                + Shl<&'a u8>
                + Shl<&'a usize>
                + Shl<i128>
                + Shl<i16>
                + Shl<I256>
                + Shl<i32>
                + Shl<i64>
                + Shl<i8>
                + Shl<isize>
                + Shl<u128>
                + Shl<u16>
                + Shl<U256>
                + Shl<u32>
                + Shl<u64>
                + Shl<u8>
                + Shl<usize>
                + Shr<&'a i128>
                + Shr<&'a i16>
                + Shr<&'a I256>
                + Shr<&'a i32>
                + Shr<&'a i64>
                + Shr<&'a i8>
                + Shr<&'a isize>
                + Shr<&'a u128>
                + Shr<&'a u16>
                + Shr<&'a U256>
                + Shr<&'a u32>
                + Shr<&'a u64>
                + Shr<&'a u8>
                + Shr<&'a usize>
                + Shr<i128>
                + Shr<i16>
                + Shr<I256>
                + Shr<i32>
                + Shr<i64>
                + Shr<i8>
                + Shr<isize>
                + Shr<u128>
                + Shr<u16>
                + Shr<U256>
                + Shr<u32>
                + Shr<u64>
                + Shr<u8>
                + Shr<usize>
                + Sub<&'a i128>
                + Sub<&'a I256>
                + Sub<i128>
                + Sub<I256>,
        {
        }

        // Instantiating the generic function is the test: it compiles only
        // if every bound above is satisfied by `I256` and `&I256`.
        assert_ops::<I256>();
    }
}