use super::U256;
use crate::intrinsics::signed::*;

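// Implement the arithmetic and shift operators from `core::ops` for `U256`,
// accepting both `U256` and `u128` right-hand sides; the `impl_ops!` macro
// maps each operator to the listed unsigned intrinsics.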
impl_ops! {
    for U256 | u128 {
        add => uadd2, uadd3, uaddc;
        mul => umul2, umul3, umulc;
        sub => usub2, usub3, usubc;

        div => udiv2, udiv3;
        rem => urem2, urem3;

        shl => ushl2, ushl3;
        shr => ushr2, ushr3;
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use core::ops::*;

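    // Compile-time check that the operator traits are implemented for both
    // `U256` and `&U256`, with owned and borrowed `U256` and `u128` on the
    // right-hand side (and every primitive integer type for the shift
    // operators); the empty `assert_ops` body means this only has to compile.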
    #[test]
    fn trait_implementations() {
        trait Implements {}
        impl Implements for U256 {}
        impl Implements for &'_ U256 {}

        fn assert_ops<T>()
        where
            for<'a> T: Implements
                + Add<&'a u128>
                + Add<&'a U256>
                + Add<u128>
                + Add<U256>
                + AddAssign<&'a u128>
                + AddAssign<&'a U256>
                + AddAssign<u128>
                + AddAssign<U256>
                + BitAnd<&'a u128>
                + BitAnd<&'a U256>
                + BitAnd<u128>
                + BitAnd<U256>
                + BitAndAssign<&'a u128>
                + BitAndAssign<&'a U256>
                + BitAndAssign<u128>
                + BitAndAssign<U256>
                + BitOr<&'a u128>
                + BitOr<&'a U256>
                + BitOr<u128>
                + BitOr<U256>
                + BitOrAssign<&'a u128>
                + BitOrAssign<&'a U256>
                + BitOrAssign<u128>
                + BitOrAssign<U256>
                + BitXor<&'a u128>
                + BitXor<&'a U256>
                + BitXor<u128>
                + BitXor<U256>
                + BitXorAssign<&'a u128>
                + BitXorAssign<&'a U256>
                + BitXorAssign<u128>
                + BitXorAssign<U256>
                + Div<&'a u128>
                + Div<&'a U256>
                + Div<u128>
                + Div<U256>
                + DivAssign<&'a u128>
                + DivAssign<&'a U256>
                + DivAssign<u128>
                + DivAssign<U256>
                + Mul<&'a u128>
                + Mul<&'a U256>
                + Mul<u128>
                + Mul<U256>
                + MulAssign<&'a u128>
                + MulAssign<&'a U256>
                + MulAssign<u128>
                + MulAssign<U256>
                + Not
                + Rem<&'a u128>
                + Rem<&'a U256>
                + Rem<u128>
                + Rem<U256>
                + RemAssign<&'a u128>
                + RemAssign<&'a U256>
                + RemAssign<u128>
                + RemAssign<U256>
                + Shl<&'a i128>
                + Shl<&'a i16>
                + Shl<&'a i32>
                + Shl<&'a i64>
                + Shl<&'a i8>
                + Shl<&'a isize>
                + Shl<&'a u128>
                + Shl<&'a u16>
                + Shl<&'a U256>
                + Shl<&'a u32>
                + Shl<&'a u64>
                + Shl<&'a u8>
                + Shl<&'a usize>
                + Shl<i128>
                + Shl<i16>
                + Shl<i32>
                + Shl<i64>
                + Shl<i8>
                + Shl<isize>
                + Shl<u128>
                + Shl<u16>
                + Shl<U256>
                + Shl<u32>
                + Shl<u64>
                + Shl<u8>
                + Shl<usize>
                + ShlAssign<&'a i128>
                + ShlAssign<&'a i16>
                + ShlAssign<&'a i32>
                + ShlAssign<&'a i64>
                + ShlAssign<&'a i8>
                + ShlAssign<&'a isize>
                + ShlAssign<&'a u128>
                + ShlAssign<&'a u16>
                + ShlAssign<&'a U256>
                + ShlAssign<&'a u32>
                + ShlAssign<&'a u64>
                + ShlAssign<&'a u8>
                + ShlAssign<&'a usize>
                + ShlAssign<i128>
                + ShlAssign<i16>
                + ShlAssign<i32>
                + ShlAssign<i64>
                + ShlAssign<i8>
                + ShlAssign<isize>
                + ShlAssign<u128>
                + ShlAssign<u16>
                + ShlAssign<U256>
                + ShlAssign<u32>
                + ShlAssign<u64>
                + ShlAssign<u8>
                + ShlAssign<usize>
                + Shr<&'a i128>
                + Shr<&'a i16>
                + Shr<&'a i32>
                + Shr<&'a i64>
                + Shr<&'a i8>
                + Shr<&'a isize>
                + Shr<&'a u128>
                + Shr<&'a u16>
                + Shr<&'a U256>
                + Shr<&'a u32>
                + Shr<&'a u64>
                + Shr<&'a u8>
                + Shr<&'a usize>
                + Shr<i128>
                + Shr<i16>
                + Shr<i32>
                + Shr<i64>
                + Shr<i8>
                + Shr<isize>
                + Shr<u128>
                + Shr<u16>
                + Shr<U256>
                + Shr<u32>
                + Shr<u64>
                + Shr<u8>
                + Shr<usize>
                + ShrAssign<&'a i128>
                + ShrAssign<&'a i16>
                + ShrAssign<&'a i32>
                + ShrAssign<&'a i64>
                + ShrAssign<&'a i8>
                + ShrAssign<&'a isize>
                + ShrAssign<&'a u128>
                + ShrAssign<&'a u16>
                + ShrAssign<&'a U256>
                + ShrAssign<&'a u32>
                + ShrAssign<&'a u64>
                + ShrAssign<&'a u8>
                + ShrAssign<&'a usize>
                + ShrAssign<i128>
                + ShrAssign<i16>
                + ShrAssign<i32>
                + ShrAssign<i64>
                + ShrAssign<i8>
                + ShrAssign<isize>
                + ShrAssign<u128>
                + ShrAssign<u16>
                + ShrAssign<U256>
                + ShrAssign<u32>
                + ShrAssign<u64>
                + ShrAssign<u8>
                + ShrAssign<usize>
                + Sub<&'a u128>
                + Sub<&'a U256>
                + Sub<u128>
                + Sub<U256>
                + SubAssign<&'a u128>
                + SubAssign<&'a U256>
                + SubAssign<u128>
                + SubAssign<U256>,
            for<'a> &'a T: Implements
                + Add<&'a u128>
                + Add<&'a U256>
                + Add<u128>
                + Add<U256>
                + BitAnd<&'a u128>
                + BitAnd<&'a U256>
                + BitAnd<u128>
                + BitAnd<U256>
                + BitOr<&'a u128>
                + BitOr<&'a U256>
                + BitOr<u128>
                + BitOr<U256>
                + BitXor<&'a u128>
                + BitXor<&'a U256>
                + BitXor<u128>
                + BitXor<U256>
                + Div<&'a u128>
                + Div<&'a U256>
                + Div<u128>
                + Div<U256>
                + Mul<&'a u128>
                + Mul<&'a U256>
                + Mul<u128>
                + Mul<U256>
                + Not
                + Rem<&'a u128>
                + Rem<&'a U256>
                + Rem<u128>
                + Rem<U256>
                + Shl<&'a i128>
                + Shl<&'a i16>
                + Shl<&'a i32>
                + Shl<&'a i64>
                + Shl<&'a i8>
                + Shl<&'a isize>
                + Shl<&'a u128>
                + Shl<&'a u16>
                + Shl<&'a U256>
                + Shl<&'a u32>
                + Shl<&'a u64>
                + Shl<&'a u8>
                + Shl<&'a usize>
                + Shl<i128>
                + Shl<i16>
                + Shl<i32>
                + Shl<i64>
                + Shl<i8>
                + Shl<isize>
                + Shl<u128>
                + Shl<u16>
                + Shl<U256>
                + Shl<u32>
                + Shl<u64>
                + Shl<u8>
                + Shl<usize>
                + Shr<&'a i128>
                + Shr<&'a i16>
                + Shr<&'a i32>
                + Shr<&'a i64>
                + Shr<&'a i8>
                + Shr<&'a isize>
                + Shr<&'a u128>
                + Shr<&'a u16>
                + Shr<&'a U256>
                + Shr<&'a u32>
                + Shr<&'a u64>
                + Shr<&'a u8>
                + Shr<&'a usize>
                + Shr<i128>
                + Shr<i16>
                + Shr<i32>
                + Shr<i64>
                + Shr<i8>
                + Shr<isize>
                + Shr<u128>
                + Shr<u16>
                + Shr<U256>
                + Shr<u32>
                + Shr<u64>
                + Shr<u8>
                + Shr<usize>
                + Sub<&'a u128>
                + Sub<&'a U256>
                + Sub<u128>
                + Sub<U256>,
        {
        }

        assert_ops::<U256>();
    }
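
    // A minimal runtime smoke test sketching the generated operators in
    // action. It assumes the parent module's `U256::new(u128)` constructor
    // and its `PartialEq`/`Debug` impls, and that each operator returns a
    // `U256`; the trait coverage itself is checked above.
    #[test]
    fn basic_arithmetic() {
        let x = U256::new(6);
        assert_eq!(&x + 4u128, U256::new(10));
        assert_eq!(&x - U256::new(1), U256::new(5));
        assert_eq!(&x * 7u128, U256::new(42));
        assert_eq!(&x / U256::new(2), U256::new(3));
        assert_eq!(&x % 4u128, U256::new(2));
        assert_eq!(&x << 1u32, U256::new(12));
        assert_eq!(&x >> 1u32, U256::new(3));
    }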
}