20 #ifndef _COBALT_UAPI_ASM_GENERIC_ARITH_H
21 #define _COBALT_UAPI_ASM_GENERIC_ARITH_H
#ifndef xnarch_u64tou32
/*
 * Split a 64-bit value into its high (h) and low (l) 32-bit halves.
 * 'struct endianstruct' (members _h/_l) is declared by the arch headers
 * in the machine's native byte order, so the union type-pun below is
 * endian-correct by construction.
 * NOTE(review): this span was truncated by extraction; body reconstructed
 * to match upstream Xenomai arith.h -- verify against upstream.
 */
#define xnarch_u64tou32(ull, h, l) ({           \
        union {                                 \
                unsigned long long _ull;        \
                struct endianstruct _s;         \
        } _u;                                   \
        _u._ull = (ull);                        \
        (h) = _u._s._h;                         \
        (l) = _u._s._l;                         \
})
#endif /* !xnarch_u64tou32 */
#ifndef xnarch_u64fromu32
/*
 * Combine high (h) and low (l) 32-bit halves into one 64-bit value;
 * exact inverse of xnarch_u64tou32().
 * NOTE(review): span truncated by extraction; body reconstructed to
 * match upstream Xenomai arith.h -- verify against upstream.
 */
#define xnarch_u64fromu32(h, l) ({              \
        union {                                 \
                unsigned long long _ull;        \
                struct endianstruct _s;         \
        } _u;                                   \
        _u._s._h = (h);                         \
        _u._s._l = (l);                         \
        _u._ull;                                \
})
#endif /* !xnarch_u64fromu32 */
#ifndef xnarch_ullmul
/*
 * 32x32 -> 64 bit unsigned multiply.
 * Generic C fallback; archs may override by defining xnarch_ullmul
 * before including this header.  The widening cast happens before the
 * multiply, so the full 64-bit product is kept.
 */
static inline __attribute__((__const__)) unsigned long long
xnarch_generic_ullmul(const unsigned m0, const unsigned m1)
{
        return (unsigned long long) m0 * m1;
}
#define xnarch_ullmul(m0,m1) xnarch_generic_ullmul((m0),(m1))
#endif /* !xnarch_ullmul */
#ifndef xnarch_ulldiv
/*
 * Divide a 64-bit dividend by a 32-bit divisor; the quotient is
 * returned and the remainder stored through *rp when rp is non-NULL.
 * Relies on do_div() as provided by the surrounding environment
 * (kernel asm/div64.h or the Cobalt uapi equivalent).
 * NOTE(review): interior lines were lost in extraction; body
 * reconstructed to match upstream Xenomai arith.h -- verify.
 */
static inline unsigned long long xnarch_generic_ulldiv (unsigned long long ull,
                                                        const unsigned uld,
                                                        unsigned long *const rp)
{
        const unsigned r = do_div(ull, uld);

        if (rp)
                *rp = r;

        return ull;
}
#define xnarch_ulldiv(ull,uld,rp) xnarch_generic_ulldiv((ull),(uld),(rp))
#endif /* !xnarch_ulldiv */
#ifndef xnarch_uldivrem
/* 64/32 division yielding a 32-bit quotient; remainder via *rp. */
#define xnarch_uldivrem(ull,ul,rp) ((unsigned) xnarch_ulldiv((ull),(ul),(rp)))
#endif /* !xnarch_uldivrem */
#ifndef xnarch_divmod64
/*
 * 64/64 division with optional remainder through *rem.
 * On 32-bit kernels, divisors that fit in 32 bits go through the fast
 * xnarch_ulldiv() path and the full 64/64 case is delegated to
 * xnarch_generic_full_divmod64() (implemented in arith.c); userland
 * and 64-bit builds use plain C division.
 * NOTE(review): interior lines were lost in extraction; body
 * reconstructed to match upstream Xenomai arith.h -- verify.
 */
static inline unsigned long long
xnarch_generic_divmod64(unsigned long long a,
                        unsigned long long b,
                        unsigned long long *rem)
{
        unsigned long long q;
#if defined(__KERNEL__) && BITS_PER_LONG < 64
        unsigned long long
        xnarch_generic_full_divmod64(unsigned long long a,
                                     unsigned long long b,
                                     unsigned long long *rem);
        if (b <= 0xffffffffULL) {
                unsigned long r;
                q = xnarch_ulldiv(a, b, &r);
                if (rem)
                        *rem = r;
        } else {
                if (a < b) {
                        if (rem)
                                *rem = a;
                        return 0;
                }

                return xnarch_generic_full_divmod64(a, b, rem);
        }
#else /* !(__KERNEL__ && BITS_PER_LONG < 64) */
        q = a / b;
        if (rem)
                *rem = a % b;
#endif /* !(__KERNEL__ && BITS_PER_LONG < 64) */
        return q;
}
#define xnarch_divmod64(a,b,rp) xnarch_generic_divmod64((a),(b),(rp))
#endif /* !xnarch_divmod64 */
#ifndef xnarch_imuldiv
/*
 * Return i * mult / div with the intermediate product widened to
 * 64 bits, so the 32x32 multiply cannot overflow.
 */
static inline __attribute__((__const__))
int xnarch_generic_imuldiv(int i,
                           unsigned mult,
                           unsigned div)
{
        const unsigned long long ull = xnarch_ullmul(i, mult);
        return xnarch_uldivrem(ull, div, NULL);
}
#define xnarch_imuldiv(i,m,d) xnarch_generic_imuldiv((i),(m),(d))
#endif /* !xnarch_imuldiv */
#ifndef xnarch_imuldiv_ceil
/*
 * Return the integer ceiling of i * mult / div.
 * Adding (div - 1) to the 64-bit product before dividing rounds the
 * quotient up instead of truncating.
 */
static inline __attribute__((__const__))
int xnarch_generic_imuldiv_ceil(int i,
                                unsigned mult,
                                unsigned div)
{
        const unsigned long long ull = xnarch_ullmul(i, mult);
        return xnarch_uldivrem(ull + (unsigned)div - 1, div, NULL);
}
#define xnarch_imuldiv_ceil(i,m,d) xnarch_generic_imuldiv_ceil((i),(m),(d))
#endif /* !xnarch_imuldiv_ceil */
/*
 * Divide the unsigned 96-bit value ((h << 32) + l) by the 32-bit
 * divisor d; optional remainder through *rp.  Building block for
 * xnarch_generic_ullimd().
 * NOTE(review): assumes the quotient fits in 64 bits (i.e. h < d
 * for a 32-bit high quotient word) -- confirm against callers.
 */
static inline unsigned long long
xnarch_generic_div96by32(const unsigned long long h,
                         const unsigned l,
                         const unsigned d,
                         unsigned long *const rp)
{
        unsigned long rh;
        const unsigned qh = xnarch_uldivrem(h, d, &rh);
        const unsigned long long t = xnarch_u64fromu32(rh, l);
        const unsigned ql = xnarch_uldivrem(t, d, rp);

        return xnarch_u64fromu32(qh, ql);
}
#ifndef xnarch_llimd
/*
 * Multiply a 64-bit unsigned value by the 32-bit fraction m/d while
 * keeping full 96-bit intermediate precision: the two 32x32 partial
 * products are recombined and handed to xnarch_generic_div96by32().
 */
static inline __attribute__((__const__))
unsigned long long xnarch_generic_ullimd(const unsigned long long op,
                                         const unsigned m,
                                         const unsigned d)
{
        unsigned int oph, opl, tlh, tll;
        unsigned long long th, tl;

        xnarch_u64tou32(op, oph, opl);
        tl = xnarch_ullmul(opl, m);
        xnarch_u64tou32(tl, tlh, tll);
        th = xnarch_ullmul(oph, m);
        th += tlh;

        return xnarch_generic_div96by32(th, tll, d, NULL);
}

/* Signed variant: strip the sign, scale, then restore the sign. */
static inline __attribute__((__const__)) long long
xnarch_generic_llimd (long long op, unsigned m, unsigned d)
{
        long long ret;
        int sign = 0;

        if (op < 0LL) {
                sign = 1;
                op = -op;
        }
        ret = xnarch_generic_ullimd(op, m, d);

        return sign ? -ret : ret;
}
#define xnarch_llimd(ll,m,d) xnarch_generic_llimd((ll),(m),(d))
#endif /* !xnarch_llimd */
#ifndef _xnarch_u96shift
/*
 * Logical right shift of the 96-bit value ((h << 64) + (m << 32) + l)
 * by s bits, returning the low 64 bits of the result.  Only valid for
 * 0 < s < 32 (a shift by 32 - s on a 32-bit word is UB at s == 0).
 * NOTE(review): the guard name `_xnarch_u96shift` does not match the
 * macro name `xnarch_u96shift` -- verify against upstream before
 * relying on per-arch overrides of this macro.
 */
#define xnarch_u96shift(h, m, l, s) ({  \
        unsigned int _l = (l);          \
        unsigned int _m = (m);          \
        unsigned int _s = (s);          \
        _l >>= _s;                      \
        _l |= (_m << (32 - _s));        \
        _m >>= _s;                      \
        _m |= ((h) << (32 - _s));       \
        xnarch_u64fromu32(_m, _l);      \
})
#endif /* !_xnarch_u96shift */
/*
 * 32x32 -> 64 bit signed multiply; the widening cast happens before
 * the multiplication, so the full signed product is preserved.
 */
static inline long long xnarch_llmi(int i, int j)
{
        return (long long) i * j;
}
#ifndef xnarch_llmulshft
/*
 * Scaled math: compute (op * m) >> s without any division, where the
 * (m, s) pair was produced by xnarch_init_llmulshft().  The 64x32
 * product is assembled from two partial products, then the 96-bit
 * result is shifted down via xnarch_u96shift().
 */
static inline long long
xnarch_generic_llmulshft(const long long op,
                         const unsigned m,
                         const unsigned s)
{
        unsigned int oph, opl, tlh, tll, thh, thl;
        unsigned long long th, tl;

        xnarch_u64tou32(op, oph, opl);
        tl = xnarch_ullmul(opl, m);
        xnarch_u64tou32(tl, tlh, tll);
        th = xnarch_llmi(oph, m);
        th += tlh;
        xnarch_u64tou32(th, thh, thl);

        return xnarch_u96shift(thh, thl, tll, s);
}
#define xnarch_llmulshft(ll, m, s) xnarch_generic_llmulshft((ll), (m), (s))
#endif /* !xnarch_llmulshft */
228 #ifdef XNARCH_HAVE_NODIV_LLIMD
231 struct xnarch_u32frac {
232 unsigned long long frac;
236 static inline void xnarch_init_u32frac(
struct xnarch_u32frac *
const f,
245 volatile unsigned vol_d = d;
247 f->frac = xnarch_generic_div96by32
248 (xnarch_u64fromu32(m % d, 0), 0, vol_d, NULL);
251 #ifndef xnarch_nodiv_imuldiv
253 xnarch_generic_nodiv_imuldiv(
unsigned op,
const struct xnarch_u32frac f)
255 return (xnarch_ullmul(op, f.frac >> 32) >> 32) + f.integ * op;
257 #define xnarch_nodiv_imuldiv(op, f) xnarch_generic_nodiv_imuldiv((op),(f))
260 #ifndef xnarch_nodiv_imuldiv_ceil
262 xnarch_generic_nodiv_imuldiv_ceil(
unsigned op,
const struct xnarch_u32frac f)
264 unsigned long long full = xnarch_ullmul(op, f.frac >> 32) + ~0U;
265 return (full >> 32) + f.integ * op;
267 #define xnarch_nodiv_imuldiv_ceil(op, f) \
268 xnarch_generic_nodiv_imuldiv_ceil((op),(f))
271 #ifndef xnarch_nodiv_ullimd
273 #ifndef xnarch_add96and64
274 #error "xnarch_add96and64 must be implemented."
278 xnarch_mul64by64_high(
const unsigned long long op,
const unsigned long long m)
281 register unsigned long long t0, t1, t2, t3;
282 register unsigned int oph, opl, mh, ml, t0h, t0l, t1h, t1l, t2h, t2l, t3h, t3l;
284 xnarch_u64tou32(op, oph, opl);
285 xnarch_u64tou32(m, mh, ml);
286 t0 = xnarch_ullmul(opl, ml);
287 xnarch_u64tou32(t0, t0h, t0l);
288 t3 = xnarch_ullmul(oph, mh);
289 xnarch_u64tou32(t3, t3h, t3l);
290 xnarch_add96and64(t3h, t3l, t0h, 0, t0l >> 31);
291 t1 = xnarch_ullmul(oph, ml);
292 xnarch_u64tou32(t1, t1h, t1l);
293 xnarch_add96and64(t3h, t3l, t0h, t1h, t1l);
294 t2 = xnarch_ullmul(opl, mh);
295 xnarch_u64tou32(t2, t2h, t2l);
296 xnarch_add96and64(t3h, t3l, t0h, t2h, t2l);
298 return xnarch_u64fromu32(t3h, t3l);
301 static inline unsigned long long
302 xnarch_generic_nodiv_ullimd(
const unsigned long long op,
303 const unsigned long long frac,
306 return xnarch_mul64by64_high(op, frac) + integ * op;
308 #define xnarch_nodiv_ullimd(op, f, i) xnarch_generic_nodiv_ullimd((op),(f), (i))
311 #ifndef xnarch_nodiv_llimd
313 xnarch_generic_nodiv_llimd(
long long op,
unsigned long long frac,
323 ret = xnarch_nodiv_ullimd(op, frac, integ);
325 return sign ? -ret : ret;
327 #define xnarch_nodiv_llimd(ll,frac,integ) xnarch_generic_nodiv_llimd((ll),(frac),(integ))
/*
 * Compute the (m_out, s_out) pair such that x * m_out >> s_out best
 * approximates x * m_in / d_in, with m_out kept within 31 bits so the
 * signed multiply in xnarch_llmulshft() cannot overflow.
 */
static inline void xnarch_init_llmulshft(const unsigned m_in,
                                         const unsigned d_in,
                                         unsigned *m_out,
                                         unsigned *s_out)
{
        /*
         * The volatile copy keeps the compiler from constant-folding
         * the division when d_in is known at compile time.
         */
        volatile unsigned int vol_d = d_in;
        unsigned long long mult;

        *s_out = 31;
        while (1) {
                mult = ((unsigned long long)m_in) << *s_out;
                do_div(mult, vol_d);
                if (mult <= 0x7FFFFFFF)
                        break;
                (*s_out)--;
        }
        *m_out = (unsigned int)mult;
}
356 #define xnarch_ullmod(ull,uld,rem) ({ xnarch_ulldiv(ull,uld,rem); (*rem); })
357 #define xnarch_uldiv(ull, d) xnarch_uldivrem(ull, d, NULL)
358 #define xnarch_ulmod(ull, d) ({ unsigned long _rem; \
359 xnarch_uldivrem(ull,d,&_rem); _rem; })
361 #define xnarch_div64(a,b) xnarch_divmod64((a),(b),NULL)
362 #define xnarch_mod64(a,b) ({ unsigned long long _rem; \
363 xnarch_divmod64((a),(b),&_rem); _rem; })
static int __attribute__((cold))
Test if a mutex structure contains a valid autoinitializer.
Definition: mutex.c:176
unsigned long long xnarch_generic_full_divmod64(unsigned long long a, unsigned long long b, unsigned long long *rem)
Architecture-independent div64 operation with remainder.
Definition: arith.c:44