#ifndef INTERNAL_BITS_H
#define INTERNAL_BITS_H

#include "ruby/internal/config.h"
#include <limits.h>             /* for CHAR_BIT */
#include <stdint.h>             /* for uintptr_t */
#include "internal/compilers.h" /* for MSC_VERSION_SINCE */

#if MSC_VERSION_SINCE(1310)
# include <stdlib.h>            /* for _byteswap_uint64 */
#endif

#if defined(HAVE_X86INTRIN_H) && ! defined(MJIT_HEADER)
# /* Rule out MJIT_HEADER, which does not interface well with <x86intrin.h> */
# include <x86intrin.h>         /* for _lzcnt_u64 */
#elif MSC_VERSION_SINCE(1310)
# include <intrin.h>            /* for the following intrinsics */
#endif

#if defined(_MSC_VER) && defined(__AVX__)
# pragma intrinsic(__popcnt)
# pragma intrinsic(__popcnt64)
#endif

#if defined(_MSC_VER) && defined(__AVX2__)
# pragma intrinsic(__lzcnt)
# pragma intrinsic(__lzcnt64)
#endif

#if MSC_VERSION_SINCE(1310)
# pragma intrinsic(_rotl)
# pragma intrinsic(_rotr)
# ifdef _WIN64
#  pragma intrinsic(_rotl64)
#  pragma intrinsic(_rotr64)
# endif
#endif

#if MSC_VERSION_SINCE(1400)
# pragma intrinsic(_BitScanForward)
# pragma intrinsic(_BitScanReverse)
# ifdef _WIN64
#  pragma intrinsic(_BitScanForward64)
#  pragma intrinsic(_BitScanReverse64)
# endif
#endif

#include "ruby/ruby.h"              /* for VALUE */
#include "internal/static_assert.h" /* for STATIC_ASSERT */

#define HALF_LONG_MSB ((SIGNED_VALUE)1<<((SIZEOF_LONG*CHAR_BIT-1)/2))
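/* HALF_LONG_MSB is the most significant bit of the lower half of a long:
 * 0x8000 when long is 32 bits, 0x80000000 when long is 64 bits. */
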
#define SIGNED_INTEGER_TYPE_P(T) (0 > ((T)0)-1)

#define SIGNED_INTEGER_MIN(T)                           \
    ((sizeof(T) == sizeof(int8_t))  ? ((T)INT8_MIN)  :  \
    ((sizeof(T) == sizeof(int16_t)) ? ((T)INT16_MIN) :  \
    ((sizeof(T) == sizeof(int32_t)) ? ((T)INT32_MIN) :  \
    ((sizeof(T) == sizeof(int64_t)) ? ((T)INT64_MIN) :  \
                                      0))))

#define SIGNED_INTEGER_MAX(T) ((T)(SIGNED_INTEGER_MIN(T) ^ ((T)~(T)0)))

#define UNSIGNED_INTEGER_MAX(T) ((T)~(T)0)
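/* These helpers are compile-time constant for any integer type T, e.g.
 *
 *     SIGNED_INTEGER_TYPE_P(int)      // 1
 *     SIGNED_INTEGER_TYPE_P(size_t)   // 0
 *     SIGNED_INTEGER_MAX(int16_t)     // 32767
 *     UNSIGNED_INTEGER_MAX(uint8_t)   // 255
 */
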
#if __has_builtin(__builtin_mul_overflow_p)
# define MUL_OVERFLOW_P(a, b) \
    __builtin_mul_overflow_p((a), (b), (__typeof__(a * b))0)
#elif __has_builtin(__builtin_mul_overflow)
# define MUL_OVERFLOW_P(a, b) \
    __extension__ ({ __typeof__(a) c; __builtin_mul_overflow((a), (b), &c); })
#endif

#define MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, min, max) ( \
    (a) == 0 ? 0 : \
    (a) == -1 ? (b) < -(max) : \
    (a) > 0 ? \
      ((b) > 0 ? (max) / (a) < (b) : (min) / (a) > (b)) : \
      ((b) > 0 ? (min) / (a) < (b) : (max) / (a) > (b)))

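/* Why the (a) == -1 arm exists: with (min, max) = (INT_MIN, INT_MAX), the
 * generic test would evaluate (min) / (a) as INT_MIN / -1, which itself
 * overflows.  Since -(max) == min + 1 in two's complement, testing
 * (b) < -(max) catches exactly the one overflowing case, b == min. */
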
#if __has_builtin(__builtin_mul_overflow_p)
/* __builtin_mul_overflow_p accepts a bitfield operand, and GCC permits
 * bitfields of integer types other than int, so overflow can be tested
 * directly at fixnum width (one bit narrower than long). */
# define MUL_OVERFLOW_FIXNUM_P(a, b) \
    __extension__ ({ \
        struct { long fixnum : sizeof(long) * CHAR_BIT - 1; } c = { 0 }; \
        __builtin_mul_overflow_p((a), (b), c.fixnum); \
    })
#else
# define MUL_OVERFLOW_FIXNUM_P(a, b) \
    MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, FIXNUM_MIN, FIXNUM_MAX)
#endif

#ifdef MUL_OVERFLOW_P
# define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_P(a, b)
# define MUL_OVERFLOW_LONG_P(a, b)      MUL_OVERFLOW_P(a, b)
# define MUL_OVERFLOW_INT_P(a, b)       MUL_OVERFLOW_P(a, b)
#else
# define MUL_OVERFLOW_LONG_LONG_P(a, b) MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LLONG_MIN, LLONG_MAX)
# define MUL_OVERFLOW_LONG_P(a, b)      MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, LONG_MIN, LONG_MAX)
# define MUL_OVERFLOW_INT_P(a, b)       MUL_OVERFLOW_SIGNED_INTEGER_P(a, b, INT_MIN, INT_MAX)
#endif

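/* Illustrative sketch of a caller (hypothetical; rb_raise shown only as an
 * example of an error path):
 *
 *     if (MUL_OVERFLOW_LONG_P(x, y))
 *         rb_raise(rb_eRangeError, "integer overflow");
 *     z = x * y;   // guaranteed in range past the guard
 */
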
#ifdef HAVE_UINT128_T
# define bit_length(x) \
    (unsigned int) \
    (sizeof(x) <= sizeof(int32_t) ? 32 - nlz_int32((uint32_t)(x)) : \
     sizeof(x) <= sizeof(int64_t) ? 64 - nlz_int64((uint64_t)(x)) : \
                                   128 - nlz_int128((uint128_t)(x)))
#else
# define bit_length(x) \
    (unsigned int) \
    (sizeof(x) <= sizeof(int32_t) ? 32 - nlz_int32((uint32_t)(x)) : \
                                    64 - nlz_int64((uint64_t)(x)))
#endif

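/* bit_length(x) is the number of bits needed to represent x, e.g.
 *
 *     bit_length(0)   == 0
 *     bit_length(1)   == 1
 *     bit_length(255) == 8
 *     bit_length(256) == 9
 */
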
#ifndef swap16
# define swap16 ruby_swap16
#endif

#ifndef swap32
# define swap32 ruby_swap32
#endif

#ifndef swap64
# define swap64 ruby_swap64
#endif

static inline uint16_t ruby_swap16(uint16_t);
static inline uint32_t ruby_swap32(uint32_t);
static inline uint64_t ruby_swap64(uint64_t);
static inline unsigned nlz_int(unsigned x);
static inline unsigned nlz_long(unsigned long x);
static inline unsigned nlz_long_long(unsigned long long x);
static inline unsigned nlz_intptr(uintptr_t x);
static inline unsigned nlz_int32(uint32_t x);
static inline unsigned nlz_int64(uint64_t x);
#ifdef HAVE_UINT128_T
static inline unsigned nlz_int128(uint128_t x);
#endif
static inline unsigned rb_popcount32(uint32_t x);
static inline unsigned rb_popcount64(uint64_t x);
static inline unsigned rb_popcount_intptr(uintptr_t x);
static inline int ntz_int32(uint32_t x);
static inline int ntz_int64(uint64_t x);
static inline int ntz_intptr(uintptr_t x);
static inline VALUE RUBY_BIT_ROTL(VALUE, int);
static inline VALUE RUBY_BIT_ROTR(VALUE, int);

static inline uint16_t
ruby_swap16(uint16_t x)
{
#if __has_builtin(__builtin_bswap16)
    return __builtin_bswap16(x);

#elif MSC_VERSION_SINCE(1310)
    return _byteswap_ushort(x);

#else
    return (x << 8) | (x >> 8);

#endif
}

static inline uint32_t
ruby_swap32(uint32_t x)
{
#if __has_builtin(__builtin_bswap32)
    return __builtin_bswap32(x);

#elif MSC_VERSION_SINCE(1310)
    return _byteswap_ulong(x);

#else
    x = ((x & 0x0000FFFF) << 16) | ((x & 0xFFFF0000) >> 16);
    x = ((x & 0x00FF00FF) <<  8) | ((x & 0xFF00FF00) >>  8);
    return x;

#endif
}

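/* The fallback swaps the two halves, then the bytes inside each half, so
 * ruby_swap32(0x11223344) == 0x44332211. */
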
static inline uint64_t
ruby_swap64(uint64_t x)
{
#if __has_builtin(__builtin_bswap64)
    return __builtin_bswap64(x);

#elif MSC_VERSION_SINCE(1310)
    return _byteswap_uint64(x);

#else
    x = ((x & 0x00000000FFFFFFFFULL) << 32) | ((x & 0xFFFFFFFF00000000ULL) >> 32);
    x = ((x & 0x0000FFFF0000FFFFULL) << 16) | ((x & 0xFFFF0000FFFF0000ULL) >> 16);
    x = ((x & 0x00FF00FF00FF00FFULL) <<  8) | ((x & 0xFF00FF00FF00FF00ULL) >>  8);
    return x;

#endif
}

static inline unsigned int
nlz_int32(uint32_t x)
{
#if defined(_MSC_VER) && defined(__AVX2__)
    /* Note: MSVC does not define __LZCNT__, so __AVX2__ is used here as a
     * proxy for CPUs that also support the LZCNT instruction. */
    return (unsigned int)__lzcnt(x);

#elif defined(__x86_64__) && defined(__LZCNT__) && ! defined(MJIT_HEADER)
    return (unsigned int)_lzcnt_u32(x);

#elif MSC_VERSION_SINCE(1400)
    unsigned long r;
    return _BitScanReverse(&r, x) ? (31 - (int)r) : 32;

#elif __has_builtin(__builtin_clz)
    STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT == 32);
    return x ? (unsigned int)__builtin_clz(x) : 32;

#else
    uint32_t y;
    unsigned n = 32;
    y = x >> 16; if (y) {n -= 16; x = y;}
    y = x >>  8; if (y) {n -=  8; x = y;}
    y = x >>  4; if (y) {n -=  4; x = y;}
    y = x >>  2; if (y) {n -=  2; x = y;}
    y = x >>  1; if (y) {return n - 2;}
    return (unsigned int)(n - x);

#endif
}

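/* The portable branch is a binary search for the highest set bit: each step
 * halves the width still in play.  E.g. nlz_int32(1) == 31,
 * nlz_int32(0x80000000) == 0, and nlz_int32(0) == 32. */
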
static inline unsigned int
nlz_int64(uint64_t x)
{
#if defined(_MSC_VER) && defined(__AVX2__)
    return (unsigned int)__lzcnt64(x);

#elif defined(__x86_64__) && defined(__LZCNT__) && ! defined(MJIT_HEADER)
    return (unsigned int)_lzcnt_u64(x);

#elif defined(_WIN64) && MSC_VERSION_SINCE(1400)
    unsigned long r;
    return _BitScanReverse64(&r, x) ? (63u - (unsigned int)r) : 64;

#elif __has_builtin(__builtin_clzl)
    if (x == 0) {
        /* __builtin_clz family is undefined for 0; handle it up front. */
        return 64;
    }
    else if (sizeof(long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_clzl((unsigned long)x);
    }
    else if (sizeof(long long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_clzll((unsigned long long)x);
    }
    else {
        UNREACHABLE_RETURN(~0);
    }

#else
    uint64_t y;
    unsigned int n = 64;
    y = x >> 32; if (y) {n -= 32; x = y;}
    y = x >> 16; if (y) {n -= 16; x = y;}
    y = x >>  8; if (y) {n -=  8; x = y;}
    y = x >>  4; if (y) {n -=  4; x = y;}
    y = x >>  2; if (y) {n -=  2; x = y;}
    y = x >>  1; if (y) {return n - 2;}
    return (unsigned int)(n - x);

#endif
}

#ifdef HAVE_UINT128_T
static inline unsigned int
nlz_int128(uint128_t x)
{
    uint64_t y = (uint64_t)(x >> 64);

    if (x == 0) {
        return 128;
    }
    else if (y == 0) {
        return (unsigned int)nlz_int64(x) + 64;
    }
    else {
        return (unsigned int)nlz_int64(y);
    }
}
#endif

static inline unsigned int
nlz_int(unsigned int x)
{
    if (sizeof(unsigned int) * CHAR_BIT == 32) {
        return nlz_int32((uint32_t)x);
    }
    else if (sizeof(unsigned int) * CHAR_BIT == 64) {
        return nlz_int64((uint64_t)x);
    }
    else {
        UNREACHABLE_RETURN(~0);
    }
}

static inline unsigned int
nlz_long(unsigned long x)
{
    if (sizeof(unsigned long) * CHAR_BIT == 32) {
        return nlz_int32((uint32_t)x);
    }
    else if (sizeof(unsigned long) * CHAR_BIT == 64) {
        return nlz_int64((uint64_t)x);
    }
    else {
        UNREACHABLE_RETURN(~0);
    }
}

static inline unsigned int
nlz_long_long(unsigned long long x)
{
    if (sizeof(unsigned long long) * CHAR_BIT == 64) {
        return nlz_int64((uint64_t)x);
    }
#ifdef HAVE_UINT128_T
    else if (sizeof(unsigned long long) * CHAR_BIT == 128) {
        return nlz_int128((uint128_t)x);
    }
#endif
    else {
        UNREACHABLE_RETURN(~0);
    }
}

static inline unsigned int
nlz_intptr(uintptr_t x)
{
    if (sizeof(uintptr_t) == sizeof(unsigned int)) {
        return nlz_int((unsigned int)x);
    }
    if (sizeof(uintptr_t) == sizeof(unsigned long)) {
        return nlz_long((unsigned long)x);
    }
    if (sizeof(uintptr_t) == sizeof(unsigned long long)) {
        return nlz_long_long((unsigned long long)x);
    }
    else {
        UNREACHABLE_RETURN(~0);
    }
}

static inline unsigned int
rb_popcount32(uint32_t x)
{
#if defined(_MSC_VER) && defined(__AVX__)
    /* Note: CPUs since Nehalem and Barcelona have had the POPCNT instruction,
     * so SSE 4.2 would suffice, but MSVC has no __SSE4_2__ style predefined
     * macro; __AVX__ is used instead. */
    return (unsigned int)__popcnt(x);

#elif __has_builtin(__builtin_popcount)
    STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT >= 32);
    return (unsigned int)__builtin_popcount(x);

#else
    x = (x & 0x55555555) + (x >> 1 & 0x55555555);
    x = (x & 0x33333333) + (x >> 2 & 0x33333333);
    x = (x & 0x0f0f0f0f) + (x >> 4 & 0x0f0f0f0f);
    x = (x & 0x001f001f) + (x >> 8 & 0x001f001f);
    x = (x & 0x0000003f) + (x >>16 & 0x0000003f);
    return (unsigned int)x;

#endif
}

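/* The fallback is a SWAR (SIMD-within-a-register) sum: each line adds
 * adjacent bit groups in parallel, with masks widened as the partial sums
 * grow (a byte sum can reach 8, two bytes 16, the total 32).  For example
 * rb_popcount32(0xF0F0F0F0) == 16. */
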
static inline unsigned int
rb_popcount64(uint64_t x)
{
#if defined(_MSC_VER) && defined(__AVX__)
    return (unsigned int)__popcnt64(x);

#elif __has_builtin(__builtin_popcount)
    if (sizeof(long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_popcountl((unsigned long)x);
    }
    else if (sizeof(long long) * CHAR_BIT == 64) {
        return (unsigned int)__builtin_popcountll((unsigned long long)x);
    }
    else {
        UNREACHABLE_RETURN(~0);
    }

#else
    x = (x & 0x5555555555555555) + (x >> 1 & 0x5555555555555555);
    x = (x & 0x3333333333333333) + (x >> 2 & 0x3333333333333333);
    x = (x & 0x0707070707070707) + (x >> 4 & 0x0707070707070707);
    x = (x & 0x001f001f001f001f) + (x >> 8 & 0x001f001f001f001f);
    x = (x & 0x0000003f0000003f) + (x >>16 & 0x0000003f0000003f);
    x = (x & 0x000000000000007f) + (x >>32 & 0x000000000000007f);
    return (unsigned int)x;

#endif
}

static inline unsigned int
rb_popcount_intptr(uintptr_t x)
{
    if (sizeof(uintptr_t) * CHAR_BIT == 64) {
        return rb_popcount64((uint64_t)x);
    }
    else if (sizeof(uintptr_t) * CHAR_BIT == 32) {
        return rb_popcount32((uint32_t)x);
    }
    else {
        UNREACHABLE_RETURN(~0);
    }
}

static inline int
ntz_int32(uint32_t x)
{
#if defined(__x86_64__) && defined(__BMI__) && ! defined(MJIT_HEADER)
    return (unsigned)_tzcnt_u32(x);

#elif MSC_VERSION_SINCE(1400)
    unsigned long r;
    return _BitScanForward(&r, x) ? (int)r : 32;

#elif __has_builtin(__builtin_ctz)
    STATIC_ASSERT(sizeof_int, sizeof(int) * CHAR_BIT == 32);
    return x ? (unsigned)__builtin_ctz(x) : 32;

#else
    return rb_popcount32((~x) & (x-1));

#endif
}

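/* (~x) & (x-1) converts the trailing zeros of x into a block of trailing
 * ones and clears every other bit, so its popcount equals ntz.  E.g. for
 * x = 20 (0b10100): x-1 = 0b10011, (~x) & (x-1) = 0b00011, giving 2. */
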
static inline int
ntz_int64(uint64_t x)
{
#if defined(__x86_64__) && defined(__BMI__) && ! defined(MJIT_HEADER)
    return (unsigned)_tzcnt_u64(x);

#elif defined(_WIN64) && MSC_VERSION_SINCE(1400)
    unsigned long r;
    return _BitScanForward64(&r, x) ? (int)r : 64;

#elif __has_builtin(__builtin_ctzl)
    if (x == 0) {
        /* __builtin_ctz family is undefined for 0; handle it up front. */
        return 64;
    }
    else if (sizeof(long) * CHAR_BIT == 64) {
        return (unsigned)__builtin_ctzl((unsigned long)x);
    }
    else if (sizeof(long long) * CHAR_BIT == 64) {
        return (unsigned)__builtin_ctzll((unsigned long long)x);
    }
    else {
        UNREACHABLE_RETURN(~0);
    }

#else
    return rb_popcount64((~x) & (x-1));

#endif
}

static inline int
ntz_intptr(uintptr_t x)
{
    if (sizeof(uintptr_t) * CHAR_BIT == 64) {
        return ntz_int64((uint64_t)x);
    }
    else if (sizeof(uintptr_t) * CHAR_BIT == 32) {
        return ntz_int32((uint32_t)x);
    }
    else {
        UNREACHABLE_RETURN(~0);
    }
}

static inline VALUE
RUBY_BIT_ROTL(VALUE v, int n)
{
#if __has_builtin(__builtin_rotateleft32) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return __builtin_rotateleft32(v, n);

#elif __has_builtin(__builtin_rotateleft64) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return __builtin_rotateleft64(v, n);

#elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return _rotl(v, n);

#elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return _rotl64(v, n);

#elif defined(_lrotl) && (SIZEOF_VALUE == SIZEOF_LONG)
    return _lrotl(v, n);

#else
    const int m = (sizeof(VALUE) * CHAR_BIT) - 1;
    return (v << (n & m)) | (v >> (-n & m));
#endif
}

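/* In the portable branch the rotate count is masked with m = width - 1, so
 * both shift amounts stay below the word width; for n == 0 this yields
 * (v << 0) | (v >> 0) == v with no undefined full-width shift. */
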
static inline VALUE
RUBY_BIT_ROTR(VALUE v, int n)
{
#if __has_builtin(__builtin_rotateright32) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return __builtin_rotateright32(v, n);

#elif __has_builtin(__builtin_rotateright64) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return __builtin_rotateright64(v, n);

#elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 32)
    return _rotr(v, n);

#elif MSC_VERSION_SINCE(1310) && (SIZEOF_VALUE * CHAR_BIT == 64)
    return _rotr64(v, n);

#elif defined(_lrotr) && (SIZEOF_VALUE == SIZEOF_LONG)
    return _lrotr(v, n);

#else
    const int m = (sizeof(VALUE) * CHAR_BIT) - 1;
    return (v << (-n & m)) | (v >> (n & m));
#endif
}

#endif /* INTERNAL_BITS_H */