Ruby 3.1.4p223 (2023-03-30 revision HEAD)
atomic.h
#ifndef RUBY_ATOMIC_H /*-*-C++-*-vi:se ft=cpp:*/
#define RUBY_ATOMIC_H

#include "ruby/internal/config.h"

#ifdef STDC_HEADERS
# include <stddef.h> /* size_t */
#endif

#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h> /* ssize_t */
#endif

#if RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
# pragma intrinsic(_InterlockedOr)
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
# include <atomic.h>
#endif

#include "ruby/assert.h"
#include "ruby/backward/2/limits.h"
#include "ruby/internal/cast.h"
#include "ruby/internal/value.h"
#include "ruby/internal/stdbool.h"

/*
 * Asserts that your environment supports more than one atomic type.  These
 * days systems tend to have this property (C11 has been around for a decade
 * now, right?) but we still support older ones.
 */
#if defined(__DOXYGEN__) || defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
# define RUBY_ATOMIC_GENERIC_MACRO 1
#endif

#if defined(__DOXYGEN__)
using rb_atomic_t = std::atomic<unsigned>;
#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
typedef unsigned int rb_atomic_t;
#elif defined(HAVE_GCC_SYNC_BUILTINS)
typedef unsigned int rb_atomic_t;
#elif defined(_WIN32)
typedef LONG rb_atomic_t;
#elif defined(__sun) && defined(HAVE_ATOMIC_H)
typedef unsigned int rb_atomic_t;
#else
# error No atomic operation found
#endif

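/*
 * Usage sketch (illustrative only; `counter` is a hypothetical name): an
 * `rb_atomic_t` is declared like a plain integer and mutated solely through
 * the RUBY_ATOMIC_* macros defined below.
 *
 *     static rb_atomic_t counter;                  // shared across threads
 *     RUBY_ATOMIC_INC(counter);                    // atomic increment
 *     rb_atomic_t old = RUBY_ATOMIC_FETCH_ADD(counter, 10); // returns old value
 */
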
#define RUBY_ATOMIC_FETCH_ADD(var, val) rbimpl_atomic_fetch_add(&(var), (val))

#define RUBY_ATOMIC_FETCH_SUB(var, val) rbimpl_atomic_fetch_sub(&(var), (val))

#define RUBY_ATOMIC_OR(var, val) rbimpl_atomic_or(&(var), (val))

#define RUBY_ATOMIC_EXCHANGE(var, val) rbimpl_atomic_exchange(&(var), (val))

#define RUBY_ATOMIC_CAS(var, oldval, newval) \
    rbimpl_atomic_cas(&(var), (oldval), (newval))

#define RUBY_ATOMIC_SET(var, val) rbimpl_atomic_set(&(var), (val))

#define RUBY_ATOMIC_ADD(var, val) rbimpl_atomic_add(&(var), (val))

#define RUBY_ATOMIC_SUB(var, val) rbimpl_atomic_sub(&(var), (val))

#define RUBY_ATOMIC_INC(var) rbimpl_atomic_inc(&(var))

#define RUBY_ATOMIC_DEC(var) rbimpl_atomic_dec(&(var))

#define RUBY_ATOMIC_SIZE_INC(var) rbimpl_atomic_size_inc(&(var))

#define RUBY_ATOMIC_SIZE_DEC(var) rbimpl_atomic_size_dec(&(var))

#define RUBY_ATOMIC_SIZE_EXCHANGE(var, val) \
    rbimpl_atomic_size_exchange(&(var), (val))

#define RUBY_ATOMIC_SIZE_CAS(var, oldval, newval) \
    rbimpl_atomic_size_cas(&(var), (oldval), (newval))

#define RUBY_ATOMIC_SIZE_ADD(var, val) rbimpl_atomic_size_add(&(var), (val))

#define RUBY_ATOMIC_SIZE_SUB(var, val) rbimpl_atomic_size_sub(&(var), (val))

#define RUBY_ATOMIC_PTR_EXCHANGE(var, val) \
    RBIMPL_CAST(rbimpl_atomic_ptr_exchange((void **)&(var), (void *)(val)))

#define RUBY_ATOMIC_PTR_CAS(var, oldval, newval) \
    RBIMPL_CAST(rbimpl_atomic_ptr_cas((void **)&(var), (oldval), (newval)))

#define RUBY_ATOMIC_VALUE_EXCHANGE(var, val) \
    rbimpl_atomic_value_exchange(&(var), (val))

#define RUBY_ATOMIC_VALUE_CAS(var, oldval, newval) \
    rbimpl_atomic_value_cas(&(var), (oldval), (newval))

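/*
 * Usage sketch (illustrative only; `initialized` and `do_expensive_setup`
 * are hypothetical): the macros above cover the common counter and
 * publish-once patterns.
 *
 *     static rb_atomic_t initialized;
 *
 *     static void
 *     init_once(void)
 *     {
 *         // RUBY_ATOMIC_CAS returns the value it actually observed, so the
 *         // swap succeeded iff that value equals the expected one (0 here).
 *         if (RUBY_ATOMIC_CAS(initialized, 0, 1) == 0) {
 *             do_expensive_setup(); // run by exactly one winner
 *         }
 *     }
 */
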
static inline rb_atomic_t
rbimpl_atomic_fetch_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_fetch_add(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_fetch_and_add(ptr, val);

#elif defined(_WIN32)
    return InterlockedExchangeAdd(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /*
     * `atomic_add_int_nv` takes its second argument as `int`!  Meanwhile our
     * `rb_atomic_t` is unsigned.  We cannot pass `val` as-is; we have to
     * manually check for integer overflow.
     */
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    /* `atomic_add_int_nv` returns the *new* value, so subtract `val` back
     * out to recover the old one. */
    return atomic_add_int_nv(ptr, val) - val;

#else
# error Unsupported platform.
#endif
}

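/*
 * Worked example (illustrative): the *_FETCH_* flavours return the value
 * held *before* the operation, regardless of the underlying primitive.
 *
 *     static rb_atomic_t v; // assume v currently holds 5
 *     rb_atomic_t old = RUBY_ATOMIC_FETCH_ADD(v, 2); // old == 5, v == 7
 */
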
static inline void
rbimpl_atomic_add(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    /*
     * GCC on amd64 is smart enough to detect that this `__atomic_add_fetch`'s
     * return value is unused, and compiles it into a single `LOCK ADD`
     * instruction.
     */
    __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_add_and_fetch(ptr, val);

#elif defined(_WIN32)
    /*
     * `InterlockedExchangeAdd` is `LOCK XADD`.  There also seems to be an
     * `_InterlockedAdd` intrinsic on ARM Windows, but not for x86?  Sticking
     * to `InterlockedExchangeAdd` for better portability.
     */
    InterlockedExchangeAdd(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /* Ditto for `atomic_add_int_nv`. */
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    atomic_add_int(ptr, val);

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_size_add(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_add_and_fetch(ptr, val);

#elif defined(_WIN32) && defined(_M_AMD64)
    /* Ditto for `InterlockedExchangeAdd`. */
    InterlockedExchangeAdd64(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    /* Ditto for `atomic_add_int_nv`. */
    RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
    atomic_add_long(ptr, val);

#else
    RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    rbimpl_atomic_add(tmp, val);

#endif
}

static inline void
rbimpl_atomic_inc(volatile rb_atomic_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_add(ptr, 1);

#elif defined(_WIN32)
    InterlockedIncrement(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_inc_uint(ptr);

#else
    rbimpl_atomic_add(ptr, 1);

#endif
}

static inline void
rbimpl_atomic_size_inc(volatile size_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_size_add(ptr, 1);

#elif defined(_WIN32) && defined(_M_AMD64)
    InterlockedIncrement64(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    atomic_inc_ulong(ptr);

#else
    rbimpl_atomic_size_add(ptr, 1);

#endif
}

static inline rb_atomic_t
rbimpl_atomic_fetch_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_fetch_sub(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_fetch_and_sub(ptr, val);

#elif defined(_WIN32)
    /* rb_atomic_t is signed here!  Safe to do `-val`. */
    return InterlockedExchangeAdd(ptr, -val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    /* Ditto for `rbimpl_atomic_fetch_add`.  Since `atomic_add_int_nv`
     * returns the *new* value, adding `val` back yields the old one. */
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    return atomic_add_int_nv(ptr, neg * val) + val;

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_sub(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_sub_and_fetch(ptr, val);

#elif defined(_WIN32)
    InterlockedExchangeAdd(ptr, -val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= INT_MAX);
    atomic_add_int(ptr, neg * val);

#else
# error Unsupported platform.
#endif
}

static inline void
rbimpl_atomic_size_sub(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_sub_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_sub_and_fetch(ptr, val);

#elif defined(_WIN32) && defined(_M_AMD64)
    const ssize_t neg = -1;
    InterlockedExchangeAdd64(ptr, neg * val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    const signed neg = -1;
    RBIMPL_ASSERT_OR_ASSUME(val <= LONG_MAX);
    atomic_add_long(ptr, neg * val);

#else
    RBIMPL_STATIC_ASSERT(size_of_rb_atomic_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    rbimpl_atomic_sub(tmp, val);

#endif
}

static inline void
rbimpl_atomic_dec(volatile rb_atomic_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_sub(ptr, 1);

#elif defined(_WIN32)
    InterlockedDecrement(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_dec_uint(ptr);

#else
    rbimpl_atomic_sub(ptr, 1);

#endif
}

static inline void
rbimpl_atomic_size_dec(volatile size_t *ptr)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS) || defined(HAVE_GCC_SYNC_BUILTINS)
    rbimpl_atomic_size_sub(ptr, 1);

#elif defined(_WIN32) && defined(_M_AMD64)
    InterlockedDecrement64(ptr);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    atomic_dec_ulong(ptr);

#else
    rbimpl_atomic_size_sub(ptr, 1);

#endif
}

static inline void
rbimpl_atomic_or(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_or_fetch(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    __sync_or_and_fetch(ptr, val);

#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
    _InterlockedOr(ptr, val);

#elif defined(_WIN32) && defined(__GNUC__)
    /* This was for old MinGW.  Maybe not needed any longer? */
    __asm__(
        "lock\n\t"
        "orl\t%1, %0"
        : "=m"(ptr)
        : "Ir"(val));

#elif defined(_WIN32) && defined(_M_IX86)
    __asm mov eax, ptr;
    __asm mov ecx, val;
    __asm lock or [eax], ecx;

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    atomic_or_uint(ptr, val);

#else
# error Unsupported platform.
#endif
}

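/*
 * Usage sketch (illustrative only; the flag constants are hypothetical):
 * atomic OR sets bits without losing concurrent updates to other bits,
 * which a plain `flags |= FL_DIRTY` read-modify-write could.
 *
 *     enum { FL_DIRTY = 0x01, FL_CLOSED = 0x02 };
 *     static rb_atomic_t flags;
 *
 *     RUBY_ATOMIC_OR(flags, FL_DIRTY); // FL_CLOSED, if set, stays set
 */
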
/* Nobody uses this but for theoretical backwards compatibility... */
#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
static inline rb_atomic_t
rb_w32_atomic_or(volatile rb_atomic_t *var, rb_atomic_t val)
{
    return rbimpl_atomic_or(var, val);
}
#endif

static inline rb_atomic_t
rbimpl_atomic_exchange(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_lock_test_and_set(ptr, val);

#elif defined(_WIN32)
    return InterlockedExchange(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_swap_uint(ptr, val);

#else
# error Unsupported platform.
#endif
}

static inline size_t
rbimpl_atomic_size_exchange(volatile size_t *ptr, size_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_lock_test_and_set(ptr, val);

#elif defined(_WIN32) && defined(_M_AMD64)
    return InterlockedExchange64(ptr, val);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    return atomic_swap_ulong(ptr, val);

#else
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *const tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    const rb_atomic_t ret = rbimpl_atomic_exchange(tmp, val);
    return RBIMPL_CAST((size_t)ret);

#endif
}

static inline void *
rbimpl_atomic_ptr_exchange(void *volatile *ptr, const void *val)
{
#if 0

#elif defined(InterlockedExchangePointer)
    /* const_cast */
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pval = RBIMPL_CAST((PVOID)val);
    return InterlockedExchangePointer(pptr, pval);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_swap_ptr(ptr, RBIMPL_CAST((void *)val));

#else
    RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));

    const size_t sval = RBIMPL_CAST((size_t)val);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
    return RBIMPL_CAST((void *)sret);

#endif
}

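/*
 * Usage sketch (illustrative only; `slot` and `make_buffer` are
 * hypothetical): pointer exchange installs a new pointer and hands back
 * whatever was there before, so ownership of the old object transfers to
 * the caller atomically.
 *
 *     static void *slot;
 *
 *     void *prev = RUBY_ATOMIC_PTR_EXCHANGE(slot, make_buffer());
 *     if (prev) free(prev); // we are now the sole owner of the old buffer
 */
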
static inline VALUE
rbimpl_atomic_value_exchange(volatile VALUE *ptr, VALUE val)
{
    RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));

    const size_t sval = RBIMPL_CAST((size_t)val);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_exchange(sptr, sval);
    return RBIMPL_CAST((VALUE)sret);
}

static inline void
rbimpl_atomic_set(volatile rb_atomic_t *ptr, rb_atomic_t val)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_store_n(ptr, val, __ATOMIC_SEQ_CST);

#else
    /* Maybe std::atomic<rb_atomic_t>::store can be faster? */
    rbimpl_atomic_exchange(ptr, val);

#endif
}

static inline rb_atomic_t
rbimpl_atomic_cas(volatile rb_atomic_t *ptr, rb_atomic_t oldval, rb_atomic_t newval)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_compare_exchange_n(
        ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return oldval;

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_val_compare_and_swap(ptr, oldval, newval);

#elif RBIMPL_COMPILER_SINCE(MSVC, 13, 0, 0)
    return InterlockedCompareExchange(ptr, newval, oldval);

#elif defined(_WIN32)
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pold = RBIMPL_CAST((PVOID)oldval);
    PVOID pnew = RBIMPL_CAST((PVOID)newval);
    PVOID pret = InterlockedCompareExchange(pptr, pnew, pold);
    return RBIMPL_CAST((rb_atomic_t)pret);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    return atomic_cas_uint(ptr, oldval, newval);

#else
# error Unsupported platform.
#endif
}

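/*
 * Usage sketch (illustrative only; `ceiling` is hypothetical): the classic
 * CAS retry loop.  Because the function returns the observed value, a
 * successful swap is detected by comparing it with the expected one.
 *
 *     static rb_atomic_t ceiling;
 *
 *     static void
 *     raise_ceiling(rb_atomic_t to)
 *     {
 *         rb_atomic_t cur = ceiling;
 *         while (cur < to) {
 *             rb_atomic_t seen = RUBY_ATOMIC_CAS(ceiling, cur, to);
 *             if (seen == cur) break; // swap took effect; we won
 *             cur = seen;             // lost the race; retry from new value
 *         }
 *     }
 */
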
/* Nobody uses this but for theoretical backwards compatibility... */
#if RBIMPL_COMPILER_BEFORE(MSVC, 13, 0, 0)
static inline rb_atomic_t
rb_w32_atomic_cas(volatile rb_atomic_t *var, rb_atomic_t oldval, rb_atomic_t newval)
{
    return rbimpl_atomic_cas(var, oldval, newval);
}
#endif

static inline size_t
rbimpl_atomic_size_cas(volatile size_t *ptr, size_t oldval, size_t newval)
{
#if 0

#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
    __atomic_compare_exchange_n(
        ptr, &oldval, newval, 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return oldval;

#elif defined(HAVE_GCC_SYNC_BUILTINS)
    return __sync_val_compare_and_swap(ptr, oldval, newval);

#elif defined(_WIN32) && defined(_M_AMD64)
    return InterlockedCompareExchange64(ptr, newval, oldval);

#elif defined(__sun) && defined(HAVE_ATOMIC_H) && (defined(_LP64) || defined(_I32LPx))
    return atomic_cas_ulong(ptr, oldval, newval);

#else
    RBIMPL_STATIC_ASSERT(size_of_size_t, sizeof *ptr == sizeof(rb_atomic_t));

    volatile rb_atomic_t *tmp = RBIMPL_CAST((volatile rb_atomic_t *)ptr);
    return rbimpl_atomic_cas(tmp, oldval, newval);

#endif
}

static inline void *
rbimpl_atomic_ptr_cas(void **ptr, const void *oldval, const void *newval)
{
#if 0

#elif defined(InterlockedExchangePointer)
    /* ... Can we say that InterlockedCompareExchangePointer surely exists
     * when InterlockedExchangePointer is defined?  Seems so, but...? */
    PVOID *pptr = RBIMPL_CAST((PVOID *)ptr);
    PVOID pold = RBIMPL_CAST((PVOID)oldval);
    PVOID pnew = RBIMPL_CAST((PVOID)newval);
    return InterlockedCompareExchangePointer(pptr, pnew, pold);

#elif defined(__sun) && defined(HAVE_ATOMIC_H)
    void *pold = RBIMPL_CAST((void *)oldval);
    void *pnew = RBIMPL_CAST((void *)newval);
    return atomic_cas_ptr(ptr, pold, pnew);

#else
    RBIMPL_STATIC_ASSERT(sizeof_voidp, sizeof *ptr == sizeof(size_t));

    const size_t snew = RBIMPL_CAST((size_t)newval);
    const size_t sold = RBIMPL_CAST((size_t)oldval);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
    return RBIMPL_CAST((void *)sret);

#endif
}

static inline VALUE
rbimpl_atomic_value_cas(volatile VALUE *ptr, VALUE oldval, VALUE newval)
{
    RBIMPL_STATIC_ASSERT(sizeof_value, sizeof *ptr == sizeof(size_t));

    const size_t snew = RBIMPL_CAST((size_t)newval);
    const size_t sold = RBIMPL_CAST((size_t)oldval);
    volatile size_t *const sptr = RBIMPL_CAST((volatile size_t *)ptr);
    const size_t sret = rbimpl_atomic_size_cas(sptr, sold, snew);
    return RBIMPL_CAST((VALUE)sret);
}
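
/*
 * Usage sketch (illustrative only; `cache` and `build_object` are
 * hypothetical, and 0 (Qfalse) stands for "not yet computed"): the VALUE
 * variants behave like the size_t ones, e.g. for publishing a lazily
 * created object exactly once.
 *
 *     static VALUE cache;
 *
 *     VALUE obj = build_object();
 *     VALUE prev = RUBY_ATOMIC_VALUE_CAS(cache, 0, obj);
 *     if (prev != 0) obj = prev; // another thread won; adopt its object
 */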

#endif /* RUBY_ATOMIC_H */