#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#ifdef HAVE_THR_STKSEGMENT
#if defined(HAVE_FCNTL_H)
#elif defined(HAVE_SYS_FCNTL_H)
#include <sys/fcntl.h>
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#if defined(HAVE_SYS_TIME_H)
#if defined(__HAIKU__)
#include <kernel/OS.h>
#include <sys/syscall.h>
#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
# define USE_EVENTFD (1)
# include <sys/eventfd.h>
#else
# define USE_EVENTFD (0)
#endif

#if defined(SIGVTALRM) && !defined(__CYGWIN__) && !defined(__EMSCRIPTEN__)
# define USE_UBF_LIST 1
#define UBF_TIMER_NONE    0
#define UBF_TIMER_POSIX   1
#define UBF_TIMER_PTHREAD 2

# if defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_CREATE) && \
     defined(CLOCK_MONOTONIC) && defined(USE_UBF_LIST)
# define UBF_TIMER UBF_TIMER_POSIX
# elif defined(USE_UBF_LIST)
# define UBF_TIMER UBF_TIMER_PTHREAD
# else
# define UBF_TIMER UBF_TIMER_NONE
# endif
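/*
 * Note on the selection above: UBF is short for "un-blocking function",
 * the callback used to knock a thread out of a blocking operation.  These
 * checks pick one of three backends for periodic wakeups: a POSIX
 * per-process timer (timer_create/timer_settime) when available, a
 * dedicated pthread that sleeps on a pipe as a portable fallback, or no
 * timer at all on platforms without SIGVTALRM support.
 */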
#if UBF_TIMER == UBF_TIMER_POSIX
static const struct itimerspec zero;

#define TIMER_STATE_DEBUG 0
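/*
 * The POSIX timer backend tracks its lifecycle in timer_posix.state_ with
 * four states (disarm, arming, armed, dead).  All transitions go through
 * the two atomic helpers that follow: timer_state_exchange()
 * unconditionally swaps the state, while timer_state_cas() only moves from
 * an expected previous state, which is what lets arming, disarming and
 * destruction race safely against signal handlers and other threads.
 */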
static const char *
rtimer_state_name(enum rtimer_state state)
{
    switch (state) {
      case RTIMER_DISARM: return "disarm";
      case RTIMER_ARMING: return "arming";
      case RTIMER_ARMED:  return "armed";
      case RTIMER_DEAD:   return "dead";
      default: rb_bug("unreachable");
    }
}
static enum rtimer_state
timer_state_exchange(enum rtimer_state state)
{
    enum rtimer_state prev = ATOMIC_EXCHANGE(timer_posix.state_, state);
    if (TIMER_STATE_DEBUG) fprintf(stderr, "state (exc): %s->%s\n",
                                   rtimer_state_name(prev), rtimer_state_name(state));
    return prev;
}
static enum rtimer_state
timer_state_cas(enum rtimer_state expected_prev, enum rtimer_state state)
{
    enum rtimer_state prev = ATOMIC_CAS(timer_posix.state_, expected_prev, state);

    if (TIMER_STATE_DEBUG) {
        if (prev == expected_prev) {
            fprintf(stderr, "state (cas): %s->%s\n",
                    rtimer_state_name(prev), rtimer_state_name(state));
        }
        else {
            fprintf(stderr, "state (cas): %s (expected:%s)\n",
                    rtimer_state_name(prev), rtimer_state_name(expected_prev));
        }
    }
    return prev;
}
#elif UBF_TIMER == UBF_TIMER_PTHREAD
static void *timer_pthread_fn(void *);

static const rb_hrtime_t *sigwait_timeout(rb_thread_t *, int sigwait_fd,
                                          const rb_hrtime_t *, int *);
static void ubf_timer_disarm(void);
static void threadptr_trap_interrupt(rb_thread_t *);
static void clear_thread_cache_altstack(void);
static void ubf_wakeup_all_threads(void);
static int ubf_threads_empty(void);
#define TIMER_THREAD_CREATED_P() (signal_self_pipe.owner_process == getpid())

#define BUSY_WAIT_SIGNALS (0)

#define THREAD_INVALID ((const rb_thread_t *)-1)

#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#else
#define native_thread_yield() ((void)0)
#endif

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
    defined(HAVE_CLOCK_GETTIME)
static pthread_condattr_t condattr_mono;
static pthread_condattr_t *condattr_monotonic = &condattr_mono;
#else
static const void *const condattr_monotonic = NULL;
#endif

#define TIME_QUANTUM_MSEC (100)
#define TIME_QUANTUM_USEC (TIME_QUANTUM_MSEC * 1000)
#define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)

static rb_hrtime_t native_cond_timeout(rb_nativethread_cond_t *, rb_hrtime_t);
static int native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex,
                                 const rb_hrtime_t *abs);
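/*
 * TIME_QUANTUM_MSEC is the scheduling time slice: roughly every 100 ms
 * (100 * 1000 us, 100 * 1000 * 1000 ns) the thread acting as GVL timer
 * sets a timer interrupt on the running thread so Ruby threads get
 * switched.  condattr_monotonic, when pthread_condattr_setclock() is
 * available, makes condition-variable timeouts use CLOCK_MONOTONIC so they
 * are immune to wall-clock adjustments.
 */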
    static rb_hrtime_t abs;

    if (gvl->timer_err == ETIMEDOUT) {
        abs = native_cond_timeout(&nd->cond.gvlq, TIME_QUANTUM_NSEC);
    }
    gvl->timer_err = native_cond_timedwait(&nd->cond.gvlq, &gvl->lock, &abs);

    ubf_wakeup_all_threads();
    ruby_sigchld_handler(vm);

    if (UNLIKELY(rb_signal_buff_size())) {
        if (th == vm->ractor.main_thread) {
            RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
        }
        else {
            threadptr_trap_interrupt(vm->ractor.main_thread);
        }
    }

    RUBY_VM_SET_TIMER_INTERRUPT(gvl->owner->ec);

    VM_ASSERT(th->unblock.func == 0 &&
              "we must not be in ubf_list and GVL waitq at the same time");

    list_add_tail(&gvl->waitq, &nd->node.gvl);

        do_gvl_timer(gvl, th);
    } while (gvl->owner);

    list_del_init(&nd->node.gvl);

    if (gvl->need_yield) {

    gvl->timer_err = ETIMEDOUT;

    if (!designate_timer_thread(gvl) && !ubf_threads_empty()) {
        rb_thread_wakeup_timer_thread(-1);
    }
    gvl_acquire_common(gvl, th);

    gvl_release_common(gvl);

    ubf_wakeup_all_threads();

    next = gvl_release_common(gvl);

    if (UNLIKELY(gvl->wait_yield)) {
        while (gvl->wait_yield)

        while (gvl->need_yield)

    native_thread_yield();

    gvl_acquire_common(gvl, th);

    list_head_init(&gvl->waitq);

    gvl->timer_err = ETIMEDOUT;

    clear_thread_cache_altstack();
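/*
 * GVL handling, as far as these fragments show: a thread that cannot take
 * the lock parks itself on gvl->waitq and repeatedly calls do_gvl_timer()
 * until the owner releases it; the waiter acting as timer wakes the owner
 * with a timer interrupt once per quantum.  Release hands the lock to the
 * next waiter (the value returned by gvl_release_common()), and the
 * cooperative yield path releases the lock, calls native_thread_yield(),
 * and then re-acquires it via gvl_acquire_common().
 */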
#if defined(HAVE_WORKING_FORK)
static void thread_cache_reset(void);

    thread_cache_reset();

    gvl_acquire(gvl, GET_THREAD());
#define NATIVE_MUTEX_LOCK_DEBUG 0

mutex_debug(const char *msg, void *lock)

    if (NATIVE_MUTEX_LOCK_DEBUG) {
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}

    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {

    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {

    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {

    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);

    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    int r = pthread_cond_init(cond, condattr_monotonic);

    int r = pthread_cond_destroy(cond);

        r = pthread_cond_signal(cond);
    } while (r == EAGAIN);

        r = pthread_cond_broadcast(cond);
    } while (r == EAGAIN);

    int r = pthread_cond_wait(cond, mutex);

native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex,
                      const rb_hrtime_t *abs)

        rb_hrtime2timespec(&ts, abs);
        r = pthread_cond_timedwait(cond, mutex, &ts);
    } while (r == EINTR);

    if (r != 0 && r != ETIMEDOUT) {

    rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
    native_cond_timedwait(cond, mutex, &hrmsec);

native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)

    if (condattr_monotonic) {
        return rb_hrtime_add(rb_hrtime_now(), rel);

    return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
#define native_cleanup_push pthread_cleanup_push
#define native_cleanup_pop  pthread_cleanup_pop

#ifdef RB_THREAD_LOCAL_SPECIFIER
static RB_THREAD_LOCAL_SPECIFIER rb_thread_t *ruby_native_thread;
#else
static pthread_key_t ruby_native_thread_key;
#endif

ruby_thread_from_native(void)

#ifdef RB_THREAD_LOCAL_SPECIFIER
    return ruby_native_thread;
#else
    return pthread_getspecific(ruby_native_thread_key);
#endif

    rb_ractor_set_current_ec(th->ractor, th->ec);
#ifdef RB_THREAD_LOCAL_SPECIFIER
    ruby_native_thread = th;
#else
    return pthread_setspecific(ruby_native_thread_key, th) == 0;
#endif
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
    if (condattr_monotonic) {
        int r = pthread_condattr_init(condattr_monotonic);

        r = pthread_condattr_setclock(condattr_monotonic, CLOCK_MONOTONIC);

        if (r) condattr_monotonic = NULL;
    }
#endif

#ifndef RB_THREAD_LOCAL_SPECIFIER
    if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_native_thread_key)");
    }
    if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_current_ec_key)");
    }
#endif

    th->thread_id = pthread_self();
    ruby_thread_set_native(th);
    fill_thread_id_str(th);
    native_thread_init(th);
    posix_signal(SIGVTALRM, null_func);
#ifdef RB_THREAD_T_HAS_NATIVE_ID
get_native_thread_id(void)

    return (int)syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    return pthread_getthreadid_np();

#ifdef RB_THREAD_T_HAS_NATIVE_ID
    th->tid = get_native_thread_id();

    list_node_init(&nd->node.ubf);

    if (&nd->cond.gvlq != &nd->cond.intr)

#ifndef USE_THREAD_CACHE
#define USE_THREAD_CACHE 1

    if (&nd->cond.gvlq != &nd->cond.intr)

    if (USE_THREAD_CACHE)
        ruby_thread_set_native(0);
static rb_thread_t *register_cached_thread_and_wait(void *);

#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#undef MAINSTACKADDR_AVAILABLE
#define MAINSTACKADDR_AVAILABLE 1
void *pthread_get_stackaddr_np(pthread_t);
size_t pthread_get_stacksize_np(pthread_t);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#elif defined __HAIKU__
#define STACKADDR_AVAILABLE 1
#endif

#ifndef MAINSTACKADDR_AVAILABLE
# ifdef STACKADDR_AVAILABLE
#   define MAINSTACKADDR_AVAILABLE 1
# else
#   define MAINSTACKADDR_AVAILABLE 0
# endif
#endif
#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
# define get_main_stack(addr, size) get_stack(addr, size)
#endif
#ifdef STACKADDR_AVAILABLE

get_stack(void **addr, size_t *size)
{
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
# ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
# else
    guard = getpagesize();
# endif
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP)
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
# if defined HAVE_THR_STKSEGMENT
    CHECK_ERR(thr_stksegment(&stk));
# else
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
# endif
#elif defined HAVE_PTHREAD_GETTHRDS_NP
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    int regsiz = sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
                                  &reg, &regsiz));
    *addr = thinfo.__pi_stackaddr;
    *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#elif defined __HAIKU__
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(get_thread_info(find_thread(NULL), &info));
    *addr = info.stack_base;
    *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#else
#error STACKADDR_AVAILABLE is defined but not implemented.
#endif
    rb_nativethread_id_t id;
    size_t stack_maxsize;
} native_main_thread;

#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;

    RUBY_STACK_SPACE_LIMIT = 1024 * 1024,
    RUBY_STACK_SPACE_RATIO = 5

space_size(size_t stack_size)

    size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
    if (space_size > RUBY_STACK_SPACE_LIMIT) {
        return RUBY_STACK_SPACE_LIMIT;
static __attribute__((noinline)) void
reserve_stack(volatile char *limit, size_t size)

# error needs alloca()

    volatile char buf[0x100];
    enum {stack_check_margin = 0x1000};

    STACK_GROW_DIR_DETECTION;

    if (!getrlimit(RLIMIT_STACK, &rl) && rl.rlim_cur == RLIM_INFINITY)

    if (size < stack_check_margin) return;
    size -= stack_check_margin;

    if (IS_STACK_DIR_UPPER()) {
        const volatile char *end = buf + sizeof(buf);

        size_t sz = limit - end;

        size_t sz = buf - limit;

# define reserve_stack(limit, size) ((void)(limit), (void)(size))
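/*
 * reserve_stack() (above, only compiled where RLIMIT_STACK and alloca are
 * usable) appears to walk the main thread's stack toward its rlimit-derived
 * limit by alloca'ing a large block and touching it, so the stack mapping
 * is extended up front; the no-op macro replaces it elsewhere.  A
 * stack_check_margin of 0x1000 bytes is left untouched for compilers'
 * -fstack-check probes, and stacks with an unlimited rlimit are skipped
 * entirely.
 */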
#undef ruby_init_stack

    native_main_thread.id = pthread_self();

#if MAINSTACKADDR_AVAILABLE
    if (native_main_thread.stack_maxsize) return;

        if (get_main_stack(&stackaddr, &size) == 0) {
            native_main_thread.stack_maxsize = size;
            native_main_thread.stack_start = stackaddr;
            reserve_stack(stackaddr, size);
        }

#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
#else
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
    }
#endif
#if defined(HAVE_GETRLIMIT)
#if defined(PTHREAD_STACK_DEFAULT)
# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
#  error "PTHREAD_STACK_DEFAULT is too small"
# endif
    size_t size = PTHREAD_STACK_DEFAULT;
#else
    size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
#endif
    int pagesize = getpagesize();

    STACK_GROW_DIR_DETECTION;
    if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
        size = (size_t)rlim.rlim_cur;
    }
    addr = native_main_thread.stack_start;
    if (IS_STACK_DIR_UPPER()) {
        space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
    }
    else {
        space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
    }
    native_main_thread.stack_maxsize = space;
#endif
#if MAINSTACKADDR_AVAILABLE

    STACK_GROW_DIR_DETECTION;

    if (IS_STACK_DIR_UPPER()) {
        start = native_main_thread.stack_start;
        end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
    }
    else {
        start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
        end = native_main_thread.stack_start;
    }

    if ((void *)addr < start || (void *)addr > end) {
        native_main_thread.stack_start = (VALUE *)addr;
        native_main_thread.stack_maxsize = 0;
    }
#endif
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}

    rb_nativethread_id_t curr = pthread_self();

    if (pthread_equal(curr, native_main_thread.id)) {
        th->ec->machine.stack_start = native_main_thread.stack_start;
        th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
    }
    else {
#ifdef STACKADDR_AVAILABLE
        if (get_stack(&start, &size) == 0) {
            uintptr_t diff = (uintptr_t)start - (uintptr_t)&curr;
            th->ec->machine.stack_start = (VALUE *)&curr;
            th->ec->machine.stack_maxsize = size - diff;
        }
#endif
    }
#define USE_NATIVE_THREAD_INIT 1

thread_start_func_1(void *th_ptr)

    RB_ALTSTACK_INIT(void *altstack, th->altstack);
#if USE_THREAD_CACHE

#if !defined USE_NATIVE_THREAD_INIT

    fill_thread_id_str(th);
#if defined USE_NATIVE_THREAD_INIT
    native_thread_init_stack(th);
#endif
    native_thread_init(th);

#if defined USE_NATIVE_THREAD_INIT
    thread_start_func_2(th, th->ec->machine.stack_start);
#else
    thread_start_func_2(th, &stack_start);
#endif

#if USE_THREAD_CACHE
    if ((th = register_cached_thread_and_wait(RB_ALTSTACK(altstack))) != 0) {

    RB_ALTSTACK_FREE(altstack);
struct cached_thread_entry {
    rb_nativethread_cond_t cond;
    rb_nativethread_id_t thread_id;
    struct list_node node;

#if USE_THREAD_CACHE
static rb_nativethread_lock_t thread_cache_lock = RB_NATIVETHREAD_LOCK_INIT;
static LIST_HEAD(cached_thread_head);

# if defined(HAVE_WORKING_FORK)
thread_cache_reset(void)

    list_head_init(&cached_thread_head);

#ifndef THREAD_CACHE_TIME
# define THREAD_CACHE_TIME ((rb_hrtime_t)3 * RB_HRTIME_PER_SEC)

register_cached_thread_and_wait(void *altstack)

    rb_hrtime_t end = THREAD_CACHE_TIME;
    struct cached_thread_entry entry;

    entry.altstack = altstack;
    entry.thread_id = pthread_self();
    end = native_cond_timeout(&entry.cond, end);

    list_add(&cached_thread_head, &entry.node);

    native_cond_timedwait(&entry.cond, &thread_cache_lock, &end);

    if (entry.th == NULL) {
        list_del(&entry.node);

    RB_ALTSTACK_FREE(entry.altstack);

# if defined(HAVE_WORKING_FORK)
static void thread_cache_reset(void) { }
#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;

    entry = list_pop(&cached_thread_head, struct cached_thread_entry, node);

        th->thread_id = entry->thread_id;
        fill_thread_id_str(th);

clear_thread_cache_altstack(void)

#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;

    list_for_each(&cached_thread_head, entry, node) {
        void MAYBE_UNUSED(*altstack) = entry->altstack;
        entry->altstack = 0;
        RB_ALTSTACK_FREE(altstack);
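/*
 * Thread caching: when USE_THREAD_CACHE is enabled, a native thread whose
 * Ruby thread has finished parks itself on cached_thread_head and waits on
 * a condition variable for up to THREAD_CACHE_TIME (3 seconds, per the
 * define above) to be handed a new Ruby thread; use_cached_thread() pops
 * such an entry instead of calling pthread_create() again.  If no work
 * arrives in time, the entry is unlinked and its alternate signal stack is
 * freed.
 */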
    if (use_cached_thread(th)) {
        thread_debug("create (use cached thread): %p\n", (void *)th);
    }
    else {
        pthread_attr_t attr;
        const size_t stack_size = th->vm->default_params.thread_machine_stack_size + th->vm->default_params.thread_vm_stack_size;
        const size_t space = space_size(stack_size);

#ifdef USE_SIGALTSTACK
        th->altstack = rb_allocate_sigaltstack();
#endif
        th->ec->machine.stack_maxsize = stack_size - space;

        CHECK_ERR(pthread_attr_init(&attr));

# ifdef PTHREAD_STACK_MIN
        thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
        CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
# endif

# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
        CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
# endif
        CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

        err = pthread_create(&th->thread_id, &attr, thread_start_func_1, th);
        thread_debug("create: %p (%d)\n", (void *)th, err);

        fill_thread_id_str(th);
        CHECK_ERR(pthread_attr_destroy(&attr));
    }
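/*
 * New Ruby threads run on detached pthreads (PTHREAD_CREATE_DETACHED), so
 * nobody ever joins them.  The requested pthread stack covers both the
 * machine stack and the Ruby VM stack (thread_machine_stack_size +
 * thread_vm_stack_size); a slice computed by space_size() is held back as
 * a safety margin, and only the remainder is advertised in
 * th->ec->machine.stack_maxsize.
 */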
#if USE_NATIVE_THREAD_PRIORITY

#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    int priority = 0 - th->priority;

    pthread_getschedparam(th->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    if (min > priority) {

    else if (max < priority) {

    sp.sched_priority = priority;
    pthread_setschedparam(th->thread_id, policy, &sp);

    return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
ubf_pthread_cond_signal(void *ptr)

    thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);

native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)

    rb_nativethread_lock_t *lock = &th->interrupt_lock;
    rb_nativethread_cond_t *cond = &th->native_thread_data.cond.intr;

    const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;

    GVL_UNLOCK_BEGIN(th);

    th->unblock.func = ubf_pthread_cond_signal;
    th->unblock.arg = th;

    if (RUBY_VM_INTERRUPTED(th->ec)) {
        thread_debug("native_sleep: interrupted before sleep\n");

    end = native_cond_timeout(cond, *rel);
    native_cond_timedwait(cond, lock, &end);

    th->unblock.func = 0;

    thread_debug("native_sleep done\n");
static LIST_HEAD(ubf_list_head);
static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;

ubf_list_atfork(void)

    list_head_init(&ubf_list_head);

    struct list_node *node = &th->native_thread_data.node.ubf;

    if (list_empty((struct list_head*)node)) {
        list_add(&ubf_list_head, node);

    struct list_node *node = &th->native_thread_data.node.ubf;

    VM_ASSERT(th->unblock.func == 0);

    if (!list_empty((struct list_head*)node)) {
        list_del_init(node);

    if (list_empty(&ubf_list_head) && !rb_signal_buff_size()) {

    thread_debug("thread_wait_queue_wakeup (%"PRI_THREAD_ID")\n", thread_id_str(th));
    pthread_kill(th->thread_id, SIGVTALRM);
ubf_select(void *ptr)

    const rb_thread_t *cur = ruby_thread_from_native();

    register_ubf_list(th);

    if (cur != gvl->timer && cur != sigwait_th) {

        rb_thread_wakeup_timer_thread(-1);

    ubf_wakeup_thread(th);

ubf_threads_empty(void)

    return list_empty(&ubf_list_head);

ubf_wakeup_all_threads(void)

    if (!ubf_threads_empty()) {
        list_for_each(&ubf_list_head, dat, node.ubf) {
            th = container_of(dat, rb_thread_t, native_thread_data);
            ubf_wakeup_thread(th);

#define register_ubf_list(th) (void)(th)
#define unregister_ubf_list(th) (void)(th)
#define ubf_select 0
static void ubf_wakeup_all_threads(void) { return; }
static int ubf_threads_empty(void) { return 1; }
#define ubf_list_atfork() do {} while (0)
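/*
 * Summary of the unblocking-function (UBF) machinery above: a thread that
 * enters a blocking region registers itself on ubf_list_head; ubf_select()
 * (the UBF used for select/IO waits) re-registers the thread, pokes the
 * timer thread when nobody else is driving wakeups, and delivers SIGVTALRM
 * via pthread_kill() so the blocking syscall returns with EINTR.
 * ubf_wakeup_all_threads() walks the list and wakes everyone, which the
 * GVL timer does once per quantum.  When USE_UBF_LIST is not defined, all
 * of this collapses to the no-op stubs at the end.
 */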
#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)

    volatile rb_pid_t owner_process;
} signal_self_pipe = {

rb_thread_wakeup_timer_thread_fd(int fd)

    const uint64_t buff = 1;

    const char buff = '!';

    if ((result = write(fd, &buff, sizeof(buff))) <= 0) {

      case EINTR: goto retry;

#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN

    async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);

    if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
ubf_timer_arm(rb_pid_t current)

#if UBF_TIMER == UBF_TIMER_POSIX
    if ((!current || timer_posix.owner == current) &&
        timer_state_cas(RTIMER_DISARM, RTIMER_ARMING) == RTIMER_DISARM) {
        struct itimerspec it;

        it.it_interval.tv_sec = it.it_value.tv_sec = 0;
        it.it_interval.tv_nsec = it.it_value.tv_nsec = TIME_QUANTUM_NSEC;

        if (timer_settime(timer_posix.timerid, 0, &it, 0))
            rb_async_bug_errno("timer_settime (arm)", errno);

        switch (timer_state_cas(RTIMER_ARMING, RTIMER_ARMED)) {

            (void)timer_settime(timer_posix.timerid, 0, &zero, 0);

          case RTIMER_ARMING: return;

            (void)timer_settime(timer_posix.timerid, 0, &zero, 0);

            rb_async_bug_errno("UBF_TIMER_POSIX unknown state", ERANGE);

#elif UBF_TIMER == UBF_TIMER_PTHREAD
    if (!current || current == timer_pthread.owner) {
        if (ATOMIC_EXCHANGE(timer_pthread.armed, 1) == 0)
            rb_thread_wakeup_timer_thread_fd(timer_pthread.low[1]);
rb_thread_wakeup_timer_thread(int sig)

    rb_thread_wakeup_timer_thread_fd(signal_self_pipe.normal[1]);

    if (signal_self_pipe.owner_process == current) {
        rb_thread_wakeup_timer_thread_fd(signal_self_pipe.normal[1]);

        if (system_working > 0) {

            mth = vm->ractor.main_thread;
            if (!mth || system_working <= 0) return;

            RUBY_VM_SET_TRAP_INTERRUPT(ec);
            ubf_timer_arm(current);

            if (vm->ubf_async_safe && mth->unblock.func) {
                (mth->unblock.func)(mth->unblock.arg);
#define CLOSE_INVALIDATE_PAIR(expr) \
    close_invalidate_pair(expr,"close_invalidate: "#expr)

close_invalidate(int *fdp, const char *msg)

    if (close(fd) < 0) {
        async_bug_fd(msg, errno, fd);

close_invalidate_pair(int fds[2], const char *msg)

    if (USE_EVENTFD && fds[0] == fds[1]) {
        close_invalidate(&fds[0], msg);

        close_invalidate(&fds[0], msg);
        close_invalidate(&fds[1], msg);

set_nonblock(int fd)

    oflags = fcntl(fd, F_GETFL);

    oflags |= O_NONBLOCK;
    err = fcntl(fd, F_SETFL, oflags);

setup_communication_pipe_internal(int pipes[2])

    if (pipes[0] >= 0 || pipes[1] >= 0) {
        VM_ASSERT(pipes[0] >= 0);
        VM_ASSERT(pipes[1] >= 0);

#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
    pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);
    if (pipes[0] >= 0) {

        rb_warn("pipe creation failed for timer: %s, scheduling broken",
                strerror(errno));

    set_nonblock(pipes[0]);
    set_nonblock(pipes[1]);
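/*
 * When eventfd(2) is available, the "pipe" is a single eventfd descriptor
 * stored in both slots (pipes[0] == pipes[1]), created non-blocking and
 * close-on-exec up front; otherwise a real pipe is created and both ends
 * are switched to O_NONBLOCK with set_nonblock().  That difference is why
 * rb_thread_wakeup_timer_thread_fd() above writes either an 8-byte
 * uint64_t counter (eventfd only accepts 8-byte writes) or a single '!'
 * byte; the write() is async-signal-safe either way, so it can be done
 * from unblocking functions and signal handlers.  close_invalidate_pair()
 * knows about the shared-descriptor case and closes it only once.
 */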
#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
#endif

#if defined(__linux__)

#elif defined(__APPLE__)

#ifdef SET_CURRENT_THREAD_NAME
    if (!NIL_P(loc = th->name)) {

    else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
        char buf[THREAD_NAME_MAX];

        p = strrchr(name, '/');

        if (len >= sizeof(buf)) {
            buf[sizeof(buf)-2] = '*';
            buf[sizeof(buf)-1] = '\0';
        }
        SET_CURRENT_THREAD_NAME(buf);
native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)

#if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
    char buf[THREAD_NAME_MAX];

# if !defined SET_ANOTHER_THREAD_NAME
    if (!pthread_equal(pthread_self(), thread_id)) return;
# endif

        if (n >= (int)sizeof(buf)) {
            memcpy(buf, s, sizeof(buf)-1);
            buf[sizeof(buf)-1] = '\0';
        }

# if defined SET_ANOTHER_THREAD_NAME
    SET_ANOTHER_THREAD_NAME(thread_id, s);
# elif defined SET_CURRENT_THREAD_NAME
    SET_CURRENT_THREAD_NAME(s);
# endif
#if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
native_thread_native_thread_id(rb_thread_t *target_th)

#ifdef RB_THREAD_T_HAS_NATIVE_ID
    int tid = target_th->tid;
    if (tid == 0) return Qnil;
#elif defined(__APPLE__)
    int e = pthread_threadid_np(target_th->thread_id, &tid);

    return ULL2NUM((unsigned long long)tid);
#endif

# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
#else
# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0
#endif
ubf_timer_invalidate(void)

#if UBF_TIMER == UBF_TIMER_PTHREAD
    CLOSE_INVALIDATE_PAIR(timer_pthread.low);

ubf_timer_pthread_create(rb_pid_t current)

#if UBF_TIMER == UBF_TIMER_PTHREAD
    if (timer_pthread.owner == current)
        return;

    if (setup_communication_pipe_internal(timer_pthread.low) < 0)
        return;

    err = pthread_create(&timer_pthread.thid, 0, timer_pthread_fn, GET_VM());

    timer_pthread.owner = current;

    rb_warn("pthread_create failed for timer: %s, signals racy",
            strerror(err));
ubf_timer_create(rb_pid_t current)

#if UBF_TIMER == UBF_TIMER_POSIX
# define UBF_TIMER_CLOCK CLOCK_REALTIME

# define UBF_TIMER_CLOCK CLOCK_MONOTONIC

    struct sigevent sev;

    sev.sigev_notify = SIGEV_SIGNAL;
    sev.sigev_signo = SIGVTALRM;
    sev.sigev_value.sival_ptr = &timer_posix;

    if (!timer_create(UBF_TIMER_CLOCK, &sev, &timer_posix.timerid)) {
        rb_atomic_t prev = timer_state_exchange(RTIMER_DISARM);

        if (prev != RTIMER_DEAD) {
            rb_bug("timer_posix was not dead: %u\n", (unsigned)prev);
        }
        timer_posix.owner = current;

        rb_warn("timer_create failed: %s, signals racy", strerror(errno));

    if (UBF_TIMER == UBF_TIMER_PTHREAD)
        ubf_timer_pthread_create(current);
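/*
 * The POSIX backend creates a per-process interval timer whose expiry is
 * delivered as SIGVTALRM (SIGEV_SIGNAL) with sival_ptr pointing back at
 * timer_posix.  SIGVTALRM goes to whichever thread the kernel picks, which
 * appears to be enough to make blocking system calls return EINTR and to
 * let the signal path re-arm or disarm the timer through the CAS-guarded
 * state machine defined earlier.  If timer_create() fails, Ruby only warns
 * that signal handling may be delayed ("signals racy") and carries on.
 */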
rb_thread_create_timer_thread(void)

    rb_pid_t current = getpid();
    rb_pid_t owner = signal_self_pipe.owner_process;

    if (owner && owner != current) {
        CLOSE_INVALIDATE_PAIR(signal_self_pipe.normal);
        CLOSE_INVALIDATE_PAIR(signal_self_pipe.ub_main);
        ubf_timer_invalidate();
    }

    if (setup_communication_pipe_internal(signal_self_pipe.normal) < 0) return;
    if (setup_communication_pipe_internal(signal_self_pipe.ub_main) < 0) return;

    ubf_timer_create(current);
    if (owner != current) {
        sigwait_th = THREAD_INVALID;
        signal_self_pipe.owner_process = current;
    }
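/*
 * owner_process records which PID set up the pipes and the timer.  After
 * fork(2) the child sees owner != getpid(), so it closes and invalidates
 * the inherited descriptors, recreates them, resets sigwait_th, and claims
 * ownership.  TIMER_THREAD_CREATED_P() (defined near the top) relies on
 * the same PID comparison.
 */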
ubf_timer_disarm(void)

#if UBF_TIMER == UBF_TIMER_POSIX
    if (timer_posix.owner && timer_posix.owner != getpid()) return;
    prev = timer_state_cas(RTIMER_ARMED, RTIMER_DISARM);

      case RTIMER_DISARM: return;
      case RTIMER_ARMING: return;

        if (timer_settime(timer_posix.timerid, 0, &zero, 0)) {

            if (err == EINVAL) {
                prev = timer_state_cas(RTIMER_DISARM, RTIMER_DISARM);

                if (prev == RTIMER_DEAD) return;

      case RTIMER_DEAD: return;

        rb_bug("UBF_TIMER_POSIX bad state: %u\n", (unsigned)prev);

#elif UBF_TIMER == UBF_TIMER_PTHREAD
    ATOMIC_SET(timer_pthread.armed, 0);
ubf_timer_destroy(void)

#if UBF_TIMER == UBF_TIMER_POSIX
    if (timer_posix.owner == getpid()) {

        size_t i, max = 10000000;

        for (i = 0; i < max; i++) {
            switch (timer_state_cas(expect, RTIMER_DEAD)) {

                if (expect == RTIMER_DISARM) goto done;
                expect = RTIMER_DISARM;

                native_thread_yield();
                expect = RTIMER_ARMED;

                if (expect == RTIMER_ARMED) {
                    if (timer_settime(timer_posix.timerid, 0, &zero, 0))

                expect = RTIMER_ARMED;

                rb_bug("RTIMER_DEAD unexpected");

        rb_bug("timed out waiting for timer to arm");

        if (timer_delete(timer_posix.timerid) < 0)

        VM_ASSERT(timer_state_exchange(RTIMER_DEAD) == RTIMER_DEAD);

#elif UBF_TIMER == UBF_TIMER_PTHREAD

    timer_pthread.owner = 0;

    rb_thread_wakeup_timer_thread_fd(timer_pthread.low[1]);
    err = pthread_join(timer_pthread.thid, 0);
native_stop_timer_thread(void)

    stopped = --system_working <= 0;

    ubf_timer_destroy();

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");

native_reset_timer_thread(void)

    if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");
#ifdef HAVE_SIGALTSTACK
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)

    const size_t water_mark = 1024 * 1024;
    STACK_GROW_DIR_DETECTION;

#ifdef STACKADDR_AVAILABLE
    if (get_stack(&base, &size) == 0) {
        if (pthread_equal(th->thread_id, native_main_thread.id)) {
            if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
                size = (size_t)rlim.rlim_cur;

        base = (char *)base + STACK_DIR_UPPER(+size, -size);

    size = th->ec->machine.stack_maxsize;
    base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);

    size /= RUBY_STACK_SPACE_RATIO;
    if (size > water_mark) size = water_mark;

    if (IS_STACK_DIR_UPPER()) {
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;

        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
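/*
 * Overflow detection: the checked region is a slice of the stack next to
 * its limit, at most water_mark (1 MiB) or 1/RUBY_STACK_SPACE_RATIO of the
 * stack, clamped so it cannot wrap around address 0.  If the faulting
 * address falls inside that slice on the growth side of the stack, the
 * fault is treated as a stack overflow rather than an ordinary segfault.
 */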
#if UBF_TIMER == UBF_TIMER_PTHREAD
    if (fd == timer_pthread.low[0] || fd == timer_pthread.low[1])

    if (fd == signal_self_pipe.normal[0] || fd == signal_self_pipe.normal[1])

    if (fd == signal_self_pipe.ub_main[0] || fd == signal_self_pipe.ub_main[1])

    if (signal_self_pipe.owner_process == getpid())

rb_nativethread_id_t
rb_nativethread_self(void)
{
    return pthread_self();
}
mjit_worker(void *arg)

    void (*worker_func)(void) = (void(*)(void))arg;

#ifdef SET_CURRENT_THREAD_NAME
    SET_CURRENT_THREAD_NAME("ruby-mjitworker");

rb_thread_create_mjit_thread(void (*worker_func)(void))

    pthread_attr_t attr;
    pthread_t worker_pid;

    if (pthread_attr_init(&attr) != 0) return ret;

    if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0
        && pthread_create(&worker_pid, &attr, mjit_worker, (void *)worker_func) == 0) {

    pthread_attr_destroy(&attr);
    if (signal_self_pipe.normal[0] >= 0) {
        VM_ASSERT(signal_self_pipe.owner_process == getpid());

        if (ATOMIC_PTR_CAS(sigwait_th, THREAD_INVALID, th) == THREAD_INVALID) {
            return signal_self_pipe.normal[0];

    VM_ASSERT(signal_self_pipe.normal[0] == fd);

    old = ATOMIC_PTR_EXCHANGE(sigwait_th, THREAD_INVALID);
    if (old != th) assert(old == th);
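/*
 * rb_sigwait_fd_get()/rb_sigwait_fd_put(): at most one thread at a time
 * may own the read end of signal_self_pipe.normal.  Ownership is claimed
 * by atomically compare-and-swapping sigwait_th from THREAD_INVALID to the
 * calling thread and released by exchanging it back; everyone else gets a
 * negative fd and has to sleep by some other means.
 */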
ruby_ppoll(struct pollfd *fds, nfds_t nfds,
           const struct timespec *ts, const sigset_t *sigmask)

        if (ts->tv_sec > INT_MAX/1000)
            timeout_ms = INT_MAX;

            tmp = (int)(ts->tv_sec * 1000);

            tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
            if (INT_MAX - tmp < tmp2)
                timeout_ms = INT_MAX;

                timeout_ms = (int)(tmp + tmp2);

    return poll(fds, nfds, timeout_ms);

# define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
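/*
 * ppoll() emulation for platforms that only have poll(): the timespec is
 * converted to a millisecond count, rounding the nanosecond part up
 * (tv_nsec + 999999 before dividing by 1e6) and clamping to INT_MAX on
 * overflow, e.g. {tv_sec = 1, tv_nsec = 1} becomes 1001 ms.  Plain poll()
 * cannot honor the sigmask argument, so this fallback ignores it; the
 * callers in this file only ever pass 0 there.
 */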
rb_sigwait_sleep(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *rel)

    pfd.fd = sigwait_fd;
    pfd.events = POLLIN;

    if (!BUSY_WAIT_SIGNALS && ubf_threads_empty()) {
        (void)ppoll(&pfd, 1, rb_hrtime2timespec(&ts, rel), 0);
        check_signals_nogvl(th, sigwait_fd);

        rb_hrtime_t to = RB_HRTIME_MAX, end;

            end = rb_hrtime_add(rb_hrtime_now(), to);

            const rb_hrtime_t *sto = sigwait_timeout(th, sigwait_fd, &to, &n);

                n = ppoll(&pfd, 1, rb_hrtime2timespec(&ts, sto), 0);
                if (check_signals_nogvl(th, sigwait_fd))

            if (n || (th && RUBY_VM_INTERRUPTED(th->ec)))

            if (rel && hrtime_update_expire(&to, end))
ubf_ppoll_sleep(void *ignore)

    rb_thread_wakeup_timer_thread_fd(signal_self_pipe.ub_main[1]);

#define GVL_UNLOCK_BEGIN_YIELD(th) do { \
    const native_thread_data_t *next; \
    rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor); \
    RB_GC_SAVE_MACHINE_CONTEXT(th); \
    rb_native_mutex_lock(&gvl->lock); \
    next = gvl_release_common(gvl); \
    rb_native_mutex_unlock(&gvl->lock); \
    if (!next && rb_ractor_living_thread_num(th->ractor) > 1) { \
        native_thread_yield(); \
native_ppoll_sleep(rb_thread_t *th, rb_hrtime_t *rel)

    th->unblock.func = ubf_ppoll_sleep;

    GVL_UNLOCK_BEGIN_YIELD(th);

    if (!RUBY_VM_INTERRUPTED(th->ec)) {
        struct pollfd pfd[2];

        pfd[0].fd = signal_self_pipe.normal[0];
        pfd[1].fd = signal_self_pipe.ub_main[0];
        pfd[0].events = pfd[1].events = POLLIN;
        if (ppoll(pfd, 2, rb_hrtime2timespec(&ts, rel), 0) > 0) {
            if (pfd[1].revents & POLLIN) {
                (void)consume_communication_pipe(pfd[1].fd);

    unblock_function_clear(th);
    int sigwait_fd = rb_sigwait_fd_get(th);
    rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);

    if (sigwait_fd >= 0) {
        th->unblock.func = ubf_sigwait;

        GVL_UNLOCK_BEGIN_YIELD(th);

        if (!RUBY_VM_INTERRUPTED(th->ec)) {
            rb_sigwait_sleep(th, sigwait_fd, rel);
        }
        check_signals_nogvl(th, sigwait_fd);

        unblock_function_clear(th);

        rb_sigwait_fd_put(th, sigwait_fd);
        rb_sigwait_fd_migrate(th->vm);
    }
    else if (th == th->vm->ractor.main_thread) {
        native_ppoll_sleep(th, rel);
    }
    else {
        native_cond_sleep(th, rel);
    }

    rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
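/*
 * native_sleep() picks one of three ways to block, as the branches above
 * show: the thread that wins the sigwait fd sleeps in rb_sigwait_sleep()
 * polling the signal pipe (so it can also service signals); the main
 * thread, if it did not get the fd, sleeps in native_ppoll_sleep() on both
 * the signal pipe and its private ub_main pipe; every other thread simply
 * waits on its per-thread condition variable via native_cond_sleep().
 * Each path installs a matching unblocking function first so the ubf_*
 * callbacks can cut the sleep short.
 */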
#if UBF_TIMER == UBF_TIMER_PTHREAD
timer_pthread_fn(void *p)

    pthread_t main_thread_id = vm->ractor.main_thread->thread_id;

    pfd.fd = timer_pthread.low[0];
    pfd.events = POLLIN;

    while (system_working > 0) {
        (void)poll(&pfd, 1, timeout);
        ccp = consume_communication_pipe(pfd.fd);

        if (system_working > 0) {
            if (ATOMIC_CAS(timer_pthread.armed, 1, 1)) {
                pthread_kill(main_thread_id, SIGVTALRM);

            if (rb_signal_buff_size() || !ubf_threads_empty()) {
                timeout = TIME_QUANTUM_MSEC;

            ATOMIC_SET(timer_pthread.armed, 0);

    pthread_kill(main_thread_id, SIGVTALRM);
    ATOMIC_SET(timer_pthread.armed, 0);
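/*
 * UBF_TIMER_PTHREAD fallback: instead of a kernel timer, a helper thread
 * loops while system_working, sleeping in poll() on the timer_pthread.low
 * pipe.  ubf_timer_arm() wakes it by writing to that pipe and setting the
 * armed flag; while the flag is set the helper kicks the main thread with
 * SIGVTALRM, and as long as signals or ubf-registered threads are pending
 * it keeps polling with a TIME_QUANTUM_MSEC timeout, otherwise it clears
 * the flag and appears to go back to waiting indefinitely.  On shutdown it
 * sends one final SIGVTALRM before clearing the flag for good.
 */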
ubf_caller(void *ignore)

rb_thread_start_unblock_thread(void)