#undef _FORTIFY_SOURCE
#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0

#include "ruby/internal/config.h"

#include "eval_intern.h"
#include "internal/class.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/hash.h"
#include "internal/io.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/signal.h"
#include "internal/thread.h"
#include "internal/time.h"
#include "internal/warnings.h"
#include "ractor_core.h"

#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

#define THREAD_DEBUG 0

static VALUE rb_cThreadShield;

static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;

enum SLEEP_FLAGS {
    SLEEP_DEADLOCKABLE = 0x1,
    SLEEP_SPURIOUS_CHECK = 0x2
};

#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)

rb_thread_local_storage(VALUE thread)
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
static void sleep_forever(rb_thread_t *th, unsigned int fl);
static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker);
static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
static int consume_communication_pipe(int fd);
static int check_signals_nogvl(rb_thread_t *, int sigwait_fd);
void rb_sigwait_fd_migrate(rb_vm_t *);

#define eKillSignal INT2FIX(0)
#define eTerminateSignal INT2FIX(1)
static volatile int system_working = 1;
    struct list_node wfd_node;

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

    enum rb_thread_status prev_status;

static void unblock_function_clear(rb_thread_t *th);
#define GVL_UNLOCK_BEGIN(th) do { \
    RB_GC_SAVE_MACHINE_CONTEXT(th); \
    gvl_release(rb_ractor_gvl(th->ractor));

#define GVL_UNLOCK_END(th) \
    gvl_acquire(rb_ractor_gvl(th->ractor), th); \
    rb_ractor_thread_switch(th->ractor, th); \

#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#define only_if_constant(expr, notconst) notconst
#endif

#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        /* always return true unless fail_if_interrupted */ \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        exec; \
        blocking_region_end(th, &__region); \
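/*
 * BLOCKING_REGION is the GVL handoff primitive: blocking_region_begin()
 * saves the machine context and releases the GVL, `exec` runs without the
 * lock, and blocking_region_end() re-acquires it. Extensions reach this
 * path through the public API; a minimal sketch of typical use (the
 * callback and its argument struct are hypothetical, not from this file):
 *
 *   static void *
 *   blocking_read(void *ptr)          // runs with the GVL released
 *   {
 *       struct io_args *a = ptr;
 *       a->result = read(a->fd, a->buf, a->len);
 *       return NULL;
 *   }
 *
 *   // with the GVL held:
 *   rb_thread_call_without_gvl(blocking_read, &args, RUBY_UBF_IO, NULL);
 */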
#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)

    if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
        if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
    }
    else {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(ec);
    }
    return rb_threadptr_execute_interrupts(th, 1);

    return vm_check_ints_blocking(ec);
#if defined(HAVE_POLL)
# if defined(__linux__)
# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
#  define POLLERR_SET (POLLHUP | POLLERR)

timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
                const struct timeval *timeout)
    *rel = rb_timeval2hrtime(timeout);
    *end = rb_hrtime_add(rb_hrtime_now(), *rel);
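/*
 * timeout_prepare() turns a relative struct timeval into the pair the wait
 * loops below operate on: `rel`, the remaining duration as an rb_hrtime_t
 * (nanosecond resolution), and `end`, the absolute monotonic deadline
 * (now + rel). Retries recompute the remaining time from `end` via
 * hrtime_update_expire(), so interrupted waits never extend the total
 * timeout.
 */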
#ifdef HAVE_VA_ARGS_MACRO
void rb_thread_debug(const char *file, int line, const char *fmt, ...);
#define thread_debug(...) rb_thread_debug(__FILE__, __LINE__, __VA_ARGS__)
#define POSITION_FORMAT "%s:%d:"
#define POSITION_ARGS ,file, line
void rb_thread_debug(const char *fmt, ...);
#define thread_debug rb_thread_debug
#define POSITION_FORMAT
#define POSITION_ARGS

# ifdef NON_SCALAR_THREAD_ID
#define fill_thread_id_string ruby_fill_thread_id_string
ruby_fill_thread_id_string(rb_nativethread_id_t thid, rb_thread_id_string_t buf)
    extern const char ruby_digitmap[];

    for (i = 0; i < sizeof(thid); i++) {
# ifdef LITTLE_ENDIAN
        size_t j = sizeof(thid) - i - 1;
        unsigned char c = (unsigned char)((char *)&thid)[j];
        buf[2 + i * 2] = ruby_digitmap[(c >> 4) & 0xf];
        buf[3 + i * 2] = ruby_digitmap[c & 0xf];
    buf[sizeof(rb_thread_id_string_t)-1] = '\0';

# define fill_thread_id_str(th) fill_thread_id_string((th)->thread_id, (th)->thread_id_string)
# define thread_id_str(th) ((th)->thread_id_string)
# define PRI_THREAD_ID "s"

# if THREAD_DEBUG < 0
static int rb_thread_debug_enabled;

rb_thread_s_debug(VALUE _)
    return INT2NUM(rb_thread_debug_enabled);

    rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;

# define rb_thread_debug_enabled THREAD_DEBUG

#define thread_debug if(0)printf

#ifndef fill_thread_id_str
# define fill_thread_id_string(thid, buf) ((void *)(uintptr_t)(thid))
# define fill_thread_id_str(th) (void)0
# define thread_id_str(th) ((void *)(uintptr_t)(th)->thread_id)
# define PRI_THREAD_ID "p"
NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start));
void ruby_sigchld_handler(rb_vm_t *);

ubf_sigwait(void *ignore)
    rb_thread_wakeup_timer_thread(0);

#include THREAD_IMPL_SRC

#define DEBUG_OUT() \
    WaitForSingleObject(&debug_mutex, INFINITE); \
    printf(POSITION_FORMAT"%#lx - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
    ReleaseMutex(&debug_mutex);

#elif defined(HAVE_PTHREAD_H)

#define DEBUG_OUT() \
    pthread_mutex_lock(&debug_mutex); \
    printf(POSITION_FORMAT"%"PRI_THREAD_ID" - %s" POSITION_ARGS, \
           fill_thread_id_string(pthread_self(), thread_id_string), buf); \
    pthread_mutex_unlock(&debug_mutex);
#ifndef BUSY_WAIT_SIGNALS
# define BUSY_WAIT_SIGNALS (0)
# define USE_EVENTFD (0)

static int debug_mutex_initialized = 1;
static rb_nativethread_lock_t debug_mutex;

#ifdef HAVE_VA_ARGS_MACRO
                const char *file, int line,
                const char *fmt, ...)
#ifdef NON_SCALAR_THREAD_ID
    rb_thread_id_string_t thread_id_string;

    if (!rb_thread_debug_enabled) return;
    if (debug_mutex_initialized == 1) {
        debug_mutex_initialized = 0;
    vsnprintf(buf, BUFSIZ, fmt, args);

#include "thread_sync.c"
        if (fail_if_interrupted) {
            if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
            RUBY_VM_CHECK_INTS(th->ec);
    } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&

    VM_ASSERT(th->unblock.func == NULL);

    th->unblock.func = func;
    th->unblock.arg = arg;

    th->unblock.func = 0;

rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
        RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
        RUBY_VM_SET_INTERRUPT(th->ec);
    if (th->unblock.func != NULL) {
        (th->unblock.func)(th->unblock.arg);

    rb_threadptr_interrupt_common(th, 0);

    rb_threadptr_interrupt_common(th, 1);
    list_for_each(&r->threads.set, th, lt_node) {
        if (th != main_thread) {
            thread_debug("terminate_all: begin (thid: %"PRI_THREAD_ID", status: %s)\n",
                         thread_id_str(th), thread_status_name(th, TRUE));
            rb_threadptr_pending_interrupt_enque(th, eTerminateSignal);
            rb_threadptr_interrupt(th);
            thread_debug("terminate_all: end (thid: %"PRI_THREAD_ID", status: %s)\n",
                         thread_id_str(th), thread_status_name(th, TRUE));

    thread_debug("terminate_all: main thread (%p)\n", (void *)th);
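/*
 * Termination is cooperative: each non-main thread has eTerminateSignal
 * pushed onto its pending-interrupt queue and is then woken with
 * rb_threadptr_interrupt(), so it terminates itself at its next interrupt
 * check instead of being destroyed from outside.
 */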
    while (thread->join_list) {
        thread->join_list = join_list->next;

        if (target_thread->scheduler != Qnil && rb_fiberptr_blocking(join_list->fiber) == 0) {
            rb_threadptr_interrupt(target_thread);
            switch (target_thread->status) {
              case THREAD_STOPPED_FOREVER:
                target_thread->status = THREAD_RUNNABLE;

rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
    while (th->keeping_mutexes) {
        th->keeping_mutexes = mutex->next_mutex;

        const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
        if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
    volatile int sleeping = 0;

    if (cr->threads.main != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)cr->threads.main, (void *)th);

    rb_threadptr_unlock_all_locking_mutexes(th);

    if (EC_EXEC_TAG() == TAG_NONE) {
        thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
        terminate_all(cr, th);

        while (rb_ractor_living_thread_num(cr) > 1) {
            rb_hrtime_t rel = RB_HRTIME_PER_SEC;
            native_sleep(th, &rel);
            RUBY_VM_CHECK_INTS_BLOCKING(ec);
void rb_threadptr_root_fiber_terminate(rb_thread_t *th);

thread_cleanup_func_before_exec(void *th_ptr)
    th->status = THREAD_KILLED;
    th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
    rb_threadptr_root_fiber_terminate(th);

thread_cleanup_func(void *th_ptr, int atfork)
    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);

    native_thread_destroy(th);

    native_thread_init_stack(th);
rb_vm_proc_local_ep(VALUE proc)
    const VALUE *ep = vm_proc_ep(proc);
        return rb_vm_ep_local_ep(ep);

                                  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
    VALUE args = th->invoke_arg.proc.args;
    const VALUE *args_ptr;
    VALUE procval = th->invoke_arg.proc.proc;
    GetProcPtr(procval, proc);

    th->ec->errinfo = Qnil;
    th->ec->root_lep = rb_vm_proc_local_ep(procval);
    th->ec->root_svar = Qfalse;

    vm_check_ints_blocking(th->ec);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        VALUE self = rb_ractor_self(th->ractor);
        rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc_with_self(
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE

        th->invoke_arg.proc.args = Qnil;

        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc(
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
    native_set_thread_name(th);

    switch (th->invoke_type) {
      case thread_invoke_type_proc:
        result = thread_do_start_proc(th);

      case thread_invoke_type_ractor_proc:
        result = thread_do_start_proc(th);
        rb_ractor_atexit(th->ec, result);

      case thread_invoke_type_func:
        result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);

      case thread_invoke_type_none:
    STACK_GROW_DIR_DETECTION;
    enum ruby_tag_type state;
    size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
    rb_thread_t *ractor_main_th = th->ractor->threads.main;
    VALUE *vm_stack = NULL;

    VM_ASSERT(th != th->vm->ractor.main_thread);
    thread_debug("thread start: %p\n", (void *)th);

    gvl_acquire(rb_ractor_gvl(th->ractor), th);
    ruby_thread_set_native(th);

    if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
        rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);

        r->r_stdin = rb_io_prep_stdin();
        r->r_stdout = rb_io_prep_stdout();
        r->r_stderr = rb_io_prep_stderr();

    vm_stack = alloca(size * sizeof(VALUE));
    rb_ec_initialize_vm_stack(th->ec, vm_stack, size);
    th->ec->machine.stack_start = STACK_DIR_UPPER(vm_stack + size, vm_stack);
    th->ec->machine.stack_maxsize -= size * sizeof(VALUE);

    thread_debug("thread start (get lock): %p\n", (void *)th);

    VM_ASSERT(th->value == Qundef);

    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        SAVE_ROOT_JMPBUF(th, thread_do_start(th));
        errinfo = th->ec->errinfo;

        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
        if (!NIL_P(exc)) errinfo = exc;

        if (state == TAG_FATAL) {
            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit(th->ec, Qnil);

            if (th->report_on_exception) {
                VALUE mesg = rb_thread_to_s(th->self);
                rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
                rb_write_error_str(mesg);
                rb_ec_error_print(th->ec, errinfo);

            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit_exception(th->ec);

            if (th->vm->thread_abort_on_exception ||

    VM_ASSERT(th->value != Qundef);

    rb_threadptr_join_list_wakeup(th);
    rb_threadptr_unlock_all_locking_mutexes(th);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        rb_thread_terminate_all(th);
        rb_ractor_teardown(th->ec);

    th->status = THREAD_KILLED;
    thread_debug("thread end: %p\n", (void *)th);

    if (th->vm->ractor.main_thread == th) {
        rb_threadptr_raise(ractor_main_th, 1, &errinfo);

    rb_ec_clear_current_thread_trace_func(th->ec);

    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);

    if (ractor_main_th->status == THREAD_KILLED &&
        th->ractor->threads.cnt <= 2) {
        rb_threadptr_interrupt(ractor_main_th);

    rb_check_deadlock(th->ractor);

    rb_fiber_close(th->ec->fiber_ptr);

    thread_cleanup_func(th, FALSE);
    VM_ASSERT(th->ec->vm_stack == NULL);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        gvl_release(rb_ractor_gvl(th->ractor));
        rb_ractor_living_threads_remove(th->ractor, th);

        rb_ractor_living_threads_remove(th->ractor, th);
        gvl_release(rb_ractor_gvl(th->ractor));
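/*
 * End of the native-thread entry point: once the thread body returns or
 * raises, the thread wakes its joiners, unlocks any mutexes it still
 * holds, reports or propagates the exception according to
 * report_on_exception / abort_on_exception, re-runs the deadlock check,
 * and removes itself from the ractor's living-thread set before releasing
 * the GVL for the last time.
 */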
    enum thread_invoke_type type;

    rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);

                 "can't start a new thread (frozen ThreadGroup)");

    switch (params->type) {
      case thread_invoke_type_proc:
        th->invoke_type = thread_invoke_type_proc;
        th->invoke_arg.proc.args = params->args;
        th->invoke_arg.proc.proc = params->proc;

      case thread_invoke_type_ractor_proc:
#if RACTOR_CHECK_MODE > 0
        rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
        th->invoke_type = thread_invoke_type_ractor_proc;
        th->ractor = params->g;
        th->ractor->threads.main = th;
        th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
        rb_ractor_send_parameters(ec, params->g, params->args);

      case thread_invoke_type_func:
        th->invoke_type = thread_invoke_type_func;
        th->invoke_arg.func.func = params->fn;
        th->invoke_arg.func.arg = (void *)params->args;

    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;

    th->pending_interrupt_queue_checked = 0;
    th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
    RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);

    RUBY_DEBUG_LOG("r:%u th:%p", rb_ractor_id(th->ractor), (void *)th);

    rb_ractor_living_threads_insert(th->ractor, th);

    err = native_thread_create(th);
        th->status = THREAD_KILLED;
        rb_ractor_living_threads_remove(th->ractor, th);

#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
thread_s_new(int argc, VALUE *argv, VALUE klass)
    VALUE thread = rb_thread_alloc(klass);

    if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {

    th = rb_thread_ptr(thread);
    if (!threadptr_initialized(th)) {

        .type = thread_invoke_type_proc,
    return thread_create_core(rb_thread_alloc(klass), &params);

    if (th->invoke_type == thread_invoke_type_proc) {
        return rb_proc_location(th->invoke_arg.proc.proc);

    else if (th->invoke_type != thread_invoke_type_none) {
        VALUE loc = threadptr_invoke_proc_location(th);
                 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,

        .type = thread_invoke_type_proc,
    return thread_create_core(thread, &params);

        .type = thread_invoke_type_func,
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);

        .type = thread_invoke_type_ractor_proc,
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
remove_from_join_list(VALUE arg)
    if (target_thread->status != THREAD_KILLED) {
        while (*join_list) {
            if (*join_list == p->waiter) {
                *join_list = (*join_list)->next;

            join_list = &(*join_list)->next;

static rb_hrtime_t *double2hrtime(rb_hrtime_t *, double);

    return th->status == THREAD_KILLED || th->value != Qundef;
thread_join_sleep(VALUE arg)
    rb_thread_t *target_th = p->target, *th = p->waiter->thread;
    rb_hrtime_t end = 0, *limit = p->limit;

        end = rb_hrtime_add(*limit, rb_hrtime_now());

    while (!thread_finished(target_th)) {
        if (scheduler != Qnil) {

            th->status = THREAD_STOPPED_FOREVER;
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);
            native_sleep(th, 0);
            rb_ractor_sleeper_threads_dec(th->ractor);

            if (hrtime_update_expire(limit, end)) {
                thread_debug("thread_join: timeout (thid: %"PRI_THREAD_ID")\n",
                             thread_id_str(target_th));
            th->status = THREAD_STOPPED;
            native_sleep(th, limit);

        RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
        th->status = THREAD_RUNNABLE;

        thread_debug("thread_join: interrupted (thid: %"PRI_THREAD_ID", status: %s)\n",
                     thread_id_str(target_th), thread_status_name(target_th, TRUE));
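/*
 * thread_join_sleep() is the waiting half of Thread#join: with no limit
 * the caller sleeps as THREAD_STOPPED_FOREVER (and so participates in
 * deadlock detection); with a limit it sleeps as THREAD_STOPPED and
 * re-derives the remaining time from the absolute deadline after every
 * wakeup, so interrupts and spurious wakeups shrink the timeout rather
 * than restarting it.
 */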
    if (th == target_th) {
    if (th->ractor->threads.main == target_th) {

    thread_debug("thread_join (thid: %"PRI_THREAD_ID", status: %s)\n",
                 thread_id_str(target_th), thread_status_name(target_th, TRUE));

    if (target_th->status != THREAD_KILLED) {
        waiter.next = target_th->join_list;
        waiter.fiber = fiber;
        target_th->join_list = &waiter;

        arg.waiter = &waiter;
        arg.target = target_th;
        arg.timeout = timeout;

    thread_debug("thread_join: success (thid: %"PRI_THREAD_ID", status: %s)\n",
                 thread_id_str(target_th), thread_status_name(target_th, TRUE));

    if (target_th->ec->errinfo != Qnil) {
        VALUE err = target_th->ec->errinfo;

        thread_debug("thread_join: terminated (thid: %"PRI_THREAD_ID", status: %s)\n",
                     thread_id_str(target_th), thread_status_name(target_th, TRUE));

            rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
        else if (THROW_DATA_P(target_th->ec->errinfo)) {
            rb_bug("thread_join: THROW_DATA should not reach here.");

    return target_th->self;
thread_join_m(int argc, VALUE *argv, VALUE self)
    rb_hrtime_t rel = 0, *limit = 0;

    if (NIL_P(timeout)) {
        rel = rb_sec2hrtime(NUM2TIMET(timeout));
        limit = double2hrtime(&rel, rb_num2dbl(timeout));

    return thread_join(rb_thread_ptr(self), timeout, limit);

thread_value(VALUE self)
    thread_join(th, Qnil, 0);
    if (th->value == Qundef) {
#define TIMESPEC_SEC_MAX TIMET_MAX
#define TIMESPEC_SEC_MIN TIMET_MIN

COMPILER_WARNING_PUSH
#if __has_warning("-Wimplicit-int-float-conversion")
COMPILER_WARNING_IGNORED(-Wimplicit-int-float-conversion)
#elif defined(_MSC_VER)
COMPILER_WARNING_IGNORED(4305)
#endif
static const double TIMESPEC_SEC_MAX_as_double = TIMESPEC_SEC_MAX;
COMPILER_WARNING_POP

static rb_hrtime_t *
double2hrtime(rb_hrtime_t *hrt, double d)
    const double TIMESPEC_SEC_MAX_PLUS_ONE = 2.0 * (TIMESPEC_SEC_MAX_as_double / 2.0 + 1.0);

    if (TIMESPEC_SEC_MAX_PLUS_ONE <= d) {
        *hrt = (rb_hrtime_t)(d * (double)RB_HRTIME_PER_SEC);
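/*
 * TIMESPEC_SEC_MAX_PLUS_ONE is spelled 2.0 * (max / 2.0 + 1.0) so that the
 * bound is exactly representable as a double even when TIMESPEC_SEC_MAX
 * itself (2**63-1 with a 64-bit time_t) is not; values at or above the
 * bound saturate instead of overflowing the rb_hrtime_t conversion.
 */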
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)

NOINLINE(rb_hrtime_t rb_hrtime_now(void));
    return rb_timespec2hrtime(&ts);

    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status;

    status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    th->status = status;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == status) {
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);
        native_sleep(th, 0);
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_dec(th->ractor);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
    th->status = prev_status;
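/*
 * The two sleep flags: SLEEP_DEADLOCKABLE registers the thread as a
 * sleeper so rb_check_deadlock() can count it, and SLEEP_SPURIOUS_CHECK
 * keeps the loop sleeping after a wakeup that did not change th->status,
 * i.e. it absorbs spurious wakeups instead of returning early.
 */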
COMPILER_WARNING_PUSH
#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif

#define PRIu64 PRI_64_PREFIX "u"

hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
    rb_hrtime_t now = rb_hrtime_now();

    if (now > end) return 1;
    thread_debug("hrtime_update_expire: %"PRIu64" > %"PRIu64"\n",
                 (uint64_t)end, (uint64_t)now);
    *timeout = end - now;
COMPILER_WARNING_POP

sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
    enum rb_thread_status prev_status = th->status;
    rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
        if (hrtime_update_expire(&rel, end))
    th->status = prev_status;
    thread_debug("rb_thread_sleep_forever\n");
    sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);

    thread_debug("rb_thread_sleep_deadly\n");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);

rb_thread_sleep_interruptible(void)
    enum rb_thread_status prev_status = th->status;

    th->status = THREAD_STOPPED;
    native_sleep(th, 0);
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    th->status = prev_status;

rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker)
    if (scheduler != Qnil) {

    thread_debug("rb_thread_sleep_deadly_allow_spurious_wakeup\n");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);

    sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);

    RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());

rb_thread_check_trap_pending(void)
    return rb_signal_buff_size() != 0;

    return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
rb_thread_schedule_limits(uint32_t limits_us)
    thread_debug("rb_thread_schedule\n");

        if (th->running_time_us >= limits_us) {
            thread_debug("rb_thread_schedule/switch start\n");
            RB_GC_SAVE_MACHINE_CONTEXT(th);
            gvl_yield(rb_ractor_gvl(th->ractor), th);
            rb_ractor_thread_switch(th->ractor, th);
            thread_debug("rb_thread_schedule/switch done\n");

    rb_thread_schedule_limits(0);
    RUBY_VM_CHECK_INTS(GET_EC());
    region->prev_status = th->status;
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
        th->blocking_region_buffer = region;
        th->status = THREAD_STOPPED;
        rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
        thread_debug("enter blocking region (%p)\n", (void *)th);
        RB_GC_SAVE_MACHINE_CONTEXT(th);
        gvl_release(rb_ractor_gvl(th->ractor));

    unblock_function_clear(th);
    unregister_ubf_list(th);

    gvl_acquire(rb_ractor_gvl(th->ractor), th);
    rb_ractor_thread_switch(th->ractor, th);

    thread_debug("leave blocking region (%p)\n", (void *)th);
    th->blocking_region_buffer = 0;
    rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    bool is_main_thread = vm->ractor.main_thread == th;
    int saved_errno = 0;

    else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
        vm->ubf_async_safe = 1;

        ubf_th = rb_thread_start_unblock_thread();

    BLOCKING_REGION(th, {
        saved_errno = errno;

    if (is_main_thread) vm->ubf_async_safe = 0;

    RUBY_VM_CHECK_INTS_BLOCKING(ec);

    errno = saved_errno;

    return rb_nogvl(func, data1, ubf, data2, 0);
rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
    volatile int saved_errno = 0;
    enum ruby_tag_type state;

        .th = rb_ec_thread_ptr(ec)

    list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &waiting_fd.wfd_node);

    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
            saved_errno = errno;

        EC_JUMP_TAG(ec, state);

    RUBY_VM_CHECK_INTS_BLOCKING(ec);

    errno = saved_errno;
        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");

    prev_unblock = th->unblock;

        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");

    blocking_region_end(th, brb);

    int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);

ruby_thread_has_gvl_p(void)
    if (th && th->blocking_region_buffer == 0) {

thread_s_pass(VALUE klass)
rb_threadptr_pending_interrupt_clear(rb_thread_t *th)

    th->pending_interrupt_queue_checked = 0;

threadptr_check_pending_interrupt_queue(rb_thread_t *th)
    if (!th->pending_interrupt_queue) {

enum handle_interrupt_timing {
    INTERRUPT_IMMEDIATE,
    INTERRUPT_ON_BLOCKING,

static enum handle_interrupt_timing

    long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);

    for (i=0; i<mask_stack_len; i++) {
        mask = mask_stack[mask_stack_len-(i+1)];

            klass = RBASIC(mod)->klass;
        else if (mod != RCLASS_ORIGIN(mod)) {

        if (sym == sym_immediate) {
            return INTERRUPT_IMMEDIATE;
        else if (sym == sym_on_blocking) {
            return INTERRUPT_ON_BLOCKING;
        else if (sym == sym_never) {
            return INTERRUPT_NEVER;

    return INTERRUPT_NONE;
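/*
 * The mask stack is scanned newest-first, so the innermost
 * Thread.handle_interrupt block wins. Each mask maps exception classes to
 * :immediate, :on_blocking or :never; the pending error's class and its
 * ancestors are looked up and translated into the INTERRUPT_* timing
 * returned here. INTERRUPT_NONE means no mask mentioned the class, which
 * leaves the default (immediate) behaviour in force.
 */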
rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
    return RARRAY_LEN(th->pending_interrupt_queue) == 0;

    for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {

rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
    for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
        enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));

        switch (mask_timing) {
          case INTERRUPT_ON_BLOCKING:
            if (timing != INTERRUPT_ON_BLOCKING) {
          case INTERRUPT_NONE:
          case INTERRUPT_IMMEDIATE:
          case INTERRUPT_NEVER:

    th->pending_interrupt_queue_checked = 1;

    if (rb_threadptr_pending_interrupt_empty_p(th)) {
        th->pending_interrupt_queue_checked = 1;

threadptr_pending_interrupt_active_p(rb_thread_t *th)
    if (th->pending_interrupt_queue_checked) {
    if (rb_threadptr_pending_interrupt_empty_p(th)) {

    if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
    *maskp = rb_ident_hash_new();
rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
    enum ruby_tag_type state;

    mask_arg = rb_to_hash_type(mask_arg);

    rb_ary_push(th->pending_interrupt_mask_stack, mask);
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(th->ec);

    EC_PUSH_TAG(th->ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {

    rb_ary_pop(th->pending_interrupt_mask_stack);
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(th->ec);

    RUBY_VM_CHECK_INTS(th->ec);

        EC_JUMP_TAG(th->ec, state);
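/*
 * Thread.handle_interrupt pushes its mask for the duration of the block
 * and pops it on the way out (above). On both entry and exit the queue is
 * re-armed (pending_interrupt_queue_checked = 0 plus
 * RUBY_VM_SET_INTERRUPT) so that interrupts deferred under the old mask
 * are re-evaluated under the new one, and deferred interrupts fire as
 * soon as the protected block ends.
 */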
rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
    rb_thread_t *target_th = rb_thread_ptr(target_thread);

    if (!target_th->pending_interrupt_queue) {
    if (rb_threadptr_pending_interrupt_empty_p(target_th)) {

        VALUE err = argv[0];
        return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));

rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
    return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);

NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));

    rb_threadptr_pending_interrupt_clear(th);
    th->status = THREAD_RUNNABLE;
    th->ec->errinfo = INT2FIX(TAG_FATAL);
    EC_JUMP_TAG(th->ec, TAG_FATAL);

    do {
        interrupt = ec->interrupt_flag;
        old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
    } while (old != interrupt);
    return interrupt & (rb_atomic_t)~ec->interrupt_mask;
MJIT_FUNC_EXPORTED int
rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
    int postponed_job_interrupt = 0;

    if (th->ec->raised_flag) return ret;

    while ((interrupt = threadptr_get_interrupts(th)) != 0) {
        int timer_interrupt;
        int pending_interrupt;
        int terminate_interrupt;

        timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
        pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
        postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
        trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
        terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK;

        if (interrupt & VM_BARRIER_INTERRUPT_MASK) {

        if (postponed_job_interrupt) {
            rb_postponed_job_flush(th->vm);

        if (trap_interrupt && (th == th->vm->ractor.main_thread)) {
            enum rb_thread_status prev_status = th->status;
            int sigwait_fd = rb_sigwait_fd_get(th);

            if (sigwait_fd >= 0) {
                (void)consume_communication_pipe(sigwait_fd);
                ruby_sigchld_handler(th->vm);
                rb_sigwait_fd_put(th, sigwait_fd);
                rb_sigwait_fd_migrate(th->vm);

            th->status = THREAD_RUNNABLE;
            while ((sig = rb_get_next_signal()) != 0) {
                ret |= rb_signal_exec(th, sig);
            th->status = prev_status;

        if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
            VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
            thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);

            else if (err == eKillSignal ||
                     err == eTerminateSignal ||
                terminate_interrupt = 1;

                if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
                    err = ruby_vm_special_exception_copy(err);
                if (th->status == THREAD_STOPPED ||
                    th->status == THREAD_STOPPED_FOREVER)
                    th->status = THREAD_RUNNABLE;

        if (terminate_interrupt) {
            rb_threadptr_to_kill(th);

        if (timer_interrupt) {
            uint32_t limits_us = TIME_QUANTUM_USEC;

            if (th->priority > 0)
                limits_us <<= th->priority;
                limits_us >>= -th->priority;

            if (th->status == THREAD_RUNNABLE)
                th->running_time_us += TIME_QUANTUM_USEC;

            VM_ASSERT(th->ec->cfp);

            rb_thread_schedule_limits(limits_us);
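/*
 * One pass of rb_threadptr_execute_interrupts() drains the atomically
 * fetched flag word in a fixed order: VM barrier, postponed jobs, trap
 * handlers (main thread only), pending Ruby-level interrupts (which may
 * escalate to a terminate), and finally the timer tick, which charges the
 * thread TIME_QUANTUM_USEC and yields the GVL once its priority-scaled
 * quantum is spent.
 */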
rb_thread_execute_interrupts(VALUE thval)
    rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);

    rb_threadptr_interrupt(th);

    if (rb_threadptr_dead(target_th)) {

    if (rb_threadptr_dead(target_th)) {

    rb_ec_setup_exception(GET_EC(), exc, Qundef);
    rb_threadptr_pending_interrupt_enque(target_th, exc);
    rb_threadptr_interrupt(target_th);

rb_threadptr_signal_raise(rb_thread_t *th, int sig)
    rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);

    rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);

    if (ec->raised_flag & RAISED_EXCEPTION) {
    ec->raised_flag |= RAISED_EXCEPTION;

    if (!(ec->raised_flag & RAISED_EXCEPTION)) {
    ec->raised_flag &= ~RAISED_EXCEPTION;
rb_notify_fd_close(int fd, struct list_head *busy)
    rb_vm_t *vm = GET_THREAD()->vm;

    list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
        if (wfd->fd == fd) {
            list_del(&wfd->wfd_node);
            list_add(busy, &wfd->wfd_node);

            err = th->vm->special_exceptions[ruby_error_stream_closed];
            rb_threadptr_pending_interrupt_enque(th, err);
            rb_threadptr_interrupt(th);

    return !list_empty(busy);
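/*
 * rb_notify_fd_close() is how closing an fd interrupts blocked readers:
 * every waiting_fds entry for the closing fd is moved onto the caller's
 * `busy` list and its owning thread is interrupted with the shared
 * ruby_error_stream_closed exception, which surfaces in Ruby as
 * IOError ("stream closed in another thread").
 */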
    struct list_head busy;

    list_head_init(&busy);
    if (rb_notify_fd_close(fd, &busy)) {

thread_raise_m(int argc, VALUE *argv, VALUE self)
    threadptr_check_pending_interrupt_queue(target_th);
    rb_threadptr_raise(target_th, argc, argv);

    if (current_th == target_th) {
        RUBY_VM_CHECK_INTS(target_th->ec);

    if (th->to_kill || th->status == THREAD_KILLED) {
    if (th == th->vm->ractor.main_thread) {

    thread_debug("rb_thread_kill: %p (%"PRI_THREAD_ID")\n", (void *)th, thread_id_str(th));

    if (th == GET_THREAD()) {
        rb_threadptr_to_kill(th);

        threadptr_check_pending_interrupt_queue(th);
        rb_threadptr_pending_interrupt_enque(th, eKillSignal);
        rb_threadptr_interrupt(th);

rb_thread_to_be_killed(VALUE thread)
    if (th->to_kill || th->status == THREAD_KILLED) {

    if (target_th->status == THREAD_KILLED) return Qnil;

    rb_threadptr_ready(target_th);

    if (target_th->status == THREAD_STOPPED ||
        target_th->status == THREAD_STOPPED_FOREVER) {
        target_th->status = THREAD_RUNNABLE;
2841 "stopping only thread\n\tnote: use sleep to stop forever");
2871 rb_thread_list(
void)
2874 return rb_ractor_thread_list(GET_RACTOR());
2900 return rb_thread_list();
2906 return GET_THREAD()->self;
2919 thread_s_current(
VALUE klass)
2927 return GET_RACTOR()->threads.main->self;
2938 rb_thread_s_main(
VALUE klass)
rb_thread_s_abort_exc(VALUE _)
    return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);

rb_thread_s_abort_exc_set(VALUE self, VALUE val)
    GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);

rb_thread_abort_exc(VALUE thread)
    return RBOOL(rb_thread_ptr(thread)->abort_on_exception);

rb_thread_abort_exc_set(VALUE thread, VALUE val)
    rb_thread_ptr(thread)->abort_on_exception = RTEST(val);

rb_thread_s_report_exc(VALUE _)
    return RBOOL(GET_THREAD()->vm->thread_report_on_exception);

rb_thread_s_report_exc_set(VALUE self, VALUE val)
    GET_THREAD()->vm->thread_report_on_exception = RTEST(val);

rb_thread_s_ignore_deadlock(VALUE _)
    return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);

rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
    GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);

rb_thread_report_exc(VALUE thread)
    return RBOOL(rb_thread_ptr(thread)->report_on_exception);

rb_thread_report_exc_set(VALUE thread, VALUE val)
    rb_thread_ptr(thread)->report_on_exception = RTEST(val);

rb_thread_group(VALUE thread)
    return rb_thread_ptr(thread)->thgroup;
    switch (th->status) {
      case THREAD_RUNNABLE:
        return th->to_kill ? "aborting" : "run";
      case THREAD_STOPPED_FOREVER:
        if (detail) return "sleep_forever";
      case THREAD_STOPPED:

    return th->status == THREAD_KILLED;

rb_thread_status(VALUE thread)
    if (rb_threadptr_dead(target_th)) {
        if (!NIL_P(target_th->ec->errinfo) &&
            !FIXNUM_P(target_th->ec->errinfo)) {

    return rb_str_new2(thread_status_name(target_th, FALSE));

rb_thread_alive_p(VALUE thread)
    return RBOOL(!thread_finished(rb_thread_ptr(thread)));

rb_thread_stop_p(VALUE thread)
    if (rb_threadptr_dead(th)) {
    return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
rb_thread_getname(VALUE thread)
    return rb_thread_ptr(thread)->name;

    target_th->name = name;
    if (threadptr_initialized(target_th)) {
        native_set_another_thread_name(target_th->thread_id, name);

#if USE_NATIVE_THREAD_NATIVE_THREAD_ID

rb_thread_native_thread_id(VALUE thread)
    if (rb_threadptr_dead(target_th)) return Qnil;
    return native_thread_native_thread_id(target_th);

# define rb_thread_native_thread_id rb_f_notimplement

rb_thread_to_s(VALUE thread)
    status = thread_status_name(target_th, TRUE);
    str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
    if (!NIL_P(target_th->name)) {
    if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
#define recursive_key id__recursive_key__

    if (id == recursive_key) {
        return th->ec->local_storage_recursive_hash;

        struct rb_id_table *local_storage = th->ec->local_storage;

        if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {

    return threadptr_local_aref(rb_thread_ptr(thread), id);

    if (!id) return Qnil;

rb_thread_fetch(int argc, VALUE *argv, VALUE self)
    if (block_given && argc == 2) {
        rb_warn("block supersedes default value argument");

    if (id == recursive_key) {
        return target_th->ec->local_storage_recursive_hash;
    else if (id && target_th->ec->local_storage &&
             rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
    else if (block_given) {
    else if (argc == 1) {
        rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);

    if (id == recursive_key) {
        th->ec->local_storage_recursive_hash = val;

        struct rb_id_table *local_storage = th->ec->local_storage;

            if (!local_storage) return Qnil;
            rb_id_table_delete(local_storage, id);

            if (local_storage == NULL) {
                th->ec->local_storage = local_storage = rb_id_table_create(0);
            rb_id_table_insert(local_storage, id, val);

    return threadptr_local_aset(rb_thread_ptr(thread), id, val);
rb_thread_variable_get(VALUE thread, VALUE key)
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
    locals = rb_thread_local_storage(thread);

    locals = rb_thread_local_storage(thread);

    struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;

    if (!id || local_storage == NULL) {
    return RBOOL(rb_id_table_lookup(local_storage, id, &val));

static enum rb_id_table_iterator_result
thread_keys_i(ID key, VALUE value, void *ary)
    return ID_TABLE_CONTINUE;

    return rb_ractor_living_thread_num(GET_RACTOR()) == 1;

rb_thread_keys(VALUE self)
    struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;

    if (local_storage) {
        rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);

rb_thread_variables(VALUE thread)
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
    locals = rb_thread_local_storage(thread);

rb_thread_variable_p(VALUE thread, VALUE key)
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
    locals = rb_thread_local_storage(thread);

rb_thread_priority(VALUE thread)
    return INT2NUM(rb_thread_ptr(thread)->priority);
rb_thread_priority_set(VALUE thread, VALUE prio)
#if USE_NATIVE_THREAD_PRIORITY
    target_th->priority = NUM2INT(prio);
    native_thread_apply_priority(th);

    if (priority > RUBY_THREAD_PRIORITY_MAX) {
        priority = RUBY_THREAD_PRIORITY_MAX;
    }
    else if (priority < RUBY_THREAD_PRIORITY_MIN) {
        priority = RUBY_THREAD_PRIORITY_MIN;
    }
    target_th->priority = (int8_t)priority;

    return INT2NUM(target_th->priority);
#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)

    FD_ZERO(fds->fdset);

    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set))
        size = sizeof(fd_set);

    size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
    size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);

    if (m < sizeof(fd_set)) m = sizeof(fd_set);
    if (o < sizeof(fd_set)) o = sizeof(fd_set);

    memset((char *)fds->fdset + o, 0, m - o);

    FD_SET(n, fds->fdset);

    if (n >= fds->maxfd) return;
    FD_CLR(n, fds->fdset);

    if (n >= fds->maxfd) return 0;
    return FD_ISSET(n, fds->fdset) != 0;

    size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set)) size = sizeof(fd_set);
    memcpy(dst->fdset, src, size);

    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set))
        size = sizeof(fd_set);

    fd_set *r = NULL, *w = NULL, *e = NULL;

    return select(n, r, w, e, timeout);

#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))

#define FD_ZERO(f)      rb_fd_zero(f)
#define FD_SET(i, f)    rb_fd_set((i), (f))
#define FD_CLR(i, f)    rb_fd_clr((i), (f))
#define FD_ISSET(i, f)  rb_fd_isset((i), (f))

#elif defined(_WIN32)

    set->capa = FD_SETSIZE;
    FD_ZERO(set->fdset);

    SOCKET s = rb_w32_get_osfhandle(fd);

    for (i = 0; i < set->fdset->fd_count; i++) {
        if (set->fdset->fd_array[i] == s) {
    if (set->fdset->fd_count >= (unsigned)set->capa) {
        set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
        rb_xrealloc_mul_add(
            set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
    set->fdset->fd_array[set->fdset->fd_count++] = s;

#define FD_ZERO(f)      rb_fd_zero(f)
#define FD_SET(i, f)    rb_fd_set((i), (f))
#define FD_CLR(i, f)    rb_fd_clr((i), (f))
#define FD_ISSET(i, f)  rb_fd_isset((i), (f))

#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)

#ifndef rb_fd_no_init
#define rb_fd_no_init(fds) (void)(fds)
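/*
 * The rb_fdset_t layer exists because a plain fd_set is a fixed-size
 * bitmap: where NFDBITS is available the bitmap grows by whole fd_mask
 * words once descriptors exceed FD_SETSIZE, and on Windows the fd_array
 * of SOCKETs grows instead. FD_ZERO and friends are redefined so the
 * select code below is written once against the rb_fd_* interface.
 */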
wait_retryable(int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
        if (rel && hrtime_update_expire(rel, end)) {
    else if (*result == 0) {
        return !hrtime_update_expire(rel, end);

select_set_free(VALUE p)
    if (set->sigwait_fd >= 0) {
        rb_sigwait_fd_put(set->th, set->sigwait_fd);
        rb_sigwait_fd_migrate(set->th->vm);

static const rb_hrtime_t *
sigwait_timeout(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *orig,
    static const rb_hrtime_t quantum = TIME_QUANTUM_USEC * 1000;

    if (sigwait_fd >= 0 && (!ubf_threads_empty() || BUSY_WAIT_SIGNALS)) {
        *drained_p = check_signals_nogvl(th, sigwait_fd);
        if (!orig || *orig > quantum)

#define sigwait_signals_fd(result, cond, sigwait_fd) \
    (result > 0 && (cond) ? (result--, (sigwait_fd)) : -1)

    rb_hrtime_t *to, rel, end = 0;

    timeout_prepare(&to, &rel, &end, set->timeout);
#define restore_fdset(dst, src) \
    ((dst) ? rb_fd_dup(dst, src) : (void)0)
#define do_select_update() \
    (restore_fdset(set->rset, &set->orig_rset), \
     restore_fdset(set->wset, &set->orig_wset), \
     restore_fdset(set->eset, &set->orig_eset), \

        BLOCKING_REGION(set->th, {
            const rb_hrtime_t *sto;
            sto = sigwait_timeout(set->th, set->sigwait_fd, to, &drained);
            if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
                result = native_fd_select(set->max, set->rset, set->wset,
                                          set->eset,
                                          rb_hrtime2timeval(&tv, sto), set->th);
                if (result < 0) lerrno = errno;
        }, set->sigwait_fd >= 0 ? ubf_sigwait : ubf_select, set->th, TRUE);

        if (set->sigwait_fd >= 0) {
            int fd = sigwait_signals_fd(result,
            (void)check_signals_nogvl(set->th, fd);

        RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec);
    } while (wait_retryable(&result, lerrno, to, end) && do_select_update());

    return (VALUE)result;
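/*
 * The retry loop must restore the caller's fd sets from the saved
 * originals before each retry (do_select_update) because select()
 * destroys its input sets; wait_retryable() then separates EINTR restarts
 * from real timeouts by re-checking the absolute deadline.
 */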
    set.th = GET_THREAD();
    RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
    set.timeout = timeout;

    if (!set.rset && !set.wset && !set.eset) {

    set.sigwait_fd = rb_sigwait_fd_get(set.th);
    if (set.sigwait_fd >= 0) {
        set.rset = init_set_fd(set.sigwait_fd, &set.orig_rset);
        if (set.sigwait_fd >= set.max) {
            set.max = set.sigwait_fd + 1;

#define fd_init_copy(f) do { \
        rb_fd_resize(set.max - 1, set.f); \
        if (&set.orig_##f != set.f) { \
            rb_fd_init_copy(&set.orig_##f, set.f); \
        rb_fd_no_init(&set.orig_##f); \

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

# define POLLERR_SET (0)
rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
    struct pollfd fds[2];
    volatile int lerrno;

    wfd.th = GET_THREAD();

    list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);

    EC_PUSH_TAG(wfd.th->ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        rb_hrtime_t *to, rel, end = 0;
        RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
        timeout_prepare(&to, &rel, &end, timeout);
        fds[0].events = (short)events;
        fds[1].fd = rb_sigwait_fd_get(wfd.th);

        if (fds[1].fd >= 0) {
            fds[1].events = POLLIN;

        BLOCKING_REGION(wfd.th, {
            const rb_hrtime_t *sto;
            sto = sigwait_timeout(wfd.th, fds[1].fd, to, &drained);
            if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
                result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, sto), 0);
                if (result < 0) lerrno = errno;
        }, ubf, wfd.th, TRUE);

        if (fds[1].fd >= 0) {
            int fd1 = sigwait_signals_fd(result, fds[1].revents, fds[1].fd);
            (void)check_signals_nogvl(wfd.th, fd1);
            rb_sigwait_fd_put(wfd.th, fds[1].fd);
            rb_sigwait_fd_migrate(wfd.th->vm);

        RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
    } while (wait_retryable(&result, lerrno, to, end));

    list_del(&wfd.wfd_node);

        EC_JUMP_TAG(wfd.th->ec, state);

    if (fds[0].revents & POLLNVAL) {

    if (fds[0].revents & POLLIN_SET)
        result |= RB_WAITFD_IN;
    if (fds[0].revents & POLLOUT_SET)
        result |= RB_WAITFD_OUT;
    if (fds[0].revents & POLLEX_SET)
        result |= RB_WAITFD_PRI;

    if (fds[0].revents & POLLERR_SET)
select_single(VALUE ptr)
             args->read, args->write, args->except, args->tv);
        args->as.error = errno;

    if (args->read && rb_fd_isset(args->as.fd, args->read))
    if (args->write && rb_fd_isset(args->as.fd, args->write))
    if (args->except && rb_fd_isset(args->as.fd, args->except))

select_single_cleanup(VALUE ptr)
    list_del(&args->wfd.wfd_node);

rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
    args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
    args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
    args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;

    args.wfd.th = GET_THREAD();

    list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);

    r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);

    errno = args.as.error;
#ifdef USE_CONSERVATIVE_STACK_END
rb_gc_set_stack_end(VALUE **stack_end_p)
    *stack_end_p = &stack_end;

    if (rb_signal_buff_size() > 0) {
        threadptr_trap_interrupt(mth);

async_bug_fd(const char *mesg, int errno_arg, int fd)
    size_t n = strlcpy(buff, mesg, sizeof(buff));
    if (n < sizeof(buff)-3) {
    rb_async_bug_errno(buff, errno_arg);

consume_communication_pipe(int fd)
    static char buff[1024];

    result = read(fd, buff, sizeof(buff));
    if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
    else if (result == 0) {
    else if (result < 0) {
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
        async_bug_fd("consume_communication_pipe: read", e, fd);
check_signals_nogvl(rb_thread_t *th, int sigwait_fd)
    int ret = sigwait_fd >= 0 ? consume_communication_pipe(sigwait_fd) : FALSE;

    ubf_wakeup_all_threads();
    ruby_sigchld_handler(vm);
    if (rb_signal_buff_size()) {
        if (th == vm->ractor.main_thread) {
            RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
            threadptr_trap_interrupt(vm->ractor.main_thread);

rb_thread_stop_timer_thread(void)
    if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
        native_reset_timer_thread();

rb_thread_reset_timer_thread(void)
    native_reset_timer_thread();

rb_thread_start_timer_thread(void)
    rb_thread_create_timer_thread();
clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
    if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {

rb_clear_coverages(void)
    VALUE coverages = rb_get_coverages();
    if (RTEST(coverages)) {
#if defined(HAVE_WORKING_FORK)

    vm->ractor.main_ractor = r;
    vm->ractor.main_thread = th;
    r->threads.main = th;
    r->status_ = ractor_created;

    gvl_atfork(rb_ractor_gvl(th->ractor));

    list_for_each(&vm->ractor.set, r, vmlr_node) {
        list_for_each(&r->threads.set, i, lt_node) {

    rb_vm_living_threads_init(vm);

    rb_ractor_atfork(vm, th);

    rb_ractor_sleeper_threads_clear(th->ractor);
    rb_clear_coverages();

    VM_ASSERT(vm->ractor.blocking_cnt == 0);
    VM_ASSERT(vm->ractor.cnt == 1);

    if (th != current_th) {
        rb_mutex_abandon_keeping_mutexes(th);
        rb_mutex_abandon_locking_mutex(th);
        thread_cleanup_func(th, TRUE);

    rb_thread_atfork_internal(th, terminate_atfork_i);
    th->join_list = NULL;
    rb_fiber_atfork(th);

    mjit_child_after_fork();

    if (th != current_th) {
        thread_cleanup_func_before_exec(th);

    rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
thgroup_memsize(const void *ptr)
    return sizeof(struct thgroup);

    0, 0, RUBY_TYPED_FREE_IMMEDIATELY

thgroup_s_alloc(VALUE klass)
    data->group = group;

thgroup_list(VALUE group)
    list_for_each(&r->threads.set, th, lt_node) {
        if (th->thgroup == group) {

thgroup_enclose(VALUE group)

thgroup_enclosed_p(VALUE group)
    return RBOOL(data->enclosed);

    if (data->enclosed) {

    if (data->enclosed) {
                 "can't move from the enclosed thread group");

    target_th->thgroup = group;
thread_shield_mark(void *ptr)

    {thread_shield_mark, 0, 0,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY

thread_shield_alloc(VALUE klass)

#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);

static inline unsigned int
rb_thread_shield_waiting(VALUE b)
    return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);

rb_thread_shield_waiting_inc(VALUE b)
    unsigned int w = rb_thread_shield_waiting(b);
    if (w > THREAD_SHIELD_WAITING_MAX)
    RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
    RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);

rb_thread_shield_waiting_dec(VALUE b)
    unsigned int w = rb_thread_shield_waiting(b);
    RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
    RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
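/*
 * A thread shield keeps its waiter count inline in the object's flag
 * word: the bits from FL_USHIFT up through FL_USER19 form a small bounded
 * counter (capped at THREAD_SHIELD_WAITING_MAX), so wait/release can tell
 * whether anyone is still parked on the shield without extra allocation.
 */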
rb_thread_shield_new(void)
    VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
    return thread_shield;

rb_thread_shield_owned(VALUE self)
    VALUE mutex = GetThreadShieldPtr(self);
    if (!mutex) return false;

    return m->fiber == GET_EC()->fiber_ptr;

rb_thread_shield_wait(VALUE self)
    VALUE mutex = GetThreadShieldPtr(self);

    if (!mutex) return Qfalse;
    m = mutex_ptr(mutex);
    if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
    rb_thread_shield_waiting_inc(self);
    rb_thread_shield_waiting_dec(self);
    return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;

thread_shield_get_mutex(VALUE self)
    VALUE mutex = GetThreadShieldPtr(self);

rb_thread_shield_release(VALUE self)
    VALUE mutex = thread_shield_get_mutex(self);
    return RBOOL(rb_thread_shield_waiting(self) > 0);

rb_thread_shield_destroy(VALUE self)
    VALUE mutex = thread_shield_get_mutex(self);
    return RBOOL(rb_thread_shield_waiting(self) > 0);
    return th->ec->local_storage_recursive_hash;

    th->ec->local_storage_recursive_hash = hash;

recursive_list_access(VALUE sym)
    VALUE hash = threadptr_recursive_hash(th);

        hash = rb_ident_hash_new();
        threadptr_recursive_hash_set(th, hash);

        list = rb_ident_hash_new();

#if SIZEOF_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
    rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))

    if (paired_obj_id) {
        if (!OBJ_ID_EQL(paired_obj_id, pair_list))
            VALUE other_paired_obj = pair_list;

    if (pair_list == Qundef) {
        rb_hash_delete_entry(pair_list, paired_obj);
    rb_hash_delete_entry(list, obj);

    return (*p->func)(p->obj, p->arg, FALSE);
    p.list = recursive_list_access(sym);

    outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);

    if (recursive_check(p.list, p.obj, pairid)) {
        if (outer && !outermost) {
        return (*func)(obj, arg, TRUE);

        enum ruby_tag_type state;

        if (outermost) {
            recursive_push(p.list, ID2SYM(recursive_key), 0);
            recursive_push(p.list, p.obj, p.pairid);
            result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
            if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
            if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
            if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
            if (result == p.list) {
                result = (*func)(obj, arg, TRUE);

            recursive_push(p.list, p.obj, p.pairid);
            EC_PUSH_TAG(GET_EC());
            if ((state = EC_EXEC_TAG()) == TAG_NONE) {
                ret = (*func)(obj, arg, FALSE);
            if (!recursive_pop(p.list, p.obj, p.pairid)) {
            if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);

                 "for %+"PRIsVALUE" in %+"PRIsVALUE,
    return exec_recursive(func, obj, 0, arg, 0);

    return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0);

    return exec_recursive(func, obj, 0, arg, 1);

    return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1);

rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
    return rb_vm_thread_backtrace(argc, argv, thval);

rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
    return rb_vm_thread_backtrace_locations(argc, argv, thval);
Init_Thread_Mutex(void)

#if THREAD_DEBUG < 0

    rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
                                     "stream closed in another thread");

    th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);

    gvl_acquire(gvl, th);

    th->pending_interrupt_queue_checked = 0;

    rb_thread_create_timer_thread();
    rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
                rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
                (void *)GET_THREAD(), (void *)r->threads.main);

    list_for_each(&r->threads.set, th, lt_node) {
        rb_str_catf(msg, "* %+"PRIsVALUE"\n   rb_thread_t:%p "
                    "native:%"PRI_THREAD_ID" int:%u",
                    th->self, (void *)th, thread_id_str(th), th->ec->interrupt_flag);

        if (th->locking_mutex) {
            rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
                        (void *)mutex->fiber, rb_mutex_num_waiting(mutex));

            rb_str_catf(msg, "\n   depended by: tb_thread_id:%p", (void *)list->thread);
    if (GET_THREAD()->vm->thread_ignore_deadlock) return;

    int sleeper_num = rb_ractor_sleeper_thread_num(r);
    int ltnum = rb_ractor_living_thread_num(r);

    if (ltnum > sleeper_num) return;
    if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
    if (patrol_thread && patrol_thread != GET_THREAD()) return;

    list_for_each(&r->threads.set, th, lt_node) {
        if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
        else if (th->locking_mutex) {
            rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
            if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !list_empty(&mutex->waitq))) {

        argv[1] = rb_str_new2("No live threads left. Deadlock?");
        debug_deadlock_check(r, argv[1]);
        rb_ractor_sleeper_threads_dec(GET_RACTOR());
        rb_threadptr_raise(r->threads.main, 2, argv);
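/*
 * The deadlock check is deliberately conservative: it fires only when
 * every living thread in the ractor is a sleeper and none is
 * interruptible or still able to acquire the mutex it waits on. The main
 * thread then receives a fatal "No live threads left. Deadlock?"
 * exception rather than the process aborting outright.
 */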
5622 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5630 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5631 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - cfp->iseq->body->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5652 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5656 long pc = cfp->pc - cfp->iseq->body->iseq_encoded - 1;
VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;

if (!me->def) return NULL;

switch (me->def->type) {
  case VM_METHOD_TYPE_ISEQ: {
    path = rb_iseq_path(iseq);
    beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
    beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
    end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
    end_pos_column = INT2FIX(loc->code_location.end_pos.column);

  case VM_METHOD_TYPE_BMETHOD: {
    const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
    rb_iseq_check(iseq);
    path = rb_iseq_path(iseq);
    loc = &iseq->body->location;
    beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
    beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
    end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
    end_pos_column = INT2FIX(loc->code_location.end_pos.column);

  case VM_METHOD_TYPE_ALIAS:
    me = me->def->body.alias.original_me;
  case VM_METHOD_TYPE_REFINED:
    me = me->def->body.refined.orig_me;
    if (!me) return NULL;

if (resolved_location) {
    resolved_location[0] = path;
    resolved_location[1] = beg_pos_lineno;
    resolved_location[2] = beg_pos_column;
    resolved_location[3] = end_pos_lineno;
    resolved_location[4] = end_pos_column;
}
me = rb_resolve_me_location(me, 0);
VALUE
rb_get_coverages(void)
{
    return GET_VM()->coverages;
}

int
rb_get_coverage_mode(void)
{
    return GET_VM()->coverage_mode;
}

void
rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
{
    GET_VM()->coverages = coverages;
    GET_VM()->me2counter = me2counter;
    GET_VM()->coverage_mode = mode;
}
void
rb_resume_coverages(void)
{
    int mode = GET_VM()->coverage_mode;
    VALUE me2counter = GET_VM()->me2counter;
    rb_add_event_hook2((rb_event_hook_func_t)update_line_coverage,
                       RUBY_EVENT_COVERAGE_LINE, Qnil,
                       RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
    if (mode & COVERAGE_TARGET_BRANCHES) {
        rb_add_event_hook2((rb_event_hook_func_t)update_branch_coverage,
                           RUBY_EVENT_COVERAGE_BRANCH, Qnil,
                           RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
    }
    if (mode & COVERAGE_TARGET_METHODS) {
void
rb_suspend_coverages(void)
{
    if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {

    if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {

void
rb_reset_coverages(void)
{
    rb_clear_coverages();
    rb_iseq_remove_coverage_all();
    GET_VM()->coverages = Qfalse;
}
VALUE
rb_default_coverage(int n)
{
    VALUE coverage = rb_ary_tmp_new_fill(3);
    int mode = GET_VM()->coverage_mode;

    if (mode & COVERAGE_TARGET_LINES) {
        RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);

    if (mode & COVERAGE_TARGET_BRANCHES) {
        branches = rb_ary_tmp_new_fill(2);
        RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
static VALUE
uninterruptible_exit(VALUE v)
{
    rb_ary_pop(cur_th->pending_interrupt_mask_stack);
    cur_th->pending_interrupt_queue_checked = 0;
    if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
        RUBY_VM_SET_INTERRUPT(cur_th->ec);

VALUE interrupt_mask = rb_ident_hash_new();
rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
RUBY_VM_CHECK_INTS(cur_th->ec);
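These last fragments are the interrupt-mask push in rb_uninterruptible() and its rb_ensure()-driven unwind in uninterruptible_exit(). A minimal usage sketch from an extension's point of view, assuming only the public declaration in ruby/thread.h (the callback names are hypothetical):

#include "ruby/ruby.h"
#include "ruby/thread.h"

/* Hypothetical callback: while it runs, asynchronous interrupts such as
 * Thread#raise and Thread#kill are deferred, not delivered. */
static VALUE
critical_body(VALUE arg)
{
    /* ... mutate state that must not be torn by an async exception ... */
    return arg;
}

VALUE
run_critical(VALUE arg)
{
    /* pending interrupts are re-checked right after the mask is popped */
    return rb_uninterruptible(critical_body, arg);
}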
#define RUBY_ASSERT_ALWAYS(expr)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
std::atomic<unsigned> rb_atomic_t
Type that is eligible for atomic operations.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define RUBY_INTERNAL_EVENT_SWITCH
Thread switched.
int rb_remove_event_hook(rb_event_hook_func_t func)
Removes the passed function from the list of event hooks.
#define RUBY_EVENT_THREAD_BEGIN
Encountered a new thread.
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Type of event hooks.
uint32_t rb_event_flag_t
Represents event(s).
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
#define RUBY_EVENT_THREAD_END
Encountered an end of a thread.
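The event-hook symbols above are enough to register a callback from C; a minimal sketch, assuming the public rb_add_event_hook() from ruby/debug.h (the hook name is hypothetical):

#include "ruby/ruby.h"
#include "ruby/debug.h"

/* Hypothetical hook with the rb_event_hook_func_t shape. */
static void
on_thread_event(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
{
    /* evflag is RUBY_EVENT_THREAD_BEGIN or RUBY_EVENT_THREAD_END here */
}

void
install_thread_hook(void)
{
    rb_add_event_hook(on_thread_event,
                      RUBY_EVENT_THREAD_BEGIN | RUBY_EVENT_THREAD_END, Qnil);
}

void
remove_thread_hook(void)
{
    rb_remove_event_hook(on_thread_event);
}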
static void RB_FL_SET_RAW(VALUE obj, VALUE flags)
This is an implementation detail of RB_FL_SET().
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
ID rb_frame_last_func(void)
Returns the ID of the last method in the call stack.
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a method.
int rb_keyword_given_p(void)
Determines if the current method is given a keyword argument.
int rb_block_given_p(void)
Determines if the current method is given a block.
#define rb_str_new2
Old name of rb_str_new_cstr.
#define ALLOC
Old name of RB_ALLOC.
#define T_STRING
Old name of RUBY_T_STRING.
#define xfree
Old name of ruby_xfree.
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
#define OBJ_FROZEN
Old name of RB_OBJ_FROZEN.
#define xrealloc
Old name of ruby_xrealloc.
#define ID2SYM
Old name of RB_ID2SYM.
#define OBJ_FREEZE_RAW
Old name of RB_OBJ_FREEZE_RAW.
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
#define CLASS_OF
Old name of rb_class_of.
#define xmalloc
Old name of ruby_xmalloc.
#define LONG2FIX
Old name of RB_INT2FIX.
#define FIX2INT
Old name of RB_FIX2INT.
#define T_ICLASS
Old name of RUBY_T_ICLASS.
#define T_HASH
Old name of RUBY_T_HASH.
#define Qtrue
Old name of RUBY_Qtrue.
#define NUM2INT
Old name of RB_NUM2INT.
#define INT2NUM
Old name of RB_INT2NUM.
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
#define T_OBJECT
Old name of RUBY_T_OBJECT.
#define NIL_P
Old name of RB_NIL_P.
#define POSFIXABLE
Old name of RB_POSFIXABLE.
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
#define FIXNUM_P
Old name of RB_FIXNUM_P.
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
#define ruby_debug
This variable controls whether the interpreter is in debug mode.
void rb_raise(VALUE exc, const char *fmt,...)
Exception entry point.
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
void rb_bug(const char *fmt,...)
Interpreter panic switch.
VALUE rb_eSystemExit
SystemExit exception.
VALUE rb_eIOError
IOError exception.
VALUE rb_eStandardError
StandardError exception.
VALUE rb_eTypeError
TypeError exception.
void rb_frozen_error_raise(VALUE frozen_obj, const char *fmt,...)
Raises an instance of rb_eFrozenError.
VALUE rb_eFatal
fatal exception.
VALUE rb_eRuntimeError
RuntimeError exception.
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it always reports regardless of the runtime -W flag.
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Creates an instance of the passed exception class.
VALUE rb_eArgError
ArgumentError exception.
VALUE rb_ensure(VALUE(*b_proc)(VALUE), VALUE data1, VALUE(*e_proc)(VALUE), VALUE data2)
An equivalent to ensure clause.
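rb_ensure() is the C-level counterpart of begin/ensure: e_proc runs whether b_proc returns normally or raises. A minimal sketch with hypothetical callbacks:

static VALUE
body(VALUE resource)
{
    return rb_yield(resource);      /* may raise */
}

static VALUE
cleanup(VALUE resource)
{
    /* always runs, like an ensure clause; release the resource here */
    return Qnil;
}

VALUE
with_resource(VALUE resource)
{
    return rb_ensure(body, resource, cleanup, resource);
}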
VALUE rb_eThreadError
ThreadError exception.
void rb_exit(int status)
Terminates the current execution context.
VALUE rb_eSignal
SignalException exception.
VALUE rb_obj_alloc(VALUE klass)
Allocates an instance of the given class.
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
VALUE rb_cThread
Thread class.
VALUE rb_cModule
Module class.
VALUE rb_class_inherited_p(VALUE scion, VALUE ascendant)
Determines if the given two modules are relatives.
double rb_num2dbl(VALUE num)
Converts an instance of rb_cNumeric into C's double.
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance (of possibly descendants) of the given class.
static bool rb_enc_asciicompat(rb_encoding *enc)
Queries if the passed encoding is in some sense compatible with ASCII.
rb_encoding * rb_enc_get(VALUE obj)
Identical to rb_enc_get_index(), except the return type.
static const char * rb_enc_name(rb_encoding *enc)
Queries the (canonical) name of the passed encoding.
VALUE rb_ary_shift(VALUE ary)
Destructively deletes an element from the beginning of the passed array and returns what was deleted.
VALUE rb_ary_dup(VALUE ary)
Duplicates an array.
VALUE rb_ary_delete_at(VALUE ary, long pos)
Destructively removes an element which resides at the specific index of the passed array.
VALUE rb_ary_new(void)
Allocates a new, empty array.
VALUE rb_ary_pop(VALUE ary)
Destructively deletes an element from the end of the passed array and returns what was deleted.
VALUE rb_ary_tmp_new(long capa)
Allocates a "temporary" array.
VALUE rb_ary_clear(VALUE ary)
Destructively removes everything from an array.
VALUE rb_ary_push(VALUE ary, VALUE elem)
Special case of rb_ary_cat() that it adds only one element.
VALUE rb_ary_entry(VALUE ary, long off)
Queries an element of an array.
VALUE rb_ary_join(VALUE ary, VALUE sep)
Recursively stringises the elements of the passed array, flattens that result, then joins the sequenc...
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
VALUE rb_make_exception(int argc, const VALUE *argv)
Constructs an exception object from the list of arguments, in a manner similar to Ruby's raise.
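rb_make_exception() normalises raise-style arguments (a String, an exception class, or class plus message) into a single exception object; a minimal sketch of a raise-like method (names and the empty-arguments fallback are hypothetical):

static VALUE
raise_like(int argc, VALUE *argv, VALUE self)
{
    VALUE exc = rb_make_exception(argc, argv);   /* Qnil when argc == 0 */
    if (NIL_P(exc)) exc = rb_exc_new(rb_eRuntimeError, "unhandled", 9);
    rb_exc_raise(exc);
    UNREACHABLE_RETURN(Qnil);
}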
void rb_obj_call_init_kw(VALUE, int, const VALUE *, int)
Identical to rb_obj_call_init(), except you can specify how to handle the last element of the given a...
void rb_gc_mark(VALUE obj)
Marks an object.
void rb_hash_foreach(VALUE hash, int(*func)(VALUE key, VALUE val, VALUE arg), VALUE arg)
Iterates over a hash.
VALUE rb_hash_lookup2(VALUE hash, VALUE key, VALUE def)
Identical to rb_hash_lookup(), except you can specify what to return on misshits.
VALUE rb_hash_aref(VALUE hash, VALUE key)
Queries the given key in the given hash table.
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
Inserts or replaces ("upsert"s) the objects into the given hash table.
VALUE rb_hash_lookup(VALUE hash, VALUE key)
Identical to rb_hash_aref(), except it always returns RUBY_Qnil for misshits.
VALUE rb_hash_new(void)
Creates a new, empty hash object.
VALUE rb_memory_id(VALUE obj)
Identical to rb_obj_id(), except it hesitates from allocating a new instance of rb_cInteger.
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
void rb_reset_random_seed(void)
Resets the RNG behind rb_genrand_int32()/rb_genrand_real().
VALUE rb_str_new_frozen(VALUE str)
Creates a frozen copy of the string, if necessary.
VALUE rb_str_concat(VALUE dst, VALUE src)
Identical to rb_str_append(), except it also accepts an integer as a codepoint.
VALUE rb_str_new_cstr(const char *ptr)
Identical to rb_str_new(), except it assumes the passed pointer is a pointer to a C string.
VALUE rb_str_cat_cstr(VALUE dst, const char *src)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
int rb_thread_interrupted(VALUE thval)
Checks if the thread's execution was recently interrupted.
VALUE rb_thread_local_aref(VALUE thread, ID key)
This badly named function reads from a Fiber local storage.
VALUE rb_thread_kill(VALUE thread)
Terminates the given thread.
#define RUBY_UBF_IO
A special UBF for blocking IO operations.
VALUE rb_thread_main(void)
Obtains the "main" thread.
VALUE rb_exec_recursive(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
"Recursion" API entry point.
void rb_thread_sleep_forever(void)
Blocks indefinitely.
void rb_thread_fd_close(int fd)
Notifies a closing of a file descriptor to other threads.
void rb_thread_wait_for(struct timeval time)
Identical to rb_thread_sleep(), except it takes struct timeval instead.
VALUE rb_thread_stop(void)
Stops the current thread.
VALUE rb_exec_recursive_paired(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive(), except it checks for the recursion on the ordered pair of { g,...
void rb_unblock_function_t(void *)
This is the type of UBFs.
void rb_thread_atfork_before_exec(void)
:FIXME: situation of this function is unclear.
VALUE rb_thread_create(VALUE(*f)(void *g), void *g)
Creates a Ruby thread that is backended by a C function.
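rb_thread_create() yields an ordinary Thread object whose body is a C function rather than a block; a minimal sketch (the worker and its argument are hypothetical):

static VALUE
worker(void *arg)
{
    const int *n = arg;
    /* runs on its own Ruby thread; still subject to the GVL */
    return INT2NUM(*n * 2);
}

VALUE
spawn_worker(void)
{
    static int input = 21;
    return rb_thread_create(worker, &input);   /* join/value it like any Thread */
}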
void rb_thread_check_ints(void)
Checks for interrupts.
VALUE rb_thread_run(VALUE thread)
This is a rb_thread_wakeup() + rb_thread_schedule() combo.
VALUE rb_thread_wakeup(VALUE thread)
Marks a given thread as eligible for scheduling.
VALUE rb_mutex_unlock(VALUE mutex)
Releases the mutex.
VALUE rb_exec_recursive_paired_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive_outer(), except it checks for the recursion on the ordered pair of { g...
void rb_thread_sleep_deadly(void)
Identical to rb_thread_sleep_forever(), except the thread calling this function is considered "dead" ...
void rb_thread_atfork(void)
A pthread_atfork(3posix)-like API.
VALUE rb_thread_current(void)
Obtains the "current" thread.
int rb_thread_alone(void)
Checks if the thread this function is running is the only thread that is currently alive.
VALUE rb_thread_local_aset(VALUE thread, ID key, VALUE val)
This badly named function writes to a Fiber local storage.
void rb_thread_schedule(void)
Tries to switch to another thread.
#define RUBY_UBF_PROCESS
A special UBF for blocking process operations.
VALUE rb_exec_recursive_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
Identical to rb_exec_recursive(), except it calls f for outermost recursion only.
VALUE rb_thread_wakeup_alive(VALUE thread)
Identical to rb_thread_wakeup(), except it doesn't raise on an already killed thread.
VALUE rb_mutex_lock(VALUE mutex)
Attempts to lock the mutex.
void rb_thread_sleep(int sec)
Blocks for the given period of time.
void rb_timespec_now(struct timespec *ts)
Fills the current time into the given struct.
struct timeval rb_time_timeval(VALUE time)
Converts an instance of rb_cTime to a struct timeval that represents the identical point of time.
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
VALUE rb_class_path(VALUE mod)
Identical to rb_mod_name(), except it returns #<Class: ...> style inspection for anonymous modules.
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
int rb_sourceline(void)
Resembles __LINE__.
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
ID rb_check_id(volatile VALUE *namep)
Detects if the given name is already interned or not.
VALUE rb_to_symbol(VALUE name)
Identical to rb_intern_str(), except it generates a dynamic symbol if necessary.
ID rb_to_id(VALUE str)
Identical to rb_intern(), except it takes an instance of rb_cString.
void rb_define_const(VALUE klass, const char *name, VALUE val)
Defines a Ruby level constant under a namespace.
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Identical to rb_thread_call_without_gvl(), except it does not interface with signals etc.
#define RB_NOGVL_UBF_ASYNC_SAFE
Passing this flag to rb_nogvl() indicates that the passed UBF is async-signal-safe.
#define RB_NOGVL_INTR_FAIL
Passing this flag to rb_nogvl() prevents it from checking interrupts.
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Identical to rb_thread_call_without_gvl(), except it additionally takes "flags" that change the behav...
void * rb_thread_call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Allows the passed function to run in parallel with other Ruby threads.
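The idiomatic use of rb_thread_call_without_gvl() (and of rb_nogvl() when the RB_NOGVL_* flags are needed): run the blocking syscall with the GVL released and pass RUBY_UBF_IO as the unblock function so the call can be interrupted; a minimal sketch around read(2), with hypothetical names:

#include <unistd.h>
#include "ruby/ruby.h"
#include "ruby/thread.h"

struct read_args { int fd; void *buf; size_t len; ssize_t result; };

static void *
read_nogvl(void *p)
{
    struct read_args *a = p;
    a->result = read(a->fd, a->buf, a->len);   /* may block; GVL is released */
    return NULL;
}

VALUE
blocking_read(int fd, void *buf, size_t len)
{
    struct read_args a = { fd, buf, len, -1 };
    rb_thread_call_without_gvl(read_nogvl, &a, RUBY_UBF_IO, NULL);
    return LONG2NUM((long)a.result);
}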
VALUE rb_sprintf(const char *fmt,...)
Ruby's extended sprintf(3).
VALUE rb_str_catf(VALUE dst, const char *fmt,...)
Identical to rb_sprintf(), except it renders the output to the specified object rather than creating ...
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Shim for block function parameters.
VALUE rb_yield(VALUE val)
Yields the block.
void rb_throw_obj(VALUE tag, VALUE val)
Identical to rb_throw(), except it allows arbitrary Ruby object to become a tag.
static int rb_fd_max(const rb_fdset_t *f)
It seems this function has no use.
static fd_set * rb_fd_ptr(const rb_fdset_t *f)
Raw pointer to fd_set.
int rb_fd_select(int nfds, rb_fdset_t *rfds, rb_fdset_t *wfds, rb_fdset_t *efds, struct timeval *timeout)
Waits for multiple file descriptors at once.
int rb_fd_isset(int fd, const rb_fdset_t *f)
Queries if the given FD is in the given set.
void rb_fd_clr(int fd, rb_fdset_t *f)
Releases a specific FD from the given fdset.
void rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
Destructively overwrites an fdset with another.
void rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
Identical to rb_fd_copy(), except it copies an unlimited number of file descriptors.
void rb_fd_term(rb_fdset_t *f)
Destroys the rb_fdset_t, releasing any memory and resources it used.
void rb_fd_zero(rb_fdset_t *f)
Wipes out the current set of FDs.
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
#define ALLOCA_N(type, n)
#define MEMZERO(p, type, n)
Handy macro to erase a region of memory.
#define rb_fd_init
Initialises the given rb_fdset_t.
#define rb_fd_set
Sets the given fd to the rb_fdset_t.
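Together, the rb_fd_* helpers behave like the FD_* macros but grow past FD_SETSIZE; a minimal sketch that waits for one descriptor to become readable (the function name is hypothetical; the declarations traditionally come in via ruby/intern.h, which is an assumption here):

#include <sys/time.h>
#include "ruby/ruby.h"

int
wait_readable(int fd, struct timeval *timeout)
{
    rb_fdset_t set;
    int ready;

    rb_fd_init(&set);                    /* allocate the bitmap */
    rb_fd_set(fd, &set);
    ready = rb_fd_select(fd + 1, &set, NULL, NULL, timeout);
    ready = (ready > 0) && rb_fd_isset(fd, &set);
    rb_fd_term(&set);                    /* release it again */
    return ready;
}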
#define RARRAY_LEN
Just another name of rb_array_len.
#define RARRAY_CONST_PTR_TRANSIENT
Just another name of rb_array_const_ptr_transient.
static int RARRAY_LENINT(VALUE ary)
Identical to rb_array_len(), except it differs for the return type.
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
#define RARRAY_AREF(a, i)
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
#define RBASIC(obj)
Convenient casting macro.
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
#define DATA_PTR(obj)
Convenient getter macro.
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks for the contents for viability as a C stri...
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of takin...
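The three TypedData macros cover the whole life cycle: declare a type descriptor, allocate the wrapper, then fetch the struct back; a minimal sketch (the counter struct and method names are hypothetical):

struct counter { long n; };

static const rb_data_type_t counter_type = {
    "counter",
    { 0, RUBY_TYPED_DEFAULT_FREE, 0 },   /* no mark; free with ruby_xfree */
    0, 0,
    RUBY_TYPED_FREE_IMMEDIATELY,
};

static VALUE
counter_alloc(VALUE klass)
{
    struct counter *c;
    /* allocates a zeroed struct counter inside the wrapper object */
    return TypedData_Make_Struct(klass, struct counter, &counter_type, c);
}

static VALUE
counter_increment(VALUE self)
{
    struct counter *c;
    TypedData_Get_Struct(self, struct counter, &counter_type, c);
    return LONG2NUM(++c->n);
}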
int ruby_native_thread_p(void)
Queries if the thread which calls this function is one of Ruby's threads.
int ruby_snprintf(char *str, size_t n, char const *fmt,...)
Our own locale-insensitive version of snprintf(3).
#define RB_PASS_CALLED_KEYWORDS
Pass keywords if the current method is called with keywords; useful for argument delegation.
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
VALUE rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
Nonblocking wait for the passed "blocker", which is for instance Thread.join or Mutex....
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this functio...
VALUE rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
Wakes up a fiber previously blocked using rb_fiber_scheduler_block().
int rb_thread_fd_select(int nfds, rb_fdset_t *rfds, rb_fdset_t *wfds, rb_fdset_t *efds, struct timeval *timeout)
Waits for multiple file descriptors at once.
#define rb_fd_resize(n, f)
Does nothing (defined for compatibility).
#define RTEST
This is an old name of RB_TEST.
#define _(args)
This was a transition path from K&R to ANSI.
This is the struct that holds necessary info for a wrapped struct.
The data structure which wraps the fd_set bitmap used by select(2).
int maxfd
Maximum allowed number of FDs.
fd_set * fdset
File descriptors buffer.
int capa
Maximum allowed number of FDs.
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Blocks until the current thread obtains a lock.
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Releases a lock.
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Fills the passed lock with an initial value.
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Destroys the passed mutex.
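The rb_nativethread_lock_* functions (and their rb_native_mutex_* aliases) give extensions a raw, GVL-independent mutex; a minimal sketch protecting state touched from non-Ruby threads (names hypothetical):

#include "ruby/thread_native.h"

static rb_nativethread_lock_t lock;
static long shared_count;

void
counter_init(void)
{
    rb_nativethread_lock_initialize(&lock);
}

void
counter_bump(void)
{
    rb_nativethread_lock_lock(&lock);    /* blocks until acquired */
    shared_count++;
    rb_nativethread_lock_unlock(&lock);
}

void
counter_fini(void)
{
    rb_nativethread_lock_destroy(&lock);
}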
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
uintptr_t VALUE
Type that represents a Ruby object.
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.