Ruby 3.1.4p223 (2023-03-30 revision HEAD)
thread.c
1 /**********************************************************************
2 
3  thread.c -
4 
5  $Author$
6 
7  Copyright (C) 2004-2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 /*
12  YARV Thread Design
13 
14  model 1: Userlevel Thread
15  The same as the traditional ruby thread.
16 
17  model 2: Native Thread with Global VM lock
18  Using pthread (or Windows threads); Ruby threads run concurrently.
19 
20  model 3: Native Thread with fine grain lock
21  Using pthread; Ruby threads run concurrently or in parallel.
22 
23  model 4: M:N User:Native threads with Global VM lock
24  A combination of models 1 and 2.
25 
26  model 5: M:N User:Native thread with fine grain lock
27  A combination of models 1 and 3.
28 
29 ------------------------------------------------------------------------
30 
31  model 2:
32  Only the thread that holds the mutex (GVL: Global VM Lock, or Giant VM Lock) can run.
33  At a thread-scheduling point, the running thread releases the GVL. If the running
34  thread attempts a blocking operation, it must release the GVL so that another
35  thread can continue running. After the blocking operation, the thread
36  must check for interrupts (RUBY_VM_CHECK_INTS).
37 
38  Separate VMs can run in parallel.
39 
40  Ruby threads are scheduled by the OS thread scheduler.
41 
42 ------------------------------------------------------------------------
43 
44  model 3:
45  All threads run concurrently or in parallel, and exclusive access control
46  is needed to touch shared objects. For example, a fine-grained lock must
47  be taken on every access to a String or Array object.
48  */
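/*
 * Illustrative sketch (not from the original source): under model 2, a
 * thread may only touch the VM while holding the GVL, so a blocking call
 * is bracketed like this:
 *
 *     acquire GVL            -> may run Ruby code, touch Ruby objects
 *     release GVL            -> just before the blocking operation
 *     blocking operation     -> other Ruby threads may run meanwhile
 *     acquire GVL            -> reacquire before touching the VM again
 *     RUBY_VM_CHECK_INTS()   -> handle interrupts that arrived meanwhile
 *
 * The public entry point for this pattern is rb_thread_call_without_gvl(),
 * documented later in this file.
 */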
49 
50 
51 /*
52  * FD_SET, FD_CLR and FD_ISSET perform a small sanity check when glibc
53  * 2.15 or later is used with _FORTIFY_SOURCE > 0.
54  * However, the check is wrong. Even though Linux's select(2)
55  * supports large fd sizes (> FD_SETSIZE), it wrongly assumes fd is always
56  * less than FD_SETSIZE (i.e. 1024). With HAVE_RB_FD_INIT enabled, it
57  * doesn't work correctly and makes the program abort. Therefore we need to
58  * disable _FORTIFY_SOURCE until glibc fixes it.
59  */
60 #undef _FORTIFY_SOURCE
61 #undef __USE_FORTIFY_LEVEL
62 #define __USE_FORTIFY_LEVEL 0
63 
64 /* for model 2 */
65 
66 #include "ruby/internal/config.h"
67 
68 #ifdef __linux__
69 // Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70 # include <alloca.h>
71 #endif
72 
73 #include "eval_intern.h"
74 #include "gc.h"
75 #include "hrtime.h"
76 #include "internal.h"
77 #include "internal/class.h"
78 #include "internal/cont.h"
79 #include "internal/error.h"
80 #include "internal/hash.h"
81 #include "internal/io.h"
82 #include "internal/object.h"
83 #include "internal/proc.h"
84 #include "ruby/fiber/scheduler.h"
85 #include "internal/signal.h"
86 #include "internal/thread.h"
87 #include "internal/time.h"
88 #include "internal/warnings.h"
89 #include "iseq.h"
90 #include "mjit.h"
91 #include "ruby/debug.h"
92 #include "ruby/io.h"
93 #include "ruby/thread.h"
94 #include "ruby/thread_native.h"
95 #include "timev.h"
96 #include "vm_core.h"
97 #include "ractor_core.h"
98 #include "vm_debug.h"
99 #include "vm_sync.h"
100 
101 #ifndef USE_NATIVE_THREAD_PRIORITY
102 #define USE_NATIVE_THREAD_PRIORITY 0
103 #define RUBY_THREAD_PRIORITY_MAX 3
104 #define RUBY_THREAD_PRIORITY_MIN -3
105 #endif
106 
107 #ifndef THREAD_DEBUG
108 #define THREAD_DEBUG 0
109 #endif
110 
111 static VALUE rb_cThreadShield;
112 
113 static VALUE sym_immediate;
114 static VALUE sym_on_blocking;
115 static VALUE sym_never;
116 
117 enum SLEEP_FLAGS {
118  SLEEP_DEADLOCKABLE = 0x1,  /* take part in ractor deadlock detection */
119  SLEEP_SPURIOUS_CHECK = 0x2 /* keep sleeping after a spurious wakeup */
120 };
121 
122 #define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
123 #define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
124 
125 static inline VALUE
126 rb_thread_local_storage(VALUE thread)
127 {
128  if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
129  rb_ivar_set(thread, idLocals, rb_hash_new());
130  RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
131  }
132  return rb_ivar_get(thread, idLocals);
133 }
134 
135 static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
136 static void sleep_forever(rb_thread_t *th, unsigned int fl);
137 static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker);
138 static int rb_threadptr_dead(rb_thread_t *th);
139 static void rb_check_deadlock(rb_ractor_t *r);
140 static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
141 static const char *thread_status_name(rb_thread_t *th, int detail);
142 static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
143 NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
144 static int consume_communication_pipe(int fd);
145 static int check_signals_nogvl(rb_thread_t *, int sigwait_fd);
146 void rb_sigwait_fd_migrate(rb_vm_t *); /* process.c */
147 
148 #define eKillSignal INT2FIX(0)
149 #define eTerminateSignal INT2FIX(1)
150 static volatile int system_working = 1;
151 
152 struct waiting_fd {
153  struct list_node wfd_node; /* <=> vm.waiting_fds */
154  rb_thread_t *th;
155  int fd;
156 };
157 
158 /********************************************************************************/
159 
160 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
161 
162 struct rb_blocking_region_buffer {
163  enum rb_thread_status prev_status;
164 };
165 
166 static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
167 static void unblock_function_clear(rb_thread_t *th);
168 
169 static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
170  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
171 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
172 
173 #define GVL_UNLOCK_BEGIN(th) do { \
174  RB_GC_SAVE_MACHINE_CONTEXT(th); \
175  gvl_release(rb_ractor_gvl(th->ractor));
176 
177 #define GVL_UNLOCK_END(th) \
178  gvl_acquire(rb_ractor_gvl(th->ractor), th); \
179  rb_ractor_thread_switch(th->ractor, th); \
180 } while(0)
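/*
 * Usage sketch (assumption, not from this file): GVL_UNLOCK_BEGIN opens a
 * do-block and GVL_UNLOCK_END closes it, so the two must appear as a
 * lexical pair within one function:
 *
 *     GVL_UNLOCK_BEGIN(th);
 *     some_blocking_call();   // hypothetical call that runs without the GVL
 *     GVL_UNLOCK_END(th);
 */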
181 
182 #ifdef __GNUC__
183 #ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
184 #define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
185 #else
186 #define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
187 #endif
188 #else
189 #define only_if_constant(expr, notconst) notconst
190 #endif
191 #define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
192  struct rb_blocking_region_buffer __region; \
193  if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
194  /* always return true unless fail_if_interrupted */ \
195  !only_if_constant(fail_if_interrupted, TRUE)) { \
196  exec; \
197  blocking_region_end(th, &__region); \
198  }; \
199 } while(0)
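/*
 * Usage sketch, mirroring the real call in rb_nogvl() later in this file:
 * `exec` runs with the GVL released, and `ubf` is invoked if another
 * thread needs to interrupt the region:
 *
 *     BLOCKING_REGION(th, {
 *         val = func(data1);       // blocking work, GVL not held
 *         saved_errno = errno;
 *     }, ubf, data2, FALSE);
 */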
200 
201 /*
202  * returns true if this thread was spuriously interrupted, false otherwise
203  * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
204  */
205 #define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
206 static inline int
207 vm_check_ints_blocking(rb_execution_context_t *ec)
208 {
209  rb_thread_t *th = rb_ec_thread_ptr(ec);
210 
211  if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
212  if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
213  }
214  else {
215  th->pending_interrupt_queue_checked = 0;
216  RUBY_VM_SET_INTERRUPT(ec);
217  }
218  return rb_threadptr_execute_interrupts(th, 1);
219 }
220 
221 int
222 rb_vm_check_ints_blocking(rb_execution_context_t *ec)
223 {
224  return vm_check_ints_blocking(ec);
225 }
226 
227 /*
228  * poll() is supported by many OSes, but so far Linux is the only
229  * one we know of that supports using poll() in all places select()
230  * would work.
231  */
232 #if defined(HAVE_POLL)
233 # if defined(__linux__)
234 # define USE_POLL
235 # endif
236 # if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
237 # define USE_POLL
238  /* FreeBSD does not set POLLOUT when POLLHUP happens */
239 # define POLLERR_SET (POLLHUP | POLLERR)
240 # endif
241 #endif
242 
243 static void
244 timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
245  const struct timeval *timeout)
246 {
247  if (timeout) {
248  *rel = rb_timeval2hrtime(timeout);
249  *end = rb_hrtime_add(rb_hrtime_now(), *rel);
250  *to = rel;
251  }
252  else {
253  *to = 0;
254  }
255 }
256 
257 #if THREAD_DEBUG
258 #ifdef HAVE_VA_ARGS_MACRO
259 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
260 #define thread_debug(...) rb_thread_debug(__FILE__, __LINE__, __VA_ARGS__)
261 #define POSITION_FORMAT "%s:%d:"
262 #define POSITION_ARGS ,file, line
263 #else
264 void rb_thread_debug(const char *fmt, ...);
265 #define thread_debug rb_thread_debug
266 #define POSITION_FORMAT
267 #define POSITION_ARGS
268 #endif
269 
270 # ifdef NON_SCALAR_THREAD_ID
271 #define fill_thread_id_string ruby_fill_thread_id_string
272 const char *
273 ruby_fill_thread_id_string(rb_nativethread_id_t thid, rb_thread_id_string_t buf)
274 {
275  extern const char ruby_digitmap[];
276  size_t i;
277 
278  buf[0] = '0';
279  buf[1] = 'x';
280  for (i = 0; i < sizeof(thid); i++) {
281 # ifdef LITTLE_ENDIAN
282  size_t j = sizeof(thid) - i - 1;
283 # else
284  size_t j = i;
285 # endif
286  unsigned char c = (unsigned char)((char *)&thid)[j];
287  buf[2 + i * 2] = ruby_digitmap[(c >> 4) & 0xf];
288  buf[3 + i * 2] = ruby_digitmap[c & 0xf];
289  }
290  buf[sizeof(rb_thread_id_string_t)-1] = '\0';
291  return buf;
292 }
293 # define fill_thread_id_str(th) fill_thread_id_string((th)->thread_id, (th)->thread_id_string)
294 # define thread_id_str(th) ((th)->thread_id_string)
295 # define PRI_THREAD_ID "s"
296 # endif
297 
298 # if THREAD_DEBUG < 0
299 static int rb_thread_debug_enabled;
300 
301 /*
302  * call-seq:
303  * Thread.DEBUG -> num
304  *
305  * Returns the thread debug level. Available only if compiled with
306  * THREAD_DEBUG=-1.
307  */
308 
309 static VALUE
310 rb_thread_s_debug(VALUE _)
311 {
312  return INT2NUM(rb_thread_debug_enabled);
313 }
314 
315 /*
316  * call-seq:
317  * Thread.DEBUG = num
318  *
319  * Sets the thread debug level. Available only if compiled with
320  * THREAD_DEBUG=-1.
321  */
322 
323 static VALUE
324 rb_thread_s_debug_set(VALUE self, VALUE val)
325 {
326  rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
327  return val;
328 }
329 # else
330 # define rb_thread_debug_enabled THREAD_DEBUG
331 # endif
332 #else
333 #define thread_debug if(0)printf
334 #endif
335 
336 #ifndef fill_thread_id_str
337 # define fill_thread_id_string(thid, buf) ((void *)(uintptr_t)(thid))
338 # define fill_thread_id_str(th) (void)0
339 # define thread_id_str(th) ((void *)(uintptr_t)(th)->thread_id)
340 # define PRI_THREAD_ID "p"
341 #endif
342 
343 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start));
344 void ruby_sigchld_handler(rb_vm_t *); /* signal.c */
345 
346 static void
347 ubf_sigwait(void *ignore)
348 {
349  rb_thread_wakeup_timer_thread(0);
350 }
351 
352 #include THREAD_IMPL_SRC
353 
354 #if defined(_WIN32)
355 
356 #define DEBUG_OUT() \
357  WaitForSingleObject(&debug_mutex, INFINITE); \
358  printf(POSITION_FORMAT"%#lx - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
359  fflush(stdout); \
360  ReleaseMutex(&debug_mutex);
361 
362 #elif defined(HAVE_PTHREAD_H)
363 
364 #define DEBUG_OUT() \
365  pthread_mutex_lock(&debug_mutex); \
366  printf(POSITION_FORMAT"%"PRI_THREAD_ID" - %s" POSITION_ARGS, \
367  fill_thread_id_string(pthread_self(), thread_id_string), buf); \
368  fflush(stdout); \
369  pthread_mutex_unlock(&debug_mutex);
370 
371 #endif
372 
373 /*
374  * TODO: somebody with win32 knowledge should be able to get rid of the
375  * timer thread by busy-waiting on signals. And it should be possible
376  * to make the GVL in thread_pthread.c platform-independent.
377  */
378 #ifndef BUSY_WAIT_SIGNALS
379 # define BUSY_WAIT_SIGNALS (0)
380 #endif
381 
382 #ifndef USE_EVENTFD
383 # define USE_EVENTFD (0)
384 #endif
385 
386 #if THREAD_DEBUG
387 static int debug_mutex_initialized = 1;
388 static rb_nativethread_lock_t debug_mutex;
389 
390 void
391 rb_thread_debug(
392 #ifdef HAVE_VA_ARGS_MACRO
393  const char *file, int line,
394 #endif
395  const char *fmt, ...)
396 {
397  va_list args;
398  char buf[BUFSIZ];
399 #ifdef NON_SCALAR_THREAD_ID
400  rb_thread_id_string_t thread_id_string;
401 #endif
402 
403  if (!rb_thread_debug_enabled) return;
404 
405  if (debug_mutex_initialized == 1) {
406  debug_mutex_initialized = 0;
407  rb_native_mutex_initialize(&debug_mutex);
408  }
409 
410  va_start(args, fmt);
411  vsnprintf(buf, BUFSIZ, fmt, args);
412  va_end(args);
413 
414  DEBUG_OUT();
415 }
416 #endif
417 
418 #include "thread_sync.c"
419 
420 void
421 rb_vm_gvl_destroy(rb_global_vm_lock_t *gvl)
422 {
423  gvl_release(gvl);
424  gvl_destroy(gvl);
425 }
426 
427 void
428 rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
429 {
430  rb_native_mutex_initialize(lock);
431 }
432 
433 void
434 rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
435 {
436  rb_native_mutex_destroy(lock);
437 }
438 
439 void
440 rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
441 {
442  rb_native_mutex_lock(lock);
443 }
444 
445 void
446 rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
447 {
448  rb_native_mutex_unlock(lock);
449 }
450 
451 static int
452 unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
453 {
454  do {
455  if (fail_if_interrupted) {
456  if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
457  return FALSE;
458  }
459  }
460  else {
461  RUBY_VM_CHECK_INTS(th->ec);
462  }
463 
464  rb_native_mutex_lock(&th->interrupt_lock);
465  } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
466  (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
467 
468  VM_ASSERT(th->unblock.func == NULL);
469 
470  th->unblock.func = func;
471  th->unblock.arg = arg;
472  rb_native_mutex_unlock(&th->interrupt_lock);
473 
474  return TRUE;
475 }
476 
477 static void
478 unblock_function_clear(rb_thread_t *th)
479 {
480  rb_native_mutex_lock(&th->interrupt_lock);
481  th->unblock.func = 0;
482  rb_native_mutex_unlock(&th->interrupt_lock);
483 }
484 
485 static void
486 rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
487 {
488  rb_native_mutex_lock(&th->interrupt_lock);
489 
490  if (trap) {
491  RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
492  }
493  else {
494  RUBY_VM_SET_INTERRUPT(th->ec);
495  }
496  if (th->unblock.func != NULL) {
497  (th->unblock.func)(th->unblock.arg);
498  }
499  else {
500  /* none */
501  }
502  rb_native_mutex_unlock(&th->interrupt_lock);
503 }
504 
505 void
506 rb_threadptr_interrupt(rb_thread_t *th)
507 {
508  rb_threadptr_interrupt_common(th, 0);
509 }
510 
511 static void
512 threadptr_trap_interrupt(rb_thread_t *th)
513 {
514  rb_threadptr_interrupt_common(th, 1);
515 }
516 
517 static void
518 terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
519 {
520  rb_thread_t *th = 0;
521 
522  list_for_each(&r->threads.set, th, lt_node) {
523  if (th != main_thread) {
524  thread_debug("terminate_all: begin (thid: %"PRI_THREAD_ID", status: %s)\n",
525  thread_id_str(th), thread_status_name(th, TRUE));
526  rb_threadptr_pending_interrupt_enque(th, eTerminateSignal);
527  rb_threadptr_interrupt(th);
528  thread_debug("terminate_all: end (thid: %"PRI_THREAD_ID", status: %s)\n",
529  thread_id_str(th), thread_status_name(th, TRUE));
530  }
531  else {
532  thread_debug("terminate_all: main thread (%p)\n", (void *)th);
533  }
534  }
535 }
536 
537 static void
538 rb_threadptr_join_list_wakeup(rb_thread_t *thread)
539 {
540  while (thread->join_list) {
541  struct rb_waiting_list *join_list = thread->join_list;
542 
543  // Consume the entry from the join list:
544  thread->join_list = join_list->next;
545 
546  rb_thread_t *target_thread = join_list->thread;
547 
548  if (target_thread->scheduler != Qnil && rb_fiberptr_blocking(join_list->fiber) == 0) {
549  rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
550  }
551  else {
552  rb_threadptr_interrupt(target_thread);
553 
554  switch (target_thread->status) {
555  case THREAD_STOPPED:
556  case THREAD_STOPPED_FOREVER:
557  target_thread->status = THREAD_RUNNABLE;
558  default:
559  break;
560  }
561  }
562  }
563 }
564 
565 void
566 rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
567 {
568  while (th->keeping_mutexes) {
569  rb_mutex_t *mutex = th->keeping_mutexes;
570  th->keeping_mutexes = mutex->next_mutex;
571 
572  /* rb_warn("mutex #<%p> remains to be locked by terminated thread", (void *)mutexes); */
573 
574  const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
575  if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
576  }
577 }
578 
579 void
580 rb_thread_terminate_all(rb_thread_t *th)
581 {
582  rb_ractor_t *cr = th->ractor;
583  rb_execution_context_t * volatile ec = th->ec;
584  volatile int sleeping = 0;
585 
586  if (cr->threads.main != th) {
587  rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
588  (void *)cr->threads.main, (void *)th);
589  }
590 
591  /* unlock all locking mutexes */
592  rb_threadptr_unlock_all_locking_mutexes(th);
593 
594  EC_PUSH_TAG(ec);
595  if (EC_EXEC_TAG() == TAG_NONE) {
596  retry:
597  thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
598  terminate_all(cr, th);
599 
600  while (rb_ractor_living_thread_num(cr) > 1) {
601  rb_hrtime_t rel = RB_HRTIME_PER_SEC;
602  /*
603  * The thread-exiting routine in thread_start_func_2 notifies
604  * us when the last sub-thread exits.
605  */
606  sleeping = 1;
607  native_sleep(th, &rel);
608  RUBY_VM_CHECK_INTS_BLOCKING(ec);
609  sleeping = 0;
610  }
611  }
612  else {
613  /*
614  * When an exception is caught (e.g. Ctrl+C), broadcast the
615  * kill request again to ensure that all threads are killed even
616  * if they are blocked on sleep, a mutex, etc.
617  */
618  if (sleeping) {
619  sleeping = 0;
620  goto retry;
621  }
622  }
623  EC_POP_TAG();
624 }
625 
626 void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
627 
628 static void
629 thread_cleanup_func_before_exec(void *th_ptr)
630 {
631  rb_thread_t *th = th_ptr;
632  th->status = THREAD_KILLED;
633 
634  // The thread stack doesn't exist in the forked process:
635  th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
636 
637  rb_threadptr_root_fiber_terminate(th);
638 }
639 
640 static void
641 thread_cleanup_func(void *th_ptr, int atfork)
642 {
643  rb_thread_t *th = th_ptr;
644 
645  th->locking_mutex = Qfalse;
646  thread_cleanup_func_before_exec(th_ptr);
647 
648  /*
649  * Unfortunately, we can't release native threading resources at fork
650  * because libc may be in an unstable locking state; touching
651  * a threading resource may therefore cause a deadlock.
652  *
653  * FIXME: Skipping native_mutex_destroy (pthread_mutex_destroy) is safe
654  * with NPTL, but native_thread_destroy calls pthread_cond_destroy,
655  * which calls free(3), so there is a small memory leak at fork here.
656  */
657  if (atfork)
658  return;
659 
660  rb_native_mutex_destroy(&th->interrupt_lock);
661  native_thread_destroy(th);
662 }
663 
664 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
665 static VALUE rb_thread_to_s(VALUE thread);
666 
667 void
668 ruby_thread_init_stack(rb_thread_t *th)
669 {
670  native_thread_init_stack(th);
671 }
672 
673 const VALUE *
674 rb_vm_proc_local_ep(VALUE proc)
675 {
676  const VALUE *ep = vm_proc_ep(proc);
677 
678  if (ep) {
679  return rb_vm_ep_local_ep(ep);
680  }
681  else {
682  return NULL;
683  }
684 }
685 
686 // for ractor, defined in vm.c
687 VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
688  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
689 
690 static VALUE
691 thread_do_start_proc(rb_thread_t *th)
692 {
693  VALUE args = th->invoke_arg.proc.args;
694  const VALUE *args_ptr;
695  int args_len;
696  VALUE procval = th->invoke_arg.proc.proc;
697  rb_proc_t *proc;
698  GetProcPtr(procval, proc);
699 
700  th->ec->errinfo = Qnil;
701  th->ec->root_lep = rb_vm_proc_local_ep(procval);
702  th->ec->root_svar = Qfalse;
703 
704  vm_check_ints_blocking(th->ec);
705 
706  if (th->invoke_type == thread_invoke_type_ractor_proc) {
707  VALUE self = rb_ractor_self(th->ractor);
708  VM_ASSERT(FIXNUM_P(args));
709  args_len = FIX2INT(args);
710  args_ptr = ALLOCA_N(VALUE, args_len);
711  rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
712  vm_check_ints_blocking(th->ec);
713 
714  return rb_vm_invoke_proc_with_self(
715  th->ec, proc, self,
716  args_len, args_ptr,
717  th->invoke_arg.proc.kw_splat,
718  VM_BLOCK_HANDLER_NONE
719  );
720  }
721  else {
722  args_len = RARRAY_LENINT(args);
723  if (args_len < 8) {
724  /* free proc.args if the length is small enough */
725  args_ptr = ALLOCA_N(VALUE, args_len);
726  MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR_TRANSIENT(args), VALUE, args_len);
727  th->invoke_arg.proc.args = Qnil;
728  }
729  else {
730  args_ptr = RARRAY_CONST_PTR(args);
731  }
732 
733  vm_check_ints_blocking(th->ec);
734 
735  return rb_vm_invoke_proc(
736  th->ec, proc,
737  args_len, args_ptr,
738  th->invoke_arg.proc.kw_splat,
739  VM_BLOCK_HANDLER_NONE
740  );
741  }
742 }
743 
744 static void
745 thread_do_start(rb_thread_t *th)
746 {
747  native_set_thread_name(th);
748  VALUE result = Qundef;
749 
750  EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
751 
752  switch (th->invoke_type) {
753  case thread_invoke_type_proc:
754  result = thread_do_start_proc(th);
755  break;
756 
757  case thread_invoke_type_ractor_proc:
758  result = thread_do_start_proc(th);
759  rb_ractor_atexit(th->ec, result);
760  break;
761 
762  case thread_invoke_type_func:
763  result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
764  break;
765 
766  case thread_invoke_type_none:
767  rb_bug("unreachable");
768  }
769 
770  rb_fiber_scheduler_set(Qnil);
771 
772  th->value = result;
773 
774  EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
775 }
776 
777 void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
778 
779 static int
780 thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
781 {
782  STACK_GROW_DIR_DETECTION;
783  enum ruby_tag_type state;
784  VALUE errinfo = Qnil;
785  size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
786  rb_thread_t *ractor_main_th = th->ractor->threads.main;
787  VALUE * vm_stack = NULL;
788 
789  VM_ASSERT(th != th->vm->ractor.main_thread);
790  thread_debug("thread start: %p\n", (void *)th);
791 
792  // setup native thread
793  gvl_acquire(rb_ractor_gvl(th->ractor), th);
794  ruby_thread_set_native(th);
795 
796  // setup ractor
797  if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
798  RB_VM_LOCK();
799  {
800  rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
801  rb_ractor_t *r = th->ractor;
802  r->r_stdin = rb_io_prep_stdin();
803  r->r_stdout = rb_io_prep_stdout();
804  r->r_stderr = rb_io_prep_stderr();
805  }
806  RB_VM_UNLOCK();
807  }
808 
809  // This assertion does not pass in a win32 environment. Check it later.
810  // VM_ASSERT((size * sizeof(VALUE)) <= th->ec->machine.stack_maxsize);
811 
812  // setup VM and machine stack
813  vm_stack = alloca(size * sizeof(VALUE));
814  VM_ASSERT(vm_stack);
815 
816  rb_ec_initialize_vm_stack(th->ec, vm_stack, size);
817  th->ec->machine.stack_start = STACK_DIR_UPPER(vm_stack + size, vm_stack);
818  th->ec->machine.stack_maxsize -= size * sizeof(VALUE);
819 
820  thread_debug("thread start (get lock): %p\n", (void *)th);
821 
822  // Ensure that we are not joinable.
823  VM_ASSERT(th->value == Qundef);
824 
825  EC_PUSH_TAG(th->ec);
826 
827  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
828  SAVE_ROOT_JMPBUF(th, thread_do_start(th));
829  }
830  else {
831  errinfo = th->ec->errinfo;
832 
833  VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
834  if (!NIL_P(exc)) errinfo = exc;
835 
836  if (state == TAG_FATAL) {
837  if (th->invoke_type == thread_invoke_type_ractor_proc) {
838  rb_ractor_atexit(th->ec, Qnil);
839  }
840  /* fatal error within this thread, need to stop whole script */
841  }
842  else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
843  /* exit on main_thread. */
844  }
845  else {
846  if (th->report_on_exception) {
847  VALUE mesg = rb_thread_to_s(th->self);
848  rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
849  rb_write_error_str(mesg);
850  rb_ec_error_print(th->ec, errinfo);
851  }
852 
853  if (th->invoke_type == thread_invoke_type_ractor_proc) {
854  rb_ractor_atexit_exception(th->ec);
855  }
856 
857  if (th->vm->thread_abort_on_exception ||
858  th->abort_on_exception || RTEST(ruby_debug)) {
859  /* exit on main_thread */
860  }
861  else {
862  errinfo = Qnil;
863  }
864  }
865  th->value = Qnil;
866  }
867 
868  // The thread is effectively finished and can be joined.
869  VM_ASSERT(th->value != Qundef);
870 
871  rb_threadptr_join_list_wakeup(th);
872  rb_threadptr_unlock_all_locking_mutexes(th);
873 
874  if (th->invoke_type == thread_invoke_type_ractor_proc) {
875  rb_thread_terminate_all(th);
876  rb_ractor_teardown(th->ec);
877  }
878 
879  th->status = THREAD_KILLED;
880  thread_debug("thread end: %p\n", (void *)th);
881 
882  if (th->vm->ractor.main_thread == th) {
883  ruby_stop(0);
884  }
885 
886  if (RB_TYPE_P(errinfo, T_OBJECT)) {
887  /* treat as a normal error object */
888  rb_threadptr_raise(ractor_main_th, 1, &errinfo);
889  }
890 
891  EC_POP_TAG();
892 
893  rb_ec_clear_current_thread_trace_func(th->ec);
894 
895  /* locking_mutex must be Qfalse */
896  if (th->locking_mutex != Qfalse) {
897  rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
898  (void *)th, th->locking_mutex);
899  }
900 
901  if (ractor_main_th->status == THREAD_KILLED &&
902  th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
903  /* I'm the last thread; wake up the main thread from rb_thread_terminate_all */
904  rb_threadptr_interrupt(ractor_main_th);
905  }
906 
907  rb_check_deadlock(th->ractor);
908 
909  rb_fiber_close(th->ec->fiber_ptr);
910 
911  thread_cleanup_func(th, FALSE);
912  VM_ASSERT(th->ec->vm_stack == NULL);
913 
914  if (th->invoke_type == thread_invoke_type_ractor_proc) {
915  // after rb_ractor_living_threads_remove()
916  // GC will happen anytime and this ractor can be collected (and destroy GVL).
917  // So gvl_release() should be before it.
918  gvl_release(rb_ractor_gvl(th->ractor));
919  rb_ractor_living_threads_remove(th->ractor, th);
920  }
921  else {
922  rb_ractor_living_threads_remove(th->ractor, th);
923  gvl_release(rb_ractor_gvl(th->ractor));
924  }
925 
926  return 0;
927 }
928 
929 struct thread_create_params {
930  enum thread_invoke_type type;
931 
932  // for normal proc thread
933  VALUE args;
934  VALUE proc;
935 
936  // for ractor
937  rb_ractor_t *g;
938 
939  // for func
940  VALUE (*fn)(void *);
941 };
942 
943 static VALUE
944 thread_create_core(VALUE thval, struct thread_create_params *params)
945 {
946  rb_execution_context_t *ec = GET_EC();
947  rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
948  int err;
949 
950  if (OBJ_FROZEN(current_th->thgroup)) {
952  "can't start a new thread (frozen ThreadGroup)");
953  }
954 
955  switch (params->type) {
956  case thread_invoke_type_proc:
957  th->invoke_type = thread_invoke_type_proc;
958  th->invoke_arg.proc.args = params->args;
959  th->invoke_arg.proc.proc = params->proc;
960  th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
961  break;
962 
963  case thread_invoke_type_ractor_proc:
964 #if RACTOR_CHECK_MODE > 0
965  rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
966 #endif
967  th->invoke_type = thread_invoke_type_ractor_proc;
968  th->ractor = params->g;
969  th->ractor->threads.main = th;
970  th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
971  th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
972  th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
973  rb_ractor_send_parameters(ec, params->g, params->args);
974  break;
975 
976  case thread_invoke_type_func:
977  th->invoke_type = thread_invoke_type_func;
978  th->invoke_arg.func.func = params->fn;
979  th->invoke_arg.func.arg = (void *)params->args;
980  break;
981 
982  default:
983  rb_bug("unreachable");
984  }
985 
986  th->priority = current_th->priority;
987  th->thgroup = current_th->thgroup;
988 
989  th->pending_interrupt_queue = rb_ary_tmp_new(0);
990  th->pending_interrupt_queue_checked = 0;
991  th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
992  RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
993 
994  rb_native_mutex_initialize(&th->interrupt_lock);
995 
996  RUBY_DEBUG_LOG("r:%u th:%p", rb_ractor_id(th->ractor), (void *)th);
997 
998  rb_ractor_living_threads_insert(th->ractor, th);
999 
1000  /* kick thread */
1001  err = native_thread_create(th);
1002  if (err) {
1003  th->status = THREAD_KILLED;
1004  rb_ractor_living_threads_remove(th->ractor, th);
1005  rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
1006  }
1007  return thval;
1008 }
1009 
1010 #define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
1011 
1012 /*
1013  * call-seq:
1014  * Thread.new { ... } -> thread
1015  * Thread.new(*args, &proc) -> thread
1016  * Thread.new(*args) { |args| ... } -> thread
1017  *
1018  * Creates a new thread executing the given block.
1019  *
1020  * Any +args+ given to ::new will be passed to the block:
1021  *
1022  * arr = []
1023  * a, b, c = 1, 2, 3
1024  * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
1025  * arr #=> [1, 2, 3]
1026  *
1027  * A ThreadError exception is raised if ::new is called without a block.
1028  *
1029  * If you're going to subclass Thread, be sure to call super in your
1030  * +initialize+ method, otherwise a ThreadError will be raised.
1031  */
1032 static VALUE
1033 thread_s_new(int argc, VALUE *argv, VALUE klass)
1034 {
1035  rb_thread_t *th;
1036  VALUE thread = rb_thread_alloc(klass);
1037 
1038  if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
1039  rb_raise(rb_eThreadError, "can't alloc thread");
1040  }
1041 
1042  rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
1043  th = rb_thread_ptr(thread);
1044  if (!threadptr_initialized(th)) {
1045  rb_raise(rb_eThreadError, "uninitialized thread - check `%"PRIsVALUE"#initialize'",
1046  klass);
1047  }
1048  return thread;
1049 }
1050 
1051 /*
1052  * call-seq:
1053  * Thread.start([args]*) {|args| block } -> thread
1054  * Thread.fork([args]*) {|args| block } -> thread
1055  *
1056  * Basically the same as ::new. However, if class Thread is subclassed, then
1057  * calling +start+ in that subclass will not invoke the subclass's
1058  * +initialize+ method.
1059  */
1060 
1061 static VALUE
1062 thread_start(VALUE klass, VALUE args)
1063 {
1064  struct thread_create_params params = {
1065  .type = thread_invoke_type_proc,
1066  .args = args,
1067  .proc = rb_block_proc(),
1068  };
1069  return thread_create_core(rb_thread_alloc(klass), &params);
1070 }
1071 
1072 static VALUE
1073 threadptr_invoke_proc_location(rb_thread_t *th)
1074 {
1075  if (th->invoke_type == thread_invoke_type_proc) {
1076  return rb_proc_location(th->invoke_arg.proc.proc);
1077  }
1078  else {
1079  return Qnil;
1080  }
1081 }
1082 
1083 /* :nodoc: */
1084 static VALUE
1085 thread_initialize(VALUE thread, VALUE args)
1086 {
1087  rb_thread_t *th = rb_thread_ptr(thread);
1088 
1089  if (!rb_block_given_p()) {
1090  rb_raise(rb_eThreadError, "must be called with a block");
1091  }
1092  else if (th->invoke_type != thread_invoke_type_none) {
1093  VALUE loc = threadptr_invoke_proc_location(th);
1094  if (!NIL_P(loc)) {
1096  "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
1097  RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
1098  }
1099  else {
1100  rb_raise(rb_eThreadError, "already initialized thread");
1101  }
1102  }
1103  else {
1104  struct thread_create_params params = {
1105  .type = thread_invoke_type_proc,
1106  .args = args,
1107  .proc = rb_block_proc(),
1108  };
1109  return thread_create_core(thread, &params);
1110  }
1111 }
1112 
1113 VALUE
1114 rb_thread_create(VALUE (*fn)(void *), void *arg)
1115 {
1116  struct thread_create_params params = {
1117  .type = thread_invoke_type_func,
1118  .fn = fn,
1119  .args = (VALUE)arg,
1120  };
1121  return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1122 }
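/*
 * Usage sketch (hypothetical names, not from this file): rb_thread_create()
 * runs a plain C function on a new Ruby thread; its return value becomes
 * the thread's value.
 */
#if 0
static VALUE
worker_func(void *arg)
{
    int *n = arg;
    return INT2NUM(*n * 2);     /* observable via Thread#value */
}

static VALUE
spawn_worker(VALUE self)
{
    static int input = 21;
    VALUE th = rb_thread_create(worker_func, &input);
    return rb_funcall(th, rb_intern("value"), 0);   /* => 42 */
}
#endif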
1123 
1124 VALUE
1125 rb_thread_create_ractor(rb_ractor_t *g, VALUE args, VALUE proc)
1126 {
1127  struct thread_create_params params = {
1128  .type = thread_invoke_type_ractor_proc,
1129  .g = g,
1130  .args = args,
1131  .proc = proc,
1132  };
1133  return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1134 }
1135 
1136 
1137 struct join_arg {
1138  struct rb_waiting_list *waiter;
1139  rb_thread_t *target;
1140  VALUE timeout;
1141  rb_hrtime_t *limit;
1142 };
1143 
1144 static VALUE
1145 remove_from_join_list(VALUE arg)
1146 {
1147  struct join_arg *p = (struct join_arg *)arg;
1148  rb_thread_t *target_thread = p->target;
1149 
1150  if (target_thread->status != THREAD_KILLED) {
1151  struct rb_waiting_list **join_list = &target_thread->join_list;
1152 
1153  while (*join_list) {
1154  if (*join_list == p->waiter) {
1155  *join_list = (*join_list)->next;
1156  break;
1157  }
1158 
1159  join_list = &(*join_list)->next;
1160  }
1161  }
1162 
1163  return Qnil;
1164 }
1165 
1166 static rb_hrtime_t *double2hrtime(rb_hrtime_t *, double);
1167 
1168 static int
1169 thread_finished(rb_thread_t *th)
1170 {
1171  return th->status == THREAD_KILLED || th->value != Qundef;
1172 }
1173 
1174 static VALUE
1175 thread_join_sleep(VALUE arg)
1176 {
1177  struct join_arg *p = (struct join_arg *)arg;
1178  rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1179  rb_hrtime_t end = 0, *limit = p->limit;
1180 
1181  if (limit) {
1182  end = rb_hrtime_add(*limit, rb_hrtime_now());
1183  }
1184 
1185  while (!thread_finished(target_th)) {
1186  VALUE scheduler = rb_fiber_scheduler_current();
1187 
1188  if (scheduler != Qnil) {
1189  rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
1190  }
1191  else if (!limit) {
1192  th->status = THREAD_STOPPED_FOREVER;
1193  rb_ractor_sleeper_threads_inc(th->ractor);
1194  rb_check_deadlock(th->ractor);
1195  native_sleep(th, 0);
1196  rb_ractor_sleeper_threads_dec(th->ractor);
1197  }
1198  else {
1199  if (hrtime_update_expire(limit, end)) {
1200  thread_debug("thread_join: timeout (thid: %"PRI_THREAD_ID")\n",
1201  thread_id_str(target_th));
1202  return Qfalse;
1203  }
1204  th->status = THREAD_STOPPED;
1205  native_sleep(th, limit);
1206  }
1207  RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1208  th->status = THREAD_RUNNABLE;
1209  thread_debug("thread_join: interrupted (thid: %"PRI_THREAD_ID", status: %s)\n",
1210  thread_id_str(target_th), thread_status_name(target_th, TRUE));
1211  }
1212  return Qtrue;
1213 }
1214 
1215 static VALUE
1216 thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
1217 {
1218  rb_execution_context_t *ec = GET_EC();
1219  rb_thread_t *th = ec->thread_ptr;
1220  rb_fiber_t *fiber = ec->fiber_ptr;
1221 
1222  if (th == target_th) {
1223  rb_raise(rb_eThreadError, "Target thread must not be current thread");
1224  }
1225 
1226  if (th->ractor->threads.main == target_th) {
1227  rb_raise(rb_eThreadError, "Target thread must not be main thread");
1228  }
1229 
1230  thread_debug("thread_join (thid: %"PRI_THREAD_ID", status: %s)\n",
1231  thread_id_str(target_th), thread_status_name(target_th, TRUE));
1232 
1233  if (target_th->status != THREAD_KILLED) {
1234  struct rb_waiting_list waiter;
1235  waiter.next = target_th->join_list;
1236  waiter.thread = th;
1237  waiter.fiber = fiber;
1238  target_th->join_list = &waiter;
1239 
1240  struct join_arg arg;
1241  arg.waiter = &waiter;
1242  arg.target = target_th;
1243  arg.timeout = timeout;
1244  arg.limit = limit;
1245 
1246  if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1247  return Qnil;
1248  }
1249  }
1250 
1251  thread_debug("thread_join: success (thid: %"PRI_THREAD_ID", status: %s)\n",
1252  thread_id_str(target_th), thread_status_name(target_th, TRUE));
1253 
1254  if (target_th->ec->errinfo != Qnil) {
1255  VALUE err = target_th->ec->errinfo;
1256 
1257  if (FIXNUM_P(err)) {
1258  switch (err) {
1259  case INT2FIX(TAG_FATAL):
1260  thread_debug("thread_join: terminated (thid: %"PRI_THREAD_ID", status: %s)\n",
1261  thread_id_str(target_th), thread_status_name(target_th, TRUE));
1262 
1263  /* OK. killed. */
1264  break;
1265  default:
1266  rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1267  }
1268  }
1269  else if (THROW_DATA_P(target_th->ec->errinfo)) {
1270  rb_bug("thread_join: THROW_DATA should not reach here.");
1271  }
1272  else {
1273  /* normal exception */
1274  rb_exc_raise(err);
1275  }
1276  }
1277  return target_th->self;
1278 }
1279 
1280 /*
1281  * call-seq:
1282  * thr.join -> thr
1283  * thr.join(limit) -> thr
1284  *
1285  * The calling thread will suspend execution and run this +thr+.
1286  *
1287  * Does not return until +thr+ exits or until the given +limit+ seconds have
1288  * passed.
1289  *
1290  * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1291  * returned.
1292  *
1293  * Any threads not joined will be killed when the main program exits.
1294  *
1295  * If +thr+ had previously raised an exception and the ::abort_on_exception or
1296  * $DEBUG flags are not set, (so the exception has not yet been processed), it
1297  * will be processed at this time.
1298  *
1299  * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1300  * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1301  * x.join # Let thread x finish, thread a will be killed on exit.
1302  * #=> "axyz"
1303  *
1304  * The following example illustrates the +limit+ parameter.
1305  *
1306  * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1307  * puts "Waiting" until y.join(0.15)
1308  *
1309  * This will produce:
1310  *
1311  * tick...
1312  * Waiting
1313  * tick...
1314  * Waiting
1315  * tick...
1316  * tick...
1317  */
1318 
1319 static VALUE
1320 thread_join_m(int argc, VALUE *argv, VALUE self)
1321 {
1322  VALUE timeout = Qnil;
1323  rb_hrtime_t rel = 0, *limit = 0;
1324 
1325  if (rb_check_arity(argc, 0, 1)) {
1326  timeout = argv[0];
1327  }
1328 
1329  // Convert the timeout eagerly, so it's always converted and deterministic
1330  /*
1331  * This supports INFINITY and negative values, so we can't use
1332  * rb_time_interval right now...
1333  */
1334  if (NIL_P(timeout)) {
1335  /* unlimited */
1336  }
1337  else if (FIXNUM_P(timeout)) {
1338  rel = rb_sec2hrtime(NUM2TIMET(timeout));
1339  limit = &rel;
1340  }
1341  else {
1342  limit = double2hrtime(&rel, rb_num2dbl(timeout));
1343  }
1344 
1345  return thread_join(rb_thread_ptr(self), timeout, limit);
1346 }
1347 
1348 /*
1349  * call-seq:
1350  * thr.value -> obj
1351  *
1352  * Waits for +thr+ to complete, using #join, and returns its value or raises
1353  * the exception which terminated the thread.
1354  *
1355  * a = Thread.new { 2 + 2 }
1356  * a.value #=> 4
1357  *
1358  * b = Thread.new { raise 'something went wrong' }
1359  * b.value #=> RuntimeError: something went wrong
1360  */
1361 
1362 static VALUE
1363 thread_value(VALUE self)
1364 {
1365  rb_thread_t *th = rb_thread_ptr(self);
1366  thread_join(th, Qnil, 0);
1367  if (th->value == Qundef) {
1368  // If the thread is dead because we forked, th->value is still Qundef.
1369  return Qnil;
1370  }
1371  return th->value;
1372 }
1373 
1374 /*
1375  * Thread Scheduling
1376  */
1377 
1378 /*
1379  * Back when we used "struct timeval", not all platforms implemented
1380  * tv_sec as time_t. Nowadays we use "struct timespec" and tv_sec
1381  * seems to be implemented more consistently across platforms.
1382  * At least other parts of our code haven't had to deal with non-time_t
1383  * tv_sec in timespec...
1384  */
1385 #define TIMESPEC_SEC_MAX TIMET_MAX
1386 #define TIMESPEC_SEC_MIN TIMET_MIN
1387 
1388 COMPILER_WARNING_PUSH
1389 #if __has_warning("-Wimplicit-int-float-conversion")
1390 COMPILER_WARNING_IGNORED(-Wimplicit-int-float-conversion)
1391 #elif defined(_MSC_VER)
1392 /* C4305: 'initializing': truncation from '__int64' to 'const double' */
1393 COMPILER_WARNING_IGNORED(4305)
1394 #endif
1395 static const double TIMESPEC_SEC_MAX_as_double = TIMESPEC_SEC_MAX;
1396 COMPILER_WARNING_POP
1397 
1398 static rb_hrtime_t *
1399 double2hrtime(rb_hrtime_t *hrt, double d)
1400 {
1401  /* assume timespec.tv_sec has same signedness as time_t */
1402  const double TIMESPEC_SEC_MAX_PLUS_ONE = 2.0 * (TIMESPEC_SEC_MAX_as_double / 2.0 + 1.0);
1403 
1404  if (TIMESPEC_SEC_MAX_PLUS_ONE <= d) {
1405  return NULL;
1406  }
1407  else if (d <= 0) {
1408  *hrt = 0;
1409  }
1410  else {
1411  *hrt = (rb_hrtime_t)(d * (double)RB_HRTIME_PER_SEC);
1412  }
1413  return hrt;
1414 }
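/*
 * Worked example (sketch): with RB_HRTIME_PER_SEC == 10^9, a timeout of
 * 1.5 seconds becomes 1.5 * 10^9 = 1500000000 ns. A value too large for
 * TIMESPEC_SEC_MAX yields NULL, which callers such as thread_join_m()
 * treat as an unlimited wait.
 */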
1415 
1416 static void
1417 getclockofday(struct timespec *ts)
1418 {
1419 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1420  if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1421  return;
1422 #endif
1423  rb_timespec_now(ts);
1424 }
1425 
1426 /*
1427  * Don't inline this, since the library call is already time-consuming
1428  * and we don't want "struct timespec" on the stack too long, for GC's sake
1429  */
1430 NOINLINE(rb_hrtime_t rb_hrtime_now(void));
1431 rb_hrtime_t
1432 rb_hrtime_now(void)
1433 {
1434  struct timespec ts;
1435 
1436  getclockofday(&ts);
1437  return rb_timespec2hrtime(&ts);
1438 }
1439 
1440 static void
1441 sleep_forever(rb_thread_t *th, unsigned int fl)
1442 {
1443  enum rb_thread_status prev_status = th->status;
1444  enum rb_thread_status status;
1445  int woke;
1446 
1447  status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1448  th->status = status;
1449  RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1450  while (th->status == status) {
1451  if (fl & SLEEP_DEADLOCKABLE) {
1452  rb_ractor_sleeper_threads_inc(th->ractor);
1453  rb_check_deadlock(th->ractor);
1454  }
1455  native_sleep(th, 0);
1456  if (fl & SLEEP_DEADLOCKABLE) {
1457  rb_ractor_sleeper_threads_dec(th->ractor);
1458  }
1459  woke = vm_check_ints_blocking(th->ec);
1460  if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1461  break;
1462  }
1463  th->status = prev_status;
1464 }
1465 
1466 /*
1467  * at least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
1468  * being uninitialized, maybe other versions, too.
1469  */
1470 COMPILER_WARNING_PUSH
1471 #if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1472 COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1473 #endif
1474 #ifndef PRIu64
1475 #define PRIu64 PRI_64_PREFIX "u"
1476 #endif
1477 /*
1478  * @end is the absolute time when @timeout is set to expire.
1479  * Returns true if @end has passed;
1480  * otherwise updates @timeout and returns false.
1481  */
1482 static int
1483 hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1484 {
1485  rb_hrtime_t now = rb_hrtime_now();
1486 
1487  if (now > end) return 1;
1488  thread_debug("hrtime_update_expire: "
1489  "%"PRIu64" > %"PRIu64"\n",
1490  (uint64_t)end, (uint64_t)now);
1491  *timeout = end - now;
1492  return 0;
1493 }
1494 COMPILER_WARNING_POP
1495 
1496 static int
1497 sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1498 {
1499  enum rb_thread_status prev_status = th->status;
1500  int woke;
1501  rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1502 
1503  th->status = THREAD_STOPPED;
1504  RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1505  while (th->status == THREAD_STOPPED) {
1506  native_sleep(th, &rel);
1507  woke = vm_check_ints_blocking(th->ec);
1508  if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1509  break;
1510  if (hrtime_update_expire(&rel, end))
1511  break;
1512  woke = 1;
1513  }
1514  th->status = prev_status;
1515  return woke;
1516 }
1517 
1518 void
1519 rb_thread_sleep_forever(void)
1520 {
1521  thread_debug("rb_thread_sleep_forever\n");
1522  sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1523 }
1524 
1525 void
1526 rb_thread_sleep_deadly(void)
1527 {
1528  thread_debug("rb_thread_sleep_deadly\n");
1529  sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1530 }
1531 
1532 void
1533 rb_thread_sleep_interruptible(void)
1534 {
1535  rb_thread_t *th = GET_THREAD();
1536  enum rb_thread_status prev_status = th->status;
1537 
1538  th->status = THREAD_STOPPED;
1539  native_sleep(th, 0);
1540  RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1541  th->status = prev_status;
1542 }
1543 
1544 static void
1545 rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker)
1546 {
1547  VALUE scheduler = rb_fiber_scheduler_current();
1548  if (scheduler != Qnil) {
1549  rb_fiber_scheduler_block(scheduler, blocker, Qnil);
1550  }
1551  else {
1552  thread_debug("rb_thread_sleep_deadly_allow_spurious_wakeup\n");
1553  sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1554  }
1555 }
1556 
1557 void
1558 rb_thread_wait_for(struct timeval time)
1559 {
1560  rb_thread_t *th = GET_THREAD();
1561 
1562  sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1563 }
1564 
1565 /*
1566  * CAUTION: This function causes thread switching.
1567  * rb_thread_check_ints() checks ruby's pending interrupts.
1568  * Some interrupts require thread switching, invoking handlers,
1569  * and so on.
1570  */
1571 
1572 void
1573 rb_thread_check_ints(void)
1574 {
1575  RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
1576 }
1577 
1578 /*
1579  * Hidden API for tcl/tk wrapper.
1580  * There is no guarantee that it will be kept.
1581  */
1582 int
1583 rb_thread_check_trap_pending(void)
1584 {
1585  return rb_signal_buff_size() != 0;
1586 }
1587 
1588 /* This function can be called in blocking region. */
1589 int
1590 rb_thread_interrupted(VALUE thval)
1591 {
1592  return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1593 }
1594 
1595 void
1596 rb_thread_sleep(int sec)
1597 {
1598  rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1599 }
1600 
1601 static void
1602 rb_thread_schedule_limits(uint32_t limits_us)
1603 {
1604  thread_debug("rb_thread_schedule\n");
1605  if (!rb_thread_alone()) {
1606  rb_thread_t *th = GET_THREAD();
1607 
1608  if (th->running_time_us >= limits_us) {
1609  thread_debug("rb_thread_schedule/switch start\n");
1610  RB_GC_SAVE_MACHINE_CONTEXT(th);
1611  gvl_yield(rb_ractor_gvl(th->ractor), th);
1612  rb_ractor_thread_switch(th->ractor, th);
1613  thread_debug("rb_thread_schedule/switch done\n");
1614  }
1615  }
1616 }
1617 
1618 void
1619 rb_thread_schedule(void)
1620 {
1621  rb_thread_schedule_limits(0);
1622  RUBY_VM_CHECK_INTS(GET_EC());
1623 }
1624 
1625 /* blocking region */
1626 
1627 static inline int
1628 blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1629  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1630 {
1631  region->prev_status = th->status;
1632  if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1633  th->blocking_region_buffer = region;
1634  th->status = THREAD_STOPPED;
1635  rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1636  thread_debug("enter blocking region (%p)\n", (void *)th);
1637  RB_GC_SAVE_MACHINE_CONTEXT(th);
1638  gvl_release(rb_ractor_gvl(th->ractor));
1639  return TRUE;
1640  }
1641  else {
1642  return FALSE;
1643  }
1644 }
1645 
1646 static inline void
1647 blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1648 {
1649  /* entry to ubf_list still permitted at this point, make it impossible: */
1650  unblock_function_clear(th);
1651  /* entry to ubf_list impossible at this point, so unregister is safe: */
1652  unregister_ubf_list(th);
1653 
1654  gvl_acquire(rb_ractor_gvl(th->ractor), th);
1655  rb_ractor_thread_switch(th->ractor, th);
1656 
1657  thread_debug("leave blocking region (%p)\n", (void *)th);
1658  th->blocking_region_buffer = 0;
1659  rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1660  if (th->status == THREAD_STOPPED) {
1661  th->status = region->prev_status;
1662  }
1663 }
1664 
1665 void *
1666 rb_nogvl(void *(*func)(void *), void *data1,
1667  rb_unblock_function_t *ubf, void *data2,
1668  int flags)
1669 {
1670  void *val = 0;
1671  rb_execution_context_t *ec = GET_EC();
1672  rb_thread_t *th = rb_ec_thread_ptr(ec);
1673  rb_vm_t *vm = rb_ec_vm_ptr(ec);
1674  bool is_main_thread = vm->ractor.main_thread == th;
1675  int saved_errno = 0;
1676  VALUE ubf_th = Qfalse;
1677 
1678  if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1679  ubf = ubf_select;
1680  data2 = th;
1681  }
1682  else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1683  if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1684  vm->ubf_async_safe = 1;
1685  }
1686  else {
1687  ubf_th = rb_thread_start_unblock_thread();
1688  }
1689  }
1690 
1691  BLOCKING_REGION(th, {
1692  val = func(data1);
1693  saved_errno = errno;
1694  }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1695 
1696  if (is_main_thread) vm->ubf_async_safe = 0;
1697 
1698  if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1699  RUBY_VM_CHECK_INTS_BLOCKING(ec);
1700  }
1701 
1702  if (ubf_th != Qfalse) {
1703  thread_value(rb_thread_kill(ubf_th));
1704  }
1705 
1706  errno = saved_errno;
1707 
1708  return val;
1709 }
1710 
1711 /*
1712  * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1713  * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1714  * without interrupt processing.
1715  *
1716  * rb_thread_call_without_gvl() does:
1717  * (1) Check interrupts.
1718  * (2) release GVL.
1719  * Other Ruby threads may run in parallel.
1720  * (3) call func with data1
1721  * (4) acquire GVL.
1722  * Other Ruby threads cannot run in parallel any more.
1723  * (5) Check interrupts.
1724  *
1725  * rb_thread_call_without_gvl2() does:
1726  * (1) Check interrupt and return if interrupted.
1727  * (2) release GVL.
1728  * (3) call func with data1 and a pointer to the flags.
1729  * (4) acquire GVL.
1730  *
1731  * If another thread interrupts this thread (Thread#kill, signal delivery,
1732  * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1733  * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1734  * toggling a cancellation flag, canceling the invocation of a call inside
1735  * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1736  *
1737  * There are built-in ubfs and you can specify these ubfs:
1738  *
1739  * * RUBY_UBF_IO: ubf for IO operation
1740  * * RUBY_UBF_PROCESS: ubf for process operation
1741  *
1742  * However, we cannot guarantee that our built-in ubfs will interrupt your
1743  * `func()' correctly. Be careful when using rb_thread_call_without_gvl(). If you don't
1744  * provide proper ubf(), your program will not stop for Control+C or other
1745  * shutdown events.
1746  *
1747  * "Check interrupts" on above list means checking asynchronous
1748  * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1749  * request, and so on) and calling corresponding procedures
1750  * (such as `trap' for signals, raise an exception for Thread#raise).
1751  * If `func()' finished and received interrupts, you may skip interrupt
1752  * checking. For example, assume the following func(), which reads data from a file.
1753  *
1754  * read_func(...) {
1755  * // (a) before read
1756  * read(buffer); // (b) reading
1757  * // (c) after read
1758  * }
1759  *
1760  * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1761  * `read_func()' and interrupts are checked. However, if an interrupt occurs
1762  * at (c), after the *read* operation has completed, checking interrupts is
1763  * harmful because it causes an irrevocable side effect: the read data will
1764  * vanish. To avoid such problems, `read_func()' should be used with
1765  * `rb_thread_call_without_gvl2()'.
1766  *
1767  * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
1768  * immediately. This function does not indicate when the execution was
1769  * interrupted: there are 4 possible timings, (a), (b), (c) and before calling
1770  * read_func(). You need to record the progress of read_func() and check
1771  * it after `rb_thread_call_without_gvl2()' returns. You may also need to call
1772  * `rb_thread_check_ints()' at the right points, or your program cannot
1773  * handle interrupts such as `trap' properly.
1774  *
1775  * NOTE: You cannot execute most of the Ruby C API or touch Ruby
1776  * objects in `func()' and `ubf()', including raising an
1777  * exception, because the current thread doesn't hold the GVL
1778  * (it causes synchronization problems). If you need to
1779  * call ruby functions either use rb_thread_call_with_gvl()
1780  * or read source code of C APIs and confirm safety by
1781  * yourself.
1782  *
1783  * NOTE: In short, this API is difficult to use safely. We recommend
1784  * other approaches if you have them, as we lack experience with this API.
1785  * Please report any problems related to it.
1786  *
1787  * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1788  * for a short-running `func()'. Be sure to benchmark, and use this
1789  * mechanism when `func()' consumes enough time.
1790  *
1791  * Safe C API:
1792  * * rb_thread_interrupted() - check interrupt flag
1793  * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1794  * they will work without GVL, and may acquire GVL when GC is needed.
1795  */
1796 void *
1797 rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1798  rb_unblock_function_t *ubf, void *data2)
1799 {
1800  return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1801 }
1802 
1803 void *
1804 rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1805  rb_unblock_function_t *ubf, void *data2)
1806 {
1807  return rb_nogvl(func, data1, ubf, data2, 0);
1808 }
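/*
 * Usage sketch (assumption, not from this file): wrapping read(2) with
 * rb_thread_call_without_gvl() so other Ruby threads keep running. The
 * struct and function names are hypothetical; RUBY_UBF_IO is the built-in
 * unblocking function for IO waits. Requires <unistd.h> for read(2).
 */
#if 0
struct read_args { int fd; void *buf; size_t len; ssize_t result; };

static void *
blocking_read(void *ptr)
{
    struct read_args *a = ptr;
    a->result = read(a->fd, a->buf, a->len);   /* runs without the GVL */
    return NULL;
}

static VALUE
ext_read(VALUE self, VALUE fd, VALUE len)
{
    struct read_args a = { NUM2INT(fd), NULL, NUM2SIZET(len), -1 };
    a.buf = ruby_xmalloc(a.len);               /* still holding the GVL here */
    rb_thread_call_without_gvl(blocking_read, &a, RUBY_UBF_IO, NULL);
    VALUE str = rb_str_new(a.buf, a.result < 0 ? 0 : (long)a.result);
    ruby_xfree(a.buf);
    return str;
}
#endif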
1809 
1810 VALUE
1811 rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
1812 {
1813  volatile VALUE val = Qundef; /* shouldn't be used */
1814  rb_execution_context_t * volatile ec = GET_EC();
1815  volatile int saved_errno = 0;
1816  enum ruby_tag_type state;
1817 
1818  struct waiting_fd waiting_fd = {
1819  .fd = fd,
1820  .th = rb_ec_thread_ptr(ec)
1821  };
1822 
1823  RB_VM_LOCK_ENTER();
1824  {
1825  list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &waiting_fd.wfd_node);
1826  }
1827  RB_VM_LOCK_LEAVE();
1828 
1829  EC_PUSH_TAG(ec);
1830  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1831  BLOCKING_REGION(waiting_fd.th, {
1832  val = func(data1);
1833  saved_errno = errno;
1834  }, ubf_select, waiting_fd.th, FALSE);
1835  }
1836  EC_POP_TAG();
1837 
1838  /*
1839  * must be deleted before jump
1840  * this will delete either from waiting_fds or on-stack LIST_HEAD(busy)
1841  */
1842  RB_VM_LOCK_ENTER();
1843  {
1844  list_del(&waiting_fd.wfd_node);
1845  }
1846  RB_VM_LOCK_LEAVE();
1847 
1848  if (state) {
1849  EC_JUMP_TAG(ec, state);
1850  }
1851  /* TODO: check func() */
1852  RUBY_VM_CHECK_INTS_BLOCKING(ec);
1853 
1854  errno = saved_errno;
1855 
1856  return val;
1857 }
1858 
1859 /*
1860  * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1861  *
1862  * After releasing the GVL using
1863  * rb_thread_call_without_gvl() you cannot access Ruby values or invoke
1864  * methods. If you need to access Ruby, you must use this function,
1865  * rb_thread_call_with_gvl().
1866  *
1867  * This function rb_thread_call_with_gvl() does:
1868  * (1) acquire GVL.
1869  * (2) call passed function `func'.
1870  * (3) release GVL.
1871  * (4) return a value which is returned at (2).
1872  *
1873  * NOTE: You should not return a Ruby object at (2) because such an object
1874  * will not be marked (and may therefore be garbage collected).
1875  *
1876  * NOTE: If an exception is raised in `func', this function DOES NOT
1877  * protect (catch) the exception. If you have any resources
1878  * which should be freed before the exception propagates, you need to use
1879  * rb_protect() in `func' and return a value which indicates that an
1880  * exception was raised.
1881  *
1882  * NOTE: This function should not be called by a thread which was not
1883  * created as Ruby thread (created by Thread.new or so). In other
1884  * words, this function *DOES NOT* associate or convert a NON-Ruby
1885  * thread to a Ruby thread.
1886  */
1887 void *
1888 rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1889 {
1890  rb_thread_t *th = ruby_thread_from_native();
1891  struct rb_blocking_region_buffer *brb;
1892  struct rb_unblock_callback prev_unblock;
1893  void *r;
1894 
1895  if (th == 0) {
1896  /* An error has occurred, but we can't use rb_bug()
1897  * because this thread is not Ruby's thread.
1898  * What should we do?
1899  */
1900  bp();
1901  fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1902  exit(EXIT_FAILURE);
1903  }
1904 
1905  brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
1906  prev_unblock = th->unblock;
1907 
1908  if (brb == 0) {
1909  rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1910  }
1911 
1912  blocking_region_end(th, brb);
1913  /* enter the Ruby world: you can access Ruby values, methods, and so on. */
1914  r = (*func)(data1);
1915  /* leave the Ruby world: you can no longer access Ruby values, etc. */
1916  int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1917  RUBY_ASSERT_ALWAYS(released);
1918  return r;
1919 }
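/*
 * A minimal sketch of re-entering Ruby from a GVL-released region (the
 * names gvl_report and heavy_work are hypothetical):
 *
 *   static void *
 *   gvl_report(void *msg)
 *   {
 *       rb_funcall(rb_stderr, rb_intern("puts"), 1,
 *                  rb_str_new_cstr((const char *)msg));
 *       return NULL;
 *   }
 *
 *   static void *
 *   heavy_work(void *unused)  // runs via rb_thread_call_without_gvl()
 *   {
 *       // ... non-Ruby work ...
 *       rb_thread_call_with_gvl(gvl_report, (void *)"halfway done");
 *       // ... more non-Ruby work ...
 *       return NULL;
 *   }
 */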
1920 
1921 /*
1922  * ruby_thread_has_gvl_p - check if current native thread has GVL.
1923  *
1924  ***
1925  *** This API is EXPERIMENTAL!
1926  *** We do not guarantee that this API will remain available in Ruby 1.9.2 or later.
1927  ***
1928  */
1929 
1930 int
1931 ruby_thread_has_gvl_p(void)
1932 {
1933  rb_thread_t *th = ruby_thread_from_native();
1934 
1935  if (th && th->blocking_region_buffer == 0) {
1936  return 1;
1937  }
1938  else {
1939  return 0;
1940  }
1941 }
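/*
 * For example, an extension can use it as a (debug-only) precondition check;
 * this assertion helper is a hypothetical sketch:
 *
 *   static void
 *   assert_gvl_held(void)
 *   {
 *       if (!ruby_thread_has_gvl_p()) {
 *           fprintf(stderr, "[BUG] called without the GVL\n");
 *           abort();
 *       }
 *   }
 */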
1942 
1943 /*
1944  * call-seq:
1945  * Thread.pass -> nil
1946  *
1947  * Give the thread scheduler a hint to pass execution to another thread.
1948  * A running thread may or may not switch; whether it does depends on the OS and processor.
1949  */
1950 
1951 static VALUE
1952 thread_s_pass(VALUE klass)
1953 {
1954  rb_thread_schedule();
1955  return Qnil;
1956 }
1957 
1958 /*****************************************************/
1959 
1960 /*
1961  * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1962  *
1963  * Async events such as an exception thrown by Thread#raise,
1964  * Thread#kill and thread termination (after main thread termination)
1965  * will be queued to th->pending_interrupt_queue.
1966  * - clear: clear the queue.
1967  * - enque: enqueue an error object into the queue.
1968  * - deque: dequeue an error object from the queue.
1969  * - active_p: return 1 if the queue should be checked.
1970  *
1971  * All rb_threadptr_pending_interrupt_* functions are, of course, called
1972  * by a thread which has acquired the GVL.
1973  * Note that all "rb_"-prefixed APIs require the GVL.
1974  */
1975 
1976 void
1977 rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1978 {
1979  rb_ary_clear(th->pending_interrupt_queue);
1980 }
1981 
1982 void
1983 rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
1984 {
1985  rb_ary_push(th->pending_interrupt_queue, v);
1986  th->pending_interrupt_queue_checked = 0;
1987 }
1988 
1989 static void
1990 threadptr_check_pending_interrupt_queue(rb_thread_t *th)
1991 {
1992  if (!th->pending_interrupt_queue) {
1993  rb_raise(rb_eThreadError, "uninitialized thread");
1994  }
1995 }
1996 
1997 enum handle_interrupt_timing {
1998  INTERRUPT_NONE,
1999  INTERRUPT_IMMEDIATE,
2000  INTERRUPT_ON_BLOCKING,
2001  INTERRUPT_NEVER
2002 };
2003 
2004 static enum handle_interrupt_timing
2005 rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2006 {
2007  VALUE mask;
2008  long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2009  const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2010  VALUE mod;
2011  long i;
2012 
2013  for (i=0; i<mask_stack_len; i++) {
2014  mask = mask_stack[mask_stack_len-(i+1)];
2015 
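        /* Walk err's ancestry from the class itself up through its
         * superclasses. T_ICLASS entries stand in for included modules,
         * so look up the module itself in the mask; classes whose origin
         * was moved (e.g. by Module#prepend) are skipped. */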
2016  for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2017  VALUE klass = mod;
2018  VALUE sym;
2019 
2020  if (BUILTIN_TYPE(mod) == T_ICLASS) {
2021  klass = RBASIC(mod)->klass;
2022  }
2023  else if (mod != RCLASS_ORIGIN(mod)) {
2024  continue;
2025  }
2026 
2027  if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2028  if (sym == sym_immediate) {
2029  return INTERRUPT_IMMEDIATE;
2030  }
2031  else if (sym == sym_on_blocking) {
2032  return INTERRUPT_ON_BLOCKING;
2033  }
2034  else if (sym == sym_never) {
2035  return INTERRUPT_NEVER;
2036  }
2037  else {
2038  rb_raise(rb_eThreadError, "unknown mask signature");
2039  }
2040  }
2041  }
2042  /* try the next mask */
2043  }
2044  return INTERRUPT_NONE;
2045 }
2046 
2047 static int
2048 rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2049 {
2050  return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2051 }
2052 
2053 static int
2054 rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2055 {
2056  int i;
2057  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2058  VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2059  if (rb_class_inherited_p(e, err)) {
2060  return TRUE;
2061  }
2062  }
2063  return FALSE;
2064 }
2065 
2066 static VALUE
2067 rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2068 {
2069 #if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2070  int i;
2071 
2072  for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2073  VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2074 
2075  enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2076 
2077  switch (mask_timing) {
2078  case INTERRUPT_ON_BLOCKING:
2079  if (timing != INTERRUPT_ON_BLOCKING) {
2080  break;
2081  }
2082  /* fall through */
2083  case INTERRUPT_NONE: /* default: IMMEDIATE */
2084  case INTERRUPT_IMMEDIATE:
2085  rb_ary_delete_at(th->pending_interrupt_queue, i);
2086  return err;
2087  case INTERRUPT_NEVER:
2088  break;
2089  }
2090  }
2091 
2092  th->pending_interrupt_queue_checked = 1;
2093  return Qundef;
2094 #else
2095  VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2096  if (rb_threadptr_pending_interrupt_empty_p(th)) {
2097  th->pending_interrupt_queue_checked = 1;
2098  }
2099  return err;
2100 #endif
2101 }
2102 
2103 static int
2104 threadptr_pending_interrupt_active_p(rb_thread_t *th)
2105 {
2106  /*
2107  * As an optimization, we don't check the async errinfo queue
2108  * if neither the queue nor the thread interrupt mask has changed
2109  * since the last check.
2110  */
2111  if (th->pending_interrupt_queue_checked) {
2112  return 0;
2113  }
2114 
2115  if (rb_threadptr_pending_interrupt_empty_p(th)) {
2116  return 0;
2117  }
2118 
2119  return 1;
2120 }
2121 
2122 static int
2123 handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2124 {
2125  VALUE *maskp = (VALUE *)args;
2126 
2127  if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2128  rb_raise(rb_eArgError, "unknown mask signature");
2129  }
2130 
2131  if (!*maskp) {
2132  *maskp = rb_ident_hash_new();
2133  }
2134  rb_hash_aset(*maskp, key, val);
2135 
2136  return ST_CONTINUE;
2137 }
2138 
2139 /*
2140  * call-seq:
2141  * Thread.handle_interrupt(hash) { ... } -> result of the block
2142  *
2143  * Changes asynchronous interrupt timing.
2144  *
2145  * _interrupt_ means an asynchronous event and its corresponding procedure,
2146  * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2147  * and main thread termination (if the main thread terminates, then all
2148  * other threads will be killed).
2149  *
2150  * The given +hash+ has pairs like <code>ExceptionClass =>
2151  * :TimingSymbol</code>, where ExceptionClass is the interrupt handled by
2152  * the given block. The TimingSymbol can be one of the following symbols:
2153  *
2154  * [+:immediate+] Invoke interrupts immediately.
2155  * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
2156  * [+:never+] Never invoke interrupts.
2157  *
2158  * _BlockingOperation_ means that the operation will block the calling thread,
2159  * such as read and write. In the CRuby implementation, _BlockingOperation_ is
2160  * any operation executed without the GVL.
2161  *
2162  * Masked asynchronous interrupts are delayed until they are enabled.
2163  * This method is similar to sigprocmask(3).
2164  *
2165  * === NOTE
2166  *
2167  * Asynchronous interrupts are difficult to use.
2168  *
2169  * If you need to communicate between threads, please consider using another
2170  * mechanism such as Queue, or use asynchronous interrupts only with a deep
2171  * understanding of this method.
2172  *
2173  * === Usage
2174  *
2175  * In this example, we can guard against Thread#raise exceptions.
2176  *
2177  * Using the +:never+ TimingSymbol the RuntimeError exception will always be
2178  * ignored in the first block of the main thread. In the second
2179  * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2180  *
2181  *   th = Thread.new do
2182  *     Thread.handle_interrupt(RuntimeError => :never) {
2183  *       begin
2184  *         # You can write resource allocation code safely.
2185  *         Thread.handle_interrupt(RuntimeError => :immediate) {
2186  *           # ...
2187  *         }
2188  *       ensure
2189  *         # You can write resource deallocation code safely.
2190  *       end
2191  *     }
2192  *   end
2193  *   Thread.pass
2194  *   # ...
2195  *   th.raise "stop"
2196  *
2197  * While we are ignoring the RuntimeError exception, it's safe to write our
2198  * resource allocation code. Then, the ensure block is where we can safely
2199  * deallocate those resources.
2200  *
2201  * ==== Guarding from Timeout::Error
2202  *
2203  * In the next example, we will guard against the Timeout::Error exception. This
2204  * will help prevent leaking resources when Timeout::Error exceptions occur
2205  * during a normal ensure clause. For this example we use the help of the
2206  * standard library Timeout, from lib/timeout.rb
2207  *
2208  *   require 'timeout'
2209  *   Thread.handle_interrupt(Timeout::Error => :never) {
2210  *     Timeout.timeout(10){
2211  *       # Timeout::Error doesn't occur here
2212  *       Thread.handle_interrupt(Timeout::Error => :on_blocking) {
2213  *         # possible to be killed by Timeout::Error
2214  *         # while in a blocking operation
2215  *       }
2216  *       # Timeout::Error doesn't occur here
2217  *     }
2218  *   }
2219  *
2220  * In the first part of the +Timeout.timeout+ block, we can rely on Timeout::Error being
2221  * ignored. Then in the <code>Timeout::Error => :on_blocking</code> block, any
2222  * operation that will block the calling thread is susceptible to a
2223  * Timeout::Error exception being raised.
2224  *
2225  * ==== Stack control settings
2226  *
2227  * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2228  * to control more than one ExceptionClass and TimingSymbol at a time.
2229  *
2230  *   Thread.handle_interrupt(FooError => :never) {
2231  *     Thread.handle_interrupt(BarError => :never) {
2232  *       # FooError and BarError are prohibited.
2233  *     }
2234  *   }
2235  *
2236  * ==== Inheritance with ExceptionClass
2237  *
2238  * All exceptions inherited from the ExceptionClass parameter will be considered.
2239  *
2240  *   Thread.handle_interrupt(Exception => :never) {
2241  *     # all exceptions inherited from Exception are prohibited.
2242  *   }
2243  *
2244  * For handling all interrupts, use +Object+ and not +Exception+
2245  * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2246  */
2247 static VALUE
2248 rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2249 {
2250  VALUE mask;
2251  rb_execution_context_t * volatile ec = GET_EC();
2252  rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2253  volatile VALUE r = Qnil;
2254  enum ruby_tag_type state;
2255 
2256  if (!rb_block_given_p()) {
2257  rb_raise(rb_eArgError, "block is needed.");
2258  }
2259 
2260  mask = 0;
2261  mask_arg = rb_to_hash_type(mask_arg);
2262  rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2263  if (!mask) {
2264  return rb_yield(Qnil);
2265  }
2266  OBJ_FREEZE_RAW(mask);
2267  rb_ary_push(th->pending_interrupt_mask_stack, mask);
2268  if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2269  th->pending_interrupt_queue_checked = 0;
2270  RUBY_VM_SET_INTERRUPT(th->ec);
2271  }
2272 
2273  EC_PUSH_TAG(th->ec);
2274  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2275  r = rb_yield(Qnil);
2276  }
2277  EC_POP_TAG();
2278 
2279  rb_ary_pop(th->pending_interrupt_mask_stack);
2280  if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2281  th->pending_interrupt_queue_checked = 0;
2282  RUBY_VM_SET_INTERRUPT(th->ec);
2283  }
2284 
2285  RUBY_VM_CHECK_INTS(th->ec);
2286 
2287  if (state) {
2288  EC_JUMP_TAG(th->ec, state);
2289  }
2290 
2291  return r;
2292 }
2293 
2294 /*
2295  * call-seq:
2296  * target_thread.pending_interrupt?(error = nil) -> true/false
2297  *
2298  * Returns true if the asynchronous event queue is not empty for the target thread.
2299  *
2300  * If +error+ is given, then check only for +error+ type deferred events.
2301  *
2302  * See ::pending_interrupt? for more information.
2303  */
2304 static VALUE
2305 rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2306 {
2307  rb_thread_t *target_th = rb_thread_ptr(target_thread);
2308 
2309  if (!target_th->pending_interrupt_queue) {
2310  return Qfalse;
2311  }
2312  if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2313  return Qfalse;
2314  }
2315  if (rb_check_arity(argc, 0, 1)) {
2316  VALUE err = argv[0];
2317  if (!rb_obj_is_kind_of(err, rb_cModule)) {
2318  rb_raise(rb_eTypeError, "class or module required for rescue clause");
2319  }
2320  return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2321  }
2322  else {
2323  return Qtrue;
2324  }
2325 }
2326 
2327 /*
2328  * call-seq:
2329  * Thread.pending_interrupt?(error = nil) -> true/false
2330  *
2331  * Returns true if the asynchronous event queue is not empty.
2332  *
2333  * Since Thread::handle_interrupt can be used to defer asynchronous events,
2334  * this method can be used to determine if there are any deferred events.
2335  *
2336  * If this method returns true, then you may want to finish the +:never+ block.
2337  *
2338  * For example, the following method processes deferred asynchronous events
2339  * immediately.
2340  *
2341  *   def Thread.kick_interrupt_immediately
2342  *     Thread.handle_interrupt(Object => :immediate) {
2343  *       Thread.pass
2344  *     }
2345  *   end
2346  *
2347  * If +error+ is given, then check only for +error+ type deferred events.
2348  *
2349  * === Usage
2350  *
2351  *   th = Thread.new{
2352  *     Thread.handle_interrupt(RuntimeError => :on_blocking){
2353  *       while true
2354  *         ...
2355  *         # reach safe point to invoke interrupt
2356  *         if Thread.pending_interrupt?
2357  *           Thread.handle_interrupt(Object => :immediate){}
2358  *         end
2359  *         ...
2360  *       end
2361  *     }
2362  *   }
2363  *   ...
2364  *   th.raise # stop thread
2365  *
2366  * This example can also be written as follows, which you should prefer
2367  * if you want to avoid asynchronous interrupts entirely.
2368  *
2369  *   flag = true
2370  *   th = Thread.new{
2371  *     Thread.handle_interrupt(RuntimeError => :on_blocking){
2372  *       while true
2373  *         ...
2374  *         # reach safe point to invoke interrupt
2375  *         break if flag == false
2376  *         ...
2377  *       end
2378  *     }
2379  *   }
2380  *   ...
2381  *   flag = false # stop thread
2382  */
2383 
2384 static VALUE
2385 rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2386 {
2387  return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2388 }
2389 
2390 NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2391 
2392 static void
2393 rb_threadptr_to_kill(rb_thread_t *th)
2394 {
2395  rb_threadptr_pending_interrupt_clear(th);
2396  th->status = THREAD_RUNNABLE;
2397  th->to_kill = 1;
2398  th->ec->errinfo = INT2FIX(TAG_FATAL);
2399  EC_JUMP_TAG(th->ec, TAG_FATAL);
2400 }
2401 
2402 static inline rb_atomic_t
2403 threadptr_get_interrupts(rb_thread_t *th)
2404 {
2405  rb_execution_context_t *ec = th->ec;
2406  rb_atomic_t interrupt;
2407  rb_atomic_t old;
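    /* Atomically snapshot the pending bits and clear the deliverable
     * (unmasked) ones from ec->interrupt_flag, leaving masked bits queued;
     * retry if another thread updates the flag between the read and the
     * compare-and-swap. The deliverable bits are returned. */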
2408 
2409  do {
2410  interrupt = ec->interrupt_flag;
2411  old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2412  } while (old != interrupt);
2413  return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2414 }
2415 
2416 MJIT_FUNC_EXPORTED int
2417 rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2418 {
2419  rb_atomic_t interrupt;
2420  int postponed_job_interrupt = 0;
2421  int ret = FALSE;
2422 
2423  if (th->ec->raised_flag) return ret;
2424 
2425  while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2426  int sig;
2427  int timer_interrupt;
2428  int pending_interrupt;
2429  int trap_interrupt;
2430  int terminate_interrupt;
2431 
2432  timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2433  pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2434  postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2435  trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2436  terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2437 
2438  if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2439  RB_VM_LOCK_ENTER();
2440  RB_VM_LOCK_LEAVE();
2441  }
2442 
2443  if (postponed_job_interrupt) {
2444  rb_postponed_job_flush(th->vm);
2445  }
2446 
2447  /* signal handling */
2448  if (trap_interrupt && (th == th->vm->ractor.main_thread)) {
2449  enum rb_thread_status prev_status = th->status;
2450  int sigwait_fd = rb_sigwait_fd_get(th);
2451 
2452  if (sigwait_fd >= 0) {
2453  (void)consume_communication_pipe(sigwait_fd);
2454  ruby_sigchld_handler(th->vm);
2455  rb_sigwait_fd_put(th, sigwait_fd);
2456  rb_sigwait_fd_migrate(th->vm);
2457  }
2458  th->status = THREAD_RUNNABLE;
2459  while ((sig = rb_get_next_signal()) != 0) {
2460  ret |= rb_signal_exec(th, sig);
2461  }
2462  th->status = prev_status;
2463  }
2464 
2465  /* exception from another thread */
2466  if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2467  VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2468  thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);
2469  ret = TRUE;
2470 
2471  if (err == Qundef) {
2472  /* no error */
2473  }
2474  else if (err == eKillSignal /* Thread#kill received */ ||
2475  err == eTerminateSignal /* Terminate thread */ ||
2476  err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2477  terminate_interrupt = 1;
2478  }
2479  else {
2480  if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2481  /* the only special exception to be queued across threads */
2482  err = ruby_vm_special_exception_copy(err);
2483  }
2484  /* set runnable if th was sleeping. */
2485  if (th->status == THREAD_STOPPED ||
2486  th->status == THREAD_STOPPED_FOREVER)
2487  th->status = THREAD_RUNNABLE;
2488  rb_exc_raise(err);
2489  }
2490  }
2491 
2492  if (terminate_interrupt) {
2493  rb_threadptr_to_kill(th);
2494  }
2495 
2496  if (timer_interrupt) {
2497  uint32_t limits_us = TIME_QUANTUM_USEC;
2498 
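            /* Each positive priority step doubles the time slice and each
             * negative step halves it: priority 3 yields 8x
             * TIME_QUANTUM_USEC, priority -3 yields 1/8x. */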
2499  if (th->priority > 0)
2500  limits_us <<= th->priority;
2501  else
2502  limits_us >>= -th->priority;
2503 
2504  if (th->status == THREAD_RUNNABLE)
2505  th->running_time_us += TIME_QUANTUM_USEC;
2506 
2507  VM_ASSERT(th->ec->cfp);
2508  EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2509  0, 0, 0, Qundef);
2510 
2511  rb_thread_schedule_limits(limits_us);
2512  }
2513  }
2514  return ret;
2515 }
2516 
2517 void
2518 rb_thread_execute_interrupts(VALUE thval)
2519 {
2520  rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2521 }
2522 
2523 static void
2524 rb_threadptr_ready(rb_thread_t *th)
2525 {
2526  rb_threadptr_interrupt(th);
2527 }
2528 
2529 static VALUE
2530 rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2531 {
2532  VALUE exc;
2533 
2534  if (rb_threadptr_dead(target_th)) {
2535  return Qnil;
2536  }
2537 
2538  if (argc == 0) {
2539  exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2540  }
2541  else {
2542  exc = rb_make_exception(argc, argv);
2543  }
2544 
2545  /* making an exception object can switch threads,
2546  so we need to check thread deadness again */
2547  if (rb_threadptr_dead(target_th)) {
2548  return Qnil;
2549  }
2550 
2551  rb_ec_setup_exception(GET_EC(), exc, Qundef);
2552  rb_threadptr_pending_interrupt_enque(target_th, exc);
2553  rb_threadptr_interrupt(target_th);
2554  return Qnil;
2555 }
2556 
2557 void
2558 rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2559 {
2560  VALUE argv[2];
2561 
2562  argv[0] = rb_eSignal;
2563  argv[1] = INT2FIX(sig);
2564  rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2565 }
2566 
2567 void
2568 rb_threadptr_signal_exit(rb_thread_t *th)
2569 {
2570  VALUE argv[2];
2571 
2572  argv[0] = rb_eSystemExit;
2573  argv[1] = rb_str_new2("exit");
2574 
2575  // TODO: check signal raise delivery
2576  rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2577 }
2578 
2579 int
2580 rb_ec_set_raised(rb_execution_context_t *ec)
2581 {
2582  if (ec->raised_flag & RAISED_EXCEPTION) {
2583  return 1;
2584  }
2585  ec->raised_flag |= RAISED_EXCEPTION;
2586  return 0;
2587 }
2588 
2589 int
2590 rb_ec_reset_raised(rb_execution_context_t *ec)
2591 {
2592  if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2593  return 0;
2594  }
2595  ec->raised_flag &= ~RAISED_EXCEPTION;
2596  return 1;
2597 }
2598 
2599 int
2600 rb_notify_fd_close(int fd, struct list_head *busy)
2601 {
2602  rb_vm_t *vm = GET_THREAD()->vm;
2603  struct waiting_fd *wfd = 0, *next;
2604 
2605  RB_VM_LOCK_ENTER();
2606  {
2607  list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2608  if (wfd->fd == fd) {
2609  rb_thread_t *th = wfd->th;
2610  VALUE err;
2611 
2612  list_del(&wfd->wfd_node);
2613  list_add(busy, &wfd->wfd_node);
2614 
2615  err = th->vm->special_exceptions[ruby_error_stream_closed];
2616  rb_threadptr_pending_interrupt_enque(th, err);
2617  rb_threadptr_interrupt(th);
2618  }
2619  }
2620  }
2621  RB_VM_LOCK_LEAVE();
2622 
2623  return !list_empty(busy);
2624 }
2625 
2626 void
2627 rb_thread_fd_close(int fd)
2628 {
2629  struct list_head busy;
2630 
2631  list_head_init(&busy);
2632  if (rb_notify_fd_close(fd, &busy)) {
2633  do rb_thread_schedule(); while (!list_empty(&busy));
2634  }
2635 }
2636 
2637 /*
2638  * call-seq:
2639  * thr.raise
2640  * thr.raise(string)
2641  * thr.raise(exception [, string [, array]])
2642  *
2643  * Raises an exception from the given thread. The caller does not have to be
2644  * +thr+. See Kernel#raise for more information.
2645  *
2646  * Thread.abort_on_exception = true
2647  * a = Thread.new { sleep(200) }
2648  * a.raise("Gotcha")
2649  *
2650  * This will produce:
2651  *
2652  * prog.rb:3: Gotcha (RuntimeError)
2653  * from prog.rb:2:in `initialize'
2654  * from prog.rb:2:in `new'
2655  * from prog.rb:2
2656  */
2657 
2658 static VALUE
2659 thread_raise_m(int argc, VALUE *argv, VALUE self)
2660 {
2661  rb_thread_t *target_th = rb_thread_ptr(self);
2662  const rb_thread_t *current_th = GET_THREAD();
2663 
2664  threadptr_check_pending_interrupt_queue(target_th);
2665  rb_threadptr_raise(target_th, argc, argv);
2666 
2667  /* To perform Thread.current.raise as Kernel.raise */
2668  if (current_th == target_th) {
2669  RUBY_VM_CHECK_INTS(target_th->ec);
2670  }
2671  return Qnil;
2672 }
2673 
2674 
2675 /*
2676  * call-seq:
2677  * thr.exit -> thr
2678  * thr.kill -> thr
2679  * thr.terminate -> thr
2680  *
2681  * Terminates +thr+ and schedules another thread to be run, returning
2682  * the terminated Thread. If this is the main thread, or the last
2683  * thread, exits the process.
2684  */
2685 
2686 VALUE
2687 rb_thread_kill(VALUE thread)
2688 {
2689  rb_thread_t *th = rb_thread_ptr(thread);
2690 
2691  if (th->to_kill || th->status == THREAD_KILLED) {
2692  return thread;
2693  }
2694  if (th == th->vm->ractor.main_thread) {
2695  rb_exit(EXIT_SUCCESS);
2696  }
2697 
2698  thread_debug("rb_thread_kill: %p (%"PRI_THREAD_ID")\n", (void *)th, thread_id_str(th));
2699 
2700  if (th == GET_THREAD()) {
2701  /* kill myself immediately */
2702  rb_threadptr_to_kill(th);
2703  }
2704  else {
2705  threadptr_check_pending_interrupt_queue(th);
2706  rb_threadptr_pending_interrupt_enque(th, eKillSignal);
2707  rb_threadptr_interrupt(th);
2708  }
2709  return thread;
2710 }
2711 
2712 int
2713 rb_thread_to_be_killed(VALUE thread)
2714 {
2715  rb_thread_t *th = rb_thread_ptr(thread);
2716 
2717  if (th->to_kill || th->status == THREAD_KILLED) {
2718  return TRUE;
2719  }
2720  return FALSE;
2721 }
2722 
2723 /*
2724  * call-seq:
2725  * Thread.kill(thread) -> thread
2726  *
2727  * Causes the given +thread+ to exit; see also Thread::exit.
2728  *
2729  * count = 0
2730  * a = Thread.new { loop { count += 1 } }
2731  * sleep(0.1) #=> 0
2732  * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2733  * count #=> 93947
2734  * a.alive? #=> false
2735  */
2736 
2737 static VALUE
2738 rb_thread_s_kill(VALUE obj, VALUE th)
2739 {
2740  return rb_thread_kill(th);
2741 }
2742 
2743 
2744 /*
2745  * call-seq:
2746  * Thread.exit -> thread
2747  *
2748  * Terminates the currently running thread and schedules another thread to be
2749  * run.
2750  *
2751  * If this thread is already marked to be killed, ::exit returns the Thread.
2752  *
2753  * If this is the main thread, or the last thread, exits the process.
2754  */
2755 
2756 static VALUE
2757 rb_thread_exit(VALUE _)
2758 {
2759  rb_thread_t *th = GET_THREAD();
2760  return rb_thread_kill(th->self);
2761 }
2762 
2763 
2764 /*
2765  * call-seq:
2766  * thr.wakeup -> thr
2767  *
2768  * Marks a given thread as eligible for scheduling; however, it may still
2769  * remain blocked on I/O.
2770  *
2771  * *Note:* This does not invoke the scheduler; see #run for more information.
2772  *
2773  * c = Thread.new { Thread.stop; puts "hey!" }
2774  * sleep 0.1 while c.status!='sleep'
2775  * c.wakeup
2776  * c.join
2777  * #=> "hey!"
2778  */
2779 
2780 VALUE
2781 rb_thread_wakeup(VALUE thread)
2782 {
2783  if (!RTEST(rb_thread_wakeup_alive(thread))) {
2784  rb_raise(rb_eThreadError, "killed thread");
2785  }
2786  return thread;
2787 }
2788 
2789 VALUE
2790 rb_thread_wakeup_alive(VALUE thread)
2791 {
2792  rb_thread_t *target_th = rb_thread_ptr(thread);
2793  if (target_th->status == THREAD_KILLED) return Qnil;
2794 
2795  rb_threadptr_ready(target_th);
2796 
2797  if (target_th->status == THREAD_STOPPED ||
2798  target_th->status == THREAD_STOPPED_FOREVER) {
2799  target_th->status = THREAD_RUNNABLE;
2800  }
2801 
2802  return thread;
2803 }
2804 
2805 
2806 /*
2807  * call-seq:
2808  * thr.run -> thr
2809  *
2810  * Wakes up +thr+, making it eligible for scheduling.
2811  *
2812  * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2813  * sleep 0.1 while a.status!='sleep'
2814  * puts "Got here"
2815  * a.run
2816  * a.join
2817  *
2818  * This will produce:
2819  *
2820  * a
2821  * Got here
2822  * c
2823  *
2824  * See also the instance method #wakeup.
2825  */
2826 
2827 VALUE
2828 rb_thread_run(VALUE thread)
2829 {
2830  rb_thread_wakeup(thread);
2831  rb_thread_schedule();
2832  return thread;
2833 }
2834 
2835 
2836 VALUE
2837 rb_thread_stop(void)
2838 {
2839  if (rb_thread_alone()) {
2840  rb_raise(rb_eThreadError,
2841  "stopping only thread\n\tnote: use sleep to stop forever");
2842  }
2843  rb_thread_sleep_deadly();
2844  return Qnil;
2845 }
2846 
2847 /*
2848  * call-seq:
2849  * Thread.stop -> nil
2850  *
2851  * Stops execution of the current thread, putting it into a ``sleep'' state,
2852  * and schedules execution of another thread.
2853  *
2854  * a = Thread.new { print "a"; Thread.stop; print "c" }
2855  * sleep 0.1 while a.status!='sleep'
2856  * print "b"
2857  * a.run
2858  * a.join
2859  * #=> "abc"
2860  */
2861 
2862 static VALUE
2863 thread_stop(VALUE _)
2864 {
2865  return rb_thread_stop();
2866 }
2867 
2868 /********************************************************************/
2869 
2870 VALUE
2871 rb_thread_list(void)
2872 {
2873  // TODO
2874  return rb_ractor_thread_list(GET_RACTOR());
2875 }
2876 
2877 /*
2878  * call-seq:
2879  * Thread.list -> array
2880  *
2881  * Returns an array of Thread objects for all threads that are either runnable
2882  * or stopped.
2883  *
2884  * Thread.new { sleep(200) }
2885  * Thread.new { 1000000.times {|i| i*i } }
2886  * Thread.new { Thread.stop }
2887  * Thread.list.each {|t| p t}
2888  *
2889  * This will produce:
2890  *
2891  * #<Thread:0x401b3e84 sleep>
2892  * #<Thread:0x401b3f38 run>
2893  * #<Thread:0x401b3fb0 sleep>
2894  * #<Thread:0x401bdf4c run>
2895  */
2896 
2897 static VALUE
2898 thread_list(VALUE _)
2899 {
2900  return rb_thread_list();
2901 }
2902 
2903 VALUE
2904 rb_thread_current(void)
2905 {
2906  return GET_THREAD()->self;
2907 }
2908 
2909 /*
2910  * call-seq:
2911  * Thread.current -> thread
2912  *
2913  * Returns the currently executing thread.
2914  *
2915  * Thread.current #=> #<Thread:0x401bdf4c run>
2916  */
2917 
2918 static VALUE
2919 thread_s_current(VALUE klass)
2920 {
2921  return rb_thread_current();
2922 }
2923 
2924 VALUE
2925 rb_thread_main(void)
2926 {
2927  return GET_RACTOR()->threads.main->self;
2928 }
2929 
2930 /*
2931  * call-seq:
2932  * Thread.main -> thread
2933  *
2934  * Returns the main thread.
2935  */
2936 
2937 static VALUE
2938 rb_thread_s_main(VALUE klass)
2939 {
2940  return rb_thread_main();
2941 }
2942 
2943 
2944 /*
2945  * call-seq:
2946  * Thread.abort_on_exception -> true or false
2947  *
2948  * Returns the status of the global ``abort on exception'' condition.
2949  *
2950  * The default is +false+.
2951  *
2952  * When set to +true+, if any thread is aborted by an exception, the
2953  * raised exception will be re-raised in the main thread.
2954  *
2955  * Can also be specified by the global $DEBUG flag or command line option
2956  * +-d+.
2957  *
2958  * See also ::abort_on_exception=.
2959  *
2960  * There is also an instance level method to set this for a specific thread,
2961  * see #abort_on_exception.
2962  */
2963 
2964 static VALUE
2965 rb_thread_s_abort_exc(VALUE _)
2966 {
2967  return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
2968 }
2969 
2970 
2971 /*
2972  * call-seq:
2973  * Thread.abort_on_exception= boolean -> true or false
2974  *
2975  * When set to +true+, if any thread is aborted by an exception, the
2976  * raised exception will be re-raised in the main thread.
2977  * Returns the new state.
2978  *
2979  *   Thread.abort_on_exception = true
2980  *   t1 = Thread.new do
2981  *     puts "In new thread"
2982  *     raise "Exception from thread"
2983  *   end
2984  *   sleep(1)
2985  *   puts "not reached"
2986  *
2987  * This will produce:
2988  *
2989  * In new thread
2990  * prog.rb:4: Exception from thread (RuntimeError)
2991  * from prog.rb:2:in `initialize'
2992  * from prog.rb:2:in `new'
2993  * from prog.rb:2
2994  *
2995  * See also ::abort_on_exception.
2996  *
2997  * There is also an instance level method to set this for a specific thread,
2998  * see #abort_on_exception=.
2999  */
3000 
3001 static VALUE
3002 rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3003 {
3004  GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3005  return val;
3006 }
3007 
3008 
3009 /*
3010  * call-seq:
3011  * thr.abort_on_exception -> true or false
3012  *
3013  * Returns the status of the thread-local ``abort on exception'' condition for
3014  * this +thr+.
3015  *
3016  * The default is +false+.
3017  *
3018  * See also #abort_on_exception=.
3019  *
3020  * There is also a class level method to set this for all threads, see
3021  * ::abort_on_exception.
3022  */
3023 
3024 static VALUE
3025 rb_thread_abort_exc(VALUE thread)
3026 {
3027  return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3028 }
3029 
3030 
3031 /*
3032  * call-seq:
3033  * thr.abort_on_exception= boolean -> true or false
3034  *
3035  * When set to +true+, if this +thr+ is aborted by an exception, the
3036  * raised exception will be re-raised in the main thread.
3037  *
3038  * See also #abort_on_exception.
3039  *
3040  * There is also a class level method to set this for all threads, see
3041  * ::abort_on_exception=.
3042  */
3043 
3044 static VALUE
3045 rb_thread_abort_exc_set(VALUE thread, VALUE val)
3046 {
3047  rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3048  return val;
3049 }
3050 
3051 
3052 /*
3053  * call-seq:
3054  * Thread.report_on_exception -> true or false
3055  *
3056  * Returns the status of the global ``report on exception'' condition.
3057  *
3058  * The default is +true+ since Ruby 2.5.
3059  *
3060  * All threads created when this flag is true will report
3061  * a message on $stderr if an exception kills the thread.
3062  *
3063  * Thread.new { 1.times { raise } }
3064  *
3065  * will produce this output on $stderr:
3066  *
3067  * #<Thread:...> terminated with exception (report_on_exception is true):
3068  * Traceback (most recent call last):
3069  * 2: from -e:1:in `block in <main>'
3070  * 1: from -e:1:in `times'
3071  *
3072  * This is done to catch errors in threads early.
3073  * In some cases, you might not want this output.
3074  * There are multiple ways to avoid the extra output:
3075  *
3076  * * If the exception is not intended, it is best to fix the cause of
3077  * the exception so it does not happen anymore.
3078  * * If the exception is intended, it might be better to rescue it closer to
3079  * where it is raised rather than letting it kill the Thread.
3080  * * If it is guaranteed the Thread will be joined with Thread#join or
3081  * Thread#value, then it is safe to disable this report with
3082  * <code>Thread.current.report_on_exception = false</code>
3083  * when starting the Thread.
3084  * However, this might handle the exception much later, or not at all
3085  * if the Thread is never joined due to the parent thread being blocked, etc.
3086  *
3087  * See also ::report_on_exception=.
3088  *
3089  * There is also an instance level method to set this for a specific thread,
3090  * see #report_on_exception=.
3091  *
3092  */
3093 
3094 static VALUE
3095 rb_thread_s_report_exc(VALUE _)
3096 {
3097  return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3098 }
3099 
3100 
3101 /*
3102  * call-seq:
3103  * Thread.report_on_exception= boolean -> true or false
3104  *
3105  * Returns the new state.
3106  * When set to +true+, all threads created afterwards will inherit the
3107  * condition and report a message on $stderr if an exception kills a thread:
3108  *
3109  *   Thread.report_on_exception = true
3110  *   t1 = Thread.new do
3111  *     puts "In new thread"
3112  *     raise "Exception from thread"
3113  *   end
3114  *   sleep(1)
3115  *   puts "In the main thread"
3116  *
3117  * This will produce:
3118  *
3119  * In new thread
3120  * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3121  * Traceback (most recent call last):
3122  * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3123  * In the main thread
3124  *
3125  * See also ::report_on_exception.
3126  *
3127  * There is also an instance level method to set this for a specific thread,
3128  * see #report_on_exception=.
3129  */
3130 
3131 static VALUE
3132 rb_thread_s_report_exc_set(VALUE self, VALUE val)
3133 {
3134  GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3135  return val;
3136 }
3137 
3138 
3139 /*
3140  * call-seq:
3141  * Thread.ignore_deadlock -> true or false
3142  *
3143  * Returns the status of the global ``ignore deadlock'' condition.
3144  * The default is +false+, so that deadlock conditions are not ignored.
3145  *
3146  * See also ::ignore_deadlock=.
3147  *
3148  */
3149 
3150 static VALUE
3151 rb_thread_s_ignore_deadlock(VALUE _)
3152 {
3153  return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3154 }
3155 
3156 
3157 /*
3158  * call-seq:
3159  * Thread.ignore_deadlock = boolean -> true or false
3160  *
3161  * Returns the new state.
3162  * When set to +true+, the VM will not check for deadlock conditions.
3163  * It is only useful to set this if your application can break a
3164  * deadlock condition via some other means, such as a signal.
3165  *
3166  * Thread.ignore_deadlock = true
3167  * queue = Thread::Queue.new
3168  *
3169  * trap(:SIGUSR1){queue.push "Received signal"}
3170  *
3171  * # raises fatal error unless ignoring deadlock
3172  * puts queue.pop
3173  *
3174  * See also ::ignore_deadlock.
3175  */
3176 
3177 static VALUE
3178 rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3179 {
3180  GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3181  return val;
3182 }
3183 
3184 
3185 /*
3186  * call-seq:
3187  * thr.report_on_exception -> true or false
3188  *
3189  * Returns the status of the thread-local ``report on exception'' condition for
3190  * this +thr+.
3191  *
3192  * The default value when creating a Thread is the value of
3193  * the global flag Thread.report_on_exception.
3194  *
3195  * See also #report_on_exception=.
3196  *
3197  * There is also a class level method to set this for all new threads, see
3198  * ::report_on_exception=.
3199  */
3200 
3201 static VALUE
3202 rb_thread_report_exc(VALUE thread)
3203 {
3204  return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3205 }
3206 
3207 
3208 /*
3209  * call-seq:
3210  * thr.report_on_exception= boolean -> true or false
3211  *
3212  * When set to +true+, a message is printed on $stderr if an exception
3213  * kills this +thr+. See ::report_on_exception for details.
3214  *
3215  * See also #report_on_exception.
3216  *
3217  * There is also a class level method to set this for all new threads, see
3218  * ::report_on_exception=.
3219  */
3220 
3221 static VALUE
3222 rb_thread_report_exc_set(VALUE thread, VALUE val)
3223 {
3224  rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3225  return val;
3226 }
3227 
3228 
3229 /*
3230  * call-seq:
3231  * thr.group -> thgrp or nil
3232  *
3233  * Returns the ThreadGroup which contains the given thread.
3234  *
3235  * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3236  */
3237 
3238 VALUE
3239 rb_thread_group(VALUE thread)
3240 {
3241  return rb_thread_ptr(thread)->thgroup;
3242 }
3243 
3244 static const char *
3245 thread_status_name(rb_thread_t *th, int detail)
3246 {
3247  switch (th->status) {
3248  case THREAD_RUNNABLE:
3249  return th->to_kill ? "aborting" : "run";
3250  case THREAD_STOPPED_FOREVER:
3251  if (detail) return "sleep_forever";
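        /* fall through: without detail, a forever sleep also reports "sleep" */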
3252  case THREAD_STOPPED:
3253  return "sleep";
3254  case THREAD_KILLED:
3255  return "dead";
3256  default:
3257  return "unknown";
3258  }
3259 }
3260 
3261 static int
3262 rb_threadptr_dead(rb_thread_t *th)
3263 {
3264  return th->status == THREAD_KILLED;
3265 }
3266 
3267 
3268 /*
3269  * call-seq:
3270  * thr.status -> string, false or nil
3271  *
3272  * Returns the status of +thr+.
3273  *
3274  * [<tt>"sleep"</tt>]
3275  * Returned if this thread is sleeping or waiting on I/O
3276  * [<tt>"run"</tt>]
3277  * When this thread is executing
3278  * [<tt>"aborting"</tt>]
3279  * If this thread is aborting
3280  * [+false+]
3281  * When this thread is terminated normally
3282  * [+nil+]
3283  * If terminated with an exception.
3284  *
3285  * a = Thread.new { raise("die now") }
3286  * b = Thread.new { Thread.stop }
3287  * c = Thread.new { Thread.exit }
3288  * d = Thread.new { sleep }
3289  * d.kill #=> #<Thread:0x401b3678 aborting>
3290  * a.status #=> nil
3291  * b.status #=> "sleep"
3292  * c.status #=> false
3293  * d.status #=> "aborting"
3294  * Thread.current.status #=> "run"
3295  *
3296  * See also the instance methods #alive? and #stop?
3297  */
3298 
3299 static VALUE
3300 rb_thread_status(VALUE thread)
3301 {
3302  rb_thread_t *target_th = rb_thread_ptr(thread);
3303 
3304  if (rb_threadptr_dead(target_th)) {
3305  if (!NIL_P(target_th->ec->errinfo) &&
3306  !FIXNUM_P(target_th->ec->errinfo)) {
3307  return Qnil;
3308  }
3309  else {
3310  return Qfalse;
3311  }
3312  }
3313  else {
3314  return rb_str_new2(thread_status_name(target_th, FALSE));
3315  }
3316 }
3317 
3318 
3319 /*
3320  * call-seq:
3321  * thr.alive? -> true or false
3322  *
3323  * Returns +true+ if +thr+ is running or sleeping.
3324  *
3325  * thr = Thread.new { }
3326  * thr.join #=> #<Thread:0x401b3fb0 dead>
3327  * Thread.current.alive? #=> true
3328  * thr.alive? #=> false
3329  *
3330  * See also #stop? and #status.
3331  */
3332 
3333 static VALUE
3334 rb_thread_alive_p(VALUE thread)
3335 {
3336  return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3337 }
3338 
3339 /*
3340  * call-seq:
3341  * thr.stop? -> true or false
3342  *
3343  * Returns +true+ if +thr+ is dead or sleeping.
3344  *
3345  * a = Thread.new { Thread.stop }
3346  * b = Thread.current
3347  * a.stop? #=> true
3348  * b.stop? #=> false
3349  *
3350  * See also #alive? and #status.
3351  */
3352 
3353 static VALUE
3354 rb_thread_stop_p(VALUE thread)
3355 {
3356  rb_thread_t *th = rb_thread_ptr(thread);
3357 
3358  if (rb_threadptr_dead(th)) {
3359  return Qtrue;
3360  }
3361  return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3362 }
3363 
3364 /*
3365  * call-seq:
3366  * thr.name -> string
3367  *
3368  * Returns the name of the thread.
3369  */
3370 
3371 static VALUE
3372 rb_thread_getname(VALUE thread)
3373 {
3374  return rb_thread_ptr(thread)->name;
3375 }
3376 
3377 /*
3378  * call-seq:
3379  * thr.name=(name) -> string
3380  *
3381  * Sets the given name on the Ruby thread.
3382  * On some platforms, the name may also be set on the underlying pthread and/or made visible to the kernel.
3383  */
3384 
3385 static VALUE
3386 rb_thread_setname(VALUE thread, VALUE name)
3387 {
3388  rb_thread_t *target_th = rb_thread_ptr(thread);
3389 
3390  if (!NIL_P(name)) {
3391  rb_encoding *enc;
3392  StringValueCStr(name);
3393  enc = rb_enc_get(name);
3394  if (!rb_enc_asciicompat(enc)) {
3395  rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3396  rb_enc_name(enc));
3397  }
3398  name = rb_str_new_frozen(name);
3399  }
3400  target_th->name = name;
3401  if (threadptr_initialized(target_th)) {
3402  native_set_another_thread_name(target_th->thread_id, name);
3403  }
3404  return name;
3405 }
3406 
3407 #if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3408 /*
3409  * call-seq:
3410  * thr.native_thread_id -> integer
3411  *
3412  * Returns the native thread ID used by the Ruby thread.
3413  *
3414  * The ID depends on the OS. (not POSIX thread ID returned by pthread_self(3))
3415  * * On Linux it is TID returned by gettid(2).
3416  * * On macOS it is the system-wide unique integral ID of thread returned
3417  * by pthread_threadid_np(3).
3418  * * On FreeBSD it is the unique integral ID of the thread returned by
3419  * pthread_getthreadid_np(3).
3420  * * On Windows it is the thread identifier returned by GetThreadId().
3421  * * On other platforms, it raises NotImplementedError.
3422  *
3423  * NOTE:
3424  * If the thread is not yet associated with, or has already been dissociated
3425  * from, a native thread, it returns _nil_.
3426  * If the Ruby implementation uses M:N thread model, the ID may change
3427  * depending on the timing.
3428  */
3429 
3430 static VALUE
3431 rb_thread_native_thread_id(VALUE thread)
3432 {
3433  rb_thread_t *target_th = rb_thread_ptr(thread);
3434  if (rb_threadptr_dead(target_th)) return Qnil;
3435  return native_thread_native_thread_id(target_th);
3436 }
3437 #else
3438 # define rb_thread_native_thread_id rb_f_notimplement
3439 #endif
3440 
3441 /*
3442  * call-seq:
3443  * thr.to_s -> string
3444  *
3445  * Dump the name, id, and status of _thr_ to a string.
3446  */
3447 
3448 static VALUE
3449 rb_thread_to_s(VALUE thread)
3450 {
3451  VALUE cname = rb_class_path(rb_obj_class(thread));
3452  rb_thread_t *target_th = rb_thread_ptr(thread);
3453  const char *status;
3454  VALUE str, loc;
3455 
3456  status = thread_status_name(target_th, TRUE);
3457  str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3458  if (!NIL_P(target_th->name)) {
3459  rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3460  }
3461  if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3462  rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3463  RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3464  }
3465  rb_str_catf(str, " %s>", status);
3466 
3467  return str;
3468 }
3469 
3470 /* variables for recursive traversals */
3471 #define recursive_key id__recursive_key__
3472 
3473 static VALUE
3474 threadptr_local_aref(rb_thread_t *th, ID id)
3475 {
3476  if (id == recursive_key) {
3477  return th->ec->local_storage_recursive_hash;
3478  }
3479  else {
3480  VALUE val;
3481  struct rb_id_table *local_storage = th->ec->local_storage;
3482 
3483  if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3484  return val;
3485  }
3486  else {
3487  return Qnil;
3488  }
3489  }
3490 }
3491 
3492 VALUE
3493 rb_thread_local_aref(VALUE thread, ID id)
3494 {
3495  return threadptr_local_aref(rb_thread_ptr(thread), id);
3496 }
3497 
3498 /*
3499  * call-seq:
3500  * thr[sym] -> obj or nil
3501  *
3502  * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3503  * if not explicitly inside a Fiber), using either a symbol or a string name.
3504  * If the specified variable does not exist, returns +nil+.
3505  *
3506  *   [
3507  *     Thread.new { Thread.current["name"] = "A" },
3508  *     Thread.new { Thread.current[:name] = "B" },
3509  *     Thread.new { Thread.current["name"] = "C" }
3510  *   ].each do |th|
3511  *     th.join
3512  *     puts "#{th.inspect}: #{th[:name]}"
3513  *   end
3514  *
3515  * This will produce:
3516  *
3517  * #<Thread:0x00000002a54220 dead>: A
3518  * #<Thread:0x00000002a541a8 dead>: B
3519  * #<Thread:0x00000002a54130 dead>: C
3520  *
3521  * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3522  * This confusion did not exist in Ruby 1.8 because
3523  * fibers have only been available since Ruby 1.9.
3524  * Ruby 1.9 chose to make these methods fiber-local in order to preserve
3525  * the following idiom for dynamic scope.
3526  *
3527  *   def meth(newvalue)
3528  *     begin
3529  *       oldvalue = Thread.current[:name]
3530  *       Thread.current[:name] = newvalue
3531  *       yield
3532  *     ensure
3533  *       Thread.current[:name] = oldvalue
3534  *     end
3535  *   end
3536  *
3537  * The idiom may not work as dynamic scope if the methods are thread-local
3538  * and a given block switches fibers.
3539  *
3540  *   f = Fiber.new {
3541  *     meth(1) {
3542  *       Fiber.yield
3543  *     }
3544  *   }
3545  *   meth(2) {
3546  *     f.resume
3547  *   }
3548  *   f.resume
3549  *   p Thread.current[:name]
3550  *   #=> nil if fiber-local
3551  *   #=> 2 if thread-local (the value 2 leaks outside of the meth method)
3552  *
3553  * For thread-local variables, please see #thread_variable_get and
3554  * #thread_variable_set.
3555  *
3556  */
3557 
3558 static VALUE
3559 rb_thread_aref(VALUE thread, VALUE key)
3560 {
3561  ID id = rb_check_id(&key);
3562  if (!id) return Qnil;
3563  return rb_thread_local_aref(thread, id);
3564 }
3565 
3566 /*
3567  * call-seq:
3568  * thr.fetch(sym) -> obj
3569  * thr.fetch(sym) { } -> obj
3570  * thr.fetch(sym, default) -> obj
3571  *
3572  * Returns a fiber-local for the given key. If the key can't be
3573  * found, there are several options: With no other arguments, it will
3574  * raise a KeyError exception; if <i>default</i> is given, then that
3575  * will be returned; if the optional code block is specified, then
3576  * that will be run and its result returned. See Thread#[] and
3577  * Hash#fetch.
3578  */
3579 static VALUE
3580 rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3581 {
3582  VALUE key, val;
3583  ID id;
3584  rb_thread_t *target_th = rb_thread_ptr(self);
3585  int block_given;
3586 
3587  rb_check_arity(argc, 1, 2);
3588  key = argv[0];
3589 
3590  block_given = rb_block_given_p();
3591  if (block_given && argc == 2) {
3592  rb_warn("block supersedes default value argument");
3593  }
3594 
3595  id = rb_check_id(&key);
3596 
3597  if (id == recursive_key) {
3598  return target_th->ec->local_storage_recursive_hash;
3599  }
3600  else if (id && target_th->ec->local_storage &&
3601  rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3602  return val;
3603  }
3604  else if (block_given) {
3605  return rb_yield(key);
3606  }
3607  else if (argc == 1) {
3608  rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3609  }
3610  else {
3611  return argv[1];
3612  }
3613 }
3614 
3615 static VALUE
3616 threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3617 {
3618  if (id == recursive_key) {
3619  th->ec->local_storage_recursive_hash = val;
3620  return val;
3621  }
3622  else {
3623  struct rb_id_table *local_storage = th->ec->local_storage;
3624 
3625  if (NIL_P(val)) {
3626  if (!local_storage) return Qnil;
3627  rb_id_table_delete(local_storage, id);
3628  return Qnil;
3629  }
3630  else {
3631  if (local_storage == NULL) {
3632  th->ec->local_storage = local_storage = rb_id_table_create(0);
3633  }
3634  rb_id_table_insert(local_storage, id, val);
3635  return val;
3636  }
3637  }
3638 }
3639 
3640 VALUE
3641 rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3642 {
3643  if (OBJ_FROZEN(thread)) {
3644  rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3645  }
3646 
3647  return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3648 }
3649 
3650 /*
3651  * call-seq:
3652  * thr[sym] = obj -> obj
3653  *
3654  * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3655  * using either a symbol or a string.
3656  *
3657  * See also Thread#[].
3658  *
3659  * For thread-local variables, please see #thread_variable_set and
3660  * #thread_variable_get.
3661  */
3662 
3663 static VALUE
3664 rb_thread_aset(VALUE self, VALUE id, VALUE val)
3665 {
3666  return rb_thread_local_aset(self, rb_to_id(id), val);
3667 }
3668 
3669 /*
3670  * call-seq:
3671  * thr.thread_variable_get(key) -> obj or nil
3672  *
3673  * Returns the value of a thread local variable that has been set. Note that
3674  * these are different from fiber-local values. For fiber-local values,
3675  * please see Thread#[] and Thread#[]=.
3676  *
3677  * Thread local values are carried along with threads, and do not respect
3678  * fibers. For example:
3679  *
3680  *   Thread.new {
3681  *     Thread.current.thread_variable_set("foo", "bar") # set a thread local
3682  *     Thread.current["foo"] = "bar" # set a fiber local
3683  *
3684  *     Fiber.new {
3685  *       Fiber.yield [
3686  *         Thread.current.thread_variable_get("foo"), # get the thread local
3687  *         Thread.current["foo"], # get the fiber local
3688  *       ]
3689  *     }.resume
3690  *   }.join.value # => ['bar', nil]
3691  *
3692  * The value "bar" is returned for the thread local, whereas nil is returned
3693  * for the fiber local. The fiber is executed in the same thread, so the
3694  * thread local values are available.
3695  */
3696 
3697 static VALUE
3698 rb_thread_variable_get(VALUE thread, VALUE key)
3699 {
3700  VALUE locals;
3701 
3702  if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3703  return Qnil;
3704  }
3705  locals = rb_thread_local_storage(thread);
3706  return rb_hash_aref(locals, rb_to_symbol(key));
3707 }
3708 
3709 /*
3710  * call-seq:
3711  * thr.thread_variable_set(key, value)
3712  *
3713  * Sets a thread local with +key+ to +value+. Note that these are local to
3714  * threads, and not to fibers. Please see Thread#thread_variable_get and
3715  * Thread#[] for more information.
3716  */
3717 
3718 static VALUE
3719 rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3720 {
3721  VALUE locals;
3722 
3723  if (OBJ_FROZEN(thread)) {
3724  rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3725  }
3726 
3727  locals = rb_thread_local_storage(thread);
3728  return rb_hash_aset(locals, rb_to_symbol(key), val);
3729 }
3730 
3731 /*
3732  * call-seq:
3733  * thr.key?(sym) -> true or false
3734  *
3735  * Returns +true+ if the given string (or symbol) exists as a fiber-local
3736  * variable.
3737  *
3738  * me = Thread.current
3739  * me[:oliver] = "a"
3740  * me.key?(:oliver) #=> true
3741  * me.key?(:stanley) #=> false
3742  */
3743 
3744 static VALUE
3745 rb_thread_key_p(VALUE self, VALUE key)
3746 {
3747  VALUE val;
3748  ID id = rb_check_id(&key);
3749  struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3750 
3751  if (!id || local_storage == NULL) {
3752  return Qfalse;
3753  }
3754  return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3755 }
3756 
3757 static enum rb_id_table_iterator_result
3758 thread_keys_i(ID key, VALUE value, void *ary)
3759 {
3760  rb_ary_push((VALUE)ary, ID2SYM(key));
3761  return ID_TABLE_CONTINUE;
3762 }
3763 
3764 int
3765 rb_thread_alone(void)
3766 {
3767  // TODO
3768  return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3769 }
3770 
3771 /*
3772  * call-seq:
3773  * thr.keys -> array
3774  *
3775  * Returns an array of the names of the fiber-local variables (as Symbols).
3776  *
3777  *   thr = Thread.new do
3778  *     Thread.current[:cat] = 'meow'
3779  *     Thread.current["dog"] = 'woof'
3780  *   end
3781  *   thr.join #=> #<Thread:0x401b3f10 dead>
3782  *   thr.keys #=> [:dog, :cat]
3783  */
3784 
3785 static VALUE
3786 rb_thread_keys(VALUE self)
3787 {
3788  struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3789  VALUE ary = rb_ary_new();
3790 
3791  if (local_storage) {
3792  rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3793  }
3794  return ary;
3795 }
3796 
3797 static int
3798 keys_i(VALUE key, VALUE value, VALUE ary)
3799 {
3800  rb_ary_push(ary, key);
3801  return ST_CONTINUE;
3802 }
3803 
3804 /*
3805  * call-seq:
3806  * thr.thread_variables -> array
3807  *
3808  * Returns an array of the names of the thread-local variables (as Symbols).
3809  *
3810  *   thr = Thread.new do
3811  *     Thread.current.thread_variable_set(:cat, 'meow')
3812  *     Thread.current.thread_variable_set("dog", 'woof')
3813  *   end
3814  *   thr.join #=> #<Thread:0x401b3f10 dead>
3815  *   thr.thread_variables #=> [:dog, :cat]
3816  *
3817  * Note that these are not fiber local variables. Please see Thread#[] and
3818  * Thread#thread_variable_get for more details.
3819  */
3820 
3821 static VALUE
3822 rb_thread_variables(VALUE thread)
3823 {
3824  VALUE locals;
3825  VALUE ary;
3826 
3827  ary = rb_ary_new();
3828  if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3829  return ary;
3830  }
3831  locals = rb_thread_local_storage(thread);
3832  rb_hash_foreach(locals, keys_i, ary);
3833 
3834  return ary;
3835 }
3836 
3837 /*
3838  * call-seq:
3839  * thr.thread_variable?(key) -> true or false
3840  *
3841  * Returns +true+ if the given string (or symbol) exists as a thread-local
3842  * variable.
3843  *
3844  * me = Thread.current
3845  * me.thread_variable_set(:oliver, "a")
3846  * me.thread_variable?(:oliver) #=> true
3847  * me.thread_variable?(:stanley) #=> false
3848  *
3849  * Note that these are not fiber local variables. Please see Thread#[] and
3850  * Thread#thread_variable_get for more details.
3851  */
3852 
3853 static VALUE
3854 rb_thread_variable_p(VALUE thread, VALUE key)
3855 {
3856  VALUE locals;
3857 
3858  if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3859  return Qfalse;
3860  }
3861  locals = rb_thread_local_storage(thread);
3862 
3863  return RBOOL(rb_hash_lookup(locals, rb_to_symbol(key)) != Qnil);
3864 }
3865 
3866 /*
3867  * call-seq:
3868  * thr.priority -> integer
3869  *
3870  * Returns the priority of <i>thr</i>. The default is inherited from the
3871  * thread which created the new thread, or zero for the
3872  * initial main thread; a higher-priority thread will run more frequently
3873  * than lower-priority threads (but lower-priority threads can also run).
3874  *
3875  * This is just a hint for the Ruby thread scheduler. It may be ignored on
3876  * some platforms.
3877  *
3878  * Thread.current.priority #=> 0
3879  */
3880 
3881 static VALUE
3882 rb_thread_priority(VALUE thread)
3883 {
3884  return INT2NUM(rb_thread_ptr(thread)->priority);
3885 }
3886 
3887 
3888 /*
3889  * call-seq:
3890  * thr.priority= integer -> thr
3891  *
3892  * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3893  * will run more frequently than lower-priority threads (but lower-priority
3894  * threads can also run).
3895  *
3896  * This is just a hint for the Ruby thread scheduler. It may be ignored on
3897  * some platforms.
3898  *
3899  *   count1 = count2 = 0
3900  *   a = Thread.new do
3901  *     loop { count1 += 1 }
3902  *   end
3903  *   a.priority = -1
3904  *
3905  *   b = Thread.new do
3906  *     loop { count2 += 1 }
3907  *   end
3908  *   b.priority = -2
3909  *   sleep 1 #=> 1
3910  *   count1 #=> 622504
3911  *   count2 #=> 5832
3912  */
3913 
3914 static VALUE
3915 rb_thread_priority_set(VALUE thread, VALUE prio)
3916 {
3917  rb_thread_t *target_th = rb_thread_ptr(thread);
3918  int priority;
3919 
3920 #if USE_NATIVE_THREAD_PRIORITY
3921  target_th->priority = NUM2INT(prio);
3922  native_thread_apply_priority(target_th);
3923 #else
3924  priority = NUM2INT(prio);
3925  if (priority > RUBY_THREAD_PRIORITY_MAX) {
3926  priority = RUBY_THREAD_PRIORITY_MAX;
3927  }
3928  else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3929  priority = RUBY_THREAD_PRIORITY_MIN;
3930  }
3931  target_th->priority = (int8_t)priority;
3932 #endif
3933  return INT2NUM(target_th->priority);
3934 }
3935 
3936 /* for IO */
3937 
3938 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3939 
3940 /*
3941  * several Unix platforms support file descriptors bigger than FD_SETSIZE
3942  * in select(2) system call.
3943  *
3944  * - Linux 2.2.12 (?)
3945  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3946  * select(2) documents how to allocate fd_set dynamically.
3947  * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3948  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3949  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3950  * select(2) documents how to allocate fd_set dynamically.
3951  * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3952  * - HP-UX documents how to allocate fd_set dynamically.
3953  * http://docs.hp.com/en/B2355-60105/select.2.html
3954  * - Solaris 8 has select_large_fdset
3955  * - Mac OS X 10.7 (Lion)
3956  * select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
3957  * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3958  * http://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/_index.html
3959  *
3960  * When fd_set is not big enough to hold big file descriptors,
3961  * it should be allocated dynamically.
3962  * Note that this assumes fd_set is structured as a bitmap.
3963  *
3964  * rb_fd_init allocates the memory.
3965  * rb_fd_term frees the memory.
3966  * rb_fd_set may re-allocate the bitmap.
3967  *
3968  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3969  */
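/*
 * A minimal usage sketch of this API from hypothetical extension code (the
 * descriptor `fd` and its origin are assumptions, not part of this file):
 *
 *     rb_fdset_t fds;
 *     struct timeval tv = { 1, 0 };          // one-second timeout
 *
 *     rb_fd_init(&fds);                      // allocate the bitmap
 *     rb_fd_set(fd, &fds);                   // may grow it past FD_SETSIZE
 *     if (rb_fd_select(fd + 1, &fds, NULL, NULL, &tv) > 0 &&
 *         rb_fd_isset(fd, &fds)) {
 *         // fd is readable
 *     }
 *     rb_fd_term(&fds);                      // release the bitmap
 */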
3970 
3971 void
3972 rb_fd_init(rb_fdset_t *fds)
3973 {
3974  fds->maxfd = 0;
3975  fds->fdset = ALLOC(fd_set);
3976  FD_ZERO(fds->fdset);
3977 }
3978 
3979 void
3980 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
3981 {
3982  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3983 
3984  if (size < sizeof(fd_set))
3985  size = sizeof(fd_set);
3986  dst->maxfd = src->maxfd;
3987  dst->fdset = xmalloc(size);
3988  memcpy(dst->fdset, src->fdset, size);
3989 }
3990 
3991 void
3992 rb_fd_term(rb_fdset_t *fds)
3993 {
3994  if (fds->fdset) xfree(fds->fdset);
3995  fds->maxfd = 0;
3996  fds->fdset = 0;
3997 }
3998 
3999 void
4000 rb_fd_zero(rb_fdset_t *fds)
4001 {
4002  if (fds->fdset)
4003  MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4004 }
4005 
4006 static void
4007 rb_fd_resize(int n, rb_fdset_t *fds)
4008 {
4009  size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4010  size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4011 
4012  if (m < sizeof(fd_set)) m = sizeof(fd_set);
4013  if (o < sizeof(fd_set)) o = sizeof(fd_set);
4014 
4015  if (m > o) {
4016  fds->fdset = xrealloc(fds->fdset, m);
4017  memset((char *)fds->fdset + o, 0, m - o);
4018  }
4019  if (n >= fds->maxfd) fds->maxfd = n + 1;
4020 }
4021 
4022 void
4023 rb_fd_set(int n, rb_fdset_t *fds)
4024 {
4025  rb_fd_resize(n, fds);
4026  FD_SET(n, fds->fdset);
4027 }
4028 
4029 void
4030 rb_fd_clr(int n, rb_fdset_t *fds)
4031 {
4032  if (n >= fds->maxfd) return;
4033  FD_CLR(n, fds->fdset);
4034 }
4035 
4036 int
4037 rb_fd_isset(int n, const rb_fdset_t *fds)
4038 {
4039  if (n >= fds->maxfd) return 0;
4040  return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4041 }
4042 
4043 void
4044 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4045 {
4046  size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4047 
4048  if (size < sizeof(fd_set)) size = sizeof(fd_set);
4049  dst->maxfd = max;
4050  dst->fdset = xrealloc(dst->fdset, size);
4051  memcpy(dst->fdset, src, size);
4052 }
4053 
4054 void
4055 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4056 {
4057  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4058 
4059  if (size < sizeof(fd_set))
4060  size = sizeof(fd_set);
4061  dst->maxfd = src->maxfd;
4062  dst->fdset = xrealloc(dst->fdset, size);
4063  memcpy(dst->fdset, src->fdset, size);
4064 }
4065 
4066 int
4067 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4068 {
4069  fd_set *r = NULL, *w = NULL, *e = NULL;
4070  if (readfds) {
4071  rb_fd_resize(n - 1, readfds);
4072  r = rb_fd_ptr(readfds);
4073  }
4074  if (writefds) {
4075  rb_fd_resize(n - 1, writefds);
4076  w = rb_fd_ptr(writefds);
4077  }
4078  if (exceptfds) {
4079  rb_fd_resize(n - 1, exceptfds);
4080  e = rb_fd_ptr(exceptfds);
4081  }
4082  return select(n, r, w, e, timeout);
4083 }
4084 
4085 #define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4086 
4087 #undef FD_ZERO
4088 #undef FD_SET
4089 #undef FD_CLR
4090 #undef FD_ISSET
4091 
4092 #define FD_ZERO(f) rb_fd_zero(f)
4093 #define FD_SET(i, f) rb_fd_set((i), (f))
4094 #define FD_CLR(i, f) rb_fd_clr((i), (f))
4095 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
4096 
4097 #elif defined(_WIN32)
4098 
4099 void
4100 rb_fd_init(rb_fdset_t *set)
4101 {
4102  set->capa = FD_SETSIZE;
4103  set->fdset = ALLOC(fd_set);
4104  FD_ZERO(set->fdset);
4105 }
4106 
4107 void
4108 rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4109 {
4110  rb_fd_init(dst);
4111  rb_fd_dup(dst, src);
4112 }
4113 
4114 void
4115 rb_fd_term(rb_fdset_t *set)
4116 {
4117  xfree(set->fdset);
4118  set->fdset = NULL;
4119  set->capa = 0;
4120 }
4121 
4122 void
4123 rb_fd_set(int fd, rb_fdset_t *set)
4124 {
4125  unsigned int i;
4126  SOCKET s = rb_w32_get_osfhandle(fd);
4127 
4128  for (i = 0; i < set->fdset->fd_count; i++) {
4129  if (set->fdset->fd_array[i] == s) {
4130  return;
4131  }
4132  }
4133  if (set->fdset->fd_count >= (unsigned)set->capa) {
4134  set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4135  set->fdset =
4136  rb_xrealloc_mul_add(
4137  set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4138  }
4139  set->fdset->fd_array[set->fdset->fd_count++] = s;
4140 }
4141 
4142 #undef FD_ZERO
4143 #undef FD_SET
4144 #undef FD_CLR
4145 #undef FD_ISSET
4146 
4147 #define FD_ZERO(f) rb_fd_zero(f)
4148 #define FD_SET(i, f) rb_fd_set((i), (f))
4149 #define FD_CLR(i, f) rb_fd_clr((i), (f))
4150 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
4151 
4152 #define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4153 
4154 #endif
4155 
4156 #ifndef rb_fd_no_init
4157 #define rb_fd_no_init(fds) (void)(fds)
4158 #endif
4159 
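/*
 * wait_retryable() decides whether an interrupted select/poll call should
 * be retried: a negative *result with errno EINTR (or ERESTART) is reset
 * to zero and retried (with *rel clamped to 0 once the deadline passed),
 * while a zero *result is treated as a spurious wakeup and retried unless
 * the relative timeout *rel has already expired.
 */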
4160 static int
4161 wait_retryable(int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4162 {
4163  if (*result < 0) {
4164  switch (errnum) {
4165  case EINTR:
4166 #ifdef ERESTART
4167  case ERESTART:
4168 #endif
4169  *result = 0;
4170  if (rel && hrtime_update_expire(rel, end)) {
4171  *rel = 0;
4172  }
4173  return TRUE;
4174  }
4175  return FALSE;
4176  }
4177  else if (*result == 0) {
4178  /* check for spurious wakeup */
4179  if (rel) {
4180  return !hrtime_update_expire(rel, end);
4181  }
4182  return TRUE;
4183  }
4184  return FALSE;
4185 }
4186 
4187 struct select_set {
4188  int max;
4189  int sigwait_fd;
4190  rb_thread_t *th;
4191  rb_fdset_t *rset;
4192  rb_fdset_t *wset;
4193  rb_fdset_t *eset;
4194  rb_fdset_t orig_rset;
4195  rb_fdset_t orig_wset;
4196  rb_fdset_t orig_eset;
4197  struct timeval *timeout;
4198 };
4199 
4200 static VALUE
4201 select_set_free(VALUE p)
4202 {
4203  struct select_set *set = (struct select_set *)p;
4204 
4205  if (set->sigwait_fd >= 0) {
4206  rb_sigwait_fd_put(set->th, set->sigwait_fd);
4207  rb_sigwait_fd_migrate(set->th->vm);
4208  }
4209 
4210  rb_fd_term(&set->orig_rset);
4211  rb_fd_term(&set->orig_wset);
4212  rb_fd_term(&set->orig_eset);
4213 
4214  return Qfalse;
4215 }
4216 
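/*
 * While this thread holds sigwait_fd (and other threads may need waking),
 * cap the sleep interval at one scheduler time quantum so pending signals
 * keep being polled; *drained_p records whether check_signals_nogvl()
 * consumed anything.
 */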
4217 static const rb_hrtime_t *
4218 sigwait_timeout(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *orig,
4219  int *drained_p)
4220 {
4221  static const rb_hrtime_t quantum = TIME_QUANTUM_USEC * 1000;
4222 
4223  if (sigwait_fd >= 0 && (!ubf_threads_empty() || BUSY_WAIT_SIGNALS)) {
4224  *drained_p = check_signals_nogvl(th, sigwait_fd);
4225  if (!orig || *orig > quantum)
4226  return &quantum;
4227  }
4228 
4229  return orig;
4230 }
4231 
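/*
 * If the positive select/poll result includes a hit on sigwait_fd (cond),
 * consume that one hit from the result count and yield sigwait_fd so the
 * caller can drain pending signals; otherwise yield -1.
 */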
4232 #define sigwait_signals_fd(result, cond, sigwait_fd) \
4233  (result > 0 && (cond) ? (result--, (sigwait_fd)) : -1)
4234 
4235 static VALUE
4236 do_select(VALUE p)
4237 {
4238  struct select_set *set = (struct select_set *)p;
4239  int result = 0;
4240  int lerrno;
4241  rb_hrtime_t *to, rel, end = 0;
4242 
4243  timeout_prepare(&to, &rel, &end, set->timeout);
4244 #define restore_fdset(dst, src) \
4245  ((dst) ? rb_fd_dup(dst, src) : (void)0)
4246 #define do_select_update() \
4247  (restore_fdset(set->rset, &set->orig_rset), \
4248  restore_fdset(set->wset, &set->orig_wset), \
4249  restore_fdset(set->eset, &set->orig_eset), \
4250  TRUE)
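/*
 * select(2) mutates the fd sets passed to it, so before each retry the
 * caller-visible sets are restored from the pristine orig_* copies that
 * rb_thread_fd_select() captured, via the do_select_update() macro above.
 */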
4251 
4252  do {
4253  int drained;
4254  lerrno = 0;
4255 
4256  BLOCKING_REGION(set->th, {
4257  const rb_hrtime_t *sto;
4258  struct timeval tv;
4259 
4260  sto = sigwait_timeout(set->th, set->sigwait_fd, to, &drained);
4261  if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4262  result = native_fd_select(set->max, set->rset, set->wset,
4263  set->eset,
4264  rb_hrtime2timeval(&tv, sto), set->th);
4265  if (result < 0) lerrno = errno;
4266  }
4267  }, set->sigwait_fd >= 0 ? ubf_sigwait : ubf_select, set->th, TRUE);
4268 
4269  if (set->sigwait_fd >= 0) {
4270  int fd = sigwait_signals_fd(result,
4271  rb_fd_isset(set->sigwait_fd, set->rset),
4272  set->sigwait_fd);
4273  (void)check_signals_nogvl(set->th, fd);
4274  }
4275 
4276  RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4277  } while (wait_retryable(&result, lerrno, to, end) && do_select_update());
4278 
4279  if (result < 0) {
4280  errno = lerrno;
4281  }
4282 
4283  return (VALUE)result;
4284 }
4285 
4286 static rb_fdset_t *
4287 init_set_fd(int fd, rb_fdset_t *fds)
4288 {
4289  if (fd < 0) {
4290  return 0;
4291  }
4292  rb_fd_init(fds);
4293  rb_fd_set(fd, fds);
4294 
4295  return fds;
4296 }
4297 
4298 int
4299 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4300  struct timeval *timeout)
4301 {
4302  struct select_set set;
4303 
4304  set.th = GET_THREAD();
4305  RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4306  set.max = max;
4307  set.rset = read;
4308  set.wset = write;
4309  set.eset = except;
4310  set.timeout = timeout;
4311 
4312  if (!set.rset && !set.wset && !set.eset) {
4313  if (!timeout) {
4314  rb_thread_sleep_forever();
4315  return 0;
4316  }
4317  rb_thread_wait_for(*timeout);
4318  return 0;
4319  }
4320 
4321  set.sigwait_fd = rb_sigwait_fd_get(set.th);
4322  if (set.sigwait_fd >= 0) {
4323  if (set.rset)
4324  rb_fd_set(set.sigwait_fd, set.rset);
4325  else
4326  set.rset = init_set_fd(set.sigwait_fd, &set.orig_rset);
4327  if (set.sigwait_fd >= set.max) {
4328  set.max = set.sigwait_fd + 1;
4329  }
4330  }
4331 #define fd_init_copy(f) do { \
4332  if (set.f) { \
4333  rb_fd_resize(set.max - 1, set.f); \
4334  if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4335  rb_fd_init_copy(&set.orig_##f, set.f); \
4336  } \
4337  } \
4338  else { \
4339  rb_fd_no_init(&set.orig_##f); \
4340  } \
4341  } while (0)
4342  fd_init_copy(rset);
4343  fd_init_copy(wset);
4344  fd_init_copy(eset);
4345 #undef fd_init_copy
4346 
4347  return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4348 }
4349 
4350 #ifdef USE_POLL
4351 
4352 /* The same as the Linux kernel. TODO: make a platform-independent definition. */
4353 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4354 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4355 #define POLLEX_SET (POLLPRI)
4356 
4357 #ifndef POLLERR_SET /* defined for FreeBSD for now */
4358 # define POLLERR_SET (0)
4359 #endif
4360 
4361 /*
4362  * returns a mask of events
4363  */
4364 int
4365 rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4366 {
4367  struct pollfd fds[2];
4368  int result = 0;
4369  int drained;
4370  nfds_t nfds;
4371  rb_unblock_function_t *ubf;
4372  struct waiting_fd wfd;
4373  int state;
4374  volatile int lerrno;
4375 
4376  wfd.th = GET_THREAD();
4377  wfd.fd = fd;
4378 
4379  RB_VM_LOCK_ENTER();
4380  {
4381  list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
4382  }
4383  RB_VM_LOCK_LEAVE();
4384 
4385  EC_PUSH_TAG(wfd.th->ec);
4386  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4387  rb_hrtime_t *to, rel, end = 0;
4388  RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4389  timeout_prepare(&to, &rel, &end, timeout);
4390  fds[0].fd = fd;
4391  fds[0].events = (short)events;
4392  fds[0].revents = 0;
4393  do {
4394  fds[1].fd = rb_sigwait_fd_get(wfd.th);
4395 
4396  if (fds[1].fd >= 0) {
4397  fds[1].events = POLLIN;
4398  fds[1].revents = 0;
4399  nfds = 2;
4400  ubf = ubf_sigwait;
4401  }
4402  else {
4403  nfds = 1;
4404  ubf = ubf_select;
4405  }
4406 
4407  lerrno = 0;
4408  BLOCKING_REGION(wfd.th, {
4409  const rb_hrtime_t *sto;
4410  struct timespec ts;
4411 
4412  sto = sigwait_timeout(wfd.th, fds[1].fd, to, &drained);
4413  if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
4414  result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, sto), 0);
4415  if (result < 0) lerrno = errno;
4416  }
4417  }, ubf, wfd.th, TRUE);
4418 
4419  if (fds[1].fd >= 0) {
4420  int fd1 = sigwait_signals_fd(result, fds[1].revents, fds[1].fd);
4421  (void)check_signals_nogvl(wfd.th, fd1);
4422  rb_sigwait_fd_put(wfd.th, fds[1].fd);
4423  rb_sigwait_fd_migrate(wfd.th->vm);
4424  }
4425  RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4426  } while (wait_retryable(&result, lerrno, to, end));
4427  }
4428  EC_POP_TAG();
4429 
4430  RB_VM_LOCK_ENTER();
4431  {
4432  list_del(&wfd.wfd_node);
4433  }
4434  RB_VM_LOCK_LEAVE();
4435 
4436  if (state) {
4437  EC_JUMP_TAG(wfd.th->ec, state);
4438  }
4439 
4440  if (result < 0) {
4441  errno = lerrno;
4442  return -1;
4443  }
4444 
4445  if (fds[0].revents & POLLNVAL) {
4446  errno = EBADF;
4447  return -1;
4448  }
4449 
4450  /*
4451  * POLLIN and POLLOUT have different meanings from select(2)'s read/write
4452  * bits. Therefore we need to fix the result up.
4453  */
4454  result = 0;
4455  if (fds[0].revents & POLLIN_SET)
4456  result |= RB_WAITFD_IN;
4457  if (fds[0].revents & POLLOUT_SET)
4458  result |= RB_WAITFD_OUT;
4459  if (fds[0].revents & POLLEX_SET)
4460  result |= RB_WAITFD_PRI;
4461 
4462  /* all requested events are ready if there is an error */
4463  if (fds[0].revents & POLLERR_SET)
4464  result |= events;
4465 
4466  return result;
4467 }
4468 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4469 struct select_args {
4470  union {
4471  int fd;
4472  int error;
4473  } as;
4474  rb_fdset_t *read;
4475  rb_fdset_t *write;
4476  rb_fdset_t *except;
4477  struct waiting_fd wfd;
4478  struct timeval *tv;
4479 };
4480 
4481 static VALUE
4482 select_single(VALUE ptr)
4483 {
4484  struct select_args *args = (struct select_args *)ptr;
4485  int r;
4486 
4487  r = rb_thread_fd_select(args->as.fd + 1,
4488  args->read, args->write, args->except, args->tv);
4489  if (r == -1)
4490  args->as.error = errno;
4491  if (r > 0) {
4492  r = 0;
4493  if (args->read && rb_fd_isset(args->as.fd, args->read))
4494  r |= RB_WAITFD_IN;
4495  if (args->write && rb_fd_isset(args->as.fd, args->write))
4496  r |= RB_WAITFD_OUT;
4497  if (args->except && rb_fd_isset(args->as.fd, args->except))
4498  r |= RB_WAITFD_PRI;
4499  }
4500  return (VALUE)r;
4501 }
4502 
4503 static VALUE
4504 select_single_cleanup(VALUE ptr)
4505 {
4506  struct select_args *args = (struct select_args *)ptr;
4507 
4508  RB_VM_LOCK_ENTER();
4509  {
4510  list_del(&args->wfd.wfd_node);
4511  }
4512  RB_VM_LOCK_LEAVE();
4513  if (args->read) rb_fd_term(args->read);
4514  if (args->write) rb_fd_term(args->write);
4515  if (args->except) rb_fd_term(args->except);
4516 
4517  return (VALUE)-1;
4518 }
4519 
4520 int
4521 rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4522 {
4523  rb_fdset_t rfds, wfds, efds;
4524  struct select_args args;
4525  int r;
4526  VALUE ptr = (VALUE)&args;
4527 
4528  args.as.fd = fd;
4529  args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4530  args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4531  args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4532  args.tv = timeout;
4533  args.wfd.fd = fd;
4534  args.wfd.th = GET_THREAD();
4535 
4536  RB_VM_LOCK_ENTER();
4537  {
4538  list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
4539  }
4540  RB_VM_LOCK_LEAVE();
4541 
4542  r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4543  if (r == -1)
4544  errno = args.as.error;
4545 
4546  return r;
4547 }
4548 #endif /* ! USE_POLL */
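/*
 * Whichever implementation is compiled in, the contract is the same.  A
 * hedged sketch of a hypothetical caller (fd and its origin are assumed)
 * waiting up to 500ms for readability:
 *
 *     struct timeval tv = { 0, 500000 };
 *     int rv = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *
 *     if (rv < 0) rb_sys_fail(0);          // errno is set; EBADF on POLLNVAL
 *     else if (rv == 0) { ... }            // timeout expired
 *     else if (rv & RB_WAITFD_IN) { ... }  // fd is readable
 */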
4549 
4550 /*
4551  * for GC
4552  */
4553 
4554 #ifdef USE_CONSERVATIVE_STACK_END
4555 void
4556 rb_gc_set_stack_end(VALUE **stack_end_p)
4557 {
4558  VALUE stack_end;
4559  *stack_end_p = &stack_end;
4560 }
4561 #endif
4562 
4563 /*
4564  *
4565  */
4566 
4567 void
4568 rb_threadptr_check_signal(rb_thread_t *mth)
4569 {
4570  /* mth must be main_thread */
4571  if (rb_signal_buff_size() > 0) {
4572  /* wakeup main thread */
4573  threadptr_trap_interrupt(mth);
4574  }
4575 }
4576 
4577 static void
4578 async_bug_fd(const char *mesg, int errno_arg, int fd)
4579 {
4580  char buff[64];
4581  size_t n = strlcpy(buff, mesg, sizeof(buff));
4582  if (n < sizeof(buff)-3) {
4583  ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4584  }
4585  rb_async_bug_errno(buff, errno_arg);
4586 }
4587 
4588 /* VM-dependent API is not available for this function */
4589 static int
4590 consume_communication_pipe(int fd)
4591 {
4592 #if USE_EVENTFD
4593  uint64_t buff[1];
4594 #else
4595  /* the buffer can be shared because no one else refers to it. */
4596  static char buff[1024];
4597 #endif
4598  ssize_t result;
4599  int ret = FALSE; /* for rb_sigwait_sleep */
4600 
4601  /*
4602  * Disarm UBF_TIMER before we read, because it can become re-armed
4603  * at any time via a signal handler, refilling the pipe. We can
4604  * disarm it because this thread is now processing signals and we
4605  * do not want unnecessary SIGVTALRMs.
4606  */
4607  ubf_timer_disarm();
4608 
4609  while (1) {
4610  result = read(fd, buff, sizeof(buff));
4611  if (result > 0) {
4612  ret = TRUE;
4613  if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4614  return ret;
4615  }
4616  }
4617  else if (result == 0) {
4618  return ret;
4619  }
4620  else if (result < 0) {
4621  int e = errno;
4622  switch (e) {
4623  case EINTR:
4624  continue; /* retry */
4625  case EAGAIN:
4626 #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4627  case EWOULDBLOCK:
4628 #endif
4629  return ret;
4630  default:
4631  async_bug_fd("consume_communication_pipe: read", e, fd);
4632  }
4633  }
4634  }
4635 }
4636 
4637 static int
4638 check_signals_nogvl(rb_thread_t *th, int sigwait_fd)
4639 {
4640  rb_vm_t *vm = GET_VM(); /* th may be 0 */
4641  int ret = sigwait_fd >= 0 ? consume_communication_pipe(sigwait_fd) : FALSE;
4642  ubf_wakeup_all_threads();
4643  ruby_sigchld_handler(vm);
4644  if (rb_signal_buff_size()) {
4645  if (th == vm->ractor.main_thread) {
4646  /* no need to lock + wakeup if already in main thread */
4647  RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
4648  }
4649  else {
4650  threadptr_trap_interrupt(vm->ractor.main_thread);
4651  }
4652  ret = TRUE; /* for SIGCHLD_LOSSY && rb_sigwait_sleep */
4653  }
4654  return ret;
4655 }
4656 
4657 void
4658 rb_thread_stop_timer_thread(void)
4659 {
4660  if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4661  native_reset_timer_thread();
4662  }
4663 }
4664 
4665 void
4666 rb_thread_reset_timer_thread(void)
4667 {
4668  native_reset_timer_thread();
4669 }
4670 
4671 void
4672 rb_thread_start_timer_thread(void)
4673 {
4674  system_working = 1;
4675  rb_thread_create_timer_thread();
4676 }
4677 
4678 static int
4679 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4680 {
4681  int i;
4682  VALUE coverage = (VALUE)val;
4683  VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4684  VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4685 
4686  if (lines) {
4687  if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4688  rb_ary_clear(lines);
4689  }
4690  else {
4691  int i;
4692  for (i = 0; i < RARRAY_LEN(lines); i++) {
4693  if (RARRAY_AREF(lines, i) != Qnil)
4694  RARRAY_ASET(lines, i, INT2FIX(0));
4695  }
4696  }
4697  }
4698  if (branches) {
4699  VALUE counters = RARRAY_AREF(branches, 1);
4700  for (i = 0; i < RARRAY_LEN(counters); i++) {
4701  RARRAY_ASET(counters, i, INT2FIX(0));
4702  }
4703  }
4704 
4705  return ST_CONTINUE;
4706 }
4707 
4708 void
4709 rb_clear_coverages(void)
4710 {
4711  VALUE coverages = rb_get_coverages();
4712  if (RTEST(coverages)) {
4713  rb_hash_foreach(coverages, clear_coverage_i, 0);
4714  }
4715 }
4716 
4717 #if defined(HAVE_WORKING_FORK)
4718 
4719 static void
4720 rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4721 {
4722  rb_thread_t *i = 0;
4723  rb_vm_t *vm = th->vm;
4724  rb_ractor_t *r = th->ractor;
4725  vm->ractor.main_ractor = r;
4726  vm->ractor.main_thread = th;
4727  r->threads.main = th;
4728  r->status_ = ractor_created;
4729 
4730  gvl_atfork(rb_ractor_gvl(th->ractor));
4731  ubf_list_atfork();
4732 
4733  // OK. Only this thread accesses:
4734  list_for_each(&vm->ractor.set, r, vmlr_node) {
4735  list_for_each(&r->threads.set, i, lt_node) {
4736  atfork(i, th);
4737  }
4738  }
4739  rb_vm_living_threads_init(vm);
4740 
4741  rb_ractor_atfork(vm, th);
4742 
4743  /* may be held by MJIT threads in parent */
4744  rb_native_mutex_initialize(&vm->waitpid_lock);
4745  rb_native_mutex_initialize(&vm->workqueue_lock);
4746 
4747  /* may be held by any thread in parent */
4748  rb_native_mutex_initialize(&th->interrupt_lock);
4749 
4750  vm->fork_gen++;
4751  rb_ractor_sleeper_threads_clear(th->ractor);
4752  rb_clear_coverages();
4753 
4754  VM_ASSERT(vm->ractor.blocking_cnt == 0);
4755  VM_ASSERT(vm->ractor.cnt == 1);
4756 }
4757 
4758 static void
4759 terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4760 {
4761  if (th != current_th) {
4762  rb_mutex_abandon_keeping_mutexes(th);
4763  rb_mutex_abandon_locking_mutex(th);
4764  thread_cleanup_func(th, TRUE);
4765  }
4766 }
4767 
4768 void rb_fiber_atfork(rb_thread_t *);
4769 void
4770 rb_thread_atfork(void)
4771 {
4772  rb_thread_t *th = GET_THREAD();
4773  rb_thread_atfork_internal(th, terminate_atfork_i);
4774  th->join_list = NULL;
4775  rb_fiber_atfork(th);
4776 
4777  /* We don't want to reproduce CVE-2003-0900. */
4778  rb_reset_random_seed();
4779 
4780  /* For the child, starting the MJIT worker thread here is safer than doing so immediately after `after_fork_ruby`. */
4781  mjit_child_after_fork();
4782 }
4783 
4784 static void
4785 terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4786 {
4787  if (th != current_th) {
4788  thread_cleanup_func_before_exec(th);
4789  }
4790 }
4791 
4792 void
4793 rb_thread_atfork_before_exec(void)
4794 {
4795  rb_thread_t *th = GET_THREAD();
4796  rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4797 }
4798 #else
4799 void
4800 rb_thread_atfork(void)
4801 {
4802 }
4803 
4804 void
4805 rb_thread_atfork_before_exec(void)
4806 {
4807 }
4808 #endif
4809 
4810 struct thgroup {
4811  int enclosed;
4812  VALUE group;
4813 };
4814 
4815 static size_t
4816 thgroup_memsize(const void *ptr)
4817 {
4818  return sizeof(struct thgroup);
4819 }
4820 
4821 static const rb_data_type_t thgroup_data_type = {
4822  "thgroup",
4823  {0, RUBY_TYPED_DEFAULT_FREE, thgroup_memsize,},
4824  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4825 };
4826 
4827 /*
4828  * Document-class: ThreadGroup
4829  *
4830  * ThreadGroup provides a means of keeping track of a number of threads as a
4831  * group.
4832  *
4833  * A given Thread object can only belong to one ThreadGroup at a time; adding
4834  * a thread to a new group will remove it from any previous group.
4835  *
4836  * Newly created threads belong to the same group as the thread from which they
4837  * were created.
4838  */
4839 
4840 /*
4841  * Document-const: Default
4842  *
4843  * The default ThreadGroup created when Ruby starts; all Threads belong to it
4844  * by default.
4845  */
4846 static VALUE
4847 thgroup_s_alloc(VALUE klass)
4848 {
4849  VALUE group;
4850  struct thgroup *data;
4851 
4852  group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4853  data->enclosed = 0;
4854  data->group = group;
4855 
4856  return group;
4857 }
4858 
4859 /*
4860  * call-seq:
4861  * thgrp.list -> array
4862  *
4863  * Returns an array of all existing Thread objects that belong to this group.
4864  *
4865  * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4866  */
4867 
4868 static VALUE
4869 thgroup_list(VALUE group)
4870 {
4871  VALUE ary = rb_ary_new();
4872  rb_thread_t *th = 0;
4873  rb_ractor_t *r = GET_RACTOR();
4874 
4875  list_for_each(&r->threads.set, th, lt_node) {
4876  if (th->thgroup == group) {
4877  rb_ary_push(ary, th->self);
4878  }
4879  }
4880  return ary;
4881 }
4882 
4883 
4884 /*
4885  * call-seq:
4886  * thgrp.enclose -> thgrp
4887  *
4888  * Prevents threads from being added to or removed from the receiving
4889  * ThreadGroup.
4890  *
4891  * New threads can still be started in an enclosed ThreadGroup.
4892  *
4893  * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4894  * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4895  * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4896  * tg.add thr
4897  * #=> ThreadError: can't move from the enclosed thread group
4898  */
4899 
4900 static VALUE
4901 thgroup_enclose(VALUE group)
4902 {
4903  struct thgroup *data;
4904 
4905  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4906  data->enclosed = 1;
4907 
4908  return group;
4909 }
4910 
4911 
4912 /*
4913  * call-seq:
4914  * thgrp.enclosed? -> true or false
4915  *
4916  * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4917  */
4918 
4919 static VALUE
4920 thgroup_enclosed_p(VALUE group)
4921 {
4922  struct thgroup *data;
4923 
4924  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4925  return RBOOL(data->enclosed);
4926 }
4927 
4928 
4929 /*
4930  * call-seq:
4931  * thgrp.add(thread) -> thgrp
4932  *
4933  * Adds the given +thread+ to this group, removing it from any other
4934  * group to which it may have previously been a member.
4935  *
4936  * puts "Initial group is #{ThreadGroup::Default.list}"
4937  * tg = ThreadGroup.new
4938  * t1 = Thread.new { sleep }
4939  * t2 = Thread.new { sleep }
4940  * puts "t1 is #{t1}"
4941  * puts "t2 is #{t2}"
4942  * tg.add(t1)
4943  * puts "Initial group now #{ThreadGroup::Default.list}"
4944  * puts "tg group now #{tg.list}"
4945  *
4946  * This will produce:
4947  *
4948  * Initial group is #<Thread:0x401bdf4c>
4949  * t1 is #<Thread:0x401b3c90>
4950  * t2 is #<Thread:0x401b3c18>
4951  * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4952  * tg group now #<Thread:0x401b3c90>
4953  */
4954 
4955 static VALUE
4956 thgroup_add(VALUE group, VALUE thread)
4957 {
4958  rb_thread_t *target_th = rb_thread_ptr(thread);
4959  struct thgroup *data;
4960 
4961  if (OBJ_FROZEN(group)) {
4962  rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4963  }
4964  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4965  if (data->enclosed) {
4966  rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4967  }
4968 
4969  if (OBJ_FROZEN(target_th->thgroup)) {
4970  rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4971  }
4972  TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4973  if (data->enclosed) {
4974  rb_raise(rb_eThreadError,
4975  "can't move from the enclosed thread group");
4976  }
4977 
4978  target_th->thgroup = group;
4979  return group;
4980 }
4981 
4982 /*
4983  * Document-class: ThreadShield
4984  */
4985 static void
4986 thread_shield_mark(void *ptr)
4987 {
4988  rb_gc_mark((VALUE)ptr);
4989 }
4990 
4991 static const rb_data_type_t thread_shield_data_type = {
4992  "thread_shield",
4993  {thread_shield_mark, 0, 0,},
4994  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4995 };
4996 
4997 static VALUE
4998 thread_shield_alloc(VALUE klass)
4999 {
5000  return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
5001 }
5002 
5003 #define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
5004 #define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
5005 #define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
5006 #define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
5007 STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
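/*
 * A worked reading of the macros above: the waiting count is packed into
 * the user bits of the shield's flags word. Assuming CRuby's usual
 * FL_USER0..FL_USER19 layout with FL_USHIFT == 12, the mask spans bits
 * 12..31, so the count occupies 20 bits and THREAD_SHIELD_WAITING_MAX is
 * 0xFFFFF (1048575).
 */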
5008 static inline unsigned int
5009 rb_thread_shield_waiting(VALUE b)
5010 {
5011  return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
5012 }
5013 
5014 static inline void
5015 rb_thread_shield_waiting_inc(VALUE b)
5016 {
5017  unsigned int w = rb_thread_shield_waiting(b);
5018  w++;
5019  if (w > THREAD_SHIELD_WAITING_MAX)
5020  rb_raise(rb_eRuntimeError, "waiting count overflow");
5021  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5022  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5023 }
5024 
5025 static inline void
5026 rb_thread_shield_waiting_dec(VALUE b)
5027 {
5028  unsigned int w = rb_thread_shield_waiting(b);
5029  if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
5030  w--;
5031  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5032  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5033 }
5034 
5035 VALUE
5036 rb_thread_shield_new(void)
5037 {
5038  VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
5039  rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
5040  return thread_shield;
5041 }
5042 
5043 bool
5044 rb_thread_shield_owned(VALUE self)
5045 {
5046  VALUE mutex = GetThreadShieldPtr(self);
5047  if (!mutex) return false;
5048 
5049  rb_mutex_t *m = mutex_ptr(mutex);
5050 
5051  return m->fiber == GET_EC()->fiber_ptr;
5052 }
5053 
5054 /*
5055  * Wait on a thread shield.
5056  *
5057  * Returns
5058  * true: acquired the thread shield
5059  * false: the thread shield was destroyed and no other threads are waiting
5060  * nil: the thread shield was destroyed but it is still in use
5061  */
5062 VALUE
5063 rb_thread_shield_wait(VALUE self)
5064 {
5065  VALUE mutex = GetThreadShieldPtr(self);
5066  rb_mutex_t *m;
5067 
5068  if (!mutex) return Qfalse;
5069  m = mutex_ptr(mutex);
5070  if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5071  rb_thread_shield_waiting_inc(self);
5072  rb_mutex_lock(mutex);
5073  rb_thread_shield_waiting_dec(self);
5074  if (DATA_PTR(self)) return Qtrue;
5075  rb_mutex_unlock(mutex);
5076  return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5077 }
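/*
 * A sketch of the intended life cycle (hypothetical caller; this is the
 * same pattern the VM uses for its require lock):
 *
 *     VALUE shield = rb_thread_shield_new();   // creator holds the lock
 *     // ... creator performs the guarded work ...
 *     rb_thread_shield_destroy(shield);        // wakes all waiters
 *
 *     // meanwhile, in another thread:
 *     VALUE r = rb_thread_shield_wait(shield);
 *     // Qtrue:  we acquired the shield and must release it ourselves
 *     // Qfalse: it was destroyed and nobody else is waiting
 *     // Qnil:   it was destroyed but other waiters remain
 */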
5078 
5079 static VALUE
5080 thread_shield_get_mutex(VALUE self)
5081 {
5082  VALUE mutex = GetThreadShieldPtr(self);
5083  if (!mutex)
5084  rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5085  return mutex;
5086 }
5087 
5088 /*
5089  * Release a thread shield, and return true if it has waiting threads.
5090  */
5091 VALUE
5092 rb_thread_shield_release(VALUE self)
5093 {
5094  VALUE mutex = thread_shield_get_mutex(self);
5095  rb_mutex_unlock(mutex);
5096  return RBOOL(rb_thread_shield_waiting(self) > 0);
5097 }
5098 
5099 /*
5100  * Release and destroy a thread shield, and return true if it has waiting threads.
5101  */
5102 VALUE
5103 rb_thread_shield_destroy(VALUE self)
5104 {
5105  VALUE mutex = thread_shield_get_mutex(self);
5106  DATA_PTR(self) = 0;
5107  rb_mutex_unlock(mutex);
5108  return RBOOL(rb_thread_shield_waiting(self) > 0);
5109 }
5110 
5111 static VALUE
5112 threadptr_recursive_hash(rb_thread_t *th)
5113 {
5114  return th->ec->local_storage_recursive_hash;
5115 }
5116 
5117 static void
5118 threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5119 {
5120  th->ec->local_storage_recursive_hash = hash;
5121 }
5122 
5123 ID rb_frame_last_func(void);
5124 
5125 /*
5126  * Returns the current "recursive list" used to detect recursion.
5127  * This list is a hash table, unique for the current thread and for
5128  * the current __callee__.
5129  */
5130 
5131 static VALUE
5132 recursive_list_access(VALUE sym)
5133 {
5134  rb_thread_t *th = GET_THREAD();
5135  VALUE hash = threadptr_recursive_hash(th);
5136  VALUE list;
5137  if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5138  hash = rb_ident_hash_new();
5139  threadptr_recursive_hash_set(th, hash);
5140  list = Qnil;
5141  }
5142  else {
5143  list = rb_hash_aref(hash, sym);
5144  }
5145  if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5146  list = rb_ident_hash_new();
5147  rb_hash_aset(hash, sym, list);
5148  }
5149  return list;
5150 }
5151 
5152 /*
5153  * Returns Qtrue if and only if obj (or the pair <obj, paired_obj>) is already
5154  * in the recursion list.
5155  * Assumes the recursion list is valid.
5156  */
5157 
5158 static VALUE
5159 recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5160 {
5161 #if SIZEOF_LONG == SIZEOF_VOIDP
5162  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5163 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5164  #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5165  rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5166 #endif
5167 
5168  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5169  if (pair_list == Qundef)
5170  return Qfalse;
5171  if (paired_obj_id) {
5172  if (!RB_TYPE_P(pair_list, T_HASH)) {
5173  if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5174  return Qfalse;
5175  }
5176  else {
5177  if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5178  return Qfalse;
5179  }
5180  }
5181  return Qtrue;
5182 }
5183 
5184 /*
5185  * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5186  * For a single obj, it sets list[obj] to Qtrue.
5187  * For a pair, it sets list[obj] to paired_obj_id if possible,
5188  * otherwise list[obj] becomes a hash like:
5189  * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5190  * Assumes the recursion list is valid.
5191  */
5192 
5193 static void
5194 recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5195 {
5196  VALUE pair_list;
5197 
5198  if (!paired_obj) {
5199  rb_hash_aset(list, obj, Qtrue);
5200  }
5201  else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
5202  rb_hash_aset(list, obj, paired_obj);
5203  }
5204  else {
5205  if (!RB_TYPE_P(pair_list, T_HASH)){
5206  VALUE other_paired_obj = pair_list;
5207  pair_list = rb_hash_new();
5208  rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5209  rb_hash_aset(list, obj, pair_list);
5210  }
5211  rb_hash_aset(pair_list, paired_obj, Qtrue);
5212  }
5213 }
5214 
5215 /*
5216  * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5217  * For a pair, if list[obj] is a hash, then paired_obj_id is
5218  * removed from the hash and no attempt is made to simplify
5219  * list[obj] from {only_one_paired_id => true} to only_one_paired_id.
5220  * Assumes the recursion list is valid.
5221  */
5222 
5223 static int
5224 recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5225 {
5226  if (paired_obj) {
5227  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5228  if (pair_list == Qundef) {
5229  return 0;
5230  }
5231  if (RB_TYPE_P(pair_list, T_HASH)) {
5232  rb_hash_delete_entry(pair_list, paired_obj);
5233  if (!RHASH_EMPTY_P(pair_list)) {
5234  return 1; /* keep the hash until it is empty */
5235  }
5236  }
5237  }
5238  rb_hash_delete_entry(list, obj);
5239  return 1;
5240 }
5241 
5243  VALUE (*func) (VALUE, VALUE, int);
5244  VALUE list;
5245  VALUE obj;
5246  VALUE pairid;
5247  VALUE arg;
5248 };
5249 
5250 static VALUE
5251 exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5252 {
5253  struct exec_recursive_params *p = (void *)data;
5254  return (*p->func)(p->obj, p->arg, FALSE);
5255 }
5256 
5257 /*
5258  * Calls func(obj, arg, recursive), where recursive is non-zero if the
5259  * current method is called recursively on obj, or on the pair <obj, pairid>
5260  * If outer is 0, then the innermost func will be called with recursive set
5261  * to Qtrue, otherwise the outermost func will be called. In the latter case,
5262  * all inner func are short-circuited by throw.
5263  * Implementation details: the value thrown is the recursive list which is
5264  * proper to the current method and unlikely to be caught anywhere else.
5265  * list[recursive_key] is used as a flag for the outermost call.
5266  */
5267 
5268 static VALUE
5269 exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
5270 {
5271  VALUE result = Qundef;
5272  const ID mid = rb_frame_last_func();
5273  const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5274  struct exec_recursive_params p;
5275  int outermost;
5276  p.list = recursive_list_access(sym);
5277  p.obj = obj;
5278  p.pairid = pairid;
5279  p.arg = arg;
5280  outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5281 
5282  if (recursive_check(p.list, p.obj, pairid)) {
5283  if (outer && !outermost) {
5284  rb_throw_obj(p.list, p.list);
5285  }
5286  return (*func)(obj, arg, TRUE);
5287  }
5288  else {
5289  enum ruby_tag_type state;
5290 
5291  p.func = func;
5292 
5293  if (outermost) {
5294  recursive_push(p.list, ID2SYM(recursive_key), 0);
5295  recursive_push(p.list, p.obj, p.pairid);
5296  result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5297  if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5298  if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5299  if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5300  if (result == p.list) {
5301  result = (*func)(obj, arg, TRUE);
5302  }
5303  }
5304  else {
5305  volatile VALUE ret = Qundef;
5306  recursive_push(p.list, p.obj, p.pairid);
5307  EC_PUSH_TAG(GET_EC());
5308  if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5309  ret = (*func)(obj, arg, FALSE);
5310  }
5311  EC_POP_TAG();
5312  if (!recursive_pop(p.list, p.obj, p.pairid)) {
5313  goto invalid;
5314  }
5315  if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5316  result = ret;
5317  }
5318  }
5319  *(volatile struct exec_recursive_params *)&p;
5320  return result;
5321 
5322  invalid:
5323  rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5324  "for %+"PRIsVALUE" in %+"PRIsVALUE,
5325  sym, rb_thread_current());
5326  UNREACHABLE_RETURN(Qundef);
5327 }
5328 
5329 /*
5330  * Calls func(obj, arg, recursive), where recursive is non-zero if the
5331  * current method is called recursively on obj
5332  */
5333 
5334 VALUE
5335 rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5336 {
5337  return exec_recursive(func, obj, 0, arg, 0);
5338 }
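/*
 * A minimal sketch of the usual caller shape (hypothetical names, not part
 * of this file): an inspect-like function that prints a placeholder when
 * it meets an object it is already rendering.
 *
 *     static VALUE
 *     my_inspect_body(VALUE obj, VALUE arg, int recursive)
 *     {
 *         if (recursive) return rb_str_new_cstr("[...]");
 *         return build_representation(obj, arg);  // assumed helper
 *     }
 *
 *     VALUE
 *     my_inspect(VALUE obj)
 *     {
 *         return rb_exec_recursive(my_inspect_body, obj, Qnil);
 *     }
 */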
5339 
5340 /*
5341  * Calls func(obj, arg, recursive), where recursive is non-zero if the
5342  * current method is called recursively on the ordered pair <obj, paired_obj>
5343  */
5344 
5345 VALUE
5346 rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5347 {
5348  return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0);
5349 }
5350 
5351 /*
5352  * If recursion is detected on the current method and obj, the outermost
5353  * func will be called with (obj, arg, Qtrue). All inner func will be
5354  * short-circuited using throw.
5355  */
5356 
5357 VALUE
5358 rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5359 {
5360  return exec_recursive(func, obj, 0, arg, 1);
5361 }
5362 
5363 /*
5364  * If recursion is detected on the current method, obj and paired_obj,
5365  * the outermost func will be called with (obj, arg, Qtrue). All inner
5366  * func will be short-circuited using throw.
5367  */
5368 
5369 VALUE
5370 rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5371 {
5372  return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1);
5373 }
5374 
5375 /*
5376  * call-seq:
5377  * thread.backtrace -> array or nil
5378  *
5379  * Returns the current backtrace of the target thread.
5380  *
5381  */
5382 
5383 static VALUE
5384 rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5385 {
5386  return rb_vm_thread_backtrace(argc, argv, thval);
5387 }
5388 
5389 /* call-seq:
5390  * thread.backtrace_locations(*args) -> array or nil
5391  *
5392  * Returns the execution stack for the target thread---an array containing
5393  * backtrace location objects.
5394  *
5395  * See Thread::Backtrace::Location for more information.
5396  *
5397  * This method behaves similarly to Kernel#caller_locations except it applies
5398  * to a specific thread.
5399  */
5400 static VALUE
5401 rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5402 {
5403  return rb_vm_thread_backtrace_locations(argc, argv, thval);
5404 }
5405 
5406 void
5407 Init_Thread_Mutex(void)
5408 {
5409  rb_thread_t *th = GET_THREAD();
5410 
5411  rb_native_mutex_initialize(&th->vm->waitpid_lock);
5412  rb_native_mutex_initialize(&th->vm->workqueue_lock);
5413  rb_native_mutex_initialize(&th->interrupt_lock);
5414 }
5415 
5416 /*
5417  * Document-class: ThreadError
5418  *
5419  * Raised when an invalid operation is attempted on a thread.
5420  *
5421  * For example, when no other thread has been started:
5422  *
5423  * Thread.stop
5424  *
5425  * This will raise the following exception:
5426  *
5427  * ThreadError: stopping only thread
5428  * note: use sleep to stop forever
5429  */
5430 
5431 void
5432 Init_Thread(void)
5433 {
5434  VALUE cThGroup;
5435  rb_thread_t *th = GET_THREAD();
5436 
5437  sym_never = ID2SYM(rb_intern_const("never"));
5438  sym_immediate = ID2SYM(rb_intern_const("immediate"));
5439  sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5440 
5441  rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5442  rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5443  rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5444  rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5445  rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5446  rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5447  rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5448  rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5449  rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5450  rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5451  rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5452  rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5453  rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5454  rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5455  rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5456  rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5457 #if THREAD_DEBUG < 0
5458  rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
5459  rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
5460 #endif
5461  rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5462  rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5463  rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5464 
5465  rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5466  rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5467  rb_define_method(rb_cThread, "join", thread_join_m, -1);
5468  rb_define_method(rb_cThread, "value", thread_value, 0);
5469  rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5470  rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5471  rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5472  rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5473  rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5474  rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5475  rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5476  rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5477  rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5478  rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5479  rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5480  rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5481  rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5482  rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5483  rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5484  rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5485  rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5486  rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5487  rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5488  rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5489  rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5490  rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5491  rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5492  rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5493  rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5494  rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5495 
5496  rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5497  rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5498  rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5499  rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5500  rb_define_alias(rb_cThread, "inspect", "to_s");
5501 
5502  rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5503  "stream closed in another thread");
5504 
5505  cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5506  rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5507  rb_define_method(cThGroup, "list", thgroup_list, 0);
5508  rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5509  rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5510  rb_define_method(cThGroup, "add", thgroup_add, 1);
5511 
5512  {
5513  th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5514  rb_define_const(cThGroup, "Default", th->thgroup);
5515  }
5516 
5517  recursive_key = rb_intern_const("__recursive_key__");
5518 
5519  /* init thread core */
5520  {
5521  /* main thread setting */
5522  {
5523  /* acquire global vm lock */
5524  rb_global_vm_lock_t *gvl = rb_ractor_gvl(th->ractor);
5525  gvl_acquire(gvl, th);
5526 
5527  th->pending_interrupt_queue = rb_ary_tmp_new(0);
5528  th->pending_interrupt_queue_checked = 0;
5529  th->pending_interrupt_mask_stack = rb_ary_tmp_new(0);
5530  }
5531  }
5532 
5533  rb_thread_create_timer_thread();
5534 
5535  Init_thread_sync();
5536 }
5537 
5538 int
5539 ruby_native_thread_p(void)
5540 {
5541  rb_thread_t *th = ruby_thread_from_native();
5542 
5543  return th != 0;
5544 }
5545 
5546 static void
5547 debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5548 {
5549  rb_thread_t *th = 0;
5550  VALUE sep = rb_str_new_cstr("\n ");
5551 
5552  rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5553  rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5554  (void *)GET_THREAD(), (void *)r->threads.main);
5555 
5556  list_for_each(&r->threads.set, th, lt_node) {
5557  rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5558  "native:%"PRI_THREAD_ID" int:%u",
5559  th->self, (void *)th, thread_id_str(th), th->ec->interrupt_flag);
5560 
5561  if (th->locking_mutex) {
5562  rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5563  rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5564  (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5565  }
5566 
5567  {
5568  struct rb_waiting_list *list = th->join_list;
5569  while (list) {
5570  rb_str_catf(msg, "\n depended by: rb_thread_id:%p", (void *)list->thread);
5571  list = list->next;
5572  }
5573  }
5574  rb_str_catf(msg, "\n ");
5575  rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5576  rb_str_catf(msg, "\n");
5577  }
5578 }
5579 
5580 static void
5581 rb_check_deadlock(rb_ractor_t *r)
5582 {
5583  if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5584 
5585  int found = 0;
5586  rb_thread_t *th = NULL;
5587  int sleeper_num = rb_ractor_sleeper_thread_num(r);
5588  int ltnum = rb_ractor_living_thread_num(r);
5589 
5590  if (ltnum > sleeper_num) return;
5591  if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5592  if (patrol_thread && patrol_thread != GET_THREAD()) return;
5593 
5594  list_for_each(&r->threads.set, th, lt_node) {
5595  if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5596  found = 1;
5597  }
5598  else if (th->locking_mutex) {
5599  rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5600  if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !list_empty(&mutex->waitq))) {
5601  found = 1;
5602  }
5603  }
5604  if (found)
5605  break;
5606  }
5607 
5608  if (!found) {
5609  VALUE argv[2];
5610  argv[0] = rb_eFatal;
5611  argv[1] = rb_str_new2("No live threads left. Deadlock?");
5612  debug_deadlock_check(r, argv[1]);
5613  rb_ractor_sleeper_threads_dec(GET_RACTOR());
5614  rb_threadptr_raise(r->threads.main, 2, argv);
5615  }
5616 }
5617 
5618 static void
5619 update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5620 {
5621  const rb_control_frame_t *cfp = GET_EC()->cfp;
5622  VALUE coverage = rb_iseq_coverage(cfp->iseq);
5623  if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5624  VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5625  if (lines) {
5626  long line = rb_sourceline() - 1;
5627  long count;
5628  VALUE num;
5629  void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5630  if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5631  rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - cfp->iseq->body->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5632  rb_ary_push(lines, LONG2FIX(line + 1));
5633  return;
5634  }
5635  if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5636  return;
5637  }
5638  num = RARRAY_AREF(lines, line);
5639  if (!FIXNUM_P(num)) return;
5640  count = FIX2LONG(num) + 1;
5641  if (POSFIXABLE(count)) {
5642  RARRAY_ASET(lines, line, LONG2FIX(count));
5643  }
5644  }
5645  }
5646 }
5647 
5648 static void
5649 update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5650 {
5651  const rb_control_frame_t *cfp = GET_EC()->cfp;
5652  VALUE coverage = rb_iseq_coverage(cfp->iseq);
5653  if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5654  VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5655  if (branches) {
5656  long pc = cfp->pc - cfp->iseq->body->iseq_encoded - 1;
5657  long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5658  VALUE counters = RARRAY_AREF(branches, 1);
5659  VALUE num = RARRAY_AREF(counters, idx);
5660  count = FIX2LONG(num) + 1;
5661  if (POSFIXABLE(count)) {
5662  RARRAY_ASET(counters, idx, LONG2FIX(count));
5663  }
5664  }
5665  }
5666 }
5667 
5668 const rb_method_entry_t *
5669 rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5670 {
5671  VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5672 
5673  if (!me->def) return NULL; // negative cme
5674 
5675  retry:
5676  switch (me->def->type) {
5677  case VM_METHOD_TYPE_ISEQ: {
5678  const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5679  rb_iseq_location_t *loc = &iseq->body->location;
5680  path = rb_iseq_path(iseq);
5681  beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5682  beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5683  end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5684  end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5685  break;
5686  }
5687  case VM_METHOD_TYPE_BMETHOD: {
5688  const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5689  if (iseq) {
5690  rb_iseq_location_t *loc;
5691  rb_iseq_check(iseq);
5692  path = rb_iseq_path(iseq);
5693  loc = &iseq->body->location;
5694  beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5695  beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5696  end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5697  end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5698  break;
5699  }
5700  return NULL;
5701  }
5702  case VM_METHOD_TYPE_ALIAS:
5703  me = me->def->body.alias.original_me;
5704  goto retry;
5705  case VM_METHOD_TYPE_REFINED:
5706  me = me->def->body.refined.orig_me;
5707  if (!me) return NULL;
5708  goto retry;
5709  default:
5710  return NULL;
5711  }
5712 
5713  /* found */
5714  if (RB_TYPE_P(path, T_ARRAY)) {
5715  path = rb_ary_entry(path, 1);
5716  if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5717  }
5718  if (resolved_location) {
5719  resolved_location[0] = path;
5720  resolved_location[1] = beg_pos_lineno;
5721  resolved_location[2] = beg_pos_column;
5722  resolved_location[3] = end_pos_lineno;
5723  resolved_location[4] = end_pos_column;
5724  }
5725  return me;
5726 }
5727 
5728 static void
5729 update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5730 {
5731  const rb_control_frame_t *cfp = GET_EC()->cfp;
5732  const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5733  const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5734  VALUE rcount;
5735  long count;
5736 
5737  me = rb_resolve_me_location(me, 0);
5738  if (!me) return;
5739 
5740  rcount = rb_hash_aref(me2counter, (VALUE) me);
5741  count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5742  if (POSFIXABLE(count)) {
5743  rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5744  }
5745 }
5746 
5747 VALUE
5748 rb_get_coverages(void)
5749 {
5750  return GET_VM()->coverages;
5751 }
5752 
5753 int
5754 rb_get_coverage_mode(void)
5755 {
5756  return GET_VM()->coverage_mode;
5757 }
5758 
5759 void
5760 rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5761 {
5762  GET_VM()->coverages = coverages;
5763  GET_VM()->me2counter = me2counter;
5764  GET_VM()->coverage_mode = mode;
5765 }
5766 
5767 void
5768 rb_resume_coverages(void)
5769 {
5770  int mode = GET_VM()->coverage_mode;
5771  VALUE me2counter = GET_VM()->me2counter;
5772  rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5773  if (mode & COVERAGE_TARGET_BRANCHES) {
5774  rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5775  }
5776  if (mode & COVERAGE_TARGET_METHODS) {
5777  rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5778  }
5779 }
5780 
5781 void
5782 rb_suspend_coverages(void)
5783 {
5784  rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5785  if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5786  rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5787  }
5788  if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5789  rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5790  }
5791 }
5792 
5793 /* Make coverage arrays empty so old covered files are no longer tracked. */
5794 void
5795 rb_reset_coverages(void)
5796 {
5797  rb_clear_coverages();
5798  rb_iseq_remove_coverage_all();
5799  GET_VM()->coverages = Qfalse;
5800 }
5801 
5802 VALUE
5803 rb_default_coverage(int n)
5804 {
5805  VALUE coverage = rb_ary_tmp_new_fill(3);
5806  VALUE lines = Qfalse, branches = Qfalse;
5807  int mode = GET_VM()->coverage_mode;
5808 
5809  if (mode & COVERAGE_TARGET_LINES) {
5810  lines = n > 0 ? rb_ary_tmp_new_fill(n) : rb_ary_tmp_new(0);
5811  }
5812  RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5813 
5814  if (mode & COVERAGE_TARGET_BRANCHES) {
5815  branches = rb_ary_tmp_new_fill(2);
5816  /* internal data structures for branch coverage:
5817  *
5818  * { branch base node =>
5819  * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5820  * branch target id =>
5821  * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5822  * ...
5823  * }],
5824  * ...
5825  * }
5826  *
5827  * Example:
5828  * { NODE_CASE =>
5829  * [1, 0, 4, 3, {
5830  * NODE_WHEN => [2, 8, 2, 9, 0],
5831  * NODE_WHEN => [3, 8, 3, 9, 1],
5832  * ...
5833  * }],
5834  * ...
5835  * }
5836  */
5837  VALUE structure = rb_hash_new();
5838  rb_obj_hide(structure);
5839  RARRAY_ASET(branches, 0, structure);
5840  /* branch execution counters */
5841  RARRAY_ASET(branches, 1, rb_ary_tmp_new(0));
5842  }
5843  RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5844 
5845  return coverage;
5846 }
5847 
static VALUE
uninterruptible_exit(VALUE v)
{
    rb_thread_t *cur_th = GET_THREAD();
    rb_ary_pop(cur_th->pending_interrupt_mask_stack);

    cur_th->pending_interrupt_queue_checked = 0;
    if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
        RUBY_VM_SET_INTERRUPT(cur_th->ec);
    }
    return Qnil;
}

VALUE
rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
{
    VALUE interrupt_mask = rb_ident_hash_new();
    rb_thread_t *cur_th = GET_THREAD();

    rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
    OBJ_FREEZE_RAW(interrupt_mask);
    rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);

    VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);

    RUBY_VM_CHECK_INTS(cur_th->ec);
    return ret;
}
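
/* Usage sketch (do_critical and its argument are hypothetical): run a C
 * callback with asynchronous exceptions (Thread#raise, Thread#kill)
 * deferred; whatever arrived meanwhile is delivered by the
 * RUBY_VM_CHECK_INTS() above once the mask has been popped.
 *
 *   static VALUE
 *   do_critical(VALUE arg)
 *   {
 *       // work that must not be interrupted asynchronously
 *       return Qnil;
 *   }
 *
 *   rb_uninterruptible(do_critical, Qnil);
 */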