Ruby 3.1.4p223 (2023-03-30 revision HEAD)
thread_win32.c
/* -*-c-*- */
/**********************************************************************

  thread_win32.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include <process.h>

#define TIME_QUANTUM_USEC (10 * 1000)
#define RB_CONDATTR_CLOCK_MONOTONIC 1 /* no effect */

#undef Sleep

#define native_thread_yield() Sleep(0)
#define unregister_ubf_list(th)
#define ubf_wakeup_all_threads() do {} while (0)
#define ubf_threads_empty() (1)
#define ubf_timer_disarm() do {} while (0)
#define ubf_list_atfork() do {} while (0)

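/* The ubf_* hooks above are deliberate no-op stubs. Unlike thread_pthread.c,
 * which tracks threads blocked in unblocking functions on a shared list, the
 * Windows backend gives each thread its own interrupt event (see
 * Init_native_thread and ubf_handle below), so there is no global UBF
 * bookkeeping to maintain. */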
static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;

static int w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th);

static void
w32_error(const char *func)
{
    LPVOID lpMsgBuf;
    DWORD err = GetLastError();
    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL,
                      err,
                      MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
                      (LPTSTR) & lpMsgBuf, 0, NULL) == 0)
        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL,
                      err,
                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                      (LPTSTR) & lpMsgBuf, 0, NULL);
    rb_bug("%s: %s", func, (char*)lpMsgBuf);
    UNREACHABLE;
}

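/* Acquires a Win32 mutex HANDLE via w32_wait_events(). The thread argument
 * passed down is 0, so only the mutex handle itself is waited on; the
 * WAIT_OBJECT_0 + 1 (interrupt) branch is defensive and still reports
 * success after setting errno to EINTR. With try=true a zero timeout is
 * used and EBUSY is returned on WAIT_TIMEOUT. */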
static int
w32_mutex_lock(HANDLE lock, bool try)
{
    DWORD result;
    while (1) {
        thread_debug("rb_native_mutex_lock: %p\n", lock);
        result = w32_wait_events(&lock, 1, try ? 0 : INFINITE, 0);
        switch (result) {
          case WAIT_OBJECT_0:
            /* get mutex object */
            thread_debug("acquire mutex: %p\n", lock);
            return 0;
          case WAIT_OBJECT_0 + 1:
            /* interrupt */
            errno = EINTR;
            thread_debug("acquire mutex interrupted: %p\n", lock);
            return 0;
          case WAIT_TIMEOUT:
            thread_debug("timeout mutex: %p\n", lock);
            return EBUSY;
          case WAIT_ABANDONED:
            rb_bug("win32_mutex_lock: WAIT_ABANDONED");
            break;
          default:
            rb_bug("win32_mutex_lock: unknown result (%ld)", result);
            break;
        }
    }
    return 0;
}

static HANDLE
w32_mutex_create(void)
{
    HANDLE lock = CreateMutex(NULL, FALSE, NULL);
    if (lock == NULL) {
        w32_error("rb_native_mutex_initialize");
    }
    return lock;
}

#define GVL_DEBUG 0

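/* The GVL (global VM lock) on Windows is simply one of the Win32 mutexes
 * created above; there is no wait queue or timeslice signalling of its own.
 * gvl_yield() hands the CPU over by releasing the mutex, calling Sleep(0),
 * and re-acquiring it. */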
static void
gvl_acquire(rb_global_vm_lock_t *gvl, rb_thread_t *th)
{
    w32_mutex_lock(gvl->lock, false);
    if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
}

static void
gvl_release(rb_global_vm_lock_t *gvl)
{
    ReleaseMutex(gvl->lock);
}

static void
gvl_yield(rb_global_vm_lock_t *gvl, rb_thread_t *th)
{
    gvl_release(gvl);
    native_thread_yield();
    gvl_acquire(gvl, th);
}

void
rb_gvl_init(rb_global_vm_lock_t *gvl)
{
    if (GVL_DEBUG) fprintf(stderr, "gvl init\n");
    gvl->lock = w32_mutex_create();
}

static void
gvl_destroy(rb_global_vm_lock_t *gvl)
{
    if (GVL_DEBUG) fprintf(stderr, "gvl destroy\n");
    CloseHandle(gvl->lock);
}

rb_thread_t *
ruby_thread_from_native(void)
{
    return TlsGetValue(ruby_native_thread_key);
}

int
ruby_thread_set_native(rb_thread_t *th)
{
    if (th && th->ec) {
        rb_ractor_set_current_ec(th->ractor, th->ec);
    }
    return TlsSetValue(ruby_native_thread_key, th);
}

void
Init_native_thread(rb_thread_t *th)
{
    if ((ruby_current_ec_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
        rb_bug("TlsAlloc() for ruby_current_ec_key fails");
    }
    if ((ruby_native_thread_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
        rb_bug("TlsAlloc() for ruby_native_thread_key fails");
    }
    ruby_thread_set_native(th);
    DuplicateHandle(GetCurrentProcess(),
                    GetCurrentThread(),
                    GetCurrentProcess(),
                    &th->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);

    th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    thread_debug("initial thread (th: %p, thid: %p, event: %p)\n",
                 th, GET_THREAD()->thread_id,
                 th->native_thread_data.interrupt_event);
}

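/* Core wait primitive. Waits on up to `count` handles; when `th` is given,
 * the thread's (manual-reset) interrupt_event is appended as one extra
 * handle so the wait can be woken by ubf_handle(). The ResetEvent()/
 * SetEvent() dance re-arms the event, then immediately re-signals it if an
 * interrupt was already flagged, closing the check-then-wait race. A return
 * of WAIT_OBJECT_0 + initcount means "woken by interrupt" and is mapped to
 * errno = EINTR. */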
static int
w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
{
    HANDLE *targets = events;
    HANDLE intr;
    const int initcount = count;
    DWORD ret;

    thread_debug("  w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
                 events, count, timeout, th);
    if (th && (intr = th->native_thread_data.interrupt_event)) {
        if (ResetEvent(intr) && (!RUBY_VM_INTERRUPTED(th->ec) || SetEvent(intr))) {
            targets = ALLOCA_N(HANDLE, count + 1);
            memcpy(targets, events, sizeof(HANDLE) * count);

            targets[count++] = intr;
            thread_debug("  * handle: %p (count: %d, intr)\n", intr, count);
        }
        else if (intr == th->native_thread_data.interrupt_event) {
            w32_error("w32_wait_events");
        }
    }

    thread_debug("  WaitForMultipleObjects start (count: %d)\n", count);
    ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
    thread_debug("  WaitForMultipleObjects end (ret: %lu)\n", ret);

    if (ret == (DWORD)(WAIT_OBJECT_0 + initcount) && th) {
        errno = EINTR;
    }
    if (ret == WAIT_FAILED && THREAD_DEBUG) {
        int i;
        DWORD dmy;
        for (i = 0; i < count; i++) {
            thread_debug("  * error handle %d - %s\n", i,
                         GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
        }
    }
    return ret;
}

static void ubf_handle(void *ptr);
#define ubf_select ubf_handle

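/* Public wrappers: the _blocking form assumes the GVL is already released,
 * while rb_w32_wait_events() wraps the wait in BLOCKING_REGION so the GVL
 * is dropped and ubf_handle() is registered as the unblocking function. */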
int
rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
{
    return w32_wait_events(events, num, timeout, ruby_thread_from_native());
}

int
rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
{
    int ret;
    rb_thread_t *th = GET_THREAD();

    BLOCKING_REGION(th, ret = rb_w32_wait_events_blocking(events, num, timeout),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}

static void
w32_close_handle(HANDLE handle)
{
    if (CloseHandle(handle) == 0) {
        w32_error("w32_close_handle");
    }
}

static void
w32_resume_thread(HANDLE handle)
{
    if (ResumeThread(handle) == (DWORD)-1) {
        w32_error("w32_resume_thread");
    }
}

#ifdef _MSC_VER
#define HAVE__BEGINTHREADEX 1
#else
#undef HAVE__BEGINTHREADEX
#endif

#ifdef HAVE__BEGINTHREADEX
#define start_thread (HANDLE)_beginthreadex
#define thread_errno errno
typedef unsigned long (__stdcall *w32_thread_start_func)(void*);
#else
#define start_thread CreateThread
#define thread_errno rb_w32_map_errno(GetLastError())
typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
#endif

static HANDLE
w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
{
    return start_thread(0, stack_size, func, val, CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 0);
}

int
rb_w32_sleep(unsigned long msec)
{
    return w32_wait_events(0, 0, msec, ruby_thread_from_native());
}

int WINAPI
rb_w32_Sleep(unsigned long msec)
{
    int ret;
    rb_thread_t *th = GET_THREAD();

    BLOCKING_REGION(th, ret = rb_w32_sleep(msec),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}

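/* rb_hrtime_t counts nanoseconds. Note that both operands are cast to DWORD
 * before the division, so the 64-bit nanosecond value is truncated to 32
 * bits first; the quotient is exact only for intervals below ~4.29s. */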
static DWORD
hrtime2msec(rb_hrtime_t hrt)
{
    return (DWORD)hrt / (DWORD)RB_HRTIME_PER_MSEC;
}

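/* Sleeps outside the GVL. ubf_handle is registered under interrupt_lock so
 * Thread#wakeup/#kill can signal the interrupt event; a pending interrupt
 * short-circuits the wait entirely. With rel == NULL the wait is INFINITE
 * and is ended only by an interrupt. */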
static void
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
    const volatile DWORD msec = rel ? hrtime2msec(*rel) : INFINITE;

    GVL_UNLOCK_BEGIN(th);
    {
        DWORD ret;

        rb_native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = ubf_handle;
        th->unblock.arg = th;
        rb_native_mutex_unlock(&th->interrupt_lock);

        if (RUBY_VM_INTERRUPTED(th->ec)) {
            /* interrupted. return immediately */
        }
        else {
            thread_debug("native_sleep start (%lu)\n", msec);
            ret = w32_wait_events(0, 0, msec, th);
            thread_debug("native_sleep done (%lu)\n", ret);
        }

        rb_native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = 0;
        th->unblock.arg = 0;
        rb_native_mutex_unlock(&th->interrupt_lock);
    }
    GVL_UNLOCK_END(th);
}

void
rb_native_mutex_lock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    w32_mutex_lock(lock->mutex, false);
#else
    EnterCriticalSection(&lock->crit);
#endif
}

int
rb_native_mutex_trylock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    return w32_mutex_lock(lock->mutex, true);
#else
    return TryEnterCriticalSection(&lock->crit) == 0 ? EBUSY : 0;
#endif
}

void
rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    thread_debug("release mutex: %p\n", lock->mutex);
    ReleaseMutex(lock->mutex);
#else
    LeaveCriticalSection(&lock->crit);
#endif
}

void
rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    lock->mutex = w32_mutex_create();
    /* thread_debug("initialize mutex: %p\n", lock->mutex); */
#else
    InitializeCriticalSection(&lock->crit);
#endif
}

void
rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    w32_close_handle(lock->mutex);
#else
    DeleteCriticalSection(&lock->crit);
#endif
}

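/* Condition variables are emulated with an intrusive doubly-linked ring of
 * per-waiter auto-reset events. The rb_nativethread_cond_t itself serves as
 * the list head; each waiter links an on-stack cond_event_entry into the
 * ring and blocks on its own event, so signal/broadcast just unlink entries
 * and SetEvent() them. The ring is only ever touched with the associated
 * mutex held. */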
struct cond_event_entry {
    struct cond_event_entry* next;
    struct cond_event_entry* prev;
    HANDLE event;
};

void
rb_native_cond_signal(rb_nativethread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    if (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;

        SetEvent(e->event);
    }
}

void
rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    while (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        SetEvent(e->event);

        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;

        e = next;
    }
}

static int
native_cond_timedwait_ms(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
{
    DWORD r;
    struct cond_event_entry entry;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    entry.event = CreateEvent(0, FALSE, FALSE, 0);

    /* cond is guarded by mutex */
    entry.next = head;
    entry.prev = head->prev;
    head->prev->next = &entry;
    head->prev = &entry;

    rb_native_mutex_unlock(mutex);
    {
        r = WaitForSingleObject(entry.event, msec);
        if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
            rb_bug("rb_native_cond_wait: WaitForSingleObject returns %lu", r);
        }
    }
    rb_native_mutex_lock(mutex);

    entry.prev->next = entry.next;
    entry.next->prev = entry.prev;

    w32_close_handle(entry.event);
    return (r == WAIT_OBJECT_0) ? 0 : ETIMEDOUT;
}

void
rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
{
    native_cond_timedwait_ms(cond, mutex, INFINITE);
}

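/* Absolute deadlines are converted back into a remaining-milliseconds value
 * just before each wait; a result of 0 means the deadline has already
 * passed, which native_cond_timedwait() reports as ETIMEDOUT without
 * waiting at all. */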
static unsigned long
abs_timespec_to_timeout_ms(const struct timespec *ts)
{
    struct timeval tv;
    struct timeval now;

    gettimeofday(&now, NULL);
    tv.tv_sec = ts->tv_sec;
    tv.tv_usec = ts->tv_nsec / 1000;

    if (!rb_w32_time_subtract(&tv, &now))
        return 0;

    return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
}

static int
native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, const struct timespec *ts)
{
    unsigned long timeout_ms;

    timeout_ms = abs_timespec_to_timeout_ms(ts);
    if (!timeout_ms)
        return ETIMEDOUT;

    return native_cond_timedwait_ms(cond, mutex, timeout_ms);
}

static struct timespec native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel);

void
rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
{
    struct timespec rel = {
        .tv_sec = msec / 1000,
        .tv_nsec = (msec % 1000) * 1000 * 1000,
    };
    struct timespec ts = native_cond_timeout(cond, rel);
    native_cond_timedwait(cond, mutex, &ts);
}

static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timeval tv;
    struct timespec timeout;
    struct timespec now;

    ret = gettimeofday(&tv, 0);
    if (ret != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}

void
rb_native_cond_initialize(rb_nativethread_cond_t *cond)
{
    cond->next = (struct cond_event_entry *)cond;
    cond->prev = (struct cond_event_entry *)cond;
}

void
rb_native_cond_destroy(rb_nativethread_cond_t *cond)
{
    /* nothing to do */
}
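/* Usage sketch (hypothetical caller, not part of this file), showing the
 * usual guarded-wait pattern against spurious wakeups; `lock`, `cond` and
 * `ready` are assumed names for illustration only:
 *
 *   static rb_nativethread_lock_t lock;
 *   static rb_nativethread_cond_t cond;
 *   static int ready;
 *
 *   rb_native_mutex_initialize(&lock);
 *   rb_native_cond_initialize(&cond);
 *
 *   rb_native_mutex_lock(&lock);
 *   while (!ready)                      // re-check the predicate after each wakeup
 *       rb_native_cond_wait(&cond, &lock);
 *   rb_native_mutex_unlock(&lock);
 *
 * The signalling side sets `ready` under the lock, then calls
 * rb_native_cond_signal(&cond), or rb_native_cond_broadcast() to wake every
 * waiter. */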
void
ruby_init_stack(volatile VALUE *addr)
{
}

#define CHECK_ERR(expr) \
    {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}

COMPILER_WARNING_PUSH
#if defined(__GNUC__)
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif
static inline SIZE_T
query_memory_basic_info(PMEMORY_BASIC_INFORMATION mi)
{
    return VirtualQuery(mi, mi, sizeof(*mi));
}
COMPILER_WARNING_POP

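/* query_memory_basic_info() passes the address of `mi` itself (a stack
 * variable) to VirtualQuery, so the returned MEMORY_BASIC_INFORMATION
 * describes the region containing the current thread's stack. From that,
 * stack_start is set near the top of the region and up to 1MB (or a fifth
 * of the stack, whichever is smaller) is held back as headroom for overflow
 * detection. */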
static void
native_thread_init_stack(rb_thread_t *th)
{
    MEMORY_BASIC_INFORMATION mi;
    char *base, *end;
    DWORD size, space;

    CHECK_ERR(query_memory_basic_info(&mi));
    base = mi.AllocationBase;
    end = mi.BaseAddress;
    end += mi.RegionSize;
    size = end - base;
    space = size / 5;
    if (space > 1024*1024) space = 1024*1024;
    th->ec->machine.stack_start = (VALUE *)end - 1;
    th->ec->machine.stack_maxsize = size - space;
}

#ifndef InterlockedExchangePointer
#define InterlockedExchangePointer(t, v) \
    (void *)InterlockedExchange((long *)(t), (long)(v))
#endif
static void
native_thread_destroy(rb_thread_t *th)
{
    HANDLE intr = InterlockedExchangePointer(&th->native_thread_data.interrupt_event, 0);
    thread_debug("close handle - intr: %p, thid: %p\n", intr, th->thread_id);
    w32_close_handle(intr);
}

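/* Per-thread bootstrap. Threads are created CREATE_SUSPENDED by
 * w32_create_thread() and kicked off with w32_resume_thread() only after
 * th->thread_id has been stored, so the new thread never observes a
 * half-initialized rb_thread_t. Each thread creates its own manual-reset
 * interrupt event before entering thread_start_func_2(). */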
static unsigned long __stdcall
thread_start_func_1(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    volatile HANDLE thread_id = th->thread_id;

    native_thread_init_stack(th);
    th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    /* run */
    thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
                 th->thread_id, th->native_thread_data.interrupt_event);

    thread_start_func_2(th, th->ec->machine.stack_start);

    w32_close_handle(thread_id);
    thread_debug("thread deleted (th: %p)\n", th);
    return 0;
}

static int
native_thread_create(rb_thread_t *th)
{
    const size_t stack_size = th->vm->default_params.thread_machine_stack_size + th->vm->default_params.thread_vm_stack_size;
    th->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);

    if ((th->thread_id) == 0) {
        return thread_errno;
    }

    w32_resume_thread(th->thread_id);

    if (THREAD_DEBUG) {
        Sleep(0);
        thread_debug("create: (th: %p, thid: %p, intr: %p), stack size: %"PRIuSIZE"\n",
                     th, th->thread_id,
                     th->native_thread_data.interrupt_event, stack_size);
    }
    return 0;
}

static void
native_thread_join(HANDLE th)
{
    w32_wait_events(&th, 1, INFINITE, 0);
}

#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
    int priority = th->priority;
    if (th->priority > 0) {
        priority = THREAD_PRIORITY_ABOVE_NORMAL;
    }
    else if (th->priority < 0) {
        priority = THREAD_PRIORITY_BELOW_NORMAL;
    }
    else {
        priority = THREAD_PRIORITY_NORMAL;
    }

    SetThreadPriority(th->thread_id, priority);
}

#endif /* USE_NATIVE_THREAD_PRIORITY */

int rb_w32_select_with_thread(int, fd_set *, fd_set *, fd_set *, struct timeval *, void *); /* @internal */

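/* Unwraps rb_fdset_t into raw fd_set pointers and delegates to
 * rb_w32_select_with_thread(), which keeps the select interruptible via the
 * thread's interrupt event. rb_fd_resize() is a compatibility no-op on this
 * platform. */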
static int
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
{
    fd_set *r = NULL, *w = NULL, *e = NULL;
    if (readfds) {
        rb_fd_resize(n - 1, readfds);
        r = rb_fd_ptr(readfds);
    }
    if (writefds) {
        rb_fd_resize(n - 1, writefds);
        w = rb_fd_ptr(writefds);
    }
    if (exceptfds) {
        rb_fd_resize(n - 1, exceptfds);
        e = rb_fd_ptr(exceptfds);
    }
    return rb_w32_select_with_thread(n, r, w, e, timeout, th);
}

/* @internal */
int
rb_w32_check_interrupt(rb_thread_t *th)
{
    return w32_wait_events(0, 0, 0, th);
}

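/* The unblocking function (UBF) for this backend: signalling the target
 * thread's interrupt event wakes any w32_wait_events() call that included
 * the event in its handle set. Because the event is manual-reset, a wakeup
 * is not lost even if it arrives just before the thread blocks. */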
static void
ubf_handle(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_handle: %p\n", th);

    if (!SetEvent(th->native_thread_data.interrupt_event)) {
        w32_error("ubf_handle");
    }
}

int rb_w32_set_thread_description(HANDLE th, const WCHAR *name);
int rb_w32_set_thread_description_str(HANDLE th, VALUE name);
#define native_set_another_thread_name rb_w32_set_thread_description_str

static struct {
    HANDLE id;
    HANDLE lock;
} timer_thread;
#define TIMER_THREAD_CREATED_P() (timer_thread.id != 0)

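/* The timer thread ticks once per TIME_QUANTUM_USEC (10ms): each tick bumps
 * vm->clock and checks for pending signals on the main thread. The
 * timer_thread.lock event doubles as the shutdown signal; it stays
 * unsignalled, so WaitForSingleObject() times out every quantum until
 * native_stop_timer_thread() sets it. */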
static unsigned long __stdcall
timer_thread_func(void *dummy)
{
    rb_vm_t *vm = GET_VM();
    thread_debug("timer_thread\n");
    rb_w32_set_thread_description(GetCurrentThread(), L"ruby-timer-thread");
    while (WaitForSingleObject(timer_thread.lock,
                               TIME_QUANTUM_USEC/1000) == WAIT_TIMEOUT) {
        vm->clock++;
        ruby_sigchld_handler(vm); /* probably no-op */
        rb_threadptr_check_signal(vm->ractor.main_thread);
    }
    thread_debug("timer killed\n");
    return 0;
}

void
rb_thread_wakeup_timer_thread(int sig)
{
    /* do nothing */
}

static VALUE
rb_thread_start_unblock_thread(void)
{
    return Qfalse; /* no-op */
}

static void
rb_thread_create_timer_thread(void)
{
    if (timer_thread.id == 0) {
        if (!timer_thread.lock) {
            timer_thread.lock = CreateEvent(0, TRUE, FALSE, 0);
        }
        timer_thread.id = w32_create_thread(1024 + (THREAD_DEBUG ? BUFSIZ : 0),
                                            timer_thread_func, 0);
        w32_resume_thread(timer_thread.id);
    }
}

static int
native_stop_timer_thread(void)
{
    int stopped = --system_working <= 0;
    if (stopped) {
        SetEvent(timer_thread.lock);
        native_thread_join(timer_thread.id);
        CloseHandle(timer_thread.lock);
        timer_thread.lock = 0;
    }
    return stopped;
}

static void
native_reset_timer_thread(void)
{
    if (timer_thread.id) {
        CloseHandle(timer_thread.id);
        timer_thread.id = 0;
    }
}

int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    return rb_ec_raised_p(th->ec, RAISED_STACKOVERFLOW);
}

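/* On MinGW builds this handler is registered elsewhere (signal.c in
 * upstream) as a vectored exception handler: it flags RAISED_STACKOVERFLOW
 * on the current execution context and converts EXCEPTION_STACK_OVERFLOW
 * into a SIGSEGV, which Ruby's signal machinery then reports as a stack
 * overflow rather than a plain crash. */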
#if defined(__MINGW32__)
LONG WINAPI
rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *exception)
{
    if (exception->ExceptionRecord->ExceptionCode == EXCEPTION_STACK_OVERFLOW) {
        rb_ec_raised_set(GET_EC(), RAISED_STACKOVERFLOW);
        raise(SIGSEGV);
    }
    return EXCEPTION_CONTINUE_SEARCH;
}
#endif

#ifdef RUBY_ALLOCA_CHKSTK
void
ruby_alloca_chkstk(size_t len, void *sp)
{
    if (ruby_stack_length(NULL) * sizeof(VALUE) >= len) {
        rb_execution_context_t *ec = GET_EC();
        if (!rb_ec_raised_p(ec, RAISED_STACKOVERFLOW)) {
            rb_ec_raised_set(ec, RAISED_STACKOVERFLOW);
            rb_exc_raise(sysstack_error);
        }
    }
}
#endif
int
rb_reserved_fd_p(int fd)
{
    return 0;
}

int
rb_sigwait_fd_get(rb_thread_t *th)
{
    return -1; /* TODO */
}

NORETURN(void rb_sigwait_fd_put(rb_thread_t *, int));
void
rb_sigwait_fd_put(rb_thread_t *th, int fd)
{
    rb_bug("not implemented, should not be called");
}

NORETURN(void rb_sigwait_sleep(const rb_thread_t *, int, const rb_hrtime_t *));
void
rb_sigwait_sleep(const rb_thread_t *th, int fd, const rb_hrtime_t *rel)
{
    rb_bug("not implemented, should not be called");
}

rb_nativethread_id_t
rb_nativethread_self(void)
{
    return GetCurrentThread();
}

static void
native_set_thread_name(rb_thread_t *th)
{
}

static VALUE
native_thread_native_thread_id(rb_thread_t *th)
{
    DWORD tid = GetThreadId(th->thread_id);
    if (tid == 0) rb_sys_fail("GetThreadId");
    return ULONG2NUM(tid);
}
#define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1

#if USE_MJIT
static unsigned long __stdcall
mjit_worker(void *arg)
{
    void (*worker_func)(void) = arg;
    rb_w32_set_thread_description(GetCurrentThread(), L"ruby-mjitworker");
    worker_func();
    return 0;
}

/* Launch MJIT thread. Returns FALSE if it fails to create thread. */
int
rb_thread_create_mjit_thread(void (*worker_func)(void))
{
    size_t stack_size = 4 * 1024; /* 4KB is the minimum commit size */
    HANDLE thread_id = w32_create_thread(stack_size, mjit_worker, worker_func);
    if (thread_id == 0) {
        return FALSE;
    }

    w32_resume_thread(thread_id);
    return TRUE;
}
#endif

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */