Ruby 3.1.4p223 (2023-03-30 revision HEAD)
gc.c
1 /**********************************************************************
2 
3  gc.c -
4 
5  $Author$
6  created at: Tue Oct 5 09:44:46 JST 1993
7 
8  Copyright (C) 1993-2007 Yukihiro Matsumoto
9  Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10  Copyright (C) 2000 Information-technology Promotion Agency, Japan
11 
12 **********************************************************************/
13 
14 #define rb_data_object_alloc rb_data_object_alloc
15 #define rb_data_typed_object_alloc rb_data_typed_object_alloc
16 
17 #include "ruby/internal/config.h"
18 #ifdef _WIN32
19 # include "ruby/ruby.h"
20 #endif
21 
22 #include <signal.h>
23 
24 #define sighandler_t ruby_sighandler_t
25 
26 #ifndef _WIN32
27 #include <unistd.h>
28 #include <sys/mman.h>
29 #endif
30 
31 #include <setjmp.h>
32 #include <stdarg.h>
33 #include <stdio.h>
34 
35 /* MALLOC_HEADERS_BEGIN */
36 #ifndef HAVE_MALLOC_USABLE_SIZE
37 # ifdef _WIN32
38 # define HAVE_MALLOC_USABLE_SIZE
39 # define malloc_usable_size(a) _msize(a)
40 # elif defined HAVE_MALLOC_SIZE
41 # define HAVE_MALLOC_USABLE_SIZE
42 # define malloc_usable_size(a) malloc_size(a)
43 # endif
44 #endif
45 
46 #ifdef HAVE_MALLOC_USABLE_SIZE
47 # ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
48 /* Alternative malloc header is included in ruby/missing.h */
49 # elif defined(HAVE_MALLOC_H)
50 # include <malloc.h>
51 # elif defined(HAVE_MALLOC_NP_H)
52 # include <malloc_np.h>
53 # elif defined(HAVE_MALLOC_MALLOC_H)
54 # include <malloc/malloc.h>
55 # endif
56 #endif
57 
58 #if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
59 /* LIST_HEAD conflicts with sys/queue.h on macOS */
60 # include <sys/user.h>
61 #endif
62 /* MALLOC_HEADERS_END */
63 
64 #ifdef HAVE_SYS_TIME_H
65 # include <sys/time.h>
66 #endif
67 
68 #ifdef HAVE_SYS_RESOURCE_H
69 # include <sys/resource.h>
70 #endif
71 
72 #if defined _WIN32 || defined __CYGWIN__
73 # include <windows.h>
74 #elif defined(HAVE_POSIX_MEMALIGN)
75 #elif defined(HAVE_MEMALIGN)
76 # include <malloc.h>
77 #endif
78 
79 #include <sys/types.h>
80 
81 #ifdef __EMSCRIPTEN__
82 #include <emscripten.h>
83 #endif
84 
85 #undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
86 
87 #include "constant.h"
88 #include "debug_counter.h"
89 #include "eval_intern.h"
90 #include "gc.h"
91 #include "id_table.h"
92 #include "internal.h"
93 #include "internal/class.h"
94 #include "internal/complex.h"
95 #include "internal/cont.h"
96 #include "internal/error.h"
97 #include "internal/eval.h"
98 #include "internal/gc.h"
99 #include "internal/hash.h"
100 #include "internal/imemo.h"
101 #include "internal/io.h"
102 #include "internal/numeric.h"
103 #include "internal/object.h"
104 #include "internal/proc.h"
105 #include "internal/rational.h"
106 #include "internal/sanitizers.h"
107 #include "internal/struct.h"
108 #include "internal/symbol.h"
109 #include "internal/thread.h"
110 #include "internal/variable.h"
111 #include "internal/warnings.h"
112 #include "mjit.h"
113 #include "probes.h"
114 #include "regint.h"
115 #include "ruby/debug.h"
116 #include "ruby/io.h"
117 #include "ruby/re.h"
118 #include "ruby/st.h"
119 #include "ruby/thread.h"
120 #include "ruby/util.h"
121 #include "ruby_assert.h"
122 #include "ruby_atomic.h"
123 #include "symbol.h"
124 #include "transient_heap.h"
125 #include "vm_core.h"
126 #include "vm_sync.h"
127 #include "vm_callinfo.h"
128 #include "ractor_core.h"
129 
130 #include "builtin.h"
131 
132 #define rb_setjmp(env) RUBY_SETJMP(env)
133 #define rb_jmp_buf rb_jmpbuf_t
134 #undef rb_data_object_wrap
135 
136 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
137 #define MAP_ANONYMOUS MAP_ANON
138 #endif
139 
140 static inline struct rbimpl_size_mul_overflow_tag
141 size_add_overflow(size_t x, size_t y)
142 {
143  size_t z;
144  bool p;
145 #if 0
146 
147 #elif __has_builtin(__builtin_add_overflow)
148  p = __builtin_add_overflow(x, y, &z);
149 
150 #elif defined(DSIZE_T)
151  RB_GNUC_EXTENSION DSIZE_T dx = x;
152  RB_GNUC_EXTENSION DSIZE_T dy = y;
153  RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
154  p = dz > SIZE_MAX;
155  z = (size_t)dz;
156 
157 #else
158  z = x + y;
159  p = z < y;
160 
161 #endif
162  return (struct rbimpl_size_mul_overflow_tag) { p, z, };
163 }
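/* Editor's note (illustrative, not part of the original source): the portable
 * fallback above relies on unsigned wrap-around -- if x + y overflows, the
 * wrapped sum is necessarily smaller than either operand, so `z < y` detects
 * it.  Worked example on a 64-bit build:
 *
 *     size_add_overflow(100, 28)      -> { .left = false, .right = 128 }
 *     size_add_overflow(SIZE_MAX, 1)  -> { .left = true,  .right = 0   }
 *
 * `left` carries the overflow flag and `right` the (possibly wrapped) result,
 * mirroring the layout returned by rbimpl_size_mul_overflow(). */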
164 
165 static inline struct rbimpl_size_mul_overflow_tag
166 size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
167 {
168  struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
169  struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
170  return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
171 }
172 
173 static inline struct rbimpl_size_mul_overflow_tag
174 size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
175 {
176  struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
177  struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
178  struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
179  return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
180 }
181 
182 PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
183 
184 static inline size_t
185 size_mul_or_raise(size_t x, size_t y, VALUE exc)
186 {
187  struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
188  if (LIKELY(!t.left)) {
189  return t.right;
190  }
191  else if (rb_during_gc()) {
192  rb_memerror(); /* or...? */
193  }
194  else {
195  gc_raise(
196  exc,
197  "integer overflow: %"PRIuSIZE
198  " * %"PRIuSIZE
199  " > %"PRIuSIZE,
200  x, y, (size_t)SIZE_MAX);
201  }
202 }
203 
204 size_t
205 rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
206 {
207  return size_mul_or_raise(x, y, exc);
208 }
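/* Editor's note: a minimal usage sketch (hypothetical helper, not in gc.c)
 * showing why callers prefer rb_size_mul_or_raise() over a bare multiply:
 * an overflowing `count * elem_size` raises the given exception class
 * instead of silently wrapping and under-allocating. */
#if 0
static void *
example_alloc_array(size_t count, size_t elem_size)
{
    /* raises rb_eArgError if count * elem_size does not fit in size_t */
    size_t bytes = rb_size_mul_or_raise(count, elem_size, rb_eArgError);
    return ruby_xmalloc(bytes);
}
#endif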
209 
210 static inline size_t
211 size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
212 {
213  struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
214  if (LIKELY(!t.left)) {
215  return t.right;
216  }
217  else if (rb_during_gc()) {
218  rb_memerror(); /* or...? */
219  }
220  else {
221  gc_raise(
222  exc,
223  "integer overflow: %"PRIuSIZE
224  " * %"PRIuSIZE
225  " + %"PRIuSIZE
226  " > %"PRIuSIZE,
227  x, y, z, (size_t)SIZE_MAX);
228  }
229 }
230 
231 size_t
232 rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
233 {
234  return size_mul_add_or_raise(x, y, z, exc);
235 }
236 
237 static inline size_t
238 size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
239 {
240  struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
241  if (LIKELY(!t.left)) {
242  return t.right;
243  }
244  else if (rb_during_gc()) {
245  rb_memerror(); /* or...? */
246  }
247  else {
248  gc_raise(
249  exc,
250  "integer overflow: %"PRIdSIZE
251  " * %"PRIdSIZE
252  " + %"PRIdSIZE
253  " * %"PRIdSIZE
254  " > %"PRIdSIZE,
255  x, y, z, w, (size_t)SIZE_MAX);
256  }
257 }
258 
259 #if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
260 /* trick the compiler into thinking an external signal handler uses this */
261 volatile VALUE rb_gc_guarded_val;
262 volatile VALUE *
263 rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
264 {
265  rb_gc_guarded_val = val;
266 
267  return ptr;
268 }
269 #endif
270 
271 #ifndef GC_HEAP_INIT_SLOTS
272 #define GC_HEAP_INIT_SLOTS 10000
273 #endif
274 #ifndef GC_HEAP_FREE_SLOTS
275 #define GC_HEAP_FREE_SLOTS 4096
276 #endif
277 #ifndef GC_HEAP_GROWTH_FACTOR
278 #define GC_HEAP_GROWTH_FACTOR 1.8
279 #endif
280 #ifndef GC_HEAP_GROWTH_MAX_SLOTS
281 #define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 means disabled */
282 #endif
283 #ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
284 #define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
285 #endif
286 
287 #ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
288 #define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
289 #endif
290 #ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
291 #define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
292 #endif
293 #ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
294 #define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
295 #endif
296 
297 #ifndef GC_MALLOC_LIMIT_MIN
298 #define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
299 #endif
300 #ifndef GC_MALLOC_LIMIT_MAX
301 #define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
302 #endif
303 #ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
304 #define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
305 #endif
306 
307 #ifndef GC_OLDMALLOC_LIMIT_MIN
308 #define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
309 #endif
310 #ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
311 #define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
312 #endif
313 #ifndef GC_OLDMALLOC_LIMIT_MAX
314 #define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
315 #endif
316 
317 #ifndef PRINT_MEASURE_LINE
318 #define PRINT_MEASURE_LINE 0
319 #endif
320 #ifndef PRINT_ENTER_EXIT_TICK
321 #define PRINT_ENTER_EXIT_TICK 0
322 #endif
323 #ifndef PRINT_ROOT_TICKS
324 #define PRINT_ROOT_TICKS 0
325 #endif
326 
327 #define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
328 #define TICK_TYPE 1
329 
330 typedef struct {
331  size_t heap_init_slots;
332  size_t heap_free_slots;
333  double growth_factor;
334  size_t growth_max_slots;
335 
336  double heap_free_slots_min_ratio;
337  double heap_free_slots_goal_ratio;
338  double heap_free_slots_max_ratio;
339  double oldobject_limit_factor;
340 
341  size_t malloc_limit_min;
342  size_t malloc_limit_max;
343  double malloc_limit_growth_factor;
344 
345  size_t oldmalloc_limit_min;
346  size_t oldmalloc_limit_max;
347  double oldmalloc_limit_growth_factor;
348 
349  VALUE gc_stress;
350 } ruby_gc_params_t;
351 
352 static ruby_gc_params_t gc_params = {
353  GC_HEAP_INIT_SLOTS,
354  GC_HEAP_FREE_SLOTS,
355  GC_HEAP_GROWTH_FACTOR,
356  GC_HEAP_GROWTH_MAX_SLOTS,
357 
358  GC_HEAP_FREE_SLOTS_MIN_RATIO,
359  GC_HEAP_FREE_SLOTS_GOAL_RATIO,
360  GC_HEAP_FREE_SLOTS_MAX_RATIO,
361  GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
362 
363  GC_MALLOC_LIMIT_MIN,
364  GC_MALLOC_LIMIT_MAX,
365  GC_MALLOC_LIMIT_GROWTH_FACTOR,
366 
367  GC_OLDMALLOC_LIMIT_MIN,
368  GC_OLDMALLOC_LIMIT_MAX,
369  GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
370 
371  FALSE,
372 };
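/* Editor's note: these compile-time defaults are normally overridden at boot
 * from RUBY_GC_* environment variables with matching names (for example
 * RUBY_GC_HEAP_INIT_SLOTS, RUBY_GC_HEAP_GROWTH_FACTOR, RUBY_GC_MALLOC_LIMIT_MAX),
 * which ruby_gc_set_params() further down this file parses into gc_params.
 * A typical tuning invocation might look like:
 *
 *     RUBY_GC_HEAP_INIT_SLOTS=600000 RUBY_GC_HEAP_GROWTH_FACTOR=1.25 ruby app.rb
 */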
373 
374 /* GC_DEBUG:
375  * enable to embed GC debugging information.
376  */
377 #ifndef GC_DEBUG
378 #define GC_DEBUG 0
379 #endif
380 
381 /* RGENGC_DEBUG:
382  * 1: basic information
383  * 2: remember set operation
384  * 3: mark
385  * 4:
386  * 5: sweep
387  */
388 #ifndef RGENGC_DEBUG
389 #ifdef RUBY_DEVEL
390 #define RGENGC_DEBUG -1
391 #else
392 #define RGENGC_DEBUG 0
393 #endif
394 #endif
395 #if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
396 # define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
397 #elif defined(HAVE_VA_ARGS_MACRO)
398 # define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
399 #else
400 # define RGENGC_DEBUG_ENABLED(level) 0
401 #endif
402 int ruby_rgengc_debug;
403 
404 /* RGENGC_CHECK_MODE
405  * 0: disable all assertions
406  * 1: enable assertions (to debug RGenGC)
407  * 2: enable internal consistency check at each GC (for debugging)
408  * 3: enable internal consistency check at each GC steps (for debugging)
409  * 4: enable liveness check
410  * 5: show all references
411  */
412 #ifndef RGENGC_CHECK_MODE
413 #define RGENGC_CHECK_MODE 0
414 #endif
415 
416 // Note: using RUBY_ASSERT_WHEN() expands the macro in expr (info by nobu).
417 #define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
418 
419 /* RGENGC_OLD_NEWOBJ_CHECK
420  * 0: disable this check
421  * >0: create an OLD object at new-object creation time.
422  *
423  * One OLD object is made per RGENGC_OLD_NEWOBJ_CHECK WB-protected object creations.
424  */
425 #ifndef RGENGC_OLD_NEWOBJ_CHECK
426 #define RGENGC_OLD_NEWOBJ_CHECK 0
427 #endif
428 
429 /* RGENGC_PROFILE
430  * 0: disable RGenGC profiling
431  * 1: enable profiling for basic information
432  * 2: enable profiling for each type
433  */
434 #ifndef RGENGC_PROFILE
435 #define RGENGC_PROFILE 0
436 #endif
437 
438 /* RGENGC_ESTIMATE_OLDMALLOC
439  * Enable/disable estimation of how much malloc'ed memory is held by old objects.
440  * If the estimate exceeds the threshold, a full GC is invoked.
441  * 0: disable estimation.
442  * 1: enable estimation.
443  */
444 #ifndef RGENGC_ESTIMATE_OLDMALLOC
445 #define RGENGC_ESTIMATE_OLDMALLOC 1
446 #endif
447 
448 /* RGENGC_FORCE_MAJOR_GC
449  * Force major/full GC if this macro is not 0.
450  */
451 #ifndef RGENGC_FORCE_MAJOR_GC
452 #define RGENGC_FORCE_MAJOR_GC 0
453 #endif
454 
455 #ifndef GC_PROFILE_MORE_DETAIL
456 #define GC_PROFILE_MORE_DETAIL 0
457 #endif
458 #ifndef GC_PROFILE_DETAIL_MEMORY
459 #define GC_PROFILE_DETAIL_MEMORY 0
460 #endif
461 #ifndef GC_ENABLE_INCREMENTAL_MARK
462 #define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
463 #endif
464 #ifndef GC_ENABLE_LAZY_SWEEP
465 #define GC_ENABLE_LAZY_SWEEP 1
466 #endif
467 #ifndef CALC_EXACT_MALLOC_SIZE
468 #define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
469 #endif
470 #if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
471 #ifndef MALLOC_ALLOCATED_SIZE
472 #define MALLOC_ALLOCATED_SIZE 0
473 #endif
474 #else
475 #define MALLOC_ALLOCATED_SIZE 0
476 #endif
477 #ifndef MALLOC_ALLOCATED_SIZE_CHECK
478 #define MALLOC_ALLOCATED_SIZE_CHECK 0
479 #endif
480 
481 #ifndef GC_DEBUG_STRESS_TO_CLASS
482 #define GC_DEBUG_STRESS_TO_CLASS 0
483 #endif
484 
485 #ifndef RGENGC_OBJ_INFO
486 #define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
487 #endif
488 
489 typedef enum {
490  GPR_FLAG_NONE = 0x000,
491  /* major reason */
492  GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
493  GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
494  GPR_FLAG_MAJOR_BY_SHADY = 0x004,
495  GPR_FLAG_MAJOR_BY_FORCE = 0x008,
496 #if RGENGC_ESTIMATE_OLDMALLOC
497  GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
498 #endif
499  GPR_FLAG_MAJOR_MASK = 0x0ff,
500 
501  /* gc reason */
502  GPR_FLAG_NEWOBJ = 0x100,
503  GPR_FLAG_MALLOC = 0x200,
504  GPR_FLAG_METHOD = 0x400,
505  GPR_FLAG_CAPI = 0x800,
506  GPR_FLAG_STRESS = 0x1000,
507 
508  /* others */
509  GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
510  GPR_FLAG_HAVE_FINALIZE = 0x4000,
511  GPR_FLAG_IMMEDIATE_MARK = 0x8000,
512  GPR_FLAG_FULL_MARK = 0x10000,
513  GPR_FLAG_COMPACT = 0x20000,
514 
515  GPR_DEFAULT_REASON =
516  (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
517  GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
518 } gc_profile_record_flag;
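/* Editor's note: illustrative sketch (hypothetical helper, not in gc.c) of
 * how a GC "reason" bitmask built from these flags is decoded: the bits under
 * GPR_FLAG_MAJOR_MASK record why the cycle had to be a major GC, and the
 * trigger bits (NEWOBJ/MALLOC/METHOD/CAPI/STRESS) record what started it. */
#if 0
static void
example_describe_reason(unsigned int reason)
{
    if (reason & GPR_FLAG_MAJOR_MASK) {
        if (reason & GPR_FLAG_MAJOR_BY_OLDGEN) fprintf(stderr, "major: old generation limit\n");
        if (reason & GPR_FLAG_MAJOR_BY_SHADY)  fprintf(stderr, "major: too many WB-unprotected objects\n");
        if (reason & GPR_FLAG_MAJOR_BY_FORCE)  fprintf(stderr, "major: forced\n");
    }
    if (reason & GPR_FLAG_NEWOBJ) fprintf(stderr, "triggered by object allocation\n");
    if (reason & GPR_FLAG_MALLOC) fprintf(stderr, "triggered by hitting malloc_limit\n");
    if (reason & GPR_FLAG_METHOD) fprintf(stderr, "triggered by an explicit GC method call\n");
}
#endif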
519 
520 typedef struct gc_profile_record {
521  unsigned int flags;
522 
523  double gc_time;
524  double gc_invoke_time;
525 
526  size_t heap_total_objects;
527  size_t heap_use_size;
528  size_t heap_total_size;
529  size_t moved_objects;
530 
531 #if GC_PROFILE_MORE_DETAIL
532  double gc_mark_time;
533  double gc_sweep_time;
534 
535  size_t heap_use_pages;
536  size_t heap_live_objects;
537  size_t heap_free_objects;
538 
539  size_t allocate_increase;
540  size_t allocate_limit;
541 
542  double prepare_time;
543  size_t removing_objects;
544  size_t empty_objects;
545 #if GC_PROFILE_DETAIL_MEMORY
546  long maxrss;
547  long minflt;
548  long majflt;
549 #endif
550 #endif
551 #if MALLOC_ALLOCATED_SIZE
552  size_t allocated_size;
553 #endif
554 
555 #if RGENGC_PROFILE > 0
556  size_t old_objects;
557  size_t remembered_normal_objects;
558  size_t remembered_shady_objects;
559 #endif
560 } gc_profile_record;
561 
562 #define FL_FROM_FREELIST FL_USER0
563 
564 struct RMoved {
565  VALUE flags;
566  VALUE dummy;
567  VALUE destination;
568 };
569 
570 #define RMOVED(obj) ((struct RMoved *)(obj))
571 
572 typedef struct RVALUE {
573  union {
574  struct {
575  VALUE flags; /* always 0 for freed obj */
576  struct RVALUE *next;
577  } free;
578  struct RMoved moved;
579  struct RBasic basic;
580  struct RObject object;
581  struct RClass klass;
582  struct RFloat flonum;
583  struct RString string;
584  struct RArray array;
585  struct RRegexp regexp;
586  struct RHash hash;
587  struct RData data;
588  struct RTypedData typeddata;
589  struct RStruct rstruct;
590  struct RBignum bignum;
591  struct RFile file;
592  struct RMatch match;
593  struct RRational rational;
594  struct RComplex complex;
595  struct RSymbol symbol;
596  union {
597  rb_cref_t cref;
598  struct vm_svar svar;
599  struct vm_throw_data throw_data;
600  struct vm_ifunc ifunc;
601  struct MEMO memo;
602  struct rb_method_entry_struct ment;
603  const rb_iseq_t iseq;
604  rb_env_t env;
605  struct rb_imemo_tmpbuf_struct alloc;
606  rb_ast_t ast;
607  } imemo;
608  struct {
609  struct RBasic basic;
610  VALUE v1;
611  VALUE v2;
612  VALUE v3;
613  } values;
614  } as;
615 #if GC_DEBUG
616  const char *file;
617  int line;
618 #endif
619 } RVALUE;
620 
621 #if GC_DEBUG
622 STATIC_ASSERT(sizeof_rvalue, offsetof(RVALUE, file) == SIZEOF_VALUE * 5);
623 #else
624 STATIC_ASSERT(sizeof_rvalue, sizeof(RVALUE) == SIZEOF_VALUE * 5);
625 #endif
626 STATIC_ASSERT(alignof_rvalue, RUBY_ALIGNOF(RVALUE) == SIZEOF_VALUE);
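/* Editor's note: worked example of the size assertion above on a 64-bit
 * build (SIZEOF_VALUE == 8): sizeof(RVALUE) == 5 * 8 == 40 bytes, i.e. the
 * two RBasic words (flags + klass) plus the three payload VALUEs of the
 * widest `values` view.  This 40-byte base slot is what the variable-width
 * size pools (see rb_objspace_alloc() below) scale up by powers of two. */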
627 
628 typedef uintptr_t bits_t;
629 enum {
630  BITS_SIZE = sizeof(bits_t),
631  BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
632 };
633 #define popcount_bits rb_popcount_intptr
634 
635 struct heap_page_header {
636  struct heap_page *page;
637 };
638 
639 struct heap_page_body {
640  struct heap_page_header header;
641  /* char gap[]; */
642  /* RVALUE values[]; */
643 };
644 
645 struct gc_list {
646  VALUE *varptr;
647  struct gc_list *next;
648 };
649 
650 #define STACK_CHUNK_SIZE 500
651 
652 typedef struct stack_chunk {
653  VALUE data[STACK_CHUNK_SIZE];
654  struct stack_chunk *next;
655 } stack_chunk_t;
656 
657 typedef struct mark_stack {
658  stack_chunk_t *chunk;
659  stack_chunk_t *cache;
660  int index;
661  int limit;
662  size_t cache_size;
663  size_t unused_cache_size;
664 } mark_stack_t;
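/* Editor's note: a simplified sketch (hypothetical code, not the real
 * implementation) of how this chunked mark stack grows.  The actual
 * push_mark_stack()/pop_mark_stack() defined later in this file work the
 * same way but reuse chunks from the `cache` list instead of calling
 * malloc() directly. */
#if 0
static void
example_push(mark_stack_t *stack, VALUE obj)
{
    if (stack->index == stack->limit) {                /* current chunk full     */
        stack_chunk_t *next = malloc(sizeof(*next));   /* error handling omitted */
        next->next = stack->chunk;                     /* prepend a fresh chunk  */
        stack->chunk = next;
        stack->index = 0;
    }
    stack->chunk->data[stack->index++] = obj;          /* store and advance      */
}
#endif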
665 
666 #define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
667 #define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)
668 
669 typedef struct rb_heap_struct {
670  struct heap_page *free_pages;
671  struct list_head pages;
672  struct heap_page *sweeping_page; /* iterator for .pages */
673  struct heap_page *compact_cursor;
674  RVALUE * compact_cursor_index;
675 #if GC_ENABLE_INCREMENTAL_MARK
676  struct heap_page *pooled_pages;
677 #endif
678  size_t total_pages; /* total page count in a heap */
679  size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
680 } rb_heap_t;
681 
682 typedef struct rb_size_pool_struct {
683  short slot_size;
684 
685  size_t allocatable_pages;
686 
687 #if USE_RVARGC
688  /* Sweeping statistics */
689  size_t freed_slots;
690  size_t empty_slots;
691 
692  /* Global statistics */
693  size_t force_major_gc_count;
694 #endif
695 
696  rb_heap_t eden_heap;
697  rb_heap_t tomb_heap;
698 } rb_size_pool_t;
699 
700 enum gc_mode {
701  gc_mode_none,
702  gc_mode_marking,
703  gc_mode_sweeping
704 };
705 
706 typedef struct rb_objspace {
707  struct {
708  size_t limit;
709  size_t increase;
710 #if MALLOC_ALLOCATED_SIZE
711  size_t allocated_size;
712  size_t allocations;
713 #endif
714  } malloc_params;
715 
716  struct {
717  unsigned int mode : 2;
718  unsigned int immediate_sweep : 1;
719  unsigned int dont_gc : 1;
720  unsigned int dont_incremental : 1;
721  unsigned int during_gc : 1;
722  unsigned int during_compacting : 1;
723  unsigned int gc_stressful: 1;
724  unsigned int has_hook: 1;
725  unsigned int during_minor_gc : 1;
726 #if GC_ENABLE_INCREMENTAL_MARK
727  unsigned int during_incremental_marking : 1;
728 #endif
729  unsigned int measure_gc : 1;
730  } flags;
731 
732  rb_event_flag_t hook_events;
733  size_t total_allocated_objects;
734  VALUE next_object_id;
735 
736  rb_size_pool_t size_pools[SIZE_POOL_COUNT];
737 
738  struct {
739  rb_atomic_t finalizing;
740  } atomic_flags;
741 
742  mark_stack_t mark_stack;
743  size_t marked_slots;
744 
745  struct {
746  struct heap_page **sorted;
747  size_t allocated_pages;
748  size_t allocatable_pages;
749  size_t sorted_length;
750  RVALUE *range[2];
751  size_t freeable_pages;
752 
753  /* final */
754  size_t final_slots;
755  VALUE deferred_final;
756  } heap_pages;
757 
758  st_table *finalizer_table;
759 
760  struct {
761  int run;
762  unsigned int latest_gc_info;
763  gc_profile_record *records;
764  gc_profile_record *current_record;
765  size_t next_index;
766  size_t size;
767 
768 #if GC_PROFILE_MORE_DETAIL
769  double prepare_time;
770 #endif
771  double invoke_time;
772 
773  size_t minor_gc_count;
774  size_t major_gc_count;
775  size_t compact_count;
776  size_t read_barrier_faults;
777 #if RGENGC_PROFILE > 0
778  size_t total_generated_normal_object_count;
779  size_t total_generated_shady_object_count;
780  size_t total_shade_operation_count;
781  size_t total_promoted_count;
782  size_t total_remembered_normal_object_count;
783  size_t total_remembered_shady_object_count;
784 
785 #if RGENGC_PROFILE >= 2
786  size_t generated_normal_object_count_types[RUBY_T_MASK];
787  size_t generated_shady_object_count_types[RUBY_T_MASK];
788  size_t shade_operation_count_types[RUBY_T_MASK];
789  size_t promoted_types[RUBY_T_MASK];
790  size_t remembered_normal_object_count_types[RUBY_T_MASK];
791  size_t remembered_shady_object_count_types[RUBY_T_MASK];
792 #endif
793 #endif /* RGENGC_PROFILE */
794 
795  /* temporary profiling space */
796  double gc_sweep_start_time;
797  size_t total_allocated_objects_at_gc_start;
798  size_t heap_used_at_gc_start;
799 
800  /* basic statistics */
801  size_t count;
802  size_t total_freed_objects;
803  size_t total_allocated_pages;
804  size_t total_freed_pages;
805  uint64_t total_time_ns;
806  struct timespec start_time;
807  } profile;
808  struct gc_list *global_list;
809 
810  VALUE gc_stress_mode;
811 
812  struct {
813  VALUE parent_object;
814  int need_major_gc;
815  size_t last_major_gc;
816  size_t uncollectible_wb_unprotected_objects;
817  size_t uncollectible_wb_unprotected_objects_limit;
818  size_t old_objects;
819  size_t old_objects_limit;
820 
821 #if RGENGC_ESTIMATE_OLDMALLOC
822  size_t oldmalloc_increase;
823  size_t oldmalloc_increase_limit;
824 #endif
825 
826 #if RGENGC_CHECK_MODE >= 2
827  struct st_table *allrefs_table;
828  size_t error_count;
829 #endif
830  } rgengc;
831 
832  struct {
833  size_t considered_count_table[T_MASK];
834  size_t moved_count_table[T_MASK];
835  size_t total_moved;
836  } rcompactor;
837 
838 #if GC_ENABLE_INCREMENTAL_MARK
839  struct {
840  size_t pooled_slots;
841  size_t step_slots;
842  } rincgc;
843 #endif
844 
845  st_table *id_to_obj_tbl;
846  st_table *obj_to_id_tbl;
847 
848 #if GC_DEBUG_STRESS_TO_CLASS
849  VALUE stress_to_class;
850 #endif
851 } rb_objspace_t;
852 
853 
854 /* default tiny heap size: 16KB */
855 #define HEAP_PAGE_ALIGN_LOG 14
856 #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
857 enum {
858  HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
859  HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
860  HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
861  HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
862  HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
863  HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
864 };
865 #define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
866 #define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
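/* Editor's note: worked example of the page geometry above on a typical
 * 64-bit build (sizeof(RVALUE) == 40, sizeof(struct heap_page_header) == 8):
 *
 *     HEAP_PAGE_ALIGN        = 1 << 14          = 16384 bytes (16 KiB)
 *     HEAP_PAGE_SIZE         = HEAP_PAGE_ALIGN  = 16384 bytes
 *     HEAP_PAGE_OBJ_LIMIT    = (16384 - 8) / 40 = 409 base-size slots
 *     HEAP_PAGE_BITMAP_LIMIT = ceil(ceil(16384 / 40) / 64) = 7 bitmap words
 *     HEAP_PAGE_BITMAP_SIZE  = 8 * 7            = 56 bytes per bitmap
 *
 * Because every page body is HEAP_PAGE_ALIGN-aligned, masking an object
 * address with ~HEAP_PAGE_ALIGN_MASK (see GET_PAGE_BODY below) recovers the
 * page it lives on in constant time. */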
867 
868 #ifdef HAVE_MMAP
869 # if HAVE_CONST_PAGE_SIZE
870 /* If PAGE_SIZE is available as a compile-time constant, then we can use it directly. */
871 static const bool USE_MMAP_ALIGNED_ALLOC = (PAGE_SIZE <= HEAP_PAGE_SIZE);
872 # elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
873 /* PAGE_SIZE <= HEAP_PAGE_SIZE */
874 static const bool USE_MMAP_ALIGNED_ALLOC = true;
875 # else
876 /* Otherwise, fall back to determining if we can use mmap during runtime. */
877 # define USE_MMAP_ALIGNED_ALLOC (use_mmap_aligned_alloc != false)
878 
879 static bool use_mmap_aligned_alloc;
880 # endif
881 #elif !defined(__MINGW32__) && !defined(_WIN32)
882 static const bool USE_MMAP_ALIGNED_ALLOC = false;
883 #endif
884 
885 struct heap_page {
886  short slot_size;
887  short total_slots;
888  short free_slots;
889  short pinned_slots;
890  short final_slots;
891  struct {
892  unsigned int before_sweep : 1;
893  unsigned int has_remembered_objects : 1;
894  unsigned int has_uncollectible_shady_objects : 1;
895  unsigned int in_tomb : 1;
896  } flags;
897 
898  rb_size_pool_t *size_pool;
899 
900  struct heap_page *free_next;
901  RVALUE *start;
902  RVALUE *freelist;
903  struct list_node page_node;
904 
905  bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
906  /* the following three bitmaps are cleared at the beginning of full GC */
907  bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
908  bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
909  bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
910 
911  /* If set, the object is not movable */
912  bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
913 };
914 
915 #define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
916 #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
917 #define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
918 
919 #define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
920 #define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
921 #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
922 #define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
923 
924 /* Bitmap Operations */
925 #define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
926 #define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
927 #define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
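/* Editor's note: worked example of the bitmap addressing above, assuming a
 * 64-bit build (sizeof(RVALUE) == 40, BITS_BITLENGTH == 64).  For a
 * hypothetical object p located 16000 bytes into its 16 KiB page body:
 *
 *     NUM_IN_PAGE(p)   = 16000 / 40 = 400   slot index within the page
 *     BITMAP_INDEX(p)  = 400 / 64   = 6     which bits_t word holds its bit
 *     BITMAP_OFFSET(p) = 400 % 64   = 16    bit position inside that word
 *     BITMAP_BIT(p)    = (bits_t)1 << 16
 *
 * so MARKED_IN_BITMAP(bits, p) tests bit 16 of bits[6], and MARK_IN_BITMAP /
 * CLEAR_IN_BITMAP set or clear exactly that bit. */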
928 
929 /* getting bitmap */
930 #define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
931 #define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
932 #define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
933 #define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
934 #define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
935 
936 /* Aliases */
937 #define rb_objspace (*rb_objspace_of(GET_VM()))
938 #define rb_objspace_of(vm) ((vm)->objspace)
939 
940 #define ruby_initial_gc_stress gc_params.gc_stress
941 
942 VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
943 
944 #define malloc_limit objspace->malloc_params.limit
945 #define malloc_increase objspace->malloc_params.increase
946 #define malloc_allocated_size objspace->malloc_params.allocated_size
947 #define heap_pages_sorted objspace->heap_pages.sorted
948 #define heap_allocated_pages objspace->heap_pages.allocated_pages
949 #define heap_pages_sorted_length objspace->heap_pages.sorted_length
950 #define heap_pages_lomem objspace->heap_pages.range[0]
951 #define heap_pages_himem objspace->heap_pages.range[1]
952 #define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
953 #define heap_pages_final_slots objspace->heap_pages.final_slots
954 #define heap_pages_deferred_final objspace->heap_pages.deferred_final
955 #define size_pools objspace->size_pools
956 #define during_gc objspace->flags.during_gc
957 #define finalizing objspace->atomic_flags.finalizing
958 #define finalizer_table objspace->finalizer_table
959 #define global_list objspace->global_list
960 #define ruby_gc_stressful objspace->flags.gc_stressful
961 #define ruby_gc_stress_mode objspace->gc_stress_mode
962 #if GC_DEBUG_STRESS_TO_CLASS
963 #define stress_to_class objspace->stress_to_class
964 #else
965 #define stress_to_class 0
966 #endif
967 
968 #if 0
969 #define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
970 #define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
971 #define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), objspace->flags.dont_gc = (b))
972 #define dont_gc_val() (objspace->flags.dont_gc)
973 #else
974 #define dont_gc_on() (objspace->flags.dont_gc = 1)
975 #define dont_gc_off() (objspace->flags.dont_gc = 0)
976 #define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
977 #define dont_gc_val() (objspace->flags.dont_gc)
978 #endif
979 
980 static inline enum gc_mode
981 gc_mode_verify(enum gc_mode mode)
982 {
983 #if RGENGC_CHECK_MODE > 0
984  switch (mode) {
985  case gc_mode_none:
986  case gc_mode_marking:
987  case gc_mode_sweeping:
988  break;
989  default:
990  rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
991  }
992 #endif
993  return mode;
994 }
995 
996 static inline bool
997 has_sweeping_pages(rb_objspace_t *objspace)
998 {
999  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1000  if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {
1001  return TRUE;
1002  }
1003  }
1004  return FALSE;
1005 }
1006 
1007 static inline size_t
1008 heap_eden_total_pages(rb_objspace_t *objspace)
1009 {
1010  size_t count = 0;
1011  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1012  count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;
1013  }
1014  return count;
1015 }
1016 
1017 static inline size_t
1018 heap_eden_total_slots(rb_objspace_t *objspace)
1019 {
1020  size_t count = 0;
1021  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1022  count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;
1023  }
1024  return count;
1025 }
1026 
1027 static inline size_t
1028 heap_tomb_total_pages(rb_objspace_t *objspace)
1029 {
1030  size_t count = 0;
1031  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1032  count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;
1033  }
1034  return count;
1035 }
1036 
1037 static inline size_t
1038 heap_allocatable_pages(rb_objspace_t *objspace)
1039 {
1040  size_t count = 0;
1041  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1042  count += size_pools[i].allocatable_pages;
1043  }
1044  return count;
1045 }
1046 
1047 static inline size_t
1048 heap_allocatable_slots(rb_objspace_t *objspace)
1049 {
1050  size_t count = 0;
1051  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1052  rb_size_pool_t *size_pool = &size_pools[i];
1053  int slot_size_multiple = size_pool->slot_size / sizeof(RVALUE);
1054  count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
1055  }
1056  return count;
1057 }
1058 
1059 #define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
1060 #define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
1061 
1062 #define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
1063 #define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
1064 #define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
1065 #if GC_ENABLE_INCREMENTAL_MARK
1066 #define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
1067 #else
1068 #define is_incremental_marking(objspace) FALSE
1069 #endif
1070 #if GC_ENABLE_INCREMENTAL_MARK
1071 #define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
1072 #else
1073 #define will_be_incremental_marking(objspace) FALSE
1074 #endif
1075 #define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
1076 
1077 #if SIZEOF_LONG == SIZEOF_VOIDP
1078 # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
1079 # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
1080 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1081 # define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
1082 # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
1083  ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
1084 #else
1085 # error not supported
1086 #endif
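/* Editor's note: worked example of the encoding above for the common case
 * SIZEOF_LONG == SIZEOF_VOIDP (64-bit).  Heap slots are VALUE-aligned, so
 * the low bit of an object address is always zero; OR-ing in FIXNUM_FLAG
 * (the low bit) therefore yields a valid Fixnum VALUE, and XOR-ing it back
 * out recovers the address:
 *
 *     obj at address             0x7f10a4001028
 *     nonspecial_obj_id(obj)   = 0x7f10a4001029   (address | 1)
 *     obj_id_to_ref(0x...1029) = 0x7f10a4001028   (flag stripped again)
 *
 * The two macros are inverses; viewed from Ruby, the Fixnum's integer value
 * is the address shifted right by one bit. */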
1087 
1088 #define RANY(o) ((RVALUE*)(o))
1089 
1090 struct RZombie {
1091  struct RBasic basic;
1092  VALUE next;
1093  void (*dfree)(void *);
1094  void *data;
1095 };
1096 
1097 #define RZOMBIE(o) ((struct RZombie *)(o))
1098 
1099 #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
1100 
1101 #if RUBY_MARK_FREE_DEBUG
1102 int ruby_gc_debug_indent = 0;
1103 #endif
1104 VALUE rb_mGC;
1105 int ruby_disable_gc = 0;
1106 int ruby_enable_autocompact = 0;
1107 
1108 void rb_iseq_mark(const rb_iseq_t *iseq);
1109 void rb_iseq_update_references(rb_iseq_t *iseq);
1110 void rb_iseq_free(const rb_iseq_t *iseq);
1111 size_t rb_iseq_memsize(const rb_iseq_t *iseq);
1112 void rb_vm_update_references(void *ptr);
1113 
1114 void rb_gcdebug_print_obj_condition(VALUE obj);
1115 
1116 static VALUE define_final0(VALUE obj, VALUE block);
1117 
1118 NORETURN(static void *gc_vraise(void *ptr));
1119 NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
1120 NORETURN(static void negative_size_allocation_error(const char *));
1121 
1122 static void init_mark_stack(mark_stack_t *stack);
1123 
1124 static int ready_to_gc(rb_objspace_t *objspace);
1125 
1126 static int garbage_collect(rb_objspace_t *, unsigned int reason);
1127 
1128 static int gc_start(rb_objspace_t *objspace, unsigned int reason);
1129 static void gc_rest(rb_objspace_t *objspace);
1130 
1131 enum gc_enter_event {
1132  gc_enter_event_start,
1133  gc_enter_event_mark_continue,
1134  gc_enter_event_sweep_continue,
1135  gc_enter_event_rest,
1136  gc_enter_event_finalizer,
1137  gc_enter_event_rb_memerror,
1138 };
1139 
1140 static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1141 static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1142 
1143 static void gc_marks(rb_objspace_t *objspace, int full_mark);
1144 static void gc_marks_start(rb_objspace_t *objspace, int full);
1145 static int gc_marks_finish(rb_objspace_t *objspace);
1146 static void gc_marks_rest(rb_objspace_t *objspace);
1147 static void gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1148 
1149 static void gc_sweep(rb_objspace_t *objspace);
1150 static void gc_sweep_start(rb_objspace_t *objspace);
1151 static void gc_sweep_finish(rb_objspace_t *objspace);
1152 static int gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1153 static void gc_sweep_rest(rb_objspace_t *objspace);
1154 static void gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1155 
1156 static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
1157 static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
1158 static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
1159 static void gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr);
1160 NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
1161 static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
1162 
1163 static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1164 static int gc_mark_stacked_objects_all(rb_objspace_t *);
1165 static void gc_grey(rb_objspace_t *objspace, VALUE ptr);
1166 
1167 static inline int gc_mark_set(rb_objspace_t *objspace, VALUE obj);
1168 NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
1169 
1170 static void push_mark_stack(mark_stack_t *, VALUE);
1171 static int pop_mark_stack(mark_stack_t *, VALUE *);
1172 static size_t mark_stack_size(mark_stack_t *stack);
1173 static void shrink_stack_chunk_cache(mark_stack_t *stack);
1174 
1175 static size_t obj_memsize_of(VALUE obj, int use_all_types);
1176 static void gc_verify_internal_consistency(rb_objspace_t *objspace);
1177 static int gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj);
1178 static int gc_verify_heap_pages(rb_objspace_t *objspace);
1179 
1180 static void gc_stress_set(rb_objspace_t *objspace, VALUE flag);
1181 static VALUE gc_disable_no_rest(rb_objspace_t *);
1182 
1183 static double getrusage_time(void);
1184 static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
1185 static inline void gc_prof_timer_start(rb_objspace_t *);
1186 static inline void gc_prof_timer_stop(rb_objspace_t *);
1187 static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1188 static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1189 static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1190 static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1191 static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1192 static inline void gc_prof_set_heap_info(rb_objspace_t *);
1193 
1194 #define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
1195  if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
1196  *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \
1197  } \
1198 } while (0)
1199 
1200 #define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
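/* Editor's note: illustrative sketch (hypothetical code, not in gc.c) of how
 * reference-updating code uses these macros during compaction.  A moved
 * object leaves a T_MOVED forwarding cell (struct RMoved above) at its old
 * address, and every stored reference is redirected through it: */
#if 0
struct example_pair { VALUE first; VALUE second; };

static void
example_update_pair(rb_objspace_t *objspace, struct example_pair *p)
{
    UPDATE_IF_MOVED(objspace, p->first);   /* rewritten only if it points at a T_MOVED cell */
    UPDATE_IF_MOVED(objspace, p->second);  /* otherwise left untouched                      */
}
#endif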
1201 
1202 #define gc_prof_record(objspace) (objspace)->profile.current_record
1203 #define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1204 
1205 #ifdef HAVE_VA_ARGS_MACRO
1206 # define gc_report(level, objspace, ...) \
1207  if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1208 #else
1209 # define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1210 #endif
1211 PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1212 static const char *obj_info(VALUE obj);
1213 static const char *obj_type_name(VALUE obj);
1214 
1215 /*
1216  * 1 - TSC (H/W Time Stamp Counter)
1217  * 2 - getrusage
1218  */
1219 #ifndef TICK_TYPE
1220 #define TICK_TYPE 1
1221 #endif
1222 
1223 #if USE_TICK_T
1224 
1225 #if TICK_TYPE == 1
1226 /* the following code is only for internal tuning. */
1227 
1228 /* Source code to use RDTSC is quoted and modified from
1229  * http://www.mcs.anl.gov/~kazutomo/rdtsc.html
1230  * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
1231  */
1232 
1233 #if defined(__GNUC__) && defined(__i386__)
1234 typedef unsigned long long tick_t;
1235 #define PRItick "llu"
1236 static inline tick_t
1237 tick(void)
1238 {
1239  unsigned long long int x;
1240  __asm__ __volatile__ ("rdtsc" : "=A" (x));
1241  return x;
1242 }
1243 
1244 #elif defined(__GNUC__) && defined(__x86_64__)
1245 typedef unsigned long long tick_t;
1246 #define PRItick "llu"
1247 
1248 static __inline__ tick_t
1249 tick(void)
1250 {
1251  unsigned long hi, lo;
1252  __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1253  return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1254 }
1255 
1256 #elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
1257 typedef unsigned long long tick_t;
1258 #define PRItick "llu"
1259 
1260 static __inline__ tick_t
1261 tick(void)
1262 {
1263  unsigned long long val = __builtin_ppc_get_timebase();
1264  return val;
1265 }
1266 
1267 #elif defined(__aarch64__) && defined(__GNUC__)
1268 typedef unsigned long tick_t;
1269 #define PRItick "lu"
1270 
1271 static __inline__ tick_t
1272 tick(void)
1273 {
1274  unsigned long val;
1275  __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1276  return val;
1277 }
1278 
1279 
1280 #elif defined(_WIN32) && defined(_MSC_VER)
1281 #include <intrin.h>
1282 typedef unsigned __int64 tick_t;
1283 #define PRItick "llu"
1284 
1285 static inline tick_t
1286 tick(void)
1287 {
1288  return __rdtsc();
1289 }
1290 
1291 #else /* use clock */
1292 typedef clock_t tick_t;
1293 #define PRItick "llu"
1294 
1295 static inline tick_t
1296 tick(void)
1297 {
1298  return clock();
1299 }
1300 #endif /* TSC */
1301 
1302 #elif TICK_TYPE == 2
1303 typedef double tick_t;
1304 #define PRItick "4.9f"
1305 
1306 static inline tick_t
1307 tick(void)
1308 {
1309  return getrusage_time();
1310 }
1311 #else /* TICK_TYPE */
1312 #error "choose tick type"
1313 #endif /* TICK_TYPE */
1314 
1315 #define MEASURE_LINE(expr) do { \
1316  volatile tick_t start_time = tick(); \
1317  volatile tick_t end_time; \
1318  expr; \
1319  end_time = tick(); \
1320  fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1321 } while (0)
1322 
1323 #else /* USE_TICK_T */
1324 #define MEASURE_LINE(expr) expr
1325 #endif /* USE_TICK_T */
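/* Editor's note: usage sketch.  MEASURE_LINE wraps a single statement; when
 * one of the PRINT_* knobs above enables USE_TICK_T it prints the elapsed
 * tick count and the statement's source text to stderr, otherwise it
 * expands to the bare statement: */
#if 0
    MEASURE_LINE(gc_sweep_step(objspace, size_pool, heap));
    /* -> "0\t<ticks>\tgc_sweep_step(objspace, size_pool, heap)" on stderr */
#endif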
1326 
1327 static inline void *
1328 asan_unpoison_object_temporary(VALUE obj)
1329 {
1330  void *ptr = asan_poisoned_object_p(obj);
1331  asan_unpoison_object(obj, false);
1332  return ptr;
1333 }
1334 
1335 #define FL_CHECK2(name, x, pred) \
1336  ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1337  (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1338 #define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1339 #define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1340 #define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1341 
1342 #define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1343 #define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1344 #define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1345 
1346 #define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1347 #define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1348 #define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1349 
1350 #define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1351 #define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1352 #define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1353 
1354 #define RVALUE_OLD_AGE 3
1355 #define RVALUE_AGE_SHIFT 5 /* FL_PROMOTED0 bit */
1356 
1357 static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
1358 static int rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj);
1359 static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
1360 static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
1361 static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
1362 
1363 static inline int
1364 RVALUE_FLAGS_AGE(VALUE flags)
1365 {
1366  return (int)((flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT);
1367 }
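/* Editor's note: worked example of the age encoding used above.  The two
 * FL_PROMOTED0/FL_PROMOTED1 flag bits, starting at bit RVALUE_AGE_SHIFT (5),
 * store an age in the range 0..3:
 *
 *     age = (flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> 5
 *
 *     age 0    -> newly created object
 *     age 1..2 -> survived that many marking phases
 *     age 3    -> RVALUE_OLD_AGE: promoted to the old generation
 *
 * RVALUE_FLAGS_AGE_SET() below rewrites the same two bits, and
 * RVALUE_OLD_P_RAW() tests whether both are set (age == 3). */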
1368 
1369 static int
1370 check_rvalue_consistency_force(const VALUE obj, int terminate)
1371 {
1372  int err = 0;
1373  rb_objspace_t *objspace = &rb_objspace;
1374 
1375  RB_VM_LOCK_ENTER_NO_BARRIER();
1376  {
1377  if (SPECIAL_CONST_P(obj)) {
1378  fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1379  err++;
1380  }
1381  else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1382  /* check if it is in tomb_pages */
1383  struct heap_page *page = NULL;
1384  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1385  rb_size_pool_t *size_pool = &size_pools[i];
1386  list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
1387  if (&page->start[0] <= (RVALUE *)obj &&
1388  (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * size_pool->slot_size))) {
1389  fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
1390  (void *)obj, (void *)page);
1391  err++;
1392  goto skip;
1393  }
1394  }
1395  }
1396  bp();
1397  fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1398  err++;
1399  skip:
1400  ;
1401  }
1402  else {
1403  const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1404  const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1405  const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
1406  const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
1407  const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1408 
1409  if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
1410  fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
1411  err++;
1412  }
1413  if (BUILTIN_TYPE(obj) == T_NONE) {
1414  fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
1415  err++;
1416  }
1417  if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
1418  fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
1419  err++;
1420  }
1421 
1422  obj_memsize_of((VALUE)obj, FALSE);
1423 
1424  /* check generation
1425  *
1426  * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
1427  */
1428  if (age > 0 && wb_unprotected_bit) {
1429  fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
1430  err++;
1431  }
1432 
1433  if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1434  fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
1435  err++;
1436  }
1437 
1438  if (!is_full_marking(objspace)) {
1439  if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1440  fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1441  obj_info(obj), age);
1442  err++;
1443  }
1444  if (remembered_bit && age != RVALUE_OLD_AGE) {
1445  fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1446  obj_info(obj), age);
1447  err++;
1448  }
1449  }
1450 
1451  /*
1452  * check coloring
1453  *
1454  * marking:false marking:true
1455  * marked:false white *invalid*
1456  * marked:true black grey
1457  */
1458  if (is_incremental_marking(objspace) && marking_bit) {
1459  if (!is_marking(objspace) && !mark_bit) {
1460  fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
1461  err++;
1462  }
1463  }
1464  }
1465  }
1466  RB_VM_LOCK_LEAVE_NO_BARRIER();
1467 
1468  if (err > 0 && terminate) {
1469  rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
1470  }
1471  return err;
1472 }
1473 
1474 #if RGENGC_CHECK_MODE == 0
1475 static inline VALUE
1476 check_rvalue_consistency(const VALUE obj)
1477 {
1478  return obj;
1479 }
1480 #else
1481 static VALUE
1482 check_rvalue_consistency(const VALUE obj)
1483 {
1484  check_rvalue_consistency_force(obj, TRUE);
1485  return obj;
1486 }
1487 #endif
1488 
1489 static inline int
1490 gc_object_moved_p(rb_objspace_t * objspace, VALUE obj)
1491 {
1492  if (RB_SPECIAL_CONST_P(obj)) {
1493  return FALSE;
1494  }
1495  else {
1496  void *poisoned = asan_poisoned_object_p(obj);
1497  asan_unpoison_object(obj, false);
1498 
1499  int ret = BUILTIN_TYPE(obj) == T_MOVED;
1500  /* Re-poison slot if it's not the one we want */
1501  if (poisoned) {
1502  GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
1503  asan_poison_object(obj);
1504  }
1505  return ret;
1506  }
1507 }
1508 
1509 static inline int
1510 RVALUE_MARKED(VALUE obj)
1511 {
1512  check_rvalue_consistency(obj);
1513  return RVALUE_MARK_BITMAP(obj) != 0;
1514 }
1515 
1516 static inline int
1517 RVALUE_PINNED(VALUE obj)
1518 {
1519  check_rvalue_consistency(obj);
1520  return RVALUE_PIN_BITMAP(obj) != 0;
1521 }
1522 
1523 static inline int
1524 RVALUE_WB_UNPROTECTED(VALUE obj)
1525 {
1526  check_rvalue_consistency(obj);
1527  return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1528 }
1529 
1530 static inline int
1531 RVALUE_MARKING(VALUE obj)
1532 {
1533  check_rvalue_consistency(obj);
1534  return RVALUE_MARKING_BITMAP(obj) != 0;
1535 }
1536 
1537 static inline int
1538 RVALUE_REMEMBERED(VALUE obj)
1539 {
1540  check_rvalue_consistency(obj);
1541  return RVALUE_MARKING_BITMAP(obj) != 0;
1542 }
1543 
1544 static inline int
1545 RVALUE_UNCOLLECTIBLE(VALUE obj)
1546 {
1547  check_rvalue_consistency(obj);
1548  return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1549 }
1550 
1551 static inline int
1552 RVALUE_OLD_P_RAW(VALUE obj)
1553 {
1554  const VALUE promoted = FL_PROMOTED0 | FL_PROMOTED1;
1555  return (RBASIC(obj)->flags & promoted) == promoted;
1556 }
1557 
1558 static inline int
1559 RVALUE_OLD_P(VALUE obj)
1560 {
1561  check_rvalue_consistency(obj);
1562  return RVALUE_OLD_P_RAW(obj);
1563 }
1564 
1565 #if RGENGC_CHECK_MODE || GC_DEBUG
1566 static inline int
1567 RVALUE_AGE(VALUE obj)
1568 {
1569  check_rvalue_consistency(obj);
1570  return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1571 }
1572 #endif
1573 
1574 static inline void
1575 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1576 {
1577  MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1578  objspace->rgengc.old_objects++;
1579  rb_transient_heap_promote(obj);
1580 
1581 #if RGENGC_PROFILE >= 2
1582  objspace->profile.total_promoted_count++;
1583  objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
1584 #endif
1585 }
1586 
1587 static inline void
1588 RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
1589 {
1590  RB_DEBUG_COUNTER_INC(obj_promote);
1591  RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1592 }
1593 
1594 static inline VALUE
1595 RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
1596 {
1597  flags &= ~(FL_PROMOTED0 | FL_PROMOTED1);
1598  flags |= (age << RVALUE_AGE_SHIFT);
1599  return flags;
1600 }
1601 
1602 /* set age to age+1 */
1603 static inline void
1604 RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
1605 {
1606  VALUE flags = RBASIC(obj)->flags;
1607  int age = RVALUE_FLAGS_AGE(flags);
1608 
1609  if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1610  rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
1611  }
1612 
1613  age++;
1614  RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);
1615 
1616  if (age == RVALUE_OLD_AGE) {
1617  RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1618  }
1619  check_rvalue_consistency(obj);
1620 }
1621 
1622 /* set age to RVALUE_OLD_AGE */
1623 static inline void
1624 RVALUE_AGE_SET_OLD(rb_objspace_t *objspace, VALUE obj)
1625 {
1626  check_rvalue_consistency(obj);
1627  GC_ASSERT(!RVALUE_OLD_P(obj));
1628 
1629  RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
1630  RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1631 
1632  check_rvalue_consistency(obj);
1633 }
1634 
1635 /* set age to RVALUE_OLD_AGE - 1 */
1636 static inline void
1637 RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
1638 {
1639  check_rvalue_consistency(obj);
1640  GC_ASSERT(!RVALUE_OLD_P(obj));
1641 
1642  RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);
1643 
1644  check_rvalue_consistency(obj);
1645 }
1646 
1647 static inline void
1648 RVALUE_DEMOTE_RAW(rb_objspace_t *objspace, VALUE obj)
1649 {
1650  RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1651  CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1652 }
1653 
1654 static inline void
1655 RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
1656 {
1657  check_rvalue_consistency(obj);
1658  GC_ASSERT(RVALUE_OLD_P(obj));
1659 
1660  if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
1661  CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
1662  }
1663 
1664  RVALUE_DEMOTE_RAW(objspace, obj);
1665 
1666  if (RVALUE_MARKED(obj)) {
1667  objspace->rgengc.old_objects--;
1668  }
1669 
1670  check_rvalue_consistency(obj);
1671 }
1672 
1673 static inline void
1674 RVALUE_AGE_RESET_RAW(VALUE obj)
1675 {
1676  RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1677 }
1678 
1679 static inline void
1680 RVALUE_AGE_RESET(VALUE obj)
1681 {
1682  check_rvalue_consistency(obj);
1683  GC_ASSERT(!RVALUE_OLD_P(obj));
1684 
1685  RVALUE_AGE_RESET_RAW(obj);
1686  check_rvalue_consistency(obj);
1687 }
1688 
1689 static inline int
1690 RVALUE_BLACK_P(VALUE obj)
1691 {
1692  return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
1693 }
1694 
1695 #if 0
1696 static inline int
1697 RVALUE_GREY_P(VALUE obj)
1698 {
1699  return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
1700 }
1701 #endif
1702 
1703 static inline int
1704 RVALUE_WHITE_P(VALUE obj)
1705 {
1706  return RVALUE_MARKED(obj) == FALSE;
1707 }
1708 
1709 /*
1710  --------------------------- ObjectSpace -----------------------------
1711 */
1712 
1713 static inline void *
1714 calloc1(size_t n)
1715 {
1716  return calloc(1, n);
1717 }
1718 
1719 rb_objspace_t *
1720 rb_objspace_alloc(void)
1721 {
1722  rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
1723  objspace->flags.measure_gc = 1;
1724  malloc_limit = gc_params.malloc_limit_min;
1725 
1726  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1727  rb_size_pool_t *size_pool = &size_pools[i];
1728 
1729  size_pool->slot_size = sizeof(RVALUE) * (1 << i);
1730 
1731  list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
1732  list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
1733  }
1734 
1735  dont_gc_on();
1736 
1737  return objspace;
1738 }
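/* Editor's note: worked example of the slot sizes configured above on a
 * 64-bit build (sizeof(RVALUE) == 40).  Size pool i uses slots of
 * sizeof(RVALUE) * (1 << i) bytes, i.e.
 *
 *     pool 0: 40   pool 1: 80   pool 2: 160   pool 3: 320   pool 4: 640
 *
 * assuming SIZE_POOL_COUNT == 5 (the variable-width / USE_RVARGC layout);
 * builds without it compile with a single 40-byte pool. */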
1739 
1740 static void free_stack_chunks(mark_stack_t *);
1741 static void mark_stack_free_cache(mark_stack_t *);
1742 static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
1743 
1744 void
1745 rb_objspace_free(rb_objspace_t *objspace)
1746 {
1747  if (is_lazy_sweeping(objspace))
1748  rb_bug("lazy sweeping underway when freeing object space");
1749 
1750  if (objspace->profile.records) {
1751  free(objspace->profile.records);
1752  objspace->profile.records = 0;
1753  }
1754 
1755  if (global_list) {
1756  struct gc_list *list, *next;
1757  for (list = global_list; list; list = next) {
1758  next = list->next;
1759  xfree(list);
1760  }
1761  }
1762  if (heap_pages_sorted) {
1763  size_t i;
1764  for (i = 0; i < heap_allocated_pages; ++i) {
1765  heap_page_free(objspace, heap_pages_sorted[i]);
1766  }
1767  free(heap_pages_sorted);
1768  heap_allocated_pages = 0;
1769  heap_pages_sorted_length = 0;
1770  heap_pages_lomem = 0;
1771  heap_pages_himem = 0;
1772 
1773  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1774  rb_size_pool_t *size_pool = &size_pools[i];
1775  SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
1776  SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
1777  }
1778  }
1779  st_free_table(objspace->id_to_obj_tbl);
1780  st_free_table(objspace->obj_to_id_tbl);
1781 
1782  free_stack_chunks(&objspace->mark_stack);
1783  mark_stack_free_cache(&objspace->mark_stack);
1784 
1785  free(objspace);
1786 }
1787 
1788 static void
1789 heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
1790 {
1791  struct heap_page **sorted;
1792  size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);
1793 
1794  gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",
1795  next_length, size);
1796 
1797  if (heap_pages_sorted_length > 0) {
1798  sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
1799  if (sorted) heap_pages_sorted = sorted;
1800  }
1801  else {
1802  sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
1803  }
1804 
1805  if (sorted == 0) {
1806  rb_memerror();
1807  }
1808 
1809  heap_pages_sorted_length = next_length;
1810 }
1811 
1812 static void
1813 heap_pages_expand_sorted(rb_objspace_t *objspace)
1814 {
1815  /* Usually heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length,
1816  * because heap_allocatable_pages already counts heap_tomb->total_pages (tomb pages are recycled).
1817  * However, if there are pages with no empty slots, new pages may have to be created,
1818  * so the additional allocatable page count (heap_tomb->total_pages) is added here as well.
1819  */
1820  size_t next_length = heap_allocatable_pages(objspace);
1821  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1822  rb_size_pool_t *size_pool = &size_pools[i];
1823  next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
1824  next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
1825  }
1826 
1827  if (next_length > heap_pages_sorted_length) {
1828  heap_pages_expand_sorted_to(objspace, next_length);
1829  }
1830 
1831  GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
1832  GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
1833 }
1834 
1835 static void
1836 size_pool_allocatable_pages_set(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t s)
1837 {
1838  size_pool->allocatable_pages = s;
1839  heap_pages_expand_sorted(objspace);
1840 }
1841 
1842 static inline void
1843 heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1844 {
1845  ASSERT_vm_locking();
1846 
1847  RVALUE *p = (RVALUE *)obj;
1848 
1849  asan_unpoison_object(obj, false);
1850 
1851  asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1852 
1853  p->as.free.flags = 0;
1854  p->as.free.next = page->freelist;
1855  page->freelist = p;
1856  asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1857 
1858  if (RGENGC_CHECK_MODE &&
1859  /* obj should belong to page */
1860  !(&page->start[0] <= (RVALUE *)obj &&
1861  (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
1862  obj % sizeof(RVALUE) == 0)) {
1863  rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
1864  }
1865 
1866  asan_poison_object(obj);
1867  gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
1868 }
1869 
1870 static inline void
1871 heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
1872 {
1873  asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1874  GC_ASSERT(page->free_slots != 0);
1875  GC_ASSERT(page->freelist != NULL);
1876 
1877  page->free_next = heap->free_pages;
1878  heap->free_pages = page;
1879 
1880  RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);
1881 
1882  asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1883 }
1884 
1885 #if GC_ENABLE_INCREMENTAL_MARK
1886 static inline void
1887 heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1888 {
1889  asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1890  GC_ASSERT(page->free_slots != 0);
1891  GC_ASSERT(page->freelist != NULL);
1892 
1893  page->free_next = heap->pooled_pages;
1894  heap->pooled_pages = page;
1895  objspace->rincgc.pooled_slots += page->free_slots;
1896 
1897  asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1898 }
1899 #endif
1900 
1901 static void
1902 heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1903 {
1904  list_del(&page->page_node);
1905  heap->total_pages--;
1906  heap->total_slots -= page->total_slots;
1907 }
1908 
1909 static void rb_aligned_free(void *ptr, size_t size);
1910 
1911 static void
1912 heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
1913 {
1914  heap_allocated_pages--;
1915  objspace->profile.total_freed_pages++;
1916  rb_aligned_free(GET_PAGE_BODY(page->start), HEAP_PAGE_SIZE);
1917  free(page);
1918 }
1919 
1920 static void
1921 heap_pages_free_unused_pages(rb_objspace_t *objspace)
1922 {
1923  size_t i, j;
1924 
1925  bool has_pages_in_tomb_heap = FALSE;
1926  for (i = 0; i < SIZE_POOL_COUNT; i++) {
1927  if (!list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
1928  has_pages_in_tomb_heap = TRUE;
1929  break;
1930  }
1931  }
1932 
1933  if (has_pages_in_tomb_heap) {
1934  for (i = j = 1; j < heap_allocated_pages; i++) {
1935  struct heap_page *page = heap_pages_sorted[i];
1936 
1937  if (page->flags.in_tomb && page->free_slots == page->total_slots) {
1938  heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
1939  heap_page_free(objspace, page);
1940  }
1941  else {
1942  if (i != j) {
1943  heap_pages_sorted[j] = page;
1944  }
1945  j++;
1946  }
1947  }
1948 
1949  struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
1950  uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
1951  GC_ASSERT(himem <= (uintptr_t)heap_pages_himem);
1952  heap_pages_himem = (RVALUE *)himem;
1953 
1954  GC_ASSERT(j == heap_allocated_pages);
1955  }
1956 }
1957 
1958 static struct heap_page *
1959 heap_page_allocate(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
1960 {
1961  uintptr_t start, end, p;
1962  struct heap_page *page;
1963  struct heap_page_body *page_body = 0;
1964  uintptr_t hi, lo, mid;
1965  size_t stride = size_pool->slot_size;
1966  unsigned int limit = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)))/(int)stride;
1967 
1968  /* assign heap_page body (contains heap_page_header and RVALUEs) */
1969  page_body = (struct heap_page_body *)rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
1970  if (page_body == 0) {
1971  rb_memerror();
1972  }
1973 
1974  /* assign heap_page entry */
1975  page = calloc1(sizeof(struct heap_page));
1976  if (page == 0) {
1977  rb_aligned_free(page_body, HEAP_PAGE_SIZE);
1978  rb_memerror();
1979  }
1980 
1981  /* adjust obj_limit (object number available in this page) */
1982  start = (uintptr_t)((VALUE)page_body + sizeof(struct heap_page_header));
1983 
1984  if ((VALUE)start % sizeof(RVALUE) != 0) {
1985  int delta = (int)sizeof(RVALUE) - (start % (int)sizeof(RVALUE));
1986  start = start + delta;
1987  GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
1988 
1989  /* Find a num in page that is evenly divisible by `stride`.
1990  * This is to ensure that objects are aligned with bit planes.
1991  * In other words, ensure there are an even number of objects
1992  * per bit plane. */
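     /* For example, in a size pool whose stride is 2 * sizeof(RVALUE), if the header
      * leaves `start` at slot index 1, it is advanced by one more RVALUE below so that
      * NUM_IN_PAGE(start) * sizeof(RVALUE) becomes a multiple of the stride. */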
1993  if (NUM_IN_PAGE(start) == 1) {
1994  start += stride - sizeof(RVALUE);
1995  }
1996 
1997  GC_ASSERT(NUM_IN_PAGE(start) * sizeof(RVALUE) % stride == 0);
1998 
1999  limit = (HEAP_PAGE_SIZE - (int)(start - (uintptr_t)page_body))/(int)stride;
2000  }
2001  end = start + (limit * (int)stride);
2002 
2003  /* setup heap_pages_sorted */
2004  lo = 0;
2005  hi = (uintptr_t)heap_allocated_pages;
2006  while (lo < hi) {
2007  struct heap_page *mid_page;
2008 
2009  mid = (lo + hi) / 2;
2010  mid_page = heap_pages_sorted[mid];
2011  if ((uintptr_t)mid_page->start < start) {
2012  lo = mid + 1;
2013  }
2014  else if ((uintptr_t)mid_page->start > start) {
2015  hi = mid;
2016  }
2017  else {
2018  rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
2019  }
2020  }
2021 
2022  if (hi < (uintptr_t)heap_allocated_pages) {
2023  MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi);
2024  }
2025 
2026  heap_pages_sorted[hi] = page;
2027 
2028  heap_allocated_pages++;
2029 
2030  GC_ASSERT(heap_eden_total_pages(objspace) + heap_allocatable_pages(objspace) <= heap_pages_sorted_length);
2031  GC_ASSERT(heap_eden_total_pages(objspace) + heap_tomb_total_pages(objspace) == heap_allocated_pages - 1);
2032  GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2033 
2034  objspace->profile.total_allocated_pages++;
2035 
2036  if (heap_allocated_pages > heap_pages_sorted_length) {
2037  rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
2038  heap_allocated_pages, heap_pages_sorted_length);
2039  }
2040 
2041  if (heap_pages_lomem == 0 || (uintptr_t)heap_pages_lomem > start) heap_pages_lomem = (RVALUE *)start;
2042  if ((uintptr_t)heap_pages_himem < end) heap_pages_himem = (RVALUE *)end;
2043 
2044  page->start = (RVALUE *)start;
2045  page->total_slots = limit;
2046  page->slot_size = size_pool->slot_size;
2047  page->size_pool = size_pool;
2048  page_body->header.page = page;
2049 
2050  for (p = start; p != end; p += stride) {
2051  gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
2052  heap_page_add_freeobj(objspace, page, (VALUE)p);
2053  }
2054  page->free_slots = limit;
2055 
2056  asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
2057  return page;
2058 }
2059 
2060 static struct heap_page *
2061 heap_page_resurrect(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2062 {
2063  struct heap_page *page = 0, *next;
2064 
2065  list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
2066  asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
2067  if (page->freelist != NULL) {
2068  heap_unlink_page(objspace, &size_pool->tomb_heap, page);
2069  asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
2070  return page;
2071  }
2072  }
2073 
2074  return NULL;
2075 }
2076 
2077 static struct heap_page *
2078 heap_page_create(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2079 {
2080  struct heap_page *page;
2081  const char *method = "recycle";
2082 
2083  size_pool->allocatable_pages--;
2084 
2085  page = heap_page_resurrect(objspace, size_pool);
2086 
2087  if (page == NULL) {
2088  page = heap_page_allocate(objspace, size_pool);
2089  method = "allocate";
2090  }
2091  if (0) fprintf(stderr, "heap_page_create: %s - %p, "
2092  "heap_pages_sorted_length: %"PRIdSIZE", "
2093  "heap_allocated_pages: %"PRIdSIZE", "
2094  "tomb->total_pages: %"PRIdSIZE"\n",
2095  method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
2096  return page;
2097 }
2098 
2099 static void
2100 heap_add_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
2101 {
2102  /* Adding to eden heap during incremental sweeping is forbidden */
2103  GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
2104  page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
2105  list_add_tail(&heap->pages, &page->page_node);
2106  heap->total_pages++;
2107  heap->total_slots += page->total_slots;
2108 }
2109 
2110 static void
2111 heap_assign_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2112 {
2113  struct heap_page *page = heap_page_create(objspace, size_pool);
2114  heap_add_page(objspace, size_pool, heap, page);
2115  heap_add_freepage(heap, page);
2116 }
2117 
2118 static void
2119 heap_add_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, size_t add)
2120 {
2121  size_t i;
2122 
2123  size_pool_allocatable_pages_set(objspace, size_pool, add);
2124 
2125  for (i = 0; i < add; i++) {
2126  heap_assign_page(objspace, size_pool, heap);
2127  }
2128 
2129  GC_ASSERT(size_pool->allocatable_pages == 0);
2130 }
2131 
2132 static size_t
2133 heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots, size_t used)
2134 {
2135  double goal_ratio = gc_params.heap_free_slots_goal_ratio;
2136  size_t next_used;
2137 
2138  if (goal_ratio == 0.0) {
2139  next_used = (size_t)(used * gc_params.growth_factor);
2140  }
2141  else {
2142  /* Find `f' where free_slots = f * total_slots * goal_ratio
2143  * => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots)
2144  */
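     /* A worked example (hypothetical figures): with goal_ratio == 0.40,
      * total_slots == 10000 and free_slots == 2000,
      * f = (10000 - 2000) / ((1 - 0.40) * 10000) = 8000 / 6000 ~= 1.33,
      * so the heap grows to roughly 1.33x its current page count, subject to
      * the clamping below. */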
2145  double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
2146 
2147  if (f > gc_params.growth_factor) f = gc_params.growth_factor;
2148  if (f < 1.0) f = 1.1;
2149 
2150  next_used = (size_t)(f * used);
2151 
2152  if (0) {
2153  fprintf(stderr,
2154  "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
2155  " G(%1.2f), f(%1.2f),"
2156  " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
2157  free_slots, total_slots, free_slots/(double)total_slots,
2158  goal_ratio, f, used, next_used);
2159  }
2160  }
2161 
2162  if (gc_params.growth_max_slots > 0) {
2163  size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
2164  if (next_used > max_used) next_used = max_used;
2165  }
2166 
2167  size_t extend_page_count = next_used - used;
2168  /* Extend by at least 1 page. */
2169  if (extend_page_count == 0) extend_page_count = 1;
2170 
2171  return extend_page_count;
2172 }
2173 
2174 static int
2175 heap_increment(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2176 {
2177  if (size_pool->allocatable_pages > 0) {
2178  gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
2179  "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
2180  heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);
2181 
2182  GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
2183  GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2184 
2185  heap_assign_page(objspace, size_pool, heap);
2186  return TRUE;
2187  }
2188  return FALSE;
2189 }
2190 
2191 static void
2192 heap_prepare(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2193 {
2194  GC_ASSERT(heap->free_pages == NULL);
2195 
2196  if (is_lazy_sweeping(objspace)) {
2197  gc_sweep_continue(objspace, size_pool, heap);
2198  }
2199  else if (is_incremental_marking(objspace)) {
2200  gc_marks_continue(objspace, size_pool, heap);
2201  }
2202 
2203  if (heap->free_pages == NULL &&
2204  (will_be_incremental_marking(objspace) || heap_increment(objspace, size_pool, heap) == FALSE) &&
2205  gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2206  rb_memerror();
2207  }
2208 }
2209 
2210 void
2211 rb_objspace_set_event_hook(const rb_event_flag_t event)
2212 {
2213  rb_objspace_t *objspace = &rb_objspace;
2214  objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
2215  objspace->flags.has_hook = (objspace->hook_events != 0);
2216 }
2217 
2218 static void
2219 gc_event_hook_body(rb_execution_context_t *ec, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
2220 {
2221  if (UNLIKELY(!ec->cfp)) return;
2222  const VALUE *pc = ec->cfp->pc;
2223  if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
2224  /* increment PC because source line is calculated with PC-1 */
2225  ec->cfp->pc++;
2226  }
2227  EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
2228  ec->cfp->pc = pc;
2229 }
2230 
2231 #define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
2232 #define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
2233 
2234 #define gc_event_hook_prep(objspace, event, data, prep) do { \
2235  if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
2236  prep; \
2237  gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
2238  } \
2239 } while (0)
2240 
2241 #define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
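/* Usage in this file: newobj_slowpath() fires RUBY_INTERNAL_EVENT_NEWOBJ through
 * gc_event_hook_prep() (filling the object via newobj_fill() first), and obj_free()
 * fires RUBY_INTERNAL_EVENT_FREEOBJ through gc_event_hook(); both do nothing at run
 * time unless a hook for that event was registered, i.e. unless hook_events was set
 * by rb_objspace_set_event_hook() above. */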
2242 
2243 static inline VALUE
2244 newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
2245 {
2246 #if !__has_feature(memory_sanitizer)
2247  GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
2248  GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2249 #endif
2250  RVALUE *p = RANY(obj);
2251  p->as.basic.flags = flags;
2252  *((VALUE *)&p->as.basic.klass) = klass;
2253 
2254 #if RACTOR_CHECK_MODE
2255  rb_ractor_setup_belonging(obj);
2256 #endif
2257 
2258 #if RGENGC_CHECK_MODE
2259  p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;
2260 
2261  RB_VM_LOCK_ENTER_NO_BARRIER();
2262  {
2263  check_rvalue_consistency(obj);
2264 
2265  GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
2266  GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
2267  GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
2268  GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
2269 
2270  if (flags & FL_PROMOTED1) {
2271  if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
2272  }
2273  else {
2274  if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
2275  }
2276  if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
2277  }
2278  RB_VM_LOCK_LEAVE_NO_BARRIER();
2279 #endif
2280 
2281  if (UNLIKELY(wb_protected == FALSE)) {
2282  ASSERT_vm_locking();
2283  MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2284  }
2285 
2286  // TODO: make it atomic, or ractor local
2287  objspace->total_allocated_objects++;
2288 
2289 #if RGENGC_PROFILE
2290  if (wb_protected) {
2291  objspace->profile.total_generated_normal_object_count++;
2292 #if RGENGC_PROFILE >= 2
2293  objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2294 #endif
2295  }
2296  else {
2297  objspace->profile.total_generated_shady_object_count++;
2298 #if RGENGC_PROFILE >= 2
2299  objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2300 #endif
2301  }
2302 #endif
2303 
2304 #if GC_DEBUG
2305  RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
2306  GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
2307 #endif
2308 
2309  gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
2310 
2311 #if RGENGC_OLD_NEWOBJ_CHECK > 0
2312  {
2313  static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2314 
2315  if (!is_incremental_marking(objspace) &&
2316  flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
2317  ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
2318  if (--newobj_cnt == 0) {
2319  newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2320 
2321  gc_mark_set(objspace, obj);
2322  RVALUE_AGE_SET_OLD(objspace, obj);
2323 
2324  rb_gc_writebarrier_remember(obj);
2325  }
2326  }
2327  }
2328 #endif
2329  // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
2330  return obj;
2331 }
2332 
2333 static inline void heap_add_freepage(rb_heap_t *heap, struct heap_page *page);
2334 static struct heap_page *heap_next_freepage(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
2335 static inline void ractor_set_cache(rb_ractor_t *cr, struct heap_page *page, size_t size_pool_idx);
2336 
2337 size_t
2338 rb_gc_obj_slot_size(VALUE obj)
2339 {
2340  return GET_HEAP_PAGE(obj)->slot_size;
2341 }
2342 
2343 static inline size_t
2344 size_pool_slot_size(unsigned char pool_id)
2345 {
2346  GC_ASSERT(pool_id < SIZE_POOL_COUNT);
2347 
2348  size_t slot_size = (1 << pool_id) * sizeof(RVALUE);
2349 
2350 #if RGENGC_CHECK_MODE
2351  rb_objspace_t *objspace = &rb_objspace;
2352  GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size);
2353 #endif
2354 
2355  return slot_size;
2356 }
2357 
2358 bool
2359 rb_gc_size_allocatable_p(size_t size)
2360 {
2361  return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
2362 }
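/* A minimal illustration, assuming a 64-bit build with sizeof(RVALUE) == 40 and
 * SIZE_POOL_COUNT == 5 (USE_RVARGC): size_pool_slot_size() yields 40, 80, 160,
 * 320 and 640 bytes for pools 0..4, so rb_gc_size_allocatable_p() accepts any
 * requested size up to 640 bytes. */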
2363 
2364 static inline VALUE
2365 ractor_cached_free_region(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2366 {
2367  rb_ractor_newobj_size_pool_cache_t *cache = &cr->newobj_cache.size_pool_caches[size_pool_idx];
2368  RVALUE *p = cache->freelist;
2369 
2370  if (p) {
2371  VALUE obj = (VALUE)p;
2372  cache->freelist = p->as.free.next;
2373  asan_unpoison_object(obj, true);
2374 #if RGENGC_CHECK_MODE
2375  // zero clear
2376  MEMZERO((char *)obj, char, size_pool_slot_size(size_pool_idx));
2377 #endif
2378  return obj;
2379  }
2380  else {
2381  return Qfalse;
2382  }
2383 }
2384 
2385 static struct heap_page *
2386 heap_next_freepage(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2387 {
2388  ASSERT_vm_locking();
2389 
2390  struct heap_page *page;
2391 
2392  while (heap->free_pages == NULL) {
2393  heap_prepare(objspace, size_pool, heap);
2394  }
2395  page = heap->free_pages;
2396  heap->free_pages = page->free_next;
2397 
2398  GC_ASSERT(page->free_slots != 0);
2399  RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", (void *)page, (void *)page->freelist, page->free_slots);
2400 
2401  asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
2402 
2403  return page;
2404 }
2405 
2406 static inline void
2407 ractor_set_cache(rb_ractor_t *cr, struct heap_page *page, size_t size_pool_idx)
2408 {
2409  gc_report(3, &rb_objspace, "ractor_set_cache: Using page %p\n", (void *)GET_PAGE_BODY(page->start));
2410 
2411  rb_ractor_newobj_size_pool_cache_t *cache = &cr->newobj_cache.size_pool_caches[size_pool_idx];
2412 
2413  cache->using_page = page;
2414  cache->freelist = page->freelist;
2415  page->free_slots = 0;
2416  page->freelist = NULL;
2417 
2418  asan_unpoison_object((VALUE)cache->freelist, false);
2419  GC_ASSERT(RB_TYPE_P((VALUE)cache->freelist, T_NONE));
2420  asan_poison_object((VALUE)cache->freelist);
2421 }
2422 
2423 static inline void
2424 ractor_cache_slots(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2425 {
2426  ASSERT_vm_locking();
2427 
2428  rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
2429  struct heap_page *page = heap_next_freepage(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
2430 
2431  ractor_set_cache(cr, page, size_pool_idx);
2432 }
2433 
2434 static inline VALUE
2435 newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
2436 {
2437  RVALUE *p = (RVALUE *)obj;
2438  p->as.values.v1 = v1;
2439  p->as.values.v2 = v2;
2440  p->as.values.v3 = v3;
2441  return obj;
2442 }
2443 
2444 static inline size_t
2445 size_pool_idx_for_size(size_t size)
2446 {
2447 #if USE_RVARGC
2448  size_t slot_count = CEILDIV(size, sizeof(RVALUE));
2449 
2450  /* size_pool_idx is ceil(log2(slot_count)) */
2451  size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
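     /* e.g. slot_count == 1 -> idx 0, slot_count == 2 -> idx 1, and slot_count == 3
      * or 4 -> idx 2, i.e. ceil(log2(slot_count)) computed from the leading zeros. */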
2452  if (size_pool_idx >= SIZE_POOL_COUNT) {
2453  rb_bug("size_pool_idx_for_size: allocation size too large");
2454  }
2455 
2456  return size_pool_idx;
2457 #else
2458  GC_ASSERT(size <= sizeof(RVALUE));
2459  return 0;
2460 #endif
2461 }
2462 
2463 ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx));
2464 
2465 static inline VALUE
2466 newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx)
2467 {
2468  VALUE obj;
2469  unsigned int lev;
2470 
2471  RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2472  {
2473  if (UNLIKELY(during_gc || ruby_gc_stressful)) {
2474  if (during_gc) {
2475  dont_gc_on();
2476  during_gc = 0;
2477  rb_bug("object allocation during garbage collection phase");
2478  }
2479 
2480  if (ruby_gc_stressful) {
2481  if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2482  rb_memerror();
2483  }
2484  }
2485  }
2486 
2487  // allocate new slot
2488  while ((obj = ractor_cached_free_region(objspace, cr, size_pool_idx)) == Qfalse) {
2489  ractor_cache_slots(objspace, cr, size_pool_idx);
2490  }
2491  GC_ASSERT(obj != 0);
2492  newobj_init(klass, flags, wb_protected, objspace, obj);
2493 
2494  gc_event_hook_prep(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj, newobj_fill(obj, 0, 0, 0));
2495  }
2496  RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2497 
2498  return obj;
2499 }
2500 
2501 NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
2502  rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
2503 NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
2504  rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
2505 
2506 static VALUE
2507 newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2508 {
2509  return newobj_slowpath(klass, flags, objspace, cr, TRUE, size_pool_idx);
2510 }
2511 
2512 static VALUE
2513 newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2514 {
2515  return newobj_slowpath(klass, flags, objspace, cr, FALSE, size_pool_idx);
2516 }
2517 
2518 static inline VALUE
2519 newobj_of0(VALUE klass, VALUE flags, int wb_protected, rb_ractor_t *cr, size_t alloc_size)
2520 {
2521  VALUE obj;
2522  rb_objspace_t *objspace = &rb_objspace;
2523 
2524  RB_DEBUG_COUNTER_INC(obj_newobj);
2525  (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2526 
2527 #if GC_DEBUG_STRESS_TO_CLASS
2528  if (UNLIKELY(stress_to_class)) {
2529  long i, cnt = RARRAY_LEN(stress_to_class);
2530  for (i = 0; i < cnt; ++i) {
2531  if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
2532  }
2533  }
2534 #endif
2535 
2536  size_t size_pool_idx = size_pool_idx_for_size(alloc_size);
2537 
2538  if ((!UNLIKELY(during_gc ||
2539  ruby_gc_stressful ||
2540  gc_event_hook_available_p(objspace)) &&
2541  wb_protected &&
2542  (obj = ractor_cached_free_region(objspace, cr, size_pool_idx)) != Qfalse)) {
2543 
2544  newobj_init(klass, flags, wb_protected, objspace, obj);
2545  }
2546  else {
2547  RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2548 
2549  obj = wb_protected ?
2550  newobj_slowpath_wb_protected(klass, flags, objspace, cr, size_pool_idx) :
2551  newobj_slowpath_wb_unprotected(klass, flags, objspace, cr, size_pool_idx);
2552  }
2553 
2554  return obj;
2555 }
2556 
2557 static inline VALUE
2558 newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
2559 {
2560  VALUE obj = newobj_of0(klass, flags, wb_protected, GET_RACTOR(), alloc_size);
2561  return newobj_fill(obj, v1, v2, v3);
2562 }
2563 
2564 static inline VALUE
2565 newobj_of_cr(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
2566 {
2567  VALUE obj = newobj_of0(klass, flags, wb_protected, cr, alloc_size);
2568  return newobj_fill(obj, v1, v2, v3);
2569 }
2570 
2571 VALUE
2572 rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
2573 {
2574  GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2575  return newobj_of(klass, flags, 0, 0, 0, FALSE, size);
2576 }
2577 
2578 VALUE
2579 rb_wb_protected_newobj_of(VALUE klass, VALUE flags, size_t size)
2580 {
2581  GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2582  return newobj_of(klass, flags, 0, 0, 0, TRUE, size);
2583 }
2584 
2585 VALUE
2586 rb_ec_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
2587 {
2588  GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2589  return newobj_of_cr(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
2590 }
2591 
2592 /* for compatibility */
2593 
2594 VALUE
2595 rb_newobj(void)
2596 {
2597  return newobj_of(0, T_NONE, 0, 0, 0, FALSE, sizeof(RVALUE));
2598 }
2599 
2600 VALUE
2601 rb_newobj_of(VALUE klass, VALUE flags)
2602 {
2603  if ((flags & RUBY_T_MASK) == T_OBJECT) {
2604  st_table *index_tbl = RCLASS_IV_INDEX_TBL(klass);
2605 
2606  VALUE obj = newobj_of(klass, (flags | ROBJECT_EMBED) & ~FL_WB_PROTECTED , Qundef, Qundef, Qundef, flags & FL_WB_PROTECTED, sizeof(RVALUE));
2607 
2608  if (index_tbl && index_tbl->num_entries > ROBJECT_EMBED_LEN_MAX) {
2609  rb_init_iv_list(obj);
2610  }
2611  return obj;
2612  }
2613  else {
2614  return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED, sizeof(RVALUE));
2615  }
2616 }
2617 
2618 #define UNEXPECTED_NODE(func) \
2619  rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
2620  BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
2621 
2622 const char *
2623 rb_imemo_name(enum imemo_type type)
2624 {
2625  // put no default case to get a warning if an imemo type is missing
2626  switch (type) {
2627 #define IMEMO_NAME(x) case imemo_##x: return #x;
2628  IMEMO_NAME(env);
2629  IMEMO_NAME(cref);
2630  IMEMO_NAME(svar);
2631  IMEMO_NAME(throw_data);
2632  IMEMO_NAME(ifunc);
2633  IMEMO_NAME(memo);
2634  IMEMO_NAME(ment);
2635  IMEMO_NAME(iseq);
2636  IMEMO_NAME(tmpbuf);
2637  IMEMO_NAME(ast);
2638  IMEMO_NAME(parser_strterm);
2639  IMEMO_NAME(callinfo);
2640  IMEMO_NAME(callcache);
2641  IMEMO_NAME(constcache);
2642 #undef IMEMO_NAME
2643  }
2644  return "unknown";
2645 }
2646 
2647 #undef rb_imemo_new
2648 
2649 VALUE
2650 rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
2651 {
2652  size_t size = sizeof(RVALUE);
2653  VALUE flags = T_IMEMO | (type << FL_USHIFT);
2654  return newobj_of(v0, flags, v1, v2, v3, TRUE, size);
2655 }
2656 
2657 static VALUE
2658 rb_imemo_tmpbuf_new(VALUE v1, VALUE v2, VALUE v3, VALUE v0)
2659 {
2660  size_t size = sizeof(RVALUE);
2661  VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
2662  return newobj_of(v0, flags, v1, v2, v3, FALSE, size);
2663 }
2664 
2665 static VALUE
2666 rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
2667 {
2668  return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
2669 }
2670 
2671 rb_imemo_tmpbuf_t *
2672 rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
2673 {
2674  return (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new((VALUE)buf, (VALUE)old_heap, (VALUE)cnt, 0);
2675 }
2676 
2677 static size_t
2678 imemo_memsize(VALUE obj)
2679 {
2680  size_t size = 0;
2681  switch (imemo_type(obj)) {
2682  case imemo_ment:
2683  size += sizeof(RANY(obj)->as.imemo.ment.def);
2684  break;
2685  case imemo_iseq:
2686  size += rb_iseq_memsize((rb_iseq_t *)obj);
2687  break;
2688  case imemo_env:
2689  size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
2690  break;
2691  case imemo_tmpbuf:
2692  size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
2693  break;
2694  case imemo_ast:
2695  size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
2696  break;
2697  case imemo_cref:
2698  case imemo_svar:
2699  case imemo_throw_data:
2700  case imemo_ifunc:
2701  case imemo_memo:
2702  case imemo_parser_strterm:
2703  break;
2704  default:
2705  /* unreachable */
2706  break;
2707  }
2708  return size;
2709 }
2710 
2711 #if IMEMO_DEBUG
2712 VALUE
2713 rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line)
2714 {
2715  VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
2716  fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
2717  return memo;
2718 }
2719 #endif
2720 
2721 VALUE
2722 rb_class_allocate_instance(VALUE klass)
2723 {
2724  st_table *index_tbl = RCLASS_IV_INDEX_TBL(klass);
2725 
2726  VALUE flags = T_OBJECT | ROBJECT_EMBED;
2727 
2728  VALUE obj = newobj_of(klass, flags, Qundef, Qundef, Qundef, RGENGC_WB_PROTECTED_OBJECT, sizeof(RVALUE));
2729 
2730  if (index_tbl && index_tbl->num_entries > ROBJECT_EMBED_LEN_MAX) {
2731  rb_init_iv_list(obj);
2732  }
2733 
2734  return obj;
2735 }
2736 
2737 static inline void
2738 rb_data_object_check(VALUE klass)
2739 {
2740  if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
2741  rb_undef_alloc_func(klass);
2742 #if RUBY_VERSION_SINCE(3, 2)
2743  RBIMPL_TODO("enable the warning at this release");
2744  rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
2745 #endif
2746  }
2747 }
2748 
2749 VALUE
2750 rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
2751 {
2752  RUBY_ASSERT_ALWAYS(dfree != (RUBY_DATA_FUNC)1);
2753  if (klass) rb_data_object_check(klass);
2754  return newobj_of(klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, FALSE, sizeof(RVALUE));
2755 }
2756 
2757 VALUE
2758 rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
2759 {
2760  VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
2761  DATA_PTR(obj) = xcalloc(1, size);
2762  return obj;
2763 }
2764 
2765 VALUE
2766 rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
2767 {
2768  RBIMPL_NONNULL_ARG(type);
2769  if (klass) rb_data_object_check(klass);
2770  return newobj_of(klass, T_DATA, (VALUE)type, (VALUE)1, (VALUE)datap, type->flags & RUBY_FL_WB_PROTECTED, sizeof(RVALUE));
2771 }
2772 
2773 VALUE
2774 rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
2775 {
2776  VALUE obj = rb_data_typed_object_wrap(klass, 0, type);
2777  DATA_PTR(obj) = xcalloc(1, size);
2778  return obj;
2779 }
2780 
2781 size_t
2782 rb_objspace_data_type_memsize(VALUE obj)
2783 {
2784  if (RTYPEDDATA_P(obj)) {
2785  const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
2786  const void *ptr = RTYPEDDATA_DATA(obj);
2787  if (ptr && type->function.dsize) {
2788  return type->function.dsize(ptr);
2789  }
2790  }
2791  return 0;
2792 }
2793 
2794 const char *
2795 rb_objspace_data_type_name(VALUE obj)
2796 {
2797  if (RTYPEDDATA_P(obj)) {
2798  return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
2799  }
2800  else {
2801  return 0;
2802  }
2803 }
2804 
2805 PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
2806 static inline int
2807 is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
2808 {
2809  register RVALUE *p = RANY(ptr);
2810  register struct heap_page *page;
2811  register size_t hi, lo, mid;
2812 
2813  RB_DEBUG_COUNTER_INC(gc_isptr_trial);
2814 
2815  if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
2816  RB_DEBUG_COUNTER_INC(gc_isptr_range);
2817 
2818  if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
2819  RB_DEBUG_COUNTER_INC(gc_isptr_align);
2820 
2821  /* check if p looks like a pointer using bsearch */
2822  lo = 0;
2823  hi = heap_allocated_pages;
2824  while (lo < hi) {
2825  mid = (lo + hi) / 2;
2826  page = heap_pages_sorted[mid];
2827  if (page->start <= p) {
2828  if ((uintptr_t)p < ((uintptr_t)page->start + (page->total_slots * page->slot_size))) {
2829  RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
2830 
2831  if (page->flags.in_tomb) {
2832  return FALSE;
2833  }
2834  else {
2835  if ((NUM_IN_PAGE(p) * sizeof(RVALUE)) % page->slot_size != 0) return FALSE;
2836 
2837  return TRUE;
2838  }
2839  }
2840  lo = mid + 1;
2841  }
2842  else {
2843  hi = mid;
2844  }
2845  }
2846  return FALSE;
2847 }
2848 
2849 static enum rb_id_table_iterator_result
2850 free_const_entry_i(VALUE value, void *data)
2851 {
2852  rb_const_entry_t *ce = (rb_const_entry_t *)value;
2853  xfree(ce);
2854  return ID_TABLE_CONTINUE;
2855 }
2856 
2857 void
2858 rb_free_const_table(struct rb_id_table *tbl)
2859 {
2860  rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
2861  rb_id_table_free(tbl);
2862 }
2863 
2864 static int
2865 free_iv_index_tbl_free_i(st_data_t key, st_data_t value, st_data_t data)
2866 {
2867  xfree((void *)value);
2868  return ST_CONTINUE;
2869 }
2870 
2871 static void
2872 iv_index_tbl_free(struct st_table *tbl)
2873 {
2874  st_foreach(tbl, free_iv_index_tbl_free_i, 0);
2875  st_free_table(tbl);
2876 }
2877 
2878 // alive: if false, the target pointers may already have been freed.
2879 // To check that, we need the objspace parameter.
2880 static void
2881 vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, rb_objspace_t *objspace, VALUE klass)
2882 {
2883  if (ccs->entries) {
2884  for (int i=0; i<ccs->len; i++) {
2885  const struct rb_callcache *cc = ccs->entries[i].cc;
2886  if (!alive) {
2887  void *ptr = asan_poisoned_object_p((VALUE)cc);
2888  asan_unpoison_object((VALUE)cc, false);
2889  // ccs can be free'ed.
2890  if (is_pointer_to_heap(objspace, (void *)cc) &&
2891  IMEMO_TYPE_P(cc, imemo_callcache) &&
2892  cc->klass == klass) {
2893  // OK. maybe target cc.
2894  }
2895  else {
2896  if (ptr) {
2897  asan_poison_object((VALUE)cc);
2898  }
2899  continue;
2900  }
2901  if (ptr) {
2902  asan_poison_object((VALUE)cc);
2903  }
2904  }
2905  vm_cc_invalidate(cc);
2906  }
2907  ruby_xfree(ccs->entries);
2908  }
2909  ruby_xfree(ccs);
2910 }
2911 
2912 void
2913 rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
2914 {
2915  RB_DEBUG_COUNTER_INC(ccs_free);
2916  vm_ccs_free(ccs, TRUE, NULL, Qundef);
2917 }
2918 
2919 struct cc_tbl_i_data {
2920  rb_objspace_t *objspace;
2921  VALUE klass;
2922  bool alive;
2923 };
2924 
2925 static enum rb_id_table_iterator_result
2926 cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
2927 {
2928  struct cc_tbl_i_data *data = data_ptr;
2929  struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
2930  VM_ASSERT(vm_ccs_p(ccs));
2931  VM_ASSERT(id == ccs->cme->called_id);
2932 
2933  if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
2934  rb_vm_ccs_free(ccs);
2935  return ID_TABLE_DELETE;
2936  }
2937  else {
2938  gc_mark(data->objspace, (VALUE)ccs->cme);
2939 
2940  for (int i=0; i<ccs->len; i++) {
2941  VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
2942  VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
2943 
2944  gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
2945  gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
2946  }
2947  return ID_TABLE_CONTINUE;
2948  }
2949 }
2950 
2951 static void
2952 cc_table_mark(rb_objspace_t *objspace, VALUE klass)
2953 {
2954  struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2955  if (cc_tbl) {
2956  struct cc_tbl_i_data data = {
2957  .objspace = objspace,
2958  .klass = klass,
2959  };
2960  rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
2961  }
2962 }
2963 
2964 static enum rb_id_table_iterator_result
2965 cc_table_free_i(VALUE ccs_ptr, void *data_ptr)
2966 {
2967  struct cc_tbl_i_data *data = data_ptr;
2968  struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
2969  VM_ASSERT(vm_ccs_p(ccs));
2970  vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
2971  return ID_TABLE_CONTINUE;
2972 }
2973 
2974 static void
2975 cc_table_free(rb_objspace_t *objspace, VALUE klass, bool alive)
2976 {
2977  struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2978 
2979  if (cc_tbl) {
2980  struct cc_tbl_i_data data = {
2981  .objspace = objspace,
2982  .klass = klass,
2983  .alive = alive,
2984  };
2985  rb_id_table_foreach_values(cc_tbl, cc_table_free_i, &data);
2986  rb_id_table_free(cc_tbl);
2987  }
2988 }
2989 
2990 static enum rb_id_table_iterator_result
2991 cvar_table_free_i(VALUE value, void * ctx)
2992 {
2993  xfree((void *) value);
2994  return ID_TABLE_CONTINUE;
2995 }
2996 
2997 void
2998 rb_cc_table_free(VALUE klass)
2999 {
3000  cc_table_free(&rb_objspace, klass, TRUE);
3001 }
3002 
3003 static inline void
3004 make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
3005 {
3006  struct RZombie *zombie = RZOMBIE(obj);
3007  zombie->basic.flags = T_ZOMBIE | (zombie->basic.flags & FL_SEEN_OBJ_ID);
3008  zombie->dfree = dfree;
3009  zombie->data = data;
3010  zombie->next = heap_pages_deferred_final;
3011  heap_pages_deferred_final = (VALUE)zombie;
3012 
3013  struct heap_page *page = GET_HEAP_PAGE(obj);
3014  page->final_slots++;
3015  heap_pages_final_slots++;
3016 }
3017 
3018 static inline void
3019 make_io_zombie(rb_objspace_t *objspace, VALUE obj)
3020 {
3021  rb_io_t *fptr = RANY(obj)->as.file.fptr;
3022  make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
3023 }
3024 
3025 static void
3026 obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
3027 {
3028  ASSERT_vm_locking();
3029  st_data_t o = (st_data_t)obj, id;
3030 
3031  GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
3032  FL_UNSET(obj, FL_SEEN_OBJ_ID);
3033 
3034  if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
3035  GC_ASSERT(id);
3036  st_delete(objspace->id_to_obj_tbl, &id, NULL);
3037  }
3038  else {
3039  rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
3040  }
3041 }
3042 
3043 static int
3044 obj_free(rb_objspace_t *objspace, VALUE obj)
3045 {
3046  RB_DEBUG_COUNTER_INC(obj_free);
3047  // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
3048 
3049  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);
3050 
3051  switch (BUILTIN_TYPE(obj)) {
3052  case T_NIL:
3053  case T_FIXNUM:
3054  case T_TRUE:
3055  case T_FALSE:
3056  rb_bug("obj_free() called for broken object");
3057  break;
3058  default:
3059  break;
3060  }
3061 
3062  if (FL_TEST(obj, FL_EXIVAR)) {
3063  rb_free_generic_ivar((VALUE)obj);
3064  FL_UNSET(obj, FL_EXIVAR);
3065  }
3066 
3067  if (FL_TEST(obj, FL_SEEN_OBJ_ID) && !FL_TEST(obj, FL_FINALIZE)) {
3068  obj_free_object_id(objspace, obj);
3069  }
3070 
3071  if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
3072 
3073 #if RGENGC_CHECK_MODE
3074 #define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
3075  CHECK(RVALUE_WB_UNPROTECTED);
3076  CHECK(RVALUE_MARKED);
3077  CHECK(RVALUE_MARKING);
3078  CHECK(RVALUE_UNCOLLECTIBLE);
3079 #undef CHECK
3080 #endif
3081 
3082  switch (BUILTIN_TYPE(obj)) {
3083  case T_OBJECT:
3084  if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
3085  RB_DEBUG_COUNTER_INC(obj_obj_embed);
3086  }
3087  else if (ROBJ_TRANSIENT_P(obj)) {
3088  RB_DEBUG_COUNTER_INC(obj_obj_transient);
3089  }
3090  else {
3091  xfree(RANY(obj)->as.object.as.heap.ivptr);
3092  RB_DEBUG_COUNTER_INC(obj_obj_ptr);
3093  }
3094  break;
3095  case T_MODULE:
3096  case T_CLASS:
3097  rb_id_table_free(RCLASS_M_TBL(obj));
3098  cc_table_free(objspace, obj, FALSE);
3099  if (RCLASS_IV_TBL(obj)) {
3100  st_free_table(RCLASS_IV_TBL(obj));
3101  }
3102  if (RCLASS_CONST_TBL(obj)) {
3103  rb_free_const_table(RCLASS_CONST_TBL(obj));
3104  }
3105  if (RCLASS_IV_INDEX_TBL(obj)) {
3106  iv_index_tbl_free(RCLASS_IV_INDEX_TBL(obj));
3107  }
3108  if (RCLASS_CVC_TBL(obj)) {
3109  rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
3110  rb_id_table_free(RCLASS_CVC_TBL(obj));
3111  }
3112  rb_class_remove_subclass_head(obj);
3113  rb_class_remove_from_module_subclasses(obj);
3114  rb_class_remove_from_super_subclasses(obj);
3115 #if !USE_RVARGC
3116  if (RCLASS_EXT(obj))
3117  xfree(RCLASS_EXT(obj));
3118 #endif
3119 
3120  (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
3121  (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
3122  break;
3123  case T_STRING:
3124  rb_str_free(obj);
3125  break;
3126  case T_ARRAY:
3127  rb_ary_free(obj);
3128  break;
3129  case T_HASH:
3130 #if USE_DEBUG_COUNTER
3131  switch (RHASH_SIZE(obj)) {
3132  case 0:
3133  RB_DEBUG_COUNTER_INC(obj_hash_empty);
3134  break;
3135  case 1:
3136  RB_DEBUG_COUNTER_INC(obj_hash_1);
3137  break;
3138  case 2:
3139  RB_DEBUG_COUNTER_INC(obj_hash_2);
3140  break;
3141  case 3:
3142  RB_DEBUG_COUNTER_INC(obj_hash_3);
3143  break;
3144  case 4:
3145  RB_DEBUG_COUNTER_INC(obj_hash_4);
3146  break;
3147  case 5:
3148  case 6:
3149  case 7:
3150  case 8:
3151  RB_DEBUG_COUNTER_INC(obj_hash_5_8);
3152  break;
3153  default:
3154  GC_ASSERT(RHASH_SIZE(obj) > 8);
3155  RB_DEBUG_COUNTER_INC(obj_hash_g8);
3156  }
3157 
3158  if (RHASH_AR_TABLE_P(obj)) {
3159  if (RHASH_AR_TABLE(obj) == NULL) {
3160  RB_DEBUG_COUNTER_INC(obj_hash_null);
3161  }
3162  else {
3163  RB_DEBUG_COUNTER_INC(obj_hash_ar);
3164  }
3165  }
3166  else {
3167  RB_DEBUG_COUNTER_INC(obj_hash_st);
3168  }
3169 #endif
3170  if (/* RHASH_AR_TABLE_P(obj) */ !FL_TEST_RAW(obj, RHASH_ST_TABLE_FLAG)) {
3171  struct ar_table_struct *tab = RHASH(obj)->as.ar;
3172 
3173  if (tab) {
3174  if (RHASH_TRANSIENT_P(obj)) {
3175  RB_DEBUG_COUNTER_INC(obj_hash_transient);
3176  }
3177  else {
3178  ruby_xfree(tab);
3179  }
3180  }
3181  }
3182  else {
3183  GC_ASSERT(RHASH_ST_TABLE_P(obj));
3184  st_free_table(RHASH(obj)->as.st);
3185  }
3186  break;
3187  case T_REGEXP:
3188  if (RANY(obj)->as.regexp.ptr) {
3189  onig_free(RANY(obj)->as.regexp.ptr);
3190  RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
3191  }
3192  break;
3193  case T_DATA:
3194  if (DATA_PTR(obj)) {
3195  int free_immediately = FALSE;
3196  void (*dfree)(void *);
3197  void *data = DATA_PTR(obj);
3198 
3199  if (RTYPEDDATA_P(obj)) {
3200  free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
3201  dfree = RANY(obj)->as.typeddata.type->function.dfree;
3202  if (0 && free_immediately == 0) {
3203  /* to expose non-free-immediate T_DATA */
3204  fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
3205  }
3206  }
3207  else {
3208  dfree = RANY(obj)->as.data.dfree;
3209  }
3210 
3211  if (dfree) {
3212  if (dfree == RUBY_DEFAULT_FREE) {
3213  xfree(data);
3214  RB_DEBUG_COUNTER_INC(obj_data_xfree);
3215  }
3216  else if (free_immediately) {
3217  (*dfree)(data);
3218  RB_DEBUG_COUNTER_INC(obj_data_imm_free);
3219  }
3220  else {
3221  make_zombie(objspace, obj, dfree, data);
3222  RB_DEBUG_COUNTER_INC(obj_data_zombie);
3223  return FALSE;
3224  }
3225  }
3226  else {
3227  RB_DEBUG_COUNTER_INC(obj_data_empty);
3228  }
3229  }
3230  break;
3231  case T_MATCH:
3232  if (RANY(obj)->as.match.rmatch) {
3233  struct rmatch *rm = RANY(obj)->as.match.rmatch;
3234 #if USE_DEBUG_COUNTER
3235  if (rm->regs.num_regs >= 8) {
3236  RB_DEBUG_COUNTER_INC(obj_match_ge8);
3237  }
3238  else if (rm->regs.num_regs >= 4) {
3239  RB_DEBUG_COUNTER_INC(obj_match_ge4);
3240  }
3241  else if (rm->regs.num_regs >= 1) {
3242  RB_DEBUG_COUNTER_INC(obj_match_under4);
3243  }
3244 #endif
3245  onig_region_free(&rm->regs, 0);
3246  if (rm->char_offset)
3247  xfree(rm->char_offset);
3248  xfree(rm);
3249 
3250  RB_DEBUG_COUNTER_INC(obj_match_ptr);
3251  }
3252  break;
3253  case T_FILE:
3254  if (RANY(obj)->as.file.fptr) {
3255  make_io_zombie(objspace, obj);
3256  RB_DEBUG_COUNTER_INC(obj_file_ptr);
3257  return FALSE;
3258  }
3259  break;
3260  case T_RATIONAL:
3261  RB_DEBUG_COUNTER_INC(obj_rational);
3262  break;
3263  case T_COMPLEX:
3264  RB_DEBUG_COUNTER_INC(obj_complex);
3265  break;
3266  case T_MOVED:
3267  break;
3268  case T_ICLASS:
3269  /* Basically, T_ICLASS shares its tables with the module */
3270  if (RICLASS_OWNS_M_TBL_P(obj)) {
3271  /* Method table is not shared for origin iclasses of classes */
3272  rb_id_table_free(RCLASS_M_TBL(obj));
3273  }
3274  if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
3275  rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
3276  }
3277  rb_class_remove_subclass_head(obj);
3278  cc_table_free(objspace, obj, FALSE);
3279  rb_class_remove_from_module_subclasses(obj);
3280  rb_class_remove_from_super_subclasses(obj);
3281 #if !USE_RVARGC
3282  xfree(RCLASS_EXT(obj));
3283 #endif
3284 
3285  RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
3286  break;
3287 
3288  case T_FLOAT:
3289  RB_DEBUG_COUNTER_INC(obj_float);
3290  break;
3291 
3292  case T_BIGNUM:
3293  if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
3294  xfree(BIGNUM_DIGITS(obj));
3295  RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
3296  }
3297  else {
3298  RB_DEBUG_COUNTER_INC(obj_bignum_embed);
3299  }
3300  break;
3301 
3302  case T_NODE:
3303  UNEXPECTED_NODE(obj_free);
3304  break;
3305 
3306  case T_STRUCT:
3307  if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
3308  RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
3309  RB_DEBUG_COUNTER_INC(obj_struct_embed);
3310  }
3311  else if (RSTRUCT_TRANSIENT_P(obj)) {
3312  RB_DEBUG_COUNTER_INC(obj_struct_transient);
3313  }
3314  else {
3315  xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
3316  RB_DEBUG_COUNTER_INC(obj_struct_ptr);
3317  }
3318  break;
3319 
3320  case T_SYMBOL:
3321  {
3322  rb_gc_free_dsymbol(obj);
3323  RB_DEBUG_COUNTER_INC(obj_symbol);
3324  }
3325  break;
3326 
3327  case T_IMEMO:
3328  switch (imemo_type(obj)) {
3329  case imemo_ment:
3330  rb_free_method_entry(&RANY(obj)->as.imemo.ment);
3331  RB_DEBUG_COUNTER_INC(obj_imemo_ment);
3332  break;
3333  case imemo_iseq:
3334  rb_iseq_free(&RANY(obj)->as.imemo.iseq);
3335  RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
3336  break;
3337  case imemo_env:
3338  GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
3339  xfree((VALUE *)RANY(obj)->as.imemo.env.env);
3340  RB_DEBUG_COUNTER_INC(obj_imemo_env);
3341  break;
3342  case imemo_tmpbuf:
3343  xfree(RANY(obj)->as.imemo.alloc.ptr);
3344  RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
3345  break;
3346  case imemo_ast:
3347  rb_ast_free(&RANY(obj)->as.imemo.ast);
3348  RB_DEBUG_COUNTER_INC(obj_imemo_ast);
3349  break;
3350  case imemo_cref:
3351  RB_DEBUG_COUNTER_INC(obj_imemo_cref);
3352  break;
3353  case imemo_svar:
3354  RB_DEBUG_COUNTER_INC(obj_imemo_svar);
3355  break;
3356  case imemo_throw_data:
3357  RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
3358  break;
3359  case imemo_ifunc:
3360  RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
3361  break;
3362  case imemo_memo:
3363  RB_DEBUG_COUNTER_INC(obj_imemo_memo);
3364  break;
3365  case imemo_parser_strterm:
3366  RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
3367  break;
3368  case imemo_callinfo:
3369  RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
3370  break;
3371  case imemo_callcache:
3372  RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
3373  break;
3374  case imemo_constcache:
3375  RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
3376  break;
3377  }
3378  return TRUE;
3379 
3380  default:
3381  rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
3382  BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
3383  }
3384 
3385  if (FL_TEST(obj, FL_FINALIZE)) {
3386  make_zombie(objspace, obj, 0, 0);
3387  return FALSE;
3388  }
3389  else {
3390  return TRUE;
3391  }
3392 }
3393 
3394 
3395 #define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
3396 #define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
3397 
3398 static int
3399 object_id_cmp(st_data_t x, st_data_t y)
3400 {
3401  if (RB_BIGNUM_TYPE_P(x)) {
3402  return !rb_big_eql(x, y);
3403  }
3404  else {
3405  return x != y;
3406  }
3407 }
3408 
3409 static st_index_t
3410 object_id_hash(st_data_t n)
3411 {
3412  if (RB_BIGNUM_TYPE_P(n)) {
3413  return FIX2LONG(rb_big_hash(n));
3414  }
3415  else {
3416  return st_numhash(n);
3417  }
3418 }
3419 static const struct st_hash_type object_id_hash_type = {
3420  object_id_cmp,
3421  object_id_hash,
3422 };
3423 
3424 void
3425 Init_heap(void)
3426 {
3427  rb_objspace_t *objspace = &rb_objspace;
3428 
3429 #if defined(HAVE_MMAP) && !HAVE_CONST_PAGE_SIZE && !defined(PAGE_MAX_SIZE)
3430  /* Need to determine if we can use mmap at runtime. */
3431 # ifdef PAGE_SIZE
3432  /* If the PAGE_SIZE macro can be used. */
3433  use_mmap_aligned_alloc = PAGE_SIZE <= HEAP_PAGE_SIZE;
3434 # elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
3435  /* If we can use sysconf to determine the page size. */
3436  use_mmap_aligned_alloc = sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE;
3437 # else
3438  /* Otherwise we can't determine the system page size, so don't use mmap. */
3439  use_mmap_aligned_alloc = FALSE;
3440 # endif
3441 #endif
3442 
3443  objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
3444  objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
3445  objspace->obj_to_id_tbl = st_init_numtable();
3446 
3447 #if RGENGC_ESTIMATE_OLDMALLOC
3448  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
3449 #endif
3450 
3451  heap_add_pages(objspace, &size_pools[0], SIZE_POOL_EDEN_HEAP(&size_pools[0]), gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
3452 
3453  /* Give other size pools allocatable pages. */
3454  for (int i = 1; i < SIZE_POOL_COUNT; i++) {
3455  rb_size_pool_t *size_pool = &size_pools[i];
3456  int multiple = size_pool->slot_size / sizeof(RVALUE);
3457  size_pool->allocatable_pages = gc_params.heap_init_slots * multiple / HEAP_PAGE_OBJ_LIMIT;
3458  }
3459  heap_pages_expand_sorted(objspace);
3460 
3461  init_mark_stack(&objspace->mark_stack);
3462 
3463  objspace->profile.invoke_time = getrusage_time();
3464  finalizer_table = st_init_numtable();
3465 }
3466 
3467 void
3468 Init_gc_stress(void)
3469 {
3470  rb_objspace_t *objspace = &rb_objspace;
3471 
3472  gc_stress_set(objspace, ruby_initial_gc_stress);
3473 }
3474 
3475 typedef int each_obj_callback(void *, void *, size_t, void *);
3476 
3477 static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
3478 static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
3479 
3480 struct each_obj_data {
3481  rb_objspace_t *objspace;
3482  bool reenable_incremental;
3483 
3484  each_obj_callback *callback;
3485  void *data;
3486 
3487  struct heap_page **pages[SIZE_POOL_COUNT];
3488  size_t pages_counts[SIZE_POOL_COUNT];
3489 };
3490 
3491 static VALUE
3492 objspace_each_objects_ensure(VALUE arg)
3493 {
3494  struct each_obj_data *data = (struct each_obj_data *)arg;
3495  rb_objspace_t *objspace = data->objspace;
3496 
3497  /* Reenable incremental GC */
3498  if (data->reenable_incremental) {
3499  objspace->flags.dont_incremental = FALSE;
3500  }
3501 
3502  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3503  struct heap_page **pages = data->pages[i];
3504  /* pages could be NULL if an error was raised during setup (e.g.
3505  * malloc failed due to out of memory). */
3506  if (pages) {
3507  free(pages);
3508  }
3509  }
3510 
3511  return Qnil;
3512 }
3513 
3514 static VALUE
3515 objspace_each_objects_try(VALUE arg)
3516 {
3517  struct each_obj_data *data = (struct each_obj_data *)arg;
3518  rb_objspace_t *objspace = data->objspace;
3519 
3520  /* Copy pages from all size_pools to their respective buffers. */
3521  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3522  rb_size_pool_t *size_pool = &size_pools[i];
3523  size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
3524 
3525  struct heap_page **pages = malloc(size);
3526  if (!pages) rb_memerror();
3527 
3528  /* Set up pages buffer by iterating over all pages in the current eden
3529  * heap. This will be a snapshot of the state of the heap before we
3530  * call the callback over each page that exists in this buffer. Thus it
3531  * is safe for the callback to allocate objects without possibly entering
3532  * an infinite loop. */
3533  struct heap_page *page = 0;
3534  size_t pages_count = 0;
3535  list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
3536  pages[pages_count] = page;
3537  pages_count++;
3538  }
3539  data->pages[i] = pages;
3540  data->pages_counts[i] = pages_count;
3541  GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
3542  }
3543 
3544  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3545  rb_size_pool_t *size_pool = &size_pools[i];
3546  size_t pages_count = data->pages_counts[i];
3547  struct heap_page **pages = data->pages[i];
3548 
3549  struct heap_page *page = list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
3550  for (size_t i = 0; i < pages_count; i++) {
3551  /* If we have reached the end of the linked list then there are no
3552  * more pages, so break. */
3553  if (page == NULL) break;
3554 
3555  /* If this page does not match the one in the buffer, then move to
3556  * the next page in the buffer. */
3557  if (pages[i] != page) continue;
3558 
3559  uintptr_t pstart = (uintptr_t)page->start;
3560  uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);
3561 
3562  if ((*data->callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
3563  break;
3564  }
3565 
3566  page = list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
3567  }
3568  }
3569 
3570  return Qnil;
3571 }
3572 
3573 /*
3574  * rb_objspace_each_objects() is a special C API for walking through the
3575  * Ruby object space. This C API is difficult to use correctly; frankly,
3576  * you should not use it unless you have read the source code of this
3577  * function and understand what it does.
3578  *
3579  * 'callback' will be called several times (once per heap page in the
3580  * current implementation) with:
3581  * vstart: a pointer to the first living object of the heap_page.
3582  * vend: a pointer just past the valid heap_page area.
3583  * stride: a distance to next VALUE.
3584  *
3585  * If callback() returns non-zero, the iteration will be stopped.
3586  *
3587  * This is a sample callback code to iterate liveness objects:
3588  *
3589  *   int
3590  *   sample_callback(void *vstart, void *vend, int stride, void *data) {
3591  *       VALUE v = (VALUE)vstart;
3592  *       for (; v != (VALUE)vend; v += stride) {
3593  *           if (RBASIC(v)->flags) { // liveness check
3594  *               // do something with live object 'v'
3595  *           }
3596  *       }
3597  *       return 0; // continue the iteration
3598  *   }
3599  * Note: 'vstart' is not the top of the heap_page. It points to the first
3600  *       living object, so that at least one object is grasped and GC issues
3601  *       are avoided. This means you cannot walk through every Ruby object
3602  *       page, including pages of freed objects.
3603  *
3604  * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
3605  *       However, variable values may be passed as 'stride' for various
3606  *       reasons, so you must use 'stride' in the iteration instead of
3607  *       some constant value.
3608  */
3609 void
3610 rb_objspace_each_objects(each_obj_callback *callback, void *data)
3611 {
3612  objspace_each_objects(&rb_objspace, callback, data, TRUE);
3613 }
3614 
3615 static void
3616 objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
3617 {
3618  /* Disable incremental GC */
3619  bool reenable_incremental = FALSE;
3620  if (protected) {
3621  reenable_incremental = !objspace->flags.dont_incremental;
3622 
3623  gc_rest(objspace);
3624  objspace->flags.dont_incremental = TRUE;
3625  }
3626 
3627  struct each_obj_data each_obj_data = {
3628  .objspace = objspace,
3629  .reenable_incremental = reenable_incremental,
3630 
3631  .callback = callback,
3632  .data = data,
3633 
3634  .pages = {NULL},
3635  .pages_counts = {0},
3636  };
3637  rb_ensure(objspace_each_objects_try, (VALUE)&each_obj_data,
3638  objspace_each_objects_ensure, (VALUE)&each_obj_data);
3639 }
3640 
3641 void
3642 rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
3643 {
3644  objspace_each_objects(&rb_objspace, callback, data, FALSE);
3645 }
3646 
3647 struct os_each_struct {
3648  size_t num;
3649  VALUE of;
3650 };
3651 
3652 static int
3653 internal_object_p(VALUE obj)
3654 {
3655  RVALUE *p = (RVALUE *)obj;
3656  void *ptr = __asan_region_is_poisoned(p, SIZEOF_VALUE);
3657  asan_unpoison_object(obj, false);
3658  bool used_p = p->as.basic.flags;
3659 
3660  if (used_p) {
3661  switch (BUILTIN_TYPE(obj)) {
3662  case T_NODE:
3663  UNEXPECTED_NODE(internal_object_p);
3664  break;
3665  case T_NONE:
3666  case T_MOVED:
3667  case T_IMEMO:
3668  case T_ICLASS:
3669  case T_ZOMBIE:
3670  break;
3671  case T_CLASS:
3672  if (!p->as.basic.klass) break;
3673  if (FL_TEST(obj, FL_SINGLETON)) {
3674  return rb_singleton_class_internal_p(obj);
3675  }
3676  return 0;
3677  default:
3678  if (!p->as.basic.klass) break;
3679  return 0;
3680  }
3681  }
3682  if (ptr || ! used_p) {
3683  asan_poison_object(obj);
3684  }
3685  return 1;
3686 }
3687 
3688 int
3689 rb_objspace_internal_object_p(VALUE obj)
3690 {
3691  return internal_object_p(obj);
3692 }
3693 
3694 static int
3695 os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
3696 {
3697  struct os_each_struct *oes = (struct os_each_struct *)data;
3698 
3699  VALUE v = (VALUE)vstart;
3700  for (; v != (VALUE)vend; v += stride) {
3701  if (!internal_object_p(v)) {
3702  if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
3703  if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
3704  rb_yield(v);
3705  oes->num++;
3706  }
3707  }
3708  }
3709  }
3710 
3711  return 0;
3712 }
3713 
3714 static VALUE
3715 os_obj_of(VALUE of)
3716 {
3717  struct os_each_struct oes;
3718 
3719  oes.num = 0;
3720  oes.of = of;
3721  rb_objspace_each_objects(os_obj_of_i, &oes);
3722  return SIZET2NUM(oes.num);
3723 }
3724 
3725 /*
3726  * call-seq:
3727  * ObjectSpace.each_object([module]) {|obj| ... } -> integer
3728  * ObjectSpace.each_object([module]) -> an_enumerator
3729  *
3730  * Calls the block once for each living, nonimmediate object in this
3731  * Ruby process. If <i>module</i> is specified, calls the block
3732  * for only those classes or modules that match (or are a subclass of)
3733  * <i>module</i>. Returns the number of objects found. Immediate
3734  * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
3735  * <code>true</code>, <code>false</code>, and <code>nil</code>) are
3736  * never returned. In the example below, #each_object returns both
3737  * the numbers we defined and several constants defined in the Math
3738  * module.
3739  *
3740  * If no block is given, an enumerator is returned instead.
3741  *
3742  * a = 102.7
3743  * b = 95 # Won't be returned
3744  * c = 12345678987654321
3745  * count = ObjectSpace.each_object(Numeric) {|x| p x }
3746  * puts "Total count: #{count}"
3747  *
3748  * <em>produces:</em>
3749  *
3750  * 12345678987654321
3751  * 102.7
3752  * 2.71828182845905
3753  * 3.14159265358979
3754  * 2.22044604925031e-16
3755  * 1.7976931348623157e+308
3756  * 2.2250738585072e-308
3757  * Total count: 7
3758  *
3759  */
3760 
3761 static VALUE
3762 os_each_obj(int argc, VALUE *argv, VALUE os)
3763 {
3764  VALUE of;
3765 
3766  of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
3767  RETURN_ENUMERATOR(os, 1, &of);
3768  return os_obj_of(of);
3769 }
3770 
3771 /*
3772  * call-seq:
3773  * ObjectSpace.undefine_finalizer(obj)
3774  *
3775  * Removes all finalizers for <i>obj</i>.
3776  *
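 *  A short usage sketch (the object and output below are illustrative,
 *  not taken from this file):
 *
 *     obj = Object.new
 *     ObjectSpace.define_finalizer(obj, proc { puts "finalized" })
 *     ObjectSpace.undefine_finalizer(obj)  # the proc above will no longer run
 *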
3777  */
3778 
3779 static VALUE
3780 undefine_final(VALUE os, VALUE obj)
3781 {
3782  return rb_undefine_finalizer(obj);
3783 }
3784 
3785 VALUE
3786 rb_undefine_finalizer(VALUE obj)
3787 {
3788  rb_objspace_t *objspace = &rb_objspace;
3789  st_data_t data = obj;
3790  rb_check_frozen(obj);
3791  st_delete(finalizer_table, &data, 0);
3792  FL_UNSET(obj, FL_FINALIZE);
3793  return obj;
3794 }
3795 
3796 static void
3797 should_be_callable(VALUE block)
3798 {
3799  if (!rb_obj_respond_to(block, idCall, TRUE)) {
3800  rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
3801  rb_obj_class(block));
3802  }
3803 }
3804 
3805 static void
3806 should_be_finalizable(VALUE obj)
3807 {
3808  if (!FL_ABLE(obj)) {
3809  rb_raise(rb_eArgError, "cannot define finalizer for %s",
3810  rb_obj_classname(obj));
3811  }
3812  rb_check_frozen(obj);
3813 }
3814 
3815 /*
3816  * call-seq:
3817  * ObjectSpace.define_finalizer(obj, aProc=proc())
3818  *
3819  * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
3820  * is destroyed. The object ID of <i>obj</i> will be passed
3821  * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
3822  * method, make sure it can be called with a single argument.
3823  *
3824  * The return value is an array <code>[0, aProc]</code>.
3825  *
3826  * The two recommended patterns are to either create the finalizer proc
3827  * in a non-instance method where it can safely capture the needed state,
3828  * or to use a custom callable object that stores the needed state
3829  * explicitly as instance variables.
3830  *
3831  * class Foo
3832  * def initialize(data_needed_for_finalization)
3833  * ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
3834  * end
3835  *
3836  * def self.create_finalizer(data_needed_for_finalization)
3837  * proc {
3838  * puts "finalizing #{data_needed_for_finalization}"
3839  * }
3840  * end
3841  * end
3842  *
3843  * class Bar
3844  * class Remover
3845  * def initialize(data_needed_for_finalization)
3846  * @data_needed_for_finalization = data_needed_for_finalization
3847  * end
3848  *
3849  * def call(id)
3850  * puts "finalizing #{@data_needed_for_finalization}"
3851  * end
3852  * end
3853  *
3854  * def initialize(data_needed_for_finalization)
3855  * ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
3856  * end
3857  * end
3858  *
3859  * Note that if your finalizer references the object to be
3860  * finalized it will never be run on GC, although it will still be
3861  * run at exit. You will get a warning if you capture the object
3862  * to be finalized as the receiver of the finalizer.
3863  *
3864  * class CapturesSelf
3865  * def initialize(name)
3866  * ObjectSpace.define_finalizer(self, proc {
3867  * # this finalizer will only be run on exit
3868  * puts "finalizing #{name}"
3869  * })
3870  * end
3871  * end
3872  *
3873  * Also note that finalization can be unpredictable and is never guaranteed
3874  * to be run except on exit.
3875  */
3876 
static VALUE define_final0(VALUE obj, VALUE block);

3877 static VALUE
3878 define_final(int argc, VALUE *argv, VALUE os)
3879 {
3880  VALUE obj, block;
3881 
3882  rb_scan_args(argc, argv, "11", &obj, &block);
3883  should_be_finalizable(obj);
3884  if (argc == 1) {
3885  block = rb_block_proc();
3886  }
3887  else {
3888  should_be_callable(block);
3889  }
3890 
3891  if (rb_callable_receiver(block) == obj) {
3892  rb_warn("finalizer references object to be finalized");
3893  }
3894 
3895  return define_final0(obj, block);
3896 }
3897 
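/* Registers 'block' as a finalizer for 'obj' in finalizer_table, skipping
 * blocks that are already registered for the object, and returns the frozen
 * [0, block] array documented for ObjectSpace.define_finalizer. */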
3898 static VALUE
3899 define_final0(VALUE obj, VALUE block)
3900 {
3901  rb_objspace_t *objspace = &rb_objspace;
3902  VALUE table;
3903  st_data_t data;
3904 
3905  RBASIC(obj)->flags |= FL_FINALIZE;
3906 
3907  if (st_lookup(finalizer_table, obj, &data)) {
3908  table = (VALUE)data;
3909 
3910  /* avoid duplicate block, table is usually small */
3911  {
3912  long len = RARRAY_LEN(table);
3913  long i;
3914 
3915  for (i = 0; i < len; i++) {
3916  VALUE recv = RARRAY_AREF(table, i);
3917  if (rb_equal(recv, block)) {
3918  block = recv;
3919  goto end;
3920  }
3921  }
3922  }
3923 
3924  rb_ary_push(table, block);
3925  }
3926  else {
3927  table = rb_ary_new3(1, block);
3928  RBASIC_CLEAR_CLASS(table);
3929  st_add_direct(finalizer_table, obj, table);
3930  }
3931  end:
3932  block = rb_ary_new3(2, INT2FIX(0), block);
3933  OBJ_FREEZE(block);
3934  return block;
3935 }
3936 
3937 VALUE
3938 rb_define_finalizer(VALUE obj, VALUE block)
3939 {
3940  should_be_finalizable(obj);
3941  should_be_callable(block);
3942  return define_final0(obj, block);
3943 }
3944 
3945 void
3946 rb_gc_copy_finalizer(VALUE dest, VALUE obj)
3947 {
3948  rb_objspace_t *objspace = &rb_objspace;
3949  VALUE table;
3950  st_data_t data;
3951 
3952  if (!FL_TEST(obj, FL_FINALIZE)) return;
3953  if (st_lookup(finalizer_table, obj, &data)) {
3954  table = (VALUE)data;
3955  st_insert(finalizer_table, dest, table);
3956  }
3957  FL_SET(dest, FL_FINALIZE);
3958 }
3959 
3960 static VALUE
3961 run_single_final(VALUE cmd, VALUE objid)
3962 {
3963  return rb_check_funcall(cmd, idCall, 1, &objid);
3964 }
3965 
3966 static void
3967 warn_exception_in_finalizer(rb_execution_context_t *ec, VALUE final)
3968 {
3969  if (final != Qundef && !NIL_P(ruby_verbose)) {
3970  VALUE errinfo = ec->errinfo;
3971  rb_warn("Exception in finalizer %+"PRIsVALUE, final);
3972  rb_ec_error_print(ec, errinfo);
3973  }
3974 }
3975 
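/* Runs each finalizer recorded in 'table' with the object id of 'obj'.
 * The calls are made under an EC tag so that an exception raised by one
 * finalizer is reported (when verbose) and skipped while the remaining
 * finalizers still run; the saved cfp/errinfo are restored around each call. */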
3976 static void
3977 run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
3978 {
3979  long i;
3980  enum ruby_tag_type state;
3981  volatile struct {
3982  VALUE errinfo;
3983  VALUE objid;
3984  VALUE final;
3985  rb_control_frame_t *cfp;
3986  long finished;
3987  } saved;
3988  rb_execution_context_t * volatile ec = GET_EC();
3989 #define RESTORE_FINALIZER() (\
3990  ec->cfp = saved.cfp, \
3991  ec->errinfo = saved.errinfo)
3992 
3993  saved.errinfo = ec->errinfo;
3994  saved.objid = rb_obj_id(obj);
3995  saved.cfp = ec->cfp;
3996  saved.finished = 0;
3997  saved.final = Qundef;
3998 
3999  EC_PUSH_TAG(ec);
4000  state = EC_EXEC_TAG();
4001  if (state != TAG_NONE) {
4002  ++saved.finished; /* skip failed finalizer */
4003  warn_exception_in_finalizer(ec, ATOMIC_VALUE_EXCHANGE(saved.final, Qundef));
4004  }
4005  for (i = saved.finished;
4006  RESTORE_FINALIZER(), i<RARRAY_LEN(table);
4007  saved.finished = ++i) {
4008  run_single_final(saved.final = RARRAY_AREF(table, i), saved.objid);
4009  }
4010  EC_POP_TAG();
4011 #undef RESTORE_FINALIZER
4012 }
4013 
4014 static void
4015 run_final(rb_objspace_t *objspace, VALUE zombie)
4016 {
4017  st_data_t key, table;
4018 
4019  if (RZOMBIE(zombie)->dfree) {
4020  RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
4021  }
4022 
4023  key = (st_data_t)zombie;
4024  if (st_delete(finalizer_table, &key, &table)) {
4025  run_finalizer(objspace, zombie, (VALUE)table);
4026  }
4027 }
4028 
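/* Walks the singly linked list of T_ZOMBIE objects, running each zombie's
 * dfree/finalizers and then returning its slot to the owning page's freelist
 * while the per-page and global finalization counters are updated under the
 * VM lock. */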
4029 static void
4030 finalize_list(rb_objspace_t *objspace, VALUE zombie)
4031 {
4032  while (zombie) {
4033  VALUE next_zombie;
4034  struct heap_page *page;
4035  asan_unpoison_object(zombie, false);
4036  next_zombie = RZOMBIE(zombie)->next;
4037  page = GET_HEAP_PAGE(zombie);
4038 
4039  run_final(objspace, zombie);
4040 
4041  RB_VM_LOCK_ENTER();
4042  {
4043  GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
4044  if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
4045  obj_free_object_id(objspace, zombie);
4046  }
4047 
4048  GC_ASSERT(heap_pages_final_slots > 0);
4049  GC_ASSERT(page->final_slots > 0);
4050 
4051  heap_pages_final_slots--;
4052  page->final_slots--;
4053  page->free_slots++;
4054  heap_page_add_freeobj(objspace, page, zombie);
4055  objspace->profile.total_freed_objects++;
4056  }
4057  RB_VM_LOCK_LEAVE();
4058 
4059  zombie = next_zombie;
4060  }
4061 }
4062 
4063 static void
4064 finalize_deferred(rb_objspace_t *objspace)
4065 {
4066  VALUE zombie;
4067  rb_execution_context_t *ec = GET_EC();
4068  ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
4069 
4070  while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
4071  finalize_list(objspace, zombie);
4072  }
4073 
4074  ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
4075 }
4076 
4077 static void
4078 gc_finalize_deferred(void *dmy)
4079 {
4080  rb_objspace_t *objspace = dmy;
4081  if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4082 
4083  finalize_deferred(objspace);
4084  ATOMIC_SET(finalizing, 0);
4085 }
4086 
4087 static void
4088 gc_finalize_deferred_register(rb_objspace_t *objspace)
4089 {
4090  if (rb_postponed_job_register_one(0, gc_finalize_deferred, objspace) == 0) {
4091  rb_bug("gc_finalize_deferred_register: can't register finalizer.");
4092  }
4093 }
4094 
4095 struct force_finalize_list {
4096  VALUE obj;
4097  VALUE table;
4098  struct force_finalize_list *next;
4099 };
4100 
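/* st_foreach callback: prepends each (object, finalizer table) pair from
 * finalizer_table onto a force_finalize_list chain so the finalizers can be
 * run outside of the table iteration. */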
4101 static int
4102 force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
4103 {
4104  struct force_finalize_list **prev = (struct force_finalize_list **)arg;
4105  struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
4106  curr->obj = key;
4107  curr->table = val;
4108  curr->next = *prev;
4109  *prev = curr;
4110  return ST_CONTINUE;
4111 }
4112 
4113 bool rb_obj_is_main_ractor(VALUE gv);
4114 
4115 void
4116 rb_objspace_call_finalizer(rb_objspace_t *objspace)
4117 {
4118  size_t i;
4119 
4120 #if RGENGC_CHECK_MODE >= 2
4121  gc_verify_internal_consistency(objspace);
4122 #endif
4123  gc_rest(objspace);
4124 
4125  if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4126 
4127  /* run finalizers */
4128  finalize_deferred(objspace);
4129  GC_ASSERT(heap_pages_deferred_final == 0);
4130 
4131  gc_rest(objspace);
4132  /* prohibit incremental GC */
4133  objspace->flags.dont_incremental = 1;
4134 
4135  /* force finalizers to run */
4136  while (finalizer_table->num_entries) {
4137  struct force_finalize_list *list = 0;
4138  st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
4139  while (list) {
4140  struct force_finalize_list *curr = list;
4141  st_data_t obj = (st_data_t)curr->obj;
4142  run_finalizer(objspace, curr->obj, curr->table);
4143  st_delete(finalizer_table, &obj, 0);
4144  list = curr->next;
4145  xfree(curr);
4146  }
4147  }
4148 
4149  /* prohibit GC because forcing T_DATA finalizers can break object graph consistency */
4150  dont_gc_on();
4151 
4152  /* running data/file finalizers are part of garbage collection */
4153  unsigned int lock_lev;
4154  gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
4155 
4156  /* run data/file object's finalizers */
4157  for (i = 0; i < heap_allocated_pages; i++) {
4158  struct heap_page *page = heap_pages_sorted[i];
4159  short stride = page->slot_size;
4160 
4161  uintptr_t p = (uintptr_t)page->start;
4162  uintptr_t pend = p + page->total_slots * stride;
4163  for (; p < pend; p += stride) {
4164  VALUE vp = (VALUE)p;
4165  void *poisoned = asan_poisoned_object_p(vp);
4166  asan_unpoison_object(vp, false);
4167  switch (BUILTIN_TYPE(vp)) {
4168  case T_DATA:
4169  if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
4170  if (rb_obj_is_thread(vp)) break;
4171  if (rb_obj_is_mutex(vp)) break;
4172  if (rb_obj_is_fiber(vp)) break;
4173  if (rb_obj_is_main_ractor(vp)) break;
4174  if (RTYPEDDATA_P(vp)) {
4175  RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
4176  }
4177  RANY(p)->as.free.flags = 0;
4178  if (RANY(p)->as.data.dfree == RUBY_DEFAULT_FREE) {
4179  xfree(DATA_PTR(p));
4180  }
4181  else if (RANY(p)->as.data.dfree) {
4182  make_zombie(objspace, vp, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
4183  }
4184  break;
4185  case T_FILE:
4186  if (RANY(p)->as.file.fptr) {
4187  make_io_zombie(objspace, vp);
4188  }
4189  break;
4190  default:
4191  break;
4192  }
4193  if (poisoned) {
4194  GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
4195  asan_poison_object(vp);
4196  }
4197  }
4198  }
4199 
4200  gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
4201 
4202  if (heap_pages_deferred_final) {
4203  finalize_list(objspace, heap_pages_deferred_final);
4204  }
4205 
4206  st_free_table(finalizer_table);
4207  finalizer_table = 0;
4208  ATOMIC_SET(finalizing, 0);
4209 }
4210 
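/* A page still flagged 'before_sweep' has not been processed by the current
 * (lazy) sweep phase yet, so objects on it are not considered swept. */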
4211 static inline int
4212 is_swept_object(rb_objspace_t *objspace, VALUE ptr)
4213 {
4214  struct heap_page *page = GET_HEAP_PAGE(ptr);
4215  return page->flags.before_sweep ? FALSE : TRUE;
4216 }
4217 
4218 /* garbage objects will be collected soon. */
4219 static inline int
4220 is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
4221 {
4222  if (!is_lazy_sweeping(objspace) ||
4223  is_swept_object(objspace, ptr) ||
4224  MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
4225 
4226  return FALSE;
4227  }
4228  else {
4229  return TRUE;
4230  }
4231 }
4232 
4233 static inline int
4234 is_live_object(rb_objspace_t *objspace, VALUE ptr)
4235 {
4236  switch (BUILTIN_TYPE(ptr)) {
4237  case T_NONE:
4238  case T_MOVED:
4239  case T_ZOMBIE:
4240  return FALSE;
4241  default:
4242  break;
4243  }
4244 
4245  if (!is_garbage_object(objspace, ptr)) {
4246  return TRUE;
4247  }
4248  else {
4249  return FALSE;
4250  }
4251 }
4252 
4253 static inline int
4254 is_markable_object(rb_objspace_t *objspace, VALUE obj)
4255 {
4256  if (rb_special_const_p(obj)) return FALSE; /* special const is not markable */
4257  check_rvalue_consistency(obj);
4258  return TRUE;
4259 }
4260 
4261 int
4262 rb_objspace_markable_object_p(VALUE obj)
4263 {
4264  rb_objspace_t *objspace = &rb_objspace;
4265  return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
4266 }
4267 
4268 int
4269 rb_objspace_garbage_object_p(VALUE obj)
4270 {
4271  rb_objspace_t *objspace = &rb_objspace;
4272  return is_garbage_object(objspace, obj);
4273 }
4274 
4275 static VALUE
4276 id2ref_obj_tbl(rb_objspace_t *objspace, VALUE objid)
4277 {
4278  VALUE orig;
4279  if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
4280  return orig;
4281  }
4282  else {
4283  return Qundef;
4284  }
4285 }
4286 
4287 /*
4288  * call-seq:
4289  * ObjectSpace._id2ref(object_id) -> an_object
4290  *
4291  * Converts an object id to a reference to the object. May not be
4292  * called on an object id passed as a parameter to a finalizer.
4293  *
4294  * s = "I am a string" #=> "I am a string"
4295  * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
4296  * r == s #=> true
4297  *
4298  * In multi-Ractor mode, if the object is not shareable, RangeError
4299  * is raised.
4300  */
4301 
4302 static VALUE
4303 id2ref(VALUE objid)
4304 {
4305 #if SIZEOF_LONG == SIZEOF_VOIDP
4306 #define NUM2PTR(x) NUM2ULONG(x)
4307 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4308 #define NUM2PTR(x) NUM2ULL(x)
4309 #endif
4310  rb_objspace_t *objspace = &rb_objspace;
4311  VALUE ptr;
4312  VALUE orig;
4313  void *p0;
4314 
4315  objid = rb_to_int(objid);
4316  if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
4317  ptr = NUM2PTR(objid);
4318  if (ptr == Qtrue) return Qtrue;
4319  if (ptr == Qfalse) return Qfalse;
4320  if (NIL_P(ptr)) return Qnil;
4321  if (FIXNUM_P(ptr)) return (VALUE)ptr;
4322  if (FLONUM_P(ptr)) return (VALUE)ptr;
4323 
4324  ptr = obj_id_to_ref(objid);
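 /* Static Symbol ids are encoded as (serial * sizeof(RVALUE)) + (4 << 2)
  * (see rb_find_object_id), so this residue marks an id that denotes a
  * Symbol rather than a heap object. */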
4325  if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
4326  ID symid = ptr / sizeof(RVALUE);
4327  p0 = (void *)ptr;
4328  if (rb_id2str(symid) == 0)
4329  rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
4330  return ID2SYM(symid);
4331  }
4332  }
4333 
4334  if ((orig = id2ref_obj_tbl(objspace, objid)) != Qundef &&
4335  is_live_object(objspace, orig)) {
4336 
4337  if (!rb_multi_ractor_p() || rb_ractor_shareable_p(orig)) {
4338  return orig;
4339  }
4340  else {
4341  rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
4342  }
4343  }
4344 
4345  if (rb_int_ge(objid, objspace->next_object_id)) {
4346  rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
4347  }
4348  else {
4349  rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));
4350  }
4351 }
4352 
4353 static VALUE
4354 os_id2ref(VALUE os, VALUE objid)
4355 {
4356  return id2ref(objid);
4357 }
4358 
4359 static VALUE
4360 rb_find_object_id(VALUE obj, VALUE (*get_heap_object_id)(VALUE))
4361 {
4362  if (STATIC_SYM_P(obj)) {
4363  return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
4364  }
4365  else if (FLONUM_P(obj)) {
4366 #if SIZEOF_LONG == SIZEOF_VOIDP
4367  return LONG2NUM((SIGNED_VALUE)obj);
4368 #else
4369  return LL2NUM((SIGNED_VALUE)obj);
4370 #endif
4371  }
4372  else if (SPECIAL_CONST_P(obj)) {
4373  return LONG2NUM((SIGNED_VALUE)obj);
4374  }
4375 
4376  return get_heap_object_id(obj);
4377 }
4378 
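/* Returns the cached object id for 'obj', or assigns the next monotonically
 * increasing id and records it in both obj_to_id_tbl and id_to_obj_tbl under
 * the VM lock, marking the object with FL_SEEN_OBJ_ID. */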
4379 static VALUE
4380 cached_object_id(VALUE obj)
4381 {
4382  VALUE id;
4383  rb_objspace_t *objspace = &rb_objspace;
4384 
4385  RB_VM_LOCK_ENTER();
4386  if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
4387  GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
4388  }
4389  else {
4390  GC_ASSERT(!FL_TEST(obj, FL_SEEN_OBJ_ID));
4391 
4392  id = objspace->next_object_id;
4393  objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
4394 
4395  VALUE already_disabled = rb_gc_disable_no_rest();
4396  st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
4397  st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
4398  if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
4399  FL_SET(obj, FL_SEEN_OBJ_ID);
4400  }
4401  RB_VM_LOCK_LEAVE();
4402 
4403  return id;
4404 }
4405 
4406 static VALUE
4407 nonspecial_obj_id_(VALUE obj)
4408 {
4409  return nonspecial_obj_id(obj);
4410 }
4411 
4412 
4413 VALUE
4414 rb_memory_id(VALUE obj)
4415 {
4416  return rb_find_object_id(obj, nonspecial_obj_id_);
4417 }
4418 
4419 /*
4420  * Document-method: __id__
4421  * Document-method: object_id
4422  *
4423  * call-seq:
4424  * obj.__id__ -> integer
4425  * obj.object_id -> integer
4426  *
4427  * Returns an integer identifier for +obj+.
4428  *
4429  * The same number will be returned on all calls to +object_id+ for a given
4430  * object, and no two active objects will share an id.
4431  *
4432  * Note that some objects of builtin classes are reused for optimization.
4433  * This is the case for immediate values and frozen string literals.
4434  *
4435  * BasicObject implements +__id__+, Kernel implements +object_id+.
4436  *
4437  * Immediate values are not passed by reference but are passed by value:
4438  * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
4439  *
4440  * Object.new.object_id == Object.new.object_id # => false
4441  * (21 * 2).object_id == (21 * 2).object_id # => true
4442  * "hello".object_id == "hello".object_id # => false
4443  * "hi".freeze.object_id == "hi".freeze.object_id # => true
4444  */
4445 
4446 VALUE
4447 rb_obj_id(VALUE obj)
4448 {
4449  /*
4450  * 32-bit VALUE space
4451  * MSB ------------------------ LSB
4452  * false 00000000000000000000000000000000
4453  * true 00000000000000000000000000000010
4454  * nil 00000000000000000000000000000100
4455  * undef 00000000000000000000000000000110
4456  * symbol ssssssssssssssssssssssss00001110
4457  * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
4458  * fixnum fffffffffffffffffffffffffffffff1
4459  *
4460  * object_id space
4461  * LSB
4462  * false 00000000000000000000000000000000
4463  * true 00000000000000000000000000000010
4464  * nil 00000000000000000000000000000100
4465  * undef 00000000000000000000000000000110
4466  * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
4467  * object oooooooooooooooooooooooooooooo0 o...o % A = 0
4468  * fixnum fffffffffffffffffffffffffffffff1 bignum if required
4469  *
4470  * where A = sizeof(RVALUE)/4
4471  *
4472  * sizeof(RVALUE) is
4473  * 20 if 32-bit, double is 4-byte aligned
4474  * 24 if 32-bit, double is 8-byte aligned
4475  * 40 if 64-bit
4476  */
4477 
4478  return rb_find_object_id(obj, cached_object_id);
4479 }
4480 
4481 static enum rb_id_table_iterator_result
4482 cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
4483 {
4484  size_t *total_size = data_ptr;
4485  struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
4486  *total_size += sizeof(*ccs);
4487  *total_size += sizeof(ccs->entries[0]) * ccs->capa;
4488  return ID_TABLE_CONTINUE;
4489 }
4490 
4491 static size_t
4492 cc_table_memsize(struct rb_id_table *cc_table)
4493 {
4494  size_t total = rb_id_table_memsize(cc_table);
4495  rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
4496  return total;
4497 }
4498 
4499 static size_t
4500 obj_memsize_of(VALUE obj, int use_all_types)
4501 {
4502  size_t size = 0;
4503 
4504  if (SPECIAL_CONST_P(obj)) {
4505  return 0;
4506  }
4507 
4508  if (FL_TEST(obj, FL_EXIVAR)) {
4509  size += rb_generic_ivar_memsize(obj);
4510  }
4511 
4512  switch (BUILTIN_TYPE(obj)) {
4513  case T_OBJECT:
4514  if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
4515  size += ROBJECT_NUMIV(obj) * sizeof(VALUE);
4516  }
4517  break;
4518  case T_MODULE:
4519  case T_CLASS:
4520  if (RCLASS_EXT(obj)) {
4521  if (RCLASS_M_TBL(obj)) {
4522  size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4523  }
4524  if (RCLASS_IV_TBL(obj)) {
4525  size += st_memsize(RCLASS_IV_TBL(obj));
4526  }
4527  if (RCLASS_CVC_TBL(obj)) {
4528  size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
4529  }
4530  if (RCLASS_IV_INDEX_TBL(obj)) {
4531  // TODO: more correct value
4532  size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
4533  }
4534  if (RCLASS_EXT(obj)->iv_tbl) {
4535  size += st_memsize(RCLASS_EXT(obj)->iv_tbl);
4536  }
4537  if (RCLASS_EXT(obj)->const_tbl) {
4538  size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
4539  }
4540  if (RCLASS_CC_TBL(obj)) {
4541  size += cc_table_memsize(RCLASS_CC_TBL(obj));
4542  }
4543 #if !USE_RVARGC
4544  size += sizeof(rb_classext_t);
4545 #endif
4546  }
4547  break;
4548  case T_ICLASS:
4549  if (RICLASS_OWNS_M_TBL_P(obj)) {
4550  if (RCLASS_M_TBL(obj)) {
4551  size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4552  }
4553  }
4554  if (RCLASS_EXT(obj) && RCLASS_CC_TBL(obj)) {
4555  size += cc_table_memsize(RCLASS_CC_TBL(obj));
4556  }
4557  break;
4558  case T_STRING:
4559  size += rb_str_memsize(obj);
4560  break;
4561  case T_ARRAY:
4562  size += rb_ary_memsize(obj);
4563  break;
4564  case T_HASH:
4565  if (RHASH_AR_TABLE_P(obj)) {
4566  if (RHASH_AR_TABLE(obj) != NULL) {
4567  size_t rb_hash_ar_table_size(void);
4568  size += rb_hash_ar_table_size();
4569  }
4570  }
4571  else {
4572  VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
4573  size += st_memsize(RHASH_ST_TABLE(obj));
4574  }
4575  break;
4576  case T_REGEXP:
4577  if (RREGEXP_PTR(obj)) {
4578  size += onig_memsize(RREGEXP_PTR(obj));
4579  }
4580  break;
4581  case T_DATA:
4582  if (use_all_types) size += rb_objspace_data_type_memsize(obj);
4583  break;
4584  case T_MATCH:
4585  if (RMATCH(obj)->rmatch) {
4586  struct rmatch *rm = RMATCH(obj)->rmatch;
4587  size += onig_region_memsize(&rm->regs);
4588  size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
4589  size += sizeof(struct rmatch);
4590  }
4591  break;
4592  case T_FILE:
4593  if (RFILE(obj)->fptr) {
4594  size += rb_io_memsize(RFILE(obj)->fptr);
4595  }
4596  break;
4597  case T_RATIONAL:
4598  case T_COMPLEX:
4599  break;
4600  case T_IMEMO:
4601  size += imemo_memsize(obj);
4602  break;
4603 
4604  case T_FLOAT:
4605  case T_SYMBOL:
4606  break;
4607 
4608  case T_BIGNUM:
4609  if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
4610  size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
4611  }
4612  break;
4613 
4614  case T_NODE:
4615  UNEXPECTED_NODE(obj_memsize_of);
4616  break;
4617 
4618  case T_STRUCT:
4619  if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
4620  RSTRUCT(obj)->as.heap.ptr) {
4621  size += sizeof(VALUE) * RSTRUCT_LEN(obj);
4622  }
4623  break;
4624 
4625  case T_ZOMBIE:
4626  case T_MOVED:
4627  break;
4628 
4629  default:
4630  rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
4631  BUILTIN_TYPE(obj), (void*)obj);
4632  }
4633 
4634  return size + GET_HEAP_PAGE(obj)->slot_size;
4635 }
4636 
4637 size_t
4638 rb_obj_memsize_of(VALUE obj)
4639 {
4640  return obj_memsize_of(obj, TRUE);
4641 }
4642 
4643 static int
4644 set_zero(st_data_t key, st_data_t val, st_data_t arg)
4645 {
4646  VALUE k = (VALUE)key;
4647  VALUE hash = (VALUE)arg;
4648  rb_hash_aset(hash, k, INT2FIX(0));
4649  return ST_CONTINUE;
4650 }
4651 
4652 static VALUE
4653 type_sym(size_t type)
4654 {
4655  switch (type) {
4656 #define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
4657  COUNT_TYPE(T_NONE);
4658  COUNT_TYPE(T_OBJECT);
4659  COUNT_TYPE(T_CLASS);
4660  COUNT_TYPE(T_MODULE);
4661  COUNT_TYPE(T_FLOAT);
4662  COUNT_TYPE(T_STRING);
4663  COUNT_TYPE(T_REGEXP);
4664  COUNT_TYPE(T_ARRAY);
4665  COUNT_TYPE(T_HASH);
4666  COUNT_TYPE(T_STRUCT);
4667  COUNT_TYPE(T_BIGNUM);
4668  COUNT_TYPE(T_FILE);
4669  COUNT_TYPE(T_DATA);
4670  COUNT_TYPE(T_MATCH);
4671  COUNT_TYPE(T_COMPLEX);
4672  COUNT_TYPE(T_RATIONAL);
4673  COUNT_TYPE(T_NIL);
4674  COUNT_TYPE(T_TRUE);
4675  COUNT_TYPE(T_FALSE);
4676  COUNT_TYPE(T_SYMBOL);
4677  COUNT_TYPE(T_FIXNUM);
4678  COUNT_TYPE(T_IMEMO);
4679  COUNT_TYPE(T_UNDEF);
4680  COUNT_TYPE(T_NODE);
4681  COUNT_TYPE(T_ICLASS);
4682  COUNT_TYPE(T_ZOMBIE);
4683  COUNT_TYPE(T_MOVED);
4684 #undef COUNT_TYPE
4685  default: return SIZET2NUM(type); break;
4686  }
4687 }
4688 
4689 /*
4690  * call-seq:
4691  * ObjectSpace.count_objects([result_hash]) -> hash
4692  *
4693  * Counts all objects grouped by type.
4694  *
4695  * It returns a hash, such as:
4696  * {
4697  * :TOTAL=>10000,
4698  * :FREE=>3011,
4699  * :T_OBJECT=>6,
4700  * :T_CLASS=>404,
4701  * # ...
4702  * }
4703  *
4704  * The contents of the returned hash are implementation specific.
4705  * They may change in the future.
4706  *
4707  * The keys starting with +:T_+ count live objects.
4708  * For example, +:T_ARRAY+ is the number of arrays.
4709  * +:FREE+ counts object slots which are not currently in use.
4710  * +:TOTAL+ is the sum of the above.
4711  *
4712  * If the optional argument +result_hash+ is given,
4713  * it is overwritten and returned. This is intended to avoid probe effect.
4714  *
4715  * h = {}
4716  * ObjectSpace.count_objects(h)
4717  * puts h
4718  * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
4719  *
4720  * This method is only expected to work on C Ruby.
4721  *
4722  */
4723 
4724 static VALUE
4725 count_objects(int argc, VALUE *argv, VALUE os)
4726 {
4727  rb_objspace_t *objspace = &rb_objspace;
4728  size_t counts[T_MASK+1];
4729  size_t freed = 0;
4730  size_t total = 0;
4731  size_t i;
4732  VALUE hash = Qnil;
4733 
4734  if (rb_check_arity(argc, 0, 1) == 1) {
4735  hash = argv[0];
4736  if (!RB_TYPE_P(hash, T_HASH))
4737  rb_raise(rb_eTypeError, "non-hash given");
4738  }
4739 
4740  for (i = 0; i <= T_MASK; i++) {
4741  counts[i] = 0;
4742  }
4743 
4744  for (i = 0; i < heap_allocated_pages; i++) {
4745  struct heap_page *page = heap_pages_sorted[i];
4746  short stride = page->slot_size;
4747 
4748  uintptr_t p = (uintptr_t)page->start;
4749  uintptr_t pend = p + page->total_slots * stride;
4750  for (;p < pend; p += stride) {
4751  VALUE vp = (VALUE)p;
4752  GC_ASSERT((NUM_IN_PAGE(vp) * sizeof(RVALUE)) % page->slot_size == 0);
4753 
4754  void *poisoned = asan_poisoned_object_p(vp);
4755  asan_unpoison_object(vp, false);
4756  if (RANY(p)->as.basic.flags) {
4757  counts[BUILTIN_TYPE(vp)]++;
4758  }
4759  else {
4760  freed++;
4761  }
4762  if (poisoned) {
4763  GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
4764  asan_poison_object(vp);
4765  }
4766  }
4767  total += page->total_slots;
4768  }
4769 
4770  if (NIL_P(hash)) {
4771  hash = rb_hash_new();
4772  }
4773  else if (!RHASH_EMPTY_P(hash)) {
4774  rb_hash_stlike_foreach(hash, set_zero, hash);
4775  }
4776  rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
4777  rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
4778 
4779  for (i = 0; i <= T_MASK; i++) {
4780  VALUE type = type_sym(i);
4781  if (counts[i])
4782  rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
4783  }
4784 
4785  return hash;
4786 }
4787 
4788 /*
4789  ------------------------ Garbage Collection ------------------------
4790 */
4791 
4792 /* Sweeping */
4793 
4794 static size_t
4795 objspace_available_slots(rb_objspace_t *objspace)
4796 {
4797  size_t total_slots = 0;
4798  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
4799  rb_size_pool_t *size_pool = &size_pools[i];
4800  total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
4801  total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
4802  }
4803  return total_slots;
4804 }
4805 
4806 static size_t
4807 objspace_live_slots(rb_objspace_t *objspace)
4808 {
4809  return (objspace->total_allocated_objects - objspace->profile.total_freed_objects) - heap_pages_final_slots;
4810 }
4811 
4812 static size_t
4813 objspace_free_slots(rb_objspace_t *objspace)
4814 {
4815  return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
4816 }
4817 
4818 static void
4819 gc_setup_mark_bits(struct heap_page *page)
4820 {
4821  /* copy oldgen bitmap to mark bitmap */
4822  memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
4823 }
4824 
4825 static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
4826 static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t slot_size);
4827 
4828 static void
4829 lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
4830 {
4831 #if defined(_WIN32)
4832  DWORD old_protect;
4833 
4834  if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_NOACCESS, &old_protect)) {
4835 #else
4836  if (mprotect(body, HEAP_PAGE_SIZE, PROT_NONE)) {
4837 #endif
4838  rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
4839  }
4840  else {
4841  gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
4842  }
4843 }
4844 
4845 static void
4846 unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
4847 {
4848 #if defined(_WIN32)
4849  DWORD old_protect;
4850 
4851  if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_READWRITE, &old_protect)) {
4852 #else
4853  if (mprotect(body, HEAP_PAGE_SIZE, PROT_READ | PROT_WRITE)) {
4854 #endif
4855  rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
4856  }
4857  else {
4858  gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
4859  }
4860 }
4861 
4862 static inline bool
4863 try_move_plane(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page, uintptr_t p, bits_t bits, VALUE dest)
4864 {
4865  if (bits) {
4866  do {
4867  if (bits & 1) {
4868  /* We're trying to move "p" */
4869  objspace->rcompactor.considered_count_table[BUILTIN_TYPE((VALUE)p)]++;
4870 
4871  if (gc_is_moveable_obj(objspace, (VALUE)p)) {
4872  /* We were able to move "p" */
4873  objspace->rcompactor.moved_count_table[BUILTIN_TYPE((VALUE)p)]++;
4874  objspace->rcompactor.total_moved++;
4875 
4876  bool from_freelist = false;
4877 
4878  if (BUILTIN_TYPE(dest) == T_NONE) {
4879  from_freelist = true;
4880  }
4881 
4882  gc_move(objspace, (VALUE)p, dest, page->slot_size);
4883  gc_pin(objspace, (VALUE)p);
4884  heap->compact_cursor_index = (RVALUE *)p;
4885  if (from_freelist) {
4886  FL_SET((VALUE)p, FL_FROM_FREELIST);
4887  }
4888 
4889  return true;
4890  }
4891  }
4892  p += sizeof(RVALUE);
4893  bits >>= 1;
4894  } while (bits);
4895  }
4896 
4897  return false;
4898 }
4899 
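/* Searches the pages at the heap's compact cursor (moving toward the sweep
 * cursor) for a marked, unpinned object that can be moved into the empty
 * slot 'dest'. Returns 1 once a move has been performed, or 0 when the
 * cursors meet and compaction of this heap is finished. */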
4900 static short
4901 try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page, VALUE dest)
4902 {
4903  struct heap_page * cursor = heap->compact_cursor;
4904 
4905  GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
4906 
4907  /* T_NONE objects came from the free list. If the object is *not* a
4908  * T_NONE, it is an object that just got freed but hasn't been
4909  * added to the freelist yet */
4910 
4911  while (1) {
4912  size_t index;
4913 
4914  bits_t *mark_bits = cursor->mark_bits;
4915  bits_t *pin_bits = cursor->pinned_bits;
4916  RVALUE * p;
4917 
4918  if (heap->compact_cursor_index) {
4919  index = BITMAP_INDEX(heap->compact_cursor_index);
4920  p = heap->compact_cursor_index;
4921  GC_ASSERT(cursor == GET_HEAP_PAGE(p));
4922  }
4923  else {
4924  index = 0;
4925  p = cursor->start;
4926  }
4927 
4928  bits_t bits = mark_bits[index] & ~pin_bits[index];
4929 
4930  bits >>= NUM_IN_PAGE(p);
4931  if (try_move_plane(objspace, heap, sweep_page, (uintptr_t)p, bits, dest)) return 1;
4932 
4933  if (index == 0) {
4934  p = cursor->start + (BITS_BITLENGTH - NUM_IN_PAGE(cursor->start));
4935  }
4936  else {
4937  p = cursor->start + (BITS_BITLENGTH - NUM_IN_PAGE(cursor->start)) + (BITS_BITLENGTH * index);
4938  }
4939 
4940  /* Find an object to move and move it. Movable objects must be
4941  * marked, so we iterate using the marking bitmap */
4942  for (size_t i = index + 1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
4943  bits_t bits = mark_bits[i] & ~pin_bits[i];
4944  if (try_move_plane(objspace, heap, sweep_page, (uintptr_t)p, bits, dest)) return 1;
4945  p += BITS_BITLENGTH;
4946  }
4947 
4948  /* We couldn't find a movable object on the compact cursor, so let's
4949  * move to the next page (previous page since we are traveling in the
4950  * opposite direction of the sweep cursor) and look there. */
4951 
4952  struct heap_page * next;
4953 
4954  next = list_prev(&heap->pages, cursor, page_node);
4955 
4956  /* Protect the current cursor since it probably has T_MOVED slots. */
4957  lock_page_body(objspace, GET_PAGE_BODY(cursor->start));
4958 
4959  heap->compact_cursor = next;
4960  heap->compact_cursor_index = 0;
4961  cursor = next;
4962 
4963  // Cursors have met, let's quit. We set `heap->compact_cursor` equal
4964  // to `heap->sweeping_page` so we know how far to iterate through
4965  // the heap when unprotecting pages.
4966  if (next == sweep_page) {
4967  break;
4968  }
4969  }
4970 
4971  return 0;
4972 }
4973 
4974 static void
4975 gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
4976 {
4977  struct heap_page *cursor = heap->compact_cursor;
4978 
4979  while (cursor) {
4980  unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
4981  cursor = list_next(&heap->pages, cursor, page_node);
4982  }
4983 }
4984 
4985 static void gc_update_references(rb_objspace_t * objspace);
4986 static void invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page);
4987 
4988 static void
4989 read_barrier_handler(uintptr_t address)
4990 {
4991  VALUE obj;
4992  rb_objspace_t * objspace = &rb_objspace;
4993 
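 /* Round the faulting address down to its slot boundary so it can be treated
  * as the VALUE stored in that slot. */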
4994  address -= address % sizeof(RVALUE);
4995 
4996  obj = (VALUE)address;
4997 
4998  RB_VM_LOCK_ENTER();
4999  {
5000  unlock_page_body(objspace, GET_PAGE_BODY(obj));
5001 
5002  objspace->profile.read_barrier_faults++;
5003 
5004  invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
5005  }
5006  RB_VM_LOCK_LEAVE();
5007 }
5008 
5009 #if defined(_WIN32)
5010 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
5011 typedef void (*signal_handler)(int);
5012 static signal_handler old_sigsegv_handler;
5013 
5014 static LONG WINAPI
5015 read_barrier_signal(EXCEPTION_POINTERS * info)
5016 {
5017  /* EXCEPTION_ACCESS_VIOLATION is what's raised by access to protected pages */
5018  if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
5019  /* > The second array element specifies the virtual address of the inaccessible data.
5020  * https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
5021  *
5022  * Use this address to invalidate the page */
5023  read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
5024  return EXCEPTION_CONTINUE_EXECUTION;
5025  }
5026  else {
5027  return EXCEPTION_CONTINUE_SEARCH;
5028  }
5029 }
5030 
5031 static void
5032 uninstall_handlers(void)
5033 {
5034  signal(SIGSEGV, old_sigsegv_handler);
5035  SetUnhandledExceptionFilter(old_handler);
5036 }
5037 
5038 static void
5039 install_handlers(void)
5040 {
5041  /* Remove SEGV handler so that the Unhandled Exception Filter handles it */
5042  old_sigsegv_handler = signal(SIGSEGV, NULL);
5043  /* Unhandled Exception Filter has access to the violation address similar
5044  * to si_addr from sigaction */
5045  old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
5046 }
5047 #else
5048 static struct sigaction old_sigbus_handler;
5049 static struct sigaction old_sigsegv_handler;
5050 
5051 static void
5052 read_barrier_signal(int sig, siginfo_t * info, void * data)
5053 {
5054  // setup SEGV/BUS handlers for errors
5055  struct sigaction prev_sigbus, prev_sigsegv;
5056  sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
5057  sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
5058 
5059  // enable SIGBUS/SEGV
5060  sigset_t set, prev_set;
5061  sigemptyset(&set);
5062  sigaddset(&set, SIGBUS);
5063  sigaddset(&set, SIGSEGV);
5064  sigprocmask(SIG_UNBLOCK, &set, &prev_set);
5065 
5066  // run handler
5067  read_barrier_handler((uintptr_t)info->si_addr);
5068 
5069  // reset SEGV/BUS handlers
5070  sigaction(SIGBUS, &prev_sigbus, NULL);
5071  sigaction(SIGSEGV, &prev_sigsegv, NULL);
5072  sigprocmask(SIG_SETMASK, &prev_set, NULL);
5073 }
5074 
5075 static void
5076 uninstall_handlers(void)
5077 {
5078  sigaction(SIGBUS, &old_sigbus_handler, NULL);
5079  sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
5080 }
5081 
5082 static void
5083 install_handlers(void)
5084 {
5085  struct sigaction action;
5086  memset(&action, 0, sizeof(struct sigaction));
5087  sigemptyset(&action.sa_mask);
5088  action.sa_sigaction = read_barrier_signal;
5089  action.sa_flags = SA_SIGINFO | SA_ONSTACK;
5090 
5091  sigaction(SIGBUS, &action, &old_sigbus_handler);
5092  sigaction(SIGSEGV, &action, &old_sigsegv_handler);
5093 }
5094 #endif
5095 
5096 static void
5097 revert_stack_objects(VALUE stack_obj, void *ctx)
5098 {
5099  rb_objspace_t * objspace = (rb_objspace_t*)ctx;
5100 
5101  if (BUILTIN_TYPE(stack_obj) == T_MOVED) {
5102  /* For now we'll revert the whole page if the object made it to the
5103  * stack. I think we can change this to move just the one object
5104  * back though */
5105  invalidate_moved_page(objspace, GET_HEAP_PAGE(stack_obj));
5106  }
5107 }
5108 
5109 static void
5110 revert_machine_stack_references(rb_objspace_t *objspace, VALUE v)
5111 {
5112  if (is_pointer_to_heap(objspace, (void *)v)) {
5113  if (BUILTIN_TYPE(v) == T_MOVED) {
5114  /* For now we'll revert the whole page if the object made it to the
5115  * stack. I think we can change this to move just the one object
5116  * back though */
5117  invalidate_moved_page(objspace, GET_HEAP_PAGE(v));
5118  }
5119  }
5120 }
5121 
5122 static void each_machine_stack_value(const rb_execution_context_t *ec, void (*cb)(rb_objspace_t *, VALUE));
5123 
5124 static void
5125 check_stack_for_moved(rb_objspace_t *objspace)
5126 {
5127  rb_execution_context_t *ec = GET_EC();
5128  rb_vm_t *vm = rb_ec_vm_ptr(ec);
5129  rb_vm_each_stack_value(vm, revert_stack_objects, (void*)objspace);
5130  each_machine_stack_value(ec, revert_machine_stack_references);
5131 }
5132 
5133 static void
5134 gc_compact_finish(rb_objspace_t *objspace, rb_size_pool_t *pool, rb_heap_t *heap)
5135 {
5136  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5137  rb_size_pool_t *size_pool = &size_pools[i];
5138  rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5139  gc_unprotect_pages(objspace, heap);
5140  }
5141 
5142  uninstall_handlers();
5143 
5144  /* The mutator is allowed to run during incremental sweeping. T_MOVED
5145  * objects can get pushed on the stack and when the compaction process
5146  * finishes up, it may remove the read barrier before anything has a
5147  * chance to read from the T_MOVED address. To fix this, we scan the stack
5148  * then revert any moved objects that made it to the stack. */
5149  check_stack_for_moved(objspace);
5150 
5151  gc_update_references(objspace);
5152  objspace->profile.compact_count++;
5153 
5154  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5155  rb_size_pool_t *size_pool = &size_pools[i];
5156  rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5157  heap->compact_cursor = NULL;
5158  heap->compact_cursor_index = 0;
5159  }
5160 
5161  if (gc_prof_enabled(objspace)) {
5162  gc_profile_record *record = gc_prof_record(objspace);
5163  record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
5164  }
5165  objspace->flags.during_compacting = FALSE;
5166 }
5167 
5168 struct gc_sweep_context {
5169  struct heap_page *page;
5170  int final_slots;
5171  int freed_slots;
5172  int empty_slots;
5173 };
5174 
5175 static inline void
5176 gc_fill_swept_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, bool *finished_compacting, struct gc_sweep_context *ctx)
5177 {
5178  struct heap_page * sweep_page = ctx->page;
5179 
5180  if (bitset) {
5181  short slot_size = sweep_page->slot_size;
5182  short slot_bits = slot_size / sizeof(RVALUE);
5183 
5184  do {
5185  if (bitset & 1) {
5186  VALUE dest = (VALUE)p;
5187 
5188  GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(dest), dest));
5189  GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
5190 
5191  CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(dest), dest);
5192 
5193  if (*finished_compacting) {
5194  if (BUILTIN_TYPE(dest) == T_NONE) {
5195  ctx->empty_slots++;
5196  }
5197  else {
5198  ctx->freed_slots++;
5199  }
5200  (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)dest, sizeof(RVALUE));
5201  heap_page_add_freeobj(objspace, sweep_page, dest);
5202  }
5203  else {
5204  /* Zombie slots don't get marked, but we can't reuse
5205  * their memory until they have their finalizers run.*/
5206  if (BUILTIN_TYPE(dest) != T_ZOMBIE) {
5207  if (!try_move(objspace, heap, sweep_page, dest)) {
5208  *finished_compacting = true;
5209  (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
5210  gc_report(5, objspace, "Quit compacting, couldn't find an object to move\n");
5211  if (BUILTIN_TYPE(dest) == T_NONE) {
5212  ctx->empty_slots++;
5213  }
5214  else {
5215  ctx->freed_slots++;
5216  }
5217  heap_page_add_freeobj(objspace, sweep_page, dest);
5218  gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(dest));
5219  }
5220  else {
5221  //moved_slots++;
5222  }
5223  }
5224  }
5225  }
5226  p += slot_size;
5227  bitset >>= slot_bits;
5228  } while (bitset);
5229  }
5230 }
5231 
5232 static bool
5233 gc_fill_swept_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page, struct gc_sweep_context *ctx)
5234 {
5235  /* Find any pinned but not marked objects and try to fill those slots */
5236  bool finished_compacting = false;
5237  bits_t *mark_bits, *pin_bits;
5238  bits_t bitset;
5239  uintptr_t p;
5240 
5241  mark_bits = sweep_page->mark_bits;
5242  pin_bits = sweep_page->pinned_bits;
5243 
5244  p = (uintptr_t)sweep_page->start;
5245 
5246  struct heap_page * cursor = heap->compact_cursor;
5247 
5248  unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
5249 
5250  /* *Want to move* objects are pinned but not marked. */
5251  bitset = pin_bits[0] & ~mark_bits[0];
5252  bitset >>= NUM_IN_PAGE(p); // Skip header / dead space bits
5253  gc_fill_swept_plane(objspace, heap, (uintptr_t)p, bitset, &finished_compacting, ctx);
5254  p += ((BITS_BITLENGTH - NUM_IN_PAGE(p)) * sizeof(RVALUE));
5255 
5256  for (int i = 1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
5257  /* *Want to move* objects are pinned but not marked. */
5258  bitset = pin_bits[i] & ~mark_bits[i];
5259  gc_fill_swept_plane(objspace, heap, (uintptr_t)p, bitset, &finished_compacting, ctx);
5260  p += ((BITS_BITLENGTH) * sizeof(RVALUE));
5261  }
5262 
5263  lock_page_body(objspace, GET_PAGE_BODY(heap->compact_cursor->start));
5264 
5265  return finished_compacting;
5266 }
5267 
5268 static inline void
5269 gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
5270 {
5271  struct heap_page * sweep_page = ctx->page;
5272  short slot_size = sweep_page->slot_size;
5273  short slot_bits = slot_size / sizeof(RVALUE);
5274  GC_ASSERT(slot_bits > 0);
5275 
5276  do {
5277  VALUE vp = (VALUE)p;
5278  GC_ASSERT(vp % sizeof(RVALUE) == 0);
5279 
5280  asan_unpoison_object(vp, false);
5281  if (bitset & 1) {
5282  switch (BUILTIN_TYPE(vp)) {
5283  default: /* majority case */
5284  gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
5285 #if RGENGC_CHECK_MODE
5286  if (!is_full_marking(objspace)) {
5287  if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
5288  if (rgengc_remembered_sweep(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
5289  }
5290 #endif
5291  if (obj_free(objspace, vp)) {
5292  if (heap->compact_cursor) {
5293  /* We *want* to fill this slot */
5294  MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);
5295  }
5296  else {
5297  (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
5298  heap_page_add_freeobj(objspace, sweep_page, vp);
5299  gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5300  ctx->freed_slots++;
5301  }
5302  }
5303  else {
5304  ctx->final_slots++;
5305  }
5306  break;
5307 
5308  case T_MOVED:
5309  if (objspace->flags.during_compacting) {
5310  /* The sweep cursor shouldn't have made it to any
5311  * T_MOVED slots while the compact flag is enabled.
5312  * The sweep cursor and compact cursor move in
5313  * opposite directions, and when they meet references will
5314  * get updated and "during_compacting" should get disabled */
5315  rb_bug("T_MOVED shouldn't be seen until compaction is finished\n");
5316  }
5317  gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5318  if (FL_TEST(vp, FL_FROM_FREELIST)) {
5319  ctx->empty_slots++;
5320  }
5321  else {
5322  ctx->freed_slots++;
5323  }
5324  heap_page_add_freeobj(objspace, sweep_page, vp);
5325  break;
5326  case T_ZOMBIE:
5327  /* already counted */
5328  break;
5329  case T_NONE:
5330  if (heap->compact_cursor) {
5331  /* We *want* to fill this slot */
5332  MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);
5333  }
5334  else {
5335  ctx->empty_slots++; /* already freed */
5336  }
5337  break;
5338  }
5339  }
5340  p += slot_size;
5341  bitset >>= slot_bits;
5342  } while (bitset);
5343 }
5344 
5345 static inline void
5346 gc_sweep_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct gc_sweep_context *ctx)
5347 {
5348  struct heap_page *sweep_page = ctx->page;
5349 
5350  int i;
5351 
5352  RVALUE *p;
5353  bits_t *bits, bitset;
5354 
5355  gc_report(2, objspace, "page_sweep: start.\n");
5356 
5357  if (heap->compact_cursor) {
5358  if (sweep_page == heap->compact_cursor) {
5359  /* The compaction cursor and sweep page met, so we need to quit compacting */
5360  gc_report(5, objspace, "Quit compacting, mark and compact cursor met\n");
5361  gc_compact_finish(objspace, size_pool, heap);
5362  }
5363  else {
5364  /* We anticipate filling the page, so NULL out the freelist. */
5365  asan_unpoison_memory_region(&sweep_page->freelist, sizeof(RVALUE*), false);
5366  sweep_page->freelist = NULL;
5367  asan_poison_memory_region(&sweep_page->freelist, sizeof(RVALUE*));
5368  }
5369  }
5370 
5371  sweep_page->flags.before_sweep = FALSE;
5372  sweep_page->free_slots = 0;
5373 
5374  p = sweep_page->start;
5375  bits = sweep_page->mark_bits;
5376 
5377  int page_rvalue_count = sweep_page->total_slots * (size_pool->slot_size / sizeof(RVALUE));
5378  int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
5379  if (out_of_range_bits != 0) { // sizeof(RVALUE) == 64
5380  bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
5381  }
5382 
5383  // Skip out of range slots at the head of the page
5384  bitset = ~bits[0];
5385  bitset >>= NUM_IN_PAGE(p);
5386  if (bitset) {
5387  gc_sweep_plane(objspace, heap, (uintptr_t)p, bitset, ctx);
5388  }
5389  p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
5390 
5391  for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
5392  bitset = ~bits[i];
5393  if (bitset) {
5394  gc_sweep_plane(objspace, heap, (uintptr_t)p, bitset, ctx);
5395  }
5396  p += BITS_BITLENGTH;
5397  }
5398 
5399  if (heap->compact_cursor) {
5400  if (gc_fill_swept_page(objspace, heap, sweep_page, ctx)) {
5401  gc_compact_finish(objspace, size_pool, heap);
5402  }
5403  }
5404 
5405  if (!heap->compact_cursor) {
5406  gc_setup_mark_bits(sweep_page);
5407  }
5408 
5409 #if GC_PROFILE_MORE_DETAIL
5410  if (gc_prof_enabled(objspace)) {
5411  gc_profile_record *record = gc_prof_record(objspace);
5412  record->removing_objects += ctx->final_slots + ctx->freed_slots;
5413  record->empty_objects += ctx->empty_slots;
5414  }
5415 #endif
5416  if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
5417  rb_gc_count(),
5418  sweep_page->total_slots,
5419  ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
5420 
5421  sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
5422  objspace->profile.total_freed_objects += ctx->freed_slots;
5423 
5424  if (heap_pages_deferred_final && !finalizing) {
5425  rb_thread_t *th = GET_THREAD();
5426  if (th) {
5427  gc_finalize_deferred_register(objspace);
5428  }
5429  }
5430 
5431 #if RGENGC_CHECK_MODE
5432  short freelist_len = 0;
5433  RVALUE *ptr = sweep_page->freelist;
5434  while (ptr) {
5435  freelist_len++;
5436  ptr = ptr->as.free.next;
5437  }
5438  if (freelist_len != sweep_page->free_slots) {
5439  rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
5440  }
5441 #endif
5442 
5443  gc_report(2, objspace, "page_sweep: end.\n");
5444 }
5445 
5446 #if !USE_RVARGC
5447 /* allocate the minimum number of additional pages needed to keep working */
5448 static void
5449 gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
5450 {
5451  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5452  if (!heap->free_pages && heap_increment(objspace, size_pool, heap) == FALSE) {
5453  /* there are no free pages left after page_sweep() */
5454  size_pool_allocatable_pages_set(objspace, size_pool, 1);
5455  if (!heap_increment(objspace, size_pool, heap)) { /* can't allocate additional free objects */
5456  rb_memerror();
5457  }
5458  }
5459  }
5460 }
5461 #endif
5462 
5463 static const char *
5464 gc_mode_name(enum gc_mode mode)
5465 {
5466  switch (mode) {
5467  case gc_mode_none: return "none";
5468  case gc_mode_marking: return "marking";
5469  case gc_mode_sweeping: return "sweeping";
5470  default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
5471  }
5472 }
5473 
5474 static void
5475 gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
5476 {
5477 #if RGENGC_CHECK_MODE
5478  enum gc_mode prev_mode = gc_mode(objspace);
5479  switch (prev_mode) {
5480  case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
5481  case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
5482  case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none); break;
5483  }
5484 #endif
5485  if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
5486  gc_mode_set(objspace, mode);
5487 }
5488 
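/* Appends 'freelist' to the end of the page's existing freelist, unpoisoning
 * and re-poisoning each slot it visits so the traversal stays ASAN-clean. */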
5489 static void
5490 heap_page_freelist_append(struct heap_page *page, RVALUE *freelist)
5491 {
5492  if (freelist) {
5493  asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
5494  if (page->freelist) {
5495  RVALUE *p = page->freelist;
5496  asan_unpoison_object((VALUE)p, false);
5497  while (p->as.free.next) {
5498  RVALUE *prev = p;
5499  p = p->as.free.next;
5500  asan_poison_object((VALUE)prev);
5501  asan_unpoison_object((VALUE)p, false);
5502  }
5503  p->as.free.next = freelist;
5504  asan_poison_object((VALUE)p);
5505  }
5506  else {
5507  page->freelist = freelist;
5508  }
5509  asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
5510  }
5511 }
5512 
5513 static void
5514 gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
5515 {
5516  heap->sweeping_page = list_top(&heap->pages, struct heap_page, page_node);
5517  heap->free_pages = NULL;
5518 #if GC_ENABLE_INCREMENTAL_MARK
5519  heap->pooled_pages = NULL;
5520 #endif
5521 }
5522 
5523 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
5524 __attribute__((noinline))
5525 #endif
5526 static void
5527 gc_sweep_start(rb_objspace_t *objspace)
5528 {
5529  gc_mode_transition(objspace, gc_mode_sweeping);
5530 
5531 #if GC_ENABLE_INCREMENTAL_MARK
5532  objspace->rincgc.pooled_slots = 0;
5533 #endif
5534 
5535  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5536  rb_size_pool_t *size_pool = &size_pools[i];
5537 
5538  gc_sweep_start_heap(objspace, SIZE_POOL_EDEN_HEAP(size_pool));
5539  }
5540 
5541  rb_ractor_t *r = NULL;
5542  list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
5543  rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
5544  }
5545 }
5546 
5547 #if USE_RVARGC
5548 static void
5549 gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
5550 {
5551  rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5552  size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5553  size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5554  size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
5555 
5556  size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
5557 
5558  if (swept_slots < min_free_slots) {
5559  bool grow_heap = is_full_marking(objspace);
5560 
5561  if (!is_full_marking(objspace)) {
5562  /* The heap is a growth heap if it freed more slots than had empty slots. */
5563  bool is_growth_heap = size_pool->empty_slots == 0 ||
5564  size_pool->freed_slots > size_pool->empty_slots;
5565 
5566  if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
5567  grow_heap = TRUE;
5568  }
5569  else if (is_growth_heap) { /* Only growth heaps are allowed to start a major GC. */
5570  objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
5571  size_pool->force_major_gc_count++;
5572  }
5573  }
5574 
5575  if (grow_heap) {
5576  size_t extend_page_count = heap_extend_pages(objspace, swept_slots, total_slots, total_pages);
5577 
5578  if (extend_page_count > size_pool->allocatable_pages) {
5579  size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5580  }
5581 
5582  heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5583  }
5584  }
5585 }
5586 #endif
5587 
5588 static void
5589 gc_sweep_finish(rb_objspace_t *objspace)
5590 {
5591  gc_report(1, objspace, "gc_sweep_finish\n");
5592 
5593  gc_prof_set_heap_info(objspace);
5594  heap_pages_free_unused_pages(objspace);
5595 
5596  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5597  rb_size_pool_t *size_pool = &size_pools[i];
5598 
5599  /* if heap_pages has unused pages, then assign them to increment */
5600  size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5601  if (size_pool->allocatable_pages < tomb_pages) {
5602  size_pool->allocatable_pages = tomb_pages;
5603  }
5604 
5605 #if USE_RVARGC
5606  size_pool->freed_slots = 0;
5607  size_pool->empty_slots = 0;
5608 
5609 #if GC_ENABLE_INCREMENTAL_MARK
5610  if (!will_be_incremental_marking(objspace)) {
5611  rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
5612  struct heap_page *end_page = eden_heap->free_pages;
5613  if (end_page) {
5614  while (end_page->free_next) end_page = end_page->free_next;
5615  end_page->free_next = eden_heap->pooled_pages;
5616  }
5617  else {
5618  eden_heap->free_pages = eden_heap->pooled_pages;
5619  }
5620  eden_heap->pooled_pages = NULL;
5621  objspace->rincgc.pooled_slots = 0;
5622  }
5623 #endif
5624 #endif
5625  }
5626  heap_pages_expand_sorted(objspace);
5627 
5628  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_SWEEP, 0);
5629  gc_mode_transition(objspace, gc_mode_none);
5630 
5631 #if RGENGC_CHECK_MODE >= 2
5632  gc_verify_internal_consistency(objspace);
5633 #endif
5634 }
5635 
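/* gc_sweep_step(): sweep this heap one page at a time (lazy sweep). A page with no
 * surviving objects is unlinked and moved to the tomb heap (at most `unlink_limit`
 * pages per step); a page that still has free slots is appended to the heap's
 * free-page list. Under incremental marking the first such page is kept back as a
 * pooled page and the step stops after roughly 2048 swept slots to bound the pause.
 * Returns whether the heap now has free pages. */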
5636 static int
5637 gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
5638 {
5639  struct heap_page *sweep_page = heap->sweeping_page;
5640  int unlink_limit = 3;
5641 
5642 #if GC_ENABLE_INCREMENTAL_MARK
5643  int swept_slots = 0;
5644 #if USE_RVARGC
5645  bool need_pool = TRUE;
5646 #else
5647  int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
5648 #endif
5649 
5650  gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
5651 #else
5652  gc_report(2, objspace, "gc_sweep_step\n");
5653 #endif
5654 
5655  if (sweep_page == NULL) return FALSE;
5656 
5657 #if GC_ENABLE_LAZY_SWEEP
5658  gc_prof_sweep_timer_start(objspace);
5659 #endif
5660 
5661  do {
5662  RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
5663 
5664  struct gc_sweep_context ctx = {
5665  .page = sweep_page,
5666  .final_slots = 0,
5667  .freed_slots = 0,
5668  .empty_slots = 0,
5669  };
5670  gc_sweep_page(objspace, size_pool, heap, &ctx);
5671  int free_slots = ctx.freed_slots + ctx.empty_slots;
5672 
5673  heap->sweeping_page = list_next(&heap->pages, sweep_page, page_node);
5674 
5675  if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
5676  heap_pages_freeable_pages > 0 &&
5677  unlink_limit > 0) {
5678  heap_pages_freeable_pages--;
5679  unlink_limit--;
5680  /* there are no living objects -> move this page to tomb heap */
5681  heap_unlink_page(objspace, heap, sweep_page);
5682  heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
5683  }
5684  else if (free_slots > 0) {
5685 #if USE_RVARGC
5686  size_pool->freed_slots += ctx.freed_slots;
5687  size_pool->empty_slots += ctx.empty_slots;
5688 #endif
5689 
5690 #if GC_ENABLE_INCREMENTAL_MARK
5691  if (need_pool) {
5692  heap_add_poolpage(objspace, heap, sweep_page);
5693  need_pool = FALSE;
5694  }
5695  else {
5696  heap_add_freepage(heap, sweep_page);
5697  swept_slots += free_slots;
5698  if (swept_slots > 2048) {
5699  break;
5700  }
5701  }
5702 #else
5703  heap_add_freepage(heap, sweep_page);
5704  break;
5705 #endif
5706  }
5707  else {
5708  sweep_page->free_next = NULL;
5709  }
5710  } while ((sweep_page = heap->sweeping_page));
5711 
5712  if (!heap->sweeping_page) {
5713 #if USE_RVARGC
5714  gc_sweep_finish_size_pool(objspace, size_pool);
5715 #endif
5716 
5717  if (!has_sweeping_pages(objspace)) {
5718  gc_sweep_finish(objspace);
5719  }
5720  }
5721 
5722 #if GC_ENABLE_LAZY_SWEEP
5723  gc_prof_sweep_timer_stop(objspace);
5724 #endif
5725 
5726  return heap->free_pages != NULL;
5727 }
5728 
5729 static void
5730 gc_sweep_rest(rb_objspace_t *objspace)
5731 {
5732  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5733  rb_size_pool_t *size_pool = &size_pools[i];
5734 
5735  while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
5736  gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5737  }
5738  }
5739 }
5740 
5741 static void
5742 gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *sweep_size_pool, rb_heap_t *heap)
5743 {
5744  GC_ASSERT(dont_gc_val() == FALSE);
5745  if (!GC_ENABLE_LAZY_SWEEP) return;
5746 
5747  unsigned int lock_lev;
5748  gc_enter(objspace, gc_enter_event_sweep_continue, &lock_lev);
5749 
5750  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5751  rb_size_pool_t *size_pool = &size_pools[i];
5752  if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
5753 #if USE_RVARGC
5754  /* sweep_size_pool requires a free slot but sweeping did not yield any. */
5755  if (size_pool == sweep_size_pool) {
5756  if (size_pool->allocatable_pages > 0) {
5757  heap_increment(objspace, size_pool, heap);
5758  }
5759  else {
5760  /* Not allowed to create a new page so finish sweeping. */
5761  gc_sweep_rest(objspace);
5762  break;
5763  }
5764  }
5765 #endif
5766  }
5767  }
5768 
5769  gc_exit(objspace, gc_enter_event_sweep_continue, &lock_lev);
5770 }
5771 
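/* invalidate_moved_plane(): roll back compaction for one bitmap word. Each set bit
 * identifies a slot holding a T_MOVED forwarding object; gc_move() copies the
 * object back from its new location into this original slot, and the vacated
 * destination slot is returned to its page's freelist. */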
5772 static void
5773 invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_t p, bits_t bitset)
5774 {
5775  if (bitset) {
5776  do {
5777  if (bitset & 1) {
5778  VALUE forwarding_object = (VALUE)p;
5779  VALUE object;
5780 
5781  if (BUILTIN_TYPE(forwarding_object) == T_MOVED) {
5782  GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
5783  GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
5784 
5785  CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
5786 
5787  bool from_freelist = FL_TEST_RAW(forwarding_object, FL_FROM_FREELIST);
5788  object = rb_gc_location(forwarding_object);
5789 
5790  gc_move(objspace, object, forwarding_object, page->slot_size);
5791  /* forwarding_object is now our actual object, and "object"
5792  * is the free slot for the original page */
5793  struct heap_page *orig_page = GET_HEAP_PAGE(object);
5794  orig_page->free_slots++;
5795  if (!from_freelist) {
5796  objspace->profile.total_freed_objects++;
5797  }
5798  heap_page_add_freeobj(objspace, orig_page, object);
5799 
5800  GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
5801  GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_MOVED);
5802  GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_NONE);
5803  }
5804  }
5805  p += sizeof(RVALUE);
5806  bitset >>= 1;
5807  } while (bitset);
5808  }
5809 }
5810 
5811 static void
5812 invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
5813 {
5814  int i;
5815  bits_t *mark_bits, *pin_bits;
5816  bits_t bitset;
5817  RVALUE *p;
5818 
5819  mark_bits = page->mark_bits;
5820  pin_bits = page->pinned_bits;
5821 
5822  p = page->start;
5823 
5824  // Skip out of range slots at the head of the page
5825  bitset = pin_bits[0] & ~mark_bits[0];
5826  bitset >>= NUM_IN_PAGE(p);
5827  invalidate_moved_plane(objspace, page, (uintptr_t)p, bitset);
5828  p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
5829 
5830  for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
5831  /* Moved objects are pinned but never marked. We reuse the pin bits
5832  * to indicate there is a moved object in this slot. */
5833  bitset = pin_bits[i] & ~mark_bits[i];
5834 
5835  invalidate_moved_plane(objspace, page, (uintptr_t)p, bitset);
5836  p += BITS_BITLENGTH;
5837  }
5838 }
5839 
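/* gc_compact_start(): prepare for compaction during sweep. Every eden page is
 * flagged before_sweep, each heap's compact cursor is placed on its last page (the
 * cursor is later walked back toward the first page as compaction proceeds), the
 * per-type moved/considered counters are cleared, and install_handlers() arms the
 * read barrier for pages that will contain T_MOVED objects. */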
5840 static void
5841 gc_compact_start(rb_objspace_t *objspace)
5842 {
5843  struct heap_page *page = NULL;
5844 
5845  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5846  rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
5847  list_for_each(&heap->pages, page, page_node) {
5848  page->flags.before_sweep = TRUE;
5849  }
5850 
5851  heap->compact_cursor = list_tail(&heap->pages, struct heap_page, page_node);
5852  heap->compact_cursor_index = 0;
5853  }
5854 
5855  if (gc_prof_enabled(objspace)) {
5856  gc_profile_record *record = gc_prof_record(objspace);
5857  record->moved_objects = objspace->rcompactor.total_moved;
5858  }
5859 
5860  memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
5861  memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
5862 
5863  /* Set up read barrier for pages containing MOVED objects */
5864  install_handlers();
5865 }
5866 
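/* gc_sweep(): entry point for the sweep phase. With immediate_sweep the whole heap
 * is swept right away via gc_sweep_rest(); otherwise pages are only flagged
 * before_sweep and a single gc_sweep_step() per size pool primes lazy sweeping,
 * the rest being finished later by gc_sweep_continue()/gc_sweep_rest(). */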
5867 static void
5868 gc_sweep(rb_objspace_t *objspace)
5869 {
5870  const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
5871 
5872  gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
5873 
5874  if (immediate_sweep) {
5875 #if !GC_ENABLE_LAZY_SWEEP
5876  gc_prof_sweep_timer_start(objspace);
5877 #endif
5878  gc_sweep_start(objspace);
5879  if (objspace->flags.during_compacting) {
5880  gc_compact_start(objspace);
5881  }
5882 
5883  gc_sweep_rest(objspace);
5884 #if !GC_ENABLE_LAZY_SWEEP
5885  gc_prof_sweep_timer_stop(objspace);
5886 #endif
5887  }
5888  else {
5889  struct heap_page *page = NULL;
5890  gc_sweep_start(objspace);
5891 
5892  if (ruby_enable_autocompact && is_full_marking(objspace)) {
5893  gc_compact_start(objspace);
5894  }
5895 
5896  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5897  list_for_each(&(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages), page, page_node) {
5898  page->flags.before_sweep = TRUE;
5899  }
5900  }
5901 
5902  /* Sweep every size pool. */
5903  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5904  rb_size_pool_t *size_pool = &size_pools[i];
5905  gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5906  }
5907  }
5908 
5909 #if !USE_RVARGC
5910  rb_size_pool_t *size_pool = &size_pools[0];
5911  gc_heap_prepare_minimum_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5912 #endif
5913 }
5914 
5915 /* Marking - Marking stack */
5916 
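/* The mark stack is a singly linked list of fixed-size chunks (STACK_CHUNK_SIZE
 * slots each). Chunks popped off the stack are kept on a small cache list so that
 * repeated push/pop cycles do not hit malloc/free every time, and
 * shrink_stack_chunk_cache() trims that cache when much of it goes unused. */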
5917 static stack_chunk_t *
5918 stack_chunk_alloc(void)
5919 {
5920  stack_chunk_t *res;
5921 
5922  res = malloc(sizeof(stack_chunk_t));
5923  if (!res)
5924  rb_memerror();
5925 
5926  return res;
5927 }
5928 
5929 static inline int
5930 is_mark_stack_empty(mark_stack_t *stack)
5931 {
5932  return stack->chunk == NULL;
5933 }
5934 
5935 static size_t
5936 mark_stack_size(mark_stack_t *stack)
5937 {
5938  size_t size = stack->index;
5939  stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
5940 
5941  while (chunk) {
5942  size += stack->limit;
5943  chunk = chunk->next;
5944  }
5945  return size;
5946 }
5947 
5948 static void
5949 add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
5950 {
5951  chunk->next = stack->cache;
5952  stack->cache = chunk;
5953  stack->cache_size++;
5954 }
5955 
5956 static void
5957 shrink_stack_chunk_cache(mark_stack_t *stack)
5958 {
5959  stack_chunk_t *chunk;
5960 
5961  if (stack->unused_cache_size > (stack->cache_size/2)) {
5962  chunk = stack->cache;
5963  stack->cache = stack->cache->next;
5964  stack->cache_size--;
5965  free(chunk);
5966  }
5967  stack->unused_cache_size = stack->cache_size;
5968 }
5969 
5970 static void
5971 push_mark_stack_chunk(mark_stack_t *stack)
5972 {
5973  stack_chunk_t *next;
5974 
5975  GC_ASSERT(stack->index == stack->limit);
5976 
5977  if (stack->cache_size > 0) {
5978  next = stack->cache;
5979  stack->cache = stack->cache->next;
5980  stack->cache_size--;
5981  if (stack->unused_cache_size > stack->cache_size)
5982  stack->unused_cache_size = stack->cache_size;
5983  }
5984  else {
5985  next = stack_chunk_alloc();
5986  }
5987  next->next = stack->chunk;
5988  stack->chunk = next;
5989  stack->index = 0;
5990 }
5991 
5992 static void
5993 pop_mark_stack_chunk(mark_stack_t *stack)
5994 {
5995  stack_chunk_t *prev;
5996 
5997  prev = stack->chunk->next;
5998  GC_ASSERT(stack->index == 0);
5999  add_stack_chunk_cache(stack, stack->chunk);
6000  stack->chunk = prev;
6001  stack->index = stack->limit;
6002 }
6003 
6004 static void
6005 mark_stack_chunk_list_free(stack_chunk_t *chunk)
6006 {
6007  stack_chunk_t *next = NULL;
6008 
6009  while (chunk != NULL) {
6010  next = chunk->next;
6011  free(chunk);
6012  chunk = next;
6013  }
6014 }
6015 
6016 static void
6017 free_stack_chunks(mark_stack_t *stack)
6018 {
6019  mark_stack_chunk_list_free(stack->chunk);
6020 }
6021 
6022 static void
6023 mark_stack_free_cache(mark_stack_t *stack)
6024 {
6025  mark_stack_chunk_list_free(stack->cache);
6026  stack->cache_size = 0;
6027  stack->unused_cache_size = 0;
6028 }
6029 
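/* Only heap-allocated object types may be pushed onto the mark stack. Immediates
 * never reach here, and T_NONE/T_MOVED/T_ZOMBIE/T_UNDEF indicate a broken
 * reference, so they trigger rb_bug() instead of being marked. */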
6030 static void
6031 push_mark_stack(mark_stack_t *stack, VALUE data)
6032 {
6033  VALUE obj = data;
6034  switch (BUILTIN_TYPE(obj)) {
6035  case T_OBJECT:
6036  case T_CLASS:
6037  case T_MODULE:
6038  case T_FLOAT:
6039  case T_STRING:
6040  case T_REGEXP:
6041  case T_ARRAY:
6042  case T_HASH:
6043  case T_STRUCT:
6044  case T_BIGNUM:
6045  case T_FILE:
6046  case T_DATA:
6047  case T_MATCH:
6048  case T_COMPLEX:
6049  case T_RATIONAL:
6050  case T_TRUE:
6051  case T_FALSE:
6052  case T_SYMBOL:
6053  case T_IMEMO:
6054  case T_ICLASS:
6055  if (stack->index == stack->limit) {
6056  push_mark_stack_chunk(stack);
6057  }
6058  stack->chunk->data[stack->index++] = data;
6059  return;
6060 
6061  case T_NONE:
6062  case T_NIL:
6063  case T_FIXNUM:
6064  case T_MOVED:
6065  case T_ZOMBIE:
6066  case T_UNDEF:
6067  case T_MASK:
6068  rb_bug("push_mark_stack() called for broken object");
6069  break;
6070 
6071  case T_NODE:
6072  UNEXPECTED_NODE(push_mark_stack);
6073  break;
6074  }
6075 
6076  rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
6077  BUILTIN_TYPE(obj), (void *)data,
6078  is_pointer_to_heap(&rb_objspace, (void *)data) ? "corrupted object" : "non object");
6079 }
6080 
6081 static int
6082 pop_mark_stack(mark_stack_t *stack, VALUE *data)
6083 {
6084  if (is_mark_stack_empty(stack)) {
6085  return FALSE;
6086  }
6087  if (stack->index == 1) {
6088  *data = stack->chunk->data[--stack->index];
6089  pop_mark_stack_chunk(stack);
6090  }
6091  else {
6092  *data = stack->chunk->data[--stack->index];
6093  }
6094  return TRUE;
6095 }
6096 
6097 static void
6098 init_mark_stack(mark_stack_t *stack)
6099 {
6100  int i;
6101 
6102  MEMZERO(stack, mark_stack_t, 1);
6103  stack->index = stack->limit = STACK_CHUNK_SIZE;
6104 
6105  for (i=0; i < 4; i++) {
6106  add_stack_chunk_cache(stack, stack_chunk_alloc());
6107  }
6108  stack->unused_cache_size = stack->cache_size;
6109 }
6110 
6111 /* Marking */
6112 
6113 #define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
6114 
6115 #define STACK_START (ec->machine.stack_start)
6116 #define STACK_END (ec->machine.stack_end)
6117 #define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
6118 
6119 #if STACK_GROW_DIRECTION < 0
6120 # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
6121 #elif STACK_GROW_DIRECTION > 0
6122 # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
6123 #else
6124 # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
6125  : (size_t)(STACK_END - STACK_START + 1))
6126 #endif
6127 #if !STACK_GROW_DIRECTION
6128 int ruby_stack_grow_direction;
6129 int
6130 ruby_get_stack_grow_direction(volatile VALUE *addr)
6131 {
6132  VALUE *end;
6133  SET_MACHINE_STACK_END(&end);
6134 
6135  if (end > addr) return ruby_stack_grow_direction = 1;
6136  return ruby_stack_grow_direction = -1;
6137 }
6138 #endif
6139 
6140 size_t
6141 ruby_stack_length(VALUE **p)
6142 {
6143  rb_execution_context_t *ec = GET_EC();
6144  SET_STACK_END;
6145  if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
6146  return STACK_LENGTH;
6147 }
6148 
6149 #define PREVENT_STACK_OVERFLOW 1
6150 #ifndef PREVENT_STACK_OVERFLOW
6151 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
6152 # define PREVENT_STACK_OVERFLOW 1
6153 #else
6154 # define PREVENT_STACK_OVERFLOW 0
6155 #endif
6156 #endif
6157 #if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
6158 static int
6159 stack_check(rb_execution_context_t *ec, int water_mark)
6160 {
6161  SET_STACK_END;
6162 
6163  size_t length = STACK_LENGTH;
6164  size_t maximum_length = STACK_LEVEL_MAX - water_mark;
6165 
6166  return length > maximum_length;
6167 }
6168 #else
6169 #define stack_check(ec, water_mark) FALSE
6170 #endif
6171 
6172 #define STACKFRAME_FOR_CALL_CFUNC 2048
6173 
6174 MJIT_FUNC_EXPORTED int
6175 rb_ec_stack_check(rb_execution_context_t *ec)
6176 {
6177  return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
6178 }
6179 
6180 int
6181 ruby_stack_check(void)
6182 {
6183  return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
6184 }
6185 
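/* Conservative scanning helpers: each_location()/gc_mark_locations() walk a word
 * range (typically a machine stack or a register dump) and pass every word to the
 * callback, normally gc_mark_maybe(), which only acts on words that actually point
 * into the GC heap. */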
6186 ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE)));
6187 static void
6188 each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE))
6189 {
6190  VALUE v;
6191  while (n--) {
6192  v = *x;
6193  cb(objspace, v);
6194  x++;
6195  }
6196 }
6197 
6198 static void
6199 gc_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end, void (*cb)(rb_objspace_t *, VALUE))
6200 {
6201  long n;
6202 
6203  if (end <= start) return;
6204  n = end - start;
6205  each_location(objspace, start, n, cb);
6206 }
6207 
6208 void
6209 rb_gc_mark_locations(const VALUE *start, const VALUE *end)
6210 {
6211  gc_mark_locations(&rb_objspace, start, end, gc_mark_maybe);
6212 }
6213 
6214 static void
6215 gc_mark_values(rb_objspace_t *objspace, long n, const VALUE *values)
6216 {
6217  long i;
6218 
6219  for (i=0; i<n; i++) {
6220  gc_mark(objspace, values[i]);
6221  }
6222 }
6223 
6224 void
6225 rb_gc_mark_values(long n, const VALUE *values)
6226 {
6227  long i;
6228  rb_objspace_t *objspace = &rb_objspace;
6229 
6230  for (i=0; i<n; i++) {
6231  gc_mark_and_pin(objspace, values[i]);
6232  }
6233 }
6234 
6235 static void
6236 gc_mark_stack_values(rb_objspace_t *objspace, long n, const VALUE *values)
6237 {
6238  long i;
6239 
6240  for (i=0; i<n; i++) {
6241  if (is_markable_object(objspace, values[i])) {
6242  gc_mark_and_pin(objspace, values[i]);
6243  }
6244  }
6245 }
6246 
6247 void
6248 rb_gc_mark_vm_stack_values(long n, const VALUE *values)
6249 {
6250  rb_objspace_t *objspace = &rb_objspace;
6251  gc_mark_stack_values(objspace, n, values);
6252 }
6253 
6254 static int
6255 mark_value(st_data_t key, st_data_t value, st_data_t data)
6256 {
6257  rb_objspace_t *objspace = (rb_objspace_t *)data;
6258  gc_mark(objspace, (VALUE)value);
6259  return ST_CONTINUE;
6260 }
6261 
6262 static int
6263 mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
6264 {
6265  rb_objspace_t *objspace = (rb_objspace_t *)data;
6266  gc_mark_and_pin(objspace, (VALUE)value);
6267  return ST_CONTINUE;
6268 }
6269 
6270 static void
6271 mark_tbl_no_pin(rb_objspace_t *objspace, st_table *tbl)
6272 {
6273  if (!tbl || tbl->num_entries == 0) return;
6274  st_foreach(tbl, mark_value, (st_data_t)objspace);
6275 }
6276 
6277 static void
6278 mark_tbl(rb_objspace_t *objspace, st_table *tbl)
6279 {
6280  if (!tbl || tbl->num_entries == 0) return;
6281  st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
6282 }
6283 
6284 static int
6285 mark_key(st_data_t key, st_data_t value, st_data_t data)
6286 {
6287  rb_objspace_t *objspace = (rb_objspace_t *)data;
6288  gc_mark_and_pin(objspace, (VALUE)key);
6289  return ST_CONTINUE;
6290 }
6291 
6292 static void
6293 mark_set(rb_objspace_t *objspace, st_table *tbl)
6294 {
6295  if (!tbl) return;
6296  st_foreach(tbl, mark_key, (st_data_t)objspace);
6297 }
6298 
6299 static int
6300 pin_value(st_data_t key, st_data_t value, st_data_t data)
6301 {
6302  rb_objspace_t *objspace = (rb_objspace_t *)data;
6303  gc_mark_and_pin(objspace, (VALUE)value);
6304  return ST_CONTINUE;
6305 }
6306 
6307 static void
6308 mark_finalizer_tbl(rb_objspace_t *objspace, st_table *tbl)
6309 {
6310  if (!tbl) return;
6311  st_foreach(tbl, pin_value, (st_data_t)objspace);
6312 }
6313 
6314 void
6315 rb_mark_set(st_table *tbl)
6316 {
6317  mark_set(&rb_objspace, tbl);
6318 }
6319 
6320 static int
6321 mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
6322 {
6323  rb_objspace_t *objspace = (rb_objspace_t *)data;
6324 
6325  gc_mark(objspace, (VALUE)key);
6326  gc_mark(objspace, (VALUE)value);
6327  return ST_CONTINUE;
6328 }
6329 
6330 static int
6331 pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
6332 {
6333  rb_objspace_t *objspace = (rb_objspace_t *)data;
6334 
6335  gc_mark_and_pin(objspace, (VALUE)key);
6336  gc_mark_and_pin(objspace, (VALUE)value);
6337  return ST_CONTINUE;
6338 }
6339 
6340 static int
6341 pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
6342 {
6343  rb_objspace_t *objspace = (rb_objspace_t *)data;
6344 
6345  gc_mark_and_pin(objspace, (VALUE)key);
6346  gc_mark(objspace, (VALUE)value);
6347  return ST_CONTINUE;
6348 }
6349 
6350 static void
6351 mark_hash(rb_objspace_t *objspace, VALUE hash)
6352 {
6353  if (rb_hash_compare_by_id_p(hash)) {
6354  rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
6355  }
6356  else {
6357  rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
6358  }
6359 
6360  if (RHASH_AR_TABLE_P(hash)) {
6361  if (LIKELY(during_gc) && RHASH_TRANSIENT_P(hash)) {
6362  rb_transient_heap_mark(hash, RHASH_AR_TABLE(hash));
6363  }
6364  }
6365  else {
6366  VM_ASSERT(!RHASH_TRANSIENT_P(hash));
6367  }
6368  gc_mark(objspace, RHASH(hash)->ifnone);
6369 }
6370 
6371 static void
6372 mark_st(rb_objspace_t *objspace, st_table *tbl)
6373 {
6374  if (!tbl) return;
6375  st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
6376 }
6377 
6378 void
6379 rb_mark_hash(st_table *tbl)
6380 {
6381  mark_st(&rb_objspace, tbl);
6382 }
6383 
6384 static void
6385 mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
6386 {
6387  const rb_method_definition_t *def = me->def;
6388 
6389  gc_mark(objspace, me->owner);
6390  gc_mark(objspace, me->defined_class);
6391 
6392  if (def) {
6393  switch (def->type) {
6394  case VM_METHOD_TYPE_ISEQ:
6395  if (def->body.iseq.iseqptr) gc_mark(objspace, (VALUE)def->body.iseq.iseqptr);
6396  gc_mark(objspace, (VALUE)def->body.iseq.cref);
6397 
6398  if (def->iseq_overload && me->defined_class) {
6399  // it can be a key of "overloaded_cme" table
6400  // so it should be pinned.
6401  gc_mark_and_pin(objspace, (VALUE)me);
6402  }
6403  break;
6404  case VM_METHOD_TYPE_ATTRSET:
6405  case VM_METHOD_TYPE_IVAR:
6406  gc_mark(objspace, def->body.attr.location);
6407  break;
6408  case VM_METHOD_TYPE_BMETHOD:
6409  gc_mark(objspace, def->body.bmethod.proc);
6410  if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
6411  break;
6412  case VM_METHOD_TYPE_ALIAS:
6413  gc_mark(objspace, (VALUE)def->body.alias.original_me);
6414  return;
6415  case VM_METHOD_TYPE_REFINED:
6416  gc_mark(objspace, (VALUE)def->body.refined.orig_me);
6417  gc_mark(objspace, (VALUE)def->body.refined.owner);
6418  break;
6419  case VM_METHOD_TYPE_CFUNC:
6420  case VM_METHOD_TYPE_ZSUPER:
6421  case VM_METHOD_TYPE_MISSING:
6422  case VM_METHOD_TYPE_OPTIMIZED:
6423  case VM_METHOD_TYPE_UNDEF:
6424  case VM_METHOD_TYPE_NOTIMPLEMENTED:
6425  break;
6426  }
6427  }
6428 }
6429 
6430 static enum rb_id_table_iterator_result
6431 mark_method_entry_i(VALUE me, void *data)
6432 {
6433  rb_objspace_t *objspace = (rb_objspace_t *)data;
6434 
6435  gc_mark(objspace, me);
6436  return ID_TABLE_CONTINUE;
6437 }
6438 
6439 static void
6440 mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
6441 {
6442  if (tbl) {
6443  rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
6444  }
6445 }
6446 
6447 static enum rb_id_table_iterator_result
6448 mark_const_entry_i(VALUE value, void *data)
6449 {
6450  const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
6451  rb_objspace_t *objspace = data;
6452 
6453  gc_mark(objspace, ce->value);
6454  gc_mark(objspace, ce->file);
6455  return ID_TABLE_CONTINUE;
6456 }
6457 
6458 static void
6459 mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
6460 {
6461  if (!tbl) return;
6462  rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
6463 }
6464 
6465 #if STACK_GROW_DIRECTION < 0
6466 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
6467 #elif STACK_GROW_DIRECTION > 0
6468 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
6469 #else
6470 #define GET_STACK_BOUNDS(start, end, appendix) \
6471  ((STACK_END < STACK_START) ? \
6472  ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
6473 #endif
6474 
6475 static void each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
6476  const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE));
6477 
6478 #ifndef __EMSCRIPTEN__
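/* mark_current_machine_context(): conservatively mark the running thread's machine
 * context. setjmp() spills the callee-saved registers into a local jmp_buf, then
 * both that buffer and the machine stack between STACK_START and STACK_END are
 * scanned with gc_mark_maybe(). */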
6479 static void
6480 mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6481 {
6482  union {
6483  rb_jmp_buf j;
6484  VALUE v[sizeof(rb_jmp_buf) / (sizeof(VALUE))];
6485  } save_regs_gc_mark;
6486  VALUE *stack_start, *stack_end;
6487 
6488  FLUSH_REGISTER_WINDOWS;
6489  memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
6490  /* This assumes that all registers are saved into the jmp_buf (and stack) */
6491  rb_setjmp(save_regs_gc_mark.j);
6492 
6493  /* SET_STACK_END must be called in this function because
6494  * the stack frame of this function may contain
6495  * callee save registers and they should be marked. */
6496  SET_STACK_END;
6497  GET_STACK_BOUNDS(stack_start, stack_end, 1);
6498 
6499  each_location(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v), gc_mark_maybe);
6500 
6501  each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6502 }
6503 #else
6504 
6505 static VALUE *rb_emscripten_stack_range_tmp[2];
6506 
6507 static void
6508 rb_emscripten_mark_locations(void *begin, void *end)
6509 {
6510  rb_emscripten_stack_range_tmp[0] = begin;
6511  rb_emscripten_stack_range_tmp[1] = end;
6512 }
6513 
6514 static void
6515 mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6516 {
6517  emscripten_scan_stack(rb_emscripten_mark_locations);
6518  each_stack_location(objspace, ec, rb_emscripten_stack_range_tmp[0], rb_emscripten_stack_range_tmp[1], gc_mark_maybe);
6519 
6520  emscripten_scan_registers(rb_emscripten_mark_locations);
6521  each_stack_location(objspace, ec, rb_emscripten_stack_range_tmp[0], rb_emscripten_stack_range_tmp[1], gc_mark_maybe);
6522 }
6523 #endif
6524 
6525 static void
6526 each_machine_stack_value(const rb_execution_context_t *ec, void (*cb)(rb_objspace_t *, VALUE))
6527 {
6528  rb_objspace_t *objspace = &rb_objspace;
6529  VALUE *stack_start, *stack_end;
6530 
6531  GET_STACK_BOUNDS(stack_start, stack_end, 0);
6532  each_stack_location(objspace, ec, stack_start, stack_end, cb);
6533 }
6534 
6535 void
6536 rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
6537 {
6538  each_machine_stack_value(ec, gc_mark_maybe);
6539 }
6540 
6541 static void
6542 each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
6543  const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE))
6544 {
6545 
6546  gc_mark_locations(objspace, stack_start, stack_end, cb);
6547 
6548 #if defined(__mc68000__)
6549  gc_mark_locations(objspace,
6550  (VALUE*)((char*)stack_start + 2),
6551  (VALUE*)((char*)stack_end - 2), cb);
6552 #endif
6553 }
6554 
6555 void
6556 rb_mark_tbl(st_table *tbl)
6557 {
6558  mark_tbl(&rb_objspace, tbl);
6559 }
6560 
6561 void
6562 rb_mark_tbl_no_pin(st_table *tbl)
6563 {
6564  mark_tbl_no_pin(&rb_objspace, tbl);
6565 }
6566 
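/* gc_mark_maybe(): entry point for conservative roots. The value is marked only if
 * it points into the GC heap and is not a dead (T_NONE/T_ZOMBIE) slot; it is also
 * pinned, because a reference found on the stack cannot be updated if compaction
 * moves the object. */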
6567 static void
6568 gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
6569 {
6570  (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
6571 
6572  if (is_pointer_to_heap(objspace, (void *)obj)) {
6573  void *ptr = __asan_region_is_poisoned((void *)obj, SIZEOF_VALUE);
6574  asan_unpoison_object(obj, false);
6575 
6576  /* Garbage can live on the stack, so do not mark or pin */
6577  switch (BUILTIN_TYPE(obj)) {
6578  case T_ZOMBIE:
6579  case T_NONE:
6580  break;
6581  default:
6582  gc_mark_and_pin(objspace, obj);
6583  break;
6584  }
6585 
6586  if (ptr) {
6587  GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
6588  asan_poison_object(obj);
6589  }
6590  }
6591 }
6592 
6593 void
6594 rb_gc_mark_maybe(VALUE obj)
6595 {
6596  gc_mark_maybe(&rb_objspace, obj);
6597 }
6598 
6599 static inline int
6600 gc_mark_set(rb_objspace_t *objspace, VALUE obj)
6601 {
6602  ASSERT_vm_locking();
6603  if (RVALUE_MARKED(obj)) return 0;
6604  MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
6605  return 1;
6606 }
6607 
6608 static int
6609 gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
6610 {
6611  struct heap_page *page = GET_HEAP_PAGE(obj);
6612  bits_t *uncollectible_bits = &page->uncollectible_bits[0];
6613 
6614  if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
6615  page->flags.has_uncollectible_shady_objects = TRUE;
6616  MARK_IN_BITMAP(uncollectible_bits, obj);
6617  objspace->rgengc.uncollectible_wb_unprotected_objects++;
6618 
6619 #if RGENGC_PROFILE > 0
6620  objspace->profile.total_remembered_shady_object_count++;
6621 #if RGENGC_PROFILE >= 2
6622  objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
6623 #endif
6624 #endif
6625  return TRUE;
6626  }
6627  else {
6628  return FALSE;
6629  }
6630 }
6631 
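/* rgengc_check_relation(): maintain the generational invariant while marking. When
 * the current parent object is old, a write-barrier-unprotected child is
 * remembered; a protected young child that is already marked is promoted to old
 * (and greyed or remembered), while an unmarked one is flagged as a promotion
 * candidate, so minor GCs never lose an old-to-young edge. */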
6632 static void
6633 rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
6634 {
6635  const VALUE old_parent = objspace->rgengc.parent_object;
6636 
6637  if (old_parent) { /* parent object is old */
6638  if (RVALUE_WB_UNPROTECTED(obj)) {
6639  if (gc_remember_unprotected(objspace, obj)) {
6640  gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6641  }
6642  }
6643  else {
6644  if (!RVALUE_OLD_P(obj)) {
6645  if (RVALUE_MARKED(obj)) {
6646  /* An object pointed to by an OLD object should be OLD. */
6647  gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6648  RVALUE_AGE_SET_OLD(objspace, obj);
6649  if (is_incremental_marking(objspace)) {
6650  if (!RVALUE_MARKING(obj)) {
6651  gc_grey(objspace, obj);
6652  }
6653  }
6654  else {
6655  rgengc_remember(objspace, obj);
6656  }
6657  }
6658  else {
6659  gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6660  RVALUE_AGE_SET_CANDIDATE(objspace, obj);
6661  }
6662  }
6663  }
6664  }
6665 
6666  GC_ASSERT(old_parent == objspace->rgengc.parent_object);
6667 }
6668 
6669 static void
6670 gc_grey(rb_objspace_t *objspace, VALUE obj)
6671 {
6672 #if RGENGC_CHECK_MODE
6673  if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
6674  if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
6675 #endif
6676 
6677 #if GC_ENABLE_INCREMENTAL_MARK
6678  if (is_incremental_marking(objspace)) {
6679  MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
6680  }
6681 #endif
6682 
6683  push_mark_stack(&objspace->mark_stack, obj);
6684 }
6685 
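/* gc_aging(): age accounting for a freshly marked object. Write-barrier protected
 * objects gain one age step per marking until they become old; during a full mark
 * an already-old object is additionally recorded in the page's uncollectible
 * bitmap. Every call also counts the object in objspace->marked_slots. */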
6686 static void
6687 gc_aging(rb_objspace_t *objspace, VALUE obj)
6688 {
6689  struct heap_page *page = GET_HEAP_PAGE(obj);
6690 
6691  GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
6692  check_rvalue_consistency(obj);
6693 
6694  if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
6695  if (!RVALUE_OLD_P(obj)) {
6696  gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
6697  RVALUE_AGE_INC(objspace, obj);
6698  }
6699  else if (is_full_marking(objspace)) {
6700  GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
6701  RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
6702  }
6703  }
6704  check_rvalue_consistency(obj);
6705 
6706  objspace->marked_slots++;
6707 }
6708 
6709 NOINLINE(static void gc_mark_ptr(rb_objspace_t *objspace, VALUE obj));
6710 static void reachable_objects_from_callback(VALUE obj);
6711 
6712 static void
6713 gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
6714 {
6715  if (LIKELY(during_gc)) {
6716  rgengc_check_relation(objspace, obj);
6717  if (!gc_mark_set(objspace, obj)) return; /* already marked */
6718 
6719  if (0) { // for debug GC marking miss
6720  if (objspace->rgengc.parent_object) {
6721  RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
6722  (void *)obj, obj_type_name(obj),
6723  (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
6724  }
6725  else {
6726  RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
6727  }
6728  }
6729 
6730  if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
6731  rp(obj);
6732  rb_bug("try to mark T_NONE object"); /* check here will help debugging */
6733  }
6734  gc_aging(objspace, obj);
6735  gc_grey(objspace, obj);
6736  }
6737  else {
6738  reachable_objects_from_callback(obj);
6739  }
6740 }
6741 
6742 static inline void
6743 gc_pin(rb_objspace_t *objspace, VALUE obj)
6744 {
6745  GC_ASSERT(is_markable_object(objspace, obj));
6746  if (UNLIKELY(objspace->flags.during_compacting)) {
6747  if (LIKELY(during_gc)) {
6748  MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
6749  }
6750  }
6751 }
6752 
6753 static inline void
6754 gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
6755 {
6756  if (!is_markable_object(objspace, obj)) return;
6757  gc_pin(objspace, obj);
6758  gc_mark_ptr(objspace, obj);
6759 }
6760 
6761 static inline void
6762 gc_mark(rb_objspace_t *objspace, VALUE obj)
6763 {
6764  if (!is_markable_object(objspace, obj)) return;
6765  gc_mark_ptr(objspace, obj);
6766 }
6767 
6768 void
6769 rb_gc_mark_movable(VALUE ptr)
6770 {
6771  gc_mark(&rb_objspace, ptr);
6772 }
6773 
6774 void
6775 rb_gc_mark(VALUE ptr)
6776 {
6777  gc_mark_and_pin(&rb_objspace, ptr);
6778 }
6779 
6780 /* CAUTION: THIS FUNCTION IS VALID *ONLY BEFORE* SWEEPING.
6781  * It is intended only for GC_END_MARK timing.
6782  */
6783 
6784 int
6785 rb_objspace_marked_object_p(VALUE obj)
6786 {
6787  return RVALUE_MARKED(obj) ? TRUE : FALSE;
6788 }
6789 
6790 static inline void
6791 gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
6792 {
6793  if (RVALUE_OLD_P(obj)) {
6794  objspace->rgengc.parent_object = obj;
6795  }
6796  else {
6797  objspace->rgengc.parent_object = Qfalse;
6798  }
6799 }
6800 
6801 static void
6802 gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
6803 {
6804  switch (imemo_type(obj)) {
6805  case imemo_env:
6806  {
6807  const rb_env_t *env = (const rb_env_t *)obj;
6808 
6809  if (LIKELY(env->ep)) {
6810  // env->ep can be NULL just after newobj().
6811  GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
6812  GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
6813  gc_mark_values(objspace, (long)env->env_size, env->env);
6814  VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
6815  gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
6816  gc_mark(objspace, (VALUE)env->iseq);
6817  }
6818  }
6819  return;
6820  case imemo_cref:
6821  gc_mark(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
6822  gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
6823  gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
6824  return;
6825  case imemo_svar:
6826  gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
6827  gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
6828  gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
6829  gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
6830  return;
6831  case imemo_throw_data:
6832  gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
6833  return;
6834  case imemo_ifunc:
6835  gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
6836  return;
6837  case imemo_memo:
6838  gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
6839  gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
6840  gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
6841  return;
6842  case imemo_ment:
6843  mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
6844  return;
6845  case imemo_iseq:
6846  rb_iseq_mark((rb_iseq_t *)obj);
6847  return;
6848  case imemo_tmpbuf:
6849  {
6850  const rb_imemo_tmpbuf_t *m = &RANY(obj)->as.imemo.alloc;
6851  do {
6852  rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
6853  } while ((m = m->next) != NULL);
6854  }
6855  return;
6856  case imemo_ast:
6857  rb_ast_mark(&RANY(obj)->as.imemo.ast);
6858  return;
6859  case imemo_parser_strterm:
6860  rb_strterm_mark(obj);
6861  return;
6862  case imemo_callinfo:
6863  return;
6864  case imemo_callcache:
6865  {
6866  const struct rb_callcache *cc = (const struct rb_callcache *)obj;
6867  // should not mark klass here
6868  gc_mark(objspace, (VALUE)vm_cc_cme(cc));
6869  }
6870  return;
6871  case imemo_constcache:
6872  {
6873  const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
6874  gc_mark(objspace, ice->value);
6875  }
6876  return;
6877 #if VM_CHECK_MODE > 0
6878  default:
6879  VM_UNREACHABLE(gc_mark_imemo);
6880 #endif
6881  }
6882 }
6883 
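/* gc_mark_children(): mark everything directly reachable from obj. Generic ivars
 * (FL_EXIVAR) and the object's class are handled first, then a switch on
 * BUILTIN_TYPE marks the type-specific references: array elements, hash
 * keys/values, instance variables, struct members, T_DATA via its dmark callback,
 * and so on. */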
6884 static void
6885 gc_mark_children(rb_objspace_t *objspace, VALUE obj)
6886 {
6887  register RVALUE *any = RANY(obj);
6888  gc_mark_set_parent(objspace, obj);
6889 
6890  if (FL_TEST(obj, FL_EXIVAR)) {
6891  rb_mark_generic_ivar(obj);
6892  }
6893 
6894  switch (BUILTIN_TYPE(obj)) {
6895  case T_FLOAT:
6896  case T_BIGNUM:
6897  case T_SYMBOL:
6898  /* Not immediates, but these types have no references and no
6899  * singleton class */
6900  return;
6901 
6902  case T_NIL:
6903  case T_FIXNUM:
6904  rb_bug("rb_gc_mark() called for broken object");
6905  break;
6906 
6907  case T_NODE:
6908  UNEXPECTED_NODE(rb_gc_mark);
6909  break;
6910 
6911  case T_IMEMO:
6912  gc_mark_imemo(objspace, obj);
6913  return;
6914 
6915  default:
6916  break;
6917  }
6918 
6919  gc_mark(objspace, any->as.basic.klass);
6920 
6921  switch (BUILTIN_TYPE(obj)) {
6922  case T_CLASS:
6923  case T_MODULE:
6924  if (RCLASS_SUPER(obj)) {
6925  gc_mark(objspace, RCLASS_SUPER(obj));
6926  }
6927  if (!RCLASS_EXT(obj)) break;
6928 
6929  mark_m_tbl(objspace, RCLASS_M_TBL(obj));
6930  cc_table_mark(objspace, obj);
6931  mark_tbl_no_pin(objspace, RCLASS_IV_TBL(obj));
6932  mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
6933  break;
6934 
6935  case T_ICLASS:
6936  if (RICLASS_OWNS_M_TBL_P(obj)) {
6937  mark_m_tbl(objspace, RCLASS_M_TBL(obj));
6938  }
6939  if (RCLASS_SUPER(obj)) {
6940  gc_mark(objspace, RCLASS_SUPER(obj));
6941  }
6942  if (!RCLASS_EXT(obj)) break;
6943  mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
6944  cc_table_mark(objspace, obj);
6945  break;
6946 
6947  case T_ARRAY:
6948  if (FL_TEST(obj, ELTS_SHARED)) {
6949  VALUE root = any->as.array.as.heap.aux.shared_root;
6950  gc_mark(objspace, root);
6951  }
6952  else {
6953  long i, len = RARRAY_LEN(obj);
6954  const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(obj);
6955  for (i=0; i < len; i++) {
6956  gc_mark(objspace, ptr[i]);
6957  }
6958 
6959  if (LIKELY(during_gc)) {
6960  if (!FL_TEST_RAW(obj, RARRAY_EMBED_FLAG) &&
6961  RARRAY_TRANSIENT_P(obj)) {
6962  rb_transient_heap_mark(obj, ptr);
6963  }
6964  }
6965  }
6966  break;
6967 
6968  case T_HASH:
6969  mark_hash(objspace, obj);
6970  break;
6971 
6972  case T_STRING:
6973  if (STR_SHARED_P(obj)) {
6974  gc_mark(objspace, any->as.string.as.heap.aux.shared);
6975  }
6976  break;
6977 
6978  case T_DATA:
6979  {
6980  void *const ptr = DATA_PTR(obj);
6981  if (ptr) {
6982  RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
6983  any->as.typeddata.type->function.dmark :
6984  any->as.data.dmark;
6985  if (mark_func) (*mark_func)(ptr);
6986  }
6987  }
6988  break;
6989 
6990  case T_OBJECT:
6991  {
6992  const VALUE * const ptr = ROBJECT_IVPTR(obj);
6993 
6994  uint32_t i, len = ROBJECT_NUMIV(obj);
6995  for (i = 0; i < len; i++) {
6996  gc_mark(objspace, ptr[i]);
6997  }
6998 
6999  if (LIKELY(during_gc) &&
7000  ROBJ_TRANSIENT_P(obj)) {
7001  rb_transient_heap_mark(obj, ptr);
7002  }
7003  }
7004  break;
7005 
7006  case T_FILE:
7007  if (any->as.file.fptr) {
7008  gc_mark(objspace, any->as.file.fptr->self);
7009  gc_mark(objspace, any->as.file.fptr->pathv);
7010  gc_mark(objspace, any->as.file.fptr->tied_io_for_writing);
7011  gc_mark(objspace, any->as.file.fptr->writeconv_asciicompat);
7012  gc_mark(objspace, any->as.file.fptr->writeconv_pre_ecopts);
7013  gc_mark(objspace, any->as.file.fptr->encs.ecopts);
7014  gc_mark(objspace, any->as.file.fptr->write_lock);
7015  }
7016  break;
7017 
7018  case T_REGEXP:
7019  gc_mark(objspace, any->as.regexp.src);
7020  break;
7021 
7022  case T_MATCH:
7023  gc_mark(objspace, any->as.match.regexp);
7024  if (any->as.match.str) {
7025  gc_mark(objspace, any->as.match.str);
7026  }
7027  break;
7028 
7029  case T_RATIONAL:
7030  gc_mark(objspace, any->as.rational.num);
7031  gc_mark(objspace, any->as.rational.den);
7032  break;
7033 
7034  case T_COMPLEX:
7035  gc_mark(objspace, any->as.complex.real);
7036  gc_mark(objspace, any->as.complex.imag);
7037  break;
7038 
7039  case T_STRUCT:
7040  {
7041  long i;
7042  const long len = RSTRUCT_LEN(obj);
7043  const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
7044 
7045  for (i=0; i<len; i++) {
7046  gc_mark(objspace, ptr[i]);
7047  }
7048 
7049  if (LIKELY(during_gc) &&
7050  RSTRUCT_TRANSIENT_P(obj)) {
7051  rb_transient_heap_mark(obj, ptr);
7052  }
7053  }
7054  break;
7055 
7056  default:
7057 #if GC_DEBUG
7058  rb_gcdebug_print_obj_condition((VALUE)obj);
7059 #endif
7060  if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
7061  if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
7062  if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
7063  rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
7064  BUILTIN_TYPE(obj), (void *)any,
7065  is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
7066  }
7067 }
7068 
7069 /**
7070  * incremental: 0 -> not incremental (do all)
7071  * incremental: n -> mark at most `n' objects
7072  */
7073 static inline int
7074 gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
7075 {
7076  mark_stack_t *mstack = &objspace->mark_stack;
7077  VALUE obj;
7078 #if GC_ENABLE_INCREMENTAL_MARK
7079  size_t marked_slots_at_the_beginning = objspace->marked_slots;
7080  size_t popped_count = 0;
7081 #endif
7082 
7083  while (pop_mark_stack(mstack, &obj)) {
7084  if (obj == Qundef) continue; /* skip */
7085 
7086  if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
7087  rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
7088  }
7089  gc_mark_children(objspace, obj);
7090 
7091 #if GC_ENABLE_INCREMENTAL_MARK
7092  if (incremental) {
7093  if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
7094  rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
7095  }
7096  CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7097  popped_count++;
7098 
7099  if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
7100  break;
7101  }
7102  }
7103  else {
7104  /* just ignore marking bits */
7105  }
7106 #endif
7107  }
7108 
7109  if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
7110 
7111  if (is_mark_stack_empty(mstack)) {
7112  shrink_stack_chunk_cache(mstack);
7113  return TRUE;
7114  }
7115  else {
7116  return FALSE;
7117  }
7118 }
7119 
7120 static int
7121 gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
7122 {
7123  return gc_mark_stacked_objects(objspace, TRUE, count);
7124 }
7125 
7126 static int
7127 gc_mark_stacked_objects_all(rb_objspace_t *objspace)
7128 {
7129  return gc_mark_stacked_objects(objspace, FALSE, 0);
7130 }
7131 
7132 #if PRINT_ROOT_TICKS
7133 #define MAX_TICKS 0x100
7134 static tick_t mark_ticks[MAX_TICKS];
7135 static const char *mark_ticks_categories[MAX_TICKS];
7136 
7137 static void
7138 show_mark_ticks(void)
7139 {
7140  int i;
7141  fprintf(stderr, "mark ticks result:\n");
7142  for (i=0; i<MAX_TICKS; i++) {
7143  const char *category = mark_ticks_categories[i];
7144  if (category) {
7145  fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
7146  }
7147  else {
7148  break;
7149  }
7150  }
7151 }
7152 
7153 #endif /* PRINT_ROOT_TICKS */
7154 
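/* gc_mark_roots(): mark the VM's root set. The MARK_CHECKPOINT labels below name
 * the root categories: VM internals, the finalizer table, the current machine
 * context, protected global variables, end procs, the global variable table, and
 * the object-id tables. *categoryp is updated as each group is processed (used by
 * the consistency checker and by root tick profiling). */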
7155 static void
7156 gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
7157 {
7158  struct gc_list *list;
7159  rb_execution_context_t *ec = GET_EC();
7160  rb_vm_t *vm = rb_ec_vm_ptr(ec);
7161 
7162 #if PRINT_ROOT_TICKS
7163  tick_t start_tick = tick();
7164  int tick_count = 0;
7165  const char *prev_category = 0;
7166 
7167  if (mark_ticks_categories[0] == 0) {
7168  atexit(show_mark_ticks);
7169  }
7170 #endif
7171 
7172  if (categoryp) *categoryp = "xxx";
7173 
7174  objspace->rgengc.parent_object = Qfalse;
7175 
7176 #if PRINT_ROOT_TICKS
7177 #define MARK_CHECKPOINT_PRINT_TICK(category) do { \
7178  if (prev_category) { \
7179  tick_t t = tick(); \
7180  mark_ticks[tick_count] = t - start_tick; \
7181  mark_ticks_categories[tick_count] = prev_category; \
7182  tick_count++; \
7183  } \
7184  prev_category = category; \
7185  start_tick = tick(); \
7186 } while (0)
7187 #else /* PRINT_ROOT_TICKS */
7188 #define MARK_CHECKPOINT_PRINT_TICK(category)
7189 #endif
7190 
7191 #define MARK_CHECKPOINT(category) do { \
7192  if (categoryp) *categoryp = category; \
7193  MARK_CHECKPOINT_PRINT_TICK(category); \
7194 } while (0)
7195 
7196  MARK_CHECKPOINT("vm");
7197  SET_STACK_END;
7198  rb_vm_mark(vm);
7199  if (vm->self) gc_mark(objspace, vm->self);
7200 
7201  MARK_CHECKPOINT("finalizers");
7202  mark_finalizer_tbl(objspace, finalizer_table);
7203 
7204  MARK_CHECKPOINT("machine_context");
7205  mark_current_machine_context(objspace, ec);
7206 
7207  /* mark protected global variables */
7208  MARK_CHECKPOINT("global_list");
7209  for (list = global_list; list; list = list->next) {
7210  gc_mark_maybe(objspace, *list->varptr);
7211  }
7212 
7213  MARK_CHECKPOINT("end_proc");
7214  rb_mark_end_proc();
7215 
7216  MARK_CHECKPOINT("global_tbl");
7217  rb_gc_mark_global_tbl();
7218 
7219  MARK_CHECKPOINT("object_id");
7220  rb_gc_mark(objspace->next_object_id);
7221  mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */
7222 
7223  if (stress_to_class) rb_gc_mark(stress_to_class);
7224 
7225  MARK_CHECKPOINT("finish");
7226 #undef MARK_CHECKPOINT
7227 }
7228 
7229 #if RGENGC_CHECK_MODE >= 4
7230 
7231 #define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
7232 #define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
7233 #define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
7234 
7235 struct reflist {
7236  VALUE *list;
7237  int pos;
7238  int size;
7239 };
7240 
7241 static struct reflist *
7242 reflist_create(VALUE obj)
7243 {
7244  struct reflist *refs = xmalloc(sizeof(struct reflist));
7245  refs->size = 1;
7246  refs->list = ALLOC_N(VALUE, refs->size);
7247  refs->list[0] = obj;
7248  refs->pos = 1;
7249  return refs;
7250 }
7251 
7252 static void
7253 reflist_destruct(struct reflist *refs)
7254 {
7255  xfree(refs->list);
7256  xfree(refs);
7257 }
7258 
7259 static void
7260 reflist_add(struct reflist *refs, VALUE obj)
7261 {
7262  if (refs->pos == refs->size) {
7263  refs->size *= 2;
7264  SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
7265  }
7266 
7267  refs->list[refs->pos++] = obj;
7268 }
7269 
7270 static void
7271 reflist_dump(struct reflist *refs)
7272 {
7273  int i;
7274  for (i=0; i<refs->pos; i++) {
7275  VALUE obj = refs->list[i];
7276  if (IS_ROOTSIG(obj)) { /* root */
7277  fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
7278  }
7279  else {
7280  fprintf(stderr, "<%s>", obj_info(obj));
7281  }
7282  if (i+1 < refs->pos) fprintf(stderr, ", ");
7283  }
7284 }
7285 
7286 static int
7287 reflist_referred_from_machine_context(struct reflist *refs)
7288 {
7289  int i;
7290  for (i=0; i<refs->pos; i++) {
7291  VALUE obj = refs->list[i];
7292  if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
7293  }
7294  return 0;
7295 }
7296 
7297 struct allrefs {
7298  rb_objspace_t *objspace;
7299  /* a -> obj1
7300  * b -> obj1
7301  * c -> obj1
7302  * c -> obj2
7303  * d -> obj3
7304  * #=> {obj1 => [a, b, c], obj2 => [c, d]}
7305  */
7306  struct st_table *references;
7307  const char *category;
7308  VALUE root_obj;
7309  mark_stack_t mark_stack;
7310 };
7311 
7312 static int
7313 allrefs_add(struct allrefs *data, VALUE obj)
7314 {
7315  struct reflist *refs;
7316  st_data_t r;
7317 
7318  if (st_lookup(data->references, obj, &r)) {
7319  refs = (struct reflist *)r;
7320  reflist_add(refs, data->root_obj);
7321  return 0;
7322  }
7323  else {
7324  refs = reflist_create(data->root_obj);
7325  st_insert(data->references, obj, (st_data_t)refs);
7326  return 1;
7327  }
7328 }
7329 
7330 static void
7331 allrefs_i(VALUE obj, void *ptr)
7332 {
7333  struct allrefs *data = (struct allrefs *)ptr;
7334 
7335  if (allrefs_add(data, obj)) {
7336  push_mark_stack(&data->mark_stack, obj);
7337  }
7338 }
7339 
7340 static void
7341 allrefs_roots_i(VALUE obj, void *ptr)
7342 {
7343  struct allrefs *data = (struct allrefs *)ptr;
7344  if (strlen(data->category) == 0) rb_bug("!!!");
7345  data->root_obj = MAKE_ROOTSIG(data->category);
7346 
7347  if (allrefs_add(data, obj)) {
7348  push_mark_stack(&data->mark_stack, obj);
7349  }
7350 }
7351 #define PUSH_MARK_FUNC_DATA(v) do { \
7352  struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
7353  GET_RACTOR()->mfd = (v);
7354 
7355 #define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)
7356 
7357 static st_table *
7358 objspace_allrefs(rb_objspace_t *objspace)
7359 {
7360  struct allrefs data;
7361  struct gc_mark_func_data_struct mfd;
7362  VALUE obj;
7363  int prev_dont_gc = dont_gc_val();
7364  dont_gc_on();
7365 
7366  data.objspace = objspace;
7367  data.references = st_init_numtable();
7368  init_mark_stack(&data.mark_stack);
7369 
7370  mfd.mark_func = allrefs_roots_i;
7371  mfd.data = &data;
7372 
7373  /* traverse root objects */
7374  PUSH_MARK_FUNC_DATA(&mfd);
7375  GET_RACTOR()->mfd = &mfd;
7376  gc_mark_roots(objspace, &data.category);
7377  POP_MARK_FUNC_DATA();
7378 
7379  /* traverse rest objects reachable from root objects */
7380  while (pop_mark_stack(&data.mark_stack, &obj)) {
7381  rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
7382  }
7383  free_stack_chunks(&data.mark_stack);
7384 
7385  dont_gc_set(prev_dont_gc);
7386  return data.references;
7387 }
7388 
7389 static int
7390 objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
7391 {
7392  struct reflist *refs = (struct reflist *)value;
7393  reflist_destruct(refs);
7394  return ST_CONTINUE;
7395 }
7396 
7397 static void
7398 objspace_allrefs_destruct(struct st_table *refs)
7399 {
7400  st_foreach(refs, objspace_allrefs_destruct_i, 0);
7401  st_free_table(refs);
7402 }
7403 
7404 #if RGENGC_CHECK_MODE >= 5
7405 static int
7406 allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
7407 {
7408  VALUE obj = (VALUE)k;
7409  struct reflist *refs = (struct reflist *)v;
7410  fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
7411  reflist_dump(refs);
7412  fprintf(stderr, "\n");
7413  return ST_CONTINUE;
7414 }
7415 
7416 static void
7417 allrefs_dump(rb_objspace_t *objspace)
7418 {
7419  VALUE size = objspace->rgengc.allrefs_table->num_entries;
7420  fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
7421  st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
7422 }
7423 #endif
7424 
7425 static int
7426 gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
7427 {
7428  VALUE obj = k;
7429  struct reflist *refs = (struct reflist *)v;
7430  rb_objspace_t *objspace = (rb_objspace_t *)ptr;
7431 
7432  /* object should be marked or oldgen */
7433  if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
7434  fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
7435  fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
7436  reflist_dump(refs);
7437 
7438  if (reflist_referred_from_machine_context(refs)) {
7439  fprintf(stderr, " (marked from machine stack).\n");
7440  /* objects marked from the machine context can be false positives */
7441  }
7442  else {
7443  objspace->rgengc.error_count++;
7444  fprintf(stderr, "\n");
7445  }
7446  }
7447  return ST_CONTINUE;
7448 }
7449 
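/* gc_marks_check(): heavyweight verification used when RGENGC_CHECK_MODE >= 4. It
 * snapshots every reachable object together with the roots/objects that reference
 * it (objspace_allrefs), runs checker_func over that table, and aborts with
 * rb_bug() if errors were found (dumping the reference lists at
 * RGENGC_CHECK_MODE >= 5). The malloc counters are saved and restored so the check
 * itself does not perturb GC scheduling. */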
7450 static void
7451 gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
7452 {
7453  size_t saved_malloc_increase = objspace->malloc_params.increase;
7454 #if RGENGC_ESTIMATE_OLDMALLOC
7455  size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
7456 #endif
7457  VALUE already_disabled = rb_objspace_gc_disable(objspace);
7458 
7459  objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
7460 
7461  if (checker_func) {
7462  st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
7463  }
7464 
7465  if (objspace->rgengc.error_count > 0) {
7466 #if RGENGC_CHECK_MODE >= 5
7467  allrefs_dump(objspace);
7468 #endif
7469  if (checker_name) rb_bug("%s: GC has problem.", checker_name);
7470  }
7471 
7472  objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
7473  objspace->rgengc.allrefs_table = 0;
7474 
7475  if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
7476  objspace->malloc_params.increase = saved_malloc_increase;
7477 #if RGENGC_ESTIMATE_OLDMALLOC
7478  objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
7479 #endif
7480 }
7481 #endif /* RGENGC_CHECK_MODE >= 4 */
7482 
7483 struct verify_internal_consistency_struct {
7484  rb_objspace_t *objspace;
7485  int err_count;
7486  size_t live_object_count;
7487  size_t zombie_object_count;
7488 
7489  VALUE parent;
7490  size_t old_object_count;
7491  size_t remembered_shady_count;
7492 };
7493 
7494 static void
7495 check_generation_i(const VALUE child, void *ptr)
7496 {
7497  struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7498  const VALUE parent = data->parent;
7499 
7500  if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
7501 
7502  if (!RVALUE_OLD_P(child)) {
7503  if (!RVALUE_REMEMBERED(parent) &&
7504  !RVALUE_REMEMBERED(child) &&
7505  !RVALUE_UNCOLLECTIBLE(child)) {
7506  fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
7507  data->err_count++;
7508  }
7509  }
7510 }
7511 
7512 static void
7513 check_color_i(const VALUE child, void *ptr)
7514 {
7515  struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7516  const VALUE parent = data->parent;
7517 
7518  if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
7519  fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
7520  obj_info(parent), obj_info(child));
7521  data->err_count++;
7522  }
7523 }
7524 
7525 static void
7526 check_children_i(const VALUE child, void *ptr)
7527 {
7528  struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7529  if (check_rvalue_consistency_force(child, FALSE) != 0) {
7530  fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
7531  obj_info(child), obj_info(data->parent));
7532  rb_print_backtrace(); /* C backtrace will help to debug */
7533 
7534  data->err_count++;
7535  }
7536 }
7537 
7538 static int
7539 verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
7540  struct verify_internal_consistency_struct *data)
7541 {
7542  VALUE obj;
7543  rb_objspace_t *objspace = data->objspace;
7544 
7545  for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
7546  void *poisoned = asan_poisoned_object_p(obj);
7547  asan_unpoison_object(obj, false);
7548 
7549  if (is_live_object(objspace, obj)) {
7550  /* count objects */
7551  data->live_object_count++;
7552  data->parent = obj;
7553 
7554  /* Normally, we don't expect T_MOVED objects to be in the heap.
7555  * But they can stay alive on the stack. */
7556  if (!gc_object_moved_p(objspace, obj)) {
7557  /* moved slots don't have children */
7558  rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
7559  }
7560 
7561  /* check health of children */
7562  if (RVALUE_OLD_P(obj)) data->old_object_count++;
7563  if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
7564 
7565  if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
7566  /* reachable objects from an oldgen object should be old or (young with remember) */
7567  data->parent = obj;
7568  rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
7569  }
7570 
7571  if (is_incremental_marking(objspace)) {
7572  if (RVALUE_BLACK_P(obj)) {
7573  /* reachable objects from black objects should be black or grey objects */
7574  data->parent = obj;
7575  rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
7576  }
7577  }
7578  }
7579  else {
7580  if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
7581  GC_ASSERT((RBASIC(obj)->flags & ~FL_SEEN_OBJ_ID) == T_ZOMBIE);
7582  data->zombie_object_count++;
7583  }
7584  }
7585  if (poisoned) {
7586  GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
7587  asan_poison_object(obj);
7588  }
7589  }
7590 
7591  return 0;
7592 }
7593 
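/* gc_verify_heap_page(): cross-check one page's summary flags against its per-slot
 * bitmaps: uncollectible shady objects imply flags.has_uncollectible_shady_objects,
 * remembered (marking) objects imply flags.has_remembered_objects when not
 * incrementally marking, and final_slots must match the number of T_ZOMBIE slots
 * actually present. */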
7594 static int
7595 gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
7596 {
7597  int i;
7598  unsigned int has_remembered_shady = FALSE;
7599  unsigned int has_remembered_old = FALSE;
7600  int remembered_old_objects = 0;
7601  int free_objects = 0;
7602  int zombie_objects = 0;
7603  int stride = page->slot_size / sizeof(RVALUE);
7604 
7605  for (i=0; i<page->total_slots; i+=stride) {
7606  VALUE val = (VALUE)&page->start[i];
7607  void *poisoned = asan_poisoned_object_p(val);
7608  asan_unpoison_object(val, false);
7609 
7610  if (RBASIC(val) == 0) free_objects++;
7611  if (BUILTIN_TYPE(val) == T_ZOMBIE) zombie_objects++;
7612  if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
7613  has_remembered_shady = TRUE;
7614  }
7615  if (RVALUE_PAGE_MARKING(page, val)) {
7616  has_remembered_old = TRUE;
7617  remembered_old_objects++;
7618  }
7619 
7620  if (poisoned) {
7621  GC_ASSERT(BUILTIN_TYPE(val) == T_NONE);
7622  asan_poison_object(val);
7623  }
7624  }
7625 
7626  if (!is_incremental_marking(objspace) &&
7627  page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
7628 
7629  for (i=0; i<page->total_slots; i++) {
7630  VALUE val = (VALUE)&page->start[i];
7631  if (RVALUE_PAGE_MARKING(page, val)) {
7632  fprintf(stderr, "marking -> %s\n", obj_info(val));
7633  }
7634  }
7635  rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
7636  (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
7637  }
7638 
7639  if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
7640  rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
7641  (void *)page, obj ? obj_info(obj) : "");
7642  }
7643 
7644  if (0) {
7645  /* free_slots may not be equal to free_objects */
7646  if (page->free_slots != free_objects) {
7647  rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, page->free_slots, free_objects);
7648  }
7649  }
7650  if (page->final_slots != zombie_objects) {
7651  rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, page->final_slots, zombie_objects);
7652  }
7653 
7654  return remembered_old_objects;
7655 }
7656 
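/* For every page on `head`: ensure each freelist entry is T_NONE, then verify
 * pages that claim to have no remembered objects. */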
7657 static int
7658 gc_verify_heap_pages_(rb_objspace_t *objspace, struct list_head *head)
7659 {
7660  int remembered_old_objects = 0;
7661  struct heap_page *page = 0;
7662 
7663  list_for_each(head, page, page_node) {
7664  asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
7665  RVALUE *p = page->freelist;
7666  while (p) {
7667  VALUE vp = (VALUE)p;
7668  VALUE prev = vp;
7669  asan_unpoison_object(vp, false);
7670  if (BUILTIN_TYPE(vp) != T_NONE) {
7671  fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
7672  }
7673  p = p->as.free.next;
7674  asan_poison_object(prev);
7675  }
7676  asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
7677 
7678  if (page->flags.has_remembered_objects == FALSE) {
7679  remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
7680  }
7681  }
7682 
7683  return remembered_old_objects;
7684 }
7685 
7686 static int
7687 gc_verify_heap_pages(rb_objspace_t *objspace)
7688 {
7689  int remembered_old_objects = 0;
7690  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7691  remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages));
7692  remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages));
7693  }
7694  return remembered_old_objects;
7695 }
7696 
7697 /*
7698  * call-seq:
7699  * GC.verify_internal_consistency -> nil
7700  *
7701  * Verify internal consistency.
7702  *
7703  * This method is implementation specific.
7704  * Currently it checks generational consistency
7705  * when RGenGC is supported.
7706  */
7707 static VALUE
7708 gc_verify_internal_consistency_m(VALUE dummy)
7709 {
7710  gc_verify_internal_consistency(&rb_objspace);
7711  return Qnil;
7712 }
7713 
7714 static void
7715 gc_verify_internal_consistency_(rb_objspace_t *objspace)
7716 {
7717  struct verify_internal_consistency_struct data = {0};
7718 
7719  data.objspace = objspace;
7720  gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
7721 
7722  /* check relations */
7723  for (size_t i = 0; i < heap_allocated_pages; i++) {
7724  struct heap_page *page = heap_pages_sorted[i];
7725  short slot_size = page->slot_size;
7726 
7727  uintptr_t start = (uintptr_t)page->start;
7728  uintptr_t end = start + page->total_slots * slot_size;
7729 
7730  verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
7731  }
7732 
7733  if (data.err_count != 0) {
7734 #if RGENGC_CHECK_MODE >= 5
7735  objspace->rgengc.error_count = data.err_count;
7736  gc_marks_check(objspace, NULL, NULL);
7737  allrefs_dump(objspace);
7738 #endif
7739  rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
7740  }
7741 
7742  /* check heap_page status */
7743  gc_verify_heap_pages(objspace);
7744 
7745  /* check counters */
7746 
7747  if (!is_lazy_sweeping(objspace) &&
7748  !finalizing &&
7749  ruby_single_main_ractor != NULL) {
7750  if (objspace_live_slots(objspace) != data.live_object_count) {
7751  fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", "
7752  "objspace->profile.total_freed_objects: %"PRIdSIZE"\n",
7753  heap_pages_final_slots, objspace->profile.total_freed_objects);
7754  rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
7755  objspace_live_slots(objspace), data.live_object_count);
7756  }
7757  }
7758 
7759  if (!is_marking(objspace)) {
7760  if (objspace->rgengc.old_objects != data.old_object_count) {
7761  rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
7762  objspace->rgengc.old_objects, data.old_object_count);
7763  }
7764  if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
7765  rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
7766  objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
7767  }
7768  }
7769 
7770  if (!finalizing) {
7771  size_t list_count = 0;
7772 
7773  {
7774  VALUE z = heap_pages_deferred_final;
7775  while (z) {
7776  list_count++;
7777  z = RZOMBIE(z)->next;
7778  }
7779  }
7780 
7781  if (heap_pages_final_slots != data.zombie_object_count ||
7782  heap_pages_final_slots != list_count) {
7783 
7784  rb_bug("inconsistent finalizing object count:\n"
7785  " expect %"PRIuSIZE"\n"
7786  " but %"PRIuSIZE" zombies\n"
7787  " heap_pages_deferred_final list has %"PRIuSIZE" items.",
7788  heap_pages_final_slots,
7789  data.zombie_object_count,
7790  list_count);
7791  }
7792  }
7793 
7794  gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
7795 }
7796 
7797 static void
7798 gc_verify_internal_consistency(rb_objspace_t *objspace)
7799 {
7800  RB_VM_LOCK_ENTER();
7801  {
7802  rb_vm_barrier(); // stop other ractors
7803 
7804  unsigned int prev_during_gc = during_gc;
7805  during_gc = FALSE; // stop gc here
7806  {
7807  gc_verify_internal_consistency_(objspace);
7808  }
7809  during_gc = prev_during_gc;
7810  }
7811  RB_VM_LOCK_LEAVE();
7812 }
7813 
7814 void
7815 rb_gc_verify_internal_consistency(void)
7816 {
7817  gc_verify_internal_consistency(&rb_objspace);
7818 }
7819 
7820 static VALUE
7821 gc_verify_transient_heap_internal_consistency(VALUE dmy)
7822 {
7823  rb_transient_heap_verify();
7824  return Qnil;
7825 }
7826 
7827 /* marks */
7828 
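/* Enter the marking phase. A full (major) mark resets mark and remember bits
 * and rescans everything; a minor mark only rescans the remembered set on top
 * of the already-marked old generation. */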
7829 static void
7830 gc_marks_start(rb_objspace_t *objspace, int full_mark)
7831 {
7832  /* start marking */
7833  gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
7834  gc_mode_transition(objspace, gc_mode_marking);
7835 
7836  if (full_mark) {
7837 #if GC_ENABLE_INCREMENTAL_MARK
7838  objspace->rincgc.step_slots = (objspace->marked_slots * 2) / ((objspace->rincgc.pooled_slots / HEAP_PAGE_OBJ_LIMIT) + 1);
7839 
7840  if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
7841  "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
7842  "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
7843  objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
7844 #endif
7845  objspace->flags.during_minor_gc = FALSE;
7846  if (ruby_enable_autocompact) {
7847  objspace->flags.during_compacting |= TRUE;
7848  }
7849  objspace->profile.major_gc_count++;
7850  objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
7851  objspace->rgengc.old_objects = 0;
7852  objspace->rgengc.last_major_gc = objspace->profile.count;
7853  objspace->marked_slots = 0;
7854 
7855  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7856  rgengc_mark_and_rememberset_clear(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
7857  }
7858  }
7859  else {
7860  objspace->flags.during_minor_gc = TRUE;
7861  objspace->marked_slots =
7862  objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
7863  objspace->profile.minor_gc_count++;
7864 
7865  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7866  rgengc_rememberset_mark(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
7867  }
7868  }
7869 
7870  gc_mark_roots(objspace, NULL);
7871 
7872  gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
7873  full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
7874 }
7875 
7876 #if GC_ENABLE_INCREMENTAL_MARK
7877 static inline void
7878 gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
7879 {
7880  if (bits) {
7881  do {
7882  if (bits & 1) {
7883  gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
7884  GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
7885  GC_ASSERT(RVALUE_MARKED((VALUE)p));
7886  gc_mark_children(objspace, (VALUE)p);
7887  }
7888  p += sizeof(RVALUE);
7889  bits >>= 1;
7890  } while (bits);
7891  }
7892 }
7893 
7894 static void
7895 gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
7896 {
7897  struct heap_page *page = 0;
7898 
7899  list_for_each(&heap->pages, page, page_node) {
7900  bits_t *mark_bits = page->mark_bits;
7901  bits_t *wbun_bits = page->wb_unprotected_bits;
7902  RVALUE *p = page->start;
7903  size_t j;
7904 
7905  bits_t bits = mark_bits[0] & wbun_bits[0];
7906  bits >>= NUM_IN_PAGE(p);
7907  gc_marks_wb_unprotected_objects_plane(objspace, (uintptr_t)p, bits);
7908  p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
7909 
7910  for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
7911  bits_t bits = mark_bits[j] & wbun_bits[j];
7912 
7913  gc_marks_wb_unprotected_objects_plane(objspace, (uintptr_t)p, bits);
7914  p += BITS_BITLENGTH;
7915  }
7916  }
7917 
7918  gc_mark_stacked_objects_all(objspace);
7919 }
7920 
7921 static struct heap_page *
7922 heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
7923 {
7924  struct heap_page *page = heap->pooled_pages;
7925 
7926  if (page) {
7927  heap->pooled_pages = page->free_next;
7928  heap_add_freepage(heap, page);
7929  }
7930 
7931  return page;
7932 }
7933 #endif
7934 
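/* Try to finish the marking phase. For incremental GC this may bail out and
 * return FALSE (more marking is needed); otherwise it re-marks WB-unprotected
 * objects, recomputes heap growth targets and decides whether the next GC
 * should be a major one. */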
7935 static int
7936 gc_marks_finish(rb_objspace_t *objspace)
7937 {
7938 #if GC_ENABLE_INCREMENTAL_MARK
7939  /* finish incremental GC */
7940  if (is_incremental_marking(objspace)) {
7941  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7942  rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
7943  if (heap->pooled_pages) {
7944  heap_move_pooled_pages_to_free_pages(heap);
7945  gc_report(1, objspace, "gc_marks_finish: pooled pages still exist. retry.\n");
7946  return FALSE; /* continue marking phase */
7947  }
7948  }
7949 
7950  if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
7951  rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
7952  mark_stack_size(&objspace->mark_stack));
7953  }
7954 
7955  gc_mark_roots(objspace, 0);
7956 
7957  if (is_mark_stack_empty(&objspace->mark_stack) == FALSE) {
7958  gc_report(1, objspace, "gc_marks_finish: not empty (%"PRIdSIZE"). retry.\n",
7959  mark_stack_size(&objspace->mark_stack));
7960  return FALSE;
7961  }
7962 
7963 #if RGENGC_CHECK_MODE >= 2
7964  if (gc_verify_heap_pages(objspace) != 0) {
7965  rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
7966  }
7967 #endif
7968 
7969  objspace->flags.during_incremental_marking = FALSE;
7970  /* check children of all marked wb-unprotected objects */
7971  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7972  gc_marks_wb_unprotected_objects(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
7973  }
7974  }
7975 #endif /* GC_ENABLE_INCREMENTAL_MARK */
7976 
7977 #if RGENGC_CHECK_MODE >= 2
7978  gc_verify_internal_consistency(objspace);
7979 #endif
7980 
7981  if (is_full_marking(objspace)) {
7982  /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
7983  const double r = gc_params.oldobject_limit_factor;
7984  objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
7985  objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
7986  }
7987 
7988 #if RGENGC_CHECK_MODE >= 4
7989  during_gc = FALSE;
7990  gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
7991  during_gc = TRUE;
7992 #endif
7993 
7994  {
7995  /* decide full GC is needed or not */
7996  size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
7997  size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
7998  size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
7999  size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
8000  int full_marking = is_full_marking(objspace);
8001  const int r_cnt = GET_VM()->ractor.cnt;
8002  const int r_mul = r_cnt > 8 ? 8 : r_cnt; // up to 8
8003 
8004  GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);
8005 
8006  /* setup free-able page counts */
8007  if (max_free_slots < gc_params.heap_init_slots * r_mul) {
8008  max_free_slots = gc_params.heap_init_slots * r_mul;
8009  }
8010 
8011  if (sweep_slots > max_free_slots) {
8012  heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
8013  }
8014  else {
8015  heap_pages_freeable_pages = 0;
8016  }
8017 
8018  /* check free_min */
8019  if (min_free_slots < gc_params.heap_free_slots * r_mul) {
8020  min_free_slots = gc_params.heap_free_slots * r_mul;
8021  }
8022 
8023  if (sweep_slots < min_free_slots) {
8024  if (!full_marking) {
8025  if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
8026  full_marking = TRUE;
8027  /* do not update last_major_gc, because full marking is not done. */
8028  /* goto increment; */
8029  }
8030  else {
8031  gc_report(1, objspace, "gc_marks_finish: next is full GC!!\n");
8032  objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
8033  }
8034  }
8035 
8036 #if !USE_RVARGC
8037  if (full_marking) {
8038  /* increment: */
8039  gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
8040  rb_size_pool_t *size_pool = &size_pools[0];
8041  size_pool_allocatable_pages_set(objspace, size_pool, heap_extend_pages(objspace, sweep_slots, total_slots, heap_allocated_pages + heap_allocatable_pages(objspace)));
8042 
8043  heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
8044  }
8045 #endif
8046  }
8047 
8048  if (full_marking) {
8049  /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
8050  const double r = gc_params.oldobject_limit_factor;
8051  objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
8052  objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8053  }
8054 
8055  if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
8056  objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
8057  }
8058  if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
8059  objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
8060  }
8061  if (RGENGC_FORCE_MAJOR_GC) {
8062  objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
8063  }
8064 
8065  gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
8066  "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
8067  "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
8068  objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
8069  objspace->rgengc.need_major_gc ? "major" : "minor");
8070  }
8071 
8072  rb_transient_heap_finish_marking();
8073  rb_ractor_finish_marking();
8074 
8075  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);
8076 
8077  return TRUE;
8078 }
8079 
8080 #if GC_ENABLE_INCREMENTAL_MARK
8081 static void
8082 gc_marks_step(rb_objspace_t *objspace, size_t slots)
8083 {
8084  GC_ASSERT(is_marking(objspace));
8085 
8086  if (gc_mark_stacked_objects_incremental(objspace, slots)) {
8087  if (gc_marks_finish(objspace)) {
8088  /* finish */
8089  gc_sweep(objspace);
8090  }
8091  }
8092  if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE"\n", objspace->marked_slots);
8093 }
8094 #endif
8095 
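/* Drain whatever marking work remains (incremental or not) and transition to
 * the sweep phase. */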
8096 static void
8097 gc_marks_rest(rb_objspace_t *objspace)
8098 {
8099  gc_report(1, objspace, "gc_marks_rest\n");
8100 
8101 #if GC_ENABLE_INCREMENTAL_MARK
8102  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8103  SIZE_POOL_EDEN_HEAP(&size_pools[i])->pooled_pages = NULL;
8104  }
8105 #endif
8106 
8107  if (is_incremental_marking(objspace)) {
8108  do {
8109  while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
8110  } while (gc_marks_finish(objspace) == FALSE);
8111  }
8112  else {
8113  gc_mark_stacked_objects_all(objspace);
8114  gc_marks_finish(objspace);
8115  }
8116 
8117  /* move to sweep */
8118  gc_sweep(objspace);
8119 }
8120 
8121 static void
8122 gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
8123 {
8124  GC_ASSERT(dont_gc_val() == FALSE);
8125 #if GC_ENABLE_INCREMENTAL_MARK
8126 
8127  unsigned int lock_lev;
8128  gc_enter(objspace, gc_enter_event_mark_continue, &lock_lev);
8129 
8130  int slots = 0;
8131  const char *from;
8132 
8133  if (heap->pooled_pages) {
8134  while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) {
8135  struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
8136  slots += page->free_slots;
8137  }
8138  from = "pooled-pages";
8139  }
8140  else if (heap_increment(objspace, size_pool, heap)) {
8141  slots = heap->free_pages->free_slots;
8142  from = "incremented-pages";
8143  }
8144 
8145  if (slots > 0) {
8146  gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n",
8147  slots, from);
8148  gc_marks_step(objspace, objspace->rincgc.step_slots);
8149  }
8150  else {
8151  gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
8152  mark_stack_size(&objspace->mark_stack));
8153  gc_marks_rest(objspace);
8154  }
8155 
8156  gc_exit(objspace, gc_enter_event_mark_continue, &lock_lev);
8157 #endif
8158 }
8159 
8160 static void
8161 gc_marks(rb_objspace_t *objspace, int full_mark)
8162 {
8163  gc_prof_mark_timer_start(objspace);
8164 
8165  /* setup marking */
8166 
8167  gc_marks_start(objspace, full_mark);
8168  if (!is_incremental_marking(objspace)) {
8169  gc_marks_rest(objspace);
8170  }
8171 
8172 #if RGENGC_PROFILE > 0
8173  if (gc_prof_record(objspace)) {
8174  gc_profile_record *record = gc_prof_record(objspace);
8175  record->old_objects = objspace->rgengc.old_objects;
8176  }
8177 #endif
8178  gc_prof_mark_timer_stop(objspace);
8179 }
8180 
8181 /* RGENGC */
8182 
8183 static void
8184 gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
8185 {
8186  if (level <= RGENGC_DEBUG) {
8187  char buf[1024];
8188  FILE *out = stderr;
8189  va_list args;
8190  const char *status = " ";
8191 
8192  if (during_gc) {
8193  status = is_full_marking(objspace) ? "+" : "-";
8194  }
8195  else {
8196  if (is_lazy_sweeping(objspace)) {
8197  status = "S";
8198  }
8199  if (is_incremental_marking(objspace)) {
8200  status = "M";
8201  }
8202  }
8203 
8204  va_start(args, fmt);
8205  vsnprintf(buf, 1024, fmt, args);
8206  va_end(args);
8207 
8208  fprintf(out, "%s|", status);
8209  fputs(buf, out);
8210  }
8211 }
8212 
8213 /* bit operations */
8214 
8215 static int
8216 rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
8217 {
8218  return RVALUE_REMEMBERED(obj);
8219 }
8220 
8221 static int
8222 rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
8223 {
8224  struct heap_page *page = GET_HEAP_PAGE(obj);
8225  bits_t *bits = &page->marking_bits[0];
8226 
8227  GC_ASSERT(!is_incremental_marking(objspace));
8228 
8229  if (MARKED_IN_BITMAP(bits, obj)) {
8230  return FALSE;
8231  }
8232  else {
8233  page->flags.has_remembered_objects = TRUE;
8234  MARK_IN_BITMAP(bits, obj);
8235  return TRUE;
8236  }
8237 }
8238 
8239 /* wb, etc */
8240 
8241 /* return FALSE if already remembered */
8242 static int
8243 rgengc_remember(rb_objspace_t *objspace, VALUE obj)
8244 {
8245  gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
8246  rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
8247 
8248  check_rvalue_consistency(obj);
8249 
8250  if (RGENGC_CHECK_MODE) {
8251  if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
8252  }
8253 
8254 #if RGENGC_PROFILE > 0
8255  if (!rgengc_remembered(objspace, obj)) {
8256  if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8257  objspace->profile.total_remembered_normal_object_count++;
8258 #if RGENGC_PROFILE >= 2
8259  objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
8260 #endif
8261  }
8262  }
8263 #endif /* RGENGC_PROFILE > 0 */
8264 
8265  return rgengc_remembersetbits_set(objspace, obj);
8266 }
8267 
8268 static int
8269 rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj)
8270 {
8271  int result = rgengc_remembersetbits_get(objspace, obj);
8272  check_rvalue_consistency(obj);
8273  return result;
8274 }
8275 
8276 static int
8277 rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
8278 {
8279  gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
8280  return rgengc_remembered_sweep(objspace, obj);
8281 }
8282 
8283 #ifndef PROFILE_REMEMBERSET_MARK
8284 #define PROFILE_REMEMBERSET_MARK 0
8285 #endif
8286 
8287 static inline void
8288 rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
8289 {
8290  if (bitset) {
8291  do {
8292  if (bitset & 1) {
8293  VALUE obj = (VALUE)p;
8294  gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
8295  GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
8296  GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
8297 
8298  gc_mark_children(objspace, obj);
8299  }
8300  p += sizeof(RVALUE);
8301  bitset >>= 1;
8302  } while (bitset);
8303  }
8304 }
8305 
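/* Minor GC: mark the children of every remembered object and of every
 * uncollectible WB-unprotected object, clearing the remembered set bits as
 * they are consumed. */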
8306 static void
8307 rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
8308 {
8309  size_t j;
8310  struct heap_page *page = 0;
8311 #if PROFILE_REMEMBERSET_MARK
8312  int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
8313 #endif
8314  gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
8315 
8316  list_for_each(&heap->pages, page, page_node) {
8317  if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
8318  RVALUE *p = page->start;
8319  bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
8320  bits_t *marking_bits = page->marking_bits;
8321  bits_t *uncollectible_bits = page->uncollectible_bits;
8322  bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
8323 #if PROFILE_REMEMBERSET_MARK
8324  if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
8325  else if (page->flags.has_remembered_objects) has_old++;
8326  else if (page->flags.has_uncollectible_shady_objects) has_shady++;
8327 #endif
8328  for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8329  bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
8330  marking_bits[j] = 0;
8331  }
8332  page->flags.has_remembered_objects = FALSE;
8333 
8334  bitset = bits[0];
8335  bitset >>= NUM_IN_PAGE(p);
8336  rgengc_rememberset_mark_plane(objspace, (uintptr_t)p, bitset);
8337  p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
8338 
8339  for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8340  bitset = bits[j];
8341  rgengc_rememberset_mark_plane(objspace, (uintptr_t)p, bitset);
8342  p += BITS_BITLENGTH;
8343  }
8344  }
8345 #if PROFILE_REMEMBERSET_MARK
8346  else {
8347  skip++;
8348  }
8349 #endif
8350  }
8351 
8352 #if PROFILE_REMEMBERSET_MARK
8353  fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
8354 #endif
8355  gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
8356 }
8357 
8358 static void
8359 rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
8360 {
8361  struct heap_page *page = 0;
8362 
8363  list_for_each(&heap->pages, page, page_node) {
8364  memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8365  memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8366  memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8367  memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8368  page->flags.has_uncollectible_shady_objects = FALSE;
8369  page->flags.has_remembered_objects = FALSE;
8370  }
8371 }
8372 
8373 /* RGENGC: APIs */
8374 
8375 NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
8376 
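/* Generational write barrier slow path: an old object `a` now references a
 * young object `b`, so remember `a` to keep `b` reachable during minor GC. */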
8377 static void
8378 gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
8379 {
8380  if (RGENGC_CHECK_MODE) {
8381  if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
8382  if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
8383  if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
8384  }
8385 
8386 #if 1
8387  /* mark `a' and remember (default behavior) */
8388  if (!rgengc_remembered(objspace, a)) {
8389  RB_VM_LOCK_ENTER_NO_BARRIER();
8390  {
8391  rgengc_remember(objspace, a);
8392  }
8393  RB_VM_LOCK_LEAVE_NO_BARRIER();
8394  gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
8395  }
8396 #else
8397  /* mark `b' and remember */
8398  MARK_IN_BITMAP(GET_HEAP_MARK_BITS(b), b);
8399  if (RVALUE_WB_UNPROTECTED(b)) {
8400  gc_remember_unprotected(objspace, b);
8401  }
8402  else {
8403  RVALUE_AGE_SET_OLD(objspace, b);
8404  rgengc_remember(objspace, b);
8405  }
8406 
8407  gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
8408 #endif
8409 
8410  check_rvalue_consistency(a);
8411  check_rvalue_consistency(b);
8412 }
8413 
8414 #if GC_ENABLE_INCREMENTAL_MARK
8415 static void
8416 gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
8417 {
8418  gc_mark_set_parent(objspace, parent);
8419  rgengc_check_relation(objspace, obj);
8420  if (gc_mark_set(objspace, obj) == FALSE) return;
8421  gc_aging(objspace, obj);
8422  gc_grey(objspace, obj);
8423 }
8424 
8425 NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
8426 
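/* Incremental-marking write barrier slow path: when a black object `a` gains a
 * reference to a white object `b`, mark (or remember/promote) `b` so that the
 * tri-color invariant is preserved. */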
8427 static void
8428 gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
8429 {
8430  gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
8431 
8432  if (RVALUE_BLACK_P(a)) {
8433  if (RVALUE_WHITE_P(b)) {
8434  if (!RVALUE_WB_UNPROTECTED(a)) {
8435  gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
8436  gc_mark_from(objspace, b, a);
8437  }
8438  }
8439  else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
8440  if (!RVALUE_WB_UNPROTECTED(b)) {
8441  gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
8442  RVALUE_AGE_SET_OLD(objspace, b);
8443 
8444  if (RVALUE_BLACK_P(b)) {
8445  gc_grey(objspace, b);
8446  }
8447  }
8448  else {
8449  gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
8450  gc_remember_unprotected(objspace, b);
8451  }
8452  }
8453 
8454  if (UNLIKELY(objspace->flags.during_compacting)) {
8455  MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
8456  }
8457  }
8458 }
8459 #else
8460 #define gc_writebarrier_incremental(a, b, objspace)
8461 #endif
8462 
8463 void
8464 rb_gc_writebarrier(VALUE a, VALUE b)
8465 {
8466  rb_objspace_t *objspace = &rb_objspace;
8467 
8468  if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const");
8469  if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const");
8470 
8471  retry:
8472  if (!is_incremental_marking(objspace)) {
8473  if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
8474  // do nothing
8475  }
8476  else {
8477  gc_writebarrier_generational(a, b, objspace);
8478  }
8479  }
8480  else {
8481  bool retry = false;
8482  /* slow path */
8483  RB_VM_LOCK_ENTER_NO_BARRIER();
8484  {
8485  if (is_incremental_marking(objspace)) {
8486  gc_writebarrier_incremental(a, b, objspace);
8487  }
8488  else {
8489  retry = true;
8490  }
8491  }
8492  RB_VM_LOCK_LEAVE_NO_BARRIER();
8493 
8494  if (retry) goto retry;
8495  }
8496  return;
8497 }
8498 
8499 void
8500 rb_gc_writebarrier_unprotect(VALUE obj)
8501 {
8502  if (RVALUE_WB_UNPROTECTED(obj)) {
8503  return;
8504  }
8505  else {
8506  rb_objspace_t *objspace = &rb_objspace;
8507 
8508  gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
8509  rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
8510 
8511  if (RVALUE_OLD_P(obj)) {
8512  gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
8513  RVALUE_DEMOTE(objspace, obj);
8514  gc_mark_set(objspace, obj);
8515  gc_remember_unprotected(objspace, obj);
8516 
8517 #if RGENGC_PROFILE
8518  objspace->profile.total_shade_operation_count++;
8519 #if RGENGC_PROFILE >= 2
8520  objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
8521 #endif /* RGENGC_PROFILE >= 2 */
8522 #endif /* RGENGC_PROFILE */
8523  }
8524  else {
8525  RVALUE_AGE_RESET(obj);
8526  }
8527 
8528  RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
8529  MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
8530  }
8531 }
8532 
8533 /*
8534  * remember `obj' if needed.
8535  */
8536 MJIT_FUNC_EXPORTED void
8537 rb_gc_writebarrier_remember(VALUE obj)
8538 {
8539  rb_objspace_t *objspace = &rb_objspace;
8540 
8541  gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
8542 
8543  if (is_incremental_marking(objspace)) {
8544  if (RVALUE_BLACK_P(obj)) {
8545  gc_grey(objspace, obj);
8546  }
8547  }
8548  else {
8549  if (RVALUE_OLD_P(obj)) {
8550  rgengc_remember(objspace, obj);
8551  }
8552  }
8553 }
8554 
8555 static st_table *rgengc_unprotect_logging_table;
8556 
8557 static int
8558 rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
8559 {
8560  fprintf(stderr, "%s\t%"PRIuVALUE"\n", (char *)key, (VALUE)val);
8561  return ST_CONTINUE;
8562 }
8563 
8564 static void
8565 rgengc_unprotect_logging_exit_func(void)
8566 {
8567  st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
8568 }
8569 
8570 void
8571 rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
8572 {
8573  VALUE obj = (VALUE)objptr;
8574 
8575  if (rgengc_unprotect_logging_table == 0) {
8576  rgengc_unprotect_logging_table = st_init_strtable();
8577  atexit(rgengc_unprotect_logging_exit_func);
8578  }
8579 
8580  if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8581  char buff[0x100];
8582  st_data_t cnt = 1;
8583  char *ptr = buff;
8584 
8585  snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);
8586 
8587  if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
8588  cnt++;
8589  }
8590  else {
8591  ptr = (strdup)(buff);
8592  if (!ptr) rb_memerror();
8593  }
8594  st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
8595  }
8596 }
8597 
8598 void
8599 rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
8600 {
8601  rb_objspace_t *objspace = &rb_objspace;
8602 
8603  if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
8604  if (!RVALUE_OLD_P(dest)) {
8605  MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
8606  RVALUE_AGE_RESET_RAW(dest);
8607  }
8608  else {
8609  RVALUE_DEMOTE(objspace, dest);
8610  }
8611  }
8612 
8613  check_rvalue_consistency(dest);
8614 }
8615 
8616 /* RGENGC analysis information */
8617 
8618 VALUE
8619 rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
8620 {
8621  return RVALUE_WB_UNPROTECTED(obj) ? Qfalse : Qtrue;
8622 }
8623 
8624 VALUE
8625 rb_obj_rgengc_promoted_p(VALUE obj)
8626 {
8627  return RBOOL(OBJ_PROMOTED(obj));
8628 }
8629 
8630 size_t
8631 rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
8632 {
8633  size_t n = 0;
8634  static ID ID_marked;
8635  static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
8636 
8637  if (!ID_marked) {
8638 #define I(s) ID_##s = rb_intern(#s);
8639  I(marked);
8640  I(wb_protected);
8641  I(old);
8642  I(marking);
8643  I(uncollectible);
8644  I(pinned);
8645 #undef I
8646  }
8647 
8648  if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
8649  if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
8650  if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
8651  if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
8652  if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
8653  if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
8654  return n;
8655 }
8656 
8657 /* GC */
8658 
8659 void
8660 rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
8661 {
8662  for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
8663  rb_ractor_newobj_size_pool_cache_t *cache = &newobj_cache->size_pool_caches[size_pool_idx];
8664 
8665  struct heap_page *page = cache->using_page;
8666  RVALUE *freelist = cache->freelist;
8667  RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
8668 
8669  heap_page_freelist_append(page, freelist);
8670 
8671  cache->using_page = NULL;
8672  cache->freelist = NULL;
8673  }
8674 }
8675 
8676 void
8677 rb_gc_force_recycle(VALUE obj)
8678 {
8679  /* no-op */
8680 }
8681 
8682 #ifndef MARK_OBJECT_ARY_BUCKET_SIZE
8683 #define MARK_OBJECT_ARY_BUCKET_SIZE 1024
8684 #endif
8685 
8686 void
8687 rb_gc_register_mark_object(VALUE obj)
8688 {
8689  if (!is_pointer_to_heap(&rb_objspace, (void *)obj))
8690  return;
8691 
8692  RB_VM_LOCK_ENTER();
8693  {
8694  VALUE ary_ary = GET_VM()->mark_object_ary;
8695  VALUE ary = rb_ary_last(0, 0, ary_ary);
8696 
8697  if (NIL_P(ary) || RARRAY_LEN(ary) >= MARK_OBJECT_ARY_BUCKET_SIZE) {
8698  ary = rb_ary_tmp_new(MARK_OBJECT_ARY_BUCKET_SIZE);
8699  rb_ary_push(ary_ary, ary);
8700  }
8701 
8702  rb_ary_push(ary, obj);
8703  }
8704  RB_VM_LOCK_LEAVE();
8705 }
8706 
8707 void
8708 rb_gc_register_address(VALUE *addr)
8709 {
8710  rb_objspace_t *objspace = &rb_objspace;
8711  struct gc_list *tmp;
8712 
8713  tmp = ALLOC(struct gc_list);
8714  tmp->next = global_list;
8715  tmp->varptr = addr;
8716  global_list = tmp;
8717 }
8718 
8719 void
8720 rb_gc_unregister_address(VALUE *addr)
8721 {
8722  rb_objspace_t *objspace = &rb_objspace;
8723  struct gc_list *tmp = global_list;
8724 
8725  if (tmp->varptr == addr) {
8726  global_list = tmp->next;
8727  xfree(tmp);
8728  return;
8729  }
8730  while (tmp->next) {
8731  if (tmp->next->varptr == addr) {
8732  struct gc_list *t = tmp->next;
8733 
8734  tmp->next = tmp->next->next;
8735  xfree(t);
8736  break;
8737  }
8738  tmp = tmp->next;
8739  }
8740 }
8741 
8742 void
8743 rb_global_variable(VALUE *var)
8744 {
8745  rb_gc_register_address(var);
8746 }
8747 
8748 #define GC_NOTIFY 0
8749 
8750 enum {
8751  gc_stress_no_major,
8752  gc_stress_no_immediate_sweep,
8753  gc_stress_full_mark_after_malloc,
8754  gc_stress_max
8755 };
8756 
8757 #define gc_stress_full_mark_after_malloc_p() \
8758  (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
8759 
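/* Make sure `heap` has at least one free page to allocate from, growing the
 * size pool by a single page if necessary (used when GC is not allowed). */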
8760 static void
8761 heap_ready_to_gc(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
8762 {
8763  if (!heap->free_pages) {
8764  if (!heap_increment(objspace, size_pool, heap)) {
8765  size_pool_allocatable_pages_set(objspace, size_pool, 1);
8766  heap_increment(objspace, size_pool, heap);
8767  }
8768  }
8769 }
8770 
8771 static int
8772 ready_to_gc(rb_objspace_t *objspace)
8773 {
8774  if (dont_gc_val() || during_gc || ruby_disable_gc) {
8775  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8776  rb_size_pool_t *size_pool = &size_pools[i];
8777  heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
8778  }
8779  return FALSE;
8780  }
8781  else {
8782  return TRUE;
8783  }
8784 }
8785 
8786 static void
8787 gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
8788 {
8789  gc_prof_set_malloc_info(objspace);
8790  {
8791  size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
8792  size_t old_limit = malloc_limit;
8793 
8794  if (inc > malloc_limit) {
8795  malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
8796  if (malloc_limit > gc_params.malloc_limit_max) {
8797  malloc_limit = gc_params.malloc_limit_max;
8798  }
8799  }
8800  else {
8801  malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
8802  if (malloc_limit < gc_params.malloc_limit_min) {
8803  malloc_limit = gc_params.malloc_limit_min;
8804  }
8805  }
8806 
8807  if (0) {
8808  if (old_limit != malloc_limit) {
8809  fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
8810  rb_gc_count(), old_limit, malloc_limit);
8811  }
8812  else {
8813  fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
8814  rb_gc_count(), malloc_limit);
8815  }
8816  }
8817  }
8818 
8819  /* reset oldmalloc info */
8820 #if RGENGC_ESTIMATE_OLDMALLOC
8821  if (!full_mark) {
8822  if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
8823  objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
8824  objspace->rgengc.oldmalloc_increase_limit =
8825  (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
8826 
8827  if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
8828  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
8829  }
8830  }
8831 
8832  if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
8833  rb_gc_count(),
8834  objspace->rgengc.need_major_gc,
8835  objspace->rgengc.oldmalloc_increase,
8836  objspace->rgengc.oldmalloc_increase_limit,
8837  gc_params.oldmalloc_limit_max);
8838  }
8839  else {
8840  /* major GC */
8841  objspace->rgengc.oldmalloc_increase = 0;
8842 
8843  if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
8844  objspace->rgengc.oldmalloc_increase_limit =
8845  (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
8846  if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
8847  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
8848  }
8849  }
8850  }
8851 #endif
8852 }
8853 
8854 static int
8855 garbage_collect(rb_objspace_t *objspace, unsigned int reason)
8856 {
8857  int ret;
8858 
8859  RB_VM_LOCK_ENTER();
8860  {
8861 #if GC_PROFILE_MORE_DETAIL
8862  objspace->profile.prepare_time = getrusage_time();
8863 #endif
8864 
8865  gc_rest(objspace);
8866 
8867 #if GC_PROFILE_MORE_DETAIL
8868  objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
8869 #endif
8870 
8871  ret = gc_start(objspace, reason);
8872  }
8873  RB_VM_LOCK_LEAVE();
8874 
8875  return ret;
8876 }
8877 
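/* Core of a GC cycle: pick minor vs. major and incremental vs. immediate
 * marking/sweeping from `reason`, stress settings and accumulated counters,
 * then run the marking phase (sweeping may continue lazily afterwards). */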
8878 static int
8879 gc_start(rb_objspace_t *objspace, unsigned int reason)
8880 {
8881  unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
8882 #if GC_ENABLE_INCREMENTAL_MARK
8883  unsigned int immediate_mark = reason & GPR_FLAG_IMMEDIATE_MARK;
8884 #endif
8885 
8886  /* reason may be clobbered later, so record immediate_sweep here */
8887  objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
8888 
8889  /* Explicitly enable compaction (GC.compact) */
8890  objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
8891 
8892  if (!heap_allocated_pages) return FALSE; /* heap is not ready */
8893  if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
8894 
8895  GC_ASSERT(gc_mode(objspace) == gc_mode_none);
8896  GC_ASSERT(!is_lazy_sweeping(objspace));
8897  GC_ASSERT(!is_incremental_marking(objspace));
8898 
8899  unsigned int lock_lev;
8900  gc_enter(objspace, gc_enter_event_start, &lock_lev);
8901 
8902 #if RGENGC_CHECK_MODE >= 2
8903  gc_verify_internal_consistency(objspace);
8904 #endif
8905 
8906  if (ruby_gc_stressful) {
8907  int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
8908 
8909  if ((flag & (1<<gc_stress_no_major)) == 0) {
8910  do_full_mark = TRUE;
8911  }
8912 
8913  objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
8914  }
8915  else {
8916  if (objspace->rgengc.need_major_gc) {
8917  reason |= objspace->rgengc.need_major_gc;
8918  do_full_mark = TRUE;
8919  }
8920  else if (RGENGC_FORCE_MAJOR_GC) {
8921  reason = GPR_FLAG_MAJOR_BY_FORCE;
8922  do_full_mark = TRUE;
8923  }
8924 
8925  objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
8926  }
8927 
8928  if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
8929  reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
8930  }
8931 
8932 #if GC_ENABLE_INCREMENTAL_MARK
8933  if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
8934  objspace->flags.during_incremental_marking = FALSE;
8935  }
8936  else {
8937  objspace->flags.during_incremental_marking = do_full_mark;
8938  }
8939 #endif
8940 
8941  if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
8942  objspace->flags.immediate_sweep = TRUE;
8943  }
8944 
8945  if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
8946 
8947  gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
8948  reason,
8949  do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
8950 
8951 #if USE_DEBUG_COUNTER
8952  RB_DEBUG_COUNTER_INC(gc_count);
8953 
8954  if (reason & GPR_FLAG_MAJOR_MASK) {
8955  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
8956  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
8957  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
8958  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
8959 #if RGENGC_ESTIMATE_OLDMALLOC
8960  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
8961 #endif
8962  }
8963  else {
8964  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
8965  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
8966  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
8967  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
8968  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
8969  }
8970 #endif
8971 
8972  objspace->profile.count++;
8973  objspace->profile.latest_gc_info = reason;
8974  objspace->profile.total_allocated_objects_at_gc_start = objspace->total_allocated_objects;
8975  objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
8976  gc_prof_setup_new_record(objspace, reason);
8977  gc_reset_malloc_info(objspace, do_full_mark);
8978  rb_transient_heap_start_marking(do_full_mark);
8979 
8980  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
8981  GC_ASSERT(during_gc);
8982 
8983  gc_prof_timer_start(objspace);
8984  {
8985  gc_marks(objspace, do_full_mark);
8986  }
8987  gc_prof_timer_stop(objspace);
8988 
8989  gc_exit(objspace, gc_enter_event_start, &lock_lev);
8990  return TRUE;
8991 }
8992 
8993 static void
8994 gc_rest(rb_objspace_t *objspace)
8995 {
8996  int marking = is_incremental_marking(objspace);
8997  int sweeping = is_lazy_sweeping(objspace);
8998 
8999  if (marking || sweeping) {
9000  unsigned int lock_lev;
9001  gc_enter(objspace, gc_enter_event_rest, &lock_lev);
9002 
9003  if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
9004 
9005  if (is_incremental_marking(objspace)) {
9006  gc_marks_rest(objspace);
9007  }
9008  if (is_lazy_sweeping(objspace)) {
9009  gc_sweep_rest(objspace);
9010  }
9011  gc_exit(objspace, gc_enter_event_rest, &lock_lev);
9012  }
9013 }
9014 
9015 struct objspace_and_reason {
9016  rb_objspace_t *objspace;
9017  unsigned int reason;
9018 };
9019 
9020 static void
9021 gc_current_status_fill(rb_objspace_t *objspace, char *buff)
9022 {
9023  int i = 0;
9024  if (is_marking(objspace)) {
9025  buff[i++] = 'M';
9026  if (is_full_marking(objspace)) buff[i++] = 'F';
9027 #if GC_ENABLE_INCREMENTAL_MARK
9028  if (is_incremental_marking(objspace)) buff[i++] = 'I';
9029 #endif
9030  }
9031  else if (is_sweeping(objspace)) {
9032  buff[i++] = 'S';
9033  if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
9034  }
9035  else {
9036  buff[i++] = 'N';
9037  }
9038  buff[i] = '\0';
9039 }
9040 
9041 static const char *
9042 gc_current_status(rb_objspace_t *objspace)
9043 {
9044  static char buff[0x10];
9045  gc_current_status_fill(objspace, buff);
9046  return buff;
9047 }
9048 
9049 #if PRINT_ENTER_EXIT_TICK
9050 
9051 static tick_t last_exit_tick;
9052 static tick_t enter_tick;
9053 static int enter_count = 0;
9054 static char last_gc_status[0x10];
9055 
9056 static inline void
9057 gc_record(rb_objspace_t *objspace, int direction, const char *event)
9058 {
9059  if (direction == 0) { /* enter */
9060  enter_count++;
9061  enter_tick = tick();
9062  gc_current_status_fill(objspace, last_gc_status);
9063  }
9064  else { /* exit */
9065  tick_t exit_tick = tick();
9066  char current_gc_status[0x10];
9067  gc_current_status_fill(objspace, current_gc_status);
9068 #if 1
9069  /* [last mutator time] [gc time] [event] */
9070  fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9071  enter_tick - last_exit_tick,
9072  exit_tick - enter_tick,
9073  event,
9074  last_gc_status, current_gc_status,
9075  (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9076  last_exit_tick = exit_tick;
9077 #else
9078  /* [enter_tick] [gc time] [event] */
9079  fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9080  enter_tick,
9081  exit_tick - enter_tick,
9082  event,
9083  last_gc_status, current_gc_status,
9084  (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9085 #endif
9086  }
9087 }
9088 #else /* PRINT_ENTER_EXIT_TICK */
9089 static inline void
9090 gc_record(rb_objspace_t *objspace, int direction, const char *event)
9091 {
9092  /* null */
9093 }
9094 #endif /* PRINT_ENTER_EXIT_TICK */
9095 
9096 static const char *
9097 gc_enter_event_cstr(enum gc_enter_event event)
9098 {
9099  switch (event) {
9100  case gc_enter_event_start: return "start";
9101  case gc_enter_event_mark_continue: return "mark_continue";
9102  case gc_enter_event_sweep_continue: return "sweep_continue";
9103  case gc_enter_event_rest: return "rest";
9104  case gc_enter_event_finalizer: return "finalizer";
9105  case gc_enter_event_rb_memerror: return "rb_memerror";
9106  }
9107  return NULL;
9108 }
9109 
9110 static void
9111 gc_enter_count(enum gc_enter_event event)
9112 {
9113  switch (event) {
9114  case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
9115  case gc_enter_event_mark_continue: RB_DEBUG_COUNTER_INC(gc_enter_mark_continue); break;
9116  case gc_enter_event_sweep_continue: RB_DEBUG_COUNTER_INC(gc_enter_sweep_continue); break;
9117  case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
9118  case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
9119  case gc_enter_event_rb_memerror: /* nothing */ break;
9120  }
9121 }
9122 
9123 #ifndef MEASURE_GC
9124 #define MEASURE_GC (objspace->flags.measure_gc)
9125 #endif
9126 
9127 static bool
9128 gc_enter_event_measure_p(rb_objspace_t *objspace, enum gc_enter_event event)
9129 {
9130  if (!MEASURE_GC) return false;
9131 
9132  switch (event) {
9133  case gc_enter_event_start:
9134  case gc_enter_event_mark_continue:
9135  case gc_enter_event_sweep_continue:
9136  case gc_enter_event_rest:
9137  return true;
9138 
9139  default:
9140  // case gc_enter_event_finalizer:
9141  // case gc_enter_event_rb_memerror:
9142  return false;
9143  }
9144 }
9145 
9146 static bool current_process_time(struct timespec *ts);
9147 
9148 static void
9149 gc_enter_clock(rb_objspace_t *objspace, enum gc_enter_event event)
9150 {
9151  if (gc_enter_event_measure_p(objspace, event)) {
9152  if (!current_process_time(&objspace->profile.start_time)) {
9153  objspace->profile.start_time.tv_sec = 0;
9154  objspace->profile.start_time.tv_nsec = 0;
9155  }
9156  }
9157 }
9158 
9159 static void
9160 gc_exit_clock(rb_objspace_t *objspace, enum gc_enter_event event)
9161 {
9162  if (gc_enter_event_measure_p(objspace, event)) {
9163  struct timespec end_time;
9164 
9165  if ((objspace->profile.start_time.tv_sec > 0 ||
9166  objspace->profile.start_time.tv_nsec > 0) &&
9167  current_process_time(&end_time)) {
9168 
9169  if (end_time.tv_sec < objspace->profile.start_time.tv_sec) {
9170  return; // ignore
9171  }
9172  else {
9173  uint64_t ns =
9174  (uint64_t)(end_time.tv_sec - objspace->profile.start_time.tv_sec) * (1000 * 1000 * 1000) +
9175  (end_time.tv_nsec - objspace->profile.start_time.tv_nsec);
9176  objspace->profile.total_time_ns += ns;
9177  }
9178  }
9179  }
9180 }
9181 
9182 static inline void
9183 gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9184 {
9185  RB_VM_LOCK_ENTER_LEV(lock_lev);
9186 
9187  gc_enter_clock(objspace, event);
9188 
9189  switch (event) {
9190  case gc_enter_event_rest:
9191  if (!is_marking(objspace)) break;
9192  // fall through
9193  case gc_enter_event_start:
9194  case gc_enter_event_mark_continue:
9195  // stop other ractors
9196  rb_vm_barrier();
9197  break;
9198  default:
9199  break;
9200  }
9201 
9202  gc_enter_count(event);
9203  if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
9204  if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
9205 
9206  mjit_gc_start_hook();
9207 
9208  during_gc = TRUE;
9209  RUBY_DEBUG_LOG("%s (%s)",gc_enter_event_cstr(event), gc_current_status(objspace));
9210  gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9211  gc_record(objspace, 0, gc_enter_event_cstr(event));
9212  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
9213 }
9214 
9215 static inline void
9216 gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9217 {
9218  GC_ASSERT(during_gc != 0);
9219 
9220  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
9221  gc_record(objspace, 1, gc_enter_event_cstr(event));
9222  RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
9223  gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9224  during_gc = FALSE;
9225 
9226  mjit_gc_exit_hook();
9227  gc_exit_clock(objspace, event);
9228  RB_VM_LOCK_LEAVE_LEV(lock_lev);
9229 }
9230 
9231 static void *
9232 gc_with_gvl(void *ptr)
9233 {
9234  struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
9235  return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
9236 }
9237 
9238 static int
9239 garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
9240 {
9241  if (dont_gc_val()) return TRUE;
9242  if (ruby_thread_has_gvl_p()) {
9243  return garbage_collect(objspace, reason);
9244  }
9245  else {
9246  if (ruby_native_thread_p()) {
9247  struct objspace_and_reason oar;
9248  oar.objspace = objspace;
9249  oar.reason = reason;
9250  return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
9251  }
9252  else {
9253  /* no ruby thread */
9254  fprintf(stderr, "[FATAL] failed to allocate memory\n");
9255  exit(EXIT_FAILURE);
9256  }
9257  }
9258 }
9259 
9260 static VALUE
9261 gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
9262 {
9263  rb_objspace_t *objspace = &rb_objspace;
9264  unsigned int reason = (GPR_FLAG_FULL_MARK |
9265  GPR_FLAG_IMMEDIATE_MARK |
9266  GPR_FLAG_IMMEDIATE_SWEEP |
9267  GPR_FLAG_METHOD);
9268 
9269  /* For now, compact implies full mark / sweep, so ignore other flags */
9270  if (RTEST(compact)) {
9271  /* Unless we are on MinGW/Windows or have mmap available, we cannot use
9272  * mprotect for the read barrier, so compaction must be disabled. */
9273 #if !defined(__MINGW32__) && !defined(_WIN32)
9274  if (!USE_MMAP_ALIGNED_ALLOC) {
9275  rb_raise(rb_eNotImpError, "Compaction isn't available on this platform");
9276  }
9277 #endif
9278 
9279  reason |= GPR_FLAG_COMPACT;
9280  }
9281  else {
9282  if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
9283  if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
9284  if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
9285  }
9286 
9287  garbage_collect(objspace, reason);
9288  gc_finalize_deferred(objspace);
9289 
9290  return Qnil;
9291 }
9292 
9293 static int
9294 gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
9295 {
9296  GC_ASSERT(!SPECIAL_CONST_P(obj));
9297 
9298  switch (BUILTIN_TYPE(obj)) {
9299  case T_NONE:
9300  case T_NIL:
9301  case T_MOVED:
9302  case T_ZOMBIE:
9303  return FALSE;
9304  case T_SYMBOL:
9305  if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
9306  return FALSE;
9307  }
9308  /* fall through */
9309  case T_STRING:
9310  case T_OBJECT:
9311  case T_FLOAT:
9312  case T_IMEMO:
9313  case T_ARRAY:
9314  case T_BIGNUM:
9315  case T_ICLASS:
9316  case T_MODULE:
9317  case T_REGEXP:
9318  case T_DATA:
9319  case T_MATCH:
9320  case T_STRUCT:
9321  case T_HASH:
9322  case T_FILE:
9323  case T_COMPLEX:
9324  case T_RATIONAL:
9325  case T_NODE:
9326  case T_CLASS:
9327  if (FL_TEST(obj, FL_FINALIZE)) {
9328  /* The finalizer table is a numtable. It looks up objects by address.
9329  * We can't mark the keys in the finalizer table because that would
9330  * prevent the objects from being collected. This check prevents
9331  * objects that are keys in the finalizer table from being moved
9332  * without directly pinning them. */
9333  if (st_is_member(finalizer_table, obj)) {
9334  return FALSE;
9335  }
9336  }
9337  GC_ASSERT(RVALUE_MARKED(obj));
9338  GC_ASSERT(!RVALUE_PINNED(obj));
9339 
9340  return TRUE;
9341 
9342  default:
9343  rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
9344  break;
9345  }
9346 
9347  return FALSE;
9348 }
9349 
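/* Compaction: copy the object at `scan` into the free slot at `free`, transfer
 * its bitmap state, and turn the old slot into a T_MOVED forwarding object.
 * Returns the address of the old (now T_MOVED) slot. */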
9350 static VALUE
9351 gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t slot_size)
9352 {
9353  int marked;
9354  int wb_unprotected;
9355  int uncollectible;
9356  int marking;
9357  RVALUE *dest = (RVALUE *)free;
9358  RVALUE *src = (RVALUE *)scan;
9359 
9360  gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);
9361 
9362  GC_ASSERT(BUILTIN_TYPE(scan) != T_NONE);
9363  GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));
9364 
9365  /* Save off bits for current object. */
9366  marked = rb_objspace_marked_object_p((VALUE)src);
9367  wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
9368  uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
9369  marking = RVALUE_MARKING((VALUE)src);
9370 
9371  /* Clear bits for eventual T_MOVED */
9372  CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
9373  CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
9374  CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
9375  CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)src), (VALUE)src);
9376 
9377  if (FL_TEST((VALUE)src, FL_EXIVAR)) {
9378  /* Same deal as below. Generic ivars are held in st tables.
9379  * Resizing the table could cause a GC to happen and we can't allow it */
9380  VALUE already_disabled = rb_gc_disable_no_rest();
9381  rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
9382  if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
9383  }
9384 
9385  st_data_t srcid = (st_data_t)src, id;
9386 
9387  /* If the source object's object_id has been seen, we need to update
9388  * the object to object id mapping. */
9389  if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
9390  gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
9391  /* Inserting into the st table can cause the GC to run. We need to
9392  * prevent re-entry into the GC since `gc_move` is running in the GC,
9393  * so temporarily disable the GC around the st table mutation. */
9394  VALUE already_disabled = rb_gc_disable_no_rest();
9395  st_delete(objspace->obj_to_id_tbl, &srcid, 0);
9396  st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
9397  if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
9398  }
9399 
9400  /* Move the object */
9401  memcpy(dest, src, slot_size);
9402  memset(src, 0, slot_size);
9403 
9404  /* Set bits for object in new location */
9405  if (marking) {
9406  MARK_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
9407  }
9408  else {
9409  CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
9410  }
9411 
9412  if (marked) {
9413  MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
9414  }
9415  else {
9416  CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
9417  }
9418 
9419  if (wb_unprotected) {
9420  MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
9421  }
9422  else {
9423  CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
9424  }
9425 
9426  if (uncollectible) {
9427  MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
9428  }
9429  else {
9430  CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
9431  }
9432 
9433  /* Assign forwarding address */
9434  src->as.moved.flags = T_MOVED;
9435  src->as.moved.dummy = Qundef;
9436  src->as.moved.destination = (VALUE)dest;
9437  GC_ASSERT(BUILTIN_TYPE((VALUE)dest) != T_NONE);
9438 
9439  return (VALUE)src;
9440 }
9441 
9442 static int
9443 compare_free_slots(const void *left, const void *right, void *dummy)
9444 {
9445  struct heap_page *left_page;
9446  struct heap_page *right_page;
9447 
9448  left_page = *(struct heap_page * const *)left;
9449  right_page = *(struct heap_page * const *)right;
9450 
9451  return left_page->free_slots - right_page->free_slots;
9452 }
9453 
9454 static void
9455 gc_sort_heap_by_empty_slots(rb_objspace_t *objspace)
9456 {
9457  for (int j = 0; j < SIZE_POOL_COUNT; j++) {
9458  rb_size_pool_t *size_pool = &size_pools[j];
9459 
9460  size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
9461  size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
9462  struct heap_page *page = 0, **page_list = malloc(size);
9463  size_t i = 0;
9464 
9465  list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
9466  page_list[i++] = page;
9467  GC_ASSERT(page);
9468  }
9469 
9470  GC_ASSERT((size_t)i == total_pages);
9471 
9472  /* Sort the array so the most-filled pages come first. `heap_add_page` adds to the
9473  * head of the list, so the emptiest pages end up at the start of the heap. */
9474  ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_free_slots, NULL);
9475 
9476  /* Reset the eden heap */
9477  list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
9478 
9479  for (i = 0; i < total_pages; i++) {
9480  list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
9481  if (page_list[i]->free_slots != 0) {
9482  heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
9483  }
9484  }
9485 
9486  free(page_list);
9487  }
9488 }
9489 
9490 static void
9491 gc_ref_update_array(rb_objspace_t * objspace, VALUE v)
9492 {
9493  long i, len;
9494 
9495  if (FL_TEST(v, ELTS_SHARED))
9496  return;
9497 
9498  len = RARRAY_LEN(v);
9499  if (len > 0) {
9500  VALUE *ptr = (VALUE *)RARRAY_CONST_PTR_TRANSIENT(v);
9501  for (i = 0; i < len; i++) {
9502  UPDATE_IF_MOVED(objspace, ptr[i]);
9503  }
9504  }
9505 }
9506 
9507 static void
9508 gc_ref_update_object(rb_objspace_t * objspace, VALUE v)
9509 {
9510  VALUE *ptr = ROBJECT_IVPTR(v);
9511 
9512  uint32_t i, len = ROBJECT_NUMIV(v);
9513  for (i = 0; i < len; i++) {
9514  UPDATE_IF_MOVED(objspace, ptr[i]);
9515  }
9516 }
9517 
9518 static int
9519 hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
9520 {
9521  rb_objspace_t *objspace = (rb_objspace_t *)argp;
9522 
9523  if (gc_object_moved_p(objspace, (VALUE)*key)) {
9524  *key = rb_gc_location((VALUE)*key);
9525  }
9526 
9527  if (gc_object_moved_p(objspace, (VALUE)*value)) {
9528  *value = rb_gc_location((VALUE)*value);
9529  }
9530 
9531  return ST_CONTINUE;
9532 }
9533 
9534 static int
9535 hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
9536 {
9537  rb_objspace_t *objspace;
9538 
9539  objspace = (rb_objspace_t *)argp;
9540 
9541  if (gc_object_moved_p(objspace, (VALUE)key)) {
9542  return ST_REPLACE;
9543  }
9544 
9545  if (gc_object_moved_p(objspace, (VALUE)value)) {
9546  return ST_REPLACE;
9547  }
9548  return ST_CONTINUE;
9549 }
9550 
9551 static int
9552 hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
9553 {
9554  rb_objspace_t *objspace = (rb_objspace_t *)argp;
9555 
9556  if (gc_object_moved_p(objspace, (VALUE)*value)) {
9557  *value = rb_gc_location((VALUE)*value);
9558  }
9559 
9560  return ST_CONTINUE;
9561 }
9562 
9563 static int
9564 hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
9565 {
9566  rb_objspace_t *objspace;
9567 
9568  objspace = (rb_objspace_t *)argp;
9569 
9570  if (gc_object_moved_p(objspace, (VALUE)value)) {
9571  return ST_REPLACE;
9572  }
9573  return ST_CONTINUE;
9574 }
9575 
9576 static void
9577 gc_update_tbl_refs(rb_objspace_t * objspace, st_table *tbl)
9578 {
9579  if (!tbl || tbl->num_entries == 0) return;
9580 
9581  if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
9582  rb_raise(rb_eRuntimeError, "hash modified during iteration");
9583  }
9584 }
9585 
9586 static void
9587 gc_update_table_refs(rb_objspace_t * objspace, st_table *tbl)
9588 {
9589  if (!tbl || tbl->num_entries == 0) return;
9590 
9591  if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
9592  rb_raise(rb_eRuntimeError, "hash modified during iteration");
9593  }
9594 }
9595 
9596 /* Update MOVED references in an st_table */
9597 void
9598 rb_gc_update_tbl_refs(st_table *ptr)
9599 {
9600  rb_objspace_t *objspace = &rb_objspace;
9601  gc_update_table_refs(objspace, ptr);
9602 }
9603 
9604 static void
9605 gc_ref_update_hash(rb_objspace_t * objspace, VALUE v)
9606 {
9607  rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
9608 }
9609 
9610 static void
9611 gc_ref_update_method_entry(rb_objspace_t *objspace, rb_method_entry_t *me)
9612 {
9613  rb_method_definition_t *def = me->def;
9614 
9615  UPDATE_IF_MOVED(objspace, me->owner);
9616  UPDATE_IF_MOVED(objspace, me->defined_class);
9617 
9618  if (def) {
9619  switch (def->type) {
9620  case VM_METHOD_TYPE_ISEQ:
9621  if (def->body.iseq.iseqptr) {
9622  TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, def->body.iseq.iseqptr);
9623  }
9624  TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
9625  break;
9626  case VM_METHOD_TYPE_ATTRSET:
9627  case VM_METHOD_TYPE_IVAR:
9628  UPDATE_IF_MOVED(objspace, def->body.attr.location);
9629  break;
9630  case VM_METHOD_TYPE_BMETHOD:
9631  UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
9632  break;
9633  case VM_METHOD_TYPE_ALIAS:
9634  TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.alias.original_me);
9635  return;
9636  case VM_METHOD_TYPE_REFINED:
9637  TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.refined.orig_me);
9638  UPDATE_IF_MOVED(objspace, def->body.refined.owner);
9639  break;
9640  case VM_METHOD_TYPE_CFUNC:
9641  case VM_METHOD_TYPE_ZSUPER:
9642  case VM_METHOD_TYPE_MISSING:
9643  case VM_METHOD_TYPE_OPTIMIZED:
9644  case VM_METHOD_TYPE_UNDEF:
9645  case VM_METHOD_TYPE_NOTIMPLEMENTED:
9646  break;
9647  }
9648  }
9649 }
9650 
9651 static void
9652 gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
9653 {
9654  long i;
9655 
9656  for (i=0; i<n; i++) {
9657  UPDATE_IF_MOVED(objspace, values[i]);
9658  }
9659 }
9660 
9661 static void
9662 gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
9663 {
9664  switch (imemo_type(obj)) {
9665  case imemo_env:
9666  {
9667  rb_env_t *env = (rb_env_t *)obj;
9668  if (LIKELY(env->ep)) {
9669  // env->ep can be NULL here just after newobj().
9670  TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
9671  UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
9672  gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
9673  }
9674  }
9675  break;
9676  case imemo_cref:
9677  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
9678  TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
9679  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
9680  break;
9681  case imemo_svar:
9682  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
9683  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
9684  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
9685  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
9686  break;
9687  case imemo_throw_data:
9688  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
9689  break;
9690  case imemo_ifunc:
9691  break;
9692  case imemo_memo:
9693  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
9694  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
9695  break;
9696  case imemo_ment:
9697  gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
9698  break;
9699  case imemo_iseq:
9700  rb_iseq_update_references((rb_iseq_t *)obj);
9701  break;
9702  case imemo_ast:
9703  rb_ast_update_references((rb_ast_t *)obj);
9704  break;
9705  case imemo_callcache:
9706  {
9707  const struct rb_callcache *cc = (const struct rb_callcache *)obj;
9708  if (cc->klass) {
9709  UPDATE_IF_MOVED(objspace, cc->klass);
9710  if (!is_live_object(objspace, cc->klass)) {
9711  *((VALUE *)(&cc->klass)) = (VALUE)0;
9712  }
9713  }
9714 
9715  if (cc->cme_) {
9716  TYPED_UPDATE_IF_MOVED(objspace, struct rb_callable_method_entry_struct *, cc->cme_);
9717  if (!is_live_object(objspace, (VALUE)cc->cme_)) {
9718  *((struct rb_callable_method_entry_struct **)(&cc->cme_)) = (struct rb_callable_method_entry_struct *)0;
9719  }
9720  }
9721  }
9722  break;
9723  case imemo_constcache:
9724  {
9725  const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
9726  UPDATE_IF_MOVED(objspace, ice->value);
9727  }
9728  break;
9729  case imemo_parser_strterm:
9730  case imemo_tmpbuf:
9731  case imemo_callinfo:
9732  break;
9733  default:
9734  rb_bug("not reachable %d", imemo_type(obj));
9735  break;
9736  }
9737 }
9738 
9739 static enum rb_id_table_iterator_result
9740 check_id_table_move(ID id, VALUE value, void *data)
9741 {
9742  rb_objspace_t *objspace = (rb_objspace_t *)data;
9743 
9744  if (gc_object_moved_p(objspace, (VALUE)value)) {
9745  return ID_TABLE_REPLACE;
9746  }
9747 
9748  return ID_TABLE_CONTINUE;
9749 }
9750 
9751 /* Returns the new location of an object, if it moved. Otherwise returns
9752  * the existing location. */
9753 VALUE
9754 rb_gc_location(VALUE value)
9755 {
9756 
9757  VALUE destination;
9758 
9759  if (!SPECIAL_CONST_P(value)) {
9760  void *poisoned = asan_poisoned_object_p(value);
9761  asan_unpoison_object(value, false);
9762 
9763  if (BUILTIN_TYPE(value) == T_MOVED) {
9764  destination = (VALUE)RMOVED(value)->destination;
9765  GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
9766  }
9767  else {
9768  destination = value;
9769  }
9770 
9771  /* Re-poison slot if it's not the one we want */
9772  if (poisoned) {
9773  GC_ASSERT(BUILTIN_TYPE(value) == T_NONE);
9774  asan_poison_object(value);
9775  }
9776  }
9777  else {
9778  destination = value;
9779  }
9780 
9781  return destination;
9782 }
9783 
9784 static enum rb_id_table_iterator_result
9785 update_id_table(ID *key, VALUE * value, void *data, int existing)
9786 {
9787  rb_objspace_t *objspace = (rb_objspace_t *)data;
9788 
9789  if (gc_object_moved_p(objspace, (VALUE)*value)) {
9790  *value = rb_gc_location((VALUE)*value);
9791  }
9792 
9793  return ID_TABLE_CONTINUE;
9794 }
9795 
9796 static void
9797 update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
9798 {
9799  if (tbl) {
9800  rb_id_table_foreach_with_replace(tbl, check_id_table_move, update_id_table, objspace);
9801  }
9802 }
9803 
9804 static enum rb_id_table_iterator_result
9805 update_cc_tbl_i(ID id, VALUE ccs_ptr, void *data)
9806 {
9807  rb_objspace_t *objspace = (rb_objspace_t *)data;
9808  struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
9809  VM_ASSERT(vm_ccs_p(ccs));
9810 
9811  if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
9812  ccs->cme = (const rb_callable_method_entry_t *)rb_gc_location((VALUE)ccs->cme);
9813  }
9814 
9815  for (int i=0; i<ccs->len; i++) {
9816  if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
9817  ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
9818  }
9819  if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
9820  ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
9821  }
9822  }
9823 
9824  // do not replace
9825  return ID_TABLE_CONTINUE;
9826 }
9827 
9828 static void
9829 update_cc_tbl(rb_objspace_t *objspace, VALUE klass)
9830 {
9831  struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
9832  if (tbl) {
9833  rb_id_table_foreach_with_replace(tbl, update_cc_tbl_i, 0, objspace);
9834  }
9835 }
9836 
9837 static enum rb_id_table_iterator_result
9838 update_cvc_tbl_i(ID id, VALUE cvc_entry, void *data)
9839 {
9840  struct rb_cvar_class_tbl_entry *entry;
9841 
9842  entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
9843 
9844  entry->class_value = rb_gc_location(entry->class_value);
9845 
9846  return ID_TABLE_CONTINUE;
9847 }
9848 
9849 static void
9850 update_cvc_tbl(rb_objspace_t *objspace, VALUE klass)
9851 {
9852  struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
9853  if (tbl) {
9854  rb_id_table_foreach_with_replace(tbl, update_cvc_tbl_i, 0, objspace);
9855  }
9856 }
9857 
9858 static enum rb_id_table_iterator_result
9859 update_const_table(VALUE value, void *data)
9860 {
9861  rb_const_entry_t *ce = (rb_const_entry_t *)value;
9862  rb_objspace_t * objspace = (rb_objspace_t *)data;
9863 
9864  if (gc_object_moved_p(objspace, ce->value)) {
9865  ce->value = rb_gc_location(ce->value);
9866  }
9867 
9868  if (gc_object_moved_p(objspace, ce->file)) {
9869  ce->file = rb_gc_location(ce->file);
9870  }
9871 
9872  return ID_TABLE_CONTINUE;
9873 }
9874 
9875 static void
9876 update_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
9877 {
9878  if (!tbl) return;
9879  rb_id_table_foreach_values(tbl, update_const_table, objspace);
9880 }
9881 
9882 static void
9883 update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
9884 {
9885  while (entry) {
9886  UPDATE_IF_MOVED(objspace, entry->klass);
9887  entry = entry->next;
9888  }
9889 }
9890 
9891 static int
9892 update_iv_index_tbl_i(st_data_t key, st_data_t value, st_data_t arg)
9893 {
9894  rb_objspace_t *objspace = (rb_objspace_t *)arg;
9895  struct rb_iv_index_tbl_entry *ent = (struct rb_iv_index_tbl_entry *)value;
9896  UPDATE_IF_MOVED(objspace, ent->class_value);
9897  return ST_CONTINUE;
9898 }
9899 
9900 static void
9901 update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
9902 {
9903  UPDATE_IF_MOVED(objspace, ext->origin_);
9904  UPDATE_IF_MOVED(objspace, ext->refined_class);
9905  update_subclass_entries(objspace, ext->subclasses);
9906 
9907  // ext->iv_index_tbl
9908  if (ext->iv_index_tbl) {
9909  st_foreach(ext->iv_index_tbl, update_iv_index_tbl_i, (st_data_t)objspace);
9910  }
9911 }
9912 
9913 static void
9914 gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
9915 {
9916  RVALUE *any = RANY(obj);
9917 
9918  gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);
9919 
9920  switch (BUILTIN_TYPE(obj)) {
9921  case T_CLASS:
9922  case T_MODULE:
9923  if (RCLASS_SUPER((VALUE)obj)) {
9924  UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
9925  }
9926  if (!RCLASS_EXT(obj)) break;
9927  update_m_tbl(objspace, RCLASS_M_TBL(obj));
9928  update_cc_tbl(objspace, obj);
9929  update_cvc_tbl(objspace, obj);
9930 
9931  gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
9932 
9933  update_class_ext(objspace, RCLASS_EXT(obj));
9934  update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
9935  break;
9936 
9937  case T_ICLASS:
9938  if (FL_TEST(obj, RICLASS_IS_ORIGIN) &&
9939  !FL_TEST(obj, RICLASS_ORIGIN_SHARED_MTBL)) {
9940  update_m_tbl(objspace, RCLASS_M_TBL(obj));
9941  }
9942  if (RCLASS_SUPER((VALUE)obj)) {
9943  UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
9944  }
9945  if (!RCLASS_EXT(obj)) break;
9946  if (RCLASS_IV_TBL(obj)) {
9947  gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
9948  }
9949  update_class_ext(objspace, RCLASS_EXT(obj));
9950  update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
9951  update_cc_tbl(objspace, obj);
9952  break;
9953 
9954  case T_IMEMO:
9955  gc_ref_update_imemo(objspace, obj);
9956  return;
9957 
9958  case T_NIL:
9959  case T_FIXNUM:
9960  case T_NODE:
9961  case T_MOVED:
9962  case T_NONE:
9963  /* These can't move */
9964  return;
9965 
9966  case T_ARRAY:
9967  if (FL_TEST(obj, ELTS_SHARED)) {
9968  UPDATE_IF_MOVED(objspace, any->as.array.as.heap.aux.shared_root);
9969  }
9970  else {
9971  gc_ref_update_array(objspace, obj);
9972  }
9973  break;
9974 
9975  case T_HASH:
9976  gc_ref_update_hash(objspace, obj);
9977  UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
9978  break;
9979 
9980  case T_STRING:
9981  if (STR_SHARED_P(obj)) {
9982 #if USE_RVARGC
9983  VALUE orig_shared = any->as.string.as.heap.aux.shared;
9984 #endif
9985  UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);
9986 #if USE_RVARGC
9987  VALUE shared = any->as.string.as.heap.aux.shared;
9988  if (STR_EMBED_P(shared)) {
9989  size_t offset = (size_t)any->as.string.as.heap.ptr - (size_t)RSTRING(orig_shared)->as.embed.ary;
9990  GC_ASSERT(any->as.string.as.heap.ptr >= RSTRING(orig_shared)->as.embed.ary);
9991  GC_ASSERT(offset <= (size_t)RSTRING(shared)->as.embed.len);
9992  any->as.string.as.heap.ptr = RSTRING(shared)->as.embed.ary + offset;
9993  }
9994 #endif
9995  }
9996  break;
9997 
9998  case T_DATA:
9999  /* Call the compaction callback, if it exists */
10000  {
10001  void *const ptr = DATA_PTR(obj);
10002  if (ptr) {
10003  if (RTYPEDDATA_P(obj)) {
10004  RUBY_DATA_FUNC compact_func = any->as.typeddata.type->function.dcompact;
10005  if (compact_func) (*compact_func)(ptr);
10006  }
10007  }
10008  }
10009  break;
10010 
10011  case T_OBJECT:
10012  gc_ref_update_object(objspace, obj);
10013  break;
10014 
10015  case T_FILE:
10016  if (any->as.file.fptr) {
10017  UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
10018  UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
10019  UPDATE_IF_MOVED(objspace, any->as.file.fptr->tied_io_for_writing);
10020  UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_asciicompat);
10021  UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_pre_ecopts);
10022  UPDATE_IF_MOVED(objspace, any->as.file.fptr->encs.ecopts);
10023  UPDATE_IF_MOVED(objspace, any->as.file.fptr->write_lock);
10024  }
10025  break;
10026  case T_REGEXP:
10027  UPDATE_IF_MOVED(objspace, any->as.regexp.src);
10028  break;
10029 
10030  case T_SYMBOL:
10031  if (DYNAMIC_SYM_P((VALUE)any)) {
10032  UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
10033  }
10034  break;
10035 
10036  case T_FLOAT:
10037  case T_BIGNUM:
10038  break;
10039 
10040  case T_MATCH:
10041  UPDATE_IF_MOVED(objspace, any->as.match.regexp);
10042 
10043  if (any->as.match.str) {
10044  UPDATE_IF_MOVED(objspace, any->as.match.str);
10045  }
10046  break;
10047 
10048  case T_RATIONAL:
10049  UPDATE_IF_MOVED(objspace, any->as.rational.num);
10050  UPDATE_IF_MOVED(objspace, any->as.rational.den);
10051  break;
10052 
10053  case T_COMPLEX:
10054  UPDATE_IF_MOVED(objspace, any->as.complex.real);
10055  UPDATE_IF_MOVED(objspace, any->as.complex.imag);
10056 
10057  break;
10058 
10059  case T_STRUCT:
10060  {
10061  long i, len = RSTRUCT_LEN(obj);
10062  VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
10063 
10064  for (i = 0; i < len; i++) {
10065  UPDATE_IF_MOVED(objspace, ptr[i]);
10066  }
10067  }
10068  break;
10069  default:
10070 #if GC_DEBUG
10071  rb_gcdebug_print_obj_condition((VALUE)obj);
10072  rb_obj_info_dump(obj);
10073  rb_bug("unreachable");
10074 #endif
10075  break;
10076 
10077  }
10078 
10079  UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
10080 
10081  gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
10082 }
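/* Illustrative sketch (not compiled; hypothetical names). The T_DATA branch
 * above invokes a typed data's dcompact callback; such a callback repairs its
 * raw VALUE references by chasing T_MOVED forwarding addresses with
 * rb_gc_location(). */
#if 0
struct example_wrapper { VALUE cache; };

static void
example_wrapper_dcompact(void *ptr)
{
    struct example_wrapper *w = ptr;
    w->cache = rb_gc_location(w->cache);   /* new address, or unchanged if not moved */
}
#endif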
10083 
10084 static int
10085 gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t * objspace, struct heap_page *page)
10086 {
10087  VALUE v = (VALUE)vstart;
10088  asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
10089  asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
10090  page->flags.has_uncollectible_shady_objects = FALSE;
10091  page->flags.has_remembered_objects = FALSE;
10092 
10093  /* For each object on the page */
10094  for (; v != (VALUE)vend; v += stride) {
10095  void *poisoned = asan_poisoned_object_p(v);
10096  asan_unpoison_object(v, false);
10097 
10098  switch (BUILTIN_TYPE(v)) {
10099  case T_NONE:
10100  case T_MOVED:
10101  case T_ZOMBIE:
10102  break;
10103  default:
10104  if (RVALUE_WB_UNPROTECTED(v)) {
10105  page->flags.has_uncollectible_shady_objects = TRUE;
10106  }
10107  if (RVALUE_PAGE_MARKING(page, v)) {
10108  page->flags.has_remembered_objects = TRUE;
10109  }
10110  if (page->flags.before_sweep) {
10111  if (RVALUE_MARKED(v)) {
10112  gc_update_object_references(objspace, v);
10113  }
10114  }
10115  else {
10116  gc_update_object_references(objspace, v);
10117  }
10118  }
10119 
10120  if (poisoned) {
10121  asan_poison_object(v);
10122  }
10123  }
10124 
10125  return 0;
10126 }
10127 
10128 extern rb_symbols_t ruby_global_symbols;
10129 #define global_symbols ruby_global_symbols
10130 
10131 static void
10132 gc_update_references(rb_objspace_t *objspace)
10133 {
10134  rb_execution_context_t *ec = GET_EC();
10135  rb_vm_t *vm = rb_ec_vm_ptr(ec);
10136 
10137  struct heap_page *page = NULL;
10138 
10139  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10140  bool should_set_mark_bits = TRUE;
10141  rb_size_pool_t *size_pool = &size_pools[i];
10142  rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10143 
10144  list_for_each(&heap->pages, page, page_node) {
10145  uintptr_t start = (uintptr_t)page->start;
10146  uintptr_t end = start + (page->total_slots * size_pool->slot_size);
10147 
10148  gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
10149  if (page == heap->sweeping_page) {
10150  should_set_mark_bits = FALSE;
10151  }
10152  if (should_set_mark_bits) {
10153  gc_setup_mark_bits(page);
10154  }
10155  }
10156  }
10157  rb_vm_update_references(vm);
10158  rb_transient_heap_update_references();
10159  rb_gc_update_global_tbl();
10160  global_symbols.ids = rb_gc_location(global_symbols.ids);
10161  global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
10162  gc_update_tbl_refs(objspace, objspace->obj_to_id_tbl);
10163  gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
10164  gc_update_table_refs(objspace, global_symbols.str_sym);
10165  gc_update_table_refs(objspace, finalizer_table);
10166 }
10167 
10168 static VALUE
10169 gc_compact_stats(rb_execution_context_t *ec, VALUE self)
10170 {
10171  size_t i;
10172  rb_objspace_t *objspace = &rb_objspace;
10173  VALUE h = rb_hash_new();
10174  VALUE considered = rb_hash_new();
10175  VALUE moved = rb_hash_new();
10176 
10177  for (i=0; i<T_MASK; i++) {
10178  if (objspace->rcompactor.considered_count_table[i]) {
10179  rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
10180  }
10181 
10182  if (objspace->rcompactor.moved_count_table[i]) {
10183  rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
10184  }
10185  }
10186 
10187  rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
10188  rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
10189 
10190  return h;
10191 }
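/* Illustrative example (hypothetical counts): the stats hash built above has
 * the shape
 *   { considered: { :T_OBJECT => 1234, ... }, moved: { :T_OBJECT => 567, ... } }
 * keyed by object type, with per-type counts taken from rcompactor. */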
10192 
10193 static void
10194 root_obj_check_moved_i(const char *category, VALUE obj, void *data)
10195 {
10196  if (gc_object_moved_p(&rb_objspace, obj)) {
10197  rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));
10198  }
10199 }
10200 
10201 static void
10202 reachable_object_check_moved_i(VALUE ref, void *data)
10203 {
10204  VALUE parent = (VALUE)data;
10205  if (gc_object_moved_p(&rb_objspace, ref)) {
10206  rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
10207  }
10208 }
10209 
10210 static int
10211 heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
10212 {
10213  VALUE v = (VALUE)vstart;
10214  for (; v != (VALUE)vend; v += stride) {
10215  if (gc_object_moved_p(&rb_objspace, v)) {
10216  /* Moved object still on the heap, something may have a reference. */
10217  }
10218  else {
10219  void *poisoned = asan_poisoned_object_p(v);
10220  asan_unpoison_object(v, false);
10221 
10222  switch (BUILTIN_TYPE(v)) {
10223  case T_NONE:
10224  case T_ZOMBIE:
10225  break;
10226  default:
10227  if (!rb_objspace_garbage_object_p(v)) {
10228  rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
10229  }
10230  }
10231 
10232  if (poisoned) {
10233  GC_ASSERT(BUILTIN_TYPE(v) == T_NONE);
10234  asan_poison_object(v);
10235  }
10236  }
10237  }
10238 
10239  return 0;
10240 }
10241 
10242 static VALUE
10243 gc_compact(rb_execution_context_t *ec, VALUE self)
10244 {
10245  /* Run GC with compaction enabled */
10246  gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);
10247 
10248  return gc_compact_stats(ec, self);
10249 }
10250 
10251 static VALUE
10252 gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE toward_empty)
10253 {
10254  rb_objspace_t *objspace = &rb_objspace;
10255 
10256  /* Clear the heap. */
10257  gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qfalse);
10258 
10259  RB_VM_LOCK_ENTER();
10260  {
10261  gc_rest(objspace);
10262 
10263  if (RTEST(double_heap)) {
10264  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10265  rb_size_pool_t *size_pool = &size_pools[i];
10266  rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10267  heap_add_pages(objspace, size_pool, heap, heap->total_pages);
10268  }
10269  }
10270 
10271  if (RTEST(toward_empty)) {
10272  gc_sort_heap_by_empty_slots(objspace);
10273  }
10274  }
10275  RB_VM_LOCK_LEAVE();
10276 
10277  gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);
10278 
10279  objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
10280  objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);
10281 
10282  return gc_compact_stats(ec, self);
10283 }
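/* Illustrative note: this is the backend of the Ruby-level
 * GC.verify_compaction_references helper (defined in gc.rb), typically reached
 * from test code as something like
 *   GC.verify_compaction_references(double_heap: true, toward: :empty)
 * which exercises the double_heap and toward_empty paths above. */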
10284 
10285 VALUE
10286 rb_gc_start(void)
10287 {
10288  rb_gc();
10289  return Qnil;
10290 }
10291 
10292 void
10293 rb_gc(void)
10294 {
10295  rb_objspace_t *objspace = &rb_objspace;
10296  unsigned int reason = GPR_DEFAULT_REASON;
10297  garbage_collect(objspace, reason);
10298 }
10299 
10300 int
10301 rb_during_gc(void)
10302 {
10303  rb_objspace_t *objspace = &rb_objspace;
10304  return during_gc;
10305 }
10306 
10307 #if RGENGC_PROFILE >= 2
10308 
10309 static const char *type_name(int type, VALUE obj);
10310 
10311 static void
10312 gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
10313 {
10314  VALUE result = rb_hash_new_with_size(T_MASK);
10315  int i;
10316  for (i=0; i<T_MASK; i++) {
10317  const char *type = type_name(i, 0);
10318  rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
10319  }
10320  rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
10321 }
10322 #endif
10323 
10324 size_t
10325 rb_gc_count(void)
10326 {
10327  return rb_objspace.profile.count;
10328 }
10329 
10330 static VALUE
10331 gc_count(rb_execution_context_t *ec, VALUE self)
10332 {
10333  return SIZET2NUM(rb_gc_count());
10334 }
10335 
10336 static VALUE
10337 gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
10338 {
10339  static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
10340  static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
10341 #if RGENGC_ESTIMATE_OLDMALLOC
10342  static VALUE sym_oldmalloc;
10343 #endif
10344  static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
10345  static VALUE sym_none, sym_marking, sym_sweeping;
10346  VALUE hash = Qnil, key = Qnil;
10347  VALUE major_by;
10348  unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
10349 
10350  if (SYMBOL_P(hash_or_key)) {
10351  key = hash_or_key;
10352  }
10353  else if (RB_TYPE_P(hash_or_key, T_HASH)) {
10354  hash = hash_or_key;
10355  }
10356  else {
10357  rb_raise(rb_eTypeError, "non-hash or symbol given");
10358  }
10359 
10360  if (NIL_P(sym_major_by)) {
10361 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
10362  S(major_by);
10363  S(gc_by);
10364  S(immediate_sweep);
10365  S(have_finalizer);
10366  S(state);
10367 
10368  S(stress);
10369  S(nofree);
10370  S(oldgen);
10371  S(shady);
10372  S(force);
10373 #if RGENGC_ESTIMATE_OLDMALLOC
10374  S(oldmalloc);
10375 #endif
10376  S(newobj);
10377  S(malloc);
10378  S(method);
10379  S(capi);
10380 
10381  S(none);
10382  S(marking);
10383  S(sweeping);
10384 #undef S
10385  }
10386 
10387 #define SET(name, attr) \
10388  if (key == sym_##name) \
10389  return (attr); \
10390  else if (hash != Qnil) \
10391  rb_hash_aset(hash, sym_##name, (attr));
10392 
10393  major_by =
10394  (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
10395  (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
10396  (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
10397  (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
10398 #if RGENGC_ESTIMATE_OLDMALLOC
10399  (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
10400 #endif
10401  Qnil;
10402  SET(major_by, major_by);
10403 
10404  SET(gc_by,
10405  (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
10406  (flags & GPR_FLAG_MALLOC) ? sym_malloc :
10407  (flags & GPR_FLAG_METHOD) ? sym_method :
10408  (flags & GPR_FLAG_CAPI) ? sym_capi :
10409  (flags & GPR_FLAG_STRESS) ? sym_stress :
10410  Qnil
10411  );
10412 
10413  SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
10414  SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));
10415 
10416  if (orig_flags == 0) {
10417  SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
10418  gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
10419  }
10420 #undef SET
10421 
10422  if (!NIL_P(key)) {/* matched key should return above */
10423  rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
10424  }
10425 
10426  return hash;
10427 }
10428 
10429 VALUE
10430 rb_gc_latest_gc_info(VALUE key)
10431 {
10432  rb_objspace_t *objspace = &rb_objspace;
10433  return gc_info_decode(objspace, key, 0);
10434 }
10435 
10436 static VALUE
10437 gc_latest_gc_info(rb_execution_context_t *ec, VALUE self, VALUE arg)
10438 {
10439  rb_objspace_t *objspace = &rb_objspace;
10440 
10441  if (NIL_P(arg)) {
10442  arg = rb_hash_new();
10443  }
10444  else if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
10445  rb_raise(rb_eTypeError, "non-hash or symbol given");
10446  }
10447 
10448  return gc_info_decode(objspace, arg, 0);
10449 }
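/* Illustrative sketch (not compiled; hypothetical function name). A single
 * field of the latest GC info can be fetched from C by passing a symbol key,
 * mirroring the decoding in gc_info_decode() above. */
#if 0
static VALUE
example_latest_major_by(void)
{
    /* returns e.g. :nofree, :oldgen, :shady, :force, or nil */
    return rb_gc_latest_gc_info(ID2SYM(rb_intern("major_by")));
}
#endif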
10450 
10451 enum gc_stat_sym {
10452  gc_stat_sym_count,
10453  gc_stat_sym_time,
10454  gc_stat_sym_heap_allocated_pages,
10455  gc_stat_sym_heap_sorted_length,
10456  gc_stat_sym_heap_allocatable_pages,
10457  gc_stat_sym_heap_available_slots,
10458  gc_stat_sym_heap_live_slots,
10459  gc_stat_sym_heap_free_slots,
10460  gc_stat_sym_heap_final_slots,
10461  gc_stat_sym_heap_marked_slots,
10462  gc_stat_sym_heap_eden_pages,
10463  gc_stat_sym_heap_tomb_pages,
10464  gc_stat_sym_total_allocated_pages,
10465  gc_stat_sym_total_freed_pages,
10466  gc_stat_sym_total_allocated_objects,
10467  gc_stat_sym_total_freed_objects,
10468  gc_stat_sym_malloc_increase_bytes,
10469  gc_stat_sym_malloc_increase_bytes_limit,
10470  gc_stat_sym_minor_gc_count,
10471  gc_stat_sym_major_gc_count,
10472  gc_stat_sym_compact_count,
10473  gc_stat_sym_read_barrier_faults,
10474  gc_stat_sym_total_moved_objects,
10475  gc_stat_sym_remembered_wb_unprotected_objects,
10476  gc_stat_sym_remembered_wb_unprotected_objects_limit,
10477  gc_stat_sym_old_objects,
10478  gc_stat_sym_old_objects_limit,
10479 #if RGENGC_ESTIMATE_OLDMALLOC
10480  gc_stat_sym_oldmalloc_increase_bytes,
10481  gc_stat_sym_oldmalloc_increase_bytes_limit,
10482 #endif
10483 #if RGENGC_PROFILE
10484  gc_stat_sym_total_generated_normal_object_count,
10485  gc_stat_sym_total_generated_shady_object_count,
10486  gc_stat_sym_total_shade_operation_count,
10487  gc_stat_sym_total_promoted_count,
10488  gc_stat_sym_total_remembered_normal_object_count,
10489  gc_stat_sym_total_remembered_shady_object_count,
10490 #endif
10491  gc_stat_sym_last
10492 };
10493 
10494 static VALUE gc_stat_symbols[gc_stat_sym_last];
10495 
10496 static void
10497 setup_gc_stat_symbols(void)
10498 {
10499  if (gc_stat_symbols[0] == 0) {
10500 #define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
10501  S(count);
10502  S(time);
10503  S(heap_allocated_pages);
10504  S(heap_sorted_length);
10505  S(heap_allocatable_pages);
10506  S(heap_available_slots);
10507  S(heap_live_slots);
10508  S(heap_free_slots);
10509  S(heap_final_slots);
10510  S(heap_marked_slots);
10511  S(heap_eden_pages);
10512  S(heap_tomb_pages);
10513  S(total_allocated_pages);
10514  S(total_freed_pages);
10515  S(total_allocated_objects);
10516  S(total_freed_objects);
10517  S(malloc_increase_bytes);
10518  S(malloc_increase_bytes_limit);
10519  S(minor_gc_count);
10520  S(major_gc_count);
10521  S(compact_count);
10522  S(read_barrier_faults);
10523  S(total_moved_objects);
10524  S(remembered_wb_unprotected_objects);
10525  S(remembered_wb_unprotected_objects_limit);
10526  S(old_objects);
10527  S(old_objects_limit);
10528 #if RGENGC_ESTIMATE_OLDMALLOC
10529  S(oldmalloc_increase_bytes);
10530  S(oldmalloc_increase_bytes_limit);
10531 #endif
10532 #if RGENGC_PROFILE
10533  S(total_generated_normal_object_count);
10534  S(total_generated_shady_object_count);
10535  S(total_shade_operation_count);
10536  S(total_promoted_count);
10537  S(total_remembered_normal_object_count);
10538  S(total_remembered_shady_object_count);
10539 #endif /* RGENGC_PROFILE */
10540 #undef S
10541  }
10542 }
10543 
10544 static size_t
10545 gc_stat_internal(VALUE hash_or_sym)
10546 {
10547  rb_objspace_t *objspace = &rb_objspace;
10548  VALUE hash = Qnil, key = Qnil;
10549 
10550  setup_gc_stat_symbols();
10551 
10552  if (RB_TYPE_P(hash_or_sym, T_HASH)) {
10553  hash = hash_or_sym;
10554  }
10555  else if (SYMBOL_P(hash_or_sym)) {
10556  key = hash_or_sym;
10557  }
10558  else {
10559  rb_raise(rb_eTypeError, "non-hash or symbol argument");
10560  }
10561 
10562 #define SET(name, attr) \
10563  if (key == gc_stat_symbols[gc_stat_sym_##name]) \
10564  return attr; \
10565  else if (hash != Qnil) \
10566  rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
10567 
10568  SET(count, objspace->profile.count);
10569  SET(time, (size_t) (objspace->profile.total_time_ns / (1000 * 1000) /* ns -> ms */)); // TODO: UINT64T2NUM
10570 
10571  /* implementation dependent counters */
10572  SET(heap_allocated_pages, heap_allocated_pages);
10573  SET(heap_sorted_length, heap_pages_sorted_length);
10574  SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
10575  SET(heap_available_slots, objspace_available_slots(objspace));
10576  SET(heap_live_slots, objspace_live_slots(objspace));
10577  SET(heap_free_slots, objspace_free_slots(objspace));
10578  SET(heap_final_slots, heap_pages_final_slots);
10579  SET(heap_marked_slots, objspace->marked_slots);
10580  SET(heap_eden_pages, heap_eden_total_pages(objspace));
10581  SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
10582  SET(total_allocated_pages, objspace->profile.total_allocated_pages);
10583  SET(total_freed_pages, objspace->profile.total_freed_pages);
10584  SET(total_allocated_objects, objspace->total_allocated_objects);
10585  SET(total_freed_objects, objspace->profile.total_freed_objects);
10586  SET(malloc_increase_bytes, malloc_increase);
10587  SET(malloc_increase_bytes_limit, malloc_limit);
10588  SET(minor_gc_count, objspace->profile.minor_gc_count);
10589  SET(major_gc_count, objspace->profile.major_gc_count);
10590  SET(compact_count, objspace->profile.compact_count);
10591  SET(read_barrier_faults, objspace->profile.read_barrier_faults);
10592  SET(total_moved_objects, objspace->rcompactor.total_moved);
10593  SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
10594  SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
10595  SET(old_objects, objspace->rgengc.old_objects);
10596  SET(old_objects_limit, objspace->rgengc.old_objects_limit);
10597 #if RGENGC_ESTIMATE_OLDMALLOC
10598  SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
10599  SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
10600 #endif
10601 
10602 #if RGENGC_PROFILE
10603  SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
10604  SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
10605  SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
10606  SET(total_promoted_count, objspace->profile.total_promoted_count);
10607  SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
10608  SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
10609 #endif /* RGENGC_PROFILE */
10610 #undef SET
10611 
10612  if (!NIL_P(key)) { /* matched key should return above */
10613  rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
10614  }
10615 
10616 #if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
10617  if (hash != Qnil) {
10618  gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
10619  gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
10620  gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
10621  gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
10622  gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
10623  gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
10624  }
10625 #endif
10626 
10627  return 0;
10628 }
10629 
10630 static VALUE
10631 gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
10632 {
10633  if (NIL_P(arg)) {
10634  arg = rb_hash_new();
10635  }
10636  else if (SYMBOL_P(arg)) {
10637  size_t value = gc_stat_internal(arg);
10638  return SIZET2NUM(value);
10639  }
10640  else if (RB_TYPE_P(arg, T_HASH)) {
10641  // ok
10642  }
10643  else {
10644  rb_raise(rb_eTypeError, "non-hash or symbol given");
10645  }
10646 
10647  gc_stat_internal(arg);
10648  return arg;
10649 }
10650 
10651 size_t
10652 rb_gc_stat(VALUE key)
10653 {
10654  if (SYMBOL_P(key)) {
10655  size_t value = gc_stat_internal(key);
10656  return value;
10657  }
10658  else {
10659  gc_stat_internal(key);
10660  return 0;
10661  }
10662 }
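/* Illustrative sketch (not compiled; hypothetical function name). rb_gc_stat()
 * above returns a single counter when given a symbol key; the key names
 * correspond to the gc_stat_sym_* entries. */
#if 0
static size_t
example_minor_gc_count(void)
{
    return rb_gc_stat(ID2SYM(rb_intern("minor_gc_count")));
}
#endif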
10663 
10664 static VALUE
10665 gc_stress_get(rb_execution_context_t *ec, VALUE self)
10666 {
10667  rb_objspace_t *objspace = &rb_objspace;
10668  return ruby_gc_stress_mode;
10669 }
10670 
10671 static void
10672 gc_stress_set(rb_objspace_t *objspace, VALUE flag)
10673 {
10674  objspace->flags.gc_stressful = RTEST(flag);
10675  objspace->gc_stress_mode = flag;
10676 }
10677 
10678 static VALUE
10679 gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
10680 {
10681  rb_objspace_t *objspace = &rb_objspace;
10682  gc_stress_set(objspace, flag);
10683  return flag;
10684 }
10685 
10686 VALUE
10687 rb_gc_enable(void)
10688 {
10689  rb_objspace_t *objspace = &rb_objspace;
10690  return rb_objspace_gc_enable(objspace);
10691 }
10692 
10693 VALUE
10694 rb_objspace_gc_enable(rb_objspace_t *objspace)
10695 {
10696  int old = dont_gc_val();
10697 
10698  dont_gc_off();
10699  return RBOOL(old);
10700 }
10701 
10702 static VALUE
10703 gc_enable(rb_execution_context_t *ec, VALUE _)
10704 {
10705  return rb_gc_enable();
10706 }
10707 
10708 VALUE
10709 rb_gc_disable_no_rest(void)
10710 {
10711  rb_objspace_t *objspace = &rb_objspace;
10712  return gc_disable_no_rest(objspace);
10713 }
10714 
10715 static VALUE
10716 gc_disable_no_rest(rb_objspace_t *objspace)
10717 {
10718  int old = dont_gc_val();
10719  dont_gc_on();
10720  return RBOOL(old);
10721 }
10722 
10723 VALUE
10724 rb_gc_disable(void)
10725 {
10726  rb_objspace_t *objspace = &rb_objspace;
10727  return rb_objspace_gc_disable(objspace);
10728 }
10729 
10730 VALUE
10731 rb_objspace_gc_disable(rb_objspace_t *objspace)
10732 {
10733  gc_rest(objspace);
10734  return gc_disable_no_rest(objspace);
10735 }
10736 
10737 static VALUE
10738 gc_disable(rb_execution_context_t *ec, VALUE _)
10739 {
10740  return rb_gc_disable();
10741 }
10742 
10743 static VALUE
10744 gc_set_auto_compact(rb_execution_context_t *ec, VALUE _, VALUE v)
10745 {
10746  /* Outside MinGW and Windows, a platform without mmap-based aligned allocation
10747  * cannot use mprotect for the read barrier, so automatic compaction must be disabled. */
10748 #if !defined(__MINGW32__) && !defined(_WIN32)
10749  if (!USE_MMAP_ALIGNED_ALLOC) {
10750  rb_raise(rb_eNotImpError, "Automatic compaction isn't available on this platform");
10751  }
10752 #endif
10753 
10754  ruby_enable_autocompact = RTEST(v);
10755  return v;
10756 }
10757 
10758 static VALUE
10759 gc_get_auto_compact(rb_execution_context_t *ec, VALUE _)
10760 {
10761  return RBOOL(ruby_enable_autocompact);
10762 }
10763 
10764 static int
10765 get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
10766 {
10767  const char *ptr = getenv(name);
10768  ssize_t val;
10769 
10770  if (ptr != NULL && *ptr) {
10771  size_t unit = 0;
10772  char *end;
10773 #if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
10774  val = strtoll(ptr, &end, 0);
10775 #else
10776  val = strtol(ptr, &end, 0);
10777 #endif
10778  switch (*end) {
10779  case 'k': case 'K':
10780  unit = 1024;
10781  ++end;
10782  break;
10783  case 'm': case 'M':
10784  unit = 1024*1024;
10785  ++end;
10786  break;
10787  case 'g': case 'G':
10788  unit = 1024*1024*1024;
10789  ++end;
10790  break;
10791  }
10792  while (*end && isspace((unsigned char)*end)) end++;
10793  if (*end) {
10794  if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
10795  return 0;
10796  }
10797  if (unit > 0) {
10798  if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
10799  if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
10800  return 0;
10801  }
10802  val *= unit;
10803  }
10804  if (val > 0 && (size_t)val > lower_bound) {
10805  if (RTEST(ruby_verbose)) {
10806  fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
10807  }
10808  *default_value = (size_t)val;
10809  return 1;
10810  }
10811  else {
10812  if (RTEST(ruby_verbose)) {
10813  fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
10814  name, val, *default_value, lower_bound);
10815  }
10816  return 0;
10817  }
10818  }
10819  return 0;
10820 }
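/* Illustrative example (hypothetical value): get_envparam_size() accepts
 * optional k/K, m/M and g/G suffixes, so e.g. RUBY_GC_MALLOC_LIMIT=16m is read
 * as 16 * 1024 * 1024 bytes; any trailing junk after the suffix causes the
 * value to be ignored (with a message in verbose mode). */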
10821 
10822 static int
10823 get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
10824 {
10825  const char *ptr = getenv(name);
10826  double val;
10827 
10828  if (ptr != NULL && *ptr) {
10829  char *end;
10830  val = strtod(ptr, &end);
10831  if (!*ptr || *end) {
10832  if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
10833  return 0;
10834  }
10835 
10836  if (accept_zero && val == 0.0) {
10837  goto accept;
10838  }
10839  else if (val <= lower_bound) {
10840  if (RTEST(ruby_verbose)) {
10841  fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
10842  name, val, *default_value, lower_bound);
10843  }
10844  }
10845  else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
10846  val > upper_bound) {
10847  if (RTEST(ruby_verbose)) {
10848  fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
10849  name, val, *default_value, upper_bound);
10850  }
10851  }
10852  else {
10853  goto accept;
10854  }
10855  }
10856  return 0;
10857 
10858  accept:
10859  if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
10860  *default_value = val;
10861  return 1;
10862 }
10863 
10864 static void
10865 gc_set_initial_pages(rb_objspace_t *objspace)
10866 {
10867  gc_rest(objspace);
10868 
10869  for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10870  rb_size_pool_t *size_pool = &size_pools[i];
10871 
10872  if (gc_params.heap_init_slots > size_pool->eden_heap.total_slots) {
10873  size_t slots = gc_params.heap_init_slots - size_pool->eden_heap.total_slots;
10874  int multiple = size_pool->slot_size / sizeof(RVALUE);
10875  size_pool->allocatable_pages = slots * multiple / HEAP_PAGE_OBJ_LIMIT;
10876  }
10877  else {
10878  /* We already have more slots than heap_init_slots allows, so
10879  * prevent creating more pages. */
10880  size_pool->allocatable_pages = 0;
10881  }
10882  }
10883  heap_pages_expand_sorted(objspace);
10884 }
10885 
10886 /*
10887  * GC tuning environment variables
10888  *
10889  * * RUBY_GC_HEAP_INIT_SLOTS
10890  * - Initial allocation slots.
10891  * * RUBY_GC_HEAP_FREE_SLOTS
10892  * - Prepare at least this number of slots after GC.
10893  * - Allocate slots if there are not enough slots.
10894  * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
10895  * - Allocate slots by this factor.
10896  * - (next slots number) = (current slots number) * (this factor)
10897  * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
10898  * - Allocation rate is limited to this number of slots.
10899  * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
10900  * - Allocate additional pages when the number of free slots is
10901  * lower than the value (total_slots * (this ratio)).
10902  * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
10903  * - Allocate slots to satisfy this formula:
10904  * free_slots = total_slots * goal_ratio
10905  * - In other words, prepare (total_slots * goal_ratio) free slots.
10906  * - if this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
10907  * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
10908  * - Allow pages to be freed when the number of free slots is
10909  * greater than the value (total_slots * (this ratio)).
10910  * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
10911  * - Do full GC when the number of old objects is more than R * N
10912  * where R is this factor and
10913  * N is the number of old objects just after last full GC.
10914  *
10915  * * obsolete
10916  * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
10917  * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
10918  *
10919  * * RUBY_GC_MALLOC_LIMIT
10920  * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
10921  * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
10922  *
10923  * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
10924  * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
10925  * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
10926  */
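/* Illustrative example (hypothetical values): a process might combine several
 * of the variables above, e.g.
 *
 *   RUBY_GC_HEAP_INIT_SLOTS=600000 \
 *   RUBY_GC_HEAP_GROWTH_FACTOR=1.25 \
 *   RUBY_GC_MALLOC_LIMIT=64m ruby app.rb
 *
 * ruby_gc_set_params() below shows how each variable is read and
 * bounds-checked. */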
10927 
10928 void
10929 ruby_gc_set_params(void)
10930 {
10931  rb_objspace_t *objspace = &rb_objspace;
10932  /* RUBY_GC_HEAP_FREE_SLOTS */
10933  if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
10934  /* ok */
10935  }
10936 
10937  /* RUBY_GC_HEAP_INIT_SLOTS */
10938  if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
10939  gc_set_initial_pages(objspace);
10940  }
10941 
10942  get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
10943  get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
10944  get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
10945  0.0, 1.0, FALSE);
10946  get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
10947  gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
10948  get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
10949  gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
10950  get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
10951 
10952  get_envparam_size ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
10953  get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
10954  if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
10955  gc_params.malloc_limit_max = SIZE_MAX;
10956  }
10957  get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
10958 
10959 #if RGENGC_ESTIMATE_OLDMALLOC
10960  if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
10961  rb_objspace_t *objspace = &rb_objspace;
10962  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
10963  }
10964  get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
10965  get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
10966 #endif
10967 }
10968 
10969 static void
10970 reachable_objects_from_callback(VALUE obj)
10971 {
10972  rb_ractor_t *cr = GET_RACTOR();
10973  cr->mfd->mark_func(obj, cr->mfd->data);
10974 }
10975 
10976 void
10977 rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
10978 {
10979  rb_objspace_t *objspace = &rb_objspace;
10980 
10981  if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");
10982 
10983  if (is_markable_object(objspace, obj)) {
10984  rb_ractor_t *cr = GET_RACTOR();
10985  struct gc_mark_func_data_struct mfd = {
10986  .mark_func = func,
10987  .data = data,
10988  }, *prev_mfd = cr->mfd;
10989 
10990  cr->mfd = &mfd;
10991  gc_mark_children(objspace, obj);
10992  cr->mfd = prev_mfd;
10993  }
10994 }
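/* Illustrative sketch (not compiled; hypothetical function names). A debugging
 * extension can enumerate the direct references of an object with the API
 * above; the callback receives each reachable VALUE. */
#if 0
static void
example_print_ref(VALUE ref, void *data)
{
    fprintf(stderr, "  -> %p\n", (void *)ref);
}

static void
example_dump_refs(VALUE obj)
{
    rb_objspace_reachable_objects_from(obj, example_print_ref, NULL);
}
#endif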
10995 
10996 struct root_objects_data {
10997  const char *category;
10998  void (*func)(const char *category, VALUE, void *);
10999  void *data;
11000 };
11001 
11002 static void
11003 root_objects_from(VALUE obj, void *ptr)
11004 {
11005  const struct root_objects_data *data = (struct root_objects_data *)ptr;
11006  (*data->func)(data->category, obj, data->data);
11007 }
11008 
11009 void
11010 rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
11011 {
11012  rb_objspace_t *objspace = &rb_objspace;
11013  objspace_reachable_objects_from_root(objspace, func, passing_data);
11014 }
11015 
11016 static void
11017 objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
11018 {
11019  if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");
11020 
11021  rb_ractor_t *cr = GET_RACTOR();
11022  struct root_objects_data data = {
11023  .func = func,
11024  .data = passing_data,
11025  };
11026  struct gc_mark_func_data_struct mfd = {
11027  .mark_func = root_objects_from,
11028  .data = &data,
11029  }, *prev_mfd = cr->mfd;
11030 
11031  cr->mfd = &mfd;
11032  gc_mark_roots(objspace, &data.category);
11033  cr->mfd = prev_mfd;
11034 }
11035 
11036 /*
11037  ------------------------ Extended allocator ------------------------
11038 */
11039 
11040 struct gc_raise_tag {
11041  VALUE exc;
11042  const char *fmt;
11043  va_list *ap;
11044 };
11045 
11046 static void *
11047 gc_vraise(void *ptr)
11048 {
11049  struct gc_raise_tag *argv = ptr;
11050  rb_vraise(argv->exc, argv->fmt, *argv->ap);
11051  UNREACHABLE_RETURN(NULL);
11052 }
11053 
11054 static void
11055 gc_raise(VALUE exc, const char *fmt, ...)
11056 {
11057  va_list ap;
11058  va_start(ap, fmt);
11059  struct gc_raise_tag argv = {
11060  exc, fmt, &ap,
11061  };
11062 
11063  if (ruby_thread_has_gvl_p()) {
11064  gc_vraise(&argv);
11065  UNREACHABLE;
11066  }
11067  else if (ruby_native_thread_p()) {
11068  rb_thread_call_with_gvl(gc_vraise, &argv);
11069  UNREACHABLE;
11070  }
11071  else {
11072  /* Not in a ruby thread */
11073  fprintf(stderr, "%s", "[FATAL] ");
11074  vfprintf(stderr, fmt, ap);
11075  }
11076 
11077  va_end(ap);
11078  abort();
11079 }
11080 
11081 static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
11082 
11083 static void
11084 negative_size_allocation_error(const char *msg)
11085 {
11086  gc_raise(rb_eNoMemError, "%s", msg);
11087 }
11088 
11089 static void *
11090 ruby_memerror_body(void *dummy)
11091 {
11092  rb_memerror();
11093  return 0;
11094 }
11095 
11096 NORETURN(static void ruby_memerror(void));
11098 static void
11099 ruby_memerror(void)
11100 {
11101  if (ruby_thread_has_gvl_p()) {
11102  rb_memerror();
11103  }
11104  else {
11105  if (ruby_native_thread_p()) {
11106  rb_thread_call_with_gvl(ruby_memerror_body, 0);
11107  }
11108  else {
11109  /* no ruby thread */
11110  fprintf(stderr, "[FATAL] failed to allocate memory\n");
11111  }
11112  }
11113  exit(EXIT_FAILURE);
11114 }
11115 
11116 void
11117 rb_memerror(void)
11118 {
11119  rb_execution_context_t *ec = GET_EC();
11120  rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
11121  VALUE exc;
11122 
11123  if (0) {
11124  // Print out pid, sleep, so you can attach debugger to see what went wrong:
11125  fprintf(stderr, "rb_memerror pid=%"PRI_PIDT_PREFIX"d\n", getpid());
11126  sleep(60);
11127  }
11128 
11129  if (during_gc) {
11130  // TODO: OMG!! How to implement it?
11131  gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
11132  }
11133 
11134  exc = nomem_error;
11135  if (!exc ||
11136  rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
11137  fprintf(stderr, "[FATAL] failed to allocate memory\n");
11138  exit(EXIT_FAILURE);
11139  }
11140  if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
11141  rb_ec_raised_clear(ec);
11142  }
11143  else {
11144  rb_ec_raised_set(ec, RAISED_NOMEMORY);
11145  exc = ruby_vm_special_exception_copy(exc);
11146  }
11147  ec->errinfo = exc;
11148  EC_JUMP_TAG(ec, TAG_RAISE);
11149 }
11150 
11151 void *
11152 rb_aligned_malloc(size_t alignment, size_t size)
11153 {
11154  void *res;
11155 
11156 #if defined __MINGW32__
11157  res = __mingw_aligned_malloc(size, alignment);
11158 #elif defined _WIN32
11159  void *_aligned_malloc(size_t, size_t);
11160  res = _aligned_malloc(size, alignment);
11161 #else
11162  if (USE_MMAP_ALIGNED_ALLOC) {
11163  GC_ASSERT(alignment % sysconf(_SC_PAGE_SIZE) == 0);
11164 
11165  char *ptr = mmap(NULL, alignment + size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
11166  if (ptr == MAP_FAILED) {
11167  return NULL;
11168  }
11169 
11170  char *aligned = ptr + alignment;
11171  aligned -= ((VALUE)aligned & (alignment - 1));
11172  GC_ASSERT(aligned > ptr);
11173  GC_ASSERT(aligned <= ptr + alignment);
11174 
11175  size_t start_out_of_range_size = aligned - ptr;
11176  GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
11177  if (start_out_of_range_size > 0) {
11178  if (munmap(ptr, start_out_of_range_size)) {
11179  rb_bug("rb_aligned_malloc: munmap failed for start");
11180  }
11181  }
11182 
11183  size_t end_out_of_range_size = alignment - start_out_of_range_size;
11184  GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
11185  if (end_out_of_range_size > 0) {
11186  if (munmap(aligned + size, end_out_of_range_size)) {
11187  rb_bug("rb_aligned_malloc: munmap failed for end");
11188  }
11189  }
11190 
11191  res = (void *)aligned;
11192  }
11193  else {
11194 # if defined(HAVE_POSIX_MEMALIGN)
11195  if (posix_memalign(&res, alignment, size) != 0) {
11196  return NULL;
11197  }
11198 # elif defined(HAVE_MEMALIGN)
11199  res = memalign(alignment, size);
11200 # else
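 /* Generic fallback: over-allocate with plain malloc, align the returned
  * pointer by hand, and stash the original malloc pointer in the word just
  * below the aligned block so rb_aligned_free() can recover and free it. */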
11201  char* aligned;
11202  res = malloc(alignment + size + sizeof(void*));
11203  aligned = (char*)res + alignment + sizeof(void*);
11204  aligned -= ((VALUE)aligned & (alignment - 1));
11205  ((void**)aligned)[-1] = res;
11206  res = (void*)aligned;
11207 # endif
11208  }
11209 #endif
11210 
11211  /* alignment must be a power of 2 */
11212  GC_ASSERT(((alignment - 1) & alignment) == 0);
11213  GC_ASSERT(alignment % sizeof(void*) == 0);
11214  return res;
11215 }
11216 
11217 static void
11218 rb_aligned_free(void *ptr, size_t size)
11219 {
11220 #if defined __MINGW32__
11221  __mingw_aligned_free(ptr);
11222 #elif defined _WIN32
11223  _aligned_free(ptr);
11224 #else
11225  if (USE_MMAP_ALIGNED_ALLOC) {
11226  GC_ASSERT(size % sysconf(_SC_PAGE_SIZE) == 0);
11227  if (munmap(ptr, size)) {
11228  rb_bug("rb_aligned_free: munmap failed");
11229  }
11230  }
11231  else {
11232 # if defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
11233  free(ptr);
11234 # else
11235  free(((void**)ptr)[-1]);
11236 # endif
11237  }
11238 #endif
11239 }
11240 
11241 static inline size_t
11242 objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
11243 {
11244 #ifdef HAVE_MALLOC_USABLE_SIZE
11245  return malloc_usable_size(ptr);
11246 #else
11247  return hint;
11248 #endif
11249 }
11250 
11251 enum memop_type {
11252  MEMOP_TYPE_MALLOC = 0,
11253  MEMOP_TYPE_FREE,
11254  MEMOP_TYPE_REALLOC
11255 };
11256 
11257 static inline void
11258 atomic_sub_nounderflow(size_t *var, size_t sub)
11259 {
11260  if (sub == 0) return;
11261 
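 /* CAS loop: clamp the decrement to the current value so the counter never
  * wraps below zero, retrying until the compare-and-swap succeeds. */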
11262  while (1) {
11263  size_t val = *var;
11264  if (val < sub) sub = val;
11265  if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
11266  }
11267 }
11268 
11269 static void
11270 objspace_malloc_gc_stress(rb_objspace_t *objspace)
11271 {
11272  if (ruby_gc_stressful && ruby_native_thread_p()) {
11273  unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
11274  GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
11275 
11276  if (gc_stress_full_mark_after_malloc_p()) {
11277  reason |= GPR_FLAG_FULL_MARK;
11278  }
11279  garbage_collect_with_gvl(objspace, reason);
11280  }
11281 }
11282 
11283 static inline bool
11284 objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
11285 {
11286  if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
11287  mem,
11288  type == MEMOP_TYPE_MALLOC ? "malloc" :
11289  type == MEMOP_TYPE_FREE ? "free " :
11290  type == MEMOP_TYPE_REALLOC ? "realloc": "error",
11291  new_size, old_size);
11292  return false;
11293 }
11294 
11295 static bool
11296 objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
11297 {
11298  if (new_size > old_size) {
11299  ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
11300 #if RGENGC_ESTIMATE_OLDMALLOC
11301  ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
11302 #endif
11303  }
11304  else {
11305  atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
11306 #if RGENGC_ESTIMATE_OLDMALLOC
11307  atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
11308 #endif
11309  }
11310 
11311  if (type == MEMOP_TYPE_MALLOC) {
11312  retry:
11313  if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
11314  if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
11315  gc_rest(objspace); /* gc_rest can reduce malloc_increase */
11316  goto retry;
11317  }
11318  garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
11319  }
11320  }
11321 
11322 #if MALLOC_ALLOCATED_SIZE
11323  if (new_size >= old_size) {
11324  ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
11325  }
11326  else {
11327  size_t dec_size = old_size - new_size;
11328  size_t allocated_size = objspace->malloc_params.allocated_size;
11329 
11330 #if MALLOC_ALLOCATED_SIZE_CHECK
11331  if (allocated_size < dec_size) {
11332  rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
11333  }
11334 #endif
11335  atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
11336  }
11337 
11338  switch (type) {
11339  case MEMOP_TYPE_MALLOC:
11340  ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
11341  break;
11342  case MEMOP_TYPE_FREE:
11343  {
11344  size_t allocations = objspace->malloc_params.allocations;
11345  if (allocations > 0) {
11346  atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
11347  }
11348 #if MALLOC_ALLOCATED_SIZE_CHECK
11349  else {
11350  GC_ASSERT(objspace->malloc_params.allocations > 0);
11351  }
11352 #endif
11353  }
11354  break;
11355  case MEMOP_TYPE_REALLOC: /* ignore */ break;
11356  }
11357 #endif
11358  return true;
11359 }
11360 
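/* Runs the attached statement (or block) exactly once: the report call is a
 * no-op that returns false, the block executes as the loop body, and
 * objspace_malloc_increase_body() then performs the actual accounting and
 * terminates the loop. */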
11361 #define objspace_malloc_increase(...) \
11362  for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
11363  !malloc_increase_done; \
11364  malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
11365 
11366 struct malloc_obj_info { /* 4 words */
11367  size_t size;
11368 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11369  size_t gen;
11370  const char *file;
11371  size_t line;
11372 #endif
11373 };
11374 
11375 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11376 const char *ruby_malloc_info_file;
11377 int ruby_malloc_info_line;
11378 #endif
11379 
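/* With CALC_EXACT_MALLOC_SIZE, every xmalloc'ed block is prefixed by a
 * struct malloc_obj_info header; the pointer handed back to callers points
 * just past it (see objspace_malloc_prepare/objspace_malloc_fixup). */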
11380 static inline size_t
11381 objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
11382 {
11383  if (size == 0) size = 1;
11384 
11385 #if CALC_EXACT_MALLOC_SIZE
11386  size += sizeof(struct malloc_obj_info);
11387 #endif
11388 
11389  return size;
11390 }
11391 
11392 static inline void *
11393 objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
11394 {
11395  size = objspace_malloc_size(objspace, mem, size);
11396  objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
11397 
11398 #if CALC_EXACT_MALLOC_SIZE
11399  {
11400  struct malloc_obj_info *info = mem;
11401  info->size = size;
11402 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11403  info->gen = objspace->profile.count;
11404  info->file = ruby_malloc_info_file;
11405  info->line = info->file ? ruby_malloc_info_line : 0;
11406 #endif
11407  mem = info + 1;
11408  }
11409 #endif
11410 
11411  return mem;
11412 }
11413 
11414 #if defined(__GNUC__) && RUBY_DEBUG
11415 #define RB_BUG_INSTEAD_OF_RB_MEMERROR
11416 #endif
11417 
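/* TRY_WITH_GC: attempt the allocation; if it fails, run a full immediate GC
 * and retry once.  A second failure aborts via rb_bug() in debug builds, or
 * reports out-of-memory via ruby_memerror() otherwise. */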
11418 #ifdef RB_BUG_INSTEAD_OF_RB_MEMERROR
11419 #define TRY_WITH_GC(siz, expr) do { \
11420  const gc_profile_record_flag gpr = \
11421  GPR_FLAG_FULL_MARK | \
11422  GPR_FLAG_IMMEDIATE_MARK | \
11423  GPR_FLAG_IMMEDIATE_SWEEP | \
11424  GPR_FLAG_MALLOC; \
11425  objspace_malloc_gc_stress(objspace); \
11426  \
11427  if (LIKELY((expr))) { \
11428  /* Success on 1st try */ \
11429  } \
11430  else if (!garbage_collect_with_gvl(objspace, gpr)) { \
11431  /* @shyouhei thinks this doesn't happen */ \
11432  rb_bug("TRY_WITH_GC: could not GC"); \
11433  } \
11434  else if ((expr)) { \
11435  /* Success on 2nd try */ \
11436  } \
11437  else { \
11438  rb_bug("TRY_WITH_GC: could not allocate:" \
11439  "%"PRIdSIZE" bytes for %s", \
11440  siz, # expr); \
11441  } \
11442  } while (0)
11443 #else
11444 #define TRY_WITH_GC(siz, alloc) do { \
11445  objspace_malloc_gc_stress(objspace); \
11446  if (!(alloc) && \
11447  (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
11448  GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
11449  GPR_FLAG_MALLOC) || \
11450  !(alloc))) { \
11451  ruby_memerror(); \
11452  } \
11453  } while (0)
11454 #endif
11455 
11456 /* these shouldn't be called directly.
11457  * objspace_* functions do not check allocation size.
11458  */
11459 static void *
11460 objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
11461 {
11462  void *mem;
11463 
11464  size = objspace_malloc_prepare(objspace, size);
11465  TRY_WITH_GC(size, mem = malloc(size));
11466  RB_DEBUG_COUNTER_INC(heap_xmalloc);
11467  return objspace_malloc_fixup(objspace, mem, size);
11468 }
11469 
11470 static inline size_t
11471 xmalloc2_size(const size_t count, const size_t elsize)
11472 {
11473  return size_mul_or_raise(count, elsize, rb_eArgError);
11474 }
11475 
11476 static void *
11477 objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
11478 {
11479  void *mem;
11480 
11481  if (!ptr) return objspace_xmalloc0(objspace, new_size);
11482 
11483  /*
11484  * The behavior of realloc(ptr, 0) is implementation defined.
11485  * Therefore we don't use realloc(ptr, 0), for portability reasons.
11486  * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
11487  */
11488  if (new_size == 0) {
11489  if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
11490  /*
11491  * - OpenBSD's malloc(3) man page says that when 0 is passed, it
11492  * returns a non-NULL pointer to an access-protected memory page.
11493  * The returned pointer cannot be read / written at all, but
11494  * still be a valid argument of free().
11495  *
11496  * https://man.openbsd.org/malloc.3
11497  *
11498  * - Linux's malloc(3) man page says that it _might_ return
11499  * a non-NULL pointer when its argument is 0. That return value
11500  * is safe (and is expected) to be passed to free().
11501  *
11502  * http://man7.org/linux/man-pages/man3/malloc.3.html
11503  *
11504  * - Reading the implementation, jemalloc's malloc() returns a
11505  * normal 16-byte memory region when its argument is 0.
11506  *
11507  * - Reading the implementation, musl libc's malloc() returns a
11508  * normal 32-byte memory region when its argument is 0.
11509  *
11510  * - Other malloc implementations can also return non-NULL.
11511  */
11512  objspace_xfree(objspace, ptr, old_size);
11513  return mem;
11514  }
11515  else {
11516  /*
11517  * It is dangerous to return NULL here, because that could lead to
11518  * RCE. Fall back to 1 byte instead of zero.
11519  *
11520  * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
11521  */
11522  new_size = 1;
11523  }
11524  }
11525 
11526 #if CALC_EXACT_MALLOC_SIZE
11527  {
11528  struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
11529  new_size += sizeof(struct malloc_obj_info);
11530  ptr = info;
11531  old_size = info->size;
11532  }
11533 #endif
11534 
11535  old_size = objspace_malloc_size(objspace, ptr, old_size);
11536  TRY_WITH_GC(new_size, mem = realloc(ptr, new_size));
11537  new_size = objspace_malloc_size(objspace, mem, new_size);
11538 
11539 #if CALC_EXACT_MALLOC_SIZE
11540  {
11541  struct malloc_obj_info *info = mem;
11542  info->size = new_size;
11543  mem = info + 1;
11544  }
11545 #endif
11546 
11547  objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
11548 
11549  RB_DEBUG_COUNTER_INC(heap_xrealloc);
11550  return mem;
11551 }
11552 
11553 #if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
11554 
11555 #define MALLOC_INFO_GEN_SIZE 100
11556 #define MALLOC_INFO_SIZE_SIZE 10
11557 static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
11558 static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
11559 static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
11560 static st_table *malloc_info_file_table;
11561 
11562 static int
11563 mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
11564 {
11565  const char *file = (void *)key;
11566  const size_t *data = (void *)val;
11567 
11568  fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);
11569 
11570  return ST_CONTINUE;
11571 }
11572 
11573 __attribute__((destructor))
11574 void
11575 rb_malloc_info_show_results(void)
11576 {
11577  int i;
11578 
11579  fprintf(stderr, "* malloc_info gen statistics\n");
11580  for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
11581  if (i == MALLOC_INFO_GEN_SIZE-1) {
11582  fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
11583  }
11584  else {
11585  fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
11586  }
11587  }
11588 
11589  fprintf(stderr, "* malloc_info size statistics\n");
11590  for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
11591  int s = 16 << i;
11592  fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
11593  }
11594  fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);
11595 
11596  if (malloc_info_file_table) {
11597  fprintf(stderr, "* malloc_info file statistics\n");
11598  st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
11599  }
11600 }
11601 #else
11602 void
11603 rb_malloc_info_show_results(void)
11604 {
11605 }
11606 #endif
11607 
11608 static void
11609 objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
11610 {
11611  if (!ptr) {
11612  /*
11613  * ISO/IEC 9899 has said "If ptr is a null pointer, no action occurs" since
11614  * its first version. We had better follow suit.
11615  */
11616  return;
11617  }
11618 #if CALC_EXACT_MALLOC_SIZE
11619  struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
11620  ptr = info;
11621  old_size = info->size;
11622 
11623 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11624  {
11625  int gen = (int)(objspace->profile.count - info->gen);
11626  int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
11627  int i;
11628 
11629  malloc_info_gen_cnt[gen_index]++;
11630  malloc_info_gen_size[gen_index] += info->size;
11631 
11632  for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
11633  size_t s = 16 << i;
11634  if (info->size <= s) {
11635  malloc_info_size[i]++;
11636  goto found;
11637  }
11638  }
11639  malloc_info_size[i]++;
11640  found:;
11641 
11642  {
11643  st_data_t key = (st_data_t)info->file, d;
11644  size_t *data;
11645 
11646  if (malloc_info_file_table == NULL) {
11647  malloc_info_file_table = st_init_numtable_with_size(1024);
11648  }
11649  if (st_lookup(malloc_info_file_table, key, &d)) {
11650  /* hit */
11651  data = (size_t *)d;
11652  }
11653  else {
11654  data = malloc(xmalloc2_size(2, sizeof(size_t)));
11655  if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
11656  data[0] = data[1] = 0;
11657  st_insert(malloc_info_file_table, key, (st_data_t)data);
11658  }
11659  data[0] ++;
11660  data[1] += info->size;
11661  };
11662  if (0 && gen >= 2) { /* verbose output */
11663  if (info->file) {
11664  fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d, pos: %s:%"PRIdSIZE"\n",
11665  info->size, gen, info->file, info->line);
11666  }
11667  else {
11668  fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d\n",
11669  info->size, gen);
11670  }
11671  }
11672  }
11673 #endif
11674 #endif
11675  old_size = objspace_malloc_size(objspace, ptr, old_size);
11676 
11677  objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
11678  free(ptr);
11679  RB_DEBUG_COUNTER_INC(heap_xfree);
11680  }
11681 }
11682 
11683 static void *
11684 ruby_xmalloc0(size_t size)
11685 {
11686  return objspace_xmalloc0(&rb_objspace, size);
11687 }
11688 
11689 void *
11690 ruby_xmalloc_body(size_t size)
11691 {
11692  if ((ssize_t)size < 0) {
11693  negative_size_allocation_error("too large allocation size");
11694  }
11695  return ruby_xmalloc0(size);
11696 }
11697 
11698 void
11699 ruby_malloc_size_overflow(size_t count, size_t elsize)
11700 {
11701  rb_raise(rb_eArgError,
11702  "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
11703  count, elsize);
11704 }
11705 
11706 void *
11707 ruby_xmalloc2_body(size_t n, size_t size)
11708 {
11709  return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
11710 }
11711 
11712 static void *
11713 objspace_xcalloc(rb_objspace_t *objspace, size_t size)
11714 {
11715  void *mem;
11716 
11717  size = objspace_malloc_prepare(objspace, size);
11718  TRY_WITH_GC(size, mem = calloc1(size));
11719  return objspace_malloc_fixup(objspace, mem, size);
11720 }
11721 
11722 void *
11723 ruby_xcalloc_body(size_t n, size_t size)
11724 {
11725  return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
11726 }
11727 
11728 #ifdef ruby_sized_xrealloc
11729 #undef ruby_sized_xrealloc
11730 #endif
11731 void *
11732 ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
11733 {
11734  if ((ssize_t)new_size < 0) {
11735  negative_size_allocation_error("too large allocation size");
11736  }
11737 
11738  return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
11739 }
11740 
11741 void *
11742 ruby_xrealloc_body(void *ptr, size_t new_size)
11743 {
11744  return ruby_sized_xrealloc(ptr, new_size, 0);
11745 }
11746 
11747 #ifdef ruby_sized_xrealloc2
11748 #undef ruby_sized_xrealloc2
11749 #endif
11750 void *
11751 ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
11752 {
11753  size_t len = xmalloc2_size(n, size);
11754  return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
11755 }
11756 
11757 void *
11758 ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
11759 {
11760  return ruby_sized_xrealloc2(ptr, n, size, 0);
11761 }
11762 
11763 #ifdef ruby_sized_xfree
11764 #undef ruby_sized_xfree
11765 #endif
11766 void
11767 ruby_sized_xfree(void *x, size_t size)
11768 {
11769  if (x) {
11770  objspace_xfree(&rb_objspace, x, size);
11771  }
11772 }
11773 
11774 void
11775 ruby_xfree(void *x)
11776 {
11777  ruby_sized_xfree(x, 0);
11778 }
11779 
11780 void *
11781 rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
11782 {
11783  size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
11784  return ruby_xmalloc(w);
11785 }
11786 
11787 void *
11788 rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
11789 {
11790  size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
11791  return ruby_xrealloc((void *)p, w);
11792 }
11793 
11794 void *
11795 rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
11796 {
11797  size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
11798  return ruby_xmalloc(u);
11799 }
11800 
11801 void *
11802 rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
11803 {
11804  size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
11805  return ruby_xcalloc(u, 1);
11806 }
11807 
11808 /* Mimics ruby_xmalloc, but does not need rb_objspace.
11809  * Should return a pointer suitable for ruby_xfree.
11810  */
11811 void *
11812 ruby_mimmalloc(size_t size)
11813 {
11814  void *mem;
11815 #if CALC_EXACT_MALLOC_SIZE
11816  size += sizeof(struct malloc_obj_info);
11817 #endif
11818  mem = malloc(size);
11819 #if CALC_EXACT_MALLOC_SIZE
11820  if (!mem) {
11821  return NULL;
11822  }
11823  else
11824  /* set 0 for consistency of allocated_size/allocations */
11825  {
11826  struct malloc_obj_info *info = mem;
11827  info->size = 0;
11828 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11829  info->gen = 0;
11830  info->file = NULL;
11831  info->line = 0;
11832 #endif
11833  mem = info + 1;
11834  }
11835 #endif
11836  return mem;
11837 }
11838 
11839 void
11840 ruby_mimfree(void *ptr)
11841 {
11842 #if CALC_EXACT_MALLOC_SIZE
11843  struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
11844  ptr = info;
11845 #endif
11846  free(ptr);
11847 }
11848 
11849 void *
11850 rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
11851 {
11852  void *ptr;
11853  VALUE imemo;
11854  rb_imemo_tmpbuf_t *tmpbuf;
11855 
11856  /* Keep the order: allocate an empty imemo first, then xmalloc, to
11857  * avoid a potential memory leak */
11858  imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
11859  *store = imemo;
11860  ptr = ruby_xmalloc0(size);
11861  tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
11862  tmpbuf->ptr = ptr;
11863  tmpbuf->cnt = cnt;
11864  return ptr;
11865 }
11866 
11867 void *
11868 rb_alloc_tmp_buffer(volatile VALUE *store, long len)
11869 {
11870  long cnt;
11871 
11872  if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
11873  rb_raise(rb_eArgError, "negative buffer size (or size too big)");
11874  }
11875 
11876  return rb_alloc_tmp_buffer_with_count(store, len, cnt);
11877 }
11878 
11879 void
11880 rb_free_tmp_buffer(volatile VALUE *store)
11881 {
11882  rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
11883  if (s) {
11884  void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
11885  s->cnt = 0;
11886  ruby_xfree(ptr);
11887  }
11888 }
11889 
11890 #if MALLOC_ALLOCATED_SIZE
11891 /*
11892  * call-seq:
11893  * GC.malloc_allocated_size -> Integer
11894  *
11895  * Returns the size of memory allocated by malloc().
11896  *
11897  * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
11898  */
11899 
11900 static VALUE
11901 gc_malloc_allocated_size(VALUE self)
11902 {
11903  return UINT2NUM(rb_objspace.malloc_params.allocated_size);
11904 }
11905 
11906 /*
11907  * call-seq:
11908  * GC.malloc_allocations -> Integer
11909  *
11910  * Returns the number of malloc() allocations.
11911  *
11912  * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
11913  */
11914 
11915 static VALUE
11916 gc_malloc_allocations(VALUE self)
11917 {
11918  return UINT2NUM(rb_objspace.malloc_params.allocations);
11919 }
11920 #endif
11921 
11922 void
11923 rb_gc_adjust_memory_usage(ssize_t diff)
11924 {
11925  rb_objspace_t *objspace = &rb_objspace;
11926  if (diff > 0) {
11927  objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
11928  }
11929  else if (diff < 0) {
11930  objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
11931  }
11932 }
11933 
11934 /*
11935  ------------------------------ WeakMap ------------------------------
11936 */
11937 
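/* ObjectSpace::WeakMap internals: obj2wmap maps each referenced object to a
 * VALUE array whose slot 0 holds the number of weak keys pointing at it, and
 * `final` is the finalizer lambda (wmap_finalize) installed on every key and
 * value so that dead entries are pruned automatically. */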
11938 struct weakmap {
11939  st_table *obj2wmap; /* obj -> [ref,...] */
11940  st_table *wmap2obj; /* ref -> obj */
11941  VALUE final;
11942 };
11943 
11944 #define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
11945 
11946 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
11947 static int
11948 wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
11949 {
11950  rb_objspace_t *objspace = (rb_objspace_t *)arg;
11951  VALUE obj = (VALUE)val;
11952  if (!is_live_object(objspace, obj)) return ST_DELETE;
11953  return ST_CONTINUE;
11954 }
11955 #endif
11956 
11957 static int
11958 wmap_replace_ref(st_data_t *key, st_data_t *value, st_data_t _argp, int existing)
11959 {
11960  *key = rb_gc_location((VALUE)*key);
11961 
11962  VALUE *values = (VALUE *)*value;
11963  VALUE size = values[0];
11964 
11965  for (VALUE index = 1; index <= size; index++) {
11966  values[index] = rb_gc_location(values[index]);
11967  }
11968 
11969  return ST_CONTINUE;
11970 }
11971 
11972 static int
11973 wmap_foreach_replace(st_data_t key, st_data_t value, st_data_t _argp, int error)
11974 {
11975  if (rb_gc_location((VALUE)key) != (VALUE)key) {
11976  return ST_REPLACE;
11977  }
11978 
11979  VALUE *values = (VALUE *)value;
11980  VALUE size = values[0];
11981 
11982  for (VALUE index = 1; index <= size; index++) {
11983  VALUE val = values[index];
11984  if (rb_gc_location(val) != val) {
11985  return ST_REPLACE;
11986  }
11987  }
11988 
11989  return ST_CONTINUE;
11990 }
11991 
11992 static void
11993 wmap_compact(void *ptr)
11994 {
11995  struct weakmap *w = ptr;
11996  if (w->wmap2obj) rb_gc_update_tbl_refs(w->wmap2obj);
11997  if (w->obj2wmap) st_foreach_with_replace(w->obj2wmap, wmap_foreach_replace, wmap_replace_ref, (st_data_t)NULL);
11998  w->final = rb_gc_location(w->final);
11999 }
12000 
12001 static void
12002 wmap_mark(void *ptr)
12003 {
12004  struct weakmap *w = ptr;
12005 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
12006  if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
12007 #endif
12008  rb_gc_mark_movable(w->final);
12009 }
12010 
12011 static int
12012 wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
12013 {
12014  VALUE *ptr = (VALUE *)val;
12015  ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
12016  return ST_CONTINUE;
12017 }
12018 
12019 static void
12020 wmap_free(void *ptr)
12021 {
12022  struct weakmap *w = ptr;
12023  st_foreach(w->obj2wmap, wmap_free_map, 0);
12024  st_free_table(w->obj2wmap);
12025  st_free_table(w->wmap2obj);
12026  xfree(w);
12027 }
12028 
12029 static int
12030 wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
12031 {
12032  VALUE *ptr = (VALUE *)val;
12033  *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
12034  return ST_CONTINUE;
12035 }
12036 
12037 static size_t
12038 wmap_memsize(const void *ptr)
12039 {
12040  size_t size;
12041  const struct weakmap *w = ptr;
12042  size = sizeof(*w);
12043  size += st_memsize(w->obj2wmap);
12044  size += st_memsize(w->wmap2obj);
12045  st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
12046  return size;
12047 }
12048 
12049 static const rb_data_type_t weakmap_type = {
12050  "weakmap",
12051  {
12052  wmap_mark,
12053  wmap_free,
12054  wmap_memsize,
12055  wmap_compact,
12056  },
12057  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
12058 };
12059 
12060 static VALUE wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self));
12061 
12062 static VALUE
12063 wmap_allocate(VALUE klass)
12064 {
12065  struct weakmap *w;
12066  VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
12067  w->obj2wmap = rb_init_identtable();
12068  w->wmap2obj = rb_init_identtable();
12069  w->final = rb_func_lambda_new(wmap_finalize, obj, 1, 1);
12070  return obj;
12071 }
12072 
12073 static int
12074 wmap_live_p(rb_objspace_t *objspace, VALUE obj)
12075 {
12076  if (SPECIAL_CONST_P(obj)) return TRUE;
12077  /* If is_pointer_to_heap returns false, the page could be in the tomb heap
12078  * or have already been freed. */
12079  if (!is_pointer_to_heap(objspace, (void *)obj)) return FALSE;
12080 
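 /* Temporarily lift any ASAN poisoning so BUILTIN_TYPE() can inspect the
  * slot, and restore the poison before returning. */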
12081  void *poisoned = asan_unpoison_object_temporary(obj);
12082 
12083  enum ruby_value_type t = BUILTIN_TYPE(obj);
12084  int ret = (!(t == T_NONE || t >= T_FIXNUM || t == T_ICLASS) &&
12085  is_live_object(objspace, obj));
12086 
12087  if (poisoned) {
12088  asan_poison_object(obj);
12089  }
12090 
12091  return ret;
12092 }
12093 
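/* st_update callback used by wmap_finalize: remove the dead weak reference
 * (arg) from the entry's VALUE array, shrinking the array or deleting the
 * entry entirely once no references remain. */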
12094 static int
12095 wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
12096 {
12097  VALUE wmap, *ptr, size, i, j;
12098  if (!existing) return ST_STOP;
12099  wmap = (VALUE)arg, ptr = (VALUE *)*value;
12100  for (i = j = 1, size = ptr[0]; i <= size; ++i) {
12101  if (ptr[i] != wmap) {
12102  ptr[j++] = ptr[i];
12103  }
12104  }
12105  if (j == 1) {
12106  ruby_sized_xfree(ptr, i * sizeof(VALUE));
12107  return ST_DELETE;
12108  }
12109  if (j < i) {
12110  SIZED_REALLOC_N(ptr, VALUE, j, i);
12111  ptr[0] = j - 1;
12112  *value = (st_data_t)ptr;
12113  }
12114  return ST_CONTINUE;
12115 }
12116 
12117 /* :nodoc: */
12118 static VALUE
12119 wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self))
12120 {
12121  st_data_t orig, wmap, data;
12122  VALUE obj, *rids, i, size;
12123  struct weakmap *w;
12124 
12125  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12126  /* Get reference from object id. */
12127  if ((obj = id2ref_obj_tbl(&rb_objspace, objid)) == Qundef) {
12128  rb_bug("wmap_finalize: objid is not found.");
12129  }
12130 
12131  /* obj is original referenced object and/or weak reference. */
12132  orig = (st_data_t)obj;
12133  if (st_delete(w->obj2wmap, &orig, &data)) {
12134  rids = (VALUE *)data;
12135  size = *rids++;
12136  for (i = 0; i < size; ++i) {
12137  wmap = (st_data_t)rids[i];
12138  st_delete(w->wmap2obj, &wmap, NULL);
12139  }
12140  ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
12141  }
12142 
12143  wmap = (st_data_t)obj;
12144  if (st_delete(w->wmap2obj, &wmap, &orig)) {
12145  wmap = (st_data_t)obj;
12146  st_update(w->obj2wmap, orig, wmap_final_func, wmap);
12147  }
12148  return self;
12149 }
12150 
12151 struct wmap_iter_arg {
12152  rb_objspace_t *objspace;
12153  VALUE value;
12154 };
12155 
12156 static VALUE
12157 wmap_inspect_append(rb_objspace_t *objspace, VALUE str, VALUE obj)
12158 {
12159  if (SPECIAL_CONST_P(obj)) {
12160  return rb_str_append(str, rb_inspect(obj));
12161  }
12162  else if (wmap_live_p(objspace, obj)) {
12163  return rb_str_append(str, rb_any_to_s(obj));
12164  }
12165  else {
12166  return rb_str_catf(str, "#<collected:%p>", (void*)obj);
12167  }
12168 }
12169 
12170 static int
12171 wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
12172 {
12173  struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
12174  rb_objspace_t *objspace = argp->objspace;
12175  VALUE str = argp->value;
12176  VALUE k = (VALUE)key, v = (VALUE)val;
12177 
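 /* wmap_inspect seeds the string with a leading '-'; the first appended
  * entry flips it to '#', so the first byte doubles as a "no entries yet"
  * flag when picking the separator. */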
12178  if (RSTRING_PTR(str)[0] == '#') {
12179  rb_str_cat2(str, ", ");
12180  }
12181  else {
12182  rb_str_cat2(str, ": ");
12183  RSTRING_PTR(str)[0] = '#';
12184  }
12185  wmap_inspect_append(objspace, str, k);
12186  rb_str_cat2(str, " => ");
12187  wmap_inspect_append(objspace, str, v);
12188 
12189  return ST_CONTINUE;
12190 }
12191 
12192 static VALUE
12193 wmap_inspect(VALUE self)
12194 {
12195  VALUE str;
12196  VALUE c = rb_class_name(CLASS_OF(self));
12197  struct weakmap *w;
12198  struct wmap_iter_arg args;
12199 
12200  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12201  str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
12202  if (w->wmap2obj) {
12203  args.objspace = &rb_objspace;
12204  args.value = str;
12205  st_foreach(w->wmap2obj, wmap_inspect_i, (st_data_t)&args);
12206  }
12207  RSTRING_PTR(str)[0] = '#';
12208  rb_str_cat2(str, ">");
12209  return str;
12210 }
12211 
12212 static int
12213 wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
12214 {
12215  rb_objspace_t *objspace = (rb_objspace_t *)arg;
12216  VALUE obj = (VALUE)val;
12217  if (wmap_live_p(objspace, obj)) {
12218  rb_yield_values(2, (VALUE)key, obj);
12219  }
12220  return ST_CONTINUE;
12221 }
12222 
12223 /* Iterates over keys and objects in a weakly referenced object */
12224 static VALUE
12225 wmap_each(VALUE self)
12226 {
12227  struct weakmap *w;
12228  rb_objspace_t *objspace = &rb_objspace;
12229 
12230  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12231  st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
12232  return self;
12233 }
12234 
12235 static int
12236 wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
12237 {
12238  rb_objspace_t *objspace = (rb_objspace_t *)arg;
12239  VALUE obj = (VALUE)val;
12240  if (wmap_live_p(objspace, obj)) {
12241  rb_yield((VALUE)key);
12242  }
12243  return ST_CONTINUE;
12244 }
12245 
12246 /* Iterates over the keys in a weakly referenced object */
12247 static VALUE
12248 wmap_each_key(VALUE self)
12249 {
12250  struct weakmap *w;
12251  rb_objspace_t *objspace = &rb_objspace;
12252 
12253  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12254  st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
12255  return self;
12256 }
12257 
12258 static int
12259 wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
12260 {
12261  rb_objspace_t *objspace = (rb_objspace_t *)arg;
12262  VALUE obj = (VALUE)val;
12263  if (wmap_live_p(objspace, obj)) {
12264  rb_yield(obj);
12265  }
12266  return ST_CONTINUE;
12267 }
12268 
12269 /* Iterates over the values in a weakly referenced object */
12270 static VALUE
12271 wmap_each_value(VALUE self)
12272 {
12273  struct weakmap *w;
12274  rb_objspace_t *objspace = &rb_objspace;
12275 
12276  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12277  st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
12278  return self;
12279 }
12280 
12281 static int
12282 wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
12283 {
12284  struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
12285  rb_objspace_t *objspace = argp->objspace;
12286  VALUE ary = argp->value;
12287  VALUE obj = (VALUE)val;
12288  if (wmap_live_p(objspace, obj)) {
12289  rb_ary_push(ary, (VALUE)key);
12290  }
12291  return ST_CONTINUE;
12292 }
12293 
12294 /* Returns an Array of the live keys in a weakly referenced object */
12295 static VALUE
12296 wmap_keys(VALUE self)
12297 {
12298  struct weakmap *w;
12299  struct wmap_iter_arg args;
12300 
12301  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12302  args.objspace = &rb_objspace;
12303  args.value = rb_ary_new();
12304  st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
12305  return args.value;
12306 }
12307 
12308 static int
12309 wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
12310 {
12311  struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
12312  rb_objspace_t *objspace = argp->objspace;
12313  VALUE ary = argp->value;
12314  VALUE obj = (VALUE)val;
12315  if (wmap_live_p(objspace, obj)) {
12316  rb_ary_push(ary, obj);
12317  }
12318  return ST_CONTINUE;
12319 }
12320 
12321 /* Returns an Array of the live values in a weakly referenced object */
12322 static VALUE
12323 wmap_values(VALUE self)
12324 {
12325  struct weakmap *w;
12326  struct wmap_iter_arg args;
12327 
12328  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12329  args.objspace = &rb_objspace;
12330  args.value = rb_ary_new();
12331  st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
12332  return args.value;
12333 }
12334 
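/* st_update callback for wmap_aset: append the new weak key to the entry's
 * VALUE array (slot 0 is the element count), growing it by one slot, or
 * create a fresh two-slot array for a new entry. */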
12335 static int
12336 wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
12337 {
12338  VALUE size, *ptr, *optr;
12339  if (existing) {
12340  size = (ptr = optr = (VALUE *)*val)[0];
12341  ++size;
12342  SIZED_REALLOC_N(ptr, VALUE, size + 1, size);
12343  }
12344  else {
12345  optr = 0;
12346  size = 1;
12347  ptr = ruby_xmalloc0(2 * sizeof(VALUE));
12348  }
12349  ptr[0] = size;
12350  ptr[size] = (VALUE)arg;
12351  if (ptr == optr) return ST_STOP;
12352  *val = (st_data_t)ptr;
12353  return ST_CONTINUE;
12354 }
12355 
12356 /* Creates a weak reference from the given key to the given value */
12357 static VALUE
12358 wmap_aset(VALUE self, VALUE key, VALUE value)
12359 {
12360  struct weakmap *w;
12361 
12362  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12363  if (FL_ABLE(value)) {
12364  define_final0(value, w->final);
12365  }
12366  if (FL_ABLE(key)) {
12367  define_final0(key, w->final);
12368  }
12369 
12370  st_update(w->obj2wmap, (st_data_t)value, wmap_aset_update, key);
12371  st_insert(w->wmap2obj, (st_data_t)key, (st_data_t)value);
12372  return nonspecial_obj_id(value);
12373 }
12374 
12375 /* Retrieves a weakly referenced object with the given key */
12376 static VALUE
12377 wmap_lookup(VALUE self, VALUE key)
12378 {
12379  st_data_t data;
12380  VALUE obj;
12381  struct weakmap *w;
12382  rb_objspace_t *objspace = &rb_objspace;
12383 
12384  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12385  if (!st_lookup(w->wmap2obj, (st_data_t)key, &data)) return Qundef;
12386  obj = (VALUE)data;
12387  if (!wmap_live_p(objspace, obj)) return Qundef;
12388  return obj;
12389 }
12390 
12391 /* Retrieves a weakly referenced object with the given key */
12392 static VALUE
12393 wmap_aref(VALUE self, VALUE key)
12394 {
12395  VALUE obj = wmap_lookup(self, key);
12396  return obj != Qundef ? obj : Qnil;
12397 }
12398 
12399 /* Returns +true+ if +key+ is registered */
12400 static VALUE
12401 wmap_has_key(VALUE self, VALUE key)
12402 {
12403  return wmap_lookup(self, key) == Qundef ? Qfalse : Qtrue;
12404 }
12405 
12406 /* Returns the number of referenced objects */
12407 static VALUE
12408 wmap_size(VALUE self)
12409 {
12410  struct weakmap *w;
12411  st_index_t n;
12412 
12413  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12414  n = w->wmap2obj->num_entries;
12415 #if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
12416  return ULONG2NUM(n);
12417 #else
12418  return ULL2NUM(n);
12419 #endif
12420 }
12421 
12422 /*
12423  ------------------------------ GC profiler ------------------------------
12424 */
12425 
12426 #define GC_PROFILE_RECORD_DEFAULT_SIZE 100
12427 
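/* Fetch per-process CPU time, preferring clock_gettime(CLOCK_PROCESS_CPUTIME_ID),
 * then falling back to getrusage(RUSAGE_SELF) and, on Windows, GetProcessTimes(). */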
12428 static bool
12429 current_process_time(struct timespec *ts)
12430 {
12431 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
12432  {
12433  static int try_clock_gettime = 1;
12434  if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
12435  return true;
12436  }
12437  else {
12438  try_clock_gettime = 0;
12439  }
12440  }
12441 #endif
12442 
12443 #ifdef RUSAGE_SELF
12444  {
12445  struct rusage usage;
12446  struct timeval time;
12447  if (getrusage(RUSAGE_SELF, &usage) == 0) {
12448  time = usage.ru_utime;
12449  ts->tv_sec = time.tv_sec;
12450  ts->tv_nsec = (int32_t)time.tv_usec * 1000;
12451  return true;
12452  }
12453  }
12454 #endif
12455 
12456 #ifdef _WIN32
12457  {
12458  FILETIME creation_time, exit_time, kernel_time, user_time;
12459  ULARGE_INTEGER ui;
12460 
12461  if (GetProcessTimes(GetCurrentProcess(),
12462  &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
12463  memcpy(&ui, &user_time, sizeof(FILETIME));
12464 #define PER100NSEC (uint64_t)(1000 * 1000 * 10)
12465  ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
12466  ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
12467  return true;
12468  }
12469  }
12470 #endif
12471 
12472  return false;
12473 }
12474 
12475 static double
12476 getrusage_time(void)
12477 {
12478  struct timespec ts;
12479  if (current_process_time(&ts)) {
12480  return ts.tv_sec + ts.tv_nsec * 1e-9;
12481  }
12482  else {
12483  return 0.0;
12484  }
12485 }
12486 
12487 
12488 static inline void
12489 gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
12490 {
12491  if (objspace->profile.run) {
12492  size_t index = objspace->profile.next_index;
12493  gc_profile_record *record;
12494 
12495  /* create new record */
12496  objspace->profile.next_index++;
12497 
12498  if (!objspace->profile.records) {
12499  objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
12500  objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
12501  }
12502  if (index >= objspace->profile.size) {
12503  void *ptr;
12504  objspace->profile.size += 1000;
12505  ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
12506  if (!ptr) rb_memerror();
12507  objspace->profile.records = ptr;
12508  }
12509  if (!objspace->profile.records) {
12510  rb_bug("gc_profile malloc or realloc miss");
12511  }
12512  record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
12513  MEMZERO(record, gc_profile_record, 1);
12514 
12515  /* setup before-GC parameter */
12516  record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
12517 #if MALLOC_ALLOCATED_SIZE
12518  record->allocated_size = malloc_allocated_size;
12519 #endif
12520 #if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
12521 #ifdef RUSAGE_SELF
12522  {
12523  struct rusage usage;
12524  if (getrusage(RUSAGE_SELF, &usage) == 0) {
12525  record->maxrss = usage.ru_maxrss;
12526  record->minflt = usage.ru_minflt;
12527  record->majflt = usage.ru_majflt;
12528  }
12529  }
12530 #endif
12531 #endif
12532  }
12533 }
12534 
12535 static inline void
12536 gc_prof_timer_start(rb_objspace_t *objspace)
12537 {
12538  if (gc_prof_enabled(objspace)) {
12539  gc_profile_record *record = gc_prof_record(objspace);
12540 #if GC_PROFILE_MORE_DETAIL
12541  record->prepare_time = objspace->profile.prepare_time;
12542 #endif
12543  record->gc_time = 0;
12544  record->gc_invoke_time = getrusage_time();
12545  }
12546 }
12547 
12548 static double
12549 elapsed_time_from(double time)
12550 {
12551  double now = getrusage_time();
12552  if (now > time) {
12553  return now - time;
12554  }
12555  else {
12556  return 0;
12557  }
12558 }
12559 
12560 static inline void
12561 gc_prof_timer_stop(rb_objspace_t *objspace)
12562 {
12563  if (gc_prof_enabled(objspace)) {
12564  gc_profile_record *record = gc_prof_record(objspace);
12565  record->gc_time = elapsed_time_from(record->gc_invoke_time);
12566  record->gc_invoke_time -= objspace->profile.invoke_time;
12567  }
12568 }
12569 
12570 #define RUBY_DTRACE_GC_HOOK(name) \
12571  do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
12572 static inline void
12573 gc_prof_mark_timer_start(rb_objspace_t *objspace)
12574 {
12575  RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
12576 #if GC_PROFILE_MORE_DETAIL
12577  if (gc_prof_enabled(objspace)) {
12578  gc_prof_record(objspace)->gc_mark_time = getrusage_time();
12579  }
12580 #endif
12581 }
12582 
12583 static inline void
12584 gc_prof_mark_timer_stop(rb_objspace_t *objspace)
12585 {
12586  RUBY_DTRACE_GC_HOOK(MARK_END);
12587 #if GC_PROFILE_MORE_DETAIL
12588  if (gc_prof_enabled(objspace)) {
12589  gc_profile_record *record = gc_prof_record(objspace);
12590  record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
12591  }
12592 #endif
12593 }
12594 
12595 static inline void
12596 gc_prof_sweep_timer_start(rb_objspace_t *objspace)
12597 {
12598  RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
12599  if (gc_prof_enabled(objspace)) {
12600  gc_profile_record *record = gc_prof_record(objspace);
12601 
12602  if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
12603  objspace->profile.gc_sweep_start_time = getrusage_time();
12604  }
12605  }
12606 }
12607 
12608 static inline void
12609 gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
12610 {
12611  RUBY_DTRACE_GC_HOOK(SWEEP_END);
12612 
12613  if (gc_prof_enabled(objspace)) {
12614  double sweep_time;
12615  gc_profile_record *record = gc_prof_record(objspace);
12616 
12617  if (record->gc_time > 0) {
12618  sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
12619  /* need to accumulate GC time for lazy sweep after gc() */
12620  record->gc_time += sweep_time;
12621  }
12622  else if (GC_PROFILE_MORE_DETAIL) {
12623  sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
12624  }
12625 
12626 #if GC_PROFILE_MORE_DETAIL
12627  record->gc_sweep_time += sweep_time;
12628  if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
12629 #endif
12630  if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
12631  }
12632 }
12633 
12634 static inline void
12635 gc_prof_set_malloc_info(rb_objspace_t *objspace)
12636 {
12637 #if GC_PROFILE_MORE_DETAIL
12638  if (gc_prof_enabled(objspace)) {
12639  gc_profile_record *record = gc_prof_record(objspace);
12640  record->allocate_increase = malloc_increase;
12641  record->allocate_limit = malloc_limit;
12642  }
12643 #endif
12644 }
12645 
12646 static inline void
12647 gc_prof_set_heap_info(rb_objspace_t *objspace)
12648 {
12649  if (gc_prof_enabled(objspace)) {
12650  gc_profile_record *record = gc_prof_record(objspace);
12651  size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
12652  size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
12653 
12654 #if GC_PROFILE_MORE_DETAIL
12655  record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
12656  record->heap_live_objects = live;
12657  record->heap_free_objects = total - live;
12658 #endif
12659 
12660  record->heap_total_objects = total;
12661  record->heap_use_size = live * sizeof(RVALUE);
12662  record->heap_total_size = total * sizeof(RVALUE);
12663  }
12664 }
12665 
12666 /*
12667  * call-seq:
12668  * GC::Profiler.clear -> nil
12669  *
12670  * Clears the GC profiler data.
12671  *
12672  */
12673 
12674 static VALUE
12675 gc_profile_clear(VALUE _)
12676 {
12677  rb_objspace_t *objspace = &rb_objspace;
12678  void *p = objspace->profile.records;
12679  objspace->profile.records = NULL;
12680  objspace->profile.size = 0;
12681  objspace->profile.next_index = 0;
12682  objspace->profile.current_record = 0;
12683  if (p) {
12684  free(p);
12685  }
12686  return Qnil;
12687 }
12688 
12689 /*
12690  * call-seq:
12691  * GC::Profiler.raw_data -> [Hash, ...]
12692  *
12693  * Returns an Array of individual raw profile data Hashes ordered
12694  * from earliest to latest by +:GC_INVOKE_TIME+.
12695  *
12696  * For example:
12697  *
12698  * [
12699  * {
12700  * :GC_TIME=>1.3000000000000858e-05,
12701  * :GC_INVOKE_TIME=>0.010634999999999999,
12702  * :HEAP_USE_SIZE=>289640,
12703  * :HEAP_TOTAL_SIZE=>588960,
12704  * :HEAP_TOTAL_OBJECTS=>14724,
12705  * :GC_IS_MARKED=>false
12706  * },
12707  * # ...
12708  * ]
12709  *
12710  * The keys mean:
12711  *
12712  * +:GC_TIME+::
12713  * Time elapsed in seconds for this GC run
12714  * +:GC_INVOKE_TIME+::
12715  * Time elapsed in seconds from startup to when the GC was invoked
12716  * +:HEAP_USE_SIZE+::
12717  * Total bytes of heap used
12718  * +:HEAP_TOTAL_SIZE+::
12719  * Total size of heap in bytes
12720  * +:HEAP_TOTAL_OBJECTS+::
12721  * Total number of objects
12722  * +:GC_IS_MARKED+::
12723  * Returns +true+ if the GC is in mark phase
12724  *
12725  * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
12726  * to the following hash keys:
12727  *
12728  * +:GC_MARK_TIME+::
12729  * +:GC_SWEEP_TIME+::
12730  * +:ALLOCATE_INCREASE+::
12731  * +:ALLOCATE_LIMIT+::
12732  * +:HEAP_USE_PAGES+::
12733  * +:HEAP_LIVE_OBJECTS+::
12734  * +:HEAP_FREE_OBJECTS+::
12735  * +:HAVE_FINALIZE+::
12736  *
12737  */
12738 
12739 static VALUE
12740 gc_profile_record_get(VALUE _)
12741 {
12742  VALUE prof;
12743  VALUE gc_profile = rb_ary_new();
12744  size_t i;
12745  rb_objspace_t *objspace = (&rb_objspace);
12746 
12747  if (!objspace->profile.run) {
12748  return Qnil;
12749  }
12750 
12751  for (i =0; i < objspace->profile.next_index; i++) {
12752  gc_profile_record *record = &objspace->profile.records[i];
12753 
12754  prof = rb_hash_new();
12755  rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(0, rb_hash_new(), record->flags));
12756  rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
12757  rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
12758  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
12759  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
12760  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
12761  rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
12762  rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
12763 #if GC_PROFILE_MORE_DETAIL
12764  rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
12765  rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
12766  rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
12767  rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
12768  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
12769  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
12770  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
12771 
12772  rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
12773  rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
12774 
12775  rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), RBOOL(record->flags & GPR_FLAG_HAVE_FINALIZE));
12776 #endif
12777 
12778 #if RGENGC_PROFILE > 0
12779  rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
12780  rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
12781  rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
12782 #endif
12783  rb_ary_push(gc_profile, prof);
12784  }
12785 
12786  return gc_profile;
12787 }
12788 
12789 #if GC_PROFILE_MORE_DETAIL
12790 #define MAJOR_REASON_MAX 0x10
12791 
12792 static char *
12793 gc_profile_dump_major_reason(unsigned int flags, char *buff)
12794 {
12795  unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
12796  int i = 0;
12797 
12798  if (reason == GPR_FLAG_NONE) {
12799  buff[0] = '-';
12800  buff[1] = 0;
12801  }
12802  else {
12803 #define C(x, s) \
12804  if (reason & GPR_FLAG_MAJOR_BY_##x) { \
12805  buff[i++] = #x[0]; \
12806  if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
12807  buff[i] = 0; \
12808  }
12809  C(NOFREE, N);
12810  C(OLDGEN, O);
12811  C(SHADY, S);
12812 #if RGENGC_ESTIMATE_OLDMALLOC
12813  C(OLDMALLOC, M);
12814 #endif
12815 #undef C
12816  }
12817  return buff;
12818 }
12819 #endif
12820 
12821 static void
12822 gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
12823 {
12824  rb_objspace_t *objspace = &rb_objspace;
12825  size_t count = objspace->profile.next_index;
12826 #ifdef MAJOR_REASON_MAX
12827  char reason_str[MAJOR_REASON_MAX];
12828 #endif
12829 
12830  if (objspace->profile.run && count /* > 1 */) {
12831  size_t i;
12832  const gc_profile_record *record;
12833 
12834  append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
12835  append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
12836 
12837  for (i = 0; i < count; i++) {
12838  record = &objspace->profile.records[i];
12839  append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
12840  i+1, record->gc_invoke_time, record->heap_use_size,
12841  record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
12842  }
12843 
12844 #if GC_PROFILE_MORE_DETAIL
12845  const char *str = "\n\n" \
12846  "More detail.\n" \
12847  "Prepare Time = Previously GC's rest sweep time\n"
12848  "Index Flags Allocate Inc. Allocate Limit"
12849 #if CALC_EXACT_MALLOC_SIZE
12850  " Allocated Size"
12851 #endif
12852  " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
12853 #if RGENGC_PROFILE
12854  " OldgenObj RemNormObj RemShadObj"
12855 #endif
12856 #if GC_PROFILE_DETAIL_MEMORY
12857  " MaxRSS(KB) MinorFLT MajorFLT"
12858 #endif
12859  "\n";
12860  append(out, rb_str_new_cstr(str));
12861 
12862  for (i = 0; i < count; i++) {
12863  record = &objspace->profile.records[i];
12864  append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
12865 #if CALC_EXACT_MALLOC_SIZE
12866  " %15"PRIuSIZE
12867 #endif
12868  " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
12869 #if RGENGC_PROFILE
12870  "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
12871 #endif
12872 #if GC_PROFILE_DETAIL_MEMORY
12873  "%11ld %8ld %8ld"
12874 #endif
12875 
12876  "\n",
12877  i+1,
12878  gc_profile_dump_major_reason(record->flags, reason_str),
12879  (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
12880  (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
12881  (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
12882  (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
12883  (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
12884  (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
12885  record->allocate_increase, record->allocate_limit,
12886 #if CALC_EXACT_MALLOC_SIZE
12887  record->allocated_size,
12888 #endif
12889  record->heap_use_pages,
12890  record->gc_mark_time*1000,
12891  record->gc_sweep_time*1000,
12892  record->prepare_time*1000,
12893 
12894  record->heap_live_objects,
12895  record->heap_free_objects,
12896  record->removing_objects,
12897  record->empty_objects
12898 #if RGENGC_PROFILE
12899  ,
12900  record->old_objects,
12901  record->remembered_normal_objects,
12902  record->remembered_shady_objects
12903 #endif
12904 #if GC_PROFILE_DETAIL_MEMORY
12905  ,
12906  record->maxrss / 1024,
12907  record->minflt,
12908  record->majflt
12909 #endif
12910 
12911  ));
12912  }
12913 #endif
12914  }
12915 }
12916 
12917 /*
12918  * call-seq:
12919  * GC::Profiler.result -> String
12920  *
12921  * Returns a profile data report such as:
12922  *
12923  * GC 1 invokes.
12924  * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC time(ms)
12925  * 1 0.012 159240 212940 10647 0.00000000000001530000
12926  */
12927 
12928 static VALUE
12929 gc_profile_result(VALUE _)
12930 {
12931  VALUE str = rb_str_buf_new(0);
12932  gc_profile_dump_on(str, rb_str_buf_append);
12933  return str;
12934 }
12935 
12936 /*
12937  * call-seq:
12938  * GC::Profiler.report
12939  * GC::Profiler.report(io)
12940  *
12941  * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
12942  *
12943  */
12944 
12945 static VALUE
12946 gc_profile_report(int argc, VALUE *argv, VALUE self)
12947 {
12948  VALUE out;
12949 
12950  out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
12951  gc_profile_dump_on(out, rb_io_write);
12952 
12953  return Qnil;
12954 }
12955 
12956 /*
12957  * call-seq:
12958  * GC::Profiler.total_time -> float
12959  *
12960  * The total time used for garbage collection in seconds
12961  */
12962 
12963 static VALUE
12964 gc_profile_total_time(VALUE self)
12965 {
12966  double time = 0;
12967  rb_objspace_t *objspace = &rb_objspace;
12968 
12969  if (objspace->profile.run && objspace->profile.next_index > 0) {
12970  size_t i;
12971  size_t count = objspace->profile.next_index;
12972 
12973  for (i = 0; i < count; i++) {
12974  time += objspace->profile.records[i].gc_time;
12975  }
12976  }
12977  return DBL2NUM(time);
12978 }
12979 
12980 /*
12981  * call-seq:
12982  * GC::Profiler.enabled? -> true or false
12983  *
12984  * The current status of GC profile mode.
12985  */
12986 
12987 static VALUE
12988 gc_profile_enable_get(VALUE self)
12989 {
12990  rb_objspace_t *objspace = &rb_objspace;
12991  return RBOOL(objspace->profile.run);
12992 }
12993 
12994 /*
12995  * call-seq:
12996  * GC::Profiler.enable -> nil
12997  *
12998  * Starts the GC profiler.
12999  *
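 *  A minimal usage sketch:
 *
 *     GC::Profiler.enable
 *     # ... run the code to be measured ...
 *     GC::Profiler.report
 *     GC::Profiler.disable
 *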
13000  */
13001 
13002 static VALUE
13003 gc_profile_enable(VALUE _)
13004 {
13005  rb_objspace_t *objspace = &rb_objspace;
13006  objspace->profile.run = TRUE;
13007  objspace->profile.current_record = 0;
13008  return Qnil;
13009 }
13010 
13011 /*
13012  * call-seq:
13013  * GC::Profiler.disable -> nil
13014  *
13015  * Stops the GC profiler.
13016  *
13017  */
13018 
13019 static VALUE
13020 gc_profile_disable(VALUE _)
13021 {
13022  rb_objspace_t *objspace = &rb_objspace;
13023 
13024  objspace->profile.run = FALSE;
13025  objspace->profile.current_record = 0;
13026  return Qnil;
13027 }
13028 
13029 /*
13030  ------------------------------ DEBUG ------------------------------
13031 */
13032 
13033 static const char *
13034 type_name(int type, VALUE obj)
13035 {
13036  switch (type) {
13037 #define TYPE_NAME(t) case (t): return #t;
13038  TYPE_NAME(T_NONE);
13039  TYPE_NAME(T_OBJECT);
13040  TYPE_NAME(T_CLASS);
13041  TYPE_NAME(T_MODULE);
13042  TYPE_NAME(T_FLOAT);
13043  TYPE_NAME(T_STRING);
13044  TYPE_NAME(T_REGEXP);
13045  TYPE_NAME(T_ARRAY);
13046  TYPE_NAME(T_HASH);
13047  TYPE_NAME(T_STRUCT);
13048  TYPE_NAME(T_BIGNUM);
13049  TYPE_NAME(T_FILE);
13050  TYPE_NAME(T_MATCH);
13051  TYPE_NAME(T_COMPLEX);
13052  TYPE_NAME(T_RATIONAL);
13053  TYPE_NAME(T_NIL);
13054  TYPE_NAME(T_TRUE);
13055  TYPE_NAME(T_FALSE);
13056  TYPE_NAME(T_SYMBOL);
13057  TYPE_NAME(T_FIXNUM);
13058  TYPE_NAME(T_UNDEF);
13059  TYPE_NAME(T_IMEMO);
13060  TYPE_NAME(T_ICLASS);
13061  TYPE_NAME(T_MOVED);
13062  TYPE_NAME(T_ZOMBIE);
13063  case T_DATA:
13064  if (obj && rb_objspace_data_type_name(obj)) {
13065  return rb_objspace_data_type_name(obj);
13066  }
13067  return "T_DATA";
13068 #undef TYPE_NAME
13069  }
13070  return "unknown";
13071 }
13072 
13073 static const char *
13074 obj_type_name(VALUE obj)
13075 {
13076  return type_name(TYPE(obj), obj);
13077 }
13078 
13079 const char *
13080 rb_method_type_name(rb_method_type_t type)
13081 {
13082  switch (type) {
13083  case VM_METHOD_TYPE_ISEQ: return "iseq";
13084  case VM_METHOD_TYPE_ATTRSET: return "attrset";
13085  case VM_METHOD_TYPE_IVAR: return "ivar";
13086  case VM_METHOD_TYPE_BMETHOD: return "bmethod";
13087  case VM_METHOD_TYPE_ALIAS: return "alias";
13088  case VM_METHOD_TYPE_REFINED: return "refined";
13089  case VM_METHOD_TYPE_CFUNC: return "cfunc";
13090  case VM_METHOD_TYPE_ZSUPER: return "zsuper";
13091  case VM_METHOD_TYPE_MISSING: return "missing";
13092  case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
13093  case VM_METHOD_TYPE_UNDEF: return "undef";
13094  case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
13095  }
13096  rb_bug("rb_method_type_name: unreachable (type: %d)", type);
13097 }
13098 
13099 /* from array.c */
13100 # define ARY_SHARED_P(ary) \
13101  (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
13102  FL_TEST((ary),ELTS_SHARED)!=0)
13103 # define ARY_EMBED_P(ary) \
13104  (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
13105  FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)
13106 
13107 static void
13108 rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
13109 {
13110  if (buff_size > 0 && iseq->body && iseq->body->location.label && !RB_TYPE_P(iseq->body->location.pathobj, T_MOVED)) {
13111  VALUE path = rb_iseq_path(iseq);
13112  VALUE n = iseq->body->location.first_lineno;
13113  snprintf(buff, buff_size, " %s@%s:%d",
13114  RSTRING_PTR(iseq->body->location.label),
13115  RSTRING_PTR(path),
13116  n ? FIX2INT(n) : 0 );
13117  }
13118 }
13119 
13120 static int
13121 str_len_no_raise(VALUE str)
13122 {
13123  long len = RSTRING_LEN(str);
13124  if (len < 0) return 0;
13125  if (len > INT_MAX) return INT_MAX;
13126  return (int)len;
13127 }
13128 
13129 const char *
13130 rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
13131 {
13132  int pos = 0;
13133  void *poisoned = asan_poisoned_object_p(obj);
13134  asan_unpoison_object(obj, false);
13135 
13136 #define BUFF_ARGS buff + pos, buff_size - pos
13137 #define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end
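 /* BUFF_ARGS expands to the unwritten tail of buff; APPENDF() snprintf()s into
  * that tail and jumps to the end label once the buffer is exhausted. */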
13138  if (SPECIAL_CONST_P(obj)) {
13139  APPENDF((BUFF_ARGS, "%s", obj_type_name(obj)));
13140 
13141  if (FIXNUM_P(obj)) {
13142  APPENDF((BUFF_ARGS, " %ld", FIX2LONG(obj)));
13143  }
13144  else if (SYMBOL_P(obj)) {
13145  APPENDF((BUFF_ARGS, " %s", rb_id2name(SYM2ID(obj))));
13146  }
13147  }
13148  else {
13149 #define TF(c) ((c) != 0 ? "true" : "false")
13150 #define C(c, s) ((c) != 0 ? (s) : " ")
13151  const int type = BUILTIN_TYPE(obj);
13152  const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
13153 
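 /* Heap objects are rendered as "<address> [<age><flags>] <type>", where the
  * flag letters below are L (uncollectible), M (marked), P (pinned),
  * R (marking), U (write-barrier unprotected) and G (garbage). */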
13154  if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
13155  APPENDF((BUFF_ARGS, "%p [%d%s%s%s%s%s%s] %s ",
13156  (void *)obj, age,
13157  C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
13158  C(RVALUE_MARK_BITMAP(obj), "M"),
13159  C(RVALUE_PIN_BITMAP(obj), "P"),
13160  C(RVALUE_MARKING_BITMAP(obj), "R"),
13161  C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
13162  C(rb_objspace_garbage_object_p(obj), "G"),
13163  obj_type_name(obj)));
13164  }
13165  else {
13166  /* fake */
13167  APPENDF((BUFF_ARGS, "%p [%dXXXX] %s",
13168  (void *)obj, age,
13169  obj_type_name(obj)));
13170  }
13171 
13172  if (internal_object_p(obj)) {
13173  /* ignore */
13174  }
13175  else if (RBASIC(obj)->klass == 0) {
13176  APPENDF((BUFF_ARGS, "(temporary internal)"));
13177  }
13178  else {
13179  if (RTEST(RBASIC(obj)->klass)) {
13180  VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
13181  if (!NIL_P(class_path)) {
13182  APPENDF((BUFF_ARGS, "(%s)", RSTRING_PTR(class_path)));
13183  }
13184  }
13185  }
13186 
13187 #if GC_DEBUG
13188  APPENDF((BUFF_ARGS, "@%s:%d", RANY(obj)->file, RANY(obj)->line));
13189 #endif
13190 
13191  switch (type) {
13192  case T_NODE:
13193  UNEXPECTED_NODE(rb_raw_obj_info);
13194  break;
13195  case T_ARRAY:
13196  if (FL_TEST(obj, ELTS_SHARED)) {
13197  APPENDF((BUFF_ARGS, "shared -> %s",
13198  rb_obj_info(RARRAY(obj)->as.heap.aux.shared_root)));
13199  }
13200  else if (FL_TEST(obj, RARRAY_EMBED_FLAG)) {
13201  APPENDF((BUFF_ARGS, "[%s%s] len: %ld (embed)",
13202  C(ARY_EMBED_P(obj), "E"),
13203  C(ARY_SHARED_P(obj), "S"),
13204  RARRAY_LEN(obj)));
13205  }
13206  else {
13207  APPENDF((BUFF_ARGS, "[%s%s%s] len: %ld, capa:%ld ptr:%p",
13208  C(ARY_EMBED_P(obj), "E"),
13209  C(ARY_SHARED_P(obj), "S"),
13210  C(RARRAY_TRANSIENT_P(obj), "T"),
13211  RARRAY_LEN(obj),
13212  ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
13213  (void *)RARRAY_CONST_PTR_TRANSIENT(obj)));
13214  }
13215  break;
13216  case T_STRING: {
13217  if (STR_SHARED_P(obj)) APPENDF((BUFF_ARGS, " [shared] "));
13218  APPENDF((BUFF_ARGS, "%.*s", str_len_no_raise(obj), RSTRING_PTR(obj)));
13219  break;
13220  }
13221  case T_SYMBOL: {
13222  VALUE fstr = RSYMBOL(obj)->fstr;
13223  ID id = RSYMBOL(obj)->id;
13224  if (RB_TYPE_P(fstr, T_STRING)) {
13225  APPENDF((BUFF_ARGS, ":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id));
13226  }
13227  else {
13228  APPENDF((BUFF_ARGS, "(%p) id:%d", (void *)fstr, (unsigned int)id));
13229  }
13230  break;
13231  }
13232  case T_MOVED: {
13233  APPENDF((BUFF_ARGS, "-> %p", (void*)rb_gc_location(obj)));
13234  break;
13235  }
13236  case T_HASH: {
13237  APPENDF((BUFF_ARGS, "[%c%c] %"PRIdSIZE,
13238  RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
13239  RHASH_TRANSIENT_P(obj) ? 'T' : ' ',
13240  RHASH_SIZE(obj)));
13241  break;
13242  }
13243  case T_CLASS:
13244  case T_MODULE:
13245  {
13246  VALUE class_path = rb_class_path_cached(obj);
13247  if (!NIL_P(class_path)) {
13248  APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(class_path)));
13249  }
13250  else {
 13251  APPENDF((BUFF_ARGS, "(anon)"));
13252  }
13253  break;
13254  }
13255  case T_ICLASS:
13256  {
13257  VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
13258  if (!NIL_P(class_path)) {
13259  APPENDF((BUFF_ARGS, "src:%s", RSTRING_PTR(class_path)));
13260  }
13261  break;
13262  }
13263  case T_OBJECT:
13264  {
13265  uint32_t len = ROBJECT_NUMIV(obj);
13266 
13267  if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
13268  APPENDF((BUFF_ARGS, "(embed) len:%d", len));
13269  }
13270  else {
13271  VALUE *ptr = ROBJECT_IVPTR(obj);
13272  APPENDF((BUFF_ARGS, "len:%d ptr:%p", len, (void *)ptr));
13273  }
13274  }
13275  break;
13276  case T_DATA: {
13277  const struct rb_block *block;
13278  const rb_iseq_t *iseq;
13279  if (rb_obj_is_proc(obj) &&
13280  (block = vm_proc_block(obj)) != NULL &&
13281  (vm_block_type(block) == block_type_iseq) &&
13282  (iseq = vm_block_iseq(block)) != NULL) {
13283  rb_raw_iseq_info(BUFF_ARGS, iseq);
13284  }
13285  else if (rb_ractor_p(obj)) {
13286  rb_ractor_t *r = (void *)DATA_PTR(obj);
13287  if (r) {
13288  APPENDF((BUFF_ARGS, "r:%d", r->pub.id));
13289  }
13290  }
13291  else {
13292  const char * const type_name = rb_objspace_data_type_name(obj);
13293  if (type_name) {
13294  APPENDF((BUFF_ARGS, "%s", type_name));
13295  }
13296  }
13297  break;
13298  }
13299  case T_IMEMO: {
13300  APPENDF((BUFF_ARGS, "<%s> ", rb_imemo_name(imemo_type(obj))));
13301 
13302  switch (imemo_type(obj)) {
13303  case imemo_ment:
13304  {
13305  const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
13306 
13307  APPENDF((BUFF_ARGS, ":%s (%s%s%s%s) type:%s alias:%d owner:%p defined_class:%p",
13308  rb_id2name(me->called_id),
13309  METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
13310  METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
13311  METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
13312  METHOD_ENTRY_CACHED(me) ? ",cc" : "",
13313  METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
13314  me->def ? rb_method_type_name(me->def->type) : "NULL",
13315  me->def ? me->def->alias_count : -1,
13316  (void *)me->owner, // obj_info(me->owner),
13317  (void *)me->defined_class)); //obj_info(me->defined_class)));
13318 
13319  if (me->def) {
13320  switch (me->def->type) {
13321  case VM_METHOD_TYPE_ISEQ:
13322  APPENDF((BUFF_ARGS, " (iseq:%s)", obj_info((VALUE)me->def->body.iseq.iseqptr)));
13323  break;
13324  default:
13325  break;
13326  }
13327  }
13328 
13329  break;
13330  }
13331  case imemo_iseq: {
13332  const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
13333  rb_raw_iseq_info(BUFF_ARGS, iseq);
13334  break;
13335  }
13336  case imemo_callinfo:
13337  {
13338  const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
13339  APPENDF((BUFF_ARGS, "(mid:%s, flag:%x argc:%d, kwarg:%s)",
13340  rb_id2name(vm_ci_mid(ci)),
13341  vm_ci_flag(ci),
13342  vm_ci_argc(ci),
13343  vm_ci_kwarg(ci) ? "available" : "NULL"));
13344  break;
13345  }
13346  case imemo_callcache:
13347  {
13348  const struct rb_callcache *cc = (const struct rb_callcache *)obj;
13349  VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
13350  const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
13351 
13352  APPENDF((BUFF_ARGS, "(klass:%s cme:%s%s (%p) call:%p",
13353  NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
13354  cme ? rb_id2name(cme->called_id) : "<NULL>",
13355  cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
13356  (void *)cme,
13357  (void *)vm_cc_call(cc)));
13358  break;
13359  }
13360  default:
13361  break;
13362  }
13363  }
13364  default:
13365  break;
13366  }
13367 #undef TF
13368 #undef C
13369  }
13370  end:
13371  if (poisoned) {
13372  asan_poison_object(obj);
13373  }
13374 
13375  return buff;
13376 #undef APPENDF
13377 #undef BUFF_ARGS
13378 }
13379 
13380 #if RGENGC_OBJ_INFO
13381 #define OBJ_INFO_BUFFERS_NUM 10
13382 #define OBJ_INFO_BUFFERS_SIZE 0x100
13383 static int obj_info_buffers_index = 0;
13384 static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
13385 
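 /* obj_info() hands out buffers from a small static ring, so a handful of
  * results can coexist (e.g. when several are interpolated into one format
  * string); older entries are eventually overwritten. */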
13386 static const char *
13387 obj_info(VALUE obj)
13388 {
13389  const int index = obj_info_buffers_index++;
13390  char *const buff = &obj_info_buffers[index][0];
13391 
13392  if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
13393  obj_info_buffers_index = 0;
13394  }
13395 
13396  return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
13397 }
13398 #else
13399 static const char *
13400 obj_info(VALUE obj)
13401 {
13402  return obj_type_name(obj);
13403 }
13404 #endif
13405 
13406 MJIT_FUNC_EXPORTED const char *
13407 rb_obj_info(VALUE obj)
13408 {
13409  return obj_info(obj);
13410 }
13411 
13412 void
13413 rb_obj_info_dump(VALUE obj)
13414 {
13415  char buff[0x100];
13416  fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
13417 }
13418 
13419 MJIT_FUNC_EXPORTED void
13420 rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
13421 {
13422  char buff[0x100];
13423  fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
13424 }
13425 
13426 #if GC_DEBUG
13427 
13428 void
13429 rb_gcdebug_print_obj_condition(VALUE obj)
13430 {
13431  rb_objspace_t *objspace = &rb_objspace;
13432 
13433  fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
13434 
13435  if (BUILTIN_TYPE(obj) == T_MOVED) {
13436  fprintf(stderr, "moved?: true\n");
13437  }
13438  else {
13439  fprintf(stderr, "moved?: false\n");
13440  }
13441  if (is_pointer_to_heap(objspace, (void *)obj)) {
13442  fprintf(stderr, "pointer to heap?: true\n");
13443  }
13444  else {
13445  fprintf(stderr, "pointer to heap?: false\n");
13446  return;
13447  }
13448 
13449  fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
13450  fprintf(stderr, "pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
13451  fprintf(stderr, "age? : %d\n", RVALUE_AGE(obj));
13452  fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
13453  fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
13454  fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
13455 
13456  if (is_lazy_sweeping(objspace)) {
13457  fprintf(stderr, "lazy sweeping?: true\n");
13458  fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
13459  }
13460  else {
13461  fprintf(stderr, "lazy sweeping?: false\n");
13462  }
13463 }
13464 
13465 static VALUE
13466 gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
13467 {
13468  fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
13469  return Qnil;
13470 }
13471 
13472 void
13473 rb_gcdebug_sentinel(VALUE obj, const char *name)
13474 {
13475  rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
13476 }
13477 
13478 #endif /* GC_DEBUG */
13479 
13480 #if GC_DEBUG_STRESS_TO_CLASS
13481 /*
13482  * call-seq:
13483  * GC.add_stress_to_class(class[, ...])
13484  *
13485  * Raises NoMemoryError when allocating an instance of the given classes.
13486  *
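 *  For example (illustrative; only available when Ruby is built with
 *  GC_DEBUG_STRESS_TO_CLASS):
 *
 *     GC.add_stress_to_class(String)
 *     String.new   # raises NoMemoryError
 *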
13487  */
13488 static VALUE
13489 rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
13490 {
13491  rb_objspace_t *objspace = &rb_objspace;
13492 
13493  if (!stress_to_class) {
13494  stress_to_class = rb_ary_tmp_new(argc);
13495  }
13496  rb_ary_cat(stress_to_class, argv, argc);
13497  return self;
13498 }
13499 
13500 /*
13501  * call-seq:
13502  * GC.remove_stress_to_class(class[, ...])
13503  *
13504  * No longer raises NoMemoryError when allocating an instance of the
13505  * given classes.
13506  *
13507  */
13508 static VALUE
13509 rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
13510 {
13511  rb_objspace_t *objspace = &rb_objspace;
13512  int i;
13513 
13514  if (stress_to_class) {
13515  for (i = 0; i < argc; ++i) {
13516  rb_ary_delete_same(stress_to_class, argv[i]);
13517  }
13518  if (RARRAY_LEN(stress_to_class) == 0) {
13519  stress_to_class = 0;
13520  }
13521  }
13522  return Qnil;
13523 }
13524 #endif
13525 
13526 /*
13527  * Document-module: ObjectSpace
13528  *
13529  * The ObjectSpace module contains a number of routines
13530  * that interact with the garbage collection facility and allow you to
13531  * traverse all living objects with an iterator.
13532  *
13533  * ObjectSpace also provides support for object finalizers, procs that will be
13534  * called when a specific object is about to be destroyed by garbage
13535  * collection. See the documentation for
13536  * <code>ObjectSpace.define_finalizer</code> for important information on
13537  * how to use this method correctly.
13538  *
13539  * a = "A"
13540  * b = "B"
13541  *
13542  * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
13543  * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
13544  *
13545  * a = nil
13546  * b = nil
13547  *
13548  * _produces:_
13549  *
13550  * Finalizer two on 537763470
13551  * Finalizer one on 537763480
13552  */
13553 
13554 /*
13555  * Document-class: ObjectSpace::WeakMap
13556  *
 13557  * An ObjectSpace::WeakMap object holds references to
 13558  * objects, but those references do not prevent the objects from being garbage collected.
 13559  *
 13560  * This class is mostly used internally by WeakRef; please use
 13561  * +lib/weakref.rb+ for the public interface.
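 *
 *  An illustrative use (keys and values are only weakly referenced):
 *
 *     map = ObjectSpace::WeakMap.new
 *     key = "name"
 *     map[key] = "Matz"
 *     map[key]        #=> "Matz", until the value is garbage collected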
13562  */
13563 
13564 /* Document-class: GC::Profiler
13565  *
13566  * The GC profiler provides access to information on GC runs including time,
13567  * length and object space size.
13568  *
13569  * Example:
13570  *
13571  * GC::Profiler.enable
13572  *
13573  * require 'rdoc/rdoc'
13574  *
13575  * GC::Profiler.report
13576  *
13577  * GC::Profiler.disable
13578  *
13579  * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
13580  */
13581 
13582 #include "gc.rbinc"
13583 
13584 void
13585 Init_GC(void)
13586 {
13587 #undef rb_intern
13588  VALUE rb_mObjSpace;
13589  VALUE rb_mProfiler;
13590  VALUE gc_constants;
13591 
13592  rb_mGC = rb_define_module("GC");
13593 
13594  gc_constants = rb_hash_new();
13595  rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), RBOOL(GC_DEBUG));
13596  rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
13597  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
13598  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
13599  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
13600  rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(SIZE_POOL_COUNT));
13601  rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(size_pool_slot_size(SIZE_POOL_COUNT - 1)));
13602  OBJ_FREEZE(gc_constants);
13603  /* internal constants */
13604  rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
13605 
13606  rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
13607  rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
13608  rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
13609  rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
13610  rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
13611  rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
13612  rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
13613  rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
13614  rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
13615 
13616  rb_mObjSpace = rb_define_module("ObjectSpace");
13617 
13618  rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
13619 
13620  rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
13621  rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
13622 
13623  rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
13624 
13625  rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
13626 
 13627  rb_define_method(rb_mKernel, "__id__", rb_obj_id, 0);
 13628  rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
13629 
13630  rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
13631 
13632  {
13633  VALUE rb_cWeakMap = rb_define_class_under(rb_mObjSpace, "WeakMap", rb_cObject);
13634  rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
13635  rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
13636  rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
13637  rb_define_method(rb_cWeakMap, "include?", wmap_has_key, 1);
13638  rb_define_method(rb_cWeakMap, "member?", wmap_has_key, 1);
13639  rb_define_method(rb_cWeakMap, "key?", wmap_has_key, 1);
13640  rb_define_method(rb_cWeakMap, "inspect", wmap_inspect, 0);
13641  rb_define_method(rb_cWeakMap, "each", wmap_each, 0);
13642  rb_define_method(rb_cWeakMap, "each_pair", wmap_each, 0);
13643  rb_define_method(rb_cWeakMap, "each_key", wmap_each_key, 0);
13644  rb_define_method(rb_cWeakMap, "each_value", wmap_each_value, 0);
13645  rb_define_method(rb_cWeakMap, "keys", wmap_keys, 0);
13646  rb_define_method(rb_cWeakMap, "values", wmap_values, 0);
13647  rb_define_method(rb_cWeakMap, "size", wmap_size, 0);
13648  rb_define_method(rb_cWeakMap, "length", wmap_size, 0);
13649  rb_include_module(rb_cWeakMap, rb_mEnumerable);
13650  }
13651 
13652  /* internal methods */
13653  rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
13654  rb_define_singleton_method(rb_mGC, "verify_transient_heap_internal_consistency", gc_verify_transient_heap_internal_consistency, 0);
13655 #if MALLOC_ALLOCATED_SIZE
13656  rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
13657  rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
13658 #endif
13659 
13660 #if GC_DEBUG_STRESS_TO_CLASS
13661  rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
13662  rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
13663 #endif
13664 
13665  {
13666  VALUE opts;
13667  /* GC build options */
13668  rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
13669 #define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
13670  OPT(GC_DEBUG);
13671  OPT(USE_RGENGC);
13672  OPT(RGENGC_DEBUG);
13673  OPT(RGENGC_CHECK_MODE);
13674  OPT(RGENGC_PROFILE);
13675  OPT(RGENGC_ESTIMATE_OLDMALLOC);
13676  OPT(GC_PROFILE_MORE_DETAIL);
13677  OPT(GC_ENABLE_LAZY_SWEEP);
13678  OPT(CALC_EXACT_MALLOC_SIZE);
13679  OPT(MALLOC_ALLOCATED_SIZE);
13680  OPT(MALLOC_ALLOCATED_SIZE_CHECK);
13681  OPT(GC_PROFILE_DETAIL_MEMORY);
13682 #undef OPT
13683  OBJ_FREEZE(opts);
13684  }
13685 }
13686 
13687 #ifdef ruby_xmalloc
13688 #undef ruby_xmalloc
13689 #endif
13690 #ifdef ruby_xmalloc2
13691 #undef ruby_xmalloc2
13692 #endif
13693 #ifdef ruby_xcalloc
13694 #undef ruby_xcalloc
13695 #endif
13696 #ifdef ruby_xrealloc
13697 #undef ruby_xrealloc
13698 #endif
13699 #ifdef ruby_xrealloc2
13700 #undef ruby_xrealloc2
13701 #endif
13702 
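 /* Out-of-line definitions of the allocator entry points (the macro versions
  * were #undef'd above).  When USE_GC_MALLOC_OBJ_INFO_DETAILS is enabled they
  * record __FILE__/__LINE__ before delegating to the *_body implementations. */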
13703 void *
13704 ruby_xmalloc(size_t size)
13705 {
13706 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
13707  ruby_malloc_info_file = __FILE__;
13708  ruby_malloc_info_line = __LINE__;
13709 #endif
13710  return ruby_xmalloc_body(size);
13711 }
13712 
13713 void *
13714 ruby_xmalloc2(size_t n, size_t size)
13715 {
13716 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
13717  ruby_malloc_info_file = __FILE__;
13718  ruby_malloc_info_line = __LINE__;
13719 #endif
13720  return ruby_xmalloc2_body(n, size);
13721 }
13722 
13723 void *
13724 ruby_xcalloc(size_t n, size_t size)
13725 {
13726 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
13727  ruby_malloc_info_file = __FILE__;
13728  ruby_malloc_info_line = __LINE__;
13729 #endif
13730  return ruby_xcalloc_body(n, size);
13731 }
13732 
13733 void *
13734 ruby_xrealloc(void *ptr, size_t new_size)
13735 {
13736 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
13737  ruby_malloc_info_file = __FILE__;
13738  ruby_malloc_info_line = __LINE__;
13739 #endif
13740  return ruby_xrealloc_body(ptr, new_size);
13741 }
13742 
13743 void *
13744 ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
13745 {
13746 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
13747  ruby_malloc_info_file = __FILE__;
13748  ruby_malloc_info_line = __LINE__;
13749 #endif
13750  return ruby_xrealloc2_body(ptr, n, new_size);
13751 }