Ruby 2.7.7p221 (2022-11-24 revision 168ec2b1e5ad0e4688e963d9de019557c78feed9)
gc.c
1/**********************************************************************
2
3 gc.c -
4
5 $Author$
6 created at: Tue Oct 5 09:44:46 JST 1993
7
8 Copyright (C) 1993-2007 Yukihiro Matsumoto
9 Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10 Copyright (C) 2000 Information-technology Promotion Agency, Japan
11
12**********************************************************************/
13
14#define rb_data_object_alloc rb_data_object_alloc
15#define rb_data_typed_object_alloc rb_data_typed_object_alloc
16
17#include "ruby/encoding.h"
18#include "ruby/io.h"
19#include "ruby/st.h"
20#include "ruby/re.h"
21#include "ruby/thread.h"
22#include "ruby/util.h"
23#include "ruby/debug.h"
24#include "internal.h"
25#include "eval_intern.h"
26#include "vm_core.h"
27#include "builtin.h"
28#include "gc.h"
29#include "constant.h"
30#include "ruby_atomic.h"
31#include "probes.h"
32#include "id_table.h"
33#include "symbol.h"
34#include <stdio.h>
35#include <stdarg.h>
36#include <setjmp.h>
37#include <sys/types.h>
38#include "ruby_assert.h"
39#include "debug_counter.h"
40#include "transient_heap.h"
41#include "mjit.h"
42
43#undef rb_data_object_wrap
44
45#ifndef HAVE_MALLOC_USABLE_SIZE
46# ifdef _WIN32
47# define HAVE_MALLOC_USABLE_SIZE
48# define malloc_usable_size(a) _msize(a)
49# elif defined HAVE_MALLOC_SIZE
50# define HAVE_MALLOC_USABLE_SIZE
51# define malloc_usable_size(a) malloc_size(a)
52# endif
53#endif
54#ifdef HAVE_MALLOC_USABLE_SIZE
55# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
56# include RUBY_ALTERNATIVE_MALLOC_HEADER
57# elif HAVE_MALLOC_H
58# include <malloc.h>
59# elif defined(HAVE_MALLOC_NP_H)
60# include <malloc_np.h>
61# elif defined(HAVE_MALLOC_MALLOC_H)
62# include <malloc/malloc.h>
63# endif
64#endif
65
66#ifdef HAVE_SYS_TIME_H
67#include <sys/time.h>
68#endif
69
70#ifdef HAVE_SYS_RESOURCE_H
71#include <sys/resource.h>
72#endif
73
74#if defined _WIN32 || defined __CYGWIN__
75#include <windows.h>
76#elif defined(HAVE_POSIX_MEMALIGN)
77#elif defined(HAVE_MEMALIGN)
78#include <malloc.h>
79#endif
80
81#define rb_setjmp(env) RUBY_SETJMP(env)
82#define rb_jmp_buf rb_jmpbuf_t
83
84#if defined(_MSC_VER) && defined(_WIN64)
85#include <intrin.h>
86#pragma intrinsic(_umul128)
87#endif
88
89/* Expecting this struct to be eliminated by function inlinings */
90struct optional {
91 bool left;
92 size_t right;
93};
94
95static inline struct optional
96size_mul_overflow(size_t x, size_t y)
97{
98 bool p;
99 size_t z;
100#if 0
101
102#elif defined(HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW)
103 p = __builtin_mul_overflow(x, y, &z);
104
105#elif defined(DSIZE_T)
106 RB_GNUC_EXTENSION DSIZE_T dx = x;
107 RB_GNUC_EXTENSION DSIZE_T dy = y;
108 RB_GNUC_EXTENSION DSIZE_T dz = dx * dy;
109 p = dz > SIZE_MAX;
110 z = (size_t)dz;
111
112#elif defined(_MSC_VER) && defined(_WIN64)
113 unsigned __int64 dp;
114 unsigned __int64 dz = _umul128(x, y, &dp);
115 p = (bool)dp;
116 z = (size_t)dz;
117
118#else
119 /* https://wiki.sei.cmu.edu/confluence/display/c/INT30-C.+Ensure+that+unsigned+integer+operations+do+not+wrap */
120 p = (y != 0) && (x > SIZE_MAX / y);
121 z = x * y;
122
123#endif
124 return (struct optional) { p, z, };
125}
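/* Illustrative behaviour of the overflow helper above (assuming 64-bit size_t):
 *   size_mul_overflow(3, 5)        => { .left = false, .right = 15 }
 *   size_mul_overflow(SIZE_MAX, 2) => { .left = true,  .right = SIZE_MAX - 1 }  (wrapped product)
 * "left" is the overflow flag and "right" carries the (possibly wrapped) result;
 * the portable fallback detects overflow with (y != 0) && (x > SIZE_MAX / y)
 * before the multiplication can wrap.
 */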
126
127static inline struct optional
128size_add_overflow(size_t x, size_t y)
129{
130 size_t z;
131 bool p;
132#if 0
133
134#elif defined(HAVE_BUILTIN___BUILTIN_ADD_OVERFLOW)
135 p = __builtin_add_overflow(x, y, &z);
136
137#elif defined(DSIZE_T)
138 RB_GNUC_EXTENSION DSIZE_T dx = x;
139 RB_GNUC_EXTENSION DSIZE_T dy = y;
140 RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
141 p = dz > SIZE_MAX;
142 z = (size_t)dz;
143
144#else
145 z = x + y;
146 p = z < y;
147
148#endif
149 return (struct optional) { p, z, };
150}
151
152static inline struct optional
153size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
154{
155 struct optional t = size_mul_overflow(x, y);
156 struct optional u = size_add_overflow(t.right, z);
157 return (struct optional) { t.left || u.left, u.right };
158}
159
160static inline struct optional
161size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
162{
163 struct optional t = size_mul_overflow(x, y);
164 struct optional u = size_mul_overflow(z, w);
165 struct optional v = size_add_overflow(t.right, u.right);
166 return (struct optional) { t.left || u.left || v.left, v.right };
167}
168
169PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
170
171static inline size_t
172size_mul_or_raise(size_t x, size_t y, VALUE exc)
173{
174 struct optional t = size_mul_overflow(x, y);
175 if (LIKELY(!t.left)) {
176 return t.right;
177 }
178 else if (rb_during_gc()) {
179 rb_memerror(); /* or...? */
180 }
181 else {
182 gc_raise(
183 exc,
184 "integer overflow: %"PRIuSIZE
185 " * %"PRIuSIZE
186 " > %"PRIuSIZE,
187 x, y, SIZE_MAX);
188 }
189}
190
191size_t
192rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
193{
194 return size_mul_or_raise(x, y, exc);
195}
196
197static inline size_t
198size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
199{
200 struct optional t = size_mul_add_overflow(x, y, z);
201 if (LIKELY(!t.left)) {
202 return t.right;
203 }
204 else if (rb_during_gc()) {
205 rb_memerror(); /* or...? */
206 }
207 else {
208 gc_raise(
209 exc,
210 "integer overflow: %"PRIuSIZE
211 " * %"PRIuSIZE
212 " + %"PRIuSIZE
213 " > %"PRIuSIZE,
214 x, y, z, SIZE_MAX);
215 }
216}
217
218size_t
219rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
220{
221 return size_mul_add_or_raise(x, y, z, exc);
222}
223
224static inline size_t
225size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
226{
227 struct optional t = size_mul_add_mul_overflow(x, y, z, w);
228 if (LIKELY(!t.left)) {
229 return t.right;
230 }
231 else if (rb_during_gc()) {
232 rb_memerror(); /* or...? */
233 }
234 else {
235 gc_raise(
236 exc,
237 "integer overflow: %"PRIdSIZE
238 " * %"PRIdSIZE
239 " + %"PRIdSIZE
240 " * %"PRIdSIZE
241 " > %"PRIdSIZE,
242 x, y, z, w, SIZE_MAX);
243 }
244}
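/* Hedged usage sketch (illustrative values, not a call taken from this file):
 * these wrappers are meant for allocation-size arithmetic, e.g.
 *   size_t bytes = size_mul_add_or_raise(count, elem_size, header_size, rb_eArgError);
 * which raises the given exception class instead of letting the size silently wrap.
 */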
245
246#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
247/* trick the compiler into thinking an external signal handler uses this */
248volatile VALUE rb_gc_guarded_val;
249volatile VALUE *
250rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
251{
252 rb_gc_guarded_val = val;
253
254 return ptr;
255}
256#endif
257
258#ifndef GC_HEAP_INIT_SLOTS
259#define GC_HEAP_INIT_SLOTS 10000
260#endif
261#ifndef GC_HEAP_FREE_SLOTS
262#define GC_HEAP_FREE_SLOTS 4096
263#endif
264#ifndef GC_HEAP_GROWTH_FACTOR
265#define GC_HEAP_GROWTH_FACTOR 1.8
266#endif
267#ifndef GC_HEAP_GROWTH_MAX_SLOTS
268#define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 means no cap on growth */
269#endif
270#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
271#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
272#endif
273
274#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
275#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
276#endif
277#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
278#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
279#endif
280#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
281#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
282#endif
283
284#ifndef GC_MALLOC_LIMIT_MIN
285#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
286#endif
287#ifndef GC_MALLOC_LIMIT_MAX
288#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
289#endif
290#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
291#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
292#endif
293
294#ifndef GC_OLDMALLOC_LIMIT_MIN
295#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
296#endif
297#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
298#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
299#endif
300#ifndef GC_OLDMALLOC_LIMIT_MAX
301#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
302#endif
303
304#ifndef PRINT_MEASURE_LINE
305#define PRINT_MEASURE_LINE 0
306#endif
307#ifndef PRINT_ENTER_EXIT_TICK
308#define PRINT_ENTER_EXIT_TICK 0
309#endif
310#ifndef PRINT_ROOT_TICKS
311#define PRINT_ROOT_TICKS 0
312#endif
313
314#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
315#define TICK_TYPE 1
316
317typedef struct {
322
327
331
335
338
339static ruby_gc_params_t gc_params = {
344
349
353
357
358 FALSE,
359};
360
361/* GC_DEBUG:
362 * enable to embed GC debugging information.
363 */
364#ifndef GC_DEBUG
365#define GC_DEBUG 0
366#endif
367
368#if USE_RGENGC
369/* RGENGC_DEBUG:
370 * 1: basic information
371 * 2: remember set operation
372 * 3: mark
373 * 4:
374 * 5: sweep
375 */
376#ifndef RGENGC_DEBUG
377#ifdef RUBY_DEVEL
378#define RGENGC_DEBUG -1
379#else
380#define RGENGC_DEBUG 0
381#endif
382#endif
383#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
384# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
385#else
386# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
387#endif
389
390/* RGENGC_CHECK_MODE
391 * 0: disable all assertions
392 * 1: enable assertions (to debug RGenGC)
393 * 2: enable internal consistency check at each GC (for debugging)
394 * 3: enable internal consistency check at each GC steps (for debugging)
395 * 4: enable liveness check
396 * 5: show all references
397 */
398#ifndef RGENGC_CHECK_MODE
399#define RGENGC_CHECK_MODE 0
400#endif
401
402// Note: using RUBY_ASSERT_WHEN() would expand macros inside expr (info by nobu).
403#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
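/* For example, GC_ASSERT(page->free_slots != 0) is tied to RGENGC_CHECK_MODE:
 * with the default RGENGC_CHECK_MODE of 0 the check is normally compiled out,
 * while debugging builds get the assertion with the stringified expression as
 * the failure message.
 */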
404
405/* RGENGC_OLD_NEWOBJ_CHECK
406 * 0: disable this check
407 * >0: occasionally promote a newly created object to OLD at creation time.
408 *
409 * One object is made OLD for every RGENGC_OLD_NEWOBJ_CHECK WB-protected objects created.
410 */
411#ifndef RGENGC_OLD_NEWOBJ_CHECK
412#define RGENGC_OLD_NEWOBJ_CHECK 0
413#endif
414
415/* RGENGC_PROFILE
416 * 0: disable RGenGC profiling
417 * 1: enable profiling for basic information
418 * 2: enable profiling for each type
419 */
420#ifndef RGENGC_PROFILE
421#define RGENGC_PROFILE 0
422#endif
423
424/* RGENGC_ESTIMATE_OLDMALLOC
425 * Enable/disable estimation of how much malloc'ed memory is retained by old objects.
426 * If the estimate exceeds its threshold, a full GC is invoked.
427 * 0: disable estimation.
428 * 1: enable estimation.
429 */
430#ifndef RGENGC_ESTIMATE_OLDMALLOC
431#define RGENGC_ESTIMATE_OLDMALLOC 1
432#endif
433
434/* RGENGC_FORCE_MAJOR_GC
435 * Force major/full GC if this macro is not 0.
436 */
437#ifndef RGENGC_FORCE_MAJOR_GC
438#define RGENGC_FORCE_MAJOR_GC 0
439#endif
440
441#else /* USE_RGENGC */
442
443#ifdef RGENGC_DEBUG
444#undef RGENGC_DEBUG
445#endif
446#define RGENGC_DEBUG 0
447#ifdef RGENGC_CHECK_MODE
448#undef RGENGC_CHECK_MODE
449#endif
450#define RGENGC_CHECK_MODE 0
451#define RGENGC_PROFILE 0
452#define RGENGC_ESTIMATE_OLDMALLOC 0
453#define RGENGC_FORCE_MAJOR_GC 0
454
455#endif /* USE_RGENGC */
456
457#ifndef GC_PROFILE_MORE_DETAIL
458#define GC_PROFILE_MORE_DETAIL 0
459#endif
460#ifndef GC_PROFILE_DETAIL_MEMORY
461#define GC_PROFILE_DETAIL_MEMORY 0
462#endif
463#ifndef GC_ENABLE_INCREMENTAL_MARK
464#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
465#endif
466#ifndef GC_ENABLE_LAZY_SWEEP
467#define GC_ENABLE_LAZY_SWEEP 1
468#endif
469#ifndef CALC_EXACT_MALLOC_SIZE
470#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
471#endif
472#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
473#ifndef MALLOC_ALLOCATED_SIZE
474#define MALLOC_ALLOCATED_SIZE 0
475#endif
476#else
477#define MALLOC_ALLOCATED_SIZE 0
478#endif
479#ifndef MALLOC_ALLOCATED_SIZE_CHECK
480#define MALLOC_ALLOCATED_SIZE_CHECK 0
481#endif
482
483#ifndef GC_DEBUG_STRESS_TO_CLASS
484#define GC_DEBUG_STRESS_TO_CLASS 0
485#endif
486
487#ifndef RGENGC_OBJ_INFO
488#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
489#endif
490
491typedef enum {
493 /* major reason */
498#if RGENGC_ESTIMATE_OLDMALLOC
500#endif
502
503 /* gc reason */
509
510 /* others */
515
520
521typedef struct gc_profile_record {
522 int flags;
523
524 double gc_time;
526
530
531#if GC_PROFILE_MORE_DETAIL
532 double gc_mark_time;
533 double gc_sweep_time;
534
535 size_t heap_use_pages;
536 size_t heap_live_objects;
537 size_t heap_free_objects;
538
539 size_t allocate_increase;
540 size_t allocate_limit;
541
542 double prepare_time;
543 size_t removing_objects;
544 size_t empty_objects;
545#if GC_PROFILE_DETAIL_MEMORY
546 long maxrss;
547 long minflt;
548 long majflt;
549#endif
550#endif
551#if MALLOC_ALLOCATED_SIZE
552 size_t allocated_size;
553#endif
554
555#if RGENGC_PROFILE > 0
556 size_t old_objects;
557 size_t remembered_normal_objects;
558 size_t remembered_shady_objects;
559#endif
561
562#if defined(_MSC_VER) || defined(__CYGWIN__)
563#pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
564#endif
565
566typedef struct RVALUE {
567 union {
568 struct {
569 VALUE flags; /* always 0 for freed obj */
570 struct RVALUE *next;
572 struct RMoved moved;
573 struct RBasic basic;
575 struct RClass klass;
578 struct RArray array;
580 struct RHash hash;
581 struct RData data;
585 struct RFile file;
586 struct RMatch match;
589 union {
591 struct vm_svar svar;
594 struct MEMO memo;
601 struct {
602 struct RBasic basic;
607 } as;
608#if GC_DEBUG
609 const char *file;
610 int line;
611#endif
613
614#if defined(_MSC_VER) || defined(__CYGWIN__)
615#pragma pack(pop)
616#endif
617
618typedef uintptr_t bits_t;
619enum {
620 BITS_SIZE = sizeof(bits_t),
621 BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
622};
623#define popcount_bits rb_popcount_intptr
624
625struct heap_page_header {
626 struct heap_page *page;
627};
628
629struct heap_page_body {
630 struct heap_page_header header;
631 /* char gap[]; */
632 /* RVALUE values[]; */
633};
634
635struct gc_list {
637 struct gc_list *next;
638};
639
640#define STACK_CHUNK_SIZE 500
641
642typedef struct stack_chunk {
646
647typedef struct mark_stack {
650 int index;
651 int limit;
655
656typedef struct rb_heap_struct {
658
662 struct heap_page *sweeping_page; /* iterator for .pages */
663#if GC_ENABLE_INCREMENTAL_MARK
665#endif
666 size_t total_pages; /* total page count in a heap */
667 size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
669
675
676typedef struct rb_objspace {
677 struct {
678 size_t limit;
679 size_t increase;
680#if MALLOC_ALLOCATED_SIZE
681 size_t allocated_size;
682 size_t allocations;
683#endif
685
686 struct {
687 unsigned int mode : 2;
688 unsigned int immediate_sweep : 1;
689 unsigned int dont_gc : 1;
690 unsigned int dont_incremental : 1;
691 unsigned int during_gc : 1;
692 unsigned int during_compacting : 1;
693 unsigned int gc_stressful: 1;
694 unsigned int has_hook: 1;
695#if USE_RGENGC
696 unsigned int during_minor_gc : 1;
697#endif
698#if GC_ENABLE_INCREMENTAL_MARK
700#endif
702
706
708 rb_heap_t tomb_heap; /* heap for zombies and ghosts */
709
710 struct {
713
715 void *data;
718
721
722 struct {
729
730 /* final */
734
736
737 struct {
738 int run;
743 size_t size;
744
745#if GC_PROFILE_MORE_DETAIL
746 double prepare_time;
747#endif
749
750#if USE_RGENGC
754#if RGENGC_PROFILE > 0
755 size_t total_generated_normal_object_count;
756 size_t total_generated_shady_object_count;
757 size_t total_shade_operation_count;
758 size_t total_promoted_count;
759 size_t total_remembered_normal_object_count;
760 size_t total_remembered_shady_object_count;
761
762#if RGENGC_PROFILE >= 2
763 size_t generated_normal_object_count_types[RUBY_T_MASK];
764 size_t generated_shady_object_count_types[RUBY_T_MASK];
765 size_t shade_operation_count_types[RUBY_T_MASK];
766 size_t promoted_types[RUBY_T_MASK];
767 size_t remembered_normal_object_count_types[RUBY_T_MASK];
768 size_t remembered_shady_object_count_types[RUBY_T_MASK];
769#endif
770#endif /* RGENGC_PROFILE */
771#endif /* USE_RGENGC */
772
773 /* temporary profiling space */
777
778 /* basic statistics */
779 size_t count;
785
787
788#if USE_RGENGC
789 struct {
797
798#if RGENGC_ESTIMATE_OLDMALLOC
801#endif
802
803#if RGENGC_CHECK_MODE >= 2
804 struct st_table *allrefs_table;
805 size_t error_count;
806#endif
808
809 struct {
813
814#if GC_ENABLE_INCREMENTAL_MARK
815 struct {
819#endif
820#endif /* USE_RGENGC */
821
824
825#if GC_DEBUG_STRESS_TO_CLASS
827#endif
829
830
831/* default tiny heap size: 16KB */
832#define HEAP_PAGE_ALIGN_LOG 14
833#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
834enum {
835 HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
836 HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
837 REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
838 HEAP_PAGE_SIZE = (HEAP_PAGE_ALIGN - REQUIRED_SIZE_BY_MALLOC),
839 HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
840 HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, sizeof(RVALUE)), BITS_BITLENGTH),
841 HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
842 HEAP_PAGE_BITMAP_PLANES = USE_RGENGC ? 4 : 1 /* RGENGC: mark, unprotected, uncollectible, marking */
843};
844
845struct heap_page {
850 struct {
851 unsigned int before_sweep : 1;
852 unsigned int has_remembered_objects : 1;
854 unsigned int in_tomb : 1;
856
861
862#if USE_RGENGC
864#endif
865 /* the following three bitmaps are cleared at the beginning of full GC */
867#if USE_RGENGC
870#endif
871
872 /* If set, the object is not movable */
874};
875
876#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
877#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
878#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
879
880#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
881#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
882#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
883#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
884
885/* Bitmap Operations */
886#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
887#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
888#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
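/* Worked example: for the slot with NUM_IN_PAGE(p) == 70 on a 64-bit build
 * (BITS_BITLENGTH == 64), BITMAP_INDEX(p) == 1 and BITMAP_OFFSET(p) == 6, so
 * MARKED_IN_BITMAP() tests bit 6 of bits[1] and MARK_IN_BITMAP() sets it.
 */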
889
890/* getting bitmap */
891#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
892#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
893#if USE_RGENGC
894#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
895#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
896#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
897#endif
898
899/* Aliases */
900#define rb_objspace (*rb_objspace_of(GET_VM()))
901#define rb_objspace_of(vm) ((vm)->objspace)
902
903#define ruby_initial_gc_stress gc_params.gc_stress
904
906
907#define malloc_limit objspace->malloc_params.limit
908#define malloc_increase objspace->malloc_params.increase
909#define malloc_allocated_size objspace->malloc_params.allocated_size
910#define heap_pages_sorted objspace->heap_pages.sorted
911#define heap_allocated_pages objspace->heap_pages.allocated_pages
912#define heap_pages_sorted_length objspace->heap_pages.sorted_length
913#define heap_pages_lomem objspace->heap_pages.range[0]
914#define heap_pages_himem objspace->heap_pages.range[1]
915#define heap_allocatable_pages objspace->heap_pages.allocatable_pages
916#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
917#define heap_pages_final_slots objspace->heap_pages.final_slots
918#define heap_pages_deferred_final objspace->heap_pages.deferred_final
919#define heap_eden (&objspace->eden_heap)
920#define heap_tomb (&objspace->tomb_heap)
921#define dont_gc objspace->flags.dont_gc
922#define during_gc objspace->flags.during_gc
923#define finalizing objspace->atomic_flags.finalizing
924#define finalizer_table objspace->finalizer_table
925#define global_list objspace->global_list
926#define ruby_gc_stressful objspace->flags.gc_stressful
927#define ruby_gc_stress_mode objspace->gc_stress_mode
928#if GC_DEBUG_STRESS_TO_CLASS
929#define stress_to_class objspace->stress_to_class
930#else
931#define stress_to_class 0
932#endif
933
934static inline enum gc_mode
935gc_mode_verify(enum gc_mode mode)
936{
937#if RGENGC_CHECK_MODE > 0
938 switch (mode) {
939 case gc_mode_none:
940 case gc_mode_marking:
941 case gc_mode_sweeping:
942 break;
943 default:
944 rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
945 }
946#endif
947 return mode;
948}
949
950#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
951#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
952
953#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
954#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
955#if USE_RGENGC
956#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
957#else
958#define is_full_marking(objspace) TRUE
959#endif
960#if GC_ENABLE_INCREMENTAL_MARK
961#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
962#else
963#define is_incremental_marking(objspace) FALSE
964#endif
965#if GC_ENABLE_INCREMENTAL_MARK
966#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
967#else
968#define will_be_incremental_marking(objspace) FALSE
969#endif
970#define has_sweeping_pages(heap) ((heap)->sweeping_page != 0)
971#define is_lazy_sweeping(heap) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap))
972
973#if SIZEOF_LONG == SIZEOF_VOIDP
974# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
975# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
976#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
977# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
978# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
979 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
980#else
981# error not supported
982#endif
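/* Example (SIZEOF_LONG == SIZEOF_VOIDP): RVALUE slots are aligned to sizeof(RVALUE),
 * so the low bit of their address is always clear; nonspecial_obj_id() tags the
 * address with FIXNUM_FLAG to form a Fixnum object id, and obj_id_to_ref() clears
 * the tag to recover the address.
 */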
983
984#define RANY(o) ((RVALUE*)(o))
985
986struct RZombie {
987 struct RBasic basic;
989 void (*dfree)(void *);
990 void *data;
991};
992
993#define RZOMBIE(o) ((struct RZombie *)(o))
994
995#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
996
997#if RUBY_MARK_FREE_DEBUG
998int ruby_gc_debug_indent = 0;
999#endif
1002
1003void rb_iseq_mark(const rb_iseq_t *iseq);
1005void rb_iseq_free(const rb_iseq_t *iseq);
1006size_t rb_iseq_memsize(const rb_iseq_t *iseq);
1007void rb_vm_update_references(void *ptr);
1008
1010
1011static VALUE define_final0(VALUE obj, VALUE block);
1012
1013NORETURN(static void negative_size_allocation_error(const char *));
1014
1015static void init_mark_stack(mark_stack_t *stack);
1016
1017static int ready_to_gc(rb_objspace_t *objspace);
1018
1019static int garbage_collect(rb_objspace_t *, int reason);
1020
1021static int gc_start(rb_objspace_t *objspace, int reason);
1022static void gc_rest(rb_objspace_t *objspace);
1023static inline void gc_enter(rb_objspace_t *objspace, const char *event);
1024static inline void gc_exit(rb_objspace_t *objspace, const char *event);
1025
1026static void gc_marks(rb_objspace_t *objspace, int full_mark);
1027static void gc_marks_start(rb_objspace_t *objspace, int full);
1028static int gc_marks_finish(rb_objspace_t *objspace);
1029static void gc_marks_rest(rb_objspace_t *objspace);
1030static void gc_marks_step(rb_objspace_t *objspace, int slots);
1031static void gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap);
1032
1033static void gc_sweep(rb_objspace_t *objspace);
1034static void gc_sweep_start(rb_objspace_t *objspace);
1035static void gc_sweep_finish(rb_objspace_t *objspace);
1036static int gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap);
1037static void gc_sweep_rest(rb_objspace_t *objspace);
1038static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap);
1039
1040static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
1041static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
1042static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
1043static void gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr);
1044NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
1045static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
1046
1047static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1048static int gc_mark_stacked_objects_all(rb_objspace_t *);
1049static void gc_grey(rb_objspace_t *objspace, VALUE ptr);
1050
1051static inline int gc_mark_set(rb_objspace_t *objspace, VALUE obj);
1052NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
1053
1054static void push_mark_stack(mark_stack_t *, VALUE);
1055static int pop_mark_stack(mark_stack_t *, VALUE *);
1056static size_t mark_stack_size(mark_stack_t *stack);
1057static void shrink_stack_chunk_cache(mark_stack_t *stack);
1058
1059static size_t obj_memsize_of(VALUE obj, int use_all_types);
1060static void gc_verify_internal_consistency(rb_objspace_t *objspace);
1061static int gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj);
1062static int gc_verify_heap_pages(rb_objspace_t *objspace);
1063
1064static void gc_stress_set(rb_objspace_t *objspace, VALUE flag);
1065static VALUE gc_disable_no_rest(rb_objspace_t *);
1066
1067static double getrusage_time(void);
1068static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason);
1069static inline void gc_prof_timer_start(rb_objspace_t *);
1070static inline void gc_prof_timer_stop(rb_objspace_t *);
1071static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1072static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1073static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1074static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1075static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1076static inline void gc_prof_set_heap_info(rb_objspace_t *);
1077
1078#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
1079 if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
1080 *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \
1081 } \
1082} while (0)
1083
1084#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
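/* Illustrative (hypothetical) use during compaction:
 *   UPDATE_IF_MOVED(objspace, RANY(obj)->as.basic.klass);
 * rewrites the slot in place with the forwarding destination when the referenced
 * object has been moved (i.e. its BUILTIN_TYPE is T_MOVED).
 */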
1085
1086#define gc_prof_record(objspace) (objspace)->profile.current_record
1087#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1088
1089#ifdef HAVE_VA_ARGS_MACRO
1090# define gc_report(level, objspace, ...) \
1091 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1092#else
1093# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1094#endif
1095PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1096static const char *obj_info(VALUE obj);
1097
1098#define PUSH_MARK_FUNC_DATA(v) do { \
1099 struct mark_func_data_struct *prev_mark_func_data = objspace->mark_func_data; \
1100 objspace->mark_func_data = (v);
1101
1102#define POP_MARK_FUNC_DATA() objspace->mark_func_data = prev_mark_func_data;} while (0)
1103
1104/*
1105 * 1 - TSC (H/W Time Stamp Counter)
1106 * 2 - getrusage
1107 */
1108#ifndef TICK_TYPE
1109#define TICK_TYPE 1
1110#endif
1111
1112#if USE_TICK_T
1113
1114#if TICK_TYPE == 1
1115/* the following code is only for internal tuning. */
1116
1117/* Source code to use RDTSC is quoted and modified from
1118 * http://www.mcs.anl.gov/~kazutomo/rdtsc.html
1119 * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
1120 */
1121
1122#if defined(__GNUC__) && defined(__i386__)
1123typedef unsigned long long tick_t;
1124#define PRItick "llu"
1125static inline tick_t
1126tick(void)
1127{
1128 unsigned long long int x;
1129 __asm__ __volatile__ ("rdtsc" : "=A" (x));
1130 return x;
1131}
1132
1133#elif defined(__GNUC__) && defined(__x86_64__)
1134typedef unsigned long long tick_t;
1135#define PRItick "llu"
1136
1137static __inline__ tick_t
1138tick(void)
1139{
1140 unsigned long hi, lo;
1141 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1142 return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1143}
1144
1145#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
1146typedef unsigned long long tick_t;
1147#define PRItick "llu"
1148
1149static __inline__ tick_t
1150tick(void)
1151{
1152 unsigned long long val = __builtin_ppc_get_timebase();
1153 return val;
1154}
1155
1156#elif defined(__aarch64__) && defined(__GNUC__)
1157typedef unsigned long tick_t;
1158#define PRItick "lu"
1159
1160static __inline__ tick_t
1161tick(void)
1162{
1163 unsigned long val;
1164 __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1165 return val;
1166}
1167
1168
1169#elif defined(_WIN32) && defined(_MSC_VER)
1170#include <intrin.h>
1171typedef unsigned __int64 tick_t;
1172#define PRItick "llu"
1173
1174static inline tick_t
1175tick(void)
1176{
1177 return __rdtsc();
1178}
1179
1180#else /* use clock */
1181typedef clock_t tick_t;
1182#define PRItick "llu"
1183
1184static inline tick_t
1185tick(void)
1186{
1187 return clock();
1188}
1189#endif /* TSC */
1190
1191#elif TICK_TYPE == 2
1192typedef double tick_t;
1193#define PRItick "4.9f"
1194
1195static inline tick_t
1196tick(void)
1197{
1198 return getrusage_time();
1199}
1200#else /* TICK_TYPE */
1201#error "choose tick type"
1202#endif /* TICK_TYPE */
1203
1204#define MEASURE_LINE(expr) do { \
1205 volatile tick_t start_time = tick(); \
1206 volatile tick_t end_time; \
1207 expr; \
1208 end_time = tick(); \
1209 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1210} while (0)
1211
1212#else /* USE_TICK_T */
1213#define MEASURE_LINE(expr) expr
1214#endif /* USE_TICK_T */
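/* Illustrative use (hypothetical call, only meaningful when USE_TICK_T is non-zero):
 *   MEASURE_LINE(gc_sweep_step(objspace, heap));
 * prints "0\t<elapsed ticks>\tgc_sweep_step(objspace, heap)" to stderr; when
 * USE_TICK_T is zero, MEASURE_LINE(expr) is just expr.
 */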
1215
1216#define FL_CHECK2(name, x, pred) \
1217 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1218 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1219#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1220#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1221#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1222
1223#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1224#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1225#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1226
1227#if USE_RGENGC
1228#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1229#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1230#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1231
1232#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1233#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1234#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1235
1236#define RVALUE_OLD_AGE 3
1237#define RVALUE_AGE_SHIFT 5 /* FL_PROMOTED0 bit */
1238
1239static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
1240static int rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj);
1241static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
1242static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
1243static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
1244
1245static inline int
1246RVALUE_FLAGS_AGE(VALUE flags)
1247{
1248 return (int)((flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT);
1249}
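/* Example: FL_PROMOTED0 and FL_PROMOTED1 occupy bits 5 and 6 of the flags word
 * (hence RVALUE_AGE_SHIFT == 5), so an object with both bits set has
 * RVALUE_FLAGS_AGE() == 3 == RVALUE_OLD_AGE, i.e. it is "old".
 */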
1250
1251#endif /* USE_RGENGC */
1252
1253static int
1254check_rvalue_consistency_force(const VALUE obj, int terminate)
1255{
1256 rb_objspace_t *objspace = &rb_objspace;
1257 int err = 0;
1258
1259 if (SPECIAL_CONST_P(obj)) {
1260 fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1261 err++;
1262 }
1263 else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1264 /* check if it is in tomb_pages */
1265 struct heap_page *page = NULL;
1266 list_for_each(&heap_tomb->pages, page, page_node) {
1267 if (&page->start[0] <= (RVALUE *)obj &&
1268 (RVALUE *)obj < &page->start[page->total_slots]) {
1269 fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
1270 (void *)obj, (void *)page);
1271 err++;
1272 goto skip;
1273 }
1274 }
1275 fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1276 err++;
1277 skip:
1278 ;
1279 }
1280 else {
1281 const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1282 const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1283 const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
1284 const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
1285 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1286
1287 if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
1288 fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
1289 err++;
1290 }
1291 if (BUILTIN_TYPE(obj) == T_NONE) {
1292 fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
1293 err++;
1294 }
1295 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
1296 fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
1297 err++;
1298 }
1299
1300 obj_memsize_of((VALUE)obj, FALSE);
1301
1302 /* check generation
1303 *
1304 * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
1305 */
1306 if (age > 0 && wb_unprotected_bit) {
1307 fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
1308 err++;
1309 }
1310
1311 if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1312 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
1313 err++;
1314 }
1315
1316 if (!is_full_marking(objspace)) {
1317 if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1318 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1319 obj_info(obj), age);
1320 err++;
1321 }
1322 if (remembered_bit && age != RVALUE_OLD_AGE) {
1323 fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1324 obj_info(obj), age);
1325 err++;
1326 }
1327 }
1328
1329 /*
1330 * check coloring
1331 *
1332 * marking:false marking:true
1333 * marked:false white *invalid*
1334 * marked:true black grey
1335 */
1336 if (is_incremental_marking(objspace) && marking_bit) {
1337 if (!is_marking(objspace) && !mark_bit) {
1338 fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
1339 err++;
1340 }
1341 }
1342 }
1343
1344 if (err > 0 && terminate) {
1345 rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
1346 }
1347
1348 return err;
1349}
1350
1351#if RGENGC_CHECK_MODE == 0
1352static inline VALUE
1353check_rvalue_consistency(const VALUE obj)
1354{
1355 return obj;
1356}
1357#else
1358static VALUE
1359check_rvalue_consistency(const VALUE obj)
1360{
1361 check_rvalue_consistency_force(obj, TRUE);
1362 return obj;
1363}
1364#endif
1365
1366static inline int
1367gc_object_moved_p(rb_objspace_t * objspace, VALUE obj)
1368{
1369 if (RB_SPECIAL_CONST_P(obj)) {
1370 return FALSE;
1371 }
1372 else {
1373 void *poisoned = asan_poisoned_object_p(obj);
1374 asan_unpoison_object(obj, false);
1375
1376 int ret = BUILTIN_TYPE(obj) == T_MOVED;
1377 /* Re-poison slot if it's not the one we want */
1378 if (poisoned) {
1380 asan_poison_object(obj);
1381 }
1382 return ret;
1383 }
1384}
1385
1386static inline int
1387RVALUE_MARKED(VALUE obj)
1388{
1389 check_rvalue_consistency(obj);
1390 return RVALUE_MARK_BITMAP(obj) != 0;
1391}
1392
1393static inline int
1394RVALUE_PINNED(VALUE obj)
1395{
1396 check_rvalue_consistency(obj);
1397 return RVALUE_PIN_BITMAP(obj) != 0;
1398}
1399
1400#if USE_RGENGC
1401static inline int
1402RVALUE_WB_UNPROTECTED(VALUE obj)
1403{
1404 check_rvalue_consistency(obj);
1405 return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1406}
1407
1408static inline int
1409RVALUE_MARKING(VALUE obj)
1410{
1411 check_rvalue_consistency(obj);
1412 return RVALUE_MARKING_BITMAP(obj) != 0;
1413}
1414
1415static inline int
1416RVALUE_REMEMBERED(VALUE obj)
1417{
1418 check_rvalue_consistency(obj);
1419 return RVALUE_MARKING_BITMAP(obj) != 0;
1420}
1421
1422static inline int
1423RVALUE_UNCOLLECTIBLE(VALUE obj)
1424{
1425 check_rvalue_consistency(obj);
1426 return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1427}
1428
1429static inline int
1430RVALUE_OLD_P_RAW(VALUE obj)
1431{
1432 const VALUE promoted = FL_PROMOTED0 | FL_PROMOTED1;
1433 return (RBASIC(obj)->flags & promoted) == promoted;
1434}
1435
1436static inline int
1437RVALUE_OLD_P(VALUE obj)
1438{
1439 check_rvalue_consistency(obj);
1440 return RVALUE_OLD_P_RAW(obj);
1441}
1442
1443#if RGENGC_CHECK_MODE || GC_DEBUG
1444static inline int
1445RVALUE_AGE(VALUE obj)
1446{
1447 check_rvalue_consistency(obj);
1448 return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1449}
1450#endif
1451
1452static inline void
1453RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1454{
1455 MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1456 objspace->rgengc.old_objects++;
1457 rb_transient_heap_promote(obj);
1458
1459#if RGENGC_PROFILE >= 2
1460 objspace->profile.total_promoted_count++;
1461 objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
1462#endif
1463}
1464
1465static inline void
1466RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
1467{
1468 RB_DEBUG_COUNTER_INC(obj_promote);
1469 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1470}
1471
1472static inline VALUE
1473RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
1474{
1475 flags &= ~(FL_PROMOTED0 | FL_PROMOTED1);
1476 flags |= (age << RVALUE_AGE_SHIFT);
1477 return flags;
1478}
1479
1480/* set age to age+1 */
1481static inline void
1482RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
1483{
1484 VALUE flags = RBASIC(obj)->flags;
1485 int age = RVALUE_FLAGS_AGE(flags);
1486
1487 if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1488 rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
1489 }
1490
1491 age++;
1492 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);
1493
1494 if (age == RVALUE_OLD_AGE) {
1495 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1496 }
1497 check_rvalue_consistency(obj);
1498}
1499
1500/* set age to RVALUE_OLD_AGE */
1501static inline void
1502RVALUE_AGE_SET_OLD(rb_objspace_t *objspace, VALUE obj)
1503{
1504 check_rvalue_consistency(obj);
1505 GC_ASSERT(!RVALUE_OLD_P(obj));
1506
1507 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
1508 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1509
1510 check_rvalue_consistency(obj);
1511}
1512
1513/* set age to RVALUE_OLD_AGE - 1 */
1514static inline void
1515RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
1516{
1517 check_rvalue_consistency(obj);
1518 GC_ASSERT(!RVALUE_OLD_P(obj));
1519
1520 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);
1521
1522 check_rvalue_consistency(obj);
1523}
1524
1525static inline void
1526RVALUE_DEMOTE_RAW(rb_objspace_t *objspace, VALUE obj)
1527{
1528 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1529 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1530}
1531
1532static inline void
1533RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
1534{
1535 check_rvalue_consistency(obj);
1536 GC_ASSERT(RVALUE_OLD_P(obj));
1537
1538 if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
1539 CLEAR_IN_BITMAP(GET_HEAP_PAGE(obj)->marking_bits, obj);
1540 }
1541
1542 RVALUE_DEMOTE_RAW(objspace, obj);
1543
1544 if (RVALUE_MARKED(obj)) {
1545 objspace->rgengc.old_objects--;
1546 }
1547
1548 check_rvalue_consistency(obj);
1549}
1550
1551static inline void
1552RVALUE_AGE_RESET_RAW(VALUE obj)
1553{
1554 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1555}
1556
1557static inline void
1558RVALUE_AGE_RESET(VALUE obj)
1559{
1560 check_rvalue_consistency(obj);
1561 GC_ASSERT(!RVALUE_OLD_P(obj));
1562
1563 RVALUE_AGE_RESET_RAW(obj);
1564 check_rvalue_consistency(obj);
1565}
1566
1567static inline int
1568RVALUE_BLACK_P(VALUE obj)
1569{
1570 return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
1571}
1572
1573#if 0
1574static inline int
1575RVALUE_GREY_P(VALUE obj)
1576{
1577 return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
1578}
1579#endif
1580
1581static inline int
1582RVALUE_WHITE_P(VALUE obj)
1583{
1584 return RVALUE_MARKED(obj) == FALSE;
1585}
1586
1587#endif /* USE_RGENGC */
1588
1589/*
1590 --------------------------- ObjectSpace -----------------------------
1591*/
1592
1593static inline void *
1594calloc1(size_t n)
1595{
1596 return calloc(1, n);
1597}
1598
1599rb_objspace_t *
1600rb_objspace_alloc(void)
1601{
1602 rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
1603 malloc_limit = gc_params.malloc_limit_min;
1604 list_head_init(&objspace->eden_heap.pages);
1605 list_head_init(&objspace->tomb_heap.pages);
1606 dont_gc = TRUE;
1607
1608 return objspace;
1609}
1610
1611static void free_stack_chunks(mark_stack_t *);
1612static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
1613
1614void
1615rb_objspace_free(rb_objspace_t *objspace)
1616{
1617 if (is_lazy_sweeping(heap_eden))
1618 rb_bug("lazy sweeping underway when freeing object space");
1619
1620 if (objspace->profile.records) {
1621 free(objspace->profile.records);
1622 objspace->profile.records = 0;
1623 }
1624
1625 if (global_list) {
1626 struct gc_list *list, *next;
1627 for (list = global_list; list; list = next) {
1628 next = list->next;
1629 xfree(list);
1630 }
1631 }
1632 if (heap_pages_sorted) {
1633 size_t i;
1634 for (i = 0; i < heap_allocated_pages; ++i) {
1635 heap_page_free(objspace, heap_pages_sorted[i]);
1636 }
1640 heap_pages_lomem = 0;
1641 heap_pages_himem = 0;
1642
1643 objspace->eden_heap.total_pages = 0;
1644 objspace->eden_heap.total_slots = 0;
1645 }
1646 st_free_table(objspace->id_to_obj_tbl);
1647 st_free_table(objspace->obj_to_id_tbl);
1648 free_stack_chunks(&objspace->mark_stack);
1649 free(objspace);
1650}
1651
1652static void
1653heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
1654{
1655 struct heap_page **sorted;
1656 size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);
1657
1658 gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size);
1659
1660 if (heap_pages_sorted_length > 0) {
1661 sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
1662 if (sorted) heap_pages_sorted = sorted;
1663 }
1664 else {
1665 sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
1666 }
1667
1668 if (sorted == 0) {
1669 rb_memerror();
1670 }
1671
1672 heap_pages_sorted_length = next_length;
1673}
1674
1675static void
1676heap_pages_expand_sorted(rb_objspace_t *objspace)
1677{
1678 /* Usually heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length,
1679 * because heap_allocatable_pages already includes heap_tomb->total_pages (tomb pages are
1680 * recycled). However, pages without empty slots cannot be recycled and new pages may have
1681 * to be created, so also reserve room for the extra heap_tomb->total_pages.
1682 */
1683 size_t next_length = heap_allocatable_pages;
1684 next_length += heap_eden->total_pages;
1685 next_length += heap_tomb->total_pages;
1686
1687 if (next_length > heap_pages_sorted_length) {
1688 heap_pages_expand_sorted_to(objspace, next_length);
1689 }
1690
1693}
1694
1695static void
1696heap_allocatable_pages_set(rb_objspace_t *objspace, size_t s)
1697{
1698 heap_allocatable_pages = s;
1699 heap_pages_expand_sorted(objspace);
1700}
1701
1702
1703static inline void
1704heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1705{
1706 RVALUE *p = (RVALUE *)obj;
1707 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1708
1709 p->as.free.flags = 0;
1710 p->as.free.next = page->freelist;
1711 page->freelist = p;
1712 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1713
1714 if (RGENGC_CHECK_MODE &&
1715 /* obj should belong to page */
1716 !(&page->start[0] <= (RVALUE *)obj &&
1717 (RVALUE *)obj < &page->start[page->total_slots] &&
1718 obj % sizeof(RVALUE) == 0)) {
1719 rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
1720 }
1721
1722 asan_poison_object(obj);
1723
1724 gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
1725}
1726
1727static inline void
1728heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
1729{
1730 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1731 GC_ASSERT(page->free_slots != 0);
1732 if (page->freelist) {
1733 page->free_next = heap->free_pages;
1734 heap->free_pages = page;
1735 }
1736 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1737}
1738
1739#if GC_ENABLE_INCREMENTAL_MARK
1740static inline int
1741heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1742{
1743 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1744 if (page->freelist) {
1745 page->free_next = heap->pooled_pages;
1746 heap->pooled_pages = page;
1747 objspace->rincgc.pooled_slots += page->free_slots;
1748 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1749
1750 return TRUE;
1751 }
1752 else {
1753 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1754
1755 return FALSE;
1756 }
1757}
1758#endif
1759
1760static void
1761heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1762{
1763 list_del(&page->page_node);
1764 heap->total_pages--;
1765 heap->total_slots -= page->total_slots;
1766}
1767
1768static void rb_aligned_free(void *ptr);
1769
1770static void
1771heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
1772{
1774 objspace->profile.total_freed_pages++;
1775 rb_aligned_free(GET_PAGE_BODY(page->start));
1776 free(page);
1777}
1778
1779static void
1780heap_pages_free_unused_pages(rb_objspace_t *objspace)
1781{
1782 size_t i, j;
1783
1784 if (!list_empty(&heap_tomb->pages)) {
1785 for (i = j = 1; j < heap_allocated_pages; i++) {
1786 struct heap_page *page = heap_pages_sorted[i];
1787
1788 if (page->flags.in_tomb && page->free_slots == page->total_slots) {
1789 heap_unlink_page(objspace, heap_tomb, page);
1790 heap_page_free(objspace, page);
1791 }
1792 else {
1793 if (i != j) {
1794 heap_pages_sorted[j] = page;
1795 }
1796 j++;
1797 }
1798 }
1800 }
1801}
1802
1803static struct heap_page *
1804heap_page_allocate(rb_objspace_t *objspace)
1805{
1806 RVALUE *start, *end, *p;
1807 struct heap_page *page;
1808 struct heap_page_body *page_body = 0;
1809 size_t hi, lo, mid;
1810 int limit = HEAP_PAGE_OBJ_LIMIT;
1811
1812 /* assign heap_page body (contains heap_page_header and RVALUEs) */
1813 page_body = (struct heap_page_body *)rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
1814 if (page_body == 0) {
1815 rb_memerror();
1816 }
1817
1818 /* assign heap_page entry */
1819 page = calloc1(sizeof(struct heap_page));
1820 if (page == 0) {
1821 rb_aligned_free(page_body);
1822 rb_memerror();
1823 }
1824
1825 /* adjust obj_limit (object number available in this page) */
1826 start = (RVALUE*)((VALUE)page_body + sizeof(struct heap_page_header));
1827 if ((VALUE)start % sizeof(RVALUE) != 0) {
1828 int delta = (int)(sizeof(RVALUE) - ((VALUE)start % sizeof(RVALUE)));
1829 start = (RVALUE*)((VALUE)start + delta);
1830 limit = (HEAP_PAGE_SIZE - (int)((VALUE)start - (VALUE)page_body))/(int)sizeof(RVALUE);
1831 }
1832 end = start + limit;
1833
1834 /* setup heap_pages_sorted */
1835 lo = 0;
1836 hi = heap_allocated_pages;
1837 while (lo < hi) {
1838 struct heap_page *mid_page;
1839
1840 mid = (lo + hi) / 2;
1841 mid_page = heap_pages_sorted[mid];
1842 if (mid_page->start < start) {
1843 lo = mid + 1;
1844 }
1845 else if (mid_page->start > start) {
1846 hi = mid;
1847 }
1848 else {
1849 rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
1850 }
1851 }
1852
1853 if (hi < heap_allocated_pages) {
1854 MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page *, heap_allocated_pages - hi);
1855 }
1856
1857 heap_pages_sorted[hi] = page;
1858
1859 heap_allocated_pages++;
1860
1862 GC_ASSERT(heap_eden->total_pages + heap_tomb->total_pages == heap_allocated_pages - 1);
1864
1865 objspace->profile.total_allocated_pages++;
1866
1868 rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
1869 heap_allocated_pages, heap_pages_sorted_length);
1870 }
1871
1872 if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
1873 if (heap_pages_himem < end) heap_pages_himem = end;
1874
1875 page->start = start;
1876 page->total_slots = limit;
1877 page_body->header.page = page;
1878
1879 for (p = start; p != end; p++) {
1880 gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
1881 heap_page_add_freeobj(objspace, page, (VALUE)p);
1882 }
1883 page->free_slots = limit;
1884
1885 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1886 return page;
1887}
1888
1889static struct heap_page *
1890heap_page_resurrect(rb_objspace_t *objspace)
1891{
1892 struct heap_page *page = 0, *next;
1893
1894 list_for_each_safe(&heap_tomb->pages, page, next, page_node) {
1895 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1896 if (page->freelist != NULL) {
1897 heap_unlink_page(objspace, heap_tomb, page);
1898 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1899 return page;
1900 }
1901 }
1902
1903 return NULL;
1904}
1905
1906static struct heap_page *
1907heap_page_create(rb_objspace_t *objspace)
1908{
1909 struct heap_page *page;
1910 const char *method = "recycle";
1911
1913
1914 page = heap_page_resurrect(objspace);
1915
1916 if (page == NULL) {
1917 page = heap_page_allocate(objspace);
1918 method = "allocate";
1919 }
1920 if (0) fprintf(stderr, "heap_page_create: %s - %p, heap_pages_sorted_length: %d, heap_allocated_pages: %d, tomb->total_pages: %d\n",
1921 method, (void *)page, (int)heap_pages_sorted_length, (int)heap_allocated_pages, (int)heap_tomb->total_pages);
1922 return page;
1923}
1924
1925static void
1926heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1927{
1928 page->flags.in_tomb = (heap == heap_tomb);
1929 list_add(&heap->pages, &page->page_node);
1930 heap->total_pages++;
1931 heap->total_slots += page->total_slots;
1932}
1933
1934static void
1935heap_assign_page(rb_objspace_t *objspace, rb_heap_t *heap)
1936{
1937 struct heap_page *page = heap_page_create(objspace);
1938 heap_add_page(objspace, heap, page);
1939 heap_add_freepage(heap, page);
1940}
1941
1942static void
1943heap_add_pages(rb_objspace_t *objspace, rb_heap_t *heap, size_t add)
1944{
1945 size_t i;
1946
1947 heap_allocatable_pages_set(objspace, add);
1948
1949 for (i = 0; i < add; i++) {
1950 heap_assign_page(objspace, heap);
1951 }
1952
1954}
1955
1956static size_t
1957heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots)
1958{
1959 double goal_ratio = gc_params.heap_free_slots_goal_ratio;
1961 size_t next_used;
1962
1963 if (goal_ratio == 0.0) {
1964 next_used = (size_t)(used * gc_params.growth_factor);
1965 }
1966 else {
1967 /* Find `f' where free_slots = f * total_slots * goal_ratio
1968 * => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots)
1969 */
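	/* Worked example: total_slots = 10000, free_slots = 2000, goal_ratio = 0.40:
	 *   f = 8000 / (0.60 * 10000) = 1.33, so grow the page count by about 33%
	 *   (values of f below 1.0 are bumped to 1.1, and f is capped at gc_params.growth_factor). */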
1970 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
1971
1972 if (f > gc_params.growth_factor) f = gc_params.growth_factor;
1973 if (f < 1.0) f = 1.1;
1974
1975 next_used = (size_t)(f * used);
1976
1977 if (0) {
1979 "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
1980 " G(%1.2f), f(%1.2f),"
1981 " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
1983 goal_ratio, f, used, next_used);
1984 }
1985 }
1986
1987 if (gc_params.growth_max_slots > 0) {
1988 size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
1989 if (next_used > max_used) next_used = max_used;
1990 }
1991
1992 return next_used - used;
1993}
1994
1995static void
1996heap_set_increment(rb_objspace_t *objspace, size_t additional_pages)
1997{
1998 size_t used = heap_eden->total_pages;
1999 size_t next_used_limit = used + additional_pages;
2000
2001 if (next_used_limit == heap_allocated_pages) next_used_limit++;
2002
2003 heap_allocatable_pages_set(objspace, next_used_limit - used);
2004
2005 gc_report(1, objspace, "heap_set_increment: heap_allocatable_pages is %d\n", (int)heap_allocatable_pages);
2006}
2007
2008static int
2009heap_increment(rb_objspace_t *objspace, rb_heap_t *heap)
2010{
2011 if (heap_allocatable_pages > 0) {
2012 gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %d, heap_pages_inc: %d, heap->total_pages: %d\n",
2014
2017
2018 heap_assign_page(objspace, heap);
2019 return TRUE;
2020 }
2021 return FALSE;
2022}
2023
2024static void
2025heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
2026{
2027 GC_ASSERT(heap->free_pages == NULL);
2028
2029 if (is_lazy_sweeping(heap)) {
2030 gc_sweep_continue(objspace, heap);
2031 }
2032 else if (is_incremental_marking(objspace)) {
2033 gc_marks_continue(objspace, heap);
2034 }
2035
2036 if (heap->free_pages == NULL &&
2037 (will_be_incremental_marking(objspace) || heap_increment(objspace, heap) == FALSE) &&
2038 gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2039 rb_memerror();
2040 }
2041}
2042
2043static RVALUE *
2044heap_get_freeobj_from_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
2045{
2046 struct heap_page *page;
2047 RVALUE *p;
2048
2049 while (heap->free_pages == NULL) {
2050 heap_prepare(objspace, heap);
2051 }
2052 page = heap->free_pages;
2053 heap->free_pages = page->free_next;
2054 heap->using_page = page;
2055
2056 GC_ASSERT(page->free_slots != 0);
2057 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
2058 p = page->freelist;
2059 page->freelist = NULL;
2060 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
2061 page->free_slots = 0;
2062 asan_unpoison_object((VALUE)p, true);
2063 return p;
2064}
2065
2066static inline VALUE
2067heap_get_freeobj_head(rb_objspace_t *objspace, rb_heap_t *heap)
2068{
2069 RVALUE *p = heap->freelist;
2070 if (LIKELY(p != NULL)) {
2071 heap->freelist = p->as.free.next;
2072 }
2073 asan_unpoison_object((VALUE)p, true);
2074 return (VALUE)p;
2075}
2076
2077static inline VALUE
2078heap_get_freeobj(rb_objspace_t *objspace, rb_heap_t *heap)
2079{
2080 RVALUE *p = heap->freelist;
2081
2082 while (1) {
2083 if (LIKELY(p != NULL)) {
2084 asan_unpoison_object((VALUE)p, true);
2085 heap->freelist = p->as.free.next;
2086 return (VALUE)p;
2087 }
2088 else {
2089 p = heap_get_freeobj_from_next_freepage(objspace, heap);
2090 }
2091 }
2092}
2093
2094void
2095rb_objspace_set_event_hook(const rb_event_flag_t event)
2096{
2097 rb_objspace_t *objspace = &rb_objspace;
2098 objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
2099 objspace->flags.has_hook = (objspace->hook_events != 0);
2100}
2101
2102static void
2103gc_event_hook_body(rb_execution_context_t *ec, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
2104{
2105 const VALUE *pc = ec->cfp->pc;
2106 if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
2107 /* increment PC because source line is calculated with PC-1 */
2108 ec->cfp->pc++;
2109 }
2110 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
2111 ec->cfp->pc = pc;
2112}
2113
2114#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
2115#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
2116
2117#define gc_event_hook(objspace, event, data) do { \
2118 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
2119 gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
2120 } \
2121} while (0)
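/* Typical call site (sketch; RUBY_INTERNAL_EVENT_NEWOBJ comes from ruby/debug.h):
 *   gc_event_hook(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj);
 * The hook body only runs when a hook for that event is registered, and
 * gc_event_hook_body() temporarily bumps the PC so the reported source line is
 * the one that triggered the event.
 */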
2122
2123static inline VALUE
2124newobj_init(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, rb_objspace_t *objspace, VALUE obj)
2125{
2126#if !__has_feature(memory_sanitizer)
2129#endif
2130
2131 /* OBJSETUP */
2132 struct RVALUE buf = {
2133 .as = {
2134 .values = {
2135 .basic = {
2136 .flags = flags,
2137 .klass = klass,
2138 },
2139 .v1 = v1,
2140 .v2 = v2,
2141 .v3 = v3,
2142 },
2143 },
2144 };
2145 MEMCPY(RANY(obj), &buf, RVALUE, 1);
2146
2147#if RGENGC_CHECK_MODE
2148 GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
2149 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
2150 GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
2151 GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
2152
2153 if (flags & FL_PROMOTED1) {
2154 if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
2155 }
2156 else {
2157 if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
2158 }
2159 if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
2160#endif
2161
2162#if USE_RGENGC
2163 if (UNLIKELY(wb_protected == FALSE)) {
2165 }
2166#endif
2167
2168#if RGENGC_PROFILE
2169 if (wb_protected) {
2170 objspace->profile.total_generated_normal_object_count++;
2171#if RGENGC_PROFILE >= 2
2172 objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2173#endif
2174 }
2175 else {
2176 objspace->profile.total_generated_shady_object_count++;
2177#if RGENGC_PROFILE >= 2
2178 objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2179#endif
2180 }
2181#endif
2182
2183#if GC_DEBUG
2184 RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
2185 GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
2186#endif
2187
2188 objspace->total_allocated_objects++;
2189
2190 gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
2191
2192#if RGENGC_OLD_NEWOBJ_CHECK > 0
2193 {
2194 static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2195
2196 if (!is_incremental_marking(objspace) &&
2197 flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
2198 ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
2199 if (--newobj_cnt == 0) {
2200 newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2201
2202 gc_mark_set(objspace, obj);
2203 RVALUE_AGE_SET_OLD(objspace, obj);
2204
2206 }
2207 }
2208 }
2209#endif
2210 check_rvalue_consistency(obj);
2211 return obj;
2212}
2213
2214static inline VALUE
2215newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, int wb_protected)
2216{
2217 VALUE obj;
2218
2220 if (during_gc) {
2221 dont_gc = 1;
2222 during_gc = 0;
2223 rb_bug("object allocation during garbage collection phase");
2224 }
2225
2226 if (ruby_gc_stressful) {
2227 if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2228 rb_memerror();
2229 }
2230 }
2231 }
2232
2233 obj = heap_get_freeobj(objspace, heap_eden);
2234 newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
2236 return obj;
2237}
2238
2239NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace));
2240NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace));
2241
2242static VALUE
2243newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)
2244{
2245 return newobj_slowpath(klass, flags, v1, v2, v3, objspace, TRUE);
2246}
2247
2248static VALUE
2249newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)
2250{
2251 return newobj_slowpath(klass, flags, v1, v2, v3, objspace, FALSE);
2252}
2253
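/* Allocation entry point for heap objects.  In the common case a slot is
 * taken directly from the eden freelist via heap_get_freeobj_head(); when
 * that is not possible (e.g. a GC is in progress or a GC event hook is
 * installed), the NOINLINE slowpath above is taken instead, which may run a
 * garbage collection before allocating. */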
2254static inline VALUE
2255newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected)
2256{
2257 rb_objspace_t *objspace = &rb_objspace;
2258 VALUE obj;
2259
2260 RB_DEBUG_COUNTER_INC(obj_newobj);
2261 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2262
2263#if GC_DEBUG_STRESS_TO_CLASS
2266 for (i = 0; i < cnt; ++i) {
2268 }
2269 }
2270#endif
2271 if (!(during_gc ||
2273 gc_event_hook_available_p(objspace)) &&
2274 (obj = heap_get_freeobj_head(objspace, heap_eden)) != Qfalse) {
2275 return newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
2276 }
2277 else {
2278 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2279
2280 return wb_protected ?
2281 newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace) :
2282 newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace);
2283 }
2284}
2285
2286VALUE
2288{
2290 return newobj_of(klass, flags, 0, 0, 0, FALSE);
2291}
2292
2293VALUE
2295{
2297 return newobj_of(klass, flags, 0, 0, 0, TRUE);
2298}
2299
2300/* for compatibility */
2301
2302VALUE
2304{
2305 return newobj_of(0, T_NONE, 0, 0, 0, FALSE);
2306}
2307
2308VALUE
2310{
2311 return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED);
2312}
2313
2314#define UNEXPECTED_NODE(func) \
2315 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
2316 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
2317
2318#undef rb_imemo_new
2319
2320VALUE
2322{
2323 VALUE flags = T_IMEMO | (type << FL_USHIFT);
2324 return newobj_of(v0, flags, v1, v2, v3, TRUE);
2325}
2326
2327static VALUE
2328rb_imemo_tmpbuf_new(VALUE v1, VALUE v2, VALUE v3, VALUE v0)
2329{
2331 return newobj_of(v0, flags, v1, v2, v3, FALSE);
2332}
2333
2334static VALUE
2335rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
2336{
2337 return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
2338}
2339
2342{
2343 return (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new((VALUE)buf, (VALUE)old_heap, (VALUE)cnt, 0);
2344}
2345
2346static size_t
2347imemo_memsize(VALUE obj)
2348{
2349 size_t size = 0;
2350 switch (imemo_type(obj)) {
2351 case imemo_ment:
2352 size += sizeof(RANY(obj)->as.imemo.ment.def);
2353 break;
2354 case imemo_iseq:
2356 break;
2357 case imemo_env:
2358 size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
2359 break;
2360 case imemo_tmpbuf:
2361 size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
2362 break;
2363 case imemo_ast:
2364 size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
2365 break;
2366 case imemo_cref:
2367 case imemo_svar:
2368 case imemo_throw_data:
2369 case imemo_ifunc:
2370 case imemo_memo:
2372 break;
2373 default:
2374 /* unreachable */
2375 break;
2376 }
2377 return size;
2378}
2379
2380#if IMEMO_DEBUG
2381VALUE
2382rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line)
2383{
2384 VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
2385 fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
2386 return memo;
2387}
2388#endif
2389
2390VALUE
2392{
2394 return newobj_of(klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, FALSE);
2395}
2396
2397#undef rb_data_object_alloc
2399 RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree),
2400 rb_data_object_wrap, (klass, datap, dmark, dfree))
2401
2402
2403VALUE
2405{
2406 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
2407 DATA_PTR(obj) = xcalloc(1, size);
2408 return obj;
2409}
2410
2411VALUE
2413{
2415 return newobj_of(klass, T_DATA, (VALUE)type, (VALUE)1, (VALUE)datap, type->flags & RUBY_FL_WB_PROTECTED);
2416}
2417
2418#undef rb_data_typed_object_alloc
2420 const rb_data_type_t *type),
2422
2423VALUE
2425{
2427 DATA_PTR(obj) = xcalloc(1, size);
2428 return obj;
2429}
2430
2431size_t
2433{
2434 if (RTYPEDDATA_P(obj)) {
2436 const void *ptr = RTYPEDDATA_DATA(obj);
2437 if (ptr && type->function.dsize) {
2438 return type->function.dsize(ptr);
2439 }
2440 }
2441 return 0;
2442}
2443
2444const char *
2446{
2447 if (RTYPEDDATA_P(obj)) {
2448 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
2449 }
2450 else {
2451 return 0;
2452 }
2453}
2454
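/* Returns TRUE if ptr is a properly aligned object slot inside a live
 * (non-tomb) heap page.  Conservative marking (e.g. gc_mark_maybe() for
 * values found on the machine stack) relies on this to reject words that
 * merely look like object pointers. */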
2455PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
2456static inline int
2457is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
2458{
2459 register RVALUE *p = RANY(ptr);
2460 register struct heap_page *page;
2461 register size_t hi, lo, mid;
2462
2463 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
2464
2465 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
2466 RB_DEBUG_COUNTER_INC(gc_isptr_range);
2467
2468 if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
2469 RB_DEBUG_COUNTER_INC(gc_isptr_align);
2470
2471 /* check whether p points into a heap page, using binary search over heap_pages_sorted */
2472 lo = 0;
2474 while (lo < hi) {
2475 mid = (lo + hi) / 2;
2476 page = heap_pages_sorted[mid];
2477 if (page->start <= p) {
2478 if (p < page->start + page->total_slots) {
2479 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
2480
2481 if (page->flags.in_tomb) {
2482 return FALSE;
2483 }
2484 else {
2485 return TRUE;
2486 }
2487 }
2488 lo = mid + 1;
2489 }
2490 else {
2491 hi = mid;
2492 }
2493 }
2494 return FALSE;
2495}
2496
2498free_const_entry_i(VALUE value, void *data)
2499{
2500 rb_const_entry_t *ce = (rb_const_entry_t *)value;
2501 xfree(ce);
2502 return ID_TABLE_CONTINUE;
2503}
2504
2505void
2507{
2508 rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
2509 rb_id_table_free(tbl);
2510}
2511
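/* Objects whose dfree()/finalizers cannot be run during sweeping are turned
 * into T_ZOMBIE slots and chained onto the deferred-final list; the deferred
 * work is later executed by finalize_deferred()/finalize_list(), which then
 * return the slot to its page's freelist. */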
2512static inline void
2513make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
2514{
2515 struct RZombie *zombie = RZOMBIE(obj);
2516 zombie->basic.flags = T_ZOMBIE | (zombie->basic.flags & FL_SEEN_OBJ_ID);
2517 zombie->dfree = dfree;
2518 zombie->data = data;
2521}
2522
2523static inline void
2524make_io_zombie(rb_objspace_t *objspace, VALUE obj)
2525{
2526 rb_io_t *fptr = RANY(obj)->as.file.fptr;
2527 make_zombie(objspace, obj, (void (*)(void*))rb_io_fptr_finalize, fptr);
2528}
2529
2530static void
2531obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
2532{
2533 VALUE id;
2534
2537
2538 if (st_delete(objspace->obj_to_id_tbl, (st_data_t *)&obj, &id)) {
2539 GC_ASSERT(id);
2540 st_delete(objspace->id_to_obj_tbl, (st_data_t *)&id, NULL);
2541 }
2542 else {
2543 rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
2544 }
2545}
2546
2547static int
2548obj_free(rb_objspace_t *objspace, VALUE obj)
2549{
2550 RB_DEBUG_COUNTER_INC(obj_free);
2551
2553
2554 switch (BUILTIN_TYPE(obj)) {
2555 case T_NIL:
2556 case T_FIXNUM:
2557 case T_TRUE:
2558 case T_FALSE:
2559 rb_bug("obj_free() called for broken object");
2560 break;
2561 }
2562
2563 if (FL_TEST(obj, FL_EXIVAR)) {
2566 }
2567
2569 obj_free_object_id(objspace, obj);
2570 }
2571
2572#if USE_RGENGC
2573 if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2574
2575#if RGENGC_CHECK_MODE
2576#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
2577 CHECK(RVALUE_WB_UNPROTECTED);
2578 CHECK(RVALUE_MARKED);
2579 CHECK(RVALUE_MARKING);
2580 CHECK(RVALUE_UNCOLLECTIBLE);
2581#undef CHECK
2582#endif
2583#endif
2584
2585 switch (BUILTIN_TYPE(obj)) {
2586 case T_OBJECT:
2587 if ((RANY(obj)->as.basic.flags & ROBJECT_EMBED) ||
2588 RANY(obj)->as.object.as.heap.ivptr == NULL) {
2589 RB_DEBUG_COUNTER_INC(obj_obj_embed);
2590 }
2591 else if (ROBJ_TRANSIENT_P(obj)) {
2592 RB_DEBUG_COUNTER_INC(obj_obj_transient);
2593 }
2594 else {
2595 xfree(RANY(obj)->as.object.as.heap.ivptr);
2596 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
2597 }
2598 break;
2599 case T_MODULE:
2600 case T_CLASS:
2603 if (RCLASS_IV_TBL(obj)) {
2605 }
2606 if (RCLASS_CONST_TBL(obj)) {
2608 }
2609 if (RCLASS_IV_INDEX_TBL(obj)) {
2611 }
2612 if (RCLASS_EXT(obj)->subclasses) {
2613 if (BUILTIN_TYPE(obj) == T_MODULE) {
2615 }
2616 else {
2618 }
2619 RCLASS_EXT(obj)->subclasses = NULL;
2620 }
2623 if (RANY(obj)->as.klass.ptr)
2624 xfree(RANY(obj)->as.klass.ptr);
2625 RANY(obj)->as.klass.ptr = NULL;
2626
2629 break;
2630 case T_STRING:
2632 break;
2633 case T_ARRAY:
2635 break;
2636 case T_HASH:
2637#if USE_DEBUG_COUNTER
2638 switch RHASH_SIZE(obj) {
2639 case 0:
2640 RB_DEBUG_COUNTER_INC(obj_hash_empty);
2641 break;
2642 case 1:
2643 RB_DEBUG_COUNTER_INC(obj_hash_1);
2644 break;
2645 case 2:
2646 RB_DEBUG_COUNTER_INC(obj_hash_2);
2647 break;
2648 case 3:
2649 RB_DEBUG_COUNTER_INC(obj_hash_3);
2650 break;
2651 case 4:
2652 RB_DEBUG_COUNTER_INC(obj_hash_4);
2653 break;
2654 case 5:
2655 case 6:
2656 case 7:
2657 case 8:
2658 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
2659 break;
2660 default:
2661 GC_ASSERT(RHASH_SIZE(obj) > 8);
2662 RB_DEBUG_COUNTER_INC(obj_hash_g8);
2663 }
2664
2665 if (RHASH_AR_TABLE_P(obj)) {
2666 if (RHASH_AR_TABLE(obj) == NULL) {
2667 RB_DEBUG_COUNTER_INC(obj_hash_null);
2668 }
2669 else {
2670 RB_DEBUG_COUNTER_INC(obj_hash_ar);
2671 }
2672 }
2673 else {
2674 RB_DEBUG_COUNTER_INC(obj_hash_st);
2675 }
2676#endif
2677 if (/* RHASH_AR_TABLE_P(obj) */ !FL_TEST_RAW(obj, RHASH_ST_TABLE_FLAG)) {
2678 struct ar_table_struct *tab = RHASH(obj)->as.ar;
2679
2680 if (tab) {
2681 if (RHASH_TRANSIENT_P(obj)) {
2682 RB_DEBUG_COUNTER_INC(obj_hash_transient);
2683 }
2684 else {
2685 ruby_xfree(tab);
2686 }
2687 }
2688 }
2689 else {
2691 st_free_table(RHASH(obj)->as.st);
2692 }
2693 break;
2694 case T_REGEXP:
2695 if (RANY(obj)->as.regexp.ptr) {
2696 onig_free(RANY(obj)->as.regexp.ptr);
2697 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
2698 }
2699 break;
2700 case T_DATA:
2701 if (DATA_PTR(obj)) {
2702 int free_immediately = FALSE;
2703 void (*dfree)(void *);
2704 void *data = DATA_PTR(obj);
2705
2706 if (RTYPEDDATA_P(obj)) {
2707 free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
2708 dfree = RANY(obj)->as.typeddata.type->function.dfree;
2709 if (0 && free_immediately == 0) {
2710 /* to expose non-free-immediate T_DATA */
2711 fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
2712 }
2713 }
2714 else {
2715 dfree = RANY(obj)->as.data.dfree;
2716 }
2717
2718 if (dfree) {
2719 if (dfree == RUBY_DEFAULT_FREE) {
2720 xfree(data);
2721 RB_DEBUG_COUNTER_INC(obj_data_xfree);
2722 }
2723 else if (free_immediately) {
2724 (*dfree)(data);
2725 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
2726 }
2727 else {
2728 make_zombie(objspace, obj, dfree, data);
2729 RB_DEBUG_COUNTER_INC(obj_data_zombie);
2730 return 1;
2731 }
2732 }
2733 else {
2734 RB_DEBUG_COUNTER_INC(obj_data_empty);
2735 }
2736 }
2737 break;
2738 case T_MATCH:
2739 if (RANY(obj)->as.match.rmatch) {
2740 struct rmatch *rm = RANY(obj)->as.match.rmatch;
2741#if USE_DEBUG_COUNTER
2742 if (rm->regs.num_regs >= 8) {
2743 RB_DEBUG_COUNTER_INC(obj_match_ge8);
2744 }
2745 else if (rm->regs.num_regs >= 4) {
2746 RB_DEBUG_COUNTER_INC(obj_match_ge4);
2747 }
2748 else if (rm->regs.num_regs >= 1) {
2749 RB_DEBUG_COUNTER_INC(obj_match_under4);
2750 }
2751#endif
2752 onig_region_free(&rm->regs, 0);
2753 if (rm->char_offset)
2754 xfree(rm->char_offset);
2755 xfree(rm);
2756
2757 RB_DEBUG_COUNTER_INC(obj_match_ptr);
2758 }
2759 break;
2760 case T_FILE:
2761 if (RANY(obj)->as.file.fptr) {
2762 make_io_zombie(objspace, obj);
2763 RB_DEBUG_COUNTER_INC(obj_file_ptr);
2764 return 1;
2765 }
2766 break;
2767 case T_RATIONAL:
2768 RB_DEBUG_COUNTER_INC(obj_rational);
2769 break;
2770 case T_COMPLEX:
2771 RB_DEBUG_COUNTER_INC(obj_complex);
2772 break;
2773 case T_MOVED:
2774 break;
2775 case T_ICLASS:
2776 /* Basically, T_ICLASS shares its method table with the module */
2779 }
2780 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
2782 }
2783 if (RCLASS_EXT(obj)->subclasses) {
2785 RCLASS_EXT(obj)->subclasses = NULL;
2786 }
2789 xfree(RANY(obj)->as.klass.ptr);
2790 RANY(obj)->as.klass.ptr = NULL;
2791
2792 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
2793 break;
2794
2795 case T_FLOAT:
2796 RB_DEBUG_COUNTER_INC(obj_float);
2797 break;
2798
2799 case T_BIGNUM:
2800 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2802 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
2803 }
2804 else {
2805 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
2806 }
2807 break;
2808
2809 case T_NODE:
2810 UNEXPECTED_NODE(obj_free);
2811 break;
2812
2813 case T_STRUCT:
2814 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
2815 RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
2816 RB_DEBUG_COUNTER_INC(obj_struct_embed);
2817 }
2818 else if (RSTRUCT_TRANSIENT_P(obj)) {
2819 RB_DEBUG_COUNTER_INC(obj_struct_transient);
2820 }
2821 else {
2822 xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
2823 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
2824 }
2825 break;
2826
2827 case T_SYMBOL:
2828 {
2830 RB_DEBUG_COUNTER_INC(obj_symbol);
2831 }
2832 break;
2833
2834 case T_IMEMO:
2835 switch (imemo_type(obj)) {
2836 case imemo_ment:
2837 rb_free_method_entry(&RANY(obj)->as.imemo.ment);
2838 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
2839 break;
2840 case imemo_iseq:
2841 rb_iseq_free(&RANY(obj)->as.imemo.iseq);
2842 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
2843 break;
2844 case imemo_env:
2845 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
2846 xfree((VALUE *)RANY(obj)->as.imemo.env.env);
2847 RB_DEBUG_COUNTER_INC(obj_imemo_env);
2848 break;
2849 case imemo_tmpbuf:
2850 xfree(RANY(obj)->as.imemo.alloc.ptr);
2851 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
2852 break;
2853 case imemo_ast:
2854 rb_ast_free(&RANY(obj)->as.imemo.ast);
2855 RB_DEBUG_COUNTER_INC(obj_imemo_ast);
2856 break;
2857 case imemo_cref:
2858 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
2859 break;
2860 case imemo_svar:
2861 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
2862 break;
2863 case imemo_throw_data:
2864 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
2865 break;
2866 case imemo_ifunc:
2867 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
2868 break;
2869 case imemo_memo:
2870 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
2871 break;
2873 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
2874 break;
2875 default:
2876 /* unreachable */
2877 break;
2878 }
2879 return 0;
2880
2881 default:
2882 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
2883 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
2884 }
2885
2886 if (FL_TEST(obj, FL_FINALIZE)) {
2887 make_zombie(objspace, obj, 0, 0);
2888 return 1;
2889 }
2890 else {
2891 return 0;
2892 }
2893}
2894
2895
2896#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
2897#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
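/* Heap object IDs are handed out from a monotonically increasing counter
 * (objspace->next_object_id), advanced by OBJ_ID_INCREMENT for each new id,
 * rather than being derived from addresses.  The obj_to_id_tbl / id_to_obj_tbl
 * tables keep the two-way mapping that backs Object#object_id and
 * ObjectSpace._id2ref. */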
2898
2899static int
2900object_id_cmp(st_data_t x, st_data_t y)
2901{
2902 if (RB_TYPE_P(x, T_BIGNUM)) {
2903 return !rb_big_eql(x, y);
2904 } else {
2905 return x != y;
2906 }
2907}
2908
2909static st_index_t
2910object_id_hash(st_data_t n)
2911{
2912 if (RB_TYPE_P(n, T_BIGNUM)) {
2913 return FIX2LONG(rb_big_hash(n));
2914 } else {
2915 return st_numhash(n);
2916 }
2917}
2918static const struct st_hash_type object_id_hash_type = {
2919 object_id_cmp,
2920 object_id_hash,
2921};
2922
2923void
2925{
2926 rb_objspace_t *objspace = &rb_objspace;
2927
2929 objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
2930 objspace->obj_to_id_tbl = st_init_numtable();
2931
2932#if RGENGC_ESTIMATE_OLDMALLOC
2934#endif
2935
2936 heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
2937 init_mark_stack(&objspace->mark_stack);
2938
2939 objspace->profile.invoke_time = getrusage_time();
2941}
2942
2943void
2945{
2946 rb_objspace_t *objspace = &rb_objspace;
2947
2948 gc_stress_set(objspace, ruby_initial_gc_stress);
2949}
2950
2951typedef int each_obj_callback(void *, void *, size_t, void *);
2952
2953static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data);
2954static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
2955
2959 void *data;
2960};
2961
2962static void
2963objspace_each_objects_without_setup(rb_objspace_t *objspace, each_obj_callback *callback, void *data)
2964{
2965 size_t i;
2966 struct heap_page *page;
2967 RVALUE *pstart = NULL, *pend;
2968
2969 i = 0;
2970 while (i < heap_allocated_pages) {
2971 while (0 < i && pstart < heap_pages_sorted[i-1]->start) i--;
2972 while (i < heap_allocated_pages && heap_pages_sorted[i]->start <= pstart) i++;
2973 if (heap_allocated_pages <= i) break;
2974
2975 page = heap_pages_sorted[i];
2976
2977 pstart = page->start;
2978 pend = pstart + page->total_slots;
2979
2980 if ((*callback)(pstart, pend, sizeof(RVALUE), data)) {
2981 break;
2982 }
2983 }
2984}
2985
2986static VALUE
2987objspace_each_objects_protected(VALUE arg)
2988{
2989 struct each_obj_args *args = (struct each_obj_args *)arg;
2990 objspace_each_objects_without_setup(args->objspace, args->callback, args->data);
2991 return Qnil;
2992}
2993
2994static VALUE
2995incremental_enable(VALUE _)
2996{
2998
3000 return Qnil;
3001}
3002
3003/*
3004 * rb_objspace_each_objects() is a special C API to walk through the
3005 * Ruby object space. This C API is difficult to use safely;
3006 * you should not use it unless you have read the source code of this
3007 * function and understand exactly what it does.
3008 *
3009 * 'callback' will be called several times (once per heap page, in the
3010 * current implementation) with:
3011 * vstart: a pointer to the first living object of the heap_page.
3012 * vend: a pointer to one past the end of the valid heap_page area.
3013 * stride: the distance to the next VALUE.
3014 *
3015 * If callback() returns non-zero, the iteration is stopped.
3016 *
3017 * A sample callback that iterates over the live objects:
3018 *
3019 * int
3020 * sample_callback(void *vstart, void *vend, size_t stride, void *data) {
3021 *     VALUE v = (VALUE)vstart;
3022 *     for (; v != (VALUE)vend; v += stride) {
3023 *         if (RBASIC(v)->flags) { // liveness check
3024 *             // do something with the live object 'v'
3025 *         }
3026 *     }
3027 *     return 0; // return non-zero to stop the iteration
3028 * }
3029 *
3030 * Note: 'vstart' is not the top of the heap_page. It points at the first
3031 * living object, so that at least one object is held and GC problems are
3032 * avoided. This means you cannot walk through all Ruby object pages,
3033 * including freed object pages.
3034 *
3035 * Note: in this implementation, 'stride' is the same as sizeof(RVALUE).
3036 * However, other values may be passed as 'stride' in the future, so always
3037 * use 'stride' instead of a hard-coded constant in the iteration.
3038 */
3039void
3041{
3042 objspace_each_objects(&rb_objspace, callback, data);
3043}
3044
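/* Illustrative sketch of using rb_objspace_each_objects() to count live
 * object slots.  The callback follows the each_obj_callback typedef above;
 * the helper names (count_live_i, count_live_slots) are hypothetical and the
 * block is kept out of the build with "#if 0". */
#if 0
static int
count_live_i(void *vstart, void *vend, size_t stride, void *data)
{
    size_t *count = (size_t *)data;
    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (RBASIC(v)->flags) (*count)++;   /* slots with zero flags are free */
    }
    return 0;                               /* 0 means: continue the iteration */
}

static size_t
count_live_slots(void)
{
    size_t count = 0;
    rb_objspace_each_objects(count_live_i, &count);
    return count;
}
#endif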
3045static void
3046objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data)
3047{
3048 int prev_dont_incremental = objspace->flags.dont_incremental;
3049
3050 gc_rest(objspace);
3052
3053 if (prev_dont_incremental) {
3054 objspace_each_objects_without_setup(objspace, callback, data);
3055 }
3056 else {
3057 struct each_obj_args args = {objspace, callback, data};
3058 rb_ensure(objspace_each_objects_protected, (VALUE)&args, incremental_enable, Qnil);
3059 }
3060}
3061
3062void
3064{
3065 objspace_each_objects_without_setup(&rb_objspace, callback, data);
3066}
3067
3069 size_t num;
3071};
3072
3073static int
3074internal_object_p(VALUE obj)
3075{
3076 RVALUE *p = (RVALUE *)obj;
3078 asan_unpoison_object(obj, false);
3079 bool used_p = p->as.basic.flags;
3080
3081 if (used_p) {
3082 switch (BUILTIN_TYPE(p)) {
3083 case T_NODE:
3084 UNEXPECTED_NODE(internal_object_p);
3085 break;
3086 case T_NONE:
3087 case T_MOVED:
3088 case T_IMEMO:
3089 case T_ICLASS:
3090 case T_ZOMBIE:
3091 break;
3092 case T_CLASS:
3093 if (!p->as.basic.klass) break;
3094 if (FL_TEST(obj, FL_SINGLETON)) {
3096 }
3097 return 0;
3098 default:
3099 if (!p->as.basic.klass) break;
3100 return 0;
3101 }
3102 }
3103 if (ptr || ! used_p) {
3104 asan_poison_object(obj);
3105 }
3106 return 1;
3107}
3108
3109int
3111{
3112 return internal_object_p(obj);
3113}
3114
3115static int
3116os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
3117{
3118 struct os_each_struct *oes = (struct os_each_struct *)data;
3119 RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
3120
3121 for (; p != pend; p++) {
3122 volatile VALUE v = (VALUE)p;
3123 if (!internal_object_p(v)) {
3124 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
3125 rb_yield(v);
3126 oes->num++;
3127 }
3128 }
3129 }
3130
3131 return 0;
3132}
3133
3134static VALUE
3135os_obj_of(VALUE of)
3136{
3137 struct os_each_struct oes;
3138
3139 oes.num = 0;
3140 oes.of = of;
3141 rb_objspace_each_objects(os_obj_of_i, &oes);
3142 return SIZET2NUM(oes.num);
3143}
3144
3145/*
3146 * call-seq:
3147 * ObjectSpace.each_object([module]) {|obj| ... } -> integer
3148 * ObjectSpace.each_object([module]) -> an_enumerator
3149 *
3150 * Calls the block once for each living, nonimmediate object in this
3151 * Ruby process. If <i>module</i> is specified, calls the block
3152 * for only those classes or modules that match (or are a subclass of)
3153 * <i>module</i>. Returns the number of objects found. Immediate
3154 * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
3155 * <code>true</code>, <code>false</code>, and <code>nil</code>) are
3156 * never returned. In the example below, #each_object returns both
3157 * the numbers we defined and several constants defined in the Math
3158 * module.
3159 *
3160 * If no block is given, an enumerator is returned instead.
3161 *
3162 * a = 102.7
3163 * b = 95 # Won't be returned
3164 * c = 12345678987654321
3165 * count = ObjectSpace.each_object(Numeric) {|x| p x }
3166 * puts "Total count: #{count}"
3167 *
3168 * <em>produces:</em>
3169 *
3170 * 12345678987654321
3171 * 102.7
3172 * 2.71828182845905
3173 * 3.14159265358979
3174 * 2.22044604925031e-16
3175 * 1.7976931348623157e+308
3176 * 2.2250738585072e-308
3177 * Total count: 7
3178 *
3179 */
3180
3181static VALUE
3182os_each_obj(int argc, VALUE *argv, VALUE os)
3183{
3184 VALUE of;
3185
3186 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
3187 RETURN_ENUMERATOR(os, 1, &of);
3188 return os_obj_of(of);
3189}
3190
3191/*
3192 * call-seq:
3193 * ObjectSpace.undefine_finalizer(obj)
3194 *
3195 * Removes all finalizers for <i>obj</i>.
3196 *
3197 */
3198
3199static VALUE
3200undefine_final(VALUE os, VALUE obj)
3201{
3202 return rb_undefine_finalizer(obj);
3203}
3204
3205VALUE
3207{
3208 rb_objspace_t *objspace = &rb_objspace;
3209 st_data_t data = obj;
3211 st_delete(finalizer_table, &data, 0);
3213 return obj;
3214}
3215
3216static void
3217should_be_callable(VALUE block)
3218{
3219 if (!rb_obj_respond_to(block, idCall, TRUE)) {
3220 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
3221 rb_obj_class(block));
3222 }
3223}
3224
3225static void
3226should_be_finalizable(VALUE obj)
3227{
3228 if (!FL_ABLE(obj)) {
3229 rb_raise(rb_eArgError, "cannot define finalizer for %s",
3231 }
3233}
3234
3235/*
3236 * call-seq:
3237 * ObjectSpace.define_finalizer(obj, aProc=proc())
3238 *
3239 * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
3240 * is destroyed. The object ID of <i>obj</i> will be passed
3241 * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
3242 * method, make sure it can be called with a single argument.
3243 *
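 *    A minimal, illustrative example (the names are made up). Note that the
 *    finalizer proc must not capture <i>obj</i> itself, otherwise <i>obj</i>
 *    can never become garbage:
 *
 *       class Foo
 *         def initialize(name)
 *           ObjectSpace.define_finalizer(self, self.class.make_finalizer(name))
 *         end
 *
 *         def self.make_finalizer(name)
 *           proc { |id| puts "finalized #{name} (object id #{id})" }
 *         end
 *       end
 *
 *       Foo.new("example")   # the proc runs once this object is collected
 *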
3244 */
3245
3246static VALUE
3247define_final(int argc, VALUE *argv, VALUE os)
3248{
3249 VALUE obj, block;
3250
3251 rb_scan_args(argc, argv, "11", &obj, &block);
3252 should_be_finalizable(obj);
3253 if (argc == 1) {
3254 block = rb_block_proc();
3255 }
3256 else {
3257 should_be_callable(block);
3258 }
3259
3260 return define_final0(obj, block);
3261}
3262
3263static VALUE
3264define_final0(VALUE obj, VALUE block)
3265{
3266 rb_objspace_t *objspace = &rb_objspace;
3267 VALUE table;
3268 st_data_t data;
3269
3270 RBASIC(obj)->flags |= FL_FINALIZE;
3271
3272 block = rb_ary_new3(2, INT2FIX(0), block);
3273 OBJ_FREEZE(block);
3274
3275 if (st_lookup(finalizer_table, obj, &data)) {
3276 table = (VALUE)data;
3277
3278 /* avoid duplicate block, table is usually small */
3279 {
3280 long len = RARRAY_LEN(table);
3281 long i;
3282
3283 for (i = 0; i < len; i++) {
3284 VALUE recv = RARRAY_AREF(table, i);
3285 if (rb_funcall(recv, idEq, 1, block)) {
3286 return recv;
3287 }
3288 }
3289 }
3290
3291 rb_ary_push(table, block);
3292 }
3293 else {
3294 table = rb_ary_new3(1, block);
3295 RBASIC_CLEAR_CLASS(table);
3297 }
3298 return block;
3299}
3300
3301VALUE
3303{
3304 should_be_finalizable(obj);
3305 should_be_callable(block);
3306 return define_final0(obj, block);
3307}
3308
3309void
3311{
3312 rb_objspace_t *objspace = &rb_objspace;
3313 VALUE table;
3314 st_data_t data;
3315
3316 if (!FL_TEST(obj, FL_FINALIZE)) return;
3317 if (st_lookup(finalizer_table, obj, &data)) {
3318 table = (VALUE)data;
3319 st_insert(finalizer_table, dest, table);
3320 }
3321 FL_SET(dest, FL_FINALIZE);
3322}
3323
3324static VALUE
3325run_single_final(VALUE final, VALUE objid)
3326{
3327 const VALUE cmd = RARRAY_AREF(final, 1);
3328 return rb_check_funcall(cmd, idCall, 1, &objid);
3329}
3330
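/* Runs every finalizer registered in 'table' for obj, passing the saved
 * object id.  If a finalizer raises, the exception is caught via EC_PUSH_TAG,
 * that entry is skipped, and the remaining finalizers still run with errinfo
 * and the control frame restored in between. */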
3331static void
3332run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
3333{
3334 long i;
3335 enum ruby_tag_type state;
3336 volatile struct {
3337 VALUE errinfo;
3338 VALUE objid;
3340 long finished;
3341 } saved;
3342 rb_execution_context_t * volatile ec = GET_EC();
3343#define RESTORE_FINALIZER() (\
3344 ec->cfp = saved.cfp, \
3345 rb_set_errinfo(saved.errinfo))
3346
3347 saved.errinfo = rb_errinfo();
3348 saved.objid = rb_obj_id(obj);
3349 saved.cfp = ec->cfp;
3350 saved.finished = 0;
3351
3352 EC_PUSH_TAG(ec);
3353 state = EC_EXEC_TAG();
3354 if (state != TAG_NONE) {
3355 ++saved.finished; /* skip failed finalizer */
3356 }
3357 for (i = saved.finished;
3358 RESTORE_FINALIZER(), i<RARRAY_LEN(table);
3359 saved.finished = ++i) {
3360 run_single_final(RARRAY_AREF(table, i), saved.objid);
3361 }
3362 EC_POP_TAG();
3363#undef RESTORE_FINALIZER
3364}
3365
3366static void
3367run_final(rb_objspace_t *objspace, VALUE zombie)
3368{
3369 st_data_t key, table;
3370
3371 if (RZOMBIE(zombie)->dfree) {
3372 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
3373 }
3374
3375 key = (st_data_t)zombie;
3376 if (st_delete(finalizer_table, &key, &table)) {
3377 run_finalizer(objspace, zombie, (VALUE)table);
3378 }
3379}
3380
3381static void
3382finalize_list(rb_objspace_t *objspace, VALUE zombie)
3383{
3384 while (zombie) {
3385 VALUE next_zombie;
3386 struct heap_page *page;
3387 asan_unpoison_object(zombie, false);
3388 next_zombie = RZOMBIE(zombie)->next;
3389 page = GET_HEAP_PAGE(zombie);
3390
3391 run_final(objspace, zombie);
3392
3393 GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
3394 if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
3395 obj_free_object_id(objspace, zombie);
3396 }
3397
3398 RZOMBIE(zombie)->basic.flags = 0;
3400 page->final_slots--;
3401 page->free_slots++;
3402 heap_page_add_freeobj(objspace, GET_HEAP_PAGE(zombie), zombie);
3403
3404 objspace->profile.total_freed_objects++;
3405
3406 zombie = next_zombie;
3407 }
3408}
3409
3410static void
3411finalize_deferred(rb_objspace_t *objspace)
3412{
3413 VALUE zombie;
3414
3415 while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
3416 finalize_list(objspace, zombie);
3417 }
3418}
3419
3420static void
3421gc_finalize_deferred(void *dmy)
3422{
3423 rb_objspace_t *objspace = dmy;
3424 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
3425 finalize_deferred(objspace);
3427}
3428
3429static void
3430gc_finalize_deferred_register(rb_objspace_t *objspace)
3431{
3432 if (rb_postponed_job_register_one(0, gc_finalize_deferred, objspace) == 0) {
3433 rb_bug("gc_finalize_deferred_register: can't register finalizer.");
3434 }
3435}
3436
3441};
3442
3443static int
3444force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
3445{
3446 struct force_finalize_list **prev = (struct force_finalize_list **)arg;
3447 struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
3448 curr->obj = key;
3449 curr->table = val;
3450 curr->next = *prev;
3451 *prev = curr;
3452 return ST_CONTINUE;
3453}
3454
3455void
3457{
3458 RVALUE *p, *pend;
3459 size_t i;
3460
3461#if RGENGC_CHECK_MODE >= 2
3462 gc_verify_internal_consistency(objspace);
3463#endif
3464 gc_rest(objspace);
3465
3466 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
3467
3468 /* run finalizers */
3469 finalize_deferred(objspace);
3471
3472 gc_rest(objspace);
3473 /* prohibit incremental GC */
3474 objspace->flags.dont_incremental = 1;
3475
3476 /* force to run finalizer */
3477 while (finalizer_table->num_entries) {
3478 struct force_finalize_list *list = 0;
3479 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
3480 while (list) {
3481 struct force_finalize_list *curr = list;
3482 st_data_t obj = (st_data_t)curr->obj;
3483 run_finalizer(objspace, curr->obj, curr->table);
3485 list = curr->next;
3486 xfree(curr);
3487 }
3488 }
3489
3490 /* prohibit GC because forced T_DATA finalizers can break object graph consistency */
3491 dont_gc = 1;
3492
3493 /* running data/file finalizers is treated as part of garbage collection */
3494 gc_enter(objspace, "rb_objspace_call_finalizer");
3495
3496 /* run data/file object's finalizers */
3497 for (i = 0; i < heap_allocated_pages; i++) {
3498 p = heap_pages_sorted[i]->start; pend = p + heap_pages_sorted[i]->total_slots;
3499 while (p < pend) {
3500 void *poisoned = asan_poisoned_object_p((VALUE)p);
3501 asan_unpoison_object((VALUE)p, false);
3502 switch (BUILTIN_TYPE(p)) {
3503 case T_DATA:
3504 if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
3505 if (rb_obj_is_thread((VALUE)p)) break;
3506 if (rb_obj_is_mutex((VALUE)p)) break;
3507 if (rb_obj_is_fiber((VALUE)p)) break;
3508 p->as.free.flags = 0;
3509 if (RTYPEDDATA_P(p)) {
3510 RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
3511 }
3512 if (RANY(p)->as.data.dfree == RUBY_DEFAULT_FREE) {
3513 xfree(DATA_PTR(p));
3514 }
3515 else if (RANY(p)->as.data.dfree) {
3516 make_zombie(objspace, (VALUE)p, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
3517 }
3518 break;
3519 case T_FILE:
3520 if (RANY(p)->as.file.fptr) {
3521 make_io_zombie(objspace, (VALUE)p);
3522 }
3523 break;
3524 }
3525 if (poisoned) {
3527 asan_poison_object((VALUE)p);
3528 }
3529 p++;
3530 }
3531 }
3532
3533 gc_exit(objspace, "rb_objspace_call_finalizer");
3534
3536 finalize_list(objspace, heap_pages_deferred_final);
3537 }
3538
3540 finalizer_table = 0;
3542}
3543
3544PUREFUNC(static inline int is_id_value(rb_objspace_t *objspace, VALUE ptr));
3545static inline int
3546is_id_value(rb_objspace_t *objspace, VALUE ptr)
3547{
3548 if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
3549 if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE;
3550 if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE;
3551 return TRUE;
3552}
3553
3554static inline int
3555heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr)
3556{
3557 struct heap_page *page = GET_HEAP_PAGE(ptr);
3558 return page->flags.before_sweep ? FALSE : TRUE;
3559}
3560
3561static inline int
3562is_swept_object(rb_objspace_t *objspace, VALUE ptr)
3563{
3564 if (heap_is_swept_object(objspace, heap_eden, ptr)) {
3565 return TRUE;
3566 }
3567 else {
3568 return FALSE;
3569 }
3570}
3571
3572/* garbage objects will be collected soon. */
3573static inline int
3574is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
3575{
3577 is_swept_object(objspace, ptr) ||
3579
3580 return FALSE;
3581 }
3582 else {
3583 return TRUE;
3584 }
3585}
3586
3587static inline int
3588is_live_object(rb_objspace_t *objspace, VALUE ptr)
3589{
3590 switch (BUILTIN_TYPE(ptr)) {
3591 case T_NONE:
3592 case T_ZOMBIE:
3593 return FALSE;
3594 }
3595
3596 if (!is_garbage_object(objspace, ptr)) {
3597 return TRUE;
3598 }
3599 else {
3600 return FALSE;
3601 }
3602}
3603
3604static inline int
3605is_markable_object(rb_objspace_t *objspace, VALUE obj)
3606{
3607 if (rb_special_const_p(obj)) return FALSE; /* special const is not markable */
3608 check_rvalue_consistency(obj);
3609 return TRUE;
3610}
3611
3612int
3614{
3615 rb_objspace_t *objspace = &rb_objspace;
3616 return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
3617}
3618
3619int
3621{
3622 rb_objspace_t *objspace = &rb_objspace;
3623 return is_garbage_object(objspace, obj);
3624}
3625
3626static VALUE
3627id2ref_obj_tbl(rb_objspace_t *objspace, VALUE objid)
3628{
3629 VALUE orig;
3630 if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
3631 return orig;
3632 }
3633 else {
3634 return Qundef;
3635 }
3636}
3637
3638/*
3639 * call-seq:
3640 * ObjectSpace._id2ref(object_id) -> an_object
3641 *
3642 * Converts an object id to a reference to the object. May not be
3643 * called on an object id passed as a parameter to a finalizer.
3644 *
3645 * s = "I am a string" #=> "I am a string"
3646 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
3647 * r == s #=> true
3648 *
3649 */
3650
3651static VALUE
3652id2ref(VALUE objid)
3653{
3654#if SIZEOF_LONG == SIZEOF_VOIDP
3655#define NUM2PTR(x) NUM2ULONG(x)
3656#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
3657#define NUM2PTR(x) NUM2ULL(x)
3658#endif
3659 rb_objspace_t *objspace = &rb_objspace;
3660 VALUE ptr;
3661 VALUE orig;
3662 void *p0;
3663
3664 objid = rb_to_int(objid);
3665 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
3666 ptr = NUM2PTR(objid);
3667 if (ptr == Qtrue) return Qtrue;
3668 if (ptr == Qfalse) return Qfalse;
3669 if (ptr == Qnil) return Qnil;
3670 if (FIXNUM_P(ptr)) return (VALUE)ptr;
3671 if (FLONUM_P(ptr)) return (VALUE)ptr;
3672
3673 ptr = obj_id_to_ref(objid);
3674 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
3675 ID symid = ptr / sizeof(RVALUE);
3676 p0 = (void *)ptr;
3677 if (rb_id2str(symid) == 0)
3678 rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
3679 return ID2SYM(symid);
3680 }
3681 }
3682
3683 if ((orig = id2ref_obj_tbl(objspace, objid)) != Qundef &&
3684 is_live_object(objspace, orig)) {
3685 return orig;
3686 }
3687
3688 if (rb_int_ge(objid, objspace->next_object_id)) {
3689 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
3690 } else {
3691 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));
3692 }
3693}
3694
3695static VALUE
3696os_id2ref(VALUE os, VALUE objid)
3697{
3698 return id2ref(objid);
3699}
3700
3701static VALUE
3702rb_find_object_id(VALUE obj, VALUE (*get_heap_object_id)(VALUE))
3703{
3704 if (STATIC_SYM_P(obj)) {
3705 return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
3706 }
3707 else if (FLONUM_P(obj)) {
3708#if SIZEOF_LONG == SIZEOF_VOIDP
3709 return LONG2NUM((SIGNED_VALUE)obj);
3710#else
3711 return LL2NUM((SIGNED_VALUE)obj);
3712#endif
3713 }
3714 else if (SPECIAL_CONST_P(obj)) {
3715 return LONG2NUM((SIGNED_VALUE)obj);
3716 }
3717
3718 return get_heap_object_id(obj);
3719}
3720
3721static VALUE
3722cached_object_id(VALUE obj)
3723{
3724 VALUE id;
3725 rb_objspace_t *objspace = &rb_objspace;
3726
3727 if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
3729 return id;
3730 }
3731 else {
3733
3734 id = objspace->next_object_id;
3736
3737 st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
3738 st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
3740
3741 return id;
3742 }
3743}
3744
3745static VALUE
3746nonspecial_obj_id_(VALUE obj)
3747{
3748 return nonspecial_obj_id(obj);
3749}
3750
3751
3752VALUE
3754{
3755 return rb_find_object_id(obj, nonspecial_obj_id_);
3756}
3757
3758/*
3759 * Document-method: __id__
3760 * Document-method: object_id
3761 *
3762 * call-seq:
3763 * obj.__id__ -> integer
3764 * obj.object_id -> integer
3765 *
3766 * Returns an integer identifier for +obj+.
3767 *
3768 * The same number will be returned on all calls to +object_id+ for a given
3769 * object, and no two active objects will share an id.
3770 *
3771 * Note that some objects of builtin classes are reused for optimization.
3772 * This is the case for immediate values and frozen string literals.
3773 *
3774 * BasicObject implements +__id__+, Kernel implements +object_id+.
3775 *
3776 * Immediate values are not passed by reference but are passed by value:
3777 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
3778 *
3779 * Object.new.object_id == Object.new.object_id # => false
3780 * (21 * 2).object_id == (21 * 2).object_id # => true
3781 * "hello".object_id == "hello".object_id # => false
3782 * "hi".freeze.object_id == "hi".freeze.object_id # => true
3783 */
3784
3785VALUE
3787{
3788 /*
3789 * 32-bit VALUE space
3790 * MSB ------------------------ LSB
3791 * false 00000000000000000000000000000000
3792 * true 00000000000000000000000000000010
3793 * nil 00000000000000000000000000000100
3794 * undef 00000000000000000000000000000110
3795 * symbol ssssssssssssssssssssssss00001110
3796 * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
3797 * fixnum fffffffffffffffffffffffffffffff1
3798 *
3799 * object_id space
3800 * LSB
3801 * false 00000000000000000000000000000000
3802 * true 00000000000000000000000000000010
3803 * nil 00000000000000000000000000000100
3804 * undef 00000000000000000000000000000110
3805 * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
3806 * object oooooooooooooooooooooooooooooo0 o...o % A = 0
3807 * fixnum fffffffffffffffffffffffffffffff1 bignum if required
3808 *
3809 * where A = sizeof(RVALUE)/4
3810 *
3811 * sizeof(RVALUE) is
3812 * 20 if 32-bit, double is 4-byte aligned
3813 * 24 if 32-bit, double is 8-byte aligned
3814 * 40 if 64-bit
3815 */
3816
3817 return rb_find_object_id(obj, cached_object_id);
3818}
3819
3820#include "regint.h"
3821
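/* Estimates the memory footprint of obj in bytes: any out-of-line buffers it
 * owns (ivar arrays, class tables, string/array heaps, ...) plus
 * sizeof(RVALUE) for the slot itself.  rb_obj_memsize_of() below is the
 * use_all_types variant (it also asks T_DATA objects for their dsize) and is
 * what the objspace extension's memsize_of reports. */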
3822static size_t
3823obj_memsize_of(VALUE obj, int use_all_types)
3824{
3825 size_t size = 0;
3826
3827 if (SPECIAL_CONST_P(obj)) {
3828 return 0;
3829 }
3830
3831 if (FL_TEST(obj, FL_EXIVAR)) {
3833 }
3834
3835 switch (BUILTIN_TYPE(obj)) {
3836 case T_OBJECT:
3837 if (!(RBASIC(obj)->flags & ROBJECT_EMBED) &&
3838 ROBJECT(obj)->as.heap.ivptr) {
3839 size += ROBJECT(obj)->as.heap.numiv * sizeof(VALUE);
3840 }
3841 break;
3842 case T_MODULE:
3843 case T_CLASS:
3844 if (RCLASS_EXT(obj)) {
3845 if (RCLASS_M_TBL(obj)) {
3847 }
3848 if (RCLASS_IV_TBL(obj)) {
3850 }
3851 if (RCLASS_IV_INDEX_TBL(obj)) {
3853 }
3854 if (RCLASS(obj)->ptr->iv_tbl) {
3855 size += st_memsize(RCLASS(obj)->ptr->iv_tbl);
3856 }
3857 if (RCLASS(obj)->ptr->const_tbl) {
3858 size += rb_id_table_memsize(RCLASS(obj)->ptr->const_tbl);
3859 }
3860 size += sizeof(rb_classext_t);
3861 }
3862 break;
3863 case T_ICLASS:
3865 if (RCLASS_M_TBL(obj)) {
3867 }
3868 }
3869 break;
3870 case T_STRING:
3872 break;
3873 case T_ARRAY:
3875 break;
3876 case T_HASH:
3877 if (RHASH_AR_TABLE_P(obj)) {
3878 if (RHASH_AR_TABLE(obj) != NULL) {
3879 size_t rb_hash_ar_table_size();
3881 }
3882 }
3883 else {
3886 }
3887 break;
3888 case T_REGEXP:
3889 if (RREGEXP_PTR(obj)) {
3891 }
3892 break;
3893 case T_DATA:
3894 if (use_all_types) size += rb_objspace_data_type_memsize(obj);
3895 break;
3896 case T_MATCH:
3897 if (RMATCH(obj)->rmatch) {
3898 struct rmatch *rm = RMATCH(obj)->rmatch;
3899 size += onig_region_memsize(&rm->regs);
3900 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
3901 size += sizeof(struct rmatch);
3902 }
3903 break;
3904 case T_FILE:
3905 if (RFILE(obj)->fptr) {
3906 size += rb_io_memsize(RFILE(obj)->fptr);
3907 }
3908 break;
3909 case T_RATIONAL:
3910 case T_COMPLEX:
3911 break;
3912 case T_IMEMO:
3913 size += imemo_memsize(obj);
3914 break;
3915
3916 case T_FLOAT:
3917 case T_SYMBOL:
3918 break;
3919
3920 case T_BIGNUM:
3921 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
3922 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
3923 }
3924 break;
3925
3926 case T_NODE:
3927 UNEXPECTED_NODE(obj_memsize_of);
3928 break;
3929
3930 case T_STRUCT:
3931 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
3932 RSTRUCT(obj)->as.heap.ptr) {
3933 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
3934 }
3935 break;
3936
3937 case T_ZOMBIE:
3938 case T_MOVED:
3939 break;
3940
3941 default:
3942 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
3943 BUILTIN_TYPE(obj), (void*)obj);
3944 }
3945
3946 return size + sizeof(RVALUE);
3947}
3948
3949size_t
3951{
3952 return obj_memsize_of(obj, TRUE);
3953}
3954
3955static int
3956set_zero(st_data_t key, st_data_t val, st_data_t arg)
3957{
3958 VALUE k = (VALUE)key;
3959 VALUE hash = (VALUE)arg;
3960 rb_hash_aset(hash, k, INT2FIX(0));
3961 return ST_CONTINUE;
3962}
3963
3964static VALUE
3965type_sym(size_t type)
3966{
3967 switch (type) {
3968#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
3996#undef COUNT_TYPE
3997 default: return INT2NUM(type); break;
3998 }
3999}
4000
4001/*
4002 * call-seq:
4003 * ObjectSpace.count_objects([result_hash]) -> hash
4004 *
4005 * Counts all objects grouped by type.
4006 *
4007 * It returns a hash, such as:
4008 * {
4009 * :TOTAL=>10000,
4010 * :FREE=>3011,
4011 * :T_OBJECT=>6,
4012 * :T_CLASS=>404,
4013 * # ...
4014 * }
4015 *
4016 * The contents of the returned hash are implementation specific
4017 * and may change in the future.
4018 *
4019 * Keys starting with +:T_+ count live objects.
4020 * For example, +:T_ARRAY+ is the number of arrays.
4021 * +:FREE+ is the number of object slots that are not currently in use,
4022 * and +:TOTAL+ is the sum of the above.
4023 *
4024 * If the optional argument +result_hash+ is given,
4025 * it is overwritten and returned. This is intended to avoid probe effect.
4026 *
4027 * h = {}
4028 * ObjectSpace.count_objects(h)
4029 * puts h
4030 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
4031 *
4032 * This method is only expected to work on C Ruby.
4033 *
4034 */
4035
4036static VALUE
4037count_objects(int argc, VALUE *argv, VALUE os)
4038{
4039 rb_objspace_t *objspace = &rb_objspace;
4040 size_t counts[T_MASK+1];
4041 size_t freed = 0;
4042 size_t total = 0;
4043 size_t i;
4044 VALUE hash = Qnil;
4045
4046 if (rb_check_arity(argc, 0, 1) == 1) {
4047 hash = argv[0];
4048 if (!RB_TYPE_P(hash, T_HASH))
4049 rb_raise(rb_eTypeError, "non-hash given");
4050 }
4051
4052 for (i = 0; i <= T_MASK; i++) {
4053 counts[i] = 0;
4054 }
4055
4056 for (i = 0; i < heap_allocated_pages; i++) {
4057 struct heap_page *page = heap_pages_sorted[i];
4058 RVALUE *p, *pend;
4059
4060 p = page->start; pend = p + page->total_slots;
4061 for (;p < pend; p++) {
4062 void *poisoned = asan_poisoned_object_p((VALUE)p);
4063 asan_unpoison_object((VALUE)p, false);
4064 if (p->as.basic.flags) {
4065 counts[BUILTIN_TYPE(p)]++;
4066 }
4067 else {
4068 freed++;
4069 }
4070 if (poisoned) {
4072 asan_poison_object((VALUE)p);
4073 }
4074 }
4075 total += page->total_slots;
4076 }
4077
4078 if (hash == Qnil) {
4079 hash = rb_hash_new();
4080 }
4081 else if (!RHASH_EMPTY_P(hash)) {
4082 rb_hash_stlike_foreach(hash, set_zero, hash);
4083 }
4084 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
4085 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
4086
4087 for (i = 0; i <= T_MASK; i++) {
4088 VALUE type = type_sym(i);
4089 if (counts[i])
4090 rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
4091 }
4092
4093 return hash;
4094}
4095
4096/*
4097 ------------------------ Garbage Collection ------------------------
4098*/
4099
4100/* Sweeping */
4101
4102static size_t
4103objspace_available_slots(rb_objspace_t *objspace)
4104{
4105 return heap_eden->total_slots + heap_tomb->total_slots;
4106}
4107
4108static size_t
4109objspace_live_slots(rb_objspace_t *objspace)
4110{
4112}
4113
4114static size_t
4115objspace_free_slots(rb_objspace_t *objspace)
4116{
4117 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
4118}
4119
4120static void
4121gc_setup_mark_bits(struct heap_page *page)
4122{
4123#if USE_RGENGC
4124 /* copy oldgen bitmap to mark bitmap */
4126#else
4127 /* clear mark bitmap */
4128 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
4129#endif
4130}
4131
4132static inline int
4133gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page)
4134{
4135 int i;
4136 int empty_slots = 0, freed_slots = 0, final_slots = 0;
4137 RVALUE *p, *pend,*offset;
4138 bits_t *bits, bitset;
4139
4140 gc_report(2, objspace, "page_sweep: start.\n");
4141
4142 sweep_page->flags.before_sweep = FALSE;
4143
4144 p = sweep_page->start; pend = p + sweep_page->total_slots;
4145 offset = p - NUM_IN_PAGE(p);
4146 bits = sweep_page->mark_bits;
4147
4148 /* create guard bits: set out-of-range bits to 1 (marked) so those slots are skipped when the bitmap is inverted below */
4149 bits[BITMAP_INDEX(p)] |= BITMAP_BIT(p)-1;
4150 bits[BITMAP_INDEX(pend)] |= ~(BITMAP_BIT(pend) - 1);
4151
4152 for (i=0; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
4153 bitset = ~bits[i];
4154 if (bitset) {
4155 p = offset + i * BITS_BITLENGTH;
4156 do {
4157 asan_unpoison_object((VALUE)p, false);
4158 if (bitset & 1) {
4159 switch (BUILTIN_TYPE(p)) {
4160 default: { /* majority case */
4161 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
4162#if USE_RGENGC && RGENGC_CHECK_MODE
4163 if (!is_full_marking(objspace)) {
4164 if (RVALUE_OLD_P((VALUE)p)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
4165 if (rgengc_remembered_sweep(objspace, (VALUE)p)) rb_bug("page_sweep: %p - remembered.", (void *)p);
4166 }
4167#endif
4168 if (obj_free(objspace, (VALUE)p)) {
4169 final_slots++;
4170 }
4171 else {
4172 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
4173 heap_page_add_freeobj(objspace, sweep_page, (VALUE)p);
4174 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info((VALUE)p));
4175 freed_slots++;
4176 asan_poison_object((VALUE)p);
4177 }
4178 break;
4179 }
4180
4181 /* minor cases */
4182 case T_ZOMBIE:
4183 /* already counted */
4184 break;
4185 case T_NONE:
4186 empty_slots++; /* already freed */
4187 break;
4188 }
4189 }
4190 p++;
4191 bitset >>= 1;
4192 } while (bitset);
4193 }
4194 }
4195
4196 gc_setup_mark_bits(sweep_page);
4197
4198#if GC_PROFILE_MORE_DETAIL
4199 if (gc_prof_enabled(objspace)) {
4200 gc_profile_record *record = gc_prof_record(objspace);
4201 record->removing_objects += final_slots + freed_slots;
4202 record->empty_objects += empty_slots;
4203 }
4204#endif
4205 if (0) fprintf(stderr, "gc_page_sweep(%d): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
4206 (int)rb_gc_count(),
4207 (int)sweep_page->total_slots,
4208 freed_slots, empty_slots, final_slots);
4209
4210 sweep_page->free_slots = freed_slots + empty_slots;
4211 objspace->profile.total_freed_objects += freed_slots;
4213 sweep_page->final_slots += final_slots;
4214
4216 rb_thread_t *th = GET_THREAD();
4217 if (th) {
4218 gc_finalize_deferred_register(objspace);
4219 }
4220 }
4221
4222 gc_report(2, objspace, "page_sweep: end.\n");
4223
4224 return freed_slots + empty_slots;
4225}
4226
4227 /* allocate the minimum number of additional pages needed to keep working */
4228static void
4229gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_heap_t *heap)
4230{
4231 if (!heap->free_pages && heap_increment(objspace, heap) == FALSE) {
4232 /* there are no free pages left after page_sweep() */
4233 heap_set_increment(objspace, 1);
4234 if (!heap_increment(objspace, heap)) { /* can't allocate additional free objects */
4235 rb_memerror();
4236 }
4237 }
4238}
4239
4240static const char *
4241gc_mode_name(enum gc_mode mode)
4242{
4243 switch (mode) {
4244 case gc_mode_none: return "none";
4245 case gc_mode_marking: return "marking";
4246 case gc_mode_sweeping: return "sweeping";
4247 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
4248 }
4249}
4250
4251static void
4252gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
4253{
4254#if RGENGC_CHECK_MODE
4255 enum gc_mode prev_mode = gc_mode(objspace);
4256 switch (prev_mode) {
4257 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
4258 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
4259 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none); break;
4260 }
4261#endif
4262 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
4263 gc_mode_set(objspace, mode);
4264}
4265
4266static void
4267gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
4268{
4269 heap->sweeping_page = list_top(&heap->pages, struct heap_page, page_node);
4270 heap->free_pages = NULL;
4271#if GC_ENABLE_INCREMENTAL_MARK
4272 heap->pooled_pages = NULL;
4273 objspace->rincgc.pooled_slots = 0;
4274#endif
4275 if (heap->using_page) {
4276 struct heap_page *page = heap->using_page;
4277 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
4278
4279 RVALUE **p = &page->freelist;
4280 while (*p) {
4281 p = &(*p)->as.free.next;
4282 }
4283 *p = heap->freelist;
4284 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
4285 heap->using_page = NULL;
4286 }
4287 heap->freelist = NULL;
4288}
4289
4290#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
4291__attribute__((noinline))
4292#endif
4293static void
4294gc_sweep_start(rb_objspace_t *objspace)
4295{
4296 gc_mode_transition(objspace, gc_mode_sweeping);
4297 gc_sweep_start_heap(objspace, heap_eden);
4298}
4299
4300static void
4301gc_sweep_finish(rb_objspace_t *objspace)
4302{
4303 gc_report(1, objspace, "gc_sweep_finish\n");
4304
4305 gc_prof_set_heap_info(objspace);
4306 heap_pages_free_unused_pages(objspace);
4307
4308 /* if heap_pages has unused pages, then assign them to increment */
4309 if (heap_allocatable_pages < heap_tomb->total_pages) {
4310 heap_allocatable_pages_set(objspace, heap_tomb->total_pages);
4311 }
4312
4314 gc_mode_transition(objspace, gc_mode_none);
4315
4316#if RGENGC_CHECK_MODE >= 2
4317 gc_verify_internal_consistency(objspace);
4318#endif
4319}
4320
4321static int
4322gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
4323{
4324 struct heap_page *sweep_page = heap->sweeping_page;
4325 int unlink_limit = 3;
4326#if GC_ENABLE_INCREMENTAL_MARK
4327 int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
4328
4329 gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
4330#else
4331 gc_report(2, objspace, "gc_sweep_step\n");
4332#endif
4333
4334 if (sweep_page == NULL) return FALSE;
4335
4336#if GC_ENABLE_LAZY_SWEEP
4337 gc_prof_sweep_timer_start(objspace);
4338#endif
4339
4340 do {
4341 int free_slots = gc_page_sweep(objspace, heap, sweep_page);
4342 heap->sweeping_page = list_next(&heap->pages, sweep_page, page_node);
4343
4344 if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
4346 unlink_limit > 0) {
4348 unlink_limit--;
4349 /* there are no living objects -> move this page to tomb heap */
4350 heap_unlink_page(objspace, heap, sweep_page);
4351 heap_add_page(objspace, heap_tomb, sweep_page);
4352 }
4353 else if (free_slots > 0) {
4354#if GC_ENABLE_INCREMENTAL_MARK
4355 if (need_pool) {
4356 if (heap_add_poolpage(objspace, heap, sweep_page)) {
4357 need_pool = FALSE;
4358 }
4359 }
4360 else {
4361 heap_add_freepage(heap, sweep_page);
4362 break;
4363 }
4364#else
4365 heap_add_freepage(heap, sweep_page);
4366 break;
4367#endif
4368 }
4369 else {
4370 sweep_page->free_next = NULL;
4371 }
4372 } while ((sweep_page = heap->sweeping_page));
4373
4374 if (!heap->sweeping_page) {
4375 gc_sweep_finish(objspace);
4376 }
4377
4378#if GC_ENABLE_LAZY_SWEEP
4379 gc_prof_sweep_timer_stop(objspace);
4380#endif
4381
4382 return heap->free_pages != NULL;
4383}
4384
4385static void
4386gc_sweep_rest(rb_objspace_t *objspace)
4387{
4388 rb_heap_t *heap = heap_eden; /* lazy sweep only for eden */
4389
4390 while (has_sweeping_pages(heap)) {
4391 gc_sweep_step(objspace, heap);
4392 }
4393}
4394
4395static void
4396gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
4397{
4399 if (!GC_ENABLE_LAZY_SWEEP) return;
4400
4401 gc_enter(objspace, "sweep_continue");
4402#if USE_RGENGC
4403 if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE && heap_increment(objspace, heap)) {
4404 gc_report(3, objspace, "gc_sweep_continue: success heap_increment().\n");
4405 }
4406#endif
4407 gc_sweep_step(objspace, heap);
4408 gc_exit(objspace, "sweep_continue");
4409}
4410
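/* Sweep-phase entry point.  With flags.immediate_sweep the whole eden heap is
 * swept at once via gc_sweep_rest(); otherwise every eden page is flagged
 * before_sweep and swept lazily, one gc_sweep_step() at a time, resumed later
 * from gc_sweep_continue(). */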
4411static void
4412gc_sweep(rb_objspace_t *objspace)
4413{
4414 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
4415
4416 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
4417
4418 if (immediate_sweep) {
4419#if !GC_ENABLE_LAZY_SWEEP
4420 gc_prof_sweep_timer_start(objspace);
4421#endif
4422 gc_sweep_start(objspace);
4423 gc_sweep_rest(objspace);
4424#if !GC_ENABLE_LAZY_SWEEP
4425 gc_prof_sweep_timer_stop(objspace);
4426#endif
4427 }
4428 else {
4429 struct heap_page *page = NULL;
4430 gc_sweep_start(objspace);
4431
4432 list_for_each(&heap_eden->pages, page, page_node) {
4433 page->flags.before_sweep = TRUE;
4434 }
4435 gc_sweep_step(objspace, heap_eden);
4436 }
4437
4438 gc_heap_prepare_minimum_pages(objspace, heap_eden);
4439}
4440
4441/* Marking - Marking stack */
4442
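/* The mark stack is a singly linked list of fixed-size chunks of
 * STACK_CHUNK_SIZE VALUEs.  Full chunks are pushed onto the list and empty
 * ones popped off; popped chunks are kept in a small cache so that
 * push_mark_stack_chunk() can usually avoid malloc(), and
 * shrink_stack_chunk_cache() frees cache entries that stayed unused. */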
4443static stack_chunk_t *
4444stack_chunk_alloc(void)
4445{
4446 stack_chunk_t *res;
4447
4448 res = malloc(sizeof(stack_chunk_t));
4449 if (!res)
4450 rb_memerror();
4451
4452 return res;
4453}
4454
4455static inline int
4456is_mark_stack_empty(mark_stack_t *stack)
4457{
4458 return stack->chunk == NULL;
4459}
4460
4461static size_t
4462mark_stack_size(mark_stack_t *stack)
4463{
4464 size_t size = stack->index;
4465 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
4466
4467 while (chunk) {
4468 size += stack->limit;
4469 chunk = chunk->next;
4470 }
4471 return size;
4472}
4473
4474static void
4475add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
4476{
4477 chunk->next = stack->cache;
4478 stack->cache = chunk;
4479 stack->cache_size++;
4480}
4481
4482static void
4483shrink_stack_chunk_cache(mark_stack_t *stack)
4484{
4485 stack_chunk_t *chunk;
4486
4487 if (stack->unused_cache_size > (stack->cache_size/2)) {
4488 chunk = stack->cache;
4489 stack->cache = stack->cache->next;
4490 stack->cache_size--;
4491 free(chunk);
4492 }
4493 stack->unused_cache_size = stack->cache_size;
4494}
4495
4496static void
4497push_mark_stack_chunk(mark_stack_t *stack)
4498{
4499 stack_chunk_t *next;
4500
4501 GC_ASSERT(stack->index == stack->limit);
4502
4503 if (stack->cache_size > 0) {
4504 next = stack->cache;
4505 stack->cache = stack->cache->next;
4506 stack->cache_size--;
4507 if (stack->unused_cache_size > stack->cache_size)
4508 stack->unused_cache_size = stack->cache_size;
4509 }
4510 else {
4511 next = stack_chunk_alloc();
4512 }
4513 next->next = stack->chunk;
4514 stack->chunk = next;
4515 stack->index = 0;
4516}
4517
4518static void
4519pop_mark_stack_chunk(mark_stack_t *stack)
4520{
4521 stack_chunk_t *prev;
4522
4523 prev = stack->chunk->next;
4524 GC_ASSERT(stack->index == 0);
4525 add_stack_chunk_cache(stack, stack->chunk);
4526 stack->chunk = prev;
4527 stack->index = stack->limit;
4528}
4529
4530static void
4531free_stack_chunks(mark_stack_t *stack)
4532{
4533 stack_chunk_t *chunk = stack->chunk;
4534 stack_chunk_t *next = NULL;
4535
4536 while (chunk != NULL) {
4537 next = chunk->next;
4538 free(chunk);
4539 chunk = next;
4540 }
4541}
4542
4543static void
4544push_mark_stack(mark_stack_t *stack, VALUE data)
4545{
4546 if (stack->index == stack->limit) {
4547 push_mark_stack_chunk(stack);
4548 }
4549 stack->chunk->data[stack->index++] = data;
4550}
4551
4552static int
4553pop_mark_stack(mark_stack_t *stack, VALUE *data)
4554{
4555 if (is_mark_stack_empty(stack)) {
4556 return FALSE;
4557 }
4558 if (stack->index == 1) {
4559 *data = stack->chunk->data[--stack->index];
4560 pop_mark_stack_chunk(stack);
4561 }
4562 else {
4563 *data = stack->chunk->data[--stack->index];
4564 }
4565 return TRUE;
4566}
4567
4568#if GC_ENABLE_INCREMENTAL_MARK
4569static int
4570invalidate_mark_stack_chunk(stack_chunk_t *chunk, int limit, VALUE obj)
4571{
4572 int i;
4573 for (i=0; i<limit; i++) {
4574 if (chunk->data[i] == obj) {
4575 chunk->data[i] = Qundef;
4576 return TRUE;
4577 }
4578 }
4579 return FALSE;
4580}
4581
4582static void
4583invalidate_mark_stack(mark_stack_t *stack, VALUE obj)
4584{
4585 stack_chunk_t *chunk = stack->chunk;
4586 int limit = stack->index;
4587
4588 while (chunk) {
4589 if (invalidate_mark_stack_chunk(chunk, limit, obj)) return;
4590 chunk = chunk->next;
4591 limit = stack->limit;
4592 }
4593 rb_bug("invalid_mark_stack: unreachable");
4594}
4595#endif
4596
4597static void
4598init_mark_stack(mark_stack_t *stack)
4599{
4600 int i;
4601
4602 MEMZERO(stack, mark_stack_t, 1);
4603 stack->index = stack->limit = STACK_CHUNK_SIZE;
4604 stack->cache_size = 0;
4605
4606 for (i=0; i < 4; i++) {
4607 add_stack_chunk_cache(stack, stack_chunk_alloc());
4608 }
4609 stack->unused_cache_size = stack->cache_size;
4610}
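/* Note on the marking stack layout: the mark stack is a singly linked list of
 * fixed-size chunks of STACK_CHUNK_SIZE slots. push_mark_stack() grabs a new
 * chunk (reusing one from the cache when possible) only when the current chunk
 * is full, and pop_mark_stack() returns an emptied chunk to the cache, so both
 * operations stay O(1) while malloc/free traffic is bounded. A rough usage
 * sketch (illustrative only, `some_root' is a placeholder value):
 *
 *   mark_stack_t stack;
 *   VALUE obj;
 *
 *   init_mark_stack(&stack);              // pre-caches 4 chunks
 *   push_mark_stack(&stack, some_root);
 *   while (pop_mark_stack(&stack, &obj)) {
 *       ...                               // visit obj, push its children
 *   }
 *   free_stack_chunks(&stack);
 */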
4611
4612/* Marking */
4613
4614#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
4615
4616#define STACK_START (ec->machine.stack_start)
4617#define STACK_END (ec->machine.stack_end)
4618#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
4619
4620#ifdef __EMSCRIPTEN__
4621#undef STACK_GROW_DIRECTION
4622#define STACK_GROW_DIRECTION 1
4623#endif
4624
4625#if STACK_GROW_DIRECTION < 0
4626# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
4627#elif STACK_GROW_DIRECTION > 0
4628# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
4629#else
4630# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
4631 : (size_t)(STACK_END - STACK_START + 1))
4632#endif
4633#if !STACK_GROW_DIRECTION
4634int ruby_stack_grow_direction;
4635int
4636ruby_get_stack_grow_direction(volatile VALUE *addr)
4637{
4638 VALUE *end;
4639 SET_MACHINE_STACK_END(&end);
4640
4641 if (end > addr) return ruby_stack_grow_direction = 1;
4642 return ruby_stack_grow_direction = -1;
4643}
4644#endif
4645
4646size_t
4647ruby_stack_length(VALUE **p)
4648{
4649 rb_execution_context_t *ec = GET_EC();
4650 SET_STACK_END;
4651 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
4652 return STACK_LENGTH;
4653}
4654
4655#define PREVENT_STACK_OVERFLOW 1
4656#ifndef PREVENT_STACK_OVERFLOW
4657#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
4658# define PREVENT_STACK_OVERFLOW 1
4659#else
4660# define PREVENT_STACK_OVERFLOW 0
4661#endif
4662#endif
4663#if PREVENT_STACK_OVERFLOW
4664static int
4665stack_check(rb_execution_context_t *ec, int water_mark)
4666{
4667 SET_STACK_END;
4668
4669 size_t length = STACK_LENGTH;
4670 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
4671
4672 return length > maximum_length;
4673}
4674#else
4675#define stack_check(ec, water_mark) FALSE
4676#endif
4677
4678#define STACKFRAME_FOR_CALL_CFUNC 2048
4679
4680MJIT_FUNC_EXPORTED int
4681rb_ec_stack_check(rb_execution_context_t *ec)
4682{
4683 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
4684}
4685
4686int
4687ruby_stack_check(void)
4688{
4689 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
4690}
4691
4692ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n));
4693static void
4694mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n)
4695{
4696 VALUE v;
4697 while (n--) {
4698 v = *x;
4699 gc_mark_maybe(objspace, v);
4700 x++;
4701 }
4702}
4703
4704static void
4705gc_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end)
4706{
4707 long n;
4708
4709 if (end <= start) return;
4710 n = end - start;
4711 mark_locations_array(objspace, start, n);
4712}
4713
4714void
4715rb_gc_mark_locations(const VALUE *start, const VALUE *end)
4716{
4717 gc_mark_locations(&rb_objspace, start, end);
4718}
4719
4720static void
4721gc_mark_values(rb_objspace_t *objspace, long n, const VALUE *values)
4722{
4723 long i;
4724
4725 for (i=0; i<n; i++) {
4726 gc_mark(objspace, values[i]);
4727 }
4728}
4729
4730void
4731rb_gc_mark_values(long n, const VALUE *values)
4732{
4733 long i;
4734 rb_objspace_t *objspace = &rb_objspace;
4735
4736 for (i=0; i<n; i++) {
4737 gc_mark_and_pin(objspace, values[i]);
4738 }
4739}
4740
4741static void
4742gc_mark_and_pin_stack_values(rb_objspace_t *objspace, long n, const VALUE *values)
4743{
4744 long i;
4745
4746 for (i=0; i<n; i++) {
4747 /* skip MOVED objects that are on the stack */
4748 if (is_markable_object(objspace, values[i]) && T_MOVED != BUILTIN_TYPE(values[i])) {
4749 gc_mark_and_pin(objspace, values[i]);
4750 }
4751 }
4752}
4753
4754void
4755rb_gc_mark_vm_stack_values(long n, const VALUE *values)
4756{
4757 rb_objspace_t *objspace = &rb_objspace;
4758 gc_mark_and_pin_stack_values(objspace, n, values);
4759}
4760
4761static int
4762mark_value(st_data_t key, st_data_t value, st_data_t data)
4763{
4764 rb_objspace_t *objspace = (rb_objspace_t *)data;
4765 gc_mark(objspace, (VALUE)value);
4766 return ST_CONTINUE;
4767}
4768
4769static int
4770mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
4771{
4772 rb_objspace_t *objspace = (rb_objspace_t *)data;
4773 gc_mark_and_pin(objspace, (VALUE)value);
4774 return ST_CONTINUE;
4775}
4776
4777static void
4778mark_tbl_no_pin(rb_objspace_t *objspace, st_table *tbl)
4779{
4780 if (!tbl || tbl->num_entries == 0) return;
4781 st_foreach(tbl, mark_value, (st_data_t)objspace);
4782}
4783
4784static void
4785mark_tbl(rb_objspace_t *objspace, st_table *tbl)
4786{
4787 if (!tbl || tbl->num_entries == 0) return;
4788 st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
4789}
4790
4791static int
4792mark_key(st_data_t key, st_data_t value, st_data_t data)
4793{
4794 rb_objspace_t *objspace = (rb_objspace_t *)data;
4795 gc_mark_and_pin(objspace, (VALUE)key);
4796 return ST_CONTINUE;
4797}
4798
4799static void
4800mark_set(rb_objspace_t *objspace, st_table *tbl)
4801{
4802 if (!tbl) return;
4803 st_foreach(tbl, mark_key, (st_data_t)objspace);
4804}
4805
4806static void
4807mark_finalizer_tbl(rb_objspace_t *objspace, st_table *tbl)
4808{
4809 if (!tbl) return;
4810 st_foreach(tbl, mark_value, (st_data_t)objspace);
4811}
4812
4813void
4814rb_mark_set(st_table *tbl)
4815{
4816 mark_set(&rb_objspace, tbl);
4817}
4818
4819static int
4820mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
4821{
4822 rb_objspace_t *objspace = (rb_objspace_t *)data;
4823
4824 gc_mark(objspace, (VALUE)key);
4825 gc_mark(objspace, (VALUE)value);
4826 return ST_CONTINUE;
4827}
4828
4829static int
4830pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
4831{
4832 rb_objspace_t *objspace = (rb_objspace_t *)data;
4833
4834 gc_mark_and_pin(objspace, (VALUE)key);
4835 gc_mark_and_pin(objspace, (VALUE)value);
4836 return ST_CONTINUE;
4837}
4838
4839static int
4840pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
4841{
4842 rb_objspace_t *objspace = (rb_objspace_t *)data;
4843
4844 gc_mark_and_pin(objspace, (VALUE)key);
4845 gc_mark(objspace, (VALUE)value);
4846 return ST_CONTINUE;
4847}
4848
4849static void
4850mark_hash(rb_objspace_t *objspace, VALUE hash)
4851{
4852 if (rb_hash_compare_by_id_p(hash)) {
4853 rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
4854 }
4855 else {
4856 rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
4857 }
4858
4859 if (RHASH_AR_TABLE_P(hash)) {
4860 if (objspace->mark_func_data == NULL && RHASH_TRANSIENT_P(hash)) {
4861 rb_transient_heap_mark(hash, RHASH_AR_TABLE(hash));
4862 }
4863 }
4864 else {
4865 VM_ASSERT(!RHASH_TRANSIENT_P(hash));
4866 }
4867 gc_mark(objspace, RHASH(hash)->ifnone);
4868}
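/* Note: for compare_by_identity hashes the keys are hashed by their address
 * (pin_key_mark_value), so compaction must not move them; ordinary hashes
 * mark both keys and values as movable (mark_keyvalue). AR tables that live
 * on the transient heap are additionally reported to the transient-heap
 * marker so their backing storage survives transient-heap eviction. */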
4869
4870static void
4871mark_st(rb_objspace_t *objspace, st_table *tbl)
4872{
4873 if (!tbl) return;
4874 st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
4875}
4876
4877void
4878rb_mark_hash(st_table *tbl)
4879{
4880 mark_st(&rb_objspace, tbl);
4881}
4882
4883static void
4884mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
4885{
4886 const rb_method_definition_t *def = me->def;
4887
4888 gc_mark(objspace, me->owner);
4889 gc_mark(objspace, me->defined_class);
4890
4891 if (def) {
4892 switch (def->type) {
4893 case VM_METHOD_TYPE_ISEQ:
4894 if (def->body.iseq.iseqptr) gc_mark(objspace, (VALUE)def->body.iseq.iseqptr);
4895 gc_mark(objspace, (VALUE)def->body.iseq.cref);
4896 break;
4897 case VM_METHOD_TYPE_ATTRSET:
4898 case VM_METHOD_TYPE_IVAR:
4899 gc_mark(objspace, def->body.attr.location);
4900 break;
4901 case VM_METHOD_TYPE_BMETHOD:
4902 gc_mark(objspace, def->body.bmethod.proc);
4903 rb_hook_list_mark(def->body.bmethod.hooks);
4904 break;
4905 case VM_METHOD_TYPE_ALIAS:
4906 gc_mark(objspace, (VALUE)def->body.alias.original_me);
4907 return;
4908 case VM_METHOD_TYPE_REFINED:
4909 gc_mark(objspace, (VALUE)def->body.refined.orig_me);
4910 gc_mark(objspace, (VALUE)def->body.refined.owner);
4911 break;
4912 case VM_METHOD_TYPE_CFUNC:
4913 case VM_METHOD_TYPE_ZSUPER:
4914 case VM_METHOD_TYPE_MISSING:
4915 case VM_METHOD_TYPE_OPTIMIZED:
4916 case VM_METHOD_TYPE_UNDEF:
4917 case VM_METHOD_TYPE_NOTIMPLEMENTED:
4918 break;
4919 }
4920 }
4921}
4922
4923static enum rb_id_table_iterator_result
4924mark_method_entry_i(VALUE me, void *data)
4925{
4926 rb_objspace_t *objspace = (rb_objspace_t *)data;
4927
4928 gc_mark(objspace, me);
4929 return ID_TABLE_CONTINUE;
4930}
4931
4932static void
4933mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
4934{
4935 if (tbl) {
4936 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
4937 }
4938}
4939
4940static enum rb_id_table_iterator_result
4941mark_const_entry_i(VALUE value, void *data)
4942{
4943 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
4944 rb_objspace_t *objspace = data;
4945
4946 gc_mark(objspace, ce->value);
4947 gc_mark(objspace, ce->file);
4948 return ID_TABLE_CONTINUE;
4949}
4950
4951static void
4952mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
4953{
4954 if (!tbl) return;
4955 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
4956}
4957
4958#if STACK_GROW_DIRECTION < 0
4959#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
4960#elif STACK_GROW_DIRECTION > 0
4961#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
4962#else
4963#define GET_STACK_BOUNDS(start, end, appendix) \
4964 ((STACK_END < STACK_START) ? \
4965 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
4966#endif
4967
4968static void mark_stack_locations(rb_objspace_t *objspace, const rb_execution_context_t *ec,
4969 const VALUE *stack_start, const VALUE *stack_end);
4970
4971static void
4972mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
4973{
4974 union {
4975 rb_jmp_buf j;
4976 VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];
4977 } save_regs_gc_mark;
4978 VALUE *stack_start, *stack_end;
4979
4980 FLUSH_REGISTER_WINDOWS;
4981 memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
4982 /* This assumes that all registers are saved into the jmp_buf (and stack) */
4983 rb_setjmp(save_regs_gc_mark.j);
4984
4985 /* SET_STACK_END must be called in this function because
4986 * the stack frame of this function may contain
4987 * callee save registers and they should be marked. */
4988 SET_STACK_END;
4989 GET_STACK_BOUNDS(stack_start, stack_end, 1);
4990
4991 mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
4992
4993 mark_stack_locations(objspace, ec, stack_start, stack_end);
4994}
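/* Note: rb_setjmp() above is used purely for its side effect of spilling the
 * callee-saved registers into `save_regs_gc_mark' so they can be scanned like
 * ordinary stack slots; together with GET_STACK_BOUNDS this lets the collector
 * conservatively scan every word that might hold a VALUE, which is why objects
 * found this way are pinned (gc_mark_maybe -> gc_mark_and_pin) rather than
 * marked as movable. */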
4995
4996void
4997rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
4998{
4999 rb_objspace_t *objspace = &rb_objspace;
5000 VALUE *stack_start, *stack_end;
5001
5002 GET_STACK_BOUNDS(stack_start, stack_end, 0);
5003 mark_stack_locations(objspace, ec, stack_start, stack_end);
5004}
5005
5006static void
5007mark_stack_locations(rb_objspace_t *objspace, const rb_execution_context_t *ec,
5008 const VALUE *stack_start, const VALUE *stack_end)
5009{
5010
5011 gc_mark_locations(objspace, stack_start, stack_end);
5012
5013#if defined(__mc68000__)
5014 gc_mark_locations(objspace,
5015 (VALUE*)((char*)stack_start + 2),
5016 (VALUE*)((char*)stack_end - 2));
5017#endif
5018}
5019
5020void
5021rb_mark_tbl(st_table *tbl)
5022{
5023 mark_tbl(&rb_objspace, tbl);
5024}
5025
5026void
5027rb_mark_tbl_no_pin(st_table *tbl)
5028{
5029 mark_tbl_no_pin(&rb_objspace, tbl);
5030}
5031
5032static void
5033gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
5034{
5036
5037 if (is_pointer_to_heap(objspace, (void *)obj)) {
5039 asan_unpoison_object(obj, false);
5040
5041 /* Garbage can live on the stack, so do not mark or pin */
5042 switch (BUILTIN_TYPE(obj)) {
5043 case T_MOVED:
5044 case T_ZOMBIE:
5045 case T_NONE:
5046 break;
5047 default:
5048 gc_mark_and_pin(objspace, obj);
5049 break;
5050 }
5051
5052 if (ptr) {
5054 asan_poison_object(obj);
5055 }
5056 }
5057}
5058
5059void
5060rb_gc_mark_maybe(VALUE obj)
5061{
5062 gc_mark_maybe(&rb_objspace, obj);
5063}
5064
5065static inline int
5066gc_mark_set(rb_objspace_t *objspace, VALUE obj)
5067{
5068 if (RVALUE_MARKED(obj)) return 0;
5069 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
5070 return 1;
5071}
5072
5073#if USE_RGENGC
5074static int
5075gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
5076{
5077 struct heap_page *page = GET_HEAP_PAGE(obj);
5078 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
5079
5080 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
5081 page->flags.has_uncollectible_shady_objects = TRUE;
5082 MARK_IN_BITMAP(uncollectible_bits, obj);
5083 objspace->rgengc.uncollectible_wb_unprotected_objects++;
5084
5085#if RGENGC_PROFILE > 0
5086 objspace->profile.total_remembered_shady_object_count++;
5087#if RGENGC_PROFILE >= 2
5088 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
5089#endif
5090#endif
5091 return TRUE;
5092 }
5093 else {
5094 return FALSE;
5095 }
5096}
5097#endif
5098
5099static void
5100rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
5101{
5102#if USE_RGENGC
5103 const VALUE old_parent = objspace->rgengc.parent_object;
5104
5105 if (old_parent) { /* parent object is old */
5106 if (RVALUE_WB_UNPROTECTED(obj)) {
5107 if (gc_remember_unprotected(objspace, obj)) {
5108 gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5109 }
5110 }
5111 else {
5112 if (!RVALUE_OLD_P(obj)) {
5113 if (RVALUE_MARKED(obj)) {
5114 /* An object pointed from an OLD object should be OLD. */
5115 gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5116 RVALUE_AGE_SET_OLD(objspace, obj);
5117 if (is_incremental_marking(objspace)) {
5118 if (!RVALUE_MARKING(obj)) {
5119 gc_grey(objspace, obj);
5120 }
5121 }
5122 else {
5123 rgengc_remember(objspace, obj);
5124 }
5125 }
5126 else {
5127 gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5128 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
5129 }
5130 }
5131 }
5132 }
5133
5134 GC_ASSERT(old_parent == objspace->rgengc.parent_object);
5135#endif
5136}
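/* Note: rgengc_check_relation() maintains the generational invariant: an old
 * parent must never point at an unremembered young object, because a minor GC
 * only rescans old objects recorded in the remembered set. Depending on the
 * child's state it is either promoted to old immediately, recorded as a
 * promotion candidate, or (for WB-unprotected objects) registered via
 * gc_remember_unprotected() so it is rescanned on every minor GC. */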
5137
5138static void
5139gc_grey(rb_objspace_t *objspace, VALUE obj)
5140{
5141#if RGENGC_CHECK_MODE
5142 if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
5143 if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
5144#endif
5145
5146#if GC_ENABLE_INCREMENTAL_MARK
5147 if (is_incremental_marking(objspace)) {
5148 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
5149 }
5150#endif
5151
5152 push_mark_stack(&objspace->mark_stack, obj);
5153}
5154
5155static void
5156gc_aging(rb_objspace_t *objspace, VALUE obj)
5157{
5158#if USE_RGENGC
5159 struct heap_page *page = GET_HEAP_PAGE(obj);
5160
5161 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
5162 check_rvalue_consistency(obj);
5163
5164 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
5165 if (!RVALUE_OLD_P(obj)) {
5166 gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
5167 RVALUE_AGE_INC(objspace, obj);
5168 }
5169 else if (is_full_marking(objspace)) {
5171 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
5172 }
5173 }
5174 check_rvalue_consistency(obj);
5175#endif /* USE_RGENGC */
5176
5177 objspace->marked_slots++;
5178}
5179
5180NOINLINE(static void gc_mark_ptr(rb_objspace_t *objspace, VALUE obj));
5181
5182static void
5183gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
5184{
5185 if (LIKELY(objspace->mark_func_data == NULL)) {
5186 rgengc_check_relation(objspace, obj);
5187 if (!gc_mark_set(objspace, obj)) return; /* already marked */
5188 if (RB_TYPE_P(obj, T_NONE)) rb_bug("try to mark T_NONE object"); /* check here will help debugging */
5189 gc_aging(objspace, obj);
5190 gc_grey(objspace, obj);
5191 }
5192 else {
5193 objspace->mark_func_data->mark_func(obj, objspace->mark_func_data->data);
5194 }
5195}
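/* Note on the marking colors used by the incremental collector: an object is
 * "white" while its mark bit is clear, "grey" once gc_grey() sets the mark bit
 * (plus the marking bit during incremental marking) and pushes it on the mark
 * stack, and "black" after gc_mark_children() has scanned its references.
 * gc_mark_ptr() is the single entry point that turns a white object grey. */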
5196
5197static inline void
5198gc_pin(rb_objspace_t *objspace, VALUE obj)
5199{
5200 GC_ASSERT(is_markable_object(objspace, obj));
5201 if (UNLIKELY(objspace->flags.during_compacting)) {
5202 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
5203 }
5204}
5205
5206static inline void
5207gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
5208{
5209 if (!is_markable_object(objspace, obj)) return;
5210 gc_pin(objspace, obj);
5211 gc_mark_ptr(objspace, obj);
5212}
5213
5214static inline void
5215gc_mark(rb_objspace_t *objspace, VALUE obj)
5216{
5217 if (!is_markable_object(objspace, obj)) return;
5218 gc_mark_ptr(objspace, obj);
5219}
5220
5221void
5222rb_gc_mark_movable(VALUE ptr)
5223{
5224 gc_mark(&rb_objspace, ptr);
5225}
5226
5227void
5228rb_gc_mark(VALUE ptr)
5229{
5230 gc_mark_and_pin(&rb_objspace, ptr);
5231}
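/* Illustrative sketch (not part of gc.c): rb_gc_mark() pins the referenced
 * object so GC.compact will never move it, while rb_gc_mark_movable() only
 * marks it and expects the holder to update the reference during compaction
 * via rb_gc_location(). A hypothetical typed-data extension (struct foo,
 * foo_mark and foo_compact are made-up names) would use the pair like this:
 *
 *   struct foo { VALUE cache; };
 *
 *   static void
 *   foo_mark(void *p)
 *   {
 *       struct foo *f = p;
 *       rb_gc_mark_movable(f->cache);        // may be moved by compaction
 *   }
 *
 *   static void
 *   foo_compact(void *p)
 *   {
 *       struct foo *f = p;
 *       f->cache = rb_gc_location(f->cache); // fetch the new address
 *   }
 */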
5232
5233/* CAUTION: THIS FUNCTION IS VALID *ONLY BEFORE* SWEEPING.
5234 * It is intended only for the GC_END_MARK timing.
5235 */
5236
5237int
5238rb_objspace_marked_object_p(VALUE obj)
5239{
5240 return RVALUE_MARKED(obj) ? TRUE : FALSE;
5241}
5242
5243static inline void
5244gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
5245{
5246#if USE_RGENGC
5247 if (RVALUE_OLD_P(obj)) {
5248 objspace->rgengc.parent_object = obj;
5249 }
5250 else {
5251 objspace->rgengc.parent_object = Qfalse;
5252 }
5253#endif
5254}
5255
5256static void
5257gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
5258{
5259 switch (imemo_type(obj)) {
5260 case imemo_env:
5261 {
5262 const rb_env_t *env = (const rb_env_t *)obj;
5263 GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
5264 gc_mark_values(objspace, (long)env->env_size, env->env);
5265 VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
5266 gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
5267 gc_mark(objspace, (VALUE)env->iseq);
5268 }
5269 return;
5270 case imemo_cref:
5271 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass);
5272 gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
5273 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
5274 return;
5275 case imemo_svar:
5276 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
5277 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
5278 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
5279 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
5280 return;
5281 case imemo_throw_data:
5282 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
5283 return;
5284 case imemo_ifunc:
5285 gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
5286 return;
5287 case imemo_memo:
5288 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
5289 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
5290 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
5291 return;
5292 case imemo_ment:
5293 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
5294 return;
5295 case imemo_iseq:
5296 rb_iseq_mark((rb_iseq_t *)obj);
5297 return;
5298 case imemo_tmpbuf:
5299 {
5300 const rb_imemo_tmpbuf_t *m = &RANY(obj)->as.imemo.alloc;
5301 do {
5302 rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
5303 } while ((m = m->next) != NULL);
5304 }
5305 return;
5306 case imemo_ast:
5307 rb_ast_mark(&RANY(obj)->as.imemo.ast);
5308 return;
5311 return;
5312#if VM_CHECK_MODE > 0
5313 default:
5314 VM_UNREACHABLE(gc_mark_imemo);
5315#endif
5316 }
5317}
5318
5319static void
5320gc_mark_children(rb_objspace_t *objspace, VALUE obj)
5321{
5322 register RVALUE *any = RANY(obj);
5323 gc_mark_set_parent(objspace, obj);
5324
5325 if (FL_TEST(obj, FL_EXIVAR)) {
5326 rb_mark_generic_ivar(obj);
5327 }
5328
5329 switch (BUILTIN_TYPE(obj)) {
5330 case T_FLOAT:
5331 case T_BIGNUM:
5332 case T_SYMBOL:
5333 /* Not immediates, but does not have references and singleton
5334 * class */
5335 return;
5336
5337 case T_NIL:
5338 case T_FIXNUM:
5339 rb_bug("rb_gc_mark() called for broken object");
5340 break;
5341
5342 case T_NODE:
5343 UNEXPECTED_NODE(rb_gc_mark);
5344 break;
5345
5346 case T_IMEMO:
5347 gc_mark_imemo(objspace, obj);
5348 return;
5349 }
5350
5351 gc_mark(objspace, any->as.basic.klass);
5352
5353 switch (BUILTIN_TYPE(obj)) {
5354 case T_CLASS:
5355 case T_MODULE:
5356 if (RCLASS_SUPER(obj)) {
5357 gc_mark(objspace, RCLASS_SUPER(obj));
5358 }
5359 if (!RCLASS_EXT(obj)) break;
5360 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
5361 mark_tbl_no_pin(objspace, RCLASS_IV_TBL(obj));
5362 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
5363 break;
5364
5365 case T_ICLASS:
5366 if (FL_TEST(obj, RICLASS_IS_ORIGIN)) {
5367 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
5368 }
5369 if (RCLASS_SUPER(obj)) {
5370 gc_mark(objspace, RCLASS_SUPER(obj));
5371 }
5372 if (!RCLASS_EXT(obj)) break;
5373 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
5374 break;
5375
5376 case T_ARRAY:
5377 if (FL_TEST(obj, ELTS_SHARED)) {
5378 VALUE root = any->as.array.as.heap.aux.shared_root;
5379 gc_mark(objspace, root);
5380 }
5381 else {
5382 long i, len = RARRAY_LEN(obj);
5383 const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(obj);
5384 for (i=0; i < len; i++) {
5385 gc_mark(objspace, ptr[i]);
5386 }
5387
5388 if (objspace->mark_func_data == NULL) {
5389 if (!FL_TEST_RAW(obj, RARRAY_EMBED_FLAG) &&
5390 RARRAY_TRANSIENT_P(obj)) {
5391 rb_transient_heap_mark(obj, ptr);
5392 }
5393 }
5394 }
5395 break;
5396
5397 case T_HASH:
5398 mark_hash(objspace, obj);
5399 break;
5400
5401 case T_STRING:
5402 if (STR_SHARED_P(obj)) {
5403 gc_mark(objspace, any->as.string.as.heap.aux.shared);
5404 }
5405 break;
5406
5407 case T_DATA:
5408 {
5409 void *const ptr = DATA_PTR(obj);
5410 if (ptr) {
5411 RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
5412 any->as.typeddata.type->function.dmark :
5413 any->as.data.dmark;
5414 if (mark_func) (*mark_func)(ptr);
5415 }
5416 }
5417 break;
5418
5419 case T_OBJECT:
5420 {
5421 const VALUE * const ptr = ROBJECT_IVPTR(obj);
5422
5423 if (ptr) {
5424 uint32_t i, len = ROBJECT_NUMIV(obj);
5425 for (i = 0; i < len; i++) {
5426 gc_mark(objspace, ptr[i]);
5427 }
5428
5429 if (objspace->mark_func_data == NULL &&
5430 ROBJ_TRANSIENT_P(obj)) {
5431 rb_transient_heap_mark(obj, ptr);
5432 }
5433 }
5434 }
5435 break;
5436
5437 case T_FILE:
5438 if (any->as.file.fptr) {
5439 gc_mark(objspace, any->as.file.fptr->pathv);
5440 gc_mark(objspace, any->as.file.fptr->tied_io_for_writing);
5441 gc_mark(objspace, any->as.file.fptr->writeconv_asciicompat);
5442 gc_mark(objspace, any->as.file.fptr->writeconv_pre_ecopts);
5443 gc_mark(objspace, any->as.file.fptr->encs.ecopts);
5444 gc_mark(objspace, any->as.file.fptr->write_lock);
5445 }
5446 break;
5447
5448 case T_REGEXP:
5449 gc_mark(objspace, any->as.regexp.src);
5450 break;
5451
5452 case T_MATCH:
5453 gc_mark(objspace, any->as.match.regexp);
5454 if (any->as.match.str) {
5455 gc_mark(objspace, any->as.match.str);
5456 }
5457 break;
5458
5459 case T_RATIONAL:
5460 gc_mark(objspace, any->as.rational.num);
5461 gc_mark(objspace, any->as.rational.den);
5462 break;
5463
5464 case T_COMPLEX:
5465 gc_mark(objspace, any->as.complex.real);
5466 gc_mark(objspace, any->as.complex.imag);
5467 break;
5468
5469 case T_STRUCT:
5470 {
5471 long i;
5472 const long len = RSTRUCT_LEN(obj);
5473 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
5474
5475 for (i=0; i<len; i++) {
5476 gc_mark(objspace, ptr[i]);
5477 }
5478
5479 if (objspace->mark_func_data == NULL &&
5480 RSTRUCT_TRANSIENT_P(obj)) {
5481 rb_transient_heap_mark(obj, ptr);
5482 }
5483 }
5484 break;
5485
5486 default:
5487#if GC_DEBUG
5489#endif
5490 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
5491 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
5492 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
5493 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
5494 BUILTIN_TYPE(obj), (void *)any,
5495 is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
5496 }
5497}
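/* Note: gc_mark_children() is the single per-type dispatch point for
 * reference traversal. Built-in types enumerate their slots inline above,
 * while T_DATA/TypedData objects delegate to the dmark callback supplied by
 * the extension, so any VALUE an extension stores must be reported there. */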
5498
5503static inline int
5504gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
5505{
5506 mark_stack_t *mstack = &objspace->mark_stack;
5507 VALUE obj;
5508#if GC_ENABLE_INCREMENTAL_MARK
5509 size_t marked_slots_at_the_beginning = objspace->marked_slots;
5510 size_t popped_count = 0;
5511#endif
5512
5513 while (pop_mark_stack(mstack, &obj)) {
5514 if (obj == Qundef) continue; /* skip */
5515
5516 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
5517 rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
5518 }
5519 gc_mark_children(objspace, obj);
5520
5521#if GC_ENABLE_INCREMENTAL_MARK
5522 if (incremental) {
5523 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
5524 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
5525 }
5526 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
5527 popped_count++;
5528
5529 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
5530 break;
5531 }
5532 }
5533 else {
5534 /* just ignore marking bits */
5535 }
5536#endif
5537 }
5538
5539 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
5540
5541 if (is_mark_stack_empty(mstack)) {
5542 shrink_stack_chunk_cache(mstack);
5543 return TRUE;
5544 }
5545 else {
5546 return FALSE;
5547 }
5548}
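/* Note: `count' is the incremental step budget. Each popped object is scanned
 * and, in the incremental case, its marking bit is cleared; newly greyed
 * children raise objspace->marked_slots, so the loop stops once popped_count
 * plus the newly marked slots exceeds the budget, bounding the pause of one
 * marking step. Passing incremental == FALSE (gc_mark_stacked_objects_all)
 * drains the stack completely. */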
5549
5550static int
5551gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
5552{
5553 return gc_mark_stacked_objects(objspace, TRUE, count);
5554}
5555
5556static int
5557gc_mark_stacked_objects_all(rb_objspace_t *objspace)
5558{
5559 return gc_mark_stacked_objects(objspace, FALSE, 0);
5560}
5561
5562#if PRINT_ROOT_TICKS
5563#define MAX_TICKS 0x100
5564static tick_t mark_ticks[MAX_TICKS];
5565static const char *mark_ticks_categories[MAX_TICKS];
5566
5567static void
5568show_mark_ticks(void)
5569{
5570 int i;
5571 fprintf(stderr, "mark ticks result:\n");
5572 for (i=0; i<MAX_TICKS; i++) {
5573 const char *category = mark_ticks_categories[i];
5574 if (category) {
5575 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
5576 }
5577 else {
5578 break;
5579 }
5580 }
5581}
5582
5583#endif /* PRINT_ROOT_TICKS */
5584
5585static void
5586gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
5587{
5588 struct gc_list *list;
5589 rb_execution_context_t *ec = GET_EC();
5590 rb_vm_t *vm = rb_ec_vm_ptr(ec);
5591
5592#if PRINT_ROOT_TICKS
5593 tick_t start_tick = tick();
5594 int tick_count = 0;
5595 const char *prev_category = 0;
5596
5597 if (mark_ticks_categories[0] == 0) {
5598 atexit(show_mark_ticks);
5599 }
5600#endif
5601
5602 if (categoryp) *categoryp = "xxx";
5603
5604#if USE_RGENGC
5605 objspace->rgengc.parent_object = Qfalse;
5606#endif
5607
5608#if PRINT_ROOT_TICKS
5609#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
5610 if (prev_category) { \
5611 tick_t t = tick(); \
5612 mark_ticks[tick_count] = t - start_tick; \
5613 mark_ticks_categories[tick_count] = prev_category; \
5614 tick_count++; \
5615 } \
5616 prev_category = category; \
5617 start_tick = tick(); \
5618} while (0)
5619#else /* PRINT_ROOT_TICKS */
5620#define MARK_CHECKPOINT_PRINT_TICK(category)
5621#endif
5622
5623#define MARK_CHECKPOINT(category) do { \
5624 if (categoryp) *categoryp = category; \
5625 MARK_CHECKPOINT_PRINT_TICK(category); \
5626} while (0)
5627
5628 MARK_CHECKPOINT("vm");
5629 SET_STACK_END;
5630 rb_vm_mark(vm);
5631 if (vm->self) gc_mark(objspace, vm->self);
5632
5633 MARK_CHECKPOINT("finalizers");
5634 mark_finalizer_tbl(objspace, finalizer_table);
5635
5636 MARK_CHECKPOINT("machine_context");
5637 mark_current_machine_context(objspace, ec);
5638
5639 /* mark protected global variables */
5640 MARK_CHECKPOINT("global_list");
5641 for (list = global_list; list; list = list->next) {
5642 gc_mark_maybe(objspace, *list->varptr);
5643 }
5644
5645 MARK_CHECKPOINT("end_proc");
5647
5648 MARK_CHECKPOINT("global_tbl");
5650
5651 MARK_CHECKPOINT("object_id");
5652 rb_gc_mark(objspace->next_object_id);
5653 mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */
5654
5656
5657 MARK_CHECKPOINT("finish");
5658#undef MARK_CHECKPOINT
5659}
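/* Note: the root set is marked in named checkpoints ("vm", "finalizers",
 * "machine_context", "global_list", ...) so that PRINT_ROOT_TICKS profiling
 * and the RGENGC_CHECK_MODE >= 4 reference dumps can attribute every reachable
 * object to the root category it was first seen from. */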
5660
5661#if RGENGC_CHECK_MODE >= 4
5662
5663#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
5664#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
5665#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
5666
5667struct reflist {
5668 VALUE *list;
5669 int pos;
5670 int size;
5671};
5672
5673static struct reflist *
5674reflist_create(VALUE obj)
5675{
5676 struct reflist *refs = xmalloc(sizeof(struct reflist));
5677 refs->size = 1;
5678 refs->list = ALLOC_N(VALUE, refs->size);
5679 refs->list[0] = obj;
5680 refs->pos = 1;
5681 return refs;
5682}
5683
5684static void
5685reflist_destruct(struct reflist *refs)
5686{
5687 xfree(refs->list);
5688 xfree(refs);
5689}
5690
5691static void
5692reflist_add(struct reflist *refs, VALUE obj)
5693{
5694 if (refs->pos == refs->size) {
5695 refs->size *= 2;
5696 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
5697 }
5698
5699 refs->list[refs->pos++] = obj;
5700}
5701
5702static void
5703reflist_dump(struct reflist *refs)
5704{
5705 int i;
5706 for (i=0; i<refs->pos; i++) {
5707 VALUE obj = refs->list[i];
5708 if (IS_ROOTSIG(obj)) { /* root */
5709 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
5710 }
5711 else {
5712 fprintf(stderr, "<%s>", obj_info(obj));
5713 }
5714 if (i+1 < refs->pos) fprintf(stderr, ", ");
5715 }
5716}
5717
5718static int
5719reflist_referred_from_machine_context(struct reflist *refs)
5720{
5721 int i;
5722 for (i=0; i<refs->pos; i++) {
5723 VALUE obj = refs->list[i];
5724 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
5725 }
5726 return 0;
5727}
5728
5729struct allrefs {
5730 rb_objspace_t *objspace;
5731 /* a -> obj1
5732 * b -> obj1
5733 * c -> obj1
5734 * c -> obj2
5735 * d -> obj3
5736 * #=> {obj1 => [a, b, c], obj2 => [c, d]}
5737 */
5738 struct st_table *references;
5739 const char *category;
5740 VALUE root_obj;
5741 mark_stack_t mark_stack;
5742};
5743
5744static int
5745allrefs_add(struct allrefs *data, VALUE obj)
5746{
5747 struct reflist *refs;
5748
5749 if (st_lookup(data->references, obj, (st_data_t *)&refs)) {
5750 reflist_add(refs, data->root_obj);
5751 return 0;
5752 }
5753 else {
5754 refs = reflist_create(data->root_obj);
5755 st_insert(data->references, obj, (st_data_t)refs);
5756 return 1;
5757 }
5758}
5759
5760static void
5761allrefs_i(VALUE obj, void *ptr)
5762{
5763 struct allrefs *data = (struct allrefs *)ptr;
5764
5765 if (allrefs_add(data, obj)) {
5766 push_mark_stack(&data->mark_stack, obj);
5767 }
5768}
5769
5770static void
5771allrefs_roots_i(VALUE obj, void *ptr)
5772{
5773 struct allrefs *data = (struct allrefs *)ptr;
5774 if (strlen(data->category) == 0) rb_bug("!!!");
5775 data->root_obj = MAKE_ROOTSIG(data->category);
5776
5777 if (allrefs_add(data, obj)) {
5778 push_mark_stack(&data->mark_stack, obj);
5779 }
5780}
5781
5782static st_table *
5783objspace_allrefs(rb_objspace_t *objspace)
5784{
5785 struct allrefs data;
5786 struct mark_func_data_struct mfd;
5787 VALUE obj;
5788 int prev_dont_gc = dont_gc;
5789 dont_gc = TRUE;
5790
5791 data.objspace = objspace;
5792 data.references = st_init_numtable();
5793 init_mark_stack(&data.mark_stack);
5794
5795 mfd.mark_func = allrefs_roots_i;
5796 mfd.data = &data;
5797
5798 /* traverse root objects */
5799 PUSH_MARK_FUNC_DATA(&mfd);
5800 objspace->mark_func_data = &mfd;
5801 gc_mark_roots(objspace, &data.category);
5802 POP_MARK_FUNC_DATA();
5803
5804 /* traverse rest objects reachable from root objects */
5805 while (pop_mark_stack(&data.mark_stack, &obj)) {
5806 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
5807 }
5808 free_stack_chunks(&data.mark_stack);
5809
5810 dont_gc = prev_dont_gc;
5811 return data.references;
5812}
5813
5814static int
5815objspace_allrefs_destruct_i(st_data_t key, st_data_t value, void *ptr)
5816{
5817 struct reflist *refs = (struct reflist *)value;
5818 reflist_destruct(refs);
5819 return ST_CONTINUE;
5820}
5821
5822static void
5823objspace_allrefs_destruct(struct st_table *refs)
5824{
5825 st_foreach(refs, objspace_allrefs_destruct_i, 0);
5826 st_free_table(refs);
5827}
5828
5829#if RGENGC_CHECK_MODE >= 5
5830static int
5831allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
5832{
5833 VALUE obj = (VALUE)k;
5834 struct reflist *refs = (struct reflist *)v;
5835 fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
5836 reflist_dump(refs);
5837 fprintf(stderr, "\n");
5838 return ST_CONTINUE;
5839}
5840
5841static void
5842allrefs_dump(rb_objspace_t *objspace)
5843{
5844 fprintf(stderr, "[all refs] (size: %d)\n", (int)objspace->rgengc.allrefs_table->num_entries);
5845 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
5846}
5847#endif
5848
5849static int
5850gc_check_after_marks_i(st_data_t k, st_data_t v, void *ptr)
5851{
5852 VALUE obj = k;
5853 struct reflist *refs = (struct reflist *)v;
5854 rb_objspace_t *objspace = (rb_objspace_t *)ptr;
5855
5856 /* object should be marked or oldgen */
5858 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
5859 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
5860 reflist_dump(refs);
5861
5862 if (reflist_referred_from_machine_context(refs)) {
5863 fprintf(stderr, " (marked from machine stack).\n");
5864 /* marked from machine context can be false positive */
5865 }
5866 else {
5867 objspace->rgengc.error_count++;
5868 fprintf(stderr, "\n");
5869 }
5870 }
5871 return ST_CONTINUE;
5872}
5873
5874static void
5875gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
5876{
5877 size_t saved_malloc_increase = objspace->malloc_params.increase;
5878#if RGENGC_ESTIMATE_OLDMALLOC
5879 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
5880#endif
5881 VALUE already_disabled = rb_objspace_gc_disable(objspace);
5882
5883 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
5884
5885 if (checker_func) {
5886 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
5887 }
5888
5889 if (objspace->rgengc.error_count > 0) {
5890#if RGENGC_CHECK_MODE >= 5
5891 allrefs_dump(objspace);
5892#endif
5893 if (checker_name) rb_bug("%s: GC has problem.", checker_name);
5894 }
5895
5896 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
5897 objspace->rgengc.allrefs_table = 0;
5898
5899 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
5900 objspace->malloc_params.increase = saved_malloc_increase;
5901#if RGENGC_ESTIMATE_OLDMALLOC
5902 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
5903#endif
5904}
5905#endif /* RGENGC_CHECK_MODE >= 4 */
5906
5907struct verify_internal_consistency_struct {
5908 rb_objspace_t *objspace;
5909 int err_count;
5910 size_t live_object_count;
5911 size_t zombie_object_count;
5912
5913#if USE_RGENGC
5914 VALUE parent;
5915 size_t old_object_count;
5916 size_t remembered_shady_count;
5917#endif
5918};
5919
5920#if USE_RGENGC
5921static void
5922check_generation_i(const VALUE child, void *ptr)
5923{
5924 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
5925 const VALUE parent = data->parent;
5926
5927 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
5928
5929 if (!RVALUE_OLD_P(child)) {
5930 if (!RVALUE_REMEMBERED(parent) &&
5931 !RVALUE_REMEMBERED(child) &&
5932 !RVALUE_UNCOLLECTIBLE(child)) {
5933 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
5934 data->err_count++;
5935 }
5936 }
5937}
5938
5939static void
5940check_color_i(const VALUE child, void *ptr)
5941{
5942 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
5943 const VALUE parent = data->parent;
5944
5945 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
5946 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
5947 obj_info(parent), obj_info(child));
5948 data->err_count++;
5949 }
5950}
5951#endif
5952
5953static void
5954check_children_i(const VALUE child, void *ptr)
5955{
5956 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
5957 if (check_rvalue_consistency_force(child, FALSE) != 0) {
5958 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
5959 obj_info(child), obj_info(data->parent));
5960 rb_print_backtrace(); /* C backtrace will help to debug */
5961
5962 data->err_count++;
5963 }
5964}
5965
5966static int
5967verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)
5968{
5969 struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
5970 VALUE obj;
5971 rb_objspace_t *objspace = data->objspace;
5972
5973 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
5974 void *poisoned = asan_poisoned_object_p(obj);
5975 asan_unpoison_object(obj, false);
5976
5977 if (is_live_object(objspace, obj)) {
5978 /* count objects */
5979 data->live_object_count++;
5980 data->parent = obj;
5981
5982 /* Normally, we don't expect T_MOVED objects to be in the heap.
5983 * But they can stay alive on the stack, */
5984 if (!gc_object_moved_p(objspace, obj)) {
5985 /* moved slots don't have children */
5986 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
5987 }
5988
5989#if USE_RGENGC
5990 /* check health of children */
5991 if (RVALUE_OLD_P(obj)) data->old_object_count++;
5992 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
5993
5994 if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
5995 /* reachable objects from an oldgen object should be old or (young with remember) */
5996 data->parent = obj;
5997 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
5998 }
5999
6000 if (is_incremental_marking(objspace)) {
6001 if (RVALUE_BLACK_P(obj)) {
6002 /* reachable objects from black objects should be black or grey objects */
6003 data->parent = obj;
6004 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
6005 }
6006 }
6007#endif
6008 }
6009 else {
6010 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
6011 GC_ASSERT((RBASIC(obj)->flags & ~FL_SEEN_OBJ_ID) == T_ZOMBIE);
6012 data->zombie_object_count++;
6013 }
6014 }
6015 if (poisoned) {
6016 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
6017 asan_poison_object(obj);
6018 }
6019 }
6020
6021 return 0;
6022}
6023
6024static int
6025gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
6026{
6027#if USE_RGENGC
6028 int i;
6029 unsigned int has_remembered_shady = FALSE;
6030 unsigned int has_remembered_old = FALSE;
6031 int remembered_old_objects = 0;
6032 int free_objects = 0;
6033 int zombie_objects = 0;
6034
6035 for (i=0; i<page->total_slots; i++) {
6036 VALUE val = (VALUE)&page->start[i];
6037 void *poisoned = asan_poisoned_object_p(val);
6038 asan_unpoison_object(val, false);
6039
6040 if (RBASIC(val) == 0) free_objects++;
6041 if (BUILTIN_TYPE(val) == T_ZOMBIE) zombie_objects++;
6042 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
6043 has_remembered_shady = TRUE;
6044 }
6045 if (RVALUE_PAGE_MARKING(page, val)) {
6046 has_remembered_old = TRUE;
6047 remembered_old_objects++;
6048 }
6049
6050 if (poisoned) {
6051 GC_ASSERT(BUILTIN_TYPE(val) == T_NONE);
6052 asan_poison_object(val);
6053 }
6054 }
6055
6056 if (!is_incremental_marking(objspace) &&
6057 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
6058
6059 for (i=0; i<page->total_slots; i++) {
6060 VALUE val = (VALUE)&page->start[i];
6061 if (RVALUE_PAGE_MARKING(page, val)) {
6062 fprintf(stderr, "marking -> %s\n", obj_info(val));
6063 }
6064 }
6065 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
6066 (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
6067 }
6068
6069 if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
6070 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
6071 (void *)page, obj ? obj_info(obj) : "");
6072 }
6073
6074 if (0) {
6075 /* free_slots may not equal to free_objects */
6076 if (page->free_slots != free_objects) {
6077 rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, (int)page->free_slots, free_objects);
6078 }
6079 }
6080 if (page->final_slots != zombie_objects) {
6081 rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, (int)page->final_slots, zombie_objects);
6082 }
6083
6084 return remembered_old_objects;
6085#else
6086 return 0;
6087#endif
6088}
6089
6090static int
6091gc_verify_heap_pages_(rb_objspace_t *objspace, struct list_head *head)
6092{
6093 int remembered_old_objects = 0;
6094 struct heap_page *page = 0;
6095
6096 list_for_each(head, page, page_node) {
6097 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
6098 RVALUE *p = page->freelist;
6099 while (p) {
6100 RVALUE *prev = p;
6101 asan_unpoison_object((VALUE)p, false);
6102 if (BUILTIN_TYPE(p) != T_NONE) {
6103 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info((VALUE)p));
6104 }
6105 p = p->as.free.next;
6106 asan_poison_object((VALUE)prev);
6107 }
6108 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
6109
6110 if (page->flags.has_remembered_objects == FALSE) {
6111 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
6112 }
6113 }
6114
6115 return remembered_old_objects;
6116}
6117
6118static int
6119gc_verify_heap_pages(rb_objspace_t *objspace)
6120{
6121 int remembered_old_objects = 0;
6122 remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_eden->pages);
6123 remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_tomb->pages);
6124 return remembered_old_objects;
6125}
6126
6127/*
6128 * call-seq:
6129 * GC.verify_internal_consistency -> nil
6130 *
6131 * Verify internal consistency.
6132 *
6133 * This method is implementation specific.
6134 * Currently this method checks generational consistency
6135 * when RGenGC is supported.
6136 */
6137static VALUE
6138gc_verify_internal_consistency_m(VALUE dummy)
6139{
6140 gc_verify_internal_consistency(&rb_objspace);
6141
6142 return Qnil;
6143}
6144
6145static void
6146gc_verify_internal_consistency(rb_objspace_t *objspace)
6147{
6148 struct verify_internal_consistency_struct data = {0};
6149
6150 data.objspace = objspace;
6151 gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
6152
6153 /* check relations */
6154
6155 objspace_each_objects_without_setup(objspace, verify_internal_consistency_i, &data);
6156
6157 if (data.err_count != 0) {
6158#if RGENGC_CHECK_MODE >= 5
6159 objspace->rgengc.error_count = data.err_count;
6160 gc_marks_check(objspace, NULL, NULL);
6161 allrefs_dump(objspace);
6162#endif
6163 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
6164 }
6165
6166 /* check heap_page status */
6167 gc_verify_heap_pages(objspace);
6168
6169 /* check counters */
6170
6171 if (!is_lazy_sweeping(heap_eden) && !finalizing) {
6172 if (objspace_live_slots(objspace) != data.live_object_count) {
6173 fprintf(stderr, "heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n",
6174 (int)heap_pages_final_slots, (int)objspace->profile.total_freed_objects);
6175 rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace_live_slots(objspace), data.live_object_count);
6176 }
6177 }
6178
6179#if USE_RGENGC
6180 if (!is_marking(objspace)) {
6181 if (objspace->rgengc.old_objects != data.old_object_count) {
6182 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace->rgengc.old_objects, data.old_object_count);
6183 }
6184 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
6185 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
6186 }
6187 }
6188#endif
6189
6190 if (!finalizing) {
6191 size_t list_count = 0;
6192
6193 {
6194 VALUE z = heap_pages_deferred_final;
6195 while (z) {
6196 list_count++;
6197 z = RZOMBIE(z)->next;
6198 }
6199 }
6200
6201 if (heap_pages_final_slots != data.zombie_object_count ||
6202 heap_pages_final_slots != list_count) {
6203
6204 rb_bug("inconsistent finalizing object count:\n"
6205 " expect %"PRIuSIZE"\n"
6206 " but %"PRIuSIZE" zombies\n"
6207 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
6208 heap_pages_final_slots,
6209 data.zombie_object_count,
6210 list_count);
6211 }
6212 }
6213
6214 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
6215}
6216
6217void
6218rb_gc_verify_internal_consistency(void)
6219{
6220 gc_verify_internal_consistency(&rb_objspace);
6221}
6222
6223static VALUE
6224gc_verify_transient_heap_internal_consistency(VALUE dmy)
6225{
6226 rb_transient_heap_verify();
6227 return Qnil;
6228}
6229
6230/* marks */
6231
6232static void
6233gc_marks_start(rb_objspace_t *objspace, int full_mark)
6234{
6235 /* start marking */
6236 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
6237 gc_mode_transition(objspace, gc_mode_marking);
6238
6239#if USE_RGENGC
6240 if (full_mark) {
6241#if GC_ENABLE_INCREMENTAL_MARK
6243
6244 if (0) fprintf(stderr, "objspace->marked_slots: %d, objspace->rincgc.pooled_page_num: %d, objspace->rincgc.step_slots: %d, \n",
6246#endif
6253 rgengc_mark_and_rememberset_clear(objspace, heap_eden);
6254 }
6255 else {
6258 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
6260 rgengc_rememberset_mark(objspace, heap_eden);
6261 }
6262#endif
6263
6264 gc_mark_roots(objspace, NULL);
6265
6266 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %d\n", full_mark ? "full" : "minor", (int)mark_stack_size(&objspace->mark_stack));
6267}
6268
6269#if GC_ENABLE_INCREMENTAL_MARK
6270static void
6271gc_marks_wb_unprotected_objects(rb_objspace_t *objspace)
6272{
6273 struct heap_page *page = 0;
6274
6275 list_for_each(&heap_eden->pages, page, page_node) {
6276 bits_t *mark_bits = page->mark_bits;
6277 bits_t *wbun_bits = page->wb_unprotected_bits;
6278 RVALUE *p = page->start;
6279 RVALUE *offset = p - NUM_IN_PAGE(p);
6280 size_t j;
6281
6282 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
6283 bits_t bits = mark_bits[j] & wbun_bits[j];
6284
6285 if (bits) {
6286 p = offset + j * BITS_BITLENGTH;
6287
6288 do {
6289 if (bits & 1) {
6290 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
6291 GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
6292 GC_ASSERT(RVALUE_MARKED((VALUE)p));
6293 gc_mark_children(objspace, (VALUE)p);
6294 }
6295 p++;
6296 bits >>= 1;
6297 } while (bits);
6298 }
6299 }
6300 }
6301
6302 gc_mark_stacked_objects_all(objspace);
6303}
6304
6305static struct heap_page *
6306heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
6307{
6308 struct heap_page *page = heap->pooled_pages;
6309
6310 if (page) {
6311 heap->pooled_pages = page->free_next;
6312 heap_add_freepage(heap, page);
6313 }
6314
6315 return page;
6316}
6317#endif
6318
6319static int
6320gc_marks_finish(rb_objspace_t *objspace)
6321{
6322#if GC_ENABLE_INCREMENTAL_MARK
6323 /* finish incremental GC */
6324 if (is_incremental_marking(objspace)) {
6325 if (heap_eden->pooled_pages) {
6326 heap_move_pooled_pages_to_free_pages(heap_eden);
6327 gc_report(1, objspace, "gc_marks_finish: pooled pages are exists. retry.\n");
6328 return FALSE; /* continue marking phase */
6329 }
6330
6331 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
6332 rb_bug("gc_marks_finish: mark stack is not empty (%d).", (int)mark_stack_size(&objspace->mark_stack));
6333 }
6334
6335 gc_mark_roots(objspace, 0);
6336
6337 if (is_mark_stack_empty(&objspace->mark_stack) == FALSE) {
6338 gc_report(1, objspace, "gc_marks_finish: not empty (%d). retry.\n", (int)mark_stack_size(&objspace->mark_stack));
6339 return FALSE;
6340 }
6341
6342#if RGENGC_CHECK_MODE >= 2
6343 if (gc_verify_heap_pages(objspace) != 0) {
6344 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
6345 }
6346#endif
6347
6349 /* check children of all marked wb-unprotected objects */
6350 gc_marks_wb_unprotected_objects(objspace);
6351 }
6352#endif /* GC_ENABLE_INCREMENTAL_MARK */
6353
6354#if RGENGC_CHECK_MODE >= 2
6355 gc_verify_internal_consistency(objspace);
6356#endif
6357
6358#if USE_RGENGC
6359 if (is_full_marking(objspace)) {
6360 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
6361 const double r = gc_params.oldobject_limit_factor;
6363 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
6364 }
6365#endif
6366
6367#if RGENGC_CHECK_MODE >= 4
6368 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
6369#endif
6370
6371 {
6372 /* decide full GC is needed or not */
6373 rb_heap_t *heap = heap_eden;
6375 size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
6376 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
6377 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
6378 int full_marking = is_full_marking(objspace);
6379
6380 GC_ASSERT(heap->total_slots >= objspace->marked_slots);
6381
6382 /* setup free-able page counts */
6383 if (max_free_slots < gc_params.heap_init_slots) max_free_slots = gc_params.heap_init_slots;
6384
6385 if (sweep_slots > max_free_slots) {
6386 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
6387 }
6388 else {
6390 }
6391
6392 /* check free_min */
6393 if (min_free_slots < gc_params.heap_free_slots) min_free_slots = gc_params.heap_free_slots;
6394
6395#if USE_RGENGC
6396 if (sweep_slots < min_free_slots) {
6397 if (!full_marking) {
6398 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
6399 full_marking = TRUE;
6400 /* do not update last_major_gc, because full marking is not done. */
6401 goto increment;
6402 }
6403 else {
6404 gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
6406 }
6407 }
6408 else {
6409 increment:
6410 gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
6411 heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
6412 heap_increment(objspace, heap);
6413 }
6414 }
6415
6416 if (full_marking) {
6417 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
6418 const double r = gc_params.oldobject_limit_factor;
6420 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
6421 }
6422
6425 }
6426 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
6428 }
6431 }
6432
6433 gc_report(1, objspace, "gc_marks_finish (marks %d objects, old %d objects, total %d slots, sweep %d slots, increment: %d, next GC: %s)\n",
6434 (int)objspace->marked_slots, (int)objspace->rgengc.old_objects, (int)heap->total_slots, (int)sweep_slots, (int)heap_allocatable_pages,
6435 objspace->rgengc.need_major_gc ? "major" : "minor");
6436#else /* USE_RGENGC */
6437 if (sweep_slots < min_free_slots) {
6438 gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
6439 heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slot, total_slot));
6440 heap_increment(objspace, heap);
6441 }
6442#endif
6443 }
6444
6446
6448
6449 return TRUE;
6450}
6451
6452static void
6453gc_marks_step(rb_objspace_t *objspace, int slots)
6454{
6455#if GC_ENABLE_INCREMENTAL_MARK
6456 GC_ASSERT(is_marking(objspace));
6457
6458 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
6459 if (gc_marks_finish(objspace)) {
6460 /* finish */
6461 gc_sweep(objspace);
6462 }
6463 }
6464 if (0) fprintf(stderr, "objspace->marked_slots: %d\n", (int)objspace->marked_slots);
6465#endif
6466}
6467
6468static void
6469gc_marks_rest(rb_objspace_t *objspace)
6470{
6471 gc_report(1, objspace, "gc_marks_rest\n");
6472
6473#if GC_ENABLE_INCREMENTAL_MARK
6474 heap_eden->pooled_pages = NULL;
6475#endif
6476
6477 if (is_incremental_marking(objspace)) {
6478 do {
6479 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
6480 } while (gc_marks_finish(objspace) == FALSE);
6481 }
6482 else {
6483 gc_mark_stacked_objects_all(objspace);
6484 gc_marks_finish(objspace);
6485 }
6486
6487 /* move to sweep */
6488 gc_sweep(objspace);
6489}
6490
6491static void
6492gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
6493{
6495#if GC_ENABLE_INCREMENTAL_MARK
6496
6497 gc_enter(objspace, "marks_continue");
6498
6500 {
6501 int slots = 0;
6502 const char *from;
6503
6504 if (heap->pooled_pages) {
6505 while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) {
6506 struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
6507 slots += page->free_slots;
6508 }
6509 from = "pooled-pages";
6510 }
6511 else if (heap_increment(objspace, heap)) {
6512 slots = heap->free_pages->free_slots;
6513 from = "incremented-pages";
6514 }
6515
6516 if (slots > 0) {
6517 gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n", slots, from);
6518 gc_marks_step(objspace, (int)objspace->rincgc.step_slots);
6519 }
6520 else {
6521 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %d).\n", (int)mark_stack_size(&objspace->mark_stack));
6522 gc_marks_rest(objspace);
6523 }
6524 }
6526
6527 gc_exit(objspace, "marks_continue");
6528#endif
6529}
6530
6531static void
6532gc_marks(rb_objspace_t *objspace, int full_mark)
6533{
6534 gc_prof_mark_timer_start(objspace);
6535
6537 {
6538 /* setup marking */
6539
6540#if USE_RGENGC
6541 gc_marks_start(objspace, full_mark);
6542 if (!is_incremental_marking(objspace)) {
6543 gc_marks_rest(objspace);
6544 }
6545
6546#if RGENGC_PROFILE > 0
6547 if (gc_prof_record(objspace)) {
6548 gc_profile_record *record = gc_prof_record(objspace);
6549 record->old_objects = objspace->rgengc.old_objects;
6550 }
6551#endif
6552
6553#else /* USE_RGENGC */
6554 gc_marks_start(objspace, TRUE);
6555 gc_marks_rest(objspace);
6556#endif
6557 }
6559 gc_prof_mark_timer_stop(objspace);
6560}
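/* Note: gc_marks() chooses between a full mark (major GC) that clears all mark
 * and remember bits and traverses the whole heap, and a minor mark that
 * traverses only the roots plus the remembered set. With incremental marking
 * enabled, a full mark merely starts here and is continued in small steps by
 * gc_marks_continue()/gc_marks_step() until gc_marks_finish() succeeds. */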
6561
6562/* RGENGC */
6563
6564static void
6565gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
6566{
6567 if (level <= RGENGC_DEBUG) {
6568 char buf[1024];
6569 FILE *out = stderr;
6570 va_list args;
6571 const char *status = " ";
6572
6573#if USE_RGENGC
6574 if (during_gc) {
6575 status = is_full_marking(objspace) ? "+" : "-";
6576 }
6577 else {
6579 status = "S";
6580 }
6581 if (is_incremental_marking(objspace)) {
6582 status = "M";
6583 }
6584 }
6585#endif
6586
6587 va_start(args, fmt);
6588 vsnprintf(buf, 1024, fmt, args);
6589 va_end(args);
6590
6591 fprintf(out, "%s|", status);
6592 fputs(buf, out);
6593 }
6594}
6595
6596#if USE_RGENGC
6597
6598/* bit operations */
6599
6600static int
6601rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
6602{
6603 return RVALUE_REMEMBERED(obj);
6604}
6605
6606static int
6607rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
6608{
6609 struct heap_page *page = GET_HEAP_PAGE(obj);
6610 bits_t *bits = &page->marking_bits[0];
6611
6613
6614 if (MARKED_IN_BITMAP(bits, obj)) {
6615 return FALSE;
6616 }
6617 else {
6618 page->flags.has_remembered_objects = TRUE;
6619 MARK_IN_BITMAP(bits, obj);
6620 return TRUE;
6621 }
6622}
6623
6624/* wb, etc */
6625
6626/* return FALSE if already remembered */
6627static int
6628rgengc_remember(rb_objspace_t *objspace, VALUE obj)
6629{
6630 gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
6631 rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
6632
6633 check_rvalue_consistency(obj);
6634
6635 if (RGENGC_CHECK_MODE) {
6636 if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
6637 }
6638
6639#if RGENGC_PROFILE > 0
6640 if (!rgengc_remembered(objspace, obj)) {
6641 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
6642 objspace->profile.total_remembered_normal_object_count++;
6643#if RGENGC_PROFILE >= 2
6644 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
6645#endif
6646 }
6647 }
6648#endif /* RGENGC_PROFILE > 0 */
6649
6650 return rgengc_remembersetbits_set(objspace, obj);
6651}
6652
6653static int
6654rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj)
6655{
6656 int result = rgengc_remembersetbits_get(objspace, obj);
6657 check_rvalue_consistency(obj);
6658 return result;
6659}
6660
6661static int
6662rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
6663{
6664 gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
6665 return rgengc_remembered_sweep(objspace, obj);
6666}
6667
6668#ifndef PROFILE_REMEMBERSET_MARK
6669#define PROFILE_REMEMBERSET_MARK 0
6670#endif
6671
6672static void
6673rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
6674{
6675 size_t j;
6676 struct heap_page *page = 0;
6677#if PROFILE_REMEMBERSET_MARK
6678 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
6679#endif
6680 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
6681
6682 list_for_each(&heap->pages, page, page_node) {
6683 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
6684 RVALUE *p = page->start;
6685 RVALUE *offset = p - NUM_IN_PAGE(p);
6686 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
6687 bits_t *marking_bits = page->marking_bits;
6688 bits_t *uncollectible_bits = page->uncollectible_bits;
6689 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
6690#if PROFILE_REMEMBERSET_MARK
6691 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
6692 else if (page->flags.has_remembered_objects) has_old++;
6693 else if (page->flags.has_uncollectible_shady_objects) has_shady++;
6694#endif
6695 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
6696 bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
6697 marking_bits[j] = 0;
6698 }
6699 page->flags.has_remembered_objects = FALSE;
6700
6701 for (j=0; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
6702 bitset = bits[j];
6703
6704 if (bitset) {
6705 p = offset + j * BITS_BITLENGTH;
6706
6707 do {
6708 if (bitset & 1) {
6709 VALUE obj = (VALUE)p;
6710 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
6711 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
6712 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
6713
6714 gc_mark_children(objspace, obj);
6715 }
6716 p++;
6717 bitset >>= 1;
6718 } while (bitset);
6719 }
6720 }
6721 }
6722#if PROFILE_REMEMBERSET_MARK
6723 else {
6724 skip++;
6725 }
6726#endif
6727 }
6728
6729#if PROFILE_REMEMBERSET_MARK
6730 fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
6731#endif
6732 gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
6733}
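/* Note: the remembered set is not a separate list; it is encoded per page in
 * the marking_bits bitmap (plus the uncollectible and wb_unprotected bits for
 * shady objects). rgengc_rememberset_mark() ORs those bitmaps together, clears
 * the marking bits, and re-marks the children of every remembered object so a
 * minor GC sees all old-to-young edges without scanning the whole old
 * generation. */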
6734
6735static void
6736rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
6737{
6738 struct heap_page *page = 0;
6739
6740 list_for_each(&heap->pages, page, page_node) {
6741 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6747 }
6748}
6749
6750/* RGENGC: APIs */
6751
6752NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
6753
6754static void
6755gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
6756{
6757 if (RGENGC_CHECK_MODE) {
6758 if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
6759 if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
6760 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
6761 }
6762
6763#if 1
6764 /* mark `a' and remember (default behavior) */
6765 if (!rgengc_remembered(objspace, a)) {
6766 rgengc_remember(objspace, a);
6767 gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
6768 }
6769#else
6770 /* mark `b' and remember */
6772 if (RVALUE_WB_UNPROTECTED(b)) {
6773 gc_remember_unprotected(objspace, b);
6774 }
6775 else {
6776 RVALUE_AGE_SET_OLD(objspace, b);
6777 rgengc_remember(objspace, b);
6778 }
6779
6780 gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
6781#endif
6782
6783 check_rvalue_consistency(a);
6784 check_rvalue_consistency(b);
6785}
6786
6787#if GC_ENABLE_INCREMENTAL_MARK
6788static void
6789gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
6790{
6791 gc_mark_set_parent(objspace, parent);
6792 rgengc_check_relation(objspace, obj);
6793 if (gc_mark_set(objspace, obj) == FALSE) return;
6794 gc_aging(objspace, obj);
6795 gc_grey(objspace, obj);
6796}
6797
6798NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
6799
6800static void
6801gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
6802{
6803 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
6804
6805 if (RVALUE_BLACK_P(a)) {
6806 if (RVALUE_WHITE_P(b)) {
6807 if (!RVALUE_WB_UNPROTECTED(a)) {
6808 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
6809 gc_mark_from(objspace, b, a);
6810 }
6811 }
6812 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
6813 if (!RVALUE_WB_UNPROTECTED(b)) {
6814 gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
6815 RVALUE_AGE_SET_OLD(objspace, b);
6816
6817 if (RVALUE_BLACK_P(b)) {
6818 gc_grey(objspace, b);
6819 }
6820 }
6821 else {
6822 gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
6823 gc_remember_unprotected(objspace, b);
6824 }
6825 }
6826 }
6827}
6828#else
6829#define gc_writebarrier_incremental(a, b, objspace)
6830#endif
6831
6832 void
6833 rb_gc_writebarrier(VALUE a, VALUE b)
6834{
6835 rb_objspace_t *objspace = &rb_objspace;
6836
6837 if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const");
6838 if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const");
6839
6840 if (!is_incremental_marking(objspace)) {
6841 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
6842 return;
6843 }
6844 else {
6845 gc_writebarrier_generational(a, b, objspace);
6846 }
6847 }
6848 else { /* slow path */
6849 gc_writebarrier_incremental(a, b, objspace);
6850 }
6851}
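/*
 * rb_gc_writebarrier() is the slow path behind the RB_OBJ_WRITE()/
 * RB_OBJ_WRITTEN() macros used by C extensions.  Outside of incremental
 * marking, only an old->young store matters: the old object `a' is added to
 * the remember set so the next minor GC rescans it.  During incremental
 * marking the barrier instead repairs black->white edges (see
 * gc_writebarrier_incremental above).
 *
 * A minimal usage sketch for extension code (illustrative only; `wrapper'
 * and `wrapper_ptr' are hypothetical names):
 *
 *     struct wrapper { VALUE cache; };
 *     // store `val' into a slot owned by `self' through the barrier
 *     RB_OBJ_WRITE(self, &wrapper_ptr->cache, val);
 */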
6852
6853 void
6854 rb_gc_writebarrier_unprotect(VALUE obj)
6855{
6856 if (RVALUE_WB_UNPROTECTED(obj)) {
6857 return;
6858 }
6859 else {
6860 rb_objspace_t *objspace = &rb_objspace;
6861
6862 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
6863 rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
6864
6865 if (RVALUE_OLD_P(obj)) {
6866 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
6867 RVALUE_DEMOTE(objspace, obj);
6868 gc_mark_set(objspace, obj);
6869 gc_remember_unprotected(objspace, obj);
6870
6871#if RGENGC_PROFILE
6872 objspace->profile.total_shade_operation_count++;
6873#if RGENGC_PROFILE >= 2
6874 objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
6875#endif /* RGENGC_PROFILE >= 2 */
6876#endif /* RGENGC_PROFILE */
6877 }
6878 else {
6879 RVALUE_AGE_RESET(obj);
6880 }
6881
6882 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
6883 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
6884 }
6885}
6886
6887/*
6888 * remember `obj' if needed.
6889 */
6890 MJIT_FUNC_EXPORTED void
6891 rb_gc_writebarrier_remember(VALUE obj)
6892{
6893 rb_objspace_t *objspace = &rb_objspace;
6894
6895 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
6896
6897 if (is_incremental_marking(objspace)) {
6898 if (RVALUE_BLACK_P(obj)) {
6899 gc_grey(objspace, obj);
6900 }
6901 }
6902 else {
6903 if (RVALUE_OLD_P(obj)) {
6904 rgengc_remember(objspace, obj);
6905 }
6906 }
6907}
6908
6909static st_table *rgengc_unprotect_logging_table;
6910
6911static int
6912rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
6913{
6914 fprintf(stderr, "%s\t%d\n", (char *)key, (int)val);
6915 return ST_CONTINUE;
6916}
6917
6918static void
6919rgengc_unprotect_logging_exit_func(void)
6920{
6921 st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
6922}
6923
6924void
6925rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
6926{
6927 VALUE obj = (VALUE)objptr;
6928
6929 if (rgengc_unprotect_logging_table == 0) {
6930 rgengc_unprotect_logging_table = st_init_strtable();
6931 atexit(rgengc_unprotect_logging_exit_func);
6932 }
6933
6934 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
6935 char buff[0x100];
6936 st_data_t cnt = 1;
6937 char *ptr = buff;
6938
6939 snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);
6940
6941 if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
6942 cnt++;
6943 }
6944 else {
6945 ptr = (strdup)(buff);
6946 if (!ptr) rb_memerror();
6947 }
6948 st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
6949 }
6950}
6951#endif /* USE_RGENGC */
6952
6953 void
6954 rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
6955{
6956#if USE_RGENGC
6957 rb_objspace_t *objspace = &rb_objspace;
6958
6959 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
6960 if (!RVALUE_OLD_P(dest)) {
6961 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
6962 RVALUE_AGE_RESET_RAW(dest);
6963 }
6964 else {
6965 RVALUE_DEMOTE(objspace, dest);
6966 }
6967 }
6968
6969 check_rvalue_consistency(dest);
6970#endif
6971}
6972
6973/* RGENGC analysis information */
6974
6975 VALUE
6976 rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
6977{
6978#if USE_RGENGC
6979 return RVALUE_WB_UNPROTECTED(obj) ? Qfalse : Qtrue;
6980#else
6981 return Qfalse;
6982#endif
6983}
6984
6985 VALUE
6986 rb_obj_rgengc_promoted_p(VALUE obj)
6987{
6988 return OBJ_PROMOTED(obj) ? Qtrue : Qfalse;
6989}
6990
6991 size_t
6992 rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
6993{
6994 size_t n = 0;
6995 static ID ID_marked;
6996#if USE_RGENGC
6997 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
6998#endif
6999
7000 if (!ID_marked) {
7001#define I(s) ID_##s = rb_intern(#s);
7002 I(marked);
7003#if USE_RGENGC
7004 I(wb_protected);
7005 I(old);
7006 I(marking);
7007 I(uncollectible);
7008 I(pinned);
7009#endif
7010#undef I
7011 }
7012
7013#if USE_RGENGC
7014 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
7015 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
7016 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
7017 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
7018#endif
7019 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
7020 if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
7021 return n;
7022}
7023
7024/* GC */
7025
7026 void
7027 rb_gc_force_recycle(VALUE obj)
7028{
7029 rb_objspace_t *objspace = &rb_objspace;
7030
7031#if USE_RGENGC
7032 int is_old = RVALUE_OLD_P(obj);
7033
7034 gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));
7035
7036 if (is_old) {
7037 if (RVALUE_MARKED(obj)) {
7038 objspace->rgengc.old_objects--;
7039 }
7040 }
7043
7044#if GC_ENABLE_INCREMENTAL_MARK
7045 if (is_incremental_marking(objspace)) {
7047 invalidate_mark_stack(&objspace->mark_stack, obj);
7049 }
7051 }
7052 else {
7053#endif
7054 if (is_old || !GET_HEAP_PAGE(obj)->flags.before_sweep) {
7056 }
7058#if GC_ENABLE_INCREMENTAL_MARK
7059 }
7060#endif
7061#endif
7062
7063 objspace->profile.total_freed_objects++;
7064
7065 heap_page_add_freeobj(objspace, GET_HEAP_PAGE(obj), obj);
7066
7067 /* Disable counting swept_slots because it has no meaning.
7068 * if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(p), p)) {
7069 * objspace->heap.swept_slots++;
7070 * }
7071 */
7072}
7073
7074#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
7075#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
7076#endif
7077
7078 void
7079 rb_gc_register_mark_object(VALUE obj)
7080{
7081 VALUE ary_ary = GET_VM()->mark_object_ary;
7082 VALUE ary = rb_ary_last(0, 0, ary_ary);
7083
7084 if (ary == Qnil || RARRAY_LEN(ary) >= MARK_OBJECT_ARY_BUCKET_SIZE) {
7085 ary = rb_ary_tmp_new(MARK_OBJECT_ARY_BUCKET_SIZE);
7086 rb_ary_push(ary_ary, ary);
7087 }
7088
7089 rb_ary_push(ary, obj);
7090}
7091
7092 void
7093 rb_gc_register_address(VALUE *addr)
7094{
7095 rb_objspace_t *objspace = &rb_objspace;
7096 struct gc_list *tmp;
7097
7098 tmp = ALLOC(struct gc_list);
7099 tmp->next = global_list;
7100 tmp->varptr = addr;
7101 global_list = tmp;
7102}
7103
7104 void
7105 rb_gc_unregister_address(VALUE *addr)
7106{
7107 rb_objspace_t *objspace = &rb_objspace;
7108 struct gc_list *tmp = global_list;
7109
7110 if (tmp->varptr == addr) {
7111 global_list = tmp->next;
7112 xfree(tmp);
7113 return;
7114 }
7115 while (tmp->next) {
7116 if (tmp->next->varptr == addr) {
7117 struct gc_list *t = tmp->next;
7118
7119 tmp->next = tmp->next->next;
7120 xfree(t);
7121 break;
7122 }
7123 tmp = tmp->next;
7124 }
7125}
7126
7127 void
7128 rb_global_variable(VALUE *var)
7129{
7130 rb_gc_register_address(var);
7131}
7132
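/*
 * The three registration functions above keep VALUEs stored in C globals
 * alive: rb_gc_register_mark_object() appends the object itself to a VM-held
 * array, while rb_gc_register_address()/rb_global_variable() record the
 * *address* of a C variable on `global_list' so whatever value it holds is
 * marked on every GC.  Typical extension usage (illustrative sketch):
 *
 *     static VALUE cached_str;
 *     cached_str = rb_str_new_cstr("config");
 *     rb_global_variable(&cached_str);  // now re-marked on each GC cycle
 */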
7133#define GC_NOTIFY 0
7134
7135 enum {
7136 gc_stress_no_major,
7137 gc_stress_no_immediate_sweep,
7138 gc_stress_full_mark_after_malloc,
7139 gc_stress_max
7140};
7141
7142#define gc_stress_full_mark_after_malloc_p() \
7143 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
7144
7145static void
7146heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
7147{
7148 if (!heap->freelist && !heap->free_pages) {
7149 if (!heap_increment(objspace, heap)) {
7150 heap_set_increment(objspace, 1);
7151 heap_increment(objspace, heap);
7152 }
7153 }
7154}
7155
7156static int
7157ready_to_gc(rb_objspace_t *objspace)
7158{
7159 if (dont_gc || during_gc || ruby_disable_gc) {
7160 heap_ready_to_gc(objspace, heap_eden);
7161 return FALSE;
7162 }
7163 else {
7164 return TRUE;
7165 }
7166}
7167
7168static void
7169gc_reset_malloc_info(rb_objspace_t *objspace)
7170{
7171 gc_prof_set_malloc_info(objspace);
7172 {
7173 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
7174 size_t old_limit = malloc_limit;
7175
7176 if (inc > malloc_limit) {
7177 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
7178 if (malloc_limit > gc_params.malloc_limit_max) {
7179 malloc_limit = gc_params.malloc_limit_max;
7180 }
7181 }
7182 else {
7183 malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
7184 if (malloc_limit < gc_params.malloc_limit_min) {
7185 malloc_limit = gc_params.malloc_limit_min;
7186 }
7187 }
7188
7189 if (0) {
7190 if (old_limit != malloc_limit) {
7191 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
7192 rb_gc_count(), old_limit, malloc_limit);
7193 }
7194 else {
7195 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
7196 rb_gc_count(), malloc_limit);
7197 }
7198 }
7199 }
7200
7201 /* reset oldmalloc info */
7202#if RGENGC_ESTIMATE_OLDMALLOC
7203 if (!is_full_marking(objspace)) {
7204 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
7205 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
7206 objspace->rgengc.oldmalloc_increase_limit =
7207 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
7208
7209 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
7210 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
7211 }
7212 }
7213
7214 if (0) fprintf(stderr, "%d\t%d\t%u\t%u\t%d\n",
7215 (int)rb_gc_count(),
7216 (int)objspace->rgengc.need_major_gc,
7217 (unsigned int)objspace->rgengc.oldmalloc_increase,
7218 (unsigned int)objspace->rgengc.oldmalloc_increase_limit,
7219 (unsigned int)gc_params.oldmalloc_limit_max);
7220 }
7221 else {
7222 /* major GC */
7223 objspace->rgengc.oldmalloc_increase = 0;
7224
7225 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
7226 objspace->rgengc.oldmalloc_increase_limit =
7227 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
7228 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
7229 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
7230 }
7231 }
7232 }
7233#endif
7234}
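/*
 * malloc_limit above adapts to the observed allocation rate: if the bytes
 * malloc'ed since the last GC (`inc') exceeded the current limit, the limit
 * grows by malloc_limit_growth_factor (capped at malloc_limit_max);
 * otherwise it decays by 2% per GC (floored at malloc_limit_min).  For
 * example, assuming a growth factor of 1.4, an exceeded 16MB limit becomes
 * roughly 22.4MB, while an untouched 16MB limit shrinks to about 15.7MB.
 */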
7235
7236static int
7237garbage_collect(rb_objspace_t *objspace, int reason)
7238{
7239#if GC_PROFILE_MORE_DETAIL
7240 objspace->profile.prepare_time = getrusage_time();
7241#endif
7242
7243 gc_rest(objspace);
7244
7245#if GC_PROFILE_MORE_DETAIL
7246 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
7247#endif
7248
7249 return gc_start(objspace, reason);
7250}
7251
7252static int
7253gc_start(rb_objspace_t *objspace, int reason)
7254{
7255 unsigned int do_full_mark = !!((unsigned)reason & GPR_FLAG_FULL_MARK);
7256 unsigned int immediate_mark = (unsigned)reason & GPR_FLAG_IMMEDIATE_MARK;
7257
7258 /* reason may be clobbered later, so set immediate_sweep here */
7259 objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
7260
7261 if (!heap_allocated_pages) return FALSE; /* heap is not ready */
7262 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
7263
7264 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
7267#if RGENGC_CHECK_MODE >= 2
7268 gc_verify_internal_consistency(objspace);
7269#endif
7270
7271 gc_enter(objspace, "gc_start");
7272
7273 if (ruby_gc_stressful) {
7274 int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
7275
7276 if ((flag & (1<<gc_stress_no_major)) == 0) {
7277 do_full_mark = TRUE;
7278 }
7279
7280 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
7281 }
7282 else {
7283#if USE_RGENGC
7284 if (objspace->rgengc.need_major_gc) {
7285 reason |= objspace->rgengc.need_major_gc;
7286 do_full_mark = TRUE;
7287 }
7288 else if (RGENGC_FORCE_MAJOR_GC) {
7289 reason = GPR_FLAG_MAJOR_BY_FORCE;
7290 do_full_mark = TRUE;
7291 }
7292
7294#endif
7295 }
7296
7297 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
7298 reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
7299 }
7300
7301#if GC_ENABLE_INCREMENTAL_MARK
7302 if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
7303 objspace->flags.during_incremental_marking = FALSE;
7304 }
7305 else {
7306 objspace->flags.during_incremental_marking = do_full_mark;
7307 }
7308#endif
7309
7310 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
7311 objspace->flags.immediate_sweep = TRUE;
7312 }
7313
7314 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
7315
7316 gc_report(1, objspace, "gc_start(reason: %d) => %u, %d, %d\n",
7317 reason,
7318 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
7319
7320#if USE_DEBUG_COUNTER
7321 RB_DEBUG_COUNTER_INC(gc_count);
7322
7323 if (reason & GPR_FLAG_MAJOR_MASK) {
7324 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
7325 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
7326 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
7327 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
7328#if RGENGC_ESTIMATE_OLDMALLOC
7329 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
7330#endif
7331 }
7332 else {
7333 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
7334 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
7335 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
7336 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
7337 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
7338 }
7339#endif
7340
7341 objspace->profile.count++;
7342 objspace->profile.latest_gc_info = reason;
7345 gc_prof_setup_new_record(objspace, reason);
7346 gc_reset_malloc_info(objspace);
7347 rb_transient_heap_start_marking(do_full_mark);
7348
7349 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
7351
7352 gc_prof_timer_start(objspace);
7353 {
7354 gc_marks(objspace, do_full_mark);
7355 }
7356 gc_prof_timer_stop(objspace);
7357
7358 gc_exit(objspace, "gc_start");
7359 return TRUE;
7360}
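/*
 * gc_start() above is the single entry point for every collection.  It
 * decides between a minor and a major (full-mark) GC from the `reason'
 * flags, need_major_gc and GC.stress, chooses incremental marking and
 * lazy/immediate sweep, fires the RUBY_INTERNAL_EVENT_GC_START hook, and
 * then runs the marking phase; sweeping continues afterwards, lazily unless
 * immediate_sweep was requested.
 */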
7361
7362static void
7363gc_rest(rb_objspace_t *objspace)
7364{
7365 int marking = is_incremental_marking(objspace);
7366 int sweeping = is_lazy_sweeping(heap_eden);
7367
7368 if (marking || sweeping) {
7369 gc_enter(objspace, "gc_rest");
7370
7371 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
7372
7373 if (is_incremental_marking(objspace)) {
7375 gc_marks_rest(objspace);
7377 }
7378 if (is_lazy_sweeping(heap_eden)) {
7379 gc_sweep_rest(objspace);
7380 }
7381 gc_exit(objspace, "gc_rest");
7382 }
7383}
7384
7385struct objspace_and_reason {
7386 rb_objspace_t *objspace;
7387 int reason;
7388};
7389
7390static void
7391gc_current_status_fill(rb_objspace_t *objspace, char *buff)
7392{
7393 int i = 0;
7394 if (is_marking(objspace)) {
7395 buff[i++] = 'M';
7396#if USE_RGENGC
7397 if (is_full_marking(objspace)) buff[i++] = 'F';
7398#if GC_ENABLE_INCREMENTAL_MARK
7399 if (is_incremental_marking(objspace)) buff[i++] = 'I';
7400#endif
7401#endif
7402 }
7403 else if (is_sweeping(objspace)) {
7404 buff[i++] = 'S';
7405 if (is_lazy_sweeping(heap_eden)) buff[i++] = 'L';
7406 }
7407 else {
7408 buff[i++] = 'N';
7409 }
7410 buff[i] = '\0';
7411}
7412
7413static const char *
7414gc_current_status(rb_objspace_t *objspace)
7415{
7416 static char buff[0x10];
7417 gc_current_status_fill(objspace, buff);
7418 return buff;
7419}
7420
7421#if PRINT_ENTER_EXIT_TICK
7422
7423static tick_t last_exit_tick;
7424static tick_t enter_tick;
7425static int enter_count = 0;
7426static char last_gc_status[0x10];
7427
7428static inline void
7429gc_record(rb_objspace_t *objspace, int direction, const char *event)
7430{
7431 if (direction == 0) { /* enter */
7432 enter_count++;
7433 enter_tick = tick();
7434 gc_current_status_fill(objspace, last_gc_status);
7435 }
7436 else { /* exit */
7437 tick_t exit_tick = tick();
7438 char current_gc_status[0x10];
7439 gc_current_status_fill(objspace, current_gc_status);
7440#if 1
7441 /* [last mutator time] [gc time] [event] */
7442 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
7443 enter_tick - last_exit_tick,
7444 exit_tick - enter_tick,
7445 event,
7446 last_gc_status, current_gc_status,
7447 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
7448 last_exit_tick = exit_tick;
7449#else
7450 /* [enter_tick] [gc time] [event] */
7451 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
7452 enter_tick,
7453 exit_tick - enter_tick,
7454 event,
7455 last_gc_status, current_gc_status,
7456 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
7457#endif
7458 }
7459}
7460#else /* PRINT_ENTER_EXIT_TICK */
7461static inline void
7462gc_record(rb_objspace_t *objspace, int direction, const char *event)
7463{
7464 /* null */
7465}
7466#endif /* PRINT_ENTER_EXIT_TICK */
7467
7468static inline void
7469gc_enter(rb_objspace_t *objspace, const char *event)
7470{
7471 GC_ASSERT(during_gc == 0);
7472 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
7473
7475
7476 during_gc = TRUE;
7477 gc_report(1, objspace, "gc_enter: %s [%s]\n", event, gc_current_status(objspace));
7478 gc_record(objspace, 0, event);
7479 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
7480}
7481
7482static inline void
7483gc_exit(rb_objspace_t *objspace, const char *event)
7484{
7485 GC_ASSERT(during_gc != 0);
7486
7487 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
7488 gc_record(objspace, 1, event);
7489 gc_report(1, objspace, "gc_exit: %s [%s]\n", event, gc_current_status(objspace));
7490 during_gc = FALSE;
7491
7493}
7494
7495static void *
7496gc_with_gvl(void *ptr)
7497{
7498 struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
7499 return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
7500}
7501
7502static int
7503garbage_collect_with_gvl(rb_objspace_t *objspace, int reason)
7504{
7505 if (dont_gc) return TRUE;
7506 if (ruby_thread_has_gvl_p()) {
7507 return garbage_collect(objspace, reason);
7508 }
7509 else {
7510 if (ruby_native_thread_p()) {
7511 struct objspace_and_reason oar;
7512 oar.objspace = objspace;
7513 oar.reason = reason;
7514 return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
7515 }
7516 else {
7517 /* no ruby thread */
7518 fprintf(stderr, "[FATAL] failed to allocate memory\n");
7519 exit(EXIT_FAILURE);
7520 }
7521 }
7522}
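/*
 * garbage_collect_with_gvl() above lets allocation-failure paths trigger a
 * GC even when the caller has released the GVL: a thread that already holds
 * the lock collects directly, a registered native thread re-acquires it via
 * rb_thread_call_with_gvl(), and a thread unknown to the VM cannot run the
 * GC at all, so the allocation failure is reported as fatal.
 */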
7523
7524static VALUE
7525gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep)
7526{
7527 rb_objspace_t *objspace = &rb_objspace;
7528 int reason = GPR_FLAG_FULL_MARK |
7529 GPR_FLAG_IMMEDIATE_MARK |
7530 GPR_FLAG_IMMEDIATE_SWEEP |
7531 GPR_FLAG_METHOD;
7532
7533 if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
7534 if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
7535 if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
7536
7537 garbage_collect(objspace, reason);
7538 gc_finalize_deferred(objspace);
7539
7540 return Qnil;
7541}
7542
7543static int
7544gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
7545{
7546 if (SPECIAL_CONST_P(obj)) {
7547 return FALSE;
7548 }
7549
7550 switch (BUILTIN_TYPE(obj)) {
7551 case T_NONE:
7552 case T_NIL:
7553 case T_MOVED:
7554 case T_ZOMBIE:
7555 return FALSE;
7556 break;
7557 case T_SYMBOL:
7558 if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
7559 return FALSE;
7560 }
7561 /* fall through */
7562 case T_STRING:
7563 case T_OBJECT:
7564 case T_FLOAT:
7565 case T_IMEMO:
7566 case T_ARRAY:
7567 case T_BIGNUM:
7568 case T_ICLASS:
7569 case T_MODULE:
7570 case T_REGEXP:
7571 case T_DATA:
7572 case T_MATCH:
7573 case T_STRUCT:
7574 case T_HASH:
7575 case T_FILE:
7576 case T_COMPLEX:
7577 case T_RATIONAL:
7578 case T_NODE:
7579 case T_CLASS:
7580 if (FL_TEST(obj, FL_FINALIZE)) {
7582 return FALSE;
7583 }
7584 }
7585 return RVALUE_MARKED(obj) && !RVALUE_PINNED(obj);
7586 break;
7587
7588 default:
7589 rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
7590 break;
7591 }
7592
7593 return FALSE;
7594}
7595
7596static VALUE
7597gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, VALUE moved_list)
7598{
7599 int marked;
7600 int wb_unprotected;
7601 int uncollectible;
7602 int marking;
7603 RVALUE *dest = (RVALUE *)free;
7604 RVALUE *src = (RVALUE *)scan;
7605
7606 gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);
7607
7608 GC_ASSERT(BUILTIN_TYPE(scan) != T_NONE);
7610
7611 /* Save off bits for current object. */
7613 wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
7614 uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
7615 marking = RVALUE_MARKING((VALUE)src);
7616
7618
7619 /* Clear bits for eventual T_MOVED */
7624
7625 if (FL_TEST(src, FL_EXIVAR)) {
7627 }
7628
7629 VALUE id;
7630
7631 /* If the source object's object_id has been seen, we need to update
7632 * the object to object id mapping. */
7633 if (st_lookup(objspace->obj_to_id_tbl, (VALUE)src, &id)) {
7634 gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
7635 st_delete(objspace->obj_to_id_tbl, (st_data_t *)&src, 0);
7636 st_insert(objspace->obj_to_id_tbl, (VALUE)dest, id);
7637 }
7638
7639 /* Move the object */
7640 memcpy(dest, src, sizeof(RVALUE));
7641 memset(src, 0, sizeof(RVALUE));
7642
7643 /* Set bits for object in new location */
7644 if (marking) {
7646 }
7647 else {
7649 }
7650
7651 if (marked) {
7653 }
7654 else {
7656 }
7657
7658 if (wb_unprotected) {
7660 }
7661 else {
7663 }
7664
7665 if (uncollectible) {
7667 }
7668 else {
7670 }
7671
7672 /* Assign forwarding address */
7673 src->as.moved.flags = T_MOVED;
7674 src->as.moved.destination = (VALUE)dest;
7675 src->as.moved.next = moved_list;
7677
7678 return (VALUE)src;
7679}
7680
7681struct heap_cursor {
7682 RVALUE *slot;
7683 size_t index;
7684 struct heap_page *page;
7685 rb_objspace_t *objspace;
7686};
7687
7688static void
7689advance_cursor(struct heap_cursor *free, struct heap_page **page_list)
7690{
7691 if (free->slot == free->page->start + free->page->total_slots - 1) {
7692 free->index++;
7693 free->page = page_list[free->index];
7694 free->slot = free->page->start;
7695 }
7696 else {
7697 free->slot++;
7698 }
7699}
7700
7701static void
7702retreat_cursor(struct heap_cursor *scan, struct heap_page **page_list)
7703{
7704 if (scan->slot == scan->page->start) {
7705 scan->index--;
7706 scan->page = page_list[scan->index];
7707 scan->slot = scan->page->start + scan->page->total_slots - 1;
7708 }
7709 else {
7710 scan->slot--;
7711 }
7712}
7713
7714static int
7715not_met(struct heap_cursor *free, struct heap_cursor *scan)
7716{
7717 if (free->index < scan->index)
7718 return 1;
7719
7720 if (free->index > scan->index)
7721 return 0;
7722
7723 return free->slot < scan->slot;
7724}
7725
7726static void
7727init_cursors(rb_objspace_t *objspace, struct heap_cursor *free, struct heap_cursor *scan, struct heap_page **page_list)
7728{
7729 struct heap_page *page;
7730 size_t total_pages = heap_eden->total_pages;
7731 page = page_list[0];
7732
7733 free->index = 0;
7734 free->page = page;
7735 free->slot = page->start;
7736 free->objspace = objspace;
7737
7738 page = page_list[total_pages - 1];
7739 scan->index = total_pages - 1;
7740 scan->page = page;
7741 scan->slot = page->start + page->total_slots - 1;
7742 scan->objspace = objspace;
7743}
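/*
 * Compaction uses two cursors over the eden pages in the order chosen by
 * the page comparator: `free' starts at the first slot of the first page
 * and only advances, `scan' starts at the last slot of the last page and
 * only retreats.  not_met() above is the loop condition of the two-finger
 * algorithm: compaction stops as soon as the cursors meet.
 */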
7744
7745static int
7746count_pinned(struct heap_page *page)
7747{
7748 int pinned = 0;
7749 int i;
7750
7751 for (i = 0; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
7752 pinned += popcount_bits(page->pinned_bits[i]);
7753 }
7754
7755 return pinned;
7756}
7757
7758static int
7759compare_pinned(const void *left, const void *right, void *dummy)
7760{
7761 struct heap_page *left_page;
7762 struct heap_page *right_page;
7763
7764 left_page = *(struct heap_page * const *)left;
7765 right_page = *(struct heap_page * const *)right;
7766
7767 return right_page->pinned_slots - left_page->pinned_slots;
7768}
7769
7770static int
7771compare_free_slots(const void *left, const void *right, void *dummy)
7772{
7773 struct heap_page *left_page;
7774 struct heap_page *right_page;
7775
7776 left_page = *(struct heap_page * const *)left;
7777 right_page = *(struct heap_page * const *)right;
7778
7779 return right_page->free_slots - left_page->free_slots;
7780}
7781
7782typedef int page_compare_func_t(const void *, const void *, void *);
7783
7784static struct heap_page **
7785allocate_page_list(rb_objspace_t *objspace, page_compare_func_t *comparator)
7786{
7787 size_t total_pages = heap_eden->total_pages;
7788 size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
7789 struct heap_page *page = 0, **page_list = malloc(size);
7790 int i = 0;
7791
7792 list_for_each(&heap_eden->pages, page, page_node) {
7793 page_list[i++] = page;
7794 page->pinned_slots = count_pinned(page);
7795 GC_ASSERT(page != NULL);
7796 }
7797 GC_ASSERT(total_pages > 0);
7798 GC_ASSERT((size_t)i == total_pages);
7799
7800 ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), comparator, NULL);
7801
7802 return page_list;
7803}
7804
7805static VALUE
7806gc_compact_heap(rb_objspace_t *objspace, page_compare_func_t *comparator)
7807{
7808 struct heap_cursor free_cursor;
7809 struct heap_cursor scan_cursor;
7810 struct heap_page **page_list;
7811 VALUE moved_list;
7812
7813 moved_list = Qfalse;
7814 memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
7815 memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
7816
7817 page_list = allocate_page_list(objspace, comparator);
7818
7819 init_cursors(objspace, &free_cursor, &scan_cursor, page_list);
7820
7821 /* Two finger algorithm */
7822 while (not_met(&free_cursor, &scan_cursor)) {
7823 /* Free cursor movement */
7824
7825 /* Unpoison free_cursor slot */
7826 void *free_slot_poison = asan_poisoned_object_p((VALUE)free_cursor.slot);
7827 asan_unpoison_object((VALUE)free_cursor.slot, false);
7828
7829 while (BUILTIN_TYPE(free_cursor.slot) != T_NONE && not_met(&free_cursor, &scan_cursor)) {
7830 /* Re-poison slot if it's not the one we want */
7831 if (free_slot_poison) {
7832 GC_ASSERT(BUILTIN_TYPE(free_cursor.slot) == T_NONE);
7833 asan_poison_object((VALUE)free_cursor.slot);
7834 }
7835
7836 advance_cursor(&free_cursor, page_list);
7837
7838 /* Unpoison free_cursor slot */
7839 free_slot_poison = asan_poisoned_object_p((VALUE)free_cursor.slot);
7840 asan_unpoison_object((VALUE)free_cursor.slot, false);
7841 }
7842
7843 /* Unpoison scan_cursor slot */
7844 void *scan_slot_poison = asan_poisoned_object_p((VALUE)scan_cursor.slot);
7845 asan_unpoison_object((VALUE)scan_cursor.slot, false);
7846
7847 /* Scan cursor movement */
7848 objspace->rcompactor.considered_count_table[BUILTIN_TYPE((VALUE)scan_cursor.slot)]++;
7849
7850 while (!gc_is_moveable_obj(objspace, (VALUE)scan_cursor.slot) && not_met(&free_cursor, &scan_cursor)) {
7851
7852 /* Re-poison slot if it's not the one we want */
7853 if (scan_slot_poison) {
7854 GC_ASSERT(BUILTIN_TYPE(scan_cursor.slot) == T_NONE);
7855 asan_poison_object((VALUE)scan_cursor.slot);
7856 }
7857
7858 retreat_cursor(&scan_cursor, page_list);
7859
7860 /* Unpoison scan_cursor slot */
7861 scan_slot_poison = asan_poisoned_object_p((VALUE)scan_cursor.slot);
7862 asan_unpoison_object((VALUE)scan_cursor.slot, false);
7863
7864 objspace->rcompactor.considered_count_table[BUILTIN_TYPE((VALUE)scan_cursor.slot)]++;
7865 }
7866
7867 if (not_met(&free_cursor, &scan_cursor)) {
7868 objspace->rcompactor.moved_count_table[BUILTIN_TYPE((VALUE)scan_cursor.slot)]++;
7869
7870 GC_ASSERT(BUILTIN_TYPE(free_cursor.slot) == T_NONE);
7871 GC_ASSERT(BUILTIN_TYPE(scan_cursor.slot) != T_NONE);
7872 GC_ASSERT(BUILTIN_TYPE(scan_cursor.slot) != T_MOVED);
7873
7874 moved_list = gc_move(objspace, (VALUE)scan_cursor.slot, (VALUE)free_cursor.slot, moved_list);
7875
7876 GC_ASSERT(BUILTIN_TYPE(free_cursor.slot) != T_MOVED);
7877 GC_ASSERT(BUILTIN_TYPE(free_cursor.slot) != T_NONE);
7878 GC_ASSERT(BUILTIN_TYPE(scan_cursor.slot) == T_MOVED);
7879
7880 advance_cursor(&free_cursor, page_list);
7881 retreat_cursor(&scan_cursor, page_list);
7882 }
7883 }
7884 free(page_list);
7885
7886 return moved_list;
7887}
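/*
 * The result of gc_compact_heap() above is that every movable object
 * encountered by the scan cursor has been copied into an empty (T_NONE)
 * slot found by the free cursor, and its old slot now holds a T_MOVED
 * forwarding cell whose `destination' points at the new copy.  The T_MOVED
 * cells are chained through `next' into `moved_list' so
 * gc_compact_after_gc() can free them once every reference is rewritten.
 */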
7888
7889static void
7890gc_ref_update_array(rb_objspace_t * objspace, VALUE v)
7891{
7892 long i, len;
7893
7894 if (FL_TEST(v, ELTS_SHARED))
7895 return;
7896
7897 len = RARRAY_LEN(v);
7898 if (len > 0) {
7899 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR_TRANSIENT(v);
7900 for (i = 0; i < len; i++) {
7901 UPDATE_IF_MOVED(objspace, ptr[i]);
7902 }
7903 }
7904}
7905
7906static void
7907gc_ref_update_object(rb_objspace_t * objspace, VALUE v)
7908{
7909 VALUE *ptr = ROBJECT_IVPTR(v);
7910
7911 if (ptr) {
7912 uint32_t i, len = ROBJECT_NUMIV(v);
7913 for (i = 0; i < len; i++) {
7914 UPDATE_IF_MOVED(objspace, ptr[i]);
7915 }
7916 }
7917}
7918
7919static int
7920hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
7921{
7922 rb_objspace_t *objspace = (rb_objspace_t *)argp;
7923
7924 if (gc_object_moved_p(objspace, (VALUE)*key)) {
7925 *key = rb_gc_location((VALUE)*key);
7926 }
7927
7928 if (gc_object_moved_p(objspace, (VALUE)*value)) {
7929 *value = rb_gc_location((VALUE)*value);
7930 }
7931
7932 return ST_CONTINUE;
7933}
7934
7935static int
7936hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
7937{
7938 rb_objspace_t *objspace;
7939
7940 objspace = (rb_objspace_t *)argp;
7941
7942 if (gc_object_moved_p(objspace, (VALUE)key)) {
7943 return ST_REPLACE;
7944 }
7945
7946 if (gc_object_moved_p(objspace, (VALUE)value)) {
7947 return ST_REPLACE;
7948 }
7949 return ST_CONTINUE;
7950}
7951
7952static int
7953hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
7954{
7955 rb_objspace_t *objspace = (rb_objspace_t *)argp;
7956
7957 if (gc_object_moved_p(objspace, (VALUE)*value)) {
7958 *value = rb_gc_location((VALUE)*value);
7959 }
7960
7961 return ST_CONTINUE;
7962}
7963
7964static int
7965hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
7966{
7967 rb_objspace_t *objspace;
7968
7969 objspace = (rb_objspace_t *)argp;
7970
7971 if (gc_object_moved_p(objspace, (VALUE)value)) {
7972 return ST_REPLACE;
7973 }
7974 return ST_CONTINUE;
7975}
7976
7977static void
7978gc_update_tbl_refs(rb_objspace_t * objspace, st_table *tbl)
7979{
7980 if (!tbl || tbl->num_entries == 0) return;
7981
7982 if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
7983 rb_raise(rb_eRuntimeError, "hash modified during iteration");
7984 }
7985}
7986
7987static void
7988gc_update_table_refs(rb_objspace_t * objspace, st_table *tbl)
7989{
7990 if (!tbl || tbl->num_entries == 0) return;
7991
7992 if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
7993 rb_raise(rb_eRuntimeError, "hash modified during iteration");
7994 }
7995}
7996
7997/* Update MOVED references in an st_table */
7998void
8000{
8001 rb_objspace_t *objspace = &rb_objspace;
8002 gc_update_table_refs(objspace, ptr);
8003}
8004
8005static void
8006gc_ref_update_hash(rb_objspace_t * objspace, VALUE v)
8007{
8008 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
8009}
8010
8011static void
8012gc_ref_update_method_entry(rb_objspace_t *objspace, rb_method_entry_t *me)
8013{
8015
8016 UPDATE_IF_MOVED(objspace, me->owner);
8017 UPDATE_IF_MOVED(objspace, me->defined_class);
8018
8019 if (def) {
8020 switch (def->type) {
8022 if (def->body.iseq.iseqptr) {
8023 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, def->body.iseq.iseqptr);
8024 }
8025 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
8026 break;
8029 UPDATE_IF_MOVED(objspace, def->body.attr.location);
8030 break;
8032 UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
8033 break;
8036 return;
8039 UPDATE_IF_MOVED(objspace, def->body.refined.owner);
8040 break;
8047 break;
8048 }
8049 }
8050}
8051
8052static void
8053gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
8054{
8055 long i;
8056
8057 for (i=0; i<n; i++) {
8058 UPDATE_IF_MOVED(objspace, values[i]);
8059 }
8060}
8061
8062static void
8063gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
8064{
8065 switch (imemo_type(obj)) {
8066 case imemo_env:
8067 {
8068 rb_env_t *env = (rb_env_t *)obj;
8069 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
8071 gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
8072 }
8073 break;
8074 case imemo_cref:
8075 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass);
8076 TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
8077 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
8078 break;
8079 case imemo_svar:
8080 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
8081 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
8082 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
8083 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
8084 break;
8085 case imemo_throw_data:
8086 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
8087 break;
8088 case imemo_ifunc:
8089 break;
8090 case imemo_memo:
8091 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
8092 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
8093 break;
8094 case imemo_ment:
8095 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
8096 break;
8097 case imemo_iseq:
8099 break;
8100 case imemo_ast:
8102 break;
8104 case imemo_tmpbuf:
8105 break;
8106 default:
8107 rb_bug("not reachable %d", imemo_type(obj));
8108 break;
8109 }
8110}
8111
8112static enum rb_id_table_iterator_result
8113check_id_table_move(ID id, VALUE value, void *data)
8114{
8115 rb_objspace_t *objspace = (rb_objspace_t *)data;
8116
8117 if (gc_object_moved_p(objspace, (VALUE)value)) {
8118 return ID_TABLE_REPLACE;
8119 }
8120
8121 return ID_TABLE_CONTINUE;
8122}
8123
8124/* Returns the new location of an object, if it moved. Otherwise returns
8125 * the existing location. */
8126 VALUE
8127 rb_gc_location(VALUE value)
8128{
8129
8130 VALUE destination;
8131
8132 if (!SPECIAL_CONST_P((void *)value)) {
8133 void *poisoned = asan_poisoned_object_p(value);
8134 asan_unpoison_object(value, false);
8135
8136 if (BUILTIN_TYPE(value) == T_MOVED) {
8137 destination = (VALUE)RMOVED(value)->destination;
8138 GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
8139 }
8140 else {
8141 destination = value;
8142 }
8143
8144 /* Re-poison slot if it's not the one we want */
8145 if (poisoned) {
8146 GC_ASSERT(BUILTIN_TYPE(value) == T_NONE);
8147 asan_poison_object(value);
8148 }
8149 }
8150 else {
8151 destination = value;
8152 }
8153
8154 return destination;
8155}
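/*
 * rb_gc_location() above is the lookup used while rewriting references after
 * compaction: given a possibly-stale VALUE it follows a T_MOVED forwarding
 * cell to the object's new address, or returns the value unchanged.  A data
 * type that supports compaction typically calls it from its dcompact
 * callback; an illustrative sketch (hypothetical type and field names):
 *
 *     static void
 *     my_type_compact(void *ptr)
 *     {
 *         struct my_type *p = ptr;
 *         p->obj = rb_gc_location(p->obj);  // chase the forwarding address
 *     }
 */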
8156
8157static enum rb_id_table_iterator_result
8158update_id_table(ID *key, VALUE * value, void *data, int existing)
8159{
8160 rb_objspace_t *objspace = (rb_objspace_t *)data;
8161
8162 if (gc_object_moved_p(objspace, (VALUE)*value)) {
8163 *value = rb_gc_location((VALUE)*value);
8164 }
8165
8166 return ID_TABLE_CONTINUE;
8167}
8168
8169static void
8170update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
8171{
8172 if (tbl) {
8173 rb_id_table_foreach_with_replace(tbl, check_id_table_move, update_id_table, objspace);
8174 }
8175}
8176
8177static enum rb_id_table_iterator_result
8178update_const_table(VALUE value, void *data)
8179{
8180 rb_const_entry_t *ce = (rb_const_entry_t *)value;
8181 rb_objspace_t * objspace = (rb_objspace_t *)data;
8182
8183 if (gc_object_moved_p(objspace, ce->value)) {
8184 ce->value = rb_gc_location(ce->value);
8185 }
8186
8187 if (gc_object_moved_p(objspace, ce->file)) {
8188 ce->file = rb_gc_location(ce->file);
8189 }
8190
8191 return ID_TABLE_CONTINUE;
8192}
8193
8194static void
8195update_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
8196{
8197 if (!tbl) return;
8198 rb_id_table_foreach_values(tbl, update_const_table, objspace);
8199}
8200
8201static void
8202update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
8203{
8204 while (entry) {
8205 UPDATE_IF_MOVED(objspace, entry->klass);
8206 entry = entry->next;
8207 }
8208}
8209
8210static void
8211update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
8212{
8213 UPDATE_IF_MOVED(objspace, ext->origin_);
8214 UPDATE_IF_MOVED(objspace, ext->refined_class);
8215 update_subclass_entries(objspace, ext->subclasses);
8216}
8217
8218static void
8219gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
8220{
8221 RVALUE *any = RANY(obj);
8222
8223 gc_report(4, objspace, "update-refs: %p ->", (void *)obj);
8224
8225 switch (BUILTIN_TYPE(obj)) {
8226 case T_CLASS:
8227 case T_MODULE:
8228 if (RCLASS_SUPER((VALUE)obj)) {
8229 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
8230 }
8231 if (!RCLASS_EXT(obj)) break;
8232 update_m_tbl(objspace, RCLASS_M_TBL(obj));
8233 gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
8234 update_class_ext(objspace, RCLASS_EXT(obj));
8235 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
8236 break;
8237
8238 case T_ICLASS:
8240 update_m_tbl(objspace, RCLASS_M_TBL(obj));
8241 }
8242 if (RCLASS_SUPER((VALUE)obj)) {
8243 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
8244 }
8245 if (!RCLASS_EXT(obj)) break;
8246 if (RCLASS_IV_TBL(obj)) {
8247 gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
8248 }
8249 update_class_ext(objspace, RCLASS_EXT(obj));
8250 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
8251 break;
8252
8253 case T_IMEMO:
8254 gc_ref_update_imemo(objspace, obj);
8255 return;
8256
8257 case T_NIL:
8258 case T_FIXNUM:
8259 case T_NODE:
8260 case T_MOVED:
8261 case T_NONE:
8262 /* These can't move */
8263 return;
8264
8265 case T_ARRAY:
8266 if (FL_TEST(obj, ELTS_SHARED)) {
8267 UPDATE_IF_MOVED(objspace, any->as.array.as.heap.aux.shared_root);
8268 }
8269 else {
8270 gc_ref_update_array(objspace, obj);
8271 }
8272 break;
8273
8274 case T_HASH:
8275 gc_ref_update_hash(objspace, obj);
8276 UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
8277 break;
8278
8279 case T_STRING:
8280 if (STR_SHARED_P(obj)) {
8281 UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);
8282 }
8283 break;
8284
8285 case T_DATA:
8286 /* Call the compaction callback, if it exists */
8287 {
8288 void *const ptr = DATA_PTR(obj);
8289 if (ptr) {
8290 if (RTYPEDDATA_P(obj)) {
8291 RUBY_DATA_FUNC compact_func = any->as.typeddata.type->function.dcompact;
8292 if (compact_func) (*compact_func)(ptr);
8293 }
8294 }
8295 }
8296 break;
8297
8298 case T_OBJECT:
8299 gc_ref_update_object(objspace, obj);
8300 break;
8301
8302 case T_FILE:
8303 if (any->as.file.fptr) {
8304 UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
8308 UPDATE_IF_MOVED(objspace, any->as.file.fptr->encs.ecopts);
8309 UPDATE_IF_MOVED(objspace, any->as.file.fptr->write_lock);
8310 }
8311 break;
8312 case T_REGEXP:
8313 UPDATE_IF_MOVED(objspace, any->as.regexp.src);
8314 break;
8315
8316 case T_SYMBOL:
8317 if (DYNAMIC_SYM_P((VALUE)any)) {
8318 UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
8319 }
8320 break;
8321
8322 case T_FLOAT:
8323 case T_BIGNUM:
8324 break;
8325
8326 case T_MATCH:
8327 UPDATE_IF_MOVED(objspace, any->as.match.regexp);
8328
8329 if (any->as.match.str) {
8330 UPDATE_IF_MOVED(objspace, any->as.match.str);
8331 }
8332 break;
8333
8334 case T_RATIONAL:
8335 UPDATE_IF_MOVED(objspace, any->as.rational.num);
8336 UPDATE_IF_MOVED(objspace, any->as.rational.den);
8337 break;
8338
8339 case T_COMPLEX:
8340 UPDATE_IF_MOVED(objspace, any->as.complex.real);
8341 UPDATE_IF_MOVED(objspace, any->as.complex.imag);
8342
8343 break;
8344
8345 case T_STRUCT:
8346 {
8347 long i, len = RSTRUCT_LEN(obj);
8349
8350 for (i = 0; i < len; i++) {
8351 UPDATE_IF_MOVED(objspace, ptr[i]);
8352 }
8353 }
8354 break;
8355 default:
8356#if GC_DEBUG
8359 rb_bug("unreachable");
8360#endif
8361 break;
8362
8363 }
8364
8365 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
8366
8367 gc_report(4, objspace, "update-refs: %p <-", (void *)obj);
8368}
8369
8370static int
8371gc_ref_update(void *vstart, void *vend, size_t stride, void * data)
8372{
8373 rb_objspace_t * objspace;
8374 struct heap_page *page;
8375 short free_slots = 0;
8376
8377 VALUE v = (VALUE)vstart;
8378 objspace = (rb_objspace_t *)data;
8379 page = GET_HEAP_PAGE(v);
8380 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
8381 page->freelist = NULL;
8382 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
8385
8386 /* For each object on the page */
8387 for (; v != (VALUE)vend; v += stride) {
8388 if (!SPECIAL_CONST_P(v)) {
8389 void *poisoned = asan_poisoned_object_p(v);
8390 asan_unpoison_object(v, false);
8391
8392 switch (BUILTIN_TYPE(v)) {
8393 case T_NONE:
8394 heap_page_add_freeobj(objspace, page, v);
8395 free_slots++;
8396 break;
8397 case T_MOVED:
8398 break;
8399 case T_ZOMBIE:
8400 break;
8401 default:
8402 if (RVALUE_WB_UNPROTECTED(v)) {
8403 page->flags.has_uncollectible_shady_objects = TRUE;
8404 }
8405 if (RVALUE_PAGE_MARKING(page, v)) {
8406 page->flags.has_remembered_objects = TRUE;
8407 }
8408 gc_update_object_references(objspace, v);
8409 }
8410
8411 if (poisoned) {
8412 GC_ASSERT(BUILTIN_TYPE(v) == T_NONE);
8413 asan_poison_object(v);
8414 }
8415 }
8416 }
8417
8418 page->free_slots = free_slots;
8419 return 0;
8420}
8421
8423#define global_symbols ruby_global_symbols
8424
8425static void
8426gc_update_references(rb_objspace_t * objspace)
8427{
8428 rb_execution_context_t *ec = GET_EC();
8429 rb_vm_t *vm = rb_ec_vm_ptr(ec);
8430
8431 objspace_each_objects_without_setup(objspace, gc_ref_update, objspace);
8435 global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
8436 gc_update_tbl_refs(objspace, objspace->obj_to_id_tbl);
8437 gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
8438 gc_update_table_refs(objspace, global_symbols.str_sym);
8439 gc_update_table_refs(objspace, finalizer_table);
8440}
8441
8442static VALUE type_sym(size_t type);
8443
8444static VALUE
8445gc_compact_stats(rb_objspace_t *objspace)
8446{
8447 size_t i;
8448 VALUE h = rb_hash_new();
8449 VALUE considered = rb_hash_new();
8450 VALUE moved = rb_hash_new();
8451
8452 for (i=0; i<T_MASK; i++) {
8453 rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
8454 }
8455
8456 for (i=0; i<T_MASK; i++) {
8457 rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
8458 }
8459
8460 rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
8461 rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
8462
8463 return h;
8464}
8465
8466static void gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier);
8467
8468static void
8469gc_compact(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier)
8470{
8471
8472 objspace->flags.during_compacting = TRUE;
8473 {
8474 /* pin objects referenced by maybe pointers */
8475 garbage_collect(objspace, GPR_DEFAULT_REASON);
8476 /* compact */
8477 gc_compact_after_gc(objspace, use_toward_empty, use_double_pages, use_verifier);
8478 }
8479 objspace->flags.during_compacting = FALSE;
8480}
8481
8482static VALUE
8483rb_gc_compact(rb_execution_context_t *ec, VALUE self)
8484{
8485 rb_objspace_t *objspace = &rb_objspace;
8486 if (dont_gc) return Qnil;
8487
8488 gc_compact(objspace, FALSE, FALSE, FALSE);
8489 return gc_compact_stats(objspace);
8490}
8491
8492static void
8493root_obj_check_moved_i(const char *category, VALUE obj, void *data)
8494{
8495 if (gc_object_moved_p(&rb_objspace, obj)) {
8496 rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));
8497 }
8498}
8499
8500static void
8501reachable_object_check_moved_i(VALUE ref, void *data)
8502{
8503 VALUE parent = (VALUE)data;
8504 if (gc_object_moved_p(&rb_objspace, ref)) {
8505 rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
8506 }
8507}
8508
8509static int
8510heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
8511{
8512 VALUE v = (VALUE)vstart;
8513 for (; v != (VALUE)vend; v += stride) {
8514 if (gc_object_moved_p(&rb_objspace, v)) {
8515 /* Moved object still on the heap, something may have a reference. */
8516 }
8517 else {
8518 void *poisoned = asan_poisoned_object_p(v);
8519 asan_unpoison_object(v, false);
8520
8521 switch (BUILTIN_TYPE(v)) {
8522 case T_NONE:
8523 case T_ZOMBIE:
8524 break;
8525 default:
8526 rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
8527 }
8528
8529 if (poisoned) {
8530 GC_ASSERT(BUILTIN_TYPE(v) == T_NONE);
8531 asan_poison_object(v);
8532 }
8533 }
8534 }
8535
8536 return 0;
8537}
8538
8539static VALUE
8540gc_check_references_for_moved(rb_objspace_t *objspace)
8541{
8542 objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
8543 objspace_each_objects(objspace, heap_check_moved_i, NULL);
8544 return Qnil;
8545}
8546
8547static void
8548gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier)
8549{
8550 if (0) fprintf(stderr, "gc_compact_after_gc: %d,%d,%d\n", use_toward_empty, use_double_pages, use_verifier);
8551
8552 mjit_gc_start_hook(); // prevent MJIT from running while moving pointers related to ISeq
8553
8554 objspace->profile.compact_count++;
8555
8556 if (use_verifier) {
8557 gc_verify_internal_consistency(objspace);
8558 }
8559
8560 if (use_double_pages) {
8561 /* Double heap size */
8562 heap_add_pages(objspace, heap_eden, heap_allocated_pages);
8563 }
8564
8565 VALUE moved_list_head;
8566 VALUE disabled = rb_objspace_gc_disable(objspace);
8567
8568 if (use_toward_empty) {
8569 moved_list_head = gc_compact_heap(objspace, compare_free_slots);
8570 }
8571 else {
8572 moved_list_head = gc_compact_heap(objspace, compare_pinned);
8573 }
8574 heap_eden->freelist = NULL;
8575
8576 gc_update_references(objspace);
8577 if (!RTEST(disabled)) rb_objspace_gc_enable(objspace);
8578
8579 if (use_verifier) {
8580 gc_check_references_for_moved(objspace);
8581 }
8582
8585 heap_eden->free_pages = NULL;
8586 heap_eden->using_page = NULL;
8587
8588 /* For each moved slot */
8589 while (moved_list_head) {
8590 VALUE next_moved;
8591 struct heap_page *page;
8592
8593 page = GET_HEAP_PAGE(moved_list_head);
8594 next_moved = RMOVED(moved_list_head)->next;
8595
8596 /* clear the memory for that moved slot */
8597 RMOVED(moved_list_head)->flags = 0;
8598 RMOVED(moved_list_head)->destination = 0;
8599 RMOVED(moved_list_head)->next = 0;
8600 page->free_slots++;
8601 heap_page_add_freeobj(objspace, page, moved_list_head);
8602
8603 if (page->free_slots == page->total_slots && heap_pages_freeable_pages > 0) {
8604 heap_pages_freeable_pages--;
8605 heap_unlink_page(objspace, heap_eden, page);
8606 heap_add_page(objspace, heap_tomb, page);
8607 }
8608 objspace->profile.total_freed_objects++;
8609 moved_list_head = next_moved;
8610 }
8611
8612 /* Add any eden pages with free slots back to the free pages list */
8613 struct heap_page *page = NULL;
8614 list_for_each(&heap_eden->pages, page, page_node) {
8615 if (page->free_slots > 0) {
8616 heap_add_freepage(heap_eden, page);
8617 } else {
8618 page->free_next = NULL;
8619 }
8620 }
8621
8622 /* Set up "using_page" if we have any pages with free slots */
8623 if (heap_eden->free_pages) {
8624 heap_eden->using_page = heap_eden->free_pages;
8625 heap_eden->free_pages = heap_eden->free_pages->free_next;
8626 }
8627
8628 if (use_verifier) {
8629 gc_verify_internal_consistency(objspace);
8630 }
8631
8632 mjit_gc_exit_hook(); // unlock MJIT here, because `rb_gc()` calls `mjit_gc_start_hook()` again.
8633}
8634
8635/*
8636 * call-seq:
8637 * GC.verify_compaction_references(toward: nil, double_heap: nil) -> hash
8638 *
8639 * Verify compaction reference consistency.
8640 *
8641 * This method is implementation specific. During compaction, objects that
8642 * were moved are replaced with T_MOVED objects. No object should have a
8643 * reference to a T_MOVED object after compaction.
8644 *
8645 * This function doubles the heap to ensure room to move all objects,
8646 * compacts the heap to make sure everything moves, updates all references,
8647 * then performs a full GC. If any object contains a reference to a T_MOVED
8648 * object, that object should be pushed on the mark stack, and will
8649 * cause a SEGV.
8650 */
8651static VALUE
8652gc_verify_compaction_references(int argc, VALUE *argv, VALUE mod)
8653{
8654 rb_objspace_t *objspace = &rb_objspace;
8655 int use_toward_empty = FALSE;
8656 int use_double_pages = FALSE;
8657
8658 if (dont_gc) return Qnil;
8659
8660 VALUE opt = Qnil;
8661 static ID keyword_ids[2];
8662 VALUE kwvals[2];
8663
8664 kwvals[1] = Qtrue;
8665
8666 rb_scan_args(argc, argv, "0:", &opt);
8667
8668 if (!NIL_P(opt)) {
8669 if (!keyword_ids[0]) {
8670 keyword_ids[0] = rb_intern("toward");
8671 keyword_ids[1] = rb_intern("double_heap");
8672 }
8673
8674 rb_get_kwargs(opt, keyword_ids, 0, 2, kwvals);
8675 if (kwvals[0] != Qundef && rb_intern("empty") == rb_sym2id(kwvals[0])) {
8676 use_toward_empty = TRUE;
8677 }
8678 if (kwvals[1] != Qundef && RTEST(kwvals[1])) {
8679 use_double_pages = TRUE;
8680 }
8681 }
8682
8683 gc_compact(objspace, use_toward_empty, use_double_pages, TRUE);
8684 return gc_compact_stats(objspace);
8685}
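/*
 * From Ruby, the verifier above is reached as
 * GC.verify_compaction_references(toward: :empty, double_heap: true); both
 * keywords are optional.  `toward: :empty' compacts towards the emptiest
 * pages (compare_free_slots), and `double_heap: true' grows the heap first
 * so every movable object can actually move, which makes dangling
 * references to T_MOVED objects far more likely to be caught.
 */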
8686
8687VALUE
8689{
8690 rb_gc();
8691 return Qnil;
8692}
8693
8694void
8696{
8697 rb_objspace_t *objspace = &rb_objspace;
8698 int reason = GPR_DEFAULT_REASON;
8699 garbage_collect(objspace, reason);
8700}
8701
8702int
8704{
8705 rb_objspace_t *objspace = &rb_objspace;
8706 return during_gc;
8707}
8708
8709#if RGENGC_PROFILE >= 2
8710
8711static const char *type_name(int type, VALUE obj);
8712
8713static void
8714gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
8715{
8717 int i;
8718 for (i=0; i<T_MASK; i++) {
8719 const char *type = type_name(i, 0);
8721 }
8722 rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
8723}
8724#endif
8725
8726size_t
8728{
8729 return rb_objspace.profile.count;
8730}
8731
8732static VALUE
8733gc_count(rb_execution_context_t *ec, VALUE self)
8734{
8735 return SIZET2NUM(rb_gc_count());
8736}
8737
8738static VALUE
8739gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const int orig_flags)
8740{
8741 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
8742 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
8743#if RGENGC_ESTIMATE_OLDMALLOC
8744 static VALUE sym_oldmalloc;
8745#endif
8746 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
8747 static VALUE sym_none, sym_marking, sym_sweeping;
8748 VALUE hash = Qnil, key = Qnil;
8749 VALUE major_by;
8750 VALUE flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
8751
8752 if (SYMBOL_P(hash_or_key)) {
8753 key = hash_or_key;
8754 }
8755 else if (RB_TYPE_P(hash_or_key, T_HASH)) {
8756 hash = hash_or_key;
8757 }
8758 else {
8759 rb_raise(rb_eTypeError, "non-hash or symbol given");
8760 }
8761
8762 if (sym_major_by == Qnil) {
8763#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
8764 S(major_by);
8765 S(gc_by);
8766 S(immediate_sweep);
8767 S(have_finalizer);
8768 S(state);
8769
8770 S(stress);
8771 S(nofree);
8772 S(oldgen);
8773 S(shady);
8774 S(force);
8775#if RGENGC_ESTIMATE_OLDMALLOC
8776 S(oldmalloc);
8777#endif
8778 S(newobj);
8779 S(malloc);
8780 S(method);
8781 S(capi);
8782
8783 S(none);
8784 S(marking);
8785 S(sweeping);
8786#undef S
8787 }
8788
8789#define SET(name, attr) \
8790 if (key == sym_##name) \
8791 return (attr); \
8792 else if (hash != Qnil) \
8793 rb_hash_aset(hash, sym_##name, (attr));
8794
8795 major_by =
8796 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
8797 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
8798 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
8799 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
8800#if RGENGC_ESTIMATE_OLDMALLOC
8801 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
8802#endif
8803 Qnil;
8804 SET(major_by, major_by);
8805
8806 SET(gc_by,
8807 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
8808 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
8809 (flags & GPR_FLAG_METHOD) ? sym_method :
8810 (flags & GPR_FLAG_CAPI) ? sym_capi :
8811 (flags & GPR_FLAG_STRESS) ? sym_stress :
8812 Qnil
8813 );
8814
8815 SET(have_finalizer, (flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
8816 SET(immediate_sweep, (flags & GPR_FLAG_IMMEDIATE_SWEEP) ? Qtrue : Qfalse);
8817
8818 if (orig_flags == 0) {
8819 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
8820 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
8821 }
8822#undef SET
8823
8824 if (!NIL_P(key)) {/* matched key should return above */
8825 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
8826 }
8827
8828 return hash;
8829}
8830
8831VALUE
8833{
8834 rb_objspace_t *objspace = &rb_objspace;
8835 return gc_info_decode(objspace, key, 0);
8836}
8837
8838static VALUE
8839gc_latest_gc_info(rb_execution_context_t *ec, VALUE self, VALUE arg)
8840{
8841 rb_objspace_t *objspace = &rb_objspace;
8842
8843 if (NIL_P(arg)) {
8844 arg = rb_hash_new();
8845 }
8846 else if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
8847 rb_raise(rb_eTypeError, "non-hash or symbol given");
8848 }
8849
8850 return gc_info_decode(objspace, arg, 0);
8851}
8852
8871#if USE_RGENGC
8879#if RGENGC_ESTIMATE_OLDMALLOC
8882#endif
8883#if RGENGC_PROFILE
8884 gc_stat_sym_total_generated_normal_object_count,
8885 gc_stat_sym_total_generated_shady_object_count,
8886 gc_stat_sym_total_shade_operation_count,
8887 gc_stat_sym_total_promoted_count,
8888 gc_stat_sym_total_remembered_normal_object_count,
8889 gc_stat_sym_total_remembered_shady_object_count,
8890#endif
8891#endif
8894
8905#if USE_RGENGC
8910#endif
8915#if RGENGC_ESTIMATE_OLDMALLOC
8918#endif
8921
8922static VALUE gc_stat_symbols[gc_stat_sym_last];
8923static VALUE gc_stat_compat_symbols[gc_stat_compat_sym_last];
8924static VALUE gc_stat_compat_table;
8925
8926static void
8927setup_gc_stat_symbols(void)
8928{
8929 if (gc_stat_symbols[0] == 0) {
8930#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
8931 S(count);
8932 S(heap_allocated_pages);
8933 S(heap_sorted_length);
8934 S(heap_allocatable_pages);
8935 S(heap_available_slots);
8936 S(heap_live_slots);
8937 S(heap_free_slots);
8938 S(heap_final_slots);
8939 S(heap_marked_slots);
8940 S(heap_eden_pages);
8941 S(heap_tomb_pages);
8942 S(total_allocated_pages);
8943 S(total_freed_pages);
8944 S(total_allocated_objects);
8945 S(total_freed_objects);
8946 S(malloc_increase_bytes);
8947 S(malloc_increase_bytes_limit);
8948#if USE_RGENGC
8949 S(minor_gc_count);
8950 S(major_gc_count);
8951 S(compact_count);
8952 S(remembered_wb_unprotected_objects);
8953 S(remembered_wb_unprotected_objects_limit);
8954 S(old_objects);
8955 S(old_objects_limit);
8956#if RGENGC_ESTIMATE_OLDMALLOC
8957 S(oldmalloc_increase_bytes);
8958 S(oldmalloc_increase_bytes_limit);
8959#endif
8960#if RGENGC_PROFILE
8961 S(total_generated_normal_object_count);
8962 S(total_generated_shady_object_count);
8963 S(total_shade_operation_count);
8964 S(total_promoted_count);
8965 S(total_remembered_normal_object_count);
8966 S(total_remembered_shady_object_count);
8967#endif /* RGENGC_PROFILE */
8968#endif /* USE_RGENGC */
8969#undef S
8970#define S(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] = ID2SYM(rb_intern_const(#s))
8971 S(gc_stat_heap_used);
8972 S(heap_eden_page_length);
8973 S(heap_tomb_page_length);
8974 S(heap_increment);
8975 S(heap_length);
8976 S(heap_live_slot);
8977 S(heap_free_slot);
8978 S(heap_final_slot);
8979 S(heap_swept_slot);
8980#if USE_RGEGC
8981 S(remembered_shady_object);
8982 S(remembered_shady_object_limit);
8983 S(old_object);
8984 S(old_object_limit);
8985#endif
8986 S(total_allocated_object);
8987 S(total_freed_object);
8988 S(malloc_increase);
8989 S(malloc_limit);
8990#if RGENGC_ESTIMATE_OLDMALLOC
8991 S(oldmalloc_increase);
8992 S(oldmalloc_limit);
8993#endif
8994#undef S
8995
8996 {
8997 VALUE table = gc_stat_compat_table = rb_hash_new();
8998 rb_obj_hide(table);
9000
9001 /* compatibility layer for Ruby 2.1 */
9002#define OLD_SYM(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s]
9003#define NEW_SYM(s) gc_stat_symbols[gc_stat_sym_##s]
9004 rb_hash_aset(table, OLD_SYM(gc_stat_heap_used), NEW_SYM(heap_allocated_pages));
9005 rb_hash_aset(table, OLD_SYM(heap_eden_page_length), NEW_SYM(heap_eden_pages));
9006 rb_hash_aset(table, OLD_SYM(heap_tomb_page_length), NEW_SYM(heap_tomb_pages));
9007 rb_hash_aset(table, OLD_SYM(heap_increment), NEW_SYM(heap_allocatable_pages));
9008 rb_hash_aset(table, OLD_SYM(heap_length), NEW_SYM(heap_sorted_length));
9009 rb_hash_aset(table, OLD_SYM(heap_live_slot), NEW_SYM(heap_live_slots));
9010 rb_hash_aset(table, OLD_SYM(heap_free_slot), NEW_SYM(heap_free_slots));
9011 rb_hash_aset(table, OLD_SYM(heap_final_slot), NEW_SYM(heap_final_slots));
9012#if USE_RGENGC
9013 rb_hash_aset(table, OLD_SYM(remembered_shady_object), NEW_SYM(remembered_wb_unprotected_objects));
9014 rb_hash_aset(table, OLD_SYM(remembered_shady_object_limit), NEW_SYM(remembered_wb_unprotected_objects_limit));
9015 rb_hash_aset(table, OLD_SYM(old_object), NEW_SYM(old_objects));
9016 rb_hash_aset(table, OLD_SYM(old_object_limit), NEW_SYM(old_objects_limit));
9017#endif
9018 rb_hash_aset(table, OLD_SYM(total_allocated_object), NEW_SYM(total_allocated_objects));
9019 rb_hash_aset(table, OLD_SYM(total_freed_object), NEW_SYM(total_freed_objects));
9020 rb_hash_aset(table, OLD_SYM(malloc_increase), NEW_SYM(malloc_increase_bytes));
9021 rb_hash_aset(table, OLD_SYM(malloc_limit), NEW_SYM(malloc_increase_bytes_limit));
9022#if RGENGC_ESTIMATE_OLDMALLOC
9023 rb_hash_aset(table, OLD_SYM(oldmalloc_increase), NEW_SYM(oldmalloc_increase_bytes));
9024 rb_hash_aset(table, OLD_SYM(oldmalloc_limit), NEW_SYM(oldmalloc_increase_bytes_limit));
9025#endif
9026#undef OLD_SYM
9027#undef NEW_SYM
9028 rb_obj_freeze(table);
9029 }
9030 }
9031}
9032
9033static VALUE
9034compat_key(VALUE key)
9035{
9036 VALUE new_key = rb_hash_lookup(gc_stat_compat_table, key);
9037
9038 if (!NIL_P(new_key)) {
9039 static int warned = 0;
9040 if (warned == 0) {
9041 rb_warn("GC.stat keys were changed from Ruby 2.1. "
9042 "In this case, you refer to obsolete `%"PRIsVALUE"' (new key is `%"PRIsVALUE"'). "
9043 "Please check <https://bugs.ruby-lang.org/issues/9924> for more information.",
9044 key, new_key);
9045 warned = 1;
9046 }
9047 }
9048
9049 return new_key;
9050}
9051
9052static VALUE
9053default_proc_for_compat_func(RB_BLOCK_CALL_FUNC_ARGLIST(hash, _))
9054{
9055 VALUE key, new_key;
9056
9057 Check_Type(hash, T_HASH);
9058 rb_check_arity(argc, 2, 2);
9059 key = argv[1];
9060
9061 if ((new_key = compat_key(key)) != Qnil) {
9062 return rb_hash_lookup(hash, new_key);
9063 }
9064
9065 return Qnil;
9066}
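
/* Illustrative sketch (not part of gc.c): the default proc above is installed
 * by gc_stat_internal() below, and it routes obsolete Ruby 2.1-era keys
 * through compat_key(), so a filled stats Hash keeps answering for the old
 * names. example_read_old_key() is a hypothetical helper. */
static VALUE
example_read_old_key(void)
{
    VALUE stats = rb_hash_new();
    gc_stat_internal(stats);   /* fills the new-style keys, installs the default proc */
    /* :heap_live_slot is absent, so the default proc maps it to :heap_live_slots */
    return rb_hash_aref(stats, ID2SYM(rb_intern("heap_live_slot")));
}
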
9067
9068static size_t
9069gc_stat_internal(VALUE hash_or_sym)
9070{
9071 rb_objspace_t *objspace = &rb_objspace;
9072 VALUE hash = Qnil, key = Qnil;
9073
9074 setup_gc_stat_symbols();
9075
9076 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
9077 hash = hash_or_sym;
9078
9079 if (NIL_P(RHASH_IFNONE(hash))) {
9080 static VALUE default_proc_for_compat = 0;
9081 if (default_proc_for_compat == 0) { /* TODO: it should be */
9082 default_proc_for_compat = rb_proc_new(default_proc_for_compat_func, Qnil);
9083 rb_gc_register_mark_object(default_proc_for_compat);
9084 }
9085 rb_hash_set_default_proc(hash, default_proc_for_compat);
9086 }
9087 }
9088 else if (SYMBOL_P(hash_or_sym)) {
9089 key = hash_or_sym;
9090 }
9091 else {
9092 rb_raise(rb_eTypeError, "non-hash or symbol argument");
9093 }
9094
9095#define SET(name, attr) \
9096 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
9097 return attr; \
9098 else if (hash != Qnil) \
9099 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
9100
9101 again:
9102 SET(count, objspace->profile.count);
9103
9104 /* implementation dependent counters */
9105 SET(heap_allocated_pages, heap_allocated_pages);
9106 SET(heap_sorted_length, heap_pages_sorted_length);
9107 SET(heap_allocatable_pages, heap_allocatable_pages);
9108 SET(heap_available_slots, objspace_available_slots(objspace));
9109 SET(heap_live_slots, objspace_live_slots(objspace));
9110 SET(heap_free_slots, objspace_free_slots(objspace));
9111 SET(heap_final_slots, heap_pages_final_slots);
9112 SET(heap_marked_slots, objspace->marked_slots);
9113 SET(heap_eden_pages, heap_eden->total_pages);
9114 SET(heap_tomb_pages, heap_tomb->total_pages);
9115 SET(total_allocated_pages, objspace->profile.total_allocated_pages);
9116 SET(total_freed_pages, objspace->profile.total_freed_pages);
9117 SET(total_allocated_objects, objspace->total_allocated_objects);
9118 SET(total_freed_objects, objspace->profile.total_freed_objects);
9119 SET(malloc_increase_bytes, malloc_increase);
9120 SET(malloc_increase_bytes_limit, malloc_limit);
9121#if USE_RGENGC
9122 SET(minor_gc_count, objspace->profile.minor_gc_count);
9123 SET(major_gc_count, objspace->profile.major_gc_count);
9124 SET(compact_count, objspace->profile.compact_count);
9125 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
9126 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
9127 SET(old_objects, objspace->rgengc.old_objects);
9128 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
9129#if RGENGC_ESTIMATE_OLDMALLOC
9130 SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
9131 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
9132#endif
9133
9134#if RGENGC_PROFILE
9135 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
9136 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
9137 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
9138 SET(total_promoted_count, objspace->profile.total_promoted_count);
9139 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
9140 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
9141#endif /* RGENGC_PROFILE */
9142#endif /* USE_RGENGC */
9143#undef SET
9144
9145 if (!NIL_P(key)) { /* matched key should return above */
9146 VALUE new_key;
9147 if ((new_key = compat_key(key)) != Qnil) {
9148 key = new_key;
9149 goto again;
9150 }
9151 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
9152 }
9153
9154#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
9155 if (hash != Qnil) {
9156 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
9157 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
9158 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
9159 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
9160 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
9161 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
9162 }
9163#endif
9164
9165 return 0;
9166}
9167
9168static VALUE
9169gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
9170{
9171 if (NIL_P(arg)) {
9172 arg = rb_hash_new();
9173 }
9174 else if (SYMBOL_P(arg)) {
9175 size_t value = gc_stat_internal(arg);
9176 return SIZET2NUM(value);
9177 }
9178 else if (RB_TYPE_P(arg, T_HASH)) {
9179 // ok
9180 }
9181 else {
9182 rb_raise(rb_eTypeError, "non-hash or symbol given");
9183 }
9184
9185 gc_stat_internal(arg);
9186 return arg;
9187}
9188
9189size_t
9190rb_gc_stat(VALUE key)
9191{
9192 if (SYMBOL_P(key)) {
9193 size_t value = gc_stat_internal(key);
9194 return value;
9195 }
9196 else {
9197 gc_stat_internal(key);
9198 return 0;
9199 }
9200}
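
/* Usage sketch (illustrative, hypothetical helper): from C, rb_gc_stat() with
 * a Symbol returns the single counter directly; with a Hash it fills every
 * key, exactly as gc_stat_internal() above dispatches. */
static void
example_log_gc_count(void)
{
    size_t count = rb_gc_stat(ID2SYM(rb_intern("count")));
    fprintf(stderr, "GC runs so far: %"PRIuSIZE"\n", count);
}
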
9201
9202static VALUE
9203gc_stress_get(rb_execution_context_t *ec, VALUE self)
9204{
9205 rb_objspace_t *objspace = &rb_objspace;
9206 return ruby_gc_stress_mode;
9207}
9208
9209static void
9210gc_stress_set(rb_objspace_t *objspace, VALUE flag)
9211{
9212 objspace->flags.gc_stressful = RTEST(flag);
9213 objspace->gc_stress_mode = flag;
9214}
9215
9216static VALUE
9217gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
9218{
9219 rb_objspace_t *objspace = &rb_objspace;
9220 gc_stress_set(objspace, flag);
9221 return flag;
9222}
9223
9224VALUE
9225rb_gc_enable(void)
9226{
9227 rb_objspace_t *objspace = &rb_objspace;
9228 return rb_objspace_gc_enable(objspace);
9229}
9230
9231VALUE
9232rb_objspace_gc_enable(rb_objspace_t *objspace)
9233{
9234 int old = dont_gc;
9235
9236 dont_gc = FALSE;
9237 return old ? Qtrue : Qfalse;
9238}
9239
9240static VALUE
9241gc_enable(rb_execution_context_t *ec, VALUE _)
9242{
9243 return rb_gc_enable();
9244}
9245
9246VALUE
9247rb_gc_disable_no_rest(void)
9248{
9249 rb_objspace_t *objspace = &rb_objspace;
9250 return gc_disable_no_rest(objspace);
9251}
9252
9253static VALUE
9254gc_disable_no_rest(rb_objspace_t *objspace)
9255{
9256 int old = dont_gc;
9257 dont_gc = TRUE;
9258 return old ? Qtrue : Qfalse;
9259}
9260
9261VALUE
9262rb_gc_disable(void)
9263{
9264 rb_objspace_t *objspace = &rb_objspace;
9265 return rb_objspace_gc_disable(objspace);
9266}
9267
9268VALUE
9269rb_objspace_gc_disable(rb_objspace_t *objspace)
9270{
9271 gc_rest(objspace);
9272 return gc_disable_no_rest(objspace);
9273}
9274
9275static VALUE
9276gc_disable(rb_execution_context_t *ec, VALUE _)
9277{
9278 return rb_gc_disable();
9279}
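
/* Usage sketch (illustrative): rb_gc_disable() returns the previous state
 * (Qtrue if GC was already disabled), so callers can restore it instead of
 * unconditionally re-enabling. example_with_gc_disabled() is hypothetical;
 * real code should prefer rb_ensure() so the state is restored on raise. */
static VALUE
example_with_gc_disabled(VALUE (*body)(VALUE), VALUE arg)
{
    VALUE was_disabled = rb_gc_disable();
    VALUE result = body(arg);
    if (was_disabled == Qfalse) rb_gc_enable();
    return result;
}
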
9280
9281static int
9282get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
9283{
9284 char *ptr = getenv(name);
9285 ssize_t val;
9286
9287 if (ptr != NULL && *ptr) {
9288 size_t unit = 0;
9289 char *end;
9290#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
9291 val = strtoll(ptr, &end, 0);
9292#else
9293 val = strtol(ptr, &end, 0);
9294#endif
9295 switch (*end) {
9296 case 'k': case 'K':
9297 unit = 1024;
9298 ++end;
9299 break;
9300 case 'm': case 'M':
9301 unit = 1024*1024;
9302 ++end;
9303 break;
9304 case 'g': case 'G':
9305 unit = 1024*1024*1024;
9306 ++end;
9307 break;
9308 }
9309 while (*end && isspace((unsigned char)*end)) end++;
9310 if (*end) {
9311 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
9312 return 0;
9313 }
9314 if (unit > 0) {
9315 if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
9316 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
9317 return 0;
9318 }
9319 val *= unit;
9320 }
9321 if (val > 0 && (size_t)val > lower_bound) {
9322 if (RTEST(ruby_verbose)) {
9323 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
9324 }
9325 *default_value = (size_t)val;
9326 return 1;
9327 }
9328 else {
9329 if (RTEST(ruby_verbose)) {
9330 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
9331 name, val, *default_value, lower_bound);
9332 }
9333 return 0;
9334 }
9335 }
9336 return 0;
9337}
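
/* Usage sketch (illustrative, hypothetical helper): the suffix handling above
 * means a setting such as RUBY_GC_MALLOC_LIMIT=32m parses to 32 * 1024 * 1024.
 * This only demonstrates the parsing contract of get_envparam_size(). */
static void
example_parse_malloc_limit(void)
{
    size_t malloc_limit = 16 * 1024 * 1024;   /* pretend default */
    if (get_envparam_size("RUBY_GC_MALLOC_LIMIT", &malloc_limit, 0)) {
        /* e.g. RUBY_GC_MALLOC_LIMIT=32m  ->  malloc_limit == 33554432 */
        fprintf(stderr, "malloc limit set to %"PRIuSIZE" bytes\n", malloc_limit);
    }
}
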
9338
9339static int
9340get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
9341{
9342 char *ptr = getenv(name);
9343 double val;
9344
9345 if (ptr != NULL && *ptr) {
9346 char *end;
9347 val = strtod(ptr, &end);
9348 if (!*ptr || *end) {
9349 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
9350 return 0;
9351 }
9352
9353 if (accept_zero && val == 0.0) {
9354 goto accept;
9355 }
9356 else if (val <= lower_bound) {
9357 if (RTEST(ruby_verbose)) {
9358 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
9359 name, val, *default_value, lower_bound);
9360 }
9361 }
9362 else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
9363 val > upper_bound) {
9364 if (RTEST(ruby_verbose)) {
9365 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
9366 name, val, *default_value, upper_bound);
9367 }
9368 }
9369 else {
9370 accept:
9371 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
9372 *default_value = val;
9373 return 1;
9374 }
9375 }
9376 return 0;
9377}
9378
9379static void
9380gc_set_initial_pages(void)
9381{
9382 size_t min_pages;
9383 rb_objspace_t *objspace = &rb_objspace;
9384
9385 min_pages = gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT;
9386 if (min_pages > heap_eden->total_pages) {
9387 heap_add_pages(objspace, heap_eden, min_pages - heap_eden->total_pages);
9388 }
9389}
9390
9391/*
9392 * GC tuning environment variables
9393 *
9394 * * RUBY_GC_HEAP_INIT_SLOTS
9395 * - Initial allocation slots.
9396 * * RUBY_GC_HEAP_FREE_SLOTS
9397 * - Prepare at least this amount of slots after GC.
9398 * - Allocate slots if there are not enough slots.
9399 * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
9400 * - Allocate slots by this factor.
9401 * - (next slots number) = (current slots number) * (this factor)
9402 * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
9403 * - Allocation rate is limited to this number of slots.
9404 * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
9405 * - Allocate additional pages when the number of free slots is
9406 * lower than the value (total_slots * (this ratio)).
9407 * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
9408 * - Allocate slots to satisfy this formula:
9409 * free_slots = total_slots * goal_ratio
9410 * - In other words, prepare (total_slots * goal_ratio) free slots.
9411 * - if this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
9412 * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
9413 * - Allow to free pages when the number of free slots is
9414 * greater than the value (total_slots * (this ratio)).
9415 * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
9416 * - Do full GC when the number of old objects is more than R * N
9417 * where R is this factor and
9418 * N is the number of old objects just after last full GC.
9419 *
9420 * * obsolete
9421 * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
9422 * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
9423 *
9424 * * RUBY_GC_MALLOC_LIMIT
9425 * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
9426 * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
9427 *
9428 * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
9429 * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
9430 * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
9431 */
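
/* Illustrative sketch (not part of gc.c): how the free-slots ratio knobs above
 * interact when sizing the heap after a sweep. `total_slots` and
 * `swept_free_slots` are hypothetical inputs; this mirrors the description of
 * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO and RUBY_GC_HEAP_GROWTH_FACTOR, not the
 * exact internal code. */
static size_t
example_slots_to_add(size_t total_slots, size_t swept_free_slots,
                     double goal_ratio, double growth_factor)
{
    if (goal_ratio == 0.0) {
        /* goal ratio disabled: grow purely by the growth factor */
        return (size_t)(total_slots * (growth_factor - 1.0));
    }
    else {
        /* aim for free_slots == total_slots * goal_ratio */
        size_t want_free = (size_t)(total_slots * goal_ratio);
        return want_free > swept_free_slots ? want_free - swept_free_slots : 0;
    }
}
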
9432
9433void
9434ruby_gc_set_params(void)
9435{
9436 /* RUBY_GC_HEAP_FREE_SLOTS */
9437 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
9438 /* ok */
9439 }
9440 else if (get_envparam_size("RUBY_FREE_MIN", &gc_params.heap_free_slots, 0)) {
9441 rb_warn("RUBY_FREE_MIN is obsolete. Use RUBY_GC_HEAP_FREE_SLOTS instead.");
9442 }
9443
9444 /* RUBY_GC_HEAP_INIT_SLOTS */
9445 if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
9446 gc_set_initial_pages();
9447 }
9448 else if (get_envparam_size("RUBY_HEAP_MIN_SLOTS", &gc_params.heap_init_slots, 0)) {
9449 rb_warn("RUBY_HEAP_MIN_SLOTS is obsolete. Use RUBY_GC_HEAP_INIT_SLOTS instead.");
9450 gc_set_initial_pages();
9451 }
9452
9453 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
9454 get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
9455 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
9456 0.0, 1.0, FALSE);
9457 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
9458 gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
9459 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
9460 gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
9461 get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
9462
9463 get_envparam_size ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
9464 get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
9465 if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
9466 gc_params.malloc_limit_max = SIZE_MAX;
9467 }
9468 get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
9469
9470#if RGENGC_ESTIMATE_OLDMALLOC
9471 if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
9472 rb_objspace_t *objspace = &rb_objspace;
9473 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
9474 }
9475 get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
9476 get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
9477#endif
9478}
9479
9480void
9481rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
9482{
9483 rb_objspace_t *objspace = &rb_objspace;
9484
9485 if (is_markable_object(objspace, obj)) {
9486 struct mark_func_data_struct mfd;
9487 mfd.mark_func = func;
9488 mfd.data = data;
9489 PUSH_MARK_FUNC_DATA(&mfd);
9490 gc_mark_children(objspace, obj);
9491 POP_MARK_FUNC_DATA();
9492 }
9493}
9494
9495struct root_objects_data {
9496 const char *category;
9497 void (*func)(const char *category, VALUE, void *);
9498 void *data;
9499};
9500
9501static void
9502root_objects_from(VALUE obj, void *ptr)
9503{
9504 const struct root_objects_data *data = (struct root_objects_data *)ptr;
9505 (*data->func)(data->category, obj, data->data);
9506}
9507
9508void
9509rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
9510{
9511 rb_objspace_t *objspace = &rb_objspace;
9512 objspace_reachable_objects_from_root(objspace, func, passing_data);
9513}
9514
9515static void
9516objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
9517{
9518 struct root_objects_data data;
9519 struct mark_func_data_struct mfd;
9520
9521 data.func = func;
9522 data.data = passing_data;
9523
9524 mfd.mark_func = root_objects_from;
9525 mfd.data = &data;
9526
9527 PUSH_MARK_FUNC_DATA(&mfd);
9528 gc_mark_roots(objspace, &data.category);
9529 POP_MARK_FUNC_DATA();
9530}
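
/* Usage sketch (illustrative, hypothetical helpers): counting the objects
 * directly reachable from `obj` with the callback API above. */
static void
example_count_child(VALUE child, void *data)
{
    (*(size_t *)data)++;
}

static size_t
example_count_children(VALUE obj)
{
    size_t n = 0;
    rb_objspace_reachable_objects_from(obj, example_count_child, &n);
    return n;
}
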
9531
9532/*
9533 ------------------------ Extended allocator ------------------------
9534*/
9535
9536struct gc_raise_tag {
9537 VALUE exc;
9538 const char *fmt;
9539 va_list *ap;
9540};
9541
9542static void *
9543gc_vraise(void *ptr)
9544{
9545 struct gc_raise_tag *argv = ptr;
9546 rb_vraise(argv->exc, argv->fmt, *argv->ap);
9547 UNREACHABLE_RETURN(NULL);
9548}
9549
9550static void
9551gc_raise(VALUE exc, const char *fmt, ...)
9552{
9553 va_list ap;
9554 va_start(ap, fmt);
9555 struct gc_raise_tag argv = {
9556 exc, fmt, &ap,
9557 };
9558
9559 if (ruby_thread_has_gvl_p()) {
9560 gc_vraise(&argv);
9562 }
9563 else if (ruby_native_thread_p()) {
9564 rb_thread_call_with_gvl(gc_vraise, &argv);
9566 }
9567 else {
9568 /* Not in a ruby thread */
9569 fprintf(stderr, "%s", "[FATAL] ");
9570 vfprintf(stderr, fmt, ap);
9571 abort();
9572 }
9573
9574 va_end(ap);
9575}
9576
9577static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
9578
9579static void
9580negative_size_allocation_error(const char *msg)
9581{
9582 gc_raise(rb_eNoMemError, "%s", msg);
9583}
9584
9585static void *
9586ruby_memerror_body(void *dummy)
9587{
9588 rb_memerror();
9589 return 0;
9590}
9591
9592static void
9593ruby_memerror(void)
9594{
9595 if (ruby_thread_has_gvl_p()) {
9596 rb_memerror();
9597 }
9598 else {
9599 if (ruby_native_thread_p()) {
9600 rb_thread_call_with_gvl(ruby_memerror_body, 0);
9601 }
9602 else {
9603 /* no ruby thread */
9604 fprintf(stderr, "[FATAL] failed to allocate memory\n");
9606 }
9607 }
9608}
9609
9610void
9611rb_memerror(void)
9612{
9613 rb_execution_context_t *ec = GET_EC();
9614 rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
9615 VALUE exc;
9616
9617 if (0) {
9618 // Print out pid, sleep, so you can attach debugger to see what went wrong:
9619 fprintf(stderr, "rb_memerror pid=%"PRI_PIDT_PREFIX"d\n", getpid());
9620 sleep(60);
9621 }
9622
9623 if (during_gc) gc_exit(objspace, "rb_memerror");
9624
9625 exc = nomem_error;
9626 if (!exc ||
9628 fprintf(stderr, "[FATAL] failed to allocate memory\n");
9630 }
9633 }
9634 else {
9637 }
9638 ec->errinfo = exc;
9640}
9641
9642void *
9643rb_aligned_malloc(size_t alignment, size_t size)
9644{
9645 void *res;
9646
9647#if defined __MINGW32__
9648 res = __mingw_aligned_malloc(size, alignment);
9649#elif defined _WIN32
9650 void *_aligned_malloc(size_t, size_t);
9651 res = _aligned_malloc(size, alignment);
9652#elif defined(HAVE_POSIX_MEMALIGN)
9653 if (posix_memalign(&res, alignment, size) == 0) {
9654 return res;
9655 }
9656 else {
9657 return NULL;
9658 }
9659#elif defined(HAVE_MEMALIGN)
9660 res = memalign(alignment, size);
9661#else
9662 char* aligned;
9663 res = malloc(alignment + size + sizeof(void*)); /* over-allocate: slack for alignment plus one saved pointer */
9664 aligned = (char*)res + alignment + sizeof(void*);
9665 aligned -= ((VALUE)aligned & (alignment - 1)); /* round down to an alignment boundary */
9666 ((void**)aligned)[-1] = res; /* stash the raw pointer just below the aligned block for rb_aligned_free() */
9667 res = (void*)aligned;
9668#endif
9669
9670 /* alignment must be a power of 2 */
9671 GC_ASSERT(((alignment - 1) & alignment) == 0);
9672 GC_ASSERT(alignment % sizeof(void*) == 0);
9673 return res;
9674}
9675
9676static void
9677rb_aligned_free(void *ptr)
9678{
9679#if defined __MINGW32__
9680 __mingw_aligned_free(ptr);
9681#elif defined _WIN32
9682 _aligned_free(ptr);
9683#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
9684 free(ptr);
9685#else
9686 free(((void**)ptr)[-1]);
9687#endif
9688}
9689
9690static inline size_t
9691objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
9692{
9693#ifdef HAVE_MALLOC_USABLE_SIZE
9694 return malloc_usable_size(ptr);
9695#else
9696 return hint;
9697#endif
9698}
9699
9705
9706static inline void
9707atomic_sub_nounderflow(size_t *var, size_t sub)
9708{
9709 if (sub == 0) return;
9710
9711 while (1) {
9712 size_t val = *var;
9713 if (val < sub) sub = val;
9714 if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
9715 }
9716}
9717
9718static void
9719objspace_malloc_gc_stress(rb_objspace_t *objspace)
9720{
9724
9726 reason |= GPR_FLAG_FULL_MARK;
9727 }
9728 garbage_collect_with_gvl(objspace, reason);
9729 }
9730}
9731
9732static void
9733objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
9734{
9735 if (new_size > old_size) {
9736 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
9737#if RGENGC_ESTIMATE_OLDMALLOC
9738 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
9739#endif
9740 }
9741 else {
9742 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
9743#if RGENGC_ESTIMATE_OLDMALLOC
9744 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
9745#endif
9746 }
9747
9748 if (type == MEMOP_TYPE_MALLOC) {
9749 retry:
9752 gc_rest(objspace); /* gc_rest can reduce malloc_increase */
9753 goto retry;
9754 }
9755 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
9756 }
9757 }
9758
9759#if MALLOC_ALLOCATED_SIZE
9760 if (new_size >= old_size) {
9761 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
9762 }
9763 else {
9764 size_t dec_size = old_size - new_size;
9765 size_t allocated_size = objspace->malloc_params.allocated_size;
9766
9767#if MALLOC_ALLOCATED_SIZE_CHECK
9768 if (allocated_size < dec_size) {
9769 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
9770 }
9771#endif
9772 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
9773 }
9774
9775 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %d, old_size: %d\n",
9776 mem,
9777 type == MEMOP_TYPE_MALLOC ? "malloc" :
9778 type == MEMOP_TYPE_FREE ? "free " :
9779 type == MEMOP_TYPE_REALLOC ? "realloc": "error",
9780 (int)new_size, (int)old_size);
9781
9782 switch (type) {
9783 case MEMOP_TYPE_MALLOC:
9784 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
9785 break;
9786 case MEMOP_TYPE_FREE:
9787 {
9788 size_t allocations = objspace->malloc_params.allocations;
9789 if (allocations > 0) {
9790 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
9791 }
9792#if MALLOC_ALLOCATED_SIZE_CHECK
9793 else {
9794 GC_ASSERT(objspace->malloc_params.allocations > 0);
9795 }
9796#endif
9797 }
9798 break;
9799 case MEMOP_TYPE_REALLOC: /* ignore */ break;
9800 }
9801#endif
9802}
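
/* Worked example (illustrative): with the default malloc limit of 16 MiB,
 * once the xmalloc'd bytes tracked in malloc_increase since the last GC
 * exceed malloc_limit, the next MEMOP_TYPE_MALLOC bookkeeping call above
 * triggers garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC):
 *
 *   malloc_increase = 16 MiB + 1 byte  >  malloc_limit = 16 MiB  ->  GC
 *
 * Freeing memory goes the other way: atomic_sub_nounderflow() shrinks the
 * counter without letting it wrap below zero. */
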
9803
9804struct malloc_obj_info { /* 4 words */
9805 size_t size;
9806#if USE_GC_MALLOC_OBJ_INFO_DETAILS
9807 size_t gen;
9808 const char *file;
9809 size_t line;
9810#endif
9811};
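
/* Layout sketch when CALC_EXACT_MALLOC_SIZE is enabled (illustrative):
 *
 *   pointer handed to callers ---------------+
 *                                            v
 *   [ struct malloc_obj_info header ][ user data ... ]
 *   ^
 *   pointer actually passed to malloc()/free()
 *
 * objspace_malloc_fixup() below fills the header and returns `info + 1`;
 * objspace_xfree() steps back with `(struct malloc_obj_info *)ptr - 1`
 * to recover the header before freeing. */
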
9812
9813#if USE_GC_MALLOC_OBJ_INFO_DETAILS
9814const char *ruby_malloc_info_file;
9815int ruby_malloc_info_line;
9816#endif
9817
9818static inline size_t
9819objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
9820{
9821 if (size == 0) size = 1;
9822
9823#if CALC_EXACT_MALLOC_SIZE
9824 size += sizeof(struct malloc_obj_info);
9825#endif
9826
9827 return size;
9828}
9829
9830static inline void *
9831objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
9832{
9833 size = objspace_malloc_size(objspace, mem, size);
9834 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
9835
9836#if CALC_EXACT_MALLOC_SIZE
9837 {
9838 struct malloc_obj_info *info = mem;
9839 info->size = size;
9840#if USE_GC_MALLOC_OBJ_INFO_DETAILS
9841 info->gen = objspace->profile.count;
9842 info->file = ruby_malloc_info_file;
9843 info->line = info->file ? ruby_malloc_info_line : 0;
9844#else
9845 info->file = NULL;
9846#endif
9847 mem = info + 1;
9848 }
9849#endif
9850
9851 return mem;
9852}
9853
9854#define TRY_WITH_GC(alloc) do { \
9855 objspace_malloc_gc_stress(objspace); \
9856 if (!(alloc) && \
9857 (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
9858 GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
9859 GPR_FLAG_MALLOC) || \
9860 !(alloc))) { \
9861 ruby_memerror(); \
9862 } \
9863 } while (0)
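
/* What TRY_WITH_GC amounts to, written long-hand (illustrative sketch with a
 * hypothetical helper; the real macro also honours GC.stress through
 * objspace_malloc_gc_stress()): try the allocation, on failure force one
 * full immediate GC and retry, and only then give up via ruby_memerror(). */
static void *
example_alloc_with_gc_retry(rb_objspace_t *objspace, size_t size)
{
    void *mem = malloc(size);
    if (mem == NULL) {
        if (garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK |
                                     GPR_FLAG_IMMEDIATE_MARK |
                                     GPR_FLAG_IMMEDIATE_SWEEP |
                                     GPR_FLAG_MALLOC)) {
            mem = malloc(size);
        }
        if (mem == NULL) ruby_memerror();
    }
    return mem;
}
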
9864
9865/* these shouldn't be called directly.
9866 * objspace_* functions do not check allocation size.
9867 */
9868static void *
9869objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
9870{
9871 void *mem;
9872
9873 size = objspace_malloc_prepare(objspace, size);
9874 TRY_WITH_GC(mem = malloc(size));
9875 RB_DEBUG_COUNTER_INC(heap_xmalloc);
9876 return objspace_malloc_fixup(objspace, mem, size);
9877}
9878
9879static inline size_t
9880xmalloc2_size(const size_t count, const size_t elsize)
9881{
9882 return size_mul_or_raise(count, elsize, rb_eArgError);
9883}
9884
9885static void *
9886objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
9887{
9888 void *mem;
9889
9890 if (!ptr) return objspace_xmalloc0(objspace, new_size);
9891
9892 /*
9893 * The behavior of realloc(ptr, 0) is implementation-defined.
9894 * Therefore we don't use realloc(ptr, 0), for portability reasons.
9895 * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
9896 */
9897 if (new_size == 0) {
9898 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
9899 /*
9900 * - OpenBSD's malloc(3) man page says that when 0 is passed, it
9901 * returns a non-NULL pointer to an access-protected memory page.
9902 * The returned pointer cannot be read / written at all, but
9903 * still be a valid argument of free().
9904 *
9905 * https://man.openbsd.org/malloc.3
9906 *
9907 * - Linux's malloc(3) man page says that it _might_ perhaps return
9908 * a non-NULL pointer when its argument is 0. That return value
9909 * is safe (and is expected) to be passed to free().
9910 *
9911 * http://man7.org/linux/man-pages/man3/malloc.3.html
9912 *
9913 * - As far as we can tell from the implementation, jemalloc's malloc()
9914 * returns a normal 16-byte memory region when its argument is 0.
9915 *
9916 * - Likewise, musl libc's malloc() returns a normal 32-byte memory
9917 * region when its argument is 0.
9918 *
9919 * - Other malloc implementations can also return non-NULL.
9920 */
9921 objspace_xfree(objspace, ptr, old_size);
9922 return mem;
9923 }
9924 else {
9925 /*
9926 * It is dangerous to return NULL here, because that could lead to
9927 * RCE. Fallback to 1 byte instead of zero.
9928 *
9929 * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
9930 */
9931 new_size = 1;
9932 }
9933 }
9934
9935#if CALC_EXACT_MALLOC_SIZE
9936 {
9937 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
9938 new_size += sizeof(struct malloc_obj_info);
9939 ptr = info;
9940 old_size = info->size;
9941 }
9942#endif
9943
9944 old_size = objspace_malloc_size(objspace, ptr, old_size);
9945 TRY_WITH_GC(mem = realloc(ptr, new_size));
9946 new_size = objspace_malloc_size(objspace, mem, new_size);
9947
9948#if CALC_EXACT_MALLOC_SIZE
9949 {
9950 struct malloc_obj_info *info = mem;
9951 info->size = new_size;
9952 mem = info + 1;
9953 }
9954#endif
9955
9956 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
9957
9958 RB_DEBUG_COUNTER_INC(heap_xrealloc);
9959 return mem;
9960}
9961
9962#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
9963
9964#define MALLOC_INFO_GEN_SIZE 100
9965#define MALLOC_INFO_SIZE_SIZE 10
9966static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
9967static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
9968static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
9969static st_table *malloc_info_file_table;
9970
9971static int
9972mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
9973{
9974 const char *file = (void *)key;
9975 const size_t *data = (void *)val;
9976
9977 fprintf(stderr, "%s\t%d\t%d\n", file, (int)data[0], (int)data[1]);
9978
9979 return ST_CONTINUE;
9980}
9981
9982__attribute__((destructor))
9983void
9984ruby_malloc_info_show_results(void)
9985{
9986 int i;
9987
9988 fprintf(stderr, "* malloc_info gen statistics\n");
9989 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
9990 if (i == MALLOC_INFO_GEN_SIZE-1) {
9991 fprintf(stderr, "more\t%d\t%d\n", (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
9992 }
9993 else {
9994 fprintf(stderr, "%d\t%d\t%d\n", i, (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
9995 }
9996 }
9997
9998 fprintf(stderr, "* malloc_info size statistics\n");
9999 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
10000 int s = 16 << i;
10001 fprintf(stderr, "%d\t%d\n", (int)s, (int)malloc_info_size[i]);
10002 }
10003 fprintf(stderr, "more\t%d\n", (int)malloc_info_size[i]);
10004
10005 if (malloc_info_file_table) {
10006 fprintf(stderr, "* malloc_info file statistics\n");
10007 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
10008 }
10009}
10010#else
10011void
10012ruby_malloc_info_show_results(void)
10013{
10014}
10015#endif
10016
10017static void
10018objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
10019{
10020 if (!ptr) {
10021 /*
10022 * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since
10023 * its first version. We had better follow it.
10024 */
10025 return;
10026 }
10027#if CALC_EXACT_MALLOC_SIZE
10028 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
10029 ptr = info;
10030 old_size = info->size;
10031
10032#if USE_GC_MALLOC_OBJ_INFO_DETAILS
10033 {
10034 int gen = (int)(objspace->profile.count - info->gen);
10035 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
10036 int i;
10037
10038 malloc_info_gen_cnt[gen_index]++;
10039 malloc_info_gen_size[gen_index] += info->size;
10040
10041 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
10042 size_t s = 16 << i;
10043 if (info->size <= s) {
10044 malloc_info_size[i]++;
10045 goto found;
10046 }
10047 }
10048 malloc_info_size[i]++;
10049 found:;
10050
10051 {
10052 st_data_t key = (st_data_t)info->file;
10053 size_t *data;
10054
10055 if (malloc_info_file_table == NULL) {
10056 malloc_info_file_table = st_init_numtable_with_size(1024);
10057 }
10058 if (st_lookup(malloc_info_file_table, key, (st_data_t *)&data)) {
10059 /* hit */
10060 }
10061 else {
10062 data = malloc(xmalloc2_size(2, sizeof(size_t)));
10063 if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
10064 data[0] = data[1] = 0;
10065 st_insert(malloc_info_file_table, key, (st_data_t)data);
10066 }
10067 data[0] ++;
10068 data[1] += info->size;
10069 };
10070#if 0 /* verbose output */
10071 if (gen >= 2) {
10072 if (info->file) {
10073 fprintf(stderr, "free - size:%d, gen:%d, pos: %s:%d\n", (int)info->size, gen, info->file, (int)info->line);
10074 }
10075 else {
10076 fprintf(stderr, "free - size:%d, gen:%d\n", (int)info->size, gen);
10077 }
10078 }
10079#endif
10080 }
10081#endif
10082#endif
10083 old_size = objspace_malloc_size(objspace, ptr, old_size);
10084
10085 free(ptr);
10086 RB_DEBUG_COUNTER_INC(heap_xfree);
10087
10088 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE);
10089}
10090
10091static void *
10092ruby_xmalloc0(size_t size)
10093{
10094 return objspace_xmalloc0(&rb_objspace, size);
10095}
10096
10097void *
10098ruby_xmalloc_body(size_t size)
10099{
10100 if ((ssize_t)size < 0) {
10101 negative_size_allocation_error("too large allocation size");
10102 }
10103 return ruby_xmalloc0(size);
10104}
10105
10106void
10107ruby_malloc_size_overflow(size_t count, size_t elsize)
10108{
10110 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
10111 count, elsize);
10112}
10113
10114void *
10115ruby_xmalloc2_body(size_t n, size_t size)
10116{
10117 return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
10118}
10119
10120static void *
10121objspace_xcalloc(rb_objspace_t *objspace, size_t size)
10122{
10123 void *mem;
10124
10125 size = objspace_malloc_prepare(objspace, size);
10126 TRY_WITH_GC(mem = calloc1(size));
10127 return objspace_malloc_fixup(objspace, mem, size);
10128}
10129
10130void *
10131ruby_xcalloc_body(size_t n, size_t size)
10132{
10133 return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
10134}
10135
10136#ifdef ruby_sized_xrealloc
10137#undef ruby_sized_xrealloc
10138#endif
10139void *
10140ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
10141{
10142 if ((ssize_t)new_size < 0) {
10143 negative_size_allocation_error("too large allocation size");
10144 }
10145
10146 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
10147}
10148
10149void *
10150ruby_xrealloc_body(void *ptr, size_t new_size)
10151{
10152 return ruby_sized_xrealloc(ptr, new_size, 0);
10153}
10154
10155#ifdef ruby_sized_xrealloc2
10156#undef ruby_sized_xrealloc2
10157#endif
10158void *
10159ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
10160{
10161 size_t len = xmalloc2_size(n, size);
10162 return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
10163}
10164
10165void *
10166ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
10167{
10168 return ruby_sized_xrealloc2(ptr, n, size, 0);
10169}
10170
10171#ifdef ruby_sized_xfree
10172#undef ruby_sized_xfree
10173#endif
10174void
10175ruby_sized_xfree(void *x, size_t size)
10176{
10177 if (x) {
10178 objspace_xfree(&rb_objspace, x, size);
10179 }
10180}
10181
10182void
10183ruby_xfree(void *x)
10184{
10185 ruby_sized_xfree(x, 0);
10186}
10187
10188void *
10189rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
10190{
10191 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
10192 return ruby_xmalloc(w);
10193}
10194
10195void *
10196rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
10197{
10198 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
10199 return ruby_xrealloc((void *)p, w);
10200}
10201
10202void *
10203rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
10204{
10205 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
10206 return ruby_xmalloc(u);
10207}
10208
10209void *
10210rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
10211{
10212 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
10213 return ruby_xcalloc(u, 1);
10214}
10215
10216/* Mimics ruby_xmalloc, but does not need rb_objspace.
10217 * Should return a pointer suitable for ruby_xfree.
10218 */
10219void *
10220ruby_mimmalloc(size_t size)
10221{
10222 void *mem;
10223#if CALC_EXACT_MALLOC_SIZE
10224 size += sizeof(struct malloc_obj_info);
10225#endif
10226 mem = malloc(size);
10227#if CALC_EXACT_MALLOC_SIZE
10228 if (!mem) {
10229 return NULL;
10230 }
10231 else
10232 /* set 0 for consistency of allocated_size/allocations */
10233 {
10234 struct malloc_obj_info *info = mem;
10235 info->size = 0;
10236#if USE_GC_MALLOC_OBJ_INFO_DETAILS
10237 info->gen = 0;
10238 info->file = NULL;
10239 info->line = 0;
10240#else
10241 info->file = NULL;
10242#endif
10243 mem = info + 1;
10244 }
10245#endif
10246 return mem;
10247}
10248
10249void
10250ruby_mimfree(void *ptr)
10251{
10252#if CALC_EXACT_MALLOC_SIZE
10253 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
10254 ptr = info;
10255#endif
10256 free(ptr);
10257}
10258
10259void *
10260rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
10261{
10262 void *ptr;
10263 VALUE imemo;
10264 rb_imemo_tmpbuf_t *tmpbuf;
10265
10266 /* Keep the order; allocate an empty imemo first then xmalloc, to
10267 * get rid of potential memory leak */
10268 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
10269 *store = imemo;
10270 ptr = ruby_xmalloc0(size);
10271 tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
10272 tmpbuf->ptr = ptr;
10273 tmpbuf->cnt = cnt;
10274 return ptr;
10275}
10276
10277void *
10278rb_alloc_tmp_buffer(volatile VALUE *store, long len)
10279{
10280 long cnt;
10281
10282 if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
10283 rb_raise(rb_eArgError, "negative buffer size (or size too big)");
10284 }
10285
10286 return rb_alloc_tmp_buffer_with_count(store, len, cnt);
10287}
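
/* Usage sketch (illustrative, hypothetical helper): the tmp-buffer API above
 * keeps the allocation reachable through the imemo stored in `store`, and
 * rb_free_tmp_buffer() releases both. */
static void
example_use_tmp_buffer(long len)
{
    volatile VALUE store = 0;
    char *buf = rb_alloc_tmp_buffer(&store, len);
    if (len > 0) buf[0] = '\0';   /* ... use the buffer ... */
    rb_free_tmp_buffer(&store);
}
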
10288
10289void
10290rb_free_tmp_buffer(volatile VALUE *store)
10291{
10292 rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
10293 if (s) {
10294 void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
10295 s->cnt = 0;
10296 ruby_xfree(ptr);
10297 }
10298}
10299
10300#if MALLOC_ALLOCATED_SIZE
10301/*
10302 * call-seq:
10303 * GC.malloc_allocated_size -> Integer
10304 *
10305 * Returns the size of memory allocated by malloc().
10306 *
10307 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
10308 */
10309
10310static VALUE
10311gc_malloc_allocated_size(VALUE self)
10312{
10313 return UINT2NUM(rb_objspace.malloc_params.allocated_size);
10314}
10315
10316/*
10317 * call-seq:
10318 * GC.malloc_allocations -> Integer
10319 *
10320 * Returns the number of malloc() allocations.
10321 *
10322 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
10323 */
10324
10325static VALUE
10326gc_malloc_allocations(VALUE self)
10327{
10328 return UINT2NUM(rb_objspace.malloc_params.allocations);
10329}
10330#endif
10331
10332void
10333rb_gc_adjust_memory_usage(ssize_t diff)
10334{
10335 rb_objspace_t *objspace = &rb_objspace;
10336 if (diff > 0) {
10337 objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
10338 }
10339 else if (diff < 0) {
10340 objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
10341 }
10342}
10343
10344/*
10345 ------------------------------ WeakMap ------------------------------
10346*/
10347
10348struct weakmap {
10349 st_table *obj2wmap; /* obj -> [ref,...] */
10350 st_table *wmap2obj; /* ref -> obj */
10351 VALUE final;
10352};
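
/* Data-flow sketch (illustrative): after `map[key] = value` the two tables
 * mirror each other,
 *
 *   wmap2obj: key   -> value            (read by wmap_lookup / wmap_aref)
 *   obj2wmap: value -> [1, key]         (length-prefixed VALUE array)
 *
 * so when `value` is finalized, wmap_finalize() can walk that array and
 * drop every key that referenced it. */
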
10353
10354#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
10355
10356#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
10357static int
10358wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
10359{
10360 rb_objspace_t *objspace = (rb_objspace_t *)arg;
10361 VALUE obj = (VALUE)val;
10362 if (!is_live_object(objspace, obj)) return ST_DELETE;
10363 return ST_CONTINUE;
10364}
10365#endif
10366
10367static void
10368wmap_compact(void *ptr)
10369{
10370 struct weakmap *w = ptr;
10373 w->final = rb_gc_location(w->final);
10374}
10375
10376static void
10377wmap_mark(void *ptr)
10378{
10379 struct weakmap *w = ptr;
10380#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
10381 if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
10382#endif
10383 rb_gc_mark_movable(w->final);
10384}
10385
10386static int
10387wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
10388{
10389 VALUE *ptr = (VALUE *)val;
10390 ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
10391 return ST_CONTINUE;
10392}
10393
10394static void
10395wmap_free(void *ptr)
10396{
10397 struct weakmap *w = ptr;
10398 st_foreach(w->obj2wmap, wmap_free_map, 0);
10399 st_free_table(w->obj2wmap);
10400 st_free_table(w->wmap2obj);
10401}
10402
10403static int
10404wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
10405{
10406 VALUE *ptr = (VALUE *)val;
10407 *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
10408 return ST_CONTINUE;
10409}
10410
10411static size_t
10412wmap_memsize(const void *ptr)
10413{
10414 size_t size;
10415 const struct weakmap *w = ptr;
10416 size = sizeof(*w);
10417 size += st_memsize(w->obj2wmap);
10418 size += st_memsize(w->wmap2obj);
10419 st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
10420 return size;
10421}
10422
10423static const rb_data_type_t weakmap_type = {
10424 "weakmap",
10425 {
10426 wmap_mark,
10427 wmap_free,
10428 wmap_memsize,
10429 wmap_compact,
10430 },
10432};
10433
10434extern const struct st_hash_type rb_hashtype_ident;
10435static VALUE wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self));
10436
10437static VALUE
10438wmap_allocate(VALUE klass)
10439{
10440 struct weakmap *w;
10441 VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
10444 w->final = rb_func_lambda_new(wmap_finalize, obj, 1, 1);
10445 return obj;
10446}
10447
10448static int
10449wmap_live_p(rb_objspace_t *objspace, VALUE obj)
10450{
10451 if (!FL_ABLE(obj)) return TRUE;
10452 if (!is_id_value(objspace, obj)) return FALSE;
10453 if (!is_live_object(objspace, obj)) return FALSE;
10454 return TRUE;
10455}
10456
10457static int
10458wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
10459{
10460 VALUE wmap, *ptr, size, i, j;
10461 if (!existing) return ST_STOP;
10462 wmap = (VALUE)arg, ptr = (VALUE *)*value;
10463 for (i = j = 1, size = ptr[0]; i <= size; ++i) {
10464 if (ptr[i] != wmap) {
10465 ptr[j++] = ptr[i];
10466 }
10467 }
10468 if (j == 1) {
10469 ruby_sized_xfree(ptr, i * sizeof(VALUE));
10470 return ST_DELETE;
10471 }
10472 if (j < i) {
10473 SIZED_REALLOC_N(ptr, VALUE, j + 1, i);
10474 ptr[0] = j;
10475 *value = (st_data_t)ptr;
10476 }
10477 return ST_CONTINUE;
10478}
10479
10480/* :nodoc: */
10481static VALUE
10482wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self))
10483{
10484 st_data_t orig, wmap, data;
10485 VALUE obj, *rids, i, size;
10486 struct weakmap *w;
10487
10488 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10489 /* Get reference from object id. */
10490 if ((obj = id2ref_obj_tbl(&rb_objspace, objid)) == Qundef) {
10491 rb_bug("wmap_finalize: objid is not found.");
10492 }
10493
10494 /* obj is original referenced object and/or weak reference. */
10495 orig = (st_data_t)obj;
10496 if (st_delete(w->obj2wmap, &orig, &data)) {
10497 rids = (VALUE *)data;
10498 size = *rids++;
10499 for (i = 0; i < size; ++i) {
10500 wmap = (st_data_t)rids[i];
10501 st_delete(w->wmap2obj, &wmap, NULL);
10502 }
10503 ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
10504 }
10505
10506 wmap = (st_data_t)obj;
10507 if (st_delete(w->wmap2obj, &wmap, &orig)) {
10508 wmap = (st_data_t)obj;
10509 st_update(w->obj2wmap, orig, wmap_final_func, wmap);
10510 }
10511 return self;
10512}
10513
10514struct wmap_iter_arg {
10515 rb_objspace_t *objspace;
10516 VALUE value;
10517};
10518
10519static VALUE
10520wmap_inspect_append(rb_objspace_t *objspace, VALUE str, VALUE obj)
10521{
10522 if (SPECIAL_CONST_P(obj)) {
10523 return rb_str_append(str, rb_inspect(obj));
10524 }
10525 else if (wmap_live_p(objspace, obj)) {
10526 return rb_str_append(str, rb_any_to_s(obj));
10527 }
10528 else {
10529 return rb_str_catf(str, "#<collected:%p>", (void*)obj);
10530 }
10531}
10532
10533static int
10534wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
10535{
10536 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
10537 rb_objspace_t *objspace = argp->objspace;
10538 VALUE str = argp->value;
10539 VALUE k = (VALUE)key, v = (VALUE)val;
10540
10541 if (RSTRING_PTR(str)[0] == '#') {
10542 rb_str_cat2(str, ", ");
10543 }
10544 else {
10545 rb_str_cat2(str, ": ");
10546 RSTRING_PTR(str)[0] = '#';
10547 }
10548 wmap_inspect_append(objspace, str, k);
10549 rb_str_cat2(str, " => ");
10550 wmap_inspect_append(objspace, str, v);
10551
10552 return ST_CONTINUE;
10553}
10554
10555static VALUE
10556wmap_inspect(VALUE self)
10557{
10558 VALUE str;
10559 VALUE c = rb_class_name(CLASS_OF(self));
10560 struct weakmap *w;
10561 struct wmap_iter_arg args;
10562
10563 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10564 str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
10565 if (w->wmap2obj) {
10566 args.objspace = &rb_objspace;
10567 args.value = str;
10568 st_foreach(w->wmap2obj, wmap_inspect_i, (st_data_t)&args);
10569 }
10570 RSTRING_PTR(str)[0] = '#';
10571 rb_str_cat2(str, ">");
10572 return str;
10573}
10574
10575static int
10576wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
10577{
10578 rb_objspace_t *objspace = (rb_objspace_t *)arg;
10579 VALUE obj = (VALUE)val;
10580 if (wmap_live_p(objspace, obj)) {
10581 rb_yield_values(2, (VALUE)key, obj);
10582 }
10583 return ST_CONTINUE;
10584}
10585
10586/* Iterates over the map, yielding each key/value pair whose value is still alive */
10587static VALUE
10588wmap_each(VALUE self)
10589{
10590 struct weakmap *w;
10591 rb_objspace_t *objspace = &rb_objspace;
10592
10593 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10594 st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
10595 return self;
10596}
10597
10598static int
10599wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
10600{
10601 rb_objspace_t *objspace = (rb_objspace_t *)arg;
10602 VALUE obj = (VALUE)val;
10603 if (wmap_live_p(objspace, obj)) {
10604 rb_yield((VALUE)key);
10605 }
10606 return ST_CONTINUE;
10607}
10608
10609/* Iterates over keys, yielding each key whose referenced object is still alive */
10610static VALUE
10611wmap_each_key(VALUE self)
10612{
10613 struct weakmap *w;
10614 rb_objspace_t *objspace = &rb_objspace;
10615
10616 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10617 st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
10618 return self;
10619}
10620
10621static int
10622wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
10623{
10624 rb_objspace_t *objspace = (rb_objspace_t *)arg;
10625 VALUE obj = (VALUE)val;
10626 if (wmap_live_p(objspace, obj)) {
10627 rb_yield(obj);
10628 }
10629 return ST_CONTINUE;
10630}
10631
10632/* Iterates over values, yielding each referenced object that is still alive */
10633static VALUE
10634wmap_each_value(VALUE self)
10635{
10636 struct weakmap *w;
10637 rb_objspace_t *objspace = &rb_objspace;
10638
10639 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10640 st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
10641 return self;
10642}
10643
10644static int
10645wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
10646{
10647 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
10648 rb_objspace_t *objspace = argp->objspace;
10649 VALUE ary = argp->value;
10650 VALUE obj = (VALUE)val;
10651 if (wmap_live_p(objspace, obj)) {
10652 rb_ary_push(ary, (VALUE)key);
10653 }
10654 return ST_CONTINUE;
10655}
10656
10657/* Returns an Array of keys whose referenced objects are still alive */
10658static VALUE
10659wmap_keys(VALUE self)
10660{
10661 struct weakmap *w;
10662 struct wmap_iter_arg args;
10663
10664 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10665 args.objspace = &rb_objspace;
10666 args.value = rb_ary_new();
10667 st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
10668 return args.value;
10669}
10670
10671static int
10672wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
10673{
10674 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
10675 rb_objspace_t *objspace = argp->objspace;
10676 VALUE ary = argp->value;
10677 VALUE obj = (VALUE)val;
10678 if (wmap_live_p(objspace, obj)) {
10679 rb_ary_push(ary, obj);
10680 }
10681 return ST_CONTINUE;
10682}
10683
10684/* Returns an Array of the referenced objects (values) that are still alive */
10685static VALUE
10686wmap_values(VALUE self)
10687{
10688 struct weakmap *w;
10689 struct wmap_iter_arg args;
10690
10691 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10692 args.objspace = &rb_objspace;
10693 args.value = rb_ary_new();
10694 st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
10695 return args.value;
10696}
10697
10698static int
10699wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
10700{
10701 VALUE size, *ptr, *optr;
10702 if (existing) {
10703 size = (ptr = optr = (VALUE *)*val)[0];
10704 ++size;
10706 }
10707 else {
10708 optr = 0;
10709 size = 1;
10710 ptr = ruby_xmalloc0(2 * sizeof(VALUE));
10711 }
10712 ptr[0] = size;
10713 ptr[size] = (VALUE)arg;
10714 if (ptr == optr) return ST_STOP;
10715 *val = (st_data_t)ptr;
10716 return ST_CONTINUE;
10717}
10718
10719/* Creates a weak reference from the given key to the given value */
10720static VALUE
10721wmap_aset(VALUE self, VALUE wmap, VALUE orig)
10722{
10723 struct weakmap *w;
10724
10725 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10726 if (FL_ABLE(orig)) {
10727 define_final0(orig, w->final);
10728 }
10729 if (FL_ABLE(wmap)) {
10730 define_final0(wmap, w->final);
10731 }
10732
10733 st_update(w->obj2wmap, (st_data_t)orig, wmap_aset_update, wmap);
10734 st_insert(w->wmap2obj, (st_data_t)wmap, (st_data_t)orig);
10735 return nonspecial_obj_id(orig);
10736}
10737
10738/* Retrieves a weakly referenced object with the given key */
10739static VALUE
10740wmap_lookup(VALUE self, VALUE key)
10741{
10742 st_data_t data;
10743 VALUE obj;
10744 struct weakmap *w;
10745 rb_objspace_t *objspace = &rb_objspace;
10746
10747 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10748 if (!st_lookup(w->wmap2obj, (st_data_t)key, &data)) return Qundef;
10749 obj = (VALUE)data;
10750 if (!wmap_live_p(objspace, obj)) return Qundef;
10751 return obj;
10752}
10753
10754/* Retrieves a weakly referenced object with the given key */
10755static VALUE
10756wmap_aref(VALUE self, VALUE key)
10757{
10758 VALUE obj = wmap_lookup(self, key);
10759 return obj != Qundef ? obj : Qnil;
10760}
10761
10762/* Returns +true+ if +key+ is registered */
10763static VALUE
10764wmap_has_key(VALUE self, VALUE key)
10765{
10766 return wmap_lookup(self, key) == Qundef ? Qfalse : Qtrue;
10767}
10768
10769/* Returns the number of referenced objects */
10770static VALUE
10771wmap_size(VALUE self)
10772{
10773 struct weakmap *w;
10774 st_index_t n;
10775
10776 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10777 n = w->wmap2obj->num_entries;
10778#if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
10779 return ULONG2NUM(n);
10780#else
10781 return ULL2NUM(n);
10782#endif
10783}
10784
10785/*
10786 ------------------------------ GC profiler ------------------------------
10787*/
10788
10789#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
10790
10791/* return sec in user time */
10792static double
10793getrusage_time(void)
10794{
10795#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
10796 {
10797 static int try_clock_gettime = 1;
10798 struct timespec ts;
10799 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
10800 return ts.tv_sec + ts.tv_nsec * 1e-9;
10801 }
10802 else {
10803 try_clock_gettime = 0;
10804 }
10805 }
10806#endif
10807
10808#ifdef RUSAGE_SELF
10809 {
10810 struct rusage usage;
10811 struct timeval time;
10812 if (getrusage(RUSAGE_SELF, &usage) == 0) {
10813 time = usage.ru_utime;
10814 return time.tv_sec + time.tv_usec * 1e-6;
10815 }
10816 }
10817#endif
10818
10819#ifdef _WIN32
10820 {
10821 FILETIME creation_time, exit_time, kernel_time, user_time;
10822 ULARGE_INTEGER ui;
10823 LONG_LONG q;
10824 double t;
10825
10826 if (GetProcessTimes(GetCurrentProcess(),
10827 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
10828 memcpy(&ui, &user_time, sizeof(FILETIME));
10829 q = ui.QuadPart / 10L;
10830 t = (DWORD)(q % 1000000L) * 1e-6;
10831 q /= 1000000L;
10832#ifdef __GNUC__
10833 t += q;
10834#else
10835 t += (double)(DWORD)(q >> 16) * (1 << 16);
10836 t += (DWORD)q & ~(~0 << 16);
10837#endif
10838 return t;
10839 }
10840 }
10841#endif
10842
10843 return 0.0;
10844}
10845
10846static inline void
10847gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
10848{
10849 if (objspace->profile.run) {
10850 size_t index = objspace->profile.next_index;
10851 gc_profile_record *record;
10852
10853 /* create new record */
10854 objspace->profile.next_index++;
10855
10856 if (!objspace->profile.records) {
10857 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
10858 objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
10859 }
10860 if (index >= objspace->profile.size) {
10861 void *ptr;
10862 objspace->profile.size += 1000;
10863 ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
10864 if (!ptr) rb_memerror();
10865 objspace->profile.records = ptr;
10866 }
10867 if (!objspace->profile.records) {
10868 rb_bug("gc_profile malloc or realloc miss");
10869 }
10870 record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
10871 MEMZERO(record, gc_profile_record, 1);
10872
10873 /* setup before-GC parameter */
10874 record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
10875#if MALLOC_ALLOCATED_SIZE
10876 record->allocated_size = malloc_allocated_size;
10877#endif
10878#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
10879#ifdef RUSAGE_SELF
10880 {
10881 struct rusage usage;
10882 if (getrusage(RUSAGE_SELF, &usage) == 0) {
10883 record->maxrss = usage.ru_maxrss;
10884 record->minflt = usage.ru_minflt;
10885 record->majflt = usage.ru_majflt;
10886 }
10887 }
10888#endif
10889#endif
10890 }
10891}
10892
10893static inline void
10894gc_prof_timer_start(rb_objspace_t *objspace)
10895{
10896 if (gc_prof_enabled(objspace)) {
10897 gc_profile_record *record = gc_prof_record(objspace);
10898#if GC_PROFILE_MORE_DETAIL
10899 record->prepare_time = objspace->profile.prepare_time;
10900#endif
10901 record->gc_time = 0;
10902 record->gc_invoke_time = getrusage_time();
10903 }
10904}
10905
10906static double
10907elapsed_time_from(double time)
10908{
10909 double now = getrusage_time();
10910 if (now > time) {
10911 return now - time;
10912 }
10913 else {
10914 return 0;
10915 }
10916}
10917
10918static inline void
10919gc_prof_timer_stop(rb_objspace_t *objspace)
10920{
10921 if (gc_prof_enabled(objspace)) {
10922 gc_profile_record *record = gc_prof_record(objspace);
10923 record->gc_time = elapsed_time_from(record->gc_invoke_time);
10924 record->gc_invoke_time -= objspace->profile.invoke_time;
10925 }
10926}
10927
10928#define RUBY_DTRACE_GC_HOOK(name) \
10929 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
10930static inline void
10931gc_prof_mark_timer_start(rb_objspace_t *objspace)
10932{
10933 RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
10934#if GC_PROFILE_MORE_DETAIL
10935 if (gc_prof_enabled(objspace)) {
10936 gc_prof_record(objspace)->gc_mark_time = getrusage_time();
10937 }
10938#endif
10939}
10940
10941static inline void
10942gc_prof_mark_timer_stop(rb_objspace_t *objspace)
10943{
10944 RUBY_DTRACE_GC_HOOK(MARK_END);
10945#if GC_PROFILE_MORE_DETAIL
10946 if (gc_prof_enabled(objspace)) {
10947 gc_profile_record *record = gc_prof_record(objspace);
10948 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
10949 }
10950#endif
10951}
10952
10953static inline void
10954gc_prof_sweep_timer_start(rb_objspace_t *objspace)
10955{
10956 RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
10957 if (gc_prof_enabled(objspace)) {
10958 gc_profile_record *record = gc_prof_record(objspace);
10959
10960 if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
10961 objspace->profile.gc_sweep_start_time = getrusage_time();
10962 }
10963 }
10964}
10965
10966static inline void
10967gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
10968{
10969 RUBY_DTRACE_GC_HOOK(SWEEP_END);
10970
10971 if (gc_prof_enabled(objspace)) {
10972 double sweep_time;
10973 gc_profile_record *record = gc_prof_record(objspace);
10974
10975 if (record->gc_time > 0) {
10976 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
10977 /* need to accumulate GC time for lazy sweep after gc() */
10978 record->gc_time += sweep_time;
10979 }
10980 else if (GC_PROFILE_MORE_DETAIL) {
10981 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
10982 }
10983
10984#if GC_PROFILE_MORE_DETAIL
10985 record->gc_sweep_time += sweep_time;
10987#endif
10989 }
10990}
10991
10992static inline void
10993gc_prof_set_malloc_info(rb_objspace_t *objspace)
10994{
10995#if GC_PROFILE_MORE_DETAIL
10996 if (gc_prof_enabled(objspace)) {
10997 gc_profile_record *record = gc_prof_record(objspace);
10998 record->allocate_increase = malloc_increase;
10999 record->allocate_limit = malloc_limit;
11000 }
11001#endif
11002}
11003
11004static inline void
11005gc_prof_set_heap_info(rb_objspace_t *objspace)
11006{
11007 if (gc_prof_enabled(objspace)) {
11008 gc_profile_record *record = gc_prof_record(objspace);
11009 size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
11010 size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
11011
11012#if GC_PROFILE_MORE_DETAIL
11013 record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
11014 record->heap_live_objects = live;
11015 record->heap_free_objects = total - live;
11016#endif
11017
11018 record->heap_total_objects = total;
11019 record->heap_use_size = live * sizeof(RVALUE);
11020 record->heap_total_size = total * sizeof(RVALUE);
11021 }
11022}
11023
11024/*
11025 * call-seq:
11026 * GC::Profiler.clear -> nil
11027 *
11028 * Clears the GC profiler data.
11029 *
11030 */
11031
11032static VALUE
11033gc_profile_clear(VALUE _)
11034{
11035 rb_objspace_t *objspace = &rb_objspace;
11036 void *p = objspace->profile.records;
11037 objspace->profile.records = NULL;
11038 objspace->profile.size = 0;
11039 objspace->profile.next_index = 0;
11040 objspace->profile.current_record = 0;
11041 if (p) {
11042 free(p);
11043 }
11044 return Qnil;
11045}
11046
11047/*
11048 * call-seq:
11049 * GC::Profiler.raw_data -> [Hash, ...]
11050 *
11051 * Returns an Array of individual raw profile data Hashes ordered
11052 * from earliest to latest by +:GC_INVOKE_TIME+.
11053 *
11054 * For example:
11055 *
11056 * [
11057 * {
11058 * :GC_TIME=>1.3000000000000858e-05,
11059 * :GC_INVOKE_TIME=>0.010634999999999999,
11060 * :HEAP_USE_SIZE=>289640,
11061 * :HEAP_TOTAL_SIZE=>588960,
11062 * :HEAP_TOTAL_OBJECTS=>14724,
11063 * :GC_IS_MARKED=>false
11064 * },
11065 * # ...
11066 * ]
11067 *
11068 * The keys mean:
11069 *
11070 * +:GC_TIME+::
11071 * Time elapsed in seconds for this GC run
11072 * +:GC_INVOKE_TIME+::
11073 * Time elapsed in seconds from startup to when the GC was invoked
11074 * +:HEAP_USE_SIZE+::
11075 * Total bytes of heap used
11076 * +:HEAP_TOTAL_SIZE+::
11077 * Total size of heap in bytes
11078 * +:HEAP_TOTAL_OBJECTS+::
11079 * Total number of objects
11080 * +:GC_IS_MARKED+::
11081 * Returns +true+ if the GC is in mark phase
11082 *
11083 * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
11084 * to the following hash keys:
11085 *
11086 * +:GC_MARK_TIME+::
11087 * +:GC_SWEEP_TIME+::
11088 * +:ALLOCATE_INCREASE+::
11089 * +:ALLOCATE_LIMIT+::
11090 * +:HEAP_USE_PAGES+::
11091 * +:HEAP_LIVE_OBJECTS+::
11092 * +:HEAP_FREE_OBJECTS+::
11093 * +:HAVE_FINALIZE+::
11094 *
11095 */
11096
11097static VALUE
11098gc_profile_record_get(VALUE _)
11099{
11100 VALUE prof;
11101 VALUE gc_profile = rb_ary_new();
11102 size_t i;
11103 rb_objspace_t *objspace = (&rb_objspace);
11104
11105 if (!objspace->profile.run) {
11106 return Qnil;
11107 }
11108
11109 for (i =0; i < objspace->profile.next_index; i++) {
11110 gc_profile_record *record = &objspace->profile.records[i];
11111
11112 prof = rb_hash_new();
11113 rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(0, rb_hash_new(), record->flags));
11114 rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
11115 rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
11116 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
11117 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
11118 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
11119 rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
11120#if GC_PROFILE_MORE_DETAIL
11121 rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
11122 rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
11123 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
11124 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
11125 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
11126 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
11127 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
11128
11129 rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
11130 rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
11131
11132 rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), (record->flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
11133#endif
11134
11135#if RGENGC_PROFILE > 0
11136 rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
11137 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
11138 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
11139#endif
11140 rb_ary_push(gc_profile, prof);
11141 }
11142
11143 return gc_profile;
11144}
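/* Editor's note: raw_data returns nil while the profiler is disabled, otherwise
 * an Array of Hashes ordered oldest first. A small sketch picking out the most
 * expensive run:
 *
 *    if (data = GC::Profiler.raw_data)
 *      slowest = data.max_by { |rec| rec[:GC_TIME] }
 *      printf("GC at %.3fs took %.6fs\n",
 *             slowest[:GC_INVOKE_TIME], slowest[:GC_TIME]) if slowest
 *    end
 */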
11145
11146#if GC_PROFILE_MORE_DETAIL
11147#define MAJOR_REASON_MAX 0x10
11148
11149static char *
11150gc_profile_dump_major_reason(int flags, char *buff)
11151{
11152 int reason = flags & GPR_FLAG_MAJOR_MASK;
11153 int i = 0;
11154
11155 if (reason == GPR_FLAG_NONE) {
11156 buff[0] = '-';
11157 buff[1] = 0;
11158 }
11159 else {
11160#define C(x, s) \
11161 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
11162 buff[i++] = #x[0]; \
11163 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
11164 buff[i] = 0; \
11165 }
11166 C(NOFREE, N);
11167 C(OLDGEN, O);
11168 C(SHADY, S);
11169#if RGENGC_ESTIMATE_OLDMALLOC
11170 C(OLDMALLOC, M);
11171#endif
11172#undef C
11173 }
11174 return buff;
11175}
11176#endif
11177
11178static void
11179gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
11180{
11181 rb_objspace_t *objspace = &rb_objspace;
11182 size_t count = objspace->profile.next_index;
11183#ifdef MAJOR_REASON_MAX
11184 char reason_str[MAJOR_REASON_MAX];
11185#endif
11186
11187 if (objspace->profile.run && count /* > 1 */) {
11188 size_t i;
11189 const gc_profile_record *record;
11190
11191 append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
11192 append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
11193
11194 for (i = 0; i < count; i++) {
11195 record = &objspace->profile.records[i];
11196 append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
11197 i+1, record->gc_invoke_time, record->heap_use_size,
11198 record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
11199 }
11200
11201#if GC_PROFILE_MORE_DETAIL
11202 append(out, rb_str_new_cstr("\n\n" \
11203 "More detail.\n" \
11204 "Prepare Time = Previously GC's rest sweep time\n"
11205 "Index Flags Allocate Inc. Allocate Limit"
11207 " Allocated Size"
11208#endif
11209 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
11211 " OldgenObj RemNormObj RemShadObj"
11212#endif
11214 " MaxRSS(KB) MinorFLT MajorFLT"
11215#endif
11216 "\n"));
11217
11218 for (i = 0; i < count; i++) {
11219 record = &objspace->profile.records[i];
11220 append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
11221#if CALC_EXACT_MALLOC_SIZE
11222 " %15"PRIuSIZE
11223#endif
11224 " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
11225#if RGENGC_PROFILE
11226 "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
11227#endif
11228#if GC_PROFILE_DETAIL_MEMORY
11229 "%11ld %8ld %8ld"
11230#endif
11231
11232 "\n",
11233 i+1,
11234 gc_profile_dump_major_reason(record->flags, reason_str),
11235 (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
11236 (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
11237 (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
11238 (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
11239 (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
11240 (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
11241 record->allocate_increase, record->allocate_limit,
11242#if CALC_EXACT_MALLOC_SIZE
11243 record->allocated_size,
11244#endif
11245 record->heap_use_pages,
11246 record->gc_mark_time*1000,
11247 record->gc_sweep_time*1000,
11248 record->prepare_time*1000,
11249
11250 record->heap_live_objects,
11251 record->heap_free_objects,
11252 record->removing_objects,
11253 record->empty_objects
11254#if RGENGC_PROFILE
11255 ,
11256 record->old_objects,
11257 record->remembered_normal_objects,
11258 record->remembered_shady_objects
11259#endif
11260#if GC_PROFILE_DETAIL_MEMORY
11261 ,
11262 record->maxrss / 1024,
11263 record->minflt,
11264 record->majflt
11265#endif
11266
11267 ));
11268 }
11269#endif
11270 }
11271}
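/* Editor's note: in the detailed dump above, the letters in the Flags column
 * (N/O/S/M) abbreviate the major-GC reasons NOFREE/OLDGEN/SHADY/OLDMALLOC.
 * Similar information is available programmatically, without the profiler,
 * via GC.latest_gc_info:
 *
 *    GC.start
 *    GC.latest_gc_info(:major_by)   # e.g. :force, :oldgen, :shady, or nil
 *    GC.latest_gc_info(:gc_by)      # what triggered the run, e.g. :method for GC.start
 */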
11272
11273/*
11274 * call-seq:
11275 * GC::Profiler.result -> String
11276 *
11277 * Returns a profile data report such as:
11278 *
11279 * GC 1 invokes.
11280 * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC time(ms)
11281 * 1 0.012 159240 212940 10647 0.00000000000001530000
11282 */
11283
11284static VALUE
11285gc_profile_result(VALUE _)
11286{
11287 VALUE str = rb_str_buf_new(0);
11288 gc_profile_dump_on(str, rb_str_buf_append);
11289 return str;
11290}
11291
11292/*
11293 * call-seq:
11294 * GC::Profiler.report
11295 * GC::Profiler.report(io)
11296 *
11297 * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
11298 *
11299 */
11300
11301static VALUE
11302gc_profile_report(int argc, VALUE *argv, VALUE self)
11303{
11304 VALUE out;
11305
11306 out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
11307 gc_profile_dump_on(out, rb_io_write);
11308
11309 return Qnil;
11310}
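/* Editor's note: GC::Profiler.report streams the text of GC::Profiler.result to
 * $stdout or to the IO passed as its single argument, so it can be redirected
 * to a file or any IO-like object (the file name below is just an example):
 *
 *    GC::Profiler.enable
 *    GC.start
 *    File.open("gc_profile.txt", "w") { |f| GC::Profiler.report(f) }
 *    GC::Profiler.report            # same text, written to $stdout
 */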
11311
11312/*
11313 * call-seq:
11314 * GC::Profiler.total_time -> float
11315 *
11316 * The total time used for garbage collection in seconds
11317 */
11318
11319static VALUE
11320gc_profile_total_time(VALUE self)
11321{
11322 double time = 0;
11323 rb_objspace_t *objspace = &rb_objspace;
11324
11325 if (objspace->profile.run && objspace->profile.next_index > 0) {
11326 size_t i;
11327 size_t count = objspace->profile.next_index;
11328
11329 for (i = 0; i < count; i++) {
11330 time += objspace->profile.records[i].gc_time;
11331 }
11332 }
11333 return DBL2NUM(time);
11334}
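/* Editor's note: total_time sums the :GC_TIME of every recorded run, which makes
 * it convenient for estimating GC overhead over a stretch of work. A sketch,
 * where do_work is a placeholder for the workload being measured:
 *
 *    GC::Profiler.enable
 *    t0 = Process.clock_gettime(Process::CLOCK_MONOTONIC)
 *    do_work
 *    wall = Process.clock_gettime(Process::CLOCK_MONOTONIC) - t0
 *    gc_fraction = GC::Profiler.total_time / wall   # share of wall time spent in GC
 */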
11335
11336/*
11337 * call-seq:
11338 * GC::Profiler.enabled? -> true or false
11339 *
11340 * The current status of GC profile mode.
11341 */
11342
11343static VALUE
11344gc_profile_enable_get(VALUE self)
11345{
11346 rb_objspace_t *objspace = &rb_objspace;
11347 return objspace->profile.run ? Qtrue : Qfalse;
11348}
11349
11350/*
11351 * call-seq:
11352 * GC::Profiler.enable -> nil
11353 *
11354 * Starts the GC profiler.
11355 *
11356 */
11357
11358static VALUE
11359gc_profile_enable(VALUE _)
11360{
11361 rb_objspace_t *objspace = &rb_objspace;
11362 objspace->profile.run = TRUE;
11363 objspace->profile.current_record = 0;
11364 return Qnil;
11365}
11366
11367/*
11368 * call-seq:
11369 * GC::Profiler.disable -> nil
11370 *
11371 * Stops the GC profiler.
11372 *
11373 */
11374
11375static VALUE
11376gc_profile_disable(VALUE _)
11377{
11378 rb_objspace_t *objspace = &rb_objspace;
11379
11380 objspace->profile.run = FALSE;
11381 objspace->profile.current_record = 0;
11382 return Qnil;
11383}
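/* Editor's note: a minimal enable/disable lifecycle. Disable only stops
 * recording; the existing records are released by GC::Profiler.clear, and
 * raw_data/result report nothing while the profiler is off:
 *
 *    GC::Profiler.enabled?   #=> false
 *    GC::Profiler.enable
 *    GC.start
 *    puts GC::Profiler.result
 *    GC::Profiler.disable
 *    GC::Profiler.clear
 */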
11384
11385/*
11386 ------------------------------ DEBUG ------------------------------
11387*/
11388
11389static const char *
11390type_name(int type, VALUE obj)
11391{
11392 switch (type) {
11393#define TYPE_NAME(t) case (t): return #t;
11419 case T_DATA:
11420 if (obj && rb_objspace_data_type_name(obj)) {
11421 return rb_objspace_data_type_name(obj);
11422 }
11423 return "T_DATA";
11424#undef TYPE_NAME
11425 }
11426 return "unknown";
11427}
11428
11429static const char *
11430obj_type_name(VALUE obj)
11431{
11432 return type_name(TYPE(obj), obj);
11433}
11434
11435const char *
11436rb_method_type_name(rb_method_type_t type)
11437{
11438 switch (type) {
11439 case VM_METHOD_TYPE_ISEQ: return "iseq";
11440 case VM_METHOD_TYPE_ATTRSET: return "attrest";
11441 case VM_METHOD_TYPE_IVAR: return "ivar";
11442 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
11443 case VM_METHOD_TYPE_ALIAS: return "alias";
11444 case VM_METHOD_TYPE_REFINED: return "refined";
11445 case VM_METHOD_TYPE_CFUNC: return "cfunc";
11446 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
11447 case VM_METHOD_TYPE_MISSING: return "missing";
11448 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
11449 case VM_METHOD_TYPE_UNDEF: return "undef";
11450 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
11451 }
11452 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
11453}
11454
11455/* from array.c */
11456# define ARY_SHARED_P(ary) \
11457 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
11458 FL_TEST((ary),ELTS_SHARED)!=0)
11459# define ARY_EMBED_P(ary) \
11460 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
11461 FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)
11462
11463static void
11464rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
11465{
11466 if (buff_size > 0 && iseq->body && iseq->body->location.label && !RB_TYPE_P(iseq->body->location.pathobj, T_MOVED)) {
11467 VALUE path = rb_iseq_path(iseq);
11468 VALUE n = iseq->body->location.first_lineno;
11469 snprintf(buff, buff_size, " %s@%s:%d",
11470 RSTRING_PTR(iseq->body->location.label),
11471 RSTRING_PTR(path),
11472 n ? FIX2INT(n) : 0 );
11473 }
11474}
11475
11476const char *
11477rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
11478{
11479 int pos = 0;
11480
11481#define BUFF_ARGS buff + pos, buff_size - pos
11482#define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end
11483 if (SPECIAL_CONST_P(obj)) {
11484 APPENDF((BUFF_ARGS, "%s", obj_type_name(obj)));
11485
11486 if (FIXNUM_P(obj)) {
11487 APPENDF((BUFF_ARGS, " %ld", FIX2LONG(obj)));
11488 }
11489 else if (SYMBOL_P(obj)) {
11490 APPENDF((BUFF_ARGS, " %s", rb_id2name(SYM2ID(obj))));
11491 }
11492 }
11493 else {
11494#define TF(c) ((c) != 0 ? "true" : "false")
11495#define C(c, s) ((c) != 0 ? (s) : " ")
11496 const int type = BUILTIN_TYPE(obj);
11497#if USE_RGENGC
11498 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
11499
11500 if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
11501 APPENDF((BUFF_ARGS, "%p [%d%s%s%s%s%s] %s ",
11502 (void *)obj, age,
11504 C(RVALUE_MARK_BITMAP(obj), "M"),
11505 C(RVALUE_PIN_BITMAP(obj), "P"),
11508 obj_type_name(obj)));
11509 }
11510 else {
11511 /* fake */
11512 APPENDF((BUFF_ARGS, "%p [%dXXXX] %s",
11513 (void *)obj, age,
11514 obj_type_name(obj)));
11515 }
11516#else
11517 APPENDF((BUFF_ARGS, "%p [%s] %s",
11518 (void *)obj,
11519 C(RVALUE_MARK_BITMAP(obj), "M"),
11520 obj_type_name(obj)));
11521#endif
11522
11523 if (internal_object_p(obj)) {
11524 /* ignore */
11525 }
11526 else if (RBASIC(obj)->klass == 0) {
11527 APPENDF((BUFF_ARGS, "(temporary internal)"));
11528 }
11529 else {
11530 if (RTEST(RBASIC(obj)->klass)) {
11531 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
11532 if (!NIL_P(class_path)) {
11533 APPENDF((BUFF_ARGS, "(%s)", RSTRING_PTR(class_path)));
11534 }
11535 }
11536 }
11537
11538#if GC_DEBUG
11539 APPENDF((BUFF_ARGS, "@%s:%d", RANY(obj)->file, RANY(obj)->line));
11540#endif
11541
11542 switch (type) {
11543 case T_NODE:
11544 UNEXPECTED_NODE(rb_raw_obj_info);
11545 break;
11546 case T_ARRAY:
11547 if (FL_TEST(obj, ELTS_SHARED)) {
11548 APPENDF((BUFF_ARGS, "shared -> %s",
11549 rb_obj_info(RARRAY(obj)->as.heap.aux.shared_root)));
11550 }
11551 else if (FL_TEST(obj, RARRAY_EMBED_FLAG)) {
11552 APPENDF((BUFF_ARGS, "[%s%s] len: %d (embed)",
11553 C(ARY_EMBED_P(obj), "E"),
11554 C(ARY_SHARED_P(obj), "S"),
11555 (int)RARRAY_LEN(obj)));
11556 }
11557 else {
11558 APPENDF((BUFF_ARGS, "[%s%s%s] len: %d, capa:%d ptr:%p",
11559 C(ARY_EMBED_P(obj), "E"),
11560 C(ARY_SHARED_P(obj), "S"),
11561 C(RARRAY_TRANSIENT_P(obj), "T"),
11562 (int)RARRAY_LEN(obj),
11563 ARY_EMBED_P(obj) ? -1 : (int)RARRAY(obj)->as.heap.aux.capa,
11564 (void *)RARRAY_CONST_PTR_TRANSIENT(obj)));
11565 }
11566 break;
11567 case T_STRING: {
11568 APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(obj)));
11569 break;
11570 }
11571 case T_MOVED: {
11572 APPENDF((BUFF_ARGS, "-> %p", (void*)rb_gc_location(obj)));
11573 break;
11574 }
11575 case T_HASH: {
11576 APPENDF((BUFF_ARGS, "[%c%c] %d",
11577 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
11578 RHASH_TRANSIENT_P(obj) ? 'T' : ' ',
11579 (int)RHASH_SIZE(obj)));
11580 break;
11581 }
11582 case T_CLASS:
11583 case T_MODULE:
11584 {
11585 VALUE class_path = rb_class_path_cached(obj);
11586 if (!NIL_P(class_path)) {
11587 APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(class_path)));
11588 }
11589 break;
11590 }
11591 case T_ICLASS:
11592 {
11593 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
11594 if (!NIL_P(class_path)) {
11595 APPENDF((BUFF_ARGS, "src:%s", RSTRING_PTR(class_path)));
11596 }
11597 break;
11598 }
11599 case T_OBJECT:
11600 {
11601 uint32_t len = ROBJECT_NUMIV(obj);
11602
11603 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
11604 APPENDF((BUFF_ARGS, "(embed) len:%d", len));
11605 }
11606 else {
11607 VALUE *ptr = ROBJECT_IVPTR(obj);
11608 APPENDF((BUFF_ARGS, "len:%d ptr:%p", len, (void *)ptr));
11609 }
11610 }
11611 break;
11612 case T_DATA: {
11613 const struct rb_block *block;
11614 const rb_iseq_t *iseq;
11615 if (rb_obj_is_proc(obj) &&
11616 (block = vm_proc_block(obj)) != NULL &&
11617 (vm_block_type(block) == block_type_iseq) &&
11618 (iseq = vm_block_iseq(block)) != NULL) {
11619 rb_raw_iseq_info(BUFF_ARGS, iseq);
11620 }
11621 else {
11622 const char * const type_name = rb_objspace_data_type_name(obj);
11623 if (type_name) {
11624 APPENDF((BUFF_ARGS, "%s", type_name));
11625 }
11626 }
11627 break;
11628 }
11629 case T_IMEMO: {
11630 const char *imemo_name = "\0";
11631 switch (imemo_type(obj)) {
11632#define IMEMO_NAME(x) case imemo_##x: imemo_name = #x; break;
11633 IMEMO_NAME(env);
11634 IMEMO_NAME(cref);
11635 IMEMO_NAME(svar);
11636 IMEMO_NAME(throw_data);
11637 IMEMO_NAME(ifunc);
11638 IMEMO_NAME(memo);
11639 IMEMO_NAME(ment);
11640 IMEMO_NAME(iseq);
11641 IMEMO_NAME(tmpbuf);
11642 IMEMO_NAME(ast);
11643 IMEMO_NAME(parser_strterm);
11644#undef IMEMO_NAME
11645 default: UNREACHABLE;
11646 }
11647 APPENDF((BUFF_ARGS, "/%s", imemo_name));
11648
11649 switch (imemo_type(obj)) {
11650 case imemo_ment: {
11651 const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
11652 if (me->def) {
11653 APPENDF((BUFF_ARGS, "(called_id: %s, type: %s, alias: %d, owner: %s, defined_class: %s)",
11656 me->def->alias_count,
11657 obj_info(me->owner),
11658 obj_info(me->defined_class)));
11659 }
11660 else {
11662 }
11663 break;
11664 }
11665 case imemo_iseq: {
11666 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
11667 rb_raw_iseq_info(BUFF_ARGS, iseq);
11668 break;
11669 }
11670 default:
11671 break;
11672 }
11673 }
11674 default:
11675 break;
11676 }
11677#undef TF
11678#undef C
11679 }
11680 end:
11681 return buff;
11682#undef APPENDF
11683#undef BUFF_ARGS
11684}
11685
11686#if RGENGC_OBJ_INFO
11687#define OBJ_INFO_BUFFERS_NUM 10
11688#define OBJ_INFO_BUFFERS_SIZE 0x100
11689static int obj_info_buffers_index = 0;
11690static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
11691
11692static const char *
11693obj_info(VALUE obj)
11694{
11695 const int index = obj_info_buffers_index++;
11696 char *const buff = &obj_info_buffers[index][0];
11697
11698 if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
11699 obj_info_buffers_index = 0;
11700 }
11701
11702 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
11703}
11704#else
11705static const char *
11706obj_info(VALUE obj)
11707{
11708 return obj_type_name(obj);
11709}
11710#endif
11711
11712MJIT_FUNC_EXPORTED const char *
11713rb_obj_info(VALUE obj)
11714{
11715 return obj_info(obj);
11716}
11717
11718void
11719rb_obj_info_dump(VALUE obj)
11720{
11721 char buff[0x100];
11722 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
11723}
11724
11725void
11726rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
11727{
11728 char buff[0x100];
11729 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
11730}
11731
11732#if GC_DEBUG
11733
11734void
11735rb_gcdebug_print_obj_condition(VALUE obj)
11736{
11737 rb_objspace_t *objspace = &rb_objspace;
11738
11739 fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
11740
11741 if (BUILTIN_TYPE(obj) == T_MOVED) {
11742 fprintf(stderr, "moved?: true\n");
11743 }
11744 else {
11745 fprintf(stderr, "moved?: false\n");
11746 }
11747 if (is_pointer_to_heap(objspace, (void *)obj)) {
11748 fprintf(stderr, "pointer to heap?: true\n");
11749 }
11750 else {
11751 fprintf(stderr, "pointer to heap?: false\n");
11752 return;
11753 }
11754
11755 fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
11756 fprintf(stderr, "pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
11757#if USE_RGENGC
11758 fprintf(stderr, "age? : %d\n", RVALUE_AGE(obj));
11759 fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
11760 fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
11761 fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
11762#endif
11763
11765 fprintf(stderr, "lazy sweeping?: true\n");
11766 fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
11767 }
11768 else {
11769 fprintf(stderr, "lazy sweeping?: false\n");
11770 }
11771}
11772
11773static VALUE
11774gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
11775{
11776 fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
11777 return Qnil;
11778}
11779
11780void
11781rb_gcdebug_sentinel(VALUE obj, const char *name)
11782{
11783 rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
11784}
11785
11786#endif /* GC_DEBUG */
11787
11788#if GC_DEBUG_STRESS_TO_CLASS
11789/*
11790 * call-seq:
11791 * GC.add_stress_to_class(class[, ...])
11792 *
11793 * Raises NoMemoryError when allocating an instance of the given classes.
11794 *
11795 */
11796static VALUE
11797rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
11798{
11799 rb_objspace_t *objspace = &rb_objspace;
11800
11801 if (!stress_to_class) {
11802 stress_to_class = rb_ary_tmp_new(argc);
11803 }
11804 rb_ary_cat(stress_to_class, argv, argc);
11805 return self;
11806}
11807
11808/*
11809 * call-seq:
11810 * GC.remove_stress_to_class(class[, ...])
11811 *
11812 * No longer raises NoMemoryError when allocating an instance of the
11813 * given classes.
11814 *
11815 */
11816static VALUE
11817rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
11818{
11819 rb_objspace_t *objspace = &rb_objspace;
11820 int i;
11821
11822 if (stress_to_class) {
11823 for (i = 0; i < argc; ++i) {
11825 }
11826 if (RARRAY_LEN(stress_to_class) == 0) {
11827 stress_to_class = 0;
11828 }
11829 }
11830 return Qnil;
11831}
11832#endif
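/* Editor's note: GC.add_stress_to_class / GC.remove_stress_to_class are only
 * defined when Ruby is built with GC_DEBUG_STRESS_TO_CLASS, so any use should
 * be guarded. MyClass below is a placeholder class name:
 *
 *    if GC.respond_to?(:add_stress_to_class)
 *      GC.add_stress_to_class(MyClass)      # allocating MyClass now raises NoMemoryError
 *      # ... exercise the failure path ...
 *      GC.remove_stress_to_class(MyClass)
 *    end
 */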
11833
11834/*
11835 * Document-module: ObjectSpace
11836 *
11837 * The ObjectSpace module contains a number of routines
11838 * that interact with the garbage collection facility and allow you to
11839 * traverse all living objects with an iterator.
11840 *
11841 * ObjectSpace also provides support for object finalizers, procs that will be
11842 * called when a specific object is about to be destroyed by garbage
11843 * collection.
11844 *
11845 * require 'objspace'
11846 *
11847 * a = "A"
11848 * b = "B"
11849 *
11850 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
11851 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
11852 *
11853 * _produces:_
11854 *
11855 * Finalizer two on 537763470
11856 * Finalizer one on 537763480
11857 */
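/* Editor's note: a small sketch of the iteration side of ObjectSpace mentioned
 * above (each_object is registered in Init_GC below); passing a class restricts
 * the walk to instances of that class:
 *
 *    ObjectSpace.each_object(Module).count          # live classes and modules
 *    ObjectSpace.each_object(String) { |s| }        # yields every live String
 */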
11858
11859/*
11860 * Document-class: ObjectSpace::WeakMap
11861 *
11862 * An ObjectSpace::WeakMap object holds references to
11863 * any objects, but those objects can get garbage collected.
11864 *
11865 * This class is mostly used internally by WeakRef, please use
11866 * +lib/weakref.rb+ for the public interface.
11867 */
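/* Editor's note: a minimal ObjectSpace::WeakMap sketch. Entries disappear on
 * their own once the referenced objects are collected, which is why WeakRef is
 * built on top of it:
 *
 *    wm  = ObjectSpace::WeakMap.new
 *    key = Object.new
 *    wm[key] = "payload"
 *    wm[key]          #=> "payload" while the value object is still alive
 *    wm.size          # shrinks automatically as entries are collected
 */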
11868
11869/* Document-class: GC::Profiler
11870 *
11871 * The GC profiler provides access to information on GC runs including time,
11872 * length and object space size.
11873 *
11874 * Example:
11875 *
11876 * GC::Profiler.enable
11877 *
11878 * require 'rdoc/rdoc'
11879 *
11880 * GC::Profiler.report
11881 *
11882 * GC::Profiler.disable
11883 *
11884 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
11885 */
11886
11887#include "gc.rbinc"
11888
11889void
11890Init_GC(void)
11891{
11892#undef rb_intern
11893 VALUE rb_mObjSpace;
11894 VALUE rb_mProfiler;
11895 VALUE gc_constants;
11896
11897 rb_mGC = rb_define_module("GC");
11898 load_gc();
11899
11900 gc_constants = rb_hash_new();
11901 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
11902 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
11903 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
11904 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_PLANES")), SIZET2NUM(HEAP_PAGE_BITMAP_PLANES));
11905 OBJ_FREEZE(gc_constants);
11906 /* internal constants */
11907 rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
11908
11909 rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
11910 rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
11911 rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
11912 rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
11913 rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
11914 rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
11915 rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
11916 rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
11917 rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
11918
11919 rb_mObjSpace = rb_define_module("ObjectSpace");
11920
11921 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
11922
11923 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
11924 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
11925
11926 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
11927
11929
11931 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
11932
11933 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
11934
11935 {
11936 VALUE rb_cWeakMap = rb_define_class_under(rb_mObjSpace, "WeakMap", rb_cObject);
11937 rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
11938 rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
11939 rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
11940 rb_define_method(rb_cWeakMap, "include?", wmap_has_key, 1);
11941 rb_define_method(rb_cWeakMap, "member?", wmap_has_key, 1);
11942 rb_define_method(rb_cWeakMap, "key?", wmap_has_key, 1);
11943 rb_define_method(rb_cWeakMap, "inspect", wmap_inspect, 0);
11944 rb_define_method(rb_cWeakMap, "each", wmap_each, 0);
11945 rb_define_method(rb_cWeakMap, "each_pair", wmap_each, 0);
11946 rb_define_method(rb_cWeakMap, "each_key", wmap_each_key, 0);
11947 rb_define_method(rb_cWeakMap, "each_value", wmap_each_value, 0);
11948 rb_define_method(rb_cWeakMap, "keys", wmap_keys, 0);
11949 rb_define_method(rb_cWeakMap, "values", wmap_values, 0);
11950 rb_define_method(rb_cWeakMap, "size", wmap_size, 0);
11951 rb_define_method(rb_cWeakMap, "length", wmap_size, 0);
11952 rb_include_module(rb_cWeakMap, rb_mEnumerable);
11953 }
11954
11955 /* internal methods */
11956 rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
11957 rb_define_singleton_method(rb_mGC, "verify_compaction_references", gc_verify_compaction_references, -1);
11958 rb_define_singleton_method(rb_mGC, "verify_transient_heap_internal_consistency", gc_verify_transient_heap_internal_consistency, 0);
11959#if MALLOC_ALLOCATED_SIZE
11960 rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
11961 rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
11962#endif
11963
11964#if GC_DEBUG_STRESS_TO_CLASS
11965 rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
11966 rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
11967#endif
11968
11969 {
11970 VALUE opts;
11971 /* GC build options */
11972 rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
11973#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
11974 OPT(GC_DEBUG);
11975 OPT(USE_RGENGC);
11976 OPT(RGENGC_DEBUG);
11977 OPT(RGENGC_CHECK_MODE);
11978 OPT(RGENGC_PROFILE);
11979 OPT(RGENGC_ESTIMATE_OLDMALLOC);
11980 OPT(GC_PROFILE_MORE_DETAIL);
11981 OPT(GC_ENABLE_LAZY_SWEEP);
11982 OPT(CALC_EXACT_MALLOC_SIZE);
11983 OPT(MALLOC_ALLOCATED_SIZE);
11984 OPT(MALLOC_ALLOCATED_SIZE_CHECK);
11985 OPT(GC_PROFILE_DETAIL_MEMORY);
11986#undef OPT
11987 OBJ_FREEZE(opts);
11988 }
11989}
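/* Editor's note: Init_GC exposes the build-time configuration that the rest of
 * this file is compiled against, which can be inspected from Ruby:
 *
 *    GC::INTERNAL_CONSTANTS[:RVALUE_SIZE]           # sizeof(RVALUE), typically 40 on 64-bit
 *    GC::INTERNAL_CONSTANTS[:HEAP_PAGE_OBJ_LIMIT]   # object slots per heap page
 *    GC::OPTS                                       # enabled build options, e.g. ["GC_ENABLE_LAZY_SWEEP", ...]
 *    GC::Profiler.singleton_methods.sort            # the profiler API registered above
 */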
11990
11991#ifdef ruby_xmalloc
11992#undef ruby_xmalloc
11993#endif
11994#ifdef ruby_xmalloc2
11995#undef ruby_xmalloc2
11996#endif
11997#ifdef ruby_xcalloc
11998#undef ruby_xcalloc
11999#endif
12000#ifdef ruby_xrealloc
12001#undef ruby_xrealloc
12002#endif
12003#ifdef ruby_xrealloc2
12004#undef ruby_xrealloc2
12005#endif
12006
12007void *
12008ruby_xmalloc(size_t size)
12009{
12010#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12011 ruby_malloc_info_file = __FILE__;
12012 ruby_malloc_info_line = __LINE__;
12013#endif
12014 return ruby_xmalloc_body(size);
12015}
12016
12017void *
12018ruby_xmalloc2(size_t n, size_t size)
12019{
12020#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12021 ruby_malloc_info_file = __FILE__;
12022 ruby_malloc_info_line = __LINE__;
12023#endif
12024 return ruby_xmalloc2_body(n, size);
12025}
12026
12027void *
12028ruby_xcalloc(size_t n, size_t size)
12029{
12030#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12031 ruby_malloc_info_file = __FILE__;
12032 ruby_malloc_info_line = __LINE__;
12033#endif
12034 return ruby_xcalloc_body(n, size);
12035}
12036
12037void *
12038ruby_xrealloc(void *ptr, size_t new_size)
12039{
12040#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12041 ruby_malloc_info_file = __FILE__;
12042 ruby_malloc_info_line = __LINE__;
12043#endif
12044 return ruby_xrealloc_body(ptr, new_size);
12045}
12046
12047void *
12048ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
12049{
12050#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12051 ruby_malloc_info_file = __FILE__;
12052 ruby_malloc_info_line = __LINE__;
12053#endif
12054 return ruby_xrealloc2_body(ptr, n, new_size);
12055}
#define CHECK(sub)
Definition: compile.c:448
#define __asm__
Definition: Context.c:14
#define sub(x, y)
Definition: date_strftime.c:24
#define add(x, y)
Definition: date_strftime.c:23
#define mod(x, y)
Definition: date_strftime.c:28
struct RIMemo * ptr
Definition: debug.c:65
union @73::@75 imemo
enum imemo_type types
Definition: debug.c:63
int rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
Definition: vm_trace.c:1614
#define free(x)
Definition: dln.c:52
struct rb_encoding_entry * list
Definition: encoding.c:56
int count
Definition: encoding.c:57
char str[HTML_ESCAPE_MAX_LEN+1]
Definition: escape.c:18
#define O(member)
#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing)
Definition: gc.c:1078
VALUE rb_wb_protected_newobj_of(VALUE klass, VALUE flags)
Definition: gc.c:2294
#define RGENGC_PROFILE
Definition: gc.c:421
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR
Definition: gc.c:271
#define GC_OLDMALLOC_LIMIT_MAX
Definition: gc.c:301
int rb_objspace_internal_object_p(VALUE obj)
Definition: gc.c:3110
#define I(s)
VALUE * ruby_initial_gc_stress_ptr
Definition: gc.c:905
#define GC_MALLOC_LIMIT_MAX
Definition: gc.c:288
VALUE rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
Definition: gc.c:2391
volatile VALUE * rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
Definition: gc.c:250
void ruby_xfree(void *x)
Definition: gc.c:10183
#define STACK_END
Definition: gc.c:4617
#define popcount_bits
Definition: gc.c:623
#define CEILDIV(i, mod)
Definition: gc.c:833
#define APPENDF(f)
#define OBJ_ID_INITIAL
Definition: gc.c:2897
void rb_memerror(void)
Definition: gc.c:9611
#define GC_HEAP_FREE_SLOTS_MIN_RATIO
Definition: gc.c:275
#define MALLOC_ALLOCATED_SIZE
Definition: gc.c:477
#define GC_ENABLE_INCREMENTAL_MARK
Definition: gc.c:464
#define STACK_START
Definition: gc.c:4616
#define heap_eden
Definition: gc.c:919
#define ARY_SHARED_P(ary)
Definition: gc.c:11456
#define obj_id_to_ref(objid)
Definition: gc.c:975
#define stress_to_class
Definition: gc.c:931
#define NUM2PTR(x)
#define CALC_EXACT_MALLOC_SIZE
Definition: gc.c:470
gc_stat_sym
Definition: gc.c:8853
@ gc_stat_sym_oldmalloc_increase_bytes
Definition: gc.c:8880
@ gc_stat_sym_total_freed_objects
Definition: gc.c:8868
@ gc_stat_sym_old_objects
Definition: gc.c:8877
@ gc_stat_sym_total_allocated_objects
Definition: gc.c:8867
@ gc_stat_sym_compact_count
Definition: gc.c:8874
@ gc_stat_sym_heap_allocatable_pages
Definition: gc.c:8857
@ gc_stat_sym_old_objects_limit
Definition: gc.c:8878
@ gc_stat_sym_heap_live_slots
Definition: gc.c:8859
@ gc_stat_sym_heap_free_slots
Definition: gc.c:8860
@ gc_stat_sym_heap_marked_slots
Definition: gc.c:8862
@ gc_stat_sym_total_allocated_pages
Definition: gc.c:8865
@ gc_stat_sym_oldmalloc_increase_bytes_limit
Definition: gc.c:8881
@ gc_stat_sym_count
Definition: gc.c:8854
@ gc_stat_sym_heap_available_slots
Definition: gc.c:8858
@ gc_stat_sym_remembered_wb_unprotected_objects_limit
Definition: gc.c:8876
@ gc_stat_sym_last
Definition: gc.c:8892
@ gc_stat_sym_malloc_increase_bytes_limit
Definition: gc.c:8870
@ gc_stat_sym_heap_final_slots
Definition: gc.c:8861
@ gc_stat_sym_total_freed_pages
Definition: gc.c:8866
@ gc_stat_sym_heap_sorted_length
Definition: gc.c:8856
@ gc_stat_sym_heap_tomb_pages
Definition: gc.c:8864
@ gc_stat_sym_heap_allocated_pages
Definition: gc.c:8855
@ gc_stat_sym_heap_eden_pages
Definition: gc.c:8863
@ gc_stat_sym_remembered_wb_unprotected_objects
Definition: gc.c:8875
@ gc_stat_sym_minor_gc_count
Definition: gc.c:8872
@ gc_stat_sym_malloc_increase_bytes
Definition: gc.c:8869
@ gc_stat_sym_major_gc_count
Definition: gc.c:8873
#define RGENGC_ESTIMATE_OLDMALLOC
Definition: gc.c:431
@ REQUIRED_SIZE_BY_MALLOC
Definition: gc.c:837
@ HEAP_PAGE_BITMAP_LIMIT
Definition: gc.c:840
@ HEAP_PAGE_OBJ_LIMIT
Definition: gc.c:839
@ HEAP_PAGE_BITMAP_SIZE
Definition: gc.c:841
@ HEAP_PAGE_BITMAP_PLANES
Definition: gc.c:842
@ HEAP_PAGE_ALIGN_MASK
Definition: gc.c:836
@ HEAP_PAGE_SIZE
Definition: gc.c:838
@ HEAP_PAGE_ALIGN
Definition: gc.c:835
#define RGENGC_CHECK_MODE
Definition: gc.c:399
#define GC_HEAP_GROWTH_MAX_SLOTS
Definition: gc.c:268
int rb_objspace_garbage_object_p(VALUE obj)
Definition: gc.c:3620
#define GC_PROFILE_DETAIL_MEMORY
Definition: gc.c:461
#define ARY_EMBED_P(ary)
Definition: gc.c:11459
#define RVALUE_PIN_BITMAP(obj)
Definition: gc.c:1224
VALUE rb_gc_location(VALUE value)
Definition: gc.c:8127
#define heap_pages_final_slots
Definition: gc.c:917
struct stack_chunk stack_chunk_t
#define GC_MALLOC_LIMIT_MIN
Definition: gc.c:285
void ruby_sized_xfree(void *x, size_t size)
Definition: gc.c:10175
VALUE rb_newobj(void)
Definition: gc.c:2303
VALUE rb_gc_disable(void)
Definition: gc.c:9262
size_t rb_objspace_data_type_memsize(VALUE obj)
Definition: gc.c:2432
#define is_marking(objspace)
Definition: gc.c:953
#define gc_mode(objspace)
Definition: gc.c:950
#define gc_prof_enabled(objspace)
Definition: gc.c:1087
void Init_heap(void)
Definition: gc.c:2924
NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace))
VALUE rb_gc_start(void)
Definition: gc.c:8688
void * rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
Definition: gc.c:10260
#define UNEXPECTED_NODE(func)
Definition: gc.c:2314
void * rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z)
Definition: gc.c:10196
void rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
Definition: gc.c:6954
void rb_mark_tbl_no_pin(st_table *tbl)
Definition: gc.c:5027
void * ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
Definition: gc.c:10140
#define heap_pages_freeable_pages
Definition: gc.c:916
void ruby_mimfree(void *ptr)
Definition: gc.c:10250
rb_imemo_tmpbuf_t * rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
Definition: gc.c:2341
#define heap_pages_deferred_final
Definition: gc.c:918
MJIT_FUNC_EXPORTED int rb_ec_stack_check(rb_execution_context_t *ec)
Definition: gc.c:4681
#define gc_report
Definition: gc.c:1093
VALUE rb_gc_enable(void)
Definition: gc.c:9225
void * ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
Definition: gc.c:12048
void rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
Definition: gc.c:11726
const char * rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
Definition: gc.c:11477
VALUE rb_undefine_finalizer(VALUE obj)
Definition: gc.c:3206
#define ruby_gc_stress_mode
Definition: gc.c:927
#define finalizing
Definition: gc.c:923
#define global_list
Definition: gc.c:925
void rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
Definition: gc.c:4997
rb_symbols_t ruby_global_symbols
Definition: symbol.c:66
#define COUNT_TYPE(t)
int rb_during_gc(void)
Definition: gc.c:8703
#define MALLOC_ALLOCATED_SIZE_CHECK
Definition: gc.c:480
struct mark_stack mark_stack_t
void rb_mark_set(st_table *tbl)
Definition: gc.c:4814
#define rb_data_object_alloc
Definition: gc.c:14
void * ruby_xcalloc(size_t n, size_t size)
Definition: gc.c:12028
void * ruby_xmalloc2_body(size_t n, size_t size)
Definition: gc.c:10115
#define CLEAR_IN_BITMAP(bits, p)
Definition: gc.c:888
#define HEAP_PAGE_ALIGN_LOG
Definition: gc.c:832
#define GET_HEAP_MARKING_BITS(x)
Definition: gc.c:896
void rb_mark_hash(st_table *tbl)
Definition: gc.c:4878
#define heap_pages_lomem
Definition: gc.c:913
VALUE rb_obj_id(VALUE obj)
Definition: gc.c:3786
void rb_gc_mark_movable(VALUE ptr)
Definition: gc.c:5222
#define GET_HEAP_WB_UNPROTECTED_BITS(x)
Definition: gc.c:895
void rb_gc_mark_maybe(VALUE obj)
Definition: gc.c:5060
#define nomem_error
Definition: gc.c:995
#define global_symbols
Definition: gc.c:8423
#define GC_ENABLE_LAZY_SWEEP
Definition: gc.c:467
NORETURN(static void negative_size_allocation_error(const char *))
#define OBJ_ID_INCREMENT
Definition: gc.c:2896
#define MARK_IN_BITMAP(bits, p)
Definition: gc.c:887
#define BUFF_ARGS
#define GC_PROFILE_RECORD_DEFAULT_SIZE
Definition: gc.c:10789
#define RVALUE_PAGE_MARKING(page, obj)
Definition: gc.c:1234
int rb_objspace_marked_object_p(VALUE obj)
Definition: gc.c:5238
void * rb_xmalloc_mul_add(size_t x, size_t y, size_t z)
Definition: gc.c:10189
VALUE rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
Definition: gc.c:6976
#define GET_PAGE_BODY(x)
Definition: gc.c:876
void * rb_aligned_malloc(size_t alignment, size_t size)
Definition: gc.c:9643
void rb_mark_tbl(st_table *tbl)
Definition: gc.c:5021
#define SET_STACK_END
Definition: gc.c:4614
MJIT_FUNC_EXPORTED void rb_gc_writebarrier_remember(VALUE obj)
Definition: gc.c:6891
VALUE rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
Definition: gc.c:2412
#define NUM_IN_PAGE(p)
Definition: gc.c:880
#define malloc_allocated_size
Definition: gc.c:909
uintptr_t bits_t
Definition: gc.c:618
VALUE rb_mGC
Definition: gc.c:1000
#define TRY_WITH_GC(alloc)
Definition: gc.c:9854
#define STACK_LENGTH
Definition: gc.c:4630
void * ruby_mimmalloc(size_t size)
Definition: gc.c:10220
#define GC_HEAP_INIT_SLOTS
Definition: gc.c:259
#define heap_pages_sorted_length
Definition: gc.c:912
#define RVALUE_WB_UNPROTECTED_BITMAP(obj)
Definition: gc.c:1228
size_t rb_obj_memsize_of(VALUE obj)
Definition: gc.c:3950
void rb_gc_mark_locations(const VALUE *start, const VALUE *end)
Definition: gc.c:4715
void rb_objspace_reachable_objects_from(VALUE obj, void(func)(VALUE, void *), void *data)
Definition: gc.c:9481
#define IMEMO_NAME(x)
const char * rb_method_type_name(rb_method_type_t type)
Definition: gc.c:11436
gc_stat_compat_sym
Definition: gc.c:8895
@ gc_stat_compat_sym_oldmalloc_limit
Definition: gc.c:8917
@ gc_stat_compat_sym_old_object
Definition: gc.c:8908
@ gc_stat_compat_sym_remembered_shady_object
Definition: gc.c:8906
@ gc_stat_compat_sym_heap_live_slot
Definition: gc.c:8901
@ gc_stat_compat_sym_heap_length
Definition: gc.c:8900
@ gc_stat_compat_sym_last
Definition: gc.c:8919
@ gc_stat_compat_sym_malloc_increase
Definition: gc.c:8913
@ gc_stat_compat_sym_total_freed_object
Definition: gc.c:8912
@ gc_stat_compat_sym_old_object_limit
Definition: gc.c:8909
@ gc_stat_compat_sym_heap_free_slot
Definition: gc.c:8902
@ gc_stat_compat_sym_heap_swept_slot
Definition: gc.c:8904
@ gc_stat_compat_sym_remembered_shady_object_limit
Definition: gc.c:8907
@ gc_stat_compat_sym_heap_increment
Definition: gc.c:8899
@ gc_stat_compat_sym_malloc_limit
Definition: gc.c:8914
@ gc_stat_compat_sym_heap_final_slot
Definition: gc.c:8903
@ gc_stat_compat_sym_oldmalloc_increase
Definition: gc.c:8916
@ gc_stat_compat_sym_heap_tomb_page_length
Definition: gc.c:8898
@ gc_stat_compat_sym_gc_stat_heap_used
Definition: gc.c:8896
@ gc_stat_compat_sym_total_allocated_object
Definition: gc.c:8911
@ gc_stat_compat_sym_heap_eden_page_length
Definition: gc.c:8897
#define STACKFRAME_FOR_CALL_CFUNC
Definition: gc.c:4678
VALUE rb_memory_id(VALUE obj)
Definition: gc.c:3753
void rb_gc(void)
Definition: gc.c:8695
PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char *,...)), 2, 3)
#define RGENGC_OLD_NEWOBJ_CHECK
Definition: gc.c:412
#define GC_ASSERT(expr)
Definition: gc.c:403
#define SET(name, attr)
#define TYPE_NAME(t)
struct gc_profile_record gc_profile_record
const char * rb_objspace_data_type_name(VALUE obj)
Definition: gc.c:2445
rb_objspace_t * rb_objspace_alloc(void)
Definition: gc.c:1600
#define gc_mode_set(objspace, mode)
Definition: gc.c:951
#define POP_MARK_FUNC_DATA()
Definition: gc.c:1102
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
Definition: gc.c:298
void rb_gc_force_recycle(VALUE obj)
Definition: gc.c:7027
#define GET_STACK_BOUNDS(start, end, appendix)
Definition: gc.c:4963
void * rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
Definition: gc.c:10210
void ruby_gc_set_params(void)
Definition: gc.c:9434
void rb_objspace_set_event_hook(const rb_event_flag_t event)
Definition: gc.c:2095
#define malloc_limit
Definition: gc.c:907
#define BITMAP_INDEX(p)
Definition: gc.c:881
void rb_gcdebug_print_obj_condition(VALUE obj)
#define S(s)
int ruby_get_stack_grow_direction(volatile VALUE *addr)
Definition: gc.c:4636
void * ruby_xrealloc_body(void *ptr, size_t new_size)
Definition: gc.c:10150
void rb_gc_copy_finalizer(VALUE dest, VALUE obj)
Definition: gc.c:3310
size_t rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
Definition: gc.c:219
void rb_iseq_update_references(rb_iseq_t *iseq)
Definition: iseq.c:221
#define GET_HEAP_MARK_BITS(x)
Definition: gc.c:891
#define RANY(o)
Definition: gc.c:984
void rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
Definition: gc.c:3063
int rb_objspace_markable_object_p(VALUE obj)
Definition: gc.c:3613
void rb_gc_update_tbl_refs(st_table *ptr)
Definition: gc.c:7999
#define RGENGC_DEBUG
Definition: gc.c:380
void rb_gc_mark(VALUE ptr)
Definition: gc.c:5228
void rb_gc_verify_internal_consistency(void)
Definition: gc.c:6218
#define gc_event_hook_available_p(objspace)
Definition: gc.c:2114
void ruby_malloc_size_overflow(size_t count, size_t elsize)
Definition: gc.c:10107
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO
Definition: gc.c:278
#define RVALUE_MARK_BITMAP(obj)
Definition: gc.c:1223
#define RZOMBIE(o)
Definition: gc.c:993
void rb_gc_mark_values(long n, const VALUE *values)
Definition: gc.c:4731
void * ruby_xmalloc(size_t size)
Definition: gc.c:12008
#define OLD_SYM(s)
void rb_obj_info_dump(VALUE obj)
Definition: gc.c:11719
#define STACK_CHUNK_SIZE
Definition: gc.c:640
#define gc_prof_record(objspace)
Definition: gc.c:1086
#define RVALUE_OLD_AGE
Definition: gc.c:1236
void Init_GC(void)
Definition: gc.c:11890
#define heap_tomb
Definition: gc.c:920
#define MARK_CHECKPOINT(category)
VALUE rb_newobj_of(VALUE klass, VALUE flags)
Definition: gc.c:2309
#define UPDATE_IF_MOVED(_objspace, _thing)
Definition: gc.c:1084
#define PUSH_MARK_FUNC_DATA(v)
Definition: gc.c:1098
#define is_incremental_marking(objspace)
Definition: gc.c:961
void rb_vm_update_references(void *ptr)
Definition: vm.c:2234
memop_type
Definition: gc.c:9700
@ MEMOP_TYPE_FREE
Definition: gc.c:9702
@ MEMOP_TYPE_MALLOC
Definition: gc.c:9701
@ MEMOP_TYPE_REALLOC
Definition: gc.c:9703
#define GC_MALLOC_LIMIT_GROWTH_FACTOR
Definition: gc.c:291
#define is_sweeping(objspace)
Definition: gc.c:954
int ruby_stack_grow_direction
Definition: gc.c:4634
ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n))
int ruby_disable_gc
Definition: gc.c:1001
#define heap_pages_himem
Definition: gc.c:914
#define RVALUE_AGE_SHIFT
Definition: gc.c:1237
VALUE rb_define_finalizer(VALUE obj, VALUE block)
Definition: gc.c:3302
RUBY_ALIAS_FUNCTION(rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree), rb_data_object_wrap,(klass, datap, dmark, dfree))
Definition: gc.c:2398
#define MARK_OBJECT_ARY_BUCKET_SIZE
Definition: gc.c:7075
#define has_sweeping_pages(heap)
Definition: gc.c:970
VALUE rb_objspace_gc_enable(rb_objspace_t *objspace)
Definition: gc.c:9232
NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr))
#define RGENGC_FORCE_MAJOR_GC
Definition: gc.c:438
void * ruby_xmalloc2(size_t n, size_t size)
Definition: gc.c:12018
gc_profile_record_flag
Definition: gc.c:491
@ GPR_FLAG_MAJOR_BY_FORCE
Definition: gc.c:497
@ GPR_FLAG_HAVE_FINALIZE
Definition: gc.c:512
@ GPR_DEFAULT_REASON
Definition: gc.c:516
@ GPR_FLAG_IMMEDIATE_SWEEP
Definition: gc.c:511
@ GPR_FLAG_MAJOR_BY_OLDMALLOC
Definition: gc.c:499
@ GPR_FLAG_MAJOR_MASK
Definition: gc.c:501
@ GPR_FLAG_NEWOBJ
Definition: gc.c:504
@ GPR_FLAG_MAJOR_BY_SHADY
Definition: gc.c:496
@ GPR_FLAG_NONE
Definition: gc.c:492
@ GPR_FLAG_FULL_MARK
Definition: gc.c:514
@ GPR_FLAG_MAJOR_BY_NOFREE
Definition: gc.c:494
@ GPR_FLAG_IMMEDIATE_MARK
Definition: gc.c:513
@ GPR_FLAG_MAJOR_BY_OLDGEN
Definition: gc.c:495
@ GPR_FLAG_CAPI
Definition: gc.c:507
@ GPR_FLAG_METHOD
Definition: gc.c:506
@ GPR_FLAG_STRESS
Definition: gc.c:508
@ GPR_FLAG_MALLOC
Definition: gc.c:505
struct rb_objspace rb_objspace_t
#define GC_OLDMALLOC_LIMIT_MIN
Definition: gc.c:295
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj)
Definition: gc.c:1233
void * ruby_xrealloc(void *ptr, size_t new_size)
Definition: gc.c:12038
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj)
Definition: gc.c:1229
#define rb_objspace
Definition: gc.c:900
VALUE rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
Definition: gc.c:2321
void rb_objspace_each_objects(each_obj_callback *callback, void *data)
Definition: gc.c:3040
void * ruby_xmalloc_body(size_t size)
Definition: gc.c:10098
void * ruby_xcalloc_body(size_t n, size_t size)
Definition: gc.c:10131
void rb_objspace_reachable_objects_from_root(void(func)(const char *category, VALUE, void *), void *passing_data)
Definition: gc.c:9509
VALUE rb_gc_disable_no_rest(void)
Definition: gc.c:9247
#define nonspecial_obj_id(obj)
Definition: gc.c:974
void rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
Definition: gc.c:6925
VALUE rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags)
Definition: gc.c:2287
void rb_objspace_call_finalizer(rb_objspace_t *objspace)
Definition: gc.c:3456
gc_mode
Definition: gc.c:670
@ gc_mode_sweeping
Definition: gc.c:673
@ gc_mode_marking
Definition: gc.c:672
@ gc_mode_none
Definition: gc.c:671
#define gc_event_hook(objspace, event, data)
Definition: gc.c:2117
size_t rb_gc_stat(VALUE key)
Definition: gc.c:9190
#define GC_HEAP_GROWTH_FACTOR
Definition: gc.c:265
#define BITMAP_BIT(p)
Definition: gc.c:883
@ BITS_BITLENGTH
Definition: gc.c:621
@ BITS_SIZE
Definition: gc.c:620
void rb_iseq_mark(const rb_iseq_t *iseq)
Definition: iseq.c:287
void * ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
Definition: gc.c:10159
void rb_iseq_free(const rb_iseq_t *iseq)
Definition: iseq.c:89
void rb_objspace_free(rb_objspace_t *objspace)
Definition: gc.c:1615
#define heap_pages_sorted
Definition: gc.c:910
void rb_free_const_table(struct rb_id_table *tbl)
Definition: gc.c:2506
#define MARKED_IN_BITMAP(bits, p)
Definition: gc.c:886
#define finalizer_table
Definition: gc.c:924
#define gc_stress_full_mark_after_malloc_p()
Definition: gc.c:7142
int ruby_rgengc_debug
Definition: gc.c:388
size_t rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
Definition: gc.c:192
void rb_gc_mark_vm_stack_values(long n, const VALUE *values)
Definition: gc.c:4755
VALUE rb_objspace_gc_disable(rb_objspace_t *objspace)
Definition: gc.c:9269
void rb_malloc_info_show_results(void)
Definition: gc.c:10012
#define GET_HEAP_PINNED_BITS(x)
Definition: gc.c:892
#define heap_allocated_pages
Definition: gc.c:911
#define RVALUE_MARKING_BITMAP(obj)
Definition: gc.c:1230
#define GC_HEAP_FREE_SLOTS_MAX_RATIO
Definition: gc.c:281
#define OPT(o)
#define ruby_gc_stressful
Definition: gc.c:926
#define C(c, s)
int page_compare_func_t(const void *, const void *, void *)
Definition: gc.c:7782
void Init_gc_stress(void)
Definition: gc.c:2944
void rb_gc_adjust_memory_usage(ssize_t diff)
Definition: gc.c:10333
#define GET_HEAP_PAGE(x)
Definition: gc.c:878
const struct st_hash_type rb_hashtype_ident
Definition: hash.c:322
#define will_be_incremental_marking(objspace)
Definition: gc.c:966
#define during_gc
Definition: gc.c:922
VALUE rb_gc_latest_gc_info(VALUE key)
Definition: gc.c:8832
void * rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
Definition: gc.c:10203
#define RUBY_DTRACE_GC_HOOK(name)
Definition: gc.c:10928
#define heap_allocatable_pages
Definition: gc.c:915
MJIT_FUNC_EXPORTED const char * rb_obj_info(VALUE obj)
Definition: gc.c:11713
#define RESTORE_FINALIZER()
#define GC_HEAP_FREE_SLOTS
Definition: gc.c:262
struct rb_heap_struct rb_heap_t
PUREFUNC(static inline int is_id_value(rb_objspace_t *objspace, VALUE ptr))
size_t rb_obj_gc_flags(VALUE obj, ID *flags, size_t max)
Definition: gc.c:6992
#define malloc_increase
Definition: gc.c:908
size_t rb_iseq_memsize(const rb_iseq_t *iseq)
Definition: iseq.c:373
#define STACK_LEVEL_MAX
Definition: gc.c:4618
@ gc_stress_full_mark_after_malloc
Definition: gc.c:7138
@ gc_stress_no_immediate_sweep
Definition: gc.c:7137
@ gc_stress_max
Definition: gc.c:7139
@ gc_stress_no_major
Definition: gc.c:7136
size_t rb_gc_count(void)
Definition: gc.c:8727
#define rb_data_typed_object_alloc
Definition: gc.c:15
#define dont_gc
Definition: gc.c:921
#define is_lazy_sweeping(heap)
Definition: gc.c:971
void * ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
Definition: gc.c:10166
#define rb_jmp_buf
Definition: gc.c:82
#define rb_objspace_of(vm)
Definition: gc.c:901
#define GC_DEBUG
Definition: gc.c:365
VALUE rb_obj_rgengc_promoted_p(VALUE obj)
Definition: gc.c:6986
int each_obj_callback(void *, void *, size_t, void *)
Definition: gc.c:2951
#define ruby_initial_gc_stress
Definition: gc.c:903
volatile VALUE rb_gc_guarded_val
Definition: gc.c:248
#define GET_HEAP_UNCOLLECTIBLE_BITS(x)
Definition: gc.c:894
#define NEW_SYM(s)
#define is_full_marking(objspace)
Definition: gc.c:956
#define RVALUE_PAGE_WB_UNPROTECTED(page, obj)
Definition: gc.c:1232
struct RVALUE RVALUE
#define GC_PROFILE_MORE_DETAIL
Definition: gc.c:458
#define rb_setjmp(env)
Definition: gc.c:81
void rb_include_module(VALUE, VALUE)
Definition: class.c:882
void rb_class_detach_subclasses(VALUE)
Definition: class.c:136
VALUE rb_define_class_under(VALUE, const char *, VALUE)
Defines a class under the namespace of outer.
Definition: class.c:711
int rb_singleton_class_internal_p(VALUE sklass)
Definition: class.c:468
VALUE rb_define_module(const char *)
Definition: class.c:785
void rb_class_detach_module_subclasses(VALUE)
Definition: class.c:148
void rb_class_remove_from_module_subclasses(VALUE)
Definition: class.c:97
VALUE rb_define_module_under(VALUE, const char *)
Definition: class.c:810
void rb_class_remove_from_super_subclasses(VALUE)
Definition: class.c:79
int rb_get_kwargs(VALUE keyword_hash, const ID *table, int required, int optional, VALUE *)
Definition: class.c:1904
VALUE rb_mKernel
Kernel module.
Definition: ruby.h:2000
@ ROBJECT_EMBED
Definition: ruby.h:917
void(* dmark)(void *)
Definition: ruby.h:1151
void rb_gc_register_mark_object(VALUE obj)
Definition: gc.c:7079
void rb_gc_unregister_address(VALUE *addr)
Definition: gc.c:7105
const VALUE shared_root
Definition: ruby.h:1060
VALUE rb_cBasicObject
BasicObject class.
Definition: ruby.h:2011
VALUE rb_cObject
Object class.
Definition: ruby.h:2012
struct rb_io_t * fptr
Definition: ruby.h:1136
void rb_gc_writebarrier_unprotect(VALUE obj)
Definition: gc.c:6854
void rb_global_variable(VALUE *var)
Definition: gc.c:7128
const VALUE src
Definition: ruby.h:1115
union RString::@157::@158::@159 aux
@ RARRAY_EMBED_FLAG
Definition: ruby.h:1029
void * rb_alloc_tmp_buffer(volatile VALUE *store, long len)
Definition: gc.c:10278
const rb_data_type_t * type
Definition: ruby.h:1170
void rb_gc_register_address(VALUE *addr)
Definition: gc.c:7093
union RArray::@160 as
void rb_gc_writebarrier(VALUE a, VALUE b)
Definition: gc.c:6833
void rb_free_tmp_buffer(volatile VALUE *store)
Definition: gc.c:10290
struct RArray::@160::@161 heap
VALUE shared
Definition: ruby.h:996
VALUE rb_mEnumerable
Definition: enum.c:20
union RString::@157 as
union RArray::@160::@161::@162 aux
void(* dcompact)(void *)
Definition: ruby.h:1154
struct RString::@157::@158 heap
@ RUBY_FL_WB_PROTECTED
Definition: ruby.h:842
struct rb_data_type_struct::@163 function
void(* dmark)(void *)
Definition: ruby.h:1141
int ruby_stack_check(void)
Definition: gc.c:4687
size_t ruby_stack_length(VALUE **p)
Definition: gc.c:4647
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2671
void rb_bug(const char *fmt,...)
Definition: error.c:636
VALUE rb_eNoMemError
Definition: error.c:935
VALUE rb_eRangeError
Definition: error.c:928
VALUE rb_eTypeError
Definition: error.c:924
void rb_vraise(VALUE exc, const char *fmt, va_list ap)
Definition: error.c:2665
VALUE rb_eRuntimeError
Definition: error.c:922
void rb_warn(const char *fmt,...)
Definition: error.c:315
VALUE rb_eArgError
Definition: error.c:925
VALUE rb_ensure(VALUE(*)(VALUE), VALUE, VALUE(*)(VALUE), VALUE)
An equivalent to ensure clause.
Definition: eval.c:1115
VALUE rb_errinfo(void)
The current exception in the current thread.
Definition: eval.c:1882
VALUE rb_any_to_s(VALUE)
Default implementation of #to_s.
Definition: object.c:527
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition: object.c:78
VALUE rb_obj_class(VALUE)
Equivalent to Object#class in Ruby.
Definition: object.c:217
VALUE rb_inspect(VALUE)
Convenient wrapper of Object::inspect.
Definition: object.c:551
VALUE rb_obj_is_kind_of(VALUE, VALUE)
Determines if obj is a kind of c.
Definition: object.c:692
VALUE rb_obj_freeze(VALUE)
Make the object unmodifiable.
Definition: object.c:1080
VALUE rb_to_int(VALUE)
Converts val into Integer.
Definition: object.c:3021
size_t rb_hash_ar_table_size(void)
Definition: hash.c:355
void rb_id_table_foreach_with_replace(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, rb_id_table_update_callback_func_t *replace, void *data)
Definition: id_table.c:270
size_t rb_id_table_memsize(const struct rb_id_table *tbl)
Definition: id_table.c:123
void rb_id_table_free(struct rb_id_table *tbl)
Definition: id_table.c:102
void rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data)
Definition: id_table.c:311
rb_id_table_iterator_result
Definition: id_table.h:8
@ ID_TABLE_REPLACE
Definition: id_table.h:12
@ ID_TABLE_CONTINUE
Definition: id_table.h:9
#define __asan_region_is_poisoned(x, y)
Definition: internal.h:110
MJIT_STATIC VALUE ruby_vm_special_exception_copy(VALUE)
Definition: vm_insnhelper.c:34
RUBY_FUNC_EXPORTED size_t rb_io_memsize(const rb_io_t *fptr)
Definition: io.c:4764
#define N
Definition: lgamma_r.c:20
#define M
Definition: mt19937.c:53
VALUE type(ANYARGS)
ANYARGS-ed function type.
Definition: cxxanyargs.hpp:39
const char * name
Definition: nkf.c:208
ONIG_EXTERN void onig_region_free(OnigRegion *region, int free_self)
Definition: regexec.c:343
ONIG_EXTERN void onig_free(OnigRegex)
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
VALUE rb_class_name(VALUE)
Definition: variable.c:274
#define RARRAY_LEN(a)
__uint32_t uint32_t
#define ROBJECT(obj)
VALUE rb_hash_lookup(VALUE, VALUE)
Definition: hash.c:2063
#define MEMCPY(p1, p2, type, n)
#define FLUSH_REGISTER_WINDOWS
#define T_COMPLEX
#define RHASH_TRANSIENT_P(hash)
#define list_del(n)
#define NULL
VALUE rb_iseq_path(const rb_iseq_t *iseq)
Definition: iseq.c:1027
#define FL_SINGLETON
#define ATOMIC_VALUE_EXCHANGE(var, val)
void rb_print_backtrace(void)
Definition: vm_dump.c:750
#define dp(v)
#define RBASIC_CLEAR_CLASS(obj)
#define T_FILE
#define TAG_RAISE
int rb_hash_stlike_foreach_with_replace(VALUE hash, st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg)
Definition: hash.c:1453
VALUE rb_class_path_cached(VALUE)
Definition: variable.c:162
#define FL_EXIVAR
size_t rb_big_size(VALUE)
Definition: bignum.c:6778
#define _(args)
#define list_next(h, i, member)
int clock_gettime(clockid_t clock_id, struct timespec *tp)
Definition: win32.c:4642
#define ULL2NUM(v)
#define RTEST(v)
#define rb_ec_raised_p(ec, f)
#define TAG_NONE
#define VALGRIND_MAKE_MEM_DEFINED(p, n)
#define RHASH_AR_TABLE(hash)
#define RHASH_ST_TABLE_P(h)
#define RCLASS_SUPER(c)
#define FL_TEST(x, f)
VALUE rb_big_hash(VALUE)
Definition: bignum.c:6726
unsigned long st_data_t
VALUE rb_big_eql(VALUE, VALUE)
Definition: bignum.c:5544
#define ATOMIC_EXCHANGE(var, val)
#define RBASIC(obj)
size_t strlen(const char *)
int strcmp(const char *, const char *)
#define RCLASS_CALLABLE_M_TBL(c)
int ruby_thread_has_gvl_p(void)
Definition: thread.c:1704
#define T_STRING
void * malloc(size_t) __attribute__((__malloc__)) __attribute__((__warn_unused_result__)) __attribute__((__alloc_size__(1)))
void rb_mark_end_proc(void)
Definition: eval_jump.c:78
void mjit_gc_exit_hook(void)
#define rb_yield_values(argc,...)
#define FL_ABLE(x)
#define RHASH_IFNONE(h)
rb_control_frame_t * cfp
#define ATOMIC_SIZE_INC(var)
#define RSTRUCT_CONST_PTR(st)
#define PRIuSIZE
#define xfree
time_t time(time_t *_timer)
#define T_MASK
void * memalign(size_t, size_t)
VALUE rb_ary_cat(VALUE, const VALUE *, long)
Definition: array.c:1208
#define Qundef
#define RB_SPECIAL_CONST_P(x)
#define CHAR_BIT
#define rb_ec_raised_set(ec, f)
#define rb_str_cat2
int posix_memalign(void **, size_t, size_t) __attribute__((__nonnull__(1))) __attribute__((__warn_unused_result__))
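posix_memalign above is one of the aligned allocators the GC can use to obtain heap pages at their required alignment. A minimal sketch of that call, with an illustrative wrapper name; the real allocator also falls back to memalign or platform equivalents.

#include <stdlib.h>

/* Allocate size bytes aligned to alignment, which must be a power of
 * two and a multiple of sizeof(void *). Returns NULL on failure; the
 * block is released with free(). */
static void *
aligned_page_alloc(size_t alignment, size_t size)
{
    void *res = NULL;
    if (posix_memalign(&res, alignment, size) != 0) {
        return NULL;
    }
    return res;
}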
int rb_obj_respond_to(VALUE, ID, int)
Definition: vm_method.c:2197
int vfprintf(FILE *__restrict__, const char *__restrict__, __gnuc_va_list)
size_t rb_str_memsize(VALUE)
Definition: string.c:1371
#define RSTRUCT_EMBED_LEN_MASK
VALUE obj
#define rb_check_frozen(obj)
#define FL_SET(x, f)
#define TYPE(x)
#define UINT2NUM(x)
#define T_NIL
#define UNREACHABLE
unsigned long clock_t
#define SIZE_MAX
#define T_FLOAT
void rb_free_generic_ivar(VALUE)
Definition: variable.c:993
#define st_is_member(table, key)
#define RSTRING_PTR(str)
const char * error
#define unsigned
#define T_IMEMO
#define RCLASS_SERIAL(c)
#define T_BIGNUM
int snprintf(char *__restrict__, size_t, const char *__restrict__,...)
#define rb_vm_register_special_exception(sp, e, m)
#define RTYPEDDATA_DATA(v)
void rb_clear_constant_cache(void)
Definition: vm_method.c:87
#define RHASH_AR_TABLE_P(hash)
#define GET_EC()
VALUE rb_obj_is_proc(VALUE)
Definition: proc.c:152
#define STR_SHARED_P(s)
#define NIL_P(v)
VALUE rb_ary_last(int, const VALUE *, VALUE)
Definition: array.c:1677
#define T_STRUCT
VALUE rb_check_funcall(VALUE, ID, int, const VALUE *)
Definition: vm_eval.c:505
const rb_callable_method_entry_t * me
#define RSTRUCT(obj)
#define numberof(array)
#define DBL2NUM(dbl)
void rb_gc_mark_global_tbl(void)
Definition: variable.c:434
void rb_mark_generic_ivar(VALUE)
Definition: variable.c:973
#define VM_ASSERT(expr)
#define rb_special_const_p(obj)
#define ID2SYM(x)
const char * rb_id2name(ID)
Definition: symbol.c:801
#define ELTS_SHARED
#define EC_EXEC_TAG()
#define T_FIXNUM
VALUE rb_int2str(VALUE num, int base)
Definition: numeric.c:3567
int fprintf(FILE *__restrict__, const char *__restrict__,...)
#define ATOMIC_PTR_EXCHANGE(var, val)
#define VM_ENV_DATA_INDEX_ENV
size_t n
#define T_DATA
#define ruby_verbose
#define MEMZERO(p, type, n)
VALUE rb_io_write(VALUE, VALUE)
Definition: io.c:1804
#define FL_SEEN_OBJ_ID
#define FL_PROMOTED0
#define ATOMIC_SET(var, val)
#define FIXNUM_FLAG
#define SYM2ID(x)
unsigned long VALUE
VALUE rb_ary_push(VALUE, VALUE)
Definition: array.c:1195
#define stderr
#define T_NONE
#define T_NODE
void * realloc(void *, size_t) __attribute__((__warn_unused_result__)) __attribute__((__alloc_size__(2)))
#define EC_PUSH_TAG(ec)
const void *__restrict__ src
VALUE rb_sym2str(VALUE)
Definition: symbol.c:784
void rb_str_free(VALUE)
Definition: string.c:1349
#define EC_JUMP_TAG(ec, st)
VALUE rb_func_lambda_new(rb_block_call_func_t func, VALUE val, int min_argc, int max_argc)
Definition: proc.c:735
VALUE rb_str_buf_new(long)
Definition: string.c:1315
void rb_free_method_entry(const rb_method_entry_t *me)
Definition: vm_method.c:174
void * calloc(size_t, size_t) __attribute__((__malloc__)) __attribute__((__warn_unused_result__))
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_)
void rb_ast_free(rb_ast_t *)
Definition: node.c:1352
VALUE rb_obj_is_fiber(VALUE)
Definition: cont.c:1040
#define RARRAY(obj)
#define xmalloc
#define STATIC_SYM_P(x)
#define FL_FINALIZE
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
#define T_MODULE
#define GET_VM()
#define OBJ_PROMOTED(x)
#define FL_UNSET(x, f)
uint32_t i
#define roomof(x, y)
void rb_gc_free_dsymbol(VALUE)
Definition: symbol.c:678
size_t len
#define EXIT_FAILURE
const char * fmt
const char * rb_obj_classname(VALUE)
Definition: variable.c:289
#define FL_TEST_RAW(x, f)
#define ALLOC_N(type, n)
#define OBJ_FREEZE(x)
VALUE rb_block_proc(void)
Definition: proc.c:837
#define INT2NUM(x)
void rb_ast_mark(rb_ast_t *)
Definition: node.c:1340
size_t rb_generic_ivar_memsize(VALUE)
Definition: variable.c:1010
#define SIZED_REALLOC_N(var, type, n, old_n)
VALUE rb_obj_is_thread(VALUE obj)
Definition: vm.c:2657
#define RCLASS_IV_TBL(c)
#define T_TRUE
void rb_vm_mark(void *ptr)
Definition: vm.c:2243
#define T_RATIONAL
void rb_define_module_function(VALUE, const char *, VALUE(*)(), int)
unsigned int rb_atomic_t
#define va_end(v)
#define list_top(h, type, member)
#define FLONUM_P(x)
#define T_ICLASS
VALUE rb_hash_new_with_size(st_index_t size)
Definition: hash.c:1529
#define T_HASH
void rb_strterm_mark(VALUE obj)
Definition: ripper.c:768
#define rb_ec_raised_clear(ec)
#define LONG2NUM(x)
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:2891
__gnuc_va_list va_list
#define long
void rb_define_singleton_method(VALUE, const char *, VALUE(*)(), int)
@ VM_METHOD_TYPE_ATTRSET
@ VM_METHOD_TYPE_CFUNC
@ VM_METHOD_TYPE_OPTIMIZED
@ VM_METHOD_TYPE_REFINED
@ VM_METHOD_TYPE_NOTIMPLEMENTED
@ VM_METHOD_TYPE_MISSING
@ VM_METHOD_TYPE_BMETHOD
@ VM_METHOD_TYPE_ZSUPER
@ VM_METHOD_TYPE_ALIAS
@ VM_METHOD_TYPE_UNDEF
#define RUBY_TYPED_FREE_IMMEDIATELY
#define RUBY_INTERNAL_EVENT_GC_EXIT
#define SIZEOF_VOIDP
#define DYNAMIC_SYM_P(x)
#define RREGEXP_PTR(r)
void rb_ast_update_references(rb_ast_t *)
Definition: node.c:1330
#define TypedData_Get_Struct(obj, type, data_type, sval)
#define GET_THREAD()
#define PRIsVALUE
#define SET_MACHINE_STACK_END(p)
#define rb_ary_new3
void * memset(void *, int, size_t)
#define RUBY_INTERNAL_EVENT_GC_ENTER
void rb_ary_delete_same(VALUE, VALUE)
Definition: array.c:3396
VALUE rb_ary_tmp_new(long)
Definition: array.c:768
#define rb_funcall(recv, mid, argc,...)
#define RUBY_INTERNAL_EVENT_GC_END_SWEEP
#define FIX2INT(x)
VALUE v
VALUE rb_ary_new(void)
Definition: array.c:723
#define PRI_PIDT_PREFIX
#define BIGNUM_LEN(b)
#define rb_scan_args(argc, argvp, fmt,...)
#define list_empty(h)
#define RSTRUCT_TRANSIENT_P(st)
#define list_for_each_safe(h, i, nxt, member)
#define RUBY_INTERNAL_EVENT_GC_END_MARK
void rb_hook_list_mark(rb_hook_list_t *hooks)
Definition: vm_trace.c:53
#define RUBY_INTERNAL_EVENT_OBJSPACE_MASK
#define EC_POP_TAG()
VALUE rb_hash_compare_by_id_p(VALUE hash)
Definition: hash.c:4267
#define T_FALSE
#define BIGNUM_EMBED_FLAG
#define rb_intern(str)
VALUE rb_hash_set_default_proc(VALUE hash, VALUE proc)
Definition: hash.c:2242
#define RCLASS_CONST_TBL(c)
#define INT_MAX
#define RB_DEBUG_COUNTER_INC_IF(type, cond)
#define UNREACHABLE_RETURN(val)
#define va_start(v, l)
VALUE old
#define PRIxVALUE
const rb_iseq_t * iseq
VALUE rb_str_catf(VALUE, const char *,...)
#define RDATA(obj)
#define RCLASS_EXT(c)
struct rb_classext_struct rb_classext_t
#define T_UNDEF
#define RTYPEDDATA_TYPE(v)
#define RARRAY_CONST_PTR_TRANSIENT(a)
#define TRUE
#define RCLASS(obj)
#define FALSE
#define RHASH_ST_TABLE(hash)
#define RHASH_SIZE(h)
unsigned int size
@ imemo_parser_strterm
#define Qtrue
#define MEMMOVE(p1, p2, type, n)
VALUE rb_proc_new(rb_block_call_func_t, VALUE)
Definition: proc.c:2991
long unsigned int size_t
char * strdup(const char *) __attribute__((__malloc__)) __attribute__((__warn_unused_result__))
#define BDIGIT
#define RSTRUCT_LEN(st)
int rb_hash_stlike_foreach(VALUE hash, st_foreach_callback_func *func, st_data_t arg)
Definition: hash.c:1442
#define RUBY_INTERNAL_EVENT_FREEOBJ
#define UNLIKELY(x)
#define ATOMIC_SIZE_ADD(var, val)
struct rb_call_cache buf
const rb_env_t * rb_vm_env_prev_env(const rb_env_t *env)
Definition: vm.c:796
VALUE rb_int_ge(VALUE x, VALUE y)
Definition: numeric.c:4297
VALUE rb_str_append(VALUE, VALUE)
Definition: string.c:2965
#define RCLASS_M_TBL(c)
size_t rb_ast_memsize(const rb_ast_t *)
Definition: node.c:1373
__uintptr_t uintptr_t
#define T_ZOMBIE
void exit(int __status) __attribute__((__noreturn__))
#define Qnil
#define Qfalse
#define bool
#define DATA_PTR(dta)
#define T_ARRAY
#define RUBY_DEFAULT_FREE
void abort(void) __attribute__((__noreturn__))
long strtol(const char *__restrict__ __n, char **__restrict__ __end_PTR, int __base)
void * memcpy(void *__restrict__, const void *__restrict__, size_t)
#define RB_GNUC_EXTENSION
const char * rb_source_location_cstr(int *pline)
Definition: vm.c:1376
VALUE rb_str_buf_append(VALUE, VALUE)
Definition: string.c:2950
#define T_OBJECT
#define list_for_each(h, i, member)
#define rb_io_fptr_finalize
#define RTYPEDDATA_P(v)
#define SIGNED_VALUE
int vsnprintf(char *__restrict__, size_t, const char *__restrict__, __gnuc_va_list)
st_data_t st_index_t
#define RICLASS_IS_ORIGIN
#define ULONG2NUM(x)
#define RARRAY_TRANSIENT_P(ary)
ID rb_sym2id(VALUE)
Definition: symbol.c:748
#define RB_TYPE_P(obj, type)
#define RHASH(obj)
#define FL_WB_PROTECTED
#define INT2FIX(i)
#define SPECIAL_CONST_P(x)
pid_t getpid(void)
#define RUBY_INTERNAL_EVENT_GC_START
#define ALLOC(type)
#define RFILE(obj)
#define T_SYMBOL
#define TypedData_Make_Struct(klass, type, data_type, sval)
#define MJIT_FUNC_EXPORTED
const VALUE * argv
#define T_MATCH
#define SYMBOL_P(x)
_ssize_t ssize_t
uint32_t rb_event_flag_t
void rb_ary_free(VALUE)
Definition: array.c:786
void rb_mv_generic_ivar(VALUE src, VALUE dst)
Definition: variable.c:983
#define FIXNUM_P(f)
#define LL2NUM(v)
#define T_CLASS
#define CLASS_OF(v)
#define RETURN_ENUMERATOR(obj, argc, argv)
int st_foreach_callback_func(st_data_t, st_data_t, st_data_t)
#define Check_Type(v, t)
VALUE rb_hash_aset(VALUE, VALUE, VALUE)
Definition: hash.c:2852
#define DSIZE_T
long long strtoll(const char *__restrict__ __n, char **__restrict__ __end_PTR, int __base)
#define T_MOVED
#define ROBJ_TRANSIENT_P(obj)
#define STACK_UPPER(x, a, b)
unsigned sleep(unsigned int __seconds)
const VALUE * pc
void mjit_remove_class_serial(rb_serial_t class_serial)
#define rb_check_arity
@ VM_ENV_FLAG_WB_REQUIRED
#define FL_PROMOTED1
#define PRIdSIZE
clock_t clock(void)
#define xcalloc
size_t rb_ary_memsize(VALUE)
Definition: array.c:816
#define ROBJECT_IVPTR(o)
int atexit(void(*__func)(void))
#define CLOCK_PROCESS_CPUTIME_ID
#define RUBY_INTERNAL_EVENT_NEWOBJ
VALUE rb_sprintf(const char *,...)
#define RBASIC_CLASS(obj)
#define USE_RGENGC
unsigned long ID
rb_control_frame_t *(__attribute__((__fastcall__)) * rb_insn_func_t)(rb_execution_context_t *, rb_control_frame_t *)
#define RCLASS_IV_INDEX_TBL(c)
#define BIGNUM_DIGITS(b)
VALUE rb_yield(VALUE)
Definition: vm_eval.c:1237
#define RHASH_EMPTY_P(h)
#define PRIuVALUE
#define list_add(h, n)
VALUE rb_obj_is_mutex(VALUE obj)
Definition: thread_sync.c:131
st_index_t h
#define VALGRIND_MAKE_MEM_UNDEFINED(p, n)
#define FL_USHIFT
ID id
#define FIX2LONG(x)
const VALUE exc
#define ID_SCOPE_MASK
void rb_define_method(VALUE, const char *, VALUE(*)(), int)
void rb_clear_method_cache_by_class(VALUE)
Definition: vm_method.c:93
#define RARRAY_AREF(a, i)
int ruby_native_thread_p(void)
Definition: thread.c:5276
#define BUILTIN_TYPE(x)
#define SIZEOF_VALUE
#define ATOMIC_SIZE_EXCHANGE(var, val)
VALUE rb_stdout
int fputs(const char *__restrict__, FILE *__restrict__)
VALUE rb_hash_new(void)
Definition: hash.c:1523
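rb_hash_new above, together with rb_hash_aset listed earlier, is the pattern used when C code assembles a Ruby-visible result hash in the way GC.stat does. A minimal sketch; the keys, counters, and function name are illustrative.

#include "ruby.h"

/* Build a small stat-style Hash such as
 * { count: ..., heap_allocated_pages: ... }. */
static VALUE
build_stat_hash(size_t count, size_t pages)
{
    VALUE hash = rb_hash_new();
    rb_hash_aset(hash, ID2SYM(rb_intern("count")), SIZET2NUM(count));
    rb_hash_aset(hash, ID2SYM(rb_intern("heap_allocated_pages")), SIZET2NUM(pages));
    return hash;
}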
#define VM_UNREACHABLE(func)
#define RMOVED(obj)
#define rb_str_new_cstr(str)
VALUE rb_int_plus(VALUE x, VALUE y)
Definition: numeric.c:3615
#define SIZET2NUM(v)
#define LONG_LONG
void mjit_gc_start_hook(void)
#define ROBJECT_NUMIV(o)
#define T_REGEXP
#define ATOMIC_SIZE_CAS(var, oldval, val)
#define RB_DEBUG_COUNTER_INC(type)
#define LIKELY(x)
#define RMATCH(obj)
Definition: re.h:50
size_t onig_region_memsize(const OnigRegion *regs)
Definition: regcomp.c:5672
size_t onig_memsize(const regex_t *reg)
Definition: regcomp.c:5657
VALUE rb_data_object_zalloc(VALUE, size_t, RUBY_DATA_FUNC, RUBY_DATA_FUNC)
VALUE rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
void(* RUBY_DATA_FUNC)(void *)
Definition: ruby.h:1184
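RUBY_DATA_FUNC and the typed-data allocators above are how extensions hand mark, free, and memsize callbacks to the GC. A minimal sketch of a TypedData wrapper, assuming the usual rb_data_type_t layout; the struct and type names are illustrative.

#include "ruby.h"

struct point { double x, y; };

static void
point_free(void *p)
{
    xfree(p);
}

static size_t
point_memsize(const void *p)
{
    return sizeof(struct point);
}

static const rb_data_type_t point_type = {
    "ExtDemo/Point",
    { 0, point_free, point_memsize, },   /* no dmark: no VALUEs inside */
    0, 0,
    RUBY_TYPED_FREE_IMMEDIATELY,         /* dfree may run during GC */
};

static VALUE
point_alloc(VALUE klass)
{
    struct point *pt;
    return TypedData_Make_Struct(klass, struct point, &point_type, pt);
}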
rb_atomic_t cnt[RUBY_NSIG]
Definition: signal.c:503
#define lo
Definition: siphash.c:21
#define hi
Definition: siphash.c:22
#define f
st_index_t st_numhash(st_data_t n)
Definition: st.c:2175
void st_free_table(st_table *tab)
Definition: st.c:709
st_table * st_init_numtable_with_size(st_index_t size)
Definition: st.c:660
size_t st_memsize(const st_table *tab)
Definition: st.c:719
int st_delete(st_table *tab, st_data_t *key, st_data_t *value)
Definition: st.c:1418
void st_add_direct(st_table *tab, st_data_t key, st_data_t value)
Definition: st.c:1251
st_table * st_init_numtable(void)
Definition: st.c:653
st_table * st_init_strtable(void)
Definition: st.c:668
int st_insert(st_table *tab, st_data_t key, st_data_t value)
Definition: st.c:1171
int st_lookup(st_table *tab, st_data_t key, st_data_t *value)
Definition: st.c:1101
int st_foreach(st_table *tab, st_foreach_callback_func *func, st_data_t arg)
Definition: st.c:1717
st_table * st_init_table(const struct st_hash_type *type)
Definition: st.c:645
int st_foreach_with_replace(st_table *tab, st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg)
Definition: st.c:1699
int st_update(st_table *tab, st_data_t key, st_update_callback_func *func, st_data_t arg)
Definition: st.c:1509
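The st.c functions above make up the generic hash-table API that backs tables such as finalizer_table and obj_to_id_tbl. A self-contained sketch of the basic calls; the table contents and callback are illustrative.

#include "ruby/st.h"

/* st_foreach callback: count each entry and keep iterating. */
static int
count_entry_i(st_data_t key, st_data_t value, st_data_t arg)
{
    size_t *n = (size_t *)arg;
    (*n)++;
    return ST_CONTINUE;
}

static size_t
demo_numtable(void)
{
    st_table *tab = st_init_numtable();
    st_data_t found = 0;
    size_t entries = 0;

    st_insert(tab, (st_data_t)1, (st_data_t)100);
    st_insert(tab, (st_data_t)2, (st_data_t)200);

    if (st_lookup(tab, (st_data_t)1, &found)) {
        /* found is now 100 */
    }

    st_foreach(tab, count_entry_i, (st_data_t)&entries);
    st_free_table(tab);
    return entries;   /* 2 */
}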
Definition: ruby.h:1048
Definition: ruby.h:1139
Definition: ruby.h:1134
const VALUE ifnone
Definition: re.h:43
VALUE regexp
Definition: re.h:47
VALUE str
Definition: re.h:45
Definition: ruby.h:922
Definition: ruby.h:1112
Definition: ruby.h:988
Definition: gc.c:566
rb_env_t env
Definition: gc.c:597
struct RVALUE::@141::@142 free
struct RHash hash
Definition: gc.c:580
struct RData data
Definition: gc.c:581
struct RClass klass
Definition: gc.c:575
struct RBasic basic
Definition: gc.c:573
union RVALUE::@141::@143 imemo
VALUE flags
Definition: gc.c:569
struct MEMO memo
Definition: gc.c:594
struct rb_method_entry_struct ment
Definition: gc.c:595
rb_ast_t ast
Definition: gc.c:599
VALUE v3
Definition: gc.c:605
struct vm_throw_data throw_data
Definition: gc.c:592
VALUE v1
Definition: gc.c:603
struct RMoved moved
Definition: gc.c:572
struct RVALUE::@141::@144 values
struct RBignum bignum
Definition: gc.c:584
struct RObject object
Definition: gc.c:574
struct vm_ifunc ifunc
Definition: gc.c:593
struct RComplex complex
Definition: gc.c:588
struct RArray array
Definition: gc.c:578
struct RFile file
Definition: gc.c:585
struct RFloat flonum
Definition: gc.c:576
struct RRegexp regexp
Definition: gc.c:579
VALUE v2
Definition: gc.c:604
const rb_iseq_t iseq
Definition: gc.c:596
struct RTypedData typeddata
Definition: gc.c:582
struct vm_svar svar
Definition: gc.c:591
struct RString string
Definition: gc.c:577
struct RRational rational
Definition: gc.c:587
rb_cref_t cref
Definition: gc.c:590
struct RStruct rstruct
Definition: gc.c:583
struct RMatch match
Definition: gc.c:586
union RVALUE::@141 as
struct rb_imemo_tmpbuf_struct alloc
Definition: gc.c:598
struct RVALUE * next
Definition: gc.c:570
Definition: gc.c:986
void(* dfree)(void *)
Definition: gc.c:989
VALUE next
Definition: gc.c:988
struct RBasic basic
Definition: gc.c:987
void * data
Definition: gc.c:990
rb_objspace_t * objspace
Definition: gc.c:2957
each_obj_callback * callback
Definition: gc.c:2958
void * data
Definition: gc.c:2959
struct force_finalize_list * next
Definition: gc.c:3440
Definition: gc.c:635
struct gc_list * next
Definition: gc.c:637
VALUE * varptr
Definition: gc.c:636
double gc_invoke_time
Definition: gc.c:525
size_t heap_use_size
Definition: gc.c:528
size_t heap_total_objects
Definition: gc.c:527
size_t heap_total_size
Definition: gc.c:529
double gc_time
Definition: gc.c:524
VALUE exc
Definition: gc.c:9537
va_list * ap
Definition: gc.c:9539
const char * fmt
Definition: gc.c:9538
size_t index
Definition: gc.c:7683
struct heap_page * page
Definition: gc.c:7684
rb_objspace_t * objspace
Definition: gc.c:7685
RVALUE * slot
Definition: gc.c:7682
struct heap_page_header header
Definition: gc.c:630
struct heap_page * page
Definition: gc.c:626
Definition: gc.c:845
short final_slots
Definition: gc.c:849
short total_slots
Definition: gc.c:846
bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:868
short pinned_slots
Definition: gc.c:848
unsigned int has_remembered_objects
Definition: gc.c:852
bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:866
unsigned int before_sweep
Definition: gc.c:851
RVALUE * freelist
Definition: gc.c:859
bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:869
bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:873
bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:863
struct list_node page_node
Definition: gc.c:860
RVALUE * start
Definition: gc.c:858
unsigned int has_uncollectible_shady_objects
Definition: gc.c:853
struct heap_page * free_next
Definition: gc.c:857
unsigned int in_tomb
Definition: gc.c:854
struct heap_page::@153 flags
short free_slots
Definition: gc.c:847
size_t size
Definition: gc.c:9805
Definition: gc.c:647
stack_chunk_t * chunk
Definition: gc.c:648
size_t unused_cache_size
Definition: gc.c:653
int index
Definition: gc.c:650
stack_chunk_t * cache
Definition: gc.c:649
size_t cache_size
Definition: gc.c:652
int limit
Definition: gc.c:651
rb_objspace_t * objspace
Definition: gc.c:7386
Definition: gc.c:90
bool left
Definition: gc.c:91
size_t right
Definition: gc.c:92
VALUE of
Definition: gc.c:3070
size_t num
Definition: gc.c:3069
ID called_id
struct rb_method_definition_struct *const def
const VALUE defined_class
const VALUE owner
rb_subclass_entry_t * subclasses
VALUE value
VALUE file
CREF (Class REFerence)
size_t total_pages
Definition: gc.c:666
struct heap_page * free_pages
Definition: gc.c:659
struct heap_page * using_page
Definition: gc.c:660
struct heap_page * sweeping_page
Definition: gc.c:662
struct list_head pages
Definition: gc.c:661
struct heap_page * pooled_pages
Definition: gc.c:664
RVALUE * freelist
Definition: gc.c:657
size_t total_slots
Definition: gc.c:667
struct rb_imemo_tmpbuf_struct * next
VALUE ecopts
Definition: io.h:89
Definition: io.h:66
struct rb_io_t::rb_io_enc_t encs
VALUE writeconv_asciicompat
Definition: io.h:96
VALUE pathv
Definition: io.h:72
VALUE write_lock
Definition: io.h:101
VALUE writeconv_pre_ecopts
Definition: io.h:99
VALUE tied_io_for_writing
Definition: io.h:77
struct rb_iseq_constant_body * body
struct rb_method_entry_struct * original_me
struct rb_hook_list_struct * hooks
union rb_method_definition_struct::@41 body
rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
rb_cref_t * cref
class reference, should be marked
struct rb_method_entry_struct * orig_me
void(* mark_func)(VALUE v, void *data)
Definition: gc.c:716
struct rb_objspace::@145 malloc_params
size_t total_freed_objects
Definition: gc.c:780
size_t uncollectible_wb_unprotected_objects_limit
Definition: gc.c:794
size_t heap_used_at_gc_start
Definition: gc.c:776
size_t last_major_gc
Definition: gc.c:792
struct rb_objspace::@150 rgengc
st_table * finalizer_table
Definition: gc.c:735
size_t major_gc_count
Definition: gc.c:752
unsigned int during_incremental_marking
Definition: gc.c:699
rb_event_flag_t hook_events
Definition: gc.c:703
double invoke_time
Definition: gc.c:748
VALUE parent_object
Definition: gc.c:790
size_t marked_slots
Definition: gc.c:720
size_t moved_count_table[T_MASK]
Definition: gc.c:811
struct rb_objspace::mark_func_data_struct * mark_func_data
gc_profile_record * records
Definition: gc.c:740
unsigned int during_minor_gc
Definition: gc.c:696
struct gc_list * global_list
Definition: gc.c:784
st_table * obj_to_id_tbl
Definition: gc.c:823
struct rb_objspace::@151 rcompactor
size_t uncollectible_wb_unprotected_objects
Definition: gc.c:793
unsigned int gc_stressful
Definition: gc.c:693
size_t old_objects
Definition: gc.c:795
gc_profile_record * current_record
Definition: gc.c:741
struct rb_objspace::@152 rincgc
rb_heap_t tomb_heap
Definition: gc.c:708
VALUE deferred_final
Definition: gc.c:732
struct rb_objspace::@147 atomic_flags
struct rb_objspace::@146 flags
mark_stack_t mark_stack
Definition: gc.c:719
double gc_sweep_start_time
Definition: gc.c:774
size_t total_freed_pages
Definition: gc.c:782
struct heap_page ** sorted
Definition: gc.c:723
size_t sorted_length
Definition: gc.c:726
unsigned int dont_gc
Definition: gc.c:689
unsigned int immediate_sweep
Definition: gc.c:688
size_t oldmalloc_increase_limit
Definition: gc.c:800
size_t old_objects_limit
Definition: gc.c:796
int latest_gc_info
Definition: gc.c:739
VALUE next_object_id
Definition: gc.c:705
unsigned int has_hook
Definition: gc.c:694
VALUE gc_stress_mode
Definition: gc.c:786
rb_atomic_t finalizing
Definition: gc.c:711
unsigned int during_compacting
Definition: gc.c:692
size_t count
Definition: gc.c:779
size_t allocatable_pages
Definition: gc.c:725
size_t allocated_pages
Definition: gc.c:724
rb_heap_t eden_heap
Definition: gc.c:707
size_t considered_count_table[T_MASK]
Definition: gc.c:810
size_t total_allocated_objects_at_gc_start
Definition: gc.c:775
size_t freeable_pages
Definition: gc.c:728
struct rb_objspace::@149 profile
size_t step_slots
Definition: gc.c:817
size_t oldmalloc_increase
Definition: gc.c:799
int need_major_gc
Definition: gc.c:791
size_t final_slots
Definition: gc.c:731
unsigned int during_gc
Definition: gc.c:691
size_t size
Definition: gc.c:743
size_t minor_gc_count
Definition: gc.c:751
size_t limit
Definition: gc.c:678
size_t total_allocated_pages
Definition: gc.c:781
struct rb_objspace::@148 heap_pages
st_table * id_to_obj_tbl
Definition: gc.c:822
size_t next_index
Definition: gc.c:742
size_t compact_count
Definition: gc.c:753
unsigned int mode
Definition: gc.c:687
size_t increase
Definition: gc.c:679
RVALUE * range[2]
Definition: gc.c:727
size_t total_allocated_objects
Definition: gc.c:704
unsigned int dont_incremental
Definition: gc.c:690
size_t pooled_slots
Definition: gc.c:816
int run
Definition: gc.c:738
rb_subclass_entry_t * next
VALUE klass
int num_regs
Definition: onigmo.h:718
Definition: re.h:36
int char_offset_num_allocated
Definition: re.h:40
struct rmatch_offset * char_offset
Definition: re.h:39
struct re_registers regs
Definition: re.h:37
void(* func)(const char *category, VALUE, void *)
Definition: gc.c:9497
const char * category
Definition: gc.c:9496
void * data
Definition: gc.c:9498
double oldmalloc_limit_growth_factor
Definition: gc.c:334
VALUE gc_stress
Definition: gc.c:336
size_t malloc_limit_max
Definition: gc.c:329
size_t growth_max_slots
Definition: gc.c:321
size_t malloc_limit_min
Definition: gc.c:328
double growth_factor
Definition: gc.c:320
size_t heap_init_slots
Definition: gc.c:318
double heap_free_slots_max_ratio
Definition: gc.c:325
double heap_free_slots_goal_ratio
Definition: gc.c:324
double malloc_limit_growth_factor
Definition: gc.c:330
double oldobject_limit_factor
Definition: gc.c:326
double heap_free_slots_min_ratio
Definition: gc.c:323
size_t heap_free_slots
Definition: gc.c:319
size_t oldmalloc_limit_min
Definition: gc.c:332
size_t oldmalloc_limit_max
Definition: gc.c:333
struct stack_chunk * next
Definition: gc.c:644
VALUE data[STACK_CHUNK_SIZE]
Definition: gc.c:643
rb_objspace_t * objspace
Definition: gc.c:5908
IFUNC (Internal FUNCtion)
SVAR (Special VARiable)
Definition: gc.c:10348
st_table * obj2wmap
Definition: gc.c:10349
st_table * wmap2obj
Definition: gc.c:10350
VALUE final
Definition: gc.c:10351
VALUE value
Definition: gc.c:10516
rb_objspace_t * objspace
Definition: gc.c:10515
#define RSYMBOL(obj)
Definition: symbol.h:33
RUBY_SYMBOL_EXPORT_BEGIN void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
Definition: thread.c:1661
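rb_thread_call_with_gvl above is the entry point for re-acquiring the GVL from native code that has released it. A minimal sketch of the call shape, with illustrative function names; it assumes the caller really is in a GVL-released region such as an rb_thread_call_without_gvl body.

#include "ruby.h"
#include "ruby/thread.h"

/* Runs while holding the GVL; it is safe to touch Ruby objects here. */
static void *
with_gvl_body(void *arg)
{
    VALUE str = rb_str_new_cstr((const char *)arg);
    return (void *)str;
}

/* Called from a context that has released the GVL: re-acquire it for
 * the Ruby-level work and return the resulting String. */
static VALUE
make_string_with_gvl(const char *msg)
{
    return (VALUE)rb_thread_call_with_gvl(with_gvl_body, (void *)msg);
}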
void rb_transient_heap_promote(VALUE obj)
void rb_transient_heap_verify(void)
void rb_transient_heap_mark(VALUE obj, const void *ptr)
void rb_transient_heap_finish_marking(void)
void rb_transient_heap_start_marking(int full_marking)
void rb_transient_heap_update_references(void)
#define strtod(s, e)
Definition: util.h:76
void ruby_qsort(void *, const size_t, const size_t, int(*)(const void *, const void *, void *), void *)
#define rb_id2str(id)
Definition: vm_backtrace.c:30
#define getenv(name)
Definition: win32.c:73
#define env
IUnknown DWORD
Definition: win32ole.c:33