Ruby 1.9.3p551 (2014-11-13 revision 48407)
gc.c
1 /**********************************************************************
2 
3  gc.c -
4 
5  $Author: usa $
6  created at: Tue Oct 5 09:44:46 JST 1993
7 
8  Copyright (C) 1993-2007 Yukihiro Matsumoto
9  Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10  Copyright (C) 2000 Information-technology Promotion Agency, Japan
11 
12 **********************************************************************/
13 
14 #include "ruby/ruby.h"
15 #include "ruby/st.h"
16 #include "ruby/re.h"
17 #include "ruby/io.h"
18 #include "ruby/util.h"
19 #include "eval_intern.h"
20 #include "vm_core.h"
21 #include "internal.h"
22 #include "gc.h"
23 #include "constant.h"
24 #include "ruby_atomic.h"
25 #include <stdio.h>
26 #include <setjmp.h>
27 #include <sys/types.h>
28 #include <assert.h>
29 
30 #ifdef HAVE_SYS_TIME_H
31 #include <sys/time.h>
32 #endif
33 
34 #ifdef HAVE_SYS_RESOURCE_H
35 #include <sys/resource.h>
36 #endif
37 
38 #if defined _WIN32 || defined __CYGWIN__
39 #include <windows.h>
40 #endif
41 
42 #ifdef HAVE_VALGRIND_MEMCHECK_H
43 # include <valgrind/memcheck.h>
44 # ifndef VALGRIND_MAKE_MEM_DEFINED
45 # define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE((p), (n))
46 # endif
47 # ifndef VALGRIND_MAKE_MEM_UNDEFINED
48 # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE((p), (n))
49 # endif
50 #else
51 # define VALGRIND_MAKE_MEM_DEFINED(p, n) /* empty */
52 # define VALGRIND_MAKE_MEM_UNDEFINED(p, n) /* empty */
53 #endif
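/*
 * The VALGRIND_* stubs above let the conservative collector tell Valgrind
 * when it is about to read words (spilled registers, free cells) that would
 * otherwise be flagged as uninitialized.  A minimal sketch of the pattern,
 * using a word v copied from the machine stack:
 *
 *     VALUE v = *stack_ptr;                         -- may be "undefined"
 *     VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));     -- silence the warning
 *     if (is_pointer_to_heap(objspace, (void *)v))  -- now safe to inspect
 *         gc_mark(objspace, v);
 *
 * This is exactly how mark_locations_array() below uses the macro; when the
 * Valgrind headers are absent, both macros expand to nothing.
 */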
54 
55 #define rb_setjmp(env) RUBY_SETJMP(env)
56 #define rb_jmp_buf rb_jmpbuf_t
57 
58 /* Make alloca work the best possible way. */
59 #ifdef __GNUC__
60 # ifndef atarist
61 # ifndef alloca
62 # define alloca __builtin_alloca
63 # endif
64 # endif /* atarist */
65 #else
66 # ifdef HAVE_ALLOCA_H
67 # include <alloca.h>
68 # else
69 # ifdef _AIX
70  #pragma alloca
71 # else
72 # ifndef alloca /* predefined by HP cc +Olibcalls */
73 void *alloca ();
74 # endif
75 # endif /* AIX */
76 # endif /* HAVE_ALLOCA_H */
77 #endif /* __GNUC__ */
78 
79 #ifndef GC_MALLOC_LIMIT
80 #define GC_MALLOC_LIMIT 8000000
81 #endif
82 #define HEAP_MIN_SLOTS 10000
83 #define FREE_MIN 4096
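/*
 * These three constants are only defaults.  rb_gc_set_params() below lets
 * each be overridden at startup through the environment variables
 * RUBY_GC_MALLOC_LIMIT, RUBY_HEAP_MIN_SLOTS and RUBY_FREE_MIN, e.g.
 * (values illustrative):
 *
 *     $ RUBY_HEAP_MIN_SLOTS=600000 RUBY_GC_MALLOC_LIMIT=60000000 ruby app.rb
 *
 * GC_MALLOC_LIMIT is the number of malloc'ed bytes tolerated between
 * collections, HEAP_MIN_SLOTS the initial number of object slots, and
 * FREE_MIN the minimum number of free slots that must remain after a sweep
 * before the heap is grown.
 */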
84 
85 typedef struct {
86  unsigned int initial_malloc_limit;
87  unsigned int initial_heap_min_slots;
88  unsigned int initial_free_min;
89  int gc_stress;
90 } ruby_gc_params_t;
91 
92 static ruby_gc_params_t initial_params = {
93  GC_MALLOC_LIMIT,
94  HEAP_MIN_SLOTS,
95  FREE_MIN,
96 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
97  FALSE,
98 #endif
99 };
100 
101 #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
102 
103 #if SIZEOF_LONG == SIZEOF_VOIDP
104 # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
105 # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
106 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
107 # define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
108 # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
109  ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
110 #else
111 # error not supported
112 #endif
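/*
 * On a platform where long and void* are the same size, a heap object's
 * object_id is simply its address with FIXNUM_FLAG (the low bit) set, and
 * obj_id_to_ref() recovers the address by clearing that bit.  A worked
 * example on a 64-bit host (addresses illustrative):
 *
 *     object address:    0x00007f8a1c0ffe38
 *     nonspecial_obj_id: 0x00007f8a1c0ffe39   (address | 1, a Fixnum)
 *     obj_id_to_ref:     0x00007f8a1c0ffe38   (id ^ 1, the address again)
 *
 * RVALUE addresses are always a multiple of sizeof(RVALUE), so the low bit
 * is guaranteed to be free for this trick.
 */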
113 
115 
116 /* for GC profile */
117 #define GC_PROFILE_MORE_DETAIL 0
118 typedef struct gc_profile_record {
119  double gc_time;
120  double gc_mark_time;
121  double gc_sweep_time;
122  double gc_invoke_time;
123 
124  size_t heap_use_slots;
125  size_t heap_live_objects;
126  size_t heap_free_objects;
127  size_t heap_total_objects;
128  size_t heap_use_size;
129  size_t heap_total_size;
130 
131  int have_finalize;
132  int is_marked;
133 
134  size_t allocate_increase;
135  size_t allocate_limit;
136 } gc_profile_record;
137 
138 static double
139 getrusage_time(void)
140 {
141 #ifdef RUSAGE_SELF
142  struct rusage usage;
143  struct timeval time;
144  getrusage(RUSAGE_SELF, &usage);
145  time = usage.ru_utime;
146  return time.tv_sec + time.tv_usec * 1e-6;
147 #elif defined _WIN32
148  FILETIME creation_time, exit_time, kernel_time, user_time;
149  ULARGE_INTEGER ui;
150  LONG_LONG q;
151  double t;
152 
153  if (GetProcessTimes(GetCurrentProcess(),
154  &creation_time, &exit_time, &kernel_time, &user_time) == 0)
155  {
156  return 0.0;
157  }
158  memcpy(&ui, &user_time, sizeof(FILETIME));
159  q = ui.QuadPart / 10L;
160  t = (DWORD)(q % 1000000L) * 1e-6;
161  q /= 1000000L;
162 #ifdef __GNUC__
163  t += q;
164 #else
165  t += (double)(DWORD)(q >> 16) * (1 << 16);
166  t += (DWORD)q & ~(~0 << 16);
167 #endif
168  return t;
169 #else
170  return 0.0;
171 #endif
172 }
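/*
 * On Windows, GetProcessTimes() reports user time as a FILETIME, a 64-bit
 * count of 100-nanosecond ticks; the branch above divides by 10 to get
 * microseconds and then splits off the sub-second part.  The same
 * conversion in one step, as a sketch:
 *
 *     ULARGE_INTEGER ui;
 *     memcpy(&ui, &user_time, sizeof(FILETIME));
 *     double seconds = (double)ui.QuadPart * 100 * 1e-9;
 *
 * The quotient/remainder split in the original merely avoids precision loss
 * when converting one large 64-bit count to double.
 */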
173 
174 #define GC_PROF_TIMER_START do {\
175  if (objspace->profile.run) {\
176  if (!objspace->profile.record) {\
177  objspace->profile.size = 1000;\
178  objspace->profile.record = malloc(sizeof(gc_profile_record) * objspace->profile.size);\
179  }\
180  if (count >= objspace->profile.size) {\
181  objspace->profile.size += 1000;\
182  objspace->profile.record = realloc(objspace->profile.record, sizeof(gc_profile_record) * objspace->profile.size);\
183  }\
184  if (!objspace->profile.record) {\
185  rb_bug("gc_profile malloc or realloc miss");\
186  }\
187  MEMZERO(&objspace->profile.record[count], gc_profile_record, 1);\
188  gc_time = getrusage_time();\
189  objspace->profile.record[count].gc_invoke_time = gc_time - objspace->profile.invoke_time;\
190  }\
191  } while(0)
192 
193 #define GC_PROF_TIMER_STOP(marked) do {\
194  if (objspace->profile.run) {\
195  gc_time = getrusage_time() - gc_time;\
196  if (gc_time < 0) gc_time = 0;\
197  objspace->profile.record[count].gc_time = gc_time;\
198  objspace->profile.record[count].is_marked = !!(marked);\
199  GC_PROF_SET_HEAP_INFO(objspace->profile.record[count]);\
200  objspace->profile.count++;\
201  }\
202  } while(0)
203 
204 #if GC_PROFILE_MORE_DETAIL
205 #define INIT_GC_PROF_PARAMS double gc_time = 0, sweep_time = 0;\
206  size_t count = objspace->profile.count, total = 0, live = 0
207 
208 #define GC_PROF_MARK_TIMER_START double mark_time = 0;\
209  do {\
210  if (objspace->profile.run) {\
211  mark_time = getrusage_time();\
212  }\
213  } while(0)
214 
215 #define GC_PROF_MARK_TIMER_STOP do {\
216  if (objspace->profile.run) {\
217  mark_time = getrusage_time() - mark_time;\
218  if (mark_time < 0) mark_time = 0;\
219  objspace->profile.record[objspace->profile.count].gc_mark_time = mark_time;\
220  }\
221  } while(0)
222 
223 #define GC_PROF_SWEEP_TIMER_START do {\
224  if (objspace->profile.run) {\
225  sweep_time = getrusage_time();\
226  }\
227  } while(0)
228 
229 #define GC_PROF_SWEEP_TIMER_STOP do {\
230  if (objspace->profile.run) {\
231  sweep_time = getrusage_time() - sweep_time;\
232  if (sweep_time < 0) sweep_time = 0;\
233  objspace->profile.record[count].gc_sweep_time = sweep_time;\
234  }\
235  } while(0)
236 #define GC_PROF_SET_MALLOC_INFO do {\
237  if (objspace->profile.run) {\
238  gc_profile_record *record = &objspace->profile.record[objspace->profile.count];\
239  record->allocate_increase = malloc_increase;\
240  record->allocate_limit = malloc_limit; \
241  }\
242  } while(0)
243 #define GC_PROF_SET_HEAP_INFO(record) do {\
244  live = objspace->heap.live_num;\
245  total = heaps_used * HEAP_OBJ_LIMIT;\
246  (record).heap_use_slots = heaps_used;\
247  (record).heap_live_objects = live;\
248  (record).heap_free_objects = total - live;\
249  (record).heap_total_objects = total;\
250  (record).have_finalize = deferred_final_list ? Qtrue : Qfalse;\
251  (record).heap_use_size = live * sizeof(RVALUE);\
252  (record).heap_total_size = total * sizeof(RVALUE);\
253  } while(0)
254 #define GC_PROF_INC_LIVE_NUM objspace->heap.live_num++
255 #define GC_PROF_DEC_LIVE_NUM objspace->heap.live_num--
256 #else
257 #define INIT_GC_PROF_PARAMS double gc_time = 0;\
258  size_t count = objspace->profile.count, total = 0, live = 0
259 #define GC_PROF_MARK_TIMER_START
260 #define GC_PROF_MARK_TIMER_STOP
261 #define GC_PROF_SWEEP_TIMER_START
262 #define GC_PROF_SWEEP_TIMER_STOP
263 #define GC_PROF_SET_MALLOC_INFO
264 #define GC_PROF_SET_HEAP_INFO(record) do {\
265  live = objspace->heap.live_num;\
266  total = heaps_used * HEAP_OBJ_LIMIT;\
267  (record).heap_total_objects = total;\
268  (record).heap_use_size = live * sizeof(RVALUE);\
269  (record).heap_total_size = total * sizeof(RVALUE);\
270  } while(0)
271 #define GC_PROF_INC_LIVE_NUM
272 #define GC_PROF_DEC_LIVE_NUM
273 #endif
274 
275 
276 #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
277 #pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
278 #endif
279 
280 typedef struct RVALUE {
281  union {
282  struct {
283  VALUE flags; /* always 0 for freed obj */
284  struct RVALUE *next;
285  } free;
286  struct RBasic basic;
287  struct RObject object;
288  struct RClass klass;
289  struct RFloat flonum;
290  struct RString string;
291  struct RArray array;
292  struct RRegexp regexp;
293  struct RHash hash;
294  struct RData data;
295  struct RTypedData typeddata;
296  struct RStruct rstruct;
297  struct RBignum bignum;
298  struct RFile file;
299  struct RNode node;
300  struct RMatch match;
301  struct RRational rational;
302  struct RComplex complex;
303  } as;
304 #ifdef GC_DEBUG
305  const char *file;
306  int line;
307 #endif
308 } RVALUE;
309 
310 #if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
311 #pragma pack(pop)
312 #endif
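/*
 * Every Ruby object, whatever its type, occupies exactly one RVALUE, and a
 * free cell reuses the same memory as a freelist link.  With the 16KB
 * HEAP_SIZE chosen below and a five-word RVALUE, a typical 64-bit build
 * gets (figures illustrative; the exact size is platform-dependent, which
 * is why HEAP_OBJ_LIMIT is computed rather than hard-coded):
 *
 *     sizeof(RVALUE) = 5 * 8 = 40 bytes
 *     HEAP_OBJ_LIMIT = 0x4000 / 40 = 409 objects per heap slot
 */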
313 
314 struct heaps_slot {
315  void *membase;
316  RVALUE *slot;
317  size_t limit;
318  struct heaps_slot *next;
319  struct heaps_slot *prev;
320 };
321 
322 struct sorted_heaps_slot {
323  RVALUE *start;
324  RVALUE *end;
325  struct heaps_slot *slot;
326 };
327 
328 struct gc_list {
329  VALUE *varptr;
330  struct gc_list *next;
331 };
332 
333 #define STACK_CHUNK_SIZE 500
334 
335 typedef struct stack_chunk {
336  VALUE data[STACK_CHUNK_SIZE];
337  struct stack_chunk *next;
338 } stack_chunk_t;
339 
340 typedef struct mark_stack {
341  stack_chunk_t *chunk;
342  stack_chunk_t *cache;
343  size_t index;
344  size_t limit;
345  size_t cache_size;
346  size_t unused_cache_size;
347 } mark_stack_t;
348 
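/*
 * The mark stack is a linked list of fixed-size chunks of STACK_CHUNK_SIZE
 * VALUEs rather than one growable array, so pushing never needs realloc()
 * in the middle of a GC, and emptied chunks are parked on the cache list
 * for reuse.  In outline:
 *
 *     push: if (index == limit) take a chunk from the cache or malloc one;
 *           chunk->data[index++] = obj;
 *     pop:  obj = chunk->data[--index];
 *           if the chunk empties, return it to the cache and resume the
 *           previous chunk
 *
 * push_mark_stack() and pop_mark_stack() below implement exactly this.
 */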
349 #define CALC_EXACT_MALLOC_SIZE 0
350 
351 typedef struct rb_objspace {
352  struct {
353  size_t limit;
354  size_t increase;
355 #if CALC_EXACT_MALLOC_SIZE
356  size_t allocated_size;
357  size_t allocations;
358 #endif
359  } malloc_params;
360  struct {
361  size_t increment;
362  struct heaps_slot *ptr;
363  struct heaps_slot *sweep_slots;
364  struct sorted_heaps_slot *sorted;
365  size_t length;
366  size_t used;
367  RVALUE *freelist;
368  RVALUE *range[2];
369  RVALUE *freed;
370  size_t live_num;
371  size_t free_num;
372  size_t free_min;
373  size_t final_num;
374  size_t do_heap_free;
375  } heap;
376  struct {
377  int dont_gc;
378  int dont_lazy_sweep;
379  int during_gc;
380  rb_atomic_t finalizing;
381  } flags;
382  struct {
383  st_table *table;
384  RVALUE *deferred;
385  } final;
386  mark_stack_t mark_stack;
387  struct {
388  int run;
389  gc_profile_record *record;
390  size_t count;
391  size_t size;
392  double invoke_time;
393  } profile;
394  struct gc_list *global_list;
395  size_t count;
396  int gc_stress;
397 } rb_objspace_t;
398 
399 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
400 #define rb_objspace (*GET_VM()->objspace)
401 #define ruby_initial_gc_stress initial_params.gc_stress
402 int *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
403 #else
404 static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}};
405 int *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
406 #endif
407 #define malloc_limit objspace->malloc_params.limit
408 #define malloc_increase objspace->malloc_params.increase
409 #define heaps objspace->heap.ptr
410 #define heaps_length objspace->heap.length
411 #define heaps_used objspace->heap.used
412 #define freelist objspace->heap.freelist
413 #define lomem objspace->heap.range[0]
414 #define himem objspace->heap.range[1]
415 #define heaps_inc objspace->heap.increment
416 #define heaps_freed objspace->heap.freed
417 #define dont_gc objspace->flags.dont_gc
418 #define during_gc objspace->flags.during_gc
419 #define finalizing objspace->flags.finalizing
420 #define finalizer_table objspace->final.table
421 #define deferred_final_list objspace->final.deferred
422 #define global_List objspace->global_list
423 #define ruby_gc_stress objspace->gc_stress
424 #define initial_malloc_limit initial_params.initial_malloc_limit
425 #define initial_heap_min_slots initial_params.initial_heap_min_slots
426 #define initial_free_min initial_params.initial_free_min
427 
428 static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
429 
430 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
431 rb_objspace_t *
432 rb_objspace_alloc(void)
433 {
434  rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
435  memset(objspace, 0, sizeof(*objspace));
436  malloc_limit = initial_malloc_limit;
437  ruby_gc_stress = ruby_initial_gc_stress;
438 
439  return objspace;
440 }
441 #endif
442 
443 static void initial_expand_heap(rb_objspace_t *objspace);
444 static void init_mark_stack(mark_stack_t *stack);
445 
446 void
447 rb_gc_set_params(void)
448 {
449  char *malloc_limit_ptr, *heap_min_slots_ptr, *free_min_ptr;
450 
451  if (rb_safe_level() > 0) return;
452 
453  malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
454  if (malloc_limit_ptr != NULL) {
455  int malloc_limit_i = atoi(malloc_limit_ptr);
456  if (RTEST(ruby_verbose))
457  fprintf(stderr, "malloc_limit=%d (%d)\n",
458  malloc_limit_i, initial_malloc_limit);
459  if (malloc_limit_i > 0) {
460  initial_malloc_limit = malloc_limit_i;
461  }
462  }
463 
464  heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
465  if (heap_min_slots_ptr != NULL) {
466  int heap_min_slots_i = atoi(heap_min_slots_ptr);
467  if (RTEST(ruby_verbose))
468  fprintf(stderr, "heap_min_slots=%d (%d)\n",
469  heap_min_slots_i, initial_heap_min_slots);
470  if (heap_min_slots_i > 0) {
471  initial_heap_min_slots = heap_min_slots_i;
472  initial_expand_heap(&rb_objspace);
473  }
474  }
475 
476  free_min_ptr = getenv("RUBY_FREE_MIN");
477  if (free_min_ptr != NULL) {
478  int free_min_i = atoi(free_min_ptr);
479  if (RTEST(ruby_verbose))
480  fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min);
481  if (free_min_i > 0) {
482  initial_free_min = free_min_i;
483  }
484  }
485 }
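/*
 * rb_gc_set_params() runs once during interpreter boot and is deliberately
 * a no-op at $SAFE > 0, so privileged scripts cannot be influenced through
 * the environment.  Typical tuning for a large long-lived process (values
 * illustrative only):
 *
 *     RUBY_HEAP_MIN_SLOTS=800000     -- pre-size the heap, fewer early GCs
 *     RUBY_GC_MALLOC_LIMIT=79000000  -- tolerate more malloc between GCs
 *     RUBY_FREE_MIN=100000           -- keep more slack after each sweep
 *
 * Each variable is read with atoi() and ignored unless strictly positive.
 */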
486 
487 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
488 static void gc_sweep(rb_objspace_t *);
489 static void slot_sweep(rb_objspace_t *, struct heaps_slot *);
490 static void rest_sweep(rb_objspace_t *);
491 static void free_stack_chunks(mark_stack_t *);
492 
493 void
494 rb_objspace_free(rb_objspace_t *objspace)
495 {
496  rest_sweep(objspace);
497  if (objspace->profile.record) {
498  free(objspace->profile.record);
499  objspace->profile.record = 0;
500  }
501  if (global_List) {
502  struct gc_list *list, *next;
503  for (list = global_List; list; list = next) {
504  next = list->next;
505  free(list);
506  }
507  }
508  if (objspace->heap.sorted) {
509  size_t i;
510  for (i = 0; i < heaps_used; ++i) {
511  free(objspace->heap.sorted[i].slot->membase);
512  free(objspace->heap.sorted[i].slot);
513  }
514  free(objspace->heap.sorted);
515  heaps_used = 0;
516  heaps = 0;
517  }
518  free_stack_chunks(&objspace->mark_stack);
519  free(objspace);
520 }
521 #endif
522 
523 /* tiny heap size */
524 /* 32KB */
525 /*#define HEAP_SIZE 0x8000 */
526 /* 128KB */
527 /*#define HEAP_SIZE 0x20000 */
528 /* 64KB */
529 /*#define HEAP_SIZE 0x10000 */
530 /* 16KB */
531 #define HEAP_SIZE 0x4000
532 /* 8KB */
533 /*#define HEAP_SIZE 0x2000 */
534 /* 4KB */
535 /*#define HEAP_SIZE 0x1000 */
536 /* 2KB */
537 /*#define HEAP_SIZE 0x800 */
538 
539 #define HEAP_OBJ_LIMIT (unsigned int)(HEAP_SIZE / sizeof(struct RVALUE))
540 
541 extern st_table *rb_class_tbl;
542 
543 int ruby_disable_gc_stress = 0;
544 
545 static void run_final(rb_objspace_t *objspace, VALUE obj);
546 static int garbage_collect(rb_objspace_t *objspace);
547 static int gc_lazy_sweep(rb_objspace_t *objspace);
548 
549 void
550 rb_global_variable(VALUE *var)
551 {
552  rb_gc_register_address(var);
553 }
554 
555 static void *
556 ruby_memerror_body(void *dummy)
557 {
558  rb_memerror();
559  return 0;
560 }
561 
562 static void
563 ruby_memerror(void)
564 {
565  if (ruby_thread_has_gvl_p()) {
566  rb_memerror();
567  }
568  else {
569  if (ruby_native_thread_p()) {
570  rb_thread_call_with_gvl(ruby_memerror_body, 0);
571  }
572  else {
573  /* no ruby thread */
574  fprintf(stderr, "[FATAL] failed to allocate memory\n");
575  exit(EXIT_FAILURE);
576  }
577  }
578 }
579 
580 void
581 rb_memerror(void)
582 {
583  rb_thread_t *th = GET_THREAD();
584  if (!nomem_error ||
585  (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
586  fprintf(stderr, "[FATAL] failed to allocate memory\n");
587  exit(EXIT_FAILURE);
588  }
589  if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
590  rb_thread_raised_clear(th);
591  GET_THREAD()->errinfo = nomem_error;
592  JUMP_TAG(TAG_RAISE);
593  }
594  rb_thread_raised_set(th, RAISED_NOMEMORY);
595  rb_exc_raise(nomem_error);
596 }
597 
598 /*
599  * call-seq:
600  * GC.stress -> true or false
601  *
602  * Returns the current status of GC stress mode.
603  */
604 
605 static VALUE
606 gc_stress_get(VALUE self)
607 {
608  rb_objspace_t *objspace = &rb_objspace;
609  return ruby_gc_stress ? Qtrue : Qfalse;
610 }
611 
612 /*
613  * call-seq:
614  * GC.stress = bool -> bool
615  *
616  * Updates the GC stress mode.
617  *
618  * When stress mode is enabled the GC is invoked at every GC opportunity:
619  * all memory and object allocations.
620  *
621  * Enabling stress mode makes Ruby very slow; it is intended only for debugging.
622  */
623 
624 static VALUE
625 gc_stress_set(VALUE self, VALUE flag)
626 {
627  rb_objspace_t *objspace = &rb_objspace;
628  rb_secure(2);
629  ruby_gc_stress = RTEST(flag);
630  return flag;
631 }
632 
633 /*
634  * call-seq:
635  * GC::Profiler.enable? -> true or false
636  *
637  * The current status of GC profile mode.
638  */
639 
640 static VALUE
641 gc_profile_enable_get(VALUE self)
642 {
643  rb_objspace_t *objspace = &rb_objspace;
644  return objspace->profile.run ? Qtrue : Qfalse;
645 }
646 
647 /*
648  * call-seq:
649  * GC::Profiler.enable -> nil
650  *
651  * Starts the GC profiler.
652  *
653  */
654 
655 static VALUE
656 gc_profile_enable(void)
657 {
658  rb_objspace_t *objspace = &rb_objspace;
659 
660  objspace->profile.run = TRUE;
661  return Qnil;
662 }
663 
664 /*
665  * call-seq:
666  * GC::Profiler.disable -> nil
667  *
668  * Stops the GC profiler.
669  *
670  */
671 
672 static VALUE
673 gc_profile_disable(void)
674 {
675  rb_objspace_t *objspace = &rb_objspace;
676 
677  objspace->profile.run = FALSE;
678  return Qnil;
679 }
680 
681 /*
682  * call-seq:
683  * GC::Profiler.clear -> nil
684  *
685  * Clears the GC profiler data.
686  *
687  */
688 
689 static VALUE
690 gc_profile_clear(void)
691 {
692  rb_objspace_t *objspace = &rb_objspace;
693  MEMZERO(objspace->profile.record, gc_profile_record, objspace->profile.size);
694  objspace->profile.count = 0;
695  return Qnil;
696 }
697 
698 static void *
699 negative_size_allocation_error_with_gvl(void *ptr)
700 {
701  rb_raise(rb_eNoMemError, "%s", (const char *)ptr);
702  return 0; /* should not be reached */
703 }
704 
705 static void
706 negative_size_allocation_error(const char *msg)
707 {
708  if (ruby_thread_has_gvl_p()) {
709  rb_raise(rb_eNoMemError, "%s", msg);
710  }
711  else {
712  if (ruby_native_thread_p()) {
713  rb_thread_call_with_gvl(negative_size_allocation_error_with_gvl, (void *)msg);
714  }
715  else {
716  fprintf(stderr, "[FATAL] %s\n", msg);
717  exit(EXIT_FAILURE);
718  }
719  }
720 }
721 
722 static void *
723 gc_with_gvl(void *ptr)
724 {
725  return (void *)(VALUE)garbage_collect((rb_objspace_t *)ptr);
726 }
727 
728 static int
729 garbage_collect_with_gvl(rb_objspace_t *objspace)
730 {
731  if (dont_gc) return TRUE;
732  if (ruby_thread_has_gvl_p()) {
733  return garbage_collect(objspace);
734  }
735  else {
736  if (ruby_native_thread_p()) {
737  return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)objspace);
738  }
739  else {
740  /* no ruby thread */
741  fprintf(stderr, "[FATAL] failed to allocate memory\n");
742  exit(EXIT_FAILURE);
743  }
744  }
745 }
746 
747 static void vm_xfree(rb_objspace_t *objspace, void *ptr);
748 
749 static inline size_t
750 vm_malloc_prepare(rb_objspace_t *objspace, size_t size)
751 {
752  if ((ssize_t)size < 0) {
753  negative_size_allocation_error("negative allocation size (or too big)");
754  }
755  if (size == 0) size = 1;
756 
757 #if CALC_EXACT_MALLOC_SIZE
758  size += sizeof(size_t);
759 #endif
760 
761  if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
762  (malloc_increase+size) > malloc_limit) {
763  garbage_collect_with_gvl(objspace);
764  }
765 
766  return size;
767 }
768 
769 static inline void *
770 vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
771 {
772  malloc_increase += size;
773 
774 #if CALC_EXACT_MALLOC_SIZE
775  objspace->malloc_params.allocated_size += size;
776  objspace->malloc_params.allocations++;
777  ((size_t *)mem)[0] = size;
778  mem = (size_t *)mem + 1;
779 #endif
780 
781  return mem;
782 }
783 
784 #define TRY_WITH_GC(alloc) do { \
785  if (!(alloc) && \
786  (!garbage_collect_with_gvl(objspace) || \
787  !(alloc))) { \
788  ruby_memerror(); \
789  } \
790  } while (0)
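/*
 * TRY_WITH_GC encodes the allocator's fallback policy: try the allocation,
 * and if it fails, run a full GC and try exactly once more before giving up
 * via ruby_memerror().  Note that the alloc argument is expanded twice, so
 * callers pass an assignment expression that is safe to re-evaluate:
 *
 *     void *mem;
 *     TRY_WITH_GC(mem = malloc(size));   -- may run one GC and retry
 *
 * vm_xrealloc() below spells out the same retry sequence by hand.
 */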
791 
792 static void *
793 vm_xmalloc(rb_objspace_t *objspace, size_t size)
794 {
795  void *mem;
796 
797  size = vm_malloc_prepare(objspace, size);
798  TRY_WITH_GC(mem = malloc(size));
799  return vm_malloc_fixup(objspace, mem, size);
800 }
801 
802 static void *
803 vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
804 {
805  void *mem;
806 
807  if ((ssize_t)size < 0) {
808  negative_size_allocation_error("negative re-allocation size");
809  }
810  if (!ptr) return vm_xmalloc(objspace, size);
811  if (size == 0) {
812  vm_xfree(objspace, ptr);
813  return 0;
814  }
815  if (ruby_gc_stress && !ruby_disable_gc_stress)
816  garbage_collect_with_gvl(objspace);
817 
818 #if CALC_EXACT_MALLOC_SIZE
819  size += sizeof(size_t);
820  objspace->malloc_params.allocated_size -= size;
821  ptr = (size_t *)ptr - 1;
822 #endif
823 
824  mem = realloc(ptr, size);
825  if (!mem) {
826  if (garbage_collect_with_gvl(objspace)) {
827  mem = realloc(ptr, size);
828  }
829  if (!mem) {
830  ruby_memerror();
831  }
832  }
833  malloc_increase += size;
834 
835 #if CALC_EXACT_MALLOC_SIZE
836  objspace->malloc_params.allocated_size += size;
837  ((size_t *)mem)[0] = size;
838  mem = (size_t *)mem + 1;
839 #endif
840 
841  return mem;
842 }
843 
844 static void
845 vm_xfree(rb_objspace_t *objspace, void *ptr)
846 {
847 #if CALC_EXACT_MALLOC_SIZE
848  size_t size;
849  ptr = ((size_t *)ptr) - 1;
850  size = ((size_t*)ptr)[0];
851  objspace->malloc_params.allocated_size -= size;
852  objspace->malloc_params.allocations--;
853 #endif
854 
855  free(ptr);
856 }
857 
858 void *
859 ruby_xmalloc(size_t size)
860 {
861  return vm_xmalloc(&rb_objspace, size);
862 }
863 
864 static inline size_t
865 xmalloc2_size(size_t n, size_t size)
866 {
867  size_t len = size * n;
868  if (n != 0 && size != len / n) {
869  rb_raise(rb_eArgError, "malloc: possible integer overflow");
870  }
871  return len;
872 }
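/*
 * The overflow check above works by dividing back: if size != len / n the
 * multiplication wrapped around.  Example on a 32-bit host:
 *
 *     n = 0x10000, size = 0x10001
 *     n * size = 0x100010000, truncated to 32 bits = 0x10000
 *     len / n  = 1 != size  ->  raise ArgumentError
 *
 * This is why ruby_xmalloc2()/ruby_xrealloc2() exist alongside the plain
 * variants: callers allocating arrays should never multiply sizes
 * themselves.
 */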
873 
874 void *
875 ruby_xmalloc2(size_t n, size_t size)
876 {
877  return vm_xmalloc(&rb_objspace, xmalloc2_size(n, size));
878 }
879 
880 static void *
881 vm_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize)
882 {
883  void *mem;
884  size_t size;
885 
886  size = xmalloc2_size(count, elsize);
887  size = vm_malloc_prepare(objspace, size);
888 
889  TRY_WITH_GC(mem = calloc(1, size));
890  return vm_malloc_fixup(objspace, mem, size);
891 }
892 
893 void *
894 ruby_xcalloc(size_t n, size_t size)
895 {
896  return vm_xcalloc(&rb_objspace, n, size);
897 }
898 
899 void *
900 ruby_xrealloc(void *ptr, size_t size)
901 {
902  return vm_xrealloc(&rb_objspace, ptr, size);
903 }
904 
905 void *
906 ruby_xrealloc2(void *ptr, size_t n, size_t size)
907 {
908  size_t len = size * n;
909  if (n != 0 && size != len / n) {
910  rb_raise(rb_eArgError, "realloc: possible integer overflow");
911  }
912  return ruby_xrealloc(ptr, len);
913 }
914 
915 void
916 ruby_xfree(void *x)
917 {
918  if (x)
919  vm_xfree(&rb_objspace, x);
920 }
921 
922 
923 /*
924  * call-seq:
925  * GC.enable -> true or false
926  *
927  * Enables garbage collection, returning <code>true</code> if garbage
928  * collection was previously disabled.
929  *
930  * GC.disable #=> false
931  * GC.enable #=> true
932  * GC.enable #=> false
933  *
934  */
935 
936 VALUE
937 rb_gc_enable(void)
938 {
939  rb_objspace_t *objspace = &rb_objspace;
940  int old = dont_gc;
941 
942  dont_gc = FALSE;
943  return old ? Qtrue : Qfalse;
944 }
945 
946 /*
947  * call-seq:
948  * GC.disable -> true or false
949  *
950  * Disables garbage collection, returning <code>true</code> if garbage
951  * collection was already disabled.
952  *
953  * GC.disable #=> false
954  * GC.disable #=> true
955  *
956  */
957 
958 VALUE
959 rb_gc_disable(void)
960 {
961  rb_objspace_t *objspace = &rb_objspace;
962  int old = dont_gc;
963 
964  dont_gc = TRUE;
965  return old ? Qtrue : Qfalse;
966 }
967 
968 VALUE rb_mGC;
969 
970 void
971 rb_gc_register_mark_object(VALUE obj)
972 {
973  VALUE ary = GET_THREAD()->vm->mark_object_ary;
974  rb_ary_push(ary, obj);
975 }
976 
977 void
978 rb_gc_register_address(VALUE *addr)
979 {
980  rb_objspace_t *objspace = &rb_objspace;
981  struct gc_list *tmp;
982 
983  tmp = ALLOC(struct gc_list);
984  tmp->next = global_List;
985  tmp->varptr = addr;
986  global_List = tmp;
987 }
988 
989 void
990 rb_gc_unregister_address(VALUE *addr)
991 {
992  rb_objspace_t *objspace = &rb_objspace;
993  struct gc_list *tmp = global_List;
994 
995  if (tmp->varptr == addr) {
996  global_List = tmp->next;
997  xfree(tmp);
998  return;
999  }
1000  while (tmp->next) {
1001  if (tmp->next->varptr == addr) {
1002  struct gc_list *t = tmp->next;
1003 
1004  tmp->next = tmp->next->next;
1005  xfree(t);
1006  break;
1007  }
1008  tmp = tmp->next;
1009  }
1010 }
1011 
1012 
1013 static void
1014 allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
1015 {
1016  struct sorted_heaps_slot *p;
1017  size_t size;
1018 
1019  size = next_heaps_length*sizeof(struct sorted_heaps_slot);
1020 
1021  if (heaps_used > 0) {
1022  p = (struct sorted_heaps_slot *)realloc(objspace->heap.sorted, size);
1023  if (p) objspace->heap.sorted = p;
1024  }
1025  else {
1026  p = objspace->heap.sorted = (struct sorted_heaps_slot *)malloc(size);
1027  }
1028 
1029  if (p == 0) {
1030  during_gc = 0;
1031  rb_memerror();
1032  }
1033  heaps_length = next_heaps_length;
1034 }
1035 
1036 static void
1037 assign_heap_slot(rb_objspace_t *objspace)
1038 {
1039  RVALUE *p, *pend, *membase;
1040  struct heaps_slot *slot;
1041  size_t hi, lo, mid;
1042  size_t objs;
1043 
1044  objs = HEAP_OBJ_LIMIT;
1045  p = (RVALUE*)malloc(HEAP_SIZE);
1046  if (p == 0) {
1047  during_gc = 0;
1048  rb_memerror();
1049  }
1050  slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot));
1051  if (slot == 0) {
1052  xfree(p);
1053  during_gc = 0;
1054  rb_memerror();
1055  }
1056  MEMZERO((void*)slot, struct heaps_slot, 1);
1057 
1058  slot->next = heaps;
1059  if (heaps) heaps->prev = slot;
1060  heaps = slot;
1061 
1062  membase = p;
1063  if ((VALUE)p % sizeof(RVALUE) != 0) {
1064  p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
1065  if ((HEAP_SIZE - HEAP_OBJ_LIMIT * sizeof(RVALUE)) < (size_t)((char*)p - (char*)membase)) {
1066  objs--;
1067  }
1068  }
1069 
1070  lo = 0;
1071  hi = heaps_used;
1072  while (lo < hi) {
1073  register RVALUE *mid_membase;
1074  mid = (lo + hi) / 2;
1075  mid_membase = objspace->heap.sorted[mid].slot->membase;
1076  if (mid_membase < membase) {
1077  lo = mid + 1;
1078  }
1079  else if (mid_membase > membase) {
1080  hi = mid;
1081  }
1082  else {
1083  rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
1084  }
1085  }
1086  if (hi < heaps_used) {
1087  MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct sorted_heaps_slot, heaps_used - hi);
1088  }
1089  objspace->heap.sorted[hi].slot = slot;
1090  objspace->heap.sorted[hi].start = p;
1091  objspace->heap.sorted[hi].end = (p + objs);
1092  heaps->membase = membase;
1093  heaps->slot = p;
1094  heaps->limit = objs;
1095  objspace->heap.free_num += objs;
1096  pend = p + objs;
1097  if (lomem == 0 || lomem > p) lomem = p;
1098  if (himem < pend) himem = pend;
1099  heaps_used++;
1100 
1101  while (p < pend) {
1102  p->as.free.flags = 0;
1103  p->as.free.next = freelist;
1104  freelist = p;
1105  p++;
1106  }
1107 }
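/*
 * assign_heap_slot() does three jobs worth spelling out.  First it aligns
 * the first object cell: if malloc() returned memory that is not on a
 * sizeof(RVALUE) boundary, p is rounded up, and when the rounding eats into
 * the final cell one object is given up.  For instance, assuming a 40-byte
 * RVALUE:
 *
 *     if ((VALUE)p % 40 == 8)
 *         p += 40 - 8;        -- now (VALUE)p % 40 == 0
 *
 * Second, it binary-searches objspace->heap.sorted so the slot array stays
 * ordered by address, which is what lets is_pointer_to_heap() run in
 * O(log n).  Third, it threads every new cell onto the freelist, making
 * allocation a constant-time pop.
 */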
1108 
1109 static void
1110 add_heap_slots(rb_objspace_t *objspace, size_t add)
1111 {
1112  size_t i;
1113 
1114  if ((heaps_used + add) > heaps_length) {
1115  allocate_sorted_heaps(objspace, heaps_used + add);
1116  }
1117 
1118  for (i = 0; i < add; i++) {
1119  assign_heap_slot(objspace);
1120  }
1121  heaps_inc = 0;
1122 }
1123 
1124 static void
1125 init_heap(rb_objspace_t *objspace)
1126 {
1127  add_heap_slots(objspace, HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT);
1128  init_mark_stack(&objspace->mark_stack);
1129 #ifdef USE_SIGALTSTACK
1130  {
1131  /* altstacks of other threads are allocated elsewhere */
1132  rb_thread_t *th = GET_THREAD();
1133  void *tmp = th->altstack;
1134  th->altstack = malloc(ALT_STACK_SIZE);
1135  free(tmp); /* free previously allocated area */
1136  }
1137 #endif
1138 
1139  objspace->profile.invoke_time = getrusage_time();
1140  finalizer_table = st_init_numtable();
1141 }
1142 
1143 static void
1144 initial_expand_heap(rb_objspace_t *objspace)
1145 {
1146  size_t min_size = initial_heap_min_slots / HEAP_OBJ_LIMIT;
1147 
1148  if (min_size > heaps_used) {
1149  add_heap_slots(objspace, min_size - heaps_used);
1150  }
1151 }
1152 
1153 static void
1154 set_heaps_increment(rb_objspace_t *objspace)
1155 {
1156  size_t next_heaps_length = (size_t)(heaps_used * 1.8);
1157 
1158  if (next_heaps_length == heaps_used) {
1159  next_heaps_length++;
1160  }
1161 
1162  heaps_inc = next_heaps_length - heaps_used;
1163 
1164  if (next_heaps_length > heaps_length) {
1165  allocate_sorted_heaps(objspace, next_heaps_length);
1166  }
1167 }
1168 
1169 static int
1170 heaps_increment(rb_objspace_t *objspace)
1171 {
1172  if (heaps_inc > 0) {
1173  assign_heap_slot(objspace);
1174  heaps_inc--;
1175  return TRUE;
1176  }
1177  return FALSE;
1178 }
1179 
1180 int
1181 rb_during_gc(void)
1182 {
1183  rb_objspace_t *objspace = &rb_objspace;
1184  return during_gc;
1185 }
1186 
1187 #define RANY(o) ((RVALUE*)(o))
1188 
1189 VALUE
1190 rb_newobj(void)
1191 {
1192  rb_objspace_t *objspace = &rb_objspace;
1193  VALUE obj;
1194 
1195  if (UNLIKELY(during_gc)) {
1196  dont_gc = 1;
1197  during_gc = 0;
1198  rb_bug("object allocation during garbage collection phase");
1199  }
1200 
1201  if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) {
1202  if (!garbage_collect(objspace)) {
1203  during_gc = 0;
1204  rb_memerror();
1205  }
1206  }
1207 
1208  if (UNLIKELY(!freelist)) {
1209  if (!gc_lazy_sweep(objspace)) {
1210  during_gc = 0;
1211  rb_memerror();
1212  }
1213  }
1214 
1215  obj = (VALUE)freelist;
1216  freelist = freelist->as.free.next;
1217 
1218  MEMZERO((void*)obj, RVALUE, 1);
1219 #ifdef GC_DEBUG
1220  RANY(obj)->file = rb_sourcefile();
1221  RANY(obj)->line = rb_sourceline();
1222 #endif
1223  GC_PROF_INC_LIVE_NUM;
1224 
1225  return obj;
1226 }
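/*
 * rb_newobj() is the allocation fast path for every Ruby object: in the
 * common case it is a freelist pop plus a memset.  The guard branches above
 * fire only when the freelist is exhausted (lazy sweep, then full GC) or
 * when GC stress mode is on.  C code normally reaches it through the
 * NEWOBJ/OBJSETUP macros, as rb_data_object_alloc() below does:
 *
 *     NEWOBJ(data, struct RData);      -- rb_newobj(), typed as RData
 *     OBJSETUP(data, klass, T_DATA);   -- set flags and class
 *
 * Everything past OBJSETUP is type-specific initialization.
 */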
1227 
1228 NODE*
1229 rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
1230 {
1231  NODE *n = (NODE*)rb_newobj();
1232 
1233  n->flags |= T_NODE;
1234  nd_set_type(n, type);
1235 
1236  n->u1.value = a0;
1237  n->u2.value = a1;
1238  n->u3.value = a2;
1239 
1240  return n;
1241 }
1242 
1243 VALUE
1244 rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1245 {
1246  NEWOBJ(data, struct RData);
1247  if (klass) Check_Type(klass, T_CLASS);
1248  OBJSETUP(data, klass, T_DATA);
1249  data->data = datap;
1250  data->dfree = dfree;
1251  data->dmark = dmark;
1252 
1253  return (VALUE)data;
1254 }
1255 
1256 VALUE
1257 rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type)
1258 {
1259  NEWOBJ(data, struct RTypedData);
1260 
1261  if (klass) Check_Type(klass, T_CLASS);
1262 
1263  OBJSETUP(data, klass, T_DATA);
1264 
1265  data->data = datap;
1266  data->typed_flag = 1;
1267  data->type = type;
1268 
1269  return (VALUE)data;
1270 }
1271 
1272 size_t
1273 rb_objspace_data_type_memsize(VALUE obj)
1274 {
1275  if (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj)->function.dsize) {
1276  return RTYPEDDATA_TYPE(obj)->function.dsize(RTYPEDDATA_DATA(obj));
1277  }
1278  else {
1279  return 0;
1280  }
1281 }
1282 
1283 const char *
1284 rb_objspace_data_type_name(VALUE obj)
1285 {
1286  if (RTYPEDDATA_P(obj)) {
1287  return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
1288  }
1289  else {
1290  return 0;
1291  }
1292 }
1293 
1294 #ifdef __ia64
1295 #define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
1296 #else
1297 #define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
1298 #endif
1299 
1300 #define STACK_START (th->machine_stack_start)
1301 #define STACK_END (th->machine_stack_end)
1302 #define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))
1303 
1304 #if STACK_GROW_DIRECTION < 0
1305 # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
1306 #elif STACK_GROW_DIRECTION > 0
1307 # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
1308 #else
1309 # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
1310  : (size_t)(STACK_END - STACK_START + 1))
1311 #endif
1312 #if !STACK_GROW_DIRECTION
1313 int ruby_stack_grow_direction;
1314 int
1315 ruby_get_stack_grow_direction(volatile VALUE *addr)
1316 {
1317  VALUE *end;
1318  SET_MACHINE_STACK_END(&end);
1319 
1320  if (end > addr) return ruby_stack_grow_direction = 1;
1321  return ruby_stack_grow_direction = -1;
1322 }
1323 #endif
1324 
1325 /* Marking stack */
1326 
1327 static void push_mark_stack(mark_stack_t *, VALUE);
1328 static int pop_mark_stack(mark_stack_t *, VALUE *);
1329 static void shrink_stack_chunk_cache(mark_stack_t *stack);
1330 
1331 static stack_chunk_t *
1332 stack_chunk_alloc(void)
1333 {
1334  stack_chunk_t *res;
1335 
1336  res = malloc(sizeof(stack_chunk_t));
1337  if (!res)
1338  rb_memerror();
1339 
1340  return res;
1341 }
1342 
1343 static inline int
1344 is_mark_stask_empty(mark_stack_t *stack)
1345 {
1346  return stack->chunk == NULL;
1347 }
1348 
1349 static void
1350 add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
1351 {
1352  chunk->next = stack->cache;
1353  stack->cache = chunk;
1354  stack->cache_size++;
1355 }
1356 
1357 static void
1358 shrink_stack_chunk_cache(mark_stack_t *stack)
1359 {
1360  stack_chunk_t *chunk;
1361 
1362  if (stack->unused_cache_size > (stack->cache_size/2)) {
1363  chunk = stack->cache;
1364  stack->cache = stack->cache->next;
1365  stack->cache_size--;
1366  free(chunk);
1367  }
1368  stack->unused_cache_size = stack->cache_size;
1369 }
1370 
1371 static void
1372 push_mark_stack_chunk(mark_stack_t *stack)
1373 {
1374  stack_chunk_t *next;
1375 
1376  if (stack->cache_size > 0) {
1377  next = stack->cache;
1378  stack->cache = stack->cache->next;
1379  stack->cache_size--;
1380  if (stack->unused_cache_size > stack->cache_size)
1381  stack->unused_cache_size = stack->cache_size;
1382  }
1383  else {
1384  next = stack_chunk_alloc();
1385  }
1386  next->next = stack->chunk;
1387  stack->chunk = next;
1388  stack->index = 0;
1389 }
1390 
1391 static void
1392 pop_mark_stack_chunk(mark_stack_t *stack)
1393 {
1394  stack_chunk_t *prev;
1395 
1396  prev = stack->chunk->next;
1397  add_stack_chunk_cache(stack, stack->chunk);
1398  stack->chunk = prev;
1399  stack->index = stack->limit;
1400 }
1401 
1402 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
1403 static void
1404 free_stack_chunks(mark_stack_t *stack)
1405 {
1406  stack_chunk_t *chunk = stack->chunk;
1407  stack_chunk_t *next = NULL;
1408 
1409  while (chunk != NULL) {
1410  next = chunk->next;
1411  free(chunk);
1412  chunk = next;
1413  }
1414 }
1415 #endif
1416 
1417 static void
1418 push_mark_stack(mark_stack_t *stack, VALUE data)
1419 {
1420  if (stack->index == stack->limit) {
1421  push_mark_stack_chunk(stack);
1422  }
1423  stack->chunk->data[stack->index++] = data;
1424 }
1425 
1426 static int
1427 pop_mark_stack(mark_stack_t *stack, VALUE *data)
1428 {
1429  if (is_mark_stask_empty(stack)) {
1430  return FALSE;
1431  }
1432  if (stack->index == 1) {
1433  *data = stack->chunk->data[--stack->index];
1434  pop_mark_stack_chunk(stack);
1435  return TRUE;
1436  }
1437  *data = stack->chunk->data[--stack->index];
1438  return TRUE;
1439 }
1440 
1441 static void
1442 init_mark_stack(mark_stack_t *stack)
1443 {
1444  int i;
1445 
1446  push_mark_stack_chunk(stack);
1447  stack->limit = STACK_CHUNK_SIZE;
1448 
1449  for(i=0; i < 4; i++) {
1450  add_stack_chunk_cache(stack, stack_chunk_alloc());
1451  }
1452  stack->unused_cache_size = stack->cache_size;
1453 }
1454 
1455 
1456 size_t
1457 ruby_stack_length(VALUE **p)
1458 {
1459  rb_thread_t *th = GET_THREAD();
1460  SET_STACK_END;
1461  if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
1462  return STACK_LENGTH;
1463 }
1464 
1465 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
1466 static int
1467 stack_check(int water_mark)
1468 {
1469  int ret;
1470  rb_thread_t *th = GET_THREAD();
1471  SET_STACK_END;
1472  ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark;
1473 #ifdef __ia64
1474  if (!ret) {
1475  ret = (VALUE*)rb_ia64_bsp() - th->machine_register_stack_start >
1476  th->machine_register_stack_maxsize/sizeof(VALUE) - water_mark;
1477  }
1478 #endif
1479  return ret;
1480 }
1481 #endif
1482 
1483 #define STACKFRAME_FOR_CALL_CFUNC 512
1484 
1485 int
1486 ruby_stack_check(void)
1487 {
1488 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
1489  return 0;
1490 #else
1491  return stack_check(STACKFRAME_FOR_CALL_CFUNC);
1492 #endif
1493 }
1494 
1495 #define MARK_STACK_EMPTY (mark_stack_ptr == mark_stack)
1496 
1497 static void gc_mark(rb_objspace_t *objspace, VALUE ptr);
1498 static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
1499 
1500 static void
1501 gc_mark_stacked_objects(rb_objspace_t *objspace)
1502 {
1503  mark_stack_t *mstack = &objspace->mark_stack;
1504  VALUE obj = 0;
1505 
1506  if (!mstack->index) return;
1507  while (pop_mark_stack(mstack, &obj)) {
1508  gc_mark_children(objspace, obj);
1509  }
1510  shrink_stack_chunk_cache(mstack);
1511 }
1512 
1513 static inline int
1514 is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
1515 {
1516  register RVALUE *p = RANY(ptr);
1517  register struct sorted_heaps_slot *heap;
1518  register size_t hi, lo, mid;
1519 
1520  if (p < lomem || p > himem) return FALSE;
1521  if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
1522 
1523  /* check whether p looks like a pointer into the heap, using binary search */
1524  lo = 0;
1525  hi = heaps_used;
1526  while (lo < hi) {
1527  mid = (lo + hi) / 2;
1528  heap = &objspace->heap.sorted[mid];
1529  if (heap->start <= p) {
1530  if (p < heap->end)
1531  return TRUE;
1532  lo = mid + 1;
1533  }
1534  else {
1535  hi = mid;
1536  }
1537  }
1538  return FALSE;
1539 }
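/*
 * is_pointer_to_heap() is what makes the collector conservative.  Any word
 * found on the machine stack or in registers is treated as a potential
 * reference if, and only if (in pseudo-code):
 *
 *     in_range  = (lomem <= p && p <= himem)
 *     aligned   = ((VALUE)p % sizeof(RVALUE) == 0)
 *     in_a_slot = binary search over heap.sorted finds start <= p < end
 *
 * A word that merely looks like a live cell's address is marked anyway; the
 * cost is an object retained one cycle longer than necessary, never a live
 * object freed early.
 */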
1540 
1541 static void
1542 mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
1543 {
1544  VALUE v;
1545  while (n--) {
1546  v = *x;
1547  VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v));
1548  if (is_pointer_to_heap(objspace, (void *)v)) {
1549  gc_mark(objspace, v);
1550  }
1551  x++;
1552  }
1553 }
1554 
1555 static void
1556 gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
1557 {
1558  long n;
1559 
1560  if (end <= start) return;
1561  n = end - start;
1562  mark_locations_array(objspace, start, n);
1563 }
1564 
1565 void
1566 rb_gc_mark_locations(VALUE *start, VALUE *end)
1567 {
1568  gc_mark_locations(&rb_objspace, start, end);
1569 }
1570 
1571 #define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, (start), (end))
1572 
1573 struct mark_tbl_arg {
1574  rb_objspace_t *objspace;
1575 };
1576 
1577 static int
1578 mark_entry(st_data_t key, st_data_t value, st_data_t data)
1579 {
1580  struct mark_tbl_arg *arg = (void*)data;
1581  gc_mark(arg->objspace, value);
1582  return ST_CONTINUE;
1583 }
1584 
1585 static void
1586 mark_tbl(rb_objspace_t *objspace, st_table *tbl)
1587 {
1588  struct mark_tbl_arg arg;
1589  if (!tbl || tbl->num_entries == 0) return;
1590  arg.objspace = objspace;
1591  st_foreach(tbl, mark_entry, (st_data_t)&arg);
1592 }
1593 
1594 static int
1595 mark_key(st_data_t key, st_data_t value, st_data_t data)
1596 {
1597  struct mark_tbl_arg *arg = (void*)data;
1598  gc_mark(arg->objspace, key);
1599  return ST_CONTINUE;
1600 }
1601 
1602 static void
1603 mark_set(rb_objspace_t *objspace, st_table *tbl)
1604 {
1605  struct mark_tbl_arg arg;
1606  if (!tbl) return;
1607  arg.objspace = objspace;
1608  st_foreach(tbl, mark_key, (st_data_t)&arg);
1609 }
1610 
1611 void
1612 rb_mark_set(st_table *tbl)
1613 {
1614  mark_set(&rb_objspace, tbl);
1615 }
1616 
1617 static int
1618 mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
1619 {
1620  struct mark_tbl_arg *arg = (void*)data;
1621  gc_mark(arg->objspace, key);
1622  gc_mark(arg->objspace, value);
1623  return ST_CONTINUE;
1624 }
1625 
1626 static void
1627 mark_hash(rb_objspace_t *objspace, st_table *tbl)
1628 {
1629  struct mark_tbl_arg arg;
1630  if (!tbl) return;
1631  arg.objspace = objspace;
1632  st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
1633 }
1634 
1635 void
1636 rb_mark_hash(st_table *tbl)
1637 {
1638  mark_hash(&rb_objspace, tbl);
1639 }
1640 
1641 static void
1642 mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
1643 {
1644  const rb_method_definition_t *def = me->def;
1645 
1646  gc_mark(objspace, me->klass);
1647  if (!def) return;
1648  switch (def->type) {
1649  case VM_METHOD_TYPE_ISEQ:
1650  gc_mark(objspace, def->body.iseq->self);
1651  break;
1652  case VM_METHOD_TYPE_BMETHOD:
1653  gc_mark(objspace, def->body.proc);
1654  break;
1655  case VM_METHOD_TYPE_ATTRSET:
1656  case VM_METHOD_TYPE_IVAR:
1657  gc_mark(objspace, def->body.attr.location);
1658  break;
1659  default:
1660  break; /* ignore */
1661  }
1662 }
1663 
1664 void
1665 rb_mark_method_entry(const rb_method_entry_t *me)
1666 {
1667  mark_method_entry(&rb_objspace, me);
1668 }
1669 
1670 static int
1671 mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
1672 {
1673  struct mark_tbl_arg *arg = (void*)data;
1674  mark_method_entry(arg->objspace, me);
1675  return ST_CONTINUE;
1676 }
1677 
1678 static void
1679 mark_m_tbl(rb_objspace_t *objspace, st_table *tbl)
1680 {
1681  struct mark_tbl_arg arg;
1682  if (!tbl) return;
1683  arg.objspace = objspace;
1684  st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg);
1685 }
1686 
1687 static int
1688 free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
1689 {
1690  if (!me->mark) {
1691  rb_free_method_entry(me);
1692  }
1693  return ST_CONTINUE;
1694 }
1695 
1696 void
1697 rb_free_m_table(st_table *tbl)
1698 {
1699  st_foreach(tbl, free_method_entry_i, 0);
1700  st_free_table(tbl);
1701 }
1702 
1703 static int
1704 mark_const_entry_i(ID key, const rb_const_entry_t *ce, st_data_t data)
1705 {
1706  struct mark_tbl_arg *arg = (void*)data;
1707  gc_mark(arg->objspace, ce->value);
1708  return ST_CONTINUE;
1709 }
1710 
1711 static void
1712 mark_const_tbl(rb_objspace_t *objspace, st_table *tbl)
1713 {
1714  struct mark_tbl_arg arg;
1715  if (!tbl) return;
1716  arg.objspace = objspace;
1717  st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg);
1718 }
1719 
1720 static int
1721 free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data)
1722 {
1723  xfree(ce);
1724  return ST_CONTINUE;
1725 }
1726 
1727 void
1728 rb_free_const_table(st_table *tbl)
1729 {
1730  st_foreach(tbl, free_const_entry_i, 0);
1731  st_free_table(tbl);
1732 }
1733 
1734 void
1735 rb_mark_tbl(st_table *tbl)
1736 {
1737  mark_tbl(&rb_objspace, tbl);
1738 }
1739 
1740 void
1741 rb_gc_mark_maybe(VALUE obj)
1742 {
1743  if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
1744  gc_mark(&rb_objspace, obj);
1745  }
1746 }
1747 
1748 static void
1749 gc_mark(rb_objspace_t *objspace, VALUE ptr)
1750 {
1751  register RVALUE *obj;
1752 
1753  obj = RANY(ptr);
1754  if (rb_special_const_p(ptr)) return; /* special const not marked */
1755  if (obj->as.basic.flags == 0) return; /* free cell */
1756  if (obj->as.basic.flags & FL_MARK) return; /* already marked */
1757  obj->as.basic.flags |= FL_MARK;
1758  objspace->heap.live_num++;
1759 
1760  push_mark_stack(&objspace->mark_stack, ptr);
1761 }
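/*
 * Marking is split in two on purpose: gc_mark() only sets FL_MARK and
 * pushes the object, while gc_mark_stacked_objects() later drains the
 * stack:
 *
 *     while (pop_mark_stack(mstack, &obj))
 *         gc_mark_children(objspace, obj);
 *
 * Earlier implementations recursed on the C stack and had to watch the
 * stack depth at every step; the explicit mark stack bounds memory use by
 * heap size rather than by object-graph depth.
 */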
1762 
1763 void
1764 rb_gc_mark(VALUE ptr)
1765 {
1766  gc_mark(&rb_objspace, ptr);
1767 }
1768 
1769 static void
1770 gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
1771 {
1772  register RVALUE *obj = RANY(ptr);
1773 
1774  goto marking; /* skip */
1775 
1776  again:
1777  obj = RANY(ptr);
1778  if (rb_special_const_p(ptr)) return; /* special const not marked */
1779  if (obj->as.basic.flags == 0) return; /* free cell */
1780  if (obj->as.basic.flags & FL_MARK) return; /* already marked */
1781  obj->as.basic.flags |= FL_MARK;
1782  objspace->heap.live_num++;
1783 
1784  marking:
1785  if (FL_TEST(obj, FL_EXIVAR)) {
1786  rb_mark_generic_ivar(ptr);
1787  }
1788 
1789  switch (BUILTIN_TYPE(obj)) {
1790  case T_NIL:
1791  case T_FIXNUM:
1792  rb_bug("rb_gc_mark() called for broken object");
1793  break;
1794 
1795  case T_NODE:
1796  switch (nd_type(obj)) {
1797  case NODE_IF: /* 1,2,3 */
1798  case NODE_FOR:
1799  case NODE_ITER:
1800  case NODE_WHEN:
1801  case NODE_MASGN:
1802  case NODE_RESCUE:
1803  case NODE_RESBODY:
1804  case NODE_CLASS:
1805  case NODE_BLOCK_PASS:
1806  gc_mark(objspace, (VALUE)obj->as.node.u2.node);
1807  /* fall through */
1808  case NODE_BLOCK: /* 1,3 */
1809  case NODE_OPTBLOCK:
1810  case NODE_ARRAY:
1811  case NODE_DSTR:
1812  case NODE_DXSTR:
1813  case NODE_DREGX:
1814  case NODE_DREGX_ONCE:
1815  case NODE_ENSURE:
1816  case NODE_CALL:
1817  case NODE_DEFS:
1818  case NODE_OP_ASGN1:
1819  case NODE_ARGS:
1820  gc_mark(objspace, (VALUE)obj->as.node.u1.node);
1821  /* fall through */
1822  case NODE_SUPER: /* 3 */
1823  case NODE_FCALL:
1824  case NODE_DEFN:
1825  case NODE_ARGS_AUX:
1826  ptr = (VALUE)obj->as.node.u3.node;
1827  goto again;
1828 
1829  case NODE_WHILE: /* 1,2 */
1830  case NODE_UNTIL:
1831  case NODE_AND:
1832  case NODE_OR:
1833  case NODE_CASE:
1834  case NODE_SCLASS:
1835  case NODE_DOT2:
1836  case NODE_DOT3:
1837  case NODE_FLIP2:
1838  case NODE_FLIP3:
1839  case NODE_MATCH2:
1840  case NODE_MATCH3:
1841  case NODE_OP_ASGN_OR:
1842  case NODE_OP_ASGN_AND:
1843  case NODE_MODULE:
1844  case NODE_ALIAS:
1845  case NODE_VALIAS:
1846  case NODE_ARGSCAT:
1847  gc_mark(objspace, (VALUE)obj->as.node.u1.node);
1848  /* fall through */
1849  case NODE_GASGN: /* 2 */
1850  case NODE_LASGN:
1851  case NODE_DASGN:
1852  case NODE_DASGN_CURR:
1853  case NODE_IASGN:
1854  case NODE_IASGN2:
1855  case NODE_CVASGN:
1856  case NODE_COLON3:
1857  case NODE_OPT_N:
1858  case NODE_EVSTR:
1859  case NODE_UNDEF:
1860  case NODE_POSTEXE:
1861  ptr = (VALUE)obj->as.node.u2.node;
1862  goto again;
1863 
1864  case NODE_HASH: /* 1 */
1865  case NODE_LIT:
1866  case NODE_STR:
1867  case NODE_XSTR:
1868  case NODE_DEFINED:
1869  case NODE_MATCH:
1870  case NODE_RETURN:
1871  case NODE_BREAK:
1872  case NODE_NEXT:
1873  case NODE_YIELD:
1874  case NODE_COLON2:
1875  case NODE_SPLAT:
1876  case NODE_TO_ARY:
1877  ptr = (VALUE)obj->as.node.u1.node;
1878  goto again;
1879 
1880  case NODE_SCOPE: /* 2,3 */
1881  case NODE_CDECL:
1882  case NODE_OPT_ARG:
1883  gc_mark(objspace, (VALUE)obj->as.node.u3.node);
1884  ptr = (VALUE)obj->as.node.u2.node;
1885  goto again;
1886 
1887  case NODE_ZARRAY: /* - */
1888  case NODE_ZSUPER:
1889  case NODE_VCALL:
1890  case NODE_GVAR:
1891  case NODE_LVAR:
1892  case NODE_DVAR:
1893  case NODE_IVAR:
1894  case NODE_CVAR:
1895  case NODE_NTH_REF:
1896  case NODE_BACK_REF:
1897  case NODE_REDO:
1898  case NODE_RETRY:
1899  case NODE_SELF:
1900  case NODE_NIL:
1901  case NODE_TRUE:
1902  case NODE_FALSE:
1903  case NODE_ERRINFO:
1904  case NODE_BLOCK_ARG:
1905  break;
1906  case NODE_ALLOCA:
1907  mark_locations_array(objspace,
1908  (VALUE*)obj->as.node.u1.value,
1909  obj->as.node.u3.cnt);
1910  ptr = (VALUE)obj->as.node.u2.node;
1911  goto again;
1912 
1913  default: /* unlisted NODE */
1914  if (is_pointer_to_heap(objspace, obj->as.node.u1.node)) {
1915  gc_mark(objspace, (VALUE)obj->as.node.u1.node);
1916  }
1917  if (is_pointer_to_heap(objspace, obj->as.node.u2.node)) {
1918  gc_mark(objspace, (VALUE)obj->as.node.u2.node);
1919  }
1920  if (is_pointer_to_heap(objspace, obj->as.node.u3.node)) {
1921  gc_mark(objspace, (VALUE)obj->as.node.u3.node);
1922  }
1923  }
1924  return; /* no need to mark class. */
1925  }
1926 
1927  gc_mark(objspace, obj->as.basic.klass);
1928  switch (BUILTIN_TYPE(obj)) {
1929  case T_ICLASS:
1930  case T_CLASS:
1931  case T_MODULE:
1932  mark_m_tbl(objspace, RCLASS_M_TBL(obj));
1933  mark_tbl(objspace, RCLASS_IV_TBL(obj));
1934  mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
1935  ptr = RCLASS_SUPER(obj);
1936  goto again;
1937 
1938  case T_ARRAY:
1939  if (FL_TEST(obj, ELTS_SHARED)) {
1940  ptr = obj->as.array.as.heap.aux.shared;
1941  goto again;
1942  }
1943  else {
1944  long i, len = RARRAY_LEN(obj);
1945  VALUE *ptr = RARRAY_PTR(obj);
1946  for (i=0; i < len; i++) {
1947  gc_mark(objspace, *ptr++);
1948  }
1949  }
1950  break;
1951 
1952  case T_HASH:
1953  mark_hash(objspace, obj->as.hash.ntbl);
1954  ptr = obj->as.hash.ifnone;
1955  goto again;
1956 
1957  case T_STRING:
1958 #define STR_ASSOC FL_USER3 /* copied from string.c */
1959  if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
1960  ptr = obj->as.string.as.heap.aux.shared;
1961  goto again;
1962  }
1963  break;
1964 
1965  case T_DATA:
1966  if (RTYPEDDATA_P(obj)) {
1967  RUBY_DATA_FUNC mark_func = obj->as.typeddata.type->function.dmark;
1968  if (mark_func) (*mark_func)(DATA_PTR(obj));
1969  }
1970  else {
1971  if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
1972  }
1973  break;
1974 
1975  case T_OBJECT:
1976  {
1977  long i, len = ROBJECT_NUMIV(obj);
1978  VALUE *ptr = ROBJECT_IVPTR(obj);
1979  for (i = 0; i < len; i++) {
1980  gc_mark(objspace, *ptr++);
1981  }
1982  }
1983  break;
1984 
1985  case T_FILE:
1986  if (obj->as.file.fptr) {
1987  gc_mark(objspace, obj->as.file.fptr->pathv);
1988  gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing);
1989  gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat);
1990  gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts);
1991  gc_mark(objspace, obj->as.file.fptr->encs.ecopts);
1992  gc_mark(objspace, obj->as.file.fptr->write_lock);
1993  }
1994  break;
1995 
1996  case T_REGEXP:
1997  ptr = obj->as.regexp.src;
1998  goto again;
1999 
2000  case T_FLOAT:
2001  case T_BIGNUM:
2002  case T_ZOMBIE:
2003  break;
2004 
2005  case T_MATCH:
2006  gc_mark(objspace, obj->as.match.regexp);
2007  if (obj->as.match.str) {
2008  ptr = obj->as.match.str;
2009  goto again;
2010  }
2011  break;
2012 
2013  case T_RATIONAL:
2014  gc_mark(objspace, obj->as.rational.num);
2015  ptr = obj->as.rational.den;
2016  goto again;
2017 
2018  case T_COMPLEX:
2019  gc_mark(objspace, obj->as.complex.real);
2020  ptr = obj->as.complex.imag;
2021  goto again;
2022 
2023  case T_STRUCT:
2024  {
2025  long len = RSTRUCT_LEN(obj);
2026  VALUE *ptr = RSTRUCT_PTR(obj);
2027 
2028  while (len--) {
2029  gc_mark(objspace, *ptr++);
2030  }
2031  }
2032  break;
2033 
2034  default:
2035  rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
2036  BUILTIN_TYPE(obj), (void *)obj,
2037  is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
2038  }
2039 }
2040 
2041 static int obj_free(rb_objspace_t *, VALUE);
2042 
2043 static inline void
2044 add_freelist(rb_objspace_t *objspace, RVALUE *p)
2045 {
2046  VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
2047  p->as.free.flags = 0;
2048  p->as.free.next = freelist;
2049  freelist = p;
2050 }
2051 
2052 static void
2053 finalize_list(rb_objspace_t *objspace, RVALUE *p)
2054 {
2055  while (p) {
2056  RVALUE *tmp = p->as.free.next;
2057  run_final(objspace, (VALUE)p);
2058  if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
2059  if (objspace->heap.sweep_slots) {
2060  p->as.free.flags = 0;
2061  }
2062  else {
2063  GC_PROF_DEC_LIVE_NUM;
2064  add_freelist(objspace, p);
2065  }
2066  }
2067  else {
2068  struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark;
2069  slot->limit--;
2070  }
2071  p = tmp;
2072  }
2073 }
2074 
2075 static void
2076 unlink_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
2077 {
2078  if (slot->prev)
2079  slot->prev->next = slot->next;
2080  if (slot->next)
2081  slot->next->prev = slot->prev;
2082  if (heaps == slot)
2083  heaps = slot->next;
2084  if (objspace->heap.sweep_slots == slot)
2085  objspace->heap.sweep_slots = slot->next;
2086  slot->prev = NULL;
2087  slot->next = NULL;
2088 }
2089 
2090 
2091 static void
2092 free_unused_heaps(rb_objspace_t *objspace)
2093 {
2094  size_t i, j;
2095  RVALUE *last = 0;
2096 
2097  for (i = j = 1; j < heaps_used; i++) {
2098  if (objspace->heap.sorted[i].slot->limit == 0) {
2099  if (!last) {
2100  last = objspace->heap.sorted[i].slot->membase;
2101  }
2102  else {
2103  free(objspace->heap.sorted[i].slot->membase);
2104  }
2105  free(objspace->heap.sorted[i].slot);
2106  heaps_used--;
2107  }
2108  else {
2109  if (i != j) {
2110  objspace->heap.sorted[j] = objspace->heap.sorted[i];
2111  }
2112  j++;
2113  }
2114  }
2115  if (last) {
2116  if (last < heaps_freed) {
2117  free(heaps_freed);
2118  heaps_freed = last;
2119  }
2120  else {
2121  free(last);
2122  }
2123  }
2124 }
2125 
2126 static void
2127 slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
2128 {
2129  size_t free_num = 0, final_num = 0;
2130  RVALUE *p, *pend;
2131  RVALUE *free = freelist, *final = deferred_final_list;
2132  int deferred;
2133 
2134  p = sweep_slot->slot; pend = p + sweep_slot->limit;
2135  while (p < pend) {
2136  if (!(p->as.basic.flags & FL_MARK)) {
2137  if (p->as.basic.flags &&
2138  ((deferred = obj_free(objspace, (VALUE)p)) ||
2139  (FL_TEST(p, FL_FINALIZE)))) {
2140  if (!deferred) {
2141  p->as.free.flags = T_ZOMBIE;
2142  RDATA(p)->dfree = 0;
2143  }
2144  p->as.free.flags |= FL_MARK;
2145  p->as.free.next = deferred_final_list;
2146  deferred_final_list = p;
2147  final_num++;
2148  }
2149  else {
2150  add_freelist(objspace, p);
2151  free_num++;
2152  }
2153  }
2154  else if (BUILTIN_TYPE(p) == T_ZOMBIE) {
2155  /* objects to be finalized */
2156  /* do nothing; remain marked */
2157  }
2158  else {
2159  RBASIC(p)->flags &= ~FL_MARK;
2160  }
2161  p++;
2162  }
2163  if (final_num + free_num == sweep_slot->limit &&
2164  objspace->heap.free_num > objspace->heap.do_heap_free) {
2165  RVALUE *pp;
2166 
2167  for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) {
2168  RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot;
2169  pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
2170  }
2171  sweep_slot->limit = final_num;
2172  freelist = free; /* cancel this page from freelist */
2173  unlink_heap_slot(objspace, sweep_slot);
2174  }
2175  else {
2176  objspace->heap.free_num += free_num;
2177  }
2178  objspace->heap.final_num += final_num;
2179 
2180  if (deferred_final_list && !finalizing) {
2181  rb_thread_t *th = GET_THREAD();
2182  if (th) {
2183  RUBY_VM_SET_FINALIZER_INTERRUPT(th);
2184  }
2185  }
2186 }
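/*
 * slot_sweep() decides the fate of every cell in one pass: unmarked cells
 * either become T_ZOMBIEs queued on deferred_final_list (if they need a
 * finalizer or deferred free) or go back on the freelist, while marked
 * cells just have FL_MARK cleared for the next cycle.  The special case at
 * the end is page reclamation, in sketch:
 *
 *     if (final_num + free_num == slot->limit        -- page is all garbage
 *         && heap.free_num > heap.do_heap_free)      -- and heap has slack
 *         unlink_heap_slot(objspace, slot);
 *
 * free_unused_heaps() then returns such pages to the system.
 */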
2187 
2188 static int
2189 ready_to_gc(rb_objspace_t *objspace)
2190 {
2191  if (dont_gc || during_gc) {
2192  if (!freelist) {
2193  if (!heaps_increment(objspace)) {
2194  set_heaps_increment(objspace);
2195  heaps_increment(objspace);
2196  }
2197  }
2198  return FALSE;
2199  }
2200  return TRUE;
2201 }
2202 
2203 static void
2204 before_gc_sweep(rb_objspace_t *objspace)
2205 {
2206  freelist = 0;
2207  objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
2208  objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.2);
2209  if (objspace->heap.free_min < initial_free_min) {
2210  objspace->heap.do_heap_free = heaps_used * HEAP_OBJ_LIMIT;
2211  objspace->heap.free_min = initial_free_min;
2212  }
2213  objspace->heap.sweep_slots = heaps;
2214  objspace->heap.free_num = 0;
2215 
2216  /* sweep unlinked method entries */
2217  if (GET_VM()->unlinked_method_entry_list) {
2218  rb_sweep_method_entry(GET_VM());
2219  }
2220 }
2221 
2222 static void
2223 after_gc_sweep(rb_objspace_t *objspace)
2224 {
2225  GC_PROF_SET_MALLOC_INFO;
2226 
2227  if (objspace->heap.free_num < objspace->heap.free_min) {
2228  set_heaps_increment(objspace);
2229  heaps_increment(objspace);
2230  }
2231 
2232  if (malloc_increase > malloc_limit) {
2233  malloc_limit += (size_t)((malloc_increase - malloc_limit) * (double)objspace->heap.live_num / (heaps_used * HEAP_OBJ_LIMIT));
2234  if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit;
2235  }
2236  malloc_increase = 0;
2237 
2238  free_unused_heaps(objspace);
2239 }
2240 
2241 static int
2243 {
2244  struct heaps_slot *next;
2245 
2246  heaps_increment(objspace);
2247  while (objspace->heap.sweep_slots) {
2248  next = objspace->heap.sweep_slots->next;
2249  slot_sweep(objspace, objspace->heap.sweep_slots);
2250  objspace->heap.sweep_slots = next;
2251  if (freelist) {
2252  during_gc = 0;
2253  return TRUE;
2254  }
2255  }
2256  return FALSE;
2257 }
2258 
2259 static void
2260 rest_sweep(rb_objspace_t *objspace)
2261 {
2262  if (objspace->heap.sweep_slots) {
2263  while (objspace->heap.sweep_slots) {
2264  lazy_sweep(objspace);
2265  }
2266  after_gc_sweep(objspace);
2267  }
2268 }
2269 
2270 static void gc_marks(rb_objspace_t *objspace);
2271 
2272 static int
2273 gc_lazy_sweep(rb_objspace_t *objspace)
2274 {
2275  int res;
2276  INIT_GC_PROF_PARAMS;
2277 
2278  if (objspace->flags.dont_lazy_sweep)
2279  return garbage_collect(objspace);
2280 
2281 
2282  if (!ready_to_gc(objspace)) return TRUE;
2283 
2284  during_gc++;
2285  GC_PROF_TIMER_START;
2286  GC_PROF_SWEEP_TIMER_START;
2287 
2288  if (objspace->heap.sweep_slots) {
2289  res = lazy_sweep(objspace);
2290  if (res) {
2291  GC_PROF_SWEEP_TIMER_STOP;
2292  GC_PROF_SET_MALLOC_INFO;
2293  GC_PROF_TIMER_STOP(Qfalse);
2294  return res;
2295  }
2296  after_gc_sweep(objspace);
2297  }
2298  else {
2299  if (heaps_increment(objspace)) {
2300  during_gc = 0;
2301  return TRUE;
2302  }
2303  }
2304 
2305  gc_marks(objspace);
2306 
2307  before_gc_sweep(objspace);
2308  if (objspace->heap.free_min > (heaps_used * HEAP_OBJ_LIMIT - objspace->heap.live_num)) {
2309  set_heaps_increment(objspace);
2310  }
2311 
2312  GC_PROF_SWEEP_TIMER_START;
2313  if (!(res = lazy_sweep(objspace))) {
2314  after_gc_sweep(objspace);
2315  if(freelist) {
2316  res = TRUE;
2317  during_gc = 0;
2318  }
2319  }
2320  GC_PROF_SWEEP_TIMER_STOP;
2321  GC_PROF_SET_MALLOC_INFO;
2322  GC_PROF_TIMER_STOP(Qtrue);
2323  return res;
2324 }
2325 
2326 static void
2327 gc_sweep(rb_objspace_t *objspace)
2328 {
2329  struct heaps_slot *next;
2330 
2331  before_gc_sweep(objspace);
2332 
2333  while (objspace->heap.sweep_slots) {
2334  next = objspace->heap.sweep_slots->next;
2335  slot_sweep(objspace, objspace->heap.sweep_slots);
2336  objspace->heap.sweep_slots = next;
2337  }
2338 
2339  after_gc_sweep(objspace);
2340 
2341  during_gc = 0;
2342 }
2343 
2344 void
2345 rb_gc_force_recycle(VALUE p)
2346 {
2347  rb_objspace_t *objspace = &rb_objspace;
2348  GC_PROF_DEC_LIVE_NUM;
2349  if (RBASIC(p)->flags & FL_MARK) {
2350  RANY(p)->as.free.flags = 0;
2351  }
2352  else {
2353  add_freelist(objspace, (RVALUE *)p);
2354  }
2355 }
2356 
2357 static inline void
2358 make_deferred(RVALUE *p)
2359 {
2360  p->as.basic.flags = (p->as.basic.flags & ~T_MASK) | T_ZOMBIE;
2361 }
2362 
2363 static inline void
2364 make_io_deferred(RVALUE *p)
2365 {
2366  rb_io_t *fptr = p->as.file.fptr;
2367  make_deferred(p);
2368  p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
2369  p->as.data.data = fptr;
2370 }
2371 
2372 static int
2373 obj_free(rb_objspace_t *objspace, VALUE obj)
2374 {
2375  switch (BUILTIN_TYPE(obj)) {
2376  case T_NIL:
2377  case T_FIXNUM:
2378  case T_TRUE:
2379  case T_FALSE:
2380  rb_bug("obj_free() called for broken object");
2381  break;
2382  }
2383 
2384  if (FL_TEST(obj, FL_EXIVAR)) {
2385  rb_free_generic_ivar(obj);
2386  FL_UNSET(obj, FL_EXIVAR);
2387  }
2388 
2389  switch (BUILTIN_TYPE(obj)) {
2390  case T_OBJECT:
2391  if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
2392  RANY(obj)->as.object.as.heap.ivptr) {
2393  xfree(RANY(obj)->as.object.as.heap.ivptr);
2394  }
2395  break;
2396  case T_MODULE:
2397  case T_CLASS:
2398  rb_clear_cache_by_class((VALUE)obj);
2399  rb_free_m_table(RCLASS_M_TBL(obj));
2400  if (RCLASS_IV_TBL(obj)) {
2401  st_free_table(RCLASS_IV_TBL(obj));
2402  }
2403  if (RCLASS_CONST_TBL(obj)) {
2404  rb_free_const_table(RCLASS_CONST_TBL(obj));
2405  }
2406  if (RCLASS_IV_INDEX_TBL(obj)) {
2407  st_free_table(RCLASS_IV_INDEX_TBL(obj));
2408  }
2409  xfree(RANY(obj)->as.klass.ptr);
2410  break;
2411  case T_STRING:
2412  rb_str_free(obj);
2413  break;
2414  case T_ARRAY:
2415  rb_ary_free(obj);
2416  break;
2417  case T_HASH:
2418  if (RANY(obj)->as.hash.ntbl) {
2419  st_free_table(RANY(obj)->as.hash.ntbl);
2420  }
2421  break;
2422  case T_REGEXP:
2423  if (RANY(obj)->as.regexp.ptr) {
2424  onig_free(RANY(obj)->as.regexp.ptr);
2425  }
2426  break;
2427  case T_DATA:
2428  if (DATA_PTR(obj)) {
2429  if (RTYPEDDATA_P(obj)) {
2430  RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree;
2431  }
2432  if (RANY(obj)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
2433  xfree(DATA_PTR(obj));
2434  }
2435  else if (RANY(obj)->as.data.dfree) {
2436  make_deferred(RANY(obj));
2437  return 1;
2438  }
2439  }
2440  break;
2441  case T_MATCH:
2442  if (RANY(obj)->as.match.rmatch) {
2443  struct rmatch *rm = RANY(obj)->as.match.rmatch;
2444  onig_region_free(&rm->regs, 0);
2445  if (rm->char_offset)
2446  xfree(rm->char_offset);
2447  xfree(rm);
2448  }
2449  break;
2450  case T_FILE:
2451  if (RANY(obj)->as.file.fptr) {
2452  make_io_deferred(RANY(obj));
2453  return 1;
2454  }
2455  break;
2456  case T_RATIONAL:
2457  case T_COMPLEX:
2458  break;
2459  case T_ICLASS:
2460  /* iClass shares table with the module */
2461  xfree(RANY(obj)->as.klass.ptr);
2462  break;
2463 
2464  case T_FLOAT:
2465  break;
2466 
2467  case T_BIGNUM:
2468  if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
2469  xfree(RBIGNUM_DIGITS(obj));
2470  }
2471  break;
2472  case T_NODE:
2473  switch (nd_type(obj)) {
2474  case NODE_SCOPE:
2475  if (RANY(obj)->as.node.u1.tbl) {
2476  xfree(RANY(obj)->as.node.u1.tbl);
2477  }
2478  break;
2479  case NODE_ALLOCA:
2480  xfree(RANY(obj)->as.node.u1.node);
2481  break;
2482  }
2483  break; /* no need to free iv_tbl */
2484 
2485  case T_STRUCT:
2486  if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
2487  RANY(obj)->as.rstruct.as.heap.ptr) {
2488  xfree(RANY(obj)->as.rstruct.as.heap.ptr);
2489  }
2490  break;
2491 
2492  default:
2493  rb_bug("gc_sweep(): unknown data type 0x%x(%p)",
2494  BUILTIN_TYPE(obj), (void*)obj);
2495  }
2496 
2497  return 0;
2498 }
2499 
2500 #define GC_NOTIFY 0
2501 
2502 #if STACK_GROW_DIRECTION < 0
2503 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
2504 #elif STACK_GROW_DIRECTION > 0
2505 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
2506 #else
2507 #define GET_STACK_BOUNDS(start, end, appendix) \
2508  ((STACK_END < STACK_START) ? \
2509  ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
2510 #endif
2511 
2512 #define numberof(array) (int)(sizeof(array) / sizeof((array)[0]))
2513 
2514 static void
2515 mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
2516 {
2517  union {
2518  rb_jmp_buf j;
2519  VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];
2520  } save_regs_gc_mark;
2521  VALUE *stack_start, *stack_end;
2522 
2523  FLUSH_REGISTER_WINDOWS;
2524  /* This assumes that all registers are saved into the jmp_buf (and stack) */
2525  rb_setjmp(save_regs_gc_mark.j);
2526 
2527  SET_STACK_END;
2528  GET_STACK_BOUNDS(stack_start, stack_end, 1);
2529 
2530  mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
2531 
2532  rb_gc_mark_locations(stack_start, stack_end);
2533 #ifdef __ia64
2534  rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
2535 #endif
2536 #if defined(__mc68000__)
2537  mark_locations_array(objspace, (VALUE*)((char*)STACK_END + 2),
2538  (STACK_START - STACK_END));
2539 #endif
2540 }
2541 
2542 static void
2543 gc_marks(rb_objspace_t *objspace)
2544 {
2545  struct gc_list *list;
2546  rb_thread_t *th = GET_THREAD();
2547  GC_PROF_MARK_TIMER_START;
2548 
2549  objspace->heap.live_num = 0;
2550  objspace->count++;
2551 
2552 
2553  SET_STACK_END;
2554 
2555  th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);
2556 
2557  mark_tbl(objspace, finalizer_table);
2558  mark_current_machine_context(objspace, th);
2559 
2560  rb_gc_mark_symbols();
2561  rb_gc_mark_encodings();
2562 
2563  /* mark protected global variables */
2564  for (list = global_List; list; list = list->next) {
2565  rb_gc_mark_maybe(*list->varptr);
2566  }
2567  rb_mark_end_proc();
2568  rb_gc_mark_global_tbl();
2569 
2570  mark_tbl(objspace, rb_class_tbl);
2571 
2572  /* mark generic instance variables for special constants */
2573  rb_mark_generic_ivar_tbl();
2574 
2575  rb_gc_mark_parser();
2576 
2577  rb_gc_mark_unlinked_live_method_entries(th->vm);
2578 
2579  /* marking-loop */
2580  gc_mark_stacked_objects(objspace);
2581 
2582  GC_PROF_MARK_TIMER_STOP;
2583 }
2584 
2585 static int
2586 garbage_collect(rb_objspace_t *objspace)
2587 {
2588  INIT_GC_PROF_PARAMS;
2589 
2590  if (GC_NOTIFY) printf("start garbage_collect()\n");
2591 
2592  if (!heaps) {
2593  return FALSE;
2594  }
2595  if (!ready_to_gc(objspace)) {
2596  return TRUE;
2597  }
2598 
2599  GC_PROF_TIMER_START;
2600 
2601  rest_sweep(objspace);
2602 
2603  during_gc++;
2604  gc_marks(objspace);
2605 
2606  GC_PROF_SWEEP_TIMER_START;
2607  gc_sweep(objspace);
2608  GC_PROF_SWEEP_TIMER_STOP;
2609 
2610  GC_PROF_TIMER_STOP(Qtrue);
2611  if (GC_NOTIFY) printf("end garbage_collect()\n");
2612  return TRUE;
2613 }
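/*
 * (Editor's note) garbage_collect() above is the entry point for one full
 * mark-and-sweep cycle: finish any pending lazy sweep (rest_sweep), mark
 * from the machine context and the root tables (gc_marks), then sweep
 * (gc_sweep), with the GC_PROF_* macros bracketing the phases for
 * GC::Profiler.
 */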
2614 
2615 int
2616 rb_garbage_collect(void)
2617 {
2618  return garbage_collect(&rb_objspace);
2619 }
2620 
2621 void
2622 rb_gc_mark_machine_stack(rb_thread_t *th)
2623 {
2624  rb_objspace_t *objspace = &rb_objspace;
2625  VALUE *stack_start, *stack_end;
2626 
2627  GET_STACK_BOUNDS(stack_start, stack_end, 0);
2628  rb_gc_mark_locations(stack_start, stack_end);
2629 #ifdef __ia64
2630  rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
2631 #endif
2632 }
2633 
2634 
2635 /*
2636  * call-seq:
2637  * GC.start -> nil
2638  * gc.garbage_collect -> nil
2639  * ObjectSpace.garbage_collect -> nil
2640  *
2641  * Initiates garbage collection, unless manually disabled.
2642  *
2643  */
2644 
2645 VALUE
2646 rb_gc_start(void)
2647 {
2648  rb_gc();
2649  return Qnil;
2650 }
2651 
2652 #undef Init_stack
2653 
2654 void
2655 Init_stack(volatile VALUE *addr)
2656 {
2657  ruby_init_stack(addr);
2658 }
2659 
2660 /*
2661  * Document-class: ObjectSpace
2662  *
2663  * The <code>ObjectSpace</code> module contains a number of routines
2664  * that interact with the garbage collection facility and allow you to
2665  * traverse all living objects with an iterator.
2666  *
2667  * <code>ObjectSpace</code> also provides support for object
2668  * finalizers, procs that will be called when a specific object is
2669  * about to be destroyed by garbage collection.
2670  *
2671  * include ObjectSpace
2672  *
2673  *
2674  * a = "A"
2675  * b = "B"
2676  * c = "C"
2677  *
2678  *
2679  * define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
2680  * define_finalizer(a, proc {|id| puts "Finalizer two on #{id}" })
2681  * define_finalizer(b, proc {|id| puts "Finalizer three on #{id}" })
2682  *
2683  * <em>produces:</em>
2684  *
2685  * Finalizer three on 537763470
2686  * Finalizer one on 537763480
2687  * Finalizer two on 537763480
2688  *
2689  */
2690 
2691 void
2692 Init_heap(void)
2693 {
2694  init_heap(&rb_objspace);
2695 }
2696 
2697 static VALUE
2698 lazy_sweep_enable(void)
2699 {
2700  rb_objspace_t *objspace = &rb_objspace;
2701 
2702  objspace->flags.dont_lazy_sweep = FALSE;
2703  return Qnil;
2704 }
2705 
2706 typedef int each_obj_callback(void *, void *, size_t, void *);
2707 
2708 struct each_obj_args {
2709  each_obj_callback *callback;
2710  void *data;
2711 };
2712 
2713 static VALUE
2714 objspace_each_objects(VALUE arg)
2715 {
2716  size_t i;
2717  RVALUE *membase = 0;
2718  RVALUE *pstart, *pend;
2719  rb_objspace_t *objspace = &rb_objspace;
2720  struct each_obj_args *args = (struct each_obj_args *)arg;
2721  volatile VALUE v;
2722 
2723  i = 0;
2724  while (i < heaps_used) {
2725  while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1].slot->membase)
2726  i--;
2727  while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i].slot->membase <= (uintptr_t)membase)
2728  i++;
2729  if (heaps_used <= i)
2730  break;
2731  membase = objspace->heap.sorted[i].slot->membase;
2732 
2733  pstart = objspace->heap.sorted[i].slot->slot;
2734  pend = pstart + objspace->heap.sorted[i].slot->limit;
2735 
2736  for (; pstart != pend; pstart++) {
2737  if (pstart->as.basic.flags) {
2738  v = (VALUE)pstart; /* acquire to save this object */
2739  break;
2740  }
2741  }
2742  if (pstart != pend) {
2743  if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) {
2744  break;
2745  }
2746  }
2747  }
2748 
2749  return Qnil;
2750 }
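/*
 * (Editor's note) The index re-scan at the top of the loop above re-locates
 * the current position by membase on every pass. The callback may allocate
 * and grow the heap, which re-sorts objspace->heap.sorted, so a remembered
 * index cannot be trusted across callback invocations.
 */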
2751 
2752 /*
2753  * rb_objspace_each_objects() is a special C API for walking through the
2754  * Ruby object space. It is difficult to use correctly; do not use it
2755  * unless you have read the source code of this function and understand
2756  * exactly what it does.
2757  *
2758  * 'callback' will be called several times (once per heap slot in the
2759  * current implementation) with:
2760  * vstart: a pointer to the first living object in the heap slot.
2761  * vend: a pointer just past the end of the valid heap slot area.
2762  * stride: the distance to the next VALUE.
2763  *
2764  * If callback() returns non-zero, the iteration will be stopped.
2765  *
2766  * This is a sample callback that iterates over the live objects:
2767  *
2768  * int
2769  * sample_callback(void *vstart, void *vend, size_t stride, void *data) {
2770  * VALUE v = (VALUE)vstart;
2771  * for (; v != (VALUE)vend; v += stride) {
2772  * if (RBASIC(v)->flags) { // liveness check
2773  * // do something with live object 'v'
2774  * }
2775  * }
2776  * return 0; // continue the iteration
2777  * }
2778  * Note: 'vstart' is not the top of the heap slot. It points at the first
2779  * living object in the slot, so that at least one object is held and the
2780  * slot cannot be reclaimed mid-iteration. This means you cannot walk
2781  * through every Ruby object slot, including freed slots.
2782  *
2783  * Note: In this implementation 'stride' is always sizeof(RVALUE).
2784  * However, other values may be passed in the future, so you must use
2785  * the 'stride' argument instead of a hard-coded constant when
2786  * advancing the iteration.
2787  */
2788 void
2789 rb_objspace_each_objects(each_obj_callback *callback, void *data)
2790 {
2791  struct each_obj_args args;
2792  rb_objspace_t *objspace = &rb_objspace;
2793 
2794  rest_sweep(objspace);
2795  objspace->flags.dont_lazy_sweep = TRUE;
2796 
2797  args.callback = callback;
2798  args.data = data;
2799  rb_ensure(objspace_each_objects, (VALUE)&args, lazy_sweep_enable, Qnil);
2800 }
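/*
 * (Editor's note) A minimal caller sketch for the API above; count_i and
 * count_live_objects are hypothetical names, not part of gc.c:
 *
 *     static int
 *     count_i(void *vstart, void *vend, size_t stride, void *data)
 *     {
 *         VALUE v = (VALUE)vstart;
 *         size_t *n = (size_t *)data;
 *         for (; v != (VALUE)vend; v += stride) {
 *             if (RBASIC(v)->flags) (*n)++; // liveness check
 *         }
 *         return 0; // 0 = keep iterating
 *     }
 *
 *     static size_t
 *     count_live_objects(void)
 *     {
 *         size_t n = 0;
 *         rb_objspace_each_objects(count_i, &n);
 *         return n;
 *     }
 */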
2801 
2802 struct os_each_struct {
2803  size_t num;
2804  VALUE of;
2805 };
2806 
2807 static int
2808 os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
2809 {
2810  struct os_each_struct *oes = (struct os_each_struct *)data;
2811  RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
2812  volatile VALUE v;
2813 
2814  for (; p != pend; p++) {
2815  if (p->as.basic.flags) {
2816  switch (BUILTIN_TYPE(p)) {
2817  case T_NONE:
2818  case T_ICLASS:
2819  case T_NODE:
2820  case T_ZOMBIE:
2821  continue;
2822  case T_CLASS:
2823  if (FL_TEST(p, FL_SINGLETON))
2824  continue;
2825  default:
2826  if (!p->as.basic.klass) continue;
2827  v = (VALUE)p;
2828  if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
2829  rb_yield(v);
2830  oes->num++;
2831  }
2832  }
2833  }
2834  }
2835 
2836  return 0;
2837 }
2838 
2839 static VALUE
2840 os_obj_of(VALUE of)
2841 {
2842  struct os_each_struct oes;
2843 
2844  oes.num = 0;
2845  oes.of = of;
2846  rb_objspace_each_objects(os_obj_of_i, &oes);
2847  return SIZET2NUM(oes.num);
2848 }
2849 
2850 /*
2851  * call-seq:
2852  * ObjectSpace.each_object([module]) {|obj| ... } -> fixnum
2853  * ObjectSpace.each_object([module]) -> an_enumerator
2854  *
2855  * Calls the block once for each living, nonimmediate object in this
2856  * Ruby process. If <i>module</i> is specified, calls the block
2857  * for only those classes or modules that match (or are a subclass of)
2858  * <i>module</i>. Returns the number of objects found. Immediate
2859  * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
2860  * <code>true</code>, <code>false</code>, and <code>nil</code>) are
2861  * never returned. In the example below, <code>each_object</code>
2862  * returns both the numbers we defined and several constants defined in
2863  * the <code>Math</code> module.
2864  *
2865  * If no block is given, an enumerator is returned instead.
2866  *
2867  * a = 102.7
2868  * b = 95 # Won't be returned
2869  * c = 12345678987654321
2870  * count = ObjectSpace.each_object(Numeric) {|x| p x }
2871  * puts "Total count: #{count}"
2872  *
2873  * <em>produces:</em>
2874  *
2875  * 12345678987654321
2876  * 102.7
2877  * 2.71828182845905
2878  * 3.14159265358979
2879  * 2.22044604925031e-16
2880  * 1.7976931348623157e+308
2881  * 2.2250738585072e-308
2882  * Total count: 7
2883  *
2884  */
2885 
2886 static VALUE
2887 os_each_obj(int argc, VALUE *argv, VALUE os)
2888 {
2889  VALUE of;
2890 
2891  rb_secure(4);
2892  if (argc == 0) {
2893  of = 0;
2894  }
2895  else {
2896  rb_scan_args(argc, argv, "01", &of);
2897  }
2898  RETURN_ENUMERATOR(os, 1, &of);
2899  return os_obj_of(of);
2900 }
2901 
2902 /*
2903  * call-seq:
2904  * ObjectSpace.undefine_finalizer(obj)
2905  *
2906  * Removes all finalizers for <i>obj</i>.
2907  *
2908  */
2909 
2910 static VALUE
2911 undefine_final(VALUE os, VALUE obj)
2912 {
2913  rb_objspace_t *objspace = &rb_objspace;
2914  st_data_t data = obj;
2915  rb_check_frozen(obj);
2916  st_delete(finalizer_table, &data, 0);
2917  FL_UNSET(obj, FL_FINALIZE);
2918  return obj;
2919 }
2920 
2921 /*
2922  * call-seq:
2923  * ObjectSpace.define_finalizer(obj, aProc=proc())
2924  *
2925  * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
2926  * was destroyed.
2927  *
2928  */
2929 
2930 static VALUE
2931 define_final(int argc, VALUE *argv, VALUE os)
2932 {
2933  rb_objspace_t *objspace = &rb_objspace;
2934  VALUE obj, block, table;
2935  st_data_t data;
2936 
2937  rb_scan_args(argc, argv, "11", &obj, &block);
2938  rb_check_frozen(obj);
2939  if (argc == 1) {
2940  block = rb_block_proc();
2941  }
2942  else if (!rb_respond_to(block, rb_intern("call"))) {
2943  rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
2944  rb_obj_classname(block));
2945  }
2946  if (!FL_ABLE(obj)) {
2947  rb_raise(rb_eArgError, "cannot define finalizer for %s",
2948  rb_obj_classname(obj));
2949  }
2950  RBASIC(obj)->flags |= FL_FINALIZE;
2951 
2952  block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
2953  OBJ_FREEZE(block);
2954 
2955  if (st_lookup(finalizer_table, obj, &data)) {
2956  table = (VALUE)data;
2957  rb_ary_push(table, block);
2958  }
2959  else {
2960  table = rb_ary_new3(1, block);
2961  RBASIC(table)->klass = 0;
2962  st_add_direct(finalizer_table, obj, table);
2963  }
2964  return block;
2965 }
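/*
 * (Editor's note) Each finalizer entry stored above is a frozen two-element
 * array [safe_level, proc]; run_finalizer() below restores that safe level
 * when it eventually hands the proc to rb_eval_cmd().
 */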
2966 
2967 void
2968 rb_gc_copy_finalizer(VALUE dest, VALUE obj)
2969 {
2970  rb_objspace_t *objspace = &rb_objspace;
2971  VALUE table;
2972  st_data_t data;
2973 
2974  if (!FL_TEST(obj, FL_FINALIZE)) return;
2975  if (st_lookup(finalizer_table, obj, &data)) {
2976  table = (VALUE)data;
2977  st_insert(finalizer_table, dest, table);
2978  }
2979  FL_SET(dest, FL_FINALIZE);
2980 }
2981 
2982 static VALUE
2983 run_single_final(VALUE arg)
2984 {
2985  VALUE *args = (VALUE *)arg;
2986  rb_eval_cmd(args[0], args[1], (int)args[2]);
2987  return Qnil;
2988 }
2989 
2990 static void
2991 run_finalizer(rb_objspace_t *objspace, VALUE objid, VALUE table)
2992 {
2993  long i;
2994  int status;
2995  VALUE args[3];
2996 
2997  if (RARRAY_LEN(table) > 0) {
2998  args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
2999  }
3000  else {
3001  args[1] = 0;
3002  }
3003 
3004  args[2] = (VALUE)rb_safe_level();
3005  for (i=0; i<RARRAY_LEN(table); i++) {
3006  VALUE final = RARRAY_PTR(table)[i];
3007  args[0] = RARRAY_PTR(final)[1];
3008  args[2] = FIX2INT(RARRAY_PTR(final)[0]);
3009  status = 0;
3010  rb_protect(run_single_final, (VALUE)args, &status);
3011  if (status)
3012  rb_set_errinfo(Qnil);
3013  }
3014 }
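/*
 * (Editor's note) run_finalizer() above invokes each finalizer proc through
 * rb_protect(), so an exception raised inside one finalizer is caught and
 * discarded instead of unwinding into the sweep phase and skipping the
 * remaining finalizers.
 */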
3015 
3016 static void
3017 run_final(rb_objspace_t *objspace, VALUE obj)
3018 {
3019  VALUE objid;
3020  RUBY_DATA_FUNC free_func = 0;
3021  st_data_t key, table;
3022 
3023  objspace->heap.final_num--;
3024 
3025  objid = rb_obj_id(obj); /* make obj into id */
3026  RBASIC(obj)->klass = 0;
3027 
3028  if (RTYPEDDATA_P(obj)) {
3029  free_func = RTYPEDDATA_TYPE(obj)->function.dfree;
3030  }
3031  else {
3032  free_func = RDATA(obj)->dfree;
3033  }
3034  if (free_func) {
3035  (*free_func)(DATA_PTR(obj));
3036  }
3037 
3038  key = (st_data_t)obj;
3039  if (st_delete(finalizer_table, &key, &table)) {
3040  run_finalizer(objspace, objid, (VALUE)table);
3041  }
3042 }
3043 
3044 static void
3045 finalize_deferred(rb_objspace_t *objspace)
3046 {
3047  RVALUE *p;
3048 
3049  while ((p = ATOMIC_PTR_EXCHANGE(deferred_final_list, 0)) != 0) {
3050  finalize_list(objspace, p);
3051  }
3052 }
3053 
3054 void
3055 rb_gc_finalize_deferred(void)
3056 {
3057  rb_objspace_t *objspace = &rb_objspace;
3058  if (ATOMIC_EXCHANGE(finalizing, 1)) return;
3059  finalize_deferred(objspace);
3060  ATOMIC_SET(finalizing, 0);
3061 }
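/*
 * (Editor's note) ATOMIC_EXCHANGE(finalizing, 1) above acts as a
 * test-and-set guard: if deferred finalizers are already being run, the
 * call returns immediately instead of re-entering finalize_deferred().
 */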
3062 
3063 struct force_finalize_list {
3064  VALUE obj;
3065  VALUE table;
3066  struct force_finalize_list *next;
3067 };
3068 
3069 static int
3070 force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
3071 {
3072  struct force_finalize_list **prev = (struct force_finalize_list **)arg;
3073  struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
3074  curr->obj = key;
3075  curr->table = val;
3076  curr->next = *prev;
3077  *prev = curr;
3078  return ST_CONTINUE;
3079 }
3080 
3081 void
3082 rb_gc_call_finalizer_at_exit(void)
3083 {
3084  rb_objspace_call_finalizer(&rb_objspace);
3085 }
3086 
3087 static void
3088 rb_objspace_call_finalizer(rb_objspace_t *objspace)
3089 {
3090  RVALUE *p, *pend;
3091  RVALUE *final_list = 0;
3092  size_t i;
3093 
3094  rest_sweep(objspace);
3095 
3096  /* run finalizers */
3097  finalize_deferred(objspace);
3098  assert(deferred_final_list == 0);
3099 
3100  if (ATOMIC_EXCHANGE(finalizing, 1)) return;
3101 
3102  /* force to run finalizer */
3103  while (finalizer_table->num_entries) {
3104  struct force_finalize_list *list = 0;
3105  st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
3106  while (list) {
3107  struct force_finalize_list *curr = list;
3108  run_finalizer(objspace, rb_obj_id(curr->obj), curr->table);
3109  st_delete(finalizer_table, (st_data_t*)&curr->obj, 0);
3110  list = curr->next;
3111  xfree(curr);
3112  }
3113  }
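 /*
  * (Editor's note) The enclosing while loop re-checks
  * finalizer_table->num_entries on every pass because a running finalizer
  * may itself register new finalizers; iteration continues until the
  * table is fully drained.
  */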
3114 
3115  /* finalizers are part of garbage collection */
3116  during_gc++;
3117 
3118  /* run data object's finalizers */
3119  for (i = 0; i < heaps_used; i++) {
3120  p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
3121  while (p < pend) {
3122  if (BUILTIN_TYPE(p) == T_DATA &&
3123  DATA_PTR(p) && RANY(p)->as.data.dfree &&
3124  !rb_obj_is_thread((VALUE)p) &&
3125  !rb_obj_is_fiber((VALUE)p)) {
3126  p->as.free.flags = 0;
3127  if (RTYPEDDATA_P(p)) {
3128  RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
3129  }
3130  if (RANY(p)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
3131  xfree(DATA_PTR(p));
3132  }
3133  else if (RANY(p)->as.data.dfree) {
3134  make_deferred(RANY(p));
3135  RANY(p)->as.free.next = final_list;
3136  final_list = p;
3137  }
3138  }
3139  else if (BUILTIN_TYPE(p) == T_FILE) {
3140  if (RANY(p)->as.file.fptr) {
3141  make_io_deferred(RANY(p));
3142  RANY(p)->as.free.next = final_list;
3143  final_list = p;
3144  }
3145  }
3146  p++;
3147  }
3148  }
3149  during_gc = 0;
3150  if (final_list) {
3151  finalize_list(objspace, final_list);
3152  }
3153 
3154  st_free_table(finalizer_table);
3155  finalizer_table = 0;
3156  ATOMIC_SET(finalizing, 0);
3157 }
3158 
3159 void
3160 rb_gc(void)
3161 {
3162  rb_objspace_t *objspace = &rb_objspace;
3163  garbage_collect(objspace);
3164  if (!finalizing) finalize_deferred(objspace);
3165  free_unused_heaps(objspace);
3166 }
3167 
3168 /*
3169  * call-seq:
3170  * ObjectSpace._id2ref(object_id) -> an_object
3171  *
3172  * Converts an object id to a reference to the object. May not be
3173  * called on an object id passed as a parameter to a finalizer.
3174  *
3175  * s = "I am a string" #=> "I am a string"
3176  * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
3177  * r == s #=> true
3178  *
3179  */
3180 
3181 static VALUE
3182 id2ref(VALUE obj, VALUE objid)
3183 {
3184 #if SIZEOF_LONG == SIZEOF_VOIDP
3185 #define NUM2PTR(x) NUM2ULONG(x)
3186 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
3187 #define NUM2PTR(x) NUM2ULL(x)
3188 #endif
3189  rb_objspace_t *objspace = &rb_objspace;
3190  VALUE ptr;
3191  void *p0;
3192 
3193  rb_secure(4);
3194  ptr = NUM2PTR(objid);
3195  p0 = (void *)ptr;
3196 
3197  if (ptr == Qtrue) return Qtrue;
3198  if (ptr == Qfalse) return Qfalse;
3199  if (ptr == Qnil) return Qnil;
3200  if (FIXNUM_P(ptr)) return (VALUE)ptr;
3201  ptr = obj_id_to_ref(objid);
3202 
3203  if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
3204  ID symid = ptr / sizeof(RVALUE);
3205  if (rb_id2name(symid) == 0)
3206  rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
3207  return ID2SYM(symid);
3208  }
3209 
3210  if (!is_pointer_to_heap(objspace, (void *)ptr) ||
3211  BUILTIN_TYPE(ptr) > T_FIXNUM || BUILTIN_TYPE(ptr) == T_ICLASS) {
3212  rb_raise(rb_eRangeError, "%p is not id value", p0);
3213  }
3214  if (BUILTIN_TYPE(ptr) == 0 || RBASIC(ptr)->klass == 0) {
3215  rb_raise(rb_eRangeError, "%p is recycled object", p0);
3216  }
3217  return (VALUE)ptr;
3218 }
3219 
3220 /*
3221  * Document-method: __id__
3222  * Document-method: object_id
3223  *
3224  * call-seq:
3225  * obj.__id__ -> fixnum
3226  * obj.object_id -> fixnum
3227  *
3228  * Returns an integer identifier for <i>obj</i>. The same number will
3229  * be returned on all calls to <code>id</code> for a given object, and
3230  * no two active objects will share an id.
3231  * <code>Object#object_id</code> is a different concept from the
3232  * <code>:name</code> notation, which returns the symbol id of
3233  * <code>name</code>. Replaces the deprecated <code>Object#id</code>.
3234  */
3235 
3236 /*
3237  * call-seq:
3238  * obj.hash -> fixnum
3239  *
3240  * Generates a <code>Fixnum</code> hash value for this object. This
3241  * function must have the property that <code>a.eql?(b)</code> implies
3242  * <code>a.hash == b.hash</code>. The hash value is used by class
3243  * <code>Hash</code>. Any hash value that exceeds the capacity of a
3244  * <code>Fixnum</code> will be truncated before being used.
3245  */
3246 
3247 VALUE
3248 rb_obj_id(VALUE obj)
3249 {
3250  /*
3251  * 32-bit VALUE space
3252  * MSB ------------------------ LSB
3253  * false 00000000000000000000000000000000
3254  * true 00000000000000000000000000000010
3255  * nil 00000000000000000000000000000100
3256  * undef 00000000000000000000000000000110
3257  * symbol ssssssssssssssssssssssss00001110
3258  * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
3259  * fixnum fffffffffffffffffffffffffffffff1
3260  *
3261  * object_id space
3262  * LSB
3263  * false 00000000000000000000000000000000
3264  * true 00000000000000000000000000000010
3265  * nil 00000000000000000000000000000100
3266  * undef 00000000000000000000000000000110
3267  * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
3268  * object oooooooooooooooooooooooooooooo0 o...o % A = 0
3269  * fixnum fffffffffffffffffffffffffffffff1 bignum if required
3270  *
3271  * where A = sizeof(RVALUE)/4
3272  *
3273  * sizeof(RVALUE) is
3274  * 20 if 32-bit, double is 4-byte aligned
3275  * 24 if 32-bit, double is 8-byte aligned
3276  * 40 if 64-bit
3277  */
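    /*
     * (Editor's note) A worked example under the 64-bit layout above,
     * where sizeof(RVALUE) == 40: a Symbol whose SYM2ID() is 100 yields
     * the VALUE 100 * 40 + (4 << 2) = 4016, tagged with FIXNUM_FLAG to
     * 4017, i.e. the Fixnum 2008. id2ref() reverses this:
     * 4017 ^ FIXNUM_FLAG == 4016, 4016 % 40 == 16 == (4 << 2) selects the
     * symbol case, and 4016 / 40 == 100 recovers the symbol id.
     */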
3278  if (SYMBOL_P(obj)) {
3279  return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
3280  }
3281  if (SPECIAL_CONST_P(obj)) {
3282  return LONG2NUM((SIGNED_VALUE)obj);
3283  }
3284  return nonspecial_obj_id(obj);
3285 }
3286 
3287 static int
3288 set_zero(st_data_t key, st_data_t val, st_data_t arg)
3289 {
3290  VALUE k = (VALUE)key;
3291  VALUE hash = (VALUE)arg;
3292  rb_hash_aset(hash, k, INT2FIX(0));
3293  return ST_CONTINUE;
3294 }
3295 
3296 /*
3297  * call-seq:
3298  * ObjectSpace.count_objects([result_hash]) -> hash
3299  *
3300  * Counts objects for each type.
3301  *
3302  * It returns a hash as:
3303  * {:TOTAL=>10000, :FREE=>3011, :T_OBJECT=>6, :T_CLASS=>404, ...}
3304  *
3305  * If the optional argument result_hash is given,
3306  * it is overwritten and returned.
3307  * This is intended to avoid the probe effect.
3308  *
3309  * The contents of the returned hash are implementation defined
3310  * and may be changed in the future.
3311  *
3312  * This method is only expected to work on C Ruby.
3313  *
3314  */
3315 
3316 static VALUE
3317 count_objects(int argc, VALUE *argv, VALUE os)
3318 {
3319  rb_objspace_t *objspace = &rb_objspace;
3320  size_t counts[T_MASK+1];
3321  size_t freed = 0;
3322  size_t total = 0;
3323  size_t i;
3324  VALUE hash;
3325 
3326  if (rb_scan_args(argc, argv, "01", &hash) == 1) {
3327  if (TYPE(hash) != T_HASH)
3328  rb_raise(rb_eTypeError, "non-hash given");
3329  }
3330 
3331  for (i = 0; i <= T_MASK; i++) {
3332  counts[i] = 0;
3333  }
3334 
3335  for (i = 0; i < heaps_used; i++) {
3336  RVALUE *p, *pend;
3337 
3338  p = objspace->heap.sorted[i].start; pend = objspace->heap.sorted[i].end;
3339  for (;p < pend; p++) {
3340  if (p->as.basic.flags) {
3341  counts[BUILTIN_TYPE(p)]++;
3342  }
3343  else {
3344  freed++;
3345  }
3346  }
3347  total += objspace->heap.sorted[i].slot->limit;
3348  }
3349 
3350  if (hash == Qnil) {
3351  hash = rb_hash_new();
3352  }
3353  else if (!RHASH_EMPTY_P(hash)) {
3354  st_foreach(RHASH_TBL(hash), set_zero, hash);
3355  }
3356  rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
3357  rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
3358 
3359  for (i = 0; i <= T_MASK; i++) {
3360  VALUE type;
3361  switch (i) {
3362 #define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
3363  COUNT_TYPE(T_NONE);
3364  COUNT_TYPE(T_OBJECT);
3365  COUNT_TYPE(T_CLASS);
3366  COUNT_TYPE(T_MODULE);
3367  COUNT_TYPE(T_FLOAT);
3368  COUNT_TYPE(T_STRING);
3369  COUNT_TYPE(T_REGEXP);
3370  COUNT_TYPE(T_ARRAY);
3371  COUNT_TYPE(T_HASH);
3372  COUNT_TYPE(T_STRUCT);
3373  COUNT_TYPE(T_BIGNUM);
3374  COUNT_TYPE(T_FILE);
3375  COUNT_TYPE(T_DATA);
3376  COUNT_TYPE(T_MATCH);
3377  COUNT_TYPE(T_COMPLEX);
3378  COUNT_TYPE(T_RATIONAL);
3379  COUNT_TYPE(T_NIL);
3380  COUNT_TYPE(T_TRUE);
3381  COUNT_TYPE(T_FALSE);
3382  COUNT_TYPE(T_SYMBOL);
3383  COUNT_TYPE(T_FIXNUM);
3384  COUNT_TYPE(T_UNDEF);
3385  COUNT_TYPE(T_NODE);
3386  COUNT_TYPE(T_ICLASS);
3387  COUNT_TYPE(T_ZOMBIE);
3388 #undef COUNT_TYPE
3389  default: type = INT2NUM(i); break;
3390  }
3391  if (counts[i])
3392  rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
3393  }
3394 
3395  return hash;
3396 }
3397 
3398 /*
3399  * call-seq:
3400  * GC.count -> Integer
3401  *
3402  * The number of times GC has occurred.
3403  *
3404  * It returns the number of times GC has occurred since the process started.
3405  *
3406  */
3407 
3408 static VALUE
3409 gc_count(VALUE self)
3410 {
3411  return UINT2NUM((&rb_objspace)->count);
3412 }
3413 
3414 /*
3415  * call-seq:
3416  * GC.stat -> Hash
3417  *
3418  * Returns a Hash containing information about the GC.
3419  *
3420  * The hash includes internal GC statistics, such as:
3421  *
3422  * {
3423  * :count => 18,
3424  * :heap_used => 77,
3425  * :heap_length => 77,
3426  * :heap_increment => 0,
3427  * :heap_live_num => 23287,
3428  * :heap_free_num => 8115,
3429  * :heap_final_num => 0,
3430  * }
3431  *
3432  * The contents of the hash are implementation defined and may be changed in
3433  * the future.
3434  *
3435  * This method is only expected to work on C Ruby.
3436  *
3437  */
3438 
3439 static VALUE
3440 gc_stat(int argc, VALUE *argv, VALUE self)
3441 {
3442  rb_objspace_t *objspace = &rb_objspace;
3443  VALUE hash;
3444 
3445  if (rb_scan_args(argc, argv, "01", &hash) == 1) {
3446  if (TYPE(hash) != T_HASH)
3447  rb_raise(rb_eTypeError, "non-hash given");
3448  }
3449 
3450  if (hash == Qnil) {
3451  hash = rb_hash_new();
3452  }
3453 
3454  rest_sweep(objspace);
3455 
3456  rb_hash_aset(hash, ID2SYM(rb_intern("count")), SIZET2NUM(objspace->count));
3457 
3458  /* implementation dependent counters */
3459  rb_hash_aset(hash, ID2SYM(rb_intern("heap_used")), SIZET2NUM(objspace->heap.used));
3460  rb_hash_aset(hash, ID2SYM(rb_intern("heap_length")), SIZET2NUM(objspace->heap.length));
3461  rb_hash_aset(hash, ID2SYM(rb_intern("heap_increment")), SIZET2NUM(objspace->heap.increment));
3462  rb_hash_aset(hash, ID2SYM(rb_intern("heap_live_num")), SIZET2NUM(objspace->heap.live_num));
3463  rb_hash_aset(hash, ID2SYM(rb_intern("heap_free_num")), SIZET2NUM(objspace->heap.free_num));
3464  rb_hash_aset(hash, ID2SYM(rb_intern("heap_final_num")), SIZET2NUM(objspace->heap.final_num));
3465  return hash;
3466 }
3467 
3468 
3469 #if CALC_EXACT_MALLOC_SIZE
3470 /*
3471  * call-seq:
3472  * GC.malloc_allocated_size -> Integer
3473  *
3474  * The total size currently allocated via malloc().
3475  *
3476  * It returns the total size, in bytes, of memory allocated by malloc().
3477  */
3478 
3479 static VALUE
3480 gc_malloc_allocated_size(VALUE self)
3481 {
3482  return UINT2NUM((&rb_objspace)->malloc_params.allocated_size);
3483 }
3484 
3485 /*
3486  * call-seq:
3487  * GC.malloc_allocations -> Integer
3488  *
3489  * The number of memory blocks currently allocated via malloc().
3490  *
3491  * It returns the number of live allocations made through malloc().
3492  */
3493 
3494 static VALUE
3495 gc_malloc_allocations(VALUE self)
3496 {
3497  return UINT2NUM((&rb_objspace)->malloc_params.allocations);
3498 }
3499 #endif
3500 
3501 static VALUE
3502 gc_profile_record_get(void)
3503 {
3504  VALUE prof;
3505  VALUE gc_profile = rb_ary_new();
3506  size_t i;
3507  rb_objspace_t *objspace = (&rb_objspace);
3508 
3509  if (!objspace->profile.run) {
3510  return Qnil;
3511  }
3512 
3513  for (i =0; i < objspace->profile.count; i++) {
3514  prof = rb_hash_new();
3515  rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(objspace->profile.record[i].gc_time));
3516  rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(objspace->profile.record[i].gc_invoke_time));
3517  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(objspace->profile.record[i].heap_use_size));
3518  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(objspace->profile.record[i].heap_total_size));
3519  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_total_objects));
3520  rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), objspace->profile.record[i].is_marked);
3521 #if GC_PROFILE_MORE_DETAIL
3522  rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(objspace->profile.record[i].gc_mark_time));
3523  rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(objspace->profile.record[i].gc_sweep_time));
3524  rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(objspace->profile.record[i].allocate_increase));
3525  rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(objspace->profile.record[i].allocate_limit));
3526  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SLOTS")), SIZET2NUM(objspace->profile.record[i].heap_use_slots));
3527  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_live_objects));
3528  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(objspace->profile.record[i].heap_free_objects));
3529  rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), objspace->profile.record[i].have_finalize);
3530 #endif
3531  rb_ary_push(gc_profile, prof);
3532  }
3533 
3534  return gc_profile;
3535 }
3536 
3537 /*
3538  * call-seq:
3539  * GC::Profiler.result -> String
3540  *
3541  * Returns a profile data report such as:
3542  *
3543  * GC 1 invokes.
3544  * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC time(ms)
3545  * 1 0.012 159240 212940 10647 0.00000000000001530000
3546  */
3547 
3548 static VALUE
3549 gc_profile_result(void)
3550 {
3551  rb_objspace_t *objspace = &rb_objspace;
3552  VALUE record;
3553  VALUE result;
3554  int i, index;
3555 
3556  record = gc_profile_record_get();
3557  if (objspace->profile.run && objspace->profile.count) {
3558  result = rb_sprintf("GC %d invokes.\n", NUM2INT(gc_count(0)));
3559  index = 1;
3560  rb_str_cat2(result, "Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n");
3561  for (i = 0; i < (int)RARRAY_LEN(record); i++) {
3562  VALUE r = RARRAY_PTR(record)[i];
3563 #if !GC_PROFILE_MORE_DETAIL
3564  if (rb_hash_aref(r, ID2SYM(rb_intern("GC_IS_MARKED")))) {
3565 #endif
3566  rb_str_catf(result, "%5d %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
3567  index++, NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_INVOKE_TIME")))),
3568  (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_USE_SIZE")))),
3569  (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")))),
3570  (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")))),
3571  NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_TIME"))))*1000);
3572 #if !GC_PROFILE_MORE_DETAIL
3573  }
3574 #endif
3575  }
3576 #if GC_PROFILE_MORE_DETAIL
3577  rb_str_cat2(result, "\n\n");
3578  rb_str_cat2(result, "More detail.\n");
3579  rb_str_cat2(result, "Index Allocate Increase Allocate Limit Use Slot Have Finalize Mark Time(ms) Sweep Time(ms)\n");
3580  index = 1;
3581  for (i = 0; i < (int)RARRAY_LEN(record); i++) {
3582  VALUE r = RARRAY_PTR(record)[i];
3583  rb_str_catf(result, "%5d %17"PRIuSIZE" %17"PRIuSIZE" %9"PRIuSIZE" %14s %25.20f %25.20f\n",
3584  index++, (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("ALLOCATE_INCREASE")))),
3585  (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("ALLOCATE_LIMIT")))),
3586  (size_t)NUM2SIZET(rb_hash_aref(r, ID2SYM(rb_intern("HEAP_USE_SLOTS")))),
3587  rb_hash_aref(r, ID2SYM(rb_intern("HAVE_FINALIZE")))? "true" : "false",
3588  NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_MARK_TIME"))))*1000,
3589  NUM2DBL(rb_hash_aref(r, ID2SYM(rb_intern("GC_SWEEP_TIME"))))*1000);
3590  }
3591 #endif
3592  }
3593  else {
3594  result = rb_str_new2("");
3595  }
3596  return result;
3597 }
3598 
3599 
3600 /*
3601  * call-seq:
3602  * GC::Profiler.report
3603  * GC::Profiler.report io
3604  *
3605  * Writes the GC::Profiler#result to <tt>$stdout</tt> or the given IO object.
3606  *
3607  */
3608 
3609 static VALUE
3610 gc_profile_report(int argc, VALUE *argv, VALUE self)
3611 {
3612  VALUE out;
3613 
3614  if (argc == 0) {
3615  out = rb_stdout;
3616  }
3617  else {
3618  rb_scan_args(argc, argv, "01", &out);
3619  }
3620  rb_io_write(out, gc_profile_result());
3621 
3622  return Qnil;
3623 }
3624 
3625 /*
3626  * call-seq:
3627  * GC::Profiler.total_time -> float
3628  *
3629  * The total time spent in garbage collection, in seconds
3630  */
3631 
3632 static VALUE
3633 gc_profile_total_time(VALUE self)
3634 {
3635  double time = 0;
3636  rb_objspace_t *objspace = &rb_objspace;
3637  size_t i;
3638 
3639  if (objspace->profile.run && objspace->profile.count) {
3640  for (i = 0; i < objspace->profile.count; i++) {
3641  time += objspace->profile.record[i].gc_time;
3642  }
3643  }
3644  return DBL2NUM(time);
3645 }
3646 
3647 /* Document-class: GC::Profiler
3648  *
3649  * The GC profiler provides access to information on GC runs including time,
3650  * length and object space size.
3651  *
3652  * Example:
3653  *
3654  * GC::Profiler.enable
3655  *
3656  * require 'rdoc/rdoc'
3657  *
3658  * puts GC::Profiler.result
3659  *
3660  * GC::Profiler.disable
3661  *
3662  * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
3663  */
3664 
3665 /*
3666  * The <code>GC</code> module provides an interface to Ruby's mark and
3667  * sweep garbage collection mechanism. Some of the underlying methods
3668  * are also available via the ObjectSpace module.
3669  *
3670  * You may obtain information about the operation of the GC through
3671  * GC::Profiler.
3672  */
3673 
3674 void
3675 Init_GC(void)
3676 {
3677  VALUE rb_mObSpace;
3678  VALUE rb_mProfiler;
3679 
3680  rb_mGC = rb_define_module("GC");
3681  rb_define_singleton_method(rb_mGC, "start", rb_gc_start, 0);
3682  rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0);
3683  rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0);
3684  rb_define_singleton_method(rb_mGC, "stress", gc_stress_get, 0);
3685  rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1);
3686  rb_define_singleton_method(rb_mGC, "count", gc_count, 0);
3687  rb_define_singleton_method(rb_mGC, "stat", gc_stat, -1);
3688  rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);
3689 
3690  rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
3691  rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
3692  rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
3693  rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
3694  rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
3695  rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
3696  rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
3697  rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
3698 
3699  rb_mObSpace = rb_define_module("ObjectSpace");
3700  rb_define_module_function(rb_mObSpace, "each_object", os_each_obj, -1);
3701  rb_define_module_function(rb_mObSpace, "garbage_collect", rb_gc_start, 0);
3702 
3703  rb_define_module_function(rb_mObSpace, "define_finalizer", define_final, -1);
3704  rb_define_module_function(rb_mObSpace, "undefine_finalizer", undefine_final, 1);
3705 
3706  rb_define_module_function(rb_mObSpace, "_id2ref", id2ref, 1);
3707 
3708  nomem_error = rb_exc_new3(rb_eNoMemError,
3709  rb_obj_freeze(rb_str_new2("failed to allocate memory")));
3710  OBJ_TAINT(nomem_error);
3711  OBJ_FREEZE(nomem_error);
3712 
3713  rb_define_method(rb_mBasicObject, "__id__", rb_obj_id, 0);
3714  rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
3715 
3716  rb_define_module_function(rb_mObSpace, "count_objects", count_objects, -1);
3717 
3718 #if CALC_EXACT_MALLOC_SIZE
3719  rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
3720  rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
3721 #endif
3722 }
3723