From mboxrd@z Thu Jan 1 00:00:00 1970
From: Eric Wong
To: spew@80x24.org
Subject: [PATCH 2/3] gc.c: introduce GPR_FLAG_IMMEDIATE_MARK to reduce parameters
Date: Wed, 30 May 2018 22:46:42 +0000
Message-Id: <20180530224643.21110-3-e@80x24.org>
In-Reply-To: <20180530224643.21110-1-e@80x24.org>
References: <20180530224643.21110-1-e@80x24.org>
List-Id:

Another step toward making gc_start and garbage_collect self-documenting,
because remembering the order of function arguments is difficult.
---
 gc.c | 59 ++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 32 insertions(+), 27 deletions(-)

diff --git a/gc.c b/gc.c
index bf62bd1a4e5..77833c419f9 100644
--- a/gc.c
+++ b/gc.c
@@ -355,7 +355,8 @@ typedef enum {
 
     /* others */
     GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
-    GPR_FLAG_HAVE_FINALIZE = 0x4000
+    GPR_FLAG_HAVE_FINALIZE = 0x4000,
+    GPR_FLAG_IMMEDIATE_MARK = 0x8000
 } gc_profile_record_flag;
 
 typedef struct gc_profile_record {
@@ -848,9 +849,9 @@ static void init_mark_stack(mark_stack_t *stack);
 
 static int ready_to_gc(rb_objspace_t *objspace);
 
-static int garbage_collect(rb_objspace_t *, int full_mark, int immediate_mark, int reason);
+static int garbage_collect(rb_objspace_t *, int full_mark, int reason);
 
-static int gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark, int reason);
+static int gc_start(rb_objspace_t *objspace, const int full_mark, int reason);
 static void gc_rest(rb_objspace_t *objspace);
 static inline void gc_enter(rb_objspace_t *objspace, const char *event);
 static inline void gc_exit(rb_objspace_t *objspace, const char *event);
@@ -1749,7 +1750,7 @@ heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
 
     if (heap->free_pages == NULL &&
         (will_be_incremental_marking(objspace) || heap_increment(objspace, heap) == FALSE) &&
-        gc_start(objspace, FALSE, FALSE, GPR_FLAG_NEWOBJ) == FALSE) {
+        gc_start(objspace, FALSE, GPR_FLAG_NEWOBJ) == FALSE) {
         rb_memerror();
     }
 }
@@ -1919,7 +1920,7 @@ newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objsp
         }
 
         if (ruby_gc_stressful) {
-            if (!garbage_collect(objspace, FALSE, FALSE, GPR_FLAG_NEWOBJ)) {
+            if (!garbage_collect(objspace, FALSE, GPR_FLAG_NEWOBJ)) {
                 rb_memerror();
             }
         }
@@ -6396,7 +6397,7 @@ gc_reset_malloc_info(rb_objspace_t *objspace)
 }
 
 static int
-garbage_collect(rb_objspace_t *objspace, int full_mark, int immediate_mark, int reason)
+garbage_collect(rb_objspace_t *objspace, int full_mark, int reason)
 {
 #if GC_PROFILE_MORE_DETAIL
     objspace->profile.prepare_time = getrusage_time();
@@ -6408,13 +6409,14 @@ garbage_collect(rb_objspace_t *objspace, int full_mark, int immediate_mark, int
     objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
 #endif
 
-    return gc_start(objspace, full_mark, immediate_mark, reason);
+    return gc_start(objspace, full_mark, reason);
 }
 
 static int
-gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark, int reason)
+gc_start(rb_objspace_t *objspace, const int full_mark, int reason)
 {
     int do_full_mark = full_mark;
+    unsigned int immediate_mark = (unsigned)reason & GPR_FLAG_IMMEDIATE_MARK;
 
     /* reason may be clobbered, later, so keep set immediate_sweep here */
     objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
@@ -6474,8 +6476,8 @@ gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark,
 
     if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
 
-    gc_report(1, objspace, "gc_start(%d, %d, reason: %d) => %d, %d, %d\n",
-              full_mark, immediate_mark, reason,
+    gc_report(1, objspace, "gc_start(%d, reason: %d) => %d, %d, %d\n",
+              full_mark, reason,
               do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
 
     objspace->profile.count++;
@@ -6525,7 +6527,6 @@ struct objspace_and_reason {
     rb_objspace_t *objspace;
     int reason;
     int full_mark;
-    int immediate_mark;
 };
 
 static void
@@ -6637,15 +6638,15 @@ static void *
 gc_with_gvl(void *ptr)
 {
     struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
-    return (void *)(VALUE)garbage_collect(oar->objspace, oar->full_mark, oar->immediate_mark, oar->reason);
+    return (void *)(VALUE)garbage_collect(oar->objspace, oar->full_mark, oar->reason);
 }
 
 static int
-garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int immediate_mark, int reason)
+garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int reason)
 {
     if (dont_gc) return TRUE;
     if (ruby_thread_has_gvl_p()) {
-        return garbage_collect(objspace, full_mark, immediate_mark, reason);
+        return garbage_collect(objspace, full_mark, reason);
     }
     else {
         if (ruby_native_thread_p()) {
@@ -6653,7 +6654,6 @@ garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int immediate_m
             oar.objspace = objspace;
             oar.reason = reason;
             oar.full_mark = full_mark;
-            oar.immediate_mark = immediate_mark;
             return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
         }
         else {
@@ -6699,8 +6699,9 @@ static VALUE
 gc_start_internal(int argc, VALUE *argv, VALUE self)
 {
     rb_objspace_t *objspace = &rb_objspace;
-    int reason = GPR_FLAG_METHOD | GPR_FLAG_IMMEDIATE_SWEEP;
-    int full_mark = TRUE, immediate_mark = TRUE;
+    int reason = GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
+                 GPR_FLAG_METHOD;
+    int full_mark = TRUE;
     VALUE opt = Qnil;
     static ID keyword_ids[3];
 
@@ -6718,13 +6719,15 @@ gc_start_internal(int argc, VALUE *argv, VALUE self)
         rb_get_kwargs(opt, keyword_ids, 0, 3, kwvals);
 
         if (kwvals[0] != Qundef) full_mark = RTEST(kwvals[0]);
-        if (kwvals[1] != Qundef) immediate_mark = RTEST(kwvals[1]);
+        if (kwvals[1] != Qundef && !RTEST(kwvals[1])) {
+            reason &= ~GPR_FLAG_IMMEDIATE_MARK;
+        }
         if (kwvals[2] != Qundef && !RTEST(kwvals[2])) {
             reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
         }
     }
 
-    garbage_collect(objspace, full_mark, immediate_mark, reason);
+    garbage_collect(objspace, full_mark, reason);
     gc_finalize_deferred(objspace);
 
     return Qnil;
@@ -6741,8 +6744,9 @@ void
 rb_gc(void)
 {
     rb_objspace_t *objspace = &rb_objspace;
-    int reason = GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI;
-    garbage_collect(objspace, TRUE, TRUE, reason);
+    int reason = GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
+                 GPR_FLAG_CAPI;
+    garbage_collect(objspace, TRUE, reason);
     gc_finalize_deferred(objspace);
 }
 
@@ -7811,8 +7815,8 @@ objspace_malloc_gc_stress(rb_objspace_t *objspace)
 {
     if (ruby_gc_stressful && ruby_native_thread_p()) {
         garbage_collect_with_gvl(objspace, gc_stress_full_mark_after_malloc_p(),
-                                 TRUE,
-                                 GPR_FLAG_IMMEDIATE_SWEEP |
+                                 GPR_FLAG_IMMEDIATE_MARK |
+                                 GPR_FLAG_IMMEDIATE_SWEEP |
                                  GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
     }
 }
@@ -7841,7 +7845,7 @@ objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, si
                 gc_rest(objspace); /* gc_rest can reduce malloc_increase */
                 goto retry;
             }
-            garbage_collect_with_gvl(objspace, FALSE, FALSE, GPR_FLAG_MALLOC);
+            garbage_collect_with_gvl(objspace, FALSE, GPR_FLAG_MALLOC);
         }
     }
 
@@ -7919,9 +7923,10 @@ objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
 #define TRY_WITH_GC(alloc) do { \
         objspace_malloc_gc_stress(objspace); \
         if (!(alloc) && \
-            /* full/immediate mark */ \
-            (!garbage_collect_with_gvl(objspace, TRUE, TRUE, \
-                                       GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_MALLOC) || \
+            /* full mark */ \
+            (!garbage_collect_with_gvl(objspace, TRUE, \
+                                       GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
+                                       GPR_FLAG_MALLOC) || \
             !(alloc))) { \
             ruby_memerror(); \
         } \
-- 
EW
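
A minimal standalone C sketch of the pattern the patch applies is below:
a boolean parameter is folded into the existing `reason' bitmask so each
call site names the behavior it requests instead of passing positional
TRUE/FALSE arguments.  The names here (collect, FLAG_IMMEDIATE_MARK,
FLAG_IMMEDIATE_SWEEP) are hypothetical stand-ins for illustration only,
not Ruby's internal gc.c API.

    /* illustrative sketch, not part of the patch above */
    #include <stdio.h>

    enum {
        FLAG_IMMEDIATE_SWEEP = 0x2000,
        FLAG_IMMEDIATE_MARK  = 0x8000
    };

    /* before: collect(full_mark, immediate_mark, reason)
     * after:  the boolean travels inside `reason' */
    static void
    collect(int full_mark, int reason)
    {
        int immediate_mark = reason & FLAG_IMMEDIATE_MARK;

        printf("full_mark=%d immediate_mark=%d immediate_sweep=%d\n",
               full_mark, !!immediate_mark,
               !!(reason & FLAG_IMMEDIATE_SWEEP));
    }

    int
    main(void)
    {
        /* call sites read as a list of named behaviors */
        collect(1, FLAG_IMMEDIATE_MARK | FLAG_IMMEDIATE_SWEEP);
        collect(0, FLAG_IMMEDIATE_SWEEP);
        return 0;
    }

The call sites then spell out the behavior they request by name, which is
the self-documenting property the commit message is aiming for.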