From: Eric Wong <e@80x24.org>
To: spew@80x24.org
Subject: [PATCH] MRU wip
Date: Fri, 22 Jun 2018 08:10:17 +0000
Message-ID: <20180622081017.20225-1-e@80x24.org>

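Move heap pages which yield free slots during lazy sweep onto a new
objspace->mru_pages list, then splice that list back onto the front of
eden's page list at the start of the next sweep, so recently-useful
pages are swept (and reused for allocation) first.  Pages sitting on
mru_pages are temporarily detached from eden_heap.pages, so the
rememberset mark, mark-and-rememberset clear, WB-unprotected marking,
and heap verification walkers are updated to traverse both lists.  The
PROFILE_REMEMBERSET_MARK counters move into a struct so the two
rememberset traversals can share them.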
---
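Notes: the rotation relies on three ccan/list operations.  list_del()
and list_add() pull a just-swept page out of eden and push it onto the
front of mru_pages; list_prepend_list() splices the accumulated MRU
pages back onto the front of eden at the next sweep start.  Below is a
minimal, self-contained sketch of that pattern; the list functions are
simplified stand-ins for ccan/list, and the page struct, free_slots
test data, and main() driver are hypothetical, for illustration only:

/* sketch of the MRU page rotation; not part of the patch */
#include <stdio.h>

struct list_node { struct list_node *next, *prev; };
struct list_head { struct list_node n; };

static void list_head_init(struct list_head *h)
{
    h->n.next = h->n.prev = &h->n;
}

static int list_empty(const struct list_head *h)
{
    return h->n.next == &h->n;
}

/* add to the front of the list, like ccan list_add */
static void list_add(struct list_head *h, struct list_node *n)
{
    n->next = h->n.next;
    n->prev = &h->n;
    h->n.next->prev = n;
    h->n.next = n;
}

static void list_del(struct list_node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

/* splice all of @from onto the front of @to, leaving @from empty */
static void list_prepend_list(struct list_head *to, struct list_head *from)
{
    if (list_empty(from))
        return;
    from->n.prev->next = to->n.next;
    to->n.next->prev = from->n.prev;
    from->n.next->prev = &to->n;
    to->n.next = from->n.next;
    list_head_init(from);
}

/* hypothetical stand-in for struct heap_page */
struct page {
    struct list_node page_node;
    int free_slots; /* normally set by sweeping; here just test data */
    int id;
};

int main(void)
{
    struct list_head eden, mru;
    struct page pages[4] = {
        { .id = 0, .free_slots = 0 },
        { .id = 1, .free_slots = 3 },
        { .id = 2, .free_slots = 0 },
        { .id = 3, .free_slots = 5 },
    };
    int i;

    list_head_init(&eden);
    list_head_init(&mru);
    for (i = 0; i < 4; i++)
        list_add(&eden, &pages[i].page_node);

    /* sweep step: pages which yield free slots move to the MRU list,
     * as gc_sweep_step does with objspace->mru_pages.  (We iterate the
     * backing array for brevity; gc.c walks the page list itself.) */
    for (i = 0; i < 4; i++) {
        if (pages[i].free_slots) {
            list_del(&pages[i].page_node);
            list_add(&mru, &pages[i].page_node);
        }
    }

    /* next sweep start: splice MRU pages back onto the front of eden,
     * as gc_sweep_start_heap does, so they are swept first */
    list_prepend_list(&eden, &mru);

    /* eden now begins with the recently-useful pages; mru is empty */
    printf("mru empty after splice: %d\n", list_empty(&mru));
    return 0;
}

Prepending (rather than appending) in both places is what produces the
MRU order: pages that most recently produced free slots end up at the
head of eden's list, which is where heap->sweeping_page starts from via
list_top() in gc_sweep_start_heap().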
 gc.c | 145 ++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 90 insertions(+), 55 deletions(-)

diff --git a/gc.c b/gc.c
index feac07279aa..14a103b75cf 100644
--- a/gc.c
+++ b/gc.c
@@ -541,7 +541,7 @@ typedef struct rb_objspace {
 
     rb_event_flag_t hook_events;
     size_t total_allocated_objects;
-
+    struct list_head mru_pages; /* part of eden_heap */
     rb_heap_t eden_heap;
     rb_heap_t tomb_heap; /* heap for zombies and ghosts */
 
@@ -1047,8 +1047,8 @@ tick(void)
 
 static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
 static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
-static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
-static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
+static void rgengc_mark_and_rememberset_clear(const struct list_head *);
+static void rgengc_rememberset_mark(rb_objspace_t *objspace);
 
 static inline int
 RVALUE_FLAGS_AGE(VALUE flags)
@@ -1332,6 +1332,7 @@ rb_objspace_alloc(void)
 #endif
     malloc_limit = gc_params.malloc_limit_min;
     list_head_init(&objspace->eden_heap.pages);
+    list_head_init(&objspace->mru_pages);
     list_head_init(&objspace->tomb_heap.pages);
 
     return objspace;
@@ -3632,8 +3633,11 @@ gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
 }
 
 static void
-gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
+gc_sweep_start_heap(rb_objspace_t *objspace)
 {
+    rb_heap_t *heap = heap_eden;
+    /* fprintf(stderr, "start\n"); */
+    list_prepend_list(&heap->pages, &objspace->mru_pages);
     heap->sweeping_page = list_top(&heap->pages, struct heap_page, page_node);
     heap->free_pages = NULL;
 #if GC_ENABLE_INCREMENTAL_MARK
@@ -3658,7 +3662,7 @@ static void
 gc_sweep_start(rb_objspace_t *objspace)
 {
     gc_mode_transition(objspace, gc_mode_sweeping);
-    gc_sweep_start_heap(objspace, heap_eden);
+    gc_sweep_start_heap(objspace);
 }
 
 static void
@@ -3722,6 +3726,8 @@ gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
 		}
 	    }
 	    else {
+                list_del(&sweep_page->page_node);
+                list_add(&objspace->mru_pages, &sweep_page->page_node);
 		heap_add_freepage(objspace, heap, sweep_page);
 		break;
 	    }
@@ -3793,6 +3799,7 @@ gc_sweep(rb_objspace_t *objspace)
 	struct heap_page *page = NULL;
 	gc_sweep_start(objspace);
 
+        VM_ASSERT(list_empty(&objspace->mru_pages));
         list_for_each(&heap_eden->pages, page, page_node) {
             page->flags.before_sweep = TRUE;
         }
@@ -5272,7 +5279,7 @@ gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
 }
 
 static int
-gc_verify_heap_pages_(rb_objspace_t *objspace, struct list_head *head)
+gc_verify_heap_pages_(rb_objspace_t *objspace, const struct list_head *head)
 {
     int remembered_old_objects = 0;
     struct heap_page *page = 0;
@@ -5290,8 +5297,9 @@ static int
 gc_verify_heap_pages(rb_objspace_t *objspace)
 {
     int remembered_old_objects = 0;
-    remembered_old_objects = gc_verify_heap_pages_(objspace, &heap_eden->pages);
-    remembered_old_objects = gc_verify_heap_pages_(objspace, &heap_tomb->pages);
+    remembered_old_objects += gc_verify_heap_pages_(objspace, &objspace->mru_pages);
+    remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_eden->pages);
+    remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_tomb->pages);
     return remembered_old_objects;
 }
 
@@ -5412,14 +5420,15 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)
 	objspace->rgengc.old_objects = 0;
 	objspace->rgengc.last_major_gc = objspace->profile.count;
 	objspace->marked_slots = 0;
-	rgengc_mark_and_rememberset_clear(objspace, heap_eden);
+	rgengc_mark_and_rememberset_clear(&objspace->mru_pages);
+	rgengc_mark_and_rememberset_clear(&heap_eden->pages);
     }
     else {
 	objspace->flags.during_minor_gc = TRUE;
 	objspace->marked_slots =
 	  objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
 	objspace->profile.minor_gc_count++;
-	rgengc_rememberset_mark(objspace, heap_eden);
+	rgengc_rememberset_mark(objspace);
     }
 #endif
 
@@ -5430,37 +5439,43 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)
 
 #if GC_ENABLE_INCREMENTAL_MARK
 static void
-gc_marks_wb_unprotected_objects(rb_objspace_t *objspace)
+gc_marks_wb_unprotected_objects_head(rb_objspace_t *objspace,
+                                     const struct list_head *head)
 {
     struct heap_page *page = 0;
 
-    list_for_each(&heap_eden->pages, page, page_node) {
-	bits_t *mark_bits = page->mark_bits;
-	bits_t *wbun_bits = page->wb_unprotected_bits;
-	RVALUE *p = page->start;
-	RVALUE *offset = p - NUM_IN_PAGE(p);
-	size_t j;
-
-	for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
-	    bits_t bits = mark_bits[j] & wbun_bits[j];
-
-	    if (bits) {
-		p = offset  + j * BITS_BITLENGTH;
-
-		do {
-		    if (bits & 1) {
-			gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
-			GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
-			GC_ASSERT(RVALUE_MARKED((VALUE)p));
-			gc_mark_children(objspace, (VALUE)p);
-		    }
-		    p++;
-		    bits >>= 1;
-		} while (bits);
-	    }
-	}
+    list_for_each(head, page, page_node) {
+        bits_t *mark_bits = page->mark_bits;
+        bits_t *wbun_bits = page->wb_unprotected_bits;
+        RVALUE *p = page->start;
+        RVALUE *offset = p - NUM_IN_PAGE(p);
+        size_t j;
+
+        for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
+            bits_t bits = mark_bits[j] & wbun_bits[j];
+
+            if (bits) {
+                p = offset  + j * BITS_BITLENGTH;
+
+                do {
+                    if (bits & 1) {
+                        gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
+                        GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
+                        GC_ASSERT(RVALUE_MARKED((VALUE)p));
+                        gc_mark_children(objspace, (VALUE)p);
+                    }
+                    p++;
+                    bits >>= 1;
+                } while (bits);
+            }
+        }
     }
-
+}
+static void
+gc_marks_wb_unprotected_objects(rb_objspace_t *objspace)
+{
+    gc_marks_wb_unprotected_objects_head(objspace, &objspace->mru_pages);
+    gc_marks_wb_unprotected_objects_head(objspace, &heap_eden->pages);
     gc_mark_stacked_objects_all(objspace);
 }
 
@@ -5823,18 +5838,22 @@ rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
 #ifndef PROFILE_REMEMBERSET_MARK
 #define PROFILE_REMEMBERSET_MARK 0
 #endif
+struct rset_mark_prof {
+    int has_old;
+    int has_shady;
+    int has_both;
+    int skip;
+};
 
 static void
-rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
+rgengc_rememberset_mark_head(rb_objspace_t *objspace,
+                             const struct list_head *head,
+                             struct rset_mark_prof *prof)
 {
     size_t j;
     struct heap_page *page = 0;
-#if PROFILE_REMEMBERSET_MARK
-    int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
-#endif
-    gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
 
-    list_for_each(&heap->pages, page, page_node) {
+    list_for_each(head, page, page_node) {
 	if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
 	    RVALUE *p = page->start;
 	    RVALUE *offset = p - NUM_IN_PAGE(p);
@@ -5842,11 +5861,15 @@ rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
 	    bits_t *marking_bits = page->marking_bits;
 	    bits_t *uncollectible_bits = page->uncollectible_bits;
 	    bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
-#if PROFILE_REMEMBERSET_MARK
-	    if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
-	    else if (page->flags.has_remembered_objects) has_old++;
-	    else if (page->flags.has_uncollectible_shady_objects) has_shady++;
-#endif
+            if (prof) {
+                if (page->flags.has_remembered_objects &&
+                    page->flags.has_uncollectible_shady_objects)
+                    prof->has_both++;
+                else if (page->flags.has_remembered_objects)
+                    prof->has_old++;
+                else if (page->flags.has_uncollectible_shady_objects)
+                    prof->has_shady++;
+            }
 	    for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
 		bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
 		marking_bits[j] = 0;
@@ -5874,25 +5897,37 @@ rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
 		}
 	    }
 	}
-#if PROFILE_REMEMBERSET_MARK
-	else {
-	    skip++;
+	else if (prof) {
+	    prof->skip++;
 	}
-#endif
     }
+}
 
+static void
+rgengc_rememberset_mark(rb_objspace_t *objspace)
+{
 #if PROFILE_REMEMBERSET_MARK
-    fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
+    struct rset_mark_prof profile = { 0 };
+    struct rset_mark_prof *prof = &profile;
+#else
+    struct rset_mark_prof *prof = 0;
 #endif
+    gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
+    rgengc_rememberset_mark_head(objspace, &objspace->mru_pages, prof);
+    rgengc_rememberset_mark_head(objspace, &heap_eden->pages, prof);
+    if (prof) {
+        fprintf(stderr, "%d\t%d\t%d\t%d\n",
+                prof->has_both, prof->has_old, prof->has_shady, prof->skip);
+    }
     gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
 }
 
 static void
-rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
+rgengc_mark_and_rememberset_clear(const struct list_head *head)
 {
     struct heap_page *page = 0;
 
-    list_for_each(&heap->pages, page, page_node) {
+    list_for_each(head, page, page_node) {
 	memset(&page->mark_bits[0],       0, HEAP_PAGE_BITMAP_SIZE);
 	memset(&page->marking_bits[0],    0, HEAP_PAGE_BITMAP_SIZE);
 	memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
-- 
EW

