From: Eric Wong <e@80x24.org>
To: spew@80x24.org
Subject: [PATCH] wip
Date: Sun, 24 Jun 2018 08:39:12 +0000
Message-ID: <20180624083912.1362-1-e@80x24.org>

From: Eric Wong <normalperson@yhbt.net>

---
 mjit.c    |  11 +++-
 process.c | 160 ++++++++++++++++++++++++++----------------------------
 signal.c  |  30 +++++-----
 thread.c  |   3 +
 4 files changed, 104 insertions(+), 100 deletions(-)
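
The core contract change: ruby_waitpid_locked() now takes a caller-supplied
condition variable, so a thread running without the GVL (the MJIT worker
below) sleeps on its own condvar and is woken by the timer thread once
ruby_waitpid_all() reaps the child.  A minimal sketch of such a caller,
modelled on the mjit.c hunk below; the wrapper name is made up here, and
the spawn step and error reporting are left out:

    /* assumes the post-patch ruby_waitpid_locked() prototype from this diff */
    static int
    wait_for_child_without_gvl(rb_vm_t *vm, pid_t pid)
    {
        rb_nativethread_cond_t cond;
        int stat, ret = -1;

        rb_native_cond_initialize(&cond);
        rb_nativethread_lock_lock(&vm->waitpid_lock);
        for (;;) {
            /* sleeps on &cond; ruby_waitpid_all() signals it from the timer thread */
            pid_t r = ruby_waitpid_locked(vm, pid, &stat, 0, &cond);
            if (r == -1) {
                if (errno == EINTR) continue; /* should never happen on this path */
                break;
            }
            if (r == pid) {
                ret = WIFEXITED(stat) ? WEXITSTATUS(stat) : -1;
                break;
            }
        }
        rb_nativethread_lock_unlock(&vm->waitpid_lock);
        rb_native_cond_destroy(&cond);
        return ret;
    }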

diff --git a/mjit.c b/mjit.c
index 55ff7e21ee..685a57dfb8 100644
--- a/mjit.c
+++ b/mjit.c
@@ -111,7 +111,8 @@ extern void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lo
 extern int rb_thread_create_mjit_thread(void (*child_hook)(void), void (*worker_func)(void));
 
 
-pid_t ruby_waitpid_locked(rb_vm_t *, rb_pid_t, int *status, int options);
+pid_t ruby_waitpid_locked(rb_vm_t *, rb_pid_t, int *status, int options,
+                          rb_nativethread_cond_t *cond);
 
 #define RB_CONDATTR_CLOCK_MONOTONIC 1
 
@@ -385,7 +386,9 @@ exec_process(const char *path, char *const argv[])
     int stat, exit_code;
     pid_t pid;
     rb_vm_t *vm = GET_VM();
+    rb_nativethread_cond_t cond;
 
+    rb_native_cond_initialize(&cond);
     rb_nativethread_lock_lock(&vm->waitpid_lock);
     pid = start_process(path, argv);
     if (pid <= 0) {
@@ -393,10 +396,11 @@ exec_process(const char *path, char *const argv[])
         return -2;
     }
     for (;;) {
-        pid_t r = ruby_waitpid_locked(vm, pid, &stat, 0);
+        pid_t r = ruby_waitpid_locked(vm, pid, &stat, 0, &cond);
         if (r == -1) {
-            if (errno == EINTR) continue;
+            if (errno == EINTR) continue; /* should never happen */
             fprintf(stderr, "waitpid: %s\n", strerror(errno));
+            exit_code = -2;
             break;
         }
         else if (r == pid) {
@@ -410,6 +414,7 @@ exec_process(const char *path, char *const argv[])
         }
     }
     rb_nativethread_lock_unlock(&vm->waitpid_lock);
+    rb_native_cond_destroy(&cond);
     return exit_code;
 }
 
diff --git a/process.c b/process.c
index bdb92036b7..3b293b8d6b 100644
--- a/process.c
+++ b/process.c
@@ -899,153 +899,145 @@ do_waitpid(rb_pid_t pid, int *st, int flags)
 
 struct waitpid_state {
     struct list_node wnode;
-    rb_nativethread_cond_t cond;
+    union {
+        rb_nativethread_cond_t *cond; /* non-Ruby thread */
+        rb_execution_context_t *ec; /* normal Ruby execution context */
+    } wake;
+    struct rb_unblock_callback ubf;
     rb_pid_t ret;
     rb_pid_t pid;
     int status;
     int options;
     int errnum;
-    rb_vm_t *vm;
+    unsigned int is_ruby : 1;
 };
 
+void rb_native_mutex_lock(rb_nativethread_lock_t *);
+void rb_native_mutex_unlock(rb_nativethread_lock_t *);
 void rb_native_cond_signal(rb_nativethread_cond_t *);
 void rb_native_cond_wait(rb_nativethread_cond_t *, rb_nativethread_lock_t *);
-void rb_native_cond_initialize(rb_nativethread_cond_t *);
-void rb_native_cond_destroy(rb_nativethread_cond_t *);
 
-/* only called by vm->main_thread */
+/* called by timer-thread */
 void
-rb_sigchld(rb_vm_t *vm)
+ruby_waitpid_all(rb_vm_t *vm)
 {
     struct waitpid_state *w = 0, *next;
 
-    rb_nativethread_lock_lock(&vm->waitpid_lock);
+    rb_native_mutex_lock(&vm->waitpid_lock);
     list_for_each_safe(&vm->waiting_pids, w, next, wnode) {
         w->ret = do_waitpid(w->pid, &w->status, w->options | WNOHANG);
+
         if (w->ret == 0) continue;
         if (w->ret == -1) w->errnum = errno;
+
         list_del_init(&w->wnode);
-        rb_native_cond_signal(&w->cond);
+        if (w->is_ruby) {
+            /*
+             * we call this in timer-thread, because vm->main_thread
+             * cannot wake itself up...
+             */
+            rb_thread_wakeup_alive(rb_ec_thread_ptr(w->wake.ec)->self);
+        }
+        else {
+            rb_native_cond_signal(w->wake.cond);
+        }
     }
-    rb_nativethread_lock_unlock(&vm->waitpid_lock);
+    rb_native_mutex_unlock(&vm->waitpid_lock);
 }
 
 static void
-waitpid_state_init(struct waitpid_state *w, rb_vm_t *vm, pid_t pid, int options)
+waitpid_state_init(struct waitpid_state *w, pid_t pid, int options)
 {
-    rb_native_cond_initialize(&w->cond);
     w->ret = 0;
     w->pid = pid;
     w->status = 0;
     w->options = options;
-    w->vm = vm;
     list_node_init(&w->wnode);
+    w->is_ruby = ruby_thread_has_gvl_p();
 }
 
-/* must be called with vm->waitpid_lock held, this is not interruptible */
+/*
+ * must be called with vm->waitpid_lock held, this is not interruptible
+ */
 pid_t
-ruby_waitpid_locked(rb_vm_t *vm, rb_pid_t pid, int *status, int options)
+ruby_waitpid_locked(rb_vm_t *vm, rb_pid_t pid, int *status, int options,
+                    rb_nativethread_cond_t *cond)
 {
     struct waitpid_state w;
 
     assert(!ruby_thread_has_gvl_p() && "must not have GVL");
 
-    waitpid_state_init(&w, vm, pid, options);
+    waitpid_state_init(&w, pid, options);
     w.ret = do_waitpid(w.pid, &w.status, w.options | WNOHANG);
     if (w.ret) {
-        if (w.ret == -1) {
-            w.errnum = errno;
-        }
+        if (w.ret == -1) w.errnum = errno;
     }
     else {
+        w.wake.cond = cond;
         list_add(&vm->waiting_pids, &w.wnode);
-        while (!w.ret) {
-            rb_native_cond_wait(&w.cond, &vm->waitpid_lock);
-        }
+        do {
+            rb_native_cond_wait(w.wake.cond, &vm->waitpid_lock);
+        } while (!w.ret);
         list_del(&w.wnode);
     }
     if (status) {
         *status = w.status;
     }
-    rb_native_cond_destroy(&w.cond);
     errno = w.errnum;
     return w.ret;
 }
 
-static void
-waitpid_ubf(void *x)
-{
-    struct waitpid_state *w = x;
-    rb_nativethread_lock_lock(&w->vm->waitpid_lock);
-    if (!w->ret) {
-        w->errnum = EINTR;
-        w->ret = -1;
-    }
-    rb_native_cond_signal(&w->cond);
-    rb_nativethread_lock_unlock(&w->vm->waitpid_lock);
-}
-
-static void *
-waitpid_nogvl(void *x)
+static VALUE
+waitpid_sleep(VALUE x)
 {
-    struct waitpid_state *w = x;
+    struct waitpid_state *w = (struct waitpid_state *)x;
 
-    /* let rb_sigchld handle it */
-    rb_native_cond_wait(&w->cond, &w->vm->waitpid_lock);
+    do {
+        rb_thread_sleep_forever(); /* handles interrupts for us */
+    } while (!w->ret);
 
-    return 0;
+    return Qfalse;
 }
 
 static VALUE
-waitpid_wait(VALUE x)
+waitpid_ensure(VALUE x)
 {
     struct waitpid_state *w = (struct waitpid_state *)x;
 
-    rb_nativethread_lock_lock(&w->vm->waitpid_lock);
-    w->ret = do_waitpid(w->pid, &w->status, w->options | WNOHANG);
-    if (w->ret) {
-        if (w->ret == -1) {
-            w->errnum = errno;
-        }
-    }
-    else {
-        rb_execution_context_t *ec = GET_EC();
+    if (w->ret <= 0) {
+        rb_vm_t *vm = rb_ec_vm_ptr(w->wake.ec);
 
-        list_add(&w->vm->waiting_pids, &w->wnode);
-        do {
-            rb_thread_call_without_gvl2(waitpid_nogvl, w, waitpid_ubf, w);
-            if (RUBY_VM_INTERRUPTED_ANY(ec) ||
-                    (w->ret == -1 && w->errnum == EINTR)) {
-                rb_nativethread_lock_unlock(&w->vm->waitpid_lock);
-
-                RUBY_VM_CHECK_INTS(ec);
-
-                rb_nativethread_lock_lock(&w->vm->waitpid_lock);
-                if (w->ret == -1 && w->errnum == EINTR) {
-                    w->ret = do_waitpid(w->pid, &w->status, w->options|WNOHANG);
-                    if (w->ret == -1)
-                        w->errnum = errno;
-                }
-            }
-        } while (!w->ret);
+        rb_native_mutex_lock(&vm->waitpid_lock);
+        list_del(&w->wnode);
+        rb_native_mutex_unlock(&vm->waitpid_lock);
     }
-    rb_nativethread_lock_unlock(&w->vm->waitpid_lock);
     return Qfalse;
 }
 
-static VALUE
-waitpid_ensure(VALUE x)
+static void
+waitpid_wait(struct waitpid_state *w)
 {
-    struct waitpid_state *w = (struct waitpid_state *)x;
+    rb_vm_t *vm = rb_ec_vm_ptr(w->wake.ec);
 
-    if (w->ret <= 0) {
-        rb_nativethread_lock_lock(&w->vm->waitpid_lock);
-        list_del_init(&w->wnode);
-        rb_nativethread_lock_unlock(&w->vm->waitpid_lock);
+    /*
+     * Lock here to prevent do_waitpid from stealing work from the
+     * ruby_waitpid_locked done by mjit workers since mjit works
+     * outside of GVL
+     */
+    rb_native_mutex_lock(&vm->waitpid_lock);
+
+    w->ret = do_waitpid(w->pid, &w->status, w->options | WNOHANG);
+    if (w->ret) {
+        if (w->ret == -1) w->errnum = errno;
+
+        rb_native_mutex_unlock(&vm->waitpid_lock);
     }
+    else {
+        list_add(&vm->waiting_pids, &w->wnode);
+        rb_native_mutex_unlock(&vm->waitpid_lock);
 
-    rb_native_cond_destroy(&w->cond);
-    return Qfalse;
+        rb_ensure(waitpid_sleep, (VALUE)w, waitpid_ensure, (VALUE)w);
+    }
 }
 
 rb_pid_t
@@ -1059,11 +1051,10 @@ rb_waitpid(rb_pid_t pid, int *st, int flags)
     else {
         struct waitpid_state w;
 
-        waitpid_state_init(&w, GET_VM(), pid, flags);
-        rb_ensure(waitpid_wait, (VALUE)&w, waitpid_ensure, (VALUE)&w);
-        if (st) {
-            *st = w.status;
-        }
+        waitpid_state_init(&w, pid, flags);
+        w.wake.ec = GET_EC();
+        waitpid_wait(&w);
+        if (st) *st = w.status;
         result = w.ret;
     }
     if (result > 0) {
@@ -1348,6 +1339,9 @@ after_exec_non_async_signal_safe(void)
 {
     rb_thread_reset_timer_thread();
     rb_thread_start_timer_thread();
+    if (rb_signal_buff_size()) {
+        rb_thread_wakeup_timer_thread();
+    }
 }
 
 static void
diff --git a/signal.c b/signal.c
index c20b01ea36..e339434113 100644
--- a/signal.c
+++ b/signal.c
@@ -1052,18 +1052,23 @@ rb_trap_exit(void)
     }
 }
 
-static int
-sig_is_chld(int sig)
-{
 #if defined(SIGCLD)
-    return (sig == SIGCLD);
+#  define RUBY_SIGCHLD    (SIGCLD)
 #elif defined(SIGCHLD)
-    return (sig == SIGCHLD);
+#  define RUBY_SIGCHLD    (SIGCHLD)
+#else
+#  define RUBY_SIGCHLD    (0)
 #endif
-    return 0;
-}
 
-void rb_sigchld(rb_vm_t *); /* process.c */
+void ruby_waitpid_all(rb_vm_t *); /* process.c */
+
+void
+ruby_sigchld_handler(rb_vm_t *vm)
+{
+    if (signal_buff.cnt[RUBY_SIGCHLD]) {
+        ruby_waitpid_all(vm);
+    }
+}
 
 void
 rb_signal_exec(rb_thread_t *th, int sig)
@@ -1072,9 +1077,6 @@ rb_signal_exec(rb_thread_t *th, int sig)
     VALUE cmd = vm->trap_list.cmd[sig];
     int safe = vm->trap_list.safe[sig];
 
-    if (sig_is_chld(sig)) {
-	rb_sigchld(vm);
-    }
     if (cmd == 0) {
 	switch (sig) {
 	  case SIGINT:
@@ -1176,7 +1178,7 @@ trap_handler(VALUE *cmd, int sig)
     VALUE command;
 
     if (NIL_P(*cmd)) {
-	if (sig_is_chld(sig)) {
+	if (sig == RUBY_SIGCHLD) {
 	    goto sig_dfl;
 	}
 	func = SIG_IGN;
@@ -1199,7 +1201,7 @@ trap_handler(VALUE *cmd, int sig)
 		break;
               case 14:
 		if (memcmp(cptr, "SYSTEM_DEFAULT", 14) == 0) {
-		    if (sig_is_chld(sig)) {
+		    if (sig == RUBY_SIGCHLD) {
 			goto sig_dfl;
 		    }
                     func = SIG_DFL;
@@ -1209,7 +1211,7 @@ trap_handler(VALUE *cmd, int sig)
 	      case 7:
 		if (memcmp(cptr, "SIG_IGN", 7) == 0) {
 sig_ign:
-		    if (sig_is_chld(sig)) {
+		    if (sig == RUBY_SIGCHLD) {
 			goto sig_dfl;
 		    }
                     func = SIG_IGN;
diff --git a/thread.c b/thread.c
index 8c9aafe07a..613599eaa6 100644
--- a/thread.c
+++ b/thread.c
@@ -4146,6 +4146,8 @@ rb_threadptr_check_signal(rb_thread_t *mth)
     }
 }
 
+void ruby_sigchld_handler(rb_vm_t *); /* signal.c */
+
 static void
 timer_thread_function(void *arg)
 {
@@ -4164,6 +4166,7 @@ timer_thread_function(void *arg)
     rb_native_mutex_unlock(&vm->thread_destruct_lock);
 
     /* check signal */
+    ruby_sigchld_handler(vm);
     rb_threadptr_check_signal(vm->main_thread);
 
 #if 0
-- 
EW

