From: Eric Wong <e@80x24.org>
To: spew@80x24.org
Subject: [PATCH] wip
Date: Sun, 24 Jun 2018 11:55:01 +0000
Message-ID: <20180624115501.19651-1-e@80x24.org>

From: Eric Wong <normalperson@yhbt.net>

---
 mjit.c           |  13 ++--
 process.c        | 173 ++++++++++++++++++++++++-----------------------
 signal.c         |  66 ++++++++----------
 thread.c         |  11 +++
 thread_pthread.c |   2 +-
 vm_core.h        |   3 +
 6 files changed, 141 insertions(+), 127 deletions(-)
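
Note on the overall shape of the change (explanatory only, not part of the
patch): instead of each waiter blocking on its own waitpid()/condvar,
callers now register a waitpid_state node on vm->waiting_pids under
vm->waitpid_lock, and the SIGCHLD dispatch (rb_signal_exec ->
rb_waitpid_all) reaps with waitpid(..., WNOHANG) on behalf of every
registered waiter and then wakes it: non-GVL waiters (the MJIT worker) via
a caller-supplied condvar, Ruby threads by waking the thread behind their
execution context.  Below is a minimal stand-alone pthread sketch of that
reap-and-wake pattern; it is an illustration only, and every name in it
(struct waiter, reap_all, wait_for, reaper_loop) is made up for the sketch.

  /*
   * Stand-alone illustration only -- not Ruby's code.  One reaper calls
   * waitpid(..., WNOHANG) on behalf of every registered waiter and then
   * signals that waiter; parked waiters never call waitpid() themselves.
   */
  #include <sys/types.h>
  #include <sys/wait.h>
  #include <pthread.h>
  #include <unistd.h>
  #include <errno.h>
  #include <stdio.h>

  struct waiter {
      struct waiter *next;
      pthread_cond_t cond;    /* how this waiter sleeps */
      pid_t pid;              /* pid it is waiting for */
      pid_t ret;              /* 0 = not reaped yet, -1 = error, >0 = reaped */
      int status;
      int err;
  };

  static pthread_mutex_t waitpid_lock = PTHREAD_MUTEX_INITIALIZER;
  static struct waiter *waiters;

  /* stand-in for rb_waitpid_all(): run whenever SIGCHLD is reported */
  static void
  reap_all(void)
  {
      struct waiter *w;

      pthread_mutex_lock(&waitpid_lock);
      for (w = waiters; w; w = w->next) {
          if (w->ret) continue;                  /* already satisfied */
          w->ret = waitpid(w->pid, &w->status, WNOHANG);
          if (w->ret == -1) w->err = errno;
          if (w->ret) pthread_cond_signal(&w->cond);
      }
      pthread_mutex_unlock(&waitpid_lock);
  }

  /* stand-in for a registered waiter (cf. ruby_waitpid_locked) */
  static pid_t
  wait_for(pid_t pid, int *status)
  {
      struct waiter w = { 0 }, **p;

      w.pid = pid;
      pthread_cond_init(&w.cond, NULL);
      pthread_mutex_lock(&waitpid_lock);
      w.next = waiters;
      waiters = &w;                              /* register */
      while (!w.ret)                             /* reaper fills in w.ret */
          pthread_cond_wait(&w.cond, &waitpid_lock);
      for (p = &waiters; *p != &w; p = &(*p)->next)
          ;                                      /* unregister */
      *p = w.next;
      pthread_mutex_unlock(&waitpid_lock);
      pthread_cond_destroy(&w.cond);
      if (status) *status = w.status;
      if (w.ret == -1) errno = w.err;
      return w.ret;
  }

  static void *
  reaper_loop(void *arg)                         /* stand-in for the timer thread */
  {
      (void)arg;
      for (;;) {
          reap_all();
          usleep(1000);
      }
  }

  int
  main(void)
  {
      pthread_t thr;
      pid_t pid = fork();
      int st = 0;

      if (pid < 0) return 1;
      if (pid == 0) _exit(0);                    /* child exits right away */
      pthread_create(&thr, NULL, reaper_loop, NULL);
      printf("reaped %d -> %d\n", (int)pid, (int)wait_for(pid, &st));
      return 0;                                  /* process exit ends the reaper */
  }

The real patch additionally tries an initial WNOHANG waitpid() before
registering, and Ruby-thread waiters sleep via
rb_thread_sleep_interruptible() with rb_ensure() guaranteeing the node is
unlinked even if the sleep is interrupted.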

diff --git a/mjit.c b/mjit.c
index c9c1208355..82c8ae2d7f 100644
--- a/mjit.c
+++ b/mjit.c
@@ -111,7 +111,8 @@ extern void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lo
 extern int rb_thread_create_mjit_thread(void (*child_hook)(void), void (*worker_func)(void));
 
 
-pid_t ruby_waitpid_locked(rb_vm_t *, rb_pid_t, int *status, int options);
+pid_t ruby_waitpid_locked(rb_vm_t *, rb_pid_t, int *status, int options,
+                          rb_nativethread_cond_t *cond);
 
 #define RB_CONDATTR_CLOCK_MONOTONIC 1
 
@@ -388,6 +389,7 @@ exec_process(const char *path, char *const argv[])
     int stat, exit_code;
     pid_t pid;
     rb_vm_t *vm = GET_VM();
+    rb_nativethread_cond_t cond;
 
     rb_nativethread_lock_lock(&vm->waitpid_lock);
     pid = start_process(path, argv);
@@ -395,11 +397,13 @@ exec_process(const char *path, char *const argv[])
         rb_nativethread_lock_unlock(&vm->waitpid_lock);
         return -2;
     }
+    rb_native_cond_initialize(&cond);
     for (;;) {
-        pid_t r = ruby_waitpid_locked(vm, pid, &stat, 0);
+        pid_t r = ruby_waitpid_locked(vm, pid, &stat, 0, &cond);
         if (r == -1) {
-            if (errno == EINTR) continue;
+            if (errno == EINTR) continue; /* should never happen */
             fprintf(stderr, "waitpid: %s\n", strerror(errno));
+            exit_code = -2;
             break;
         }
         else if (r == pid) {
@@ -413,6 +417,7 @@ exec_process(const char *path, char *const argv[])
         }
     }
     rb_nativethread_lock_unlock(&vm->waitpid_lock);
+    rb_native_cond_destroy(&cond);
     return exit_code;
 }
 
@@ -1563,7 +1568,7 @@ mjit_finish(void)
        absence.  So wait for a clean finish of the threads.  */
     while (pch_status == PCH_NOT_READY) {
         verbose(3, "Waiting wakeup from make_pch");
-	/* release GVL to handle interrupts */
+        /* release GVL to handle interrupts */
         rb_thread_call_without_gvl(wait_pch, 0, ubf_pch, 0);
     }
     CRITICAL_SECTION_FINISH(3, "in mjit_finish to wakeup from pch");
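
Note (explanatory, not part of the patch): the functional change in mjit.c
is that ruby_waitpid_locked() no longer initializes and destroys a condvar
on every call; exec_process() owns the rb_nativethread_cond_t for the whole
wait and only lends it to ruby_waitpid_locked(), matching the new
waitpid_state layout in process.c where the wake method is a union of a
borrowed condvar (native threads) or an execution context (Ruby threads).
exec_process() also sets exit_code = -2 when waitpid() fails, so it can no
longer return an uninitialized value.  A minimal pthread sketch of the
borrowed-condvar shape -- names here are made up, this is not Ruby's API:

  /* Illustration only: the helper borrows a condvar whose lifetime the
   * caller controls, instead of creating and destroying one per call. */
  #include <pthread.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t *parked;         /* set while a waiter is blocked */
  static int ready;

  static void
  wait_ready(pthread_cond_t *cond)       /* borrows cond, never owns it */
  {
      pthread_mutex_lock(&lock);
      parked = cond;
      while (!ready)
          pthread_cond_wait(cond, &lock);
      parked = NULL;
      pthread_mutex_unlock(&lock);
  }

  static void *
  producer(void *arg)
  {
      (void)arg;
      pthread_mutex_lock(&lock);
      ready = 1;
      if (parked)                        /* wake whoever is blocked */
          pthread_cond_signal(parked);
      pthread_mutex_unlock(&lock);
      return NULL;
  }

  int
  main(void)
  {
      pthread_t thr;
      pthread_cond_t cond;               /* caller-owned, like exec_process */

      pthread_cond_init(&cond, NULL);    /* init once ... */
      pthread_create(&thr, NULL, producer, NULL);
      wait_ready(&cond);
      pthread_join(thr, NULL);
      pthread_cond_destroy(&cond);       /* ... destroy once, afterwards */
      return 0;
  }
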
diff --git a/process.c b/process.c
index bdb92036b7..044804338f 100644
--- a/process.c
+++ b/process.c
@@ -899,153 +899,153 @@ do_waitpid(rb_pid_t pid, int *st, int flags)
 
 struct waitpid_state {
     struct list_node wnode;
-    rb_nativethread_cond_t cond;
+    union {
+        rb_nativethread_cond_t *cond; /* non-Ruby thread */
+        rb_execution_context_t *ec; /* normal Ruby execution context */
+    } wake;
     rb_pid_t ret;
     rb_pid_t pid;
     int status;
     int options;
     int errnum;
-    rb_vm_t *vm;
+    unsigned int is_ruby : 1;
 };
 
+void rb_native_mutex_lock(rb_nativethread_lock_t *);
+void rb_native_mutex_unlock(rb_nativethread_lock_t *);
 void rb_native_cond_signal(rb_nativethread_cond_t *);
 void rb_native_cond_wait(rb_nativethread_cond_t *, rb_nativethread_lock_t *);
-void rb_native_cond_initialize(rb_nativethread_cond_t *);
-void rb_native_cond_destroy(rb_nativethread_cond_t *);
 
-/* only called by vm->main_thread */
+FILE *mstream;
+
+/* called by main thread */
 void
-rb_sigchld(rb_vm_t *vm)
+rb_waitpid_all(rb_vm_t *vm)
 {
     struct waitpid_state *w = 0, *next;
 
-    rb_nativethread_lock_lock(&vm->waitpid_lock);
+    rb_native_mutex_lock(&vm->waitpid_lock);
     list_for_each_safe(&vm->waiting_pids, w, next, wnode) {
         w->ret = do_waitpid(w->pid, &w->status, w->options | WNOHANG);
+
         if (w->ret == 0) continue;
         if (w->ret == -1) w->errnum = errno;
+
         list_del_init(&w->wnode);
-        rb_native_cond_signal(&w->cond);
+        if (mstream) fprintf(mstream, "%d %d %d\n", getpid(), w->ret, w->pid);
+        if (w->is_ruby) {
+            /*
+             * we call this in timer-thread, because vm->main_thread
+             * cannot wake itself up...
+             */
+            rb_thread_wakeup_alive(rb_ec_thread_ptr(w->wake.ec)->self);
+        }
+        else {
+            rb_native_cond_signal(w->wake.cond);
+        }
     }
-    rb_nativethread_lock_unlock(&vm->waitpid_lock);
+    rb_native_mutex_unlock(&vm->waitpid_lock);
 }
 
 static void
-waitpid_state_init(struct waitpid_state *w, rb_vm_t *vm, pid_t pid, int options)
+waitpid_state_init(struct waitpid_state *w, pid_t pid, int options)
 {
-    rb_native_cond_initialize(&w->cond);
     w->ret = 0;
     w->pid = pid;
-    w->status = 0;
     w->options = options;
-    w->vm = vm;
     list_node_init(&w->wnode);
 }
 
-/* must be called with vm->waitpid_lock held, this is not interruptible */
+/*
+ * must be called with vm->waitpid_lock held, this is not interruptible
+ */
 pid_t
-ruby_waitpid_locked(rb_vm_t *vm, rb_pid_t pid, int *status, int options)
+ruby_waitpid_locked(rb_vm_t *vm, rb_pid_t pid, int *status, int options,
+                    rb_nativethread_cond_t *cond)
 {
     struct waitpid_state w;
 
     assert(!ruby_thread_has_gvl_p() && "must not have GVL");
 
-    waitpid_state_init(&w, vm, pid, options);
+    waitpid_state_init(&w, pid, options);
+    w.is_ruby = 0;
     w.ret = do_waitpid(w.pid, &w.status, w.options | WNOHANG);
     if (w.ret) {
-        if (w.ret == -1) {
-            w.errnum = errno;
-        }
+        if (w.ret == -1) w.errnum = errno;
     }
     else {
+        w.wake.cond = cond;
         list_add(&vm->waiting_pids, &w.wnode);
-        while (!w.ret) {
-            rb_native_cond_wait(&w.cond, &vm->waitpid_lock);
-        }
+        if (mstream) fprintf(mstream, "%d add- %d\n", getpid(), w.pid);
+        do {
+            rb_native_cond_wait(w.wake.cond, &vm->waitpid_lock);
+        } while (!w.ret);
         list_del(&w.wnode);
     }
     if (status) {
         *status = w.status;
     }
-    rb_native_cond_destroy(&w.cond);
     errno = w.errnum;
     return w.ret;
 }
 
-static void
-waitpid_ubf(void *x)
-{
-    struct waitpid_state *w = x;
-    rb_nativethread_lock_lock(&w->vm->waitpid_lock);
-    if (!w->ret) {
-        w->errnum = EINTR;
-        w->ret = -1;
-    }
-    rb_native_cond_signal(&w->cond);
-    rb_nativethread_lock_unlock(&w->vm->waitpid_lock);
-}
+void rb_thread_sleep_interruptible(struct timespec *ts); /* thread.c */
 
-static void *
-waitpid_nogvl(void *x)
+static VALUE
+waitpid_sleep(VALUE x)
 {
-    struct waitpid_state *w = x;
+    struct waitpid_state *w = (struct waitpid_state *)x;
 
-    /* let rb_sigchld handle it */
-    rb_native_cond_wait(&w->cond, &w->vm->waitpid_lock);
+    rb_thread_check_ints();
+    while (!w->ret) {
+        rb_thread_sleep_interruptible(0);
+        rb_thread_check_ints();
+    }
 
-    return 0;
+    return Qfalse;
 }
 
 static VALUE
-waitpid_wait(VALUE x)
+waitpid_ensure(VALUE x)
 {
     struct waitpid_state *w = (struct waitpid_state *)x;
 
-    rb_nativethread_lock_lock(&w->vm->waitpid_lock);
-    w->ret = do_waitpid(w->pid, &w->status, w->options | WNOHANG);
-    if (w->ret) {
-        if (w->ret == -1) {
-            w->errnum = errno;
-        }
-    }
-    else {
-        rb_execution_context_t *ec = GET_EC();
+    if (w->ret == 0) {
+        rb_vm_t *vm = rb_ec_vm_ptr(w->wake.ec);
 
-        list_add(&w->vm->waiting_pids, &w->wnode);
-        do {
-            rb_thread_call_without_gvl2(waitpid_nogvl, w, waitpid_ubf, w);
-            if (RUBY_VM_INTERRUPTED_ANY(ec) ||
-                    (w->ret == -1 && w->errnum == EINTR)) {
-                rb_nativethread_lock_unlock(&w->vm->waitpid_lock);
-
-                RUBY_VM_CHECK_INTS(ec);
-
-                rb_nativethread_lock_lock(&w->vm->waitpid_lock);
-                if (w->ret == -1 && w->errnum == EINTR) {
-                    w->ret = do_waitpid(w->pid, &w->status, w->options|WNOHANG);
-                    if (w->ret == -1)
-                        w->errnum = errno;
-                }
-            }
-        } while (!w->ret);
+        if (mstream) fprintf(mstream, "%d ensure del %d\n", getpid(), w->pid);
+        rb_native_mutex_lock(&vm->waitpid_lock);
+        list_del(&w->wnode);
+        rb_native_mutex_unlock(&vm->waitpid_lock);
     }
-    rb_nativethread_lock_unlock(&w->vm->waitpid_lock);
     return Qfalse;
 }
 
-static VALUE
-waitpid_ensure(VALUE x)
+static void
+waitpid_wait(struct waitpid_state *w)
 {
-    struct waitpid_state *w = (struct waitpid_state *)x;
+    rb_vm_t *vm = rb_ec_vm_ptr(w->wake.ec);
 
-    if (w->ret <= 0) {
-        rb_nativethread_lock_lock(&w->vm->waitpid_lock);
-        list_del_init(&w->wnode);
-        rb_nativethread_lock_unlock(&w->vm->waitpid_lock);
+    /*
+     * Lock here to prevent do_waitpid from stealing work from the
+     * ruby_waitpid_locked done by mjit workers since mjit works
+     * outside of GVL
+     */
+    rb_native_mutex_lock(&vm->waitpid_lock);
+
+    w->ret = do_waitpid(w->pid, &w->status, w->options | WNOHANG);
+    if (w->ret) {
+        if (w->ret == -1) w->errnum = errno;
+
+        rb_native_mutex_unlock(&vm->waitpid_lock);
     }
+    else {
+        list_add(&vm->waiting_pids, &w->wnode);
+        if (mstream) fprintf(mstream, "%d add++ %d\n", getpid(), w->pid);
+        rb_native_mutex_unlock(&vm->waitpid_lock);
 
-    rb_native_cond_destroy(&w->cond);
-    return Qfalse;
+        rb_ensure(waitpid_sleep, (VALUE)w, waitpid_ensure, (VALUE)w);
+    }
 }
 
 rb_pid_t
@@ -1059,11 +1059,11 @@ rb_waitpid(rb_pid_t pid, int *st, int flags)
     else {
         struct waitpid_state w;
 
-        waitpid_state_init(&w, GET_VM(), pid, flags);
-        rb_ensure(waitpid_wait, (VALUE)&w, waitpid_ensure, (VALUE)&w);
-        if (st) {
-            *st = w.status;
-        }
+        waitpid_state_init(&w, pid, flags);
+        w.is_ruby = 1;
+        w.wake.ec = GET_EC();
+        waitpid_wait(&w);
+        if (st) *st = w.status;
         result = w.ret;
     }
     if (result > 0) {
@@ -1348,6 +1348,9 @@ after_exec_non_async_signal_safe(void)
 {
     rb_thread_reset_timer_thread();
     rb_thread_start_timer_thread();
+    if (rb_signal_buff_size()) {
+        rb_thread_wakeup_timer_thread();
+    }
 }
 
 static void
@@ -8227,4 +8230,8 @@ Init_process(void)
     id_exception = rb_intern("exception");
 
     InitVM(process);
+    if (0) {
+        mstream = fopen("/tmp/wait.log", "a");
+        setvbuf(mstream, NULL, _IONBF, 0);
+    }
 }
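
Note (explanatory, not part of the patch): the mstream pieces scattered
through process.c and vm_core.h (the fprintf calls above, the if (0) fopen
in Init_process, and the "clobber" line in rb_vm_living_threads_init) are
throwaway debug tracing for this WIP: an unbuffered, append-mode log that
only becomes active if the if (0) is flipped to if (1), so every trace
point is guarded by if (mstream).  A tiny self-contained version of the
same idiom, with hypothetical names (tracef, /tmp/trace.log):

  #include <stdio.h>
  #include <unistd.h>

  static FILE *tracef;                   /* NULL => tracing disabled */

  int
  main(void)
  {
      if (0) {                           /* flip to 1 to enable tracing */
          tracef = fopen("/tmp/trace.log", "a");
          if (tracef) setvbuf(tracef, NULL, _IONBF, 0);  /* unbuffered */
      }
      /* every trace point stays cheap and harmless while disabled */
      if (tracef) fprintf(tracef, "%d: hello\n", (int)getpid());
      return 0;
  }
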
diff --git a/signal.c b/signal.c
index c20b01ea36..22fc4286d9 100644
--- a/signal.c
+++ b/signal.c
@@ -66,6 +66,14 @@ ruby_atomic_compare_and_swap(rb_atomic_t *ptr, rb_atomic_t cmp,
 # define NSIG (_SIGMAX + 1)      /* For QNX */
 #endif
 
+#if defined(SIGCLD)
+#  define RUBY_SIGCHLD    (SIGCLD)
+#elif defined(SIGCHLD)
+#  define RUBY_SIGCHLD    (SIGCHLD)
+#else
+#  define RUBY_SIGCHLD    (0)
+#endif
+
 static const struct signals {
     const char *signm;
     int  signo;
@@ -129,15 +137,9 @@ static const struct signals {
 #ifdef SIGCONT
     {"CONT", SIGCONT},
 #endif
-#ifdef SIGCHLD
-    {"CHLD", SIGCHLD},
-#endif
-#ifdef SIGCLD
-    {"CLD", SIGCLD},
-#else
-# ifdef SIGCHLD
-    {"CLD", SIGCHLD},
-# endif
+#if RUBY_SIGCHLD
+    {"CHLD", RUBY_SIGCHLD },
+    {"CLD", RUBY_SIGCHLD },
 #endif
 #ifdef SIGTTIN
     {"TTIN", SIGTTIN},
@@ -1052,18 +1054,7 @@ rb_trap_exit(void)
     }
 }
 
-static int
-sig_is_chld(int sig)
-{
-#if defined(SIGCLD)
-    return (sig == SIGCLD);
-#elif defined(SIGCHLD)
-    return (sig == SIGCHLD);
-#endif
-    return 0;
-}
-
-void rb_sigchld(rb_vm_t *); /* process.c */
+void rb_waitpid_all(rb_vm_t *); /* process.c */
 
 void
 rb_signal_exec(rb_thread_t *th, int sig)
@@ -1072,9 +1063,10 @@ rb_signal_exec(rb_thread_t *th, int sig)
     VALUE cmd = vm->trap_list.cmd[sig];
     int safe = vm->trap_list.safe[sig];
 
-    if (sig_is_chld(sig)) {
-	rb_sigchld(vm);
+    if (sig == RUBY_SIGCHLD) {
+        rb_waitpid_all(vm);
     }
+
     if (cmd == 0) {
 	switch (sig) {
 	  case SIGINT:
@@ -1134,10 +1126,8 @@ default_handler(int sig)
 #ifdef SIGUSR2
       case SIGUSR2:
 #endif
-#ifdef SIGCLD
-      case SIGCLD:
-#elif defined(SIGCHLD)
-      case SIGCHLD:
+#if RUBY_SIGCHLD
+      case RUBY_SIGCHLD:
 #endif
         func = sighandler;
         break;
@@ -1176,7 +1166,7 @@ trap_handler(VALUE *cmd, int sig)
     VALUE command;
 
     if (NIL_P(*cmd)) {
-	if (sig_is_chld(sig)) {
+	if (sig == RUBY_SIGCHLD) {
 	    goto sig_dfl;
 	}
 	func = SIG_IGN;
@@ -1199,9 +1189,9 @@ trap_handler(VALUE *cmd, int sig)
 		break;
               case 14:
 		if (memcmp(cptr, "SYSTEM_DEFAULT", 14) == 0) {
-		    if (sig_is_chld(sig)) {
-			goto sig_dfl;
-		    }
+                    if (sig == RUBY_SIGCHLD) {
+                        goto sig_dfl;
+                    }
                     func = SIG_DFL;
                     *cmd = 0;
 		}
@@ -1209,9 +1199,9 @@ trap_handler(VALUE *cmd, int sig)
 	      case 7:
 		if (memcmp(cptr, "SIG_IGN", 7) == 0) {
 sig_ign:
-		    if (sig_is_chld(sig)) {
-			goto sig_dfl;
-		    }
+                    if (sig == RUBY_SIGCHLD) {
+                        goto sig_dfl;
+                    }
                     func = SIG_IGN;
                     *cmd = Qtrue;
 		}
@@ -1443,7 +1433,7 @@ install_sighandler(int signum, sighandler_t handler)
 #  define install_sighandler(signum, handler) \
     INSTALL_SIGHANDLER(install_sighandler(signum, handler), #signum, signum)
 
-#if defined(SIGCLD) || defined(SIGCHLD)
+#if RUBY_SIGCHLD
 static int
 init_sigchld(int sig)
 {
@@ -1570,10 +1560,8 @@ Init_signal(void)
     install_sighandler(SIGSYS, sig_do_nothing);
 #endif
 
-#if defined(SIGCLD)
-    init_sigchld(SIGCLD);
-#elif defined(SIGCHLD)
-    init_sigchld(SIGCHLD);
+#if RUBY_SIGCHLD
+    init_sigchld(RUBY_SIGCHLD);
 #endif
 
     rb_enable_interrupt();
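
Note (explanatory, not part of the patch): signal.c folds the scattered
SIGCLD/SIGCHLD #ifdefs and the sig_is_chld() helper into one RUBY_SIGCHLD
macro.  It expands to 0 on platforms with neither name, so
"#if RUBY_SIGCHLD" drops whole blocks at compile time and
"sig == RUBY_SIGCHLD" can never match a real signal (signal numbers start
at 1), which is why no runtime helper is needed any more.  A small
self-contained illustration of the same idiom, using a made-up MY_SIGCHLD
in place of RUBY_SIGCHLD:

  #include <signal.h>
  #include <stdio.h>

  #if defined(SIGCLD)
  #  define MY_SIGCHLD (SIGCLD)
  #elif defined(SIGCHLD)
  #  define MY_SIGCHLD (SIGCHLD)
  #else
  #  define MY_SIGCHLD (0)        /* 0 is never a real signal number */
  #endif

  static const char *
  describe(int sig)
  {
      if (sig == MY_SIGCHLD)       /* always false when MY_SIGCHLD is 0 */
          return "child status changed";
      return "something else";
  }

  int
  main(void)
  {
  #if MY_SIGCHLD                   /* whole block vanishes without the signal */
      printf("SIGCHLD is %d: %s\n", MY_SIGCHLD, describe(MY_SIGCHLD));
  #endif
      printf("SIGINT is %d: %s\n", SIGINT, describe(SIGINT));
      return 0;
  }
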
diff --git a/thread.c b/thread.c
index 8c9aafe07a..ab27c60632 100644
--- a/thread.c
+++ b/thread.c
@@ -1287,6 +1287,17 @@ rb_thread_sleep_forever(void)
     sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
 }
 
+void
+rb_thread_sleep_interruptible(struct timespec *ts)
+{
+    rb_thread_t *th = GET_THREAD();
+    enum rb_thread_status prev_status = th->status;
+
+    th->status = THREAD_STOPPED;
+    native_sleep(th, ts);
+    th->status = prev_status;
+}
+
 void
 rb_thread_sleep_deadly(void)
 {
diff --git a/thread_pthread.c b/thread_pthread.c
index 1a1a6fc0c6..4d6afac3fb 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -1548,7 +1548,7 @@ rb_thread_create_timer_thread(void)
 	    };
 	    stack_size = stack_min;
 	    if (stack_size < min_size) stack_size = min_size;
-	    if (needs_more_stack) {
+	    if (1 || needs_more_stack) {
 		stack_size += +((BUFSIZ - 1) / stack_min + 1) * stack_min;
 	    }
 	    err = pthread_attr_setstacksize(&attr, stack_size);
diff --git a/vm_core.h b/vm_core.h
index d0689e5ba6..29f12abbf0 100644
--- a/vm_core.h
+++ b/vm_core.h
@@ -1559,11 +1559,14 @@ void rb_thread_stop_timer_thread(void);
 void rb_thread_reset_timer_thread(void);
 void rb_thread_wakeup_timer_thread(void);
 
+extern FILE *mstream;
+
 static inline void
 rb_vm_living_threads_init(rb_vm_t *vm)
 {
     list_head_init(&vm->waiting_fds);
     list_head_init(&vm->waiting_pids);
+    if (mstream) fprintf(mstream, "clobber: %d\n", getpid());
     list_head_init(&vm->living_threads);
     vm->living_thread_num = 0;
 }
-- 
EW

