From: Eric Wong <e@80x24.org>
To: spew@80x24.org
Subject: [PATCH 2/5] thread_pthread: eliminate malloc from signal thread list
Date: Wed, 12 Aug 2015 19:23:09 +0000
Message-ID: <1439407392-29745-2-git-send-email-e@80x24.org>
In-Reply-To: <1439407392-29745-1-git-send-email-e@80x24.org>
ccan/list is used, and the list nodes are embedded directly into the
rb_thread_t structure, so nothing is malloc-ed or freed when a thread
enters or leaves the list.

Note: double-checked locking (testing the flag before taking FGLOCK, as
the old code did) is still possible, but I am not convinced it is
necessary with modern pthreads implementations.
---
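For anyone unfamiliar with ccan/list, here is a minimal standalone
sketch of the intrusive-list pattern relied on below; "struct widget"
and demo() are illustrative names, not part of this patch:

    #include <stdio.h>
    #include "ccan/list/list.h"

    struct widget {
        int id;
        struct list_node node;  /* node embedded in the object itself */
    };

    static LIST_HEAD(widgets);  /* declares and initializes the head */

    static void demo(void)
    {
        struct widget a = { .id = 1 }, b = { .id = 2 };
        struct widget *w;

        list_add_tail(&widgets, &a.node);  /* no malloc anywhere */
        list_add_tail(&widgets, &b.node);

        /* third argument names the embedded member to walk */
        list_for_each(&widgets, w, node)
            printf("widget %d\n", w->id);

        list_del(&a.node);  /* unlink in O(1); nothing to free */
    }

The node for the signal thread list lives inside native_thread_data_t
the same way, so adding and removing a thread is a handful of pointer
updates under signal_thread_list_lock.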
thread_pthread.c | 90 ++++++++++++++++----------------------------------------
thread_pthread.h | 9 +++++-
2 files changed, 34 insertions(+), 65 deletions(-)
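The listed/node union in the thread_pthread.h hunk works because struct
list_node in ccan/list begins with its "next" pointer: list_add_tail()
stores a non-NULL pointer there, so "listed" reads non-zero without ever
being assigned directly, and because list_del() leaves the node's
pointers in place (outside CCAN_LIST_DEBUG builds),
remove_signal_thread_list() must reset "listed" explicitly.  A
standalone sketch, assuming sizeof(unsigned long) == sizeof(void *) as
on common ILP32/LP64 targets:

    #include <assert.h>
    #include "ccan/list/list.h"

    static LIST_HEAD(head);

    static union {
        unsigned long listed;  /* aliases node.next, the first pointer */
        struct list_node node;
    } u;                       /* static, so zero-initialized */

    static void demo(void)
    {
        assert(!u.listed);              /* all-zero reads as "not listed" */
        list_add_tail(&head, &u.node);  /* node.next points into the list */
        assert(u.listed);               /* ...so the flag reads non-zero */
        list_del(&u.node);              /* unlink; next/prev left dangling */
        u.listed = 0;                   /* ...hence the explicit reset */
    }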
diff --git a/thread_pthread.c b/thread_pthread.c
index 26fe9ff..4eca11d 100644
--- a/thread_pthread.c
+++ b/thread_pthread.c
@@ -1122,15 +1122,7 @@ native_sleep(rb_thread_t *th, struct timeval *timeout_tv)
 }
 
 #ifdef USE_SIGNAL_THREAD_LIST
-struct signal_thread_list {
-    rb_thread_t *th;
-    struct signal_thread_list *prev;
-    struct signal_thread_list *next;
-};
-
-static struct signal_thread_list signal_thread_list_anchor = {
-    0, 0, 0,
-};
+static LIST_HEAD(signal_thread_list_head);
 
 #define FGLOCK(lock, body) do { \
     native_mutex_lock(lock); \
@@ -1144,12 +1136,12 @@ static struct signal_thread_list signal_thread_list_anchor = {
 static void
 print_signal_list(char *str)
 {
-    struct signal_thread_list *list =
-        signal_thread_list_anchor.next;
+    rb_thread_t *th;
+
     thread_debug("list (%s)> ", str);
-    while (list) {
-        thread_debug("%p (%p), ", list->th, list->th->thread_id);
-        list = list->next;
+    list_for_each(&signal_thread_list_head, th,
+                  native_thread_data.signal_thread.node) {
+        thread_debug("%p (%p), ", th, th->thread_id);
     }
     thread_debug("\n");
 }
@@ -1158,47 +1150,23 @@ print_signal_list(char *str)
 static void
 add_signal_thread_list(rb_thread_t *th)
 {
-    if (!th->native_thread_data.signal_thread_list) {
-        FGLOCK(&signal_thread_list_lock, {
-            struct signal_thread_list *list =
-                malloc(sizeof(struct signal_thread_list));
-
-            if (list == 0) {
-                fprintf(stderr, "[FATAL] failed to allocate memory\n");
-                exit(EXIT_FAILURE);
-            }
-
-            list->th = th;
-
-            list->prev = &signal_thread_list_anchor;
-            list->next = signal_thread_list_anchor.next;
-            if (list->next) {
-                list->next->prev = list;
-            }
-            signal_thread_list_anchor.next = list;
-            th->native_thread_data.signal_thread_list = list;
-        });
-    }
+    FGLOCK(&signal_thread_list_lock, {
+        if (!th->native_thread_data.signal_thread.listed) {
+            list_add_tail(&signal_thread_list_head,
+                          &th->native_thread_data.signal_thread.node);
+        }
+    });
 }
 
 static void
 remove_signal_thread_list(rb_thread_t *th)
 {
-    if (th->native_thread_data.signal_thread_list) {
-        FGLOCK(&signal_thread_list_lock, {
-            struct signal_thread_list *list =
-                (struct signal_thread_list *)
-                th->native_thread_data.signal_thread_list;
-
-            list->prev->next = list->next;
-            if (list->next) {
-                list->next->prev = list->prev;
-            }
-            th->native_thread_data.signal_thread_list = 0;
-            list->th = 0;
-            free(list); /* ok */
-        });
-    }
+    FGLOCK(&signal_thread_list_lock, {
+        if (th->native_thread_data.signal_thread.listed) {
+            list_del(&th->native_thread_data.signal_thread.node);
+            th->native_thread_data.signal_thread.listed = 0;
+        }
+    });
 }
 
 static void
static void
@@ -1231,26 +1199,20 @@ ubf_select(void *ptr)
 static void
 ping_signal_thread_list(void)
 {
-    if (signal_thread_list_anchor.next) {
-        FGLOCK(&signal_thread_list_lock, {
-            struct signal_thread_list *list;
+    FGLOCK(&signal_thread_list_lock, {
+        rb_thread_t *th;
 
-            list = signal_thread_list_anchor.next;
-            while (list) {
-                ubf_select_each(list->th);
-                list = list->next;
-            }
-        });
-    }
+        list_for_each(&signal_thread_list_head, th,
+                      native_thread_data.signal_thread.node) {
+            ubf_select_each(th);
+        }
+    });
 }
 
 static int
 check_signal_thread_list(void)
 {
-    if (signal_thread_list_anchor.next)
-        return 1;
-    else
-        return 0;
+    return !list_empty(&signal_thread_list_head);
 }
 #else /* USE_SIGNAL_THREAD_LIST */
 #define add_signal_thread_list(th) (void)(th)
diff --git a/thread_pthread.h b/thread_pthread.h
index 24a4af4..dcffd7a 100644
--- a/thread_pthread.h
+++ b/thread_pthread.h
@@ -11,6 +11,8 @@
 #ifndef RUBY_THREAD_PTHREAD_H
 #define RUBY_THREAD_PTHREAD_H
 
+#include "ccan/list/list.h"
+
 #ifdef HAVE_PTHREAD_NP_H
 #include <pthread_np.h>
 #endif
@@ -26,7 +28,12 @@ typedef struct rb_thread_cond_struct {
 } rb_nativethread_cond_t;
 
 typedef struct native_thread_data_struct {
-    void *signal_thread_list;
+    /* Protected by signal_thread_list_lock: */
+    union {
+        unsigned long listed;
+        struct list_node node;
+    } signal_thread;
+
     rb_nativethread_cond_t sleep_cond;
 } native_thread_data_t;
 
--
EW
Thread overview: 5+ messages
2015-08-12 19:23 [PATCH 1/5] net/http: retryable exception is configurable Eric Wong
2015-08-12 19:23 ` Eric Wong [this message]
2015-08-12 19:23 ` [PATCH 3/5] improve handling of timer thread shutdown Eric Wong
2015-08-12 19:23 ` [PATCH 4/5] digest: remove needless volatile Eric Wong
2015-08-12 19:23 ` [PATCH 5/5] remove needless volatile from ALLOCV_N + friends Eric Wong