From: Eric Wong <e@80x24.org>
To: mwrap-perl@80x24.org
Subject: [PATCH] httpd: put location names into a contiguous buffer
Date: Sat, 10 Dec 2022 11:26:06 +0000 [thread overview]
Message-ID: <20221210112606.6103-1-e@80x24.org> (raw)
This drastically reduces the number of malloc+free calls in the
/$PID/each/$MIN endpoint for large processes by writing out all
the names into one contiguous buffer and having pointers point
inside of it from the h1_src_loc array. IOW, this means we have
parallel arrays working together, instead of one array and
thousands of discrete strings.
This makes /each/2000 in my application roughly 7x faster than
before and 10x faster than the equivalent Devel::Mwrap::PSGI
Perl code. I wasn't exactly happy with the performance of
the C version before this change, but now it's pretty good :>
---
mwrap_httpd.h | 93 +++++++++++++++++++++++++++++++--------------------
1 file changed, 56 insertions(+), 37 deletions(-)
diff --git a/mwrap_httpd.h b/mwrap_httpd.h
index aaa9681..13ac256 100644
--- a/mwrap_httpd.h
+++ b/mwrap_httpd.h
@@ -101,11 +101,8 @@ struct h1_src_loc {
size_t frees;
size_t live;
size_t max_life;
- union {
- const struct src_loc *src_loc;
- char *loc_name;
- } as;
- size_t lname_len;
+ off_t lname_len;
+ char *loc_name;
};
/* sort numeric stuff descending */
@@ -126,7 +123,7 @@ CMP_FN(mean_life)
static int cmp_location(const void *x, const void *y)
{
const struct h1_src_loc *a = x, *b = y;
- return strcmp(a->as.loc_name, b->as.loc_name);
+ return strcmp(a->loc_name, b->loc_name);
}
/* fields for /each/$MIN/ endpoint */
@@ -208,9 +205,10 @@ static FILE *wbuf_new(struct mw_membuf *mb)
{
static const struct mw_wbuf pad;
FILE *fp = open_memstream(&mb->ptr, &mb->len);
- if (!fp)
+ if (fp) /* pad space is populated before h1_send_flush */
+ fwrite(&pad, 1, sizeof(pad), fp);
+ else
fprintf(stderr, "open_memstream: %m\n");
- fwrite(&pad, 1, sizeof(pad), fp); /* populated before h1_send_flush */
return fp;
}
@@ -397,14 +395,13 @@ static const char *uri_unescape(const char *s, size_t *len)
return deconst.in;
}
-/* result must be freed */
-static char *loc2name(const struct src_loc *l, size_t *len)
+static off_t write_loc_name(FILE *fp, const struct src_loc *l)
{
- char *ptr;
- FILE *fp = open_memstream(&ptr, len);
- if (!fp) {
- fprintf(stderr, "open_memstream: %m\n");
- return NULL;
+ off_t beg = ftello(fp);
+
+ if (beg < 0) {
+ fprintf(stderr, "ftello: %m\n");
+ return beg;
}
if (l->f) {
fputs(l->f->fn, fp);
@@ -418,8 +415,7 @@ static char *loc2name(const struct src_loc *l, size_t *len)
if (!s) {
fprintf(stderr, "backtrace_symbols: %m\n");
- fclose(fp);
- return NULL;
+ return -1;
}
fputs(s[0], fp);
for (i = 1; i < l->bt_len; i++) {
@@ -428,14 +424,13 @@ static char *loc2name(const struct src_loc *l, size_t *len)
}
free(s);
}
- if (ferror(fp) | fclose(fp)) {
- fprintf(stderr, "ferror|fclose: %m\n");
- return NULL;
- }
- return ptr;
+ off_t end = ftello(fp);
+ if (end < 0)
+ return end;
+ return end - beg;
}
-static struct h1_src_loc *accumulate(unsigned long min, size_t *hslc)
+static struct h1_src_loc *accumulate(unsigned long min, size_t *hslc, FILE *lp)
{
struct mw_membuf mb;
FILE *fp = open_memstream(&mb.ptr, &mb.len);
@@ -454,7 +449,6 @@ static struct h1_src_loc *accumulate(unsigned long min, size_t *hslc)
struct h1_src_loc hsl;
if (total < min) continue;
- hsl.as.src_loc = l;
hsl.bytes = total - freed;
hsl.allocations = uatomic_read(&l->allocations);
hsl.frees = uatomic_read(&l->frees);
@@ -464,6 +458,7 @@ static struct h1_src_loc *accumulate(unsigned long min, size_t *hslc)
(long double)hsl.frees) :
HUGE_VAL;
hsl.max_life = uatomic_read(&l->max_lifespan);
+ hsl.lname_len = write_loc_name(lp, l);
fwrite(&hsl, sizeof(hsl), 1, fp);
}
--locating;
@@ -478,10 +473,6 @@ static struct h1_src_loc *accumulate(unsigned long min, size_t *hslc)
*hslc = mb.len / sizeof(*hslv);
mwrap_assert((mb.len % sizeof(*hslv)) == 0);
hslv = (struct h1_src_loc *)mb.ptr;
- for (size_t i = 0; i++ < *hslc; ++hslv)
- hslv->as.loc_name = loc2name(hslv->as.src_loc,
- &hslv->lname_len);
- hslv = (struct h1_src_loc *)mb.ptr;
}
return hslv;
}
@@ -497,15 +488,24 @@ static enum mw_qev each_at(struct mw_h1 *h1, struct mw_h1req *h1r)
if (len >= PATH_MAX) return h1_400(h1);
struct src_loc *l = mwrap_get(loc, len);
if (!l) return h1_404(h1);
- size_t lname_len;
- char *name = loc2name(l, &lname_len);
+
+ struct mw_membuf lname;
+ FILE *lp = open_memstream(&lname.ptr, &lname.len);
+ if (!lp) return h1_close(h1);
+ if (write_loc_name(lp, l) < 0) return h1_close(h1);
+ if (ferror(lp) | fclose(lp)) {
+ fprintf(stderr, "ferror|fclose: %m\n");
+ return h1_close(h1);
+ }
struct mw_membuf mb;
FILE *fp = wbuf_new(&mb);
+ if (!fp) return h1_close(h1);
FPUTS("<html><head><title>", fp);
- write_html(fp, name, lname_len);
+ write_html(fp, lname.ptr, lname.len);
FPUTS("</title></head><body><p>live allocations at ", fp);
- write_html(fp, name, lname_len);
- free(name);
+ write_html(fp, lname.ptr, lname.len);
+ free(lname.ptr);
+
size_t age = uatomic_read(&total_bytes_inc);
fprintf(fp, "<p>Current age: %zu (live: %zu)",
age, age - uatomic_read(&total_bytes_dec));
@@ -542,10 +542,29 @@ static enum mw_qev each_gt(struct mw_h1 *h1, struct mw_h1req *h1r,
}
size_t hslc;
- struct h1_src_loc *hslv = accumulate(min, &hslc);
+ struct mw_membuf ln;
+ FILE *lp = open_memstream(&ln.ptr, &ln.len);
+ struct h1_src_loc *hslv = accumulate(min, &hslc, lp);
if (!hslv)
return h1_close(h1);
+ if (ferror(lp) | fclose(lp)) {
+ fprintf(stderr, "ferror|fclose: %m\n");
+ free(hslv);
+ return h1_close(h1);
+ }
+
+ char *n = ln.ptr;
+ for (size_t i = 0; i < hslc; ++i) {
+ hslv[i].loc_name = n;
+ n += hslv[i].lname_len;
+ if (hslv[i].lname_len < 0) {
+ free(ln.ptr);
+ free(hslv);
+ return h1_close(h1);
+ }
+ }
+
struct mw_membuf mb;
FILE *fp = wbuf_new(&mb);
fprintf(fp, "<html><head><title>mwrap each >%lu"
@@ -583,13 +602,13 @@ static enum mw_qev each_gt(struct mw_h1 *h1, struct mw_h1req *h1r,
hsl->bytes, hsl->allocations, hsl->frees,
hsl->live, hsl->mean_life, hsl->max_life);
FPUTS("<td><a\nhref=\"../at/", fp);
- write_uri(fp, hsl->as.loc_name, hsl->lname_len);
+ write_uri(fp, hsl->loc_name, hsl->lname_len);
FPUTS("\">", fp);
- write_html(fp, hsl->as.loc_name, hsl->lname_len);
- free(hsl->as.loc_name);
+ write_html(fp, hsl->loc_name, hsl->lname_len);
FPUTS("</a></td></tr>", fp);
}
free(hslv);
+ free(ln.ptr);
FPUTS("</table></body></html>", fp);
return h1_200(h1, fp, &mb);
}
reply other threads:[~2022-12-10 11:26 UTC|newest]
Thread overview: [no followups] expand[flat|nested] mbox.gz Atom feed
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221210112606.6103-1-e@80x24.org \
--to=e@80x24.org \
--cc=mwrap-perl@80x24.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
Code repositories for project(s) associated with this public inbox
https://80x24.org/mwrap-perl.git
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).