author     Eric Wong <e@80x24.org>  2023-11-25 20:54:33 +0000
committer  Eric Wong <e@80x24.org>  2023-11-26 01:05:24 +0000
commit     db7e689902f7ea0b0f9188d14a28ce57fb9f221c (patch)
tree       9316e8525adfa7e632f3f67fad46f38068f90075 /xt
parent     5073e8eceaf0ad20ddf9a5515e1d0aa7acbad5b3 (diff)
download   public-inbox-db7e689902f7ea0b0f9188d14a28ce57fb9f221c.tar.gz
We must not attempt to read request bodies from the HTTP client
while processing a long request, since doing so drains pipelined
requests.  The NNTP/IMAP/POP3 event_step callbacks already behave
this way when {long_cb} is set by ->long_response.
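
The pattern described above, as a minimal sketch (this is not the
actual PublicInbox::HTTP code; aside from {long_cb} and
->long_response, the method and field names here are assumptions):

  sub event_step {
          my ($self) = @_;

          # a long response is still being generated: keep emitting it
          # and do NOT read more client input, so pipelined requests
          # stay queued in the kernel receive buffer until we're done
          return $self->long_step if $self->{long_cb};

          # no long response in progress: safe to read the next
          # (possibly pipelined) request from the client socket
          # ($self->{sock}, ->close and ->process_request are
          # hypothetical helpers for illustration)
          my $r = sysread($self->{sock}, my $buf, 8192);
          return $self->close if defined($r) && $r == 0; # EOF
          $self->process_request(\$buf) if $r;
  }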

This bug has little real-world consequence since HTTP/1.1
pipelining is not widely used, especially behind Varnish or
other reverse proxies.

I found this bug while randomly strace-ing an active -netd
process to see the kind of traffic it was handling.
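
To make the failure mode concrete, here is a minimal client-side
sketch of what the test below exercises: two requests written
back-to-back on one connection, the second sent while the first
(long, streamed) response is still in flight.  The listen address
here is an assumption; the paths only mirror those in the test:

  use v5.12;
  use IO::Socket::INET;

  # hypothetical local -httpd instance
  my $s = IO::Socket::INET->new(PeerAddr => '127.0.0.1:8080') or die $!;

  # first request: expects a long, chunked response
  print $s "GET /test/all.mbox.gz HTTP/1.1\r\nHost: example.com\r\n\r\n";

  # pipeline a second request before the first response finishes;
  # a correct server leaves these bytes unread until it is done
  # with the first response
  print $s "GET /test/manifest.js.gz HTTP/1.1\r\nHost: example.com\r\n",
          "Connection: close\r\n\r\n";

  # both responses must arrive complete and in order
  print while <$s>;
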
Diffstat (limited to 'xt')
-rw-r--r--  xt/httpd-async-stream.t  68
1 file changed, 51 insertions, 17 deletions
diff --git a/xt/httpd-async-stream.t b/xt/httpd-async-stream.t
index 099ceb79..21d09331 100644
--- a/xt/httpd-async-stream.t
+++ b/xt/httpd-async-stream.t
@@ -2,8 +2,10 @@
 # Copyright (C) all contributors <meta@public-inbox.org>
 # License: AGPL-3.0+ <https://www.gnu.org/licenses/agpl-3.0.txt>
 # Expensive test to validate compression and TLS.
-use strict;
-use v5.10.1;
+use v5.12;
+use autodie;
+use PublicInbox::IO qw(write_file);
+use IO::Uncompress::Gunzip qw(gunzip $GunzipError);
 use PublicInbox::TestCommon;
 use PublicInbox::DS qw(now);
 use PublicInbox::Spawn qw(popen_rd);
@@ -23,20 +25,15 @@ diag "TEST_JOBS=$JOBS TEST_ENDPOINT=$endpoint TEST_CURL_OPT=$curl_opt";
 my @CURL_OPT = (qw(-HHost:example.com -sSf), split(' ', $curl_opt));
 
 my $make_local_server = sub {
+        my ($http) = @_;
         my $pi_config = "$tmpdir/config";
-        open my $fh, '>', $pi_config or die "open($pi_config): $!";
-        print $fh <<"" or die "print $pi_config: $!";
+        write_file '>', $pi_config, <<"";
 [publicinbox "test"]
 inboxdir = $inboxdir
 address = test\@example.com
 
-        close $fh or die "close($pi_config): $!";
         my ($out, $err) = ("$tmpdir/out", "$tmpdir/err");
-        for ($out, $err) {
-                open my $fh, '>', $_ or die "truncate: $!";
-        }
-        my $http = tcp_server();
-        my $rdr = { 3 => $http };
+        for ($out, $err) { open my $fh, '>', $_ }
 
         # not using multiple workers, here, since we want to increase
         # the chance of tripping concurrency bugs within PublicInbox/HTTP*.pm
@@ -46,10 +43,22 @@ address = test\@example.com
         my $url = "$host_port/test/$endpoint";
         print STDERR "# CMD ". join(' ', @$cmd). "\n";
         my $env = { PI_CONFIG => $pi_config };
-        (start_script($cmd, $env, $rdr), $url);
+        (start_script($cmd, $env, { 3 => $http }), $url)
 };
 
-my ($td, $url) = $make_local_server->();
+my ($td, $url) = $make_local_server->(my $http = tcp_server());
+
+my $s1 = tcp_connect($http);
+my $rbuf = do { # pipeline while reading long response
+        my $req = <<EOM;
+GET /test/$endpoint HTTP/1.1\r
+Host: example.com\r
+\r
+EOM
+        is syswrite($s1, $req), length($req), 'initial long req';
+        <$s1>;
+};
+like $rbuf, qr!\AHTTP/1\.1 200\b!, 'started reading 200 response';
 
 my $do_get_all = sub {
         my ($job) = @_;
@@ -74,16 +83,16 @@ my $do_get_all = sub {
 
 my (%pids, %res);
 for my $job (1..$JOBS) {
-        pipe(my ($r, $w)) or die;
+        pipe(my $r, my $w);
         my $pid = fork;
         if ($pid == 0) {
-                close $r or die;
+                close $r;
                 my $res = $do_get_all->($job);
-                print $w $res or die;
-                close $w or die;
+                print $w $res;
+                close $w;
                 _exit(0);
         }
-        close $w or die;
+        close $w;
         $pids{$pid} = [ $job, $r ];
 }
 
@@ -96,6 +105,31 @@ while (scalar keys %pids) {
         push @{$res{$sum}}, $job;
 }
 is(scalar keys %res, 1, 'all got the same result');
+{
+        my $req = <<EOM;
+GET /test/manifest.js.gz HTTP/1.1\r
+Host: example.com\r
+Connection: close\r
+\r
+EOM
+        is syswrite($s1, $req), length($req),
+                'pipeline another request while reading long response';
+        diag 'reading remainder of slow response';
+        my $res = do { local $/ = "\r\n\r\n"; <$s1> };
+        like $res, qr/^Transfer-Encoding: chunked\r\n/sm, 'chunked response';
+        {
+                local $/ = "\r\n"; # get to final chunk
+                while (defined(my $l = <$s1>)) { last if $l eq "0\r\n" }
+        };
+        is scalar(readline($s1)), "\r\n", 'got final CRLF from 1st response';
+        diag "second response:";
+        $res = do { local $/ = "\r\n\r\n"; <$s1> };
+        like $res, qr!\AHTTP/1\.1 200 !, 'response for pipelined req';
+        gunzip($s1 => \my $json) or xbail "gunzip $GunzipError";
+        my $m = PublicInbox::Config::json()->decode($json);
+        like $m->{'/test'}->{fingerprint}, qr/\A[0-9a-f]{40,}\z/,
+                'acceptable fingerprint in response';
+}
 $td->kill;
 $td->join;
 is($?, 0, 'no error on -httpd exit');
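
As a usage note: this is an expensive maintainer test under xt/, so
it is typically not run as part of the default test suite.  A
plausible invocation (the inbox directory variable name is an
assumption based on other xt/ tests; TEST_JOBS matches the diag
output in the hunk above) would be:

  GIANT_INBOX_DIR=/path/to/large-inbox TEST_JOBS=4 \
          prove -bvw xt/httpd-async-stream.t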