From 3c3c853bf122cfebf143080e2ff818dfc11cb6c5 Mon Sep 17 00:00:00 2001 From: Stefan Eissing Date: Mon, 18 Apr 2016 12:21:00 +0200 Subject: [PATCH] version 1.4.6, exactly as released in Apache 2.4.20 --- ChangeLog | 77 ++ README.md | 21 +- configure.ac | 2 +- mod-h2.xcodeproj/project.pbxproj | 14 +- mod_http2/Makefile.am | 7 +- mod_http2/h2.h | 158 ++++ mod_http2/h2_alt_svc.c | 2 +- mod_http2/h2_bucket_eoc.h | 1 + mod_http2/h2_bucket_eos.h | 1 + mod_http2/h2_config.c | 1 + mod_http2/h2_config.h | 1 + mod_http2/h2_conn.c | 212 ++--- mod_http2/h2_conn.h | 18 +- mod_http2/h2_conn_io.c | 295 +++--- mod_http2/h2_conn_io.h | 35 +- mod_http2/h2_ctx.c | 18 +- mod_http2/h2_ctx.h | 2 + mod_http2/h2_filter.c | 72 +- mod_http2/h2_filter.h | 6 +- mod_http2/h2_from_h1.c | 14 +- mod_http2/h2_from_h1.h | 2 - mod_http2/h2_h2.c | 56 +- mod_http2/h2_h2.h | 41 - mod_http2/{h2_task_queue.c => h2_int_queue.c} | 62 +- mod_http2/{h2_task_queue.h => h2_int_queue.h} | 37 +- mod_http2/h2_io.c | 257 +++--- mod_http2/h2_io.h | 51 +- mod_http2/h2_io_set.c | 20 +- mod_http2/h2_io_set.h | 7 +- mod_http2/h2_mplx.c | 868 +++++++++++++----- mod_http2/h2_mplx.h | 127 ++- mod_http2/h2_ngn_shed.c | 352 +++++++ mod_http2/h2_ngn_shed.h | 76 ++ mod_http2/h2_private.h | 18 +- mod_http2/h2_push.c | 130 ++- mod_http2/h2_push.h | 20 +- mod_http2/h2_request.c | 102 +- mod_http2/h2_request.h | 45 +- mod_http2/h2_response.c | 42 +- mod_http2/h2_response.h | 13 +- mod_http2/h2_session.c | 664 +++++++++----- mod_http2/h2_session.h | 58 +- mod_http2/h2_stream.c | 251 ++--- mod_http2/h2_stream.h | 37 +- mod_http2/h2_switch.c | 12 +- mod_http2/h2_task.c | 150 ++- mod_http2/h2_task.h | 25 +- mod_http2/h2_task_input.c | 36 +- mod_http2/h2_task_input.h | 6 +- mod_http2/h2_task_output.c | 195 ++-- mod_http2/h2_task_output.h | 21 +- mod_http2/h2_util.c | 375 +++++++- mod_http2/h2_util.h | 125 ++- mod_http2/h2_version.h | 4 +- mod_http2/h2_worker.c | 79 +- mod_http2/h2_worker.h | 11 +- mod_http2/h2_workers.c | 210 +++-- mod_http2/h2_workers.h | 13 +- mod_http2/mod_http2.c | 142 ++- mod_http2/mod_http2.h | 77 +- 60 files changed, 3812 insertions(+), 1962 deletions(-) create mode 100644 mod_http2/h2.h rename mod_http2/{h2_task_queue.c => h2_int_queue.c} (72%) rename mod_http2/{h2_task_queue.h => h2_int_queue.h} (72%) create mode 100644 mod_http2/h2_ngn_shed.c create mode 100644 mod_http2/h2_ngn_shed.h diff --git a/ChangeLog b/ChangeLog index 27e21220..62159f60 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,80 @@ +v1.4.6 (as released in Apache httpd 2.4.20) +-------------------------------------------------------------------------------- + * incrementing keepalives on each request started so that logging %k gives + increasing numbers per master http2 connection. New documented variables + in env, usable in custom log formats: H2_PUSH, H2_PUSHED, H2_PUSHED_ON, + H2_STREAM_ID and H2_STREAM_TAG. + * more efficient passing of response bodies with less contention + and file bucket forwarding. + * fix for missing score board updates on request count, fix for memory leak + on slave connection reuse. + * disabling PUSH when client sends GOAWAY. Slave connections are reused for + several requests, improved performance and better memory use. + * fixes problem with wrong lifetime of file buckets on main connection. + * fixes incorrect denial of requests without :authority header. + * give control to async mpm for keepalive timeouts only when no streams are + open and even if only after 1 sec delay. 
Under load, event mpm discards + connections otherwise too quickly. + * fixed possible read after free when streams were cancelled early by the + client. + * fixed possible deadlock during connection shutdown. Thanks to @FrankStolle + for reporting and getting the necessary data. + * fixed apr_uint64_t formatting in a log statement to user proper APR def, + thanks to @Sp1l. + * number of worker threads allowed to a connection is adjusting + dynamically. Starting with 4, the number is doubled when streams can be + served without block on http/2 connection flow. The number is halfed, when + the server has to wait on client flow control grants. + This can happen with a maximum frequency of 5 times per second. + When a connection occupies too many workers, repeatable requests + (GET/HEAD/OPTIONS) are cancelled and placed back in the queue. Should that + not suffice and a stream is busy longer than the server timeout, the + connection will be aborted with error code ENHANCE_YOUR_CALM. + This does *not* limit the number of streams a client may open, rather the + number of server threads a connection might use. + * allowing link header to specify multiple "rel" values, space-separated + inside a quoted string. Prohibiting push when Link parameter "nopush" is + present. + * reworked connection state handling. Idle connections accept a GOAWAY from + the client without further reply. Otherwise the module makes a best effort + to send one last GOAWAY to the client. + * the values from standard directives Timeout and KeepAliveTimeout properly + are applied to http/2 connections. + * idle connections are returned to async mpms. new hook "pre_close_connection" + used to send GOAWAY frame when not already done. Setting event mpm server + config "by hand" for the main connection to the correct negotiated server. + * keep-alive blocking reads are done with 1 second timeouts to check for MPM + stopping. Will announce early GOAWAY and finish processing open streams, + then close. + * bytes read/written on slave connections are reported via the optional + mod_logio functions. Fixes PR 58871. + * connections how keep a "push diary" where hashes of already pushed resources + are kept. See directive H2PushDiarySize for managing this. Push diaries can + be initialized by clients via the "Cache-Digest" request header. + This carries a base64url encoded. compressed Golomb set as described + in https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ + Introduced a status handler for HTTP/2 connections, giving various counters + and statistics about the current connection, plus its cache digest value + in a JSON record. Not a replacement for more HTTP/2 in the server status. + Configured as + + SetHandler http2-status + + * Fixed flushing of last GOAWAY frame. Previously, that frame did not always + reach the client, causing some to fail the next request. + Fixed calculation of last stream id accepted as described in rfc7540. + Reading in KEEPALIVE state now correctly shown in scoreboard. + Fixed possible race in connection shutdown after review by Ylavic. + Fixed segfault on connection shutdown, callback ran into a semi dismantled session. + * Added support for experimental accept-push-policy draft + (https://tools.ietf.org/html/draft-ruellan-http-accept-push-policy-00). Clients + may now influence server pushes by sending accept-push-policy headers. + * new r->subprocess_env variables HTTP2 and H2PUSH, set to "on" + when available for request. 
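
The ChangeLog entry above introduces the r->subprocess_env variables HTTP2 and H2PUSH. As a sketch only (not part of this patch), any module or handler can read them through the standard APR table API; the handler name "h2-env-report" below is hypothetical, while r->subprocess_env, apr_table_get(), ap_set_content_type() and ap_rprintf() are ordinary httpd/APR calls.

    #include <string.h>
    #include <httpd.h>
    #include <http_protocol.h>

    /* Hypothetical handler: reports whether the request arrived over HTTP/2
     * and whether server push is available. mod_http2 sets the env vars
     * "HTTP2" and "H2PUSH" to "on" when available for the request. */
    static int h2_env_report_handler(request_rec *r)
    {
        const char *http2 = apr_table_get(r->subprocess_env, "HTTP2");
        const char *push  = apr_table_get(r->subprocess_env, "H2PUSH");

        if (strcmp(r->handler, "h2-env-report") != 0) {
            return DECLINED;
        }
        ap_set_content_type(r, "text/plain");
        ap_rprintf(r, "HTTP2=%s, H2PUSH=%s\n",
                   http2 ? http2 : "off", push ? push : "off");
        return OK;
    }
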
+ * mod_status/scoreboard: showing connection protocol in new column, new + ap_update_child_status methods for updating server/description. mod_ssl + sets vhost negotiated by servername directly. + v1.2.8 -------------------------------------------------------------------------------- * Requests without ':authority' header, using 'Host' instead, are no longer diff --git a/README.md b/README.md index 58657ec3..a2dd2538 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ #mod_h[ttp]2 - http/2 for Apache httpd -Copyright (C) 2015 greenbytes GmbH +Copyright (C) 2015, 2016 greenbytes GmbH This repository contains the `mod_h[ttp]2` from Apache httpd as a standalone build. @@ -13,29 +13,23 @@ What you find here are **early experience versions** for people who like living If you want HTTP/2 in your production environment, please head over to the official releases at Apache and grab one of those or wait until the various OS distributions have assembled one for you. ##Current Version -The version here is the **Proposed backport to 2.4.x**, to be published hopefully as 2.4.19. +The version 1.4.6 is **exactly** the one released in Apache httpd 2-.4.20. -This is therefore an **early experience version** +Later versions willl be an **early experience version** and there is no guarantee that it will be released as it is here by Apache. But you are welcome to test it and give feedback. ##Install -You need a built Apache httpd 2.4.18, including apxs and headers to compile and +You need a built Apache httpd 2.4.20, including apxs and headers to compile and run this module. Additionally, you need an installed libnghttp2, at least in version 1.3.0. And additionally, you want an installed OpenSSL 1.0.2. tl;dr -**You need an installed Apache 2.4.18 which already runs ```mod_http2``` in it.** +**You need an installed Apache 2.4.20 which already runs ```mod_http2``` in it.** If you do not have that or don't know how to get it, look at google, stackoverflow, Apache mailing lists or your Linux distro. Not here! -##Changes - -Different to earlier releases, this module no longer has a ```sandbox``` build mode. It always needs an Apache + apxs installed on your system, as -well as all dependant libraries. It was too much work for me to keep the -sandbox up to date... - ##Apache 2.4.x Packages * **Ubuntu**: [ppa by ondrej](https://launchpad.net/~ondrej/+archive/ubuntu/apache2) for Ubuntu 14.04 and others @@ -43,6 +37,9 @@ sandbox up to date... * **Debian** sid (unstable) includes httpd 2.4.17. See [how to install debian sid](https://wiki.debian.org/InstallFAQ#Q._How_do_I_install_.22unstable.22_.28.22sid.22.29.3F) * **FreeBSD**: [Apache 2.4 port includes mod_http2](http://www.freshports.org/www/apache24/) / [mod_http2-devel port in review](https://reviews.freebsd.org/D5220) +##Changes + +See ```ChangeLog``` for details. ##Documenation There is the official [Apache documentation](https://httpd.apache.org/docs/2.4/en/mod/mod_http2.html) of the module, which you will not find here. @@ -73,7 +70,7 @@ SPDY protocol. And without Tatsuhiro Tsujikawa excellent nghttp2 work, this would not have been possible. 
-Münster, 18.12.2015, +Münster, 18.04.2016, Stefan Eissing, greenbytes GmbH diff --git a/configure.ac b/configure.ac index 53fa5fd9..0f990348 100644 --- a/configure.ac +++ b/configure.ac @@ -14,7 +14,7 @@ # AC_PREREQ([2.69]) -AC_INIT([mod_http2], [1.2.8], [stefan.eissing@greenbytes.de]) +AC_INIT([mod_http2], [1.4.6], [stefan.eissing@greenbytes.de]) LT_PREREQ([2.2.6]) LT_INIT() diff --git a/mod-h2.xcodeproj/project.pbxproj b/mod-h2.xcodeproj/project.pbxproj index 3358f25e..148c4ae7 100644 --- a/mod-h2.xcodeproj/project.pbxproj +++ b/mod-h2.xcodeproj/project.pbxproj @@ -15,6 +15,11 @@ B24C59831B8C66930003DF74 /* ltsugar.m4 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = ltsugar.m4; sourceTree = ""; }; B24C59841B8C66930003DF74 /* ltversion.m4 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = ltversion.m4; sourceTree = ""; }; B24C59851B8C66930003DF74 /* lt~obsolete.m4 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = "lt~obsolete.m4"; sourceTree = ""; }; + B25096BE1CC4E66F002E8B04 /* h2_int_queue.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = h2_int_queue.c; sourceTree = ""; }; + B25096BF1CC4E66F002E8B04 /* h2_int_queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = h2_int_queue.h; sourceTree = ""; }; + B25096C01CC4E66F002E8B04 /* h2_ngn_shed.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = h2_ngn_shed.c; sourceTree = ""; }; + B25096C11CC4E66F002E8B04 /* h2_ngn_shed.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = h2_ngn_shed.h; sourceTree = ""; }; + B25096C21CC4E66F002E8B04 /* h2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = h2.h; sourceTree = ""; }; B25574691BEB6EFC0058F97B /* config.h.in */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = config.h.in; sourceTree = ""; }; B255746A1BEB6EFC0058F97B /* h2_alt_svc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = h2_alt_svc.c; sourceTree = ""; }; B255746B1BEB6EFC0058F97B /* h2_alt_svc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = h2_alt_svc.h; sourceTree = ""; }; @@ -59,8 +64,6 @@ B25574921BEB6EFC0058F97B /* h2_task_input.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = h2_task_input.h; sourceTree = ""; }; B25574931BEB6EFC0058F97B /* h2_task_output.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = h2_task_output.c; sourceTree = ""; }; B25574941BEB6EFC0058F97B /* h2_task_output.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = h2_task_output.h; sourceTree = ""; }; - B25574951BEB6EFC0058F97B /* h2_task_queue.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = h2_task_queue.c; sourceTree = ""; }; - B25574961BEB6EFC0058F97B /* h2_task_queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = h2_task_queue.h; sourceTree = ""; }; B25574991BEB6EFC0058F97B /* h2_util.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = h2_util.c; sourceTree = ""; }; B255749A1BEB6EFC0058F97B /* h2_util.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path 
= h2_util.h; sourceTree = ""; }; B255749C1BEB6EFC0058F97B /* h2_version.h.in */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = h2_version.h.in; sourceTree = ""; }; @@ -128,12 +131,16 @@ B25574791BEB6EFC0058F97B /* h2_from_h1.h */, B255747A1BEB6EFC0058F97B /* h2_h2.c */, B255747B1BEB6EFC0058F97B /* h2_h2.h */, + B25096BE1CC4E66F002E8B04 /* h2_int_queue.c */, + B25096BF1CC4E66F002E8B04 /* h2_int_queue.h */, B255747E1BEB6EFC0058F97B /* h2_io_set.c */, B255747F1BEB6EFC0058F97B /* h2_io_set.h */, B255747C1BEB6EFC0058F97B /* h2_io.c */, B255747D1BEB6EFC0058F97B /* h2_io.h */, B25574801BEB6EFC0058F97B /* h2_mplx.c */, B25574811BEB6EFC0058F97B /* h2_mplx.h */, + B25096C01CC4E66F002E8B04 /* h2_ngn_shed.c */, + B25096C11CC4E66F002E8B04 /* h2_ngn_shed.h */, B25574821BEB6EFC0058F97B /* h2_private.h */, B2AB9AB91C2ADBE100908DD6 /* h2_push.c */, B2AB9ABA1C2ADBE100908DD6 /* h2_push.h */, @@ -153,8 +160,6 @@ B25574921BEB6EFC0058F97B /* h2_task_input.h */, B25574931BEB6EFC0058F97B /* h2_task_output.c */, B25574941BEB6EFC0058F97B /* h2_task_output.h */, - B25574951BEB6EFC0058F97B /* h2_task_queue.c */, - B25574961BEB6EFC0058F97B /* h2_task_queue.h */, B255748F1BEB6EFC0058F97B /* h2_task.c */, B25574901BEB6EFC0058F97B /* h2_task.h */, B25574991BEB6EFC0058F97B /* h2_util.c */, @@ -165,6 +170,7 @@ B255749E1BEB6EFC0058F97B /* h2_worker.h */, B255749F1BEB6EFC0058F97B /* h2_workers.c */, B25574A01BEB6EFC0058F97B /* h2_workers.h */, + B25096C21CC4E66F002E8B04 /* h2.h */, B25574A11BEB6EFC0058F97B /* m4 */, B25574A41BEB6EFC0058F97B /* Makefile.am */, B25574A71BEB6EFC0058F97B /* mod_http2.c */, diff --git a/mod_http2/Makefile.am b/mod_http2/Makefile.am index b7068472..99ef1353 100644 --- a/mod_http2/Makefile.am +++ b/mod_http2/Makefile.am @@ -32,9 +32,11 @@ OBJECTS = \ h2_filter.c \ h2_from_h1.c \ h2_h2.c \ + h2_int_queue.c \ h2_io.c \ h2_io_set.c \ h2_mplx.c \ + h2_ngn_shed.c \ h2_push.c \ h2_request.c \ h2_response.c \ @@ -45,13 +47,13 @@ OBJECTS = \ h2_task.c \ h2_task_input.c \ h2_task_output.c \ - h2_task_queue.c \ h2_util.c \ h2_worker.c \ h2_workers.c \ mod_http2.c HFILES = \ + h2.h \ h2_alt_svc.h \ h2_bucket_eoc.h \ h2_bucket_eos.h \ @@ -62,9 +64,11 @@ HFILES = \ h2_filter.h \ h2_from_h1.h \ h2_h2.h \ + h2_int_queue.h \ h2_io.h \ h2_io_set.h \ h2_mplx.h \ + h2_ngn_shed.h \ h2_private.h \ h2_push.h \ h2_request.h \ @@ -76,7 +80,6 @@ HFILES = \ h2_task.h \ h2_task_input.h \ h2_task_output.h \ - h2_task_queue.h \ h2_util.h \ h2_version.h \ h2_worker.h \ diff --git a/mod_http2/h2.h b/mod_http2/h2.h new file mode 100644 index 00000000..acb79cd2 --- /dev/null +++ b/mod_http2/h2.h @@ -0,0 +1,158 @@ +/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __mod_h2__h2__ +#define __mod_h2__h2__ + +/** + * The magic PRIamble of RFC 7540 that is always sent when starting + * a h2 communication. 
+ */ +extern const char *H2_MAGIC_TOKEN; + +#define H2_ERR_NO_ERROR (0x00) +#define H2_ERR_PROTOCOL_ERROR (0x01) +#define H2_ERR_INTERNAL_ERROR (0x02) +#define H2_ERR_FLOW_CONTROL_ERROR (0x03) +#define H2_ERR_SETTINGS_TIMEOUT (0x04) +#define H2_ERR_STREAM_CLOSED (0x05) +#define H2_ERR_FRAME_SIZE_ERROR (0x06) +#define H2_ERR_REFUSED_STREAM (0x07) +#define H2_ERR_CANCEL (0x08) +#define H2_ERR_COMPRESSION_ERROR (0x09) +#define H2_ERR_CONNECT_ERROR (0x0a) +#define H2_ERR_ENHANCE_YOUR_CALM (0x0b) +#define H2_ERR_INADEQUATE_SECURITY (0x0c) +#define H2_ERR_HTTP_1_1_REQUIRED (0x0d) + +#define H2_HEADER_METHOD ":method" +#define H2_HEADER_METHOD_LEN 7 +#define H2_HEADER_SCHEME ":scheme" +#define H2_HEADER_SCHEME_LEN 7 +#define H2_HEADER_AUTH ":authority" +#define H2_HEADER_AUTH_LEN 10 +#define H2_HEADER_PATH ":path" +#define H2_HEADER_PATH_LEN 5 +#define H2_CRLF "\r\n" + +/* Maximum number of padding bytes in a frame, rfc7540 */ +#define H2_MAX_PADLEN 256 +/* Initial default window size, RFC 7540 ch. 6.5.2 */ +#define H2_INITIAL_WINDOW_SIZE ((64*1024)-1) + +#define H2_HTTP_2XX(a) ((a) >= 200 && (a) < 300) + +#define H2_STREAM_CLIENT_INITIATED(id) (id&0x01) + +#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0])) + +#define H2MAX(x,y) ((x) > (y) ? (x) : (y)) +#define H2MIN(x,y) ((x) < (y) ? (x) : (y)) + +typedef enum { + H2_DEPENDANT_AFTER, + H2_DEPENDANT_INTERLEAVED, + H2_DEPENDANT_BEFORE, +} h2_dependency; + +typedef struct h2_priority { + h2_dependency dependency; + int weight; +} h2_priority; + +typedef enum { + H2_PUSH_NONE, + H2_PUSH_DEFAULT, + H2_PUSH_HEAD, + H2_PUSH_FAST_LOAD, +} h2_push_policy; + +typedef enum { + H2_STREAM_ST_IDLE, + H2_STREAM_ST_OPEN, + H2_STREAM_ST_RESV_LOCAL, + H2_STREAM_ST_RESV_REMOTE, + H2_STREAM_ST_CLOSED_INPUT, + H2_STREAM_ST_CLOSED_OUTPUT, + H2_STREAM_ST_CLOSED, +} h2_stream_state_t; + +typedef enum { + H2_SESSION_ST_INIT, /* send initial SETTINGS, etc. */ + H2_SESSION_ST_DONE, /* finished, connection close */ + H2_SESSION_ST_IDLE, /* nothing to write, expecting data inc */ + H2_SESSION_ST_BUSY, /* read/write without stop */ + H2_SESSION_ST_WAIT, /* waiting for tasks reporting back */ + H2_SESSION_ST_LOCAL_SHUTDOWN, /* we announced GOAWAY */ + H2_SESSION_ST_REMOTE_SHUTDOWN, /* client announced GOAWAY */ +} h2_session_state; + +typedef struct h2_session_props { + apr_uint32_t accepted_max; /* the highest remote stream id was/will be handled */ + apr_uint32_t completed_max; /* the highest remote stream completed */ + apr_uint32_t emitted_count; /* the number of local streams sent */ + apr_uint32_t emitted_max; /* the highest local stream id sent */ + apr_uint32_t error; /* the last session error encountered */ + unsigned int accepting : 1; /* if the session is accepting new streams */ +} h2_session_props; + + +/* h2_request is the transformer of HTTP2 streams into HTTP/1.1 internal + * format that will be fed to various httpd input filters to finally + * become a request_rec to be handled by soemone. + */ +typedef struct h2_request h2_request; + +struct h2_request { + int id; /* stream id */ + int initiated_on; /* initiating stream id (PUSH) or 0 */ + + const char *method; /* pseudo header values, see ch. 
8.1.2.3 */ + const char *scheme; + const char *authority; + const char *path; + + apr_table_t *headers; + apr_table_t *trailers; + + apr_time_t request_time; + apr_off_t content_length; + + unsigned int chunked : 1; /* iff requst body needs to be forwarded as chunked */ + unsigned int eoh : 1; /* iff end-of-headers has been seen and request is complete */ + unsigned int body : 1; /* iff this request has a body */ + unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */ + unsigned int push_policy; /* which push policy to use for this request */ +}; + +typedef struct h2_response h2_response; + +struct h2_response { + int stream_id; + int rst_error; + int http_status; + apr_off_t content_length; + apr_table_t *headers; + apr_table_t *trailers; + const char *sos_filter; +}; + + +/* Note key to attach connection task id to conn_rec/request_rec instances */ + +#define H2_TASK_ID_NOTE "http2-task-id" + + +#endif /* defined(__mod_h2__h2__) */ diff --git a/mod_http2/h2_alt_svc.c b/mod_http2/h2_alt_svc.c index 2ccea963..24a8b1f4 100644 --- a/mod_http2/h2_alt_svc.c +++ b/mod_http2/h2_alt_svc.c @@ -102,7 +102,7 @@ static int h2_alt_svc_handler(request_rec *r) if (ma >= 0) { svc_ma = apr_psprintf(r->pool, "; ma=%d", ma); } - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03043) "h2_alt_svc: announce %s for %s:%d", (secure? "secure" : "insecure"), r->hostname, (int)r->server->port); diff --git a/mod_http2/h2_bucket_eoc.h b/mod_http2/h2_bucket_eoc.h index f1cd6f81..2d466919 100644 --- a/mod_http2/h2_bucket_eoc.h +++ b/mod_http2/h2_bucket_eoc.h @@ -21,6 +21,7 @@ struct h2_session; /** End Of HTTP/2 SESSION (H2EOC) bucket */ extern const apr_bucket_type_t h2_bucket_type_eoc; +#define H2_BUCKET_IS_H2EOC(e) (e->type == &h2_bucket_type_eoc) apr_bucket * h2_bucket_eoc_make(apr_bucket *b, struct h2_session *session); diff --git a/mod_http2/h2_bucket_eos.h b/mod_http2/h2_bucket_eos.h index bd3360db..27b501da 100644 --- a/mod_http2/h2_bucket_eos.h +++ b/mod_http2/h2_bucket_eos.h @@ -21,6 +21,7 @@ struct h2_stream; /** End Of HTTP/2 STREAM (H2EOS) bucket */ extern const apr_bucket_type_t h2_bucket_type_eos; +#define H2_BUCKET_IS_H2EOS(e) (e->type == &h2_bucket_type_eos) apr_bucket *h2_bucket_eos_make(apr_bucket *b, struct h2_stream *stream); diff --git a/mod_http2/h2_config.c b/mod_http2/h2_config.c index dfab2d79..0c1e6c46 100644 --- a/mod_http2/h2_config.c +++ b/mod_http2/h2_config.c @@ -28,6 +28,7 @@ #include +#include "h2.h" #include "h2_alt_svc.h" #include "h2_ctx.h" #include "h2_conn.h" diff --git a/mod_http2/h2_config.h b/mod_http2/h2_config.h index 6d6a36bf..92b222f9 100644 --- a/mod_http2/h2_config.h +++ b/mod_http2/h2_config.h @@ -66,6 +66,7 @@ typedef struct h2_config { int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */ int h2_push; /* if HTTP/2 server push is enabled */ struct apr_hash_t *priorities;/* map of content-type to h2_priority records */ + int push_diary_size; /* # of entries in push diary */ } h2_config; diff --git a/mod_http2/h2_conn.c b/mod_http2/h2_conn.c index b975f5b0..3b28c1f9 100644 --- a/mod_http2/h2_conn.c +++ b/mod_http2/h2_conn.c @@ -32,7 +32,6 @@ #include "h2_mplx.h" #include "h2_session.h" #include "h2_stream.h" -#include "h2_stream_set.h" #include "h2_h2.h" #include "h2_task.h" #include "h2_worker.h" @@ -45,6 +44,7 @@ static struct h2_workers *workers; static h2_mpm_type_t mpm_type = H2_MPM_UNKNOWN; static module *mpm_module; static int async_mpm; +static 
apr_socket_t *dummy_socket; static void check_modules(int force) { @@ -60,8 +60,13 @@ static void check_modules(int force) mpm_module = m; break; } - else if (!strcmp("worker.c", m->name)) { - mpm_type = H2_MPM_WORKER; + else if (!strcmp("motorz.c", m->name)) { + mpm_type = H2_MPM_MOTORZ; + mpm_module = m; + break; + } + else if (!strcmp("mpm_netware.c", m->name)) { + mpm_type = H2_MPM_NETWARE; mpm_module = m; break; } @@ -70,13 +75,26 @@ static void check_modules(int force) mpm_module = m; break; } + else if (!strcmp("simple_api.c", m->name)) { + mpm_type = H2_MPM_SIMPLE; + mpm_module = m; + break; + } + else if (!strcmp("mpm_winnt.c", m->name)) { + mpm_type = H2_MPM_WINNT; + mpm_module = m; + break; + } + else if (!strcmp("worker.c", m->name)) { + mpm_type = H2_MPM_WORKER; + mpm_module = m; + break; + } } checked = 1; } } -static void fix_event_master_conn(h2_session *session); - apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s) { const h2_config *config = h2_config_sget(s); @@ -91,7 +109,6 @@ apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s) status = ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm); if (status != APR_SUCCESS) { - ap_log_error(APLOG_MARK, APLOG_TRACE1, status, s, "querying MPM for async"); /* some MPMs do not implemnent this */ async_mpm = 0; status = APR_SUCCESS; @@ -138,7 +155,12 @@ apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s) NULL, AP_FTYPE_CONNECTION); status = h2_mplx_child_init(pool, s); - + + if (status == APR_SUCCESS) { + status = apr_socket_create(&dummy_socket, APR_INET, SOCK_STREAM, + APR_PROTO_TCP, pool); + } + return status; } @@ -172,15 +194,7 @@ apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r) } h2_ctx_session_set(ctx, session); - - switch (h2_conn_mpm_type()) { - case H2_MPM_EVENT: - fix_event_master_conn(session); - break; - default: - break; - } - + return APR_SUCCESS; } @@ -195,11 +209,8 @@ apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c) } status = h2_session_process(h2_ctx_session_get(ctx), async_mpm); - if (c->cs) { - c->cs->state = CONN_STATE_WRITE_COMPLETION; - } if (APR_STATUS_IS_EOF(status)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03045) "h2_session(%ld): process, closing conn", c->id); c->keepalive = AP_CONN_CLOSE; } @@ -217,24 +228,41 @@ apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c) return DONE; } +apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c) +{ + apr_status_t status; + + status = h2_session_pre_close(h2_ctx_session_get(ctx), async_mpm); + if (status == APR_SUCCESS) { + return DONE; /* This is the same, right? */ + } + return status; +} -static void fix_event_conn(conn_rec *c, conn_rec *master); - -conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *p, - apr_thread_t *thread, apr_socket_t *socket) +conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *parent, + apr_allocator_t *allocator) { + apr_pool_t *pool; conn_rec *c; + void *cfg; AP_DEBUG_ASSERT(master); ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master, - "h2_conn(%ld): created from master", master->id); + "h2_conn(%ld): create slave", master->id); - /* This is like the slave connection creation from 2.5-DEV. A - * very efficient way - not sure how compatible this is, since - * the core hooks are no longer run. - * But maybe it's is better this way, not sure yet. + /* We create a pool with its own allocator to be used for + * processing a request. 
This is the only way to have the processing + * independant of its parent pool in the sense that it can work in + * another thread. */ - c = (conn_rec *) apr_palloc(p, sizeof(conn_rec)); + if (!allocator) { + apr_allocator_create(&allocator); + } + apr_pool_create_ex(&pool, parent, NULL, allocator); + apr_pool_tag(pool, "h2_slave_conn"); + apr_allocator_owner_set(allocator, pool); + + c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec)); if (c == NULL) { ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master, APLOGNO(02913) "h2_task: creating conn"); @@ -244,112 +272,60 @@ conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *p, memcpy(c, master, sizeof(conn_rec)); /* Replace these */ - c->id = (master->id & (long)p); c->master = master; - c->pool = p; - c->current_thread = thread; - c->conn_config = ap_create_conn_config(p); - c->notes = apr_table_make(p, 5); + c->pool = pool; + c->conn_config = ap_create_conn_config(pool); + c->notes = apr_table_make(pool, 5); c->input_filters = NULL; c->output_filters = NULL; - c->bucket_alloc = apr_bucket_alloc_create(p); - c->cs = NULL; + c->bucket_alloc = apr_bucket_alloc_create(pool); c->data_in_input_filters = 0; c->data_in_output_filters = 0; c->clogging_input_filters = 1; c->log = NULL; c->log_id = NULL; - - /* TODO: these should be unique to this thread */ - c->sbh = master->sbh; - /* Simulate that we had already a request on this connection. */ c->keepalives = 1; - - ap_set_module_config(c->conn_config, &core_module, socket); - - /* This works for mpm_worker so far. Other mpm modules have - * different needs, unfortunately. The most interesting one - * being mpm_event... + /* We cannot install the master connection socket on the slaves, as + * modules mess with timeouts/blocking of the socket, with + * unwanted side effects to the master connection processing. + * Fortunately, since we never use the slave socket, we can just install + * a single, process-wide dummy and everyone is happy. */ - switch (h2_conn_mpm_type()) { - case H2_MPM_WORKER: - /* all fine */ - break; - case H2_MPM_EVENT: - fix_event_conn(c, master); - break; - default: - /* fingers crossed */ - break; + ap_set_module_config(c->conn_config, &core_module, dummy_socket); + /* TODO: these should be unique to this thread */ + c->sbh = master->sbh; + /* TODO: not all mpm modules have learned about slave connections yet. + * copy their config from master to slave. + */ + if (h2_conn_mpm_module()) { + cfg = ap_get_module_config(master->conn_config, h2_conn_mpm_module()); + ap_set_module_config(c->conn_config, h2_conn_mpm_module(), cfg); } - + return c; } -/* This is an internal mpm event.c struct which is disguised - * as a conn_state_t so that mpm_event can have special connection - * state information without changing the struct seen on the outside. - * - * For our task connections we need to create a new beast of this type - * and fill it with enough meaningful things that mpm_event reads and - * starts processing out task request. - */ -typedef struct event_conn_state_t event_conn_state_t; -struct event_conn_state_t { - /** APR_RING of expiration timeouts */ - APR_RING_ENTRY(event_conn_state_t) timeout_list; - /** the expiration time of the next keepalive timeout */ - apr_time_t expiration_time; - /** connection record this struct refers to */ - conn_rec *c; - /** request record (if any) this struct refers to */ - request_rec *r; - /** server config this struct refers to */ - void *sc; - /** is the current conn_rec suspended? 
(disassociated with - * a particular MPM thread; for suspend_/resume_connection - * hooks) - */ - int suspended; - /** memory pool to allocate from */ - apr_pool_t *p; - /** bucket allocator */ - apr_bucket_alloc_t *bucket_alloc; - /** poll file descriptor information */ - apr_pollfd_t pfd; - /** public parts of the connection state */ - conn_state_t pub; -}; -APR_RING_HEAD(timeout_head_t, event_conn_state_t); - -static void fix_event_conn(conn_rec *c, conn_rec *master) +void h2_slave_destroy(conn_rec *slave, apr_allocator_t **pallocator) { - event_conn_state_t *master_cs = ap_get_module_config(master->conn_config, - h2_conn_mpm_module()); - event_conn_state_t *cs = apr_pcalloc(c->pool, sizeof(event_conn_state_t)); - cs->bucket_alloc = apr_bucket_alloc_create(c->pool); - - ap_set_module_config(c->conn_config, h2_conn_mpm_module(), cs); - - cs->c = c; - cs->r = NULL; - cs->p = master_cs->p; - cs->pfd = master_cs->pfd; - cs->pub = master_cs->pub; - cs->pub.state = CONN_STATE_READ_REQUEST_LINE; - - c->cs = &(cs->pub); + apr_pool_t *parent; + apr_allocator_t *allocator = apr_pool_allocator_get(slave->pool); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, slave, + "h2_slave_conn(%ld): destroy (task=%s)", slave->id, + apr_table_get(slave->notes, H2_TASK_ID_NOTE)); + /* Attache the allocator to the parent pool and return it for + * reuse, otherwise the own is still the slave pool and it will + * get destroyed with it. */ + parent = apr_pool_parent_get(slave->pool); + if (pallocator && parent) { + apr_allocator_owner_set(allocator, parent); + *pallocator = allocator; + } + apr_pool_destroy(slave->pool); } -static void fix_event_master_conn(h2_session *session) +apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd) { - /* TODO: event MPM normally does this in a post_read_request hook. But - * we never encounter that on our master connection. We *do* know which - * server was selected during protocol negotiation, so lets set that. - */ - event_conn_state_t *cs = ap_get_module_config(session->c->conn_config, - h2_conn_mpm_module()); - cs->sc = ap_get_module_config(session->s->module_config, h2_conn_mpm_module()); + return ap_run_pre_connection(slave, csd); } diff --git a/mod_http2/h2_conn.h b/mod_http2/h2_conn.h index 66fa2e58..e52fc8d6 100644 --- a/mod_http2/h2_conn.h +++ b/mod_http2/h2_conn.h @@ -38,6 +38,12 @@ apr_status_t h2_conn_setup(struct h2_ctx *ctx, conn_rec *c, request_rec *r); */ apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c); +/** + * The connection is about to close. If we have not send a GOAWAY + * yet, this is the last chance. + */ +apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c); + /* Initialize this child process for h2 connection work, * to be called once during child init before multi processing * starts. 
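
Taken together, the h2_conn declarations above (h2_conn_setup, h2_conn_run, h2_conn_pre_close) outline the life cycle of an h2 master connection. A rough sketch of the call order follows, not part of this patch: the "create if missing" meaning of h2_ctx_get()'s second argument and the NULL request argument are assumptions, and the actual wiring lives in h2_h2.c and mod_http2.c.

    /* Sketch only: drive an already negotiated h2 master connection
     * from a process_connection-style hook. */
    static int run_h2_master(conn_rec *c)
    {
        h2_ctx *ctx = h2_ctx_get(c, 1);           /* assumed: 1 = create if missing */
        if (h2_conn_setup(ctx, c, NULL) != APR_SUCCESS) { /* NULL: no Upgrade request */
            return DECLINED;
        }
        return h2_conn_run(ctx, c);               /* returns DONE when the session ends */
    }
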
@@ -50,13 +56,21 @@ typedef enum { H2_MPM_WORKER, H2_MPM_EVENT, H2_MPM_PREFORK, + H2_MPM_MOTORZ, + H2_MPM_SIMPLE, + H2_MPM_NETWARE, + H2_MPM_WINNT, } h2_mpm_type_t; /* Returns the type of MPM module detected */ h2_mpm_type_t h2_conn_mpm_type(void); -conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *p, - apr_thread_t *thread, apr_socket_t *socket); +conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *parent, + apr_allocator_t *allocator); +void h2_slave_destroy(conn_rec *slave, apr_allocator_t **pallocator); + +apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd); +void h2_slave_run_connection(conn_rec *slave); #endif /* defined(__mod_h2__h2_conn__) */ diff --git a/mod_http2/h2_conn_io.c b/mod_http2/h2_conn_io.c index 18a21375..59561ecd 100644 --- a/mod_http2/h2_conn_io.c +++ b/mod_http2/h2_conn_io.c @@ -14,16 +14,18 @@ */ #include - +#include #include #include #include #include #include +#include #include "h2_private.h" #include "h2_bucket_eoc.h" +#include "h2_bucket_eos.h" #include "h2_config.h" #include "h2_conn_io.h" #include "h2_h2.h" @@ -43,18 +45,96 @@ * which seems to create less TCP packets overall */ #define WRITE_SIZE_MAX (TLS_DATA_MAX - 100) +#define WRITE_BUFFER_SIZE (5*WRITE_SIZE_MAX) + -#define WRITE_BUFFER_SIZE (8*WRITE_SIZE_MAX) +static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level, + const char *tag, apr_bucket_brigade *bb) +{ + char buffer[16 * 1024]; + const char *line = "(null)"; + apr_size_t bmax = sizeof(buffer)/sizeof(buffer[0]); + int off = 0; + apr_bucket *b; + + if (bb) { + memset(buffer, 0, bmax--); + for (b = APR_BRIGADE_FIRST(bb); + bmax && (b != APR_BRIGADE_SENTINEL(bb)); + b = APR_BUCKET_NEXT(b)) { + + if (APR_BUCKET_IS_METADATA(b)) { + if (APR_BUCKET_IS_EOS(b)) { + off += apr_snprintf(buffer+off, bmax-off, "eos "); + } + else if (APR_BUCKET_IS_FLUSH(b)) { + off += apr_snprintf(buffer+off, bmax-off, "flush "); + } + else if (AP_BUCKET_IS_EOR(b)) { + off += apr_snprintf(buffer+off, bmax-off, "eor "); + } + else if (H2_BUCKET_IS_H2EOC(b)) { + off += apr_snprintf(buffer+off, bmax-off, "h2eoc "); + } + else if (H2_BUCKET_IS_H2EOS(b)) { + off += apr_snprintf(buffer+off, bmax-off, "h2eos "); + } + else { + off += apr_snprintf(buffer+off, bmax-off, "meta(unknown) "); + } + } + else { + const char *btype = "data"; + if (APR_BUCKET_IS_FILE(b)) { + btype = "file"; + } + else if (APR_BUCKET_IS_PIPE(b)) { + btype = "pipe"; + } + else if (APR_BUCKET_IS_SOCKET(b)) { + btype = "socket"; + } + else if (APR_BUCKET_IS_HEAP(b)) { + btype = "heap"; + } + else if (APR_BUCKET_IS_TRANSIENT(b)) { + btype = "transient"; + } + else if (APR_BUCKET_IS_IMMORTAL(b)) { + btype = "immortal"; + } +#if APR_HAS_MMAP + else if (APR_BUCKET_IS_MMAP(b)) { + btype = "mmap"; + } +#endif + else if (APR_BUCKET_IS_POOL(b)) { + btype = "pool"; + } + + off += apr_snprintf(buffer+off, bmax-off, "%s[%ld] ", + btype, + (long)(b->length == ((apr_size_t)-1)? + -1 : b->length)); + } + } + line = *buffer? 
buffer : "(empty)"; + } + /* Intentional no APLOGNO */ + ap_log_cerror(APLOG_MARK, level, 0, c, "bb_dump(%ld-%d)-%s: %s", + c->id, stream_id, tag, line); + +} apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, const h2_config *cfg, apr_pool_t *pool) { - io->connection = c; - io->output = apr_brigade_create(pool, c->bucket_alloc); - io->buflen = 0; - io->is_tls = h2_h2_is_tls(c); - io->buffer_output = io->is_tls; + io->c = c; + io->output = apr_brigade_create(pool, c->bucket_alloc); + io->buflen = 0; + io->is_tls = h2_h2_is_tls(c); + io->buffer_output = io->is_tls; if (io->buffer_output) { io->bufsize = WRITE_BUFFER_SIZE; @@ -65,8 +145,9 @@ apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, } if (io->is_tls) { - /* That is where we start with, - * see https://issues.apache.org/jira/browse/TS-2503 */ + /* This is what we start with, + * see https://issues.apache.org/jira/browse/TS-2503 + */ io->warmup_size = h2_config_geti64(cfg, H2_CONF_TLS_WARMUP_SIZE); io->cooldown_usecs = (h2_config_geti(cfg, H2_CONF_TLS_COOLDOWN_SECS) * APR_USEC_PER_SEC); @@ -79,9 +160,10 @@ apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, } if (APLOGctrace1(c)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->connection, - "h2_conn_io(%ld): init, buffering=%d, warmup_size=%ld, cd_secs=%f", - io->connection->id, io->buffer_output, (long)io->warmup_size, + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c, + "h2_conn_io(%ld): init, buffering=%d, warmup_size=%ld, " + "cd_secs=%f", io->c->id, io->buffer_output, + (long)io->warmup_size, ((float)io->cooldown_usecs/APR_USEC_PER_SEC)); } @@ -109,19 +191,18 @@ static apr_status_t pass_out(apr_bucket_brigade *bb, void *ctx) return APR_SUCCESS; } - ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, NULL); - status = apr_brigade_length(bb, 0, &bblen); - if (status == APR_SUCCESS) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, - "h2_conn_io(%ld): pass_out brigade %ld bytes --> ", + ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_WRITE, c); + apr_brigade_length(bb, 0, &bblen); + h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "master conn pass", bb); + status = ap_pass_brigade(c->output_filters, bb); + if (status == APR_SUCCESS && pctx->io) { + pctx->io->bytes_written += (apr_size_t)bblen; + pctx->io->last_write = apr_time_now(); + } + if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03044) + "h2_conn_io(%ld): pass_out brigade %ld bytes", c->id, (long)bblen); - status = ap_pass_brigade(c->output_filters, bb); - if (status == APR_SUCCESS && pctx->io) { - pctx->io->bytes_written += (apr_size_t)bblen; - pctx->io->last_write = apr_time_now(); - } - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, - "h2_conn_io(%ld): --> pass_out brigade returned",c->id); } apr_brigade_cleanup(bb); return status; @@ -143,17 +224,17 @@ static apr_status_t bucketeer_buffer(h2_conn_io *io) /* long time not written, reset write size */ io->write_size = WRITE_SIZE_INITIAL; io->bytes_written = 0; - ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->connection, + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c, "h2_conn_io(%ld): timeout write size reset to %ld", - (long)io->connection->id, (long)io->write_size); + (long)io->c->id, (long)io->write_size); } else if (io->write_size < WRITE_SIZE_MAX && io->bytes_written >= io->warmup_size) { /* connection is hot, use max size */ io->write_size = WRITE_SIZE_MAX; - ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->connection, + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c, "h2_conn_io(%ld): threshold reached, 
write size now %ld", - (long)io->connection->id, (long)io->write_size); + (long)io->c->id, (long)io->write_size); } bcount = (int)(remaining / io->write_size); @@ -173,31 +254,90 @@ static apr_status_t bucketeer_buffer(h2_conn_io *io) return APR_SUCCESS; } +apr_status_t h2_conn_io_writeb(h2_conn_io *io, apr_bucket *b) +{ + APR_BRIGADE_INSERT_TAIL(io->output, b); + return APR_SUCCESS; +} + +static apr_status_t h2_conn_io_flush_int(h2_conn_io *io, int flush, int eoc) +{ + pass_out_ctx ctx; + apr_bucket *b; + + if (io->buflen == 0 && APR_BRIGADE_EMPTY(io->output)) { + return APR_SUCCESS; + } + + if (io->buflen > 0) { + /* something in the buffer, put it in the output brigade */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c, + "h2_conn_io: flush, flushing %ld bytes", + (long)io->buflen); + bucketeer_buffer(io); + } + + if (flush) { + b = apr_bucket_flush_create(io->c->bucket_alloc); + APR_BRIGADE_INSERT_TAIL(io->output, b); + } + + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c, "h2_conn_io: flush"); + io->buflen = 0; + ctx.c = io->c; + ctx.io = eoc? NULL : io; + + return pass_out(io->output, &ctx); + /* no more access after this, as we might have flushed an EOC bucket + * that de-allocated us all. */ +} + +apr_status_t h2_conn_io_flush(h2_conn_io *io) +{ + return h2_conn_io_flush_int(io, 1, 0); +} + +apr_status_t h2_conn_io_consider_pass(h2_conn_io *io) +{ + apr_off_t len = 0; + + if (!APR_BRIGADE_EMPTY(io->output)) { + len = h2_brigade_mem_size(io->output); + } + len += io->buflen; + if (len >= WRITE_BUFFER_SIZE) { + return h2_conn_io_flush_int(io, 1, 0); + } + return APR_SUCCESS; +} + +apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, h2_session *session) +{ + apr_bucket *b = h2_bucket_eoc_create(io->c->bucket_alloc, session); + APR_BRIGADE_INSERT_TAIL(io->output, b); + return h2_conn_io_flush_int(io, 0, 1); +} + apr_status_t h2_conn_io_write(h2_conn_io *io, const char *buf, size_t length) { apr_status_t status = APR_SUCCESS; pass_out_ctx ctx; - ctx.c = io->connection; + ctx.c = io->c; ctx.io = io; - io->unflushed = 1; if (io->bufsize > 0) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->connection, + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c, "h2_conn_io: buffering %ld bytes", (long)length); if (!APR_BRIGADE_EMPTY(io->output)) { - status = h2_conn_io_pass(io); - io->unflushed = 1; + status = h2_conn_io_flush_int(io, 0, 0); } while (length > 0 && (status == APR_SUCCESS)) { apr_size_t avail = io->bufsize - io->buflen; if (avail <= 0) { - - bucketeer_buffer(io); - status = pass_out(io->output, &ctx); - io->buflen = 0; + status = h2_conn_io_flush_int(io, 0, 0); } else if (length > avail) { memcpy(io->buffer + io->buflen, buf, avail); @@ -215,7 +355,7 @@ apr_status_t h2_conn_io_write(h2_conn_io *io, } else { - ap_log_cerror(APLOG_MARK, APLOG_TRACE4, status, io->connection, + ap_log_cerror(APLOG_MARK, APLOG_TRACE4, status, io->c, "h2_conn_io: writing %ld bytes to brigade", (long)length); status = apr_brigade_write(io->output, pass_out, &ctx, buf, length); } @@ -223,82 +363,3 @@ apr_status_t h2_conn_io_write(h2_conn_io *io, return status; } -apr_status_t h2_conn_io_writeb(h2_conn_io *io, apr_bucket *b) -{ - APR_BRIGADE_INSERT_TAIL(io->output, b); - io->unflushed = 1; - return APR_SUCCESS; -} - -apr_status_t h2_conn_io_consider_flush(h2_conn_io *io) -{ - apr_status_t status = APR_SUCCESS; - - /* The HTTP/1.1 network output buffer/flush behaviour does not - * give optimal performance in the HTTP/2 case, as the pattern of - * buckets (data/eor/eos) is different. 
- * As long as we have not found out the "best" way to deal with - * this, force a flush at least every WRITE_BUFFER_SIZE amount - * of data. - */ - if (io->unflushed) { - apr_off_t len = 0; - if (!APR_BRIGADE_EMPTY(io->output)) { - apr_brigade_length(io->output, 0, &len); - } - len += io->buflen; - if (len >= WRITE_BUFFER_SIZE) { - return h2_conn_io_pass(io); - } - } - return status; -} - -static apr_status_t h2_conn_io_flush_int(h2_conn_io *io, int force, int eoc) -{ - if (io->unflushed || force) { - pass_out_ctx ctx; - - if (io->buflen > 0) { - /* something in the buffer, put it in the output brigade */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->connection, - "h2_conn_io: flush, flushing %ld bytes", (long)io->buflen); - bucketeer_buffer(io); - io->buflen = 0; - } - - if (force) { - APR_BRIGADE_INSERT_TAIL(io->output, - apr_bucket_flush_create(io->output->bucket_alloc)); - } - - ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->connection, - "h2_conn_io: flush"); - /* Send it out */ - io->unflushed = 0; - - ctx.c = io->connection; - ctx.io = eoc? NULL : io; - return pass_out(io->output, &ctx); - /* no more access after this, as we might have flushed an EOC bucket - * that de-allocated us all. */ - } - return APR_SUCCESS; -} - -apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, apr_bucket *b) -{ - APR_BRIGADE_INSERT_TAIL(io->output, b); - return h2_conn_io_flush_int(io, 1, 1); -} - -apr_status_t h2_conn_io_flush(h2_conn_io *io) -{ - return h2_conn_io_flush_int(io, 1, 0); -} - -apr_status_t h2_conn_io_pass(h2_conn_io *io) -{ - return h2_conn_io_flush_int(io, 0, 0); -} - diff --git a/mod_http2/h2_conn_io.h b/mod_http2/h2_conn_io.h index 15457eb3..b8be671d 100644 --- a/mod_http2/h2_conn_io.h +++ b/mod_http2/h2_conn_io.h @@ -26,7 +26,7 @@ struct h2_session; * directly without copying. */ typedef struct { - conn_rec *connection; + conn_rec *c; apr_bucket_brigade *output; int is_tls; @@ -42,7 +42,6 @@ typedef struct { char *buffer; apr_size_t buflen; apr_size_t bufsize; - int unflushed; } h2_conn_io; apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, @@ -51,16 +50,40 @@ apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, int h2_conn_io_is_buffered(h2_conn_io *io); +/** + * Append data to the buffered output. + * @param buf the data to append + * @param length the length of the data to append + */ apr_status_t h2_conn_io_write(h2_conn_io *io, const char *buf, size_t length); - + +/** + * Append a bucket to the buffered output. + * @param io the connection io + * @param b the bucket to append + */ apr_status_t h2_conn_io_writeb(h2_conn_io *io, apr_bucket *b); -apr_status_t h2_conn_io_consider_flush(h2_conn_io *io); +/** + * Append an End-Of-Connection bucket to the output that, once destroyed, + * will tear down the complete http2 session. + */ +apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, struct h2_session *session); -apr_status_t h2_conn_io_pass(h2_conn_io *io); +/** + * Pass any buffered data on to the connection output filters. + * @param io the connection io + * @param flush if a flush bucket should be appended to any output + */ apr_status_t h2_conn_io_flush(h2_conn_io *io); -apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, apr_bucket *b); + +/** + * Check the amount of buffered output and pass it on if enough has accumulated. 
+ * @param io the connection io + * @param flush if a flush bucket should be appended to any output + */ +apr_status_t h2_conn_io_consider_pass(h2_conn_io *io); #endif /* defined(__mod_h2__h2_conn_io__) */ diff --git a/mod_http2/h2_ctx.c b/mod_http2/h2_ctx.c index b40037cb..8b786b94 100644 --- a/mod_http2/h2_ctx.c +++ b/mod_http2/h2_ctx.c @@ -65,7 +65,11 @@ h2_ctx *h2_ctx_rget(const request_rec *r) const char *h2_ctx_protocol_get(const conn_rec *c) { - h2_ctx *ctx = (h2_ctx*)ap_get_module_config(c->conn_config, &http2_module); + h2_ctx *ctx; + if (c->master) { + c = c->master; + } + ctx = (h2_ctx*)ap_get_module_config(c->conn_config, &http2_module); return ctx? ctx->protocol : NULL; } @@ -101,7 +105,17 @@ int h2_ctx_is_task(h2_ctx *ctx) return ctx && ctx->task; } -struct h2_task *h2_ctx_get_task(h2_ctx *ctx) +h2_task *h2_ctx_get_task(h2_ctx *ctx) { return ctx? ctx->task : NULL; } + +h2_task *h2_ctx_cget_task(conn_rec *c) +{ + return h2_ctx_get_task(h2_ctx_get(c, 0)); +} + +h2_task *h2_ctx_rget_task(request_rec *r) +{ + return h2_ctx_get_task(h2_ctx_rget(r)); +} diff --git a/mod_http2/h2_ctx.h b/mod_http2/h2_ctx.h index 68dc7c84..3b2c842c 100644 --- a/mod_http2/h2_ctx.h +++ b/mod_http2/h2_ctx.h @@ -71,5 +71,7 @@ const char *h2_ctx_protocol_get(const conn_rec *c); int h2_ctx_is_task(h2_ctx *ctx); struct h2_task *h2_ctx_get_task(h2_ctx *ctx); +struct h2_task *h2_ctx_cget_task(conn_rec *c); +struct h2_task *h2_ctx_rget_task(request_rec *r); #endif /* defined(__mod_h2__h2_ctx__) */ diff --git a/mod_http2/h2_filter.c b/mod_http2/h2_filter.c index f43d8b8c..8bf7fbcb 100644 --- a/mod_http2/h2_filter.c +++ b/mod_http2/h2_filter.c @@ -28,7 +28,6 @@ #include "h2_push.h" #include "h2_task.h" #include "h2_stream.h" -#include "h2_stream_set.h" #include "h2_request.h" #include "h2_response.h" #include "h2_session.h" @@ -105,7 +104,7 @@ apr_status_t h2_filter_core_input(ap_filter_t* f, { h2_filter_cin *cin = f->ctx; apr_status_t status = APR_SUCCESS; - apr_time_t saved_timeout = UNSET; + apr_interval_time_t saved_timeout = UNSET; ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, "core_input(%ld): read, %s, mode=%d, readbytes=%ld", @@ -147,8 +146,6 @@ apr_status_t h2_filter_core_input(ap_filter_t* f, if (saved_timeout != UNSET) { apr_socket_timeout_set(cin->socket, saved_timeout); } - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, - "core_input(%ld): got_brigade", (long)f->c->id); } switch (status) { @@ -158,9 +155,11 @@ apr_status_t h2_filter_core_input(ap_filter_t* f, case APR_EOF: case APR_EAGAIN: case APR_TIMEUP: + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, + "core_input(%ld): read", (long)f->c->id); break; default: - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, f->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, f->c, APLOGNO(03046) "h2_conn_io: error reading"); break; } @@ -197,7 +196,18 @@ int h2_filter_h2_status_handler(request_rec *r) return DECLINED; } -#define bbout(...) apr_brigade_printf(bb, NULL, NULL, __VA_ARGS__) +static apr_status_t bbout(apr_bucket_brigade *bb, const char *fmt, ...) 
+{ + va_list args; + apr_status_t rv; + + va_start(args, fmt); + rv = apr_brigade_vprintf(bb, NULL, NULL, fmt, args); + va_end(args); + + return rv; +} + static apr_status_t h2_sos_h2_status_buffer(h2_sos *sos, apr_bucket_brigade *bb) { h2_stream *stream = sos->stream; @@ -210,21 +220,21 @@ static apr_status_t h2_sos_h2_status_buffer(h2_sos *sos, apr_bucket_brigade *bb) bb = apr_brigade_create(stream->pool, session->c->bucket_alloc); } - bbout("{\n"); - bbout(" \"HTTP2\": \"on\",\n"); - bbout(" \"H2PUSH\": \"%s\",\n", h2_session_push_enabled(session)? "on" : "off"); - bbout(" \"mod_http2_version\": \"%s\",\n", MOD_HTTP2_VERSION); - bbout(" \"session_id\": %ld,\n", (long)session->id); - bbout(" \"streams_max\": %d,\n", (int)session->max_stream_count); - bbout(" \"this_stream\": %d,\n", stream->id); - bbout(" \"streams_open\": %d,\n", (int)h2_stream_set_size(session->streams)); - bbout(" \"max_stream_started\": %d,\n", mplx->max_stream_started); - bbout(" \"requests_received\": %d,\n", session->requests_received); - bbout(" \"responses_submitted\": %d,\n", session->responses_submitted); - bbout(" \"streams_reset\": %d, \n", session->streams_reset); - bbout(" \"pushes_promised\": %d,\n", session->pushes_promised); - bbout(" \"pushes_submitted\": %d,\n", session->pushes_submitted); - bbout(" \"pushes_reset\": %d,\n", session->pushes_reset); + bbout(bb, "{\n"); + bbout(bb, " \"HTTP2\": \"on\",\n"); + bbout(bb, " \"H2PUSH\": \"%s\",\n", h2_session_push_enabled(session)? "on" : "off"); + bbout(bb, " \"mod_http2_version\": \"%s\",\n", MOD_HTTP2_VERSION); + bbout(bb, " \"session_id\": %ld,\n", (long)session->id); + bbout(bb, " \"streams_max\": %d,\n", (int)session->max_stream_count); + bbout(bb, " \"this_stream\": %d,\n", stream->id); + bbout(bb, " \"streams_open\": %d,\n", (int)h2_ihash_count(session->streams)); + bbout(bb, " \"max_stream_started\": %d,\n", mplx->max_stream_started); + bbout(bb, " \"requests_received\": %d,\n", session->remote.emitted_count); + bbout(bb, " \"responses_submitted\": %d,\n", session->responses_submitted); + bbout(bb, " \"streams_reset\": %d, \n", session->streams_reset); + bbout(bb, " \"pushes_promised\": %d,\n", session->pushes_promised); + bbout(bb, " \"pushes_submitted\": %d,\n", session->pushes_submitted); + bbout(bb, " \"pushes_reset\": %d,\n", session->pushes_reset); diary = session->push_diary; if (diary) { @@ -236,7 +246,7 @@ static apr_status_t h2_sos_h2_status_buffer(h2_sos *sos, apr_bucket_brigade *bb) stream->request->authority, &data, &len); if (status == APR_SUCCESS) { base64_digest = h2_util_base64url_encode(data, len, stream->pool); - bbout(" \"cache_digest\": \"%s\",\n", base64_digest); + bbout(bb, " \"cache_digest\": \"%s\",\n", base64_digest); } /* try the reverse for testing purposes */ @@ -246,15 +256,15 @@ static apr_status_t h2_sos_h2_status_buffer(h2_sos *sos, apr_bucket_brigade *bb) stream->request->authority, &data, &len); if (status == APR_SUCCESS) { base64_digest = h2_util_base64url_encode(data, len, stream->pool); - bbout(" \"cache_digest^2\": \"%s\",\n", base64_digest); + bbout(bb, " \"cache_digest^2\": \"%s\",\n", base64_digest); } } } - bbout(" \"frames_received\": %ld,\n", (long)session->frames_received); - bbout(" \"frames_sent\": %ld,\n", (long)session->frames_sent); - bbout(" \"bytes_received\": %"APR_UINT64_T_FMT",\n", session->io.bytes_read); - bbout(" \"bytes_sent\": %"APR_UINT64_T_FMT"\n", session->io.bytes_written); - bbout("}\n"); + bbout(bb, " \"frames_received\": %ld,\n", (long)session->frames_received); + bbout(bb, " 
\"frames_sent\": %ld,\n", (long)session->frames_sent); + bbout(bb, " \"bytes_received\": %"APR_UINT64_T_FMT",\n", session->io.bytes_read); + bbout(bb, " \"bytes_sent\": %"APR_UINT64_T_FMT"\n", session->io.bytes_written); + bbout(bb, "}\n"); return sos->prev->buffer(sos->prev, bb); } @@ -265,9 +275,9 @@ static apr_status_t h2_sos_h2_status_read_to(h2_sos *sos, apr_bucket_brigade *bb return sos->prev->read_to(sos->prev, bb, plen, peos); } -static apr_status_t h2_sos_h2_status_prep_read(h2_sos *sos, apr_off_t *plen, int *peos) +static apr_status_t h2_sos_h2_status_prepare(h2_sos *sos, apr_off_t *plen, int *peos) { - return sos->prev->prep_read(sos->prev, plen, peos); + return sos->prev->prepare(sos->prev, plen, peos); } static apr_status_t h2_sos_h2_status_readx(h2_sos *sos, h2_io_data_cb *cb, void *ctx, @@ -294,7 +304,7 @@ static h2_sos *h2_sos_h2_status_create(h2_sos *prev) sos->response = response; sos->stream = prev->stream; sos->buffer = h2_sos_h2_status_buffer; - sos->prep_read = h2_sos_h2_status_prep_read; + sos->prepare = h2_sos_h2_status_prepare; sos->readx = h2_sos_h2_status_readx; sos->read_to = h2_sos_h2_status_read_to; sos->get_trailers = h2_sos_h2_status_get_trailers; diff --git a/mod_http2/h2_filter.h b/mod_http2/h2_filter.h index 89d0a464..2f281f8b 100644 --- a/mod_http2/h2_filter.h +++ b/mod_http2/h2_filter.h @@ -35,7 +35,7 @@ typedef struct h2_filter_cin { h2_filter_cin *h2_filter_cin_create(apr_pool_t *p, h2_filter_cin_cb *cb, void *ctx); -void h2_filter_cin_timeout_set(h2_filter_cin *cin, apr_interval_time_t timeout_secs); +void h2_filter_cin_timeout_set(h2_filter_cin *cin, apr_interval_time_t timeout); apr_status_t h2_filter_core_input(ap_filter_t* filter, apr_bucket_brigade* brigade, @@ -47,7 +47,7 @@ typedef struct h2_sos h2_sos; typedef apr_status_t h2_sos_data_cb(void *ctx, const char *data, apr_off_t len); typedef apr_status_t h2_sos_buffer(h2_sos *sos, apr_bucket_brigade *bb); -typedef apr_status_t h2_sos_prep_read(h2_sos *sos, apr_off_t *plen, int *peos); +typedef apr_status_t h2_sos_prepare(h2_sos *sos, apr_off_t *plen, int *peos); typedef apr_status_t h2_sos_readx(h2_sos *sos, h2_sos_data_cb *cb, void *ctx, apr_off_t *plen, int *peos); typedef apr_status_t h2_sos_read_to(h2_sos *sos, apr_bucket_brigade *bb, @@ -63,7 +63,7 @@ struct h2_sos { struct h2_response *response; void *ctx; h2_sos_buffer *buffer; - h2_sos_prep_read *prep_read; + h2_sos_prepare *prepare; h2_sos_readx *readx; h2_sos_read_to *read_to; h2_sos_get_trailers *get_trailers; diff --git a/mod_http2/h2_from_h1.c b/mod_http2/h2_from_h1.c index f2f39f30..8e1f163a 100644 --- a/mod_http2/h2_from_h1.c +++ b/mod_http2/h2_from_h1.c @@ -49,12 +49,6 @@ h2_from_h1 *h2_from_h1_create(int stream_id, apr_pool_t *pool) return from_h1; } -apr_status_t h2_from_h1_destroy(h2_from_h1 *from_h1) -{ - from_h1->bb = NULL; - return APR_SUCCESS; -} - static void set_state(h2_from_h1 *from_h1, h2_from_h1_state_t state) { if (from_h1->state != state) { @@ -77,7 +71,7 @@ static apr_status_t make_h2_headers(h2_from_h1 *from_h1, request_rec *r) from_h1->content_length = from_h1->response->content_length; from_h1->chunked = r->chunked; - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection, APLOGNO(03197) "h2_from_h1(%d): converted headers, content-length: %d" ", chunked=%d", from_h1->stream_id, (int)from_h1->content_length, @@ -519,7 +513,7 @@ apr_status_t h2_response_output_filter(ap_filter_t *f, apr_bucket_brigade *bb) if (eb) { int st = eb->status; 
apr_brigade_cleanup(bb); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03047) "h2_from_h1(%d): err bucket status=%d", from_h1->stream_id, st); ap_die(st, r); @@ -528,7 +522,7 @@ apr_status_t h2_response_output_filter(ap_filter_t *f, apr_bucket_brigade *bb) from_h1->response = create_response(from_h1, r); if (from_h1->response == NULL) { - ap_log_cerror(APLOG_MARK, APLOG_NOTICE, 0, f->c, + ap_log_cerror(APLOG_MARK, APLOG_NOTICE, 0, f->c, APLOGNO(03048) "h2_from_h1(%d): unable to create response", from_h1->stream_id); return APR_ENOMEM; @@ -574,7 +568,7 @@ apr_status_t h2_response_trailers_filter(ap_filter_t *f, apr_bucket_brigade *bb) /* FIXME: need a better test case than this. apr_table_setn(r->trailers_out, "X", "1"); */ if (r->trailers_out && !apr_is_empty_table(r->trailers_out)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03049) "h2_from_h1(%d): trailers filter, saving trailers", from_h1->stream_id); h2_response_set_trailers(from_h1->response, diff --git a/mod_http2/h2_from_h1.h b/mod_http2/h2_from_h1.h index cdd38ca6..af5dea24 100644 --- a/mod_http2/h2_from_h1.h +++ b/mod_http2/h2_from_h1.h @@ -60,8 +60,6 @@ struct h2_from_h1 { h2_from_h1 *h2_from_h1_create(int stream_id, apr_pool_t *pool); -apr_status_t h2_from_h1_destroy(h2_from_h1 *response); - apr_status_t h2_from_h1_read_response(h2_from_h1 *from_h1, ap_filter_t* f, apr_bucket_brigade* bb); diff --git a/mod_http2/h2_h2.c b/mod_http2/h2_h2.c index 6217c83e..05fb6ef6 100644 --- a/mod_http2/h2_h2.c +++ b/mod_http2/h2_h2.c @@ -27,6 +27,8 @@ #include #include +#include "mod_ssl.h" + #include "mod_http2.h" #include "h2_private.h" @@ -54,21 +56,9 @@ const char *H2_MAGIC_TOKEN = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; /******************************************************************************* * The optional mod_ssl functions we need. 
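The hunk below drops the hand-rolled APR_DECLARE_OPTIONAL_FN declarations and instead uses the APR_OPTIONAL_FN_TYPE typedefs that come with the newly included mod_ssl.h. As a minimal sketch (not part of the diff), this is how such optional-function pointers are typically resolved once all modules are loaded; the helper names are illustrative, only the APR_* macros, ssl_is_https and conn_rec come from the surrounding code:

#include <httpd.h>
#include <apr_optional.h>
#include "mod_ssl.h"        /* declares ssl_is_https() as an optional function */

static APR_OPTIONAL_FN_TYPE(ssl_is_https) *opt_ssl_is_https;

static void example_retrieve_ssl_fns(void)
{
    /* run once all modules are loaded; stays NULL when mod_ssl is absent,
     * so every caller has to check the pointer */
    opt_ssl_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https);
}

static int example_conn_is_tls(conn_rec *c)
{
    return opt_ssl_is_https && opt_ssl_is_https(c);
}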
*/ -APR_DECLARE_OPTIONAL_FN(int, ssl_engine_disable, (conn_rec*)); -APR_DECLARE_OPTIONAL_FN(int, ssl_is_https, (conn_rec*)); - -static int (*opt_ssl_engine_disable)(conn_rec*); -static int (*opt_ssl_is_https)(conn_rec*); -/******************************************************************************* - * SSL var lookup - */ -APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup, - (apr_pool_t *, server_rec *, - conn_rec *, request_rec *, - char *)); -static char *(*opt_ssl_var_lookup)(apr_pool_t *, server_rec *, - conn_rec *, request_rec *, - char *); +static APR_OPTIONAL_FN_TYPE(ssl_engine_disable) *opt_ssl_engine_disable; +static APR_OPTIONAL_FN_TYPE(ssl_is_https) *opt_ssl_is_https; +static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *opt_ssl_var_lookup; /******************************************************************************* @@ -441,6 +431,7 @@ static int cipher_is_blacklisted(const char *cipher, const char **psource) * - process_conn take over connection in case of h2 */ static int h2_h2_process_conn(conn_rec* c); +static int h2_h2_pre_close_conn(conn_rec* c); static int h2_h2_post_read_req(request_rec *r); /******************************************************************************* @@ -494,14 +485,14 @@ int h2_is_acceptable_connection(conn_rec *c, int require_all) if (strncmp("TLS", val, 3) || !strcmp("TLSv1", val) || !strcmp("TLSv1.1", val)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03050) "h2_h2(%ld): tls protocol not suitable: %s", (long)c->id, val); return 0; } } else if (require_all) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03051) "h2_h2(%ld): tls protocol is indetermined", (long)c->id); return 0; } @@ -512,14 +503,14 @@ int h2_is_acceptable_connection(conn_rec *c, int require_all) if (val && *val) { const char *source; if (cipher_is_blacklisted(val, &source)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03052) "h2_h2(%ld): tls cipher %s blacklisted by %s", (long)c->id, val, source); return 0; } } else if (require_all) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03053) "h2_h2(%ld): tls cipher is indetermined", (long)c->id); return 0; } @@ -565,7 +556,11 @@ void h2_h2_register_hooks(void) */ ap_hook_process_connection(h2_h2_process_conn, mod_ssl, mod_reqtimeout, APR_HOOK_LAST); - + + /* One last chance to properly say goodbye if we have not done so + * already. */ + ap_hook_pre_close_connection(h2_h2_pre_close_conn, NULL, mod_ssl, APR_HOOK_LAST); + /* With "H2SerializeHeaders On", we install the filter in this hook * that parses the response. This needs to happen before any other post * read function terminates the request with an error. Otherwise we will @@ -617,7 +612,7 @@ int h2_h2_process_conn(conn_rec* c) AP_MODE_SPECULATIVE, APR_BLOCK_READ, 24); if (status != APR_SUCCESS) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03054) "h2_h2, error reading 24 bytes speculative"); apr_brigade_destroy(temp); return DECLINED; @@ -658,6 +653,25 @@ int h2_h2_process_conn(conn_rec* c) return DECLINED; } +static int h2_h2_pre_close_conn(conn_rec *c) +{ + h2_ctx *ctx; + + /* slave connection? */ + if (c->master) { + return DECLINED; + } + + ctx = h2_ctx_get(c, 0); + if (ctx) { + /* If the session has been closed correctly already, we will not + * fiond a h2_ctx here. 
The presence indicates that the session + * is still ongoing. */ + return h2_conn_pre_close(ctx, c); + } + return DECLINED; +} + static int h2_h2_post_read_req(request_rec *r) { /* slave connection? */ diff --git a/mod_http2/h2_h2.h b/mod_http2/h2_h2.h index 563abe3f..592001e9 100644 --- a/mod_http2/h2_h2.h +++ b/mod_http2/h2_h2.h @@ -28,47 +28,6 @@ extern const char *h2_clear_protos[]; */ extern const char *h2_tls_protos[]; -/** - * The magic PRIamble of RFC 7540 that is always sent when starting - * a h2 communication. - */ -extern const char *H2_MAGIC_TOKEN; - -#define H2_ERR_NO_ERROR (0x00) -#define H2_ERR_PROTOCOL_ERROR (0x01) -#define H2_ERR_INTERNAL_ERROR (0x02) -#define H2_ERR_FLOW_CONTROL_ERROR (0x03) -#define H2_ERR_SETTINGS_TIMEOUT (0x04) -#define H2_ERR_STREAM_CLOSED (0x05) -#define H2_ERR_FRAME_SIZE_ERROR (0x06) -#define H2_ERR_REFUSED_STREAM (0x07) -#define H2_ERR_CANCEL (0x08) -#define H2_ERR_COMPRESSION_ERROR (0x09) -#define H2_ERR_CONNECT_ERROR (0x0a) -#define H2_ERR_ENHANCE_YOUR_CALM (0x0b) -#define H2_ERR_INADEQUATE_SECURITY (0x0c) -#define H2_ERR_HTTP_1_1_REQUIRED (0x0d) - -/* Maximum number of padding bytes in a frame, rfc7540 */ -#define H2_MAX_PADLEN 256 -/* Initial default window size, RFC 7540 ch. 6.5.2 */ -#define H2_INITIAL_WINDOW_SIZE ((64*1024)-1) - -#define H2_HTTP_2XX(a) ((a) >= 200 && (a) < 300) - -#define H2_STREAM_CLIENT_INITIATED(id) (id&0x01) - -typedef enum { - H2_DEPENDANT_AFTER, - H2_DEPENDANT_INTERLEAVED, - H2_DEPENDANT_BEFORE, -} h2_dependency; - -typedef struct h2_priority { - h2_dependency dependency; - int weight; -} h2_priority; - /** * Provide a user readable description of the HTTP/2 error code- * @param h2_error http/2 error code, as in rfc 7540, ch. 7 diff --git a/mod_http2/h2_task_queue.c b/mod_http2/h2_int_queue.c similarity index 72% rename from mod_http2/h2_task_queue.c rename to mod_http2/h2_int_queue.c index 2871cabc..472ae340 100644 --- a/mod_http2/h2_task_queue.c +++ b/mod_http2/h2_int_queue.c @@ -15,23 +15,21 @@ #include #include +#include -#include -#include +#include "h2_int_queue.h" -#include "h2_task_queue.h" +static void tq_grow(h2_int_queue *q, int nlen); +static void tq_swap(h2_int_queue *q, int i, int j); +static int tq_bubble_up(h2_int_queue *q, int i, int top, + h2_iq_cmp *cmp, void *ctx); +static int tq_bubble_down(h2_int_queue *q, int i, int bottom, + h2_iq_cmp *cmp, void *ctx); -static void tq_grow(h2_task_queue *q, int nlen); -static void tq_swap(h2_task_queue *q, int i, int j); -static int tq_bubble_up(h2_task_queue *q, int i, int top, - h2_tq_cmp *cmp, void *ctx); -static int tq_bubble_down(h2_task_queue *q, int i, int bottom, - h2_tq_cmp *cmp, void *ctx); - -h2_task_queue *h2_tq_create(apr_pool_t *pool, int capacity) +h2_int_queue *h2_iq_create(apr_pool_t *pool, int capacity) { - h2_task_queue *q = apr_pcalloc(pool, sizeof(h2_task_queue)); + h2_int_queue *q = apr_pcalloc(pool, sizeof(h2_int_queue)); if (q) { q->pool = pool; tq_grow(q, capacity); @@ -40,12 +38,18 @@ h2_task_queue *h2_tq_create(apr_pool_t *pool, int capacity) return q; } -int h2_tq_empty(h2_task_queue *q) +int h2_iq_empty(h2_int_queue *q) { return q->nelts == 0; } -void h2_tq_add(h2_task_queue *q, int sid, h2_tq_cmp *cmp, void *ctx) +int h2_iq_size(h2_int_queue *q) +{ + return q->nelts; +} + + +void h2_iq_add(h2_int_queue *q, int sid, h2_iq_cmp *cmp, void *ctx) { int i; @@ -57,11 +61,13 @@ void h2_tq_add(h2_task_queue *q, int sid, h2_tq_cmp *cmp, void *ctx) q->elts[i] = sid; ++q->nelts; - /* bubble it to the front of the queue */ - tq_bubble_up(q, i, 
q->head, cmp, ctx); + if (cmp) { + /* bubble it to the front of the queue */ + tq_bubble_up(q, i, q->head, cmp, ctx); + } } -int h2_tq_remove(h2_task_queue *q, int sid) +int h2_iq_remove(h2_int_queue *q, int sid) { int i; for (i = 0; i < q->nelts; ++i) { @@ -81,7 +87,12 @@ int h2_tq_remove(h2_task_queue *q, int sid) return 0; } -void h2_tq_sort(h2_task_queue *q, h2_tq_cmp *cmp, void *ctx) +void h2_iq_clear(h2_int_queue *q) +{ + q->nelts = 0; +} + +void h2_iq_sort(h2_int_queue *q, h2_iq_cmp *cmp, void *ctx) { /* Assume that changes in ordering are minimal. This needs, * best case, q->nelts - 1 comparisions to check that nothing @@ -109,7 +120,7 @@ void h2_tq_sort(h2_task_queue *q, h2_tq_cmp *cmp, void *ctx) } -int h2_tq_shift(h2_task_queue *q) +int h2_iq_shift(h2_int_queue *q) { int sid; @@ -124,9 +135,8 @@ int h2_tq_shift(h2_task_queue *q) return sid; } -static void tq_grow(h2_task_queue *q, int nlen) +static void tq_grow(h2_int_queue *q, int nlen) { - AP_DEBUG_ASSERT(q->nalloc <= nlen); if (nlen > q->nalloc) { int *nq = apr_pcalloc(q->pool, sizeof(int) * nlen); if (q->nelts > 0) { @@ -145,15 +155,15 @@ static void tq_grow(h2_task_queue *q, int nlen) } } -static void tq_swap(h2_task_queue *q, int i, int j) +static void tq_swap(h2_int_queue *q, int i, int j) { int x = q->elts[i]; q->elts[i] = q->elts[j]; q->elts[j] = x; } -static int tq_bubble_up(h2_task_queue *q, int i, int top, - h2_tq_cmp *cmp, void *ctx) +static int tq_bubble_up(h2_int_queue *q, int i, int top, + h2_iq_cmp *cmp, void *ctx) { int prev; while (((prev = (q->nalloc + i - 1) % q->nalloc), i != top) @@ -164,8 +174,8 @@ static int tq_bubble_up(h2_task_queue *q, int i, int top, return i; } -static int tq_bubble_down(h2_task_queue *q, int i, int bottom, - h2_tq_cmp *cmp, void *ctx) +static int tq_bubble_down(h2_int_queue *q, int i, int bottom, + h2_iq_cmp *cmp, void *ctx) { int next; while (((next = (q->nalloc + i + 1) % q->nalloc), i != bottom) diff --git a/mod_http2/h2_task_queue.h b/mod_http2/h2_int_queue.h similarity index 72% rename from mod_http2/h2_task_queue.h rename to mod_http2/h2_int_queue.h index 3ff1d396..69f1e1c9 100644 --- a/mod_http2/h2_task_queue.h +++ b/mod_http2/h2_int_queue.h @@ -13,15 +13,15 @@ * limitations under the License. */ -#ifndef __mod_h2__h2_task_queue__ -#define __mod_h2__h2_task_queue__ +#ifndef __mod_h2__h2_int_queue__ +#define __mod_h2__h2_int_queue__ /** - * h2_task_queue keeps a list of sorted h2_task* in ascending order. + * h2_int_queue keeps a list of sorted h2_task* in ascending order. */ -typedef struct h2_task_queue h2_task_queue; +typedef struct h2_int_queue h2_int_queue; -struct h2_task_queue { +struct h2_int_queue { int *elts; int head; int nelts; @@ -40,7 +40,7 @@ struct h2_task_queue { * < 0: s1 should be sorted before s2 * > 0: s2 should be sorted before s1 */ -typedef int h2_tq_cmp(int s1, int s2, void *ctx); +typedef int h2_iq_cmp(int s1, int s2, void *ctx); /** @@ -48,13 +48,19 @@ typedef int h2_tq_cmp(int s1, int s2, void *ctx); * @param id the identifier of the queue * @param pool the memory pool */ -h2_task_queue *h2_tq_create(apr_pool_t *pool, int capacity); +h2_int_queue *h2_iq_create(apr_pool_t *pool, int capacity); /** * Return != 0 iff there are no tasks in the queue. * @param q the queue to check */ -int h2_tq_empty(h2_task_queue *q); +int h2_iq_empty(h2_int_queue *q); + +/** + * Return the number of int in the queue. + * @param q the queue to get size on + */ +int h2_iq_size(h2_int_queue *q); /** * Add a stream idto the queue. 
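The h2_iq_* interface above replaces the old h2_tq_* task queue with a plain queue of stream ids, kept sorted by a caller-supplied comparator. A minimal usage sketch (not part of the diff); the ascending comparator and function names are illustrative, the real callers pass a stream-priority comparator:

#include <apr_pools.h>
#include "h2_int_queue.h"

static int example_cmp_ascending(int sid1, int sid2, void *ctx)
{
    (void)ctx;
    return sid1 - sid2;        /* < 0 sorts sid1 before sid2 */
}

static void example_schedule(apr_pool_t *pool)
{
    h2_int_queue *q = h2_iq_create(pool, 16);

    h2_iq_add(q, 5, example_cmp_ascending, NULL);
    h2_iq_add(q, 1, example_cmp_ascending, NULL);
    h2_iq_add(q, 3, example_cmp_ascending, NULL);

    while (!h2_iq_empty(q)) {
        int sid = h2_iq_shift(q);   /* yields 1, then 3, then 5 */
        (void)sid;                  /* hand the stream id to a worker */
    }
}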
@@ -64,7 +70,7 @@ int h2_tq_empty(h2_task_queue *q); * @param cmp the comparator for sorting * @param ctx user data for comparator */ -void h2_tq_add(h2_task_queue *q, int sid, h2_tq_cmp *cmp, void *ctx); +void h2_iq_add(h2_int_queue *q, int sid, h2_iq_cmp *cmp, void *ctx); /** * Remove the stream id from the queue. Return != 0 iff task @@ -73,7 +79,12 @@ void h2_tq_add(h2_task_queue *q, int sid, h2_tq_cmp *cmp, void *ctx); * @param sid the stream id to remove * @return != 0 iff task was found in queue */ -int h2_tq_remove(h2_task_queue *q, int sid); +int h2_iq_remove(h2_int_queue *q, int sid); + +/** + * Remove all entries in the queue. + */ +void h2_iq_clear(h2_int_queue *q); /** * Sort the stream idqueue again. Call if the task ordering @@ -83,7 +94,7 @@ int h2_tq_remove(h2_task_queue *q, int sid); * @param cmp the comparator for sorting * @param ctx user data for the comparator */ -void h2_tq_sort(h2_task_queue *q, h2_tq_cmp *cmp, void *ctx); +void h2_iq_sort(h2_int_queue *q, h2_iq_cmp *cmp, void *ctx); /** * Get the first stream id from the queue or NULL if the queue is empty. @@ -92,6 +103,6 @@ void h2_tq_sort(h2_task_queue *q, h2_tq_cmp *cmp, void *ctx); * @param q the queue to get the first task from * @return the first stream id of the queue, 0 if empty */ -int h2_tq_shift(h2_task_queue *q); +int h2_iq_shift(h2_int_queue *q); -#endif /* defined(__mod_h2__h2_task_queue__) */ +#endif /* defined(__mod_h2__h2_int_queue__) */ diff --git a/mod_http2/h2_io.c b/mod_http2/h2_io.c index 00676320..5bbf09e9 100644 --- a/mod_http2/h2_io.c +++ b/mod_http2/h2_io.c @@ -23,6 +23,7 @@ #include #include #include +#include #include "h2_private.h" #include "h2_h2.h" @@ -33,48 +34,98 @@ #include "h2_task.h" #include "h2_util.h" -h2_io *h2_io_create(int id, apr_pool_t *pool) +h2_io *h2_io_create(int id, apr_pool_t *pool, + apr_bucket_alloc_t *bucket_alloc, + const h2_request *request) { h2_io *io = apr_pcalloc(pool, sizeof(*io)); if (io) { io->id = id; io->pool = pool; - io->bucket_alloc = apr_bucket_alloc_create(pool); + io->bucket_alloc = bucket_alloc; + io->request = h2_request_clone(pool, request); } return io; } -void h2_io_destroy(h2_io *io) +static void check_bbin(h2_io *io) { - if (io->pool) { - apr_pool_destroy(io->pool); - /* gone */ + if (!io->bbin) { + io->bbin = apr_brigade_create(io->pool, io->bucket_alloc); } } +static void check_bbout(h2_io *io) +{ + if (!io->bbout) { + io->bbout = apr_brigade_create(io->pool, io->bucket_alloc); + } +} + +static void check_bbtmp(h2_io *io) +{ + if (!io->bbtmp) { + io->bbtmp = apr_brigade_create(io->pool, io->bucket_alloc); + } +} + +static void append_eos(h2_io *io, apr_bucket_brigade *bb) +{ + APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(io->bucket_alloc)); +} + +void h2_io_redo(h2_io *io) +{ + io->worker_started = 0; + io->response = NULL; + io->rst_error = 0; + if (io->bbin) { + apr_brigade_cleanup(io->bbin); + } + if (io->bbout) { + apr_brigade_cleanup(io->bbout); + } + if (io->bbtmp) { + apr_brigade_cleanup(io->bbtmp); + } + io->started_at = io->done_at = 0; +} + +int h2_io_is_repeatable(h2_io *io) { + if (io->submitted + || io->input_consumed > 0 + || !io->request) { + /* cannot repeat that. 
*/ + return 0; + } + return (!strcmp("GET", io->request->method) + || !strcmp("HEAD", io->request->method) + || !strcmp("OPTIONS", io->request->method)); +} + void h2_io_set_response(h2_io *io, h2_response *response) { AP_DEBUG_ASSERT(io->pool); AP_DEBUG_ASSERT(response); AP_DEBUG_ASSERT(!io->response); - io->response = h2_response_clone(io->pool, response); + /* we used to clone the response into the io->pool. But + * we have much tighter control over the EOR bucket nowadays, + * so just use the instance given */ + io->response = response; if (response->rst_error) { h2_io_rst(io, response->rst_error); } + else if (response->content_length == 0) { + io->eos_out = 1; + } } - void h2_io_rst(h2_io *io, int error) { io->rst_error = error; io->eos_in = 1; } -int h2_io_in_has_eos_for(h2_io *io) -{ - return io->eos_in || (io->bbin && h2_util_has_eos(io->bbin, -1)); -} - int h2_io_out_has_data(h2_io *io) { return io->bbout && h2_util_bb_has_data_or_eos(io->bbout); @@ -128,7 +179,7 @@ apr_status_t h2_io_signal_wait(h2_mplx *m, h2_io *io) if (io->timeout_at != 0) { status = apr_thread_cond_timedwait(io->timed_cond, m->lock, io->timeout_at); if (APR_STATUS_IS_TIMEUP(status)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c, APLOGNO(03055) "h2_mplx(%ld-%d): stream timeout expired: %s", m->id, io->id, (io->timed_op == H2_IO_READ)? "read" : "write"); @@ -172,8 +223,8 @@ static int add_trailer(void *ctx, const char *key, const char *value) return (status == APR_SUCCESS); } -static apr_status_t append_eos(h2_io *io, apr_bucket_brigade *bb, - apr_table_t *trailers) +static apr_status_t in_append_eos(h2_io *io, apr_bucket_brigade *bb, + apr_table_t *trailers) { apr_status_t status = APR_SUCCESS; apr_table_t *t = io->request->trailers; @@ -195,7 +246,7 @@ static apr_status_t append_eos(h2_io *io, apr_bucket_brigade *bb, status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n"); } } - APR_BRIGADE_INSERT_TAIL(bb, apr_bucket_eos_create(io->bucket_alloc)); + append_eos(io, bb); return status; } @@ -212,7 +263,7 @@ apr_status_t h2_io_in_read(h2_io *io, apr_bucket_brigade *bb, if (!io->bbin || APR_BRIGADE_EMPTY(io->bbin)) { if (io->eos_in) { if (!io->eos_in_written) { - status = append_eos(io, bb, trailers); + status = in_append_eos(io, bb, trailers); io->eos_in_written = 1; return status; } @@ -223,26 +274,27 @@ apr_status_t h2_io_in_read(h2_io *io, apr_bucket_brigade *bb, if (io->request->chunked) { /* the reader expects HTTP/1.1 chunked encoding */ - status = h2_util_move(io->tmp, io->bbin, maxlen, NULL, "h2_io_in_read_chunk"); + check_bbtmp(io); + status = h2_util_move(io->bbtmp, io->bbin, maxlen, NULL, "h2_io_in_read_chunk"); if (status == APR_SUCCESS) { apr_off_t tmp_len = 0; - apr_brigade_length(io->tmp, 1, &tmp_len); + apr_brigade_length(io->bbtmp, 1, &tmp_len); if (tmp_len > 0) { io->input_consumed += tmp_len; status = apr_brigade_printf(bb, NULL, NULL, "%lx\r\n", (unsigned long)tmp_len); if (status == APR_SUCCESS) { - status = h2_util_move(bb, io->tmp, -1, NULL, "h2_io_in_read_tmp1"); + status = h2_util_move(bb, io->bbtmp, -1, NULL, "h2_io_in_read_tmp1"); if (status == APR_SUCCESS) { status = apr_brigade_puts(bb, NULL, NULL, "\r\n"); } } } else { - status = h2_util_move(bb, io->tmp, -1, NULL, "h2_io_in_read_tmp2"); + status = h2_util_move(bb, io->bbtmp, -1, NULL, "h2_io_in_read_tmp2"); } - apr_brigade_cleanup(io->tmp); + apr_brigade_cleanup(io->bbtmp); } } else { @@ -256,10 +308,22 @@ apr_status_t h2_io_in_read(h2_io *io, apr_bucket_brigade *bb, 
} } + if (status == APR_SUCCESS && (!io->bbin || APR_BRIGADE_EMPTY(io->bbin))) { + if (io->eos_in) { + if (!io->eos_in_written) { + status = in_append_eos(io, bb, trailers); + io->eos_in_written = 1; + } + } + } + + if (status == APR_SUCCESS && APR_BRIGADE_EMPTY(bb)) { + return APR_EAGAIN; + } return status; } -apr_status_t h2_io_in_write(h2_io *io, apr_bucket_brigade *bb) +apr_status_t h2_io_in_write(h2_io *io, const char *d, apr_size_t len, int eos) { if (io->rst_error) { return APR_ECONNABORTED; @@ -268,13 +332,12 @@ apr_status_t h2_io_in_write(h2_io *io, apr_bucket_brigade *bb) if (io->eos_in) { return APR_EOF; } - io->eos_in = h2_util_has_eos(bb, -1); - if (!APR_BRIGADE_EMPTY(bb)) { - if (!io->bbin) { - io->bbin = apr_brigade_create(io->pool, io->bucket_alloc); - io->tmp = apr_brigade_create(io->pool, io->bucket_alloc); - } - return h2_util_move(io->bbin, bb, -1, NULL, "h2_io_in_write"); + if (eos) { + io->eos_in = 1; + } + if (len > 0) { + check_bbin(io); + return apr_brigade_write(io->bbin, NULL, NULL, d, len); } return APR_SUCCESS; } @@ -289,104 +352,66 @@ apr_status_t h2_io_in_close(h2_io *io) return APR_SUCCESS; } -apr_status_t h2_io_out_readx(h2_io *io, - h2_io_data_cb *cb, void *ctx, - apr_off_t *plen, int *peos) +apr_status_t h2_io_out_get_brigade(h2_io *io, apr_bucket_brigade *bb, + apr_off_t len) { - apr_status_t status; - if (io->rst_error) { return APR_ECONNABORTED; } - - if (io->eos_out) { - *plen = 0; - *peos = 1; - return APR_SUCCESS; - } - else if (!io->bbout) { - *plen = 0; - *peos = 0; - return APR_EAGAIN; + if (io->eos_out_read) { + return APR_EOF; } - - if (cb == NULL) { - /* just checking length available */ - status = h2_util_bb_avail(io->bbout, plen, peos); + else if (!io->bbout || APR_BRIGADE_EMPTY(io->bbout)) { + return APR_EAGAIN; } else { - status = h2_util_bb_readx(io->bbout, cb, ctx, plen, peos); - if (status == APR_SUCCESS) { - io->eos_out = *peos; + apr_status_t status; + apr_off_t pre_len, post_len; + /* Allow file handles pass through without limits. If they + * already have the lifetime of this stream, we might as well + * pass them on to the master connection */ + apr_size_t files = INT_MAX; + + apr_brigade_length(bb, 0, &pre_len); + status = h2_util_move(bb, io->bbout, len, &files, "h2_io_read_to"); + if (status == APR_SUCCESS && io->eos_out + && APR_BRIGADE_EMPTY(io->bbout)) { + io->eos_out_read = 1; } - } - - return status; -} - -apr_status_t h2_io_out_read_to(h2_io *io, apr_bucket_brigade *bb, - apr_off_t *plen, int *peos) -{ - if (io->rst_error) { - return APR_ECONNABORTED; - } - - if (io->eos_out) { - *plen = 0; - *peos = 1; - return APR_SUCCESS; - } - else if (!io->bbout) { - *plen = 0; - *peos = 0; - return APR_EAGAIN; - } - - io->eos_out = *peos = h2_util_has_eos(io->bbout, *plen); - return h2_util_move(bb, io->bbout, *plen, NULL, "h2_io_read_to"); -} - -static void process_trailers(h2_io *io, apr_table_t *trailers) -{ - if (trailers && io->response) { - h2_response_set_trailers(io->response, - apr_table_clone(io->pool, trailers)); + apr_brigade_length(bb, 0, &post_len); + io->output_consumed += (post_len - pre_len); + return status; } } apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb, - apr_size_t maxlen, apr_table_t *trailers, + apr_size_t maxlen, apr_size_t *pfile_buckets_allowed) { apr_status_t status; + apr_bucket *b; int start_allowed; if (io->rst_error) { return APR_ECONNABORTED; } - if (io->eos_out) { - apr_off_t len; - /* We have already delivered an EOS bucket to a reader, no - * sense in storing anything more here. 
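A little earlier, h2_io_in_read() re-frames buffered input as HTTP/1.1 chunks ("%lx\r\n" + data + "\r\n", with "0\r\n\r\n" at end of stream) before the slave request sees it. The same framing as a standalone sketch (not part of the diff); the helper name is illustrative, the apr_brigade_* calls match the ones used above:

#include <apr_buckets.h>

static apr_status_t example_write_chunk(apr_bucket_brigade *dest,
                                        const char *data, apr_size_t len)
{
    apr_status_t rv;

    if (len == 0) {
        /* last chunk, no trailers */
        return apr_brigade_puts(dest, NULL, NULL, "0\r\n\r\n");
    }
    rv = apr_brigade_printf(dest, NULL, NULL, "%lx\r\n", (unsigned long)len);
    if (rv == APR_SUCCESS) {
        rv = apr_brigade_write(dest, NULL, NULL, data, len);
    }
    if (rv == APR_SUCCESS) {
        rv = apr_brigade_puts(dest, NULL, NULL, "\r\n");
    }
    return rv;
}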
- */ - status = apr_brigade_length(bb, 1, &len); - if (status == APR_SUCCESS) { - if (len > 0) { - /* someone tries to write real data after EOS, that - * does not look right. */ - status = APR_EOF; - } - /* cleanup, as if we had moved the data */ - apr_brigade_cleanup(bb); + /* Filter the EOR bucket and set it aside. We prefer to tear down + * the request when the whole h2 stream is done */ + for (b = APR_BRIGADE_FIRST(bb); + b != APR_BRIGADE_SENTINEL(bb); + b = APR_BUCKET_NEXT(b)) + { + if (AP_BUCKET_IS_EOR(b)) { + APR_BUCKET_REMOVE(b); + io->eor = b; + break; } - return status; - } - - process_trailers(io, trailers); - if (!io->bbout) { - io->bbout = apr_brigade_create(io->pool, io->bucket_alloc); - } + else if (APR_BUCKET_IS_EOS(b)) { + io->eos_out = 1; + break; + } + } /* Let's move the buckets from the request processing in here, so * that the main thread can read them when it has time/capacity. @@ -398,6 +423,7 @@ apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb, * many open files already buffered. Otherwise we will run out of * file handles. */ + check_bbout(io); start_allowed = *pfile_buckets_allowed; status = h2_util_move(io->bbout, bb, maxlen, pfile_buckets_allowed, "h2_io_out_write"); @@ -409,19 +435,18 @@ apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb, } -apr_status_t h2_io_out_close(h2_io *io, apr_table_t *trailers) +apr_status_t h2_io_out_close(h2_io *io) { if (io->rst_error) { return APR_ECONNABORTED; } - if (!io->eos_out) { /* EOS has not been read yet */ - process_trailers(io, trailers); - if (!io->bbout) { - io->bbout = apr_brigade_create(io->pool, io->bucket_alloc); - } - if (!h2_util_has_eos(io->bbout, -1)) { - APR_BRIGADE_INSERT_TAIL(io->bbout, - apr_bucket_eos_create(io->bucket_alloc)); + if (!io->eos_out_read) { /* EOS has not been read yet */ + if (!io->eos_out) { + check_bbout(io); + io->eos_out = 1; + if (!h2_util_has_eos(io->bbout, -1)) { + append_eos(io, io->bbout); + } } } return APR_SUCCESS; diff --git a/mod_http2/h2_io.h b/mod_http2/h2_io.h index acaa56fc..d700f6f3 100644 --- a/mod_http2/h2_io.h +++ b/mod_http2/h2_io.h @@ -20,6 +20,7 @@ struct h2_response; struct apr_thread_cond_t; struct h2_mplx; struct h2_request; +struct h2_task; typedef apr_status_t h2_io_data_cb(void *ctx, const char *data, apr_off_t len); @@ -30,37 +31,44 @@ typedef enum { H2_IO_READ, H2_IO_WRITE, H2_IO_ANY, -} -h2_io_op; +} h2_io_op; typedef struct h2_io h2_io; struct h2_io { int id; /* stream identifier */ - apr_pool_t *pool; /* stream pool */ + apr_pool_t *pool; /* stream pool */ apr_bucket_alloc_t *bucket_alloc; const struct h2_request *request;/* request on this io */ struct h2_response *response; /* response to request */ int rst_error; /* h2 related stream abort error */ + apr_bucket *eor; /* the EOR bucket, set aside */ + struct h2_task *task; /* the task once started */ + apr_bucket_brigade *bbin; /* input data for stream */ apr_bucket_brigade *bbout; /* output data from stream */ - apr_bucket_brigade *tmp; /* temporary data for chunking */ + apr_bucket_brigade *bbtmp; /* temporary data for chunking */ unsigned int orphaned : 1; /* h2_stream is gone for this io */ unsigned int worker_started : 1; /* h2_worker started processing for this io */ unsigned int worker_done : 1; /* h2_worker finished for this io */ + unsigned int submitted : 1; /* response has been submitted to client */ unsigned int request_body : 1; /* iff request has body */ unsigned int eos_in : 1; /* input eos has been seen */ unsigned int eos_in_written : 1; /* input eos has been 
forwarded */ - unsigned int eos_out : 1; /* output eos has been seen */ + unsigned int eos_out : 1; /* output eos is present */ + unsigned int eos_out_read : 1; /* output eos has been forwarded */ h2_io_op timed_op; /* which operation is waited on, if any */ struct apr_thread_cond_t *timed_cond; /* condition to wait on, maybe NULL */ apr_time_t timeout_at; /* when IO wait will time out */ + apr_time_t started_at; /* when processing started */ + apr_time_t done_at; /* when processing was done */ apr_size_t input_consumed; /* how many bytes have been read */ + apr_size_t output_consumed; /* how many bytes have been written out */ int files_handles_owned; }; @@ -72,12 +80,9 @@ struct h2_io { /** * Creates a new h2_io for the given stream id. */ -h2_io *h2_io_create(int id, apr_pool_t *pool); - -/** - * Frees any resources hold by the h2_io instance. - */ -void h2_io_destroy(h2_io *io); +h2_io *h2_io_create(int id, apr_pool_t *pool, + apr_bucket_alloc_t *bucket_alloc, + const struct h2_request *request); /** * Set the response of this stream. @@ -89,11 +94,9 @@ void h2_io_set_response(h2_io *io, struct h2_response *response); */ void h2_io_rst(h2_io *io, int error); -/** - * The input data is completely queued. Blocked reads will return immediately - * and give either data or EOF. - */ -int h2_io_in_has_eos_for(h2_io *io); +int h2_io_is_repeatable(h2_io *io); +void h2_io_redo(h2_io *io); + /** * Output data is available. */ @@ -120,7 +123,7 @@ apr_status_t h2_io_in_read(h2_io *io, apr_bucket_brigade *bb, /** * Appends given bucket to the input. */ -apr_status_t h2_io_in_write(h2_io *io, apr_bucket_brigade *bb); +apr_status_t h2_io_in_write(h2_io *io, const char *d, apr_size_t len, int eos); /** * Closes the input. After existing data has been read, APR_EOF will @@ -148,23 +151,19 @@ apr_status_t h2_io_in_shutdown(h2_io *io); * @param plen the requested max len, set to amount of data on return * @param peos != 0 iff the end of stream has been reached */ -apr_status_t h2_io_out_readx(h2_io *io, - h2_io_data_cb *cb, void *ctx, - apr_off_t *plen, int *peos); - -apr_status_t h2_io_out_read_to(h2_io *io, - apr_bucket_brigade *bb, - apr_off_t *plen, int *peos); +apr_status_t h2_io_out_get_brigade(h2_io *io, + apr_bucket_brigade *bb, + apr_off_t len); apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb, - apr_size_t maxlen, apr_table_t *trailers, + apr_size_t maxlen, apr_size_t *pfile_buckets_allowed); /** * Closes the input. After existing data has been read, APR_EOF will * be returned. */ -apr_status_t h2_io_out_close(h2_io *io, apr_table_t *trailers); +apr_status_t h2_io_out_close(h2_io *io); /** * Gives the overall length of the data that is currently queued for diff --git a/mod_http2/h2_io_set.c b/mod_http2/h2_io_set.c index 2bb6e694..e0949795 100644 --- a/mod_http2/h2_io_set.c +++ b/mod_http2/h2_io_set.c @@ -45,16 +45,6 @@ h2_io_set *h2_io_set_create(apr_pool_t *pool) return sp; } -void h2_io_set_destroy(h2_io_set *sp) -{ - int i; - for (i = 0; i < sp->list->nelts; ++i) { - h2_io *io = h2_io_IDX(sp->list, i); - h2_io_destroy(io); - } - sp->list->nelts = 0; -} - static int h2_stream_id_cmp(const void *s1, const void *s2) { h2_io **pio1 = (h2_io **)s1; @@ -91,7 +81,7 @@ apr_status_t h2_io_set_add(h2_io_set *sp, h2_io *io) int last; APR_ARRAY_PUSH(sp->list, h2_io*) = io; /* Normally, streams get added in ascending order if id. 
We - * keep the array sorted, so we just need to check of the newly + * keep the array sorted, so we just need to check if the newly * appended stream has a lower id than the last one. if not, * sorting is not necessary. */ @@ -111,9 +101,7 @@ static void remove_idx(h2_io_set *sp, int idx) --sp->list->nelts; n = sp->list->nelts - idx; if (n > 0) { - /* Close the hole in the array by moving the upper - * parts down one step. - */ + /* There are n h2_io* behind idx. Move the rest down */ h2_io **selts = (h2_io**)sp->list->elts; memmove(selts + idx, selts + idx + 1, n * sizeof(h2_io*)); } @@ -124,7 +112,7 @@ h2_io *h2_io_set_remove(h2_io_set *sp, h2_io *io) int i; for (i = 0; i < sp->list->nelts; ++i) { h2_io *e = h2_io_IDX(sp->list, i); - if (e == io) { + if (e->id == io->id) { remove_idx(sp, i); return e; } @@ -132,7 +120,7 @@ h2_io *h2_io_set_remove(h2_io_set *sp, h2_io *io) return NULL; } -h2_io *h2_io_set_pop_highest_prio(h2_io_set *set) +h2_io *h2_io_set_shift(h2_io_set *set) { /* For now, this just removes the first element in the set. * the name is misleading... diff --git a/mod_http2/h2_io_set.h b/mod_http2/h2_io_set.h index 04ff8702..936e7252 100644 --- a/mod_http2/h2_io_set.h +++ b/mod_http2/h2_io_set.h @@ -26,8 +26,6 @@ typedef struct h2_io_set h2_io_set; h2_io_set *h2_io_set_create(apr_pool_t *pool); -void h2_io_set_destroy(h2_io_set *set); - apr_status_t h2_io_set_add(h2_io_set *set, struct h2_io *io); h2_io *h2_io_set_get(h2_io_set *set, int stream_id); h2_io *h2_io_set_remove(h2_io_set *set, struct h2_io *io); @@ -48,9 +46,8 @@ typedef int h2_io_set_iter_fn(void *ctx, struct h2_io *io); * @param ctx user data for the callback * @return 1 iff iteration completed for all members */ -int h2_io_set_iter(h2_io_set *set, - h2_io_set_iter_fn *iter, void *ctx); +int h2_io_set_iter(h2_io_set *set, h2_io_set_iter_fn *iter, void *ctx); -h2_io *h2_io_set_pop_highest_prio(h2_io_set *set); +h2_io *h2_io_set_shift(h2_io_set *set); #endif /* defined(__mod_h2__h2_io_set__) */ diff --git a/mod_http2/h2_mplx.c b/mod_http2/h2_mplx.c index c508b5a8..a4dbf1f4 100644 --- a/mod_http2/h2_mplx.c +++ b/mod_http2/h2_mplx.c @@ -17,7 +17,6 @@ #include #include -#include #include #include #include @@ -27,21 +26,24 @@ #include #include +#include "mod_http2.h" + #include "h2_private.h" #include "h2_config.h" #include "h2_conn.h" +#include "h2_ctx.h" #include "h2_h2.h" +#include "h2_int_queue.h" #include "h2_io.h" #include "h2_io_set.h" #include "h2_response.h" #include "h2_mplx.h" +#include "h2_ngn_shed.h" #include "h2_request.h" #include "h2_stream.h" -#include "h2_stream_set.h" #include "h2_task.h" #include "h2_task_input.h" #include "h2_task_output.h" -#include "h2_task_queue.h" #include "h2_worker.h" #include "h2_workers.h" #include "h2_util.h" @@ -142,18 +144,7 @@ static void h2_mplx_destroy(h2_mplx *m) ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, "h2_mplx(%ld): destroy, ios=%d", m->id, (int)h2_io_set_size(m->stream_ios)); - m->aborted = 1; - if (m->ready_ios) { - h2_io_set_destroy(m->ready_ios); - m->ready_ios = NULL; - } - if (m->stream_ios) { - h2_io_set_destroy(m->stream_ios); - m->stream_ios = NULL; - } - check_tx_free(m); - if (m->pool) { apr_pool_destroy(m->pool); } @@ -171,7 +162,7 @@ static void h2_mplx_destroy(h2_mplx *m) * than protecting a shared h2_session one with an own lock. 
*/ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, - const h2_config *conf, + const h2_config *conf, apr_interval_time_t stream_timeout, h2_workers *workers) { @@ -194,6 +185,7 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, if (!m->pool) { return NULL; } + apr_pool_tag(m->pool, "h2_mplx"); apr_allocator_owner_set(allocator, m->pool); status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT, @@ -203,36 +195,54 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, return NULL; } - m->q = h2_tq_create(m->pool, h2_config_geti(conf, H2_CONF_MAX_STREAMS)); + status = apr_thread_cond_create(&m->task_thawed, m->pool); + if (status != APR_SUCCESS) { + h2_mplx_destroy(m); + return NULL; + } + + m->bucket_alloc = apr_bucket_alloc_create(m->pool); + m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS); + m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM); + m->q = h2_iq_create(m->pool, m->max_streams); m->stream_ios = h2_io_set_create(m->pool); m->ready_ios = h2_io_set_create(m->pool); - m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM); m->stream_timeout = stream_timeout; m->workers = workers; + m->workers_max = workers->max_workers; + m->workers_def_limit = 4; + m->workers_limit = m->workers_def_limit; + m->last_limit_change = m->last_idle_block = apr_time_now(); + m->limit_change_interval = apr_time_from_msec(200); m->tx_handles_reserved = 0; m->tx_chunk_size = 4; + + m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*)); + + m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams, + m->stream_max_mem); + h2_ngn_shed_set_ctx(m->ngn_shed , m); } return m; } -int h2_mplx_get_max_stream_started(h2_mplx *m) +apr_uint32_t h2_mplx_shutdown(h2_mplx *m) { - int stream_id = 0; - int acquired; + int acquired, max_stream_started = 0; - enter_mutex(m, &acquired); - stream_id = m->max_stream_started; - leave_mutex(m, acquired); - - return stream_id; + if (enter_mutex(m, &acquired) == APR_SUCCESS) { + max_stream_started = m->max_stream_started; + /* Clear schedule queue, disabling existing streams from starting */ + h2_iq_clear(m->q); + leave_mutex(m, acquired); + } + return max_stream_started; } static void workers_register(h2_mplx *m) { - /* Initially, there was ref count increase for this as well, but - * this is not needed, even harmful. - * h2_workers is only a hub for all the h2_worker instances. + /* h2_workers is only a hub for all the h2_worker instances. * At the end-of-life of this h2_mplx, we always unregister at * the workers. The thing to manage are all the h2_worker instances * out there. Those may hold a reference to this h2_mplx and we cannot @@ -241,10 +251,11 @@ static void workers_register(h2_mplx *m) * Therefore: ref counting for h2_workers in not needed, ref counting * for h2_worker using this is critical. 
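h2_mplx_create() above sets the multiplexer pool up with its own allocator (note the apr_allocator_owner_set() call), tags it, and creates the mutex and condition that guard it across worker threads. The core of that setup as a reduced sketch (not part of the diff); names are illustrative and error handling is shortened:

#include <apr_pools.h>
#include <apr_allocator.h>
#include <apr_thread_mutex.h>

static apr_status_t example_create_locked_pool(apr_pool_t *parent,
                                               apr_pool_t **ppool,
                                               apr_thread_mutex_t **plock)
{
    apr_allocator_t *allocator = NULL;
    apr_pool_t *pool = NULL;
    apr_status_t rv;

    rv = apr_allocator_create(&allocator);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    /* a private allocator: allocations on this pool do not go through
     * the parent pool's allocator */
    rv = apr_pool_create_ex(&pool, parent, NULL, allocator);
    if (rv != APR_SUCCESS) {
        apr_allocator_destroy(allocator);
        return rv;
    }
    apr_allocator_owner_set(allocator, pool);
    apr_pool_tag(pool, "example_mplx");

    rv = apr_thread_mutex_create(plock, APR_THREAD_MUTEX_DEFAULT, pool);
    if (rv != APR_SUCCESS) {
        apr_pool_destroy(pool);
        return rv;
    }
    *ppool = pool;
    return APR_SUCCESS;
}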
*/ + m->need_registration = 0; h2_workers_register(m->workers, m); } -static int io_process_events(h2_mplx *m, h2_io *io) +static int io_in_consumed_signal(h2_mplx *m, h2_io *io) { if (io->input_consumed && m->input_consumed) { m->input_consumed(m->input_consumed_ctx, @@ -255,18 +266,28 @@ static int io_process_events(h2_mplx *m, h2_io *io) return 0; } +static int io_out_consumed_signal(h2_mplx *m, h2_io *io) +{ + if (io->output_consumed && io->task && io->task->assigned) { + h2_req_engine_out_consumed(io->task->assigned, io->task->c, + io->output_consumed); + io->output_consumed = 0; + return 1; + } + return 0; +} + static void io_destroy(h2_mplx *m, h2_io *io, int events) { - apr_pool_t *pool = io->pool; + int reuse_slave; /* cleanup any buffered input */ h2_io_in_shutdown(io); if (events) { /* Process outstanding events before destruction */ - io_process_events(m, io); + io_in_consumed_signal(m, io); } - io->pool = NULL; /* The pool is cleared/destroyed which also closes all * allocated file handles. Give this count back to our * file handle pool. */ @@ -274,14 +295,30 @@ static void io_destroy(h2_mplx *m, h2_io *io, int events) h2_io_set_remove(m->stream_ios, io); h2_io_set_remove(m->ready_ios, io); - h2_io_destroy(io); - - if (pool) { - apr_pool_clear(pool); - if (m->spare_pool) { - apr_pool_destroy(m->spare_pool); + if (m->redo_ios) { + h2_io_set_remove(m->redo_ios, io); + } + + reuse_slave = ((m->spare_slaves->nelts < m->spare_slaves->nalloc) + && !io->rst_error && io->eor); + if (io->task) { + conn_rec *slave = io->task->c; + h2_task_destroy(io->task); + io->task = NULL; + + if (reuse_slave && slave->keepalive == AP_CONN_KEEPALIVE) { + apr_bucket_delete(io->eor); + io->eor = NULL; + APR_ARRAY_PUSH(m->spare_slaves, conn_rec*) = slave; } - m->spare_pool = pool; + else { + slave->sbh = NULL; + h2_slave_destroy(slave, NULL); + } + } + + if (io->pool) { + apr_pool_destroy(io->pool); } check_tx_free(m); @@ -293,7 +330,7 @@ static int io_stream_done(h2_mplx *m, h2_io *io, int rst_error) h2_io_set_remove(m->ready_ios, io); if (!io->worker_started || io->worker_done) { /* already finished or not even started yet */ - h2_tq_remove(m->q, io->id); + h2_iq_remove(m->q, io->id); io_destroy(m, io, 1); return 0; } @@ -309,27 +346,64 @@ static int stream_done_iter(void *ctx, h2_io *io) return io_stream_done((h2_mplx*)ctx, io, 0); } +static int stream_print(void *ctx, h2_io *io) +{ + h2_mplx *m = ctx; + if (io && io->request) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */ + "->03198: h2_stream(%ld-%d): %s %s %s -> %s %d" + "[orph=%d/started=%d/done=%d/eos_in=%d/eos_out=%d]", + m->id, io->id, + io->request->method, io->request->authority, io->request->path, + io->response? "http" : (io->rst_error? "reset" : "?"), + io->response? io->response->http_status : io->rst_error, + io->orphaned, io->worker_started, io->worker_done, + io->eos_in, io->eos_out); + } + else if (io) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */ + "->03198: h2_stream(%ld-%d): NULL -> %s %d" + "[orph=%d/started=%d/done=%d/eos_in=%d/eos_out=%d]", + m->id, io->id, + io->response? "http" : (io->rst_error? "reset" : "?"), + io->response? 
io->response->http_status : io->rst_error, + io->orphaned, io->worker_started, io->worker_done, + io->eos_in, io->eos_out); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */ + "->03198: h2_stream(%ld-NULL): NULL", m->id); + } + return 1; +} + apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) { apr_status_t status; int acquired; h2_workers_unregister(m->workers, m); + if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) { int i, wait_secs = 5; /* disable WINDOW_UPDATE callbacks */ h2_mplx_set_consumed_cb(m, NULL, NULL); + h2_iq_clear(m->q); + apr_thread_cond_broadcast(m->task_thawed); while (!h2_io_set_iter(m->stream_ios, stream_done_iter, m)) { /* iterate until all ios have been orphaned or destroyed */ } - /* Any remaining ios have handed out requests to workers that are - * not done yet. Any operation they do on their assigned stream ios will - * be errored ECONNRESET/ABORTED, so that should find out pretty soon. + /* If we still have busy workers, we cannot release our memory + * pool yet, as slave connections have child pools of their respective + * h2_io's. + * Any remaining ios are processed in these workers. Any operation + * they do on their input/outputs will be errored ECONNRESET/ABORTED, + * so processing them should fail and workers *should* return. */ - for (i = 0; h2_io_set_size(m->stream_ios) > 0; ++i) { + for (i = 0; m->workers_busy > 0; ++i) { m->join_wait = wait; ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, "h2_mplx(%ld): release_join, waiting on %d worker to report back", @@ -343,14 +417,26 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) * If we exit now, all will be deallocated and the worker, once * it does return, will walk all over freed memory... */ - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03198) "h2_mplx(%ld): release, waiting for %d seconds now for " - "all h2_workers to return, have still %d requests outstanding", - m->id, i*wait_secs, (int)h2_io_set_size(m->stream_ios)); + "%d h2_workers to return, have still %d requests outstanding", + m->id, i*wait_secs, m->workers_busy, + (int)h2_io_set_size(m->stream_ios)); + if (i == 1) { + h2_io_set_iter(m->stream_ios, stream_print, m); + } } + h2_mplx_abort(m); + apr_thread_cond_broadcast(m->task_thawed); } } - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, + + if (!h2_io_set_is_empty(m->stream_ios)) { + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c, + "h2_mplx(%ld): release_join, %d streams still open", + m->id, (int)h2_io_set_size(m->stream_ios)); + } + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03056) "h2_mplx(%ld): release_join -> destroy", m->id); leave_mutex(m, acquired); h2_mplx_destroy(m); @@ -361,22 +447,19 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) void h2_mplx_abort(h2_mplx *m) { - apr_status_t status; int acquired; AP_DEBUG_ASSERT(m); - if (!m->aborted) { - if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) { - m->aborted = 1; - leave_mutex(m, acquired); - } + if (!m->aborted && enter_mutex(m, &acquired) == APR_SUCCESS) { + m->aborted = 1; + h2_ngn_shed_abort(m->ngn_shed); + leave_mutex(m, acquired); } } apr_status_t h2_mplx_stream_done(h2_mplx *m, int stream_id, int rst_error) { apr_status_t status = APR_SUCCESS; - apr_thread_mutex_t *holding; int acquired; /* This maybe called from inside callbacks that already hold the lock. 
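Nearly every entry point in this file follows the enter_mutex(m, &acquired) ... leave_mutex(m, acquired) pattern, remembering whether it actually took the lock so the same code can also run from callbacks that already hold it, as the comment above notes. One way such a re-entrancy guard can be built, shown as a sketch (not part of the diff); the helper bodies and the thread-private flag are assumptions, only the calling pattern comes from the patch:

#include <apr_pools.h>
#include <apr_thread_mutex.h>
#include <apr_thread_proc.h>

typedef struct {
    apr_thread_mutex_t *lock;
    apr_threadkey_t *holds_lock;   /* thread-private: == m while this thread holds it */
} example_mplx;

static apr_status_t example_mplx_init(example_mplx *m, apr_pool_t *pool)
{
    apr_status_t rv = apr_thread_mutex_create(&m->lock,
                                              APR_THREAD_MUTEX_DEFAULT, pool);
    if (rv == APR_SUCCESS) {
        rv = apr_threadkey_private_create(&m->holds_lock, NULL, pool);
    }
    return rv;
}

static apr_status_t example_enter(example_mplx *m, int *pacquired)
{
    void *held = NULL;
    apr_status_t rv;

    apr_threadkey_private_get(&held, m->holds_lock);
    if (held == m) {
        *pacquired = 0;            /* already locked by this thread */
        return APR_SUCCESS;
    }
    rv = apr_thread_mutex_lock(m->lock);
    if (rv == APR_SUCCESS) {
        apr_threadkey_private_set(m, m->holds_lock);
        *pacquired = 1;
    }
    return rv;
}

static void example_leave(example_mplx *m, int acquired)
{
    if (acquired) {
        apr_threadkey_private_set(NULL, m->holds_lock);
        apr_thread_mutex_unlock(m->lock);
    }
}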
@@ -391,66 +474,16 @@ apr_status_t h2_mplx_stream_done(h2_mplx *m, int stream_id, int rst_error) * for processing, e.g. when we received all HEADERs. But when * a stream is cancelled very early, it will not exist. */ if (io) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, + "h2_mplx(%ld-%d): marking stream as done.", + m->id, stream_id); io_stream_done(m, io, rst_error); } - leave_mutex(m, acquired); } return status; } -static const h2_request *pop_request(h2_mplx *m) -{ - const h2_request *req = NULL; - int sid; - while (!m->aborted && !req && (sid = h2_tq_shift(m->q)) > 0) { - h2_io *io = h2_io_set_get(m->stream_ios, sid); - if (io) { - req = io->request; - io->worker_started = 1; - if (sid > m->max_stream_started) { - m->max_stream_started = sid; - } - } - } - return req; -} - -void h2_mplx_request_done(h2_mplx **pm, int stream_id, const h2_request **preq) -{ - h2_mplx *m = *pm; - int acquired; - - if (enter_mutex(m, &acquired) == APR_SUCCESS) { - h2_io *io = h2_io_set_get(m->stream_ios, stream_id); - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, - "h2_mplx(%ld): request(%d) done", m->id, stream_id); - if (io) { - io->worker_done = 1; - if (io->orphaned) { - io_destroy(m, io, 0); - if (m->join_wait) { - apr_thread_cond_signal(m->join_wait); - } - } - else { - /* hang around until the stream deregisteres */ - } - } - - if (preq) { - /* someone wants another request, if we have */ - *preq = pop_request(m); - } - if (!preq || !*preq) { - /* No request to hand back to the worker, NULLify reference - * and decrement count */ - *pm = NULL; - } - leave_mutex(m, acquired); - } -} - apr_status_t h2_mplx_in_read(h2_mplx *m, apr_read_type_e block, int stream_id, apr_bucket_brigade *bb, apr_table_t *trailers, @@ -490,7 +523,7 @@ apr_status_t h2_mplx_in_read(h2_mplx *m, apr_read_type_e block, } apr_status_t h2_mplx_in_write(h2_mplx *m, int stream_id, - apr_bucket_brigade *bb) + const char *data, apr_size_t len, int eos) { apr_status_t status; int acquired; @@ -500,10 +533,10 @@ apr_status_t h2_mplx_in_write(h2_mplx *m, int stream_id, h2_io *io = h2_io_set_get(m->stream_ios, stream_id); if (io && !io->orphaned) { H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_write_pre"); - status = h2_io_in_write(io, bb); + status = h2_io_in_write(io, data, len, eos); H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_write_post"); h2_io_signal(io, H2_IO_READ); - io_process_events(m, io); + io_in_consumed_signal(m, io); } else { status = APR_ECONNABORTED; @@ -525,7 +558,7 @@ apr_status_t h2_mplx_in_close(h2_mplx *m, int stream_id) status = h2_io_in_close(io); H2_MPLX_IO_IN(APLOG_TRACE2, m, io, "h2_mplx_in_close"); h2_io_signal(io, H2_IO_READ); - io_process_events(m, io); + io_in_consumed_signal(m, io); } else { status = APR_ECONNABORTED; @@ -535,6 +568,12 @@ apr_status_t h2_mplx_in_close(h2_mplx *m, int stream_id) return status; } +void h2_mplx_set_consumed_cb(h2_mplx *m, h2_mplx_consumed_cb *cb, void *ctx) +{ + m->input_consumed = cb; + m->input_consumed_ctx = ctx; +} + typedef struct { h2_mplx * m; int streams_updated; @@ -543,18 +582,12 @@ typedef struct { static int update_window(void *ctx, h2_io *io) { update_ctx *uctx = (update_ctx*)ctx; - if (io_process_events(uctx->m, io)) { + if (io_in_consumed_signal(uctx->m, io)) { ++uctx->streams_updated; } return 1; } -void h2_mplx_set_consumed_cb(h2_mplx *m, h2_mplx_consumed_cb *cb, void *ctx) -{ - m->input_consumed = cb; - m->input_consumed_ctx = ctx; -} - apr_status_t h2_mplx_in_update_windows(h2_mplx *m) { apr_status_t status; @@ -581,40 +614,9 @@ apr_status_t 
h2_mplx_in_update_windows(h2_mplx *m) return status; } -apr_status_t h2_mplx_out_readx(h2_mplx *m, int stream_id, - h2_io_data_cb *cb, void *ctx, - apr_off_t *plen, int *peos, - apr_table_t **ptrailers) -{ - apr_status_t status; - int acquired; - - AP_DEBUG_ASSERT(m); - if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) { - h2_io *io = h2_io_set_get(m->stream_ios, stream_id); - if (io && !io->orphaned) { - H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_readx_pre"); - - status = h2_io_out_readx(io, cb, ctx, plen, peos); - H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_readx_post"); - if (status == APR_SUCCESS && cb) { - h2_io_signal(io, H2_IO_WRITE); - } - } - else { - status = APR_ECONNABORTED; - } - - *ptrailers = (*peos && io->response)? io->response->trailers : NULL; - leave_mutex(m, acquired); - } - return status; -} - -apr_status_t h2_mplx_out_read_to(h2_mplx *m, int stream_id, - apr_bucket_brigade *bb, - apr_off_t *plen, int *peos, - apr_table_t **ptrailers) +apr_status_t h2_mplx_out_get_brigade(h2_mplx *m, int stream_id, + apr_bucket_brigade *bb, + apr_off_t len, apr_table_t **ptrailers) { apr_status_t status; int acquired; @@ -623,11 +625,11 @@ apr_status_t h2_mplx_out_read_to(h2_mplx *m, int stream_id, if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) { h2_io *io = h2_io_set_get(m->stream_ios, stream_id); if (io && !io->orphaned) { - H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_read_to_pre"); + H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_get_brigade_pre"); - status = h2_io_out_read_to(io, bb, plen, peos); + status = h2_io_out_get_brigade(io, bb, len); - H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_read_to_post"); + H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_get_brigade_post"); if (status == APR_SUCCESS) { h2_io_signal(io, H2_IO_WRITE); } @@ -635,13 +637,13 @@ apr_status_t h2_mplx_out_read_to(h2_mplx *m, int stream_id, else { status = APR_ECONNABORTED; } - *ptrailers = (*peos && io->response)? io->response->trailers : NULL; + *ptrailers = io->response? io->response->trailers : NULL; leave_mutex(m, acquired); } return status; } -h2_stream *h2_mplx_next_submit(h2_mplx *m, h2_stream_set *streams) +h2_stream *h2_mplx_next_submit(h2_mplx *m, h2_ihash_t *streams) { apr_status_t status; h2_stream *stream = NULL; @@ -649,10 +651,11 @@ h2_stream *h2_mplx_next_submit(h2_mplx *m, h2_stream_set *streams) AP_DEBUG_ASSERT(m); if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) { - h2_io *io = h2_io_set_pop_highest_prio(m->ready_ios); + h2_io *io = h2_io_set_shift(m->ready_ios); if (io && !m->aborted) { - stream = h2_stream_set_get(streams, io->id); + stream = h2_ihash_get(streams, io->id); if (stream) { + io->submitted = 1; if (io->rst_error) { h2_stream_rst(stream, io->rst_error); } @@ -668,7 +671,7 @@ h2_stream *h2_mplx_next_submit(h2_mplx *m, h2_stream_set *streams) * reset by the client. Should no longer happen since such * streams should clear io's from the ready queue. */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03347) "h2_mplx(%ld): stream for response %d closed, " "resetting io to close request processing", m->id, io->id); @@ -681,7 +684,7 @@ h2_stream *h2_mplx_next_submit(h2_mplx *m, h2_stream_set *streams) * shutdown input and send out any events (e.g. window * updates) asap. 
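h2_mplx_in_update_windows() above walks the stream ios and fires the consumed callback registered with h2_mplx_set_consumed_cb(), which is how read-ahead request data turns into WINDOW_UPDATE frames (the shutdown path above disables exactly these callbacks). A sketch (not part of the diff) of what such a callback can do on the session side; the callback signature and the use of nghttp2_session_consume() are illustrative assumptions, not taken from this patch:

#include <apr.h>
#include <nghttp2/nghttp2.h>

/* hypothetical callback, registered via h2_mplx_set_consumed_cb() */
static void example_on_input_consumed(void *ctx, int stream_id,
                                      apr_off_t consumed)
{
    nghttp2_session *ngh2 = ctx;

    /* report the bytes as processed so nghttp2 emits WINDOW_UPDATE frames
     * for the stream and the connection; assumes the session runs with
     * automatic window updates turned off */
    nghttp2_session_consume(ngh2, stream_id, (size_t)consumed);
}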
*/ h2_io_in_shutdown(io); - io_process_events(m, io); + io_in_consumed_signal(m, io); } } @@ -693,8 +696,8 @@ h2_stream *h2_mplx_next_submit(h2_mplx *m, h2_stream_set *streams) } static apr_status_t out_write(h2_mplx *m, h2_io *io, - ap_filter_t* f, apr_bucket_brigade *bb, - apr_table_t *trailers, + ap_filter_t* f, int blocking, + apr_bucket_brigade *bb, struct apr_thread_cond_t *iowait) { apr_status_t status = APR_SUCCESS; @@ -707,8 +710,10 @@ static apr_status_t out_write(h2_mplx *m, h2_io *io, && !APR_BRIGADE_EMPTY(bb) && !is_aborted(m, &status)) { - status = h2_io_out_write(io, bb, m->stream_max_mem, trailers, + status = h2_io_out_write(io, bb, blocking? m->stream_max_mem : INT_MAX, &m->tx_handles_reserved); + io_out_consumed_signal(m, io); + /* Wait for data to drain until there is room again or * stream timeout expires */ h2_io_signal_init(io, H2_IO_WRITE, m->stream_timeout, iowait); @@ -717,7 +722,12 @@ static apr_status_t out_write(h2_mplx *m, h2_io *io, && iowait && (m->stream_max_mem <= h2_io_out_length(io)) && !is_aborted(m, &status)) { - trailers = NULL; + if (!blocking) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, + "h2_mplx(%ld-%d): incomplete write", + m->id, io->id); + return APR_INCOMPLETE; + } if (f) { ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, "h2_mplx(%ld-%d): waiting for out drain", @@ -755,7 +765,12 @@ static apr_status_t out_open(h2_mplx *m, int stream_id, h2_response *response, check_tx_reservation(m); } if (bb) { - status = out_write(m, io, f, bb, response->trailers, iowait); + status = out_write(m, io, f, 0, bb, iowait); + if (status == APR_INCOMPLETE) { + /* write will have transferred as much data as possible. + caller has to deal with non-empty brigade */ + status = APR_SUCCESS; + } } have_out_data_for(m, stream_id); } @@ -789,8 +804,8 @@ apr_status_t h2_mplx_out_open(h2_mplx *m, int stream_id, h2_response *response, } apr_status_t h2_mplx_out_write(h2_mplx *m, int stream_id, - ap_filter_t* f, apr_bucket_brigade *bb, - apr_table_t *trailers, + ap_filter_t* f, int blocking, + apr_bucket_brigade *bb, struct apr_thread_cond_t *iowait) { apr_status_t status; @@ -800,10 +815,9 @@ apr_status_t h2_mplx_out_write(h2_mplx *m, int stream_id, if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) { h2_io *io = h2_io_set_get(m->stream_ios, stream_id); if (io && !io->orphaned) { - status = out_write(m, io, f, bb, trailers, iowait); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c, - "h2_mplx(%ld-%d): write with trailers=%s", - m->id, io->id, trailers? 
"yes" : "no"); + status = out_write(m, io, f, blocking, bb, iowait); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c, + "h2_mplx(%ld-%d): write", m->id, io->id); H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_write"); have_out_data_for(m, stream_id); @@ -816,7 +830,7 @@ apr_status_t h2_mplx_out_write(h2_mplx *m, int stream_id, return status; } -apr_status_t h2_mplx_out_close(h2_mplx *m, int stream_id, apr_table_t *trailers) +apr_status_t h2_mplx_out_close(h2_mplx *m, int stream_id) { apr_status_t status; int acquired; @@ -833,15 +847,16 @@ apr_status_t h2_mplx_out_close(h2_mplx *m, int stream_id, apr_table_t *trailers) h2_response *r = h2_response_die(stream_id, APR_EGENERAL, io->request, m->pool); status = out_open(m, stream_id, r, NULL, NULL, NULL); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, m->c, "h2_mplx(%ld-%d): close, no response, no rst", m->id, io->id); } - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c, - "h2_mplx(%ld-%d): close with trailers=%s", - m->id, io->id, trailers? "yes" : "no"); - status = h2_io_out_close(io, trailers); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, m->c, + "h2_mplx(%ld-%d): close with eor=%s", + m->id, io->id, io->eor? "yes" : "no"); + status = h2_io_out_close(io); H2_MPLX_IO_OUT(APLOG_TRACE2, m, io, "h2_mplx_out_close"); + io_out_consumed_signal(m, io); have_out_data_for(m, stream_id); } @@ -879,26 +894,6 @@ apr_status_t h2_mplx_out_rst(h2_mplx *m, int stream_id, int error) return status; } -int h2_mplx_in_has_eos_for(h2_mplx *m, int stream_id) -{ - int has_eos = 0; - int acquired; - - apr_status_t status; - AP_DEBUG_ASSERT(m); - if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) { - h2_io *io = h2_io_set_get(m->stream_ios, stream_id); - if (io && !io->orphaned) { - has_eos = h2_io_in_has_eos_for(io); - } - else { - has_eos = 1; - } - leave_mutex(m, acquired); - } - return has_eos; -} - int h2_mplx_out_has_data_for(h2_mplx *m, int stream_id) { apr_status_t status; @@ -965,8 +960,7 @@ apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx) status = APR_ECONNABORTED; } else { - h2_tq_sort(m->q, cmp, ctx); - + h2_iq_sort(m->q, cmp, ctx); ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, "h2_mplx(%ld): reprioritize tasks", m->id); } @@ -975,30 +969,26 @@ apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx) return status; } -static h2_io *open_io(h2_mplx *m, int stream_id) +static h2_io *open_io(h2_mplx *m, int stream_id, const h2_request *request) { - apr_pool_t *io_pool = m->spare_pool; + apr_pool_t *io_pool; h2_io *io; - if (!io_pool) { - apr_pool_create(&io_pool, m->pool); - } - else { - m->spare_pool = NULL; - } - - io = h2_io_create(stream_id, io_pool); + apr_pool_create(&io_pool, m->pool); + apr_pool_tag(io_pool, "h2_io"); + io = h2_io_create(stream_id, io_pool, m->bucket_alloc, request); h2_io_set_add(m->stream_ios, io); return io; } -apr_status_t h2_mplx_process(h2_mplx *m, int stream_id, const h2_request *req, +apr_status_t h2_mplx_process(h2_mplx *m, int stream_id, + const h2_request *req, h2_stream_pri_cmp *cmp, void *ctx) { apr_status_t status; - int was_empty = 0; + int do_registration = 0; int acquired; AP_DEBUG_ASSERT(m); @@ -1007,15 +997,15 @@ apr_status_t h2_mplx_process(h2_mplx *m, int stream_id, const h2_request *req, status = APR_ECONNABORTED; } else { - h2_io *io = open_io(m, stream_id); - io->request = req; + h2_io *io = open_io(m, stream_id, req); if (!io->request->body) { status = 
h2_io_in_close(io); } - was_empty = h2_tq_empty(m->q); - h2_tq_add(m->q, io->id, cmp, ctx); + m->need_registration = m->need_registration || h2_iq_empty(m->q); + do_registration = (m->need_registration && m->workers_busy < m->workers_max); + h2_iq_add(m->q, io->id, cmp, ctx); ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c, "h2_mplx(%ld-%d): process", m->c->id, stream_id); @@ -1023,30 +1013,460 @@ apr_status_t h2_mplx_process(h2_mplx *m, int stream_id, const h2_request *req, } leave_mutex(m, acquired); } - if (status == APR_SUCCESS && was_empty) { + if (status == APR_SUCCESS && do_registration) { workers_register(m); } return status; } -const h2_request *h2_mplx_pop_request(h2_mplx *m, int *has_more) +static h2_task *pop_task(h2_mplx *m) +{ + h2_task *task = NULL; + int sid; + while (!m->aborted && !task + && (m->workers_busy < m->workers_limit) + && (sid = h2_iq_shift(m->q)) > 0) { + h2_io *io = h2_io_set_get(m->stream_ios, sid); + if (io && io->orphaned) { + io_destroy(m, io, 0); + if (m->join_wait) { + apr_thread_cond_signal(m->join_wait); + } + } + else if (io) { + conn_rec *slave, **pslave; + int new_conn = 0; + + pslave = (conn_rec **)apr_array_pop(m->spare_slaves); + if (pslave) { + slave = *pslave; + } + else { + slave = h2_slave_create(m->c, m->pool, NULL); + new_conn = 1; + } + + slave->sbh = m->c->sbh; + io->task = task = h2_task_create(m->id, io->request, slave, m); + m->c->keepalives++; + apr_table_setn(slave->notes, H2_TASK_ID_NOTE, task->id); + if (new_conn) { + h2_slave_run_pre_connection(slave, ap_get_conn_socket(slave)); + } + io->worker_started = 1; + io->started_at = apr_time_now(); + if (sid > m->max_stream_started) { + m->max_stream_started = sid; + } + ++m->workers_busy; + } + } + return task; +} + +h2_task *h2_mplx_pop_task(h2_mplx *m, int *has_more) { - const h2_request *req = NULL; + h2_task *task = NULL; apr_status_t status; int acquired; AP_DEBUG_ASSERT(m); if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) { if (m->aborted) { - req = NULL; *has_more = 0; } else { - req = pop_request(m); - *has_more = !h2_tq_empty(m->q); + task = pop_task(m); + *has_more = !h2_iq_empty(m->q); + } + + if (has_more && !task) { + m->need_registration = 1; + } + leave_mutex(m, acquired); + } + return task; +} + +static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn) +{ + if (task) { + h2_io *io = h2_io_set_get(m->stream_ios, task->stream_id); + + if (task->frozen) { + /* this task was handed over to an engine for processing + * and the original worker has finished. That means the + * engine may start processing now. */ + h2_task_thaw(task); + /* we do not want the task to block on writing response + * bodies into the mplx. */ + /* FIXME: this implementation is incomplete. */ + h2_task_set_io_blocking(task, 0); + apr_thread_cond_broadcast(m->task_thawed); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + "h2_mplx(%ld): task(%s) done", m->id, task->id); + /* clean our references and report request as done. Signal + * that we want another unless we have been aborted */ + /* TODO: this will keep a worker attached to this h2_mplx as + * long as it has requests to handle. Might no be fair to + * other mplx's. Perhaps leave after n requests? */ + h2_mplx_out_close(m, task->stream_id); + + if (ngn && io) { + apr_off_t bytes = io->output_consumed + h2_io_out_length(io); + if (bytes > 0) { + /* we need to report consumed and current buffered output + * to the engine. 
The request will be streamed out or cancelled, + * no more data is coming from it and the engine should update + * its calculations before we destroy this information. */ + h2_req_engine_out_consumed(ngn, task->c, bytes); + io->output_consumed = 0; + } + } + + if (task->engine) { + if (!h2_req_engine_is_shutdown(task->engine)) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, + "h2_mplx(%ld): task(%s) has not-shutdown " + "engine(%s)", m->id, task->id, + h2_req_engine_get_id(task->engine)); + } + h2_ngn_shed_done_ngn(m->ngn_shed, task->engine); + } + + if (io) { + apr_time_t now = apr_time_now(); + if (!io->orphaned && m->redo_ios + && h2_io_set_get(m->redo_ios, io->id)) { + /* reset and schedule again */ + h2_io_redo(io); + h2_io_set_remove(m->redo_ios, io); + h2_iq_add(m->q, io->id, NULL, NULL); + } + else { + io->worker_done = 1; + io->done_at = now; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + "h2_mplx(%ld): request(%d) done, %f ms" + " elapsed", m->id, io->id, + (io->done_at - io->started_at) / 1000.0); + if (io->started_at > m->last_idle_block) { + /* this task finished without causing an 'idle block', e.g. + * a block by flow control. + */ + if (now - m->last_limit_change >= m->limit_change_interval + && m->workers_limit < m->workers_max) { + /* Well behaving stream, allow it more workers */ + m->workers_limit = H2MIN(m->workers_limit * 2, + m->workers_max); + m->last_limit_change = now; + m->need_registration = 1; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + "h2_mplx(%ld): increase worker limit to %d", + m->id, m->workers_limit); + } + } + } + + if (io->orphaned) { + io_destroy(m, io, 0); + if (m->join_wait) { + apr_thread_cond_signal(m->join_wait); + } + } + else { + /* hang around until the stream deregisters */ + } + } + else { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, + "h2_mplx(%ld): task %s without corresp. 
h2_io", + m->id, task->id); + } + } + } +} + +void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask) +{ + int acquired; + + if (enter_mutex(m, &acquired) == APR_SUCCESS) { + task_done(m, task, NULL); + --m->workers_busy; + if (ptask) { + /* caller wants another task */ + *ptask = pop_task(m); + } + leave_mutex(m, acquired); + } +} + +/******************************************************************************* + * h2_mplx DoS protection + ******************************************************************************/ + +typedef struct { + h2_mplx *m; + h2_io *io; + apr_time_t now; +} io_iter_ctx; + +static int latest_repeatable_busy_unsubmitted_iter(void *data, h2_io *io) +{ + io_iter_ctx *ctx = data; + if (io->worker_started && !io->worker_done + && h2_io_is_repeatable(io) + && !h2_io_set_get(ctx->m->redo_ios, io->id)) { + /* this io occupies a worker, the response has not been submitted yet, + * not been cancelled and it is a repeatable request + * -> it can be re-scheduled later */ + if (!ctx->io || ctx->io->started_at < io->started_at) { + /* we did not have one or this one was started later */ + ctx->io = io; + } + } + return 1; +} + +static h2_io *get_latest_repeatable_busy_unsubmitted_io(h2_mplx *m) +{ + io_iter_ctx ctx; + ctx.m = m; + ctx.io = NULL; + h2_io_set_iter(m->stream_ios, latest_repeatable_busy_unsubmitted_iter, &ctx); + return ctx.io; +} + +static int timed_out_busy_iter(void *data, h2_io *io) +{ + io_iter_ctx *ctx = data; + if (io->worker_started && !io->worker_done + && (ctx->now - io->started_at) > ctx->m->stream_timeout) { + /* timed out stream occupying a worker, found */ + ctx->io = io; + return 0; + } + return 1; +} +static h2_io *get_timed_out_busy_stream(h2_mplx *m) +{ + io_iter_ctx ctx; + ctx.m = m; + ctx.io = NULL; + ctx.now = apr_time_now(); + h2_io_set_iter(m->stream_ios, timed_out_busy_iter, &ctx); + return ctx.io; +} + +static apr_status_t unschedule_slow_ios(h2_mplx *m) +{ + h2_io *io; + int n; + + if (!m->redo_ios) { + m->redo_ios = h2_io_set_create(m->pool); + } + /* Try to get rid of streams that occupy workers. Look for safe requests + * that are repeatable. If none found, fail the connection. + */ + n = (m->workers_busy - m->workers_limit - h2_io_set_size(m->redo_ios)); + while (n > 0 && (io = get_latest_repeatable_busy_unsubmitted_io(m))) { + h2_io_set_add(m->redo_ios, io); + h2_io_rst(io, H2_ERR_CANCEL); + --n; + } + + if ((m->workers_busy - h2_io_set_size(m->redo_ios)) > m->workers_limit) { + io = get_timed_out_busy_stream(m); + if (io) { + /* Too many busy workers, unable to cancel enough streams + * and with a busy, timed out stream, we tell the client + * to go away... */ + return APR_TIMEUP; + } + } + return APR_SUCCESS; +} + +apr_status_t h2_mplx_idle(h2_mplx *m) +{ + apr_status_t status = APR_SUCCESS; + apr_time_t now; + int acquired; + + if (enter_mutex(m, &acquired) == APR_SUCCESS) { + apr_size_t scount = h2_io_set_size(m->stream_ios); + if (scount > 0 && m->workers_busy) { + /* If we have streams in connection state 'IDLE', meaning + * all streams are ready to sent data out, but lack + * WINDOW_UPDATEs. + * + * This is ok, unless we have streams that still occupy + * h2 workers. As worker threads are a scarce resource, + * we need to take measures that we do not get DoSed. + * + * This is what we call an 'idle block'. Limit the amount + * of busy workers we allow for this connection until it + * well behaves. 
+ */ + now = apr_time_now(); + m->last_idle_block = now; + if (m->workers_limit > 2 + && now - m->last_limit_change >= m->limit_change_interval) { + if (m->workers_limit > 16) { + m->workers_limit = 16; + } + else if (m->workers_limit > 8) { + m->workers_limit = 8; + } + else if (m->workers_limit > 4) { + m->workers_limit = 4; + } + else if (m->workers_limit > 2) { + m->workers_limit = 2; + } + m->last_limit_change = now; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + "h2_mplx(%ld): decrease worker limit to %d", + m->id, m->workers_limit); + } + + if (m->workers_busy > m->workers_limit) { + status = unschedule_slow_ios(m); + } + } + leave_mutex(m, acquired); + } + return status; +} + +/******************************************************************************* + * HTTP/2 request engines + ******************************************************************************/ + +typedef struct { + h2_mplx * m; + h2_req_engine *ngn; + int streams_updated; +} ngn_update_ctx; + +static int ngn_update_window(void *ctx, h2_io *io) +{ + ngn_update_ctx *uctx = ctx; + if (io && io->task && io->task->assigned == uctx->ngn + && io_out_consumed_signal(uctx->m, io)) { + ++uctx->streams_updated; + } + return 1; +} + +static apr_status_t ngn_out_update_windows(h2_mplx *m, h2_req_engine *ngn) +{ + ngn_update_ctx ctx; + + ctx.m = m; + ctx.ngn = ngn; + ctx.streams_updated = 0; + h2_io_set_iter(m->stream_ios, ngn_update_window, &ctx); + + return ctx.streams_updated? APR_SUCCESS : APR_EAGAIN; +} + +apr_status_t h2_mplx_req_engine_push(const char *ngn_type, + request_rec *r, + http2_req_engine_init *einit) +{ + apr_status_t status; + h2_mplx *m; + h2_task *task; + int acquired; + + task = h2_ctx_rget_task(r); + if (!task) { + return APR_ECONNABORTED; + } + m = task->mplx; + task->r = r; + + if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) { + h2_io *io = h2_io_set_get(m->stream_ios, task->stream_id); + if (!io || io->orphaned) { + status = APR_ECONNABORTED; + } + else { + status = h2_ngn_shed_push_task(m->ngn_shed, ngn_type, task, einit); + } + leave_mutex(m, acquired); + } + return status; +} + +apr_status_t h2_mplx_req_engine_pull(h2_req_engine *ngn, + apr_read_type_e block, + apr_uint32_t capacity, + request_rec **pr) +{ + h2_ngn_shed *shed = h2_ngn_shed_get_shed(ngn); + h2_mplx *m = h2_ngn_shed_get_ctx(shed); + apr_status_t status; + h2_task *task = NULL; + int acquired; + + if ((status = enter_mutex(m, &acquired)) == APR_SUCCESS) { + int want_shutdown = (block == APR_BLOCK_READ); + + /* Take this opportunity to update output consummation + * for this engine */ + ngn_out_update_windows(m, ngn); + + if (want_shutdown && !h2_iq_empty(m->q)) { + /* For a blocking read, check first if requests are to be + * had and, if not, wait a short while before doing the + * blocking, and if unsuccessful, terminating read. + */ + status = h2_ngn_shed_pull_task(shed, ngn, capacity, 1, &task); + if (APR_STATUS_IS_EAGAIN(status)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + "h2_mplx(%ld): start block engine pull", m->id); + apr_thread_cond_timedwait(m->task_thawed, m->lock, + apr_time_from_msec(20)); + status = h2_ngn_shed_pull_task(shed, ngn, capacity, 1, &task); + } + } + else { + status = h2_ngn_shed_pull_task(shed, ngn, capacity, + want_shutdown, &task); } leave_mutex(m, acquired); } - return req; + *pr = task? 
task->r : NULL; + return status; } + +void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn) +{ + h2_task *task = h2_ctx_cget_task(r_conn); + + if (task) { + h2_mplx *m = task->mplx; + int acquired; + if (enter_mutex(m, &acquired) == APR_SUCCESS) { + ngn_out_update_windows(m, ngn); + h2_ngn_shed_done_task(m->ngn_shed, ngn, task); + if (task->engine) { + /* cannot report that as done until engine returns */ + } + else { + task_done(m, task, ngn); + } + /* Take this opportunity to update output consummation + * for this engine */ + leave_mutex(m, acquired); + } + } +} + diff --git a/mod_http2/h2_mplx.h b/mod_http2/h2_mplx.h index 024401db..40298476 100644 --- a/mod_http2/h2_mplx.h +++ b/mod_http2/h2_mplx.h @@ -38,6 +38,7 @@ struct apr_pool_t; struct apr_thread_mutex_t; struct apr_thread_cond_t; struct h2_config; +struct h2_ihash_t; struct h2_response; struct h2_task; struct h2_stream; @@ -45,9 +46,11 @@ struct h2_request; struct h2_io_set; struct apr_thread_cond_t; struct h2_workers; -struct h2_stream_set; -struct h2_task_queue; +struct h2_int_queue; +struct h2_ngn_shed; +struct h2_req_engine; +#include #include "h2_io.h" typedef struct h2_mplx h2_mplx; @@ -60,33 +63,49 @@ typedef void h2_mplx_consumed_cb(void *ctx, int stream_id, apr_off_t consumed); struct h2_mplx { long id; - APR_RING_ENTRY(h2_mplx) link; - volatile int refs; conn_rec *c; apr_pool_t *pool; + apr_bucket_alloc_t *bucket_alloc; + + APR_RING_ENTRY(h2_mplx) link; unsigned int aborted : 1; + unsigned int need_registration : 1; - struct h2_task_queue *q; + struct h2_int_queue *q; struct h2_io_set *stream_ios; struct h2_io_set *ready_ios; + struct h2_io_set *redo_ios; - int max_stream_started; /* highest stream id that started processing */ + apr_uint32_t max_streams; /* max # of concurrent streams */ + apr_uint32_t max_stream_started; /* highest stream id that started processing */ + apr_uint32_t workers_busy; /* # of workers processing on this mplx */ + apr_uint32_t workers_limit; /* current # of workers limit, dynamic */ + apr_uint32_t workers_def_limit; /* default # of workers limit */ + apr_uint32_t workers_max; /* max, hard limit # of workers in a process */ + apr_time_t last_idle_block; /* last time, this mplx entered IDLE while + * streams were ready */ + apr_time_t last_limit_change; /* last time, worker limit changed */ + apr_interval_time_t limit_change_interval; apr_thread_mutex_t *lock; struct apr_thread_cond_t *added_output; + struct apr_thread_cond_t *task_thawed; struct apr_thread_cond_t *join_wait; apr_size_t stream_max_mem; apr_interval_time_t stream_timeout; - apr_pool_t *spare_pool; /* spare pool, ready for next io */ + apr_array_header_t *spare_slaves; /* spare slave connections */ + struct h2_workers *workers; apr_size_t tx_handles_reserved; apr_size_t tx_chunk_size; h2_mplx_consumed_cb *input_consumed; void *input_consumed_ctx; + + struct h2_ngn_shed *ngn_shed; }; @@ -122,15 +141,16 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait */ void h2_mplx_abort(h2_mplx *mplx); -void h2_mplx_request_done(h2_mplx **pm, int stream_id, const struct h2_request **preq); +struct h2_task *h2_mplx_pop_task(h2_mplx *mplx, int *has_more); + +void h2_mplx_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask); /** - * Get the highest stream identifier that has been passed on to processing. - * Maybe 0 in case no stream has been processed yet. 
- * @param m the multiplexer - * @return highest stream identifier for which processing started + * Shut down the multiplexer gracefully. Will no longer schedule new streams + * but let the ongoing ones finish normally. + * @return the highest stream id being/been processed */ -int h2_mplx_get_max_stream_started(h2_mplx *m); +apr_uint32_t h2_mplx_shutdown(h2_mplx *m); /******************************************************************************* * IO lifetime of streams. @@ -146,7 +166,7 @@ int h2_mplx_get_max_stream_started(h2_mplx *m); */ apr_status_t h2_mplx_stream_done(h2_mplx *m, int stream_id, int rst_error); -/* Return != 0 iff the multiplexer has data for the given stream. +/* Return != 0 iff the multiplexer has output data for the given stream. */ int h2_mplx_out_has_data_for(h2_mplx *m, int stream_id); @@ -170,7 +190,8 @@ apr_status_t h2_mplx_out_trywait(h2_mplx *m, apr_interval_time_t timeout, * @param cmp the stream priority compare function * @param ctx context data for the compare function */ -apr_status_t h2_mplx_process(h2_mplx *m, int stream_id, const struct h2_request *r, +apr_status_t h2_mplx_process(h2_mplx *m, int stream_id, + const struct h2_request *r, h2_stream_pri_cmp *cmp, void *ctx); /** @@ -182,8 +203,6 @@ apr_status_t h2_mplx_process(h2_mplx *m, int stream_id, const struct h2_request */ apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx); -const struct h2_request *h2_mplx_pop_request(h2_mplx *mplx, int *has_more); - /** * Register a callback for the amount of input data consumed per stream. The * will only ever be invoked from the thread creating this h2_mplx, e.g. when @@ -215,20 +234,14 @@ apr_status_t h2_mplx_in_read(h2_mplx *m, apr_read_type_e block, * Appends data to the input of the given stream. Storage of input data is * not subject to flow control. */ -apr_status_t h2_mplx_in_write(h2_mplx *mplx, int stream_id, - apr_bucket_brigade *bb); +apr_status_t h2_mplx_in_write(h2_mplx *m, int stream_id, + const char *data, apr_size_t len, int eos); /** * Closes the input for the given stream_id. */ apr_status_t h2_mplx_in_close(h2_mplx *m, int stream_id); -/** - * Returns != 0 iff the input for the given stream has been closed. There - * could still be data queued, but it can be read without blocking. - */ -int h2_mplx_in_has_eos_for(h2_mplx *m, int stream_id); - /** * Invoke the consumed callback for all streams that had bytes read since the * last call to this function. If no stream had input data consumed, the @@ -251,25 +264,15 @@ apr_status_t h2_mplx_in_update_windows(h2_mplx *m); * @param bb the brigade to place any existing repsonse body data into */ struct h2_stream *h2_mplx_next_submit(h2_mplx *m, - struct h2_stream_set *streams); - -/** - * Reads output data from the given stream. Will never block, but - * return APR_EAGAIN until data arrives or the stream is closed. - */ -apr_status_t h2_mplx_out_readx(h2_mplx *mplx, int stream_id, - h2_io_data_cb *cb, void *ctx, - apr_off_t *plen, int *peos, - apr_table_t **ptrailers); + struct h2_ihash_t *streams); /** * Reads output data into the given brigade. Will never block, but * return APR_EAGAIN until data arrives or the stream is closed. 
*/ -apr_status_t h2_mplx_out_read_to(h2_mplx *mplx, int stream_id, - apr_bucket_brigade *bb, - apr_off_t *plen, int *peos, - apr_table_t **ptrailers); +apr_status_t h2_mplx_out_get_brigade(h2_mplx *mplx, int stream_id, + apr_bucket_brigade *bb, + apr_off_t len, apr_table_t **ptrailers); /** * Opens the output for the given stream with the specified response. @@ -284,20 +287,21 @@ apr_status_t h2_mplx_out_open(h2_mplx *mplx, int stream_id, * of bytes buffered reaches configured max. * @param stream_id the stream identifier * @param filter the apache filter context of the data + * @param blocking == 0 iff call should return with APR_INCOMPLETE if + * the full brigade cannot be written at once * @param bb the bucket brigade to append - * @param trailers optional trailers for response, maybe NULL * @param iowait a conditional used for block/signalling in h2_mplx */ apr_status_t h2_mplx_out_write(h2_mplx *mplx, int stream_id, - ap_filter_t* filter, apr_bucket_brigade *bb, - apr_table_t *trailers, + ap_filter_t* filter, + int blocking, + apr_bucket_brigade *bb, struct apr_thread_cond_t *iowait); /** - * Closes the output for stream stream_id. Optionally forwards trailers - * fromt the processed stream. + * Closes the output for stream stream_id. */ -apr_status_t h2_mplx_out_close(h2_mplx *m, int stream_id, apr_table_t *trailers); +apr_status_t h2_mplx_out_close(h2_mplx *m, int stream_id); apr_status_t h2_mplx_out_rst(h2_mplx *m, int stream_id, int error); @@ -372,5 +376,38 @@ APR_RING_INSERT_TAIL((b), ap__b, h2_mplx, link); \ */ #define H2_MPLX_REMOVE(e) APR_RING_REMOVE((e), link) +/******************************************************************************* + * h2_mplx DoS protection + ******************************************************************************/ + +/** + * Master connection has entered idle mode. + * @param m the mplx instance of the master connection + * @return != SUCCESS iff connection should be terminated + */ +apr_status_t h2_mplx_idle(h2_mplx *m); + +/******************************************************************************* + * h2_req_engine handling + ******************************************************************************/ + +typedef void h2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed); +typedef apr_status_t h2_mplx_req_engine_init(struct h2_req_engine *engine, + const char *id, + const char *type, + apr_pool_t *pool, + apr_uint32_t req_buffer_size, + request_rec *r, + h2_output_consumed **pconsumed, + void **pbaton); + +apr_status_t h2_mplx_req_engine_push(const char *ngn_type, + request_rec *r, + h2_mplx_req_engine_init *einit); +apr_status_t h2_mplx_req_engine_pull(struct h2_req_engine *ngn, + apr_read_type_e block, + apr_uint32_t capacity, + request_rec **pr); +void h2_mplx_req_engine_done(struct h2_req_engine *ngn, conn_rec *r_conn); #endif /* defined(__mod_h2__h2_mplx__) */ diff --git a/mod_http2/h2_ngn_shed.c b/mod_http2/h2_ngn_shed.c new file mode 100644 index 00000000..32483d93 --- /dev/null +++ b/mod_http2/h2_ngn_shed.c @@ -0,0 +1,352 @@ +/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include "mod_http2.h" + +#include "h2_private.h" +#include "h2_config.h" +#include "h2_conn.h" +#include "h2_ctx.h" +#include "h2_h2.h" +#include "h2_int_queue.h" +#include "h2_mplx.h" +#include "h2_response.h" +#include "h2_request.h" +#include "h2_task.h" +#include "h2_task_output.h" +#include "h2_util.h" +#include "h2_ngn_shed.h" + + +typedef struct h2_ngn_entry h2_ngn_entry; +struct h2_ngn_entry { + APR_RING_ENTRY(h2_ngn_entry) link; + h2_task *task; +}; + +#define H2_NGN_ENTRY_NEXT(e) APR_RING_NEXT((e), link) +#define H2_NGN_ENTRY_PREV(e) APR_RING_PREV((e), link) +#define H2_NGN_ENTRY_REMOVE(e) APR_RING_REMOVE((e), link) + +#define H2_REQ_ENTRIES_SENTINEL(b) APR_RING_SENTINEL((b), h2_ngn_entry, link) +#define H2_REQ_ENTRIES_EMPTY(b) APR_RING_EMPTY((b), h2_ngn_entry, link) +#define H2_REQ_ENTRIES_FIRST(b) APR_RING_FIRST(b) +#define H2_REQ_ENTRIES_LAST(b) APR_RING_LAST(b) + +#define H2_REQ_ENTRIES_INSERT_HEAD(b, e) do { \ +h2_ngn_entry *ap__b = (e); \ +APR_RING_INSERT_HEAD((b), ap__b, h2_ngn_entry, link); \ +} while (0) + +#define H2_REQ_ENTRIES_INSERT_TAIL(b, e) do { \ +h2_ngn_entry *ap__b = (e); \ +APR_RING_INSERT_TAIL((b), ap__b, h2_ngn_entry, link); \ +} while (0) + +struct h2_req_engine { + const char *id; /* identifier */ + const char *type; /* name of the engine type */ + apr_pool_t *pool; /* pool for engine specific allocations */ + conn_rec *c; /* connection this engine is assigned to */ + h2_task *task; /* the task this engine is base on, running in */ + h2_ngn_shed *shed; + + unsigned int shutdown : 1; /* engine is being shut down */ + unsigned int done : 1; /* engine has finished */ + + APR_RING_HEAD(h2_req_entries, h2_ngn_entry) entries; + apr_uint32_t capacity; /* maximum concurrent requests */ + apr_uint32_t no_assigned; /* # of assigned requests */ + apr_uint32_t no_live; /* # of live */ + apr_uint32_t no_finished; /* # of finished */ + + h2_output_consumed *out_consumed; + void *out_consumed_ctx; +}; + +const char *h2_req_engine_get_id(h2_req_engine *engine) +{ + return engine->id; +} + +int h2_req_engine_is_shutdown(h2_req_engine *engine) +{ + return engine->shutdown; +} + +void h2_req_engine_out_consumed(h2_req_engine *engine, conn_rec *c, + apr_off_t bytes) +{ + if (engine->out_consumed) { + engine->out_consumed(engine->out_consumed_ctx, c, bytes); + } +} + +h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c, + apr_uint32_t default_capacity, + apr_uint32_t req_buffer_size) +{ + h2_ngn_shed *shed; + + shed = apr_pcalloc(pool, sizeof(*shed)); + shed->c = c; + shed->pool = pool; + shed->default_capacity = default_capacity; + shed->req_buffer_size = req_buffer_size; + shed->ngns = apr_hash_make(pool); + + return shed; +} + +void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx) +{ + shed->user_ctx = user_ctx; +} + +void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed) +{ + return shed->user_ctx; +} + +h2_ngn_shed *h2_ngn_shed_get_shed(h2_req_engine *ngn) +{ + return ngn->shed; +} + +void h2_ngn_shed_abort(h2_ngn_shed *shed) +{ + 
shed->aborted = 1; +} + +static void ngn_add_task(h2_req_engine *ngn, h2_task *task) +{ + h2_ngn_entry *entry = apr_pcalloc(task->pool, sizeof(*entry)); + APR_RING_ELEM_INIT(entry, link); + entry->task = task; + H2_REQ_ENTRIES_INSERT_TAIL(&ngn->entries, entry); +} + + +apr_status_t h2_ngn_shed_push_task(h2_ngn_shed *shed, const char *ngn_type, + h2_task *task, http2_req_engine_init *einit) +{ + h2_req_engine *ngn; + + AP_DEBUG_ASSERT(shed); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, + "h2_ngn_shed(%ld): PUSHing request (task=%s)", shed->c->id, + task->id); + if (task->ser_headers) { + /* Max compatibility, deny processing of this */ + return APR_EOF; + } + + ngn = apr_hash_get(shed->ngns, ngn_type, APR_HASH_KEY_STRING); + if (ngn && !ngn->shutdown) { + /* this task will be processed in another thread, + * freeze any I/O for the time being. */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, + "h2_ngn_shed(%ld): pushing request %s to %s", + shed->c->id, task->id, ngn->id); + if (!h2_task_is_detached(task)) { + h2_task_freeze(task); + } + /* FIXME: sometimes ngn is garbage, probly alread freed */ + ngn_add_task(ngn, task); + ngn->no_assigned++; + return APR_SUCCESS; + } + + /* no existing engine or being shut down, start a new one */ + if (einit) { + apr_status_t status; + apr_pool_t *pool = task->pool; + h2_req_engine *newngn; + + newngn = apr_pcalloc(pool, sizeof(*ngn)); + newngn->pool = pool; + newngn->id = apr_psprintf(pool, "ngn-%s", task->id); + newngn->type = apr_pstrdup(pool, ngn_type); + newngn->c = task->c; + newngn->shed = shed; + newngn->capacity = shed->default_capacity; + newngn->no_assigned = 1; + newngn->no_live = 1; + APR_RING_INIT(&newngn->entries, h2_ngn_entry, link); + + status = einit(newngn, newngn->id, newngn->type, newngn->pool, + shed->req_buffer_size, task->r, + &newngn->out_consumed, &newngn->out_consumed_ctx); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, + "h2_ngn_shed(%ld): create engine %s (%s)", + shed->c->id, newngn->id, newngn->type); + if (status == APR_SUCCESS) { + AP_DEBUG_ASSERT(task->engine == NULL); + newngn->task = task; + task->engine = newngn; + task->assigned = newngn; + apr_hash_set(shed->ngns, newngn->type, APR_HASH_KEY_STRING, newngn); + } + return status; + } + return APR_EOF; +} + +static h2_ngn_entry *pop_detached(h2_req_engine *ngn) +{ + h2_ngn_entry *entry; + for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries); + entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries); + entry = H2_NGN_ENTRY_NEXT(entry)) { + if (h2_task_is_detached(entry->task) + || (entry->task->engine == ngn)) { + /* The task hosting this engine can always be pulled by it. + * For other task, they need to become detached, e.g. no longer + * assigned to another worker. 
*/ + H2_NGN_ENTRY_REMOVE(entry); + return entry; + } + } + return NULL; +} + +apr_status_t h2_ngn_shed_pull_task(h2_ngn_shed *shed, + h2_req_engine *ngn, + apr_uint32_t capacity, + int want_shutdown, + h2_task **ptask) +{ + h2_ngn_entry *entry; + + AP_DEBUG_ASSERT(ngn); + *ptask = NULL; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, + "h2_ngn_shed(%ld): pull task for engine %s, shutdown=%d", + shed->c->id, ngn->id, want_shutdown); + if (shed->aborted) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, shed->c, + "h2_ngn_shed(%ld): abort while pulling requests %s", + shed->c->id, ngn->id); + ngn->shutdown = 1; + return APR_ECONNABORTED; + } + + ngn->capacity = capacity; + if (H2_REQ_ENTRIES_EMPTY(&ngn->entries)) { + if (want_shutdown) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, + "h2_ngn_shed(%ld): emtpy queue, shutdown engine %s", + shed->c->id, ngn->id); + ngn->shutdown = 1; + } + return ngn->shutdown? APR_EOF : APR_EAGAIN; + } + + if ((entry = pop_detached(ngn))) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, entry->task->c, + "h2_ngn_shed(%ld): pulled request %s for engine %s", + shed->c->id, entry->task->id, ngn->id); + ngn->no_live++; + *ptask = entry->task; + entry->task->assigned = ngn; + return APR_SUCCESS; + } + + if (1) { + h2_ngn_entry *entry = H2_REQ_ENTRIES_FIRST(&ngn->entries); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, + "h2_ngn_shed(%ld): pull task, nothing, first task %s", + shed->c->id, entry->task->id); + } + return APR_EAGAIN; +} + +static apr_status_t ngn_done_task(h2_ngn_shed *shed, h2_req_engine *ngn, + h2_task *task, int waslive, int aborted) +{ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, + "h2_ngn_shed(%ld): task %s %s by %s", + shed->c->id, task->id, aborted? "aborted":"done", ngn->id); + ngn->no_finished++; + if (waslive) ngn->no_live--; + ngn->no_assigned--; + + return APR_SUCCESS; +} + +apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed, + struct h2_req_engine *ngn, h2_task *task) +{ + return ngn_done_task(shed, ngn, task, 1, 0); +} + +void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn) +{ + if (ngn->done) { + return; + } + + if (!shed->aborted && !H2_REQ_ENTRIES_EMPTY(&ngn->entries)) { + h2_ngn_entry *entry; + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, shed->c, + "h2_ngn_shed(%ld): exit engine %s (%s), " + "has still requests queued, shutdown=%d," + "assigned=%ld, live=%ld, finished=%ld", + shed->c->id, ngn->id, ngn->type, + ngn->shutdown, + (long)ngn->no_assigned, (long)ngn->no_live, + (long)ngn->no_finished); + for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries); + entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries); + entry = H2_NGN_ENTRY_NEXT(entry)) { + h2_task *task = entry->task; + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, shed->c, + "h2_ngn_shed(%ld): engine %s has queued task %s, " + "frozen=%d, aborting", + shed->c->id, ngn->id, task->id, task->frozen); + ngn_done_task(shed, ngn, task, 0, 1); + } + } + if (!shed->aborted && (ngn->no_assigned > 1 || ngn->no_live > 1)) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, shed->c, + "h2_ngn_shed(%ld): exit engine %s (%s), " + "assigned=%ld, live=%ld, finished=%ld", + shed->c->id, ngn->id, ngn->type, + (long)ngn->no_assigned, (long)ngn->no_live, + (long)ngn->no_finished); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, + "h2_ngn_shed(%ld): exit engine %s", + shed->c->id, ngn->id); + } + + apr_hash_set(shed->ngns, ngn->type, APR_HASH_KEY_STRING, NULL); + ngn->done = 1; +} diff --git a/mod_http2/h2_ngn_shed.h b/mod_http2/h2_ngn_shed.h new file 
mode 100644 index 00000000..832dbd3a --- /dev/null +++ b/mod_http2/h2_ngn_shed.h @@ -0,0 +1,76 @@ +/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef h2_req_shed_h +#define h2_req_shed_h + +struct h2_req_engine; +struct h2_task; + +typedef struct h2_ngn_shed h2_ngn_shed; +struct h2_ngn_shed { + conn_rec *c; + apr_pool_t *pool; + apr_hash_t *ngns; + void *user_ctx; + + unsigned int aborted : 1; + + apr_uint32_t default_capacity; + apr_uint32_t req_buffer_size; /* preferred buffer size for responses */ +}; + +const char *h2_req_engine_get_id(h2_req_engine *engine); +int h2_req_engine_is_shutdown(h2_req_engine *engine); + +void h2_req_engine_out_consumed(h2_req_engine *engine, conn_rec *c, + apr_off_t bytes); + +typedef apr_status_t h2_shed_ngn_init(h2_req_engine *engine, + const char *id, + const char *type, + apr_pool_t *pool, + apr_uint32_t req_buffer_size, + request_rec *r, + h2_output_consumed **pconsumed, + void **pbaton); + +h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c, + apr_uint32_t default_capactiy, + apr_uint32_t req_buffer_size); + +void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx); +void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed); + +h2_ngn_shed *h2_ngn_shed_get_shed(struct h2_req_engine *ngn); + +void h2_ngn_shed_abort(h2_ngn_shed *shed); + +apr_status_t h2_ngn_shed_push_task(h2_ngn_shed *shed, const char *ngn_type, + struct h2_task *task, + h2_shed_ngn_init *init_cb); + +apr_status_t h2_ngn_shed_pull_task(h2_ngn_shed *shed, h2_req_engine *pub_ngn, + apr_uint32_t capacity, + int want_shutdown, struct h2_task **ptask); + +apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed, + struct h2_req_engine *ngn, + struct h2_task *task); + +void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn); + + +#endif /* h2_req_shed_h */ diff --git a/mod_http2/h2_private.h b/mod_http2/h2_private.h index 0ad02d3b..b6861369 100644 --- a/mod_http2/h2_private.h +++ b/mod_http2/h2_private.h @@ -16,26 +16,12 @@ #ifndef mod_h2_h2_private_h #define mod_h2_h2_private_h +#include + #include extern module AP_MODULE_DECLARE_DATA http2_module; APLOG_USE_MODULE(http2); - -#define H2_HEADER_METHOD ":method" -#define H2_HEADER_METHOD_LEN 7 -#define H2_HEADER_SCHEME ":scheme" -#define H2_HEADER_SCHEME_LEN 7 -#define H2_HEADER_AUTH ":authority" -#define H2_HEADER_AUTH_LEN 10 -#define H2_HEADER_PATH ":path" -#define H2_HEADER_PATH_LEN 5 -#define H2_CRLF "\r\n" - -#define H2_ALEN(a) (sizeof(a)/sizeof((a)[0])) - -#define H2MAX(x,y) ((x) > (y) ? (x) : (y)) -#define H2MIN(x,y) ((x) < (y) ? 
(x) : (y)) - #endif diff --git a/mod_http2/h2_push.c b/mod_http2/h2_push.c index 4719e5ca..748e32ab 100644 --- a/mod_http2/h2_push.c +++ b/mod_http2/h2_push.c @@ -289,14 +289,36 @@ static int set_push_header(void *ctx, const char *key, const char *value) return 1; } +static int has_param(link_ctx *ctx, const char *param) +{ + const char *p = apr_table_get(ctx->params, param); + return !!p; +} + +static int has_relation(link_ctx *ctx, const char *rel) +{ + const char *s, *val = apr_table_get(ctx->params, "rel"); + if (val) { + if (!strcmp(rel, val)) { + return 1; + } + s = ap_strstr_c(val, rel); + if (s && (s == val || s[-1] == ' ')) { + s += strlen(rel); + if (!*s || *s == ' ') { + return 1; + } + } + } + return 0; +} static int add_push(link_ctx *ctx) { /* so, we have read a Link header and need to decide * if we transform it into a push. */ - const char *rel = apr_table_get(ctx->params, "rel"); - if (rel && !strcmp("preload", rel)) { + if (has_relation(ctx, "preload") && !has_param(ctx, "nopush")) { apr_uri_t uri; if (apr_uri_parse(ctx->pool, ctx->link, &uri) == APR_SUCCESS) { if (uri.path && same_authority(ctx->req, &uri)) { @@ -313,9 +335,7 @@ static int add_push(link_ctx *ctx) * TLS (if any) parameters. */ path = apr_uri_unparse(ctx->pool, &uri, APR_URI_UNP_OMITSITEPART); - push = apr_pcalloc(ctx->pool, sizeof(*push)); - switch (ctx->req->push_policy) { case H2_PUSH_HEAD: method = "HEAD"; @@ -326,10 +346,9 @@ static int add_push(link_ctx *ctx) } headers = apr_table_make(ctx->pool, 5); apr_table_do(set_push_header, headers, ctx->req->headers, NULL); - req = h2_request_createn(0, ctx->pool, ctx->req->config, - method, ctx->req->scheme, - ctx->req->authority, - path, headers); + req = h2_request_createn(0, ctx->pool, method, ctx->req->scheme, + ctx->req->authority, path, headers, + ctx->req->serialize); /* atm, we do not push on pushes */ h2_request_end_headers(req, ctx->pool, 1, 0); push->req = req; @@ -437,38 +456,30 @@ apr_array_header_t *h2_push_collect(apr_pool_t *p, const h2_request *req, return NULL; } -void h2_push_policy_determine(struct h2_request *req, apr_pool_t *p, int push_enabled) -{ - h2_push_policy policy = H2_PUSH_NONE; - if (push_enabled) { - const char *val = apr_table_get(req->headers, "accept-push-policy"); - if (val) { - if (ap_find_token(p, val, "fast-load")) { - policy = H2_PUSH_FAST_LOAD; - } - else if (ap_find_token(p, val, "head")) { - policy = H2_PUSH_HEAD; - } - else if (ap_find_token(p, val, "default")) { - policy = H2_PUSH_DEFAULT; - } - else if (ap_find_token(p, val, "none")) { - policy = H2_PUSH_NONE; - } - else { - /* nothing known found in this header, go by default */ - policy = H2_PUSH_DEFAULT; - } - } - else { - policy = H2_PUSH_DEFAULT; - } - } - req->push_policy = policy; -} - /******************************************************************************* * push diary + * + * - The push diary keeps track of resources already PUSHed via HTTP/2 on this + * connection. It records a hash value from the absolute URL of the resource + * pushed. + * - Lacking openssl, it uses 'apr_hashfunc_default' for the value + * - with openssl, it uses SHA256 to calculate the hash value + * - whatever the method to generate the hash, the diary keeps a maximum of 64 + * bits per hash, limiting the memory consumption to about + * H2PushDiarySize * 8 + * bytes. Entries are sorted by most recently used and oldest entries are + * forgotten first. + * - Clients can initialize/replace the push diary by sending a 'Cache-Digest' + * header. 
Currently, this is the base64url encoded value of the cache digest + * as specified in https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/ + * This draft can be expected to evolve and the definition of the header + * will be added there and refined. + * - The cache digest header is a Golomb Coded Set of hash values, but it may + * limit the amount of bits per hash value even further. For a good description + * of GCS, read here: + * http://giovanni.bajo.it/post/47119962313/golomb-coded-sets-smaller-than-bloom-filters + * - The means that the push diary might be initialized with hash values of much + * less than 64 bits, leading to more false positives, but smaller digest size. ******************************************************************************/ @@ -627,6 +638,7 @@ static void h2_push_diary_append(h2_push_diary *diary, h2_push_diary_entry *e) ne = move_to_last(diary, 0); *ne = *e; } + /* Intentional no APLOGNO */ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, diary->entries->pool, "push_diary_append: %"APR_UINT64_T_HEX_FMT, ne->hash); } @@ -647,11 +659,13 @@ apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t session->push_diary->dcalc(session->push_diary, &e.hash, push); idx = h2_push_diary_find(session->push_diary, e.hash); if (idx >= 0) { + /* Intentional no APLOGNO */ ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c, "push_diary_update: already there PUSH %s", push->req->path); move_to_last(session->push_diary, idx); } else { + /* Intentional no APLOGNO */ ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c, "push_diary_update: adding PUSH %s", push->req->path); if (!npushes) { @@ -679,6 +693,7 @@ apr_array_header_t *h2_push_collect_update(h2_stream *stream, cache_digest, stream->pool); if (status != APR_SUCCESS) { ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, + APLOGNO(03057) "h2_session(%ld): push diary set from Cache-Digest: %s", session->id, cache_digest); } @@ -687,36 +702,6 @@ apr_array_header_t *h2_push_collect_update(h2_stream *stream, return h2_push_diary_update(stream->session, pushes); } -/* h2_log2(n) iff n is a power of 2 */ -static unsigned char h2_log2(apr_uint32_t n) -{ - int lz = 0; - if (!n) { - return 0; - } - if (!(n & 0xffff0000u)) { - lz += 16; - n = (n << 16); - } - if (!(n & 0xff000000u)) { - lz += 8; - n = (n << 8); - } - if (!(n & 0xf0000000u)) { - lz += 4; - n = (n << 4); - } - if (!(n & 0xc0000000u)) { - lz += 2; - n = (n << 2); - } - if (!(n & 0x80000000u)) { - lz += 1; - } - - return 31 - lz; -} - static apr_int32_t h2_log2inv(unsigned char log2) { return log2? 
(1 << log2) : 1; @@ -790,10 +775,11 @@ static apr_status_t gset_encode_next(gset_encoder *encoder, apr_uint64_t pval) delta = pval - encoder->last; encoder->last = pval; flex_bits = (delta >> encoder->fixed_bits); + /* Intentional no APLOGNO */ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, encoder->pool, "h2_push_diary_enc: val=%"APR_UINT64_T_HEX_FMT", delta=%" APR_UINT64_T_HEX_FMT" flex_bits=%"APR_UINT64_T_FMT", " - "fixed_bits=%d, fixed_val=%"APR_UINT64_T_HEX_FMT, + ", fixed_bits=%d, fixed_val=%"APR_UINT64_T_HEX_FMT, pval, delta, flex_bits, encoder->fixed_bits, delta&encoder->fixed_mask); for (; flex_bits != 0; --flex_bits) { status = gset_encode_bit(encoder, 1); @@ -868,6 +854,7 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool, encoder.bit = 8; encoder.last = 0; + /* Intentional no APLOGNO */ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, "h2_push_diary_digest_get: %d entries, N=%d, log2n=%d, " "mask_bits=%d, enc.mask_bits=%d, delta_bits=%d, enc.log2p=%d, authority=%s", @@ -890,6 +877,7 @@ apr_status_t h2_push_diary_digest_get(h2_push_diary *diary, apr_pool_t *pool, gset_encode_next(&encoder, hashes[i]); } } + /* Intentional no APLOGNO */ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, "h2_push_diary_digest_get: golomb compressed hashes, %d bytes", (int)encoder.offset + 1); @@ -954,6 +942,7 @@ static apr_status_t gset_decode_next(gset_decoder *decoder, apr_uint64_t *phash) *phash = delta + decoder->last_val; decoder->last_val = *phash; + /* Intentional no APLOGNO */ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, decoder->pool, "h2_push_diary_digest_dec: val=%"APR_UINT64_T_HEX_FMT", delta=%" APR_UINT64_T_HEX_FMT", flex=%d, fixed=%"APR_UINT64_T_HEX_FMT, @@ -1028,6 +1017,7 @@ apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authorit diary->N = diary->NMax; } + /* Intentional no APLOGNO */ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, "h2_push_diary_digest_set: N=%d, log2n=%d, " "diary->mask_bits=%d, dec.log2p=%d", @@ -1042,6 +1032,7 @@ apr_status_t h2_push_diary_digest_set(h2_push_diary *diary, const char *authorit h2_push_diary_append(diary, &e); } + /* Intentional no APLOGNO */ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, "h2_push_diary_digest_set: diary now with %d entries, mask_bits=%d", (int)diary->entries->nelts, diary->mask_bits); @@ -1053,6 +1044,7 @@ apr_status_t h2_push_diary_digest64_set(h2_push_diary *diary, const char *author { const char *data; apr_size_t len = h2_util_base64url_decode(&data, data64url, pool); + /* Intentional no APLOGNO */ ap_log_perror(APLOG_MARK, GCSLOG_LEVEL, 0, pool, "h2_push_diary_digest64_set: digest=%s, dlen=%d", data64url, (int)len); diff --git a/mod_http2/h2_push.h b/mod_http2/h2_push.h index b9e7219f..d3519dcb 100644 --- a/mod_http2/h2_push.h +++ b/mod_http2/h2_push.h @@ -15,19 +15,14 @@ #ifndef __mod_h2__h2_push__ #define __mod_h2__h2_push__ +#include "h2.h" + struct h2_request; struct h2_response; struct h2_ngheader; struct h2_session; struct h2_stream; -typedef enum { - H2_PUSH_NONE, - H2_PUSH_DEFAULT, - H2_PUSH_HEAD, - H2_PUSH_FAST_LOAD, -} h2_push_policy; - typedef struct h2_push { const struct h2_request *req; } h2_push; @@ -65,17 +60,6 @@ apr_array_header_t *h2_push_collect(apr_pool_t *p, const struct h2_request *req, const struct h2_response *res); -/** - * Set the push policy for the given request. Takes request headers into - * account, see draft https://tools.ietf.org/html/draft-ruellan-http-accept-push-policy-00 - * for details. 
- * - * @param req the request to determine the policy for - * @param p the pool to use - * @param push_enabled if HTTP/2 server push is generally enabled for this request - */ -void h2_push_policy_determine(struct h2_request *req, apr_pool_t *p, int push_enabled); - /** * Create a new push diary for the given maximum number of entries. * diff --git a/mod_http2/h2_request.c b/mod_http2/h2_request.c index cf1121b2..2652661e 100644 --- a/mod_http2/h2_request.c +++ b/mod_http2/h2_request.c @@ -30,45 +30,36 @@ #include #include "h2_private.h" -#include "h2_config.h" -#include "h2_mplx.h" #include "h2_push.h" #include "h2_request.h" -#include "h2_task.h" #include "h2_util.h" -h2_request *h2_request_create(int id, apr_pool_t *pool, - const struct h2_config *config) +h2_request *h2_request_create(int id, apr_pool_t *pool, int serialize) { - return h2_request_createn(id, pool, config, - NULL, NULL, NULL, NULL, NULL); + return h2_request_createn(id, pool, NULL, NULL, NULL, NULL, NULL, + serialize); } h2_request *h2_request_createn(int id, apr_pool_t *pool, - const struct h2_config *config, const char *method, const char *scheme, const char *authority, const char *path, - apr_table_t *header) + apr_table_t *header, int serialize) { h2_request *req = apr_pcalloc(pool, sizeof(h2_request)); req->id = id; - req->config = config; req->method = method; req->scheme = scheme; req->authority = authority; req->path = path; req->headers = header? header : apr_table_make(pool, 10); req->request_time = apr_time_now(); - + req->serialize = serialize; + return req; } -void h2_request_destroy(h2_request *req) -{ -} - static apr_status_t inspect_clen(h2_request *req, const char *s) { char *end; @@ -139,38 +130,48 @@ static apr_status_t add_all_h1_header(h2_request *req, apr_pool_t *pool, } +apr_status_t h2_request_make(h2_request *req, apr_pool_t *pool, + const char *method, const char *scheme, + const char *authority, const char *path, + apr_table_t *headers) +{ + req->method = method; + req->scheme = scheme; + req->authority = authority; + req->path = path; + + AP_DEBUG_ASSERT(req->scheme); + AP_DEBUG_ASSERT(req->authority); + AP_DEBUG_ASSERT(req->path); + AP_DEBUG_ASSERT(req->method); + + return add_all_h1_header(req, pool, headers); +} + apr_status_t h2_request_rwrite(h2_request *req, request_rec *r) { apr_status_t status; + const char *scheme, *authority; - req->config = h2_config_rget(r); - req->method = r->method; - req->scheme = (r->parsed_uri.scheme? r->parsed_uri.scheme - : ap_http_scheme(r)); - req->authority = r->hostname; - req->path = apr_uri_unparse(r->pool, &r->parsed_uri, - APR_URI_UNP_OMITSITEPART); - - if (!ap_strchr_c(req->authority, ':') && r->server && r->server->port) { - apr_port_t defport = apr_uri_port_of_scheme(req->scheme); + scheme = (r->parsed_uri.scheme? 
r->parsed_uri.scheme + : ap_http_scheme(r)); + authority = r->hostname; + if (!ap_strchr_c(authority, ':') && r->server && r->server->port) { + apr_port_t defport = apr_uri_port_of_scheme(scheme); if (defport != r->server->port) { /* port info missing and port is not default for scheme: append */ - req->authority = apr_psprintf(r->pool, "%s:%d", req->authority, - (int)r->server->port); + authority = apr_psprintf(r->pool, "%s:%d", authority, + (int)r->server->port); } } - AP_DEBUG_ASSERT(req->scheme); - AP_DEBUG_ASSERT(req->authority); - AP_DEBUG_ASSERT(req->path); - AP_DEBUG_ASSERT(req->method); - - status = add_all_h1_header(req, r->pool, r->headers_in); - - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, + status = h2_request_make(req, r->pool, r->method, scheme, authority, + apr_uri_unparse(r->pool, &r->parsed_uri, + APR_URI_UNP_OMITSITEPART), + r->headers_in); + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03058) "h2_request(%d): rwrite %s host=%s://%s%s", req->id, req->method, req->scheme, req->authority, req->path); - return status; } @@ -322,13 +323,13 @@ apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool, const char *value, size_t vlen) { if (!req->trailers) { - ap_log_perror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, pool, + ap_log_perror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, pool, APLOGNO(03059) "h2_request(%d): unanounced trailers", req->id); return APR_EINVAL; } if (nlen == 0 || name[0] == ':') { - ap_log_perror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, pool, + ap_log_perror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, pool, APLOGNO(03060) "h2_request(%d): pseudo header in trailer", req->id); return APR_EINVAL; @@ -341,14 +342,32 @@ apr_status_t h2_request_add_trailer(h2_request *req, apr_pool_t *pool, void h2_request_copy(apr_pool_t *p, h2_request *dst, const h2_request *src) { /* keep the dst id */ + dst->initiated_on = src->initiated_on; dst->method = OPT_COPY(p, src->method); dst->scheme = OPT_COPY(p, src->scheme); dst->authority = OPT_COPY(p, src->authority); dst->path = OPT_COPY(p, src->path); dst->headers = apr_table_clone(p, src->headers); + if (src->trailers) { + dst->trailers = apr_table_clone(p, src->trailers); + } + else { + dst->trailers = NULL; + } dst->content_length = src->content_length; dst->chunked = src->chunked; dst->eoh = src->eoh; + dst->body = src->body; + dst->serialize = src->serialize; + dst->push_policy = src->push_policy; +} + +h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src) +{ + h2_request *nreq = apr_pcalloc(p, sizeof(*nreq)); + memcpy(nreq, src, sizeof(*nreq)); + h2_request_copy(p, nreq, src); + return nreq; } request_rec *h2_request_create_rec(const h2_request *req, conn_rec *conn) @@ -370,7 +389,7 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *conn) r->allowed_methods = ap_make_method_list(p, 2); - r->headers_in = apr_table_copy(r->pool, req->headers); + r->headers_in = apr_table_clone(r->pool, req->headers); r->trailers_in = apr_table_make(r->pool, 5); r->subprocess_env = apr_table_make(r->pool, 25); r->headers_out = apr_table_make(r->pool, 12); @@ -417,7 +436,7 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *conn) } ap_parse_uri(r, req->path); - r->protocol = (char*)"HTTP/2"; + r->protocol = "HTTP/2"; r->proto_num = HTTP_VERSION(2, 0); r->the_request = apr_psprintf(r->pool, "%s %s %s", @@ -448,6 +467,9 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *conn) /* Request check post hooks failed. 
An example of this would be a * request for a vhost where h2 is disabled --> 421. */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, conn, APLOGNO() + "h2_request(%d): access_status=%d, request_create failed", + req->id, access_status); ap_die(access_status, r); ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r); ap_run_log_transaction(r); diff --git a/mod_http2/h2_request.h b/mod_http2/h2_request.h index cc01ed12..da87d70a 100644 --- a/mod_http2/h2_request.h +++ b/mod_http2/h2_request.h @@ -16,48 +16,19 @@ #ifndef __mod_h2__h2_request__ #define __mod_h2__h2_request__ -/* h2_request is the transformer of HTTP2 streams into HTTP/1.1 internal - * format that will be fed to various httpd input filters to finally - * become a request_rec to be handled by soemone. - */ -struct h2_config; -struct h2_to_h1; -struct h2_mplx; -struct h2_task; - -typedef struct h2_request h2_request; - -struct h2_request { - int id; /* stream id */ +#include "h2.h" - const char *method; /* pseudo header values, see ch. 8.1.2.3 */ - const char *scheme; - const char *authority; - const char *path; - - apr_table_t *headers; - apr_table_t *trailers; - - apr_time_t request_time; - apr_off_t content_length; - - unsigned int chunked : 1; /* iff requst body needs to be forwarded as chunked */ - unsigned int eoh : 1; /* iff end-of-headers has been seen and request is complete */ - unsigned int body : 1; /* iff this request has a body */ - unsigned int push_policy; /* which push policy to use for this request */ - const struct h2_config *config; -}; - -h2_request *h2_request_create(int id, apr_pool_t *pool, - const struct h2_config *config); +h2_request *h2_request_create(int id, apr_pool_t *pool, int serialize); h2_request *h2_request_createn(int id, apr_pool_t *pool, - const struct h2_config *config, const char *method, const char *scheme, const char *authority, const char *path, - apr_table_t *headers); + apr_table_t *headers, int serialize); -void h2_request_destroy(h2_request *req); +apr_status_t h2_request_make(h2_request *req, apr_pool_t *pool, + const char *method, const char *scheme, + const char *authority, const char *path, + apr_table_t *headers); apr_status_t h2_request_rwrite(h2_request *req, request_rec *r); @@ -74,6 +45,8 @@ apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, void h2_request_copy(apr_pool_t *p, h2_request *dst, const h2_request *src); +h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src); + /** * Create a request_rec representing the h2_request to be * processed on the given connection. diff --git a/mod_http2/h2_response.c b/mod_http2/h2_response.c index 3ef6f850..eb9043d0 100644 --- a/mod_http2/h2_response.c +++ b/mod_http2/h2_response.c @@ -70,6 +70,28 @@ static const char *get_sos_filter(apr_table_t *notes) return notes? 
apr_table_get(notes, H2_RESP_SOS_NOTE) : NULL; } +static void check_clen(h2_response *response, request_rec *r, apr_pool_t *pool) +{ + + if (r && r->header_only) { + response->content_length = 0; + } + else if (response->headers) { + const char *s = apr_table_get(response->headers, "Content-Length"); + if (s) { + char *end; + response->content_length = apr_strtoi64(s, &end, 10); + if (s == end) { + ap_log_perror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, + pool, APLOGNO(02956) + "h2_response: content-length" + " value not parsed: %s", s); + response->content_length = -1; + } + } + } +} + static h2_response *h2_response_create_int(int stream_id, int rst_error, int http_status, @@ -78,7 +100,6 @@ static h2_response *h2_response_create_int(int stream_id, apr_pool_t *pool) { h2_response *response; - const char *s; if (!headers) { return NULL; @@ -96,19 +117,7 @@ static h2_response *h2_response_create_int(int stream_id, response->headers = headers; response->sos_filter = get_sos_filter(notes); - s = apr_table_get(headers, "Content-Length"); - if (s) { - char *end; - - response->content_length = apr_strtoi64(s, &end, 10); - if (s == end) { - ap_log_perror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, - pool, APLOGNO(02956) - "h2_response: content-length" - " value not parsed: %s", s); - response->content_length = -1; - } - } + check_clen(response, NULL, pool); return response; } @@ -138,13 +147,16 @@ h2_response *h2_response_rcreate(int stream_id, request_rec *r, response->headers = header; response->sos_filter = get_sos_filter(r->notes); + check_clen(response, r, pool); + if (response->http_status == HTTP_FORBIDDEN) { const char *cause = apr_table_get(r->notes, "ssl-renegotiate-forbidden"); if (cause) { /* This request triggered a TLS renegotiation that is now allowed * in HTTP/2. Tell the client that it should use HTTP/1.1 for this. */ - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, response->http_status, r, + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, response->http_status, r, + APLOGNO(03061) "h2_response(%ld-%d): renegotiate forbidden, cause: %s", (long)r->connection->id, stream_id, cause); response->rst_error = H2_ERR_HTTP_1_1_REQUIRED; diff --git a/mod_http2/h2_response.h b/mod_http2/h2_response.h index 59140ee3..ca57c532 100644 --- a/mod_http2/h2_response.h +++ b/mod_http2/h2_response.h @@ -16,18 +16,7 @@ #ifndef __mod_h2__h2_response__ #define __mod_h2__h2_response__ -struct h2_request; -struct h2_push; - -typedef struct h2_response { - int stream_id; - int rst_error; - int http_status; - apr_off_t content_length; - apr_table_t *headers; - apr_table_t *trailers; - const char *sos_filter; -} h2_response; +#include "h2.h" /** * Create the response from the status and parsed header lines. 
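
The check_clen() helper added above centralizes response content-length handling: header-only responses report a zero-length body, and a Content-Length value that does not parse cleanly falls back to -1 (unknown length). Below is a minimal standalone sketch of that rule using only APR; parse_content_length() is an illustrative helper, not part of the module:

    #include <apr_strings.h>
    #include <apr_tables.h>

    /* Hypothetical helper mirroring the rule in check_clen() above:
     * a header-only response carries no body, a missing or unparsable
     * Content-Length is treated as "unknown" (-1). */
    static apr_off_t parse_content_length(const apr_table_t *headers,
                                          int header_only)
    {
        const char *s;
        char *end;
        apr_off_t clen;

        if (header_only) {
            return 0;                 /* e.g. response to a HEAD request */
        }
        s = apr_table_get(headers, "Content-Length");
        if (!s) {
            return -1;                /* no header: length unknown */
        }
        clen = apr_strtoi64(s, &end, 10);
        if (s == end) {
            return -1;                /* value did not parse: treat as unknown */
        }
        return clen;
    }
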
diff --git a/mod_http2/h2_session.c b/mod_http2/h2_session.c index 7d9128f8..928bb4a6 100644 --- a/mod_http2/h2_session.c +++ b/mod_http2/h2_session.c @@ -14,6 +14,7 @@ */ #include +#include #include #include #include @@ -38,7 +39,6 @@ #include "h2_request.h" #include "h2_response.h" #include "h2_stream.h" -#include "h2_stream_set.h" #include "h2_from_h1.h" #include "h2_task.h" #include "h2_session.h" @@ -47,8 +47,6 @@ #include "h2_workers.h" -static int frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen); - static int h2_session_status_from_apr_status(apr_status_t rv) { if (rv == APR_SUCCESS) { @@ -80,6 +78,49 @@ static int is_accepting_streams(h2_session *session); static void dispatch_event(h2_session *session, h2_session_event_t ev, int err, const char *msg); +typedef struct stream_sel_ctx { + h2_session *session; + h2_stream *candidate; +} stream_sel_ctx; + +static int find_cleanup_stream(void *udata, void *sdata) +{ + stream_sel_ctx *ctx = udata; + h2_stream *stream = sdata; + if (H2_STREAM_CLIENT_INITIATED(stream->id)) { + if (!ctx->session->local.accepting + && stream->id > ctx->session->local.accepted_max) { + ctx->candidate = stream; + return 0; + } + } + else { + if (!ctx->session->remote.accepting + && stream->id > ctx->session->remote.accepted_max) { + ctx->candidate = stream; + return 0; + } + } + return 1; +} + +static void cleanup_streams(h2_session *session) +{ + stream_sel_ctx ctx; + ctx.session = session; + ctx.candidate = NULL; + while (1) { + h2_ihash_iter(session->streams, find_cleanup_stream, &ctx); + if (ctx.candidate) { + h2_session_stream_destroy(session, ctx.candidate); + ctx.candidate = NULL; + } + else { + break; + } + } +} + h2_stream *h2_session_open_stream(h2_session *session, int stream_id) { h2_stream * stream; @@ -91,22 +132,29 @@ h2_stream *h2_session_open_stream(h2_session *session, int stream_id) } else { apr_pool_create(&stream_pool, session->pool); + apr_pool_tag(stream_pool, "h2_stream"); } stream = h2_stream_open(stream_id, stream_pool, session); - h2_stream_set_add(session->streams, stream); - if (H2_STREAM_CLIENT_INITIATED(stream_id) - && stream_id > session->max_stream_received) { - ++session->requests_received; - session->max_stream_received = stream->id; + h2_ihash_add(session->streams, stream); + if (H2_STREAM_CLIENT_INITIATED(stream_id)) { + if (stream_id > session->remote.emitted_max) { + ++session->remote.emitted_count; + session->remote.emitted_max = stream->id; + session->local.accepted_max = stream->id; + } + } + else { + if (stream_id > session->local.emitted_max) { + ++session->local.emitted_count; + session->remote.emitted_max = stream->id; + } } return stream; } -#ifdef H2_NG2_STREAM_API - /** * Determine the importance of streams when scheduling tasks. * - if both stream depend on the same one, compare weights @@ -160,20 +208,6 @@ static int stream_pri_cmp(int sid1, int sid2, void *ctx) return spri_cmp(sid1, s1, sid2, s2, session); } -#else /* ifdef H2_NG2_STREAM_API */ - -/* In absence of nghttp2_stream API, which gives information about - * priorities since nghttp2 1.3.x, we just sort the streams by - * their identifier, aka. order of arrival. 
- */ -static int stream_pri_cmp(int sid1, int sid2, void *ctx) -{ - (void)ctx; - return sid1 - sid2; -} - -#endif /* (ifdef else) H2_NG2_STREAM_API */ - static apr_status_t stream_schedule(h2_session *session, h2_stream *stream, int eos) { @@ -201,7 +235,7 @@ static ssize_t send_cb(nghttp2_session *ngh2, if (APR_STATUS_IS_EAGAIN(status)) { return NGHTTP2_ERR_WOULDBLOCK; } - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03062) "h2_session: send error"); return h2_session_status_from_apr_status(status); } @@ -216,8 +250,8 @@ static int on_invalid_frame_recv_cb(nghttp2_session *ngh2, if (APLOGcdebug(session->c)) { char buffer[256]; - frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03063) "h2_session(%ld): recv unknown FRAME[%s], frames=%ld/%ld (r/s)", session->id, buffer, (long)session->frames_received, (long)session->frames_sent); @@ -242,7 +276,7 @@ static int on_data_chunk_recv_cb(nghttp2_session *ngh2, uint8_t flags, stream = h2_session_get_stream(session, stream_id); if (!stream) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03064) "h2_stream(%ld-%d): on_data_chunk for unknown stream", session->id, (int)stream_id); rv = nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE, stream_id, @@ -252,8 +286,11 @@ static int on_data_chunk_recv_cb(nghttp2_session *ngh2, uint8_t flags, } return 0; } - - status = h2_stream_write_data(stream, (const char *)data, len); + + /* FIXME: enabling setting EOS this way seems to break input handling + * in mod_proxy_http2. why? */ + status = h2_stream_write_data(stream, (const char *)data, len, + 0 /*flags & NGHTTP2_FLAG_END_STREAM*/); ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c, "h2_stream(%ld-%d): data_chunk_recv, written %ld bytes", session->id, stream_id, (long)len); @@ -276,12 +313,14 @@ static apr_status_t stream_release(h2_session *session, ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, "h2_stream(%ld-%d): handled, closing", session->id, (int)stream->id); - if (stream->id > session->max_stream_handled) { - session->max_stream_handled = stream->id; + if (H2_STREAM_CLIENT_INITIATED(stream->id)) { + if (stream->id > session->local.completed_max) { + session->local.completed_max = stream->id; + } } } else { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03065) "h2_stream(%ld-%d): closing with err=%d %s", session->id, (int)stream->id, (int)error_code, h2_h2_err_description(error_code)); @@ -323,7 +362,7 @@ static int on_begin_headers_cb(nghttp2_session *ngh2, else { s = h2_session_open_stream((h2_session *)userp, frame->hd.stream_id); } - return s? 0 : NGHTTP2_ERR_CALLBACK_FAILURE; + return s? 
0 : NGHTTP2_ERR_START_STREAM_NOT_ALLOWED; } static int on_header_cb(nghttp2_session *ngh2, const nghttp2_frame *frame, @@ -377,8 +416,8 @@ static int on_frame_recv_cb(nghttp2_session *ng2s, if (APLOGcdebug(session->c)) { char buffer[256]; - frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03066) "h2_session(%ld): recv FRAME[%s], frames=%ld/%ld (r/s)", session->id, buffer, (long)session->frames_received, (long)session->frames_sent); @@ -447,12 +486,12 @@ static int on_frame_recv_cb(nghttp2_session *ng2s, frame->window_update.window_size_increment); break; case NGHTTP2_RST_STREAM: - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03067) "h2_session(%ld-%d): RST_STREAM by client, errror=%d", session->id, (int)frame->hd.stream_id, (int)frame->rst_stream.error_code); stream = h2_session_get_stream(session, frame->hd.stream_id); - if (stream && stream->initiated_on) { + if (stream && stream->request && stream->request->initiated_on) { ++session->pushes_reset; } else { @@ -460,14 +499,16 @@ static int on_frame_recv_cb(nghttp2_session *ng2s, } break; case NGHTTP2_GOAWAY: + session->remote.accepted_max = frame->goaway.last_stream_id; + session->remote.error = frame->goaway.error_code; dispatch_event(session, H2_SESSION_EV_REMOTE_GOAWAY, 0, NULL); break; default: if (APLOGctrace2(session->c)) { char buffer[256]; - frame_print(frame, buffer, - sizeof(buffer)/sizeof(buffer[0])); + h2_util_frame_print(frame, buffer, + sizeof(buffer)/sizeof(buffer[0])); ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, "h2_session: on_frame_rcv %s", buffer); } @@ -514,6 +555,7 @@ static int on_send_data_cb(nghttp2_session *ngh2, unsigned char padlen; int eos; h2_stream *stream; + apr_bucket *b; (void)ngh2; (void)source; @@ -558,16 +600,10 @@ static int on_send_data_cb(nghttp2_session *ngh2, } } else { - apr_bucket *b; - char *header = apr_pcalloc(stream->pool, 10); - memcpy(header, (const char *)framehd, 9); - if (padlen) { - header[9] = (char)padlen; + status = h2_conn_io_write(&session->io, (const char *)framehd, 9); + if (padlen && status == APR_SUCCESS) { + status = h2_conn_io_write(&session->io, (const char *)&padlen, 1); } - b = apr_bucket_pool_create(header, padlen? 
10 : 9, - stream->pool, session->c->bucket_alloc); - status = h2_conn_io_writeb(&session->io, b); - if (status == APR_SUCCESS) { apr_off_t len = length; status = h2_stream_read_to(stream, session->io.output, &len, &eos); @@ -586,7 +622,7 @@ static int on_send_data_cb(nghttp2_session *ngh2, if (status == APR_SUCCESS) { stream->data_frames_sent++; - h2_conn_io_consider_flush(&session->io); + h2_conn_io_consider_pass(&session->io); return 0; } else { @@ -607,8 +643,8 @@ static int on_frame_send_cb(nghttp2_session *ngh2, if (APLOGcdebug(session->c)) { char buffer[256]; - frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0])); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03068) "h2_session(%ld): sent FRAME[%s], frames=%ld/%ld (r/s)", session->id, buffer, (long)session->frames_received, (long)session->frames_sent); @@ -674,40 +710,60 @@ static void h2_session_destroy(h2_session *session) if (APLOGctrace1(session->c)) { ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, "h2_session(%ld): destroy, %d streams open", - session->id, (int)h2_stream_set_size(session->streams)); + session->id, (int)h2_ihash_count(session->streams)); } if (session->mplx) { h2_mplx_set_consumed_cb(session->mplx, NULL, NULL); h2_mplx_release_and_join(session->mplx, session->iowait); session->mplx = NULL; } - if (session->streams) { - h2_stream_set_destroy(session->streams); - session->streams = NULL; - } if (session->pool) { apr_pool_destroy(session->pool); } } -static apr_status_t h2_session_shutdown(h2_session *session, int reason, const char *msg) +static apr_status_t h2_session_shutdown(h2_session *session, int error, + const char *msg, int force_close) { apr_status_t status = APR_SUCCESS; - const char *err = msg; AP_DEBUG_ASSERT(session); - if (!err && reason) { - err = nghttp2_strerror(reason); + if (!msg && error) { + msg = nghttp2_strerror(error); + } + + if (error || force_close) { + /* not a graceful shutdown, we want to leave... + * Do not start further streams that are waiting to be scheduled. + * Find out the max stream id that we habe been processed or + * are still actively working on. + * Remove all streams greater than this number without submitting + * a RST_STREAM frame, since that should be clear from the GOAWAY + * we send. */ + session->local.accepted_max = h2_mplx_shutdown(session->mplx); + session->local.error = error; + } + else { + /* graceful shutdown. we will continue processing all streams + * we have, but no longer accept new ones. Report the max stream + * we have received and discard all new ones. */ } nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, - h2_mplx_get_max_stream_started(session->mplx), - reason, (uint8_t*)err, err? strlen(err):0); + session->local.accepted_max, + error, (uint8_t*)msg, msg? strlen(msg):0); status = nghttp2_session_send(session->ngh2); - h2_conn_io_flush(&session->io); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + if (status == APR_SUCCESS) { + status = h2_conn_io_flush(&session->io); + } + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03069) "session(%ld): sent GOAWAY, err=%d, msg=%s", - session->id, reason, err? err : ""); - dispatch_event(session, H2_SESSION_EV_LOCAL_GOAWAY, reason, err); + session->id, error, msg? 
msg : ""); + dispatch_event(session, H2_SESSION_EV_LOCAL_GOAWAY, error, msg); + + if (force_close) { + h2_mplx_abort(session->mplx); + } + return status; } @@ -734,7 +790,7 @@ static apr_status_t session_pool_cleanup(void *data) * connection when sending the next request, this has the effect * that at least this one request will fail. */ - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, session->c, APLOGNO(03199) "session(%ld): connection disappeared without proper " "goodbye, clients will be confused, should not happen", session->id); @@ -798,6 +854,7 @@ static h2_session *h2_session_create_int(conn_rec *c, if (status != APR_SUCCESS) { return NULL; } + apr_pool_tag(pool, "h2_session"); session = apr_pcalloc(pool, sizeof(h2_session)); if (session) { @@ -808,31 +865,36 @@ static h2_session *h2_session_create_int(conn_rec *c, session->c = c; session->r = r; session->s = h2_ctx_server_get(ctx); + session->pool = pool; session->config = h2_config_sget(session->s); + session->workers = workers; session->state = H2_SESSION_ST_INIT; + session->local.accepting = 1; + session->remote.accepting = 1; - session->pool = pool; apr_pool_pre_cleanup_register(pool, session, session_pool_cleanup); - session->max_stream_count = h2_config_geti(session->config, H2_CONF_MAX_STREAMS); - session->max_stream_mem = h2_config_geti(session->config, H2_CONF_STREAM_MAX_MEM); - + session->max_stream_count = h2_config_geti(session->config, + H2_CONF_MAX_STREAMS); + session->max_stream_mem = h2_config_geti(session->config, + H2_CONF_STREAM_MAX_MEM); + status = apr_thread_cond_create(&session->iowait, session->pool); if (status != APR_SUCCESS) { return NULL; } - session->streams = h2_stream_set_create(session->pool, session->max_stream_count); - - session->workers = workers; + session->streams = h2_ihash_create(session->pool, + offsetof(h2_stream, id)); session->mplx = h2_mplx_create(c, session->pool, session->config, session->s->timeout, workers); h2_mplx_set_consumed_cb(session->mplx, update_window, session); /* Install the connection input filter that feeds the session */ - session->cin = h2_filter_cin_create(session->pool, h2_session_receive, session); + session->cin = h2_filter_cin_create(session->pool, + h2_session_receive, session); ap_add_input_filter("H2_IN", session->cin, r, c); h2_conn_io_init(&session->io, c, session->config, session->pool); @@ -854,8 +916,8 @@ static h2_session *h2_session_create_int(conn_rec *c, h2_session_destroy(session); return NULL; } - nghttp2_option_set_peer_max_concurrent_streams(options, - (uint32_t)session->max_stream_count); + nghttp2_option_set_peer_max_concurrent_streams( + options, (uint32_t)session->max_stream_count); /* We need to handle window updates ourself, otherwise we * get flooded by nghttp2. 
*/ nghttp2_option_set_no_auto_window_update(options, 1); @@ -890,11 +952,15 @@ static h2_session *h2_session_create_int(conn_rec *c, session->push_diary = h2_push_diary_create(session->pool, n); if (APLOGcdebug(c)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, - "session(%ld) created, max_streams=%d, stream_mem=%d, " + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03200) + "h2_session(%ld) created, max_streams=%d, " + "stream_mem=%d, workers_limit=%d, workers_max=%d, " "push_diary(type=%d,N=%d)", session->id, (int)session->max_stream_count, - (int)session->max_stream_mem, session->push_diary->dtype, + (int)session->max_stream_mem, + session->mplx->workers_limit, + session->mplx->workers_max, + session->push_diary->dtype, (int)session->push_diary->N); } } @@ -949,7 +1015,7 @@ static apr_status_t h2_session_start(h2_session *session, int *rv) if (APLOGrdebug(session->r)) { char buffer[128]; h2_util_hex_dump(buffer, 128, (char*)cs, dlen); - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, session->r, + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, session->r, APLOGNO(03070) "upgrading h2c session with HTTP2-Settings: %s -> %s (%d)", s, buffer, (int)dlen); } @@ -994,7 +1060,7 @@ static apr_status_t h2_session_start(h2_session *session, int *rv) ++slen; } - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03201) "h2_session(%ld): start, INITIAL_WINDOW_SIZE=%ld, " "MAX_CONCURRENT_STREAMS=%d", session->id, (long)win_size, (int)session->max_stream_count); @@ -1025,6 +1091,7 @@ static apr_status_t h2_session_start(h2_session *session, int *rv) nghttp2_strerror(*rv)); } } + return status; } @@ -1033,15 +1100,21 @@ typedef struct { int resume_count; } resume_ctx; -static int resume_on_data(void *ctx, h2_stream *stream) +static int resume_on_data(void *ctx, void *val) { + h2_stream *stream = val; resume_ctx *rctx = (resume_ctx*)ctx; h2_session *session = rctx->session; AP_DEBUG_ASSERT(session); AP_DEBUG_ASSERT(stream); if (h2_stream_is_suspended(stream)) { - if (h2_mplx_out_has_data_for(stream->session->mplx, stream->id)) { + apr_status_t status; + apr_off_t len = -1; + int eos; + + status = h2_stream_out_prepare(stream, &len, &eos); + if (status == APR_SUCCESS) { int rv; h2_stream_set_suspended(stream, 0); ++rctx->resume_count; @@ -1050,8 +1123,9 @@ static int resume_on_data(void *ctx, h2_stream *stream) ap_log_cerror(APLOG_MARK, nghttp2_is_fatal(rv)? APLOG_ERR : APLOG_DEBUG, 0, session->c, APLOGNO(02936) - "h2_stream(%ld-%d): resuming %s", - session->id, stream->id, rv? nghttp2_strerror(rv) : ""); + "h2_stream(%ld-%d): resuming %s, len=%ld, eos=%d", + session->id, stream->id, + rv? nghttp2_strerror(rv) : "", (long)len, eos); } } return 1; @@ -1060,7 +1134,7 @@ static int resume_on_data(void *ctx, h2_stream *stream) static int h2_session_resume_streams_with_data(h2_session *session) { AP_DEBUG_ASSERT(session); - if (!h2_stream_set_is_empty(session->streams) + if (!h2_ihash_is_empty(session->streams) && session->mplx && !session->mplx->aborted) { resume_ctx ctx; @@ -1069,7 +1143,7 @@ static int h2_session_resume_streams_with_data(h2_session *session) /* Resume all streams where we have data in the out queue and * which had been suspended before. 
*/ - h2_stream_set_iter(session->streams, resume_on_data, &ctx); + h2_ihash_iter(session->streams, resume_on_data, &ctx); return ctx.resume_count; } return 0; @@ -1077,10 +1151,7 @@ static int h2_session_resume_streams_with_data(h2_session *session) h2_stream *h2_session_get_stream(h2_session *session, int stream_id) { - if (!session->last_stream || stream_id != session->last_stream->id) { - session->last_stream = h2_stream_set_get(session->streams, stream_id); - } - return session->last_stream; + return h2_ihash_get(session->streams, stream_id); } static ssize_t stream_data_cb(nghttp2_session *ng2s, @@ -1119,7 +1190,7 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s, AP_DEBUG_ASSERT(!h2_stream_is_suspended(stream)); - status = h2_stream_prep_read(stream, &nread, &eos); + status = h2_stream_out_prepare(stream, &nread, &eos); if (nread) { *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY; } @@ -1139,16 +1210,11 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s, */ nread = 0; h2_stream_set_suspended(stream, 1); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03071) "h2_stream(%ld-%d): suspending", session->id, (int)stream_id); return NGHTTP2_ERR_DEFERRED; - case APR_EOF: - nread = 0; - eos = 1; - break; - default: nread = 0; ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c, @@ -1164,7 +1230,7 @@ static ssize_t stream_data_cb(nghttp2_session *ng2s, int rv; nh = h2_util_ngheader_make(stream->pool, trailers); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03072) "h2_stream(%ld-%d): submit %d trailers", session->id, (int)stream_id,(int) nh->nvlen); rv = nghttp2_submit_trailer(ng2s, stream->id, nh->nv, nh->nvlen); @@ -1204,18 +1270,21 @@ static apr_status_t submit_response(h2_session *session, h2_stream *stream) rv = NGHTTP2_PROTOCOL_ERROR; } else if (response && response->headers) { - nghttp2_data_provider provider; + nghttp2_data_provider provider, *pprovider = NULL; h2_ngheader *ngh; const h2_priority *prio; - memset(&provider, 0, sizeof(provider)); - provider.source.fd = stream->id; - provider.read_callback = stream_data_cb; - - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03073) "h2_stream(%ld-%d): submit response %d", session->id, stream->id, response->http_status); + if (response->content_length != 0) { + memset(&provider, 0, sizeof(provider)); + provider.source.fd = stream->id; + provider.read_callback = stream_data_cb; + pprovider = &provider; + } + /* If this stream is not a pushed one itself, * and HTTP/2 server push is enabled here, * and the response is in the range 200-299 *), @@ -1231,7 +1300,7 @@ static apr_status_t submit_response(h2_session *session, h2_stream *stream) * as the client, having this resource in its cache, might * also have the pushed ones as well. 
*/ - if (!stream->initiated_on + if (stream->request && !stream->request->initiated_on && H2_HTTP_2XX(response->http_status) && h2_session_push_enabled(session)) { @@ -1247,12 +1316,12 @@ static apr_status_t submit_response(h2_session *session, h2_stream *stream) ngh = h2_util_ngheader_make_res(stream->pool, response->http_status, response->headers); rv = nghttp2_submit_response(session->ngh2, response->stream_id, - ngh->nv, ngh->nvlen, &provider); + ngh->nv, ngh->nvlen, pprovider); } else { int err = H2_STREAM_RST(stream, H2_ERR_PROTOCOL_ERROR); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03074) "h2_stream(%ld-%d): RST_STREAM, err=%d", session->id, stream->id, err); @@ -1261,7 +1330,7 @@ static apr_status_t submit_response(h2_session *session, h2_stream *stream) } stream->submitted = 1; - if (stream->initiated_on) { + if (stream->request && stream->request->initiated_on) { ++session->pushes_submitted; } else { @@ -1291,14 +1360,14 @@ struct h2_stream *h2_session_push(h2_session *session, h2_stream *is, nid = nghttp2_submit_push_promise(session->ngh2, 0, is->id, ngh->nv, ngh->nvlen, NULL); if (nid <= 0) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03075) "h2_stream(%ld-%d): submitting push promise fail: %s", session->id, is->id, nghttp2_strerror(nid)); return NULL; } ++session->pushes_promised; - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03076) "h2_stream(%ld-%d): SERVER_PUSH %d for %s %s on %d", session->id, is->id, nid, push->req->method, push->req->path, is->id); @@ -1317,7 +1386,7 @@ struct h2_stream *h2_session_push(h2_session *session, h2_stream *is, ++session->unsent_promises; } else { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03077) "h2_stream(%ld-%d): failed to create stream obj %d", session->id, is->id, nid); } @@ -1396,7 +1465,7 @@ apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream, id_grandpa = nghttp2_stream_get_stream_id(s_grandpa); rv = nghttp2_session_change_stream_priority(session->ngh2, id_parent, &ps); if (rv < 0) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03202) "h2_stream(%ld-%d): PUSH BEFORE, weight=%d, " "depends=%d, returned=%d", session->id, id_parent, ps.weight, ps.stream_id, rv); @@ -1418,7 +1487,7 @@ apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream, rv = nghttp2_session_change_stream_priority(session->ngh2, stream->id, &ps); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03203) "h2_stream(%ld-%d): PUSH %s, weight=%d, " "depends=%d, returned=%d", session->id, stream->id, ptype, @@ -1438,17 +1507,17 @@ apr_status_t h2_session_stream_destroy(h2_session *session, h2_stream *stream) { apr_pool_t *pool = h2_stream_detach_pool(stream); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + "h2_stream(%ld-%d): cleanup by EOS bucket destroy", + session->id, stream->id); /* this may be called while the session has already freed * some internal structures or even when the mplx is locked. 
*/ if (session->mplx) { h2_mplx_stream_done(session->mplx, stream->id, stream->rst_error); } - if (session->last_stream == stream) { - session->last_stream = NULL; - } if (session->streams) { - h2_stream_set_remove(session->streams, stream->id); + h2_ihash_remove(session->streams, stream->id); } h2_stream_destroy(stream); @@ -1462,90 +1531,13 @@ apr_status_t h2_session_stream_destroy(h2_session *session, h2_stream *stream) return APR_SUCCESS; } -static int frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen) -{ - char scratch[128]; - size_t s_len = sizeof(scratch)/sizeof(scratch[0]); - - switch (frame->hd.type) { - case NGHTTP2_DATA: { - return apr_snprintf(buffer, maxlen, - "DATA[length=%d, flags=%d, stream=%d, padlen=%d]", - (int)frame->hd.length, frame->hd.flags, - frame->hd.stream_id, (int)frame->data.padlen); - } - case NGHTTP2_HEADERS: { - return apr_snprintf(buffer, maxlen, - "HEADERS[length=%d, hend=%d, stream=%d, eos=%d]", - (int)frame->hd.length, - !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS), - frame->hd.stream_id, - !!(frame->hd.flags & NGHTTP2_FLAG_END_STREAM)); - } - case NGHTTP2_PRIORITY: { - return apr_snprintf(buffer, maxlen, - "PRIORITY[length=%d, flags=%d, stream=%d]", - (int)frame->hd.length, - frame->hd.flags, frame->hd.stream_id); - } - case NGHTTP2_RST_STREAM: { - return apr_snprintf(buffer, maxlen, - "RST_STREAM[length=%d, flags=%d, stream=%d]", - (int)frame->hd.length, - frame->hd.flags, frame->hd.stream_id); - } - case NGHTTP2_SETTINGS: { - if (frame->hd.flags & NGHTTP2_FLAG_ACK) { - return apr_snprintf(buffer, maxlen, - "SETTINGS[ack=1, stream=%d]", - frame->hd.stream_id); - } - return apr_snprintf(buffer, maxlen, - "SETTINGS[length=%d, stream=%d]", - (int)frame->hd.length, frame->hd.stream_id); - } - case NGHTTP2_PUSH_PROMISE: { - return apr_snprintf(buffer, maxlen, - "PUSH_PROMISE[length=%d, hend=%d, stream=%d]", - (int)frame->hd.length, - !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS), - frame->hd.stream_id); - } - case NGHTTP2_PING: { - return apr_snprintf(buffer, maxlen, - "PING[length=%d, ack=%d, stream=%d]", - (int)frame->hd.length, - frame->hd.flags&NGHTTP2_FLAG_ACK, - frame->hd.stream_id); - } - case NGHTTP2_GOAWAY: { - size_t len = (frame->goaway.opaque_data_len < s_len)? 
- frame->goaway.opaque_data_len : s_len-1; - memcpy(scratch, frame->goaway.opaque_data, len); - scratch[len+1] = '\0'; - return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s']", - frame->goaway.error_code, scratch); - } - case NGHTTP2_WINDOW_UPDATE: { - return apr_snprintf(buffer, maxlen, - "WINDOW_UPDATE[stream=%d, incr=%d]", - frame->hd.stream_id, - frame->window_update.window_size_increment); - } - default: - return apr_snprintf(buffer, maxlen, - "type=%d[length=%d, flags=%d, stream=%d]", - frame->hd.type, (int)frame->hd.length, - frame->hd.flags, frame->hd.stream_id); - } -} - int h2_session_push_enabled(h2_session *session) { - /* iff we can and they can */ - return (h2_config_geti(session->config, H2_CONF_PUSH) + /* iff we can and they can and want */ + return (session->remote.accepting /* remote GOAWAY received */ + && h2_config_geti(session->config, H2_CONF_PUSH) && nghttp2_session_get_remote_settings(session->ngh2, - NGHTTP2_SETTINGS_ENABLE_PUSH)); + NGHTTP2_SETTINGS_ENABLE_PUSH)); } static apr_status_t h2_session_send(h2_session *session) @@ -1603,13 +1595,13 @@ static apr_status_t h2_session_receive(void *ctx, const char *data, return APR_SUCCESS; } -static apr_status_t h2_session_read(h2_session *session, int block, int loops) +static apr_status_t h2_session_read(h2_session *session, int block) { apr_status_t status, rstatus = APR_EAGAIN; conn_rec *c = session->c; - int i; + apr_off_t read_start = session->io.bytes_read; - for (i = 0; i < loops; ++i) { + while (1) { /* H2_IN filter handles all incoming data against the session. * We just pull at the filter chain to make it happen */ status = ap_get_brigade(c->input_filters, @@ -1634,7 +1626,7 @@ static apr_status_t h2_session_read(h2_session *session, int block, int loops) case APR_TIMEUP: return status; default: - if (!i) { + if (session->io.bytes_read == read_start) { /* first attempt failed */ if (APR_STATUS_IS_ETIMEDOUT(status) || APR_STATUS_IS_ECONNABORTED(status) @@ -1647,7 +1639,7 @@ static apr_status_t h2_session_read(h2_session *session, int block, int loops) } else { /* uncommon status, log on INFO so that we see this */ - ap_log_cerror( APLOG_MARK, APLOG_INFO, status, c, + ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(02950) "h2_session(%ld): error reading, terminating", session->id); @@ -1661,16 +1653,56 @@ static apr_status_t h2_session_read(h2_session *session, int block, int loops) if (!is_accepting_streams(session)) { break; } + if ((session->io.bytes_read - read_start) > (64*1024)) { + /* read enough in one go, give write a chance */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c, + "h2_session(%ld): read 64k, returning", session->id); + break; + } } return rstatus; } +static int unsubmitted_iter(void *ctx, void *val) +{ + h2_stream *stream = val; + if (h2_stream_needs_submit(stream)) { + *((int *)ctx) = 1; + return 0; + } + return 1; +} + +static int has_unsubmitted_streams(h2_session *session) +{ + int has_unsubmitted = 0; + h2_ihash_iter(session->streams, unsubmitted_iter, &has_unsubmitted); + return has_unsubmitted; +} + +static int suspended_iter(void *ctx, void *val) +{ + h2_stream *stream = val; + if (h2_stream_is_suspended(stream)) { + *((int *)ctx) = 1; + return 0; + } + return 1; +} + +static int has_suspended_streams(h2_session *session) +{ + int has_suspended = 0; + h2_ihash_iter(session->streams, suspended_iter, &has_suspended); + return has_suspended; +} + static apr_status_t h2_session_submit(h2_session *session) { apr_status_t status = APR_EAGAIN; h2_stream *stream; 
- if (h2_stream_set_has_unsubmitted(session->streams)) { + if (has_unsubmitted_streams(session)) { /* If we have responses ready, submit them now. */ while ((stream = h2_mplx_next_submit(session->mplx, session->streams))) { status = submit_response(session, stream); @@ -1727,7 +1759,7 @@ static int is_accepting_streams(h2_session *session) static void transit(h2_session *session, const char *action, h2_session_state nstate) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03078) "h2_session(%ld): transit [%s] -- %s --> [%s]", session->id, state_name(session->state), action, state_name(nstate)); session->state = nstate; @@ -1748,6 +1780,8 @@ static void h2_session_ev_init(h2_session *session, int arg, const char *msg) static void h2_session_ev_local_goaway(h2_session *session, int arg, const char *msg) { + session->local.accepting = 0; + cleanup_streams(session); switch (session->state) { case H2_SESSION_ST_LOCAL_SHUTDOWN: /* already did that? */ @@ -1765,6 +1799,8 @@ static void h2_session_ev_local_goaway(h2_session *session, int arg, const char static void h2_session_ev_remote_goaway(h2_session *session, int arg, const char *msg) { + session->remote.accepting = 0; + cleanup_streams(session); switch (session->state) { case H2_SESSION_ST_REMOTE_SHUTDOWN: /* already received that? */ @@ -1791,9 +1827,9 @@ static void h2_session_ev_conn_error(h2_session *session, int arg, const char *m break; default: - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, "h2_session(%ld): conn error -> shutdown", session->id); - h2_session_shutdown(session, arg, msg); + h2_session_shutdown(session, arg, msg, 0); break; } } @@ -1808,9 +1844,9 @@ static void h2_session_ev_proto_error(h2_session *session, int arg, const char * break; default: - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, "h2_session(%ld): proto error -> shutdown", session->id); - h2_session_shutdown(session, arg, msg); + h2_session_shutdown(session, arg, msg, 0); break; } } @@ -1822,7 +1858,7 @@ static void h2_session_ev_conn_timeout(h2_session *session, int arg, const char transit(session, "conn timeout", H2_SESSION_ST_DONE); break; default: - h2_session_shutdown(session, arg, msg); + h2_session_shutdown(session, arg, msg, 1); transit(session, "conn timeout", H2_SESSION_ST_DONE); break; } @@ -1839,27 +1875,36 @@ static void h2_session_ev_no_io(h2_session *session, int arg, const char *msg) * CPU cycles. Ideally, we'd like to do a blocking read, but that * is not possible if we have scheduled tasks and wait * for them to produce something. */ - if (h2_stream_set_is_empty(session->streams)) { + if (h2_conn_io_flush(&session->io) != APR_SUCCESS) { + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); + } + if (h2_ihash_is_empty(session->streams)) { if (!is_accepting_streams(session)) { /* We are no longer accepting new streams and have * finished processing existing ones. Time to leave. */ - h2_session_shutdown(session, arg, msg); + h2_session_shutdown(session, arg, msg, 0); transit(session, "no io", H2_SESSION_ST_DONE); } else { + apr_time_t now = apr_time_now(); /* When we have no streams, no task event are possible, * switch to blocking reads */ transit(session, "no io", H2_SESSION_ST_IDLE); - session->idle_until = apr_time_now() + session->s->keep_alive_timeout; + session->idle_until = (session->remote.emitted_count? 
+ session->s->keep_alive_timeout : + session->s->timeout) + now; + session->keep_sync_until = now + apr_time_from_sec(1); } } - else if (!h2_stream_set_has_unsubmitted(session->streams) - && !h2_stream_set_has_suspended(session->streams)) { + else if (!has_unsubmitted_streams(session) + && !has_suspended_streams(session)) { /* none of our streams is waiting for a response or * new output data from task processing, - * switch to blocking reads. */ + * switch to blocking reads. We are probably waiting on + * window updates. */ transit(session, "no io", H2_SESSION_ST_IDLE); session->idle_until = apr_time_now() + session->s->timeout; + session->keep_sync_until = session->idle_until; } else { /* Unable to do blocking reads, as we wait on events from @@ -1890,6 +1935,7 @@ static void h2_session_ev_data_read(h2_session *session, int arg, const char *ms { switch (session->state) { case H2_SESSION_ST_IDLE: + case H2_SESSION_ST_WAIT: transit(session, "data read", H2_SESSION_ST_BUSY); break; /* fall through */ @@ -1919,7 +1965,20 @@ static void h2_session_ev_mpm_stopping(h2_session *session, int arg, const char /* nop */ break; default: - h2_session_shutdown(session, arg, msg); + h2_session_shutdown(session, arg, msg, 0); + break; + } +} + +static void h2_session_ev_pre_close(h2_session *session, int arg, const char *msg) +{ + switch (session->state) { + case H2_SESSION_ST_DONE: + case H2_SESSION_ST_LOCAL_SHUTDOWN: + /* nop */ + break; + default: + h2_session_shutdown(session, arg, msg, 1); break; } } @@ -1961,6 +2020,9 @@ static void dispatch_event(h2_session *session, h2_session_event_t ev, case H2_SESSION_EV_MPM_STOPPING: h2_session_ev_mpm_stopping(session, arg, msg); break; + case H2_SESSION_EV_PRE_CLOSE: + h2_session_ev_pre_close(session, arg, msg); + break; default: ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, "h2_session(%ld): unknown event %d", @@ -1975,11 +2037,29 @@ static void dispatch_event(h2_session *session, h2_session_event_t ev, static const int MAX_WAIT_MICROS = 200 * 1000; +static void update_child_status(h2_session *session, int status, const char *msg) +{ + /* Assume that we also change code/msg when something really happened and + * avoid updating the scoreboard in between */ + if (session->last_status_code != status + || session->last_status_msg != msg) { + apr_snprintf(session->status, sizeof(session->status), + "%s, streams: %d/%d/%d/%d/%d (open/recv/resp/push/rst)", + msg? 
msg : "-", + (int)h2_ihash_count(session->streams), + (int)session->remote.emitted_count, + (int)session->responses_submitted, + (int)session->pushes_submitted, + (int)session->pushes_reset + session->streams_reset); + ap_update_child_status_descr(session->c->sbh, status, session->status); + } +} + apr_status_t h2_session_process(h2_session *session, int async) { apr_status_t status = APR_SUCCESS; conn_rec *c = session->c; - int rv, have_written, have_read, mpm_state; + int rv, have_written, have_read, mpm_state, no_streams; ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c, "h2_session(%ld): process start, async=%d", session->id, async); @@ -1998,15 +2078,19 @@ apr_status_t h2_session_process(h2_session *session, int async) } } + session->status[0] = '\0'; + switch (session->state) { case H2_SESSION_ST_INIT: + ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c); if (!h2_is_acceptable_connection(c, 1)) { - h2_session_shutdown(session, NGHTTP2_INADEQUATE_SECURITY, NULL); + update_child_status(session, SERVER_BUSY_READ, "inadequate security"); + h2_session_shutdown(session, NGHTTP2_INADEQUATE_SECURITY, NULL, 1); } else { - ap_update_child_status(c->sbh, SERVER_BUSY_READ, NULL); + update_child_status(session, SERVER_BUSY_READ, "init"); status = h2_session_start(session, &rv); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03079) "h2_session(%ld): started on %s:%d", session->id, session->s->server_hostname, c->local_addr->port); @@ -2018,33 +2102,92 @@ apr_status_t h2_session_process(h2_session *session, int async) break; case H2_SESSION_ST_IDLE: - h2_filter_cin_timeout_set(session->cin, h2_stream_set_is_empty(session->streams)? - session->s->keep_alive_timeout : session->s->timeout); - status = h2_session_read(session, 1, 10); - if (status == APR_SUCCESS) { - have_read = 1; - dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL); - } - else if (status == APR_EAGAIN) { - /* nothing to read */ - } - else if (APR_STATUS_IS_TIMEUP(status)) { - dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout"); + no_streams = h2_ihash_is_empty(session->streams); + update_child_status(session, (no_streams? SERVER_BUSY_KEEPALIVE + : SERVER_BUSY_READ), "idle"); + /* make certain, the client receives everything before we idle */ + if (!session->keep_sync_until + && async && no_streams && !session->r && session->remote.emitted_count) { + ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c, + "h2_session(%ld): async idle, nonblock read", session->id); + /* We do not return to the async mpm immediately, since under + * load, mpms show the tendency to throw keep_alive connections + * away very rapidly. + * So, if we are still processing streams, we wait for the + * normal timeout first and, on timeout, close. + * If we have no streams, we still wait a short amount of + * time here for the next frame to arrive, before handing + * it to keep_alive processing of the mpm. 
+ */ + status = h2_session_read(session, 0); + + if (status == APR_SUCCESS) { + have_read = 1; + dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL); + } + else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) { + if (apr_time_now() > session->idle_until) { + dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, NULL); + } + else { + status = APR_EAGAIN; + goto out; + } + } + else { + ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, c, + "h2_session(%ld): idle, no data, error", + session->id); + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "timeout"); + } } else { - dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); + /* We wait in smaller increments, using a 1 second timeout. + * That gives us the chance to check for MPMQ_STOPPING often. + */ + status = h2_mplx_idle(session->mplx); + if (status != APR_SUCCESS) { + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, + H2_ERR_ENHANCE_YOUR_CALM, "less is more"); + } + h2_filter_cin_timeout_set(session->cin, apr_time_from_sec(1)); + status = h2_session_read(session, 1); + if (status == APR_SUCCESS) { + have_read = 1; + dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL); + } + else if (status == APR_EAGAIN) { + /* nothing to read */ + } + else if (APR_STATUS_IS_TIMEUP(status)) { + apr_time_t now = apr_time_now(); + if (now > session->keep_sync_until) { + /* if we are on an async mpm, now is the time that + * we may dare to pass control to it. */ + session->keep_sync_until = 0; + } + if (now > session->idle_until) { + dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout"); + } + /* continue reading handling */ + } + else { + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "error"); + } } + break; case H2_SESSION_ST_BUSY: case H2_SESSION_ST_LOCAL_SHUTDOWN: case H2_SESSION_ST_REMOTE_SHUTDOWN: if (nghttp2_session_want_read(session->ngh2)) { - ap_update_child_status(c->sbh, SERVER_BUSY_READ, NULL); + ap_update_child_status(session->c->sbh, SERVER_BUSY_READ, NULL); h2_filter_cin_timeout_set(session->cin, session->s->timeout); - status = h2_session_read(session, 0, 10); + status = h2_session_read(session, 0); if (status == APR_SUCCESS) { have_read = 1; + update_child_status(session, SERVER_BUSY_READ, "busy"); dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL); } else if (status == APR_EAGAIN) { @@ -2059,7 +2202,7 @@ apr_status_t h2_session_process(h2_session *session, int async) } } - if (!h2_stream_set_is_empty(session->streams)) { + if (!h2_ihash_is_empty(session->streams)) { /* resume any streams for which data is available again */ h2_session_resume_streams_with_data(session); /* Submit any responses/push_promises that are ready */ @@ -2082,6 +2225,7 @@ apr_status_t h2_session_process(h2_session *session, int async) } if (nghttp2_session_want_write(session->ngh2)) { + ap_update_child_status(session->c->sbh, SERVER_BUSY_WRITE, NULL); status = h2_session_send(session); if (status == APR_SUCCESS) { have_written = 1; @@ -2107,12 +2251,17 @@ apr_status_t h2_session_process(h2_session *session, int async) if (session->wait_us <= 0) { session->wait_us = 10; session->start_wait = apr_time_now(); + if (h2_conn_io_flush(&session->io) != APR_SUCCESS) { + dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL); + } + update_child_status(session, SERVER_BUSY_READ, "wait"); } else if ((apr_time_now() - session->start_wait) >= session->s->timeout) { /* waited long enough */ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, APR_TIMEUP, c, "h2_session: wait for data"); dispatch_event(session, 
H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout"); + break; } else { /* repeating, increase timer for graceful backoff */ @@ -2132,38 +2281,42 @@ apr_status_t h2_session_process(h2_session *session, int async) } else if (status == APR_TIMEUP) { /* go back to checking all inputs again */ - transit(session, "wait cycle", H2_SESSION_ST_BUSY); + transit(session, "wait cycle", session->local.accepting? + H2_SESSION_ST_BUSY : H2_SESSION_ST_LOCAL_SHUTDOWN); } else { - h2_session_shutdown(session, H2_ERR_INTERNAL_ERROR, "cond wait error"); + ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, c, + "h2_session(%ld): waiting on conditional", + session->id); + h2_session_shutdown(session, H2_ERR_INTERNAL_ERROR, + "cond wait error", 0); } break; case H2_SESSION_ST_DONE: + update_child_status(session, SERVER_CLOSING, "done"); status = APR_EOF; goto out; default: ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c, + APLOGNO(03080) "h2_session(%ld): unknown state %d", session->id, session->state); dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, 0, NULL); break; } - if (have_written) { - h2_conn_io_flush(&session->io); - } - else if (!nghttp2_session_want_read(session->ngh2) + if (!nghttp2_session_want_read(session->ngh2) && !nghttp2_session_want_write(session->ngh2)) { dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL); } + if (session->reprioritize) { + h2_mplx_reprioritize(session->mplx, stream_pri_cmp, session); + session->reprioritize = 0; + } } out: - if (have_written) { - h2_conn_io_flush(&session->io); - } - ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c, "h2_session(%ld): [%s] process returns", session->id, state_name(session->state)); @@ -2179,10 +2332,17 @@ apr_status_t h2_session_process(h2_session *session, int async) if (session->state == H2_SESSION_ST_DONE) { if (!session->eoc_written) { session->eoc_written = 1; - h2_conn_io_write_eoc(&session->io, - h2_bucket_eoc_create(session->c->bucket_alloc, session)); + h2_conn_io_write_eoc(&session->io, session); } } return status; } + +apr_status_t h2_session_pre_close(h2_session *session, int async) +{ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + "h2_session(%ld): pre_close", session->id); + dispatch_event(session, H2_SESSION_EV_PRE_CLOSE, 0, "timeout"); + return APR_SUCCESS; +} diff --git a/mod_http2/h2_session.h b/mod_http2/h2_session.h index 7b834bf3..ea5f82a3 100644 --- a/mod_http2/h2_session.h +++ b/mod_http2/h2_session.h @@ -37,11 +37,14 @@ * */ +#include "h2.h" + struct apr_thread_mutext_t; struct apr_thread_cond_t; struct h2_ctx; struct h2_config; struct h2_filter_cin; +struct h2_ihash_t; struct h2_mplx; struct h2_priority; struct h2_push; @@ -54,16 +57,6 @@ struct h2_workers; struct nghttp2_session; -typedef enum { - H2_SESSION_ST_INIT, /* send initial SETTINGS, etc. 
*/ - H2_SESSION_ST_DONE, /* finished, connection close */ - H2_SESSION_ST_IDLE, /* nothing to write, expecting data inc */ - H2_SESSION_ST_BUSY, /* read/write without stop */ - H2_SESSION_ST_WAIT, /* waiting for tasks reporting back */ - H2_SESSION_ST_LOCAL_SHUTDOWN, /* we announced GOAWAY */ - H2_SESSION_ST_REMOTE_SHUTDOWN, /* client announced GOAWAY */ -} h2_session_state; - typedef enum { H2_SESSION_EV_INIT, /* session was initialized */ H2_SESSION_EV_LOCAL_GOAWAY, /* we send a GOAWAY */ @@ -76,6 +69,7 @@ typedef enum { H2_SESSION_EV_DATA_READ, /* connection data has been read */ H2_SESSION_EV_NGH2_DONE, /* nghttp2 wants neither read nor write anything */ H2_SESSION_EV_MPM_STOPPING, /* the process is stopping */ + H2_SESSION_EV_PRE_CLOSE, /* connection will close after this */ } h2_session_event_t; typedef struct h2_session { @@ -86,16 +80,29 @@ typedef struct h2_session { * of 'h2c', NULL otherwise */ server_rec *s; /* server/vhost we're starting on */ const struct h2_config *config; /* Relevant config for this session */ - + apr_pool_t *pool; /* pool to use in session */ + struct h2_mplx *mplx; /* multiplexer for stream data */ + struct h2_workers *workers; /* for executing stream tasks */ + struct h2_filter_cin *cin; /* connection input filter context */ + h2_conn_io io; /* io on httpd conn filters */ + struct h2_ihash_t *streams; /* streams handled by this session */ + struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */ + h2_session_state state; /* state session is in */ + + h2_session_props local; /* properties of local session */ + h2_session_props remote; /* properites of remote session */ + unsigned int reprioritize : 1; /* scheduled streams priority changed */ unsigned int eoc_written : 1; /* h2 eoc bucket written */ + unsigned int flush : 1; /* flushing output necessary */ apr_interval_time_t wait_us; /* timout during BUSY_WAIT state, micro secs */ + struct h2_push_diary *push_diary; /* remember pushes, avoid duplicates */ + int unsent_submits; /* number of submitted, but not yet written responses. */ int unsent_promises; /* number of submitted, but not yet written push promised */ - int requests_received; /* number of http/2 requests received */ int responses_submitted; /* number of http/2 responses submitted */ int streams_reset; /* number of http/2 streams reset by client */ int pushes_promised; /* number of http/2 push promises submitted */ @@ -105,33 +112,21 @@ typedef struct h2_session { apr_size_t frames_received; /* number of http/2 frames received */ apr_size_t frames_sent; /* number of http/2 frames sent */ - int max_stream_received; /* highest stream id created */ - int max_stream_handled; /* highest stream id completed */ - apr_size_t max_stream_count; /* max number of open streams */ apr_size_t max_stream_mem; /* max buffer memory for a single stream */ - + apr_time_t start_wait; /* Time we started waiting for sth. 
to happen */ apr_time_t idle_until; /* Time we shut down due to sheer boredom */ + apr_time_t keep_sync_until; /* Time we sync wait until passing to async mpm */ - apr_pool_t *pool; /* pool to use in session handling */ apr_bucket_brigade *bbtmp; /* brigade for keeping temporary data */ struct apr_thread_cond_t *iowait; /* our cond when trywaiting for data */ - struct h2_filter_cin *cin; /* connection input filter context */ - h2_conn_io io; /* io on httpd conn filters */ - - struct h2_mplx *mplx; /* multiplexer for stream data */ - - struct h2_stream *last_stream; /* last stream worked with */ - struct h2_stream_set *streams; /* streams handled by this session */ - apr_pool_t *spare; /* spare stream pool */ - struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */ - struct h2_workers *workers; /* for executing stream tasks */ - - struct h2_push_diary *push_diary; /* remember pushes, avoid duplicates */ + char status[64]; /* status message for scoreboard */ + int last_status_code; /* the one already reported */ + const char *last_status_msg; /* the one already reported */ } h2_session; @@ -165,6 +160,11 @@ h2_session *h2_session_rcreate(request_rec *r, struct h2_ctx *ctx, */ apr_status_t h2_session_process(h2_session *session, int async); +/** + * Last chance to do anything before the connection is closed. + */ +apr_status_t h2_session_pre_close(h2_session *session, int async); + /** * Cleanup the session and all objects it still contains. This will not * destroy h2_task instances that have not finished yet. diff --git a/mod_http2/h2_stream.c b/mod_http2/h2_stream.c index 0a5af7eb..0a1dadf9 100644 --- a/mod_http2/h2_stream.c +++ b/mod_http2/h2_stream.c @@ -41,13 +41,6 @@ #include "h2_util.h" -#define H2_STREAM_IN(lvl,s,msg) \ - do { \ - if (APLOG_C_IS_LEVEL((s)->session->c,lvl)) \ - h2_util_bb_log((s)->session->c,(s)->id,lvl,msg,(s)->bbin); \ - } while(0) - - static int state_transition[][7] = { /* ID OP RL RR CI CO CL */ /*ID*/{ 1, 0, 0, 0, 0, 0, 0 }, @@ -67,7 +60,7 @@ static int set_state(h2_stream *stream, h2_stream_state_t state) return 1; } - ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c, + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c, APLOGNO(03081) "h2_stream(%ld-%d): invalid state transition from %d to %d", stream->session->id, stream->id, stream->state, state); return 0; @@ -120,7 +113,7 @@ static int close_output(h2_stream *stream) return 1; } -static int input_open(h2_stream *stream) +static int input_open(const h2_stream *stream) { switch (stream->state) { case H2_STREAM_ST_OPEN: @@ -144,23 +137,18 @@ static int output_open(h2_stream *stream) static h2_sos *h2_sos_mplx_create(h2_stream *stream, h2_response *response); -h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session) +h2_stream *h2_stream_open(int id, apr_pool_t *pool, h2_session *session) { h2_stream *stream = apr_pcalloc(pool, sizeof(h2_stream)); stream->id = id; stream->state = H2_STREAM_ST_IDLE; stream->pool = pool; stream->session = session; - return stream; -} - -h2_stream *h2_stream_open(int id, apr_pool_t *pool, h2_session *session) -{ - h2_stream *stream = h2_stream_create(id, pool, session); set_state(stream, H2_STREAM_ST_OPEN); - stream->request = h2_request_create(id, pool, session->config); + stream->request = h2_request_create(id, pool, + h2_config_geti(session->config, H2_CONF_SER_HEADERS)); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03082) "h2_stream(%ld-%d): 
opened", session->id, stream->id); return stream; } @@ -168,11 +156,6 @@ h2_stream *h2_stream_open(int id, apr_pool_t *pool, h2_session *session) apr_status_t h2_stream_destroy(h2_stream *stream) { AP_DEBUG_ASSERT(stream); - if (stream->request) { - h2_request_destroy(stream->request); - stream->request = NULL; - } - if (stream->pool) { apr_pool_destroy(stream->pool); } @@ -242,6 +225,9 @@ apr_status_t h2_stream_set_request(h2_stream *stream, request_rec *r) } set_state(stream, H2_STREAM_ST_OPEN); status = h2_request_rwrite(stream->request, r); + stream->request->serialize = h2_config_geti(h2_config_rget(r), + H2_CONF_SER_HEADERS); + return status; } @@ -249,7 +235,7 @@ void h2_stream_set_h2_request(h2_stream *stream, int initiated_on, const h2_request *req) { h2_request_copy(stream->pool, stream->request, req); - stream->initiated_on = initiated_on; + stream->request->initiated_on = initiated_on; stream->request->eoh = 0; } @@ -297,8 +283,6 @@ apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled, if (status == APR_SUCCESS) { if (!eos) { stream->request->body = 1; - stream->bbin = apr_brigade_create(stream->pool, - stream->session->c->bucket_alloc); } stream->input_remaining = stream->request->content_length; @@ -324,38 +308,11 @@ apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled, return status; } -int h2_stream_is_scheduled(h2_stream *stream) +int h2_stream_is_scheduled(const h2_stream *stream) { return stream->scheduled; } -static apr_status_t h2_stream_input_flush(h2_stream *stream) -{ - apr_status_t status = APR_SUCCESS; - if (stream->bbin && !APR_BRIGADE_EMPTY(stream->bbin)) { - - status = h2_mplx_in_write(stream->session->mplx, stream->id, stream->bbin); - if (status != APR_SUCCESS) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->session->mplx->c, - "h2_stream(%ld-%d): flushing input data", - stream->session->id, stream->id); - } - } - return status; -} - -static apr_status_t input_flush(apr_bucket_brigade *bb, void *ctx) -{ - (void)bb; - return h2_stream_input_flush(ctx); -} - -static apr_status_t input_add_data(h2_stream *stream, - const char *data, size_t len) -{ - return apr_brigade_write(stream->bbin, input_flush, stream, data, len); -} - apr_status_t h2_stream_close_input(h2_stream *stream) { apr_status_t status = APR_SUCCESS; @@ -369,28 +326,23 @@ apr_status_t h2_stream_close_input(h2_stream *stream) return APR_ECONNRESET; } - H2_STREAM_IN(APLOG_TRACE2, stream, "close_pre"); - if (close_input(stream) && stream->bbin) { - status = h2_stream_input_flush(stream); - if (status == APR_SUCCESS) { - status = h2_mplx_in_close(stream->session->mplx, stream->id); - } + if (close_input(stream)) { + status = h2_mplx_in_close(stream->session->mplx, stream->id); } - H2_STREAM_IN(APLOG_TRACE2, stream, "close_post"); return status; } apr_status_t h2_stream_write_data(h2_stream *stream, - const char *data, size_t len) + const char *data, size_t len, int eos) { apr_status_t status = APR_SUCCESS; AP_DEBUG_ASSERT(stream); - if (input_closed(stream) || !stream->request->eoh || !stream->bbin) { + if (input_closed(stream) || !stream->request->eoh) { ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c, - "h2_stream(%ld-%d): writing denied, closed=%d, eoh=%d, bbin=%d", + "h2_stream(%ld-%d): writing denied, closed=%d, eoh=%d", stream->session->id, stream->id, input_closed(stream), - stream->request->eoh, !!stream->bbin); + stream->request->eoh); return APR_EINVAL; } @@ -398,7 +350,6 @@ apr_status_t h2_stream_write_data(h2_stream 
*stream, "h2_stream(%ld-%d): add %ld input bytes", stream->session->id, stream->id, (long)len); - H2_STREAM_IN(APLOG_TRACE2, stream, "write_data_pre"); if (!stream->request->chunked) { stream->input_remaining -= len; if (stream->input_remaining < 0) { @@ -414,11 +365,10 @@ apr_status_t h2_stream_write_data(h2_stream *stream, } } - status = input_add_data(stream, data, len); - if (status == APR_SUCCESS) { - status = h2_stream_input_flush(stream); + status = h2_mplx_in_write(stream->session->mplx, stream->id, data, len, eos); + if (eos) { + close_input(stream); } - H2_STREAM_IN(APLOG_TRACE2, stream, "write_data_post"); return status; } @@ -431,23 +381,23 @@ void h2_stream_set_suspended(h2_stream *stream, int suspended) stream->session->id, stream->id, stream->suspended); } -int h2_stream_is_suspended(h2_stream *stream) +int h2_stream_is_suspended(const h2_stream *stream) { AP_DEBUG_ASSERT(stream); return stream->suspended; } -apr_status_t h2_stream_prep_read(h2_stream *stream, - apr_off_t *plen, int *peos) +apr_status_t h2_stream_out_prepare(h2_stream *stream, + apr_off_t *plen, int *peos) { if (stream->rst_error) { + *plen = 0; + *peos = 1; return APR_ECONNRESET; } - if (!stream->sos) { - return APR_EGENERAL; - } - return stream->sos->prep_read(stream->sos, plen, peos); + AP_DEBUG_ASSERT(stream->sos); + return stream->sos->prepare(stream->sos, plen, peos); } apr_status_t h2_stream_readx(h2_stream *stream, @@ -475,12 +425,12 @@ apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb, return stream->sos->read_to(stream->sos, bb, plen, peos); } -int h2_stream_input_is_open(h2_stream *stream) +int h2_stream_input_is_open(const h2_stream *stream) { return input_open(stream); } -int h2_stream_needs_submit(h2_stream *stream) +int h2_stream_needs_submit(const h2_stream *stream) { switch (stream->state) { case H2_STREAM_ST_OPEN: @@ -526,7 +476,7 @@ const h2_priority *h2_stream_get_priority(h2_stream *stream) { h2_response *response = h2_stream_get_response(stream); - if (stream->initiated_on && response) { + if (response && stream->request && stream->request->initiated_on) { const char *ctype = apr_table_get(response->headers, "content-type"); if (ctype) { /* FIXME: Not good enough, config needs to come from request->server */ @@ -543,7 +493,9 @@ const h2_priority *h2_stream_get_priority(h2_stream *stream) typedef struct h2_sos_mplx { h2_mplx *m; apr_bucket_brigade *bb; + apr_bucket_brigade *tmp; apr_table_t *trailers; + apr_off_t buffer_size; } h2_sos_mplx; #define H2_SOS_MPLX_OUT(lvl,msos,msg) \ @@ -553,129 +505,82 @@ typedef struct h2_sos_mplx { } while(0) -static apr_status_t h2_sos_mplx_read_to(h2_sos *sos, apr_bucket_brigade *bb, - apr_off_t *plen, int *peos) +static apr_status_t mplx_transfer(h2_sos_mplx *msos, int stream_id, + apr_pool_t *pool) { - h2_sos_mplx *msos = sos->ctx; - apr_status_t status = APR_SUCCESS; + apr_status_t status; apr_table_t *trailers = NULL; - - H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx read_to_pre"); - - if (APR_BRIGADE_EMPTY(msos->bb)) { - apr_off_t tlen = *plen; - int eos; - status = h2_mplx_out_read_to(msos->m, sos->stream->id, - msos->bb, &tlen, &eos, &trailers); - } - if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(msos->bb)) { - status = h2_transfer_brigade(bb, msos->bb, sos->stream->pool, - plen, peos); + if (!msos->tmp) { + msos->tmp = apr_brigade_create(msos->bb->p, msos->bb->bucket_alloc); } - else { - *plen = 0; - *peos = 0; + status = h2_mplx_out_get_brigade(msos->m, stream_id, msos->tmp, + msos->buffer_size-1, &trailers); + if 
(!APR_BRIGADE_EMPTY(msos->tmp)) { + h2_transfer_brigade(msos->bb, msos->tmp, pool); } - if (trailers) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, msos->m->c, - "h2_stream(%ld-%d): read_to, saving trailers", - msos->m->id, sos->stream->id); msos->trailers = trailers; } - + return status; +} + +static apr_status_t h2_sos_mplx_read_to(h2_sos *sos, apr_bucket_brigade *bb, + apr_off_t *plen, int *peos) +{ + h2_sos_mplx *msos = sos->ctx; + apr_status_t status; + + status = h2_append_brigade(bb, msos->bb, plen, peos); if (status == APR_SUCCESS && !*peos && !*plen) { status = APR_EAGAIN; + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, msos->m->c, + "h2_stream(%ld-%d): read_to, len=%ld eos=%d", + msos->m->id, sos->stream->id, (long)*plen, *peos); } - H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx read_to_post"); ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, msos->m->c, "h2_stream(%ld-%d): read_to, len=%ld eos=%d", msos->m->id, sos->stream->id, (long)*plen, *peos); return status; } -static apr_status_t h2_sos_mplx_prep_read(h2_sos *sos, apr_off_t *plen, int *peos) +static apr_status_t h2_sos_mplx_readx(h2_sos *sos, h2_io_data_cb *cb, void *ctx, + apr_off_t *plen, int *peos) { h2_sos_mplx *msos = sos->ctx; apr_status_t status = APR_SUCCESS; - const char *src; - apr_table_t *trailers = NULL; - int test_read = (*plen == 0); - H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx prep_read_pre"); - if (!APR_BRIGADE_EMPTY(msos->bb)) { - src = "stream"; - status = h2_util_bb_avail(msos->bb, plen, peos); - if (!test_read && status == APR_SUCCESS && !*peos && !*plen) { - apr_brigade_cleanup(msos->bb); - return h2_sos_mplx_prep_read(sos, plen, peos); - } - } - else { - src = "mplx"; - status = h2_mplx_out_readx(msos->m, sos->stream->id, - NULL, NULL, plen, peos, &trailers); - if (trailers) { - msos->trailers = trailers; - } - } - - if (!test_read && status == APR_SUCCESS && !*peos && !*plen) { + status = h2_util_bb_readx(msos->bb, cb, ctx, plen, peos); + if (status == APR_SUCCESS && !*peos && !*plen) { status = APR_EAGAIN; } - - H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx prep_read_post"); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, msos->m->c, - "h2_stream(%ld-%d): prep_read %s, len=%ld eos=%d, trailers=%s", - msos->m->id, sos->stream->id, src, (long)*plen, *peos, - msos->trailers? 
"yes" : "no"); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, msos->m->c, + "h2_stream(%ld-%d): readx, len=%ld eos=%d", + msos->m->id, sos->stream->id, (long)*plen, *peos); return status; } -static apr_status_t h2_sos_mplx_readx(h2_sos *sos, h2_io_data_cb *cb, void *ctx, - apr_off_t *plen, int *peos) +static apr_status_t h2_sos_mplx_prepare(h2_sos *sos, apr_off_t *plen, int *peos) { h2_sos_mplx *msos = sos->ctx; apr_status_t status = APR_SUCCESS; - apr_table_t *trailers = NULL; - const char *src; - H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx readx_pre"); - *peos = 0; - if (!APR_BRIGADE_EMPTY(msos->bb)) { - apr_off_t origlen = *plen; - - src = "stream"; - status = h2_util_bb_readx(msos->bb, cb, ctx, plen, peos); - if (status == APR_SUCCESS && !*peos && !*plen) { - apr_brigade_cleanup(msos->bb); - *plen = origlen; - return h2_sos_mplx_readx(sos, cb, ctx, plen, peos); - } - } - else { - src = "mplx"; - status = h2_mplx_out_readx(msos->m, sos->stream->id, - cb, ctx, plen, peos, &trailers); - } + H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx prepare_pre"); - if (trailers) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, msos->m->c, - "h2_stream(%ld-%d): readx, saving trailers", - msos->m->id, sos->stream->id); - msos->trailers = trailers; + if (APR_BRIGADE_EMPTY(msos->bb)) { + status = mplx_transfer(msos, sos->stream->id, sos->stream->pool); } + h2_util_bb_avail(msos->bb, plen, peos); - if (status == APR_SUCCESS && !*peos && !*plen) { + H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx prepare_post"); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, msos->m->c, + "h2_stream(%ld-%d): prepare, len=%ld eos=%d, trailers=%s", + msos->m->id, sos->stream->id, (long)*plen, *peos, + msos->trailers? "yes" : "no"); + if (!*peos && !*plen) { status = APR_EAGAIN; } - H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_stream readx_post"); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, msos->m->c, - "h2_stream(%ld-%d): readx %s, len=%ld eos=%d", - msos->m->id, sos->stream->id, src, (long)*plen, *peos); - return status; } @@ -692,13 +597,8 @@ static apr_status_t h2_sos_mplx_buffer(h2_sos *sos, apr_bucket_brigade *bb) apr_status_t status = APR_SUCCESS; if (bb && !APR_BRIGADE_EMPTY(bb)) { - apr_size_t move_all = INT_MAX; - /* we can move file handles from h2_mplx into this h2_stream as many - * as we want, since the lifetimes are the same and we are not freeing - * the ones in h2_mplx->io before this stream is done. 
*/ H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx set_response_pre"); - status = h2_util_move(msos->bb, bb, 16 * 1024, &move_all, - "h2_stream_set_response"); + status = mplx_transfer(msos, sos->stream->id, sos->stream->pool); H2_SOS_MPLX_OUT(APLOG_TRACE2, msos, "h2_sos_mplx set_response_post"); } return status; @@ -712,14 +612,15 @@ static h2_sos *h2_sos_mplx_create(h2_stream *stream, h2_response *response) msos = apr_pcalloc(stream->pool, sizeof(*msos)); msos->m = stream->session->mplx; msos->bb = apr_brigade_create(stream->pool, msos->m->c->bucket_alloc); - + msos->buffer_size = 32 * 1024; + sos = apr_pcalloc(stream->pool, sizeof(*sos)); sos->stream = stream; sos->response = response; sos->ctx = msos; sos->buffer = h2_sos_mplx_buffer; - sos->prep_read = h2_sos_mplx_prep_read; + sos->prepare = h2_sos_mplx_prepare; sos->readx = h2_sos_mplx_readx; sos->read_to = h2_sos_mplx_read_to; sos->get_trailers = h2_sos_mplx_get_trailers; diff --git a/mod_http2/h2_stream.h b/mod_http2/h2_stream.h index fa219df2..f0cd2167 100644 --- a/mod_http2/h2_stream.h +++ b/mod_http2/h2_stream.h @@ -16,6 +16,8 @@ #ifndef __mod_h2__h2_stream__ #define __mod_h2__h2_stream__ +#include "h2.h" + /** * A HTTP/2 stream, e.g. a client request+response in HTTP/1.1 terms. * @@ -30,16 +32,6 @@ */ #include "h2_io.h" -typedef enum { - H2_STREAM_ST_IDLE, - H2_STREAM_ST_OPEN, - H2_STREAM_ST_RESV_LOCAL, - H2_STREAM_ST_RESV_REMOTE, - H2_STREAM_ST_CLOSED_INPUT, - H2_STREAM_ST_CLOSED_OUTPUT, - H2_STREAM_ST_CLOSED, -} h2_stream_state_t; - struct h2_mplx; struct h2_priority; struct h2_request; @@ -51,7 +43,6 @@ typedef struct h2_stream h2_stream; struct h2_stream { int id; /* http2 stream id */ - int initiated_on; /* http2 stream id this was initiated on or 0 */ h2_stream_state_t state; /* http/2 state of this stream */ struct h2_session *session; /* the session this stream belongs to */ @@ -65,7 +56,6 @@ struct h2_stream { unsigned int submitted : 1; /* response HEADER has been sent */ apr_off_t input_remaining; /* remaining bytes on input as advertised via content-length */ - apr_bucket_brigade *bbin; /* input DATA */ struct h2_sos *sos; /* stream output source, e.g. to read output from */ apr_off_t data_frames_sent; /* # of DATA frames sent out for this stream */ @@ -74,15 +64,6 @@ struct h2_stream { #define H2_STREAM_RST(s, def) (s->rst_error? s->rst_error : (def)) -/** - * Create a stream in IDLE state. - * @param id the stream identifier - * @param pool the memory pool to use for this stream - * @param session the session this stream belongs to - * @return the newly created IDLE stream - */ -h2_stream *h2_stream_create(int id, apr_pool_t *pool, struct h2_session *session); - /** * Create a stream in OPEN state. * @param id the stream identifier @@ -163,7 +144,7 @@ apr_status_t h2_stream_close_input(h2_stream *stream); * @param len the number of bytes to write */ apr_status_t h2_stream_write_data(h2_stream *stream, - const char *data, size_t len); + const char *data, size_t len, int eos); /** * Reset the stream. Stream write/reads will return errors afterwards. 
@@ -191,7 +172,7 @@ apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled, * @param stream the stream to check on * @return != 0 iff stream has been scheduled */ -int h2_stream_is_scheduled(h2_stream *stream); +int h2_stream_is_scheduled(const h2_stream *stream); struct h2_response *h2_stream_get_response(h2_stream *stream); @@ -221,8 +202,8 @@ apr_status_t h2_stream_set_response(h2_stream *stream, * APR_EAGAIN if not data is available and end of stream has not been * reached yet. */ -apr_status_t h2_stream_prep_read(h2_stream *stream, - apr_off_t *plen, int *peos); +apr_status_t h2_stream_out_prepare(h2_stream *stream, + apr_off_t *plen, int *peos); /** * Read data from the stream output. @@ -278,21 +259,21 @@ void h2_stream_set_suspended(h2_stream *stream, int suspended); * @param stream the stream to check * @return != 0 iff stream is suspended. */ -int h2_stream_is_suspended(h2_stream *stream); +int h2_stream_is_suspended(const h2_stream *stream); /** * Check if the stream has open input. * @param stream the stream to check * @return != 0 iff stream has open input. */ -int h2_stream_input_is_open(h2_stream *stream); +int h2_stream_input_is_open(const h2_stream *stream); /** * Check if the stream has not submitted a response or RST yet. * @param stream the stream to check * @return != 0 iff stream has not submitted a response or RST. */ -int h2_stream_needs_submit(h2_stream *stream); +int h2_stream_needs_submit(const h2_stream *stream); /** * Submit any server push promises on this stream and schedule diff --git a/mod_http2/h2_switch.c b/mod_http2/h2_switch.c index c08dd9e5..d1d4a60f 100644 --- a/mod_http2/h2_switch.c +++ b/mod_http2/h2_switch.c @@ -58,13 +58,13 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r, if (strcmp(AP_PROTOCOL_HTTP1, ap_get_protocol(c))) { /* We do not know how to switch from anything else but http/1.1. 
*/ - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03083) "protocol switch: current proto != http/1.1, declined"); return DECLINED; } if (!h2_is_acceptable_connection(c, 0)) { - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03084) "protocol propose: connection requirements not met"); return DECLINED; } @@ -82,14 +82,14 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r, p = apr_table_get(r->headers_in, "HTTP2-Settings"); if (!p) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03085) "upgrade without HTTP2-Settings declined"); return DECLINED; } p = apr_table_get(r->headers_in, "Connection"); if (!ap_find_token(r->pool, p, "http2-settings")) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03086) "upgrade without HTTP2-Settings declined"); return DECLINED; } @@ -98,7 +98,7 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r, */ p = apr_table_get(r->headers_in, "Content-Length"); if (p && strcmp(p, "0")) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03087) "upgrade with content-length: %s, declined", p); return DECLINED; } @@ -158,7 +158,7 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s, h2_ctx_server_set(ctx, r->server); status = h2_conn_setup(ctx, r->connection, r); if (status != APR_SUCCESS) { - ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03088) "session setup"); return status; } diff --git a/mod_http2/h2_task.c b/mod_http2/h2_task.c index 9525cc15..dff1bcdd 100644 --- a/mod_http2/h2_task.c +++ b/mod_http2/h2_task.c @@ -35,6 +35,7 @@ #include "h2_private.h" #include "h2_conn.h" #include "h2_config.h" +#include "h2_ctx.h" #include "h2_from_h1.h" #include "h2_h2.h" #include "h2_mplx.h" @@ -54,7 +55,7 @@ static apr_status_t h2_filter_stream_input(ap_filter_t* filter, apr_read_type_e block, apr_off_t readbytes) { - h2_task *task = filter->ctx; + h2_task *task = h2_ctx_cget_task(filter->c); AP_DEBUG_ASSERT(task); if (!task->input) { return APR_ECONNABORTED; @@ -66,7 +67,7 @@ static apr_status_t h2_filter_stream_input(ap_filter_t* filter, static apr_status_t h2_filter_stream_output(ap_filter_t* filter, apr_bucket_brigade* brigade) { - h2_task *task = filter->ctx; + h2_task *task = h2_ctx_cget_task(filter->c); AP_DEBUG_ASSERT(task); if (!task->output) { return APR_ECONNABORTED; @@ -74,15 +75,15 @@ static apr_status_t h2_filter_stream_output(ap_filter_t* filter, return h2_task_output_write(task->output, filter, brigade); } -static apr_status_t h2_filter_read_response(ap_filter_t* f, +static apr_status_t h2_filter_read_response(ap_filter_t* filter, apr_bucket_brigade* bb) { - h2_task *task = f->ctx; + h2_task *task = h2_ctx_cget_task(filter->c); AP_DEBUG_ASSERT(task); if (!task->output || !task->output->from_h1) { return APR_ECONNABORTED; } - return h2_from_h1_read_response(task->output->from_h1, f, bb); + return h2_from_h1_read_response(task->output->from_h1, filter, bb); } /******************************************************************************* @@ -141,74 +142,114 @@ static int h2_task_pre_conn(conn_rec* c, void *arg) ctx = h2_ctx_get(c, 0); (void)arg; if (h2_ctx_is_task(ctx)) { - h2_task *task = h2_ctx_get_task(ctx); - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "h2_h2, pre_connection, found stream 
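/*
 * A condensed sketch (not module code) of the h2c upgrade preconditions
 * checked above: the request must carry an HTTP2-Settings header, list
 * "http2-settings" in its Connection header, and must not have a body.
 * The helper name is invented for illustration.
 */
static int is_h2c_upgrade_candidate(request_rec *r)
{
    const char *settings = apr_table_get(r->headers_in, "HTTP2-Settings");
    const char *conn     = apr_table_get(r->headers_in, "Connection");
    const char *clen     = apr_table_get(r->headers_in, "Content-Length");

    if (!settings || !ap_find_token(r->pool, conn, "http2-settings")) {
        return 0;               /* settings not announced correctly */
    }
    if (clen && strcmp(clen, "0") != 0) {
        return 0;               /* upgrades with a request body are declined */
    }
    return 1;
}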
task"); /* Add our own, network level in- and output filters. */ - ap_add_input_filter("H2_TO_H1", task, NULL, c); - ap_add_output_filter("H1_TO_H2", task, NULL, c); + ap_add_input_filter("H2_TO_H1", NULL, NULL, c); + ap_add_output_filter("H1_TO_H2", NULL, NULL, c); } return OK; } h2_task *h2_task_create(long session_id, const h2_request *req, - apr_pool_t *pool, h2_mplx *mplx) + conn_rec *c, h2_mplx *mplx) { - h2_task *task = apr_pcalloc(pool, sizeof(h2_task)); + apr_pool_t *pool; + h2_task *task; + + apr_pool_create(&pool, c->pool); + task = apr_pcalloc(pool, sizeof(h2_task)); if (task == NULL) { - ap_log_perror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, pool, + ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, c, APLOGNO(02941) "h2_task(%ld-%d): create stream task", session_id, req->id); - h2_mplx_out_close(mplx, req->id, NULL); + h2_mplx_out_close(mplx, req->id); return NULL; } task->id = apr_psprintf(pool, "%ld-%d", session_id, req->id); task->stream_id = req->id; + task->c = c; task->mplx = mplx; + task->c->keepalives = mplx->c->keepalives; + task->pool = pool; task->request = req; task->input_eos = !req->body; - task->ser_headers = h2_config_geti(req->config, H2_CONF_SER_HEADERS); + task->ser_headers = req->serialize; + task->blocking = 1; + h2_ctx_create_for(c, task); return task; } -apr_status_t h2_task_do(h2_task *task, conn_rec *c, apr_thread_cond_t *cond, - apr_socket_t *socket) +void h2_task_destroy(h2_task *task) +{ + if (task->pool) { + apr_pool_destroy(task->pool); + } +} + +void h2_task_set_io_blocking(h2_task *task, int blocking) +{ + task->blocking = blocking; +} + +apr_status_t h2_task_do(h2_task *task, apr_thread_cond_t *cond) { + apr_status_t status; + AP_DEBUG_ASSERT(task); task->io = cond; - task->input = h2_task_input_create(task, c); - task->output = h2_task_output_create(task, c); - - ap_process_connection(c, socket); + task->input = h2_task_input_create(task, task->c); + task->output = h2_task_output_create(task, task->c); - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, - "h2_task(%s): processing done", task->id); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, + "h2_task(%s): process connection", task->id); + ap_run_process_connection(task->c); - h2_task_input_destroy(task->input); - h2_task_output_close(task->output); - h2_task_output_destroy(task->output); - task->io = NULL; + if (task->frozen) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, + "h2_task(%s): process_conn returned frozen task", + task->id); + /* cleanup delayed */ + status = APR_EAGAIN; + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, + "h2_task(%s): processing done", task->id); + status = APR_SUCCESS; + } - return APR_SUCCESS; + return status; } -static apr_status_t h2_task_process_request(const h2_request *req, conn_rec *c) +static apr_status_t h2_task_process_request(h2_task *task, conn_rec *c) { - request_rec *r; + const h2_request *req = task->request; conn_state_t *cs = c->cs; + request_rec *r; + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): create request_rec", task->id); r = h2_request_create_rec(req, c); if (r && (r->status == HTTP_OK)) { ap_update_child_status(c->sbh, SERVER_BUSY_READ, r); - if (cs) + if (cs) { cs->state = CONN_STATE_HANDLER; + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): start process_request", task->id); ap_process_request(r); + if (task->frozen) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): process_request frozen", task->id); + } + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + 
"h2_task(%s): process_request done", task->id); + /* After the call to ap_process_request, the * request pool will have been deleted. We set * r=NULL here to ensure that any dereference @@ -216,12 +257,19 @@ static apr_status_t h2_task_process_request(const h2_request *req, conn_rec *c) * will result in a segfault immediately instead * of nondeterministic failures later. */ - if (cs) + if (cs) cs->state = CONN_STATE_WRITE_COMPLETION; r = NULL; } - ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, NULL); - c->sbh = NULL; + else if (!r) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): create request_rec failed, r=NULL", task->id); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_task(%s): create request_rec failed, r->status=%d", + task->id, r->status); + } return APR_SUCCESS; } @@ -237,13 +285,43 @@ static int h2_task_process_conn(conn_rec* c) ctx = h2_ctx_get(c, 0); if (h2_ctx_is_task(ctx)) { if (!ctx->task->ser_headers) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, processing request directly"); - h2_task_process_request(ctx->task->request, c); + h2_task_process_request(ctx->task, c); return DONE; } - ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_task(%s), serialized handling", ctx->task->id); } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "slave_conn(%ld): has no task", c->id); + } return DECLINED; } + +apr_status_t h2_task_freeze(h2_task *task) +{ + if (!task->frozen) { + task->frozen = 1; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, + "h2_task(%s), frozen", task->id); + } + return APR_SUCCESS; +} + +apr_status_t h2_task_thaw(h2_task *task) +{ + if (task->frozen) { + task->frozen = 0; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, + "h2_task(%s), thawed", task->id); + } + task->detached = 1; + return APR_SUCCESS; +} + +int h2_task_is_detached(h2_task *task) +{ + return task->detached; +} diff --git a/mod_http2/h2_task.h b/mod_http2/h2_task.h index ce17f851..15a1d3cb 100644 --- a/mod_http2/h2_task.h +++ b/mod_http2/h2_task.h @@ -41,6 +41,7 @@ struct apr_thread_cond_t; struct h2_conn; struct h2_mplx; struct h2_task; +struct h2_req_engine; struct h2_request; struct h2_resp_head; struct h2_worker; @@ -50,23 +51,33 @@ typedef struct h2_task h2_task; struct h2_task { const char *id; int stream_id; - struct h2_mplx *mplx; + conn_rec *c; + struct h2_mplx *mplx; + apr_pool_t *pool; const struct h2_request *request; unsigned int filters_set : 1; unsigned int input_eos : 1; unsigned int ser_headers : 1; + unsigned int frozen : 1; + unsigned int blocking : 1; + unsigned int detached : 1; struct h2_task_input *input; struct h2_task_output *output; struct apr_thread_cond_t *io; /* used to wait for events on */ + + struct h2_req_engine *engine; /* engine hosted by this task */ + struct h2_req_engine *assigned; /* engine that task has been assigned to */ + request_rec *r; /* request being processed in this task */ }; h2_task *h2_task_create(long session_id, const struct h2_request *req, - apr_pool_t *pool, struct h2_mplx *mplx); + conn_rec *c, struct h2_mplx *mplx); + +void h2_task_destroy(h2_task *task); -apr_status_t h2_task_do(h2_task *task, conn_rec *c, - struct apr_thread_cond_t *cond, apr_socket_t *socket); +apr_status_t h2_task_do(h2_task *task, struct apr_thread_cond_t *cond); void h2_task_register_hooks(void); /* @@ -77,4 +88,10 @@ apr_status_t h2_task_init(apr_pool_t *pool, server_rec *s); extern 
APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in; extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out; +apr_status_t h2_task_freeze(h2_task *task); +apr_status_t h2_task_thaw(h2_task *task); +int h2_task_is_detached(h2_task *task); + +void h2_task_set_io_blocking(h2_task *task, int blocking); + #endif /* defined(__mod_h2__h2_task__) */ diff --git a/mod_http2/h2_task_input.c b/mod_http2/h2_task_input.c index 16ef3c8d..3993b6b4 100644 --- a/mod_http2/h2_task_input.c +++ b/mod_http2/h2_task_input.c @@ -45,17 +45,17 @@ static int ser_header(void *ctx, const char *name, const char *value) h2_task_input *h2_task_input_create(h2_task *task, conn_rec *c) { - h2_task_input *input = apr_pcalloc(c->pool, sizeof(h2_task_input)); + h2_task_input *input = apr_pcalloc(task->pool, sizeof(h2_task_input)); if (input) { - input->c = c; input->task = task; input->bb = NULL; + input->block = APR_BLOCK_READ; if (task->ser_headers) { ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_task_input(%s): serialize request %s %s", task->id, task->request->method, task->request->path); - input->bb = apr_brigade_create(c->pool, c->bucket_alloc); + input->bb = apr_brigade_create(task->pool, c->bucket_alloc); apr_brigade_printf(input->bb, NULL, NULL, "%s %s HTTP/1.1\r\n", task->request->method, task->request->path); apr_table_do(ser_header, input, task->request->headers, NULL); @@ -65,7 +65,7 @@ h2_task_input *h2_task_input_create(h2_task *task, conn_rec *c) } } else if (!input->task->input_eos) { - input->bb = apr_brigade_create(c->pool, c->bucket_alloc); + input->bb = apr_brigade_create(task->pool, c->bucket_alloc); } else { /* We do not serialize and have eos already, no need to @@ -75,9 +75,9 @@ h2_task_input *h2_task_input_create(h2_task *task, conn_rec *c) return input; } -void h2_task_input_destroy(h2_task_input *input) +void h2_task_input_block_set(h2_task_input *input, apr_read_type_e block) { - input->bb = NULL; + input->block = block; } apr_status_t h2_task_input_read(h2_task_input *input, @@ -120,7 +120,7 @@ apr_status_t h2_task_input_read(h2_task_input *input, return APR_EOF; } - while ((bblen == 0) || (mode == AP_MODE_READBYTES && bblen < readbytes)) { + while (bblen == 0) { /* Get more data for our stream from mplx. */ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, @@ -129,27 +129,31 @@ apr_status_t h2_task_input_read(h2_task_input *input, input->task->id, block, (long)readbytes, (long)bblen); - /* Although we sometimes get called with APR_NONBLOCK_READs, - we need to fill our buffer blocking. Otherwise we get EAGAIN, - return that to our caller and everyone throws up their hands, - never calling us again. */ - status = h2_mplx_in_read(input->task->mplx, APR_BLOCK_READ, + /* Override the block mode we get called with depending on the input's + * setting. + */ + status = h2_mplx_in_read(input->task->mplx, block, input->task->stream_id, input->bb, f->r? f->r->trailers_in : NULL, input->task->io); ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, "h2_task_input(%s): mplx in read returned", input->task->id); - if (status != APR_SUCCESS) { + if (APR_STATUS_IS_EAGAIN(status) + && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) { + /* chunked input handling does not seem to like it if we + * return with APR_EAGAIN from a GETLINE read... 
+ * upload 100k test on test-ser.example.org hangs */ + status = APR_SUCCESS; + } + else if (status != APR_SUCCESS) { return status; } + status = apr_brigade_length(input->bb, 1, &bblen); if (status != APR_SUCCESS) { return status; } - if ((bblen == 0) && (block == APR_NONBLOCK_READ)) { - return h2_util_has_eos(input->bb, -1)? APR_EOF : APR_EAGAIN; - } ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, "h2_task_input(%s): mplx in read, %ld bytes in brigade", diff --git a/mod_http2/h2_task_input.h b/mod_http2/h2_task_input.h index bfc01f58..c8913cac 100644 --- a/mod_http2/h2_task_input.h +++ b/mod_http2/h2_task_input.h @@ -26,16 +26,14 @@ struct h2_task; typedef struct h2_task_input h2_task_input; struct h2_task_input { - conn_rec *c; struct h2_task *task; apr_bucket_brigade *bb; + apr_read_type_e block; }; h2_task_input *h2_task_input_create(struct h2_task *task, conn_rec *c); -void h2_task_input_destroy(h2_task_input *input); - apr_status_t h2_task_input_read(h2_task_input *input, ap_filter_t* filter, apr_bucket_brigade* brigade, @@ -43,4 +41,6 @@ apr_status_t h2_task_input_read(h2_task_input *input, apr_read_type_e block, apr_off_t readbytes); +void h2_task_input_block_set(h2_task_input *input, apr_read_type_e block); + #endif /* defined(__mod_h2__h2_task_input__) */ diff --git a/mod_http2/h2_task_output.c b/mod_http2/h2_task_output.c index f459dc62..80938d1f 100644 --- a/mod_http2/h2_task_output.c +++ b/mod_http2/h2_task_output.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "h2_private.h" #include "h2_conn.h" @@ -36,97 +37,78 @@ h2_task_output *h2_task_output_create(h2_task *task, conn_rec *c) { - h2_task_output *output = apr_pcalloc(c->pool, sizeof(h2_task_output)); + h2_task_output *output = apr_pcalloc(task->pool, sizeof(h2_task_output)); if (output) { - output->c = c; output->task = task; - output->state = H2_TASK_OUT_INIT; - output->from_h1 = h2_from_h1_create(task->stream_id, c->pool); - if (!output->from_h1) { - return NULL; - } + output->from_h1 = h2_from_h1_create(task->stream_id, task->pool); } return output; } -void h2_task_output_destroy(h2_task_output *output) +static apr_status_t open_response(h2_task_output *output, ap_filter_t *f, + apr_bucket_brigade *bb, const char *caller) { - if (output->from_h1) { - h2_from_h1_destroy(output->from_h1); - output->from_h1 = NULL; + h2_response *response; + response = h2_from_h1_get_response(output->from_h1); + if (!response) { + if (f) { + /* This happens currently when ap_die(status, r) is invoked + * by a read request filter. 
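/*
 * A speculative usage sketch (not module code): h2_task_input_block_set()
 * lets a caller toggle the read mode of a task's input, e.g. to poll for
 * request body data without blocking and to restore blocking reads
 * afterwards. The surrounding scenario is an assumption for illustration.
 */
static void poll_then_block(h2_task *task)
{
    h2_task_input_block_set(task->input, APR_NONBLOCK_READ);
    /* ... reads through the task input may now return APR_EAGAIN
     *     instead of waiting for data to arrive ... */
    h2_task_input_block_set(task->input, APR_BLOCK_READ);
}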
*/ + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, output->task->c, APLOGNO(03204) + "h2_task_output(%s): write without response by %s " + "for %s %s %s", + output->task->id, caller, + output->task->request->method, + output->task->request->authority, + output->task->request->path); + output->task->c->aborted = 1; + } + if (output->task->io) { + apr_thread_cond_broadcast(output->task->io); + } + return APR_ECONNABORTED; + } + + if (h2_task_logio_add_bytes_out) { + /* count headers as if we'd do a HTTP/1.1 serialization */ + output->written = h2_util_table_bytes(response->headers, 3)+1; + h2_task_logio_add_bytes_out(output->task->c, output->written); } + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, output->task->c, APLOGNO(03348) + "h2_task(%s): open response to %s %s %s", + output->task->id, output->task->request->method, + output->task->request->authority, + output->task->request->path); + return h2_mplx_out_open(output->task->mplx, output->task->stream_id, + response, f, bb, output->task->io); } -static apr_table_t *get_trailers(h2_task_output *output) +static apr_status_t write_brigade_raw(h2_task_output *output, + ap_filter_t* f, apr_bucket_brigade* bb) { - if (!output->trailers_passed) { - h2_response *response = h2_from_h1_get_response(output->from_h1); - if (response && response->trailers) { - output->trailers_passed = 1; - if (h2_task_logio_add_bytes_out) { - /* counter trailers as if we'd do a HTTP/1.1 serialization */ - h2_task_logio_add_bytes_out(output->c, - h2_util_table_bytes(response->trailers, 3)+1); - } - return response->trailers; - } + apr_off_t written, left; + apr_status_t status; + + apr_brigade_length(bb, 0, &written); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->task->c, + "h2_task(%s): write response body (%ld bytes)", + output->task->id, (long)written); + + status = h2_mplx_out_write(output->task->mplx, output->task->stream_id, + f, output->task->blocking, bb, output->task->io); + if (status == APR_INCOMPLETE) { + apr_brigade_length(bb, 0, &left); + written -= left; + status = APR_SUCCESS; } - return NULL; -} -static apr_status_t open_if_needed(h2_task_output *output, ap_filter_t *f, - apr_bucket_brigade *bb) -{ - if (output->state == H2_TASK_OUT_INIT) { - h2_response *response; - output->state = H2_TASK_OUT_STARTED; - response = h2_from_h1_get_response(output->from_h1); - if (!response) { - if (f) { - /* This happens currently when ap_die(status, r) is invoked - * by a read request filter. */ - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, - "h2_task_output(%s): write without response " - "for %s %s %s", - output->task->id, output->task->request->method, - output->task->request->authority, - output->task->request->path); - f->c->aborted = 1; - } - if (output->task->io) { - apr_thread_cond_broadcast(output->task->io); - } - return APR_ECONNABORTED; - } - + if (status == APR_SUCCESS) { + output->written += written; if (h2_task_logio_add_bytes_out) { - /* counter headers as if we'd do a HTTP/1.1 serialization */ - /* TODO: counter a virtual status line? 
*/ - apr_off_t bytes_written; - apr_brigade_length(bb, 0, &bytes_written); - bytes_written += h2_util_table_bytes(response->headers, 3)+1; - h2_task_logio_add_bytes_out(f->c, bytes_written); + h2_task_logio_add_bytes_out(output->task->c, written); } - get_trailers(output); - return h2_mplx_out_open(output->task->mplx, output->task->stream_id, - response, f, bb, output->task->io); - } - return APR_EOF; -} - -void h2_task_output_close(h2_task_output *output) -{ - open_if_needed(output, NULL, NULL); - if (output->state != H2_TASK_OUT_DONE) { - h2_mplx_out_close(output->task->mplx, output->task->stream_id, - get_trailers(output)); - output->state = H2_TASK_OUT_DONE; } -} - -int h2_task_output_has_started(h2_task_output *output) -{ - return output->state >= H2_TASK_OUT_STARTED; + return status; } /* Bring the data from the brigade (which represents the result of the @@ -136,30 +118,59 @@ int h2_task_output_has_started(h2_task_output *output) apr_status_t h2_task_output_write(h2_task_output *output, ap_filter_t* f, apr_bucket_brigade* bb) { - apr_status_t status; + apr_bucket *b; + apr_status_t status = APR_SUCCESS; if (APR_BRIGADE_EMPTY(bb)) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, - "h2_task_output(%s): empty write", output->task->id); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, output->task->c, + "h2_task(%s): empty write", output->task->id); return APR_SUCCESS; } - status = open_if_needed(output, f, bb); - if (status != APR_EOF) { - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c, - "h2_task_output(%s): opened and passed brigade", - output->task->id); - return status; + if (output->task->frozen) { + h2_util_bb_log(output->task->c, output->task->stream_id, APLOG_TRACE2, + "frozen task output write, ignored", bb); + while (!APR_BRIGADE_EMPTY(bb)) { + b = APR_BRIGADE_FIRST(bb); + if (AP_BUCKET_IS_EOR(b)) { + /* TODO: keep it */ + APR_BUCKET_REMOVE(b); + } + else { + apr_bucket_delete(b); + } + } + return APR_SUCCESS; } - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, - "h2_task_output(%s): write brigade", output->task->id); - if (h2_task_logio_add_bytes_out) { - apr_off_t bytes_written; - apr_brigade_length(bb, 0, &bytes_written); - h2_task_logio_add_bytes_out(f->c, bytes_written); + if (!output->response_open) { + status = open_response(output, f, bb, "write"); + output->response_open = 1; } - return h2_mplx_out_write(output->task->mplx, output->task->stream_id, - f, bb, get_trailers(output), output->task->io); + + /* Attempt to write saved brigade first */ + if (status == APR_SUCCESS && output->bb && !APR_BRIGADE_EMPTY(output->bb)) { + status = write_brigade_raw(output, f, output->bb); + } + + /* If there is nothing saved (anymore), try to write the brigade passed */ + if (status == APR_SUCCESS + && (!output->bb || APR_BRIGADE_EMPTY(output->bb)) + && !APR_BRIGADE_EMPTY(bb)) { + status = write_brigade_raw(output, f, bb); + } + + /* If the passed brigade is not empty, save it before return */ + if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, output->task->c, + "h2_task(%s): could not write all, saving brigade", + output->task->id); + if (!output->bb) { + output->bb = apr_brigade_create(output->task->pool, output->task->c->bucket_alloc); + } + return ap_save_brigade(f, &output->bb, &bb, output->task->pool); + } + + return status; } diff --git a/mod_http2/h2_task_output.h b/mod_http2/h2_task_output.h index eed1f012..3135bc45 100644 --- a/mod_http2/h2_task_output.h +++ b/mod_http2/h2_task_output.h @@ -26,32 +26,25 @@ 
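/*
 * A generic sketch (not module code) of the "write what you can, set the
 * rest aside" pattern that h2_task_output_write() follows above. The writer
 * callback type and the function name are invented; ap_save_brigade() and the
 * brigade macros are the real httpd/APR calls.
 */
typedef apr_status_t h2_write_fn(ap_filter_t *f, apr_bucket_brigade *bb);

static apr_status_t pass_or_save(ap_filter_t *f, h2_write_fn *try_write,
                                 apr_bucket_brigade **saved,
                                 apr_bucket_brigade *bb, apr_pool_t *pool)
{
    apr_status_t status = APR_SUCCESS;

    /* 1. flush anything set aside by an earlier, incomplete write */
    if (*saved && !APR_BRIGADE_EMPTY(*saved)) {
        status = try_write(f, *saved);
    }
    /* 2. only when nothing is left over, attempt the new data */
    if (status == APR_SUCCESS
        && (!*saved || APR_BRIGADE_EMPTY(*saved))
        && !APR_BRIGADE_EMPTY(bb)) {
        status = try_write(f, bb);
    }
    /* 3. whatever could not be taken is saved for the next call;
     *    ap_save_brigade() creates *saved on demand and sets buckets
     *    aside to 'pool' so they outlive the caller's brigade */
    if (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
        status = ap_save_brigade(f, saved, &bb, pool);
    }
    return status;
}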
struct h2_mplx; struct h2_task; struct h2_from_h1; -typedef enum { - H2_TASK_OUT_INIT, - H2_TASK_OUT_STARTED, - H2_TASK_OUT_DONE, -} h2_task_output_state_t; - typedef struct h2_task_output h2_task_output; struct h2_task_output { - conn_rec *c; struct h2_task *task; - h2_task_output_state_t state; struct h2_from_h1 *from_h1; - unsigned int trailers_passed : 1; + + unsigned int response_open : 1; + + apr_off_t written; + apr_bucket_brigade *bb; }; h2_task_output *h2_task_output_create(struct h2_task *task, conn_rec *c); -void h2_task_output_destroy(h2_task_output *output); - apr_status_t h2_task_output_write(h2_task_output *output, ap_filter_t* filter, apr_bucket_brigade* brigade); -void h2_task_output_close(h2_task_output *output); - -int h2_task_output_has_started(h2_task_output *output); +apr_status_t h2_task_output_freeze(h2_task_output *output); +apr_status_t h2_task_output_thaw(h2_task_output *output); #endif /* defined(__mod_h2__h2_task_output__) */ diff --git a/mod_http2/h2_util.c b/mod_http2/h2_util.c index 677d85e9..06472425 100644 --- a/mod_http2/h2_util.c +++ b/mod_http2/h2_util.c @@ -14,7 +14,6 @@ */ #include - #include #include @@ -28,6 +27,36 @@ #include "h2_request.h" #include "h2_util.h" +/* h2_log2(n) iff n is a power of 2 */ +unsigned char h2_log2(apr_uint32_t n) +{ + int lz = 0; + if (!n) { + return 0; + } + if (!(n & 0xffff0000u)) { + lz += 16; + n = (n << 16); + } + if (!(n & 0xff000000u)) { + lz += 8; + n = (n << 8); + } + if (!(n & 0xf0000000u)) { + lz += 4; + n = (n << 4); + } + if (!(n & 0xc0000000u)) { + lz += 2; + n = (n << 2); + } + if (!(n & 0x80000000u)) { + lz += 1; + } + + return 31 - lz; +} + size_t h2_util_hex_dump(char *buffer, size_t maxlen, const char *data, size_t datalen) { @@ -170,10 +199,10 @@ const char *h2_util_base64url_encode(const char *data, enc = p; for (i = 0; i < mlen; i+= 3) { *p++ = BASE64URL_CHARS[ (udata[i] >> 2) & 0x3fu ]; - *p++ = BASE64URL_CHARS[ (udata[i] << 4) + - ((i+1 < len)? (udata[i+1] >> 4) : 0) & 0x3fu ]; - *p++ = BASE64URL_CHARS[ (udata[i+1] << 2) + - ((i+2 < len)? (udata[i+2] >> 6) : 0) & 0x3fu ]; + *p++ = BASE64URL_CHARS[ ((udata[i] << 4) + + ((i+1 < len)? (udata[i+1] >> 4) : 0)) & 0x3fu ]; + *p++ = BASE64URL_CHARS[ ((udata[i+1] << 2) + + ((i+2 < len)? 
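/*
 * A few worked values (not module code) for the h2_log2() helper added in
 * h2_util.c above: it counts leading zero bits and therefore returns the
 * exponent for powers of two, and 0 for an input of 0.
 */
static void h2_log2_examples(void)
{
    AP_DEBUG_ASSERT(h2_log2(1) == 0);
    AP_DEBUG_ASSERT(h2_log2(2) == 1);
    AP_DEBUG_ASSERT(h2_log2(64) == 6);
    AP_DEBUG_ASSERT(h2_log2(0x80000000u) == 31);
}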
(udata[i+2] >> 6) : 0)) & 0x3fu ]; if (i+2 < len) { *p++ = BASE64URL_CHARS[ udata[i+2] & 0x3fu ]; } @@ -231,6 +260,77 @@ const char *h2_util_first_token_match(apr_pool_t *pool, const char *s, } +/******************************************************************************* + * ihash - hash for structs with int identifier + ******************************************************************************/ +struct h2_ihash_t { + apr_hash_t *hash; + size_t ioff; +}; + +static unsigned int ihash(const char *key, apr_ssize_t *klen) +{ + return (unsigned int)(*((int*)key)); +} + +h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int) +{ + h2_ihash_t *ih = apr_pcalloc(pool, sizeof(h2_ihash_t)); + ih->hash = apr_hash_make_custom(pool, ihash); + ih->ioff = offset_of_int; + return ih; +} + +size_t h2_ihash_count(h2_ihash_t *ih) +{ + return apr_hash_count(ih->hash); +} + +int h2_ihash_is_empty(h2_ihash_t *ih) +{ + return apr_hash_count(ih->hash) == 0; +} + +void *h2_ihash_get(h2_ihash_t *ih, int id) +{ + return apr_hash_get(ih->hash, &id, sizeof(id)); +} + +typedef struct { + h2_ihash_iter_t *iter; + void *ctx; +} iter_ctx; + +static int ihash_iter(void *ctx, const void *key, apr_ssize_t klen, + const void *val) +{ + iter_ctx *ictx = ctx; + return ictx->iter(ictx->ctx, (void*)val); /* why is this passed const?*/ +} + +int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx) +{ + iter_ctx ictx; + ictx.iter = fn; + ictx.ctx = ctx; + return apr_hash_do(ihash_iter, &ictx, ih->hash); +} + +void h2_ihash_add(h2_ihash_t *ih, void *val) +{ + apr_hash_set(ih->hash, ((char *)val + ih->ioff), sizeof(int), val); +} + +void h2_ihash_remove(h2_ihash_t *ih, int id) +{ + apr_hash_set(ih->hash, &id, sizeof(id), NULL); +} + +void h2_ihash_clear(h2_ihash_t *ih) +{ + apr_hash_clear(ih->hash); +} + /******************************************************************************* * h2_util for apt_table_t ******************************************************************************/ @@ -375,7 +475,7 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, APR_BUCKET_REMOVE(b); APR_BRIGADE_INSERT_TAIL(to, b); #if LOG_BUCKETS - ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, + ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, APLOGNO(03205) "h2_util_move: %s, passed bucket(same bucket_alloc) " "%ld-%ld, type=%s", msg, (long)b->start, (long)b->length, @@ -394,9 +494,6 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, if (APR_BUCKET_IS_EOS(b)) { APR_BRIGADE_INSERT_TAIL(to, apr_bucket_eos_create(to->bucket_alloc)); } - else if (APR_BUCKET_IS_FLUSH(b)) { - APR_BRIGADE_INSERT_TAIL(to, apr_bucket_flush_create(to->bucket_alloc)); - } else { /* ignore */ } @@ -413,7 +510,7 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, apr_file_t *fd = f->fd; int setaside = (f->readpool != to->p); #if LOG_BUCKETS - ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, + ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, APLOGNO(03206) "h2_util_move: %s, moving FILE bucket %ld-%ld " "from=%lx(p=%lx) to=%lx(p=%lx), setaside=%d", msg, (long)b->start, (long)b->length, @@ -436,11 +533,12 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, else { const char *data; apr_size_t len; + status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ); if (status == APR_SUCCESS && len > 0) { status = apr_brigade_write(to, NULL, NULL, data, len); #if LOG_BUCKETS - ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, + ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, 
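/*
 * A usage sketch (not module code) for the h2_ihash_t container added above:
 * structs are keyed by an int member whose offset is given at creation time,
 * so lookups only need the id. The 'item' type and the function name are
 * invented for illustration; offsetof() requires stddef.h.
 */
typedef struct {
    int id;
    const char *name;
} item;

static void ihash_example(apr_pool_t *pool)
{
    h2_ihash_t *index = h2_ihash_create(pool, offsetof(item, id));
    item *it = apr_pcalloc(pool, sizeof(*it));

    it->id = 42;
    it->name = "example";
    h2_ihash_add(index, it);

    AP_DEBUG_ASSERT(h2_ihash_get(index, 42) == it);
    AP_DEBUG_ASSERT(h2_ihash_count(index) == 1);
    h2_ihash_remove(index, 42);
    AP_DEBUG_ASSERT(h2_ihash_is_empty(index));
}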
to->p, APLOGNO(03207) "h2_util_move: %s, copied bucket %ld-%ld " "from=%lx(p=%lx) to=%lx(p=%lx)", msg, (long)b->start, (long)b->length, @@ -456,7 +554,7 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, APR_BUCKET_REMOVE(b); APR_BRIGADE_INSERT_TAIL(to, b); #if LOG_BUCKETS - ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, + ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, APLOGNO(03208) "h2_util_move: %s, passed setaside bucket %ld-%ld " "from=%lx(p=%lx) to=%lx(p=%lx)", msg, (long)b->start, (long)b->length, @@ -519,7 +617,7 @@ apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from, if (status == APR_SUCCESS && len > 0) { status = apr_brigade_write(to, NULL, NULL, data, len); #if LOG_BUCKETS - ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, + ap_log_perror(APLOG_MARK, LOG_LEVEL, 0, to->p, APLOGNO(03209) "h2_util_copy: %s, copied bucket %ld-%ld " "from=%lx(p=%lx) to=%lx(p=%lx)", msg, (long)b->start, (long)b->length, @@ -534,20 +632,6 @@ apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from, return status; } -int h2_util_has_flush_or_eos(apr_bucket_brigade *bb) -{ - apr_bucket *b; - for (b = APR_BRIGADE_FIRST(bb); - b != APR_BRIGADE_SENTINEL(bb); - b = APR_BUCKET_NEXT(b)) - { - if (APR_BUCKET_IS_EOS(b) || APR_BUCKET_IS_FLUSH(b)) { - return 1; - } - } - return 0; -} - int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len) { apr_bucket *b, *end; @@ -613,7 +697,7 @@ apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb, return status; } else if (blen == 0) { - /* empty brigade, does it have an EOS bucket somwhere? */ + /* brigade without data, does it have an EOS bucket somwhere? */ *plen = 0; *peos = h2_util_has_eos(bb, -1); } @@ -771,12 +855,13 @@ void h2_util_bb_log(conn_rec *c, int stream_id, int level, } line = *buffer? buffer : "(empty)"; } + /* Intentional no APLOGNO */ ap_log_cerror(APLOG_MARK, level, 0, c, "bb_dump(%ld-%d)-%s: %s", c->id, stream_id, tag, line); } -apr_status_t h2_transfer_brigade(apr_bucket_brigade *to, +apr_status_t h2_ltransfer_brigade(apr_bucket_brigade *to, apr_bucket_brigade *from, apr_pool_t *p, apr_off_t *plen, @@ -847,6 +932,113 @@ apr_status_t h2_transfer_brigade(apr_bucket_brigade *to, return APR_SUCCESS; } +apr_status_t h2_transfer_brigade(apr_bucket_brigade *to, + apr_bucket_brigade *from, + apr_pool_t *p) +{ + apr_bucket *e; + apr_status_t rv; + + while (!APR_BRIGADE_EMPTY(from)) { + e = APR_BRIGADE_FIRST(from); + + rv = apr_bucket_setaside(e, p); + + /* If the bucket type does not implement setaside, then + * (hopefully) morph it into a bucket type which does, and set + * *that* aside... */ + if (rv == APR_ENOTIMPL) { + const char *s; + apr_size_t n; + + rv = apr_bucket_read(e, &s, &n, APR_BLOCK_READ); + if (rv == APR_SUCCESS) { + rv = apr_bucket_setaside(e, p); + } + } + + if (rv != APR_SUCCESS) { + /* Return an error but still save the brigade if + * ->setaside() is really not implemented. 
*/ + if (rv != APR_ENOTIMPL) { + return rv; + } + } + + APR_BUCKET_REMOVE(e); + APR_BRIGADE_INSERT_TAIL(to, e); + } + return APR_SUCCESS; +} + +apr_status_t h2_append_brigade(apr_bucket_brigade *to, + apr_bucket_brigade *from, + apr_off_t *plen, + int *peos) +{ + apr_bucket *e; + apr_off_t len = 0, remain = *plen; + apr_status_t rv; + + *peos = 0; + + while (!APR_BRIGADE_EMPTY(from)) { + e = APR_BRIGADE_FIRST(from); + + if (APR_BUCKET_IS_METADATA(e)) { + if (APR_BUCKET_IS_EOS(e)) { + *peos = 1; + } + } + else { + if (remain > 0 && e->length == ((apr_size_t)-1)) { + const char *ign; + apr_size_t ilen; + rv = apr_bucket_read(e, &ign, &ilen, APR_BLOCK_READ); + if (rv != APR_SUCCESS) { + return rv; + } + } + + if (remain < e->length) { + if (remain <= 0) { + return APR_SUCCESS; + } + apr_bucket_split(e, remain); + } + } + + APR_BUCKET_REMOVE(e); + APR_BRIGADE_INSERT_TAIL(to, e); + len += e->length; + remain -= e->length; + } + + *plen = len; + return APR_SUCCESS; +} + +apr_off_t h2_brigade_mem_size(apr_bucket_brigade *bb) +{ + apr_bucket *b; + apr_off_t total = 0; + + for (b = APR_BRIGADE_FIRST(bb); + b != APR_BRIGADE_SENTINEL(bb); + b = APR_BUCKET_NEXT(b)) + { + total += sizeof(*b); + if (b->length > 0) { + if (APR_BUCKET_IS_HEAP(b) + || APR_BUCKET_IS_POOL(b)) { + total += b->length; + } + } + } + return total; +} + + /******************************************************************************* * h2_ngheader ******************************************************************************/ @@ -1002,6 +1194,9 @@ static literal IgnoredResponseTrailers[] = { H2_DEF_LITERAL("www-authenticate"), H2_DEF_LITERAL("proxy-authenticate"), }; +static literal IgnoredProxyRespHds[] = { + H2_DEF_LITERAL("alt-svc"), +}; static int ignore_header(const literal *lits, size_t llen, const char *name, size_t nlen) @@ -1034,12 +1229,126 @@ int h2_res_ignore_trailer(const char *name, size_t len) return ignore_header(H2_LIT_ARGS(IgnoredResponseTrailers), name, len); } -void h2_req_strip_ignored_header(apr_table_t *headers) +int h2_proxy_res_ignore_header(const char *name, size_t len) { - int i; - for (i = 0; i < H2_ALEN(IgnoredRequestHeaders); ++i) { - apr_table_unset(headers, IgnoredRequestHeaders[i].name); + return (h2_req_ignore_header(name, len) + || ignore_header(H2_LIT_ARGS(IgnoredProxyRespHds), name, len)); +} + + +/******************************************************************************* + * frame logging + ******************************************************************************/ + +int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen) +{ + char scratch[128]; + size_t s_len = sizeof(scratch)/sizeof(scratch[0]); + + switch (frame->hd.type) { + case NGHTTP2_DATA: { + return apr_snprintf(buffer, maxlen, + "DATA[length=%d, flags=%d, stream=%d, padlen=%d]", + (int)frame->hd.length, frame->hd.flags, + frame->hd.stream_id, (int)frame->data.padlen); + } + case NGHTTP2_HEADERS: { + return apr_snprintf(buffer, maxlen, + "HEADERS[length=%d, hend=%d, stream=%d, eos=%d]", + (int)frame->hd.length, + !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS), + frame->hd.stream_id, + !!(frame->hd.flags & NGHTTP2_FLAG_END_STREAM)); + } + case NGHTTP2_PRIORITY: { + return apr_snprintf(buffer, maxlen, + "PRIORITY[length=%d, flags=%d, stream=%d]", + (int)frame->hd.length, + frame->hd.flags, frame->hd.stream_id); + } + case NGHTTP2_RST_STREAM: { + return apr_snprintf(buffer, maxlen, + "RST_STREAM[length=%d, flags=%d, stream=%d]", + (int)frame->hd.length, + frame->hd.flags, frame->hd.stream_id); + } + 
case NGHTTP2_SETTINGS: { + if (frame->hd.flags & NGHTTP2_FLAG_ACK) { + return apr_snprintf(buffer, maxlen, + "SETTINGS[ack=1, stream=%d]", + frame->hd.stream_id); + } + return apr_snprintf(buffer, maxlen, + "SETTINGS[length=%d, stream=%d]", + (int)frame->hd.length, frame->hd.stream_id); + } + case NGHTTP2_PUSH_PROMISE: { + return apr_snprintf(buffer, maxlen, + "PUSH_PROMISE[length=%d, hend=%d, stream=%d]", + (int)frame->hd.length, + !!(frame->hd.flags & NGHTTP2_FLAG_END_HEADERS), + frame->hd.stream_id); + } + case NGHTTP2_PING: { + return apr_snprintf(buffer, maxlen, + "PING[length=%d, ack=%d, stream=%d]", + (int)frame->hd.length, + frame->hd.flags&NGHTTP2_FLAG_ACK, + frame->hd.stream_id); + } + case NGHTTP2_GOAWAY: { + size_t len = (frame->goaway.opaque_data_len < s_len)? + frame->goaway.opaque_data_len : s_len-1; + memcpy(scratch, frame->goaway.opaque_data, len); + scratch[len] = '\0'; + return apr_snprintf(buffer, maxlen, "GOAWAY[error=%d, reason='%s', " + "last_stream=%d]", frame->goaway.error_code, + scratch, frame->goaway.last_stream_id); + } + case NGHTTP2_WINDOW_UPDATE: { + return apr_snprintf(buffer, maxlen, + "WINDOW_UPDATE[stream=%d, incr=%d]", + frame->hd.stream_id, + frame->window_update.window_size_increment); + } + default: + return apr_snprintf(buffer, maxlen, + "type=%d[length=%d, flags=%d, stream=%d]", + frame->hd.type, (int)frame->hd.length, + frame->hd.flags, frame->hd.stream_id); } } +/******************************************************************************* + * push policy + ******************************************************************************/ +void h2_push_policy_determine(struct h2_request *req, apr_pool_t *p, int push_enabled) +{ + h2_push_policy policy = H2_PUSH_NONE; + if (push_enabled) { + const char *val = apr_table_get(req->headers, "accept-push-policy"); + if (val) { + if (ap_find_token(p, val, "fast-load")) { + policy = H2_PUSH_FAST_LOAD; + } + else if (ap_find_token(p, val, "head")) { + policy = H2_PUSH_HEAD; + } + else if (ap_find_token(p, val, "default")) { + policy = H2_PUSH_DEFAULT; + } + else if (ap_find_token(p, val, "none")) { + policy = H2_PUSH_NONE; + } + else { + /* nothing known found in this header, go by default */ + policy = H2_PUSH_DEFAULT; + } + } + else { + policy = H2_PUSH_DEFAULT; + } + } + req->push_policy = policy; +} diff --git a/mod_http2/h2_util.h b/mod_http2/h2_util.h index 6d86f76a..4ca2f9b6 100644 --- a/mod_http2/h2_util.h +++ b/mod_http2/h2_util.h @@ -16,6 +16,9 @@ #ifndef __mod_h2__h2_util__ #define __mod_h2__h2_util__ +/******************************************************************************* + * some debugging/format helpers + ******************************************************************************/ struct h2_request; struct nghttp2_frame; @@ -28,6 +31,45 @@ size_t h2_util_header_print(char *buffer, size_t maxlen, void h2_util_camel_case_header(char *s, size_t len); +int h2_util_frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen); + +/******************************************************************************* + * ihash - hash for structs with int identifier + ******************************************************************************/ +typedef struct h2_ihash_t h2_ihash_t; +typedef int h2_ihash_iter_t(void *ctx, void *val); + +/** + * Create a hash for structures that have an identifying int member. 
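/*
 * A caller's view (not module code) of h2_push_policy_determine() above.
 * The mapping is: accept-push-policy token "fast-load" -> H2_PUSH_FAST_LOAD,
 * "head" -> H2_PUSH_HEAD, "default" or any unrecognized value ->
 * H2_PUSH_DEFAULT, "none" -> H2_PUSH_NONE; a missing header also means
 * H2_PUSH_DEFAULT, and the policy stays H2_PUSH_NONE whenever push is
 * disabled for the request. The wrapper name is invented for illustration.
 */
static void determine_policy(h2_request *req, apr_pool_t *pool,
                             int push_enabled)
{
    h2_push_policy_determine(req, pool, push_enabled);
    /* req->push_policy now reflects the client's stated preference */
}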
+ * @param pool the pool to use + * @param offset_of_int the offsetof() the int member in the struct + */ +h2_ihash_t *h2_ihash_create(apr_pool_t *pool, size_t offset_of_int); + +size_t h2_ihash_count(h2_ihash_t *ih); +int h2_ihash_is_empty(h2_ihash_t *ih); +void *h2_ihash_get(h2_ihash_t *ih, int id); + +/** + * Iterate over the hash members (without defined order) and invoke + * fn for each member until 0 is returned. + * @param ih the hash to iterate over + * @param fn the function to invoke on each member + * @param ctx user supplied data passed into each iteration call + * @param 0 if one iteration returned 0, otherwise != 0 + */ +int h2_ihash_iter(h2_ihash_t *ih, h2_ihash_iter_t *fn, void *ctx); + +void h2_ihash_add(h2_ihash_t *ih, void *val); +void h2_ihash_remove(h2_ihash_t *ih, int id); +void h2_ihash_clear(h2_ihash_t *ih); + +/******************************************************************************* + * common helpers + ******************************************************************************/ +/* h2_log2(n) iff n is a power of 2 */ +unsigned char h2_log2(apr_uint32_t n); + /** * Count the bytes that all key/value pairs in a table have * in length (exlucding terminating 0s), plus additional extra per pair. @@ -38,11 +80,6 @@ void h2_util_camel_case_header(char *s, size_t len); */ apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra); -int h2_req_ignore_header(const char *name, size_t len); -int h2_req_ignore_trailer(const char *name, size_t len); -void h2_req_strip_ignored_header(apr_table_t *headers); -int h2_res_ignore_trailer(const char *name, size_t len); - /** * Return != 0 iff the string s contains the token, as specified in * HTTP header syntax, rfc7230. @@ -52,6 +89,32 @@ int h2_util_contains_token(apr_pool_t *pool, const char *s, const char *token); const char *h2_util_first_token_match(apr_pool_t *pool, const char *s, const char *tokens[], apr_size_t len); +/** Match a header value against a string constance, case insensitive */ +#define H2_HD_MATCH_LIT(l, name, nlen) \ + ((nlen == sizeof(l) - 1) && !apr_strnatcasecmp(l, name)) + +/******************************************************************************* + * HTTP/2 header helpers + ******************************************************************************/ +int h2_req_ignore_header(const char *name, size_t len); +int h2_req_ignore_trailer(const char *name, size_t len); +int h2_res_ignore_trailer(const char *name, size_t len); +int h2_proxy_res_ignore_header(const char *name, size_t len); + +/** + * Set the push policy for the given request. Takes request headers into + * account, see draft https://tools.ietf.org/html/draft-ruellan-http-accept-push-policy-00 + * for details. + * + * @param req the request to determine the policy for + * @param p the pool to use + * @param push_enabled if HTTP/2 server push is generally enabled for this request + */ +void h2_push_policy_determine(struct h2_request *req, apr_pool_t *p, int push_enabled); + +/******************************************************************************* + * base64 url encoding, different table from normal base64 + ******************************************************************************/ /** * I always wanted to write my own base64url decoder...not. See * https://tools.ietf.org/html/rfc4648#section-5 for description. 
@@ -62,8 +125,9 @@ apr_size_t h2_util_base64url_decode(const char **decoded, const char *h2_util_base64url_encode(const char *data, apr_size_t len, apr_pool_t *pool); -#define H2_HD_MATCH_LIT(l, name, nlen) \ - ((nlen == sizeof(l) - 1) && !apr_strnatcasecmp(l, name)) +/******************************************************************************* + * nghttp2 helpers + ******************************************************************************/ #define H2_HD_MATCH_LIT_CS(l, name) \ ((strlen(name) == sizeof(l) - 1) && !apr_strnatcasecmp(l, name)) @@ -97,6 +161,9 @@ h2_ngheader *h2_util_ngheader_make_res(apr_pool_t *p, h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p, const struct h2_request *req); +/******************************************************************************* + * apr brigade helpers + ******************************************************************************/ /** * Moves data from one brigade into another. If maxlen > 0, it only * moves up to maxlen bytes into the target brigade, making bucket splits @@ -129,7 +196,6 @@ apr_status_t h2_util_copy(apr_bucket_brigade *to, apr_bucket_brigade *from, * @param bb the brigade to check on * @return != 0 iff brigade holds FLUSH or EOS bucket (or both) */ -int h2_util_has_flush_or_eos(apr_bucket_brigade *bb); int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len); int h2_util_bb_has_data(apr_bucket_brigade *bb); int h2_util_bb_has_data_or_eos(apr_bucket_brigade *bb); @@ -177,17 +243,52 @@ void h2_util_bb_log(conn_rec *c, int stream_id, int level, /** * Transfer buckets from one brigade to another with a limit on the - * maximum amount of bytes transfered. + * maximum amount of bytes transfered. Sets aside the buckets to + * pool p. * @param to brigade to transfer buckets to * @param from brigades to remove buckets from * @param p pool that buckets should be setaside to * @param plen maximum bytes to transfer, actual bytes transferred * @param peos if an EOS bucket was transferred */ +apr_status_t h2_ltransfer_brigade(apr_bucket_brigade *to, + apr_bucket_brigade *from, + apr_pool_t *p, + apr_off_t *plen, + int *peos); + +/** + * Transfer all buckets from one brigade to another. Sets aside the buckets to + * pool p. + * @param to brigade to transfer buckets to + * @param from brigades to remove buckets from + * @param p pool that buckets should be setaside to + */ apr_status_t h2_transfer_brigade(apr_bucket_brigade *to, apr_bucket_brigade *from, - apr_pool_t *p, - apr_off_t *plen, - int *peos); + apr_pool_t *p); + +/** + * Transfer buckets from one brigade to another with a limit on the + * maximum amount of bytes transfered. Does no setaside magic, lifetime + * of brigades must fit. + * @param to brigade to transfer buckets to + * @param from brigades to remove buckets from + * @param plen maximum bytes to transfer, actual bytes transferred + * @param peos if an EOS bucket was transferred + */ +apr_status_t h2_append_brigade(apr_bucket_brigade *to, + apr_bucket_brigade *from, + apr_off_t *plen, + int *peos); + +/** + * Get an approximnation of the memory footprint of the given + * brigade. 
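/*
 * A small sketch (not module code) of the base64url helper declared above:
 * it uses the URL- and header-safe alphabet of RFC 4648 section 5 ('-' and
 * '_' instead of '+' and '/'), so the result can be carried in a header or
 * URL without further escaping. The wrapper name is invented for
 * illustration.
 */
static const char *digest_to_header_value(apr_pool_t *pool,
                                          const char *digest, apr_size_t len)
{
    return h2_util_base64url_encode(digest, len, pool);
}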
This varies from apr_brigade_length as + * - no buckets are ever read + * - only buckets known to allocate memory (HEAP+POOL) are counted + * - the bucket struct itself is counted + */ +apr_off_t h2_brigade_mem_size(apr_bucket_brigade *bb); #endif /* defined(__mod_h2__h2_util__) */ diff --git a/mod_http2/h2_version.h b/mod_http2/h2_version.h index cb2db4d9..d68130db 100644 --- a/mod_http2/h2_version.h +++ b/mod_http2/h2_version.h @@ -26,7 +26,7 @@ * @macro * Version number of the http2 module as c string */ -#define MOD_HTTP2_VERSION "1.2.8" +#define MOD_HTTP2_VERSION "1.4.6" /** * @macro @@ -34,7 +34,7 @@ * release. This is a 24 bit number with 8 bits for major number, 8 bits * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203. */ -#define MOD_HTTP2_VERSION_NUM 0x010208 +#define MOD_HTTP2_VERSION_NUM 0x010406 #endif /* mod_h2_h2_version_h */ diff --git a/mod_http2/h2_worker.c b/mod_http2/h2_worker.c index 775a4869..ca6ce3a2 100644 --- a/mod_http2/h2_worker.c +++ b/mod_http2/h2_worker.c @@ -22,81 +22,43 @@ #include #include +#include "h2.h" #include "h2_private.h" #include "h2_conn.h" #include "h2_ctx.h" #include "h2_h2.h" #include "h2_mplx.h" -#include "h2_request.h" #include "h2_task.h" #include "h2_worker.h" static void* APR_THREAD_FUNC execute(apr_thread_t *thread, void *wctx) { h2_worker *worker = (h2_worker *)wctx; - apr_status_t status; - - (void)thread; - /* Other code might want to see a socket for this connection this - * worker processes. Allocate one without further function... - */ - status = apr_socket_create(&worker->socket, - APR_INET, SOCK_STREAM, - APR_PROTO_TCP, worker->pool); - if (status != APR_SUCCESS) { - ap_log_perror(APLOG_MARK, APLOG_ERR, status, worker->pool, - APLOGNO(02948) "h2_worker(%d): alloc socket", - worker->id); - worker->worker_done(worker, worker->ctx); - return NULL; - } + int sticky; while (!worker->aborted) { - h2_mplx *m; - const h2_request *req; + h2_task *task; - /* Get a h2_mplx + h2_request from the main workers queue. */ - status = worker->get_next(worker, &m, &req, worker->ctx); - - while (req) { - conn_rec *c, *master = m->c; - int stream_id = req->id; + /* Get a h2_task from the main workers queue. */ + worker->get_next(worker, worker->ctx, &task, &sticky); + while (task) { + h2_task_do(task, worker->io); - c = h2_slave_create(master, worker->task_pool, - worker->thread, worker->socket); - if (!c) { - ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, c, - APLOGNO(02957) "h2_request(%ld-%d): error setting up slave connection", - m->id, stream_id); - h2_mplx_out_rst(m, stream_id, H2_ERR_INTERNAL_ERROR); + /* if someone was waiting on this task, time to wake up */ + apr_thread_cond_signal(worker->io); + /* report the task done and maybe get another one from the same + * mplx (= master connection), if we can be sticky. + */ + if (sticky && !worker->aborted) { + h2_mplx_task_done(task->mplx, task, &task); } else { - h2_task *task; - - task = h2_task_create(m->id, req, worker->task_pool, m); - h2_ctx_create_for(c, task); - h2_task_do(task, c, worker->io, worker->socket); + h2_mplx_task_done(task->mplx, task, NULL); task = NULL; - - apr_thread_cond_signal(worker->io); } - - /* clean our references and report request as done. Signal - * that we want another unless we have been aborted */ - /* TODO: this will keep a worker attached to this h2_mplx as - * long as it has requests to handle. Might no be fair to - * other mplx's. Perhaps leave after n requests? 
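/*
 * A usage sketch (not module code) for h2_brigade_mem_size(), documented
 * above: because it never reads or morphs buckets, it is a cheap way to
 * enforce a memory budget on buffered output where file buckets should not
 * count against the limit. The helper name is invented for illustration.
 */
static int over_memory_budget(apr_bucket_brigade *bb, apr_off_t max_mem)
{
    /* counts HEAP and POOL payloads plus the bucket structs themselves */
    return h2_brigade_mem_size(bb) > max_mem;
}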
*/ - req = NULL; - apr_pool_clear(worker->task_pool); - h2_mplx_request_done(&m, stream_id, worker->aborted? NULL : &req); } } - if (worker->socket) { - apr_socket_close(worker->socket); - worker->socket = NULL; - } - worker->worker_done(worker, worker->ctx); return NULL; } @@ -116,6 +78,7 @@ h2_worker *h2_worker_create(int id, apr_allocator_create(&allocator); apr_allocator_max_free_set(allocator, ap_max_mem_free); apr_pool_create_ex(&pool, parent_pool, NULL, allocator); + apr_pool_tag(pool, "h2_worker"); apr_allocator_owner_set(allocator, pool); w = apr_pcalloc(pool, sizeof(h2_worker)); @@ -134,7 +97,6 @@ h2_worker *h2_worker_create(int id, return NULL; } - apr_pool_create(&w->task_pool, w->pool); apr_thread_create(&w->thread, attr, execute, w, w->pool); } return w; @@ -173,13 +135,4 @@ int h2_worker_is_aborted(h2_worker *worker) return worker->aborted; } -h2_task *h2_worker_create_task(h2_worker *worker, h2_mplx *m, - const h2_request *req) -{ - h2_task *task; - - task = h2_task_create(m->id, req, worker->task_pool, m); - return task; -} - diff --git a/mod_http2/h2_worker.h b/mod_http2/h2_worker.h index fc0f359e..7a8c254f 100644 --- a/mod_http2/h2_worker.h +++ b/mod_http2/h2_worker.h @@ -30,9 +30,9 @@ typedef struct h2_worker h2_worker; * until a h2_mplx becomes available or the worker itself * gets aborted (idle timeout, for example). */ typedef apr_status_t h2_worker_mplx_next_fn(h2_worker *worker, - struct h2_mplx **pm, - const struct h2_request **preq, - void *ctx); + void *ctx, + struct h2_task **ptask, + int *psticky); /* Invoked just before the worker thread exits. */ typedef void h2_worker_done_fn(h2_worker *worker, void *ctx); @@ -45,9 +45,7 @@ struct h2_worker { int id; apr_thread_t *thread; apr_pool_t *pool; - apr_pool_t *task_pool; struct apr_thread_cond_t *io; - apr_socket_t *socket; h2_worker_mplx_next_fn *get_next; h2_worker_done_fn *worker_done; @@ -142,7 +140,4 @@ int h2_worker_get_id(h2_worker *worker); int h2_worker_is_aborted(h2_worker *worker); -struct h2_task *h2_worker_create_task(h2_worker *worker, struct h2_mplx *m, - const struct h2_request *req); - #endif /* defined(__mod_h2__h2_worker__) */ diff --git a/mod_http2/h2_workers.c b/mod_http2/h2_workers.c index f7606dc4..2c1dc8da 100644 --- a/mod_http2/h2_workers.c +++ b/mod_http2/h2_workers.c @@ -23,10 +23,10 @@ #include #include +#include "h2.h" #include "h2_private.h" #include "h2_mplx.h" -#include "h2_request.h" -#include "h2_task_queue.h" +#include "h2_task.h" #include "h2_worker.h" #include "h2_workers.h" @@ -61,124 +61,137 @@ static void cleanup_zombies(h2_workers *workers, int lock) } } +static h2_task *next_task(h2_workers *workers) +{ + h2_task *task = NULL; + h2_mplx *last = NULL; + int has_more; + + /* Get the next h2_mplx to process that has a task to hand out. + * If it does, place it at the end of the queu and return the + * task to the worker. + * If it (currently) has no tasks, remove it so that it needs + * to register again for scheduling. + * If we run out of h2_mplx in the queue, we need to wait for + * new mplx to arrive. Depending on how many workers do exist, + * we do a timed wait or block indefinitely. 
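/*
 * A minimal sketch (not module code) of the reworked worker callback contract
 * visible above: get_next now hands out an h2_task plus a "sticky" flag
 * instead of an h2_mplx/h2_request pair. A toy implementation serving one
 * fixed mplx could look like this; the function name and the single-mplx
 * setup are assumptions for illustration.
 */
static apr_status_t get_next_single_mplx(h2_worker *worker, void *ctx,
                                         h2_task **ptask, int *psticky)
{
    h2_mplx *m = ctx;            /* assumed: only one master connection */
    int has_more = 0;

    (void)worker;
    *ptask = h2_mplx_pop_task(m, &has_more);
    *psticky = has_more;         /* keep this worker on the same mplx */
    return *ptask? APR_SUCCESS : APR_EOF;
}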
+ */ + while (!task && !H2_MPLX_LIST_EMPTY(&workers->mplxs)) { + h2_mplx *m = H2_MPLX_LIST_FIRST(&workers->mplxs); + + if (last == m) { + break; + } + H2_MPLX_REMOVE(m); + --workers->mplx_count; + + task = h2_mplx_pop_task(m, &has_more); + if (has_more) { + H2_MPLX_LIST_INSERT_TAIL(&workers->mplxs, m); + ++workers->mplx_count; + if (!last) { + last = m; + } + } + } + return task; +} + /** * Get the next task for the given worker. Will block until a task arrives * or the max_wait timer expires and more than min workers exist. - * The previous h2_mplx instance might be passed in and will be served - * with preference, since we can ask it for the next task without aquiring - * the h2_workers lock. */ -static apr_status_t get_mplx_next(h2_worker *worker, h2_mplx **pm, - const h2_request **preq, void *ctx) +static apr_status_t get_mplx_next(h2_worker *worker, void *ctx, + h2_task **ptask, int *psticky) { apr_status_t status; - apr_time_t max_wait, start_wait; - h2_workers *workers = (h2_workers *)ctx; + apr_time_t wait_until = 0, now; + h2_workers *workers = ctx; + h2_task *task = NULL; - max_wait = apr_time_from_sec(apr_atomic_read32(&workers->max_idle_secs)); - start_wait = apr_time_now(); + *ptask = NULL; + *psticky = 0; status = apr_thread_mutex_lock(workers->lock); if (status == APR_SUCCESS) { - const h2_request *req = NULL; - h2_mplx *m = NULL; - int has_more = 0; - - ++workers->idle_worker_count; + ++workers->idle_workers; ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s, "h2_worker(%d): looking for work", h2_worker_get_id(worker)); - while (!req && !h2_worker_is_aborted(worker) && !workers->aborted) { - - /* Get the next h2_mplx to process that has a task to hand out. - * If it does, place it at the end of the queu and return the - * task to the worker. - * If it (currently) has no tasks, remove it so that it needs - * to register again for scheduling. - * If we run out of h2_mplx in the queue, we need to wait for - * new mplx to arrive. Depending on how many workers do exist, - * we do a timed wait or block indefinitely. - */ - m = NULL; - while (!req && !H2_MPLX_LIST_EMPTY(&workers->mplxs)) { - m = H2_MPLX_LIST_FIRST(&workers->mplxs); - H2_MPLX_REMOVE(m); + while (!h2_worker_is_aborted(worker) && !workers->aborted + && !(task = next_task(workers))) { + + /* Need to wait for a new tasks to arrive. If we are above + * minimum workers, we do a timed wait. When timeout occurs + * and we have still more workers, we shut down one after + * the other. */ + cleanup_zombies(workers, 0); + if (workers->worker_count > workers->min_workers) { + now = apr_time_now(); + if (now >= wait_until) { + wait_until = now + apr_time_from_sec(workers->max_idle_secs); + } - req = h2_mplx_pop_request(m, &has_more); - if (req) { - if (has_more) { - H2_MPLX_LIST_INSERT_TAIL(&workers->mplxs, m); - } - else { - has_more = !H2_MPLX_LIST_EMPTY(&workers->mplxs); - } + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s, + "h2_worker(%d): waiting signal, " + "workers=%d, idle=%d", worker->id, + (int)workers->worker_count, + workers->idle_workers); + status = apr_thread_cond_timedwait(workers->mplx_added, + workers->lock, + wait_until - now); + if (status == APR_TIMEUP + && workers->worker_count > workers->min_workers) { + /* waited long enough without getting a task and + * we are above min workers, abort this one. */ + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, + workers->s, + "h2_workers: aborting idle worker"); + h2_worker_abort(worker); break; } } - - if (!req) { - /* Need to wait for a new mplx to arrive. 
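/*
 * A generic sketch (not module code) of the wait strategy used below to
 * shrink the worker pool: block indefinitely while at or below the minimum,
 * otherwise do a timed wait and retire the worker when the timeout expires
 * without new work. Names and the return convention are invented here.
 */
static int wait_for_work(apr_thread_cond_t *cond, apr_thread_mutex_t *lock,
                         int worker_count, int min_workers, int idle_secs)
{
    apr_status_t rv;

    /* caller must hold 'lock' */
    if (worker_count <= min_workers) {
        apr_thread_cond_wait(cond, lock);
        return 1;                         /* keep this worker around */
    }
    rv = apr_thread_cond_timedwait(cond, lock, apr_time_from_sec(idle_secs));
    return (rv == APR_TIMEUP)? 0 : 1;     /* 0 means: retire the worker */
}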
- */ - cleanup_zombies(workers, 0); - - if (workers->worker_count > workers->min_size) { - apr_time_t now = apr_time_now(); - if (now >= (start_wait + max_wait)) { - /* waited long enough without getting a task. */ - if (workers->worker_count > workers->min_size) { - ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, - workers->s, - "h2_workers: aborting idle worker"); - h2_worker_abort(worker); - break; - } - } - ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s, - "h2_worker(%d): waiting signal, " - "worker_count=%d", worker->id, - (int)workers->worker_count); - apr_thread_cond_timedwait(workers->mplx_added, - workers->lock, max_wait); - } - else { - ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s, - "h2_worker(%d): waiting signal (eternal), " - "worker_count=%d", worker->id, - (int)workers->worker_count); - apr_thread_cond_wait(workers->mplx_added, workers->lock); - } + else { + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s, + "h2_worker(%d): waiting signal (eternal), " + "worker_count=%d, idle=%d", worker->id, + (int)workers->worker_count, + workers->idle_workers); + apr_thread_cond_wait(workers->mplx_added, workers->lock); } } - /* Here, we either have gotten task and mplx for the worker or - * needed to give up with more than enough workers. + /* Here, we either have gotten task or decided to shut down + * the calling worker. */ - if (req) { - ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s, - "h2_worker(%d): start request(%ld-%d)", - h2_worker_get_id(worker), m->id, req->id); - *pm = m; - *preq = req; + if (task) { + /* Ok, we got something to give back to the worker for execution. + * If we have more idle workers than h2_mplx in our queue, then + * we let the worker be sticky, e.g. making it poll the task's + * h2_mplx instance for more work before asking back here. + * This avoids entering our global lock as long as enough idle + * workers remain. Stickiness of a worker ends when the connection + * has no new tasks to process, so the worker will get back here + * eventually. + */ + *ptask = task; + *psticky = (workers->max_workers >= workers->mplx_count); - if (has_more && workers->idle_worker_count > 1) { + if (workers->mplx_count && workers->idle_workers > 1) { apr_thread_cond_signal(workers->mplx_added); } - status = APR_SUCCESS; - } - else { - status = APR_EOF; } - --workers->idle_worker_count; + --workers->idle_workers; apr_thread_mutex_unlock(workers->lock); } - return status; + return *ptask? APR_SUCCESS : APR_EOF; } static void worker_done(h2_worker *worker, void *ctx) { - h2_workers *workers = (h2_workers *)ctx; + h2_workers *workers = ctx; apr_status_t status = apr_thread_mutex_lock(workers->lock); if (status == APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s, @@ -213,7 +226,7 @@ static apr_status_t h2_workers_start(h2_workers *workers) ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s, "h2_workers: starting"); - while (workers->worker_count < workers->min_size + while (workers->worker_count < workers->min_workers && status == APR_SUCCESS) { status = add_worker(workers); } @@ -223,7 +236,7 @@ static apr_status_t h2_workers_start(h2_workers *workers) } h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool, - int min_size, int max_size, + int min_workers, int max_workers, apr_size_t max_tx_handles) { apr_status_t status; @@ -239,13 +252,14 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool, * happen on the pool handed to us, which we do not guard. 
*/ apr_pool_create(&pool, server_pool); + apr_pool_tag(pool, "h2_workers"); workers = apr_pcalloc(pool, sizeof(h2_workers)); if (workers) { workers->s = s; workers->pool = pool; - workers->min_size = min_size; - workers->max_size = max_size; - apr_atomic_set32(&workers->max_idle_secs, 10); + workers->min_workers = min_workers; + workers->max_workers = max_workers; + workers->max_idle_secs = 10; workers->max_tx_handles = max_tx_handles; workers->spare_tx_handles = workers->max_tx_handles; @@ -320,22 +334,22 @@ apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m) apr_status_t status = apr_thread_mutex_lock(workers->lock); if (status == APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_TRACE3, status, workers->s, - "h2_workers: register mplx(%ld)", m->id); + "h2_workers: register mplx(%ld), idle=%d", + m->id, workers->idle_workers); if (in_list(workers, m)) { - ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s, - "h2_workers: already registered mplx(%ld)", m->id); status = APR_EAGAIN; } else { H2_MPLX_LIST_INSERT_TAIL(&workers->mplxs, m); + ++workers->mplx_count; status = APR_SUCCESS; } - if (workers->idle_worker_count > 0) { + if (workers->idle_workers > 0) { apr_thread_cond_signal(workers->mplx_added); } else if (status == APR_SUCCESS - && workers->worker_count < workers->max_size) { + && workers->worker_count < workers->max_workers) { ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s, "h2_workers: got %d worker, adding 1", workers->worker_count); @@ -368,7 +382,7 @@ void h2_workers_set_max_idle_secs(h2_workers *workers, int idle_secs) " is not valid, ignored.", idle_secs); return; } - apr_atomic_set32(&workers->max_idle_secs, idle_secs); + workers->max_idle_secs = idle_secs; } apr_size_t h2_workers_tx_reserve(h2_workers *workers, apr_size_t count) diff --git a/mod_http2/h2_workers.h b/mod_http2/h2_workers.h index 7ec38813..ae7b4d89 100644 --- a/mod_http2/h2_workers.h +++ b/mod_http2/h2_workers.h @@ -27,7 +27,6 @@ struct apr_thread_cond_t; struct h2_mplx; struct h2_request; struct h2_task; -struct h2_task_queue; typedef struct h2_workers h2_workers; @@ -36,8 +35,11 @@ struct h2_workers { apr_pool_t *pool; int next_worker_id; - int min_size; - int max_size; + int min_workers; + int max_workers; + int worker_count; + int idle_workers; + int max_idle_secs; apr_size_t max_tx_handles; apr_size_t spare_tx_handles; @@ -49,10 +51,7 @@ struct h2_workers { APR_RING_HEAD(h2_worker_list, h2_worker) workers; APR_RING_HEAD(h2_worker_zombies, h2_worker) zombies; APR_RING_HEAD(h2_mplx_list, h2_mplx) mplxs; - - int worker_count; - volatile apr_uint32_t max_idle_secs; - volatile apr_uint32_t idle_worker_count; + int mplx_count; struct apr_thread_mutex_t *lock; struct apr_thread_cond_t *mplx_added; diff --git a/mod_http2/mod_http2.c b/mod_http2/mod_http2.c index 8bc438fc..0d339691 100644 --- a/mod_http2/mod_http2.c +++ b/mod_http2/mod_http2.c @@ -15,6 +15,8 @@ #include #include +#include +#include #include #include @@ -34,6 +36,7 @@ #include "h2_config.h" #include "h2_ctx.h" #include "h2_h2.h" +#include "h2_mplx.h" #include "h2_push.h" #include "h2_request.h" #include "h2_switch.h" @@ -71,14 +74,14 @@ static int h2_post_config(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s) { void *data = NULL; - const char *mod_h2_init_key = "mod_h2_init_counter"; + const char *mod_h2_init_key = "mod_http2_init_counter"; nghttp2_info *ngh2; apr_status_t status; (void)plog;(void)ptemp; apr_pool_userdata_get(&data, mod_h2_init_key, s->process->pool); if ( data == NULL ) { - 
ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, s, + ap_log_error( APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03089) "initializing post config dry run"); apr_pool_userdata_set((const void *)1, mod_h2_init_key, apr_pool_cleanup_null, s->process->pool); @@ -86,11 +89,17 @@ static int h2_post_config(apr_pool_t *p, apr_pool_t *plog, } ngh2 = nghttp2_version(0); - ap_log_error( APLOG_MARK, APLOG_INFO, 0, s, + ap_log_error( APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(03090) "mod_http2 (v%s, nghttp2 %s), initializing...", MOD_HTTP2_VERSION, ngh2? ngh2->version_str : "unknown"); switch (h2_conn_mpm_type()) { + case H2_MPM_SIMPLE: + case H2_MPM_MOTORZ: + case H2_MPM_NETWARE: + case H2_MPM_WINNT: + /* not sure we need something extra for those. */ + break; case H2_MPM_EVENT: case H2_MPM_WORKER: /* all fine, we know these ones */ @@ -100,7 +109,7 @@ static int h2_post_config(apr_pool_t *p, apr_pool_t *plog, break; case H2_MPM_UNKNOWN: /* ??? */ - ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(03091) "post_config: mpm type unknown"); break; } @@ -120,6 +129,26 @@ static char *http2_var_lookup(apr_pool_t *, server_rec *, conn_rec *, request_rec *, char *name); static int http2_is_h2(conn_rec *); +static apr_status_t http2_req_engine_push(const char *ngn_type, + request_rec *r, + http2_req_engine_init *einit) +{ + return h2_mplx_req_engine_push(ngn_type, r, einit); +} + +static apr_status_t http2_req_engine_pull(h2_req_engine *ngn, + apr_read_type_e block, + apr_uint32_t capacity, + request_rec **pr) +{ + return h2_mplx_req_engine_pull(ngn, block, capacity, pr); +} + +static void http2_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn) +{ + h2_mplx_req_engine_done(ngn, r_conn); +} + /* Runs once per created child process. Perform any process * related initionalization here. */ @@ -132,8 +161,6 @@ static void h2_child_init(apr_pool_t *pool, server_rec *s) APLOGNO(02949) "initializing connection handling"); } - APR_REGISTER_OPTIONAL_FN(http2_is_h2); - APR_REGISTER_OPTIONAL_FN(http2_var_lookup); } /* Install this module into the apache2 infrastructure. @@ -142,6 +169,12 @@ static void h2_hooks(apr_pool_t *pool) { static const char *const mod_ssl[] = { "mod_ssl.c", NULL}; + APR_REGISTER_OPTIONAL_FN(http2_is_h2); + APR_REGISTER_OPTIONAL_FN(http2_var_lookup); + APR_REGISTER_OPTIONAL_FN(http2_req_engine_push); + APR_REGISTER_OPTIONAL_FN(http2_req_engine_pull); + APR_REGISTER_OPTIONAL_FN(http2_req_engine_done); + ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool, "installing hooks"); /* Run once after configuration is set, but before mpm children initialize. @@ -166,36 +199,83 @@ static void h2_hooks(apr_pool_t *pool) ap_hook_handler(h2_filter_h2_status_handler, NULL, NULL, APR_HOOK_MIDDLE); } -static char *value_of_HTTP2(apr_pool_t *p, server_rec *s, - conn_rec *c, request_rec *r) +static const char *val_HTTP2(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) { - return c && http2_is_h2(c)? "on" : "off"; + return ctx? "on" : "off"; } -static char *value_of_H2PUSH(apr_pool_t *p, server_rec *s, - conn_rec *c, request_rec *r) +static const char *val_H2_PUSH(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) { - h2_ctx *ctx; - if (r) { - ctx = h2_ctx_rget(r); - if (ctx) { + if (ctx) { + if (r) { h2_task *task = h2_ctx_get_task(ctx); - return (task && task->request->push_policy != H2_PUSH_NONE)? 
"on" : "off"; + if (task && task->request->push_policy != H2_PUSH_NONE) { + return "on"; + } + } + else if (c && h2_session_push_enabled(ctx->session)) { + return "on"; } - } - else if (c) { - ctx = h2_ctx_get(c, 0); - return ctx && h2_session_push_enabled(ctx->session)? "on" : "off"; } else if (s) { const h2_config *cfg = h2_config_sget(s); - return cfg && h2_config_geti(cfg, H2_CONF_PUSH)? "on" : "off"; + if (cfg && h2_config_geti(cfg, H2_CONF_PUSH)) { + return "on"; + } } return "off"; } -typedef char *h2_var_lookup(apr_pool_t *p, server_rec *s, - conn_rec *c, request_rec *r); +static const char *val_H2_PUSHED(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) +{ + if (ctx) { + h2_task *task = h2_ctx_get_task(ctx); + if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) { + return "PUSHED"; + } + } + return ""; +} + +static const char *val_H2_PUSHED_ON(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) +{ + if (ctx) { + h2_task *task = h2_ctx_get_task(ctx); + if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) { + return apr_itoa(p, task->request->initiated_on); + } + } + return ""; +} + +static const char *val_H2_STREAM_TAG(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) +{ + if (ctx) { + h2_task *task = h2_ctx_get_task(ctx); + if (task) { + return task->id; + } + } + return ""; +} + +static const char *val_H2_STREAM_ID(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) +{ + const char *cp = val_H2_STREAM_TAG(p, s, c, r, ctx); + if (cp && (cp = ap_strchr_c(cp, '-'))) { + return ++cp; + } + return NULL; +} + +typedef const char *h2_var_lookup(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx); typedef struct h2_var_def { const char *name; h2_var_lookup *lookup; @@ -203,8 +283,13 @@ typedef struct h2_var_def { } h2_var_def; static h2_var_def H2_VARS[] = { - { "HTTP2", value_of_HTTP2, 1 }, - { "H2PUSH", value_of_H2PUSH, 1 }, + { "HTTP2", val_HTTP2, 1 }, + { "H2PUSH", val_H2_PUSH, 1 }, + { "H2_PUSH", val_H2_PUSH, 1 }, + { "H2_PUSHED", val_H2_PUSHED, 1 }, + { "H2_PUSHED_ON", val_H2_PUSHED_ON, 1 }, + { "H2_STREAM_ID", val_H2_STREAM_ID, 1 }, + { "H2_STREAM_TAG", val_H2_STREAM_TAG, 1 }, }; #ifndef H2_ALEN @@ -225,7 +310,9 @@ static char *http2_var_lookup(apr_pool_t *p, server_rec *s, for (i = 0; i < H2_ALEN(H2_VARS); ++i) { h2_var_def *vdef = &H2_VARS[i]; if (!strcmp(vdef->name, name)) { - return vdef->lookup(p, s, c, r); + h2_ctx *ctx = (r? h2_ctx_rget(r) : + h2_ctx_get(c->master? c->master : c, 0)); + return (char *)vdef->lookup(p, s, c, r, ctx); } } return ""; @@ -241,7 +328,8 @@ static int h2_h2_fixups(request_rec *r) h2_var_def *vdef = &H2_VARS[i]; if (vdef->subprocess) { apr_table_setn(r->subprocess_env, vdef->name, - vdef->lookup(r->pool, r->server, r->connection, r)); + vdef->lookup(r->pool, r->server, r->connection, + r, ctx)); } } } diff --git a/mod_http2/mod_http2.h b/mod_http2/mod_http2.h index a8c58f2c..30735792 100644 --- a/mod_http2/mod_http2.h +++ b/mod_http2/mod_http2.h @@ -13,18 +13,81 @@ * limitations under the License. */ -#ifndef mod_http2_mod_http2_h -#define mod_http2_mod_http2_h +#ifndef __MOD_HTTP2_H__ +#define __MOD_HTTP2_H__ /** The http2_var_lookup() optional function retrieves HTTP2 environment * variables. 
*/ -APR_DECLARE_OPTIONAL_FN(char *, http2_var_lookup, - (apr_pool_t *, server_rec *, - conn_rec *, request_rec *, - char *)); +APR_DECLARE_OPTIONAL_FN(char *, + http2_var_lookup, (apr_pool_t *, server_rec *, + conn_rec *, request_rec *, char *)); /** An optional function which returns non-zero if the given connection * or its master connection is using HTTP/2. */ -APR_DECLARE_OPTIONAL_FN(int, http2_is_h2, (conn_rec *)); +APR_DECLARE_OPTIONAL_FN(int, + http2_is_h2, (conn_rec *)); + +/******************************************************************************* + * HTTP/2 request engines + ******************************************************************************/ + +struct apr_thread_cond_t; + +typedef struct h2_req_engine h2_req_engine; + +typedef void http2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed); + +/** + * Initialize a h2_req_engine. The structure will be passed in but + * only the name and master are set. The function should initialize + * all fields. + * @param engine the allocated, partially filled structure + * @param r the first request to process, or NULL + */ +typedef apr_status_t http2_req_engine_init(h2_req_engine *engine, + const char *id, + const char *type, + apr_pool_t *pool, + apr_uint32_t req_buffer_size, + request_rec *r, + http2_output_consumed **pconsumed, + void **pbaton); + +/** + * Push a request to an engine with the specified name for further processing. + * If no such engine is available, einit is not NULL, einit is called + * with a new engine record and the caller is responsible for running the + * new engine instance. + * @param engine_type the type of the engine to add the request to + * @param r the request to push to an engine for processing + * @param einit an optional initialization callback for a new engine + * of the requested type, should no instance be available. + * By passing a non-NULL callback, the caller is willing + * to init and run a new engine itself. + * @return APR_SUCCESS iff slave was successfully added to an engine + */ +APR_DECLARE_OPTIONAL_FN(apr_status_t, + http2_req_engine_push, (const char *engine_type, + request_rec *r, + http2_req_engine_init *einit)); + +/** + * Get a new request for processing in this engine. + * @param engine the engine which is done processing the slave + * @param timeout wait a maximum amount of time for a new slave, 0 will not wait + * @param pslave the slave connection that needs processing or NULL + * @return APR_SUCCESS if new request was assigned + * APR_EAGAIN if no new request is available + * APR_EOF if engine may shut down, as no more request will be scheduled + * APR_ECONNABORTED if the engine needs to shut down immediately + */ +APR_DECLARE_OPTIONAL_FN(apr_status_t, + http2_req_engine_pull, (h2_req_engine *engine, + apr_read_type_e block, + apr_uint32_t capacity, + request_rec **pr)); +APR_DECLARE_OPTIONAL_FN(void, + http2_req_engine_done, (h2_req_engine *engine, + conn_rec *rconn)); #endif
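The scheduler rework in h2_workers.c above (next_task() and get_mplx_next()) rotates registered connections in a plain FIFO: a connection that still has tasks after handing one out goes back to the tail, a connection with nothing to do drops out until it registers again via h2_workers_register(), and the returned task is marked "sticky" when worker capacity is at least the number of scheduled connections. Below is a minimal, self-contained sketch of that rotation under assumed stand-in types (conn_t for h2_mplx, sched_t for h2_workers, pop_task for h2_mplx_pop_task); the real code additionally holds workers->lock and waits on the mplx_added condition, which the sketch leaves out.

/* Sketch: round-robin over scheduled connections, as in next_task().
 * conn_t, sched_t and pop_task are hypothetical stand-ins; no locking here. */
#include <stddef.h>

typedef struct conn_t conn_t;
struct conn_t {
    conn_t *next;                       /* FIFO link */
    int (*pop_task)(conn_t *c, void **ptask, int *phas_more);
};

typedef struct {
    conn_t *head, *tail;                /* registered connections */
    int conn_count;                     /* like workers->mplx_count */
    int max_workers;
} sched_t;

static void *sched_next_task(sched_t *s, int *psticky)
{
    void *task = NULL;
    conn_t *stop = NULL;                /* first re-queued conn, to avoid spinning */

    while (!task && s->head && s->head != stop) {
        conn_t *c = s->head;
        int has_more = 0;

        s->head = c->next;              /* unlink from the FIFO */
        if (!s->head) {
            s->tail = NULL;
        }
        --s->conn_count;

        c->pop_task(c, &task, &has_more);
        if (has_more) {                 /* still has work: back to the tail */
            c->next = NULL;
            if (s->tail) {
                s->tail->next = c;
            }
            else {
                s->head = c;
            }
            s->tail = c;
            ++s->conn_count;
            if (!stop) {
                stop = c;
            }
        }
        /* else: the connection drops out until it registers again */
    }
    /* sticky while capacity covers the scheduled connections, mirroring
     * (workers->max_workers >= workers->mplx_count) in get_mplx_next() */
    *psticky = (s->max_workers >= s->conn_count);
    return task;
}

As the comment in get_mplx_next() notes, a sticky worker keeps polling its connection for follow-up tasks before asking the pool again, which avoids entering the global workers lock for every task as long as enough idle workers remain.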
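The idle handling changed as well: a worker above min_workers now waits with a timeout and shuts itself down once max_idle_secs pass without work, while workers at or below the minimum block indefinitely on the condition variable. A condensed sketch of that policy, assuming hypothetical names (pool_t, wait_for_work, have_work) and omitting the logging and zombie cleanup of the real code:

/* Sketch: idle-timeout policy of get_mplx_next(); pool_t and have_work are
 * hypothetical. Caller holds p->lock, as the real code holds workers->lock. */
#include "apr_errno.h"
#include "apr_time.h"
#include "apr_thread_mutex.h"
#include "apr_thread_cond.h"

typedef struct {
    apr_thread_mutex_t *lock;
    apr_thread_cond_t *task_added;      /* like workers->mplx_added */
    int worker_count;
    int min_workers;
    int max_idle_secs;
} pool_t;

/* Returns 1 when work is available, 0 when the calling worker should exit. */
static int wait_for_work(pool_t *p, int (*have_work)(pool_t *pool))
{
    apr_time_t wait_until = 0, now;

    while (!have_work(p)) {
        if (p->worker_count > p->min_workers) {
            now = apr_time_now();
            if (now >= wait_until) {
                wait_until = now + apr_time_from_sec(p->max_idle_secs);
            }
            if (apr_thread_cond_timedwait(p->task_added, p->lock,
                                          wait_until - now) == APR_TIMEUP
                && p->worker_count > p->min_workers) {
                return 0;               /* idled past max_idle_secs above the minimum */
            }
        }
        else {
            apr_thread_cond_wait(p->task_added, p->lock);
        }
    }
    return 1;
}

In the patch, max_idle_secs defaults to 10 seconds in h2_workers_create() and can be changed through h2_workers_set_max_idle_secs().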
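On the mod_http2.c side, the HTTP/2 related values are reachable in two ways: h2_h2_fixups() exports every variable flagged in H2_VARS (HTTP2, H2PUSH, H2_PUSH, H2_PUSHED, H2_PUSHED_ON, H2_STREAM_ID, H2_STREAM_TAG) into r->subprocess_env, and other modules can call the optional functions registered in h2_hooks(). A hedged sketch of the latter, where my_post_config and my_fixup are illustrative names only:

/* Sketch: consuming mod_http2's optional functions from another module.
 * my_post_config/my_fixup are illustrative; register them with
 * ap_hook_post_config()/ap_hook_fixups() in the module's hook function. */
#include "httpd.h"
#include "http_config.h"
#include "http_log.h"
#include "apr_buckets.h"
#include "apr_optional.h"
#include "mod_http2.h"

static APR_OPTIONAL_FN_TYPE(http2_is_h2) *is_h2_fn;
static APR_OPTIONAL_FN_TYPE(http2_var_lookup) *h2_var_fn;

static int my_post_config(apr_pool_t *p, apr_pool_t *plog,
                          apr_pool_t *ptemp, server_rec *s)
{
    (void)p; (void)plog; (void)ptemp; (void)s;
    /* both remain NULL when mod_http2 is not loaded */
    is_h2_fn  = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2);
    h2_var_fn = APR_RETRIEVE_OPTIONAL_FN(http2_var_lookup);
    return OK;
}

static int my_fixup(request_rec *r)
{
    if (is_h2_fn && h2_var_fn && is_h2_fn(r->connection)) {
        const char *sid = h2_var_fn(r->pool, r->server, r->connection, r,
                                    "H2_STREAM_ID");
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                      "request served via HTTP/2, stream %s",
                      (sid && *sid)? sid : "unknown");
    }
    return DECLINED;
}

http2_var_lookup() resolves the same names as the H2_VARS table and returns an empty string for unknown names; http2_is_h2() reports whether the connection or its master connection runs HTTP/2.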
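The request-engine interface declared at the end of mod_http2.h lets a module collect HTTP/2 slave requests of one engine type and process them itself. The sketch below only illustrates the call shape: the signatures come from the declarations above, but every my_* name and the "my-engine" type string are invented, and the ownership details (what to do with the initiating request, whether SUSPENDED and DONE are the right handler return codes) are assumptions of this sketch rather than something the patch spells out.

/* Sketch: driving the request-engine API. All my_* names and the
 * "my-engine" type are invented; register my_handler via ap_hook_handler(). */
#include "httpd.h"
#include "apr_buckets.h"
#include "apr_optional.h"
#include "mod_http2.h"

static APR_OPTIONAL_FN_TYPE(http2_req_engine_push) *req_engine_push;
static APR_OPTIONAL_FN_TYPE(http2_req_engine_pull) *req_engine_pull;
static APR_OPTIONAL_FN_TYPE(http2_req_engine_done) *req_engine_done;

static h2_req_engine *my_engine;

/* Called by mod_http2 when this caller is asked to run a new engine instance. */
static apr_status_t my_engine_init(h2_req_engine *engine, const char *id,
                                   const char *type, apr_pool_t *pool,
                                   apr_uint32_t req_buffer_size,
                                   request_rec *r,
                                   http2_output_consumed **pconsumed,
                                   void **pbaton)
{
    (void)id; (void)type; (void)pool; (void)req_buffer_size; (void)r;
    my_engine = engine;
    *pconsumed = NULL;          /* no output flow-control feedback in this sketch */
    *pbaton = NULL;
    return APR_SUCCESS;
}

static int my_handler(request_rec *r)
{
    apr_status_t rv;

    if (!req_engine_push) {
        req_engine_push = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_push);
        req_engine_pull = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_pull);
        req_engine_done = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_done);
    }
    if (!req_engine_push) {
        return DECLINED;        /* mod_http2 not loaded */
    }

    /* Offer the request to a "my-engine" instance; passing my_engine_init
     * says we are willing to run a new engine ourselves if none exists. */
    rv = req_engine_push("my-engine", r, my_engine_init);
    if (rv != APR_SUCCESS) {
        return DECLINED;        /* no engine took it, normal processing instead */
    }
    if (!my_engine) {
        return SUSPENDED;       /* assumption: an existing engine owns r now */
    }

    /* ...process r itself here, then pull further requests assigned to the
     * engine until mod_http2 signals that no more will be scheduled... */
    do {
        request_rec *next = NULL;
        rv = req_engine_pull(my_engine, APR_BLOCK_READ, 1, &next);
        if (rv == APR_SUCCESS && next) {
            /* ...process 'next' on its slave connection... */
            req_engine_done(my_engine, next->connection);
        }
    } while (rv == APR_SUCCESS || rv == APR_EAGAIN);

    return DONE;
}

The return codes that drive the pull loop (APR_SUCCESS, APR_EAGAIN, APR_EOF, APR_ECONNABORTED) are documented on http2_req_engine_pull() above.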