diff --git a/ChangeLog b/ChangeLog
index ce0d07d4..9740fb13 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,32 @@
+v1.1.0
+--------------------------------------------------------------------------------
+ * GOAWAY will be sent when an HTTP/2 connection is shut down, whenever the
+   connection state still allows it. Calculation of the last stream id has been
+   corrected to include streams started, not only streams finished processing.
+ * several issues fixed with new fuzzing tests that simulate random closes
+   and delays/timeouts
+ * H2SessionExtraFiles are now shared between all sessions in the same process.
+   It now works like this:
+   H2MaxWorkers * H2SessionExtraFiles is assumed to be the total number of
+   file handles that can be safely used for HTTP/2 transfers without other
+   parts of the server running out of handles. This number is shared between
+   all HTTP/2 connections in the same server process.
+   The default is set to 2. With H2MaxWorkers defaulting to 25 on most
+   platforms/mpms, that gives a maximum of 50 handles that can be involved
+   in transfers.
+   I think I have to write a blog post one day about how this works and
+   affects performance. tl;dr the more handles http2 may use, the faster
+   static files can be served.
+ * KeepAlive is now correctly visible on the server status page for HTTP/2
+   connections. (Would like more info there, need to extend the scoreboard
+   for it.)
+ * KeepAlive connections are *not* set aside in async MPMs such as event. This
+   is a very desirable feature, but I could not get it to work reliably with
+   the existing MPM interface. Will work on that for the next Apache release.
+   (The main problem is that event will silently close such connections and
+   http2 has no chance to send a last GOAWAY frame. That makes clients fail
+   requests which they have just started.)
+
 v1.0.18
 --------------------------------------------------------------------------------
  * fixed race condition in connnection shutdown that could cause indefinite
diff --git a/configure.ac b/configure.ac
index 90ae1044..d4e0cc07 100644
--- a/configure.ac
+++ b/configure.ac
@@ -14,7 +14,7 @@
 #
 AC_PREREQ([2.69])
 
-AC_INIT([mod_http2], [1.0.18], [stefan.eissing@greenbytes.de])
+AC_INIT([mod_http2], [1.1.0], [stefan.eissing@greenbytes.de])
 
 LT_PREREQ([2.2.6])
 LT_INIT()
diff --git a/mod_http2/h2_config.c b/mod_http2/h2_config.c
index 1458f5a4..ecfe949d 100644
--- a/mod_http2/h2_config.c
+++ b/mod_http2/h2_config.c
@@ -64,31 +64,9 @@ static h2_config defconf = {
     0,                  /* stream timeout */
 };
 
-static int files_per_session;
-
 void h2_config_init(apr_pool_t *pool)
 {
-    /* Determine a good default for this platform and mpm?
-     * TODO: not sure how APR wants to hand out this piece of
-     * information.
- */ - int max_files = 256; - int conn_threads = 1; - int tx_files = max_files / 4; - (void)pool; - ap_mpm_query(AP_MPMQ_MAX_THREADS, &conn_threads); - switch (h2_conn_mpm_type()) { - case H2_MPM_PREFORK: - case H2_MPM_WORKER: - case H2_MPM_EVENT: - /* allow that many transfer open files per mplx */ - files_per_session = (tx_files / conn_threads); - break; - default: - /* don't know anything about it, stay safe */ - break; - } } static void *h2_config_create(apr_pool_t *pool, @@ -178,7 +156,6 @@ int h2_config_geti(const h2_config *conf, h2_config_var_t var) apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var) { - int n; switch(var) { case H2_CONF_MAX_STREAMS: return H2_CONFIG_GET(conf, &defconf, h2_max_streams); @@ -203,11 +180,7 @@ apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var) case H2_CONF_DIRECT: return H2_CONFIG_GET(conf, &defconf, h2_direct); case H2_CONF_SESSION_FILES: - n = H2_CONFIG_GET(conf, &defconf, session_extra_files); - if (n < 0) { - n = files_per_session; - } - return n; + return H2_CONFIG_GET(conf, &defconf, session_extra_files); case H2_CONF_TLS_WARMUP_SIZE: return H2_CONFIG_GET(conf, &defconf, tls_warmup_size); case H2_CONF_TLS_COOLDOWN_SECS: diff --git a/mod_http2/h2_conn.c b/mod_http2/h2_conn.c index d0b5ada2..49570c97 100644 --- a/mod_http2/h2_conn.c +++ b/mod_http2/h2_conn.c @@ -78,7 +78,7 @@ apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s) { const h2_config *config = h2_config_sget(s); apr_status_t status = APR_SUCCESS; - int minw, maxw; + int minw, maxw, max_tx_handles, n; int max_threads_per_child = 0; int idle_secs = 0; @@ -105,11 +105,29 @@ apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s) maxw = minw; } + /* How many file handles is it safe to use for transfer + * to the master connection to be streamed out? + * Is there a portable APR rlimit on NOFILES? Have not + * found it. And if, how many of those would we set aside? + * This leads all into a process wide handle allocation strategy + * which ultimately would limit the number of accepted connections + * with the assumption of implicitly reserving n handles for every + * connection and requiring modules with excessive needs to allocate + * from a central pool. 
+ */ + n = h2_config_geti(config, H2_CONF_SESSION_FILES); + if (n < 0) { + max_tx_handles = maxw * 2; + } + else { + max_tx_handles = maxw * n; + } + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, - "h2_workers: min=%d max=%d, mthrpchild=%d", - minw, maxw, max_threads_per_child); + "h2_workers: min=%d max=%d, mthrpchild=%d, tx_files=%d", + minw, maxw, max_threads_per_child, max_tx_handles); + workers = h2_workers_create(s, pool, minw, maxw, max_tx_handles); - workers = h2_workers_create(s, pool, minw, maxw); idle_secs = h2_config_geti(config, H2_CONF_MAX_WORKER_IDLE_SECS); h2_workers_set_max_idle_secs(workers, idle_secs); @@ -149,9 +167,6 @@ apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r) } h2_ctx_session_set(ctx, session); - - ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c); - return APR_SUCCESS; } diff --git a/mod_http2/h2_filter.c b/mod_http2/h2_filter.c index 5ad3aed5..fd8e25ce 100644 --- a/mod_http2/h2_filter.c +++ b/mod_http2/h2_filter.c @@ -133,7 +133,6 @@ apr_status_t h2_filter_core_input(ap_filter_t* f, apr_socket_timeout_set(cin->socket, t); } } - ap_update_child_status_from_conn(f->c->sbh, SERVER_BUSY_READ, f->c); status = ap_get_brigade(f->next, cin->bb, AP_MODE_READBYTES, block, readbytes); if (saved_timeout != UNSET) { diff --git a/mod_http2/h2_io.c b/mod_http2/h2_io.c index 5a2ad8f8..092a37c3 100644 --- a/mod_http2/h2_io.c +++ b/mod_http2/h2_io.c @@ -355,7 +355,7 @@ static void process_trailers(h2_io *io, apr_table_t *trailers) apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb, apr_size_t maxlen, apr_table_t *trailers, - int *pfile_handles_allowed) + apr_size_t *pfile_buckets_allowed) { apr_status_t status; int start_allowed; @@ -397,12 +397,12 @@ apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb, * many open files already buffered. Otherwise we will run out of * file handles. */ - start_allowed = *pfile_handles_allowed; - status = h2_util_move(io->bbout, bb, maxlen, pfile_handles_allowed, + start_allowed = *pfile_buckets_allowed; + status = h2_util_move(io->bbout, bb, maxlen, pfile_buckets_allowed, "h2_io_out_write"); /* track # file buckets moved into our pool */ - if (start_allowed != *pfile_handles_allowed) { - io->files_handles_owned += (start_allowed - *pfile_handles_allowed); + if (start_allowed != *pfile_buckets_allowed) { + io->files_handles_owned += (start_allowed - *pfile_buckets_allowed); } return status; } diff --git a/mod_http2/h2_io.h b/mod_http2/h2_io.h index 647d3043..fc09cef6 100644 --- a/mod_http2/h2_io.h +++ b/mod_http2/h2_io.h @@ -158,7 +158,7 @@ apr_status_t h2_io_out_read_to(h2_io *io, apr_status_t h2_io_out_write(h2_io *io, apr_bucket_brigade *bb, apr_size_t maxlen, apr_table_t *trailers, - int *pfile_buckets_allowed); + apr_size_t *pfile_buckets_allowed); /** * Closes the input. 
After existing data has been read, APR_EOF will diff --git a/mod_http2/h2_mplx.c b/mod_http2/h2_mplx.c index 53da5c56..743f7545 100644 --- a/mod_http2/h2_mplx.c +++ b/mod_http2/h2_mplx.c @@ -72,6 +72,28 @@ static int is_aborted(h2_mplx *m, apr_status_t *pstatus) static void have_out_data_for(h2_mplx *m, int stream_id); +static void check_tx_reservation(h2_mplx *m) +{ + if (m->tx_handles_reserved == 0) { + m->tx_handles_reserved += h2_workers_tx_reserve(m->workers, + H2MIN(m->tx_chunk_size, h2_io_set_size(m->stream_ios))); + } +} + +static void check_tx_free(h2_mplx *m) +{ + if (m->tx_handles_reserved > m->tx_chunk_size) { + apr_size_t count = m->tx_handles_reserved - m->tx_chunk_size; + m->tx_handles_reserved = m->tx_chunk_size; + h2_workers_tx_free(m->workers, count); + } + else if (m->tx_handles_reserved + && (!m->stream_ios || h2_io_set_is_empty(m->stream_ios))) { + h2_workers_tx_free(m->workers, m->tx_handles_reserved); + m->tx_handles_reserved = 0; + } +} + static void h2_mplx_destroy(h2_mplx *m) { AP_DEBUG_ASSERT(m); @@ -88,6 +110,8 @@ static void h2_mplx_destroy(h2_mplx *m) m->stream_ios = NULL; } + check_tx_free(m); + if (m->pool) { apr_pool_destroy(m->pool); } @@ -142,12 +166,25 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM); m->workers = workers; - m->file_handles_allowed = h2_config_geti(conf, H2_CONF_SESSION_FILES); + m->tx_handles_reserved = 0; + m->tx_chunk_size = 4; + m->stream_timeout_secs = h2_config_geti(conf, H2_CONF_STREAM_TIMEOUT_SECS); } return m; } +int h2_mplx_get_max_stream_started(h2_mplx *m) +{ + int stream_id = 0; + + apr_thread_mutex_lock(m->lock); + stream_id = m->max_stream_started; + apr_thread_mutex_unlock(m->lock); + + return stream_id; +} + static void workers_register(h2_mplx *m) { /* Initially, there was ref count increase for this as well, but @@ -164,11 +201,6 @@ static void workers_register(h2_mplx *m) h2_workers_register(m->workers, m); } -static void workers_unregister(h2_mplx *m) -{ - h2_workers_unregister(m->workers, m); -} - static int io_process_events(h2_mplx *m, h2_io *io) { if (io->input_consumed && m->input_consumed) { @@ -195,7 +227,8 @@ static void io_destroy(h2_mplx *m, h2_io *io, int events) /* The pool is cleared/destroyed which also closes all * allocated file handles. Give this count back to our * file handle pool. 
*/ - m->file_handles_allowed += io->files_handles_owned; + m->tx_handles_reserved += io->files_handles_owned; + h2_io_set_remove(m->stream_ios, io); h2_io_set_remove(m->ready_ios, io); h2_io_destroy(io); @@ -207,6 +240,8 @@ static void io_destroy(h2_mplx *m, h2_io *io, int events) } m->spare_pool = pool; } + + check_tx_free(m); } static int io_stream_done(h2_mplx *m, h2_io *io, int rst_error) @@ -235,7 +270,7 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) { apr_status_t status; - workers_unregister(m); + h2_workers_unregister(m->workers, m); status = apr_thread_mutex_lock(m->lock); if (APR_SUCCESS == status) { int i, wait_secs = 5; @@ -317,11 +352,14 @@ static const h2_request *pop_request(h2_mplx *m) { const h2_request *req = NULL; int sid; - while (!req && (sid = h2_tq_shift(m->q)) > 0) { + while (!m->aborted && !req && (sid = h2_tq_shift(m->q)) > 0) { h2_io *io = h2_io_set_get(m->stream_ios, sid); if (io) { req = io->request; io->worker_started = 1; + if (sid > m->max_stream_started) { + m->max_stream_started = sid; + } } } return req; @@ -613,7 +651,7 @@ static apr_status_t out_write(h2_mplx *m, h2_io *io, && !is_aborted(m, &status)) { status = h2_io_out_write(io, bb, m->stream_max_mem, trailers, - &m->file_handles_allowed); + &m->tx_handles_reserved); /* Wait for data to drain until there is room again or * stream timeout expires */ h2_io_signal_init(io, H2_IO_WRITE, m->stream_timeout_secs, iowait); @@ -654,6 +692,11 @@ static apr_status_t out_open(h2_mplx *m, int stream_id, h2_response *response, h2_io_set_response(io, response); h2_io_set_add(m->ready_ios, io); + if (response && response->http_status < 300) { + /* we might see some file buckets in the output, see + * if we have enough handles reserved. */ + check_tx_reservation(m); + } if (bb) { status = out_write(m, io, f, bb, response->trailers, iowait); } @@ -865,7 +908,6 @@ apr_status_t h2_mplx_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx) } apr_thread_mutex_unlock(m->lock); } - workers_register(m); return status; } @@ -892,6 +934,7 @@ apr_status_t h2_mplx_process(h2_mplx *m, int stream_id, const h2_request *req, h2_stream_pri_cmp *cmp, void *ctx) { apr_status_t status; + int was_empty = 0; AP_DEBUG_ASSERT(m); status = apr_thread_mutex_lock(m->lock); @@ -907,6 +950,7 @@ apr_status_t h2_mplx_process(h2_mplx *m, int stream_id, const h2_request *req, status = h2_io_in_close(io); } + was_empty = h2_tq_empty(m->q); h2_tq_add(m->q, io->id, cmp, ctx); ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, m->c, @@ -915,8 +959,7 @@ apr_status_t h2_mplx_process(h2_mplx *m, int stream_id, const h2_request *req, } apr_thread_mutex_unlock(m->lock); } - - if (status == APR_SUCCESS) { + if (status == APR_SUCCESS && was_empty) { workers_register(m); } return status; diff --git a/mod_http2/h2_mplx.h b/mod_http2/h2_mplx.h index 419f3f0e..f3edc513 100644 --- a/mod_http2/h2_mplx.h +++ b/mod_http2/h2_mplx.h @@ -71,6 +71,8 @@ struct h2_mplx { struct h2_io_set *stream_ios; struct h2_io_set *ready_ios; + int max_stream_started; /* highest stream id that started processing */ + apr_thread_mutex_t *lock; struct apr_thread_cond_t *added_output; struct apr_thread_cond_t *join_wait; @@ -80,7 +82,8 @@ struct h2_mplx { apr_pool_t *spare_pool; /* spare pool, ready for next io */ struct h2_workers *workers; - int file_handles_allowed; + apr_size_t tx_handles_reserved; + apr_size_t tx_chunk_size; h2_mplx_consumed_cb *input_consumed; void *input_consumed_ctx; @@ -118,6 +121,14 @@ void h2_mplx_abort(h2_mplx *mplx); void 
h2_mplx_request_done(h2_mplx **pm, int stream_id, const struct h2_request **preq); +/** + * Get the highest stream identifier that has been passed on to processing. + * Maybe 0 in case no stream has been processed yet. + * @param m the multiplexer + * @return highest stream identifier for which processing started + */ +int h2_mplx_get_max_stream_started(h2_mplx *m); + /******************************************************************************* * IO lifetime of streams. ******************************************************************************/ diff --git a/mod_http2/h2_private.h b/mod_http2/h2_private.h index 0ffaf50d..0ad02d3b 100644 --- a/mod_http2/h2_private.h +++ b/mod_http2/h2_private.h @@ -35,4 +35,7 @@ APLOG_USE_MODULE(http2); #define H2_ALEN(a) (sizeof(a)/sizeof((a)[0])) +#define H2MAX(x,y) ((x) > (y) ? (x) : (y)) +#define H2MIN(x,y) ((x) < (y) ? (x) : (y)) + #endif diff --git a/mod_http2/h2_session.c b/mod_http2/h2_session.c index 1dea34b9..de04202a 100644 --- a/mod_http2/h2_session.c +++ b/mod_http2/h2_session.c @@ -44,8 +44,6 @@ #include "h2_version.h" #include "h2_workers.h" -#define H2MAX(x,y) ((x) > (y) ? (x) : (y)) -#define H2MIN(x,y) ((x) < (y) ? (x) : (y)) static int frame_print(const nghttp2_frame *frame, char *buffer, size_t maxlen); @@ -460,9 +458,6 @@ static int on_frame_recv_cb(nghttp2_session *ng2s, ++session->streams_reset; break; case NGHTTP2_GOAWAY: - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, - "h2_session(%ld): GOAWAY errror=%d", - session->id, (int)frame->goaway.error_code); session->client_goaway = 1; break; default: @@ -697,6 +692,45 @@ static void h2_session_destroy(h2_session *session) } } +static apr_status_t h2_session_shutdown(h2_session *session, int reason) +{ + apr_status_t status = APR_SUCCESS; + + AP_DEBUG_ASSERT(session); + if (session->state != H2_SESSION_ST_CLOSING + && session->state != H2_SESSION_ST_ABORTED) { + h2_mplx_abort(session->mplx); + if (session->server_goaway) { + /* already sent one */ + } + else if (!reason) { + nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, + h2_mplx_get_max_stream_started(session->mplx), + reason, NULL, 0); + status = nghttp2_session_send(session->ngh2); + session->server_goaway = 1; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + "session(%ld): shutdown, no err", session->id); + } + else { + const char *err = nghttp2_strerror(reason); + nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, + h2_mplx_get_max_stream_started(session->mplx), + reason, (const uint8_t *)err, + strlen(err)); + status = nghttp2_session_send(session->ngh2); + session->server_goaway = 1; + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, + "session(%ld): shutdown, err=%d '%s'", + session->id, reason, err); + } + + h2_conn_io_flush(&session->io); + session->state = H2_SESSION_ST_CLOSING; + } + return status; +} + static apr_status_t session_pool_cleanup(void *data) { h2_session *session = data; @@ -709,6 +743,22 @@ static apr_status_t session_pool_cleanup(void *data) */ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, "session(%ld): pool_cleanup", session->id); + + AP_DEBUG_ASSERT(session->aborted || session->server_goaway); + if (!session->aborted && !session->server_goaway) { + /* Not good. The connection is being torn down and we have + * not sent a goaway. This is considered a protocol error and + * the client has to assume that any streams "in flight" may have + * been processed and are not safe to retry. 
+ * As clients with idle connection may only learn about a closed + * connection when sending the next request, this has the effect + * that at least this one request will fail. + */ + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, session->c, + "session(%ld): connection disappeared without proper " + "goodbye, clients will be confused, should not happen", + session->id); + } /* keep us from destroying the pool, since that is already ongoing. */ session->pool = NULL; h2_session_destroy(session); @@ -891,45 +941,6 @@ void h2_session_eoc_callback(h2_session *session) h2_session_destroy(session); } -static apr_status_t h2_session_shutdown(h2_session *session, int reason) -{ - AP_DEBUG_ASSERT(session); - session->aborted = 1; - if (session->state != H2_SESSION_ST_CLOSING - && session->state != H2_SESSION_ST_ABORTED) { - if (session->client_goaway) { - /* client sent us a GOAWAY, just terminate */ - nghttp2_session_terminate_session(session->ngh2, NGHTTP2_ERR_EOF); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, - "session(%ld): shutdown, GOAWAY from client", session->id); - } - else if (!reason) { - nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, - session->max_stream_received, - reason, NULL, 0); - nghttp2_session_send(session->ngh2); - session->server_goaway = 1; - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, - "session(%ld): shutdown, no err", session->id); - } - else { - const char *err = nghttp2_strerror(reason); - nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, - session->max_stream_received, - reason, (const uint8_t *)err, - strlen(err)); - nghttp2_session_send(session->ngh2); - session->server_goaway = 1; - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, - "session(%ld): shutdown, err=%d '%s'", - session->id, reason, err); - } - session->state = H2_SESSION_ST_CLOSING; - h2_mplx_abort(session->mplx); - } - return APR_SUCCESS; -} - void h2_session_abort(h2_session *session, apr_status_t status) { AP_DEBUG_ASSERT(session); @@ -1101,24 +1112,20 @@ h2_stream *h2_session_get_stream(h2_session *session, int stream_id) void h2_session_close(h2_session *session) { + apr_status_t status = APR_SUCCESS; apr_bucket *b; conn_rec *c = session->c; - apr_status_t status; AP_DEBUG_ASSERT(session); - if (!session->aborted) { - h2_session_shutdown(session, 0); + if (!session->server_goaway) { + status = h2_session_shutdown(session, 0); } h2_session_cleanup(session); - ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, "h2_session(%ld): writing eoc", c->id); b = h2_bucket_eoc_create(c->bucket_alloc, session); - status = h2_conn_io_write_eoc(&session->io, b); - if (status != APR_SUCCESS) { - ap_log_cerror(APLOG_MARK, APLOG_ERR, status, c, - "h2_session(%ld): flushed eoc bucket", c->id); - } + h2_conn_io_write_eoc(&session->io, b); /* and all is or will be destroyed */ } @@ -1604,9 +1611,13 @@ static apr_status_t h2_session_receive(void *ctx, const char *data, apr_size_t len, apr_size_t *readlen) { h2_session *session = ctx; + ssize_t n; + if (len > 0) { - ssize_t n = nghttp2_session_mem_recv(session->ngh2, - (const uint8_t *)data, len); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, + "h2_session(%ld): feeding %ld bytes to nghttp2", + session->id, (long)len); + n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)data, len); if (n < 0) { ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_EGENERAL, session->c, @@ -1635,7 +1646,7 @@ static apr_status_t h2_session_read(h2_session *session, int block, int loops) * We 
just pull at the filter chain to make it happen */ status = ap_get_brigade(c->input_filters, session->bbtmp, AP_MODE_READBYTES, - block? APR_BLOCK_READ : APR_NONBLOCK_READ, + (block && !session->aborted)? APR_BLOCK_READ : APR_NONBLOCK_READ, APR_BUCKET_BUFF_SIZE); /* get rid of any possible data we do not expect to get */ apr_brigade_cleanup(session->bbtmp); @@ -1679,6 +1690,10 @@ static apr_status_t h2_session_read(h2_session *session, int block, int loops) * status. */ return rstatus; } + + if (session->aborted) { + break; + } } return rstatus; } @@ -1719,7 +1734,7 @@ apr_status_t h2_session_process(h2_session *session, int async) { apr_status_t status = APR_SUCCESS; conn_rec *c = session->c; - int rv, have_written, have_read, remain_secs; + int rv, have_written, have_read; const char *reason = ""; ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c, @@ -1733,6 +1748,9 @@ apr_status_t h2_session_process(h2_session *session, int async) status = APR_ECONNABORTED; goto out; } + else if (session->client_goaway) { + h2_session_shutdown(session, 0); + } switch (session->state) { case H2_SESSION_ST_INIT: @@ -1743,6 +1761,7 @@ apr_status_t h2_session_process(h2_session *session, int async) session->server_goaway = 1; } + ap_update_child_status(c->sbh, SERVER_BUSY_READ, NULL); status = h2_session_start(session, &rv); ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, "h2_session(%ld): started on %s:%d", session->id, @@ -1758,19 +1777,28 @@ apr_status_t h2_session_process(h2_session *session, int async) break; case H2_SESSION_ST_IDLE_READ: - h2_filter_cin_timeout_set(session->cin, session->timeout_secs); - ap_update_child_status(c->sbh, SERVER_BUSY_READ, NULL); + h2_filter_cin_timeout_set(session->cin, session->keepalive_secs); + ap_update_child_status(c->sbh, SERVER_BUSY_KEEPALIVE, NULL); status = h2_session_read(session, 1, 10); if (APR_STATUS_IS_TIMEUP(status)) { ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "h2_session(%ld): IDLE -> KEEPALIVE", session->id); - session->state = H2_SESSION_ST_KEEPALIVE; + h2_session_shutdown(session, 0); + goto out; } else if (status == APR_SUCCESS) { /* got something, go busy again */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, - "h2_session(%ld): IDLE -> BUSY", session->id); - session->state = H2_SESSION_ST_BUSY; + have_read = 1; + if (session->client_goaway) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, + "h2_session(%ld): IDLE -> CLOSING", session->id); + h2_session_shutdown(session, 0); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, + "h2_session(%ld): IDLE -> BUSY", session->id); + session->state = H2_SESSION_ST_BUSY; + } } else { reason = "keepalive error"; @@ -1780,6 +1808,7 @@ apr_status_t h2_session_process(h2_session *session, int async) case H2_SESSION_ST_BUSY: if (nghttp2_session_want_read(session->ngh2)) { + ap_update_child_status(c->sbh, SERVER_BUSY_READ, NULL); h2_filter_cin_timeout_set(session->cin, session->timeout_secs); status = h2_session_read(session, 0, 10); if (status == APR_SUCCESS) { @@ -1837,8 +1866,8 @@ apr_status_t h2_session_process(h2_session *session, int async) if (h2_stream_set_is_empty(session->streams)) { /* When we have no streams, no task event are possible, * switch to blocking reads */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, - "h2_session(%ld): BUSY -> IDLE", session->id); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, + "h2_session(%ld): BUSY -> IDLE", session->id); session->state = H2_SESSION_ST_IDLE_READ; } else if 
(!h2_stream_set_has_unsubmitted(session->streams) @@ -1846,8 +1875,8 @@ apr_status_t h2_session_process(h2_session *session, int async) /* none of our streams is waiting for a response or * new output data from task processing, * switch to blocking reads. */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, - "h2_session(%ld): BUSY -> IDLE", session->id); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, + "h2_session(%ld): BUSY -> IDLE", session->id); session->state = H2_SESSION_ST_IDLE_READ; } else { @@ -1882,6 +1911,7 @@ apr_status_t h2_session_process(h2_session *session, int async) } else if (status == APR_TIMEUP) { if (nghttp2_session_want_read(session->ngh2)) { + ap_update_child_status(c->sbh, SERVER_BUSY_READ, NULL); status = h2_session_read(session, 0, 1); if (status == APR_SUCCESS) { /* got something, go busy again */ @@ -1907,62 +1937,9 @@ apr_status_t h2_session_process(h2_session *session, int async) } break; - case H2_SESSION_ST_KEEPALIVE: - /* Our normal H2Timeout has passed and we are considering to - * extend that with the H2KeepAliveTimeout. */ - remain_secs = session->keepalive_secs - session->timeout_secs; - if (remain_secs <= 0) { - /* keepalive is <= normal timeout, close the session */ - reason = "keepalive expired"; - h2_session_shutdown(session, 0); - goto out; - } - session->c->keepalive = AP_CONN_KEEPALIVE; - ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_KEEPALIVE, c); - - if ((apr_time_sec(session->s->keep_alive_timeout) >= remain_secs) - && async && session->c->cs - && !session->r) { - /* Async MPMs are able to handle keep-alive connections without - * blocking a thread. For this to happen, we need to return from - * processing, indicating the IO event we are waiting for, and - * may be called again if the event happens. - * TODO: this does not properly GOAWAY connections... - * TODO: This currently does not work on upgraded requests... 
- */ - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, - "h2_session(%ld): async KEEPALIVE -> IDLE_READ", session->id); - session->state = H2_SESSION_ST_IDLE_READ; - session->c->cs->state = CONN_STATE_WRITE_COMPLETION; - reason = "async keepalive"; - status = APR_SUCCESS; - goto out; - } - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, - "h2_session(%ld): KEEPALIVE read", session->id); - h2_filter_cin_timeout_set(session->cin, remain_secs); - status = h2_session_read(session, 1, 1); - if (APR_STATUS_IS_TIMEUP(status)) { - reason = "keepalive expired"; - h2_session_shutdown(session, 0); - goto out; - } - else if (status != APR_SUCCESS) { - reason = "keepalive error"; - goto out; - } - ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, - "h2_session(%ld): KEEPALIVE -> BUSY", session->id); - session->state = H2_SESSION_ST_BUSY; - break; - case H2_SESSION_ST_CLOSING: if (nghttp2_session_want_write(session->ngh2)) { status = h2_session_send(session); - if (status != APR_SUCCESS) { - reason = "send error"; - goto out; - } have_written = 1; } reason = "closing"; diff --git a/mod_http2/h2_session.h b/mod_http2/h2_session.h index b0652fa7..b3b26b7b 100644 --- a/mod_http2/h2_session.h +++ b/mod_http2/h2_session.h @@ -58,7 +58,6 @@ typedef enum { H2_SESSION_ST_IDLE_READ, /* nothing to write, expecting data inc */ H2_SESSION_ST_BUSY, /* read/write without stop */ H2_SESSION_ST_BUSY_WAIT, /* waiting for tasks reporting back */ - H2_SESSION_ST_KEEPALIVE, /* nothing to write, normal timeout passed */ H2_SESSION_ST_CLOSING, /* shuting down */ H2_SESSION_ST_ABORTED, /* client closed connection or sky fall */ } h2_session_state; @@ -91,7 +90,7 @@ typedef struct h2_session { int streams_reset; /* number of http/2 streams reset by client */ int streams_pushed; /* number of http/2 streams pushed */ int max_stream_received; /* highest stream id created */ - int max_stream_handled; /* highest stream id handled successfully */ + int max_stream_handled; /* highest stream id completed */ apr_size_t max_stream_count; /* max number of open streams */ apr_size_t max_stream_mem; /* max buffer memory for a single stream */ diff --git a/mod_http2/h2_stream.c b/mod_http2/h2_stream.c index 95c5b4a1..1777c990 100644 --- a/mod_http2/h2_stream.c +++ b/mod_http2/h2_stream.c @@ -219,7 +219,7 @@ apr_status_t h2_stream_set_response(h2_stream *stream, h2_response *response, stream->response = response; if (bb && !APR_BRIGADE_EMPTY(bb)) { - int move_all = INT_MAX; + apr_size_t move_all = INT_MAX; /* we can move file handles from h2_mplx into this h2_stream as many * as we want, since the lifetimes are the same and we are not freeing * the ones in h2_mplx->io before this stream is done. 
*/ diff --git a/mod_http2/h2_util.c b/mod_http2/h2_util.c index 1ab71fb3..68d12324 100644 --- a/mod_http2/h2_util.c +++ b/mod_http2/h2_util.c @@ -211,7 +211,7 @@ static const int FILE_MOVE = 1; static apr_status_t last_not_included(apr_bucket_brigade *bb, apr_off_t maxlen, int same_alloc, - int *pfile_buckets_allowed, + apr_size_t *pfile_buckets_allowed, apr_bucket **pend) { apr_bucket *b; @@ -269,7 +269,7 @@ static apr_status_t last_not_included(apr_bucket_brigade *bb, #define LOG_LEVEL APLOG_INFO apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, - apr_off_t maxlen, int *pfile_handles_allowed, + apr_off_t maxlen, apr_size_t *pfile_buckets_allowed, const char *msg) { apr_status_t status = APR_SUCCESS; @@ -281,14 +281,14 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, || to->p == from->p); if (!FILE_MOVE) { - pfile_handles_allowed = NULL; + pfile_buckets_allowed = NULL; } if (!APR_BRIGADE_EMPTY(from)) { apr_bucket *b, *end; status = last_not_included(from, maxlen, same_alloc, - pfile_handles_allowed, &end); + pfile_buckets_allowed, &end); if (status != APR_SUCCESS) { return status; } @@ -332,8 +332,8 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, /* ignore */ } } - else if (pfile_handles_allowed - && *pfile_handles_allowed > 0 + else if (pfile_buckets_allowed + && *pfile_buckets_allowed > 0 && APR_BUCKET_IS_FILE(b)) { /* We do not want to read files when passing buckets, if * we can avoid it. However, what we've come up so far @@ -362,7 +362,7 @@ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, } apr_brigade_insert_file(to, fd, b->start, b->length, to->p); - --(*pfile_handles_allowed); + --(*pfile_buckets_allowed); } else { const char *data; diff --git a/mod_http2/h2_util.h b/mod_http2/h2_util.h index 0a3790d0..10ad7d6b 100644 --- a/mod_http2/h2_util.h +++ b/mod_http2/h2_util.h @@ -97,7 +97,7 @@ h2_ngheader *h2_util_ngheader_make_req(apr_pool_t *p, * @param msg message for use in logging */ apr_status_t h2_util_move(apr_bucket_brigade *to, apr_bucket_brigade *from, - apr_off_t maxlen, int *pfile_buckets_allowed, + apr_off_t maxlen, apr_size_t *pfile_buckets_allowed, const char *msg); /** diff --git a/mod_http2/h2_version.h b/mod_http2/h2_version.h index 8ca80843..648dcec4 100644 --- a/mod_http2/h2_version.h +++ b/mod_http2/h2_version.h @@ -26,7 +26,7 @@ * @macro * Version number of the http2 module as c string */ -#define MOD_HTTP2_VERSION "1.0.18" +#define MOD_HTTP2_VERSION "1.1.0" /** * @macro @@ -34,7 +34,7 @@ * release. This is a 24 bit number with 8 bits for major number, 8 bits * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203. 
*/ -#define MOD_HTTP2_VERSION_NUM 0x010012 +#define MOD_HTTP2_VERSION_NUM 0x010100 #endif /* mod_h2_h2_version_h */ diff --git a/mod_http2/h2_workers.c b/mod_http2/h2_workers.c index 7542daa6..f7606dc4 100644 --- a/mod_http2/h2_workers.c +++ b/mod_http2/h2_workers.c @@ -30,6 +30,7 @@ #include "h2_worker.h" #include "h2_workers.h" + static int in_list(h2_workers *workers, h2_mplx *m) { h2_mplx *e; @@ -222,7 +223,8 @@ static apr_status_t h2_workers_start(h2_workers *workers) } h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool, - int min_size, int max_size) + int min_size, int max_size, + apr_size_t max_tx_handles) { apr_status_t status; h2_workers *workers; @@ -245,6 +247,9 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool, workers->max_size = max_size; apr_atomic_set32(&workers->max_idle_secs, 10); + workers->max_tx_handles = max_tx_handles; + workers->spare_tx_handles = workers->max_tx_handles; + apr_threadattr_create(&workers->thread_attr, workers->pool); if (ap_thread_stacksize != 0) { apr_threadattr_stacksize_set(workers->thread_attr, @@ -265,6 +270,12 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *server_pool, status = apr_thread_cond_create(&workers->mplx_added, workers->pool); } + if (status == APR_SUCCESS) { + status = apr_thread_mutex_create(&workers->tx_lock, + APR_THREAD_MUTEX_DEFAULT, + workers->pool); + } + if (status == APR_SUCCESS) { status = h2_workers_start(workers); } @@ -311,6 +322,8 @@ apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m) ap_log_error(APLOG_MARK, APLOG_TRACE3, status, workers->s, "h2_workers: register mplx(%ld)", m->id); if (in_list(workers, m)) { + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s, + "h2_workers: already registered mplx(%ld)", m->id); status = APR_EAGAIN; } else { @@ -321,16 +334,13 @@ apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m) if (workers->idle_worker_count > 0) { apr_thread_cond_signal(workers->mplx_added); } - else if (workers->worker_count < workers->max_size) { + else if (status == APR_SUCCESS + && workers->worker_count < workers->max_size) { ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, workers->s, "h2_workers: got %d worker, adding 1", workers->worker_count); add_worker(workers); } - - /* cleanup any zombie workers that may have accumulated */ - cleanup_zombies(workers, 0); - apr_thread_mutex_unlock(workers->lock); } return status; @@ -345,9 +355,6 @@ apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m) H2_MPLX_REMOVE(m); status = APR_SUCCESS; } - /* cleanup any zombie workers that may have accumulated */ - cleanup_zombies(workers, 0); - apr_thread_mutex_unlock(workers->lock); } return status; @@ -363,3 +370,33 @@ void h2_workers_set_max_idle_secs(h2_workers *workers, int idle_secs) } apr_atomic_set32(&workers->max_idle_secs, idle_secs); } + +apr_size_t h2_workers_tx_reserve(h2_workers *workers, apr_size_t count) +{ + apr_status_t status = apr_thread_mutex_lock(workers->tx_lock); + if (status == APR_SUCCESS) { + count = H2MIN(workers->spare_tx_handles, count); + workers->spare_tx_handles -= count; + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s, + "h2_workers: reserved %d tx handles, %d/%d left", + (int)count, (int)workers->spare_tx_handles, + (int)workers->max_tx_handles); + apr_thread_mutex_unlock(workers->tx_lock); + return count; + } + return 0; +} + +void h2_workers_tx_free(h2_workers *workers, apr_size_t count) +{ + apr_status_t status = apr_thread_mutex_lock(workers->tx_lock); + if (status == 
APR_SUCCESS) { + workers->spare_tx_handles += count; + ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, workers->s, + "h2_workers: freed %d tx handles, %d/%d left", + (int)count, (int)workers->spare_tx_handles, + (int)workers->max_tx_handles); + apr_thread_mutex_unlock(workers->tx_lock); + } +} + diff --git a/mod_http2/h2_workers.h b/mod_http2/h2_workers.h index 16ec4443..7ec38813 100644 --- a/mod_http2/h2_workers.h +++ b/mod_http2/h2_workers.h @@ -39,6 +39,9 @@ struct h2_workers { int min_size; int max_size; + apr_size_t max_tx_handles; + apr_size_t spare_tx_handles; + unsigned int aborted : 1; apr_threadattr_t *thread_attr; @@ -53,6 +56,8 @@ struct h2_workers { struct apr_thread_mutex_t *lock; struct apr_thread_cond_t *mplx_added; + + struct apr_thread_mutex_t *tx_lock; }; @@ -60,7 +65,8 @@ struct h2_workers { * threads. */ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pool, - int min_size, int max_size); + int min_size, int max_size, + apr_size_t max_tx_handles); /* Destroy the worker pool and all its threads. */ @@ -71,14 +77,12 @@ void h2_workers_destroy(h2_workers *workers); * out of tasks, it will be automatically be unregistered. Should * new tasks arrive, it needs to be registered again. */ -apr_status_t h2_workers_register(h2_workers *workers, - struct h2_mplx *m); +apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m); /** * Remove a h2_mplx from the worker registry. */ -apr_status_t h2_workers_unregister(h2_workers *workers, - struct h2_mplx *m); +apr_status_t h2_workers_unregister(h2_workers *workers, struct h2_mplx *m); /** * Set the amount of seconds a h2_worker should wait for new tasks @@ -87,4 +91,31 @@ apr_status_t h2_workers_unregister(h2_workers *workers, */ void h2_workers_set_max_idle_secs(h2_workers *workers, int idle_secs); +/** + * Reservation of file handles available for transfer between workers + * and master connections. + * + * When handling output from request processing, file handles are often + * encountered when static files are served. The most efficient way is then + * to forward the handle itself to the master connection where it can be + * read or sendfile'd to the client. But file handles are a scarce resource, + * so there needs to be a limit on how many handles are transferred this way. + * + * h2_workers keeps track of the number of reserved handles and observes a + * configurable maximum value. + * + * @param workers the workers instance + * @param count how many handles the caller wishes to reserve + * @return the number of reserved handles, may be 0. + */ +apr_size_t h2_workers_tx_reserve(h2_workers *workers, apr_size_t count); + +/** + * Return a number of reserved file handles back to the pool. The number + * overall may not exceed the numbers reserved. + * @param workers the workers instance + * @param count how many handles are returned to the pool + */ +void h2_workers_tx_free(h2_workers *workers, apr_size_t count); + #endif /* defined(__mod_h2__h2_workers__) */
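
A minimal httpd.conf sketch of the process-wide file-handle budget described in
the v1.1.0 ChangeLog entry above (illustrative only, not part of the patch;
H2MaxWorkers and H2SessionExtraFiles are existing mod_http2 directives, shown
here with their stated defaults):

    # Per child process, HTTP/2 transfers may use up to
    # H2MaxWorkers * H2SessionExtraFiles = 25 * 2 = 50 file handles,
    # shared by all HTTP/2 connections served by that process.
    H2MaxWorkers        25
    H2SessionExtraFiles 2

The more handles mod_http2 may hold this way, the more static file responses
can be forwarded to the master connection as open file handles instead of
being copied into memory.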