push diary, cache digest, http2-status handler
Stefan Eissing committed Jan 15, 2016
1 parent 4dde268 commit a09062f
Showing 22 changed files with 1,779 additions and 529 deletions.
26 changes: 26 additions & 0 deletions ChangeLog
@@ -1,3 +1,29 @@
v1.2.0
--------------------------------------------------------------------------------
* Each connection now has a push diary where already pushed resources are
  recorded (as 64-bit subsets of SHA256 URL hashes). The maximum size of the
  diary can be configured with the new directive 'H2PushDiarySize'. The default is 256.
* The module recognizes the request header 'Cache-Digest', carrying a
  base64url-encoded set of hash values in Golomb set encoding, as described in
  https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
  This is highly experimental and will most likely change in format and
  interpretation as the draft evolves. For now, the value replaces the current
  push diary. This allows clients to inform the server about which resources
  they already have cached.
* The module configuration now checks for the crypto library (it needs SHA256
  from it). If it is not found, a replacement hash is used; however, this is
  not interoperable with cache digests from clients, so cache hits will be missed.
* The module has a new handler named "http2-status" which exposes certain
  properties and statistics of the *current* HTTP/2 connection. It can be
  configured like this:
      <Location "/http2-status">
          SetHandler http2-status
      </Location>
  The usual precautions about exposing some internals of your server to the
  outside world apply, just as for "server-status".
* Due to more test cases and new functions, more bugs have been exposed,
examined and exterminated.

v1.1.0
--------------------------------------------------------------------------------
* GOAWAY will be sent when a HTTP/2 connection is shutdown, whenever the
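To make the push diary entries described above concrete, here is a minimal, self-contained C sketch of deriving a 64-bit key from the SHA256 hash of a pushed URL. It assumes the "subset" is simply the leading 64 bits of the digest and uses libcrypto's one-shot SHA256(); this is an illustration only, not the module's actual h2_push diary code.

    /* Sketch: 64-bit push diary key from SHA256(URL); the first 8 digest bytes
     * are assumed as the subset. Build with -lcrypto, which configure.ac checks for. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <openssl/sha.h>

    static uint64_t url_hash64(const char *url)
    {
        unsigned char md[SHA256_DIGEST_LENGTH];
        uint64_t key = 0;
        int i;

        SHA256((const unsigned char *)url, strlen(url), md);
        for (i = 0; i < 8; i++) {
            key = (key << 8) | md[i];  /* keep only the leading 64 bits */
        }
        return key;
    }

    int main(void)
    {
        printf("%016llx\n", (unsigned long long)url_hash64("/assets/site.css"));
        return 0;
    }

Two URLs that collide in these 64 bits would be treated as already pushed, which is the usual space/accuracy trade-off of such digests.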
8 changes: 7 additions & 1 deletion configure.ac
@@ -14,7 +14,7 @@
#

AC_PREREQ([2.69])
AC_INIT([mod_http2], [1.1.0], [[email protected]])
AC_INIT([mod_http2], [1.2.0], [[email protected]])

LT_PREREQ([2.2.6])
LT_INIT()
@@ -130,6 +130,12 @@ AC_CHECK_FUNCS([nghttp2_stream_get_weight],
AC_CHECK_FUNCS([nghttp2_session_change_stream_priority],
[CPPFLAGS="$CPPFLAGS -DH2_NG2_CHANGE_PRIO"], [])


AC_CHECK_LIB([crypto], [SHA256_Init], [
CPPFLAGS="$CPPFLAGS -DH2_OPENSSL"
LIBS="$LIBS -lcrypto"],
[AC_MSG_ERROR("libcrypto not found")])

# Checks for header files.
AC_CHECK_HEADERS([ \
assert.h \
25 changes: 25 additions & 0 deletions mod_http2/h2_config.c
@@ -62,6 +62,8 @@ static h2_config defconf = {
-1, /* connection timeout */
-1, /* keepalive timeout */
0, /* stream timeout */
256, /* push diary size */

};

void h2_config_init(apr_pool_t *pool)
@@ -97,6 +99,7 @@ static void *h2_config_create(apr_pool_t *pool,
conf->h2_timeout = DEF_VAL;
conf->h2_keepalive = DEF_VAL;
conf->h2_stream_timeout = DEF_VAL;
conf->push_diary_size = DEF_VAL;

return conf;
}
@@ -145,6 +148,7 @@ void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv)
n->h2_timeout = H2_CONFIG_GET(add, base, h2_timeout);
n->h2_keepalive = H2_CONFIG_GET(add, base, h2_keepalive);
n->h2_stream_timeout = H2_CONFIG_GET(add, base, h2_stream_timeout);
n->push_diary_size = H2_CONFIG_GET(add, base, push_diary_size);

return n;
}
@@ -193,6 +197,8 @@ apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var)
return H2_CONFIG_GET(conf, &defconf, h2_keepalive);
case H2_CONF_STREAM_TIMEOUT_SECS:
return H2_CONFIG_GET(conf, &defconf, h2_stream_timeout);
case H2_CONF_PUSH_DIARY_SIZE:
return H2_CONFIG_GET(conf, &defconf, push_diary_size);
default:
return DEF_VAL;
}
@@ -526,6 +532,23 @@ static const char *h2_conf_set_stream_timeout(cmd_parms *parms,
return NULL;
}

static const char *h2_conf_set_push_diary_size(cmd_parms *parms,
void *arg, const char *value)
{
h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
(void)arg;
cfg->push_diary_size = (int)apr_atoi64(value);
if (cfg->push_diary_size < 0) {
return "value must be >= 0";
}
if (cfg->push_diary_size > 0 && (cfg->push_diary_size & (cfg->push_diary_size-1))) {
return "value must be a power of 2";
}
if (cfg->push_diary_size > (1 << 15)) {
return "value must be <= 32768";
}
return NULL;
}

#define AP_END_CMD AP_INIT_TAKE1(NULL, NULL, NULL, RSRC_CONF, NULL)

@@ -570,6 +593,8 @@ const command_rec h2_cmds[] = {
RSRC_CONF, "timeout (seconds) for idle HTTP/2 connections, no streams open"),
AP_INIT_TAKE1("H2StreamTimeout", h2_conf_set_stream_timeout, NULL,
RSRC_CONF, "read/write timeout (seconds) for HTTP/2 streams"),
AP_INIT_TAKE1("H2PushDiarySize", h2_conf_set_push_diary_size, NULL,
RSRC_CONF, "size of push diary"),
AP_END_CMD
};

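For reference, the constraints that h2_conf_set_push_diary_size enforces above, pulled out into a standalone sketch with a few sample inputs; the helper name and the samples are illustrative only.

    #include <stdio.h>

    /* H2PushDiarySize must be >= 0, a power of two when non-zero, and must not
     * exceed the upper bound the directive checks (32768 as written). */
    static const char *check_push_diary_size(int n)
    {
        if (n < 0) {
            return "value must be >= 0";
        }
        if (n > 0 && (n & (n - 1))) {
            return "value must be a power of 2";
        }
        if (n > (1 << 15)) {
            return "value must be <= 32768";
        }
        return NULL; /* accepted */
    }

    int main(void)
    {
        int samples[] = { 0, 256, 300, 4096, 65536 };
        int i;
        for (i = 0; i < (int)(sizeof(samples) / sizeof(samples[0])); i++) {
            const char *err = check_push_diary_size(samples[i]);
            printf("%6d -> %s\n", samples[i], err ? err : "ok");
        }
        return 0;
    }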
2 changes: 2 additions & 0 deletions mod_http2/h2_config.h
@@ -42,6 +42,7 @@ typedef enum {
H2_CONF_TIMEOUT_SECS,
H2_CONF_KEEPALIVE_SECS,
H2_CONF_STREAM_TIMEOUT_SECS,
H2_CONF_PUSH_DIARY_SIZE,
} h2_config_var_t;

struct apr_hash_t;
@@ -72,6 +73,7 @@ typedef struct h2_config {
int h2_timeout; /* timeout for http/2 connections */
int h2_keepalive; /* timeout for idle connections, no streams */
int h2_stream_timeout; /* timeout for http/2 streams, slave connections */
int push_diary_size; /* # of entries in push diary */
} h2_config;


72 changes: 19 additions & 53 deletions mod_http2/h2_conn.c
@@ -38,6 +38,7 @@
#include "h2_worker.h"
#include "h2_workers.h"
#include "h2_conn.h"
#include "h2_version.h"

static struct h2_workers *workers;

@@ -170,63 +171,28 @@ apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r)
return APR_SUCCESS;
}

static apr_status_t h2_conn_process(h2_ctx *ctx)
{
h2_session *session;
apr_status_t status;

session = h2_ctx_session_get(ctx);
if (session->c->cs) {
session->c->cs->sense = CONN_SENSE_DEFAULT;
}

status = h2_session_process(session, async_mpm);

session->c->keepalive = AP_CONN_KEEPALIVE;
if (session->c->cs) {
session->c->cs->state = CONN_STATE_WRITE_COMPLETION;
}

if (APR_STATUS_IS_EOF(status)
|| APR_STATUS_IS_ECONNRESET(status)
|| APR_STATUS_IS_ECONNABORTED(status)) {
/* fatal, probably client just closed connection. emergency shutdown */
/* Make sure this connection gets closed properly. */
ap_log_cerror( APLOG_MARK, APLOG_DEBUG, 0, session->c,
"h2_session(%ld): aborted", session->id);
session->c->keepalive = AP_CONN_CLOSE;

h2_ctx_clear(session->c);
h2_session_abort(session, status);
h2_session_eoc_callback(session);
/* hereafter session might be gone */
return APR_ECONNABORTED;
}

if (session->state == H2_SESSION_ST_CLOSING) {
ap_log_cerror( APLOG_MARK, APLOG_DEBUG, 0, session->c,
"h2_session(%ld): closing", session->id);
/* Make sure this connection gets closed properly. */
session->c->keepalive = AP_CONN_CLOSE;

h2_ctx_clear(session->c);
h2_session_close(session);
/* hereafter session may be gone */
}
else if (session->state == H2_SESSION_ST_ABORTED) {
ap_log_cerror( APLOG_MARK, APLOG_DEBUG, 0, session->c,
"h2_session(%ld): already aborted", session->id);
return APR_ECONNABORTED;
}

return APR_SUCCESS;
}

apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c)
{
apr_status_t status;
int mpm_state = 0;

do {
h2_conn_process(ctx);
if (c->cs) {
c->cs->sense = CONN_SENSE_DEFAULT;
}
status = h2_session_process(h2_ctx_session_get(ctx), async_mpm);

if (c->cs) {
c->cs->state = CONN_STATE_WRITE_COMPLETION;
}
if (APR_STATUS_IS_EOF(status)) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
"h2_session(%ld): process, closing conn", c->id);
c->keepalive = AP_CONN_CLOSE;
}
else {
c->keepalive = AP_CONN_KEEPALIVE;
}

if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
break;
1 change: 1 addition & 0 deletions mod_http2/h2_conn_io.h
@@ -35,6 +35,7 @@ typedef struct {

apr_size_t write_size;
apr_time_t last_write;
apr_int64_t bytes_read;
apr_int64_t bytes_written;

int buffer_output;
149 changes: 149 additions & 0 deletions mod_http2/h2_filter.c
@@ -23,7 +23,16 @@

#include "h2_private.h"
#include "h2_conn_io.h"
#include "h2_ctx.h"
#include "h2_mplx.h"
#include "h2_push.h"
#include "h2_task.h"
#include "h2_stream.h"
#include "h2_stream_set.h"
#include "h2_response.h"
#include "h2_session.h"
#include "h2_util.h"
#include "h2_version.h"

#include "h2_filter.h"

@@ -157,3 +166,143 @@ apr_status_t h2_filter_core_input(ap_filter_t* f,
}
return status;
}

/*******************************************************************************
* http2 connection status handler + stream out source
******************************************************************************/

static const char *H2_SOS_H2_STATUS = "http2-status";

int h2_filter_h2_status_handler(request_rec *r)
{
h2_ctx *ctx = h2_ctx_rget(r);
h2_task *task;

if (strcmp(r->handler, "http2-status")) {
return DECLINED;
}
if (r->method_number != M_GET) {
return DECLINED;
}

task = ctx? h2_ctx_get_task(ctx) : NULL;
if (task) {
/* We need to handle the actual output on the main thread, as
* we need to access h2_session information. */
apr_table_setn(r->notes, H2_RESP_SOS_NOTE, H2_SOS_H2_STATUS);
apr_table_setn(r->headers_out, "Content-Type", "application/json");
r->status = 200;
return DONE;
}
return DECLINED;
}

#define bbout(...) apr_brigade_printf(bb, NULL, NULL, __VA_ARGS__)
static apr_status_t h2_sos_h2_status_buffer(h2_sos *sos, apr_bucket_brigade *bb)
{
h2_stream *stream = sos->stream;
h2_session *session = stream->session;
h2_mplx *mplx = session->mplx;
apr_status_t status;

if (!bb) {
bb = apr_brigade_create(stream->pool, session->c->bucket_alloc);
}

bbout("{\n");
bbout(" \"HTTP2\": \"on\",\n");
bbout(" \"H2PUSH\": \"%s\",\n", h2_session_push_enabled(session)? "on" : "off");
bbout(" \"mod_http2_version\": \"%s\",\n", MOD_HTTP2_VERSION);
bbout(" \"session_id\": %ld,\n", (long)session->id);
bbout(" \"streams_max\": %d,\n", (int)session->max_stream_count);
bbout(" \"this_stream\": %d,\n", stream->id);
bbout(" \"streams_open\": %d,\n", (int)h2_stream_set_size(session->streams));
bbout(" \"max_stream_started\": %d,\n", mplx->max_stream_started);
bbout(" \"requests_received\": %d,\n", session->requests_received);
bbout(" \"responses_submitted\": %d,\n", session->responses_submitted);
bbout(" \"streams_reset\": %d, \n", session->streams_reset);
bbout(" \"pushes_promised\": %d,\n", session->pushes_promised);
bbout(" \"pushes_submitted\": %d,\n", session->pushes_submitted);
bbout(" \"pushes_reset\": %d,\n", session->pushes_reset);

if (session->push_diary) {
    const char *data;
    const char *base64_digest;
    apr_size_t len;

    status = h2_push_diary_digest_get(session->push_diary, stream->pool, 1024, &data, &len);
    if (status == APR_SUCCESS) {
        base64_digest = h2_util_base64url_encode(data, len, stream->pool);
        bbout(" \"cache_digest\": \"%s\",\n", base64_digest);

        /* try the reverse for testing purposes; only valid if the first
         * digest_get succeeded, otherwise data/len are uninitialized */
        status = h2_push_diary_digest_set(session->push_diary, data, len);
        if (status == APR_SUCCESS) {
            status = h2_push_diary_digest_get(session->push_diary, stream->pool, 1024, &data, &len);
            if (status == APR_SUCCESS) {
                base64_digest = h2_util_base64url_encode(data, len, stream->pool);
                bbout(" \"cache_digest^2\": \"%s\",\n", base64_digest);
            }
        }
    }
}
bbout(" \"frames_received\": %ld,\n", (long)session->frames_received);
bbout(" \"frames_sent\": %ld,\n", (long)session->frames_sent);
bbout(" \"bytes_received\": %"APR_UINT64_T_FMT",\n", session->io.bytes_read);
bbout(" \"bytes_sent\": %"APR_UINT64_T_FMT"\n", session->io.bytes_written);
bbout("}\n");

return sos->prev->buffer(sos->prev, bb);
}

static apr_status_t h2_sos_h2_status_read_to(h2_sos *sos, apr_bucket_brigade *bb,
apr_off_t *plen, int *peos)
{
return sos->prev->read_to(sos->prev, bb, plen, peos);
}

static apr_status_t h2_sos_h2_status_prep_read(h2_sos *sos, apr_off_t *plen, int *peos)
{
return sos->prev->prep_read(sos->prev, plen, peos);
}

static apr_status_t h2_sos_h2_status_readx(h2_sos *sos, h2_io_data_cb *cb, void *ctx,
apr_off_t *plen, int *peos)
{
return sos->prev->readx(sos->prev, cb, ctx, plen, peos);
}

static apr_table_t *h2_sos_h2_status_get_trailers(h2_sos *sos)
{
return sos->prev->get_trailers(sos->prev);
}

static h2_sos *h2_sos_h2_status_create(h2_sos *prev)
{
h2_sos *sos;
h2_response *response = prev->response;

apr_table_unset(response->headers, "Content-Length");
response->content_length = -1;

sos = apr_pcalloc(prev->stream->pool, sizeof(*sos));
sos->prev = prev;
sos->response = response;
sos->stream = prev->stream;
sos->buffer = h2_sos_h2_status_buffer;
sos->prep_read = h2_sos_h2_status_prep_read;
sos->readx = h2_sos_h2_status_readx;
sos->read_to = h2_sos_h2_status_read_to;
sos->get_trailers = h2_sos_h2_status_get_trailers;

return sos;
}

h2_sos *h2_filter_sos_create(const char *name, struct h2_sos *prev)
{
if (!strcmp(H2_SOS_H2_STATUS, name)) {
return h2_sos_h2_status_create(prev);
}
return prev;
}
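
The status handler above declines everything except GET requests whose handler name is "http2-status" on an HTTP/2 task. Below is a minimal sketch of how such a handler is typically wired into httpd's hook machinery; the registration in this commit lives elsewhere in mod_http2, and the function name here is only illustrative.

    #include "httpd.h"
    #include "http_config.h"

    /* implemented in h2_filter.c above; declaration assumed here */
    int h2_filter_h2_status_handler(request_rec *r);

    static void h2_status_register_hooks(apr_pool_t *pool)
    {
        (void)pool;
        /* httpd calls the handler for every request; it returns DECLINED
         * unless the configured handler is "http2-status" */
        ap_hook_handler(h2_filter_h2_status_handler, NULL, NULL, APR_HOOK_MIDDLE);
    }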


2 comments on commit a09062f

@lkraav commented on a09062f Jan 15, 2016


h2_push.c:682:22: error: conflicting types for built-in function ‘log2’ [-Werror]
 static unsigned char log2(apr_uint32_t n)
                      ^
  CC       mod_http2_la-h2_stream.lo
  CC       mod_http2_la-h2_stream_set.lo
cc1: all warnings being treated as errors
Makefile:703: recipe for target 'mod_http2_la-h2_push.lo' failed
make[1]: *** [mod_http2_la-h2_push.lo] Error 1
make[1]: *** Waiting for unfinished jobs....
make[1]: Leaving directory '/mnt/datapool/src/mod_h2.git/mod_http2'
Makefile:458: recipe for target 'all-recursive' failed
make: *** [all-recursive] Error 1
configure: summary of build options:

    Version:        1.2.0 shared 11:0:6
    Host type:      x86_64-unknown-linux-gnu
    Install prefix: /usr
    APXS:           /usr/bin/apxs
    HTTPD-VERSION:  2.4.18
    C compiler:     gcc -std=gnu99
    CFLAGS:         -g -O2
    WARNCFLAGS:     
    LDFLAGS:         -L/usr/lib64
    LIBS:           -lnghttp2  -lcrypto
    CPPFLAGS:        -I/usr/include/apache2 -I/usr/include/apr-1 -DH2_NG2_STREAM_API -DH2_NG2_CHANGE_PRIO -DH2_OPENSSL
    a2enmod         -
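
A common way out of this kind of clash is to rename the static helper so it no longer collides with the compiler's built-in log2(). A sketch of such a rename follows; the name h2_log2 and the loop implementation are assumptions for illustration, not necessarily the fix that was applied.

    #include <stdio.h>
    #include <apr.h>   /* apr_uint32_t */

    /* renamed integer log2 helper, avoiding the built-in clash under -Werror */
    static unsigned char h2_log2(apr_uint32_t n)
    {
        unsigned char lz = 0;
        if (!n) {
            return 0;
        }
        while (n >>= 1) {
            ++lz;
        }
        return lz;
    }

    int main(void)
    {
        printf("h2_log2(256) = %d\n", h2_log2(256)); /* prints 8 */
        return 0;
    }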

@icing (Owner) commented on a09062f Jan 16, 2016 via email
