On the 2.4.x branch: merge r1778630,1779459,1779525,1779528,1779738 from trunk
author    Stefan Eissing <icing@apache.org>
          Sat, 21 Jan 2017 15:48:17 +0000
committer Stefan Eissing <icing@apache.org>
          Sat, 21 Jan 2017 15:48:17 +0000
  *) mod_http2: rework of stream resource cleanup to avoid a crash when
     closing a lingering connection. Prohibit special file bucket beaming
     for shared buckets. Files sent in stream output now use the stream
     pool as read buffer, reducing the memory footprint of connections.
     [Yann Ylavic, Stefan Eissing]

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1779742 13f79535-47bb-0310-9956-ffa450edef68
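
For orientation: the heart of the "shared buckets" restriction mentioned above is a reference-count check on the apr_bucket_file before its file handle is beamed across threads. A minimal sketch using the same names that appear in the h2_bucket_beam.c hunk below (illustration only, not part of the patch):

    #include <apr_buckets.h>

    /* Sketch only: mirrors the check added in append_bucket() below. A file
     * bucket may be beamed (its apr_file_t handed to the receiving side)
     * only if the beam fully controls the file's lifetime, i.e. the
     * apr_bucket_file is not shared between buckets (refcount == 1). */
    static int file_bucket_beamable(apr_bucket *b)
    {
        if (APR_BUCKET_IS_FILE(b)) {
            apr_bucket_file *bf = b->data;
            return (bf->refcount.refcount == 1);
        }
        return 0; /* not a file bucket, no special beaming */
    }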

18 files changed:
CHANGES
CMakeLists.txt
modules/http2/NWGNUmod_http2
modules/http2/config2.m4
modules/http2/h2_bucket_beam.c
modules/http2/h2_bucket_beam.h
modules/http2/h2_bucket_eoc.c [deleted file]
modules/http2/h2_bucket_eoc.h [deleted file]
modules/http2/h2_config.c
modules/http2/h2_conn_io.c
modules/http2/h2_conn_io.h
modules/http2/h2_mplx.c
modules/http2/h2_session.c
modules/http2/h2_session.h
modules/http2/h2_stream.c
modules/http2/h2_stream.h
modules/http2/h2_version.h
modules/http2/mod_http2.dsp

diff --git a/CHANGES b/CHANGES
index eb2cc010e8b0565e305c8ccad047a6e5319755da..4bbc8e51f229eb2adab7e991e57afd0998324ce1 100644
--- a/CHANGES
+++ b/CHANGES
@@ -2,9 +2,11 @@
 
 Changes with Apache 2.4.26
 
-  *) mod_http2: streaming of request output now reacts timely to data
-     from other streams becoming available. Same for new incoming requests.
-     [Stefan Eissing]
+  *) mod_http2: rework of stream resource cleanup to avoid a crash when
+     closing a lingering connection. Prohibit special file bucket beaming
+     for shared buckets. Files sent in stream output now use the stream
+     pool as read buffer, reducing the memory footprint of connections.
+     [Yann Ylavic, Stefan Eissing]
      
   *) mod_proxy_fcgi, mod_fcgid: Fix crashes in ap_fcgi_encoded_env_len() when
      modules add empty environment variables to the request. PR60275.
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c2324a341a37b662bb8eb9952d81f94cc834c458..c5edf60241bd6be29410fd9174739c8e3db29e3f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -382,7 +382,7 @@ SET(mod_http2_requires               NGHTTP2_FOUND)
 SET(mod_http2_extra_defines          ssize_t=long)
 SET(mod_http2_extra_libs             ${NGHTTP2_LIBRARIES})
 SET(mod_http2_extra_sources
-  modules/http2/h2_alt_svc.c         modules/http2/h2_bucket_eoc.c
+  modules/http2/h2_alt_svc.c
   modules/http2/h2_bucket_eos.c      modules/http2/h2_config.c
   modules/http2/h2_conn.c            modules/http2/h2_conn_io.c
   modules/http2/h2_ctx.c             modules/http2/h2_filter.c
diff --git a/modules/http2/NWGNUmod_http2 b/modules/http2/NWGNUmod_http2
index 10974a7ebc763cc9b8dca04a2e31f9647dc5f1ae..74a7a9787437989d3b0f58628208db9d9cb384cb 100644
--- a/modules/http2/NWGNUmod_http2
+++ b/modules/http2/NWGNUmod_http2
@@ -186,7 +186,6 @@ TARGET_lib = \
 FILES_nlm_objs = \
        $(OBJDIR)/h2_alt_svc.o \
        $(OBJDIR)/h2_bucket_beam.o \
-       $(OBJDIR)/h2_bucket_eoc.o \
        $(OBJDIR)/h2_bucket_eos.o \
        $(OBJDIR)/h2_config.o \
        $(OBJDIR)/h2_conn.o \
diff --git a/modules/http2/config2.m4 b/modules/http2/config2.m4
index ceb183533d002ea67318355c836b46414adbd459..a33b0df3f120b6fc98103a8451d791a4e80a512c 100644
--- a/modules/http2/config2.m4
+++ b/modules/http2/config2.m4
@@ -21,7 +21,6 @@ http2_objs="dnl
 mod_http2.lo dnl
 h2_alt_svc.lo dnl
 h2_bucket_beam.lo dnl
-h2_bucket_eoc.lo dnl
 h2_bucket_eos.lo dnl
 h2_config.lo dnl
 h2_conn.lo dnl
diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c
index 39927fac4dfeaeb12d3d8428b230a87bc25d3640..30b9ffdec56d7fed56640e5b631d54cf612e4c03 100644
--- a/modules/http2/h2_bucket_beam.c
+++ b/modules/http2/h2_bucket_beam.c
@@ -68,7 +68,7 @@ struct h2_beam_proxy {
     apr_bucket_refcount refcount;
     APR_RING_ENTRY(h2_beam_proxy) link;
     h2_bucket_beam *beam;
-    apr_bucket *bred;
+    apr_bucket *bsender;
     apr_size_t n;
 };
 
@@ -78,9 +78,9 @@ static apr_status_t beam_bucket_read(apr_bucket *b, const char **str,
                                      apr_size_t *len, apr_read_type_e block)
 {
     h2_beam_proxy *d = b->data;
-    if (d->bred) {
+    if (d->bsender) {
         const char *data;
-        apr_status_t status = apr_bucket_read(d->bred, &data, len, block);
+        apr_status_t status = apr_bucket_read(d->bsender, &data, len, block);
         if (status == APR_SUCCESS) {
             *str = data + b->start;
             *len = b->length;
@@ -111,24 +111,24 @@ static void beam_bucket_destroy(void *data)
 
 static apr_bucket * h2_beam_bucket_make(apr_bucket *b, 
                                         h2_bucket_beam *beam,
-                                        apr_bucket *bred, apr_size_t n)
+                                        apr_bucket *bsender, apr_size_t n)
 {
     h2_beam_proxy *d;
 
     d = apr_bucket_alloc(sizeof(*d), b->list);
     H2_BPROXY_LIST_INSERT_TAIL(&beam->proxies, d);
     d->beam = beam;
-    d->bred = bred;
+    d->bsender = bsender;
     d->n = n;
     
-    b = apr_bucket_shared_make(b, d, 0, bred? bred->length : 0);
+    b = apr_bucket_shared_make(b, d, 0, bsender? bsender->length : 0);
     b->type = &h2_bucket_type_beam;
 
     return b;
 }
 
 static apr_bucket *h2_beam_bucket_create(h2_bucket_beam *beam,
-                                         apr_bucket *bred,
+                                         apr_bucket *bsender,
                                          apr_bucket_alloc_t *list,
                                          apr_size_t n)
 {
@@ -137,7 +137,7 @@ static apr_bucket *h2_beam_bucket_create(h2_bucket_beam *beam,
     APR_BUCKET_INIT(b);
     b->free = apr_bucket_free;
     b->list = list;
-    return h2_beam_bucket_make(b, beam, bred, n);
+    return h2_beam_bucket_make(b, beam, bsender, n);
 }
 
 const apr_bucket_type_t h2_bucket_type_beam = {
@@ -350,11 +350,11 @@ static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
         /* invoked from receiver thread, the last beam bucket for the send
          * bucket is about to be destroyed.
          * remove it from the hold, where it should be now */
-        if (proxy->bred) {
+        if (proxy->bsender) {
             for (b = H2_BLIST_FIRST(&beam->hold_list); 
                  b != H2_BLIST_SENTINEL(&beam->hold_list);
                  b = APR_BUCKET_NEXT(b)) {
-                 if (b == proxy->bred) {
+                 if (b == proxy->bsender) {
                     break;
                  }
             }
@@ -367,7 +367,7 @@ static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
                      b != H2_BLIST_SENTINEL(&beam->hold_list);
                      b = next) {
                     next = APR_BUCKET_NEXT(b);
-                    if (b == proxy->bred) {
+                    if (b == proxy->bsender) {
                         APR_BUCKET_REMOVE(b);
                         H2_BLIST_INSERT_TAIL(&beam->purge_list, b);
                         break;
@@ -383,7 +383,7 @@ static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
                     }
                 }
                 
-                proxy->bred = NULL;
+                proxy->bsender = NULL;
             }
             else {
                 /* it should be there unless we screwed up */
@@ -391,7 +391,7 @@ static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
                               APLOGNO(03384) "h2_beam(%d-%s): emitted bucket not "
                               "in hold, n=%d", beam->id, beam->tag, 
                               (int)proxy->n);
-                ap_assert(!proxy->bred);
+                ap_assert(!proxy->bsender);
             }
         }
         /* notify anyone waiting on space to become available */
@@ -463,7 +463,7 @@ static apr_status_t beam_send_cleanup(void *data)
         h2_beam_proxy *proxy = H2_BPROXY_LIST_FIRST(&beam->proxies);
         H2_BPROXY_REMOVE(proxy);
         proxy->beam = NULL;
-        proxy->bred = NULL;
+        proxy->bsender = NULL;
     }
     h2_blist_cleanup(&beam->purge_list);
     h2_blist_cleanup(&beam->hold_list);
@@ -685,11 +685,11 @@ apr_status_t h2_beam_wait_empty(h2_bucket_beam *beam, apr_read_type_e block)
 }
 
 static void move_to_hold(h2_bucket_beam *beam, 
-                         apr_bucket_brigade *red_brigade)
+                         apr_bucket_brigade *sender_bb)
 {
     apr_bucket *b;
-    while (red_brigade && !APR_BRIGADE_EMPTY(red_brigade)) {
-        b = APR_BRIGADE_FIRST(red_brigade);
+    while (sender_bb && !APR_BRIGADE_EMPTY(sender_bb)) {
+        b = APR_BRIGADE_FIRST(sender_bb);
         APR_BUCKET_REMOVE(b);
         H2_BLIST_INSERT_TAIL(&beam->send_list, b);
     }
@@ -726,7 +726,7 @@ static apr_status_t append_bucket(h2_bucket_beam *beam,
             }
         }
         
-        if (space_left < b->length) {
+        if (space_left <= 0) {
             status = r_wait_space(beam, block, pbl, &space_left);
             if (status != APR_SUCCESS) {
                 return status;
@@ -739,7 +739,7 @@ static apr_status_t append_bucket(h2_bucket_beam *beam,
     }
     
 
-    /* The fundamental problem is that reading a red bucket from
+    /* The fundamental problem is that reading a sender bucket from
      * a green thread is a total NO GO, because the bucket might use
      * its pool/bucket_alloc from a foreign thread and that will
      * corrupt. */
@@ -772,14 +772,21 @@ static apr_status_t append_bucket(h2_bucket_beam *beam,
          * is used on the first read to allocate buffer/mmap.
          * Since setting aside a file bucket will de-register the
          * file cleanup function from the previous pool, we need to
-         * call that from a red thread. 
+         * call that only from the sender thread.
+         *
+         * Currently, we do not beam file buckets with refcount > 1, as
+         * the beam is then not in complete control of the file's lifetime;
+         * otherwise the file could get closed by the receiver while the
+         * sender or the beam still have buckets using it. 
+         * 
          * Additionally, we allow callbacks to prevent beaming file
          * handles across. The use case for this is to limit the number 
          * of open file handles and rather use a less efficient beam
          * transport. */
-        apr_file_t *fd = ((apr_bucket_file *)b->data)->fd;
-        int can_beam = 1;
-        if (beam->last_beamed != fd && beam->can_beam_fn) {
+        apr_bucket_file *bf = b->data;
+        apr_file_t *fd = bf->fd;
+        int can_beam = (bf->refcount.refcount == 1);
+        if (can_beam && beam->last_beamed != fd && beam->can_beam_fn) {
             can_beam = beam->can_beam_fn(beam->can_beam_ctx, beam, fd);
         }
         if (can_beam) {
@@ -794,7 +801,7 @@ static apr_status_t append_bucket(h2_bucket_beam *beam,
          * but hope that after read, its data stays immutable for the
          * lifetime of the bucket. (see pool bucket handling above for
          * a counter example).
-         * We do the read while in a red thread, so that the bucket may
+         * We do the read while in the sender thread, so that the bucket may
          * use pools/allocators safely. */
         if (space_left < APR_BUCKET_BUFF_SIZE) {
             space_left = APR_BUCKET_BUFF_SIZE;
@@ -822,7 +829,7 @@ static apr_status_t append_bucket(h2_bucket_beam *beam,
 void h2_beam_send_from(h2_bucket_beam *beam, apr_pool_t *p)
 {
     h2_beam_lock bl;
-    /* Called from the red thread to add buckets to the beam */
+    /* Called from the sender thread to add buckets to the beam */
     if (enter_yellow(beam, &bl) == APR_SUCCESS) {
         r_purge_sent(beam);
         beam_set_send_pool(beam, p);
@@ -831,29 +838,28 @@ void h2_beam_send_from(h2_bucket_beam *beam, apr_pool_t *p)
 }
 
 apr_status_t h2_beam_send(h2_bucket_beam *beam, 
-                          apr_bucket_brigade *red_brigade,
+                          apr_bucket_brigade *sender_bb,
                           apr_read_type_e block)
 {
     apr_bucket *b;
     apr_status_t status = APR_SUCCESS;
     h2_beam_lock bl;
 
-    /* Called from the red thread to add buckets to the beam */
+    /* Called from the sender thread to add buckets to the beam */
     if (enter_yellow(beam, &bl) == APR_SUCCESS) {
         r_purge_sent(beam);
-        if (red_brigade && !beam->send_pool) {
-            beam_set_send_pool(beam, red_brigade->p);
+        if (sender_bb && !beam->send_pool) {
+            beam_set_send_pool(beam, sender_bb->p);
         }
         
         if (beam->aborted) {
-            move_to_hold(beam, red_brigade);
+            move_to_hold(beam, sender_bb);
             status = APR_ECONNABORTED;
         }
-        else if (red_brigade) {
-            int force_report = !APR_BRIGADE_EMPTY(red_brigade); 
-            while (!APR_BRIGADE_EMPTY(red_brigade)
-                   && status == APR_SUCCESS) {
-                b = APR_BRIGADE_FIRST(red_brigade);
+        else if (sender_bb) {
+            int force_report = !APR_BRIGADE_EMPTY(sender_bb); 
+            while (!APR_BRIGADE_EMPTY(sender_bb) && status == APR_SUCCESS) {
+                b = APR_BRIGADE_FIRST(sender_bb);
                 status = append_bucket(beam, b, block, &bl);
             }
             report_prod_io(beam, force_report);
@@ -873,7 +879,7 @@ apr_status_t h2_beam_receive(h2_bucket_beam *beam,
                              apr_off_t readbytes)
 {
     h2_beam_lock bl;
-    apr_bucket *bred, *bgreen, *ng;
+    apr_bucket *bsender, *bgreen, *ng;
     int transferred = 0;
     apr_status_t status = APR_SUCCESS;
     apr_off_t remain = readbytes;
@@ -905,35 +911,35 @@ transfer:
             ++transferred;
         }
 
-        /* transfer from our red brigade, transforming red buckets to
+        /* transfer from our sender brigade, transforming sender buckets to
          * green ones until we have enough */
         while (!H2_BLIST_EMPTY(&beam->send_list) && (readbytes <= 0 || remain >= 0)) {
-            bred = H2_BLIST_FIRST(&beam->send_list);
+            bsender = H2_BLIST_FIRST(&beam->send_list);
             bgreen = NULL;
             
-            if (readbytes > 0 && bred->length > 0 && remain <= 0) {
+            if (readbytes > 0 && bsender->length > 0 && remain <= 0) {
                 break;
             }
                         
-            if (APR_BUCKET_IS_METADATA(bred)) {
-                if (APR_BUCKET_IS_EOS(bred)) {
+            if (APR_BUCKET_IS_METADATA(bsender)) {
+                if (APR_BUCKET_IS_EOS(bsender)) {
                     bgreen = apr_bucket_eos_create(bb->bucket_alloc);
                     beam->close_sent = 1;
                 }
-                else if (APR_BUCKET_IS_FLUSH(bred)) {
+                else if (APR_BUCKET_IS_FLUSH(bsender)) {
                     bgreen = apr_bucket_flush_create(bb->bucket_alloc);
                 }
-                else if (AP_BUCKET_IS_ERROR(bred)) {
-                    ap_bucket_error *eb = (ap_bucket_error *)bred;
+                else if (AP_BUCKET_IS_ERROR(bsender)) {
+                    ap_bucket_error *eb = (ap_bucket_error *)bsender;
                     bgreen = ap_bucket_error_create(eb->status, eb->data,
                                                     bb->p, bb->bucket_alloc);
                 }
             }
-            else if (APR_BUCKET_IS_FILE(bred)) {
+            else if (APR_BUCKET_IS_FILE(bsender)) {
                 /* This is set aside into the target brigade pool so that 
                  * any read operation messes with that pool and not 
-                 * the red one. */
-                apr_bucket_file *f = (apr_bucket_file *)bred->data;
+                 * the sender one. */
+                apr_bucket_file *f = (apr_bucket_file *)bsender->data;
                 apr_file_t *fd = f->fd;
                 int setaside = (f->readpool != bb->p);
                 
@@ -944,7 +950,7 @@ transfer:
                     }
                     ++beam->files_beamed;
                 }
-                ng = apr_brigade_insert_file(bb, fd, bred->start, bred->length, 
+                ng = apr_brigade_insert_file(bb, fd, bsender->start, bsender->length, 
                                              bb->p);
 #if APR_HAS_MMAP
                 /* disable mmap handling as this leads to segfaults when
@@ -952,28 +958,28 @@ transfer:
                  * been handed out. See also PR 59348 */
                 apr_bucket_file_enable_mmap(ng, 0);
 #endif
-                remain -= bred->length;
+                remain -= bsender->length;
                 ++transferred;
-                APR_BUCKET_REMOVE(bred);
-                H2_BLIST_INSERT_TAIL(&beam->hold_list, bred);
+                APR_BUCKET_REMOVE(bsender);
+                H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender);
                 ++transferred;
                 continue;
             }
             else {
                 /* create a "green" standin bucket. we took care about the
-                 * underlying red bucket and its data when we placed it into
-                 * the red brigade.
-                 * the beam bucket will notify us on destruction that bred is
+                 * underlying sender bucket and its data when we placed it into
+                 * the sender brigade.
+                 * the beam bucket will notify us on destruction that bsender is
                  * no longer needed. */
-                bgreen = h2_beam_bucket_create(beam, bred, bb->bucket_alloc,
+                bgreen = h2_beam_bucket_create(beam, bsender, bb->bucket_alloc,
                                                beam->buckets_sent++);
             }
             
-            /* Place the red bucket into our hold, to be destroyed when no
+            /* Place the sender bucket into our hold, to be destroyed when no
              * green bucket references it any more. */
-            APR_BUCKET_REMOVE(bred);
-            H2_BLIST_INSERT_TAIL(&beam->hold_list, bred);
-            beam->received_bytes += bred->length;
+            APR_BUCKET_REMOVE(bsender);
+            H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender);
+            beam->received_bytes += bsender->length;
             ++transferred_buckets;
             
             if (bgreen) {
@@ -982,7 +988,7 @@ transfer:
                 ++transferred;
             }
             else {
-                bgreen = h2_beam_bucket(beam, bb, bred);
+                bgreen = h2_beam_bucket(beam, bb, bsender);
                 while (bgreen && bgreen != APR_BRIGADE_SENTINEL(bb)) {
                     ++transferred;
                     remain -= bgreen->length;
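
The comment in append_bucket() above mentions callbacks that may prevent beaming of file handles, e.g. to bound the number of open descriptors and fall back to the less efficient copy transport. A rough sketch of such a policy callback; its shape follows the call beam->can_beam_fn(beam->can_beam_ctx, beam, fd) visible above, while the typedef and registration function are not part of this hunk and are assumed:

    #include <apr_file_io.h>
    #include "h2_bucket_beam.h"

    /* Hypothetical context limiting how many file handles may be beamed. */
    typedef struct {
        int beamed_count;   /* file handles beamed so far */
        int max_beamed;     /* budget */
    } file_beam_budget;

    /* Matches the invocation beam->can_beam_fn(can_beam_ctx, beam, fd) above:
     * return non-zero to allow beaming the file handle, 0 to force the
     * (less efficient) copy transport instead. */
    static int can_beam_file(void *ctx, h2_bucket_beam *beam, apr_file_t *file)
    {
        file_beam_budget *budget = ctx;
        (void)beam; (void)file;
        if (budget->beamed_count >= budget->max_beamed) {
            return 0;
        }
        ++budget->beamed_count;
        return 1;
    }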
diff --git a/modules/http2/h2_bucket_beam.h b/modules/http2/h2_bucket_beam.h
index a4ead053cef378a771b9166682708c1436968462..2918114916452c09e8160ba9cae1730520001631 100644
--- a/modules/http2/h2_bucket_beam.h
+++ b/modules/http2/h2_bucket_beam.h
@@ -72,7 +72,7 @@ apr_size_t h2_util_bl_print(char *buffer, apr_size_t bmax,
  * A h2_bucket_beam solves the task of transferring buckets, esp. their data,
  * across threads with zero buffer copies.
  *
- * When a thread, let's call it the red thread, wants to send buckets to
+ * When a thread, let's call it the sender thread, wants to send buckets to
  * another, the green thread, it creates a h2_bucket_beam and adds buckets
  * via the h2_beam_send(). It gives the beam to the green thread which then
  * can receive buckets into its own brigade via h2_beam_receive().
@@ -92,7 +92,7 @@ apr_size_t h2_util_bl_print(char *buffer, apr_size_t bmax,
  * 
  * The proper way of shutting down a beam is to first make sure there are no
  * more green buckets out there, then cleanup the beam to purge eventually
- * still existing red buckets and then, possibly, terminate the beam itself
+ * still existing sender buckets and then, possibly, terminate the beam itself
  * (or the pool it was created with).
  *
  * The following restrictions apply to bucket transport:
@@ -105,32 +105,32 @@ apr_size_t h2_util_bl_print(char *buffer, apr_size_t bmax,
  *   - file buckets will transfer the file itself into a new bucket, if allowed
  *   - all other buckets are read on send to make sure data is present
  *
- * This assures that when the red thread sends its red buckets, the data
- * is made accessible while still on the red side. The red bucket then enters
+ * This assures that when the sender thread sends its sender buckets, the data
+ * is made accessible while still on the sender side. The sender bucket then enters
  * the beam's hold storage.
- * When the green thread calls receive, red buckets in the hold are wrapped
+ * When the green thread calls receive, sender buckets in the hold are wrapped
  * into special beam buckets. Beam buckets on read present the data directly
- * from the internal red one, but otherwise live on the green side. When a
+ * from the internal sender one, but otherwise live on the green side. When a
  * beam bucket gets destroyed, it notifies its beam that the corresponding
- * red bucket from the hold may be destroyed.
+ * sender bucket from the hold may be destroyed.
  * Since the destruction of green buckets happens in the green thread, any
- * corresponding red bucket can not immediately be destroyed, as that would
+ * corresponding sender bucket can not immediately be destroyed, as that would
  * result in race conditions.
- * Instead, the beam transfers such red buckets from the hold to the purge
- * storage. Next time there is a call from the red side, the buckets in
+ * Instead, the beam transfers such sender buckets from the hold to the purge
+ * storage. Next time there is a call from the sender side, the buckets in
  * purge will be deleted.
  *
- * There are callbacks that can be registered with a beam:
- * - a "consumed" callback that gets called on the red side with the
+ * There are callbacks that can be registered with a beam:
+ * - a "consumed" callback that gets called on the sender side with the
  *   amount of data that has been received by the green side. The amount
- *   is a delta from the last callback invocation. The red side can trigger
+ *   is a delta from the last callback invocation. The sender side can trigger
  *   these callbacks by calling h2_beam_send() with a NULL brigade.
  * - a "can_beam_file" callback that can prohibit the transfer of file handles
  *   through the beam. This will cause file buckets to be read on send and
 *   its data buffer will then be transported just like a heap bucket would.
  *   When no callback is registered, no restrictions apply and all files are
  *   passed through.
- *   File handles transferred to the green side will stay there until the
+ *   File handles transferred to the green side will stay there until the
  *   receiving brigade's pool is destroyed/cleared. If the pool lives very
  *   long or if many different files are beamed, the process might run out
  *   of available file handles.
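
To make the sender/receiver roles described above concrete, here is a condensed usage sketch. It assumes the h2_beam_* signatures visible elsewhere in this commit (h2_beam_create(), h2_beam_send_from(), h2_beam_send(), h2_beam_receive()); return codes and error handling are omitted:

    #include <apr_buckets.h>
    #include "h2_bucket_beam.h"

    /* Sender thread: create the beam, bind the pool its buckets come from,
     * then hand brigades over. Data of non-file buckets is read while still
     * on the sender thread, so pools/allocators are only touched there. */
    static void sender_thread(apr_pool_t *pool, apr_bucket_brigade *bb,
                              h2_bucket_beam **pbeam)
    {
        h2_beam_create(pbeam, pool, 1, "example", H2_BEAM_OWNER_SEND, 0);
        h2_beam_send_from(*pbeam, pool);
        h2_beam_send(*pbeam, bb, APR_BLOCK_READ);
    }

    /* Receiver ("green") thread: buckets in the beam's hold are wrapped into
     * beam buckets placed in `out`; destroying them later moves the held
     * sender buckets to the purge list, which the sender cleans up on its
     * next call into the beam. */
    static void receiver_thread(h2_bucket_beam *beam, apr_bucket_brigade *out)
    {
        h2_beam_receive(beam, out, APR_NONBLOCK_READ, 16 * 1024);
    }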
diff --git a/modules/http2/h2_bucket_eoc.c b/modules/http2/h2_bucket_eoc.c
deleted file mode 100644
index 33144ef..0000000
--- a/modules/http2/h2_bucket_eoc.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-#include <stddef.h>
-
-#include <httpd.h>
-#include <http_core.h>
-#include <http_connection.h>
-#include <http_log.h>
-
-#include "h2_private.h"
-#include "h2.h"
-#include "h2_mplx.h"
-#include "h2_session.h"
-#include "h2_bucket_eoc.h"
-
-typedef struct {
-    apr_bucket_refcount refcount;
-    h2_session *session;
-} h2_bucket_eoc;
-
-static apr_status_t bucket_cleanup(void *data)
-{
-    h2_session **psession = data;
-
-    if (*psession) {
-        /*
-         * If bucket_destroy is called after us, this prevents
-         * bucket_destroy from trying to destroy the pool again.
-         */
-        *psession = NULL;
-    }
-    return APR_SUCCESS;
-}
-
-static apr_status_t bucket_read(apr_bucket *b, const char **str,
-                                apr_size_t *len, apr_read_type_e block)
-{
-    (void)b;
-    (void)block;
-    *str = NULL;
-    *len = 0;
-    return APR_SUCCESS;
-}
-
-apr_bucket * h2_bucket_eoc_make(apr_bucket *b, h2_session *session)
-{
-    h2_bucket_eoc *h;
-
-    h = apr_bucket_alloc(sizeof(*h), b->list);
-    h->session = session;
-
-    b = apr_bucket_shared_make(b, h, 0, 0);
-    b->type = &h2_bucket_type_eoc;
-    
-    return b;
-}
-
-apr_bucket * h2_bucket_eoc_create(apr_bucket_alloc_t *list, h2_session *session)
-{
-    apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
-
-    APR_BUCKET_INIT(b);
-    b->free = apr_bucket_free;
-    b->list = list;
-    b = h2_bucket_eoc_make(b, session);
-    if (session) {
-        h2_bucket_eoc *h = b->data;
-        apr_pool_pre_cleanup_register(session->pool, &h->session, bucket_cleanup);
-    }
-    return b;
-}
-
-static void bucket_destroy(void *data)
-{
-    h2_bucket_eoc *h = data;
-
-    if (apr_bucket_shared_destroy(h)) {
-        h2_session *session = h->session;
-        apr_bucket_free(h);
-        if (session) {
-            h2_session_eoc_callback(session);
-            /* all is gone now */
-        }
-    }
-}
-
-const apr_bucket_type_t h2_bucket_type_eoc = {
-    "H2EOC", 5, APR_BUCKET_METADATA,
-    bucket_destroy,
-    bucket_read,
-    apr_bucket_setaside_noop,
-    apr_bucket_split_notimpl,
-    apr_bucket_shared_copy
-};
-
diff --git a/modules/http2/h2_bucket_eoc.h b/modules/http2/h2_bucket_eoc.h
deleted file mode 100644
index 2d46691..0000000
--- a/modules/http2/h2_bucket_eoc.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef mod_http2_h2_bucket_eoc_h
-#define mod_http2_h2_bucket_eoc_h
-
-struct h2_session;
-
-/** End Of HTTP/2 SESSION (H2EOC) bucket */
-extern const apr_bucket_type_t h2_bucket_type_eoc;
-
-#define H2_BUCKET_IS_H2EOC(e)     (e->type == &h2_bucket_type_eoc)
-
-apr_bucket * h2_bucket_eoc_make(apr_bucket *b, 
-                                struct h2_session *session);
-
-apr_bucket * h2_bucket_eoc_create(apr_bucket_alloc_t *list,
-                                  struct h2_session *session);
-
-#endif /* mod_http2_h2_bucket_eoc_h */
diff --git a/modules/http2/h2_config.c b/modules/http2/h2_config.c
index 3ac969d76896ca9fd7b7214e8fc74d0dfdb3d3f0..0adc5528521bd2a88d0a0b4e5a8a55d3e471527e 100644
--- a/modules/http2/h2_config.c
+++ b/modules/http2/h2_config.c
@@ -48,7 +48,7 @@ static h2_config defconf = {
     -1,                     /* min workers */
     -1,                     /* max workers */
     10 * 60,                /* max workers idle secs */
-    64 * 1024,              /* stream max mem size */
+    32 * 1024,              /* stream max mem size */
     NULL,                   /* no alt-svcs */
     -1,                     /* alt-svc max age */
     0,                      /* serialize headers */
diff --git a/modules/http2/h2_conn_io.c b/modules/http2/h2_conn_io.c
index 2998c8b338bceb9be38ca810262c96b32103f68c..f1f0de49ac93a2a0f81afe0bbf67996796c461a9 100644
--- a/modules/http2/h2_conn_io.c
+++ b/modules/http2/h2_conn_io.c
@@ -24,7 +24,6 @@
 #include <http_request.h>
 
 #include "h2_private.h"
-#include "h2_bucket_eoc.h"
 #include "h2_bucket_eos.h"
 #include "h2_config.h"
 #include "h2_conn_io.h"
@@ -72,9 +71,6 @@ static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level,
                 else if (AP_BUCKET_IS_EOR(b)) {
                     off += apr_snprintf(buffer+off, bmax-off, "eor ");
                 }
-                else if (H2_BUCKET_IS_H2EOC(b)) {
-                    off += apr_snprintf(buffer+off, bmax-off, "h2eoc ");
-                }
                 else if (H2_BUCKET_IS_H2EOS(b)) {
                     off += apr_snprintf(buffer+off, bmax-off, "h2eos ");
                 }
@@ -269,8 +265,7 @@ static void check_write_size(h2_conn_io *io)
     }
 }
 
-static apr_status_t pass_output(h2_conn_io *io, int flush,
-                                h2_session *session_eoc)
+static apr_status_t pass_output(h2_conn_io *io, int flush)
 {
     conn_rec *c = io->c;
     apr_bucket_brigade *bb = io->output;
@@ -303,20 +298,6 @@ static apr_status_t pass_output(h2_conn_io *io, int flush,
     }
     apr_brigade_cleanup(bb);
 
-    if (session_eoc) {
-        apr_status_t tmp;
-        b = h2_bucket_eoc_create(c->bucket_alloc, session_eoc);
-        APR_BRIGADE_INSERT_TAIL(bb, b);
-        h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "master conn pass", bb);
-        tmp = ap_pass_brigade(c->output_filters, bb);
-        if (status == APR_SUCCESS) {
-            status = tmp;
-        }
-        /* careful with access to io after this, we have flushed an EOC bucket
-         * that de-allocated us all. */
-        apr_brigade_cleanup(bb);
-    }
-    
     if (status != APR_SUCCESS) {
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03044)
                       "h2_conn_io(%ld): pass_out brigade %ld bytes",
@@ -343,16 +324,11 @@ int h2_conn_io_needs_flush(h2_conn_io *io)
 apr_status_t h2_conn_io_flush(h2_conn_io *io)
 {
     apr_status_t status;
-    status = pass_output(io, 1, NULL);
+    status = pass_output(io, 1);
     check_write_size(io);
     return status;
 }
 
-apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, h2_session *session)
-{
-    return pass_output(io, 1, session);
-}
-
 apr_status_t h2_conn_io_write(h2_conn_io *io, const char *data, size_t length)
 {
     apr_status_t status = APR_SUCCESS;
diff --git a/modules/http2/h2_conn_io.h b/modules/http2/h2_conn_io.h
index 0cbe479d1dfcfe3ec2ed231125bcb08301c22637..2adf13bf7944461db31990a10f41f91282ecf5b2 100644
--- a/modules/http2/h2_conn_io.h
+++ b/modules/http2/h2_conn_io.h
@@ -61,12 +61,6 @@ apr_status_t h2_conn_io_write(h2_conn_io *io,
 
 apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb);
 
-/**
- * Append an End-Of-Connection bucket to the output that, once destroyed,
- * will tear down the complete http2 session.
- */
-apr_status_t h2_conn_io_write_eoc(h2_conn_io *io, struct h2_session *session);
-
 /**
  * Pass any buffered data on to the connection output filters.
  * @param io the connection io
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
index b535a02a13a1d28851394ee9e0de02e48c72e75b..4e200648e4e421a425ea8be316b1543a93fe8183 100644
--- a/modules/http2/h2_mplx.c
+++ b/modules/http2/h2_mplx.c
@@ -199,9 +199,13 @@ static void check_tx_free(h2_mplx *m)
     }
 }
 
+typedef struct {
+    h2_mplx *m;
+} purge_ctx;
+
 static int purge_stream(void *ctx, void *val) 
 {
-    h2_mplx *m = ctx;
+    purge_ctx *pctx = ctx;
     h2_stream *stream = val;
     int stream_id = stream->id;
     h2_task *task;
@@ -211,20 +215,22 @@ static int purge_stream(void *ctx, void *val)
      * before task destruction, otherwise it will complain. */
     h2_stream_cleanup(stream);
     
-    task = h2_ihash_get(m->tasks, stream_id);    
+    task = h2_ihash_get(pctx->m->tasks, stream_id);    
     if (task) {
-        task_destroy(m, task, 1);
+        task_destroy(pctx->m, task, 1);
     }
     
     h2_stream_destroy(stream);
-    h2_ihash_remove(m->spurge, stream_id);
+    h2_ihash_remove(pctx->m->spurge, stream_id);
     return 0;
 }
 
 static void purge_streams(h2_mplx *m)
 {
     if (!h2_ihash_empty(m->spurge)) {
-        while(!h2_ihash_iter(m->spurge, purge_stream, m)) {
+        purge_ctx ctx;
+        ctx.m = m;
+        while(!h2_ihash_iter(m->spurge, purge_stream, &ctx)) {
             /* repeat until empty */
         }
         h2_ihash_clear(m->spurge);
@@ -233,20 +239,13 @@ static void purge_streams(h2_mplx *m)
 
 static void h2_mplx_destroy(h2_mplx *m)
 {
-    conn_rec **pslave;
     ap_assert(m);
     ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
                   "h2_mplx(%ld): destroy, tasks=%d", 
                   m->id, (int)h2_ihash_count(m->tasks));
     check_tx_free(m);
-    
-    while (m->spare_slaves->nelts > 0) {
-        pslave = (conn_rec **)apr_array_pop(m->spare_slaves);
-        h2_slave_destroy(*pslave);
-    }
-    if (m->pool) {
-        apr_pool_destroy(m->pool);
-    }
+    /* the pool is destroyed as a child of h2_session->pool; slave
+     * connection pools are children of m->pool */
 }
 
 /**
@@ -442,6 +441,7 @@ static void stream_done(h2_mplx *m, h2_stream *stream, int rst_error)
      */
     h2_iq_remove(m->q, stream->id);
     h2_ihash_remove(m->streams, stream->id);
+    h2_ihash_remove(m->shold, stream->id);
     
     h2_stream_cleanup(stream);
     m->tx_handles_reserved += h2_beam_get_files_beamed(stream->input);
@@ -467,7 +467,7 @@ static void stream_done(h2_mplx *m, h2_stream *stream, int rst_error)
             task_destroy(m, task, 1);
         }
     }
-    h2_stream_destroy(stream);
+    h2_ihash_add(m->spurge, stream);
 }
 
 static int stream_done_iter(void *ctx, void *val)
@@ -568,7 +568,7 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
         h2_mplx_set_consumed_cb(m, NULL, NULL);
         h2_mplx_abort(m);
         h2_iq_clear(m->q);
-        purge_streams(m);
+        h2_ihash_clear(m->spurge);
 
         /* 3. wakeup all sleeping tasks. Mark all still active streams as 'done'. 
          *    m->streams has to be empty afterwards with streams either in
@@ -581,10 +581,7 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
         }
         ap_assert(h2_ihash_empty(m->streams));
 
-        /* 4. purge all streams we collected by marking them 'done' */
-        purge_streams(m);
-        
-        /* 5. while workers are busy on this connection, meaning they
+        /* 4. while workers are busy on this connection, meaning they
          *    are processing tasks from this connection, wait on them finishing
          *    to wake us and check again. Eventually, this has to succeed. */    
         m->join_wait = wait;
@@ -602,11 +599,10 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
                 h2_ihash_iter(m->shold, report_stream_iter, m);
                 h2_ihash_iter(m->tasks, task_print, m);
             }
-            purge_streams(m);
         }
         m->join_wait = NULL;
         
-        /* 6. All workers for this connection are done, we are in 
+        /* 5. All workers for this connection are done, we are in 
          * single-threaded processing now effectively. */
         leave_mutex(m, acquired);
 
@@ -625,12 +621,10 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
             }
         }
 
-        /* 7. With all tasks done, the stream hold should be empty and all
-         *    remaining streams are ready for purging */
+        /* 6. With all tasks done, the stream hold should be empty now. */
         ap_assert(h2_ihash_empty(m->shold));
-        purge_streams(m);
         
-        /* 8. close the h2_req_enginge shed and self destruct */
+        /* 7. close the h2_req_engine shed and self-destruct */
         h2_ngn_shed_destroy(m->ngn_shed);
         m->ngn_shed = NULL;
         h2_mplx_destroy(m);
@@ -913,7 +907,6 @@ static h2_task *next_stream_task(h2_mplx *m)
                 h2_slave_run_pre_connection(slave, ap_get_conn_socket(slave));
             }
             stream->started = 1;
-            stream->can_be_cleaned = 0;
             task->worker_started = 1;
             task->started_at = apr_time_now();
             if (sid > m->max_stream_started) {
@@ -1392,6 +1385,7 @@ apr_status_t h2_mplx_dispatch_master_events(h2_mplx *m,
         apr_atomic_set32(&m->event_pending, 0);
         /* update input windows for streams */
         h2_ihash_iter(m->streams, report_consumption_iter, m);
+        purge_streams(m);
         
         if (!h2_iq_empty(m->readyq)) {
             n = h2_iq_mshift(m->readyq, ids, H2_ALEN(ids));
diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
index 93cfe33718c031012e26cb838667bd3656485d67..053bea94ba5b4f903956543cfd1d161cc5cc8b51 100644
--- a/modules/http2/h2_session.c
+++ b/modules/http2/h2_session.c
@@ -29,7 +29,6 @@
 
 #include "h2_private.h"
 #include "h2.h"
-#include "h2_bucket_eoc.h"
 #include "h2_bucket_eos.h"
 #include "h2_config.h"
 #include "h2_ctx.h"
@@ -730,7 +729,7 @@ static apr_status_t init_callbacks(conn_rec *c, nghttp2_session_callbacks **pcb)
     return APR_SUCCESS;
 }
 
-static void h2_session_destroy(h2_session *session)
+static void h2_session_cleanup(h2_session *session)
 {
     ap_assert(session);    
 
@@ -752,10 +751,7 @@ static void h2_session_destroy(h2_session *session)
 
     if (APLOGctrace1(session->c)) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
-                      "h2_session(%ld): destroy", session->id);
-    }
-    if (session->pool) {
-        apr_pool_destroy(session->pool);
+                      "h2_session(%ld): cleanup", session->id);
     }
 }
 
@@ -857,9 +853,8 @@ static apr_status_t session_pool_cleanup(void *data)
                       "goodbye, clients will be confused, should not happen", 
                       session->id);
     }
-    /* keep us from destroying the pool, since that is already ongoing. */
+    h2_session_cleanup(session);
     session->pool = NULL;
-    h2_session_destroy(session);
     return APR_SUCCESS;
 }
 
@@ -918,7 +913,9 @@ static h2_session *h2_session_create_int(conn_rec *c,
     }
     apr_pool_tag(pool, "h2_session");
 
-    session = apr_pcalloc(pool, sizeof(h2_session));
+    /* give the h2_session a lifetime beyond its own pool and everything
+     * connected to it. */
+    session = apr_pcalloc(c->pool, sizeof(h2_session));
     if (session) {
         int rv;
         nghttp2_mem *mem;
@@ -964,7 +961,6 @@ static h2_session *h2_session_create_int(conn_rec *c,
         if (status != APR_SUCCESS) {
             ap_log_cerror(APLOG_MARK, APLOG_ERR, status, c, APLOGNO(02927) 
                           "nghttp2: error in init_callbacks");
-            h2_session_destroy(session);
             return NULL;
         }
         
@@ -973,7 +969,6 @@ static h2_session *h2_session_create_int(conn_rec *c,
             ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
                           APLOGNO(02928) "nghttp2_option_new: %s", 
                           nghttp2_strerror(rv));
-            h2_session_destroy(session);
             return NULL;
         }
         nghttp2_option_set_peer_max_concurrent_streams(
@@ -1004,10 +999,9 @@ static h2_session *h2_session_create_int(conn_rec *c,
             ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
                           APLOGNO(02929) "nghttp2_session_server_new: %s",
                           nghttp2_strerror(rv));
-            h2_session_destroy(session);
             return NULL;
         }
-         
+        
         n = h2_config_geti(session->config, H2_CONF_PUSH_DIARY_SIZE);
         session->push_diary = h2_push_diary_create(session->pool, n);
         
@@ -1037,14 +1031,6 @@ h2_session *h2_session_rcreate(request_rec *r, h2_ctx *ctx, h2_workers *workers)
     return h2_session_create_int(r->connection, r, ctx, workers);
 }
 
-void h2_session_eoc_callback(h2_session *session)
-{
-    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
-                  "session(%ld): cleanup and destroy", session->id);
-    apr_pool_cleanup_kill(session->pool, session, session_pool_cleanup);
-    h2_session_destroy(session);
-}
-
 static apr_status_t h2_session_start(h2_session *session, int *rv)
 {
     apr_status_t status = APR_SUCCESS;
@@ -2342,10 +2328,6 @@ out:
     status = APR_SUCCESS;
     if (session->state == H2_SESSION_ST_DONE) {
         status = APR_EOF;
-        if (!session->eoc_written) {
-            session->eoc_written = 1;
-            h2_conn_io_write_eoc(&session->io, session);
-        }
     }
     
     return status;
diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h
index 417e3c1ebbb92481520358754f459a9a485b4394..a90c0b47caac674dcb873c260b4026a15289ad97 100644
--- a/modules/http2/h2_session.h
+++ b/modules/http2/h2_session.h
@@ -94,7 +94,6 @@ typedef struct h2_session {
     h2_session_props remote;        /* properties of remote session */
     
     unsigned int reprioritize  : 1; /* scheduled streams priority changed */
-    unsigned int eoc_written   : 1; /* h2 eoc bucket written */
     unsigned int flush         : 1; /* flushing output necessary */
     unsigned int have_read     : 1; /* session has read client data */
     unsigned int have_written  : 1; /* session did write data to client */
@@ -165,13 +164,6 @@ apr_status_t h2_session_process(h2_session *session, int async);
  */
 apr_status_t h2_session_pre_close(h2_session *session, int async);
 
-/**
- * Cleanup the session and all objects it still contains. This will not
- * destroy h2_task instances that have not finished yet. 
- * @param session the session to destroy
- */
-void h2_session_eoc_callback(h2_session *session);
-
 /**
  * Called when a serious error occurred and the session needs to terminate
  * without further connection io.
diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c
index 7f919ab6e25738031682d7f0a163c932cb39fd2e..0a82e510bc322d4e339024c4fdfc16d850e4c7bf 100644
--- a/modules/http2/h2_stream.c
+++ b/modules/http2/h2_stream.c
@@ -168,27 +168,6 @@ static void prepend_response(h2_stream *stream, h2_headers *response)
     APR_BRIGADE_INSERT_HEAD(stream->out_buffer, b);
 }
 
-static apr_status_t stream_pool_cleanup(void *ctx)
-{
-    h2_stream *stream = ctx;
-    apr_status_t status;
-    
-    ap_assert(stream->can_be_cleaned);
-    if (stream->files) {
-        apr_file_t *file;
-        int i;
-        for (i = 0; i < stream->files->nelts; ++i) {
-            file = APR_ARRAY_IDX(stream->files, i, apr_file_t*);
-            status = apr_file_close(file);
-            ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, stream->session->c, 
-                          "h2_stream(%ld-%d): destroy, closed file %d", 
-                          stream->session->id, stream->id, i);
-        }
-        stream->files = NULL;
-    }
-    return APR_SUCCESS;
-}
-
 h2_stream *h2_stream_open(int id, apr_pool_t *pool, h2_session *session,
                           int initiated_on)
 {
@@ -200,15 +179,12 @@ h2_stream *h2_stream_open(int id, apr_pool_t *pool, h2_session *session,
     stream->state        = H2_STREAM_ST_IDLE;
     stream->pool         = pool;
     stream->session      = session;
-    stream->can_be_cleaned = 1;
-
+    
     h2_beam_create(&stream->input, pool, id, "input", H2_BEAM_OWNER_SEND, 0);
     h2_beam_send_from(stream->input, stream->pool);
     h2_beam_create(&stream->output, pool, id, "output", H2_BEAM_OWNER_RECV, 0);
     
     set_state(stream, H2_STREAM_ST_OPEN);
-    apr_pool_cleanup_register(pool, stream, stream_pool_cleanup, 
-                              apr_pool_cleanup_null);
     ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03082)
                   "h2_stream(%ld-%d): opened", session->id, stream->id);
     return stream;
@@ -240,13 +216,12 @@ void h2_stream_cleanup(h2_stream *stream)
 void h2_stream_destroy(h2_stream *stream)
 {
     ap_assert(stream);
-    ap_assert(!h2_mplx_stream_get(stream->session->mplx, stream->id));
     ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c, 
                   "h2_stream(%ld-%d): destroy", 
                   stream->session->id, stream->id);
-    stream->can_be_cleaned = 1;
     if (stream->pool) {
         apr_pool_destroy(stream->pool);
+        stream->pool = NULL;
     }
 }
 
@@ -525,8 +500,6 @@ apr_status_t h2_stream_write_data(h2_stream *stream,
 
 static apr_status_t fill_buffer(h2_stream *stream, apr_size_t amount)
 {
-    conn_rec *c = stream->session->c;
-    apr_bucket *b;
     apr_status_t status;
     
     if (!stream->output) {
@@ -537,33 +510,6 @@ static apr_status_t fill_buffer(h2_stream *stream, apr_size_t amount)
     ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c,
                   "h2_stream(%ld-%d): beam_received",
                   stream->session->id, stream->id);
-    /* The buckets we reveive are using the stream->out_buffer pool as
-     * lifetime which is exactly what we want since this is stream->pool.
-     *
-     * However: when we send these buckets down the core output filters, the
-     * filter might decide to setaside them into a pool of its own. And it
-     * might decide, after having sent the buckets, to clear its pool.
-     *
-     * This is problematic for file buckets because it then closed the contained
-     * file. Any split off buckets we sent afterwards will result in a 
-     * APR_EBADF.
-     */
-    for (b = APR_BRIGADE_FIRST(stream->out_buffer);
-         b != APR_BRIGADE_SENTINEL(stream->out_buffer);
-         b = APR_BUCKET_NEXT(b)) {
-        if (APR_BUCKET_IS_FILE(b)) {
-            apr_bucket_file *f = (apr_bucket_file *)b->data;
-            apr_pool_t *fpool = apr_file_pool_get(f->fd);
-            if (fpool != c->pool) {
-                apr_bucket_setaside(b, c->pool);
-                if (!stream->files) {
-                    stream->files = apr_array_make(stream->pool, 
-                                                   5, sizeof(apr_file_t*));
-                }
-                APR_ARRAY_PUSH(stream->files, apr_file_t*) = f->fd;
-            }
-        }
-    }
     return status;
 }
 
diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h
index adb419e5165e50cfee0618435dfdf77e8f452bc7..0a386ecb95ad82bf056b457ddb2716fcbb09c8dd 100644
--- a/modules/http2/h2_stream.h
+++ b/modules/http2/h2_stream.h
@@ -57,7 +57,6 @@ struct h2_stream {
     struct h2_bucket_beam *input;
     struct h2_bucket_beam *output;
     apr_bucket_brigade *out_buffer;
-    apr_array_header_t *files;  /* apr_file_t* we collected during I/O */
 
     int rst_error;              /* stream error for RST_STREAM */
     unsigned int aborted   : 1; /* was aborted */
@@ -65,7 +64,6 @@ struct h2_stream {
     unsigned int started   : 1; /* stream has started processing */
     unsigned int has_response : 1; /* response headers are known */
     unsigned int push_policy;   /* which push policy to use for this request */
-    unsigned int can_be_cleaned : 1; /* stream pool can be cleaned */
     
     const h2_priority *pref_priority; /* preferred priority for this stream */
     apr_off_t out_data_frames;  /* # of DATA frames sent */
diff --git a/modules/http2/h2_version.h b/modules/http2/h2_version.h
index 7a6793ff0fab5329b14836af50b719694f3b443c..f1101a030225d90a47278b35605f02722a02f04d 100644
--- a/modules/http2/h2_version.h
+++ b/modules/http2/h2_version.h
@@ -26,7 +26,7 @@
  * @macro
  * Version number of the http2 module as c string
  */
-#define MOD_HTTP2_VERSION "1.8.8"
+#define MOD_HTTP2_VERSION "1.8.9"
 
 /**
  * @macro
@@ -34,7 +34,7 @@
  * release. This is a 24 bit number with 8 bits for major number, 8 bits
  * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
  */
-#define MOD_HTTP2_VERSION_NUM 0x010808
+#define MOD_HTTP2_VERSION_NUM 0x010809
 
 
 #endif /* mod_h2_h2_version_h */
diff --git a/modules/http2/mod_http2.dsp b/modules/http2/mod_http2.dsp
index f26607470d4fa17a7a90a66f93cc7b33e5103e09..fbc75a616e21be486ec42c463b388251dc06adb3 100644
--- a/modules/http2/mod_http2.dsp
+++ b/modules/http2/mod_http2.dsp
@@ -109,10 +109,6 @@ SOURCE=./h2_bucket_beam.c
 # End Source File
 # Begin Source File
 
-SOURCE=./h2_bucket_eoc.c
-# End Source File
-# Begin Source File
-
 SOURCE=./h2_bucket_eos.c
 # End Source File
 # Begin Source File