1 /* Licensed to the Apache Software Foundation (ASF) under one or more
2 * contributor license agreements. See the NOTICE file distributed with
3 * this work for additional information regarding copyright ownership.
4 * The ASF licenses this file to You under the Apache License, Version 2.0
5 * (the "License"); you may not use this file except in compliance with
6 * the License. You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include "http_request.h"
20 #include "http_protocol.h"
21 #include "http_config.h"
22 #include "mod_status.h"
25 #include "apr_strings.h"
28 #define APR_WANT_STRFUNC
30 #include "apr_general.h"
36 #include "ap_socache.h"
38 /* XXX Unfortunately, there are still many unsigned ints in use here, so we
39 * XXX cannot allow more than UINT_MAX. Since some of the ints are exposed in
40 * XXX public interfaces, a simple search and replace is not enough.
41 * XXX It should be possible to extend that so that the total cache size can
42 * XXX be APR_SIZE_MAX and only the object size needs to be smaller than
45 #define SHMCB_MAX_SIZE (UINT_MAX<APR_SIZE_MAX ? UINT_MAX : APR_SIZE_MAX)
47 #define DEFAULT_SHMCB_PREFIX "socache-shmcb-"
49 #define DEFAULT_SHMCB_SUFFIX ".cache"
51 #define ALIGNED_HEADER_SIZE APR_ALIGN_DEFAULT(sizeof(SHMCBHeader))
52 #define ALIGNED_SUBCACHE_SIZE APR_ALIGN_DEFAULT(sizeof(SHMCBSubcache))
53 #define ALIGNED_INDEX_SIZE APR_ALIGN_DEFAULT(sizeof(SHMCBIndex))
56 * Header structure - the start of the shared-mem segment
59 /* Stats for cache operations */
60 unsigned long stat_stores;
61 unsigned long stat_replaced;
62 unsigned long stat_expiries;
63 unsigned long stat_scrolled;
64 unsigned long stat_retrieves_hit;
65 unsigned long stat_retrieves_miss;
66 unsigned long stat_removes_hit;
67 unsigned long stat_removes_miss;
68 /* Number of subcaches */
69 unsigned int subcache_num;
70 /* How many indexes each subcache's queue has */
71 unsigned int index_num;
72 /* How large each subcache is, including the queue and data */
73 unsigned int subcache_size;
74 /* How far into each subcache the data area is (optimisation) */
75 unsigned int subcache_data_offset;
76 /* How large the data area in each subcache is (optimisation) */
77 unsigned int subcache_data_size;
81 * Subcache structure - the start of each subcache, followed by
85 /* The start position and length of the cyclic buffer of indexes */
86 unsigned int idx_pos, idx_used;
87 /* Same for the data area */
88 unsigned int data_pos, data_used;
92 * Index structure - each subcache has an array of these
95 /* absolute time this entry expires */
97 /* location within the subcache's data area */
98 unsigned int data_pos;
99 /* size (most logic ignores this, we keep it only to minimise memcpy) */
100 unsigned int data_used;
101 /* length of the used data which contains the id */
103 /* Used to mark explicitly-removed socache entries */
104 unsigned char removed;
107 struct ap_socache_instance_t {
108 const char *data_file;
114 /* The SHM data segment is of fixed size and stores data as follows.
116 * [ SHMCBHeader | Subcaches ]
118 * The SHMCBHeader header structure stores metadata concerning the
119 * cache and the contained subcaches.
121 * Subcaches is a hash table of header->subcache_num SHMCBSubcache
122 * structures. The hash table is indexed by SHMCB_MASK(id). Each
123 * SHMCBSubcache structure has a fixed size (header->subcache_size),
124 * which is determined at creation time, and looks like the following:
126 * [ SHMCBSubcache | Indexes | Data ]
128 * Each subcache is prefixed by the SHMCBSubcache structure.
130 * The subcache's "Data" segment is a single cyclic data buffer, of
131 * total size header->subcache_data_size; data inside is referenced
132 * using byte offsets. The offset marking the beginning of the cyclic
133 * buffer is subcache->data_pos; the buffer's length is
134 * subcache->data_used.
136 * "Indexes" is an array of header->index_num SHMCBIndex structures,
137 * which is used as a cyclic queue; subcache->idx_pos gives the array
138 * index of the first in use, subcache->idx_used gives the number in
139 * use. Both ->idx_* values have a range of [0, header->index_num)
141 * Each in-use SHMCBIndex structure represents a single cached object.
142 * The ID and data segment are stored consecutively in the subcache's
143 * cyclic data buffer. The "Data" segment can thus be seen to
144 * look like this, for example
146 * offset: [ 0 1 2 3 4 5 6 ...
147 * contents:[ ID1 Data1 ID2 Data2 ID3 ...
149 * where the corresponding indices would look like:
151 * idx1 = { data_pos = 0, data_used = 3, id_len = 1, ...}
152 * idx2 = { data_pos = 3, data_used = 3, id_len = 1, ...}
156 /* This macro takes a pointer to the header and a zero-based index and returns
157 * a pointer to the corresponding subcache. */
158 #define SHMCB_SUBCACHE(pHeader, num) \
159 (SHMCBSubcache *)(((unsigned char *)(pHeader)) + \
160 ALIGNED_HEADER_SIZE + \
161 (num) * ((pHeader)->subcache_size))
163 /* This macro takes a pointer to the header and an id and returns a
164 * pointer to the corresponding subcache. */
165 #define SHMCB_MASK(pHeader, id) \
166 SHMCB_SUBCACHE((pHeader), *(id) & ((pHeader)->subcache_num - 1))
168 /* This macro takes the same params as the last, generating two outputs for use
169 * in ap_log_error(...). */
170 #define SHMCB_MASK_DBG(pHeader, id) \
171 *(id), (*(id) & ((pHeader)->subcache_num - 1))
173 /* This macro takes a pointer to a subcache and a zero-based index and returns
174 * a pointer to the corresponding SHMCBIndex. */
175 #define SHMCB_INDEX(pSubcache, num) \
176 (SHMCBIndex *)(((unsigned char *)pSubcache) + \
177 ALIGNED_SUBCACHE_SIZE + \
178 (num) * ALIGNED_INDEX_SIZE)
180 /* This macro takes a pointer to the header and a subcache and returns a
181 * pointer to the corresponding data area. */
182 #define SHMCB_DATA(pHeader, pSubcache) \
183 ((unsigned char *)(pSubcache) + (pHeader)->subcache_data_offset)
186 * Cyclic functions - assists in "wrap-around"/modulo logic
189 /* Addition modulo 'mod' */
190 #define SHMCB_CYCLIC_INCREMENT(val,inc,mod) \
191 (((val) + (inc)) % (mod))
193 /* Subtraction (or "distance between") modulo 'mod' */
194 #define SHMCB_CYCLIC_SPACE(val1,val2,mod) \
195 ((val2) >= (val1) ? ((val2) - (val1)) : \
196 ((val2) + (mod) - (val1)))
198 /* A "normal-to-cyclic" memcpy. */
/* A "normal-to-cyclic" memcpy: copy SRC_LEN bytes from the linear buffer
 * SRC into the cyclic buffer DATA (of size BUF_SIZE), starting at
 * DEST_OFFSET and wrapping around to offset 0 if the copy runs past the
 * end of the buffer.  The caller guarantees src_len <= buf_size. */
static void shmcb_cyclic_ntoc_memcpy(unsigned int buf_size, unsigned char *data,
                                     unsigned int dest_offset, const unsigned char *src,
                                     unsigned int src_len)
{
    if (dest_offset + src_len < buf_size)
        /* It can be copied all in one go */
        memcpy(data + dest_offset, src, src_len);
    else {
        /* Copy the two splits: tail of the buffer, then wrap to the front.
         * (When dest_offset + src_len == buf_size the second memcpy is a
         * harmless zero-length copy.) */
        memcpy(data + dest_offset, src, buf_size - dest_offset);
        memcpy(data, src + buf_size - dest_offset,
               src_len + dest_offset - buf_size);
    }
}
214 /* A "cyclic-to-normal" memcpy. */
/* A "cyclic-to-normal" memcpy: copy SRC_LEN bytes out of the cyclic
 * buffer DATA (of size BUF_SIZE), starting at SRC_OFFSET and wrapping
 * around to offset 0 if needed, into the linear buffer DEST.  The caller
 * guarantees src_len <= buf_size. */
static void shmcb_cyclic_cton_memcpy(unsigned int buf_size, unsigned char *dest,
                                     const unsigned char *data, unsigned int src_offset,
                                     unsigned int src_len)
{
    if (src_offset + src_len < buf_size)
        /* It can be copied all in one go */
        memcpy(dest, data + src_offset, src_len);
    else {
        /* Copy the two splits: tail of the buffer, then wrap to the front. */
        memcpy(dest, data + src_offset, buf_size - src_offset);
        memcpy(dest + buf_size - src_offset, data,
               src_len + src_offset - buf_size);
    }
}
230 /* A memcmp against a cyclic data buffer. Compares SRC of length
231 * SRC_LEN against the contents of cyclic buffer DATA (which is of
232 * size BUF_SIZE), starting at offset DEST_OFFSET. Got that? Good. */
/* A memcmp against a cyclic data buffer.  Compares SRC of length
 * SRC_LEN against the contents of cyclic buffer DATA (which is of
 * size BUF_SIZE), starting at offset DEST_OFFSET and wrapping around
 * to offset 0 if needed.  Returns 0 on equality, otherwise the result
 * of the first differing memcmp, matching memcmp() semantics. */
static int shmcb_cyclic_memcmp(unsigned int buf_size, unsigned char *data,
                               unsigned int dest_offset,
                               const unsigned char *src,
                               unsigned int src_len)
{
    if (dest_offset + src_len < buf_size)
        /* It can be compared all in one go */
        return memcmp(data + dest_offset, src, src_len);
    else {
        /* Compare the two splits: tail of the buffer first, then the
         * wrapped-around portion at the front. */
        int diff;

        diff = memcmp(data + dest_offset, src, buf_size - dest_offset);
        if (diff) {
            return diff;
        }
        return memcmp(data, src + buf_size - dest_offset,
                      src_len + dest_offset - buf_size);
    }
}
255 /* Prototypes for low-level subcache operations */
256 static void shmcb_subcache_expire(server_rec *, SHMCBHeader *, SHMCBSubcache *,
258 /* Returns zero on success, non-zero on failure. */
259 static int shmcb_subcache_store(server_rec *s, SHMCBHeader *header,
260 SHMCBSubcache *subcache,
261 unsigned char *data, unsigned int data_len,
262 const unsigned char *id, unsigned int id_len,
264 /* Returns zero on success, non-zero on failure. */
265 static int shmcb_subcache_retrieve(server_rec *, SHMCBHeader *, SHMCBSubcache *,
266 const unsigned char *id, unsigned int idlen,
267 unsigned char *data, unsigned int *datalen);
268 /* Returns zero on success, non-zero on failure. */
269 static int shmcb_subcache_remove(server_rec *, SHMCBHeader *, SHMCBSubcache *,
270 const unsigned char *, unsigned int);
272 /* Returns result of the (iterator)() call, zero is success (continue) */
273 static apr_status_t shmcb_subcache_iterate(ap_socache_instance_t *instance,
277 SHMCBSubcache *subcache,
278 ap_socache_iterator_t *iterator,
285 * High-Level "handlers" as per ssl_scache.c
286 * subcache internals are deferred to shmcb_subcache_*** functions lower down
/* Provider "create" callback: parse the optional configuration argument
 * of the form "[path][(size)]" and allocate the instance context in *context.
 * Returns NULL on success or a static/pooled error string for the admin.
 * NOTE(review): several structural lines (braces, the endptr declaration,
 * intermediate checks) are not visible in this extraction. */
289 static const char *socache_shmcb_create(ap_socache_instance_t **context,
291 apr_pool_t *tmp, apr_pool_t *p)
293 ap_socache_instance_t *ctx;
294 char *path, *cp, *cp2;
296 /* Allocate the context. */
297 *context = ctx = apr_pcalloc(p, sizeof *ctx);
/* Default segment size, used when no "(size)" suffix is configured. */
299 ctx->shm_size = 1024*512; /* 512KB */
/* Empty/absent argument: keep the defaults (anonymous shm, default size). */
301 if (!arg || *arg == '\0') {
/* Resolve the configured path relative to the runtime directory. */
306 ctx->data_file = path = ap_runtime_dir_relative(p, arg);
/* Look for a trailing "(size)" suffix after the pathname. */
308 cp = strrchr(path, '(');
309 cp2 = path + strlen(path) - 1;
313 return "Invalid argument: no closing parenthesis or cache size "
314 "missing after pathname with parenthesis";
/* Parse the numeric size; endptr is declared in lines not shown here. */
320 ctx->shm_size = strtol(cp, &endptr, 10);
322 return "Invalid argument: cache size not numerical";
/* Enforce sane lower and upper bounds for the segment size. */
325 if (ctx->shm_size < 8192) {
326 return "Invalid argument: size has to be >= 8192 bytes";
330 if (ctx->shm_size >= SHMCB_MAX_SIZE) {
331 return apr_psprintf(tmp, "Invalid argument: size has "
332 "to be < %" APR_SIZE_T_FMT " bytes on this platform",
/* A closing ')' without a matching '(' earlier in the argument. */
336 else if (cp2 >= path && *cp2 == ')') {
337 return "Invalid argument: no opening parenthesis";
/* Provider "init" callback: create the shared-memory segment (anonymous
 * if supported, else name-based under the runtime dir), size the subcache
 * layout from the avg_obj_size/avg_id_len hints, write the SHMCBHeader,
 * and reset every subcache to empty.
 * NOTE(review): some error-return lines and closing braces are not
 * visible in this extraction. */
343 static apr_status_t socache_shmcb_init(ap_socache_instance_t *ctx,
344 const char *namespace,
345 const struct ap_socache_hints *hints,
346 server_rec *s, apr_pool_t *p)
349 apr_size_t shm_segsize;
352 unsigned int num_subcache, num_idx, loop;
353 apr_size_t avg_obj_size, avg_id_len;
355 /* Create shared memory segment */
/* Derive a default per-namespace filename if none was configured. */
356 if (ctx->data_file == NULL) {
357 const char *path = apr_pstrcat(p, DEFAULT_SHMCB_PREFIX, namespace,
358 DEFAULT_SHMCB_SUFFIX, NULL);
360 ctx->data_file = ap_runtime_dir_relative(p, path);
363 /* Use anonymous shm by default, fall back on name-based. */
364 rv = apr_shm_create(&ctx->shm, ctx->shm_size, NULL, p);
365 if (APR_STATUS_IS_ENOTIMPL(rv)) {
366 /* If anon shm isn't supported, fail if no named file was
367 * configured successfully; the ap_runtime_dir_relative call
368 * above will return NULL for invalid paths. */
369 if (ctx->data_file == NULL) {
370 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00818)
371 "Could not use anonymous shm for '%s' cache",
376 /* For a name-based segment, remove it first in case of a
377 * previous unclean shutdown. */
378 apr_shm_remove(ctx->data_file, p);
380 rv = apr_shm_create(&ctx->shm, ctx->shm_size, ctx->data_file, p);
383 if (rv != APR_SUCCESS) {
384 ap_log_error(APLOG_MARK, APLOG_ERR, rv, s, APLOGNO(00819)
385 "Could not allocate shared memory segment for shmcb "
/* The segment may be larger/smaller than requested; use actual values. */
390 shm_segment = apr_shm_baseaddr_get(ctx->shm);
391 shm_segsize = apr_shm_size_get(ctx->shm);
392 if (shm_segsize < (5 * ALIGNED_HEADER_SIZE)) {
393 /* the segment is ridiculously small, bail out */
394 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00820)
395 "shared memory segment too small");
398 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00821)
399 "shmcb_init allocated %" APR_SIZE_T_FMT
400 " bytes of shared memory",
402 /* Discount the header */
403 shm_segsize -= ALIGNED_HEADER_SIZE;
404 /* Select index size based on average object size hints, if given. */
405 avg_obj_size = hints && hints->avg_obj_size ? hints->avg_obj_size : 150;
406 avg_id_len = hints && hints->avg_id_len ? hints->avg_id_len : 30;
/* Estimate total index count, then split it across subcaches so that
 * each subcache keeps at least ~2x more indexes than subcaches exist.
 * NOTE(review): num_subcache's initialisation is not visible here. */
407 num_idx = (shm_segsize) / (avg_obj_size + avg_id_len);
409 while ((num_idx / num_subcache) < (2 * num_subcache))
411 num_idx /= num_subcache;
412 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00822)
413 "for %" APR_SIZE_T_FMT " bytes (%" APR_SIZE_T_FMT
414 " including header), recommending %u subcaches, "
415 "%u indexes each", shm_segsize,
416 shm_segsize + ALIGNED_HEADER_SIZE,
417 num_subcache, num_idx);
419 /* we're still too small, bail out */
420 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00823)
421 "shared memory segment too small");
424 /* OK, we're sorted */
/* Initialise the header at the start of the segment: zero the stats,
 * then record the computed geometry. */
425 ctx->header = header = shm_segment;
426 header->stat_stores = 0;
427 header->stat_replaced = 0;
428 header->stat_expiries = 0;
429 header->stat_scrolled = 0;
430 header->stat_retrieves_hit = 0;
431 header->stat_retrieves_miss = 0;
432 header->stat_removes_hit = 0;
433 header->stat_removes_miss = 0;
434 header->subcache_num = num_subcache;
435 /* Convert the subcache size (in bytes) to a value that is suitable for
436 * structure alignment on the host platform, by rounding down if necessary. */
437 header->subcache_size = (size_t)(shm_segsize / num_subcache);
438 if (header->subcache_size != APR_ALIGN_DEFAULT(header->subcache_size)) {
439 header->subcache_size = APR_ALIGN_DEFAULT(header->subcache_size) -
440 APR_ALIGN_DEFAULT(1);
/* The data area follows the subcache struct plus its index array. */
442 header->subcache_data_offset = ALIGNED_SUBCACHE_SIZE +
443 num_idx * ALIGNED_INDEX_SIZE;
444 header->subcache_data_size = header->subcache_size -
445 header->subcache_data_offset;
446 header->index_num = num_idx;
448 /* Output trace info */
449 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00824)
450 "shmcb_init_memory choices follow");
451 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00825)
452 "subcache_num = %u", header->subcache_num);
453 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00826)
454 "subcache_size = %u", header->subcache_size);
455 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00827)
456 "subcache_data_offset = %u", header->subcache_data_offset);
457 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00828)
458 "subcache_data_size = %u", header->subcache_data_size);
459 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00829)
460 "index_num = %u", header->index_num);
461 /* The header is done, make the caches empty */
462 for (loop = 0; loop < header->subcache_num; loop++) {
463 SHMCBSubcache *subcache = SHMCB_SUBCACHE(header, loop);
464 subcache->idx_pos = subcache->idx_used = 0;
465 subcache->data_pos = subcache->data_used = 0;
467 ap_log_error(APLOG_MARK, APLOG_INFO, 0, s, APLOGNO(00830)
468 "Shared memory socache initialised");
474 static void socache_shmcb_destroy(ap_socache_instance_t *ctx, server_rec *s)
476 if (ctx && ctx->shm) {
477 apr_shm_destroy(ctx->shm);
/* Provider "store" callback: hash the id to a subcache, first remove any
 * existing entry with the same id (so a store acts as a replace), then
 * insert the encoded data with the given expiry.
 * NOTE(review): early-return and error-return lines are not visible in
 * this extraction. */
482 static apr_status_t socache_shmcb_store(ap_socache_instance_t *ctx,
483 server_rec *s, const unsigned char *id,
484 unsigned int idlen, apr_time_t expiry,
485 unsigned char *encoded,
486 unsigned int len_encoded,
489 SHMCBHeader *header = ctx->header;
/* SHMCB_MASK selects the subcache from the first byte of the id. */
490 SHMCBSubcache *subcache = SHMCB_MASK(header, id);
493 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00831)
494 "socache_shmcb_store (0x%02x -> subcache %d)",
495 SHMCB_MASK_DBG(header, id));
496 /* XXX: Says who? Why shouldn't this be acceptable, or padded if not? */
498 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00832) "unusably short id provided "
499 "(%u bytes)", idlen);
/* Remove first so the store below counts as a replace when it existed. */
502 tryreplace = shmcb_subcache_remove(s, header, subcache, id, idlen);
503 if (shmcb_subcache_store(s, header, subcache, encoded,
504 len_encoded, id, idlen, expiry)) {
505 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00833)
506 "can't store an socache entry!");
/* tryreplace == 0 means an old entry was removed: count as replacement. */
509 if (tryreplace == 0) {
510 header->stat_replaced++;
513 header->stat_stores++;
515 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00834)
516 "leaving socache_shmcb_store successfully");
/* Provider "retrieve" callback: look up the id in its subcache and copy
 * the stored data into dest/destlen.  Returns APR_SUCCESS on a hit,
 * APR_NOTFOUND on a miss (including expired entries). */
520 static apr_status_t socache_shmcb_retrieve(ap_socache_instance_t *ctx,
522 const unsigned char *id, unsigned int idlen,
523 unsigned char *dest, unsigned int *destlen,
526 SHMCBHeader *header = ctx->header;
527 SHMCBSubcache *subcache = SHMCB_MASK(header, id);
530 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00835)
531 "socache_shmcb_retrieve (0x%02x -> subcache %d)",
532 SHMCB_MASK_DBG(header, id));
534 /* Get the entry corresponding to the id, if it exists. */
535 rv = shmcb_subcache_retrieve(s, header, subcache, id, idlen,
/* Update the hit/miss statistics held in the shared header. */
538 header->stat_retrieves_hit++;
540 header->stat_retrieves_miss++;
541 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00836)
542 "leaving socache_shmcb_retrieve successfully");
544 return rv == 0 ? APR_SUCCESS : APR_NOTFOUND;
/* Provider "remove" callback: mark the entry with the given id as removed
 * in its subcache and update the hit/miss removal statistics.
 * NOTE(review): the short-id early return and the final status return are
 * not visible in this extraction. */
547 static apr_status_t socache_shmcb_remove(ap_socache_instance_t *ctx,
548 server_rec *s, const unsigned char *id,
549 unsigned int idlen, apr_pool_t *p)
551 SHMCBHeader *header = ctx->header;
552 SHMCBSubcache *subcache = SHMCB_MASK(header, id);
555 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00837)
556 "socache_shmcb_remove (0x%02x -> subcache %d)",
557 SHMCB_MASK_DBG(header, id));
/* Ids shorter than the subcache mask width cannot be hashed reliably. */
559 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00838) "unusably short id provided "
560 "(%u bytes)", idlen);
/* shmcb_subcache_remove returns 0 when a matching entry was found. */
563 if (shmcb_subcache_remove(s, header, subcache, id, idlen) == 0) {
564 header->stat_removes_hit++;
567 header->stat_removes_miss++;
570 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00839)
571 "leaving socache_shmcb_remove successfully");
/* Provider "status" callback: walk every subcache (expiring stale entries
 * as a side effect), aggregate usage and expiry statistics, and render
 * them either as HTML (default) or as machine-readable key/value lines
 * when AP_STATUS_SHORT is set. */
576 static void socache_shmcb_status(ap_socache_instance_t *ctx,
577 request_rec *r, int flags)
579 server_rec *s = r->server;
580 SHMCBHeader *header = ctx->header;
581 unsigned int loop, total = 0, cache_total = 0, non_empty_subcaches = 0;
582 apr_time_t idx_expiry, min_expiry = 0, max_expiry = 0;
583 apr_time_t now = apr_time_now();
584 double expiry_total = 0;
585 int index_pct, cache_pct;
587 AP_DEBUG_ASSERT(header->subcache_num > 0);
588 ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00840) "inside shmcb_status");
589 /* Perform the iteration inside the mutex to avoid corruption or invalid
590 * pointer arithmetic. The rest of our logic uses read-only header data so
591 * doesn't need the lock. */
592 /* Iterate over the subcaches */
593 for (loop = 0; loop < header->subcache_num; loop++) {
594 SHMCBSubcache *subcache = SHMCB_SUBCACHE(header, loop);
/* Expire first so the numbers reflect only live entries. */
595 shmcb_subcache_expire(s, header, subcache, now);
596 total += subcache->idx_used;
597 cache_total += subcache->data_used;
598 if (subcache->idx_used) {
/* The entry at idx_pos is the oldest in this subcache; its expiry
 * time feeds the min/avg/max "oldest entry" statistics below. */
599 SHMCBIndex *idx = SHMCB_INDEX(subcache, subcache->idx_pos);
600 non_empty_subcaches++;
601 idx_expiry = idx->expires;
602 expiry_total += (double)idx_expiry;
603 max_expiry = ((idx_expiry > max_expiry) ? idx_expiry : max_expiry);
605 min_expiry = idx_expiry;
607 min_expiry = ((idx_expiry < min_expiry) ? idx_expiry : min_expiry);
/* Overall utilisation, as a percentage of total capacity. */
610 index_pct = (100 * total) / (header->index_num *
611 header->subcache_num);
612 cache_pct = (100 * cache_total) / (header->subcache_data_size *
613 header->subcache_num);
614 /* Generate Output */
615 if (!(flags & AP_STATUS_SHORT)) {
616 ap_rprintf(r, "cache type: <b>SHMCB</b>, shared memory: <b>%" APR_SIZE_T_FMT "</b> "
617 "bytes, current entries: <b>%d</b><br>",
618 ctx->shm_size, total);
619 ap_rprintf(r, "subcaches: <b>%d</b>, indexes per subcache: <b>%d</b><br>",
620 header->subcache_num, header->index_num);
621 if (non_empty_subcaches) {
622 apr_time_t average_expiry = (apr_time_t)(expiry_total / (double)non_empty_subcaches);
623 ap_rprintf(r, "time left on oldest entries' objects: ");
624 if (now < average_expiry)
625 ap_rprintf(r, "avg: <b>%d</b> seconds, (range: %d...%d)<br>",
626 (int)apr_time_sec(average_expiry - now),
627 (int)apr_time_sec(min_expiry - now),
628 (int)apr_time_sec(max_expiry - now));
630 ap_rprintf(r, "expiry_threshold: <b>Calculation error!</b><br>");
633 ap_rprintf(r, "index usage: <b>%d%%</b>, cache usage: <b>%d%%</b><br>",
634 index_pct, cache_pct);
635 ap_rprintf(r, "total entries stored since starting: <b>%lu</b><br>",
636 header->stat_stores);
637 ap_rprintf(r, "total entries replaced since starting: <b>%lu</b><br>",
638 header->stat_replaced);
639 ap_rprintf(r, "total entries expired since starting: <b>%lu</b><br>",
640 header->stat_expiries);
641 ap_rprintf(r, "total (pre-expiry) entries scrolled out of the cache: "
642 "<b>%lu</b><br>", header->stat_scrolled);
643 ap_rprintf(r, "total retrieves since starting: <b>%lu</b> hit, "
644 "<b>%lu</b> miss<br>", header->stat_retrieves_hit,
645 header->stat_retrieves_miss);
646 ap_rprintf(r, "total removes since starting: <b>%lu</b> hit, "
647 "<b>%lu</b> miss<br>", header->stat_removes_hit,
648 header->stat_removes_miss);
/* AP_STATUS_SHORT: same data as machine-readable key/value lines. */
651 ap_rputs("CacheType: SHMCB\n", r);
652 ap_rprintf(r, "CacheSharedMemory: %" APR_SIZE_T_FMT "\n",
654 ap_rprintf(r, "CacheCurrentEntries: %d\n", total);
655 ap_rprintf(r, "CacheSubcaches: %d\n", header->subcache_num);
656 ap_rprintf(r, "CacheIndexesPerSubcaches: %d\n", header->index_num);
657 if (non_empty_subcaches) {
658 apr_time_t average_expiry = (apr_time_t)(expiry_total / (double)non_empty_subcaches);
659 if (now < average_expiry) {
660 ap_rprintf(r, "CacheTimeLeftOldestAvg: %d\n", (int)apr_time_sec(average_expiry - now));
661 ap_rprintf(r, "CacheTimeLeftOldestMin: %d\n", (int)apr_time_sec(min_expiry - now));
662 ap_rprintf(r, "CacheTimeLeftOldestMax: %d\n", (int)apr_time_sec(max_expiry - now));
666 ap_rprintf(r, "CacheIndexUsage: %d%%\n", index_pct);
667 ap_rprintf(r, "CacheUsage: %d%%\n", cache_pct);
668 ap_rprintf(r, "CacheStoreCount: %lu\n", header->stat_stores);
669 ap_rprintf(r, "CacheReplaceCount: %lu\n", header->stat_replaced);
670 ap_rprintf(r, "CacheExpireCount: %lu\n", header->stat_expiries);
671 ap_rprintf(r, "CacheDiscardCount: %lu\n", header->stat_scrolled);
672 ap_rprintf(r, "CacheRetrieveHitCount: %lu\n", header->stat_retrieves_hit);
673 ap_rprintf(r, "CacheRetrieveMissCount: %lu\n", header->stat_retrieves_miss);
674 ap_rprintf(r, "CacheRemoveHitCount: %lu\n", header->stat_removes_hit);
675 ap_rprintf(r, "CacheRemoveMissCount: %lu\n", header->stat_removes_miss);
677 ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(00841) "leaving shmcb_status");
/* Provider "iterate" callback: visit every live entry in every subcache,
 * invoking the caller's iterator for each.  A scratch buffer (buf/buflen)
 * is shared across subcaches and grown on demand by the subcache-level
 * iterator.  Stops at the first non-APR_SUCCESS iterator result. */
680 static apr_status_t socache_shmcb_iterate(ap_socache_instance_t *instance,
681 server_rec *s, void *userctx,
682 ap_socache_iterator_t *iterator,
685 SHMCBHeader *header = instance->header;
687 apr_time_t now = apr_time_now();
688 apr_status_t rv = APR_SUCCESS;
689 apr_size_t buflen = 0;
690 unsigned char *buf = NULL;
692 /* Perform the iteration inside the mutex to avoid corruption or invalid
693 * pointer arithmetic. The rest of our logic uses read-only header data so
694 * doesn't need the lock. */
695 /* Iterate over the subcaches */
696 for (loop = 0; loop < header->subcache_num && rv == APR_SUCCESS; loop++) {
697 SHMCBSubcache *subcache = SHMCB_SUBCACHE(header, loop);
698 rv = shmcb_subcache_iterate(instance, s, userctx, header, subcache,
699 iterator, &buf, &buflen, pool, now);
705 * Subcache-level cache operations
/* Reclaim space in a subcache by dropping the leading run of entries that
 * are either explicitly removed or expired (expires <= now).  Because the
 * index queue and data buffer are cyclic and ordered, only a prefix can
 * be reclaimed; the scan stops at the first live entry.
 * NOTE(review): the removed-flag test and loop/counter increments are in
 * lines not visible in this extraction. */
708 static void shmcb_subcache_expire(server_rec *s, SHMCBHeader *header,
709 SHMCBSubcache *subcache, apr_time_t now)
711 unsigned int loop = 0, freed = 0, expired = 0;
712 unsigned int new_idx_pos = subcache->idx_pos;
713 SHMCBIndex *idx = NULL;
715 while (loop < subcache->idx_used) {
716 idx = SHMCB_INDEX(subcache, new_idx_pos);
719 else if (idx->expires <= now)
722 /* not removed and not expired yet, we're done iterating */
725 new_idx_pos = SHMCB_CYCLIC_INCREMENT(new_idx_pos, 1, header->index_num);
730 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00842)
731 "expiring %u and reclaiming %u removed socache entries",
733 if (loop == subcache->idx_used) {
734 /* We're expiring everything, piece of cake */
735 subcache->idx_used = 0;
736 subcache->data_used = 0;
738 /* There remain other indexes, so we can use idx to adjust 'data' */
/* 'diff' is the number of data bytes consumed by the reclaimed prefix. */
739 unsigned int diff = SHMCB_CYCLIC_SPACE(subcache->data_pos,
741 header->subcache_data_size);
742 /* Adjust the indexes */
743 subcache->idx_used -= loop;
744 subcache->idx_pos = new_idx_pos;
745 /* Adjust the data area */
746 subcache->data_used -= diff;
747 subcache->data_pos = idx->data_pos;
749 header->stat_expiries += expired;
750 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00843)
751 "we now have %u socache entries", subcache->idx_used);
/* Insert an (id, data) pair into a subcache, always appending at the end
 * of the cyclic queue.  Expires stale entries first, then force-evicts
 * oldest entries until there is room for id_len + data_len bytes and a
 * free index slot.  Returns 0 on success, non-zero on failure.
 * NOTE(review): some declarations (idx, idx2), the do-loop opener, and
 * the final return are in lines not visible in this extraction. */
754 static int shmcb_subcache_store(server_rec *s, SHMCBHeader *header,
755 SHMCBSubcache *subcache,
756 unsigned char *data, unsigned int data_len,
757 const unsigned char *id, unsigned int id_len,
760 unsigned int data_offset, new_idx, id_offset;
/* Entries store the id immediately followed by the data. */
762 unsigned int total_len = id_len + data_len;
764 /* Sanity check the input */
765 if (total_len > header->subcache_data_size) {
766 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(00844)
767 "inserting socache entry larger (%d) than subcache data area (%d)",
768 total_len, header->subcache_data_size);
772 /* First reclaim space from removed and expired records. */
773 shmcb_subcache_expire(s, header, subcache, apr_time_now());
775 /* Loop until there is enough space to insert
776 * XXX: This should first compress out-of-order expiries and
777 * removed records, and then force-remove oldest-first
779 if (header->subcache_data_size - subcache->data_used < total_len
780 || subcache->idx_used == header->index_num) {
781 unsigned int loop = 0;
/* idx tracks the oldest entry, which is evicted each pass below. */
783 idx = SHMCB_INDEX(subcache, subcache->idx_pos);
784 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00845)
785 "about to force-expire, subcache: idx_used=%d, "
786 "data_used=%d", subcache->idx_used, subcache->data_used);
790 /* Adjust the indexes by one */
791 subcache->idx_pos = SHMCB_CYCLIC_INCREMENT(subcache->idx_pos, 1,
793 subcache->idx_used--;
794 if (!subcache->idx_used) {
795 /* There's nothing left */
796 subcache->data_used = 0;
799 /* Adjust the data */
/* The evicted entry's size is the gap between its data_pos and the
 * next entry's data_pos, measured cyclically. */
800 idx2 = SHMCB_INDEX(subcache, subcache->idx_pos);
801 subcache->data_used -= SHMCB_CYCLIC_SPACE(idx->data_pos, idx2->data_pos,
802 header->subcache_data_size);
803 subcache->data_pos = idx2->data_pos;
805 header->stat_scrolled++;
809 } while (header->subcache_data_size - subcache->data_used < total_len);
811 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00846)
812 "finished force-expire, subcache: idx_used=%d, "
813 "data_used=%d", subcache->idx_used, subcache->data_used);
816 /* HERE WE ASSUME THAT THE NEW ENTRY SHOULD GO ON THE END! I'M NOT
817 * CHECKING WHETHER IT SHOULD BE GENUINELY "INSERTED" SOMEWHERE.
819 * We aught to fix that. httpd (never mind third party modules)
820 * does not promise to perform any processing in date order
821 * (c.f. FAQ "My log entries are not in date order!")
/* Append the id at the current end of the cyclic data buffer... */
824 id_offset = SHMCB_CYCLIC_INCREMENT(subcache->data_pos, subcache->data_used,
825 header->subcache_data_size);
826 shmcb_cyclic_ntoc_memcpy(header->subcache_data_size,
827 SHMCB_DATA(header, subcache), id_offset,
829 subcache->data_used += id_len;
830 /* Insert the data */
831 data_offset = SHMCB_CYCLIC_INCREMENT(subcache->data_pos, subcache->data_used,
832 header->subcache_data_size);
833 shmcb_cyclic_ntoc_memcpy(header->subcache_data_size,
834 SHMCB_DATA(header, subcache), data_offset,
836 subcache->data_used += data_len;
837 /* Insert the index */
838 new_idx = SHMCB_CYCLIC_INCREMENT(subcache->idx_pos, subcache->idx_used,
840 idx = SHMCB_INDEX(subcache, new_idx);
841 idx->expires = expiry;
842 idx->data_pos = id_offset;
843 idx->data_used = total_len;
844 idx->id_len = id_len;
846 subcache->idx_used++;
847 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00847)
848 "insert happened at idx=%d, data=(%u:%u)", new_idx,
849 id_offset, data_offset);
850 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00848)
851 "finished insert, subcache: idx_pos/idx_used=%d/%d, "
852 "data_pos/data_used=%d/%d",
853 subcache->idx_pos, subcache->idx_used,
854 subcache->data_pos, subcache->data_used);
/* Scan a subcache's index queue for an entry matching (id, idlen).  On a
 * live match, copy the data portion into dest and set *destlen; expired
 * matches are discarded and counted as misses.  Returns 0 on success,
 * non-zero when no usable entry is found.
 * NOTE(review): the 'pos' declaration, success/failure returns and some
 * condition lines are not visible in this extraction. */
858 static int shmcb_subcache_retrieve(server_rec *s, SHMCBHeader *header,
859 SHMCBSubcache *subcache,
860 const unsigned char *id, unsigned int idlen,
861 unsigned char *dest, unsigned int *destlen)
864 unsigned int loop = 0;
865 apr_time_t now = apr_time_now();
/* Walk from the oldest entry forward through the cyclic index queue. */
867 pos = subcache->idx_pos;
869 while (loop < subcache->idx_used) {
870 SHMCBIndex *idx = SHMCB_INDEX(subcache, pos);
872 /* Only consider 'idx' if the id matches, and the "removed"
873 * flag isn't set, and the record is not expired.
874 * Check the data length too to avoid a buffer overflow
875 * in case of corruption, which should be impossible,
876 * but it's cheap to be safe. */
878 && idx->id_len == idlen
879 && (idx->data_used - idx->id_len) <= *destlen
880 && shmcb_cyclic_memcmp(header->subcache_data_size,
881 SHMCB_DATA(header, subcache),
882 idx->data_pos, id, idx->id_len) == 0) {
883 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00849)
884 "match at idx=%d, data=%d", pos, idx->data_pos);
885 if (idx->expires > now) {
886 unsigned int data_offset;
888 /* Find the offset of the data segment, after the id */
889 data_offset = SHMCB_CYCLIC_INCREMENT(idx->data_pos,
891 header->subcache_data_size);
893 *destlen = idx->data_used - idx->id_len;
895 /* Copy out the data */
896 shmcb_cyclic_cton_memcpy(header->subcache_data_size,
897 dest, SHMCB_DATA(header, subcache),
898 data_offset, *destlen);
903 /* Already stale, quietly remove and treat as not-found */
905 header->stat_expiries++;
906 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00850)
907 "shmcb_subcache_retrieve discarding expired entry");
913 pos = SHMCB_CYCLIC_INCREMENT(pos, 1, header->index_num);
916 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00851)
917 "shmcb_subcache_retrieve found no match");
/* Scan a subcache for an entry matching (id, idlen) and mark it removed;
 * the space is reclaimed later by shmcb_subcache_expire.  Returns 0 when
 * a matching entry was found, -1 otherwise.
 * NOTE(review): the 'pos' declaration, the removed-flag assignment and
 * the success return are in lines not visible in this extraction. */
921 static int shmcb_subcache_remove(server_rec *s, SHMCBHeader *header,
922 SHMCBSubcache *subcache,
923 const unsigned char *id,
927 unsigned int loop = 0;
/* Walk from the oldest entry forward through the cyclic index queue. */
929 pos = subcache->idx_pos;
930 while (loop < subcache->idx_used) {
931 SHMCBIndex *idx = SHMCB_INDEX(subcache, pos);
933 /* Only consider 'idx' if the id matches, and the "removed"
935 if (!idx->removed && idx->id_len == idlen
936 && shmcb_cyclic_memcmp(header->subcache_data_size,
937 SHMCB_DATA(header, subcache),
938 idx->data_pos, id, idx->id_len) == 0) {
939 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00852)
940 "possible match at idx=%d, data=%d", pos, idx->data_pos);
942 /* Found the matching entry, remove it quietly. */
944 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00853)
945 "shmcb_subcache_remove removing matching entry");
950 pos = SHMCB_CYCLIC_INCREMENT(pos, 1, header->index_num);
953 return -1; /* failure */
/* Walk one subcache, invoking the caller's iterator for each live entry.
 * Entries are copied out of the cyclic buffer into a caller-shared,
 * pool-allocated scratch buffer (*buf/*buf_len) with NUL terminators
 * added; expired entries are discarded on the way.  Returns the first
 * non-APR_SUCCESS iterator result, else APR_SUCCESS.
 * NOTE(review): some declarations ('pos', 'dest', 'buf_req', 'rv') and
 * loop-increment lines are not visible in this extraction. */
957 static apr_status_t shmcb_subcache_iterate(ap_socache_instance_t *instance,
961 SHMCBSubcache *subcache,
962 ap_socache_iterator_t *iterator,
969 unsigned int loop = 0;
/* Walk from the oldest entry forward through the cyclic index queue. */
972 pos = subcache->idx_pos;
973 while (loop < subcache->idx_used) {
974 SHMCBIndex *idx = SHMCB_INDEX(subcache, pos);
976 /* Only consider 'idx' if the "removed" flag isn't set. */
979 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00854)
980 "iterating idx=%d, data=%d", pos, idx->data_pos);
981 if (idx->expires > now) {
/* The shared buffer holds the id first, then the data. */
982 unsigned char *id = *buf;
984 unsigned int data_offset, dest_len;
987 /* Find the offset of the data segment, after the id */
988 data_offset = SHMCB_CYCLIC_INCREMENT(idx->data_pos,
990 header->subcache_data_size);
992 dest_len = idx->data_used - idx->id_len;
/* Required buffer: aligned room for id + NUL and data + NUL. */
994 buf_req = APR_ALIGN_DEFAULT(idx->id_len + 1)
995 + APR_ALIGN_DEFAULT(dest_len + 1);
997 if (buf_req > *buf_len) {
998 /* Grow to ~150% of this buffer requirement on resize
999 * always using APR_ALIGN_DEFAULT sized pages
1001 *buf_len = buf_req + APR_ALIGN_DEFAULT(buf_req / 2);
1002 *buf = apr_palloc(pool, *buf_len);
1006 dest = *buf + APR_ALIGN_DEFAULT(idx->id_len + 1);
1008 /* Copy out the data, because it's potentially cyclic */
1009 shmcb_cyclic_cton_memcpy(header->subcache_data_size, id,
1010 SHMCB_DATA(header, subcache),
1011 idx->data_pos, idx->id_len);
1012 id[idx->id_len] = '\0';
1014 shmcb_cyclic_cton_memcpy(header->subcache_data_size, dest,
1015 SHMCB_DATA(header, subcache),
1016 data_offset, dest_len);
1017 dest[dest_len] = '\0';
1019 rv = iterator(instance, s, userctx, id, idx->id_len,
1020 dest, dest_len, pool);
1021 ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(00855)
1022 "shmcb entry iterated");
/* Propagate the first iterator error/stop request to the caller. */
1023 if (rv != APR_SUCCESS)
1027 /* Already stale, quietly remove and treat as not-found */
1029 header->stat_expiries++;
1030 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(00856)
1031 "shmcb_subcache_iterate discarding expired entry");
1036 pos = SHMCB_CYCLIC_INCREMENT(pos, 1, header->index_num);
/* The shmcb provider vtable registered with the socache framework.
 * AP_SOCACHE_FLAG_NOTMPSAFE: callers must serialise access themselves.
 * NOTE(review): the init-callback entry appears in a line not visible in
 * this extraction. */
1042 static const ap_socache_provider_t socache_shmcb = {
1044 AP_SOCACHE_FLAG_NOTMPSAFE,
1045 socache_shmcb_create,
1047 socache_shmcb_destroy,
1048 socache_shmcb_store,
1049 socache_shmcb_retrieve,
1050 socache_shmcb_remove,
1051 socache_shmcb_status,
1052 socache_shmcb_iterate
/* Module hook registration: expose the provider both as "shmcb" and as
 * the default socache provider name.
 * NOTE(review): the provider-pointer arguments to ap_register_provider
 * appear in lines not visible in this extraction. */
1055 static void register_hooks(apr_pool_t *p)
1057 ap_register_provider(p, AP_SOCACHE_PROVIDER_GROUP, "shmcb",
1058 AP_SOCACHE_PROVIDER_VERSION,
1061 /* Also register shmcb under the default provider name. */
1062 ap_register_provider(p, AP_SOCACHE_PROVIDER_GROUP,
1063 AP_SOCACHE_DEFAULT_PROVIDER,
1064 AP_SOCACHE_PROVIDER_VERSION,
1068 AP_DECLARE_MODULE(socache_shmcb) = {
1069 STANDARD20_MODULE_STUFF,
1070 NULL, NULL, NULL, NULL, NULL,