1 /* Licensed to the Apache Software Foundation (ASF) under one or more
2 * contributor license agreements. See the NOTICE file distributed with
3 * this work for additional information regarding copyright ownership.
4 * The ASF licenses this file to You under the Apache License, Version 2.0
5 * (the "License"); you may not use this file except in compliance with
6 * the License. You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include "http_request.h"
20 #include "http_protocol.h"
21 #include "http_config.h"
24 #include "apr_strings.h"
27 #define APR_WANT_STRFUNC
29 #include "apr_general.h"
31 #include "ap_socache.h"
33 #define SHMCB_MAX_SIZE (64 * 1024 * 1024)
35 /* Check for definition of DEFAULT_REL_RUNTIMEDIR */
36 #ifndef DEFAULT_REL_RUNTIMEDIR
37 #define DEFAULT_SHMCB_PREFIX "logs/socache-shmcb-"
39 #define DEFAULT_SHMCB_PREFIX DEFAULT_REL_RUNTIMEDIR "/socache-shmcb-"
42 #define DEFAULT_SHMCB_SUFFIX ".cache"
45 * Header structure - the start of the shared-mem segment
48 /* Stats for cache operations */
49 unsigned long stat_stores;
50 unsigned long stat_expiries;
51 unsigned long stat_scrolled;
52 unsigned long stat_retrieves_hit;
53 unsigned long stat_retrieves_miss;
54 unsigned long stat_removes_hit;
55 unsigned long stat_removes_miss;
56 /* Number of subcaches */
57 unsigned int subcache_num;
58 /* How many indexes each subcache's queue has */
59 unsigned int index_num;
60 /* How large each subcache is, including the queue and data */
61 unsigned int subcache_size;
62 /* How far into each subcache the data area is (optimisation) */
63 unsigned int subcache_data_offset;
64 /* How large the data area in each subcache is (optimisation) */
65 unsigned int subcache_data_size;
69 * Subcache structure - the start of each subcache, followed by
73 /* The start position and length of the cyclic buffer of indexes */
74 unsigned int idx_pos, idx_used;
75 /* Same for the data area */
76 unsigned int data_pos, data_used;
80 * Index structure - each subcache has an array of these
83 /* absolute time this entry expires */
85 /* location within the subcache's data area */
86 unsigned int data_pos;
87 /* size (most logic ignores this, we keep it only to minimise memcpy) */
88 unsigned int data_used;
89 /* length of the used data which contains the id */
91 /* Used to mark explicitly-removed socache entries */
92 unsigned char removed;
95 struct ap_socache_instance_t {
96 const char *data_file;
102 /* The SHM data segment is of fixed size and stores data as follows.
104 * [ SHMCBHeader | Subcaches ]
106 * The SHMCBHeader header structure stores metadata concerning the
107 * cache and the contained subcaches.
109 * Subcaches is a hash table of header->subcache_num SHMCBSubcache
110 * structures. The hash table is indexed by SHMCB_MASK(id). Each
111 * SHMCBSubcache structure has a fixed size (header->subcache_size),
112 * which is determined at creation time, and looks like the following:
114 * [ SHMCBSubcache | Indexes | Data ]
116 * Each subcache is prefixed by the SHMCBSubcache structure.
118 * The subcache's "Data" segment is a single cyclic data buffer, of
119 * total size header->subcache_data_size; data inside is referenced
120 * using byte offsets. The offset marking the beginning of the cyclic
121 * buffer is subcache->data_pos; the buffer's length is
122 * subcache->data_used.
124 * "Indexes" is an array of header->index_num SHMCBIndex structures,
125 * which is used as a cyclic queue; subcache->idx_pos gives the array
126 * index of the first in use, subcache->idx_used gives the number in
127 * use. Both ->idx_* values have a range of [0, header->index_num)
129 * Each in-use SHMCBIndex structure represents a single cached object.
130 * The ID and data segment are stored consecutively in the subcache's
131 * cyclic data buffer. The "Data" segment can thus be seen to
132 * look like this, for example
134 * offset: [ 0 1 2 3 4 5 6 ...
135 * contents:[ ID1 Data1 ID2 Data2 ID3 ...
137 * where the corresponding indices would look like:
139 * idx1 = { data_pos = 0, data_used = 3, id_len = 1, ...}
140 * idx2 = { data_pos = 3, data_used = 3, id_len = 1, ...}
144 /* This macro takes a pointer to the header and a zero-based index and returns
145 * a pointer to the corresponding subcache. */
146 #define SHMCB_SUBCACHE(pHeader, num) \
147 (SHMCBSubcache *)(((unsigned char *)(pHeader)) + \
148 sizeof(SHMCBHeader) + \
149 (num) * ((pHeader)->subcache_size))
151 /* This macro takes a pointer to the header and an id and returns a
152 * pointer to the corresponding subcache. */
153 #define SHMCB_MASK(pHeader, id) \
154 SHMCB_SUBCACHE((pHeader), *(id) & ((pHeader)->subcache_num - 1))
156 /* This macro takes the same params as the last, generating two outputs for use
157 * in ap_log_error(...). */
158 #define SHMCB_MASK_DBG(pHeader, id) \
159 *(id), (*(id) & ((pHeader)->subcache_num - 1))
161 /* This macro takes a pointer to a subcache and a zero-based index and returns
162 * a pointer to the corresponding SHMCBIndex. */
163 #define SHMCB_INDEX(pSubcache, num) \
164 ((SHMCBIndex *)(((unsigned char *)pSubcache) + \
165 sizeof(SHMCBSubcache)) + num)
167 /* This macro takes a pointer to the header and a subcache and returns a
168 * pointer to the corresponding data area. */
169 #define SHMCB_DATA(pHeader, pSubcache) \
170 ((unsigned char *)(pSubcache) + (pHeader)->subcache_data_offset)
173 * Cyclic functions - assists in "wrap-around"/modulo logic
176 /* Addition modulo 'mod' */
177 #define SHMCB_CYCLIC_INCREMENT(val,inc,mod) \
178 (((val) + (inc)) % (mod))
180 /* Subtraction (or "distance between") modulo 'mod' */
181 #define SHMCB_CYCLIC_SPACE(val1,val2,mod) \
182 ((val2) >= (val1) ? ((val2) - (val1)) : \
183 ((val2) + (mod) - (val1)))
185 /* A "normal-to-cyclic" memcpy. */
186 static void shmcb_cyclic_ntoc_memcpy(unsigned int buf_size, unsigned char *data,
187 unsigned int dest_offset, const unsigned char *src,
188 unsigned int src_len)
190 if (dest_offset + src_len < buf_size)
191 /* It be copied all in one go */
192 memcpy(data + dest_offset, src, src_len);
194 /* Copy the two splits */
195 memcpy(data + dest_offset, src, buf_size - dest_offset);
196 memcpy(data, src + buf_size - dest_offset,
197 src_len + dest_offset - buf_size);
201 /* A "cyclic-to-normal" memcpy. */static void shmcb_cyclic_cton_memcpy(unsigned int buf_size, unsigned char *dest,
202 const unsigned char *data, unsigned int src_offset,
203 unsigned int src_len)
205 if (src_offset + src_len < buf_size)
206 /* It be copied all in one go */
207 memcpy(dest, data + src_offset, src_len);
209 /* Copy the two splits */
210 memcpy(dest, data + src_offset, buf_size - src_offset);
211 memcpy(dest + buf_size - src_offset, data,
212 src_len + src_offset - buf_size);
216 /* A memcmp against a cyclic data buffer. Compares SRC of length
217 * SRC_LEN against the contents of cyclic buffer DATA (which is of
218 * size BUF_SIZE), starting at offset DEST_OFFSET. Got that? Good. */
219 static int shmcb_cyclic_memcmp(unsigned int buf_size, unsigned char *data,
220 unsigned int dest_offset,
221 const unsigned char *src,
222 unsigned int src_len)
224 if (dest_offset + src_len < buf_size)
225 /* It be compared all in one go */
226 return memcmp(data + dest_offset, src, src_len);
228 /* Compare the two splits */
231 diff = memcmp(data + dest_offset, src, buf_size - dest_offset);
235 return memcmp(data, src + buf_size - dest_offset,
236 src_len + dest_offset - buf_size);
241 /* Prototypes for low-level subcache operations */
242 static void shmcb_subcache_expire(server_rec *, SHMCBHeader *, SHMCBSubcache *,
244 /* Returns zero on success, non-zero on failure. */
245 static int shmcb_subcache_store(server_rec *s, SHMCBHeader *header,
246 SHMCBSubcache *subcache,
247 unsigned char *data, unsigned int data_len,
248 const unsigned char *id, unsigned int id_len,
250 /* Returns zero on success, non-zero on failure. */
251 static int shmcb_subcache_retrieve(server_rec *, SHMCBHeader *, SHMCBSubcache *,
252 const unsigned char *id, unsigned int idlen,
253 unsigned char *data, unsigned int *datalen);
254 /* Returns zero on success, non-zero on failure. */
255 static int shmcb_subcache_remove(server_rec *, SHMCBHeader *, SHMCBSubcache *,
256 const unsigned char *, unsigned int);
258 /* Returns result of the (iterator)() call, zero is success (continue) */
259 static apr_status_t shmcb_subcache_iterate(ap_socache_instance_t *instance,
262 SHMCBSubcache *subcache,
263 ap_socache_iterator_t *iterator,
270 * High-Level "handlers" as per ssl_scache.c
271 * subcache internals are deferred to shmcb_subcache_*** functions lower down
274 static const char *socache_shmcb_create(ap_socache_instance_t **context,
276 apr_pool_t *tmp, apr_pool_t *p)
278 ap_socache_instance_t *ctx;
279 char *path, *cp, *cp2;
281 /* Allocate the context. */
282 *context = ctx = apr_pcalloc(p, sizeof *ctx);
284 ctx->shm_size = 1024*512; /* 512KB */
286 if (!arg || *arg == '\0') {
291 ctx->data_file = path = ap_server_root_relative(p, arg);
293 cp = strrchr(path, '(');
294 cp2 = path + strlen(path) - 1;
298 return "Invalid argument: no closing parenthesis or cache size "
299 "missing after pathname with parenthesis";
305 ctx->shm_size = strtol(cp, &endptr, 10);
307 return "Invalid argument: cache size not numerical";
310 if (ctx->shm_size < 8192) {
311 return "Invalid argument: size has to be >= 8192 bytes";
315 if (ctx->shm_size >= SHMCB_MAX_SIZE) {
316 return apr_psprintf(tmp,
317 "Invalid argument: size has "
318 "to be < %d bytes on this platform",
323 else if (cp2 >= path && *cp2 == ')') {
324 return "Invalid argument: no opening parenthesis";
330 static apr_status_t socache_shmcb_init(ap_socache_instance_t *ctx,
331 const char *namespace,
332 const struct ap_socache_hints *hints,
333 server_rec *s, apr_pool_t *p)
336 apr_size_t shm_segsize;
339 unsigned int num_subcache, num_idx, loop;
340 apr_size_t avg_obj_size, avg_id_len;
342 /* Create shared memory segment */
343 if (ctx->data_file == NULL) {
344 const char *path = apr_pstrcat(p, DEFAULT_SHMCB_PREFIX, namespace,
345 DEFAULT_SHMCB_SUFFIX, NULL);
347 ctx->data_file = ap_server_root_relative(p, path);
350 /* Use anonymous shm by default, fall back on name-based. */
351 rv = apr_shm_create(&ctx->shm, ctx->shm_size, NULL, p);
352 if (APR_STATUS_IS_ENOTIMPL(rv)) {
353 /* If anon shm isn't supported, fail if no named file was
354 * configured successfully; the ap_server_root_relative call
355 * above will return NULL for invalid paths. */
356 if (ctx->data_file == NULL) {
357 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
358 "Could not use default path '%s' for shmcb socache",
363 /* For a name-based segment, remove it first in case of a
364 * previous unclean shutdown. */
365 apr_shm_remove(ctx->data_file, p);
367 rv = apr_shm_create(&ctx->shm, ctx->shm_size, ctx->data_file, p);
370 if (rv != APR_SUCCESS) {
371 ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
372 "Could not allocate shared memory segment for shmcb "
377 shm_segment = apr_shm_baseaddr_get(ctx->shm);
378 shm_segsize = apr_shm_size_get(ctx->shm);
379 if (shm_segsize < (5 * sizeof(SHMCBHeader))) {
380 /* the segment is ridiculously small, bail out */
381 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
382 "shared memory segment too small");
385 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
386 "shmcb_init allocated %" APR_SIZE_T_FMT
387 " bytes of shared memory",
389 /* Discount the header */
390 shm_segsize -= sizeof(SHMCBHeader);
391 /* Select index size based on average object size hints, if given. */
392 avg_obj_size = hints && hints->avg_obj_size ? hints->avg_obj_size : 150;
393 avg_id_len = hints && hints->avg_id_len ? hints->avg_id_len : 30;
394 num_idx = (shm_segsize) / (avg_obj_size + avg_id_len);
396 while ((num_idx / num_subcache) < (2 * num_subcache))
398 num_idx /= num_subcache;
399 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
400 "for %" APR_SIZE_T_FMT " bytes (%" APR_SIZE_T_FMT
401 " including header), recommending %u subcaches, "
402 "%u indexes each", shm_segsize,
403 shm_segsize + sizeof(SHMCBHeader), num_subcache, num_idx);
405 /* we're still too small, bail out */
406 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
407 "shared memory segment too small");
410 /* OK, we're sorted */
411 ctx->header = header = shm_segment;
412 header->stat_stores = 0;
413 header->stat_expiries = 0;
414 header->stat_scrolled = 0;
415 header->stat_retrieves_hit = 0;
416 header->stat_retrieves_miss = 0;
417 header->stat_removes_hit = 0;
418 header->stat_removes_miss = 0;
419 header->subcache_num = num_subcache;
420 /* Convert the subcache size (in bytes) to a value that is suitable for
421 * structure alignment on the host platform, by rounding down if necessary.
422 * This assumes that sizeof(unsigned long) provides an appropriate
424 header->subcache_size = ((size_t)(shm_segsize / num_subcache) &
425 ~(size_t)(sizeof(unsigned long) - 1));
426 header->subcache_data_offset = sizeof(SHMCBSubcache) +
427 num_idx * sizeof(SHMCBIndex);
428 header->subcache_data_size = header->subcache_size -
429 header->subcache_data_offset;
430 header->index_num = num_idx;
432 /* Output trace info */
433 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
434 "shmcb_init_memory choices follow");
435 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
436 "subcache_num = %u", header->subcache_num);
437 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
438 "subcache_size = %u", header->subcache_size);
439 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
440 "subcache_data_offset = %u", header->subcache_data_offset);
441 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
442 "subcache_data_size = %u", header->subcache_data_size);
443 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
444 "index_num = %u", header->index_num);
445 /* The header is done, make the caches empty */
446 for (loop = 0; loop < header->subcache_num; loop++) {
447 SHMCBSubcache *subcache = SHMCB_SUBCACHE(header, loop);
448 subcache->idx_pos = subcache->idx_used = 0;
449 subcache->data_pos = subcache->data_used = 0;
451 ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
452 "Shared memory socache initialised");
458 static void socache_shmcb_kill(ap_socache_instance_t *ctx, server_rec *s)
460 if (ctx && ctx->shm) {
461 apr_shm_destroy(ctx->shm);
466 static apr_status_t socache_shmcb_store(ap_socache_instance_t *ctx,
467 server_rec *s, const unsigned char *id,
468 unsigned int idlen, apr_time_t expiry,
469 unsigned char *encoded,
470 unsigned int len_encoded,
473 SHMCBHeader *header = ctx->header;
474 SHMCBSubcache *subcache = SHMCB_MASK(header, id);
476 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
477 "socache_shmcb_store (0x%02x -> subcache %d)",
478 SHMCB_MASK_DBG(header, id));
479 /* XXX: Says who? Why shouldn't this be acceptable, or padded if not? */
481 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "unusably short id provided "
482 "(%u bytes)", idlen);
485 if (shmcb_subcache_store(s, header, subcache, encoded,
486 len_encoded, id, idlen, expiry)) {
487 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
488 "can't store an socache entry!");
491 header->stat_stores++;
492 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
493 "leaving socache_shmcb_store successfully");
497 static apr_status_t socache_shmcb_retrieve(ap_socache_instance_t *ctx,
499 const unsigned char *id, unsigned int idlen,
500 unsigned char *dest, unsigned int *destlen,
503 SHMCBHeader *header = ctx->header;
504 SHMCBSubcache *subcache = SHMCB_MASK(header, id);
507 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
508 "socache_shmcb_retrieve (0x%02x -> subcache %d)",
509 SHMCB_MASK_DBG(header, id));
511 /* Get the entry corresponding to the id, if it exists. */
512 rv = shmcb_subcache_retrieve(s, header, subcache, id, idlen,
515 header->stat_retrieves_hit++;
517 header->stat_retrieves_miss++;
518 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
519 "leaving socache_shmcb_retrieve successfully");
521 return rv == 0 ? APR_SUCCESS : APR_EGENERAL;
524 static apr_status_t socache_shmcb_remove(ap_socache_instance_t *ctx,
525 server_rec *s, const unsigned char *id,
526 unsigned int idlen, apr_pool_t *p)
528 SHMCBHeader *header = ctx->header;
529 SHMCBSubcache *subcache = SHMCB_MASK(header, id);
532 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
533 "socache_shmcb_remove (0x%02x -> subcache %d)",
534 SHMCB_MASK_DBG(header, id));
536 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, "unusably short id provided "
537 "(%u bytes)", idlen);
540 if (shmcb_subcache_remove(s, header, subcache, id, idlen) == 0) {
541 header->stat_removes_hit++;
544 header->stat_removes_miss++;
547 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
548 "leaving socache_shmcb_remove successfully");
553 static void socache_shmcb_status(ap_socache_instance_t *ctx,
554 request_rec *r, int flags)
556 server_rec *s = r->server;
557 SHMCBHeader *header = ctx->header;
558 unsigned int loop, total = 0, cache_total = 0, non_empty_subcaches = 0;
559 apr_time_t idx_expiry, min_expiry = 0, max_expiry = 0, average_expiry = 0;
560 apr_time_t now = apr_time_now();
561 double expiry_total = 0;
562 int index_pct, cache_pct;
564 ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "inside shmcb_status");
565 /* Perform the iteration inside the mutex to avoid corruption or invalid
566 * pointer arithmetic. The rest of our logic uses read-only header data so
567 * doesn't need the lock. */
568 /* Iterate over the subcaches */
569 for (loop = 0; loop < header->subcache_num; loop++) {
570 SHMCBSubcache *subcache = SHMCB_SUBCACHE(header, loop);
571 shmcb_subcache_expire(s, header, subcache, now);
572 total += subcache->idx_used;
573 cache_total += subcache->data_used;
574 if (subcache->idx_used) {
575 SHMCBIndex *idx = SHMCB_INDEX(subcache, subcache->idx_pos);
576 non_empty_subcaches++;
577 idx_expiry = idx->expires;
578 expiry_total += (double)idx_expiry;
579 max_expiry = ((idx_expiry > max_expiry) ? idx_expiry : max_expiry);
581 min_expiry = idx_expiry;
583 min_expiry = ((idx_expiry < min_expiry) ? idx_expiry : min_expiry);
586 index_pct = (100 * total) / (header->index_num *
587 header->subcache_num);
588 cache_pct = (100 * cache_total) / (header->subcache_data_size *
589 header->subcache_num);
591 ap_rprintf(r, "cache type: <b>SHMCB</b>, shared memory: <b>%" APR_SIZE_T_FMT "</b> "
592 "bytes, current entries: <b>%d</b><br>",
593 ctx->shm_size, total);
594 ap_rprintf(r, "subcaches: <b>%d</b>, indexes per subcache: <b>%d</b><br>",
595 header->subcache_num, header->index_num);
596 if (non_empty_subcaches) {
597 average_expiry = (apr_time_t)(expiry_total / (double)non_empty_subcaches);
598 ap_rprintf(r, "time left on oldest entries' objects: ");
599 if (now < average_expiry)
600 ap_rprintf(r, "avg: <b>%d</b> seconds, (range: %d...%d)<br>",
601 (int)apr_time_sec(average_expiry - now),
602 (int)apr_time_sec(min_expiry - now),
603 (int)apr_time_sec(max_expiry - now));
605 ap_rprintf(r, "expiry_threshold: <b>Calculation error!</b><br>");
608 ap_rprintf(r, "index usage: <b>%d%%</b>, cache usage: <b>%d%%</b><br>",
609 index_pct, cache_pct);
610 ap_rprintf(r, "total entries stored since starting: <b>%lu</b><br>",
611 header->stat_stores);
612 ap_rprintf(r, "total entries expired since starting: <b>%lu</b><br>",
613 header->stat_expiries);
614 ap_rprintf(r, "total (pre-expiry) entries scrolled out of the cache: "
615 "<b>%lu</b><br>", header->stat_scrolled);
616 ap_rprintf(r, "total retrieves since starting: <b>%lu</b> hit, "
617 "<b>%lu</b> miss<br>", header->stat_retrieves_hit,
618 header->stat_retrieves_miss);
619 ap_rprintf(r, "total removes since starting: <b>%lu</b> hit, "
620 "<b>%lu</b> miss<br>", header->stat_removes_hit,
621 header->stat_removes_miss);
622 ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "leaving shmcb_status");
625 apr_status_t socache_shmcb_iterate(ap_socache_instance_t *instance,
627 ap_socache_iterator_t *iterator,
630 SHMCBHeader *header = instance->header;
632 apr_time_t now = apr_time_now();
633 apr_status_t rv = APR_SUCCESS;
634 apr_size_t buflen = 0;
637 /* Perform the iteration inside the mutex to avoid corruption or invalid
638 * pointer arithmetic. The rest of our logic uses read-only header data so
639 * doesn't need the lock. */
640 /* Iterate over the subcaches */
641 for (loop = 0; loop < header->subcache_num && rv == APR_SUCCESS; loop++) {
642 SHMCBSubcache *subcache = SHMCB_SUBCACHE(header, loop);
643 rv = shmcb_subcache_iterate(instance, s, header, subcache, iterator,
644 &buf, &buflen, pool, now);
650 * Subcache-level cache operations
653 static void shmcb_subcache_expire(server_rec *s, SHMCBHeader *header,
654 SHMCBSubcache *subcache, apr_time_t now)
656 unsigned int loop = 0;
657 unsigned int new_idx_pos = subcache->idx_pos;
658 SHMCBIndex *idx = NULL;
660 while (loop < subcache->idx_used) {
661 idx = SHMCB_INDEX(subcache, new_idx_pos);
662 if (idx->expires > now)
663 /* it hasn't expired yet, we're done iterating */
666 new_idx_pos = SHMCB_CYCLIC_INCREMENT(new_idx_pos, 1, header->index_num);
671 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
672 "will be expiring %u socache entries", loop);
673 if (loop == subcache->idx_used) {
674 /* We're expiring everything, piece of cake */
675 subcache->idx_used = 0;
676 subcache->data_used = 0;
678 /* There remain other indexes, so we can use idx to adjust 'data' */
679 unsigned int diff = SHMCB_CYCLIC_SPACE(subcache->data_pos,
681 header->subcache_data_size);
682 /* Adjust the indexes */
683 subcache->idx_used -= loop;
684 subcache->idx_pos = new_idx_pos;
685 /* Adjust the data area */
686 subcache->data_used -= diff;
687 subcache->data_pos = idx->data_pos;
689 header->stat_expiries += loop;
690 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
691 "we now have %u socache entries", subcache->idx_used);
694 static int shmcb_subcache_store(server_rec *s, SHMCBHeader *header,
695 SHMCBSubcache *subcache,
696 unsigned char *data, unsigned int data_len,
697 const unsigned char *id, unsigned int id_len,
700 unsigned int data_offset, new_idx, id_offset;
702 unsigned int total_len = id_len + data_len;
704 /* Sanity check the input */
705 if (total_len > header->subcache_data_size) {
706 ap_log_error(APLOG_MARK, APLOG_ERR, 0, s,
707 "inserting socache entry larger (%d) than subcache data area (%d)",
708 total_len, header->subcache_data_size);
712 /* If there are entries to expire, ditch them first. */
713 shmcb_subcache_expire(s, header, subcache, apr_time_now());
715 /* Loop until there is enough space to insert */
716 if (header->subcache_data_size - subcache->data_used < total_len
717 || subcache->idx_used == header->index_num) {
718 unsigned int loop = 0;
720 idx = SHMCB_INDEX(subcache, subcache->idx_pos);
721 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
722 "about to force-expire, subcache: idx_used=%d, "
723 "data_used=%d", subcache->idx_used, subcache->data_used);
727 /* Adjust the indexes by one */
728 subcache->idx_pos = SHMCB_CYCLIC_INCREMENT(subcache->idx_pos, 1,
730 subcache->idx_used--;
731 if (!subcache->idx_used) {
732 /* There's nothing left */
733 subcache->data_used = 0;
736 /* Adjust the data */
737 idx2 = SHMCB_INDEX(subcache, subcache->idx_pos);
738 subcache->data_used -= SHMCB_CYCLIC_SPACE(idx->data_pos, idx2->data_pos,
739 header->subcache_data_size);
740 subcache->data_pos = idx2->data_pos;
742 header->stat_scrolled++;
746 } while (header->subcache_data_size - subcache->data_used < total_len);
748 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
749 "finished force-expire, subcache: idx_used=%d, "
750 "data_used=%d", subcache->idx_used, subcache->data_used);
753 /* HERE WE ASSUME THAT THE NEW SESSION SHOULD GO ON THE END! I'M NOT
754 * CHECKING WHETHER IT SHOULD BE GENUINELY "INSERTED" SOMEWHERE.
756 * We aught to fix that. httpd (never mind third party modules)
757 * does not promise to perform any processing in date order
758 * (c.f. FAQ "My log entries are not in date order!")
761 id_offset = SHMCB_CYCLIC_INCREMENT(subcache->data_pos, subcache->data_used,
762 header->subcache_data_size);
763 shmcb_cyclic_ntoc_memcpy(header->subcache_data_size,
764 SHMCB_DATA(header, subcache), id_offset,
766 subcache->data_used += id_len;
767 /* Insert the data */
768 data_offset = SHMCB_CYCLIC_INCREMENT(subcache->data_pos, subcache->data_used,
769 header->subcache_data_size);
770 shmcb_cyclic_ntoc_memcpy(header->subcache_data_size,
771 SHMCB_DATA(header, subcache), data_offset,
773 subcache->data_used += data_len;
774 /* Insert the index */
775 new_idx = SHMCB_CYCLIC_INCREMENT(subcache->idx_pos, subcache->idx_used,
777 idx = SHMCB_INDEX(subcache, new_idx);
778 idx->expires = expiry;
779 idx->data_pos = id_offset;
780 idx->data_used = total_len;
781 idx->id_len = id_len;
783 subcache->idx_used++;
784 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
785 "insert happened at idx=%d, data=(%u:%u)", new_idx,
786 id_offset, data_offset);
787 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
788 "finished insert, subcache: idx_pos/idx_used=%d/%d, "
789 "data_pos/data_used=%d/%d",
790 subcache->idx_pos, subcache->idx_used,
791 subcache->data_pos, subcache->data_used);
795 static int shmcb_subcache_retrieve(server_rec *s, SHMCBHeader *header,
796 SHMCBSubcache *subcache,
797 const unsigned char *id, unsigned int idlen,
798 unsigned char *dest, unsigned int *destlen)
801 unsigned int loop = 0;
802 apr_time_t now = apr_time_now();
804 pos = subcache->idx_pos;
806 while (loop < subcache->idx_used) {
807 SHMCBIndex *idx = SHMCB_INDEX(subcache, pos);
809 /* Only consider 'idx' if the id matches, and the "removed"
810 * flag isn't set, and the record is not expired.
811 * Check the data length too to avoid a buffer overflow
812 * in case of corruption, which should be impossible,
813 * but it's cheap to be safe. */
815 && idx->id_len == idlen && (idx->data_used - idx->id_len) < *destlen
816 && shmcb_cyclic_memcmp(header->subcache_data_size,
817 SHMCB_DATA(header, subcache),
818 idx->data_pos, id, idx->id_len) == 0)
820 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
821 "match at idx=%d, data=%d", pos, idx->data_pos);
822 if (idx->expires > now)
824 unsigned int data_offset;
826 /* Find the offset of the data segment, after the id */
827 data_offset = SHMCB_CYCLIC_INCREMENT(idx->data_pos,
829 header->subcache_data_size);
831 *destlen = idx->data_used - idx->id_len;
833 /* Copy out the data */
834 shmcb_cyclic_cton_memcpy(header->subcache_data_size,
835 dest, SHMCB_DATA(header, subcache),
836 data_offset, *destlen);
841 /* Already stale, quietly remove and treat as not-found */
843 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
844 "shmcb_subcache_retrieve discarding expired entry");
850 pos = SHMCB_CYCLIC_INCREMENT(pos, 1, header->index_num);
853 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
854 "shmcb_subcache_retrieve found no match");
858 static int shmcb_subcache_remove(server_rec *s, SHMCBHeader *header,
859 SHMCBSubcache *subcache,
860 const unsigned char *id,
864 unsigned int loop = 0;
866 pos = subcache->idx_pos;
867 while (loop < subcache->idx_used) {
868 SHMCBIndex *idx = SHMCB_INDEX(subcache, pos);
870 /* Only consider 'idx' if the id matches, and the "removed"
872 if (!idx->removed && idx->id_len == idlen
873 && shmcb_cyclic_memcmp(header->subcache_data_size,
874 SHMCB_DATA(header, subcache),
875 idx->data_pos, id, idx->id_len) == 0) {
876 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
877 "possible match at idx=%d, data=%d", pos, idx->data_pos);
879 /* Found the matching entry, remove it quietly. */
881 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
882 "shmcb_subcache_remove removing matching entry");
887 pos = SHMCB_CYCLIC_INCREMENT(pos, 1, header->index_num);
890 return -1; /* failure */
894 static apr_status_t shmcb_subcache_iterate(ap_socache_instance_t *instance,
897 SHMCBSubcache *subcache,
898 ap_socache_iterator_t *iterator,
905 unsigned int loop = 0;
908 pos = subcache->idx_pos;
909 while (loop < subcache->idx_used) {
910 SHMCBIndex *idx = SHMCB_INDEX(subcache, pos);
912 /* Only consider 'idx' if the "removed" flag isn't set. */
915 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
916 "iterating idx=%d, data=%d", pos, idx->data_pos);
917 if (idx->expires > now)
919 unsigned char *id = *buf;
921 unsigned int data_offset, dest_len;
924 /* Find the offset of the data segment, after the id */
925 data_offset = SHMCB_CYCLIC_INCREMENT(idx->data_pos,
927 header->subcache_data_size);
929 dest_len = idx->data_used - idx->id_len;
931 buf_req = APR_ALIGN_DEFAULT(idx->id_len + 1)
932 + APR_ALIGN_DEFAULT(dest_len + 1);
934 if (buf_req > *buf_len) {
935 /* Grow to ~150% of this buffer requirement on resize
936 * always using APR_ALIGN_DEFAULT sized pages
938 *buf_len = buf_req + APR_ALIGN_DEFAULT(buf_req / 2);
939 *buf = apr_palloc(pool, *buf_len);
943 dest = *buf + APR_ALIGN_DEFAULT(idx->id_len + 1);
945 /* Copy out the data, because it's potentially cyclic */
946 shmcb_cyclic_cton_memcpy(header->subcache_data_size, id,
947 SHMCB_DATA(header, subcache),
948 idx->data_pos, idx->id_len);
949 id[idx->id_len] = '\0';
951 shmcb_cyclic_cton_memcpy(header->subcache_data_size, dest,
952 SHMCB_DATA(header, subcache),
953 data_offset, dest_len);
954 dest[dest_len] = '\0';
956 rv = (*iterator)(instance, s, id, idx->id_len,
957 dest, dest_len, pool);
958 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
959 "shmcb_subcache_iterate discarding expired entry");
960 if (rv != APR_SUCCESS)
964 /* Already stale, quietly remove and treat as not-found */
966 ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s,
967 "shmcb_subcache_iterate discarding expired entry");
972 pos = SHMCB_CYCLIC_INCREMENT(pos, 1, header->index_num);
975 return -1; /* failure */
978 static const ap_socache_provider_t socache_shmcb = {
980 AP_SOCACHE_FLAG_NOTMPSAFE,
981 socache_shmcb_create,
985 socache_shmcb_retrieve,
986 socache_shmcb_remove,
987 socache_shmcb_status,
988 socache_shmcb_iterate
991 static void register_hooks(apr_pool_t *p)
993 ap_register_provider(p, AP_SOCACHE_PROVIDER_GROUP, "shmcb",
994 AP_SOCACHE_PROVIDER_VERSION,
997 /* Also register shmcb under the default provider name. */
998 ap_register_provider(p, AP_SOCACHE_PROVIDER_GROUP,
999 AP_SOCACHE_DEFAULT_PROVIDER,
1000 AP_SOCACHE_PROVIDER_VERSION,
1004 module AP_MODULE_DECLARE_DATA socache_shmcb_module = {
1005 STANDARD20_MODULE_STUFF,
1006 NULL, NULL, NULL, NULL, NULL,