4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
27 #pragma ident "%Z%%M% %I% %E% SMI"
30 * based on usr/src/uts/common/os/kmem.c r1.64 from 2001/12/18
32 * The slab allocator, as described in the following two papers:
35 * The Slab Allocator: An Object-Caching Kernel Memory Allocator.
36 * Proceedings of the Summer 1994 Usenix Conference.
37 * Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
39 * Jeff Bonwick and Jonathan Adams,
40 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
41 * Arbitrary Resources.
42 * Proceedings of the 2001 Usenix Conference.
43 * Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
47 * umem is very close to kmem in implementation. There are four major
48 * areas of divergence:
56 * KM_SLEEP vs. UMEM_NOFAIL
62 * kmem is initialized early on in boot, and knows that no one will call
63 * into it before it is ready. umem does not have these luxuries. Instead,
64 * initialization is divided into two phases:
66 * * library initialization, and
70 * umem's full initialization happens at the time of the first allocation
71 * request (via malloc() and friends, umem_alloc(), or umem_zalloc()),
72 * or the first call to umem_cache_create().
74 * umem_free(), and umem_cache_alloc() do not require special handling,
75 * since the only way to get valid arguments for them is to successfully
76 * call a function from the first group.
78 * 2.1. Library Initialization: umem_startup()
79 * -------------------------------------------
80 * umem_startup() is libumem.so's .init section. It calls pthread_atfork()
81 * to install the handlers necessary for umem's Fork1-Safety. Because of
82 * race condition issues, all other pre-umem_init() initialization is done
83 * statically (i.e. by the dynamic linker).
85 * For standalone use, umem_startup() returns everything to its initial state.
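 *
 * A sketch of that atfork registration (the pre-fork and parent post-fork
 * handler names here are assumptions for illustration; only
 * umem_release_child() is referred to elsewhere in this comment):
 *
 *	(void) pthread_atfork(umem_lockup, umem_release, umem_release_child);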
88 * 2.2. First use: umem_init()
89 * ------------------------------
90 * The first time any memory allocation function is used, we have to
91 * create the backing caches and vmem arenas which are needed for it.
92 * umem_init() is the central point for that task. When it completes,
93 * umem_ready is either UMEM_READY (all set) or UMEM_READY_INIT_FAILED (unable
94 * to initialize, probably due to lack of memory).
96 * There are four different paths from which umem_init() is called:
98 * * from umem_alloc() or umem_zalloc(), with 0 < size < UMEM_MAXBUF,
100 * * from umem_alloc() or umem_zalloc(), with size > UMEM_MAXBUF,
102 * * from umem_cache_create(), and
104 * * from memalign(), with align > UMEM_ALIGN.
106 * The last three just check if umem is initialized, and call umem_init()
107 * if it is not. For performance reasons, the first case is more complicated.
109 * 2.2.1. umem_alloc()/umem_zalloc(), with 0 < size < UMEM_MAXBUF
110 * -----------------------------------------------------------------
111 * In this case, umem_cache_alloc(&umem_null_cache, ...) is called.
112 * There is special-case code which causes any allocation on
113 * &umem_null_cache to fail by returning (NULL), regardless of the flags argument.
116 * So umem_cache_alloc() returns NULL, and umem_alloc()/umem_zalloc() call
117 * umem_alloc_retry(). umem_alloc_retry() sees that the allocation
118 * was against &umem_null_cache, and calls umem_init().
120 * If initialization is successful, umem_alloc_retry() returns 1, which
121 * causes umem_alloc()/umem_zalloc() to start over, which causes it to load
122 * the (now valid) cache pointer from umem_alloc_table.
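 *
 * In outline, the allocation path is roughly (a simplified sketch of
 * _umem_alloc(), defined later in this file; error handling omitted):
 *
 *	retry:
 *		cp = umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT];
 *		buf = _umem_cache_alloc(cp, umflag);
 *		if (buf == NULL && umem_alloc_retry(cp, umflag))
 *			goto retry;
 *		return (buf);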
124 * 2.2.2. Dealing with race conditions
125 * -----------------------------------
126 * There are a couple race conditions resulting from the initialization
127 * code that we have to guard against:
129 * * In umem_cache_create(), there is a special UMC_INTERNAL cflag
130 * that is passed for caches created during initialization. It
131 * is illegal for a user to try to create a UMC_INTERNAL cache.
132 * This allows initialization to proceed, but any other
133 * umem_cache_create()s will block by calling umem_init().
135 * Since umem_null_cache has a 1-element cache_cpu, its cache_cpu_mask
136 * is always zero. umem_cache_alloc uses cp->cache_cpu_mask to
137 * mask the cpu number. This prevents a race between grabbing a
138 * cache pointer out of umem_alloc_table and growing the cpu array.
143 * kmem uses the CPU's sequence number to determine which "cpu cache" to
144 * use for an allocation. Currently, there is no way to get the sequence
145 * number in userspace.
147 * umem keeps track of cpu information in umem_cpus, an array of umem_max_ncpus
148 * umem_cpu_t structures. CURCPU() is a "hint" function, which we then mask
149 * with either umem_cpu_mask or cp->cache_cpu_mask to find the actual "cpu" id.
150 * The mechanics of this are all in the CPU(mask) macro.
152 * Currently, umem uses _lwp_self() as its hint.
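 *
 * Effectively, the cpu cache used for an allocation is chosen as shown
 * below (a sketch of the CPU() and UMEM_CPU_CACHE() machinery defined
 * later in this file):
 *
 *	ccp = &cp->cache_cpu[CPUHINT() & cp->cache_cpu_mask];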
155 * 4. The update thread
156 * --------------------
157 * kmem uses a task queue, kmem_taskq, to do periodic maintenance on
158 * every kmem cache. vmem has a periodic timeout for hash table resizing.
159 * The kmem_taskq also provides a separate context for kmem_cache_reap()'s
160 * to be done in, avoiding issues of the context of kmem_reap() callers.
162 * Instead, umem has the concept of "updates", which are asynchronous requests
163 * for work attached to single caches. All caches with pending work are
164 * on a doubly linked list rooted at the umem_null_cache. All update state
165 * is protected by the umem_update_lock mutex, and the umem_update_cv is used
166 * for notification between threads.
168 * 4.1. Cache states with regards to updates
169 * -----------------------------------------
170 * A given cache is in one of three states:
172 * Inactive cache_uflags is zero, cache_u{next,prev} are NULL
174 * Work Requested cache_uflags is non-zero (but UMU_ACTIVE is not set),
175 * cache_u{next,prev} link the cache onto the global update list.
178 * Active cache_uflags has UMU_ACTIVE set, cache_u{next,prev}
179 * are NULL, and either umem_update_thr or
180 * umem_st_update_thr is actively doing work on the cache.
183 * An update can be added to any cache in any state -- if the cache is
184 * Inactive, it transitions to being Work Requested. If the cache is
185 * Active, the worker will notice the new update and act on it before
186 * transitioning the cache to the Inactive state.
188 * If a cache is in the Active state, UMU_NOTIFY can be set, which asks
189 * the worker to broadcast the umem_update_cv when it has finished.
191 * 4.2. Update interface
192 * ---------------------
193 * umem_add_update() adds an update to a particular cache.
194 * umem_updateall() adds an update to all caches.
195 * umem_remove_updates() returns a cache to the Inactive state.
197 * umem_process_updates() processes all caches in the Work Requested state.
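 *
 * For example, umem_cache_update() below requests a hash-table rescale by
 * passing UMU_HASH_RESCALE to umem_add_update(); the update thread later
 * picks that request up via umem_process_updates().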
201 * When umem_reap() is called (at the time of heap growth), it schedules
202 * UMU_REAP updates on every cache. It then checks to see if the update
203 * thread exists (umem_update_thr != 0). If it does, it broadcasts
204 * the umem_update_cv to wake the update thread up, and returns.
206 * If the update thread does not exist (umem_update_thr == 0), and the
207 * program currently has multiple threads, umem_reap() attempts to create
208 * a new update thread.
210 * If the process is not multithreaded, or the creation fails, umem_reap()
211 * calls umem_st_update() to do an inline update.
213 * 4.4. The update thread
214 * ----------------------
215 * The update thread spends most of its time in cond_timedwait() on the
216 * umem_update_cv. It wakes up under two conditions:
218 * The timedwait times out, in which case it needs to run a global update, or
221 * * someone cond_broadcast(3THR)s the umem_update_cv, in which case
222 * it needs to check if there are any caches in the Work Requested state.
225 * When it is time for another global update, umem calls umem_cache_update()
226 * on every cache, then calls vmem_update(), which tunes the vmem structures.
227 * umem_cache_update() can request further work using umem_add_update().
229 * After any work from the global update completes, the update timer is
230 * reset to umem_reap_interval seconds in the future. This makes the
231 * updates self-throttling.
233 * Reaps are similarly self-throttling. After a UMU_REAP update has
234 * been scheduled on all caches, umem_reap() sets a flag and wakes up the
235 * update thread. The update thread notices the flag, and resets the reap state.
238 * 4.5. Inline updates
239 * -------------------
240 * If the update thread is not running, umem_st_update() is used instead. It
241 * immediately does a global update (as above), then calls
242 * umem_process_updates() to process both the reaps that umem_reap() added and
243 * any work generated by the global update. Afterwards, it resets the reap state.
246 * While umem_st_update() is running, umem_st_update_thr holds the thread
247 * id of the thread performing the update.
249 * 4.6. Updates and fork1()
250 * ------------------------
251 * umem has fork1() pre- and post-handlers which lock up (and release) every
252 * mutex in every cache. They also lock up the umem_update_lock. Since
253 * fork1() only copies over a single lwp, other threads (including the update
254 * thread) could have been actively using a cache in the parent. This
255 * can lead to inconsistencies in the child process.
257 * Because we locked all of the mutexes, the only possible inconsistencies are:
259 * * a umem_cache_alloc() could leak its buffer.
261 * * a caller of umem_depot_alloc() could leak a magazine, and all the
262 * buffers contained in it.
264 * * a cache could be in the Active update state. In the child, there
265 * would be no thread actually working on it.
267 * * a umem_hash_rescale() could leak the new hash table.
269 * * a umem_magazine_resize() could be in progress.
271 * * a umem_reap() could be in progress.
273 * The memory leaks we can't do anything about. umem_release_child() resets
274 * the update state and moves any caches in the Active state to the Work Requested
275 * state. This might cause some updates to be re-run, but UMU_REAP and
276 * UMU_HASH_RESCALE are effectively idempotent, and the worst that can
277 * happen from umem_magazine_resize() is resizing the magazine twice in close succession.
280 * Much of the cleanup in umem_release_child() is skipped if
281 * umem_st_update_thr == thr_self(). This is so that applications which call
282 * fork1() from a cache callback do not break. Needless to say, any such
283 * application is tremendously broken.
286 * 5. KM_SLEEP vs. UMEM_NOFAIL
287 * ----------------------------
288 * Allocations against kmem and vmem have two basic modes: SLEEP and
289 * NOSLEEP. A sleeping allocation will go to sleep (waiting for
290 * more memory) instead of failing (returning NULL).
292 * SLEEP allocations presume an extremely multithreaded model, with
293 * a lot of allocation and deallocation activity. umem cannot presume
294 * that its clients have any particular type of behavior. Instead,
295 * it provides two types of allocations:
297 * UMEM_DEFAULT, equivalent to KM_NOSLEEP (i.e. return NULL on failure), and
300 * * UMEM_NOFAIL, which, on failure, calls an optional callback
301 * (registered with umem_nofail_callback()).
303 * The callback is invoked with no locks held, and can do an arbitrary
304 * amount of work. It then has a choice between:
306 * Returning UMEM_CALLBACK_RETRY, which will cause the allocation to be retried.
309 * * Returning UMEM_CALLBACK_EXIT(status), which will cause exit(2)
310 * to be invoked with status. If multiple threads attempt to do
311 * this simultaneously, only one will call exit(2).
313 * * Doing some kind of non-local exit (thr_exit(3thr), longjmp(3C),
316 * The default callback returns UMEM_CALLBACK_EXIT(255).
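 *
 * For example, a client might register a callback along these lines
 * (my_nofail_cb() and reclaim_app_memory() are hypothetical client code,
 * shown only as a sketch):
 *
 *	static int
 *	my_nofail_cb(void)
 *	{
 *		if (reclaim_app_memory() > 0)
 *			return (UMEM_CALLBACK_RETRY);
 *		return (UMEM_CALLBACK_EXIT(1));
 *	}
 *	...
 *	umem_nofail_callback(my_nofail_cb);
 *	buf = umem_alloc(size, UMEM_NOFAIL);	(does not return NULL)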
318 * To have these callbacks without risk of state corruption (in the case of
319 * a non-local exit), we have to ensure that the callbacks get invoked
320 * close to the original allocation, with no inconsistent state or held
321 * locks. The following steps are taken:
323 * * All invocations of vmem are VM_NOSLEEP.
325 * All constructor callbacks (which can themselves do allocations)
326 * are passed UMEM_DEFAULT as their required allocation argument. This
327 * way, the constructor will fail, allowing the highest-level allocation
328 * to invoke the nofail callback.
330 * If a constructor callback _does_ do a UMEM_NOFAIL allocation, and
331 * the nofail callback does a non-local exit, we will leak the
332 * partially-constructed buffer.
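 *
 * For instance, a client constructor that allocates on its own would be
 * written along these lines (foo_t and FOO_DATA_SIZE are hypothetical;
 * the umflag it receives is always UMEM_DEFAULT, even when the original
 * request was UMEM_NOFAIL):
 *
 *	static int
 *	foo_constructor(void *buf, void *private, int umflag)
 *	{
 *		foo_t *fp = buf;
 *
 *		if ((fp->foo_data = umem_alloc(FOO_DATA_SIZE, umflag)) == NULL)
 *			return (-1);
 *		return (0);
 *	}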
337 * umem has a few more locks than kmem does, mostly in the update path. The
338 * overall lock ordering (earlier locks must be acquired first) is:
343 * vmem_nosleep_lock.vmpl_mutex
352 * cache_cpu[*].cc_lock
355 * umem_log_header_t's:
360 #include <umem_impl.h>
361 #include <sys/vmem_impl_user.h>
362 #include "umem_base.h"
363 #include "vmem_base.h"
365 #include <sys/processor.h>
366 #include <sys/sysmacros.h>
381 #define UMEM_VMFLAGS(umflag) (VM_NOSLEEP)
386 * The default set of caches to back umem_alloc().
387 * These sizes should be reevaluated periodically.
389 * We want allocations that are multiples of the coherency granularity
390 * (64 bytes) to be satisfied from a cache which is a multiple of 64
391 * bytes, so that it will be 64-byte aligned. For all multiples of 64,
392 * the next cache size greater than or equal to it must be a multiple of 64.
395 * This table must be in sorted order, from smallest to highest. The
396 * highest slot must be UMEM_MAXBUF, and every slot afterwards must be zero.
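 *
 * For example, with the default table below a 100-byte umem_alloc()
 * request is satisfied from the 112-byte (7 * 16) cache, and a 200-byte
 * request from the 224-byte (7 * 32) cache.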
399 static int umem_alloc_sizes[] = {
409 4 * 8, 5 * 8, 6 * 8, 7 * 8,
411 4 * 16, 5 * 16, 6 * 16, 7 * 16,
412 4 * 32, 5 * 32, 6 * 32, 7 * 32,
413 4 * 64, 5 * 64, 6 * 64, 7 * 64,
414 4 * 128, 5 * 128, 6 * 128, 7 * 128,
415 P2ALIGN(8192 / 7, 64),
416 P2ALIGN(8192 / 6, 64),
417 P2ALIGN(8192 / 5, 64),
418 P2ALIGN(8192 / 4, 64), 2304,
419 P2ALIGN(8192 / 3, 64),
420 P2ALIGN(8192 / 2, 64), 4544,
421 P2ALIGN(8192 / 1, 64), 9216,
423 UMEM_MAXBUF, /* = 8192 * 2 */
424 /* 24 slots for user expansion */
425 0, 0, 0, 0, 0, 0, 0, 0,
426 0, 0, 0, 0, 0, 0, 0, 0,
427 0, 0, 0, 0, 0, 0, 0, 0,
429 #define NUM_ALLOC_SIZES (sizeof (umem_alloc_sizes) / sizeof (*umem_alloc_sizes))
431 static umem_magtype_t umem_magtype[] = {
432 { 1, 8, 3200, 65536 },
433 { 3, 16, 256, 32768 },
434 { 7, 32, 64, 16384 },
446 uint32_t umem_max_ncpus; /* # of CPU caches. */
448 uint32_t umem_stack_depth = 15; /* # stack frames in a bufctl_audit */
449 uint32_t umem_reap_interval = 10; /* max reaping rate (seconds) */
450 uint_t umem_depot_contention = 2; /* max failed trylocks per real interval */
451 uint_t umem_abort = 1; /* whether to abort on error */
452 uint_t umem_output = 0; /* whether to write to standard error */
453 uint_t umem_logging = 0; /* umem_log_enter() override */
454 uint32_t umem_mtbf = 0; /* mean time between failures [default: off] */
455 size_t umem_transaction_log_size; /* size of transaction log */
456 size_t umem_content_log_size; /* size of content log */
457 size_t umem_failure_log_size; /* failure log [4 pages per CPU] */
458 size_t umem_slab_log_size; /* slab create log [4 pages per CPU] */
459 size_t umem_content_maxsave = 256; /* UMF_CONTENTS max bytes to log */
460 size_t umem_lite_minsize = 0; /* minimum buffer size for UMF_LITE */
461 size_t umem_lite_maxalign = 1024; /* maximum buffer alignment for UMF_LITE */
462 size_t umem_maxverify; /* maximum bytes to inspect in debug routines */
463 size_t umem_minfirewall; /* hardware-enforced redzone threshold */
465 uint_t umem_flags = 0;
467 mutex_t umem_init_lock; /* locks initialization */
468 cond_t umem_init_cv; /* initialization CV */
469 thread_t umem_init_thr; /* thread initializing */
470 int umem_init_env_ready; /* environ pre-initted */
471 int umem_ready = UMEM_READY_STARTUP;
473 static umem_nofail_callback_t *nofail_callback;
474 static mutex_t umem_nofail_exit_lock;
475 static thread_t umem_nofail_exit_thr;
477 static umem_cache_t *umem_slab_cache;
478 static umem_cache_t *umem_bufctl_cache;
479 static umem_cache_t *umem_bufctl_audit_cache;
481 mutex_t umem_flags_lock;
483 static vmem_t *heap_arena;
484 static vmem_alloc_t *heap_alloc;
485 static vmem_free_t *heap_free;
487 static vmem_t *umem_internal_arena;
488 static vmem_t *umem_cache_arena;
489 static vmem_t *umem_hash_arena;
490 static vmem_t *umem_log_arena;
491 static vmem_t *umem_oversize_arena;
492 static vmem_t *umem_va_arena;
493 static vmem_t *umem_default_arena;
494 static vmem_t *umem_firewall_va_arena;
495 static vmem_t *umem_firewall_arena;
497 vmem_t *umem_memalign_arena;
499 umem_log_header_t *umem_transaction_log;
500 umem_log_header_t *umem_content_log;
501 umem_log_header_t *umem_failure_log;
502 umem_log_header_t *umem_slab_log;
504 #define CPUHINT() (thr_self())
505 #define CPUHINT_MAX() INT_MAX
507 #define CPU(mask) (umem_cpus + (CPUHINT() & (mask)))
508 static umem_cpu_t umem_startup_cpu = { /* initial, single, cpu */
513 static uint32_t umem_cpu_mask = 0; /* global cpu mask */
514 static umem_cpu_t *umem_cpus = &umem_startup_cpu; /* cpu list */
516 volatile uint32_t umem_reaping;
518 thread_t umem_update_thr;
519 struct timeval umem_update_next; /* timeofday of next update */
520 volatile thread_t umem_st_update_thr; /* only used when single-thd */
522 #define IN_UPDATE() (thr_self() == umem_update_thr || \
523 thr_self() == umem_st_update_thr)
524 #define IN_REAP() IN_UPDATE()
526 mutex_t umem_update_lock; /* cache_u{next,prev,flags} */
527 cond_t umem_update_cv;
529 volatile hrtime_t umem_reap_next; /* min hrtime of next reap */
531 mutex_t umem_cache_lock; /* inter-cache linkage only */
533 #ifdef UMEM_STANDALONE
534 umem_cache_t umem_null_cache;
535 static const umem_cache_t umem_null_cache_template = {
537 umem_cache_t umem_null_cache = {
545 NULL, NULL, NULL, NULL,
548 &umem_null_cache, &umem_null_cache,
549 &umem_null_cache, &umem_null_cache,
551 DEFAULTMUTEX, /* start of slab layer */
552 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
553 &umem_null_cache.cache_nullslab,
557 &umem_null_cache.cache_nullslab,
558 &umem_null_cache.cache_nullslab,
565 DEFAULTMUTEX, /* start of depot layer */
572 DEFAULTMUTEX, /* start of CPU cache */
573 0, 0, NULL, NULL, -1, -1, 0
578 #define ALLOC_TABLE_4 \
579 &umem_null_cache, &umem_null_cache, &umem_null_cache, &umem_null_cache
581 #define ALLOC_TABLE_64 \
582 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
583 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
584 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
585 ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4
587 #define ALLOC_TABLE_1024 \
588 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
589 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
590 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
591 ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64
593 static umem_cache_t *umem_alloc_table[UMEM_MAXBUF >> UMEM_ALIGN_SHIFT] = {
599 /* Used to constrain audit-log stack traces */
600 caddr_t umem_min_stack;
601 caddr_t umem_max_stack;
604 #define UMERR_MODIFIED 0 /* buffer modified while on freelist */
605 #define UMERR_REDZONE 1 /* redzone violation (write past end of buf) */
606 #define UMERR_DUPFREE 2 /* freed a buffer twice */
607 #define UMERR_BADADDR 3 /* freed a bad (unallocated) address */
608 #define UMERR_BADBUFTAG 4 /* buftag corrupted */
609 #define UMERR_BADBUFCTL 5 /* bufctl corrupted */
610 #define UMERR_BADCACHE 6 /* freed a buffer to the wrong cache */
611 #define UMERR_BADSIZE 7 /* alloc size != free size */
612 #define UMERR_BADBASE 8 /* buffer base address wrong */
615 hrtime_t ump_timestamp; /* timestamp of error */
616 int ump_error; /* type of umem error (UMERR_*) */
617 void *ump_buffer; /* buffer that induced abort */
618 void *ump_realbuf; /* real start address for buffer */
619 umem_cache_t *ump_cache; /* buffer's cache according to client */
620 umem_cache_t *ump_realcache; /* actual cache containing buffer */
621 umem_slab_t *ump_slab; /* slab according to umem_findslab() */
622 umem_bufctl_t *ump_bufctl; /* bufctl */
626 copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
628 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
629 uint64_t *buf = buf_arg;
636 verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
638 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
641 for (buf = buf_arg; buf < bufend; buf++)
648 verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
650 uint64_t *bufend = (uint64_t *)((char *)buf_arg + size);
653 for (buf = buf_arg; buf < bufend; buf++) {
655 copy_pattern(old, buf_arg,
656 (char *)buf - (char *)buf_arg);
666 umem_cache_applyall(void (*func)(umem_cache_t *))
670 (void) mutex_lock(&umem_cache_lock);
671 for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
674 (void) mutex_unlock(&umem_cache_lock);
678 umem_add_update_unlocked(umem_cache_t *cp, int flags)
680 umem_cache_t *cnext, *cprev;
682 flags &= ~UMU_ACTIVE;
687 if (cp->cache_uflags & UMU_ACTIVE) {
688 cp->cache_uflags |= flags;
690 if (cp->cache_unext != NULL) {
691 ASSERT(cp->cache_uflags != 0);
692 cp->cache_uflags |= flags;
694 ASSERT(cp->cache_uflags == 0);
695 cp->cache_uflags = flags;
696 cp->cache_unext = cnext = &umem_null_cache;
697 cp->cache_uprev = cprev = umem_null_cache.cache_uprev;
698 cnext->cache_uprev = cp;
699 cprev->cache_unext = cp;
705 umem_add_update(umem_cache_t *cp, int flags)
707 (void) mutex_lock(&umem_update_lock);
709 umem_add_update_unlocked(cp, flags);
712 (void) cond_broadcast(&umem_update_cv);
714 (void) mutex_unlock(&umem_update_lock);
718 * Remove a cache from the update list, waiting for any in-progress work to complete.
722 umem_remove_updates(umem_cache_t *cp)
724 (void) mutex_lock(&umem_update_lock);
727 * Get it out of the active state
729 while (cp->cache_uflags & UMU_ACTIVE) {
732 ASSERT(cp->cache_unext == NULL);
734 cp->cache_uflags |= UMU_NOTIFY;
737 * Make sure the update state is sane, before we wait
739 ASSERT(umem_update_thr != 0 || umem_st_update_thr != 0);
740 ASSERT(umem_update_thr != thr_self() &&
741 umem_st_update_thr != thr_self());
743 (void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE,
745 (void) cond_wait(&umem_update_cv, &umem_update_lock);
746 (void) pthread_setcancelstate(cancel_state, NULL);
749 * Get it out of the Work Requested state
751 if (cp->cache_unext != NULL) {
752 cp->cache_uprev->cache_unext = cp->cache_unext;
753 cp->cache_unext->cache_uprev = cp->cache_uprev;
754 cp->cache_uprev = cp->cache_unext = NULL;
755 cp->cache_uflags = 0;
758 * Make sure it is in the Inactive state
760 ASSERT(cp->cache_unext == NULL && cp->cache_uflags == 0);
761 (void) mutex_unlock(&umem_update_lock);
765 umem_updateall(int flags)
770 * NOTE: To prevent deadlock, umem_cache_lock is always acquired first.
772 * (umem_add_update is called from things run via umem_cache_applyall)
774 (void) mutex_lock(&umem_cache_lock);
775 (void) mutex_lock(&umem_update_lock);
777 for (cp = umem_null_cache.cache_next; cp != &umem_null_cache;
779 umem_add_update_unlocked(cp, flags);
782 (void) cond_broadcast(&umem_update_cv);
784 (void) mutex_unlock(&umem_update_lock);
785 (void) mutex_unlock(&umem_cache_lock);
789 * Debugging support. Given a buffer address, find its slab.
792 umem_findslab(umem_cache_t *cp, void *buf)
796 (void) mutex_lock(&cp->cache_lock);
797 for (sp = cp->cache_nullslab.slab_next;
798 sp != &cp->cache_nullslab; sp = sp->slab_next) {
799 if (UMEM_SLAB_MEMBER(sp, buf)) {
800 (void) mutex_unlock(&cp->cache_lock);
804 (void) mutex_unlock(&cp->cache_lock);
810 umem_error(int error, umem_cache_t *cparg, void *bufarg)
812 umem_buftag_t *btp = NULL;
813 umem_bufctl_t *bcp = NULL;
814 umem_cache_t *cp = cparg;
819 int old_logging = umem_logging;
821 umem_logging = 0; /* stop logging when a bad thing happens */
823 umem_abort_info.ump_timestamp = gethrtime();
825 sp = umem_findslab(cp, buf);
827 for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
828 cp = cp->cache_prev) {
829 if ((sp = umem_findslab(cp, buf)) != NULL)
836 error = UMERR_BADADDR;
839 error = UMERR_BADCACHE;
841 buf = (char *)bufarg - ((uintptr_t)bufarg -
842 (uintptr_t)sp->slab_base) % cp->cache_chunksize;
844 error = UMERR_BADBASE;
845 if (cp->cache_flags & UMF_BUFTAG)
846 btp = UMEM_BUFTAG(cp, buf);
847 if (cp->cache_flags & UMF_HASH) {
848 (void) mutex_lock(&cp->cache_lock);
849 for (bcp = *UMEM_HASH(cp, buf); bcp; bcp = bcp->bc_next)
850 if (bcp->bc_addr == buf)
852 (void) mutex_unlock(&cp->cache_lock);
853 if (bcp == NULL && btp != NULL)
854 bcp = btp->bt_bufctl;
855 if (umem_findslab(cp->cache_bufctl_cache, bcp) ==
856 NULL || P2PHASE((uintptr_t)bcp, UMEM_ALIGN) ||
857 bcp->bc_addr != buf) {
858 error = UMERR_BADBUFCTL;
864 umem_abort_info.ump_error = error;
865 umem_abort_info.ump_buffer = bufarg;
866 umem_abort_info.ump_realbuf = buf;
867 umem_abort_info.ump_cache = cparg;
868 umem_abort_info.ump_realcache = cp;
869 umem_abort_info.ump_slab = sp;
870 umem_abort_info.ump_bufctl = bcp;
872 umem_printf("umem allocator: ");
877 umem_printf("buffer modified after being freed\n");
878 off = verify_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
879 if (off == NULL) /* shouldn't happen */
881 umem_printf("modification occurred at offset 0x%lx "
882 "(0x%llx replaced by 0x%llx)\n",
883 (uintptr_t)off - (uintptr_t)buf,
884 (longlong_t)UMEM_FREE_PATTERN, (longlong_t)*off);
888 umem_printf("redzone violation: write past end of buffer\n");
892 umem_printf("invalid free: buffer not in cache\n");
896 umem_printf("duplicate free: buffer freed twice\n");
899 case UMERR_BADBUFTAG:
900 umem_printf("boundary tag corrupted\n");
901 umem_printf("bcp ^ bxstat = %lx, should be %lx\n",
902 (intptr_t)btp->bt_bufctl ^ btp->bt_bxstat,
906 case UMERR_BADBUFCTL:
907 umem_printf("bufctl corrupted\n");
911 umem_printf("buffer freed to wrong cache\n");
912 umem_printf("buffer was allocated from %s,\n", cp->cache_name);
913 umem_printf("caller attempting free to %s.\n",
918 umem_printf("bad free: free size (%u) != alloc size (%u)\n",
919 UMEM_SIZE_DECODE(((uint32_t *)btp)[0]),
920 UMEM_SIZE_DECODE(((uint32_t *)btp)[1]));
924 umem_printf("bad free: free address (%p) != alloc address "
925 "(%p)\n", bufarg, buf);
929 umem_printf("buffer=%p bufctl=%p cache: %s\n",
930 bufarg, (void *)bcp, cparg->cache_name);
932 if (bcp != NULL && (cp->cache_flags & UMF_AUDIT) &&
933 error != UMERR_BADBUFCTL) {
937 umem_bufctl_audit_t *bcap = (umem_bufctl_audit_t *)bcp;
939 diff = umem_abort_info.ump_timestamp - bcap->bc_timestamp;
940 ts.tv_sec = diff / NANOSEC;
941 ts.tv_nsec = diff % NANOSEC;
943 umem_printf("previous transaction on buffer %p:\n", buf);
944 umem_printf("thread=%p time=T-%ld.%09ld slab=%p cache: %s\n",
945 (void *)(intptr_t)bcap->bc_thread, ts.tv_sec, ts.tv_nsec,
946 (void *)sp, cp->cache_name);
947 for (d = 0; d < MIN(bcap->bc_depth, umem_stack_depth); d++) {
948 (void) print_sym((void *)bcap->bc_stack[d]);
953 umem_err_recoverable("umem: heap corruption detected");
955 umem_logging = old_logging; /* resume logging */
959 umem_nofail_callback(umem_nofail_callback_t *cb)
961 nofail_callback = cb;
965 umem_alloc_retry(umem_cache_t *cp, int umflag)
967 if (cp == &umem_null_cache) {
969 return (1); /* retry */
971 * Initialization failed. Do normal failure processing.
974 if (umflag & UMEM_NOFAIL) {
975 int def_result = UMEM_CALLBACK_EXIT(255);
976 int result = def_result;
977 umem_nofail_callback_t *callback = nofail_callback;
979 if (callback != NULL)
982 if (result == UMEM_CALLBACK_RETRY)
985 if ((result & ~0xFF) != UMEM_CALLBACK_EXIT(0)) {
986 log_message("nofail callback returned %x\n", result);
991 * only one thread will call exit
993 if (umem_nofail_exit_thr == thr_self())
994 umem_panic("recursive UMEM_CALLBACK_EXIT()\n");
996 (void) mutex_lock(&umem_nofail_exit_lock);
997 umem_nofail_exit_thr = thr_self();
1004 static umem_log_header_t *
1005 umem_log_init(size_t logsize)
1007 umem_log_header_t *lhp;
1008 int nchunks = 4 * umem_max_ncpus;
1009 size_t lhsize = offsetof(umem_log_header_t, lh_cpu[umem_max_ncpus]);
1016 * Make sure that lhp->lh_cpu[] is nicely aligned
1017 * to prevent false sharing of cache lines.
1019 lhsize = P2ROUNDUP(lhsize, UMEM_ALIGN);
1020 lhp = vmem_xalloc(umem_log_arena, lhsize, 64, P2NPHASE(lhsize, 64), 0,
1021 NULL, NULL, VM_NOSLEEP);
1027 (void) mutex_init(&lhp->lh_lock, USYNC_THREAD, NULL);
1028 lhp->lh_nchunks = nchunks;
1029 lhp->lh_chunksize = P2ROUNDUP(logsize / nchunks, PAGESIZE);
1030 if (lhp->lh_chunksize == 0)
1031 lhp->lh_chunksize = PAGESIZE;
1033 lhp->lh_base = vmem_alloc(umem_log_arena,
1034 lhp->lh_chunksize * nchunks, VM_NOSLEEP);
1035 if (lhp->lh_base == NULL)
1038 lhp->lh_free = vmem_alloc(umem_log_arena,
1039 nchunks * sizeof (int), VM_NOSLEEP);
1040 if (lhp->lh_free == NULL)
1043 bzero(lhp->lh_base, lhp->lh_chunksize * nchunks);
1045 for (i = 0; i < umem_max_ncpus; i++) {
1046 umem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
1047 (void) mutex_init(&clhp->clh_lock, USYNC_THREAD, NULL);
1048 clhp->clh_chunk = i;
1051 for (i = umem_max_ncpus; i < nchunks; i++)
1052 lhp->lh_free[i] = i;
1054 lhp->lh_head = umem_max_ncpus;
1061 if (lhp->lh_base != NULL)
1062 vmem_free(umem_log_arena, lhp->lh_base,
1063 lhp->lh_chunksize * nchunks);
1065 vmem_xfree(umem_log_arena, lhp, lhsize);
1071 umem_log_enter(umem_log_header_t *lhp, void *data, size_t size)
1074 umem_cpu_log_header_t *clhp =
1075 &lhp->lh_cpu[CPU(umem_cpu_mask)->cpu_number];
1077 if (lhp == NULL || umem_logging == 0)
1080 (void) mutex_lock(&clhp->clh_lock);
1082 if (size > clhp->clh_avail) {
1083 (void) mutex_lock(&lhp->lh_lock);
1085 lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
1086 lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
1087 clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
1088 lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
1089 clhp->clh_current = lhp->lh_base +
1090 clhp->clh_chunk * lhp->lh_chunksize;
1091 clhp->clh_avail = lhp->lh_chunksize;
1092 if (size > lhp->lh_chunksize)
1093 size = lhp->lh_chunksize;
1094 (void) mutex_unlock(&lhp->lh_lock);
1096 logspace = clhp->clh_current;
1097 clhp->clh_current += size;
1098 clhp->clh_avail -= size;
1099 bcopy(data, logspace, size);
1100 (void) mutex_unlock(&clhp->clh_lock);
1104 #define UMEM_AUDIT(lp, cp, bcp) \
1106 umem_bufctl_audit_t *_bcp = (umem_bufctl_audit_t *)(bcp); \
1107 _bcp->bc_timestamp = gethrtime(); \
1108 _bcp->bc_thread = thr_self(); \
1109 _bcp->bc_depth = getpcstack(_bcp->bc_stack, umem_stack_depth, \
1110 (cp != NULL) && (cp->cache_flags & UMF_CHECKSIGNAL)); \
1111 _bcp->bc_lastlog = umem_log_enter((lp), _bcp, \
1112 UMEM_BUFCTL_AUDIT_SIZE); \
1116 umem_log_event(umem_log_header_t *lp, umem_cache_t *cp,
1117 umem_slab_t *sp, void *addr)
1119 umem_bufctl_audit_t *bcp;
1120 UMEM_LOCAL_BUFCTL_AUDIT(&bcp);
1122 bzero(bcp, UMEM_BUFCTL_AUDIT_SIZE);
1123 bcp->bc_addr = addr;
1126 UMEM_AUDIT(lp, cp, bcp);
1130 * Create a new slab for cache cp.
1132 static umem_slab_t *
1133 umem_slab_create(umem_cache_t *cp, int umflag)
1135 size_t slabsize = cp->cache_slabsize;
1136 size_t chunksize = cp->cache_chunksize;
1137 int cache_flags = cp->cache_flags;
1138 size_t color, chunks;
1142 vmem_t *vmp = cp->cache_arena;
1144 color = cp->cache_color + cp->cache_align;
1145 if (color > cp->cache_maxcolor)
1146 color = cp->cache_mincolor;
1147 cp->cache_color = color;
1149 slab = vmem_alloc(vmp, slabsize, UMEM_VMFLAGS(umflag));
1152 goto vmem_alloc_failure;
1154 ASSERT(P2PHASE((uintptr_t)slab, vmp->vm_quantum) == 0);
1156 if (!(cp->cache_cflags & UMC_NOTOUCH) &&
1157 (cp->cache_flags & UMF_DEADBEEF))
1158 copy_pattern(UMEM_UNINITIALIZED_PATTERN, slab, slabsize);
1160 if (cache_flags & UMF_HASH) {
1161 if ((sp = _umem_cache_alloc(umem_slab_cache, umflag)) == NULL)
1162 goto slab_alloc_failure;
1163 chunks = (slabsize - color) / chunksize;
1165 sp = UMEM_SLAB(cp, slab);
1166 chunks = (slabsize - sizeof (umem_slab_t) - color) / chunksize;
1169 sp->slab_cache = cp;
1170 sp->slab_head = NULL;
1171 sp->slab_refcnt = 0;
1172 sp->slab_base = buf = slab + color;
1173 sp->slab_chunks = chunks;
1176 while (chunks-- != 0) {
1177 if (cache_flags & UMF_HASH) {
1178 bcp = _umem_cache_alloc(cp->cache_bufctl_cache, umflag);
1180 goto bufctl_alloc_failure;
1181 if (cache_flags & UMF_AUDIT) {
1182 umem_bufctl_audit_t *bcap =
1183 (umem_bufctl_audit_t *)bcp;
1184 bzero(bcap, UMEM_BUFCTL_AUDIT_SIZE);
1185 bcap->bc_cache = cp;
1190 bcp = UMEM_BUFCTL(cp, buf);
1192 if (cache_flags & UMF_BUFTAG) {
1193 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1194 btp->bt_redzone = UMEM_REDZONE_PATTERN;
1195 btp->bt_bufctl = bcp;
1196 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
1197 if (cache_flags & UMF_DEADBEEF) {
1198 copy_pattern(UMEM_FREE_PATTERN, buf,
1202 bcp->bc_next = sp->slab_head;
1203 sp->slab_head = bcp;
1207 umem_log_event(umem_slab_log, cp, sp, slab);
1211 bufctl_alloc_failure:
1213 while ((bcp = sp->slab_head) != NULL) {
1214 sp->slab_head = bcp->bc_next;
1215 _umem_cache_free(cp->cache_bufctl_cache, bcp);
1217 _umem_cache_free(umem_slab_cache, sp);
1221 vmem_free(vmp, slab, slabsize);
1225 umem_log_event(umem_failure_log, cp, NULL, NULL);
1226 atomic_add_64(&cp->cache_alloc_fail, 1);
1235 umem_slab_destroy(umem_cache_t *cp, umem_slab_t *sp)
1237 vmem_t *vmp = cp->cache_arena;
1238 void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1240 if (cp->cache_flags & UMF_HASH) {
1242 while ((bcp = sp->slab_head) != NULL) {
1243 sp->slab_head = bcp->bc_next;
1244 _umem_cache_free(cp->cache_bufctl_cache, bcp);
1246 _umem_cache_free(umem_slab_cache, sp);
1248 vmem_free(vmp, slab, cp->cache_slabsize);
1252 * Allocate a raw (unconstructed) buffer from cp's slab layer.
1255 umem_slab_alloc(umem_cache_t *cp, int umflag)
1257 umem_bufctl_t *bcp, **hash_bucket;
1261 (void) mutex_lock(&cp->cache_lock);
1262 cp->cache_slab_alloc++;
1263 sp = cp->cache_freelist;
1264 ASSERT(sp->slab_cache == cp);
1265 if (sp->slab_head == NULL) {
1267 * The freelist is empty. Create a new slab.
1269 (void) mutex_unlock(&cp->cache_lock);
1270 if (cp == &umem_null_cache)
1272 if ((sp = umem_slab_create(cp, umflag)) == NULL)
1274 (void) mutex_lock(&cp->cache_lock);
1275 cp->cache_slab_create++;
1276 if ((cp->cache_buftotal += sp->slab_chunks) > cp->cache_bufmax)
1277 cp->cache_bufmax = cp->cache_buftotal;
1278 sp->slab_next = cp->cache_freelist;
1279 sp->slab_prev = cp->cache_freelist->slab_prev;
1280 sp->slab_next->slab_prev = sp;
1281 sp->slab_prev->slab_next = sp;
1282 cp->cache_freelist = sp;
1286 ASSERT(sp->slab_refcnt <= sp->slab_chunks);
1289 * If we're taking the last buffer in the slab,
1290 * remove the slab from the cache's freelist.
1292 bcp = sp->slab_head;
1293 if ((sp->slab_head = bcp->bc_next) == NULL) {
1294 cp->cache_freelist = sp->slab_next;
1295 ASSERT(sp->slab_refcnt == sp->slab_chunks);
1298 if (cp->cache_flags & UMF_HASH) {
1300 * Add buffer to allocated-address hash table.
1303 hash_bucket = UMEM_HASH(cp, buf);
1304 bcp->bc_next = *hash_bucket;
1306 if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
1307 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1310 buf = UMEM_BUF(cp, bcp);
1313 ASSERT(UMEM_SLAB_MEMBER(sp, buf));
1315 (void) mutex_unlock(&cp->cache_lock);
1321 * Free a raw (unconstructed) buffer to cp's slab layer.
1324 umem_slab_free(umem_cache_t *cp, void *buf)
1327 umem_bufctl_t *bcp, **prev_bcpp;
1329 ASSERT(buf != NULL);
1331 (void) mutex_lock(&cp->cache_lock);
1332 cp->cache_slab_free++;
1334 if (cp->cache_flags & UMF_HASH) {
1336 * Look up buffer in allocated-address hash table.
1338 prev_bcpp = UMEM_HASH(cp, buf);
1339 while ((bcp = *prev_bcpp) != NULL) {
1340 if (bcp->bc_addr == buf) {
1341 *prev_bcpp = bcp->bc_next;
1345 cp->cache_lookup_depth++;
1346 prev_bcpp = &bcp->bc_next;
1349 bcp = UMEM_BUFCTL(cp, buf);
1350 sp = UMEM_SLAB(cp, buf);
1353 if (bcp == NULL || sp->slab_cache != cp || !UMEM_SLAB_MEMBER(sp, buf)) {
1354 (void) mutex_unlock(&cp->cache_lock);
1355 umem_error(UMERR_BADADDR, cp, buf);
1359 if ((cp->cache_flags & (UMF_AUDIT | UMF_BUFTAG)) == UMF_AUDIT) {
1360 if (cp->cache_flags & UMF_CONTENTS)
1361 ((umem_bufctl_audit_t *)bcp)->bc_contents =
1362 umem_log_enter(umem_content_log, buf,
1363 cp->cache_contents);
1364 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1368 * If this slab isn't currently on the freelist, put it there.
1370 if (sp->slab_head == NULL) {
1371 ASSERT(sp->slab_refcnt == sp->slab_chunks);
1372 ASSERT(cp->cache_freelist != sp);
1373 sp->slab_next->slab_prev = sp->slab_prev;
1374 sp->slab_prev->slab_next = sp->slab_next;
1375 sp->slab_next = cp->cache_freelist;
1376 sp->slab_prev = cp->cache_freelist->slab_prev;
1377 sp->slab_next->slab_prev = sp;
1378 sp->slab_prev->slab_next = sp;
1379 cp->cache_freelist = sp;
1382 bcp->bc_next = sp->slab_head;
1383 sp->slab_head = bcp;
1385 ASSERT(sp->slab_refcnt >= 1);
1386 if (--sp->slab_refcnt == 0) {
1388 * There are no outstanding allocations from this slab,
1389 * so we can reclaim the memory.
1391 sp->slab_next->slab_prev = sp->slab_prev;
1392 sp->slab_prev->slab_next = sp->slab_next;
1393 if (sp == cp->cache_freelist)
1394 cp->cache_freelist = sp->slab_next;
1395 cp->cache_slab_destroy++;
1396 cp->cache_buftotal -= sp->slab_chunks;
1397 (void) mutex_unlock(&cp->cache_lock);
1398 umem_slab_destroy(cp, sp);
1401 (void) mutex_unlock(&cp->cache_lock);
1405 umem_cache_alloc_debug(umem_cache_t *cp, void *buf, int umflag)
1407 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1408 umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl;
1412 if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) {
1413 umem_error(UMERR_BADBUFTAG, cp, buf);
1417 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_ALLOC;
1419 if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
1420 umem_error(UMERR_BADBUFCTL, cp, buf);
1424 btp->bt_redzone = UMEM_REDZONE_PATTERN;
1426 if (cp->cache_flags & UMF_DEADBEEF) {
1427 if (verify_and_copy_pattern(UMEM_FREE_PATTERN,
1428 UMEM_UNINITIALIZED_PATTERN, buf, cp->cache_verify)) {
1429 umem_error(UMERR_MODIFIED, cp, buf);
1434 if ((mtbf = umem_mtbf | cp->cache_mtbf) != 0 &&
1435 gethrtime() % mtbf == 0 &&
1436 (umflag & (UMEM_FATAL_FLAGS)) == 0) {
1437 umem_log_event(umem_failure_log, cp, NULL, NULL);
1443 * We do not pass fatal flags on to the constructor. This prevents
1444 * leaking buffers in the event of a subordinate constructor failing.
1446 flags_nfatal = UMEM_DEFAULT;
1447 if (mtbf || (cp->cache_constructor != NULL &&
1448 cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0)) {
1449 atomic_add_64(&cp->cache_alloc_fail, 1);
1450 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
1451 copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
1452 umem_slab_free(cp, buf);
1456 if (cp->cache_flags & UMF_AUDIT) {
1457 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1464 umem_cache_free_debug(umem_cache_t *cp, void *buf)
1466 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1467 umem_bufctl_audit_t *bcp = (umem_bufctl_audit_t *)btp->bt_bufctl;
1470 if (btp->bt_bxstat != ((intptr_t)bcp ^ UMEM_BUFTAG_ALLOC)) {
1471 if (btp->bt_bxstat == ((intptr_t)bcp ^ UMEM_BUFTAG_FREE)) {
1472 umem_error(UMERR_DUPFREE, cp, buf);
1475 sp = umem_findslab(cp, buf);
1476 if (sp == NULL || sp->slab_cache != cp)
1477 umem_error(UMERR_BADADDR, cp, buf);
1479 umem_error(UMERR_REDZONE, cp, buf);
1483 btp->bt_bxstat = (intptr_t)bcp ^ UMEM_BUFTAG_FREE;
1485 if ((cp->cache_flags & UMF_HASH) && bcp->bc_addr != buf) {
1486 umem_error(UMERR_BADBUFCTL, cp, buf);
1490 if (btp->bt_redzone != UMEM_REDZONE_PATTERN) {
1491 umem_error(UMERR_REDZONE, cp, buf);
1495 if (cp->cache_flags & UMF_AUDIT) {
1496 if (cp->cache_flags & UMF_CONTENTS)
1497 bcp->bc_contents = umem_log_enter(umem_content_log,
1498 buf, cp->cache_contents);
1499 UMEM_AUDIT(umem_transaction_log, cp, bcp);
1502 if (cp->cache_destructor != NULL)
1503 cp->cache_destructor(buf, cp->cache_private);
1505 if (cp->cache_flags & UMF_DEADBEEF)
1506 copy_pattern(UMEM_FREE_PATTERN, buf, cp->cache_verify);
1512 * Free each object in magazine mp to cp's slab layer, and free mp itself.
1515 umem_magazine_destroy(umem_cache_t *cp, umem_magazine_t *mp, int nrounds)
1519 ASSERT(cp->cache_next == NULL || IN_UPDATE());
1521 for (round = 0; round < nrounds; round++) {
1522 void *buf = mp->mag_round[round];
1524 if ((cp->cache_flags & UMF_DEADBEEF) &&
1525 verify_pattern(UMEM_FREE_PATTERN, buf,
1526 cp->cache_verify) != NULL) {
1527 umem_error(UMERR_MODIFIED, cp, buf);
1531 if (!(cp->cache_flags & UMF_BUFTAG) &&
1532 cp->cache_destructor != NULL)
1533 cp->cache_destructor(buf, cp->cache_private);
1535 umem_slab_free(cp, buf);
1537 ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1538 _umem_cache_free(cp->cache_magtype->mt_cache, mp);
1542 * Allocate a magazine from the depot.
1544 static umem_magazine_t *
1545 umem_depot_alloc(umem_cache_t *cp, umem_maglist_t *mlp)
1547 umem_magazine_t *mp;
1550 * If we can't get the depot lock without contention,
1551 * update our contention count. We use the depot
1552 * contention rate to determine whether we need to
1553 * increase the magazine size for better scalability.
1555 if (mutex_trylock(&cp->cache_depot_lock) != 0) {
1556 (void) mutex_lock(&cp->cache_depot_lock);
1557 cp->cache_depot_contention++;
1560 if ((mp = mlp->ml_list) != NULL) {
1561 ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1562 mlp->ml_list = mp->mag_next;
1563 if (--mlp->ml_total < mlp->ml_min)
1564 mlp->ml_min = mlp->ml_total;
1568 (void) mutex_unlock(&cp->cache_depot_lock);
1574 * Free a magazine to the depot.
1577 umem_depot_free(umem_cache_t *cp, umem_maglist_t *mlp, umem_magazine_t *mp)
1579 (void) mutex_lock(&cp->cache_depot_lock);
1580 ASSERT(UMEM_MAGAZINE_VALID(cp, mp));
1581 mp->mag_next = mlp->ml_list;
1584 (void) mutex_unlock(&cp->cache_depot_lock);
1588 * Update the working set statistics for cp's depot.
1591 umem_depot_ws_update(umem_cache_t *cp)
1593 (void) mutex_lock(&cp->cache_depot_lock);
1594 cp->cache_full.ml_reaplimit = cp->cache_full.ml_min;
1595 cp->cache_full.ml_min = cp->cache_full.ml_total;
1596 cp->cache_empty.ml_reaplimit = cp->cache_empty.ml_min;
1597 cp->cache_empty.ml_min = cp->cache_empty.ml_total;
1598 (void) mutex_unlock(&cp->cache_depot_lock);
1602 * Reap all magazines that have fallen out of the depot's working set.
1605 umem_depot_ws_reap(umem_cache_t *cp)
1608 umem_magazine_t *mp;
1610 ASSERT(cp->cache_next == NULL || IN_REAP());
1612 reap = MIN(cp->cache_full.ml_reaplimit, cp->cache_full.ml_min);
1613 while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_full)) != NULL)
1614 umem_magazine_destroy(cp, mp, cp->cache_magtype->mt_magsize);
1616 reap = MIN(cp->cache_empty.ml_reaplimit, cp->cache_empty.ml_min);
1617 while (reap-- && (mp = umem_depot_alloc(cp, &cp->cache_empty)) != NULL)
1618 umem_magazine_destroy(cp, mp, 0);
1622 umem_cpu_reload(umem_cpu_cache_t *ccp, umem_magazine_t *mp, int rounds)
1624 ASSERT((ccp->cc_loaded == NULL && ccp->cc_rounds == -1) ||
1625 (ccp->cc_loaded && ccp->cc_rounds + rounds == ccp->cc_magsize));
1626 ASSERT(ccp->cc_magsize > 0);
1628 ccp->cc_ploaded = ccp->cc_loaded;
1629 ccp->cc_prounds = ccp->cc_rounds;
1630 ccp->cc_loaded = mp;
1631 ccp->cc_rounds = rounds;
1635 * Allocate a constructed object from cache cp.
1637 #pragma weak umem_cache_alloc = _umem_cache_alloc
1639 _umem_cache_alloc(umem_cache_t *cp, int umflag)
1641 umem_cpu_cache_t *ccp;
1642 umem_magazine_t *fmp;
1647 ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
1648 (void) mutex_lock(&ccp->cc_lock);
1651 * If there's an object available in the current CPU's
1652 * loaded magazine, just take it and return.
1654 if (ccp->cc_rounds > 0) {
1655 buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
1657 (void) mutex_unlock(&ccp->cc_lock);
1658 if ((ccp->cc_flags & UMF_BUFTAG) &&
1659 umem_cache_alloc_debug(cp, buf, umflag) == -1) {
1660 if (umem_alloc_retry(cp, umflag)) {
1670 * The loaded magazine is empty. If the previously loaded
1671 * magazine was full, exchange them and try again.
1673 if (ccp->cc_prounds > 0) {
1674 umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
1679 * If the magazine layer is disabled, break out now.
1681 if (ccp->cc_magsize == 0)
1685 * Try to get a full magazine from the depot.
1687 fmp = umem_depot_alloc(cp, &cp->cache_full);
1689 if (ccp->cc_ploaded != NULL)
1690 umem_depot_free(cp, &cp->cache_empty,
1692 umem_cpu_reload(ccp, fmp, ccp->cc_magsize);
1697 * There are no full magazines in the depot,
1698 * so fall through to the slab layer.
1702 (void) mutex_unlock(&ccp->cc_lock);
1705 * We couldn't allocate a constructed object from the magazine layer,
1706 * so get a raw buffer from the slab layer and apply its constructor.
1708 buf = umem_slab_alloc(cp, umflag);
1711 if (cp == &umem_null_cache)
1713 if (umem_alloc_retry(cp, umflag)) {
1720 if (cp->cache_flags & UMF_BUFTAG) {
1722 * Let umem_cache_alloc_debug() apply the constructor for us.
1724 if (umem_cache_alloc_debug(cp, buf, umflag) == -1) {
1725 if (umem_alloc_retry(cp, umflag)) {
1734 * We do not pass fatal flags on to the constructor. This prevents
1735 * leaking buffers in the event of a subordinate constructor failing.
1737 flags_nfatal = UMEM_DEFAULT;
1738 if (cp->cache_constructor != NULL &&
1739 cp->cache_constructor(buf, cp->cache_private, flags_nfatal) != 0) {
1740 atomic_add_64(&cp->cache_alloc_fail, 1);
1741 umem_slab_free(cp, buf);
1743 if (umem_alloc_retry(cp, umflag)) {
1753 * Free a constructed object to cache cp.
1755 #pragma weak umem_cache_free = _umem_cache_free
1757 _umem_cache_free(umem_cache_t *cp, void *buf)
1759 umem_cpu_cache_t *ccp = UMEM_CPU_CACHE(cp, CPU(cp->cache_cpu_mask));
1760 umem_magazine_t *emp;
1761 umem_magtype_t *mtp;
1763 if (ccp->cc_flags & UMF_BUFTAG)
1764 if (umem_cache_free_debug(cp, buf) == -1)
1767 (void) mutex_lock(&ccp->cc_lock);
1770 * If there's a slot available in the current CPU's
1771 * loaded magazine, just put the object there and return.
1773 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
1774 ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;
1776 (void) mutex_unlock(&ccp->cc_lock);
1781 * The loaded magazine is full. If the previously loaded
1782 * magazine was empty, exchange them and try again.
1784 if (ccp->cc_prounds == 0) {
1785 umem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
1790 * If the magazine layer is disabled, break out now.
1792 if (ccp->cc_magsize == 0)
1796 * Try to get an empty magazine from the depot.
1798 emp = umem_depot_alloc(cp, &cp->cache_empty);
1800 if (ccp->cc_ploaded != NULL)
1801 umem_depot_free(cp, &cp->cache_full,
1803 umem_cpu_reload(ccp, emp, 0);
1808 * There are no empty magazines in the depot,
1809 * so try to allocate a new one. We must drop all locks
1810 * across umem_cache_alloc() because lower layers may
1811 * attempt to allocate from this cache.
1813 mtp = cp->cache_magtype;
1814 (void) mutex_unlock(&ccp->cc_lock);
1815 emp = _umem_cache_alloc(mtp->mt_cache, UMEM_DEFAULT);
1816 (void) mutex_lock(&ccp->cc_lock);
1820 * We successfully allocated an empty magazine.
1821 * However, we had to drop ccp->cc_lock to do it,
1822 * so the cache's magazine size may have changed.
1823 * If so, free the magazine and try again.
1825 if (ccp->cc_magsize != mtp->mt_magsize) {
1826 (void) mutex_unlock(&ccp->cc_lock);
1827 _umem_cache_free(mtp->mt_cache, emp);
1828 (void) mutex_lock(&ccp->cc_lock);
1833 * We got a magazine of the right size. Add it to
1834 * the depot and try the whole dance again.
1836 umem_depot_free(cp, &cp->cache_empty, emp);
1841 * We couldn't allocate an empty magazine,
1842 * so fall through to the slab layer.
1846 (void) mutex_unlock(&ccp->cc_lock);
1849 * We couldn't free our constructed object to the magazine layer,
1850 * so apply its destructor and free it to the slab layer.
1851 * Note that if UMF_BUFTAG is in effect, umem_cache_free_debug()
1852 * will have already applied the destructor.
1854 if (!(cp->cache_flags & UMF_BUFTAG) && cp->cache_destructor != NULL)
1855 cp->cache_destructor(buf, cp->cache_private);
1857 umem_slab_free(cp, buf);
1860 #pragma weak umem_zalloc = _umem_zalloc
1862 _umem_zalloc(size_t size, int umflag)
1864 size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
1868 if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
1869 umem_cache_t *cp = umem_alloc_table[index];
1870 buf = _umem_cache_alloc(cp, umflag);
1872 if (cp->cache_flags & UMF_BUFTAG) {
1873 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1874 ((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE;
1875 ((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size);
1878 } else if (umem_alloc_retry(cp, umflag))
1881 buf = _umem_alloc(size, umflag); /* handles failure */
1888 #pragma weak umem_alloc = _umem_alloc
1890 _umem_alloc(size_t size, int umflag)
1892 size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
1895 if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
1896 umem_cache_t *cp = umem_alloc_table[index];
1897 buf = _umem_cache_alloc(cp, umflag);
1898 if ((cp->cache_flags & UMF_BUFTAG) && buf != NULL) {
1899 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1900 ((uint8_t *)buf)[size] = UMEM_REDZONE_BYTE;
1901 ((uint32_t *)btp)[1] = UMEM_SIZE_ENCODE(size);
1903 if (buf == NULL && umem_alloc_retry(cp, umflag))
1904 goto umem_alloc_retry;
1909 if (umem_oversize_arena == NULL) {
1911 ASSERT(umem_oversize_arena != NULL);
1915 buf = vmem_alloc(umem_oversize_arena, size, UMEM_VMFLAGS(umflag));
1917 umem_log_event(umem_failure_log, NULL, NULL, (void *)size);
1918 if (umem_alloc_retry(NULL, umflag))
1919 goto umem_alloc_retry;
1924 #pragma weak umem_alloc_align = _umem_alloc_align
1926 _umem_alloc_align(size_t size, size_t align, int umflag)
1932 if ((align & (align - 1)) != 0)
1934 if (align < UMEM_ALIGN)
1937 umem_alloc_align_retry:
1938 if (umem_memalign_arena == NULL) {
1940 ASSERT(umem_oversize_arena != NULL);
1944 buf = vmem_xalloc(umem_memalign_arena, size, align, 0, 0, NULL, NULL,
1945 UMEM_VMFLAGS(umflag));
1947 umem_log_event(umem_failure_log, NULL, NULL, (void *)size);
1948 if (umem_alloc_retry(NULL, umflag))
1949 goto umem_alloc_align_retry;
1954 #pragma weak umem_free = _umem_free
1956 _umem_free(void *buf, size_t size)
1958 size_t index = (size - 1) >> UMEM_ALIGN_SHIFT;
1960 if (index < UMEM_MAXBUF >> UMEM_ALIGN_SHIFT) {
1961 umem_cache_t *cp = umem_alloc_table[index];
1962 if (cp->cache_flags & UMF_BUFTAG) {
1963 umem_buftag_t *btp = UMEM_BUFTAG(cp, buf);
1964 uint32_t *ip = (uint32_t *)btp;
1965 if (ip[1] != UMEM_SIZE_ENCODE(size)) {
1966 if (*(uint64_t *)buf == UMEM_FREE_PATTERN) {
1967 umem_error(UMERR_DUPFREE, cp, buf);
1970 if (UMEM_SIZE_VALID(ip[1])) {
1971 ip[0] = UMEM_SIZE_ENCODE(size);
1972 umem_error(UMERR_BADSIZE, cp, buf);
1974 umem_error(UMERR_REDZONE, cp, buf);
1978 if (((uint8_t *)buf)[size] != UMEM_REDZONE_BYTE) {
1979 umem_error(UMERR_REDZONE, cp, buf);
1982 btp->bt_redzone = UMEM_REDZONE_PATTERN;
1984 _umem_cache_free(cp, buf);
1986 if (buf == NULL && size == 0)
1988 vmem_free(umem_oversize_arena, buf, size);
1992 #pragma weak umem_free_align = _umem_free_align
1994 _umem_free_align(void *buf, size_t size)
1996 if (buf == NULL && size == 0)
1998 vmem_xfree(umem_memalign_arena, buf, size);
2002 umem_firewall_va_alloc(vmem_t *vmp, size_t size, int vmflag)
2004 size_t realsize = size + vmp->vm_quantum;
2007 * Annoying edge case: if 'size' is just shy of ULONG_MAX, adding
2008 * vm_quantum will cause integer wraparound. Check for this, and
2009 * blow off the firewall page in this case. Note that such a
2010 * giant allocation (the entire address space) can never be
2011 * satisfied, so it will either fail immediately (VM_NOSLEEP)
2012 * or sleep forever (VM_SLEEP). Thus, there is no need for a
2013 * corresponding check in umem_firewall_va_free().
2015 if (realsize < size)
2018 return (vmem_alloc(vmp, realsize, vmflag | VM_NEXTFIT));
2022 umem_firewall_va_free(vmem_t *vmp, void *addr, size_t size)
2024 vmem_free(vmp, addr, size + vmp->vm_quantum);
2028 * Reclaim all unused memory from a cache.
2031 umem_cache_reap(umem_cache_t *cp)
2034 * Ask the cache's owner to free some memory if possible.
2035 * The idea is to handle things like the inode cache, which
2036 * typically sits on a bunch of memory that it doesn't truly
2037 * *need*. Reclaim policy is entirely up to the owner; this
2038 * callback is just an advisory plea for help.
2040 if (cp->cache_reclaim != NULL)
2041 cp->cache_reclaim(cp->cache_private);
2043 umem_depot_ws_reap(cp);
2047 * Purge all magazines from a cache and set its magazine limit to zero.
2048 * All calls are serialized by being done by the update thread, except for
2049 * the final call from umem_cache_destroy().
2052 umem_cache_magazine_purge(umem_cache_t *cp)
2054 umem_cpu_cache_t *ccp;
2055 umem_magazine_t *mp, *pmp;
2056 int rounds, prounds, cpu_seqid;
2058 ASSERT(cp->cache_next == NULL || IN_UPDATE());
2060 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
2061 ccp = &cp->cache_cpu[cpu_seqid];
2063 (void) mutex_lock(&ccp->cc_lock);
2064 mp = ccp->cc_loaded;
2065 pmp = ccp->cc_ploaded;
2066 rounds = ccp->cc_rounds;
2067 prounds = ccp->cc_prounds;
2068 ccp->cc_loaded = NULL;
2069 ccp->cc_ploaded = NULL;
2070 ccp->cc_rounds = -1;
2071 ccp->cc_prounds = -1;
2072 ccp->cc_magsize = 0;
2073 (void) mutex_unlock(&ccp->cc_lock);
2076 umem_magazine_destroy(cp, mp, rounds);
2078 umem_magazine_destroy(cp, pmp, prounds);
2082 * Updating the working set statistics twice in a row has the
2083 * effect of setting the working set size to zero, so everything
2084 * is eligible for reaping.
2086 umem_depot_ws_update(cp);
2087 umem_depot_ws_update(cp);
2089 umem_depot_ws_reap(cp);
2093 * Enable per-cpu magazines on a cache.
2096 umem_cache_magazine_enable(umem_cache_t *cp)
2100 if (cp->cache_flags & UMF_NOMAGAZINE)
2103 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
2104 umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
2105 (void) mutex_lock(&ccp->cc_lock);
2106 ccp->cc_magsize = cp->cache_magtype->mt_magsize;
2107 (void) mutex_unlock(&ccp->cc_lock);
2113 * Recompute a cache's magazine size. The trade-off is that larger magazines
2114 * provide a higher transfer rate with the depot, while smaller magazines
2115 * reduce memory consumption. Magazine resizing is an expensive operation;
2116 * it should not be done frequently.
2118 * Changes to the magazine size are serialized by only having one thread
2119 * (the update thread) doing updates.
2121 * Note: at present this only grows the magazine size. It might be useful
2122 * to allow shrinkage too.
2125 umem_cache_magazine_resize(umem_cache_t *cp)
2127 umem_magtype_t *mtp = cp->cache_magtype;
2129 ASSERT(IN_UPDATE());
2131 if (cp->cache_chunksize < mtp->mt_maxbuf) {
2132 umem_cache_magazine_purge(cp);
2133 (void) mutex_lock(&cp->cache_depot_lock);
2134 cp->cache_magtype = ++mtp;
2135 cp->cache_depot_contention_prev =
2136 cp->cache_depot_contention + INT_MAX;
2137 (void) mutex_unlock(&cp->cache_depot_lock);
2138 umem_cache_magazine_enable(cp);
2143 * Rescale a cache's hash table, so that the table size is roughly the
2144 * cache size. We want the average lookup time to be extremely small.
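 *
 * For example, given the computation below, a cache holding roughly 10000
 * buffers gets a 1 << (highbit(3 * 10000 + 4) - 2) = 8192-bucket table,
 * keeping the average hash chain just over one buffer long.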
2147 umem_hash_rescale(umem_cache_t *cp)
2149 umem_bufctl_t **old_table, **new_table, *bcp;
2150 size_t old_size, new_size, h;
2152 ASSERT(IN_UPDATE());
2154 new_size = MAX(UMEM_HASH_INITIAL,
2155 1 << (highbit(3 * cp->cache_buftotal + 4) - 2));
2156 old_size = cp->cache_hash_mask + 1;
2158 if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
2161 new_table = vmem_alloc(umem_hash_arena, new_size * sizeof (void *),
2163 if (new_table == NULL)
2165 bzero(new_table, new_size * sizeof (void *));
2167 (void) mutex_lock(&cp->cache_lock);
2169 old_size = cp->cache_hash_mask + 1;
2170 old_table = cp->cache_hash_table;
2172 cp->cache_hash_mask = new_size - 1;
2173 cp->cache_hash_table = new_table;
2174 cp->cache_rescale++;
2176 for (h = 0; h < old_size; h++) {
2178 while (bcp != NULL) {
2179 void *addr = bcp->bc_addr;
2180 umem_bufctl_t *next_bcp = bcp->bc_next;
2181 umem_bufctl_t **hash_bucket = UMEM_HASH(cp, addr);
2182 bcp->bc_next = *hash_bucket;
2188 (void) mutex_unlock(&cp->cache_lock);
2190 vmem_free(umem_hash_arena, old_table, old_size * sizeof (void *));
/*
 * Perform periodic maintenance on a cache: hash rescaling,
 * depot working-set update, and magazine resizing.
 */
void
umem_cache_update(umem_cache_t *cp)
{
	int update_flags = 0;

	ASSERT(MUTEX_HELD(&umem_cache_lock));

	/*
	 * If the cache has become much larger or smaller than its hash table,
	 * fire off a request to rescale the hash table.
	 */
	(void) mutex_lock(&cp->cache_lock);

	if ((cp->cache_flags & UMF_HASH) &&
	    (cp->cache_buftotal > (cp->cache_hash_mask << 1) ||
	    (cp->cache_buftotal < (cp->cache_hash_mask >> 1) &&
	    cp->cache_hash_mask > UMEM_HASH_INITIAL)))
		update_flags |= UMU_HASH_RESCALE;

	(void) mutex_unlock(&cp->cache_lock);

	/*
	 * Update the depot working set statistics.
	 */
	umem_depot_ws_update(cp);

	/*
	 * If there's a lot of contention in the depot,
	 * increase the magazine size.
	 */
	(void) mutex_lock(&cp->cache_depot_lock);

	if (cp->cache_chunksize < cp->cache_magtype->mt_maxbuf &&
	    (int)(cp->cache_depot_contention -
	    cp->cache_depot_contention_prev) > umem_depot_contention)
		update_flags |= UMU_MAGAZINE_RESIZE;

	cp->cache_depot_contention_prev = cp->cache_depot_contention;

	(void) mutex_unlock(&cp->cache_depot_lock);

	if (update_flags)
		umem_add_update(cp, update_flags);
}
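/*
 * Illustrative sketch (not part of the implementation), assuming a
 * 64-bucket initial table: a hashed cache is queued for UMU_HASH_RESCALE
 * once cache_buftotal exceeds roughly twice the bucket count (2 * 63 = 126
 * buffers for the initial table), and a cache whose table has since grown
 * is queued again once its population drops below roughly half the bucket
 * count.  Likewise, if the depot lock was contended more than
 * umem_depot_contention times since the previous update pass, the cache is
 * queued for UMU_MAGAZINE_RESIZE.  Both requests are handed to
 * umem_add_update() and carried out asynchronously by
 * umem_process_updates().
 */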
/*
 * Runs all pending updates.
 *
 * The update lock must be held on entrance, and will be held on exit.
 */
void
umem_process_updates(void)
{
	ASSERT(MUTEX_HELD(&umem_update_lock));

	while (umem_null_cache.cache_unext != &umem_null_cache) {
		int notify = 0;
		umem_cache_t *cp = umem_null_cache.cache_unext;

		cp->cache_uprev->cache_unext = cp->cache_unext;
		cp->cache_unext->cache_uprev = cp->cache_uprev;
		cp->cache_uprev = cp->cache_unext = NULL;

		ASSERT(!(cp->cache_uflags & UMU_ACTIVE));

		while (cp->cache_uflags) {
			int uflags = (cp->cache_uflags |= UMU_ACTIVE);
			(void) mutex_unlock(&umem_update_lock);

			/*
			 * The order here is important.  Each step can speed up
			 * a subsequent step, but not the reverse.
			 */
			if (uflags & UMU_HASH_RESCALE)
				umem_hash_rescale(cp);

			if (uflags & UMU_MAGAZINE_RESIZE)
				umem_cache_magazine_resize(cp);

			if (uflags & UMU_REAP)
				umem_cache_reap(cp);

			(void) mutex_lock(&umem_update_lock);

			/*
			 * check if anyone has requested notification
			 */
			if (cp->cache_uflags & UMU_NOTIFY) {
				uflags |= UMU_NOTIFY;
				notify = 1;
			}
			cp->cache_uflags &= ~uflags;
		}
		if (notify)
			(void) cond_broadcast(&umem_update_cv);
	}
}
#ifndef UMEM_STANDALONE
static void
umem_st_update(void)
{
	ASSERT(MUTEX_HELD(&umem_update_lock));
	ASSERT(umem_update_thr == 0 && umem_st_update_thr == 0);

	umem_st_update_thr = thr_self();

	(void) mutex_unlock(&umem_update_lock);

	umem_cache_applyall(umem_cache_update);

	(void) mutex_lock(&umem_update_lock);

	umem_process_updates();	/* does all of the requested work */

	umem_reap_next = gethrtime() +
	    (hrtime_t)umem_reap_interval * NANOSEC;

	umem_reaping = UMEM_REAP_DONE;

	umem_st_update_thr = 0;
}
#endif
/*
 * Reclaim all unused memory from all caches.  Called from vmem when memory
 * gets tight.  Must be called with no locks held.
 *
 * This just requests a reap on all caches, and notifies the update thread.
 */
void
umem_reap(void)
{
#ifndef UMEM_STANDALONE
	extern int __nthreads(void);
#endif

	if (umem_ready != UMEM_READY || umem_reaping != UMEM_REAP_DONE ||
	    gethrtime() < umem_reap_next)
		return;

	(void) mutex_lock(&umem_update_lock);

	if (umem_reaping != UMEM_REAP_DONE || gethrtime() < umem_reap_next) {
		(void) mutex_unlock(&umem_update_lock);
		return;
	}
	umem_reaping = UMEM_REAP_ADDING;	/* lock out other reaps */

	(void) mutex_unlock(&umem_update_lock);

	umem_updateall(UMU_REAP);

	(void) mutex_lock(&umem_update_lock);

	umem_reaping = UMEM_REAP_ACTIVE;

	/* Standalone is single-threaded */
#ifndef UMEM_STANDALONE
	if (umem_update_thr == 0) {
		/*
		 * The update thread does not exist.  If the process is
		 * multi-threaded, create it.  If not, or the creation fails,
		 * do the update processing inline.
		 */
		ASSERT(umem_st_update_thr == 0);

		if (__nthreads() <= 1 || umem_create_update_thread() == 0)
			umem_st_update();
	}
#endif

	(void) cond_broadcast(&umem_update_cv);	/* wake up the update thread */

	(void) mutex_unlock(&umem_update_lock);
}
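/*
 * Illustrative sketch (not part of the implementation): a caller that has
 * just released a large burst of cached objects can ask for idle magazines
 * and slabs to be returned to the backing arenas:
 *
 *	umem_cache_free(my_cache, obj);		(the last of many frees)
 *	umem_reap();				(queue an asynchronous reap)
 *
 * The call is cheap: it only queues UMU_REAP requests and wakes the update
 * thread, and it is rate-limited by umem_reap_interval, so back-to-back
 * calls are effectively no-ops.  my_cache and obj are hypothetical names.
 */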
umem_cache_t *
umem_cache_create(
	char *name,		/* descriptive name for this cache */
	size_t bufsize,		/* size of the objects it manages */
	size_t align,		/* required object alignment */
	umem_constructor_t *constructor, /* object constructor */
	umem_destructor_t *destructor, /* object destructor */
	umem_reclaim_t *reclaim, /* memory reclaim callback */
	void *private,		/* pass-thru arg for constr/destr/reclaim */
	vmem_t *vmp,		/* vmem source for slab allocation */
	int cflags)		/* cache creation flags */
{
	int cpu_seqid;
	size_t chunksize;
	umem_cache_t *cp, *cnext, *cprev;
	umem_magtype_t *mtp;
	size_t csize;
	size_t phase;

	/*
	 * The init thread is allowed to create internal and quantum caches.
	 *
	 * Other threads must wait until initialization is complete.
	 */
	if (umem_init_thr == thr_self())
		ASSERT((cflags & (UMC_INTERNAL | UMC_QCACHE)) != 0);
	else {
		ASSERT(!(cflags & UMC_INTERNAL));
		if (umem_ready != UMEM_READY && umem_init() == 0) {
			errno = EAGAIN;
			return (NULL);
		}
	}

	csize = UMEM_CACHE_SIZE(umem_max_ncpus);
	phase = P2NPHASE(csize, UMEM_CPU_CACHE_SIZE);

	if (vmp == NULL)
		vmp = umem_default_arena;

	ASSERT(P2PHASE(phase, UMEM_ALIGN) == 0);
	/*
	 * Check that the arguments are reasonable
	 */
	if ((align & (align - 1)) != 0 || align > vmp->vm_quantum ||
	    ((cflags & UMC_NOHASH) && (cflags & UMC_NOTOUCH)) ||
	    name == NULL || bufsize == 0) {
		errno = EINVAL;
		return (NULL);
	}
	/*
	 * If align == 0, we set it to the minimum required alignment.
	 *
	 * If align < UMEM_ALIGN, we round it up to UMEM_ALIGN, unless
	 * UMC_NOTOUCH was passed.
	 */
	if (align == 0) {
		if (P2ROUNDUP(bufsize, UMEM_ALIGN) >= UMEM_SECOND_ALIGN)
			align = UMEM_SECOND_ALIGN;
		else
			align = UMEM_ALIGN;
	} else if (align < UMEM_ALIGN && (cflags & UMC_NOTOUCH) == 0)
		align = UMEM_ALIGN;
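	/*
	 * Illustrative examples of the defaulting above (assuming
	 * UMEM_ALIGN == 8 and UMEM_SECOND_ALIGN == 64): a 24-byte cache
	 * created with align == 0 gets align = 8, a 512-byte cache gets
	 * align = 64, and an explicit align of 4 is rounded up to 8 unless
	 * UMC_NOTOUCH was passed.
	 */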
	/*
	 * Get a umem_cache structure.  We arrange that cp->cache_cpu[]
	 * is aligned on a UMEM_CPU_CACHE_SIZE boundary to prevent
	 * false sharing of per-CPU data.
	 */
	cp = vmem_xalloc(umem_cache_arena, csize, UMEM_CPU_CACHE_SIZE, phase,
	    0, NULL, NULL, VM_NOSLEEP);
	if (cp == NULL) {
		errno = EAGAIN;
		return (NULL);
	}

	bzero(cp, csize);

	(void) mutex_lock(&umem_flags_lock);
	if (umem_flags & UMF_RANDOMIZE)
		umem_flags = (((umem_flags | ~UMF_RANDOM) + 1) & UMF_RANDOM) |
		    UMF_RANDOMIZE;
	cp->cache_flags = umem_flags | (cflags & UMF_DEBUG);
	(void) mutex_unlock(&umem_flags_lock);
	/*
	 * Make sure all the various flags are reasonable.
	 */
	if (cp->cache_flags & UMF_LITE) {
		if (bufsize >= umem_lite_minsize &&
		    align <= umem_lite_maxalign &&
		    P2PHASE(bufsize, umem_lite_maxalign) != 0) {
			cp->cache_flags |= UMF_BUFTAG;
			cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);
		} else {
			cp->cache_flags &= ~UMF_DEBUG;
		}
	}
2479 if ((cflags & UMC_QCACHE) && (cp->cache_flags & UMF_AUDIT))
2480 cp->cache_flags |= UMF_NOMAGAZINE;
2482 if (cflags & UMC_NODEBUG)
2483 cp->cache_flags &= ~UMF_DEBUG;
2485 if (cflags & UMC_NOTOUCH)
2486 cp->cache_flags &= ~UMF_TOUCH;
2488 if (cflags & UMC_NOHASH)
2489 cp->cache_flags &= ~(UMF_AUDIT | UMF_FIREWALL);
2491 if (cflags & UMC_NOMAGAZINE)
2492 cp->cache_flags |= UMF_NOMAGAZINE;
2494 if ((cp->cache_flags & UMF_AUDIT) && !(cflags & UMC_NOTOUCH))
2495 cp->cache_flags |= UMF_REDZONE;
2497 if ((cp->cache_flags & UMF_BUFTAG) && bufsize >= umem_minfirewall &&
2498 !(cp->cache_flags & UMF_LITE) && !(cflags & UMC_NOHASH))
2499 cp->cache_flags |= UMF_FIREWALL;
2501 if (vmp != umem_default_arena || umem_firewall_arena == NULL)
2502 cp->cache_flags &= ~UMF_FIREWALL;
2504 if (cp->cache_flags & UMF_FIREWALL) {
2505 cp->cache_flags &= ~UMF_BUFTAG;
2506 cp->cache_flags |= UMF_NOMAGAZINE;
2507 ASSERT(vmp == umem_default_arena);
2508 vmp = umem_firewall_arena;
2512 * Set cache properties.
2514 (void) strncpy(cp->cache_name, name, sizeof (cp->cache_name) - 1);
2515 cp->cache_bufsize = bufsize;
2516 cp->cache_align = align;
2517 cp->cache_constructor = constructor;
2518 cp->cache_destructor = destructor;
2519 cp->cache_reclaim = reclaim;
2520 cp->cache_private = private;
2521 cp->cache_arena = vmp;
2522 cp->cache_cflags = cflags;
2523 cp->cache_cpu_mask = umem_cpu_mask;
	/*
	 * Determine the chunk size.
	 */
	chunksize = bufsize;

	if (align >= UMEM_ALIGN) {
		chunksize = P2ROUNDUP(chunksize, UMEM_ALIGN);
		cp->cache_bufctl = chunksize - UMEM_ALIGN;
	}

	if (cp->cache_flags & UMF_BUFTAG) {
		cp->cache_bufctl = chunksize;
		cp->cache_buftag = chunksize;
		chunksize += sizeof (umem_buftag_t);
	}

	if (cp->cache_flags & UMF_DEADBEEF) {
		cp->cache_verify = MIN(cp->cache_buftag, umem_maxverify);
		if (cp->cache_flags & UMF_LITE)
			cp->cache_verify = MIN(cp->cache_verify, UMEM_ALIGN);
	}

	cp->cache_contents = MIN(cp->cache_bufctl, umem_content_maxsave);

	cp->cache_chunksize = chunksize = P2ROUNDUP(chunksize, align);

	if (chunksize < bufsize) {
		errno = ENOMEM;
		goto fail;
	}
	/*
	 * Now that we know the chunk size, determine the optimal slab size.
	 */
	if (vmp == umem_firewall_arena) {
		cp->cache_slabsize = P2ROUNDUP(chunksize, vmp->vm_quantum);
		cp->cache_mincolor = cp->cache_slabsize - chunksize;
		cp->cache_maxcolor = cp->cache_mincolor;
		cp->cache_flags |= UMF_HASH;
		ASSERT(!(cp->cache_flags & UMF_BUFTAG));
	} else if ((cflags & UMC_NOHASH) || (!(cflags & UMC_NOTOUCH) &&
	    !(cp->cache_flags & UMF_AUDIT) &&
	    chunksize < vmp->vm_quantum / UMEM_VOID_FRACTION)) {
		cp->cache_slabsize = vmp->vm_quantum;
		cp->cache_mincolor = 0;
		cp->cache_maxcolor =
		    (cp->cache_slabsize - sizeof (umem_slab_t)) % chunksize;

		if (chunksize + sizeof (umem_slab_t) > cp->cache_slabsize) {
			errno = EINVAL;
			goto fail;
		}
		ASSERT(!(cp->cache_flags & UMF_AUDIT));
	} else {
		size_t chunks, bestfit, waste, slabsize;
		size_t minwaste = LONG_MAX;

		for (chunks = 1; chunks <= UMEM_VOID_FRACTION; chunks++) {
			slabsize = P2ROUNDUP(chunksize * chunks,
			    vmp->vm_quantum);
			/*
			 * check for overflow
			 */
			if ((slabsize / chunks) < chunksize) {
				errno = ENOMEM;
				goto fail;
			}
			chunks = slabsize / chunksize;
			waste = (slabsize % chunksize) / chunks;
			if (waste < minwaste) {
				minwaste = waste;
				bestfit = slabsize;
			}
		}
		if (cflags & UMC_QCACHE)
			bestfit = MAX(1 << highbit(3 * vmp->vm_qcache_max), 64);
		cp->cache_slabsize = bestfit;
		cp->cache_mincolor = 0;
		cp->cache_maxcolor = bestfit % chunksize;
		cp->cache_flags |= UMF_HASH;
	}
2607 if (cp->cache_flags & UMF_HASH) {
2608 ASSERT(!(cflags & UMC_NOHASH));
2609 cp->cache_bufctl_cache = (cp->cache_flags & UMF_AUDIT) ?
2610 umem_bufctl_audit_cache : umem_bufctl_cache;
2613 if (cp->cache_maxcolor >= vmp->vm_quantum)
2614 cp->cache_maxcolor = vmp->vm_quantum - 1;
2616 cp->cache_color = cp->cache_mincolor;
2619 * Initialize the rest of the slab layer.
2621 (void) mutex_init(&cp->cache_lock, USYNC_THREAD, NULL);
2623 cp->cache_freelist = &cp->cache_nullslab;
2624 cp->cache_nullslab.slab_cache = cp;
2625 cp->cache_nullslab.slab_refcnt = -1;
2626 cp->cache_nullslab.slab_next = &cp->cache_nullslab;
2627 cp->cache_nullslab.slab_prev = &cp->cache_nullslab;
2629 if (cp->cache_flags & UMF_HASH) {
2630 cp->cache_hash_table = vmem_alloc(umem_hash_arena,
2631 UMEM_HASH_INITIAL * sizeof (void *), VM_NOSLEEP);
2632 if (cp->cache_hash_table == NULL) {
2636 bzero(cp->cache_hash_table,
2637 UMEM_HASH_INITIAL * sizeof (void *));
2638 cp->cache_hash_mask = UMEM_HASH_INITIAL - 1;
2639 cp->cache_hash_shift = highbit((ulong_t)chunksize) - 1;
2643 * Initialize the depot.
2645 (void) mutex_init(&cp->cache_depot_lock, USYNC_THREAD, NULL);
2647 for (mtp = umem_magtype; chunksize <= mtp->mt_minbuf; mtp++)
2650 cp->cache_magtype = mtp;
2653 * Initialize the CPU layer.
2655 for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++) {
2656 umem_cpu_cache_t *ccp = &cp->cache_cpu[cpu_seqid];
2657 (void) mutex_init(&ccp->cc_lock, USYNC_THREAD, NULL);
2658 ccp->cc_flags = cp->cache_flags;
2659 ccp->cc_rounds = -1;
2660 ccp->cc_prounds = -1;
2664 * Add the cache to the global list. This makes it visible
2665 * to umem_update(), so the cache must be ready for business.
2667 (void) mutex_lock(&umem_cache_lock);
2668 cp->cache_next = cnext = &umem_null_cache;
2669 cp->cache_prev = cprev = umem_null_cache.cache_prev;
2670 cnext->cache_prev = cp;
2671 cprev->cache_next = cp;
2672 (void) mutex_unlock(&umem_cache_lock);
	if (umem_ready == UMEM_READY)
		umem_cache_magazine_enable(cp);

	return (cp);

fail_lock:
	(void) mutex_destroy(&cp->cache_lock);
fail:
	vmem_xfree(umem_cache_arena, cp, csize);
	return (NULL);
}
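/*
 * Usage sketch (illustrative only; my_obj_t, my_obj_ctor and my_obj_dtor
 * are hypothetical):
 *
 *	umem_cache_t *ocp = umem_cache_create("my_obj_cache",
 *	    sizeof (my_obj_t), 0, my_obj_ctor, my_obj_dtor, NULL, NULL,
 *	    NULL, 0);
 *	if (ocp == NULL)
 *		... creation failed; errno says why (e.g. EINVAL, EAGAIN) ...
 *
 *	my_obj_t *op = umem_cache_alloc(ocp, UMEM_DEFAULT);
 *	...
 *	umem_cache_free(ocp, op);
 *	umem_cache_destroy(ocp);	(the cache must be empty by now)
 *
 * Passing NULL for the vmem source selects umem_default_arena (see above),
 * and align == 0 selects the minimum required alignment.
 */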
void
umem_cache_destroy(umem_cache_t *cp)
{
	int cpu_seqid;

	/*
	 * Remove the cache from the global cache list so that no new updates
	 * will be scheduled on its behalf, wait for any pending tasks to
	 * complete, purge the cache, and then destroy it.
	 */
	(void) mutex_lock(&umem_cache_lock);
	cp->cache_prev->cache_next = cp->cache_next;
	cp->cache_next->cache_prev = cp->cache_prev;
	cp->cache_prev = cp->cache_next = NULL;
	(void) mutex_unlock(&umem_cache_lock);

	umem_remove_updates(cp);

	umem_cache_magazine_purge(cp);

	(void) mutex_lock(&cp->cache_lock);
	if (cp->cache_buftotal != 0)
		log_message("umem_cache_destroy: '%s' (%p) not empty\n",
		    cp->cache_name, (void *)cp);
	cp->cache_reclaim = NULL;
	/*
	 * The cache is now dead.  There should be no further activity.
	 * We enforce this by setting land mines in the constructor and
	 * destructor routines that induce a segmentation fault if invoked.
	 */
	cp->cache_constructor = (umem_constructor_t *)1;
	cp->cache_destructor = (umem_destructor_t *)2;
	(void) mutex_unlock(&cp->cache_lock);

	if (cp->cache_hash_table != NULL)
		vmem_free(umem_hash_arena, cp->cache_hash_table,
		    (cp->cache_hash_mask + 1) * sizeof (void *));

	for (cpu_seqid = 0; cpu_seqid < umem_max_ncpus; cpu_seqid++)
		(void) mutex_destroy(&cp->cache_cpu[cpu_seqid].cc_lock);

	(void) mutex_destroy(&cp->cache_depot_lock);
	(void) mutex_destroy(&cp->cache_lock);

	vmem_free(umem_cache_arena, cp, UMEM_CACHE_SIZE(umem_max_ncpus));
}
void
umem_alloc_sizes_clear(void)
{
	int i;

	umem_alloc_sizes[0] = UMEM_MAXBUF;
	for (i = 1; i < NUM_ALLOC_SIZES; i++)
		umem_alloc_sizes[i] = 0;
}
void
umem_alloc_sizes_add(size_t size_arg)
{
	int i, j;
	size_t size = size_arg;

	if (size == 0) {
		log_message("size_add: cannot add zero-sized cache\n",
		    size);
		return;
	}

	if (size > UMEM_MAXBUF) {
		log_message("size_add: %ld > %d, cannot add\n", size,
		    UMEM_MAXBUF);
		return;
	}

	if (umem_alloc_sizes[NUM_ALLOC_SIZES - 1] != 0) {
		log_message("size_add: no space in alloc_table for %d\n",
		    size);
		return;
	}

	if (P2PHASE(size, UMEM_ALIGN) != 0) {
		size = P2ROUNDUP(size, UMEM_ALIGN);
		log_message("size_add: rounding %d up to %d\n", size_arg,
		    size);
	}

	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
		int cur = umem_alloc_sizes[i];

		if (cur == size) {
			log_message("size_add: %ld already in table\n",
			    size);
			return;
		}
		if (cur > size)
			break;
	}

	for (j = NUM_ALLOC_SIZES - 1; j > i; j--)
		umem_alloc_sizes[j] = umem_alloc_sizes[j-1];
	umem_alloc_sizes[i] = size;
}
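/*
 * Illustrative sketch (not part of the implementation): adding 100 via
 * umem_alloc_sizes_add() first rounds it up to P2ROUNDUP(100, UMEM_ALIGN)
 * == 104, then inserts 104 into umem_alloc_sizes[] in ascending order,
 * shifting the larger entries down one slot.  A zero size, a duplicate, or
 * a size larger than UMEM_MAXBUF is rejected with a log message instead.
 */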
void
umem_alloc_sizes_remove(size_t size)
{
	int i;

	if (size == UMEM_MAXBUF) {
		log_message("size_remove: cannot remove %ld\n", size);
		return;
	}

	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
		int cur = umem_alloc_sizes[i];

		if (cur == size)
			break;
		else if (cur > size || cur == 0) {
			log_message("size_remove: %ld not found in table\n",
			    size);
			return;
		}
	}

	for (; i + 1 < NUM_ALLOC_SIZES; i++)
		umem_alloc_sizes[i] = umem_alloc_sizes[i+1];
	umem_alloc_sizes[i] = 0;
}
2816 umem_cache_init(void)
2819 size_t size, max_size;
2821 umem_magtype_t *mtp;
2822 char name[UMEM_CACHE_NAMELEN + 1];
2823 umem_cache_t *umem_alloc_caches[NUM_ALLOC_SIZES];
2825 for (i = 0; i < sizeof (umem_magtype) / sizeof (*mtp); i++) {
2826 mtp = &umem_magtype[i];
2827 (void) snprintf(name, sizeof (name), "umem_magazine_%d",
2829 mtp->mt_cache = umem_cache_create(name,
2830 (mtp->mt_magsize + 1) * sizeof (void *),
2831 mtp->mt_align, NULL, NULL, NULL, NULL,
2832 umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
2833 if (mtp->mt_cache == NULL)
2837 umem_slab_cache = umem_cache_create("umem_slab_cache",
2838 sizeof (umem_slab_t), 0, NULL, NULL, NULL, NULL,
2839 umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
2841 if (umem_slab_cache == NULL)
2844 umem_bufctl_cache = umem_cache_create("umem_bufctl_cache",
2845 sizeof (umem_bufctl_t), 0, NULL, NULL, NULL, NULL,
2846 umem_internal_arena, UMC_NOHASH | UMC_INTERNAL);
2848 if (umem_bufctl_cache == NULL)
2852 * The size of the umem_bufctl_audit structure depends upon
2853 * umem_stack_depth. See umem_impl.h for details on the size
2857 size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth);
2858 max_size = UMEM_BUFCTL_AUDIT_MAX_SIZE;
2860 if (size > max_size) { /* too large -- truncate */
2861 int max_frames = UMEM_MAX_STACK_DEPTH;
2863 ASSERT(UMEM_BUFCTL_AUDIT_SIZE_DEPTH(max_frames) <= max_size);
2865 umem_stack_depth = max_frames;
2866 size = UMEM_BUFCTL_AUDIT_SIZE_DEPTH(umem_stack_depth);
2869 umem_bufctl_audit_cache = umem_cache_create("umem_bufctl_audit_cache",
2870 size, 0, NULL, NULL, NULL, NULL, umem_internal_arena,
2871 UMC_NOHASH | UMC_INTERNAL);
2873 if (umem_bufctl_audit_cache == NULL)
	if (vmem_backend & VMEM_BACKEND_MMAP)
		umem_va_arena = vmem_create("umem_va",
		    NULL, 0, pagesize,
		    vmem_alloc, vmem_free, heap_arena,
		    8 * pagesize, VM_NOSLEEP);
	else
		umem_va_arena = heap_arena;

	if (umem_va_arena == NULL)
		return (0);

	umem_default_arena = vmem_create("umem_default",
	    NULL, 0, pagesize,
	    heap_alloc, heap_free, umem_va_arena,
	    0, VM_NOSLEEP);

	if (umem_default_arena == NULL)
		return (0);
2896 * make sure the umem_alloc table initializer is correct
2898 i = sizeof (umem_alloc_table) / sizeof (*umem_alloc_table);
2899 ASSERT(umem_alloc_table[i - 1] == &umem_null_cache);
2902 * Create the default caches to back umem_alloc()
2904 for (i = 0; i < NUM_ALLOC_SIZES; i++) {
2905 size_t cache_size = umem_alloc_sizes[i];
2908 if (cache_size == 0)
2909 break; /* 0 terminates the list */
2912 * If they allocate a multiple of the coherency granularity,
2913 * they get a coherency-granularity-aligned address.
2915 if (IS_P2ALIGNED(cache_size, 64))
2917 if (IS_P2ALIGNED(cache_size, pagesize))
2919 (void) snprintf(name, sizeof (name), "umem_alloc_%lu",
2922 cp = umem_cache_create(name, cache_size, align,
2923 NULL, NULL, NULL, NULL, NULL, UMC_INTERNAL);
2927 umem_alloc_caches[i] = cp;
	/*
	 * Initialization cannot fail at this point.  Make the caches
	 * visible to umem_alloc() and friends.
	 */
	size = UMEM_ALIGN;
	for (i = 0; i < NUM_ALLOC_SIZES; i++) {
		size_t cache_size = umem_alloc_sizes[i];

		if (cache_size == 0)
			break;		/* 0 terminates the list */

		cp = umem_alloc_caches[i];

		while (size <= cache_size) {
			umem_alloc_table[(size - 1) >> UMEM_ALIGN_SHIFT] = cp;
			size += UMEM_ALIGN;
		}
	}
	ASSERT(size - UMEM_ALIGN == UMEM_MAXBUF);
	return (1);
}
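/*
 * Illustrative sketch (not part of the implementation): after the loop
 * above, umem_alloc_table[] maps every request size to the smallest cache
 * that can hold it.  For example, umem_alloc(100, ...) indexes slot
 * (100 - 1) >> UMEM_ALIGN_SHIFT == 12, which (with the default size table)
 * points at the umem_alloc_112 cache; a 112-byte request lands on the same
 * cache, while a 113-byte request moves up to the next size class.
 */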
/*
 * umem_startup() is called early on, and must be called explicitly if we're
 * the standalone version.
 */
#ifdef UMEM_STANDALONE
void
#else
#pragma init(umem_startup)
static void
#endif
umem_startup(caddr_t start, size_t len, size_t pagesize, caddr_t minstack,
    caddr_t maxstack)
{
2965 #ifdef UMEM_STANDALONE
2967 /* Standalone doesn't fork */
2969 umem_forkhandler_init(); /* register the fork handler */
2973 /* make lint happy */
2974 minstack = maxstack;
2977 #ifdef UMEM_STANDALONE
2978 umem_ready = UMEM_READY_STARTUP;
2979 umem_init_env_ready = 0;
2981 umem_min_stack = minstack;
2982 umem_max_stack = maxstack;
2984 nofail_callback = NULL;
2985 umem_slab_cache = NULL;
2986 umem_bufctl_cache = NULL;
2987 umem_bufctl_audit_cache = NULL;
2991 umem_internal_arena = NULL;
2992 umem_cache_arena = NULL;
2993 umem_hash_arena = NULL;
2994 umem_log_arena = NULL;
2995 umem_oversize_arena = NULL;
2996 umem_va_arena = NULL;
2997 umem_default_arena = NULL;
2998 umem_firewall_va_arena = NULL;
2999 umem_firewall_arena = NULL;
3000 umem_memalign_arena = NULL;
3001 umem_transaction_log = NULL;
3002 umem_content_log = NULL;
3003 umem_failure_log = NULL;
3004 umem_slab_log = NULL;
3007 umem_cpus = &umem_startup_cpu;
3008 umem_startup_cpu.cpu_cache_offset = UMEM_CACHE_SIZE(0);
3009 umem_startup_cpu.cpu_number = 0;
3011 bcopy(&umem_null_cache_template, &umem_null_cache,
3012 sizeof (umem_cache_t));
3014 for (idx = 0; idx < (UMEM_MAXBUF >> UMEM_ALIGN_SHIFT); idx++)
3015 umem_alloc_table[idx] = &umem_null_cache;
	/*
	 * Perform initialization specific to the way we've been compiled
	 * (library or standalone)
	 */
	umem_type_init(start, len, pagesize);
}

int
umem_init(void)		/* returns 0 on failure, 1 on success */
{
	size_t maxverify, minfirewall;
	size_t size;
	int idx;
	umem_cpu_t *new_cpus;

	vmem_t *memalign_arena, *oversize_arena;

	if (thr_self() != umem_init_thr) {
		/*
		 * The usual case -- non-recursive invocation of umem_init().
		 */
		(void) mutex_lock(&umem_init_lock);
3042 if (umem_ready != UMEM_READY_STARTUP) {
3044 * someone else beat us to initializing umem. Wait
3045 * for them to complete, then return.
3047 while (umem_ready == UMEM_READY_INITING) {
3050 (void) pthread_setcancelstate(
3051 PTHREAD_CANCEL_DISABLE, &cancel_state);
3052 (void) cond_wait(&umem_init_cv,
3054 (void) pthread_setcancelstate(
3055 cancel_state, NULL);
3057 ASSERT(umem_ready == UMEM_READY ||
3058 umem_ready == UMEM_READY_INIT_FAILED);
3059 (void) mutex_unlock(&umem_init_lock);
3060 return (umem_ready == UMEM_READY);
3063 ASSERT(umem_ready == UMEM_READY_STARTUP);
3064 ASSERT(umem_init_env_ready == 0);
3066 umem_ready = UMEM_READY_INITING;
3067 umem_init_thr = thr_self();
3069 (void) mutex_unlock(&umem_init_lock);
3070 umem_setup_envvars(0); /* can recurse -- see below */
3071 if (umem_init_env_ready) {
3073 * initialization was completed already
3075 ASSERT(umem_ready == UMEM_READY ||
3076 umem_ready == UMEM_READY_INIT_FAILED);
3077 ASSERT(umem_init_thr == 0);
3078 return (umem_ready == UMEM_READY);
	} else if (!umem_init_env_ready) {
		/*
		 * The umem_setup_envvars() call (above) makes calls into
		 * the dynamic linker and directly into user-supplied code.
		 * Since we cannot know what that code will do, we could be
		 * recursively invoked (by, say, a malloc() call in the code
		 * itself, or in a (C++) _init section it causes to be fired).
		 *
		 * This code is where we end up if such recursion occurs.  We
		 * first clean up any partial results in the envvar code, then
		 * proceed to finish initialization processing in the recursive
		 * call.  The original call will notice this, and return
		 * immediately.
		 */
		umem_setup_envvars(1);		/* clean up any partial state */
	} else {
		umem_panic(
		    "recursive allocation while initializing umem\n");
	}
	umem_init_env_ready = 1;
3102 * From this point until we finish, recursion into umem_init() will
3103 * cause a umem_panic().
3105 maxverify = minfirewall = ULONG_MAX;
3107 /* LINTED constant condition */
3108 if (sizeof (umem_cpu_cache_t) != UMEM_CPU_CACHE_SIZE) {
3109 umem_panic("sizeof (umem_cpu_cache_t) = %d, should be %d\n",
3110 sizeof (umem_cpu_cache_t), UMEM_CPU_CACHE_SIZE);
3113 umem_max_ncpus = umem_get_max_ncpus();
3116 * load tunables from environment
3118 umem_process_envvars();
	if (!(umem_flags & UMF_AUDIT))
		vmem_no_debug();

	heap_arena = vmem_heap_arena(&heap_alloc, &heap_free);

	pagesize = heap_arena->vm_quantum;
3133 umem_internal_arena = vmem_create("umem_internal", NULL, 0, pagesize,
3134 heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP);
3136 umem_default_arena = umem_internal_arena;
3138 if (umem_internal_arena == NULL)
3141 umem_cache_arena = vmem_create("umem_cache", NULL, 0, UMEM_ALIGN,
3142 vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP);
3144 umem_hash_arena = vmem_create("umem_hash", NULL, 0, UMEM_ALIGN,
3145 vmem_alloc, vmem_free, umem_internal_arena, 0, VM_NOSLEEP);
3147 umem_log_arena = vmem_create("umem_log", NULL, 0, UMEM_ALIGN,
3148 heap_alloc, heap_free, heap_arena, 0, VM_NOSLEEP);
3150 umem_firewall_va_arena = vmem_create("umem_firewall_va",
3152 umem_firewall_va_alloc, umem_firewall_va_free, heap_arena,
3155 if (umem_cache_arena == NULL || umem_hash_arena == NULL ||
3156 umem_log_arena == NULL || umem_firewall_va_arena == NULL)
3159 umem_firewall_arena = vmem_create("umem_firewall", NULL, 0, pagesize,
3160 heap_alloc, heap_free, umem_firewall_va_arena, 0,
3163 if (umem_firewall_arena == NULL)
3166 oversize_arena = vmem_create("umem_oversize", NULL, 0, pagesize,
3167 heap_alloc, heap_free, minfirewall < ULONG_MAX ?
3168 umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP);
3170 memalign_arena = vmem_create("umem_memalign", NULL, 0, UMEM_ALIGN,
3171 heap_alloc, heap_free, minfirewall < ULONG_MAX ?
3172 umem_firewall_va_arena : heap_arena, 0, VM_NOSLEEP);
3174 if (oversize_arena == NULL || memalign_arena == NULL)
3177 if (umem_max_ncpus > CPUHINT_MAX())
3178 umem_max_ncpus = CPUHINT_MAX();
3180 while ((umem_max_ncpus & (umem_max_ncpus - 1)) != 0)
3183 if (umem_max_ncpus == 0)
3186 size = umem_max_ncpus * sizeof (umem_cpu_t);
3187 new_cpus = vmem_alloc(umem_internal_arena, size, VM_NOSLEEP);
3188 if (new_cpus == NULL)
3191 bzero(new_cpus, size);
3192 for (idx = 0; idx < umem_max_ncpus; idx++) {
3193 new_cpus[idx].cpu_number = idx;
3194 new_cpus[idx].cpu_cache_offset = UMEM_CACHE_SIZE(idx);
3196 umem_cpus = new_cpus;
3197 umem_cpu_mask = (umem_max_ncpus - 1);
3199 if (umem_maxverify == 0)
3200 umem_maxverify = maxverify;
3202 if (umem_minfirewall == 0)
3203 umem_minfirewall = minfirewall;
	/*
	 * Set up updating and reaping
	 */
	umem_reap_next = gethrtime() + NANOSEC;

#ifndef UMEM_STANDALONE
	(void) gettimeofday(&umem_update_next, NULL);
#endif
	/*
	 * Set up logging -- failure here is okay, since it will just disable
	 * the logs
	 */
	umem_transaction_log = umem_log_init(umem_transaction_log_size);
	umem_content_log = umem_log_init(umem_content_log_size);
	umem_failure_log = umem_log_init(umem_failure_log_size);
	umem_slab_log = umem_log_init(umem_slab_log_size);
	/*
	 * Set up caches -- if successful, initialization cannot fail, since
	 * allocations from other threads can now succeed.
	 */
	if (umem_cache_init() == 0) {
		log_message("unable to create initial caches\n");
		goto fail;
	}
	umem_oversize_arena = oversize_arena;
	umem_memalign_arena = memalign_arena;

	umem_cache_applyall(umem_cache_magazine_enable);
	/*
	 * initialization done, ready to go
	 */
	(void) mutex_lock(&umem_init_lock);
	umem_ready = UMEM_READY;
	umem_init_thr = 0;
	(void) cond_broadcast(&umem_init_cv);
	(void) mutex_unlock(&umem_init_lock);
	return (1);

fail:
	log_message("umem initialization failed\n");

	(void) mutex_lock(&umem_init_lock);
	umem_ready = UMEM_READY_INIT_FAILED;
	umem_init_thr = 0;
	(void) cond_broadcast(&umem_init_cv);
	(void) mutex_unlock(&umem_init_lock);
	return (0);
}
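/*
 * Usage sketch (illustrative only): because initialization is driven by the
 * first allocation, debugging features are selected via the environment
 * before the program starts, e.g.:
 *
 *	$ LD_PRELOAD=libumem.so.1 UMEM_DEBUG=default UMEM_LOGGING=transaction \
 *	      ./myprog
 *
 * umem_process_envvars() (called above) is what turns these settings into
 * umem_flags and log sizes; see umem_debug(3MALLOC) for the documented
 * values.  myprog is a hypothetical program name.
 */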