list_add_tail(&sko->sko_list, &sks->sks_free_list);
}
- list_for_each_entry(sko, &sks->sks_free_list, sko_list)
- if (skc->skc_ctor)
- skc->skc_ctor(sko->sko_addr, skc->skc_private, flags);
out:
if (rc) {
if (skc->skc_flags & KMC_OFFSLAB)
list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
ASSERT(sko->sko_magic == SKO_MAGIC);
- if (skc->skc_dtor)
- skc->skc_dtor(sko->sko_addr, skc->skc_private);
-
if (skc->skc_flags & KMC_OFFSLAB)
kv_free(skc, sko->sko_addr, size);
}
SRETURN(-EINVAL);
}
- if (skc->skc_ctor)
- skc->skc_ctor(ske->ske_obj, skc->skc_private, flags);
-
*obj = ske->ske_obj;
SRETURN(0);
if (unlikely(ske == NULL))
SRETURN(-ENOENT);
- if (skc->skc_dtor)
- skc->skc_dtor(ske->ske_obj, skc->skc_private);
-
kfree(ske->ske_obj);
kfree(ske);
do {
obj = kmem_cache_alloc(slc, flags | __GFP_COMP);
- if (obj && skc->skc_ctor)
- skc->skc_ctor(obj, skc->skc_private, flags);
-
} while ((obj == NULL) && !(flags & KM_NOSLEEP));
- atomic_dec(&skc->skc_ref);
- SRETURN(obj);
+ goto ret;
}
local_irq_disable();
ASSERT(obj);
ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
+ret:
/* Pre-emptively migrate object to CPU L1 cache */
- prefetchw(obj);
+ if (obj) {
+         if (skc->skc_ctor)
+                 skc->skc_ctor(obj, skc->skc_private, flags);
+         else
+                 prefetchw(obj);
+ }
+
atomic_dec(&skc->skc_ref);
SRETURN(obj);
}
+
EXPORT_SYMBOL(spl_kmem_cache_alloc);
/*
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
atomic_inc(&skc->skc_ref);
+ /*
+  * Run the destructor for every object returned to the cache.
+  */
+ if (skc->skc_dtor)
+ skc->skc_dtor(obj, skc->skc_private);
+
/*
 * Free the object from the underlying Linux slab.
 */
if (skc->skc_flags & KMC_SLAB) {
- if (skc->skc_dtor)
- skc->skc_dtor(obj, skc->skc_private);
-
kmem_cache_free(skc->skc_linux_cache, obj);
goto out;
}
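
For context, a minimal sketch of what this change means for a cache consumer. The callback shapes below are inferred from the calls in this patch, skc->skc_ctor(obj, skc->skc_private, flags) and skc->skc_dtor(obj, skc->skc_private); my_obj_t, my_ctor, and my_dtor are hypothetical names, not part of the SPL API, and the int return on the constructor is assumed from the Solaris-style convention (the patch ignores the return value). The point is that the constructor now runs on every allocation and the destructor on every free, rather than when slabs are created or destroyed, so per-object setup and teardown happen on that per-call basis.

/* Hypothetical consumer of the cache; not part of this patch. */
typedef struct my_obj {
	int	mo_ready;
} my_obj_t;

/* Constructor shape inferred from skc->skc_ctor(obj, skc->skc_private, flags). */
static int
my_ctor(void *buf, void *priv, int flags)
{
	my_obj_t *mo = buf;

	/* Now runs on every spl_kmem_cache_alloc(), not at slab creation. */
	mo->mo_ready = 1;
	return (0);
}

/* Destructor shape inferred from skc->skc_dtor(obj, skc->skc_private). */
static void
my_dtor(void *buf, void *priv)
{
	my_obj_t *mo = buf;

	/* Now runs on every spl_kmem_cache_free(), not at slab destruction. */
	mo->mo_ready = 0;
}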