ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
- atomic_inc(&skc->skc_ref);
-
/*
 * Allocate directly from a Linux slab.  All optimizations are left
 * to the underlying cache; we only need to guarantee that KM_SLEEP
 * callers will never fail.
 */
prefetchw(obj);
}
- atomic_dec(&skc->skc_ref);
-
return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
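
The comment above describes the KMC_SLAB fast path: the allocation is handed straight to the backing Linux slab, and the only extra obligation is that KM_SLEEP callers never see a failure. A minimal sketch of how that guarantee can be provided, assuming the SPL helper kmem_flags_convert() for KM_* to GFP_* translation (the function name slab_alloc_sketch is illustrative, not part of SPL):

/*
 * Sketch only: retry the Linux slab allocation until it succeeds for
 * KM_SLEEP callers; KM_NOSLEEP callers are allowed to fail immediately.
 */
static void *
slab_alloc_sketch(struct kmem_cache *slc, int kmflags)
{
        void *obj;

        do {
                /* kmem_flags_convert() maps KM_* flags onto GFP_* flags */
                obj = kmem_cache_alloc(slc, kmem_flags_convert(kmflags));
        } while ((obj == NULL) && !(kmflags & KM_NOSLEEP));

        return (obj);
}

The hunk below makes the matching change in spl_kmem_cache_free(), which likewise stops taking the skc_ref reference on the hot path.
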
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
- atomic_inc(&skc->skc_ref);
/*
* Run the destructor
*/
if (skc->skc_flags & KMC_SLAB) {
kmem_cache_free(skc->skc_linux_cache, obj);
- goto out;
+ return;
}
spin_unlock(&skc->skc_lock);
if (do_emergency && (spl_emergency_free(skc, obj) == 0))
- goto out;
+ return;
}
local_irq_save(flags);
if (do_reclaim)
spl_slab_reclaim(skc);
-out:
- atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_free);
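
For context, consumers reach these functions through the Solaris-style kmem_cache_* wrappers. A usage sketch, with my_obj_t and my_obj_cache as hypothetical names and assuming the standard nine-argument kmem_cache_create() wrapper exported by SPL:

/* Hypothetical object type and cache, used only for illustration. */
typedef struct my_obj {
        int mo_state;
} my_obj_t;

static kmem_cache_t *my_obj_cache;

static void
my_obj_cache_example(void)
{
        my_obj_t *obj;

        /* name, size, align, ctor, dtor, reclaim, private, vmp, flags */
        my_obj_cache = kmem_cache_create("my_obj_cache", sizeof (my_obj_t),
            0, NULL, NULL, NULL, NULL, NULL, 0);

        /* KM_SLEEP allocations may block but are guaranteed to succeed. */
        obj = kmem_cache_alloc(my_obj_cache, KM_SLEEP);
        obj->mo_state = 0;

        kmem_cache_free(my_obj_cache, obj);
        kmem_cache_destroy(my_obj_cache);
}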