granicus.if.org Git - zfs/commitdiff
* modules/spl/spl-kmem.c : Make sure to disable interrupts
author: wartens2 <wartens2@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>
Wed, 16 Apr 2008 16:37:51 +0000 (16:37 +0000)
committer: wartens2 <wartens2@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>
Wed, 16 Apr 2008 16:37:51 +0000 (16:37 +0000)
        when necessary to avoid deadlocks.  We were seeing the deadlock
        when calling kmem_cache_generic_constructor() and then an interrupt
        forced us to end up calling kmem_cache_generic_destructor()
        which caused our deadlock.

git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@74 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c

ChangeLog
modules/spl/spl-kmem.c

index b21cdf2faa9e456dc3788065cab0b9f1ba64a76d..da2f6da4b3c5bbee9a1346fc6321be7591ecf130 100644 (file)
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+2008-04-16 Herb Wartens <wartens2@llnl.gov>
+
+       * modules/spl/spl-kmem.c : Make sure to disable interrupts
+       when necessary to avoid deadlocks.  We were seeing the deadlock
+       when calling kmem_cache_generic_constructor() and then an interrupt
+       forced us to end up calling kmem_cache_generic_destructor()
+       which caused our deadlock.
+
 2008-02-26 Brian Behlendorf <behlendorf1@llnl.gov>
 
        : Initial commit of the solaris porting layer (spl).  Included
index 7c88eda5f79484078b0451d31bf2ca2940b4ad4e..95eb696a692b0318ca4f9c901b5a5eb405bfdd2c 100644 (file)
@@ -77,6 +77,7 @@ kmem_cache_add_cache_cb(kmem_cache_t *cache,
                         void *priv, void *vmp)
 {
         kmem_cache_cb_t *kcc;
+       unsigned long flags;
 
         kcc = (kmem_cache_cb_t *)kmalloc(sizeof(*kcc), GFP_KERNEL);
         if (kcc) {
@@ -86,9 +87,9 @@ kmem_cache_add_cache_cb(kmem_cache_t *cache,
                 kcc->kcc_reclaim = reclaim;
                 kcc->kcc_private = priv;
                 kcc->kcc_vmp = vmp;
-               spin_lock(&kmem_cache_cb_lock);
+               spin_lock_irqsave(&kmem_cache_cb_lock, flags);
                 list_add(&kcc->kcc_list, &kmem_cache_cb_list);
-               spin_unlock(&kmem_cache_cb_lock);
+               spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
         }
 
         return kcc;
@@ -97,9 +98,11 @@ kmem_cache_add_cache_cb(kmem_cache_t *cache,
 static void
 kmem_cache_remove_cache_cb(kmem_cache_cb_t *kcc)
 {
-       spin_lock(&kmem_cache_cb_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&kmem_cache_cb_lock, flags);
         list_del(&kcc->kcc_list);
-       spin_unlock(&kmem_cache_cb_lock);
+       spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
 
        if (kcc)
               kfree(kcc);
@@ -110,9 +113,10 @@ kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long fla
 {
         kmem_cache_cb_t *kcc;
        kmem_constructor_t constructor;
+       unsigned long irqflags;
        void *private;
 
-       spin_lock(&kmem_cache_cb_lock);
+       spin_lock_irqsave(&kmem_cache_cb_lock, irqflags);
 
         /* Callback list must be in sync with linux slab caches */
         kcc = kmem_cache_find_cache_cb(cache);
@@ -120,7 +124,7 @@ kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long fla
        constructor = kcc->kcc_constructor;
        private = kcc->kcc_private;
 
-       spin_unlock(&kmem_cache_cb_lock);
+       spin_unlock_irqrestore(&kmem_cache_cb_lock, irqflags);
 
        if (constructor)
                constructor(ptr, private, (int)flags);
@@ -133,9 +137,10 @@ kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flag
 {
         kmem_cache_cb_t *kcc;
         kmem_destructor_t destructor;
+       unsigned long irqflags;
        void *private;
 
-       spin_lock(&kmem_cache_cb_lock);
+       spin_lock_irqsave(&kmem_cache_cb_lock, irqflags);
 
         /* Callback list must be in sync with linux slab caches */
         kcc = kmem_cache_find_cache_cb(cache);
@@ -143,7 +148,7 @@ kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flag
        destructor = kcc->kcc_destructor;
        private = kcc->kcc_private;
 
-       spin_unlock(&kmem_cache_cb_lock);
+       spin_unlock_irqrestore(&kmem_cache_cb_lock, irqflags);
 
        /* Solaris destructor takes no flags, silently eat them */
        if (destructor)
@@ -155,6 +160,7 @@ static int
 kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
 {
         kmem_cache_cb_t *kcc;
+       unsigned long flags;
         int total = 0;
 
        /* Under linux a shrinker is not tightly coupled with a slab
@@ -164,7 +170,7 @@ kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
         * function in the shim layer for all slab caches.  And we always
         * attempt to shrink all caches when this generic shrinker is called.
         */
-       spin_lock(&kmem_cache_cb_lock);
+       spin_lock_irqsave(&kmem_cache_cb_lock, flags);
 
         list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list) {
                /* Under linux the desired number and gfp type of objects
@@ -185,7 +191,7 @@ kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
         * was registered with the generic shrinker.  This should fake out
         * the linux VM when it attempts to shrink caches.
         */
-       spin_unlock(&kmem_cache_cb_lock);
+       spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
        return total;
 }
 
@@ -257,11 +263,12 @@ __kmem_cache_destroy(kmem_cache_t *cache)
 {
         kmem_cache_cb_t *kcc;
        char *name;
+       unsigned long flags;
        int rc;
 
-       spin_lock(&kmem_cache_cb_lock);
+       spin_lock_irqsave(&kmem_cache_cb_lock, flags);
         kcc = kmem_cache_find_cache_cb(cache);
-       spin_unlock(&kmem_cache_cb_lock);
+       spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
         if (kcc == NULL)
                 return -EINVAL;
 
@@ -271,11 +278,11 @@ __kmem_cache_destroy(kmem_cache_t *cache)
        kfree(name);
 
        /* Unregister generic shrinker on removal of all caches */
-       spin_lock(&kmem_cache_cb_lock);
+       spin_lock_irqsave(&kmem_cache_cb_lock, flags);
        if (list_empty(&kmem_cache_cb_list))
                 remove_shrinker(kmem_cache_shrinker);
 
-       spin_unlock(&kmem_cache_cb_lock);
+       spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
        return rc;
 }
 EXPORT_SYMBOL(__kmem_cache_destroy);