Linux compat 4.16: blk_queue_flag_{set,clear}
author     Brian Behlendorf <behlendorf1@llnl.gov>
           Fri, 13 Apr 2018 02:46:14 +0000 (19:46 -0700)
committer  GitHub <noreply@github.com>
           Fri, 13 Apr 2018 02:46:14 +0000 (19:46 -0700)
The HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY case was overlooked in
the original 10f88c5c commit because blk_queue_write_cache()
was available for the in-kernel builds.

Update the blk_queue_flag_{set,clear} wrappers to call the locked
versions to avoid confusion.  This is safe for all existing callers.
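
For illustration, callers only ever use the wrapper names: on 4.16+ kernels
they resolve to the kernel's native blk_queue_flag_set()/blk_queue_flag_clear(),
and on older kernels to the compat definitions patched below.  A minimal
sketch of such a caller follows; the function name, the specific queue flags,
and the <linux/blkdev_compat.h> include path (taken from the file shown in
this diff) are illustrative assumptions, not part of this change.

    #include <linux/blkdev.h>
    #include <linux/blkdev_compat.h>

    /*
     * Example queue setup: mark the queue non-rotational and keep it out
     * of the entropy pool.  The same code builds on pre- and post-4.16
     * kernels because the compat header supplies the
     * blk_queue_flag_{set,clear} wrappers when the kernel does not.
     */
    static void
    example_queue_setup(struct request_queue *q)
    {
            blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
            blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
    }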

The blk_queue_set_write_cache() function has been updated to use
these wrappers.  This means setting/clearing both QUEUE_FLAG_WC
and QUEUE_FLAG_FUA is no longer atomic, but this is only done early
in zvol_alloc(), prior to any requests, so there is no issue.
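
As a sketch of why the lost atomicity is harmless: the helper runs once
while the queue is still being assembled, before the disk is added and
before any request can reach it.  The simplified, hypothetical allocation
path below shows the idea; it is not the actual zvol_alloc() code, and the
function name and blk_alloc_queue() usage are assumptions for illustration.

    #include <linux/blkdev.h>
    #include <linux/blkdev_compat.h>

    /*
     * Simplified allocation path: the write cache flags are configured
     * before the queue is visible to any submitter, so the two separate
     * flag updates cannot race with I/O.
     */
    static struct request_queue *
    example_alloc_queue(void)
    {
            struct request_queue *q;

            q = blk_alloc_queue(GFP_KERNEL);
            if (q == NULL)
                    return (NULL);

            /* Advertise a volatile write cache with FUA support. */
            blk_queue_set_write_cache(q, true, true);

            return (q);
    }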

Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Giuseppe Di Natale <dinatale2@llnl.gov>
Reviewed-by: Kash Pande <kash@tripleback.net>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #7428
Closes #7431

include/linux/blkdev_compat.h

index d7af1d89d240f62b3bc4c0ccc7427d672afc8bd7..f99980ab3e0fdb383a1ed006761ecc2ab3567f07 100644 (file)
@@ -41,7 +41,7 @@ typedef unsigned __bitwise__ fmode_t;
 static inline void
 blk_queue_flag_set(unsigned int flag, struct request_queue *q)
 {
-       queue_flag_set_unlocked(flag, q);
+       queue_flag_set(flag, q);
 }
 #endif
 
@@ -49,7 +49,7 @@ blk_queue_flag_set(unsigned int flag, struct request_queue *q)
 static inline void
 blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
-       queue_flag_clear_unlocked(flag, q);
+       queue_flag_clear(flag, q);
 }
 #endif
 
@@ -72,16 +72,14 @@ static inline void
 blk_queue_set_write_cache(struct request_queue *q, bool wc, bool fua)
 {
 #if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
-       spin_lock_irq(q->queue_lock);
        if (wc)
-               queue_flag_set(QUEUE_FLAG_WC, q);
+               blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
-               queue_flag_clear(QUEUE_FLAG_WC, q);
+               blk_queue_flag_clear(QUEUE_FLAG_WC, q);
        if (fua)
-               queue_flag_set(QUEUE_FLAG_FUA, q);
+               blk_queue_flag_set(QUEUE_FLAG_FUA, q);
        else
-               queue_flag_clear(QUEUE_FLAG_FUA, q);
-       spin_unlock_irq(q->queue_lock);
+               blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
 #elif defined(HAVE_BLK_QUEUE_WRITE_CACHE)
        blk_queue_write_cache(q, wc, fua);
 #elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)