/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 */

#ifndef _ZFS_BLKDEV_H
#define	_ZFS_BLKDEV_H

#include <linux/blkdev.h>
#include <linux/elevator.h>

#ifndef HAVE_FMODE_T
typedef unsigned __bitwise__ fmode_t;
#endif /* HAVE_FMODE_T */

/*
 * 2.6.36 API change,
 * The blk_queue_flush() interface has replaced the blk_queue_ordered()
 * interface.  However, while the old interface was available to all
 * callers, the new one is GPL-only.  Thus if the GPL-only version is
 * detected we implement our own trivial helper compatibility function.
 * The hope is that long term this function will be opened up.
 */
#if defined(HAVE_BLK_QUEUE_FLUSH) && defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
#define	blk_queue_flush __blk_queue_flush
static inline void
__blk_queue_flush(struct request_queue *q, unsigned int flags)
{
	q->flush_flags = flags & (REQ_FLUSH | REQ_FUA);
}
#endif /* HAVE_BLK_QUEUE_FLUSH && HAVE_BLK_QUEUE_FLUSH_GPL_ONLY */
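
/*
 * For example (a minimal usage sketch, not part of this header): a driver
 * advertising both cache flush and FUA support on a 2.6.36+ kernel would
 * tag its request queue with:
 *
 *	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 */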

/*
 * 4.7 API change,
 * The blk_queue_write_cache() interface has replaced the blk_queue_flush()
 * interface.  However, the new interface is GPL-only.  Thus if the
 * GPL-only version is detected we implement our own trivial helper
 * compatibility function.
 */
#if defined(HAVE_BLK_QUEUE_WRITE_CACHE) && \
	defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
#define	blk_queue_write_cache __blk_queue_write_cache
static inline void
__blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	spin_lock_irq(q->queue_lock);
	if (wc)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		queue_flag_clear(QUEUE_FLAG_FUA, q);
	spin_unlock_irq(q->queue_lock);
}
#endif /* HAVE_BLK_QUEUE_WRITE_CACHE && HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY */
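
/*
 * For example (a minimal usage sketch): on 4.7+ kernels a driver with a
 * volatile write cache and FUA support would advertise both with:
 *
 *	blk_queue_write_cache(q, true, true);
 */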

/*
 * Most of the blk_* macros were removed in 2.6.36.  Ostensibly this was
 * done to improve readability and allow easier grepping.  However, from
 * a portability standpoint the macros are helpful.  Therefore the needed
 * macros are redefined here if they are missing from the kernel.
 */
#ifndef blk_fs_request
#define	blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
#endif

/*
 * 2.6.27 API change,
 * The blk_queue_stackable() queue flag was added in 2.6.27 to handle dm
 * stacking drivers.  Prior to this, request stacking drivers were detected
 * by checking (q->request_fn == NULL); for earlier kernels we revert to
 * this legacy behavior.
 */
#ifndef blk_queue_stackable
#define	blk_queue_stackable(q)	((q)->request_fn == NULL)
#endif

/*
 * 2.6.34 API change,
 * The blk_queue_max_hw_sectors() function replaces blk_queue_max_sectors().
 */
#ifndef HAVE_BLK_QUEUE_MAX_HW_SECTORS
#define	blk_queue_max_hw_sectors __blk_queue_max_hw_sectors
static inline void
__blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	blk_queue_max_sectors(q, max_hw_sectors);
}
#endif /* HAVE_BLK_QUEUE_MAX_HW_SECTORS */

/*
 * 2.6.34 API change,
 * The blk_queue_max_segments() function consolidates
 * blk_queue_max_hw_segments() and blk_queue_max_phys_segments().
 */
#ifndef HAVE_BLK_QUEUE_MAX_SEGMENTS
#define	blk_queue_max_segments __blk_queue_max_segments
static inline void
__blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	blk_queue_max_phys_segments(q, max_segments);
	blk_queue_max_hw_segments(q, max_segments);
}
#endif /* HAVE_BLK_QUEUE_MAX_SEGMENTS */
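
/*
 * For example (a minimal usage sketch; the limits shown are simply the
 * most permissive values): a driver configures its queue limits with:
 *
 *	blk_queue_max_hw_sectors(q, UINT_MAX);
 *	blk_queue_max_segments(q, UINT16_MAX);
 */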

#ifndef HAVE_GET_DISK_RO
static inline int
get_disk_ro(struct gendisk *disk)
{
	int policy = 0;

	if (disk->part[0])
		policy = disk->part[0]->policy;

	return (policy);
}
#endif /* HAVE_GET_DISK_RO */

#ifdef HAVE_BIO_BVEC_ITER
#define	BIO_BI_SECTOR(bio)	(bio)->bi_iter.bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_iter.bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_iter.bi_idx
#define	BIO_BI_SKIP(bio)	(bio)->bi_iter.bi_bvec_done
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bv), (b), (i))
typedef struct bvec_iter bvec_iterator_t;
#else
#define	BIO_BI_SECTOR(bio)	(bio)->bi_sector
#define	BIO_BI_SIZE(bio)	(bio)->bi_size
#define	BIO_BI_IDX(bio)		(bio)->bi_idx
#define	BIO_BI_SKIP(bio)	(0)
#define	bio_for_each_segment4(bv, bvp, b, i)	\
	bio_for_each_segment((bvp), (b), (i))
typedef int bvec_iterator_t;
#endif /* HAVE_BIO_BVEC_ITER */
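
/*
 * For example (a minimal usage sketch): the starting sector and remaining
 * byte count of any bio may be read portably with:
 *
 *	sector_t sector = BIO_BI_SECTOR(bio);
 *	unsigned int size = BIO_BI_SIZE(bio);
 */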

/*
 * Portable helper for correctly setting the FAILFAST flags.  The
 * correct usage has changed 3 times from 2.6.12 to 2.6.38.
 */
static inline void
bio_set_flags_failfast(struct block_device *bdev, int *flags)
{
#ifdef CONFIG_BUG
	/*
	 * Disable FAILFAST for loopback devices because of the
	 * following incorrect BUG_ON() in loop_make_request().
	 * This support is also disabled for md devices because the
	 * test suite layers md devices on top of loopback devices.
	 * This may be removed when the loopback driver is fixed.
	 *
	 *   BUG_ON(!lo || (rw != READ && rw != WRITE));
	 */
	if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
	    (MAJOR(bdev->bd_dev) == MD_MAJOR))
		return;

#ifdef BLOCK_EXT_MAJOR
	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		return;
#endif /* BLOCK_EXT_MAJOR */
#endif /* CONFIG_BUG */

#if defined(HAVE_BIO_RW_FAILFAST_DTD)
	/* BIO_RW_FAILFAST_* preferred interface from 2.6.28 - 2.6.35 */
	*flags |= (
	    (1 << BIO_RW_FAILFAST_DEV) |
	    (1 << BIO_RW_FAILFAST_TRANSPORT) |
	    (1 << BIO_RW_FAILFAST_DRIVER));
#elif defined(HAVE_REQ_FAILFAST_MASK)
	/*
	 * REQ_FAILFAST_* preferred interface from 2.6.36 - 2.6.xx,
	 * the BIO_* and REQ_* flags were unified under REQ_* flags.
	 */
	*flags |= REQ_FAILFAST_MASK;
#else
#error "Undefined block IO FAILFAST interface."
#endif
}
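
/*
 * For example (a minimal usage sketch): a caller builds up the flags to
 * be ORed into bio->bi_rw before submission with:
 *
 *	int flags = 0;
 *
 *	bio_set_flags_failfast(bdev, &flags);
 *	bio->bi_rw |= flags;
 */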

/*
 * Maximum disk label length; it may be undefined for some kernels.
 */
#ifndef DISK_NAME_LEN
#define	DISK_NAME_LEN	32
#endif /* DISK_NAME_LEN */

/*
 * 4.3 API change,
 * The bio_endio() prototype changed slightly.  These are helper
 * macros to ensure the prototype and invocation are handled.
 */
#ifdef HAVE_1ARG_BIO_END_IO_T
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x)
#define	BIO_END_IO(bio, error)		bio->bi_error = error; bio_endio(bio);
#else
#define	BIO_END_IO_PROTO(fn, x, z)	static void fn(struct bio *x, int z)
#define	BIO_END_IO(bio, error)		bio_endio(bio, error);
#endif /* HAVE_1ARG_BIO_END_IO_T */
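
/*
 * For example (a minimal usage sketch; my_bio_done is a hypothetical
 * name): a completion callback is declared with the PROTO macro and a
 * bio is completed with an error code via the invocation macro:
 *
 *	BIO_END_IO_PROTO(my_bio_done, bio, error)
 *	{
 *		(completion handling goes here)
 *	}
 *
 *	BIO_END_IO(bio, -EIO);
 */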

/*
 * 2.6.38 - 2.6.x API,
 *   blkdev_get_by_path()
 *   blkdev_put()
 *
 * 2.6.28 - 2.6.37 API,
 *   open_bdev_exclusive()
 *   close_bdev_exclusive()
 *
 * 2.6.12 - 2.6.27 API,
 *   open_bdev_excl()
 *   close_bdev_excl()
 *
 * Used to exclusively open a block device from within the kernel.
 */
#if defined(HAVE_BLKDEV_GET_BY_PATH)
#define	vdev_bdev_open(path, md, hld)	blkdev_get_by_path(path, \
					    (md) | FMODE_EXCL, hld)
#define	vdev_bdev_close(bdev, md)	blkdev_put(bdev, (md) | FMODE_EXCL)
#elif defined(HAVE_OPEN_BDEV_EXCLUSIVE)
#define	vdev_bdev_open(path, md, hld)	open_bdev_exclusive(path, md, hld)
#define	vdev_bdev_close(bdev, md)	close_bdev_exclusive(bdev, md)
#else
#define	vdev_bdev_open(path, md, hld)	open_bdev_excl(path, md, hld)
#define	vdev_bdev_close(bdev, md)	close_bdev_excl(bdev)
#endif /* HAVE_BLKDEV_GET_BY_PATH | HAVE_OPEN_BDEV_EXCLUSIVE */
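
/*
 * For example (a minimal usage sketch; the path and mode are
 * illustrative): a device is opened exclusively and later closed with
 * the same mode, hld being the opaque holder (see VDEV_HOLDER below):
 *
 *	struct block_device *bdev;
 *
 *	bdev = vdev_bdev_open("/dev/sda1", FMODE_READ | FMODE_WRITE, hld);
 *	if (!IS_ERR(bdev))
 *		vdev_bdev_close(bdev, FMODE_READ | FMODE_WRITE);
 */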

/*
 * 2.6.22 API change,
 * The function invalidate_bdev() lost its second argument because
 * it was unused.
 */
#ifdef HAVE_1ARG_INVALIDATE_BDEV
#define	vdev_bdev_invalidate(bdev)	invalidate_bdev(bdev)
#else
#define	vdev_bdev_invalidate(bdev)	invalidate_bdev(bdev, 1)
#endif /* HAVE_1ARG_INVALIDATE_BDEV */

/*
 * 2.6.27 API change,
 * The lookup_bdev() function was exported for use; prior to this it
 * existed but the symbol was not exported.
 */
#ifndef HAVE_LOOKUP_BDEV
#define	lookup_bdev(path)	ERR_PTR(-ENOTSUP)
#endif /* HAVE_LOOKUP_BDEV */

/*
 * 2.6.30 API change,
 * To ensure good performance preferentially use the physical block size
 * for proper alignment.  The physical size is supposed to be the internal
 * sector size used by the device.  This is often 4096 bytes for AF devices,
 * while a smaller 512 byte logical size is supported for compatibility.
 *
 * Unfortunately, many drives still misreport their physical sector size.
 * For devices which are known to lie you may need to manually set this
 * at pool creation time with 'zpool create -o ashift=12 ...'.
 *
 * When the physical block size interface isn't available, we fall back to
 * the logical block size interface and then the older hard sector size.
 */
#ifdef HAVE_BDEV_PHYSICAL_BLOCK_SIZE
#define	vdev_bdev_block_size(bdev)	bdev_physical_block_size(bdev)
#else
#ifdef HAVE_BDEV_LOGICAL_BLOCK_SIZE
#define	vdev_bdev_block_size(bdev)	bdev_logical_block_size(bdev)
#else
#define	vdev_bdev_block_size(bdev)	bdev_hardsect_size(bdev)
#endif /* HAVE_BDEV_LOGICAL_BLOCK_SIZE */
#endif /* HAVE_BDEV_PHYSICAL_BLOCK_SIZE */
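
/*
 * For example (a minimal sketch; ilog2() is one way to derive the
 * alignment shift, assuming a power-of-two size): a device reporting a
 * 4096 byte physical block size corresponds to ashift=12:
 *
 *	unsigned int bsize = vdev_bdev_block_size(bdev);
 *	int ashift = ilog2(bsize);
 */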

/*
 * 2.6.37 API change,
 * The WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags have been
 * introduced as a replacement for WRITE_BARRIER.  This was done to
 * allow richer semantics to be expressed to the block layer.  It is
 * the block layer's responsibility to choose the correct way to
 * implement these semantics.
 *
 * The existence of these flags implies that REQ_FLUSH and REQ_FUA are
 * defined.  Thus we can safely define VDEV_REQ_FLUSH and VDEV_REQ_FUA
 * compatibility macros.
 *
 * Linux 4.8 renamed REQ_FLUSH to REQ_PREFLUSH but there was no
 * functional change in behavior.
 */
#ifdef WRITE_FLUSH_FUA

#define	VDEV_WRITE_FLUSH_FUA		WRITE_FLUSH_FUA
#ifdef HAVE_REQ_PREFLUSH
#define	VDEV_REQ_FLUSH			REQ_PREFLUSH
#define	VDEV_REQ_FUA			REQ_FUA
#else
#define	VDEV_REQ_FLUSH			REQ_FLUSH
#define	VDEV_REQ_FUA			REQ_FUA
#endif

#else

#define	VDEV_WRITE_FLUSH_FUA		WRITE_BARRIER
#ifdef HAVE_BIO_RW_BARRIER
#define	VDEV_REQ_FLUSH			(1 << BIO_RW_BARRIER)
#define	VDEV_REQ_FUA			(1 << BIO_RW_BARRIER)
#else
#define	VDEV_REQ_FLUSH			REQ_HARDBARRIER
#define	VDEV_REQ_FUA			REQ_FUA
#endif

#endif /* WRITE_FLUSH_FUA */
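
/*
 * For example (a minimal usage sketch; completion handling omitted): an
 * empty bio submitted with these flags asks the device to flush its
 * write cache:
 *
 *	submit_bio(VDEV_WRITE_FLUSH_FUA, bio);
 */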

/*
 * 2.6.32 API change,
 * Use the normal I/O path for discards.
 */
#ifdef QUEUE_FLAG_DISCARD
#ifdef HAVE_BIO_RW_DISCARD
#define	VDEV_REQ_DISCARD		(1 << BIO_RW_DISCARD)
#else
#define	VDEV_REQ_DISCARD		REQ_DISCARD
#endif /* HAVE_BIO_RW_DISCARD */
#else
#error "Allowing the build will cause discard requests to become writes, " \
	"potentially triggering the DMU_MAX_ACCESS assertion. Please file " \
	"an issue report at: https://github.com/zfsonlinux/zfs/issues/new"
#endif /* QUEUE_FLAG_DISCARD */

/*
 * 2.6.33 API change,
 * Discard granularity and alignment restrictions may now be set.  For
 * older kernels which do not support this it is safe to skip it.
 */
#ifdef HAVE_DISCARD_GRANULARITY
static inline void
blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
{
	q->limits.discard_granularity = dg;
}
#else
#define	blk_queue_discard_granularity(x, dg)	((void)0)
#endif /* HAVE_DISCARD_GRANULARITY */
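
/*
 * For example (a minimal usage sketch; the granularity value is
 * illustrative): a driver exposing discard support advertises its
 * smallest discardable unit with:
 *
 *	blk_queue_discard_granularity(q, 4096);
 */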

/*
 * Default Linux IO Scheduler,
 * Setting the scheduler to noop will allow the Linux IO scheduler to
 * still perform front and back merging, while leaving the request
 * ordering and prioritization to the ZFS IO scheduler.
 */
#define	VDEV_SCHEDULER			"noop"

/*
 * A common holder for vdev_bdev_open() is used to relax the exclusive open
 * semantics slightly.  Internal vdev disk callers may pass VDEV_HOLDER to
 * allow them to open the device multiple times.  Other kernel callers and
 * user space processes which don't pass this value will get EBUSY.  This is
 * currently required for the correct operation of hot spares.
 */
#define	VDEV_HOLDER			((void *)0x2401de7)
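
/*
 * For example (a minimal usage sketch): an internal caller opens a
 * device with the common holder so a second internal open succeeds:
 *
 *	bdev = vdev_bdev_open(path, mode, VDEV_HOLDER);
 */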

#ifndef HAVE_GENERIC_IO_ACCT
#define	generic_start_io_acct(rw, slen, part)		((void)0)
#define	generic_end_io_acct(rw, part, start_jiffies)	((void)0)
#endif /* HAVE_GENERIC_IO_ACCT */
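
/*
 * For example (a minimal usage sketch following the 3.19 era kernel
 * interface; on older kernels these expand to no-ops): I/O statistics
 * are accounted around a request with:
 *
 *	generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);
 *	(perform the I/O)
 *	generic_end_io_acct(rw, &disk->part0, start_jiffies);
 */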

#endif /* _ZFS_BLKDEV_H */