/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 */
/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 * /dev/<pool_name>/<dataset_name>
 *
 * Volumes are persistent through reboot and module load.  No user command
 * needs to be run before opening and using a device.
 */
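
/*
 * For illustration, a minimal (hypothetical) userland consumer; once the
 * module is loaded a zvol behaves like any other block device:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int
 *	read_first_sector(void)
 *	{
 *		char buf[512];
 *		int fd = open("/dev/tank/vol", O_RDONLY); // pool "tank",
 *		                                          // dataset "vol"
 *		if (fd < 0)
 *			return (-1);
 *		if (pread(fd, buf, sizeof (buf), 0) != sizeof (buf)) {
 *			(void) close(fd);
 *			return (-1);
 *		}
 *		return (close(fd));
 *	}
 */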
#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/zap.h>
#include <sys/zil_impl.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_znode.h>
#include <sys/zvol.h>
#include <linux/blkdev_compat.h>

unsigned int zvol_inhibit_dev = 0;
unsigned int zvol_major = ZVOL_MAJOR;
unsigned int zvol_threads = 32;
unsigned long zvol_max_discard_blocks = 16384;

static taskq_t *zvol_taskq;
static kmutex_t zvol_state_lock;
static list_t zvol_state_list;
static char *zvol_tag = "zvol_tag";

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char			zv_name[MAXNAMELEN];	/* name */
	uint64_t		zv_volsize;	/* advertised space */
	uint64_t		zv_volblocksize; /* volume block size */
	objset_t		*zv_objset;	/* objset handle */
	uint32_t		zv_flags;	/* ZVOL_* flags */
	uint32_t		zv_open_count;	/* open counts */
	uint32_t		zv_changed;	/* disk changed */
	zilog_t			*zv_zilog;	/* ZIL handle */
	znode_t			zv_znode;	/* for range locking */
	dmu_buf_t		*zv_dbuf;	/* bonus handle */
	dev_t			zv_dev;		/* device id */
	struct gendisk		*zv_disk;	/* generic disk */
	struct request_queue	*zv_queue;	/* request queue */
	spinlock_t		zv_lock;	/* request queue lock */
	list_node_t		zv_next;	/* next zvol_state_t linkage */
} zvol_state_t;

#define	ZVOL_RDONLY	0x1

/*
 * Find the next available range of ZVOL_MINORS minor numbers.  The
 * zvol_state_list is kept in ascending minor order so we simply need
 * to scan the list for the first gap in the sequence.  This allows us
 * to recycle minor numbers as devices are created and removed.
 */
static int
zvol_find_minor(unsigned *minor)
{
	zvol_state_t *zv;

	*minor = 0;
	ASSERT(MUTEX_HELD(&zvol_state_lock));
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv), *minor += ZVOL_MINORS) {
		if (MINOR(zv->zv_dev) != MINOR(*minor))
			break;
	}

	/* All minors are in use */
	if (*minor >= (1 << MINORBITS))
		return (SET_ERROR(ENXIO));

	return (0);
}
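
/*
 * Example: if ZVOL_MINORS is 16 and existing devices hold minors 0, 16,
 * and 48, the scan above matches 0 and 16 and then stops at the gap
 * (32 != 48), so minor 32 is recycled for the next device.  (Values are
 * illustrative only.)
 */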

/*
 * Find a zvol_state_t given the full major+minor dev_t.
 */
static zvol_state_t *
zvol_find_by_dev(dev_t dev)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv)) {
		if (zv->zv_dev == dev)
			return (zv);
	}

	return (NULL);
}

/*
 * Find a zvol_state_t given the name provided at zvol_alloc() time.
 */
static zvol_state_t *
zvol_find_by_name(const char *name)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv)) {
		if (!strncmp(zv->zv_name, name, MAXNAMELEN))
			return (zv);
	}

	return (NULL);
}

/*
 * Given a path, return TRUE if path is a ZVOL.
 */
boolean_t
zvol_is_zvol(const char *device)
{
	struct block_device *bdev;
	unsigned int major;

	bdev = lookup_bdev(device);
	if (IS_ERR(bdev))
		return (B_FALSE);

	major = MAJOR(bdev->bd_dev);
	bdput(bdev);

	if (major == zvol_major)
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
 */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * ZFS_IOC_OBJSET_STATS entry point.
 */
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t *doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (SET_ERROR(error));

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
	error = dmu_object_info(os, ZVOL_OBJ, doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi->doi_data_block_size);
	}

	kmem_free(doi, sizeof (dmu_object_info_t));

	return (SET_ERROR(error));
}

/*
 * Sanity check volume size.
 */
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}
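
/*
 * Worked example for zvol_check_volsize(): with an 8 KiB block size,
 * volsize = 1073741824 (1 GiB, exactly 131072 blocks) passes, while
 * volsize = 1073742336 (1 GiB + 512) fails the modulo check and
 * volsize = 0 is rejected outright.  (Figures are illustrative only.)
 */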

/*
 * Ensure the zap is flushed then inform the VFS of the capacity change.
 */
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize, objset_t *os)
{
	struct block_device *bdev;
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (SET_ERROR(error));
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error)
		return (SET_ERROR(error));

	error = dmu_free_long_range(os,
	    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	if (error)
		return (SET_ERROR(error));

	bdev = bdget_disk(zv->zv_disk, 0);
	if (bdev == NULL)
		return (SET_ERROR(EIO));
/*
 * 2.6.28 API change
 * Added check_disk_size_change() helper function.
 */
#ifdef HAVE_CHECK_DISK_SIZE_CHANGE
	set_capacity(zv->zv_disk, volsize >> 9);
	zv->zv_volsize = volsize;
	check_disk_size_change(zv->zv_disk, bdev);
#else
	zv->zv_volsize = volsize;
	zv->zv_changed = 1;
	(void) check_disk_change(bdev);
#endif /* HAVE_CHECK_DISK_SIZE_CHANGE */

	bdput(bdev);

	return (0);
}
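
/*
 * Note: set_capacity() expects units of 512-byte sectors, hence the
 * "volsize >> 9" conversion in zvol_update_volsize() above; e.g. a
 * 1 GiB volume is advertised to the kernel as 2097152 sectors.
 */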

/*
 * ZFS_PROP_VOLSIZE set entry point.
 */
int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os = NULL;
	int error;
	dmu_object_info_t *doi;
	uint64_t readonly;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
	if (error != 0)
		return (SET_ERROR(error));
	if (readonly)
		return (SET_ERROR(EROFS));

	mutex_enter(&zvol_state_lock);

	zv = zvol_find_by_name(name);
	if (zv == NULL) {
		error = SET_ERROR(ENXIO);
		goto out;
	}

	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

	error = dmu_objset_hold(name, FTAG, &os);
	if (error)
		goto out_doi;

	if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi->doi_data_block_size)) != 0)
		goto out_doi;

	VERIFY(dsl_prop_get_integer(name, "readonly", &readonly, NULL) == 0);
	if (readonly) {
		error = SET_ERROR(EROFS);
		goto out_doi;
	}

	if (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY)) {
		error = SET_ERROR(EROFS);
		goto out_doi;
	}

	error = zvol_update_volsize(zv, volsize, os);
out_doi:
	kmem_free(doi, sizeof (dmu_object_info_t));

	if (os)
		dmu_objset_rele(os, FTAG);
out:
	mutex_exit(&zvol_state_lock);

	return (SET_ERROR(error));
}

/*
 * Sanity check volume block size.
 */
int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}
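
/*
 * Example for zvol_check_volblocksize(): 4096 and 131072 are accepted
 * (powers of two within [SPA_MINBLOCKSIZE, SPA_MAXBLOCKSIZE], i.e.
 * 512 bytes to 128 KiB on this vintage of the code), while 3000 fails
 * the ISP2() power-of-two test and is rejected with EDOM.
 */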

/*
 * ZFS_PROP_VOLBLOCKSIZE set entry point.
 */
int
zvol_set_volblocksize(const char *name, uint64_t volblocksize)
{
	zvol_state_t *zv;
	dmu_tx_t *tx;
	int error;

	mutex_enter(&zvol_state_lock);

	zv = zvol_find_by_name(name);
	if (zv == NULL) {
		error = SET_ERROR(ENXIO);
		goto out;
	}

	if (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY)) {
		error = SET_ERROR(EROFS);
		goto out;
	}

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
		    volblocksize, 0, tx);
		if (error == ENOTSUP)
			error = SET_ERROR(EBUSY);
		dmu_tx_commit(tx);
		if (error == 0)
			zv->zv_volblocksize = volblocksize;
	}
out:
	mutex_exit(&zvol_state_lock);

	return (SET_ERROR(error));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t off = lr->lr_offset;
	uint64_t len = lr->lr_length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, off, len, data, tx);
		dmu_tx_commit(tx);
	}

	return (SET_ERROR(error));
}

static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE is needed for zvol.
 */
zil_replay_func_t zvol_replay_vector[TX_MAX_TYPE] = {
	(zil_replay_func_t)zvol_replay_err,	/* no such transaction type */
	(zil_replay_func_t)zvol_replay_err,	/* TX_CREATE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_MKDIR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_MKXATTR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_SYMLINK */
	(zil_replay_func_t)zvol_replay_err,	/* TX_REMOVE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_RMDIR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_LINK */
	(zil_replay_func_t)zvol_replay_err,	/* TX_RENAME */
	(zil_replay_func_t)zvol_replay_write,	/* TX_WRITE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_TRUNCATE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_SETATTR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_ACL */
};

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL
 * transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;
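
/*
 * Example of the resulting record types (illustrative figures): with the
 * default 32 KiB threshold, no separate log device, and a 64 KiB
 * volblocksize, a block-aligned 128 KiB write is logged as two
 * WR_INDIRECT records which dmu_sync() later resolves to block pointers,
 * while a 4 KiB synchronous write is copied directly into the log
 * record as WR_COPIED, and an asynchronous one becomes WR_NEED_COPY.
 */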

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx,
    uint64_t offset, uint64_t size, int sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;
	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (size) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    size >= blocksize && offset % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, size);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, size);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = offset;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		(void) zil_itx_assign(zilog, itx, tx);

		offset += len;
		size -= len;
	}
}

/*
 * Common write path running under the zvol taskq context.  This function
 * is responsible for copying the request structure data into the DMU and
 * signaling the request queue with the result of the copy.
 */
static void
zvol_write(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error = 0;
	dmu_tx_t *tx;
	rl_t *rl;

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock.  KM_PUSHPAGE should be used instead.
	 */
	ASSERT(!(current->flags & PF_NOFS));
	current->flags |= PF_NOFS;

	if (req->cmd_flags & VDEV_REQ_FLUSH)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	/*
	 * Some requests are just for flush and nothing else.
	 */
	if (size == 0) {
		blk_end_request(req, 0, size);
		goto out;
	}

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

	/* This will only fail for ENOSPC */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		blk_end_request(req, -error, size);
		goto out;
	}

	error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
	if (error == 0)
		zvol_log_write(zv, tx, offset, size,
		    req->cmd_flags & VDEV_REQ_FUA);

	dmu_tx_commit(tx);
	zfs_range_unlock(rl);

	if ((req->cmd_flags & VDEV_REQ_FUA) ||
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	blk_end_request(req, -error, size);
out:
	current->flags &= ~PF_NOFS;
}

#ifdef HAVE_BLK_QUEUE_DISCARD
static void
zvol_discard(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t start = blk_rq_pos(req) << 9;
	uint64_t end = start + blk_rq_bytes(req);
	int error;
	rl_t *rl;

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock.  KM_PUSHPAGE should be used instead.
	 */
	ASSERT(!(current->flags & PF_NOFS));
	current->flags |= PF_NOFS;

	if (end > zv->zv_volsize) {
		blk_end_request(req, -EIO, blk_rq_bytes(req));
		goto out;
	}

	/*
	 * Align the request to volume block boundaries.  If we don't,
	 * then this will force dnode_free_range() to zero out the
	 * unaligned parts, which is slow (read-modify-write) and
	 * useless since we are not freeing any space by doing so.
	 */
	start = P2ROUNDUP(start, zv->zv_volblocksize);
	end = P2ALIGN(end, zv->zv_volblocksize);
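
	/*
	 * Example: with an 8 KiB volblocksize, a discard of bytes
	 * [4096, 20480) is trimmed to [8192, 16384); only the one block
	 * fully covered is freed and the ragged edges are left intact.
	 */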

	if (start >= end) {
		blk_end_request(req, 0, blk_rq_bytes(req));
		goto out;
	}

	rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start,
	    end - start);

	/*
	 * TODO: maybe we should add the operation to the log.
	 */

	zfs_range_unlock(rl);

	blk_end_request(req, -error, blk_rq_bytes(req));
out:
	current->flags &= ~PF_NOFS;
}
#endif /* HAVE_BLK_QUEUE_DISCARD */

/*
 * Common read path running under the zvol taskq context.  This function
 * is responsible for copying the requested data out of the DMU and into
 * a linux request structure.  It then must signal the request queue with
 * an error code describing the result of the copy.
 */
static void
zvol_read(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error;
	rl_t *rl;

	if (size == 0) {
		blk_end_request(req, 0, size);
		return;
	}

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);

	zfs_range_unlock(rl);

	/* convert checksum errors into IO errors */
	if (error == ECKSUM)
		error = SET_ERROR(EIO);

	blk_end_request(req, -error, size);
}

/*
 * Request will be added back to the request queue and retried if
 * it cannot be immediately dispatched to the taskq for handling.
 */
static inline void
zvol_dispatch(task_func_t func, struct request *req)
{
	if (!taskq_dispatch(zvol_taskq, func, (void *)req, TQ_NOSLEEP))
		blk_requeue_request(req->q, req);
}

/*
 * Common request path.  Rather than registering a custom make_request()
 * function we use the generic Linux version.  This is done because it
 * allows us to easily merge read requests which would otherwise be
 * performed synchronously by the DMU.  This is less critical in the
 * write case where the DMU will perform the correct merging within a
 * transaction group.  Using the generic make_request() also lets us
 * leverage the fact that the elevator will ensure correct ordering
 * with regard to barrier IOs.  On the downside it means that in the
 * write case we end up doing request merging twice: once in the
 * elevator and once in the DMU.
 *
 * The request handler is called under a spin lock so all the real work
 * is handed off to be done in the context of the zvol taskq.  This
 * function simply performs basic request sanity checking and hands off
 * the request.
 */
static void
zvol_request(struct request_queue *q)
{
	zvol_state_t *zv = q->queuedata;
	struct request *req;
	unsigned int size;

	while ((req = blk_fetch_request(q)) != NULL) {
		size = blk_rq_bytes(req);

		if (size != 0 && blk_rq_pos(req) + blk_rq_sectors(req) >
		    get_capacity(zv->zv_disk)) {
			printk(KERN_INFO
			    "%s: bad access: block=%llu, count=%lu\n",
			    req->rq_disk->disk_name,
			    (long long unsigned)blk_rq_pos(req),
			    (long unsigned)blk_rq_sectors(req));
			__blk_end_request(req, -EIO, size);
			continue;
		}

		if (!blk_fs_request(req)) {
			printk(KERN_INFO "%s: non-fs cmd\n",
			    req->rq_disk->disk_name);
			__blk_end_request(req, -EIO, size);
			continue;
		}

		switch (rq_data_dir(req)) {
		case READ:
			zvol_dispatch(zvol_read, req);
			break;
		case WRITE:
			if (unlikely(get_disk_ro(zv->zv_disk)) ||
			    unlikely(zv->zv_flags & ZVOL_RDONLY)) {
				__blk_end_request(req, -EROFS, size);
				break;
			}

#ifdef HAVE_BLK_QUEUE_DISCARD
			if (req->cmd_flags & VDEV_REQ_DISCARD) {
				zvol_dispatch(zvol_discard, req);
				break;
			}
#endif /* HAVE_BLK_QUEUE_DISCARD */

			zvol_dispatch(zvol_write, req);
			break;
		default:
			printk(KERN_INFO "%s: unknown cmd: %d\n",
			    req->rq_disk->disk_name, (int)rq_data_dir(req));
			__blk_end_request(req, -EIO, size);
			break;
		}
	}
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_PUSHPAGE);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN_TYPED(offset, size, uint64_t);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = &lr->lr_blkptr;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (SET_ERROR(error));
}

/*
 * The zvol_state_t's are inserted in increasing MINOR(dev_t) order.
 */
static void
zvol_insert(zvol_state_t *zv_insert)
{
	zvol_state_t *zv = NULL;

	ASSERT(MUTEX_HELD(&zvol_state_lock));
	ASSERT3U(MINOR(zv_insert->zv_dev) & ZVOL_MINOR_MASK, ==, 0);
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv)) {
		if (MINOR(zv->zv_dev) > MINOR(zv_insert->zv_dev))
			break;
	}

	list_insert_before(&zvol_state_list, zv, zv_insert);
}

/*
 * Simply remove the zvol from the list of zvols.
 */
static void
zvol_remove(zvol_state_t *zv_remove)
{
	ASSERT(MUTEX_HELD(&zvol_state_lock));
	list_remove(&zvol_state_list, zv_remove);
}

static int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int locked = 0;
	int error;
	uint64_t ro;

	/*
	 * In all other cases the spa_namespace_lock is taken before the
	 * bdev->bd_mutex lock.  But in this case the Linux __blkdev_get()
	 * function calls fops->open() with the bdev->bd_mutex lock held.
	 *
	 * To avoid a potential lock inversion deadlock we preemptively
	 * try to take the spa_namespace_lock().  Normally it will not
	 * be contended and this is safe because spa_open_common() handles
	 * the case where the caller already holds the spa_namespace_lock.
	 *
	 * When it is contended we risk a lock inversion if we were to
	 * block waiting for the lock.  Luckily, the __blkdev_get()
	 * function allows us to return -ERESTARTSYS which will result in
	 * bdev->bd_mutex being dropped, reacquired, and fops->open() being
	 * called again.  This process can be repeated safely until both
	 * locks are acquired.
	 */
	if (!mutex_owned(&spa_namespace_lock)) {
		locked = mutex_tryenter(&spa_namespace_lock);
		if (!locked)
			return (-SET_ERROR(ERESTARTSYS));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zvol_tag, &os);
	if (error)
		goto out_mutex;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		goto out_mutex;
	}

	zv->zv_objset = os;
	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		goto out_mutex;
	}

	set_capacity(zv->zv_disk, volsize >> 9);
	zv->zv_volsize = volsize;
	zv->zv_zilog = zil_open(os, zvol_get_data);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL) == 0);
	if (ro || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os))) {
		set_disk_ro(zv->zv_disk, 1);
		zv->zv_flags |= ZVOL_RDONLY;
	} else {
		set_disk_ro(zv->zv_disk, 0);
		zv->zv_flags &= ~ZVOL_RDONLY;
	}

out_mutex:
	if (locked)
		mutex_exit(&spa_namespace_lock);

	return (-error);
}

static void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	(void) dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

static int
zvol_open(struct block_device *bdev, fmode_t flag)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	int error = 0, drop_mutex = 0;

	/*
	 * If the caller is already holding the mutex do not take it
	 * again, this will happen as part of zvol_create_minor().
	 * Once add_disk() is called the device is live and the kernel
	 * will attempt to open it to read the partition information.
	 */
	if (!mutex_owned(&zvol_state_lock)) {
		mutex_enter(&zvol_state_lock);
		drop_mutex = 1;
	}

	ASSERT3P(zv, !=, NULL);

	if (zv->zv_open_count == 0) {
		error = zvol_first_open(zv);
		if (error)
			goto out_mutex;
	}

	if ((flag & FMODE_WRITE) &&
	    (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY))) {
		error = -EROFS;
		goto out_open_count;
	}

	zv->zv_open_count++;

out_open_count:
	if (zv->zv_open_count == 0)
		zvol_last_close(zv);

out_mutex:
	if (drop_mutex)
		mutex_exit(&zvol_state_lock);

	check_disk_change(bdev);

	return (SET_ERROR(error));
}

#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
static void
zvol_release(struct gendisk *disk, fmode_t mode)
#else
static int
zvol_release(struct gendisk *disk, fmode_t mode)
#endif
{
	zvol_state_t *zv = disk->private_data;
	int drop_mutex = 0;

	if (!mutex_owned(&zvol_state_lock)) {
		mutex_enter(&zvol_state_lock);
		drop_mutex = 1;
	}

	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);
	zv->zv_open_count--;
	if (zv->zv_open_count == 0)
		zvol_last_close(zv);

	if (drop_mutex)
		mutex_exit(&zvol_state_lock);

#ifndef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
	return (0);
#endif
}

static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long arg)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	int error = 0;

	if (zv == NULL)
		return (-SET_ERROR(ENXIO));

	switch (cmd) {
	case BLKFLSBUF:
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		break;
	case BLKZNAME:
		error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
		break;
	default:
		error = -ENOTTY;
		break;
	}

	return (SET_ERROR(error));
}

#ifdef CONFIG_COMPAT
static int
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned cmd, unsigned long arg)
{
	return (zvol_ioctl(bdev, mode, cmd, arg));
}
#else
#define	zvol_compat_ioctl	NULL
#endif

static int zvol_media_changed(struct gendisk *disk)
{
	zvol_state_t *zv = disk->private_data;

	return (zv->zv_changed);
}

static int zvol_revalidate_disk(struct gendisk *disk)
{
	zvol_state_t *zv = disk->private_data;

	zv->zv_changed = 0;
	set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

	return (0);
}

/*
 * Provide a simple virtual geometry for legacy compatibility.  For devices
 * smaller than 1 MiB a small head and sector count is used to allow very
 * tiny devices.  For devices over 1 MiB a standard head and sector count
 * is used to keep the cylinders count reasonable.
 */
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	sector_t sectors = get_capacity(zv->zv_disk);

	if (sectors > 2048) {
		geo->heads = 16;
		geo->sectors = 63;
	} else {
		geo->heads = 2;
		geo->sectors = 4;
	}

	geo->start = 0;
	geo->cylinders = sectors / (geo->heads * geo->sectors);

	return (0);
}
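
/*
 * Example: a 1 GiB volume is 2097152 sectors; with 16 heads and 63
 * sectors per track the reported geometry works out to
 * 2097152 / (16 * 63) = 2080 cylinders (integer division).
 */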

static struct kobject *
zvol_probe(dev_t dev, int *part, void *arg)
{
	zvol_state_t *zv;
	struct kobject *kobj;

	mutex_enter(&zvol_state_lock);
	zv = zvol_find_by_dev(dev);
	kobj = zv ? get_disk(zv->zv_disk) : NULL;
	mutex_exit(&zvol_state_lock);

	return (kobj);
}

#ifdef HAVE_BDEV_BLOCK_DEVICE_OPERATIONS
static struct block_device_operations zvol_ops = {
	.open			= zvol_open,
	.release		= zvol_release,
	.ioctl			= zvol_ioctl,
	.compat_ioctl		= zvol_compat_ioctl,
	.media_changed		= zvol_media_changed,
	.revalidate_disk	= zvol_revalidate_disk,
	.getgeo			= zvol_getgeo,
	.owner			= THIS_MODULE,
};

#else /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */

static int
zvol_open_by_inode(struct inode *inode, struct file *file)
{
	return (zvol_open(inode->i_bdev, file->f_mode));
}

static int
zvol_release_by_inode(struct inode *inode, struct file *file)
{
	return (zvol_release(inode->i_bdev->bd_disk, file->f_mode));
}

static int
zvol_ioctl_by_inode(struct inode *inode, struct file *file,
    unsigned int cmd, unsigned long arg)
{
	if (file == NULL || inode == NULL)
		return (-SET_ERROR(EINVAL));

	return (zvol_ioctl(inode->i_bdev, file->f_mode, cmd, arg));
}

# ifdef CONFIG_COMPAT
static long
zvol_compat_ioctl_by_inode(struct file *file,
    unsigned int cmd, unsigned long arg)
{
	if (file == NULL)
		return (-SET_ERROR(EINVAL));

	return (zvol_compat_ioctl(file->f_dentry->d_inode->i_bdev,
	    file->f_mode, cmd, arg));
}
# else
# define	zvol_compat_ioctl_by_inode	NULL
# endif

static struct block_device_operations zvol_ops = {
	.open			= zvol_open_by_inode,
	.release		= zvol_release_by_inode,
	.ioctl			= zvol_ioctl_by_inode,
	.compat_ioctl		= zvol_compat_ioctl_by_inode,
	.media_changed		= zvol_media_changed,
	.revalidate_disk	= zvol_revalidate_disk,
	.getgeo			= zvol_getgeo,
	.owner			= THIS_MODULE,
};
#endif /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */

/*
 * Allocate memory for a new zvol_state_t and setup the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;
	int error = 0;

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);

	spin_lock_init(&zv->zv_lock);
	list_link_init(&zv->zv_next);

	zv->zv_queue = blk_init_queue(zvol_request, &zv->zv_lock);
	if (zv->zv_queue == NULL)
		goto out_kmem;

#ifdef HAVE_ELEVATOR_CHANGE
	error = elevator_change(zv->zv_queue, "noop");
#endif /* HAVE_ELEVATOR_CHANGE */
	if (error) {
		printk("ZFS: Unable to set \"%s\" scheduler for zvol %s: %d\n",
		    "noop", name, error);
		goto out_queue;
	}

#ifdef HAVE_BLK_QUEUE_FLUSH
	blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
#else
	blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
#endif /* HAVE_BLK_QUEUE_FLUSH */

	zv->zv_disk = alloc_disk(ZVOL_MINORS);
	if (zv->zv_disk == NULL)
		goto out_queue;

	zv->zv_queue->queuedata = zv;
	zv->zv_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, MAXNAMELEN);

	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	zv->zv_znode.z_is_zvol = TRUE;

	zv->zv_disk->major = zvol_major;
	zv->zv_disk->first_minor = (dev & MINORMASK);
	zv->zv_disk->fops = &zvol_ops;
	zv->zv_disk->private_data = zv;
	zv->zv_disk->queue = zv->zv_queue;
	snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
	    ZVOL_DEV_NAME, (dev & MINORMASK));

	return (zv);

out_queue:
	blk_cleanup_queue(zv->zv_queue);
out_kmem:
	kmem_free(zv, sizeof (zvol_state_t));

	return (NULL);
}

/*
 * Cleanup then free a zvol_state_t which was created by zvol_alloc().
 */
static void
zvol_free(zvol_state_t *zv)
{
	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	del_gendisk(zv->zv_disk);
	blk_cleanup_queue(zv->zv_queue);
	put_disk(zv->zv_disk);

	kmem_free(zv, sizeof (zvol_state_t));
}

static int
__zvol_snapdev_hidden(const char *name)
{
	uint64_t snapdev;
	char *parent;
	char *atp;
	int error = 0;

	parent = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) strlcpy(parent, name, MAXPATHLEN);

	if ((atp = strrchr(parent, '@')) != NULL) {
		*atp = '\0';
		error = dsl_prop_get_integer(parent, "snapdev",
		    &snapdev, NULL);
		if ((error == 0) && (snapdev == ZFS_SNAPDEV_HIDDEN))
			error = SET_ERROR(ENODEV);
	}

	kmem_free(parent, MAXPATHLEN);

	return (SET_ERROR(error));
}

static int
__zvol_create_minor(const char *name, boolean_t ignore_snapdev)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t *doi;
	uint64_t volsize;
	unsigned minor = 0;
	int error = 0;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	zv = zvol_find_by_name(name);
	if (zv) {
		error = SET_ERROR(EEXIST);
		goto out;
	}

	if (ignore_snapdev == B_FALSE) {
		error = __zvol_snapdev_hidden(name);
		if (error)
			goto out;
	}

	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
	if (error)
		goto out_doi;

	error = dmu_object_info(os, ZVOL_OBJ, doi);
	if (error)
		goto out_dmu_objset_disown;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error)
		goto out_dmu_objset_disown;

	error = zvol_find_minor(&minor);
	if (error)
		goto out_dmu_objset_disown;

	zv = zvol_alloc(MKDEV(zvol_major, minor), name);
	if (zv == NULL) {
		error = SET_ERROR(EAGAIN);
		goto out_dmu_objset_disown;
	}

	if (dmu_objset_is_snapshot(os))
		zv->zv_flags |= ZVOL_RDONLY;

	zv->zv_volblocksize = doi->doi_data_block_size;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;

	set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

	blk_queue_max_hw_sectors(zv->zv_queue, UINT_MAX);
	blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
	blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
	blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
	blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
#ifdef HAVE_BLK_QUEUE_DISCARD
	blk_queue_max_discard_sectors(zv->zv_queue,
	    (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
	blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
#endif
#ifdef HAVE_BLK_QUEUE_NONROT
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
#endif

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}

	zv->zv_objset = NULL;
out_dmu_objset_disown:
	dmu_objset_disown(os, zvol_tag);
out_doi:
	kmem_free(doi, sizeof (dmu_object_info_t));
out:
	if (error == 0) {
		zvol_insert(zv);
		add_disk(zv->zv_disk);
	}

	return (SET_ERROR(error));
}

/*
 * Create a block device minor node and setup the linkage between it
 * and the specified volume.  Once this function returns the block
 * device is live and ready for use.
 */
int
zvol_create_minor(const char *name)
{
	int error;

	mutex_enter(&zvol_state_lock);
	error = __zvol_create_minor(name, B_FALSE);
	mutex_exit(&zvol_state_lock);

	return (SET_ERROR(error));
}

static int
__zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	zv = zvol_find_by_name(name);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if (zv->zv_open_count > 0)
		return (SET_ERROR(EBUSY));

	zvol_remove(zv);
	zvol_free(zv);

	return (0);
}

/*
 * Remove a block device minor node for the specified volume.
 */
int
zvol_remove_minor(const char *name)
{
	int error;

	mutex_enter(&zvol_state_lock);
	error = __zvol_remove_minor(name);
	mutex_exit(&zvol_state_lock);

	return (SET_ERROR(error));
}

static int
zvol_create_minors_cb(const char *dsname, void *arg)
{
	if (strchr(dsname, '/') == NULL)
		return (0);

	(void) __zvol_create_minor(dsname, B_FALSE);
	return (0);
}

/*
 * Create minors for the specified pool; if pool is NULL, create minors
 * for all available pools.
 */
int
zvol_create_minors(char *pool)
{
	spa_t *spa = NULL;
	int error = 0;

	if (zvol_inhibit_dev)
		return (0);

	mutex_enter(&zvol_state_lock);
	if (pool) {
		error = dmu_objset_find(pool, zvol_create_minors_cb,
		    NULL, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
	} else {
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL) {
			error = dmu_objset_find(spa_name(spa),
			    zvol_create_minors_cb, NULL,
			    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
			if (error)
				break;
		}
		mutex_exit(&spa_namespace_lock);
	}
	mutex_exit(&zvol_state_lock);

	return (SET_ERROR(error));
}

/*
 * Remove minors for the specified pool; if pool is NULL, remove all minors.
 */
void
zvol_remove_minors(const char *pool)
{
	zvol_state_t *zv, *zv_next;
	char *str;

	if (zvol_inhibit_dev)
		return;

	str = kmem_zalloc(MAXNAMELEN, KM_SLEEP);
	if (pool) {
		(void) strncpy(str, pool, strlen(pool));
		(void) strcat(str, "/");
	}

	mutex_enter(&zvol_state_lock);
	for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
		zv_next = list_next(&zvol_state_list, zv);

		if (pool == NULL || !strncmp(str, zv->zv_name, strlen(str))) {
			zvol_remove(zv);
			zvol_free(zv);
		}
	}
	mutex_exit(&zvol_state_lock);
	kmem_free(str, MAXNAMELEN);
}

static int
snapdev_snapshot_changed_cb(const char *dsname, void *arg)
{
	uint64_t snapdev = *(uint64_t *)arg;

	if (strchr(dsname, '@') == NULL)
		return (0);

	switch (snapdev) {
	case ZFS_SNAPDEV_VISIBLE:
		mutex_enter(&zvol_state_lock);
		(void) __zvol_create_minor(dsname, B_TRUE);
		mutex_exit(&zvol_state_lock);
		break;
	case ZFS_SNAPDEV_HIDDEN:
		(void) zvol_remove_minor(dsname);
		break;
	}

	return (0);
}

int
zvol_set_snapdev(const char *dsname, uint64_t snapdev)
{
	(void) dmu_objset_find((char *)dsname, snapdev_snapshot_changed_cb,
	    &snapdev, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);

	/* caller should continue to modify snapdev property */
	return (-1);
}
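
/*
 * For example, from userland (assuming a dataset named "tank/vol"):
 *
 *	zfs set snapdev=visible tank/vol   # minors appear for snapshots
 *	zfs set snapdev=hidden tank/vol    # snapshot minors are removed
 */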

int
zvol_init(void)
{
	int error;

	list_create(&zvol_state_list, sizeof (zvol_state_t),
	    offsetof(zvol_state_t, zv_next));
	mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);

	zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_threads, maxclsyspri,
	    zvol_threads, INT_MAX, TASKQ_PREPOPULATE);
	if (zvol_taskq == NULL) {
		printk(KERN_INFO "ZFS: taskq_create() failed\n");
		error = -ENOMEM;
		goto out1;
	}

	error = register_blkdev(zvol_major, ZVOL_DRIVER);
	if (error) {
		printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
		goto out2;
	}

	blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
	    THIS_MODULE, zvol_probe, NULL, NULL);

	return (0);

out2:
	taskq_destroy(zvol_taskq);
out1:
	mutex_destroy(&zvol_state_lock);
	list_destroy(&zvol_state_list);

	return (SET_ERROR(error));
}

void
zvol_fini(void)
{
	zvol_remove_minors(NULL);
	blk_unregister_region(MKDEV(zvol_major, 0), 1UL << MINORBITS);
	unregister_blkdev(zvol_major, ZVOL_DRIVER);
	taskq_destroy(zvol_taskq);
	mutex_destroy(&zvol_state_lock);
	list_destroy(&zvol_state_list);
}

module_param(zvol_inhibit_dev, uint, 0644);
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");

module_param(zvol_major, uint, 0444);
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");

module_param(zvol_threads, uint, 0444);
MODULE_PARM_DESC(zvol_threads, "Number of threads for zvol device");

module_param(zvol_max_discard_blocks, ulong, 0444);
MODULE_PARM_DESC(zvol_max_discard_blocks,
	"Max number of blocks to discard at once");
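
/*
 * Example (illustrative): these tunables can be set at module load
 * time, e.g. "modprobe zfs zvol_major=230 zvol_threads=8", or via a
 * line such as "options zfs zvol_inhibit_dev=1" in
 * /etc/modprobe.d/zfs.conf.
 */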