/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 */
/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/<pool_name>/<dataset_name>
 *
 * Volumes are persistent through reboot and module load. No user command
 * needs to be run before opening and using a device.
 */
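
/*
 * For example (hypothetical pool and dataset names), a volume created
 * with "zfs create -V 1G tank/vol" appears as /dev/tank/vol, a symlink
 * to the /dev/zd<minor> node registered below, and can then be used
 * like any other block device.
 */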
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/zil_impl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_znode.h>
#include <linux/blkdev_compat.h>
unsigned int zvol_inhibit_dev = 0;
unsigned int zvol_major = ZVOL_MAJOR;
unsigned int zvol_threads = 32;
unsigned long zvol_max_discard_blocks = 16384;

static taskq_t *zvol_taskq;
static kmutex_t zvol_state_lock;
static list_t zvol_state_list;
static char *zvol_tag = "zvol_tag";

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char			zv_name[MAXNAMELEN];	/* name */
	uint64_t		zv_volsize;		/* advertised space */
	uint64_t		zv_volblocksize;	/* volume block size */
	objset_t		*zv_objset;		/* objset handle */
	uint32_t		zv_flags;		/* ZVOL_* flags */
	uint32_t		zv_open_count;		/* open counts */
	uint32_t		zv_changed;		/* disk changed */
	zilog_t			*zv_zilog;		/* ZIL handle */
	znode_t			zv_znode;		/* for range locking */
	dmu_buf_t		*zv_dbuf;		/* bonus handle */
	dev_t			zv_dev;			/* device id */
	struct gendisk		*zv_disk;		/* generic disk */
	struct request_queue	*zv_queue;		/* request queue */
	spinlock_t		zv_lock;		/* request queue lock */
	list_node_t		zv_next;		/* next zvol_state_t linkage */
} zvol_state_t;

#define	ZVOL_RDONLY	0x1
/*
 * Find the next available range of ZVOL_MINORS minor numbers. The
 * zvol_state_list is kept in ascending minor order so we simply need
 * to scan the list for the first gap in the sequence. This allows us
 * to recycle minor numbers as devices are created and removed.
 */
static int
zvol_find_minor(unsigned *minor)
{
	zvol_state_t *zv;

	*minor = 0;
	ASSERT(MUTEX_HELD(&zvol_state_lock));
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv), *minor += ZVOL_MINORS) {
		if (MINOR(zv->zv_dev) != MINOR(*minor))
			break;
	}

	/* All minors are in use */
	if (*minor >= (1 << MINORBITS))
		return (ENXIO);

	return (0);
}
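
/*
 * A worked example of the scan above (hypothetical state): with
 * ZVOL_MINORS == 16 and existing volumes holding minors 0, 16 and 48,
 * *minor advances through 0 and 16 while they match, then the loop
 * stops at 32 because the next list entry holds 48; minor 32 is the
 * first recyclable gap.
 */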
/*
 * Find a zvol_state_t given the full major+minor dev_t.
 */
static zvol_state_t *
zvol_find_by_dev(dev_t dev)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv)) {
		if (zv->zv_dev == dev)
			return (zv);
	}

	return (NULL);
}
/*
 * Find a zvol_state_t given the name provided at zvol_alloc() time.
 */
static zvol_state_t *
zvol_find_by_name(const char *name)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv)) {
		if (!strncmp(zv->zv_name, name, MAXNAMELEN))
			return (zv);
	}

	return (NULL);
}
/*
 * Given a path, return TRUE if path is a ZVOL.
 */
boolean_t
zvol_is_zvol(const char *device)
{
	struct block_device *bdev;
	unsigned int major;

	bdev = lookup_bdev(device);
	if (IS_ERR(bdev))
		return (B_FALSE);

	major = MAJOR(bdev->bd_dev);
	bdput(bdev);

	if (major == zvol_major)
		return (B_TRUE);

	return (B_FALSE);
}
/*
 * ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
 */
static int
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	if (error)
		return (error);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	if (error)
		return (error);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);

	return (error);
}
/*
 * ZFS_IOC_OBJSET_STATS entry point.
 */
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	uint64_t val;
	dmu_object_info_t *doi;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	doi = kmem_alloc(sizeof(dmu_object_info_t), KM_SLEEP);
	error = dmu_object_info(os, ZVOL_OBJ, doi);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
	    doi->doi_data_block_size);

	kmem_free(doi, sizeof(dmu_object_info_t));

	return (error);
}
/*
 * Sanity check volume size.
 */
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize % blocksize != 0)
		return (EINVAL);

	if (volsize - 1 > MAXOFFSET_T)
		return (EOVERFLOW);

	return (0);
}
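
/*
 * For example, with an 8 KiB block size a 1 GiB volsize (131072 blocks)
 * passes the check above, while 1 GiB + 4 KiB is rejected because it is
 * not a multiple of the block size.
 */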
/*
 * Ensure the zap is flushed then inform the VFS of the capacity change.
 */
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize, objset_t *os)
{
	struct block_device *bdev;
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);

	error = dmu_free_long_range(os,
	    ZVOL_OBJ, volsize, DMU_OBJECT_END);

	bdev = bdget_disk(zv->zv_disk, 0);

	/*
	 * Added check_disk_size_change() helper function.
	 */
#ifdef HAVE_CHECK_DISK_SIZE_CHANGE
	set_capacity(zv->zv_disk, volsize >> 9);
	zv->zv_volsize = volsize;
	check_disk_size_change(zv->zv_disk, bdev);
#else
	zv->zv_volsize = volsize;
	zv->zv_changed = 1;
	(void) check_disk_change(bdev);
#endif /* HAVE_CHECK_DISK_SIZE_CHANGE */

	bdput(bdev);

	return (0);
}
/*
 * Set ZFS_PROP_VOLSIZE entry point.
 */
int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv;
	dmu_object_info_t *doi;
	objset_t *os = NULL;
	uint64_t readonly;
	int error;

	mutex_enter(&zvol_state_lock);

	zv = zvol_find_by_name(name);
	if (zv == NULL) {
		error = ENXIO;
		goto out;
	}

	doi = kmem_alloc(sizeof(dmu_object_info_t), KM_SLEEP);

	error = dmu_objset_hold(name, FTAG, &os);
	if (error)
		goto out_doi;

	if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi->doi_data_block_size)) != 0)
		goto out_doi;

	VERIFY(dsl_prop_get_integer(name, "readonly", &readonly, NULL) == 0);
	if (readonly) {
		error = EROFS;
		goto out_doi;
	}

	if (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY)) {
		error = EROFS;
		goto out_doi;
	}

	error = zvol_update_volsize(zv, volsize, os);
out_doi:
	kmem_free(doi, sizeof(dmu_object_info_t));

	if (os)
		dmu_objset_rele(os, FTAG);
out:
	mutex_exit(&zvol_state_lock);

	return (error);
}
/*
 * Sanity check volume block size.
 */
int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (EDOM);

	return (0);
}
/*
 * Set ZFS_PROP_VOLBLOCKSIZE entry point.
 */
int
zvol_set_volblocksize(const char *name, uint64_t volblocksize)
{
	zvol_state_t *zv;
	dmu_tx_t *tx;
	int error;

	mutex_enter(&zvol_state_lock);

	zv = zvol_find_by_name(name);
	if (zv == NULL) {
		error = ENXIO;
		goto out;
	}

	if (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY)) {
		error = EROFS;
		goto out;
	}

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
		    volblocksize, 0, tx);
		if (error == ENOTSUP)
			error = EBUSY;
		dmu_tx_commit(tx);
		if (error == 0)
			zv->zv_volblocksize = volblocksize;
	}
out:
	mutex_exit(&zvol_state_lock);

	return (error);
}
/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t off = lr->lr_offset;
	uint64_t len = lr->lr_length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, off, len, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (ENOTSUP);
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE is needed for zvol.
 */
zil_replay_func_t zvol_replay_vector[TX_MAX_TYPE] = {
	(zil_replay_func_t)zvol_replay_err,	/* no such transaction type */
	(zil_replay_func_t)zvol_replay_err,	/* TX_CREATE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_MKDIR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_MKXATTR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_SYMLINK */
	(zil_replay_func_t)zvol_replay_err,	/* TX_REMOVE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_RMDIR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_LINK */
	(zil_replay_func_t)zvol_replay_err,	/* TX_RENAME */
	(zil_replay_func_t)zvol_replay_write,	/* TX_WRITE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_TRUNCATE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_SETATTR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_ACL */
};
/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL
 * transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;
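
/*
 * In outline, the write state chosen below determines how the data
 * reaches stable storage (a sketch of the logic that follows, not
 * additional behavior):
 *
 *	WR_INDIRECT	large, block-aligned writes with no separate log
 *			device; the data is synced via dmu_sync() and the
 *			log record carries only a block pointer.
 *	WR_COPIED	the data is copied directly into the log record.
 *	WR_NEED_COPY	the data is copied later, at zil_commit() time.
 */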
static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx,
    uint64_t offset, uint64_t size, int sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;
	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (size) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    size >= blocksize && offset % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, size);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, size);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = offset;
		lr->lr_length = len;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		(void) zil_itx_assign(zilog, itx, tx);

		offset += len;
		size -= len;
	}
}
/*
 * Common write path running under the zvol taskq context. This function
 * is responsible for copying the request structure data into the DMU and
 * signaling the request queue with the result of the copy.
 */
static void
zvol_write(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error = 0;
	dmu_tx_t *tx;
	rl_t *rl;

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock. KM_PUSHPAGE should be used instead.
	 */
	ASSERT(!(current->flags & PF_NOFS));
	current->flags |= PF_NOFS;
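
	/*
	 * For example (hypothetical allocation in this context), any
	 * allocation made while PF_NOFS is set must use KM_PUSHPAGE:
	 *
	 *	buf = kmem_alloc(len, KM_PUSHPAGE);	(safe here)
	 *	buf = kmem_alloc(len, KM_SLEEP);	(may deadlock)
	 */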
	if (req->cmd_flags & VDEV_REQ_FLUSH)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	/*
	 * Some requests are just for flush and nothing else.
	 */
	if (size == 0) {
		blk_end_request(req, 0, size);
		goto out;
	}

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

	/* This will only fail for ENOSPC */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		blk_end_request(req, -error, size);
		goto out;
	}

	error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
	if (error == 0)
		zvol_log_write(zv, tx, offset, size,
		    req->cmd_flags & VDEV_REQ_FUA);

	dmu_tx_commit(tx);
	zfs_range_unlock(rl);

	if ((req->cmd_flags & VDEV_REQ_FUA) ||
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	blk_end_request(req, -error, size);
out:
	current->flags &= ~PF_NOFS;
}
#ifdef HAVE_BLK_QUEUE_DISCARD
static void
zvol_discard(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t start = blk_rq_pos(req) << 9;
	uint64_t end = start + blk_rq_bytes(req);
	int error;
	rl_t *rl;

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock. KM_PUSHPAGE should be used instead.
	 */
	ASSERT(!(current->flags & PF_NOFS));
	current->flags |= PF_NOFS;
	if (end > zv->zv_volsize) {
		blk_end_request(req, -EIO, blk_rq_bytes(req));
		goto out;
	}

	/*
	 * Align the request to volume block boundaries. If we don't,
	 * then this will force dnode_free_range() to zero out the
	 * unaligned parts, which is slow (read-modify-write) and
	 * useless since we are not freeing any space by doing so.
	 */
	start = P2ROUNDUP(start, zv->zv_volblocksize);
	end = P2ALIGN(end, zv->zv_volblocksize);
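
	/*
	 * For example, with an 8 KiB volblocksize a discard of bytes
	 * [4096, 20480) is narrowed to [8192, 16384): only the one fully
	 * covered block is freed, the partial edges are left untouched.
	 */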
	if (start >= end) {
		blk_end_request(req, 0, blk_rq_bytes(req));
		goto out;
	}

	rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
	    start, end - start);

	/*
	 * TODO: maybe we should add the operation to the log.
	 */

	zfs_range_unlock(rl);

	blk_end_request(req, -error, blk_rq_bytes(req));
out:
	current->flags &= ~PF_NOFS;
}
#endif /* HAVE_BLK_QUEUE_DISCARD */
/*
 * Common read path running under the zvol taskq context. This function
 * is responsible for copying the requested data out of the DMU and into
 * a Linux request structure. It then must signal the request queue with
 * an error code describing the result of the copy.
 */
static void
zvol_read(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error;
	rl_t *rl;

	if (size == 0) {
		blk_end_request(req, 0, size);
		return;
	}

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);

	zfs_range_unlock(rl);

	/* convert checksum errors into IO errors */
	if (error == ECKSUM)
		error = EIO;

	blk_end_request(req, -error, size);
}
/*
 * Request will be added back to the request queue and retried if
 * it cannot be immediately dispatched to the taskq for handling.
 */
static inline void
zvol_dispatch(task_func_t func, struct request *req)
{
	if (!taskq_dispatch(zvol_taskq, func, (void *)req, TQ_NOSLEEP))
		blk_requeue_request(req->q, req);
}
/*
 * Common request path. Rather than registering a custom make_request()
 * function we use the generic Linux version. This is done because it
 * allows us to easily merge read requests which would otherwise be
 * performed synchronously by the DMU. This is less critical in the write
 * case where the DMU will perform the correct merging within a transaction
 * group. Using the generic make_request() also lets us leverage the fact
 * that the elevator will ensure correct ordering with regard to barrier
 * IOs. On the downside it means that in the write case we end up doing
 * request merging twice: once in the elevator and once in the DMU.
 *
 * The request handler is called under a spin lock so all the real work
 * is handed off to be done in the context of the zvol taskq. This function
 * simply performs basic request sanity checking and hands off the request.
 */
static void
zvol_request(struct request_queue *q)
{
	zvol_state_t *zv = q->queuedata;
	struct request *req;
	unsigned int size;

	while ((req = blk_fetch_request(q)) != NULL) {
		size = blk_rq_bytes(req);

		if (size != 0 && blk_rq_pos(req) + blk_rq_sectors(req) >
		    get_capacity(zv->zv_disk)) {
			printk(KERN_INFO
			    "%s: bad access: block=%llu, count=%lu\n",
			    req->rq_disk->disk_name,
			    (long long unsigned)blk_rq_pos(req),
			    (long unsigned)blk_rq_sectors(req));
			__blk_end_request(req, -EIO, size);
			continue;
		}

		if (!blk_fs_request(req)) {
			printk(KERN_INFO "%s: non-fs cmd\n",
			    req->rq_disk->disk_name);
			__blk_end_request(req, -EIO, size);
			continue;
		}
		switch (rq_data_dir(req)) {
		case READ:
			zvol_dispatch(zvol_read, req);
			break;
		case WRITE:
			if (unlikely(get_disk_ro(zv->zv_disk)) ||
			    unlikely(zv->zv_flags & ZVOL_RDONLY)) {
				__blk_end_request(req, -EROFS, size);
				break;
			}

#ifdef HAVE_BLK_QUEUE_DISCARD
			if (req->cmd_flags & VDEV_REQ_DISCARD) {
				zvol_dispatch(zvol_discard, req);
				break;
			}
#endif /* HAVE_BLK_QUEUE_DISCARD */

			zvol_dispatch(zvol_write, req);
			break;
		default:
			printk(KERN_INFO "%s: unknown cmd: %d\n",
			    req->rq_disk->disk_name, (int)rq_data_dir(req));
			__blk_end_request(req, -EIO, size);
			break;
		}
	}
}
static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}
/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_PUSHPAGE);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		error = dmu_read(os, ZVOL_OBJ, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN_TYPED(offset, size, uint64_t);
		error = dmu_buf_hold(os, ZVOL_OBJ, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			zgd->zgd_db = db;
			zgd->zgd_bp = &lr->lr_blkptr;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}
/*
 * The zvol_state_t's are inserted in increasing MINOR(dev_t) order.
 */
static void
zvol_insert(zvol_state_t *zv_insert)
{
	zvol_state_t *zv = NULL;

	ASSERT(MUTEX_HELD(&zvol_state_lock));
	ASSERT3U(MINOR(zv_insert->zv_dev) & ZVOL_MINOR_MASK, ==, 0);
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv)) {
		if (MINOR(zv->zv_dev) > MINOR(zv_insert->zv_dev))
			break;
	}

	list_insert_before(&zvol_state_list, zv, zv_insert);
}
/*
 * Simply remove the zvol from the list of zvols.
 */
static void
zvol_remove(zvol_state_t *zv_remove)
{
	ASSERT(MUTEX_HELD(&zvol_state_lock));
	list_remove(&zvol_state_list, zv_remove);
}
static int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	uint64_t ro;
	int locked = 0;
	int error;

	/*
	 * In all other cases the spa_namespace_lock is taken before the
	 * bdev->bd_mutex lock. But in this case the Linux __blkdev_get()
	 * function calls fops->open() with the bdev->bd_mutex lock held.
	 *
	 * To avoid a potential lock inversion deadlock we preemptively
	 * try to take the spa_namespace_lock. Normally it will not
	 * be contended and this is safe because spa_open_common() handles
	 * the case where the caller already holds the spa_namespace_lock.
	 *
	 * When it is contended we risk a lock inversion if we were to
	 * block waiting for the lock. Luckily, the __blkdev_get()
	 * function allows us to return -ERESTARTSYS which will result in
	 * bdev->bd_mutex being dropped, reacquired, and fops->open() being
	 * called again. This process can be repeated safely until both
	 * locks are acquired.
	 */
	if (!mutex_owned(&spa_namespace_lock)) {
		locked = mutex_tryenter(&spa_namespace_lock);
		if (!locked)
			return (-ERESTARTSYS);
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zvol_tag, &os);
	if (error)
		goto out_mutex;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		goto out_mutex;
	}

	zv->zv_objset = os;
	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		goto out_mutex;
	}

	set_capacity(zv->zv_disk, volsize >> 9);
	zv->zv_volsize = volsize;
	zv->zv_zilog = zil_open(os, zvol_get_data);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL) == 0);
	if (ro || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os))) {
		set_disk_ro(zv->zv_disk, 1);
		zv->zv_flags |= ZVOL_RDONLY;
	} else {
		set_disk_ro(zv->zv_disk, 0);
		zv->zv_flags &= ~ZVOL_RDONLY;
	}

out_mutex:
	if (locked)
		mutex_exit(&spa_namespace_lock);

	return (-error);
}
static void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	(void) dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}
static int
zvol_open(struct block_device *bdev, fmode_t flag)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	int error = 0, drop_mutex = 0;

	/*
	 * If the caller is already holding the mutex do not take it
	 * again, this will happen as part of zvol_create_minor().
	 * Once add_disk() is called the device is live and the kernel
	 * will attempt to open it to read the partition information.
	 */
	if (!mutex_owned(&zvol_state_lock)) {
		mutex_enter(&zvol_state_lock);
		drop_mutex = 1;
	}

	ASSERT3P(zv, !=, NULL);

	if (zv->zv_open_count == 0) {
		error = zvol_first_open(zv);
		if (error)
			goto out_mutex;
	}

	if ((flag & FMODE_WRITE) &&
	    (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY))) {
		error = -EROFS;
		goto out_open_count;
	}

	zv->zv_open_count++;

out_open_count:
	if (zv->zv_open_count == 0)
		zvol_last_close(zv);

out_mutex:
	if (drop_mutex)
		mutex_exit(&zvol_state_lock);

	check_disk_change(bdev);

	return (error);
}
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
static void
#else
static int
#endif
zvol_release(struct gendisk *disk, fmode_t mode)
{
	zvol_state_t *zv = disk->private_data;
	int drop_mutex = 0;

	if (!mutex_owned(&zvol_state_lock)) {
		mutex_enter(&zvol_state_lock);
		drop_mutex = 1;
	}

	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);
	zv->zv_open_count--;
	if (zv->zv_open_count == 0)
		zvol_last_close(zv);

	if (drop_mutex)
		mutex_exit(&zvol_state_lock);

#ifndef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
	return 0;
#endif
}
static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long arg)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	int error = 0;

	switch (cmd) {
	case BLKFLSBUF:
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		break;
	case BLKZNAME:
		error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
		break;
	default:
		error = -ENOTTY;
		break;
	}

	return (error);
}

#ifdef CONFIG_COMPAT
static int
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned cmd, unsigned long arg)
{
	return zvol_ioctl(bdev, mode, cmd, arg);
}
#else
#define	zvol_compat_ioctl	NULL
#endif
static int zvol_media_changed(struct gendisk *disk)
{
	zvol_state_t *zv = disk->private_data;

	return zv->zv_changed;
}

static int zvol_revalidate_disk(struct gendisk *disk)
{
	zvol_state_t *zv = disk->private_data;

	zv->zv_changed = 0;
	set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

	return 0;
}
/*
 * Provide a simple virtual geometry for legacy compatibility. For devices
 * smaller than 1 MiB a small head and sector count is used to allow very
 * tiny devices. For devices over 1 MiB a standard head and sector count
 * is used to keep the cylinder count reasonable.
 */
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	sector_t sectors = get_capacity(zv->zv_disk);

	if (sectors > 2048) {
		geo->heads = 16;
		geo->sectors = 63;
	} else {
		geo->heads = 2;
		geo->sectors = 4;
	}

	geo->start = 0;
	geo->cylinders = sectors / (geo->heads * geo->sectors);
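
	/*
	 * Worked example: a 1 GiB volume is 2097152 512-byte sectors,
	 * so the large branch above reports 16 heads and 63 sectors,
	 * giving 2097152 / (16 * 63) = 2080 cylinders.
	 */

	return 0;
}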
static struct kobject *
zvol_probe(dev_t dev, int *part, void *arg)
{
	zvol_state_t *zv;
	struct kobject *kobj;

	mutex_enter(&zvol_state_lock);
	zv = zvol_find_by_dev(dev);
	kobj = zv ? get_disk(zv->zv_disk) : NULL;
	mutex_exit(&zvol_state_lock);

	return kobj;
}
#ifdef HAVE_BDEV_BLOCK_DEVICE_OPERATIONS
static struct block_device_operations zvol_ops = {
	.open			= zvol_open,
	.release		= zvol_release,
	.ioctl			= zvol_ioctl,
	.compat_ioctl		= zvol_compat_ioctl,
	.media_changed		= zvol_media_changed,
	.revalidate_disk	= zvol_revalidate_disk,
	.getgeo			= zvol_getgeo,
	.owner			= THIS_MODULE,
};

#else /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
static int
zvol_open_by_inode(struct inode *inode, struct file *file)
{
	return zvol_open(inode->i_bdev, file->f_mode);
}

static int
zvol_release_by_inode(struct inode *inode, struct file *file)
{
	return zvol_release(inode->i_bdev->bd_disk, file->f_mode);
}

static int
zvol_ioctl_by_inode(struct inode *inode, struct file *file,
    unsigned int cmd, unsigned long arg)
{
	if (file == NULL || inode == NULL)
		return -EINVAL;

	return zvol_ioctl(inode->i_bdev, file->f_mode, cmd, arg);
}

# ifdef CONFIG_COMPAT
static long
zvol_compat_ioctl_by_inode(struct file *file,
    unsigned int cmd, unsigned long arg)
{
	if (file == NULL)
		return -EINVAL;

	return zvol_compat_ioctl(file->f_dentry->d_inode->i_bdev,
	    file->f_mode, cmd, arg);
}
# else
# define	zvol_compat_ioctl_by_inode	NULL
# endif
static struct block_device_operations zvol_ops = {
	.open			= zvol_open_by_inode,
	.release		= zvol_release_by_inode,
	.ioctl			= zvol_ioctl_by_inode,
	.compat_ioctl		= zvol_compat_ioctl_by_inode,
	.media_changed		= zvol_media_changed,
	.revalidate_disk	= zvol_revalidate_disk,
	.getgeo			= zvol_getgeo,
	.owner			= THIS_MODULE,
};
#endif /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
/*
 * Allocate memory for a new zvol_state_t and setup the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;
	int error = 0;

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);

	spin_lock_init(&zv->zv_lock);
	list_link_init(&zv->zv_next);

	zv->zv_queue = blk_init_queue(zvol_request, &zv->zv_lock);
	if (zv->zv_queue == NULL)
		goto out_kfree;

#ifdef HAVE_ELEVATOR_CHANGE
	error = elevator_change(zv->zv_queue, "noop");
#endif /* HAVE_ELEVATOR_CHANGE */
	if (error) {
		printk("ZFS: Unable to set \"%s\" scheduler for zvol %s: %d\n",
		    "noop", name, error);
		goto out_queue;
	}

#ifdef HAVE_BLK_QUEUE_FLUSH
	blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
#else
	blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
#endif /* HAVE_BLK_QUEUE_FLUSH */

	zv->zv_disk = alloc_disk(ZVOL_MINORS);
	if (zv->zv_disk == NULL)
		goto out_queue;

	zv->zv_queue->queuedata = zv;
	zv->zv_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, MAXNAMELEN);

	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	zv->zv_znode.z_is_zvol = TRUE;

	zv->zv_disk->major = zvol_major;
	zv->zv_disk->first_minor = (dev & MINORMASK);
	zv->zv_disk->fops = &zvol_ops;
	zv->zv_disk->private_data = zv;
	zv->zv_disk->queue = zv->zv_queue;
	snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
	    ZVOL_DEV_NAME, (dev & MINORMASK));

	return zv;

out_queue:
	blk_cleanup_queue(zv->zv_queue);
out_kfree:
	kmem_free(zv, sizeof (zvol_state_t));

	return NULL;
}
/*
 * Cleanup then free a zvol_state_t which was created by zvol_alloc().
 */
static void
zvol_free(zvol_state_t *zv)
{
	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	del_gendisk(zv->zv_disk);
	blk_cleanup_queue(zv->zv_queue);
	put_disk(zv->zv_disk);

	kmem_free(zv, sizeof (zvol_state_t));
}
static int
__zvol_snapdev_hidden(const char *name)
{
	uint64_t snapdev;
	char *parent;
	char *atp;
	int error = 0;

	parent = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) strlcpy(parent, name, MAXPATHLEN);

	if ((atp = strrchr(parent, '@')) != NULL) {
		*atp = '\0';
		error = dsl_prop_get_integer(parent, "snapdev", &snapdev, NULL);
		if ((error == 0) && (snapdev == ZFS_SNAPDEV_HIDDEN))
			error = ENODEV;
	}

	kmem_free(parent, MAXPATHLEN);

	return (error);
}
static int
__zvol_create_minor(const char *name, boolean_t ignore_snapdev)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t *doi;
	uint64_t volsize;
	unsigned minor = 0;
	int error = 0;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	zv = zvol_find_by_name(name);
	if (zv) {
		error = EEXIST;
		goto out;
	}

	if (ignore_snapdev == B_FALSE) {
		error = __zvol_snapdev_hidden(name);
		if (error)
			goto out;
	}

	doi = kmem_alloc(sizeof(dmu_object_info_t), KM_SLEEP);

	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
	if (error)
		goto out_doi;

	error = dmu_object_info(os, ZVOL_OBJ, doi);
	if (error)
		goto out_dmu_objset_disown;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error)
		goto out_dmu_objset_disown;

	error = zvol_find_minor(&minor);
	if (error)
		goto out_dmu_objset_disown;

	zv = zvol_alloc(MKDEV(zvol_major, minor), name);
	if (zv == NULL) {
		error = EAGAIN;
		goto out_dmu_objset_disown;
	}

	if (dmu_objset_is_snapshot(os))
		zv->zv_flags |= ZVOL_RDONLY;

	zv->zv_volblocksize = doi->doi_data_block_size;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;

	set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

	blk_queue_max_hw_sectors(zv->zv_queue, UINT_MAX);
	blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
	blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
	blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
	blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
#ifdef HAVE_BLK_QUEUE_DISCARD
	blk_queue_max_discard_sectors(zv->zv_queue,
	    (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
	blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
#endif
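	/*
	 * With the default zvol_max_discard_blocks of 16384 and an
	 * 8 KiB volblocksize this caps a single discard request at
	 * 16384 * 8192 bytes = 128 MiB, or 262144 512-byte sectors.
	 */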
#ifdef HAVE_BLK_QUEUE_NONROT
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
#endif

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}

	zv->zv_objset = NULL;
out_dmu_objset_disown:
	dmu_objset_disown(os, zvol_tag);
out_doi:
	kmem_free(doi, sizeof(dmu_object_info_t));
out:
	if (error == 0) {
		zvol_insert(zv);
		add_disk(zv->zv_disk);
	}

	return (error);
}
/*
 * Create a block device minor node and setup the linkage between it
 * and the specified volume. Once this function returns the block
 * device is live and ready for use.
 */
int
zvol_create_minor(const char *name)
{
	int error;

	mutex_enter(&zvol_state_lock);
	error = __zvol_create_minor(name, B_FALSE);
	mutex_exit(&zvol_state_lock);

	return (error);
}
static int
__zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	zv = zvol_find_by_name(name);
	if (zv == NULL)
		return (ENXIO);

	if (zv->zv_open_count > 0)
		return (EBUSY);

	zvol_remove(zv);
	zvol_free(zv);

	return (0);
}

/*
 * Remove a block device minor node for the specified volume.
 */
int
zvol_remove_minor(const char *name)
{
	int error;

	mutex_enter(&zvol_state_lock);
	error = __zvol_remove_minor(name);
	mutex_exit(&zvol_state_lock);

	return (error);
}
static int
zvol_create_minors_cb(spa_t *spa, uint64_t dsobj,
    const char *dsname, void *arg)
{
	if (strchr(dsname, '/') == NULL)
		return 0;

	(void) __zvol_create_minor(dsname, B_FALSE);
	return (0);
}

/*
 * Create minors for specified pool, if pool is NULL create minors
 * for all available pools.
 */
int
zvol_create_minors(const char *pool)
{
	spa_t *spa = NULL;
	int error = 0;

	if (zvol_inhibit_dev)
		return (0);

	mutex_enter(&zvol_state_lock);
	if (pool) {
		error = dmu_objset_find_spa(NULL, pool, zvol_create_minors_cb,
		    NULL, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
	} else {
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL) {
			error = dmu_objset_find_spa(NULL,
			    spa_name(spa), zvol_create_minors_cb, NULL,
			    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
			if (error)
				break;
		}
		mutex_exit(&spa_namespace_lock);
	}
	mutex_exit(&zvol_state_lock);

	return error;
}
/*
 * Remove minors for specified pool, if pool is NULL remove all minors.
 */
void
zvol_remove_minors(const char *pool)
{
	zvol_state_t *zv, *zv_next;
	char *str;

	if (zvol_inhibit_dev)
		return;

	str = kmem_zalloc(MAXNAMELEN, KM_SLEEP);
	if (pool) {
		(void) strncpy(str, pool, strlen(pool));
		(void) strcat(str, "/");
	}

	mutex_enter(&zvol_state_lock);
	for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
		zv_next = list_next(&zvol_state_list, zv);

		if (pool == NULL || !strncmp(str, zv->zv_name, strlen(str))) {
			zvol_remove(zv);
			zvol_free(zv);
		}
	}
	mutex_exit(&zvol_state_lock);
	kmem_free(str, MAXNAMELEN);
}
static int
snapdev_snapshot_changed_cb(const char *dsname, void *arg) {
	uint64_t snapdev = *(uint64_t *) arg;

	if (strchr(dsname, '@') == NULL)
		return 0;

	switch (snapdev) {
	case ZFS_SNAPDEV_VISIBLE:
		mutex_enter(&zvol_state_lock);
		(void) __zvol_create_minor(dsname, B_TRUE);
		mutex_exit(&zvol_state_lock);
		break;
	case ZFS_SNAPDEV_HIDDEN:
		(void) zvol_remove_minor(dsname);
		break;
	}

	return 0;
}

int
zvol_set_snapdev(const char *dsname, uint64_t snapdev) {
	(void) dmu_objset_find((char *) dsname, snapdev_snapshot_changed_cb,
	    &snapdev, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
	/* caller should continue to modify snapdev property */
	return (-1);
}
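
/*
 * For example, from user space (hypothetical dataset name):
 *
 *	zfs set snapdev=visible tank/vol
 *
 * walks tank/vol and its snapshots through the callback above and
 * creates a device node for each snapshot; setting snapdev=hidden
 * removes them again.
 */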
int
zvol_init(void)
{
	int error;

	list_create(&zvol_state_list, sizeof (zvol_state_t),
	    offsetof(zvol_state_t, zv_next));
	mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);

	zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_threads, maxclsyspri,
	    zvol_threads, INT_MAX, TASKQ_PREPOPULATE);
	if (zvol_taskq == NULL) {
		printk(KERN_INFO "ZFS: taskq_create() failed\n");
		error = -ENOMEM;
		goto out;
	}

	error = register_blkdev(zvol_major, ZVOL_DRIVER);
	if (error) {
		printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
		taskq_destroy(zvol_taskq);
		goto out;
	}

	blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
	    THIS_MODULE, zvol_probe, NULL, NULL);

	return (0);

out:
	mutex_destroy(&zvol_state_lock);
	list_destroy(&zvol_state_list);

	return (error);
}
void
zvol_fini(void)
{
	zvol_remove_minors(NULL);
	blk_unregister_region(MKDEV(zvol_major, 0), 1UL << MINORBITS);
	unregister_blkdev(zvol_major, ZVOL_DRIVER);
	taskq_destroy(zvol_taskq);
	mutex_destroy(&zvol_state_lock);
	list_destroy(&zvol_state_list);
}
module_param(zvol_inhibit_dev, uint, 0644);
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");

module_param(zvol_major, uint, 0444);
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");

module_param(zvol_threads, uint, 0444);
MODULE_PARM_DESC(zvol_threads, "Number of threads for zvol device");

module_param(zvol_max_discard_blocks, ulong, 0444);
MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard at once");
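
/*
 * These parameters may be set at module load time, for example
 * (hypothetical values):
 *
 *	modprobe zfs zvol_inhibit_dev=1 zvol_threads=64
 *
 * The 0444 parameters above are read-only once loaded, while
 * zvol_inhibit_dev (0644) can also be changed at runtime through
 * /sys/module/zfs/parameters/zvol_inhibit_dev.
 */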