zio_interrupt(zio);
}
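+/*
+ * Execute the synchronous VOP_FSYNC() from taskq context, where
+ * PF_FSTRANS is never set, then notify the zio pipeline of
+ * completion via zio_interrupt().
+ */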
+static void
+vdev_file_io_fsync(void *arg)
+{
+ zio_t *zio = (zio_t *)arg;
+ vdev_file_t *vf = zio->io_vd->vdev_tsd;
+
+ zio->io_error = VOP_FSYNC(vf->vf_vnode, FSYNC | FDSYNC, kcred, NULL);
+
+ zio_interrupt(zio);
+}
+
static int
vdev_file_io_start(zio_t *zio)
{
if (zfs_nocacheflush)
break;
+ /*
+ * We cannot safely call vfs_fsync() when PF_FSTRANS
+ * is set in the current context. Filesystems like
+ * XFS include sanity checks to verify it is not
+ * already set, see xfs_vm_writepage(). Therefore
+ * the sync must be dispatched to a different context.
+ */
+ if (spl_fstrans_check()) {
+ VERIFY3U(taskq_dispatch(vdev_file_taskq,
+ vdev_file_io_fsync, zio, TQ_SLEEP), !=, 0);
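+ /*
+ * The pipeline is stopped rather than completed; the taskq
+ * callback resumes it via zio_interrupt() once the sync is done.
+ */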
+ return (ZIO_PIPELINE_STOP);
+ }
+
zio->io_error = VOP_FSYNC(vf->vf_vnode, FSYNC | FDSYNC,
kcred, NULL);
break;
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
struct address_space *mapping = data;
+ fstrans_cookie_t cookie;
ASSERT(PageLocked(pp));
ASSERT(!PageWriteback(pp));
- ASSERT(!(current->flags & PF_NOFS));
- /*
- * Annotate this call path with a flag that indicates that it is
- * unsafe to use KM_SLEEP during memory allocations due to the
- * potential for a deadlock. KM_PUSHPAGE should be used instead.
- */
- current->flags |= PF_NOFS;
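+ /*
+ * Mark this context with PF_FSTRANS so memory allocations made
+ * during writeback cannot re-enter the filesystem and deadlock;
+ * the returned cookie restores the previous state on unmark.
+ */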
+ cookie = spl_fstrans_mark();
(void) zfs_putpage(mapping->host, pp, wbc);
- current->flags &= ~PF_NOFS;
+ spl_fstrans_unmark(cookie);
return (0);
}
struct request *req = (struct request *)arg;
struct request_queue *q = req->q;
zvol_state_t *zv = q->queuedata;
+ fstrans_cookie_t cookie = spl_fstrans_mark();
uint64_t offset = blk_rq_pos(req) << 9;
uint64_t size = blk_rq_bytes(req);
int error = 0;
dmu_tx_t *tx;
rl_t *rl;
- /*
- * Annotate this call path with a flag that indicates that it is
- * unsafe to use KM_SLEEP during memory allocations due to the
- * potential for a deadlock. KM_PUSHPAGE should be used instead.
- */
- ASSERT(!(current->flags & PF_NOFS));
- current->flags |= PF_NOFS;
-
if (req->cmd_flags & VDEV_REQ_FLUSH)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
* Some requests are just for flush and nothing else.
*/
if (size == 0) {
- blk_end_request(req, 0, size);
+ error = 0;
goto out;
}
if (error) {
dmu_tx_abort(tx);
zfs_range_unlock(rl);
- blk_end_request(req, -error, size);
goto out;
}
zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zv->zv_zilog, ZVOL_OBJ);
- blk_end_request(req, -error, size);
out:
- current->flags &= ~PF_NOFS;
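+ /* Complete the request exactly once, at the single exit point. */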
+ blk_end_request(req, -error, size);
+ spl_fstrans_unmark(cookie);
}
#ifdef HAVE_BLK_QUEUE_DISCARD
struct request *req = (struct request *)arg;
struct request_queue *q = req->q;
zvol_state_t *zv = q->queuedata;
+ fstrans_cookie_t cookie = spl_fstrans_mark();
uint64_t start = blk_rq_pos(req) << 9;
uint64_t end = start + blk_rq_bytes(req);
int error;
rl_t *rl;
- /*
- * Annotate this call path with a flag that indicates that it is
- * unsafe to use KM_SLEEP during memory allocations due to the
- * potential for a deadlock. KM_PUSHPAGE should be used instead.
- */
- ASSERT(!(current->flags & PF_NOFS));
- current->flags |= PF_NOFS;
-
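+ /* Refuse discards which extend past the end of the volume. */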
if (end > zv->zv_volsize) {
- blk_end_request(req, -EIO, blk_rq_bytes(req));
+ error = SET_ERROR(EIO);
goto out;
}
end = P2ALIGN(end, zv->zv_volblocksize);
if (start >= end) {
- blk_end_request(req, 0, blk_rq_bytes(req));
+ error = 0;
goto out;
}
*/
zfs_range_unlock(rl);
-
- blk_end_request(req, -error, blk_rq_bytes(req));
out:
- current->flags &= ~PF_NOFS;
+ blk_end_request(req, -error, blk_rq_bytes(req));
+ spl_fstrans_unmark(cookie);
}
#endif /* HAVE_BLK_QUEUE_DISCARD */
struct request *req = (struct request *)arg;
struct request_queue *q = req->q;
zvol_state_t *zv = q->queuedata;
+ fstrans_cookie_t cookie = spl_fstrans_mark();
uint64_t offset = blk_rq_pos(req) << 9;
uint64_t size = blk_rq_bytes(req);
int error;
rl_t *rl;
if (size == 0) {
- blk_end_request(req, 0, size);
- return;
+ error = 0;
+ goto out;
}
rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
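+ /*
+ * ECKSUM is private to ZFS; report checksum failures to the
+ * block layer as EIO.
+ */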
if (error == ECKSUM)
error = SET_ERROR(EIO);
+out:
blk_end_request(req, -error, size);
+ spl_fstrans_unmark(cookie);
}
/*