4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 /* Portions Copyright 2007 Jeremy Teo */
26 /* Portions Copyright 2010 Robert Milkowski */
29 #include <sys/types.h>
30 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/sysmacros.h>
34 #include <sys/resource.h>
36 #include <sys/vfs_opreg.h>
40 #include <sys/taskq.h>
42 #include <sys/vmsystm.h>
43 #include <sys/atomic.h>
45 #include <sys/pathname.h>
46 #include <sys/cmn_err.h>
47 #include <sys/errno.h>
48 #include <sys/unistd.h>
49 #include <sys/zfs_dir.h>
50 #include <sys/zfs_acl.h>
51 #include <sys/zfs_ioctl.h>
52 #include <sys/fs/zfs.h>
54 #include <sys/dmu_objset.h>
60 #include <sys/dirent.h>
61 #include <sys/policy.h>
62 #include <sys/sunddi.h>
65 #include "fs/fs_subr.h"
66 #include <sys/zfs_fuid.h>
67 #include <sys/zfs_sa.h>
68 #include <sys/zfs_vnops.h>
70 #include <sys/zfs_rlock.h>
71 #include <sys/extdirent.h>
72 #include <sys/kidmap.h>
79 * Each vnode op performs some logical unit of work. To do this, the ZPL must
80 * properly lock its in-core state, create a DMU transaction, do the work,
81 * record this work in the intent log (ZIL), commit the DMU transaction,
82 * and wait for the intent log to commit if it is a synchronous operation.
83 * Moreover, the vnode ops must work in both normal and log replay context.
84 * The ordering of events is important to avoid deadlocks and references
85 * to freed memory. The example below illustrates the following Big Rules:
87 * (1) A check must be made in each zfs thread for a mounted file system.
88 * This is done while avoiding races, using ZFS_ENTER(zsb).
89 * A ZFS_EXIT(zsb) is needed before all returns. Any znodes
90 * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
91 * can return EIO from the calling function.
93 * (2) iput() should always be the last thing except for zil_commit()
94 * (if necessary) and ZFS_EXIT(). This is for 3 reasons:
95 * First, if it's the last reference, the vnode/znode
96 * can be freed, so the zp may point to freed memory. Second, the last
97 * reference will call zfs_zinactive(), which may induce a lot of work --
98 * pushing cached pages (which acquires range locks) and syncing out
99 * cached atime changes. Third, zfs_zinactive() may require a new tx,
100 * which could deadlock the system if you were already holding one.
101 * If you must call iput() within a tx then use iput_async().
103 * (3) All range locks must be grabbed before calling dmu_tx_assign(),
104 * as they can span dmu_tx_assign() calls.
106 * (4) Always pass TXG_NOWAIT as the second argument to dmu_tx_assign().
107 * This is critical because we don't want to block while holding locks.
108 * Note, in particular, that if a lock is sometimes acquired before
109 * the tx assigns, and sometimes after (e.g. z_lock), then failing to
110 * use a non-blocking assign can deadlock the system. The scenario:
112 * Thread A has grabbed a lock before calling dmu_tx_assign().
113 * Thread B is in an already-assigned tx, and blocks for this lock.
114 * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
115 * forever, because the previous txg can't quiesce until B's tx commits.
117 * If dmu_tx_assign() returns ERESTART and zsb->z_assign is TXG_NOWAIT,
118 * then drop all locks, call dmu_tx_wait(), and try again.
120 * (5) If the operation succeeded, generate the intent log entry for it
121 * before dropping locks. This ensures that the ordering of events
122 * in the intent log matches the order in which they actually occurred.
123 * During ZIL replay the zfs_log_* functions will update the sequence
124 * number to indicate the zil transaction has been replayed.
126 * (6) At the end of each vnode op, the DMU tx must always commit,
127 * regardless of whether there were any errors.
129 * (7) After dropping all locks, invoke zil_commit(zilog, foid)
130 * to ensure that synchronous semantics are provided when necessary.
132 * In general, this is how things should be ordered in each vnode op:
134 * ZFS_ENTER(zsb); // exit if unmounted
135 * top:
136 * zfs_dirent_lock(&dl, ...) // lock directory entry (may igrab())
137 * rw_enter(...); // grab any other locks you need
138 * tx = dmu_tx_create(...); // get DMU tx
139 * dmu_tx_hold_*(); // hold each object you might modify
140 * error = dmu_tx_assign(tx, TXG_NOWAIT); // try to assign
141 * if (error) {
142 * rw_exit(...); // drop locks
143 * zfs_dirent_unlock(dl); // unlock directory entry
144 * iput(...); // release held vnodes
145 * if (error == ERESTART) {
146 * dmu_tx_wait(tx);
147 * dmu_tx_abort(tx);
148 * goto top;
149 * }
150 * dmu_tx_abort(tx); // abort DMU tx
151 * ZFS_EXIT(zsb); // finished in zfs
152 * return (error); // really out of space
153 * }
154 * error = do_real_work(); // do whatever this VOP does
155 * if (error == 0)
156 * zfs_log_*(...); // on success, make ZIL entry
157 * dmu_tx_commit(tx); // commit DMU tx -- error or not
158 * rw_exit(...); // drop locks
159 * zfs_dirent_unlock(dl); // unlock directory entry
160 * iput(...); // release held vnodes
161 * zil_commit(zilog, foid); // synchronous when necessary
162 * ZFS_EXIT(zsb); // finished in zfs
163 * return (error); // done, report error
167 * Virus scanning is unsupported. It would be possible to add a hook
168 * here to perform the required virus scan. This could be done
169 * entirely in the kernel or potentially as an update to invoke a
170 * scanning utility.
173 zfs_vscan(struct inode *ip, cred_t *cr, int async)
180 zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
182 znode_t *zp = ITOZ(ip);
183 zfs_sb_t *zsb = ITOZSB(ip);
188 /* Honor ZFS_APPENDONLY file attribute */
189 if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
190 ((flag & O_APPEND) == 0)) {
195 /* Virus scan eligible files on open */
196 if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) &&
197 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
198 if (zfs_vscan(ip, cr, 0) != 0) {
204 /* Keep a count of the synchronous opens in the znode */
206 atomic_inc_32(&zp->z_sync_cnt);
211 EXPORT_SYMBOL(zfs_open);
215 zfs_close(struct inode *ip, int flag, cred_t *cr)
217 znode_t *zp = ITOZ(ip);
218 zfs_sb_t *zsb = ITOZSB(ip);
223 /* Decrement the synchronous opens in the znode */
227 if (!zfs_has_ctldir(zp) && zsb->z_vscan && S_ISREG(ip->i_mode) &&
228 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
229 VERIFY(zfs_vscan(ip, cr, 1) == 0);
234 EXPORT_SYMBOL(zfs_close);
238 * When a file is memory mapped, we must keep the IO data synchronized
239 * between the DMU cache and the memory mapped pages. What this means:
241 * On Write: If we find a memory mapped page, we write to *both*
242 * the page and the dmu buffer.
245 update_pages(struct inode *ip, int64_t start, int len,
246 objset_t *os, uint64_t oid)
248 struct address_space *mp = ip->i_mapping;
254 off = start & (PAGE_CACHE_SIZE-1);
255 for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
256 nbytes = MIN(PAGE_CACHE_SIZE - off, len);
258 pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
260 if (mapping_writably_mapped(mp))
261 flush_dcache_page(pp);
264 (void) dmu_read(os, oid, start+off, nbytes, pb+off,
268 if (mapping_writably_mapped(mp))
269 flush_dcache_page(pp);
271 mark_page_accessed(pp);
275 page_cache_release(pp);
284 * When a file is memory mapped, we must keep the IO data synchronized
285 * between the DMU cache and the memory mapped pages. What this means:
287 * On Read: We "read" preferentially from memory mapped pages,
288 * otherwise we fall back to reading from the dmu buffer.
290 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
291 * the file is memory mapped.
294 mappedread(struct inode *ip, int nbytes, uio_t *uio)
296 struct address_space *mp = ip->i_mapping;
298 znode_t *zp = ITOZ(ip);
299 objset_t *os = ITOZSB(ip)->z_os;
306 start = uio->uio_loffset;
307 off = start & (PAGE_CACHE_SIZE-1);
308 for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
309 bytes = MIN(PAGE_CACHE_SIZE - off, len);
311 pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
313 ASSERT(PageUptodate(pp));
316 error = uiomove(pb + off, bytes, UIO_READ, uio);
319 if (mapping_writably_mapped(mp))
320 flush_dcache_page(pp);
322 mark_page_accessed(pp);
324 page_cache_release(pp);
326 error = dmu_read_uio(os, zp->z_id, uio, bytes);
338 offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */
341 * Read bytes from specified file into supplied buffer.
343 * IN: ip - inode of file to be read from.
344 * uio - structure supplying read location, range info,
346 * ioflag - FSYNC flags; used to provide FRSYNC semantics.
347 * O_DIRECT flag; used to bypass page cache.
348 * cr - credentials of caller.
350 * OUT: uio - updated offset and range, buffer filled.
352 * RETURN: 0 if success
353 * error code if failure
355 * Side Effects:
356 * inode - atime updated if byte count > 0
360 zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
362 znode_t *zp = ITOZ(ip);
363 zfs_sb_t *zsb = ITOZSB(ip);
368 #ifdef HAVE_UIO_ZEROCOPY
370 #endif /* HAVE_UIO_ZEROCOPY */
376 if (zp->z_pflags & ZFS_AV_QUARANTINED) {
382 * Validate file offset
384 if (uio->uio_loffset < (offset_t)0) {
390 * Fasttrack empty reads
392 if (uio->uio_resid == 0) {
397 #ifdef HAVE_MANDLOCKS
399 * Check for mandatory locks
401 if (MANDMODE(zp->z_mode)) {
402 if (error = chklock(ip, FREAD,
403 uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) {
408 #endif /* HAVE_MANDLOCKS */
411 * If we're in FRSYNC mode, sync out this znode before reading it.
413 if (ioflag & FRSYNC || zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
414 zil_commit(zsb->z_log, zp->z_id);
417 * Lock the range against changes.
419 rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER);
422 * If we are reading past end-of-file we can skip
423 * to the end; but we might still need to set atime.
425 if (uio->uio_loffset >= zp->z_size) {
430 ASSERT(uio->uio_loffset < zp->z_size);
431 n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
433 #ifdef HAVE_UIO_ZEROCOPY
434 if ((uio->uio_extflg == UIO_XUIO) &&
435 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
437 int blksz = zp->z_blksz;
438 uint64_t offset = uio->uio_loffset;
440 xuio = (xuio_t *)uio;
442 nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
445 ASSERT(offset + n <= blksz);
448 (void) dmu_xuio_init(xuio, nblk);
450 if (vn_has_cached_data(ip)) {
452 * For simplicity, we always allocate a full buffer
453 * even if we only expect to read a portion of a block.
455 while (--nblk >= 0) {
456 (void) dmu_xuio_add(xuio,
457 dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
462 #endif /* HAVE_UIO_ZEROCOPY */
465 nbytes = MIN(n, zfs_read_chunk_size -
466 P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
468 if (zp->z_is_mapped && !(ioflag & O_DIRECT))
469 error = mappedread(ip, nbytes, uio);
471 error = dmu_read_uio(os, zp->z_id, uio, nbytes);
474 /* convert checksum errors into IO errors */
483 zfs_range_unlock(rl);
485 ZFS_ACCESSTIME_STAMP(zsb, zp);
486 zfs_inode_update(zp);
490 EXPORT_SYMBOL(zfs_read);
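/*
 * Illustrative sketch (assumes kcred and UIO_SYSSPACE are available in
 * the caller's context): how a kernel caller might drive zfs_read()
 * with a single-segment uio.  Error handling elided.
 *
 *	char buf[4096];
 *	iovec_t iov = { buf, sizeof (buf) };
 *	uio_t uio;
 *
 *	bzero(&uio, sizeof (uio));
 *	uio.uio_iov = &iov;
 *	uio.uio_iovcnt = 1;
 *	uio.uio_loffset = 0;		// read from the start of the file
 *	uio.uio_segflg = UIO_SYSSPACE;	// buffer lives in kernel space
 *	uio.uio_resid = sizeof (buf);	// bytes requested
 *	error = zfs_read(ip, &uio, 0, kcred);
 *	// on success, sizeof (buf) - uio.uio_resid bytes were copied
 */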
493 * Write the bytes to a file.
495 * IN: ip - inode of file to be written to.
496 * uio - structure supplying write location, range info,
498 * ioflag - FAPPEND flag set if in append mode.
499 * O_DIRECT flag; used to bypass page cache.
500 * cr - credentials of caller.
502 * OUT: uio - updated offset and range.
504 * RETURN: 0 if success
505 * error code if failure
507 * Timestamps:
508 * ip - ctime|mtime updated if byte count > 0
513 zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
515 znode_t *zp = ITOZ(ip);
516 rlim64_t limit = uio->uio_limit;
517 ssize_t start_resid = uio->uio_resid;
521 zfs_sb_t *zsb = ZTOZSB(zp);
526 int max_blksz = zsb->z_max_blksz;
529 iovec_t *aiov = NULL;
532 iovec_t *iovp = uio->uio_iov;
535 sa_bulk_attr_t bulk[4];
536 uint64_t mtime[2], ctime[2];
537 ASSERTV(int iovcnt = uio->uio_iovcnt);
540 * Fasttrack empty write
546 if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
552 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
553 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
554 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8);
555 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
559 * If immutable or not appending then return EPERM
561 if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
562 ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
563 (uio->uio_loffset < zp->z_size))) {
571 * Validate file offset
573 woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
579 #ifdef HAVE_MANDLOCKS
581 * Check for mandatory locks before calling zfs_range_lock()
582 * in order to prevent a deadlock with locks set via fcntl().
584 if (MANDMODE((mode_t)zp->z_mode) &&
585 (error = chklock(ip, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) {
589 #endif /* HAVE_MANDLOCKS */
591 #ifdef HAVE_UIO_ZEROCOPY
593 * Pre-fault the pages to ensure slow (e.g. NFS) pages
594 * don't hold up txg.
595 * Skip this if uio contains loaned arc_buf.
597 if ((uio->uio_extflg == UIO_XUIO) &&
598 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
599 xuio = (xuio_t *)uio;
601 uio_prefaultpages(MIN(n, max_blksz), uio);
602 #endif /* HAVE_UIO_ZEROCOPY */
605 * If in append mode, set the io offset pointer to eof.
607 if (ioflag & FAPPEND) {
609 * Obtain an appending range lock to guarantee file append
610 * semantics. We reset the write offset once we have the lock.
612 rl = zfs_range_lock(zp, 0, n, RL_APPEND);
614 if (rl->r_len == UINT64_MAX) {
616 * We overlocked the file because this write will cause
617 * the file block size to increase.
618 * Note that zp_size cannot change with this lock held.
622 uio->uio_loffset = woff;
625 * Note that if the file block size will change as a result of
626 * this write, then this range lock will lock the entire file
627 * so that we can re-write the block safely.
629 rl = zfs_range_lock(zp, woff, n, RL_WRITER);
633 zfs_range_unlock(rl);
638 if ((woff + n) > limit || woff > (limit - n))
641 /* Will this write extend the file length? */
642 write_eof = (woff + n > zp->z_size);
644 end_size = MAX(zp->z_size, woff + n);
647 * Write the file in reasonable size chunks. Each chunk is written
648 * in a separate transaction; this keeps the intent log records small
649 * and allows us to do more fine-grained space accounting.
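 *
 * For example (illustrative): with max_blksz = 128K and a write
 * starting at woff = 100K, the first chunk is
 * MIN(n, 128K - P2PHASE(100K, 128K)) = MIN(n, 28K), which realigns
 * the stream to a block boundary; every subsequent chunk is then a
 * full max_blksz-sized block.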
653 woff = uio->uio_loffset;
655 if (zfs_owner_overquota(zsb, zp, B_FALSE) ||
656 zfs_owner_overquota(zsb, zp, B_TRUE)) {
658 dmu_return_arcbuf(abuf);
663 if (xuio && abuf == NULL) {
664 ASSERT(i_iov < iovcnt);
666 abuf = dmu_xuio_arcbuf(xuio, i_iov);
667 dmu_xuio_clear(xuio, i_iov);
668 ASSERT((aiov->iov_base == abuf->b_data) ||
669 ((char *)aiov->iov_base - (char *)abuf->b_data +
670 aiov->iov_len == arc_buf_size(abuf)));
672 } else if (abuf == NULL && n >= max_blksz &&
673 woff >= zp->z_size &&
674 P2PHASE(woff, max_blksz) == 0 &&
675 zp->z_blksz == max_blksz) {
677 * This write covers a full block. "Borrow" a buffer
678 * from the dmu so that we can fill it before we enter
679 * a transaction. This avoids the possibility of
680 * holding up the transaction if the data copy hangs
681 * up on a pagefault (e.g., from an NFS server mapping).
685 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
687 ASSERT(abuf != NULL);
688 ASSERT(arc_buf_size(abuf) == max_blksz);
689 if ((error = uiocopy(abuf->b_data, max_blksz,
690 UIO_WRITE, uio, &cbytes))) {
691 dmu_return_arcbuf(abuf);
694 ASSERT(cbytes == max_blksz);
698 * Start a transaction.
700 tx = dmu_tx_create(zsb->z_os);
701 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
702 dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
703 zfs_sa_upgrade_txholds(tx, zp);
704 error = dmu_tx_assign(tx, TXG_NOWAIT);
706 if (error == ERESTART) {
713 dmu_return_arcbuf(abuf);
718 * If zfs_range_lock() over-locked we grow the blocksize
719 * and then reduce the lock range. This will only happen
720 * on the first iteration since zfs_range_reduce() will
721 * shrink down r_len to the appropriate size.
723 if (rl->r_len == UINT64_MAX) {
726 if (zp->z_blksz > max_blksz) {
727 ASSERT(!ISP2(zp->z_blksz));
728 new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE);
730 new_blksz = MIN(end_size, max_blksz);
732 zfs_grow_blocksize(zp, new_blksz, tx);
733 zfs_range_reduce(rl, woff, n);
737 * XXX - should we really limit each write to z_max_blksz?
738 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
740 nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
743 tx_bytes = uio->uio_resid;
744 error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
746 tx_bytes -= uio->uio_resid;
749 ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
751 * If this is not a full block write, but we are
752 * extending the file past EOF and this data starts
753 * block-aligned, use assign_arcbuf(). Otherwise,
754 * write via dmu_write().
756 if (tx_bytes < max_blksz && (!write_eof ||
757 aiov->iov_base != abuf->b_data)) {
759 dmu_write(zsb->z_os, zp->z_id, woff,
760 aiov->iov_len, aiov->iov_base, tx);
761 dmu_return_arcbuf(abuf);
762 xuio_stat_wbuf_copied();
764 ASSERT(xuio || tx_bytes == max_blksz);
765 dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
768 ASSERT(tx_bytes <= uio->uio_resid);
769 uioskip(uio, tx_bytes);
772 if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT))
773 update_pages(ip, woff, tx_bytes, zsb->z_os, zp->z_id);
776 * If we made no progress, we're done. If we made even
777 * partial progress, update the znode and ZIL accordingly.
780 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
781 (void *)&zp->z_size, sizeof (uint64_t), tx);
788 * Clear Set-UID/Set-GID bits on successful write if not
789 * privileged and at least one of the execute bits is set.
791 * It would be nice to do this after all writes have
792 * been done, but that would still expose the ISUID/ISGID
793 * to another app after the partial write is committed.
795 * Note: we don't call zfs_fuid_map_id() here because
796 * user 0 is not an ephemeral uid.
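 *
 * The mode mask tested below expands to S_IXUSR | S_IXGRP | S_IXOTH,
 * i.e. "any execute bit set", written as right-shifts of S_IXUSR
 * (S_IXUSR >> 3 == S_IXGRP, S_IXUSR >> 6 == S_IXOTH).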
798 mutex_enter(&zp->z_acl_lock);
799 if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
800 (S_IXUSR >> 6))) != 0 &&
801 (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
802 secpolicy_vnode_setid_retain(cr,
803 (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) {
805 zp->z_mode &= ~(S_ISUID | S_ISGID);
806 newmode = zp->z_mode;
807 (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zsb),
808 (void *)&newmode, sizeof (uint64_t), tx);
810 mutex_exit(&zp->z_acl_lock);
812 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
816 * Update the file size (zp_size) if it has changed;
817 * account for possible concurrent updates.
819 while ((end_size = zp->z_size) < uio->uio_loffset) {
820 (void) atomic_cas_64(&zp->z_size, end_size,
825 * If we are replaying and eof is non-zero then force
826 * the file size to the specified eof. Note, there's no
827 * concurrency during replay.
829 if (zsb->z_replay && zsb->z_replay_eof != 0)
830 zp->z_size = zsb->z_replay_eof;
832 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
834 zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag);
839 ASSERT(tx_bytes == nbytes);
843 uio_prefaultpages(MIN(n, max_blksz), uio);
846 zfs_range_unlock(rl);
849 * If we're in replay mode, or we made no progress, return error.
850 * Otherwise, it's at least a partial write, so it's successful.
852 if (zsb->z_replay || uio->uio_resid == start_resid) {
857 if (ioflag & (FSYNC | FDSYNC) ||
858 zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
859 zil_commit(zilog, zp->z_id);
861 zfs_inode_update(zp);
865 EXPORT_SYMBOL(zfs_write);
868 iput_async(struct inode *ip, taskq_t *taskq)
870 ASSERT(atomic_read(&ip->i_count) > 0);
871 if (atomic_read(&ip->i_count) == 1)
872 taskq_dispatch(taskq, (task_func_t *)iput, ip, TQ_SLEEP);
873 else
874 iput(ip);
878 zfs_get_done(zgd_t *zgd, int error)
880 znode_t *zp = zgd->zgd_private;
881 objset_t *os = ZTOZSB(zp)->z_os;
884 dmu_buf_rele(zgd->zgd_db, zgd);
886 zfs_range_unlock(zgd->zgd_rl);
889 * Release the vnode asynchronously as we currently have the
890 * txg stopped from syncing.
892 iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os)));
894 if (error == 0 && zgd->zgd_bp)
895 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
897 kmem_free(zgd, sizeof (zgd_t));
901 static int zil_fault_io = 0;
905 * Get data to generate a TX_WRITE intent log record.
908 zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
911 objset_t *os = zsb->z_os;
913 uint64_t object = lr->lr_foid;
914 uint64_t offset = lr->lr_offset;
915 uint64_t size = lr->lr_length;
916 blkptr_t *bp = &lr->lr_blkptr;
925 * Nothing to do if the file has been removed
927 if (zfs_zget(zsb, object, &zp) != 0)
928 return (ENOENT);
929 if (zp->z_unlinked) {
931 * Release the vnode asynchronously as we currently have the
932 * txg stopped from syncing.
934 iput_async(ZTOI(zp), dsl_pool_iput_taskq(dmu_objset_pool(os)));
938 zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
939 zgd->zgd_zilog = zsb->z_log;
940 zgd->zgd_private = zp;
943 * Write records come in two flavors: immediate and indirect.
944 * For small writes it's cheaper to store the data with the
945 * log record (immediate); for large writes it's cheaper to
946 * sync the data and get a pointer to it (indirect) so that
947 * we don't have to write the data twice.
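 *
 * For example (illustrative): a small synchronous write, say 4K, is
 * typically logged as an immediate record with the data embedded in
 * the ZIL entry, while a full 128K block write is synced in place and
 * the log records only a block pointer to it.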
949 if (buf != NULL) { /* immediate write */
950 zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
951 /* test for truncation needs to be done while range locked */
952 if (offset >= zp->z_size) {
955 error = dmu_read(os, object, offset, size, buf,
956 DMU_READ_NO_PREFETCH);
958 ASSERT(error == 0 || error == ENOENT);
959 } else { /* indirect write */
961 * Have to lock the whole block to ensure that while it's
962 * written out and its checksum is being calculated
963 * no one can change the data. We need to re-check the
964 * blocksize after we get the lock in case it's changed!
969 blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
971 zgd->zgd_rl = zfs_range_lock(zp, offset, size,
973 if (zp->z_blksz == size)
976 zfs_range_unlock(zgd->zgd_rl);
978 /* test for truncation needs to be done while range locked */
979 if (lr->lr_offset >= zp->z_size)
988 error = dmu_buf_hold(os, object, offset, zgd, &db,
989 DMU_READ_NO_PREFETCH);
995 ASSERT(db->db_offset == offset);
996 ASSERT(db->db_size == size);
998 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1000 ASSERT(error || lr->lr_length <= zp->z_blksz);
1003 * On success, we need to wait for the write I/O
1004 * initiated by dmu_sync() to complete before we can
1005 * release this dbuf. We will finish everything up
1006 * in the zfs_get_done() callback.
1011 if (error == EALREADY) {
1012 lr->lr_common.lrc_txtype = TX_WRITE2;
1018 zfs_get_done(zgd, error);
1025 zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
1027 znode_t *zp = ITOZ(ip);
1028 zfs_sb_t *zsb = ITOZSB(ip);
1034 if (flag & V_ACE_MASK)
1035 error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
1037 error = zfs_zaccess_rwx(zp, mode, flag, cr);
1042 EXPORT_SYMBOL(zfs_access);
1045 * Lookup an entry in a directory, or an extended attribute directory.
1046 * If it exists, return a held inode reference for it.
1048 * IN: dip - inode of directory to search.
1049 * nm - name of entry to lookup.
1050 * flags - LOOKUP_XATTR set if looking for an attribute.
1051 * cr - credentials of caller.
1052 * direntflags - directory lookup flags
1053 * realpnp - returned pathname.
1055 * OUT: ipp - inode of located entry, NULL if not found.
1057 * RETURN: 0 if success
1058 * error code if failure
1065 zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
1066 cred_t *cr, int *direntflags, pathname_t *realpnp)
1068 znode_t *zdp = ITOZ(dip);
1069 zfs_sb_t *zsb = ITOZSB(dip);
1073 if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
1075 if (!S_ISDIR(dip->i_mode)) {
1077 } else if (zdp->z_sa_hdl == NULL) {
1081 if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
1082 error = zfs_fastaccesschk_execute(zdp, cr);
1091 vnode_t *tvp = dnlc_lookup(dvp, nm);
1094 error = zfs_fastaccesschk_execute(zdp, cr);
1099 if (tvp == DNLC_NO_VNODE) {
1104 return (specvp_check(vpp, cr));
1107 #endif /* HAVE_DNLC */
1116 if (flags & LOOKUP_XATTR) {
1118 * If the xattr property is off, refuse the lookup request.
1120 if (!(zsb->z_flags & ZSB_XATTR_USER)) {
1126 * We don't allow recursive attributes.
1127 * Maybe someday we will.
1129 if (zdp->z_pflags & ZFS_XATTR) {
1134 if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {
1140 * Do we have permission to get into attribute directory?
1143 if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,
1153 if (!S_ISDIR(dip->i_mode)) {
1159 * Check accessibility of directory.
1162 if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
1167 if (zsb->z_utf8 && u8_validate(nm, strlen(nm),
1168 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1173 error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
1174 if ((error == 0) && (*ipp))
1175 zfs_inode_update(ITOZ(*ipp));
1180 EXPORT_SYMBOL(zfs_lookup);
1183 * Attempt to create a new entry in a directory. If the entry
1184 * already exists, truncate the file if permissible, else return
1185 * an error. Return the ip of the created or trunc'd file.
1187 * IN: dip - inode of directory to put new file entry in.
1188 * name - name of new file entry.
1189 * vap - attributes of new file.
1190 * excl - flag indicating exclusive or non-exclusive mode.
1191 * mode - mode to open file with.
1192 * cr - credentials of caller.
1193 * flag - large file flag [UNUSED].
1194 * vsecp - ACL to be set
1196 * OUT: ipp - inode of created or trunc'd entry.
1198 * RETURN: 0 if success
1199 * error code if failure
1201 * Timestamps:
1202 * dip - ctime|mtime updated if new entry created
1203 * ip - ctime|mtime always, atime if new
1208 zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
1209 int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
1211 znode_t *zp, *dzp = ITOZ(dip);
1212 zfs_sb_t *zsb = ITOZSB(dip);
1220 zfs_acl_ids_t acl_ids;
1221 boolean_t fuid_dirtied;
1222 boolean_t have_acl = B_FALSE;
1225 * If we have an ephemeral id, ACL, or XVATTR then
1226 * make sure the file system is at the proper version
1232 if (zsb->z_use_fuids == B_FALSE &&
1233 (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1241 if (zsb->z_utf8 && u8_validate(name, strlen(name),
1242 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1247 if (vap->va_mask & ATTR_XVATTR) {
1248 if ((error = secpolicy_xvattr((xvattr_t *)vap,
1249 crgetuid(cr), cr, vap->va_mode)) != 0) {
1257 if (*name == '\0') {
1259 * Null component name refers to the directory itself.
1266 /* possible igrab(zp) */
1269 if (flag & FIGNORECASE)
1272 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1276 zfs_acl_ids_free(&acl_ids);
1277 if (strcmp(name, "..") == 0)
1288 * Create a new file object and update the directory
1289 * to reference it.
1291 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
1293 zfs_acl_ids_free(&acl_ids);
1298 * We only support the creation of regular files in
1299 * extended attribute directories.
1302 if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
1304 zfs_acl_ids_free(&acl_ids);
1309 if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
1310 cr, vsecp, &acl_ids)) != 0)
1314 if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
1315 zfs_acl_ids_free(&acl_ids);
1320 tx = dmu_tx_create(os);
1322 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1323 ZFS_SA_BASE_ATTR_SIZE);
1325 fuid_dirtied = zsb->z_fuid_dirty;
1327 zfs_fuid_txhold(zsb, tx);
1328 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
1329 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
1330 if (!zsb->z_use_sa &&
1331 acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1332 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
1333 0, acl_ids.z_aclp->z_acl_bytes);
1335 error = dmu_tx_assign(tx, TXG_NOWAIT);
1337 zfs_dirent_unlock(dl);
1338 if (error == ERESTART) {
1343 zfs_acl_ids_free(&acl_ids);
1348 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1351 zfs_fuid_sync(zsb, tx);
1353 (void) zfs_link_create(dl, zp, tx, ZNEW);
1354 txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
1355 if (flag & FIGNORECASE)
1357 zfs_log_create(zilog, tx, txtype, dzp, zp, name,
1358 vsecp, acl_ids.z_fuidp, vap);
1359 zfs_acl_ids_free(&acl_ids);
1362 int aflags = (flag & FAPPEND) ? V_APPEND : 0;
1365 zfs_acl_ids_free(&acl_ids);
1369 * A directory entry already exists for this name.
1372 * Can't truncate an existing file if in exclusive mode.
1379 * Can't open a directory for writing.
1381 if (S_ISDIR(ZTOI(zp)->i_mode)) {
1386 * Verify requested access to file.
1388 if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
1392 mutex_enter(&dzp->z_lock);
1394 mutex_exit(&dzp->z_lock);
1397 * Truncate regular files if requested.
1399 if (S_ISREG(ZTOI(zp)->i_mode) &&
1400 (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
1401 /* we can't hold any locks when calling zfs_freesp() */
1402 zfs_dirent_unlock(dl);
1404 error = zfs_freesp(zp, 0, 0, mode, TRUE);
1410 zfs_dirent_unlock(dl);
1416 zfs_inode_update(dzp);
1417 zfs_inode_update(zp);
1421 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
1422 zil_commit(zilog, 0);
1427 EXPORT_SYMBOL(zfs_create);
1430 * Remove an entry from a directory.
1432 * IN: dip - inode of directory to remove entry from.
1433 * name - name of entry to remove.
1434 * cr - credentials of caller.
1436 * RETURN: 0 if success
1437 * error code if failure
1439 * Timestamps:
1440 * dip - ctime|mtime
1441 * ip - ctime (if nlink > 0)
1444 uint64_t null_xattr = 0;
1448 zfs_remove(struct inode *dip, char *name, cred_t *cr)
1450 znode_t *zp, *dzp = ITOZ(dip);
1453 zfs_sb_t *zsb = ITOZSB(dip);
1456 uint64_t xattr_obj_unlinked = 0;
1462 pathname_t *realnmp = NULL;
1463 #ifdef HAVE_PN_UTILS
1465 #endif /* HAVE_PN_UTILS */
1473 #ifdef HAVE_PN_UTILS
1474 if (flags & FIGNORECASE) {
1479 #endif /* HAVE_PN_UTILS */
1485 * Attempt to lock directory; fail if entry doesn't exist.
1487 if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1489 #ifdef HAVE_PN_UTILS
1492 #endif /* HAVE_PN_UTILS */
1499 if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
1504 * Need to use rmdir for removing directories.
1506 if (S_ISDIR(ip->i_mode)) {
1513 dnlc_remove(dvp, realnmp->pn_buf);
1515 dnlc_remove(dvp, name);
1516 #endif /* HAVE_DNLC */
1519 * We never delete the znode and always place it in the unlinked
1520 * set. The dentry cache will always hold the last reference and
1521 * is responsible for safely freeing the znode.
1524 tx = dmu_tx_create(zsb->z_os);
1525 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1526 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1527 zfs_sa_upgrade_txholds(tx, zp);
1528 zfs_sa_upgrade_txholds(tx, dzp);
1530 /* are there any extended attributes? */
1531 error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
1532 &xattr_obj, sizeof (xattr_obj));
1533 if (error == 0 && xattr_obj) {
1534 error = zfs_zget(zsb, xattr_obj, &xzp);
1535 ASSERT3U(error, ==, 0);
1536 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
1537 dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
1540 /* charge as an update -- would be nice not to charge at all */
1541 dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
1543 error = dmu_tx_assign(tx, TXG_NOWAIT);
1545 zfs_dirent_unlock(dl);
1549 if (error == ERESTART) {
1554 #ifdef HAVE_PN_UTILS
1557 #endif /* HAVE_PN_UTILS */
1564 * Remove the directory entry.
1566 error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
1575 * Hold z_lock so that we can make sure that the ACL obj
1576 * hasn't changed. Could have been deleted due to
1577 * zfs_sa_upgrade().
1579 mutex_enter(&zp->z_lock);
1580 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
1581 &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
1582 mutex_exit(&zp->z_lock);
1583 zfs_unlinked_add(zp, tx);
1587 #ifdef HAVE_PN_UTILS
1588 if (flags & FIGNORECASE)
1590 #endif /* HAVE_PN_UTILS */
1591 zfs_log_remove(zilog, tx, txtype, dzp, name, obj);
1595 #ifdef HAVE_PN_UTILS
1598 #endif /* HAVE_PN_UTILS */
1600 zfs_dirent_unlock(dl);
1601 zfs_inode_update(dzp);
1602 zfs_inode_update(zp);
1604 zfs_inode_update(xzp);
1610 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
1611 zil_commit(zilog, 0);
1616 EXPORT_SYMBOL(zfs_remove);
1619 * Create a new directory and insert it into dip using the name
1620 * provided. Return a pointer to the inserted directory.
1622 * IN: dip - inode of directory to add subdir to.
1623 * dirname - name of new directory.
1624 * vap - attributes of new directory.
1625 * cr - credentials of caller.
1626 * vsecp - ACL to be set
1628 * OUT: ipp - inode of created directory.
1630 * RETURN: 0 if success
1631 * error code if failure
1633 * Timestamps:
1634 * dip - ctime|mtime updated
1635 * ipp - ctime|mtime|atime updated
1639 zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
1640 cred_t *cr, int flags, vsecattr_t *vsecp)
1642 znode_t *zp, *dzp = ITOZ(dip);
1643 zfs_sb_t *zsb = ITOZSB(dip);
1651 gid_t gid = crgetgid(cr);
1652 zfs_acl_ids_t acl_ids;
1653 boolean_t fuid_dirtied;
1655 ASSERT(S_ISDIR(vap->va_mode));
1658 * If we have an ephemeral id, ACL, or XVATTR then
1659 * make sure the file system is at the proper version
1663 if (zsb->z_use_fuids == B_FALSE &&
1664 (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
1671 if (dzp->z_pflags & ZFS_XATTR) {
1676 if (zsb->z_utf8 && u8_validate(dirname,
1677 strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
1681 if (flags & FIGNORECASE)
1684 if (vap->va_mask & ATTR_XVATTR) {
1685 if ((error = secpolicy_xvattr((xvattr_t *)vap,
1686 crgetuid(cr), cr, vap->va_mode)) != 0) {
1692 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
1693 vsecp, &acl_ids)) != 0) {
1698 * First make sure the new directory doesn't exist.
1700 * Existence is checked first to make sure we don't return
1701 * EACCES instead of EEXIST which can cause some applications
1702 * to be confused.
1707 if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
1709 zfs_acl_ids_free(&acl_ids);
1714 if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
1715 zfs_acl_ids_free(&acl_ids);
1716 zfs_dirent_unlock(dl);
1721 if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
1722 zfs_acl_ids_free(&acl_ids);
1723 zfs_dirent_unlock(dl);
1729 * Add a new entry to the directory.
1731 tx = dmu_tx_create(zsb->z_os);
1732 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
1733 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
1734 fuid_dirtied = zsb->z_fuid_dirty;
1736 zfs_fuid_txhold(zsb, tx);
1737 if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
1738 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
1739 acl_ids.z_aclp->z_acl_bytes);
1742 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
1743 ZFS_SA_BASE_ATTR_SIZE);
1745 error = dmu_tx_assign(tx, TXG_NOWAIT);
1747 zfs_dirent_unlock(dl);
1748 if (error == ERESTART) {
1753 zfs_acl_ids_free(&acl_ids);
1762 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
1765 zfs_fuid_sync(zsb, tx);
1768 * Now put new name in parent dir.
1770 (void) zfs_link_create(dl, zp, tx, ZNEW);
1774 txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
1775 if (flags & FIGNORECASE)
1777 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
1778 acl_ids.z_fuidp, vap);
1780 zfs_acl_ids_free(&acl_ids);
1784 zfs_dirent_unlock(dl);
1786 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
1787 zil_commit(zilog, 0);
1789 zfs_inode_update(dzp);
1790 zfs_inode_update(zp);
1794 EXPORT_SYMBOL(zfs_mkdir);
1797 * Remove a directory subdir entry. If the current working
1798 * directory is the same as the subdir to be removed, the
1799 * remove will fail.
1801 * IN: dip - inode of directory to remove from.
1802 * name - name of directory to be removed.
1803 * cwd - inode of current working directory.
1804 * cr - credentials of caller.
1805 * flags - case flags
1807 * RETURN: 0 if success
1808 * error code if failure
1810 * Timestamps:
1811 * dip - ctime|mtime updated
1815 zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
1818 znode_t *dzp = ITOZ(dip);
1821 zfs_sb_t *zsb = ITOZSB(dip);
1832 if (flags & FIGNORECASE)
1838 * Attempt to lock directory; fail if entry doesn't exist.
1840 if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
1848 if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
1852 if (!S_ISDIR(ip->i_mode)) {
1863 * Grab a lock on the directory to make sure that no one is
1864 * trying to add (or look up) entries while we are removing it.
1866 rw_enter(&zp->z_name_lock, RW_WRITER);
1869 * Grab a lock on the parent pointer to make sure we play well
1870 * with the treewalk and directory rename code.
1872 rw_enter(&zp->z_parent_lock, RW_WRITER);
1874 tx = dmu_tx_create(zsb->z_os);
1875 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
1876 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
1877 dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
1878 zfs_sa_upgrade_txholds(tx, zp);
1879 zfs_sa_upgrade_txholds(tx, dzp);
1880 error = dmu_tx_assign(tx, TXG_NOWAIT);
1882 rw_exit(&zp->z_parent_lock);
1883 rw_exit(&zp->z_name_lock);
1884 zfs_dirent_unlock(dl);
1886 if (error == ERESTART) {
1896 error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
1899 uint64_t txtype = TX_RMDIR;
1900 if (flags & FIGNORECASE)
1902 zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
1907 rw_exit(&zp->z_parent_lock);
1908 rw_exit(&zp->z_name_lock);
1910 zfs_dirent_unlock(dl);
1914 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
1915 zil_commit(zilog, 0);
1917 zfs_inode_update(dzp);
1918 zfs_inode_update(zp);
1922 EXPORT_SYMBOL(zfs_rmdir);
1925 * Read as many directory entries as will fit into the provided
1926 * dirent buffer from the given directory cursor position.
1928 * IN: ip - inode of directory to read.
1929 * dirent - buffer for directory entries.
1931 * OUT: dirent - buffer filled with directory entries.
1933 * RETURN: 0 if success
1934 * error code if failure
1936 * Timestamps:
1937 * ip - atime updated
1939 * Note that the low 4 bits of the cookie returned by zap are always zero.
1940 * This allows us to use the low range for "special" directory entries:
1941 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
1942 * we use the offset 2 for the '.zfs' directory.
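 *
 * For example (illustrative), iterating a root directory yields
 * pos 0 -> ".", pos 1 -> "..", pos 2 -> ".zfs", and then real entries
 * at serialized ZAP cursor values, whose zero low bits guarantee they
 * never collide with the reserved offsets.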
1946 zfs_readdir(struct inode *ip, void *dirent, filldir_t filldir,
1947 loff_t *pos, cred_t *cr)
1949 znode_t *zp = ITOZ(ip);
1950 zfs_sb_t *zsb = ITOZSB(ip);
1953 zap_attribute_t zap;
1963 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zsb),
1964 &parent, sizeof (parent))) != 0)
1968 * Quit if directory has been removed (posix)
1975 prefetch = zp->z_zn_prefetch;
1978 * Initialize the iterator cursor.
1982 * Start iteration from the beginning of the directory.
1984 zap_cursor_init(&zc, os, zp->z_id);
1987 * The offset is a serialized cursor.
1989 zap_cursor_init_serialized(&zc, os, zp->z_id, *pos);
1993 * Transform to file-system independent format
2000 * Special case `.', `..', and `.zfs'.
2003 (void) strcpy(zap.za_name, ".");
2004 zap.za_normalization_conflict = 0;
2006 } else if (*pos == 1) {
2007 (void) strcpy(zap.za_name, "..");
2008 zap.za_normalization_conflict = 0;
2010 } else if (*pos == 2 && zfs_show_ctldir(zp)) {
2011 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
2012 zap.za_normalization_conflict = 0;
2013 objnum = ZFSCTL_INO_ROOT;
2018 if ((error = zap_cursor_retrieve(&zc, &zap))) {
2019 if (error == ENOENT)
2025 if (zap.za_integer_length != 8 ||
2026 zap.za_num_integers != 1) {
2027 cmn_err(CE_WARN, "zap_readdir: bad directory "
2028 "entry, obj = %lld, offset = %lld\n",
2029 (u_longlong_t)zp->z_id,
2030 (u_longlong_t)*pos);
2035 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
2037 done = filldir(dirent, zap.za_name, strlen(zap.za_name),
2038 zap_cursor_serialize(&zc), objnum, 0);
2043 /* Prefetch znode */
2045 dmu_prefetch(os, objnum, 0, 0);
2049 zap_cursor_advance(&zc);
2050 *pos = zap_cursor_serialize(&zc);
2055 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
2058 zap_cursor_fini(&zc);
2059 if (error == ENOENT)
2062 ZFS_ACCESSTIME_STAMP(zsb, zp);
2063 zfs_inode_update(zp);
2070 EXPORT_SYMBOL(zfs_readdir);
2072 ulong_t zfs_fsync_sync_cnt = 4;
2075 zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
2077 znode_t *zp = ITOZ(ip);
2078 zfs_sb_t *zsb = ITOZSB(ip);
2080 (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
2082 if (zsb->z_os->os_sync != ZFS_SYNC_DISABLED) {
2085 zil_commit(zsb->z_log, zp->z_id);
2090 EXPORT_SYMBOL(zfs_fsync);
2094 * Get the requested file attributes and place them in the provided
2095 * vattr structure.
2097 * IN: ip - inode of file.
2098 * vap - va_mask identifies requested attributes.
2099 * If ATTR_XVATTR set, then optional attrs are requested
2100 * flags - ATTR_NOACLCHECK (CIFS server context)
2101 * cr - credentials of caller.
2103 * OUT: vap - attribute values.
2105 * RETURN: 0 (always succeeds)
2109 zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
2111 znode_t *zp = ITOZ(ip);
2112 zfs_sb_t *zsb = ITOZSB(ip);
2115 uint64_t mtime[2], ctime[2];
2116 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2117 xoptattr_t *xoap = NULL;
2118 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2119 sa_bulk_attr_t bulk[2];
2125 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
2127 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
2128 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
2130 if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
2136 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
2137 * Also, if we are the owner don't bother, since owner should
2138 * always be allowed to read basic attributes of file.
2140 if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
2141 (vap->va_uid != crgetuid(cr))) {
2142 if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
2150 * Return all attributes. It's cheaper to provide the answer
2151 * than to determine whether we were asked the question.
2154 mutex_enter(&zp->z_lock);
2155 vap->va_type = vn_mode_to_vtype(zp->z_mode);
2156 vap->va_mode = zp->z_mode;
2157 vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
2158 vap->va_nodeid = zp->z_id;
2159 if ((zp->z_id == zsb->z_root) && zfs_show_ctldir(zp))
2160 links = zp->z_links + 1;
2162 links = zp->z_links;
2163 vap->va_nlink = MIN(links, ZFS_LINK_MAX);
2164 vap->va_size = i_size_read(ip);
2165 vap->va_rdev = ip->i_rdev;
2166 vap->va_seq = ip->i_generation;
2169 * Add in any requested optional attributes and the create time.
2170 * Also set the corresponding bits in the returned attribute bitmap.
2172 if ((xoap = xva_getxoptattr(xvap)) != NULL && zsb->z_use_fuids) {
2173 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
2175 ((zp->z_pflags & ZFS_ARCHIVE) != 0);
2176 XVA_SET_RTN(xvap, XAT_ARCHIVE);
2179 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
2180 xoap->xoa_readonly =
2181 ((zp->z_pflags & ZFS_READONLY) != 0);
2182 XVA_SET_RTN(xvap, XAT_READONLY);
2185 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
2187 ((zp->z_pflags & ZFS_SYSTEM) != 0);
2188 XVA_SET_RTN(xvap, XAT_SYSTEM);
2191 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
2193 ((zp->z_pflags & ZFS_HIDDEN) != 0);
2194 XVA_SET_RTN(xvap, XAT_HIDDEN);
2197 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2198 xoap->xoa_nounlink =
2199 ((zp->z_pflags & ZFS_NOUNLINK) != 0);
2200 XVA_SET_RTN(xvap, XAT_NOUNLINK);
2203 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2204 xoap->xoa_immutable =
2205 ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
2206 XVA_SET_RTN(xvap, XAT_IMMUTABLE);
2209 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2210 xoap->xoa_appendonly =
2211 ((zp->z_pflags & ZFS_APPENDONLY) != 0);
2212 XVA_SET_RTN(xvap, XAT_APPENDONLY);
2215 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2217 ((zp->z_pflags & ZFS_NODUMP) != 0);
2218 XVA_SET_RTN(xvap, XAT_NODUMP);
2221 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
2223 ((zp->z_pflags & ZFS_OPAQUE) != 0);
2224 XVA_SET_RTN(xvap, XAT_OPAQUE);
2227 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2228 xoap->xoa_av_quarantined =
2229 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
2230 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
2233 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2234 xoap->xoa_av_modified =
2235 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
2236 XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
2239 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
2240 S_ISREG(ip->i_mode)) {
2241 zfs_sa_get_scanstamp(zp, xvap);
2244 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
2247 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zsb),
2248 times, sizeof (times));
2249 ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
2250 XVA_SET_RTN(xvap, XAT_CREATETIME);
2253 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2254 xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
2255 XVA_SET_RTN(xvap, XAT_REPARSE);
2257 if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
2258 xoap->xoa_generation = zp->z_gen;
2259 XVA_SET_RTN(xvap, XAT_GEN);
2262 if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
2264 ((zp->z_pflags & ZFS_OFFLINE) != 0);
2265 XVA_SET_RTN(xvap, XAT_OFFLINE);
2268 if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
2270 ((zp->z_pflags & ZFS_SPARSE) != 0);
2271 XVA_SET_RTN(xvap, XAT_SPARSE);
2275 ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
2276 ZFS_TIME_DECODE(&vap->va_mtime, mtime);
2277 ZFS_TIME_DECODE(&vap->va_ctime, ctime);
2279 mutex_exit(&zp->z_lock);
2281 sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
2283 if (zp->z_blksz == 0) {
2285 * Block size hasn't been set; suggest maximal I/O transfers.
2287 vap->va_blksize = zsb->z_max_blksz;
2293 EXPORT_SYMBOL(zfs_getattr);
2296 * Set the file attributes to the values contained in the
2297 * vattr structure.
2299 * IN: ip - inode of file to be modified.
2300 * vap - new attribute values.
2301 * If ATTR_XVATTR set, then optional attrs are being set
2302 * flags - ATTR_UTIME set if non-default time values provided.
2303 * - ATTR_NOACLCHECK (CIFS context only).
2304 * cr - credentials of caller.
2306 * RETURN: 0 if success
2307 * error code if failure
2309 * Timestamps:
2310 * ip - ctime updated, mtime updated if size changed.
2314 zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
2316 znode_t *zp = ITOZ(ip);
2317 zfs_sb_t *zsb = ITOZSB(ip);
2321 xvattr_t *tmpxvattr;
2322 uint_t mask = vap->va_mask;
2326 uint64_t new_uid, new_gid;
2328 uint64_t mtime[2], ctime[2];
2330 int need_policy = FALSE;
2332 zfs_fuid_info_t *fuidp = NULL;
2333 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
2336 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
2337 boolean_t fuid_dirtied = B_FALSE;
2338 sa_bulk_attr_t *bulk, *xattr_bulk;
2339 int count = 0, xattr_count = 0;
2350 * Make sure that if we have an ephemeral uid/gid or xvattr specified
2351 * that the file system is at the proper version level
2354 if (zsb->z_use_fuids == B_FALSE &&
2355 (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
2356 ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
2357 (mask & ATTR_XVATTR))) {
2362 if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
2367 if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
2373 * If this is an xvattr_t, then get a pointer to the structure of
2374 * optional attributes. If this is NULL, then we have a vattr_t.
2376 xoap = xva_getxoptattr(xvap);
2378 tmpxvattr = kmem_alloc(sizeof(xvattr_t), KM_SLEEP);
2379 xva_init(tmpxvattr);
2381 bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 7, KM_SLEEP);
2382 xattr_bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 7, KM_SLEEP);
2385 * For immutable files, only the immutable bit and atime can be altered
2387 if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2388 ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
2389 ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2394 if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
2400 * Verify the timestamps don't overflow 32 bits.
2401 * ZFS can handle large timestamps, but 32-bit syscalls can't
2402 * handle times greater than 2039. This check should be removed
2403 * once large timestamps are fully supported.
2405 if (mask & (ATTR_ATIME | ATTR_MTIME)) {
2406 if (((mask & ATTR_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2407 ((mask & ATTR_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2417 /* Can this be moved to before the top label? */
2418 if (zsb->z_vfs->mnt_flags & MNT_READONLY) {
2424 * First validate permissions
2427 if (mask & ATTR_SIZE) {
2428 err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
2433 * XXX - Note, we are not providing any open
2434 * mode flags here (like FNDELAY), so we may
2435 * block if there are locks present... this
2436 * should be addressed in openat().
2438 /* XXX - would it be OK to generate a log record here? */
2439 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2443 /* Careful negative Linux return code here */
2444 err = -vmtruncate(ip, vap->va_size);
2449 if (mask & (ATTR_ATIME|ATTR_MTIME) ||
2450 ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2451 XVA_ISSET_REQ(xvap, XAT_READONLY) ||
2452 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
2453 XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
2454 XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
2455 XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
2456 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
2457 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
2461 if (mask & (ATTR_UID|ATTR_GID)) {
2462 int idmask = (mask & (ATTR_UID|ATTR_GID));
2467 * NOTE: even if a new mode is being set,
2468 * we may clear S_ISUID/S_ISGID bits.
2471 if (!(mask & ATTR_MODE))
2472 vap->va_mode = zp->z_mode;
2475 * Take ownership or chgrp to group we are a member of
2478 take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
2479 take_group = (mask & ATTR_GID) &&
2480 zfs_groupmember(zsb, vap->va_gid, cr);
2483 * If both ATTR_UID and ATTR_GID are set then take_owner and
2484 * take_group must both be set in order to allow taking
2485 * ownership.
2487 * Otherwise, send the check through secpolicy_vnode_setattr()
2491 if (((idmask == (ATTR_UID|ATTR_GID)) &&
2492 take_owner && take_group) ||
2493 ((idmask == ATTR_UID) && take_owner) ||
2494 ((idmask == ATTR_GID) && take_group)) {
2495 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
2496 skipaclchk, cr) == 0) {
2498 * Remove setuid/setgid for non-privileged users
2500 (void) secpolicy_setid_clear(vap, cr);
2501 trim_mask = (mask & (ATTR_UID|ATTR_GID));
2510 mutex_enter(&zp->z_lock);
2511 oldva.va_mode = zp->z_mode;
2512 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
2513 if (mask & ATTR_XVATTR) {
2515 * Update xvattr mask to include only those attributes
2516 * that are actually changing.
2518 * the bits will be restored prior to actually setting
2519 * the attributes so the caller thinks they were set.
2521 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2522 if (xoap->xoa_appendonly !=
2523 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
2526 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
2527 XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
2531 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2532 if (xoap->xoa_nounlink !=
2533 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
2536 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
2537 XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
2541 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2542 if (xoap->xoa_immutable !=
2543 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
2546 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
2547 XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
2551 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2552 if (xoap->xoa_nodump !=
2553 ((zp->z_pflags & ZFS_NODUMP) != 0)) {
2556 XVA_CLR_REQ(xvap, XAT_NODUMP);
2557 XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
2561 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2562 if (xoap->xoa_av_modified !=
2563 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
2566 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
2567 XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
2571 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2572 if ((!S_ISREG(ip->i_mode) &&
2573 xoap->xoa_av_quarantined) ||
2574 xoap->xoa_av_quarantined !=
2575 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
2578 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
2579 XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
2583 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2584 mutex_exit(&zp->z_lock);
2589 if (need_policy == FALSE &&
2590 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
2591 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
2596 mutex_exit(&zp->z_lock);
2598 if (mask & ATTR_MODE) {
2599 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
2600 err = secpolicy_setid_setsticky_clear(ip, vap,
2605 trim_mask |= ATTR_MODE;
2613 * If trim_mask is set then take ownership
2614 * has been granted or write_acl is present and user
2615 * has the ability to modify mode. In that case remove
2616 * UID|GID and/or MODE from the mask so that
2617 * secpolicy_vnode_setattr() doesn't revoke it.
2621 saved_mask = vap->va_mask;
2622 vap->va_mask &= ~trim_mask;
2624 err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
2625 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
2630 vap->va_mask |= saved_mask;
2634 * secpolicy_vnode_setattr, or take ownership may have
2635 * changed va_mask
2637 mask = vap->va_mask;
2639 if ((mask & (ATTR_UID | ATTR_GID))) {
2640 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zsb),
2641 &xattr_obj, sizeof (xattr_obj));
2643 if (err == 0 && xattr_obj) {
2644 err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
2648 if (mask & ATTR_UID) {
2649 new_uid = zfs_fuid_create(zsb,
2650 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
2651 if (new_uid != zp->z_uid &&
2652 zfs_fuid_overquota(zsb, B_FALSE, new_uid)) {
2660 if (mask & ATTR_GID) {
2661 new_gid = zfs_fuid_create(zsb, (uint64_t)vap->va_gid,
2662 cr, ZFS_GROUP, &fuidp);
2663 if (new_gid != zp->z_gid &&
2664 zfs_fuid_overquota(zsb, B_TRUE, new_gid)) {
2672 tx = dmu_tx_create(zsb->z_os);
2674 if (mask & ATTR_MODE) {
2675 uint64_t pmode = zp->z_mode;
2677 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
2679 zfs_acl_chmod_setattr(zp, &aclp, new_mode);
2681 mutex_enter(&zp->z_lock);
2682 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
2684 * Are we upgrading ACL from old V0 format
2685 * to the new format?
2687 if (zsb->z_version >= ZPL_VERSION_FUID &&
2688 zfs_znode_acl_version(zp) ==
2689 ZFS_ACL_VERSION_INITIAL) {
2690 dmu_tx_hold_free(tx, acl_obj, 0,
2692 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2693 0, aclp->z_acl_bytes);
2695 dmu_tx_hold_write(tx, acl_obj, 0,
2698 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
2699 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2700 0, aclp->z_acl_bytes);
2702 mutex_exit(&zp->z_lock);
2703 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2705 if ((mask & ATTR_XVATTR) &&
2706 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
2707 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2709 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2713 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
2716 fuid_dirtied = zsb->z_fuid_dirty;
2718 zfs_fuid_txhold(zsb, tx);
2720 zfs_sa_upgrade_txholds(tx, zp);
2722 err = dmu_tx_assign(tx, TXG_NOWAIT);
2724 if (err == ERESTART)
2731 * Set each attribute requested.
2732 * We group settings according to the locks they need to acquire.
2734 * Note: you cannot set ctime directly, although it will be
2735 * updated as a side-effect of calling this function.
2739 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2740 mutex_enter(&zp->z_acl_lock);
2741 mutex_enter(&zp->z_lock);
2743 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zsb), NULL,
2744 &zp->z_pflags, sizeof (zp->z_pflags));
2747 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2748 mutex_enter(&attrzp->z_acl_lock);
2749 mutex_enter(&attrzp->z_lock);
2750 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2751 SA_ZPL_FLAGS(zsb), NULL, &attrzp->z_pflags,
2752 sizeof (attrzp->z_pflags));
2755 if (mask & (ATTR_UID|ATTR_GID)) {
2757 if (mask & ATTR_UID) {
2758 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zsb), NULL,
2759 &new_uid, sizeof (new_uid));
2760 zp->z_uid = new_uid;
2762 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2763 SA_ZPL_UID(zsb), NULL, &new_uid,
2765 attrzp->z_uid = new_uid;
2769 if (mask & ATTR_GID) {
2770 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zsb),
2771 NULL, &new_gid, sizeof (new_gid));
2772 zp->z_gid = new_gid;
2774 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2775 SA_ZPL_GID(zsb), NULL, &new_gid,
2777 attrzp->z_gid = new_gid;
2780 if (!(mask & ATTR_MODE)) {
2781 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb),
2782 NULL, &new_mode, sizeof (new_mode));
2783 new_mode = zp->z_mode;
2785 err = zfs_acl_chown_setattr(zp);
2788 err = zfs_acl_chown_setattr(attrzp);
2793 if (mask & ATTR_MODE) {
2794 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zsb), NULL,
2795 &new_mode, sizeof (new_mode));
2796 zp->z_mode = new_mode;
2797 ASSERT3P(aclp, !=, NULL);
2798 err = zfs_aclset_common(zp, aclp, cr, tx);
2799 ASSERT3U(err, ==, 0);
2800 if (zp->z_acl_cached)
2801 zfs_acl_free(zp->z_acl_cached);
2802 zp->z_acl_cached = aclp;
2807 if (mask & ATTR_ATIME) {
2808 ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
2809 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zsb), NULL,
2810 &zp->z_atime, sizeof (zp->z_atime));
2813 if (mask & ATTR_MTIME) {
2814 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
2815 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL,
2816 mtime, sizeof (mtime));
2819 /* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
2820 if (mask & ATTR_SIZE && !(mask & ATTR_MTIME)) {
2821 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb),
2822 NULL, mtime, sizeof (mtime));
2823 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
2824 &ctime, sizeof (ctime));
2825 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
2827 } else if (mask != 0) {
2828 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL,
2829 &ctime, sizeof (ctime));
2830 zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime,
2833 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2834 SA_ZPL_CTIME(zsb), NULL,
2835 &ctime, sizeof (ctime));
2836 zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
2837 mtime, ctime, B_TRUE);
2841 * Do this after setting timestamps to prevent timestamp
2842 * update from toggling bit
2845 if (xoap && (mask & ATTR_XVATTR)) {
2848 * Restore trimmed-off masks
2849 * so that return masks can be set for the caller.
2852 if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
2853 XVA_SET_REQ(xvap, XAT_APPENDONLY);
2855 if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
2856 XVA_SET_REQ(xvap, XAT_NOUNLINK);
2858 if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
2859 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
2861 if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
2862 XVA_SET_REQ(xvap, XAT_NODUMP);
2864 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
2865 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
2867 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
2868 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
2871 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
2872 ASSERT(S_ISREG(ip->i_mode));
2874 zfs_xvattr_set(zp, xvap, tx);
2878 zfs_fuid_sync(zsb, tx);
2881 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
2883 mutex_exit(&zp->z_lock);
2884 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2885 mutex_exit(&zp->z_acl_lock);
2888 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2889 mutex_exit(&attrzp->z_acl_lock);
2890 mutex_exit(&attrzp->z_lock);
2893 if (err == 0 && attrzp) {
2894 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
2905 zfs_fuid_info_free(fuidp);
2911 if (err == ERESTART)
2914 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
2916 zfs_inode_update(zp);
2920 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
2921 zil_commit(zilog, 0);
2924 kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * 7);
2925 kmem_free(bulk, sizeof (sa_bulk_attr_t) * 7);
2926 kmem_free(tmpxvattr, sizeof (xvattr_t));
2930 EXPORT_SYMBOL(zfs_setattr);
2932 typedef struct zfs_zlock {
2933 krwlock_t *zl_rwlock; /* lock we acquired */
2934 znode_t *zl_znode; /* znode we held */
2935 struct zfs_zlock *zl_next; /* next in list */
2939 * Drop locks and release vnodes that were held by zfs_rename_lock().
2942 zfs_rename_unlock(zfs_zlock_t **zlpp)
2946 while ((zl = *zlpp) != NULL) {
2947 if (zl->zl_znode != NULL)
2948 iput(ZTOI(zl->zl_znode));
2949 rw_exit(zl->zl_rwlock);
2950 *zlpp = zl->zl_next;
2951 kmem_free(zl, sizeof (*zl));
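/*
 * Typical pairing, mirroring the calls in zfs_rename() below (sketch
 * only): zfs_rename_lock() builds the zl list and zfs_rename_unlock()
 * tears it down in LIFO order.
 *
 *	zfs_zlock_t *zl = NULL;
 *	error = zfs_rename_lock(szp, tdzp, sdzp, &zl);
 *	if (error == 0) {
 *		... do the rename work ...
 *	}
 *	if (zl != NULL)
 *		zfs_rename_unlock(&zl);
 */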
2956 * Search back through the directory tree, using the ".." entries.
2957 * Lock each directory in the chain to prevent concurrent renames.
2958 * Fail any attempt to move a directory into one of its own descendants.
2959 * XXX - z_parent_lock can overlap with map or grow locks
2962 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
2966 uint64_t rootid = ZTOZSB(zp)->z_root;
2967 uint64_t oidp = zp->z_id;
2968 krwlock_t *rwlp = &szp->z_parent_lock;
2969 krw_t rw = RW_WRITER;
2972 * First pass write-locks szp and compares to zp->z_id.
2973 * Later passes read-lock zp and compare to zp->z_parent.
2976 if (!rw_tryenter(rwlp, rw)) {
2978 * Another thread is renaming in this path.
2979 * Note that if we are a WRITER, we don't have any
2980 * parent_locks held yet.
2982 if (rw == RW_READER && zp->z_id > szp->z_id) {
2984 * Drop our locks and restart
2986 zfs_rename_unlock(&zl);
2990 rwlp = &szp->z_parent_lock;
2995 * Wait for other thread to drop its locks
3001 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
3002 zl->zl_rwlock = rwlp;
3003 zl->zl_znode = NULL;
3004 zl->zl_next = *zlpp;
3007 if (oidp == szp->z_id) /* We're a descendant of szp */
3010 if (oidp == rootid) /* We've hit the top */
3013 if (rw == RW_READER) { /* i.e. not the first pass */
3014 int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
3019 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
3020 &oidp, sizeof (oidp));
3021 rwlp = &zp->z_parent_lock;
3024 } while (zp->z_id != sdzp->z_id);
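/*
 * Worked example (illustration only): for "mv /usr/a/b /usr/a/b/c/d",
 * szp is b and tdzp is c. The walk starts at c and follows ".."
 * upward; c's parent is b, so oidp hits szp->z_id and the move is
 * rejected, since a directory may not be moved into its own
 * descendant. Reaching sdzp or the root first instead proves the
 * move is legal.
 */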
3030 * Move an entry from the provided source directory to the target
3031 * directory. Change the entry name as indicated.
3033 * IN: sdip - Source directory containing the "old entry".
3034 * snm - Old entry name.
3035 * tdip - Target directory to contain the "new entry".
3036 * tnm - New entry name.
3037 * cr - credentials of caller.
3038 * flags - case flags
3040 * RETURN: 0 if success
3041 * error code if failure
3044 * sdip,tdip - ctime|mtime updated
3048 zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
3049 cred_t *cr, int flags)
3051 znode_t *tdzp, *szp, *tzp;
3052 znode_t *sdzp = ITOZ(sdip);
3053 zfs_sb_t *zsb = ITOZSB(sdip);
3055 zfs_dirlock_t *sdl, *tdl;
3058 int cmp, serr, terr;
3063 ZFS_VERIFY_ZP(sdzp);
3066 if (tdip->i_sb != sdip->i_sb) {
3072 ZFS_VERIFY_ZP(tdzp);
3073 if (zsb->z_utf8 && u8_validate(tnm,
3074 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3079 if (flags & FIGNORECASE)
3088 * This is to prevent the creation of links into attribute space
3089 * by renaming a linked file into/out of an attribute directory.
3090 * See the comment in zfs_link() for why this is considered bad.
3092 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
3098 * Lock source and target directory entries. To prevent deadlock,
3099 * a lock ordering must be defined. We lock the directory with
3100 * the smallest object id first, or if it's a tie, the one with
3101 * the lexically first name.
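/*
 * Illustration (not built): with directories A (id 5) and B (id 9),
 * concurrent "mv A/x B/x" and "mv B/y A/y" both lock A before B
 * because 5 < 9, so neither thread can hold one directory lock while
 * waiting for the other. An id tie (a rename within one directory)
 * falls back to the lexical order of the two names.
 */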
3103 if (sdzp->z_id < tdzp->z_id) {
3105 } else if (sdzp->z_id > tdzp->z_id) {
3109 * First compare the two name arguments without
3110 * considering any case folding.
3112 int nofold = (zsb->z_norm & ~U8_TEXTPREP_TOUPPER);
3114 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
3115 ASSERT(error == 0 || !zsb->z_utf8);
3118 * POSIX: "If the old argument and the new argument
3119 * both refer to links to the same existing file,
3120 * the rename() function shall return successfully
3121 * and perform no other action."
3127 * If the file system is case-folding, then we may
3128 * have some more checking to do. A case-folding file
3129 * system is either supporting mixed case sensitivity
3130 * access or is completely case-insensitive. Note
3131 * that the file system is always case preserving.
3133 * In mixed sensitivity mode case sensitive behavior
3134 * is the default. FIGNORECASE must be used to
3135 * explicitly request case insensitive behavior.
3137 * If the source and target names provided differ only
3138 * by case (e.g., a request to rename 'tim' to 'Tim'),
3139 * we will treat this as a special case in the
3140 * case-insensitive mode: as long as the source name
3141 * is an exact match, we will allow this to proceed as
3142 * a name-change request.
3144 if ((zsb->z_case == ZFS_CASE_INSENSITIVE ||
3145 (zsb->z_case == ZFS_CASE_MIXED &&
3146 flags & FIGNORECASE)) &&
3147 u8_strcmp(snm, tnm, 0, zsb->z_norm, U8_UNICODE_LATEST,
3150 * case preserving rename request, require exact
3159 * If the source and destination directories are the same, we should
3160 * grab the z_name_lock of that directory only once.
3164 rw_enter(&sdzp->z_name_lock, RW_READER);
3168 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
3169 ZEXISTS | zflg, NULL, NULL);
3170 terr = zfs_dirent_lock(&tdl,
3171 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
3173 terr = zfs_dirent_lock(&tdl,
3174 tdzp, tnm, &tzp, zflg, NULL, NULL);
3175 serr = zfs_dirent_lock(&sdl,
3176 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
3182 * Source entry invalid or not there.
3185 zfs_dirent_unlock(tdl);
3191 rw_exit(&sdzp->z_name_lock);
3193 if (strcmp(snm, "..") == 0)
3199 zfs_dirent_unlock(sdl);
3203 rw_exit(&sdzp->z_name_lock);
3205 if (strcmp(tnm, "..") == 0)
3212 * Must have write access at the source to remove the old entry
3213 * and write access at the target to create the new entry.
3214 * Note that if target and source are the same, this can be
3215 * done in a single check.
3218 if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
3221 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3223 * Check to make sure rename is valid.
3224 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
3226 if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
3231 * Does target exist?
3235 * Source and target must be the same type.
3237 if (S_ISDIR(ZTOI(szp)->i_mode)) {
3238 if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
3243 if (S_ISDIR(ZTOI(tzp)->i_mode)) {
3249 * POSIX dictates that when the source and target
3250 * entries refer to the same file object, rename
3251 * must do nothing and exit without error.
3253 if (szp->z_id == tzp->z_id) {
3259 tx = dmu_tx_create(zsb->z_os);
3260 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3261 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3262 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
3263 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3265 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3266 zfs_sa_upgrade_txholds(tx, tdzp);
3269 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3270 zfs_sa_upgrade_txholds(tx, tzp);
3273 zfs_sa_upgrade_txholds(tx, szp);
3274 dmu_tx_hold_zap(tx, zsb->z_unlinkedobj, FALSE, NULL);
3275 error = dmu_tx_assign(tx, TXG_NOWAIT);
3278 zfs_rename_unlock(&zl);
3279 zfs_dirent_unlock(sdl);
3280 zfs_dirent_unlock(tdl);
3283 rw_exit(&sdzp->z_name_lock);
3288 if (error == ERESTART) {
3298 if (tzp) /* Attempt to remove the existing target */
3299 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
3302 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3304 szp->z_pflags |= ZFS_AV_MODIFIED;
3306 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zsb),
3307 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3308 ASSERT3U(error, ==, 0);
3310 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3312 zfs_log_rename(zilog, tx, TX_RENAME |
3313 (flags & FIGNORECASE ? TX_CI : 0), sdzp,
3314 sdl->dl_name, tdzp, tdl->dl_name, szp);
3317 * At this point, we have successfully created
3318 * the target name, but have failed to remove
3319 * the source name. Since the create was done
3320 * with the ZRENAMING flag, there are
3321 * complications; for one, the link count is
3322 * wrong. The easiest way to deal with this
3323 * is to remove the newly created target, and
3324 * return the original error. This must
3325 * succeed; fortunately, it is very unlikely to
3326 * fail, since we just created it.
3328 VERIFY3U(zfs_link_destroy(tdl, szp, tx,
3329 ZRENAMING, NULL), ==, 0);
3337 zfs_rename_unlock(&zl);
3339 zfs_dirent_unlock(sdl);
3340 zfs_dirent_unlock(tdl);
3342 zfs_inode_update(sdzp);
3344 rw_exit(&sdzp->z_name_lock);
3347 zfs_inode_update(tdzp);
3349 zfs_inode_update(szp);
3352 zfs_inode_update(tzp);
3356 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3357 zil_commit(zilog, 0);
3362 EXPORT_SYMBOL(zfs_rename);
3365 * Insert the indicated symbolic reference entry into the directory.
3367 * IN: dip - Directory to contain new symbolic link.
3368 * link - Name for new symlink entry.
3369 * vap - Attributes of new entry.
3370 * target - Target path of new symlink.
3372 * cr - credentials of caller.
3373 * flags - case flags
3375 * RETURN: 0 if success
3376 * error code if failure
3379 * dip - ctime|mtime updated
3383 zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
3384 struct inode **ipp, cred_t *cr, int flags)
3386 znode_t *zp, *dzp = ITOZ(dip);
3389 zfs_sb_t *zsb = ITOZSB(dip);
3391 uint64_t len = strlen(link);
3394 zfs_acl_ids_t acl_ids;
3395 boolean_t fuid_dirtied;
3396 uint64_t txtype = TX_SYMLINK;
3398 ASSERT(S_ISLNK(vap->va_mode));
3404 if (zsb->z_utf8 && u8_validate(name, strlen(name),
3405 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3409 if (flags & FIGNORECASE)
3412 if (len > MAXPATHLEN) {
3414 return (ENAMETOOLONG);
3417 if ((error = zfs_acl_ids_create(dzp, 0,
3418 vap, cr, NULL, &acl_ids)) != 0) {
3426 * Attempt to lock directory; fail if entry already exists.
3428 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3430 zfs_acl_ids_free(&acl_ids);
3435 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
3436 zfs_acl_ids_free(&acl_ids);
3437 zfs_dirent_unlock(dl);
3442 if (zfs_acl_ids_overquota(zsb, &acl_ids)) {
3443 zfs_acl_ids_free(&acl_ids);
3444 zfs_dirent_unlock(dl);
3448 tx = dmu_tx_create(zsb->z_os);
3449 fuid_dirtied = zsb->z_fuid_dirty;
3450 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3451 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3452 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3453 ZFS_SA_BASE_ATTR_SIZE + len);
3454 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
3455 if (!zsb->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3456 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3457 acl_ids.z_aclp->z_acl_bytes);
3460 zfs_fuid_txhold(zsb, tx);
3461 error = dmu_tx_assign(tx, TXG_NOWAIT);
3463 zfs_dirent_unlock(dl);
3464 if (error == ERESTART) {
3469 zfs_acl_ids_free(&acl_ids);
3476 * Create a new object for the symlink.
3477 * for version 4 ZPL datasets the symlink will be an SA attribute
3479 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
3482 zfs_fuid_sync(zsb, tx);
3484 mutex_enter(&zp->z_lock);
3486 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zsb),
3489 zfs_sa_symlink(zp, link, len, tx);
3490 mutex_exit(&zp->z_lock);
3493 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zsb),
3494 &zp->z_size, sizeof (zp->z_size), tx);
3496 * Insert the new object into the directory.
3498 (void) zfs_link_create(dl, zp, tx, ZNEW);
3500 if (flags & FIGNORECASE)
3502 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3504 zfs_inode_update(dzp);
3505 zfs_inode_update(zp);
3507 zfs_acl_ids_free(&acl_ids);
3511 zfs_dirent_unlock(dl);
3515 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3516 zil_commit(zilog, 0);
3521 EXPORT_SYMBOL(zfs_symlink);
3524 * Return, in the buffer contained in the provided uio structure,
3525 * the symbolic path referred to by ip.
3527 * IN: ip - inode of symbolic link
3528 * uio - structure to contain the link path.
3529 * cr - credentials of caller.
3531 * RETURN: 0 if success
3532 * error code if failure
3535 * ip - atime updated
3539 zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr)
3541 znode_t *zp = ITOZ(ip);
3542 zfs_sb_t *zsb = ITOZSB(ip);
3548 mutex_enter(&zp->z_lock);
3550 error = sa_lookup_uio(zp->z_sa_hdl,
3551 SA_ZPL_SYMLINK(zsb), uio);
3553 error = zfs_sa_readlink(zp, uio);
3554 mutex_exit(&zp->z_lock);
3556 ZFS_ACCESSTIME_STAMP(zsb, zp);
3557 zfs_inode_update(zp);
3561 EXPORT_SYMBOL(zfs_readlink);
3564 * Insert a new entry into directory tdip referencing sip.
3566 * IN: tdip - Directory to contain new entry.
3567 * sip - inode of new entry.
3568 * name - name of new entry.
3569 * cr - credentials of caller.
3571 * RETURN: 0 if success
3572 * error code if failure
3575 * tdip - ctime|mtime updated
3576 * sip - ctime updated
3580 zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr)
3582 znode_t *dzp = ITOZ(tdip);
3584 zfs_sb_t *zsb = ITOZSB(tdip);
3593 ASSERT(S_ISDIR(tdip->i_mode));
3600 * POSIX dictates that we return EPERM here.
3601 * Better choices include ENOTSUP or EISDIR.
3603 if (S_ISDIR(sip->i_mode)) {
3608 if (sip->i_sb != tdip->i_sb) {
3616 /* Prevent links to .zfs/shares files */
3618 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zsb),
3619 &parent, sizeof (uint64_t))) != 0) {
3623 if (parent == zsb->z_shares_dir) {
3628 if (zsb->z_utf8 && u8_validate(name,
3629 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3633 #ifdef HAVE_PN_UTILS
3634 if (flags & FIGNORECASE)
3636 #endif /* HAVE_PN_UTILS */
3639 * We do not support links between attributes and non-attributes
3640 * because of the potential security risk of creating links
3641 * into "normal" file space in order to circumvent restrictions
3642 * imposed in attribute space.
3644 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
3649 owner = zfs_fuid_map_id(zsb, szp->z_uid, cr, ZFS_OWNER);
3650 if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
3655 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
3662 * Attempt to lock directory; fail if entry already exists.
3664 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
3670 tx = dmu_tx_create(zsb->z_os);
3671 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3672 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3673 zfs_sa_upgrade_txholds(tx, szp);
3674 zfs_sa_upgrade_txholds(tx, dzp);
3675 error = dmu_tx_assign(tx, TXG_NOWAIT);
3677 zfs_dirent_unlock(dl);
3678 if (error == ERESTART) {
3688 error = zfs_link_create(dl, szp, tx, 0);
3691 uint64_t txtype = TX_LINK;
3692 #ifdef HAVE_PN_UTILS
3693 if (flags & FIGNORECASE)
3695 #endif /* HAVE_PN_UTILS */
3696 zfs_log_link(zilog, tx, txtype, dzp, szp, name);
3701 zfs_dirent_unlock(dl);
3703 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
3704 zil_commit(zilog, 0);
3706 zfs_inode_update(dzp);
3707 zfs_inode_update(szp);
3711 EXPORT_SYMBOL(zfs_link);
3715 * zfs_null_putapage() is used when the file system has been force
3716 * unmounted. It just drops the pages.
3720 zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
3721 size_t *lenp, int flags, cred_t *cr)
3723 pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR);
3728 * Push a page out to disk, klustering if possible.
3730 * IN: vp - file to push page to.
3731 * pp - page to push.
3732 * flags - additional flags.
3733 * cr - credentials of caller.
3735 * OUT: offp - start of range pushed.
3736 * lenp - len of range pushed.
3738 * RETURN: 0 if success
3739 * error code if failure
3741 * NOTE: callers must have locked the page to be pushed. On
3742 * exit, the page (and all other pages in the kluster) must be
3747 zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
3748 size_t *lenp, int flags, cred_t *cr)
3750 znode_t *zp = VTOZ(vp);
3751 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3753 u_offset_t off, koff;
3760 * If our blocksize is bigger than the page size, try to kluster
3761 * multiple pages so that we write a full block (thus avoiding
3762 * a read-modify-write).
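/*
 * Worked example (sketch, assuming the file extends past 384K): with
 * z_blksz = 128K, PAGESIZE = 4K and a dirty page at off = 260K,
 * klen = P2ROUNDUP(128K, 4K) = 128K and koff = P2ALIGN(260K, 128K) =
 * 256K, so pvn_write_kluster() gathers the pages covering
 * [256K, 384K) and the block is written whole rather than
 * read-modify-written.
 */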
3764 if (off < zp->z_size && zp->z_blksz > PAGESIZE) {
3765 klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE);
3766 koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0;
3767 ASSERT(koff <= zp->z_size);
3768 if (koff + klen > zp->z_size)
3769 klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE);
3770 pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags);
3772 ASSERT3U(btop(len), ==, btopr(len));
3775 * Can't push pages past end-of-file.
3777 if (off >= zp->z_size) {
3778 /* ignore all pages */
3781 } else if (off + len > zp->z_size) {
3782 int npages = btopr(zp->z_size - off);
3785 page_list_break(&pp, &trunc, npages);
3786 /* ignore pages past end of file */
3788 pvn_write_done(trunc, flags);
3789 len = zp->z_size - off;
3792 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
3793 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
3798 tx = dmu_tx_create(zfsvfs->z_os);
3799 dmu_tx_hold_write(tx, zp->z_id, off, len);
3801 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3802 zfs_sa_upgrade_txholds(tx, zp);
3803 err = dmu_tx_assign(tx, TXG_NOWAIT);
3805 if (err == ERESTART) {
3814 if (zp->z_blksz <= PAGESIZE) {
3815 caddr_t va = zfs_map_page(pp, S_READ);
3816 ASSERT3U(len, <=, PAGESIZE);
3817 dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx);
3818 zfs_unmap_page(pp, va);
3820 err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx);
3824 uint64_t mtime[2], ctime[2];
3825 sa_bulk_attr_t bulk[3];
3828 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
3830 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
3832 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
3834 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime,
3836 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0);
3841 pvn_write_done(pp, (err ? B_ERROR : 0) | flags);
3851 * Copy the portion of the file indicated from pages into the file.
3852 * The pages are stored in a page list attached to the file's vnode.
3854 * IN: vp - vnode of file to push page data to.
3855 * off - position in file to put data.
3856 * len - amount of data to write.
3857 * flags - flags to control the operation.
3858 * cr - credentials of caller.
3859 * ct - caller context.
3861 * RETURN: 0 if success
3862 * error code if failure
3865 * vp - ctime|mtime updated
3869 zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr)
3871 znode_t *zp = VTOZ(vp);
3872 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
3884 * Align this request to the file block size in case we kluster.
3885 * XXX - this can result in pretty aggressive locking, which can
3886 * impact simultaneous read/write access. One option might be
3887 * to break up long requests (len == 0) into block-by-block
3888 * operations to get narrower locking.
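/*
 * Worked example (sketch): a request with off = 10K and len = 100K
 * against blksz = 128K widens to io_off = P2ALIGN(10K, 128K) = 0 and
 * io_len = P2ROUNDUP(100K + 10K, 128K) = 128K, so the range lock
 * below covers the whole block touched by the request.
 */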
3890 blksz = zp->z_blksz;
3892 io_off = P2ALIGN_TYPED(off, blksz, u_offset_t);
3895 if (len > 0 && ISP2(blksz))
3896 io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t);
3902 * Search the entire vp list for pages >= io_off.
3904 rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER);
3905 error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr);
3908 rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER);
3910 if (off > zp->z_size) {
3911 /* past end of file */
3912 zfs_range_unlock(rl);
3917 len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off);
3919 for (off = io_off; io_off < off + len; io_off += io_len) {
3920 if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
3921 pp = page_lookup(vp, io_off,
3922 (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED);
3924 pp = page_lookup_nowait(vp, io_off,
3925 (flags & B_FREE) ? SE_EXCL : SE_SHARED);
3928 if (pp != NULL && pvn_getdirty(pp, flags)) {
3932 * Found a dirty page to push
3934 err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr);
3942 zfs_range_unlock(rl);
3943 if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3944 zil_commit(zfsvfs->z_log, zp->z_id);
3948 #endif /* HAVE_MMAP */
3952 zfs_inactive(struct inode *ip)
3954 znode_t *zp = ITOZ(ip);
3955 zfs_sb_t *zsb = ITOZSB(ip);
3958 #ifdef HAVE_SNAPSHOT
3959 /* Early return for snapshot inode? */
3960 #endif /* HAVE_SNAPSHOT */
3962 rw_enter(&zsb->z_teardown_inactive_lock, RW_READER);
3963 if (zp->z_sa_hdl == NULL) {
3964 rw_exit(&zsb->z_teardown_inactive_lock);
3968 if (zp->z_atime_dirty && zp->z_unlinked == 0) {
3969 dmu_tx_t *tx = dmu_tx_create(zsb->z_os);
3971 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3972 zfs_sa_upgrade_txholds(tx, zp);
3973 error = dmu_tx_assign(tx, TXG_WAIT);
3977 mutex_enter(&zp->z_lock);
3978 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zsb),
3979 (void *)&zp->z_atime, sizeof (zp->z_atime), tx);
3980 zp->z_atime_dirty = 0;
3981 mutex_exit(&zp->z_lock);
3987 rw_exit(&zsb->z_teardown_inactive_lock);
3989 EXPORT_SYMBOL(zfs_inactive);
3992 * Bounds-check the seek operation.
3994 * IN: ip - inode seeking within
3995 * ooff - old file offset
3996 * noffp - pointer to new file offset
3997 * ct - caller context
3999 * RETURN: 0 if success
4000 * EINVAL if new offset invalid
4004 zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
4006 if (S_ISDIR(ip->i_mode))
4008 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4010 EXPORT_SYMBOL(zfs_seek);
4014 * Pre-filter the generic locking function to trap attempts to place
4015 * a mandatory lock on a memory mapped file.
4018 zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
4019 flk_callback_t *flk_cbp, cred_t *cr)
4021 znode_t *zp = VTOZ(vp);
4022 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4028 * We are following the UFS semantics with respect to mapcnt
4029 * here: If we see that the file is mapped already, then we will
4030 * return an error, but we don't worry about races between this
4031 * function and zfs_map().
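/*
 * For reference (a sketch of the SVR4 convention MANDMODE() encodes,
 * not new behavior): mandatory locking is signalled by the set-gid
 * bit being on while group execute is off, e.g. a file chmod'd to
 * 2640 and then mmap'd would be refused a lock here.
 */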
4033 if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) {
4038 return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4042 * If we can't find a page in the cache, we will create a new page
4043 * and fill it with file data. For efficiency, we may try to fill
4044 * multiple pages at once (klustering) to fill up the supplied page
4045 * list. Note that the pages to be filled are held with an exclusive
4046 * lock to prevent access by other threads while they are being filled.
4049 zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg,
4050 caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw)
4052 znode_t *zp = VTOZ(vp);
4053 page_t *pp, *cur_pp;
4054 objset_t *os = zp->z_zfsvfs->z_os;
4055 u_offset_t io_off, total;
4059 if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) {
4061 * We only have a single page, don't bother klustering
4065 pp = page_create_va(vp, io_off, io_len,
4066 PG_EXCL | PG_WAIT, seg, addr);
4069 * Try to find enough pages to fill the page list
4071 pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
4072 &io_len, off, plsz, 0);
4076 * The page already exists, nothing to do here.
4083 * Fill the pages in the kluster.
4086 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
4089 ASSERT3U(io_off, ==, cur_pp->p_offset);
4090 va = zfs_map_page(cur_pp, S_WRITE);
4091 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
4093 zfs_unmap_page(cur_pp, va);
4095 /* On error, toss the entire kluster */
4096 pvn_read_done(pp, B_ERROR);
4097 /* convert checksum errors into IO errors */
4102 cur_pp = cur_pp->p_next;
4106 * Fill in the page list array from the kluster starting
4107 * from the desired offset `off'.
4108 * NOTE: the page list will always be null terminated.
4110 pvn_plist_init(pp, pl, plsz, off, io_len, rw);
4111 ASSERT(pl == NULL || (*pl)->p_offset == off);
4117 * Return pointers to the pages for the file region [off, off + len]
4118 * in the pl array. If plsz is greater than len, this function may
4119 * also return page pointers from after the specified region
4120 * (i.e. the region [off, off + plsz]). These additional pages are
4121 * only returned if they are already in the cache, or were created as
4122 * part of a klustered read.
4124 * IN: vp - vnode of file to get data from.
4125 * off - position in file to get data from.
4126 * len - amount of data to retrieve.
4127 * plsz - length of provided page list.
4128 * seg - segment to obtain pages for.
4129 * addr - virtual address of fault.
4130 * rw - mode of created pages.
4131 * cr - credentials of caller.
4132 * ct - caller context.
4134 * OUT: protp - protection mode of created pages.
4135 * pl - list of pages created.
4137 * RETURN: 0 if success
4138 * error code if failure
4141 * vp - atime updated
4145 zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
4146 page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
4147 enum seg_rw rw, cred_t *cr)
4149 znode_t *zp = VTOZ(vp);
4150 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4154 /* we do our own caching, faultahead is unnecessary */
4157 else if (len > plsz)
4160 len = P2ROUNDUP(len, PAGESIZE);
4161 ASSERT(plsz >= len);
4170 * Loop through the requested range [off, off + len) looking
4171 * for pages. If we don't find a page, we will need to create
4172 * a new page and fill it with data from the file.
4175 if ((*pl = page_lookup(vp, off, SE_SHARED)))
4177 else if ((err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw)))
4180 ASSERT3U((*pl)->p_offset, ==, off);
4184 ASSERT3U(len, >=, PAGESIZE);
4187 ASSERT3U(plsz, >=, PAGESIZE);
4194 * Fill out the page array with any pages already in the cache.
4197 (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) {
4204 * Release any pages we have previously locked.
4209 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4219 * Request a memory map for a section of a file. This code interacts
4220 * with common code and the VM system as follows:
4222 * common code calls mmap(), which ends up in smmap_common()
4224 * this calls VOP_MAP(), which takes you into (say) zfs
4226 * zfs_map() calls as_map(), passing segvn_create() as the callback
4228 * segvn_create() creates the new segment and calls VOP_ADDMAP()
4230 * zfs_addmap() updates z_mapcnt
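/*
 * From user space the whole chain is driven by an ordinary mmap()
 * call; a minimal sketch (illustration only, path hypothetical):
 *
 *	int fd = open("/tank/fs/file", O_RDWR);
 *	char *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	p[0] = 'x';		faults pages in via zfs_getpage()
 *	munmap(p, 8192);	unmapping ends up in zfs_delmap()
 */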
4234 zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
4235 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr)
4237 znode_t *zp = VTOZ(vp);
4238 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4239 segvn_crargs_t vn_a;
4245 if ((prot & PROT_WRITE) && (zp->z_pflags &
4246 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4251 if ((prot & (PROT_READ | PROT_EXEC)) &&
4252 (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4257 if (vp->v_flag & VNOMAP) {
4262 if (off < 0 || len > MAXOFFSET_T - off) {
4267 if (vp->v_type != VREG) {
4273 * If file is locked, disallow mapping.
4275 if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
4281 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
4289 vn_a.offset = (u_offset_t)off;
4290 vn_a.type = flags & MAP_TYPE;
4292 vn_a.maxprot = maxprot;
4295 vn_a.flags = flags & ~MAP_TYPE;
4297 vn_a.lgrp_mem_policy_flags = 0;
4299 error = as_map(as, *addrp, len, segvn_create, &vn_a);
4308 zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4309 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr)
4311 uint64_t pages = btopr(len);
4313 atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
4318 * The reason we push dirty pages as part of zfs_delmap() is so that we get a
4319 * more accurate mtime for the associated file. Since we don't have a way of
4320 * detecting when the data was actually modified, we have to resort to
4321 * heuristics. If an explicit msync() is done, then we mark the mtime when the
4322 * last page is pushed. The problem occurs when the msync() call is omitted,
4323 * which is by far the most common case:
4331 * putpage() via fsflush
4333 * If we wait until fsflush to come along, we can have a modification time that
4334 * is some arbitrary point in the future. In order to prevent this in the
4335 * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is removed.
4340 zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4341 size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr)
4343 uint64_t pages = btopr(len);
4345 ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages);
4346 atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages);
4348 if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
4349 vn_has_cached_data(vp))
4350 (void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct);
4354 #endif /* HAVE_MMAP */
4357 * convoff - converts the given data (start, whence) to the
4361 convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
4366 if ((lckdat->l_whence == 2) || (whence == 2)) {
4367 if ((error = zfs_getattr(ip, &vap, 0, CRED())) != 0)
4371 switch (lckdat->l_whence) {
4373 lckdat->l_start += offset;
4376 lckdat->l_start += vap.va_size;
4384 if (lckdat->l_start < 0)
4389 lckdat->l_start -= offset;
4392 lckdat->l_start -= vap.va_size;
4400 lckdat->l_whence = (short)whence;
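/*
 * Worked example (sketch): an l_whence = 2 (SEEK_END) lock with
 * l_start = -4096 on a 64K file converts, for whence = 0, to an
 * absolute l_start of 65536 - 4096 = 61440 with l_whence = 0; the
 * zfs_getattr() call above supplies va_size for that arithmetic.
 */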
4405 * Free or allocate space in a file. Currently, this function only
4406 * supports the `F_FREESP' command. However, this command is somewhat
4407 * misnamed, as its functionality includes the ability to allocate as
4408 * well as free space.
4410 * IN: ip - inode of file to free data in.
4411 * cmd - action to take (only F_FREESP supported).
4412 * bfp - section of file to free/alloc.
4413 * flag - current file open mode flags.
4414 * offset - current file offset.
4415 * cr - credentials of caller [UNUSED].
4417 * RETURN: 0 if success
4418 * error code if failure
4421 * ip - ctime|mtime updated
4425 zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
4426 offset_t offset, cred_t *cr)
4428 znode_t *zp = ITOZ(ip);
4429 zfs_sb_t *zsb = ITOZSB(ip);
4436 if (cmd != F_FREESP) {
4441 if ((error = convoff(ip, bfp, 0, offset))) {
4446 if (bfp->l_len < 0) {
4452 len = bfp->l_len; /* 0 means from off to end of file */
4454 error = zfs_freesp(zp, off, len, flag, TRUE);
4459 EXPORT_SYMBOL(zfs_space);
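/*
 * Usage sketch (illustration only): truncating a file's tail from its
 * current size down to 64K through this entry point amounts to
 *
 *	flock64_t bf = { 0 };
 *	bf.l_whence = 0;	absolute offset
 *	bf.l_start = 64 * 1024;	free from here ...
 *	bf.l_len = 0;		... to end of file
 *	error = zfs_space(ip, F_FREESP, &bf, FWRITE, 0, CRED());
 */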
4463 zfs_fid(struct inode *ip, fid_t *fidp)
4465 znode_t *zp = ITOZ(ip);
4466 zfs_sb_t *zsb = ITOZSB(ip);
4469 uint64_t object = zp->z_id;
4476 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zsb),
4477 &gen64, sizeof (uint64_t))) != 0) {
4482 gen = (uint32_t)gen64;
4484 size = (zsb->z_parent != zsb) ? LONG_FID_LEN : SHORT_FID_LEN;
4485 if (fidp->fid_len < size) {
4486 fidp->fid_len = size;
4491 zfid = (zfid_short_t *)fidp;
4493 zfid->zf_len = size;
4495 for (i = 0; i < sizeof (zfid->zf_object); i++)
4496 zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
4498 /* Must have a non-zero generation number to distinguish from .zfs */
4501 for (i = 0; i < sizeof (zfid->zf_gen); i++)
4502 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
4504 if (size == LONG_FID_LEN) {
4505 uint64_t objsetid = dmu_objset_id(zsb->z_os);
4508 zlfid = (zfid_long_t *)fidp;
4510 for (i = 0; i < sizeof (zlfid->zf_setid); i++)
4511 zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
4513 /* XXX - this should be the generation number for the objset */
4514 for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
4515 zlfid->zf_setgen[i] = 0;
4521 EXPORT_SYMBOL(zfs_fid);
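/*
 * The packing above is little-endian by construction; worked example
 * (sketch): object id 0x1234 is stored in zf_object[] as
 * { 0x34, 0x12, 0x00, ... }, and the same shift-per-byte loop
 * reassembles it on lookup regardless of host byte order.
 */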
4525 zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
4527 znode_t *zp = ITOZ(ip);
4528 zfs_sb_t *zsb = ITOZSB(ip);
4530 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4534 error = zfs_getacl(zp, vsecp, skipaclchk, cr);
4539 EXPORT_SYMBOL(zfs_getsecattr);
4543 zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
4545 znode_t *zp = ITOZ(ip);
4546 zfs_sb_t *zsb = ITOZSB(ip);
4548 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
4549 zilog_t *zilog = zsb->z_log;
4554 error = zfs_setacl(zp, vsecp, skipaclchk, cr);
4556 if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
4557 zil_commit(zilog, 0);
4562 EXPORT_SYMBOL(zfs_setsecattr);
4564 #ifdef HAVE_UIO_ZEROCOPY
4566 * Tunables; both must be a power of 2.
4568 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
4569 * zcr_blksz_max: if set to less than the file block size, allow loaning out of
4570 * an arcbuf for a partial block read
4572 int zcr_blksz_min = (1 << 10); /* 1K */
4573 int zcr_blksz_max = (1 << 17); /* 128K */
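/*
 * Effect of the tunables (sketch): a file with a 512-byte block size
 * is read through a zcr_blksz_min = 1K window, while one with a 1M
 * block size is clamped to zcr_blksz_max = 128K, so loaned arc_bufs
 * stay within the [1K, 128K] range (see the clamping in the read
 * path below).
 */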
4577 zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
4579 znode_t *zp = ITOZ(ip);
4580 zfs_sb_t *zsb = ITOZSB(ip);
4581 int max_blksz = zsb->z_max_blksz;
4582 uio_t *uio = &xuio->xu_uio;
4583 ssize_t size = uio->uio_resid;
4584 offset_t offset = uio->uio_loffset;
4589 int preamble, postamble;
4591 if (xuio->xu_type != UIOTYPE_ZEROCOPY)
4599 * Loan out an arc_buf for write if write size is bigger than
4600 * max_blksz, and the file's block size is also max_blksz.
4603 if (size < blksz || zp->z_blksz != blksz) {
4608 * Caller requests buffers for write before knowing where the
4609 * write offset might be (e.g. NFS TCP write).
4614 preamble = P2PHASE(offset, blksz);
4616 preamble = blksz - preamble;
4621 postamble = P2PHASE(size, blksz);
4624 fullblk = size / blksz;
4625 (void) dmu_xuio_init(xuio,
4626 (preamble != 0) + fullblk + (postamble != 0));
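/*
 * Worked example (sketch): blksz = 128K, offset = 100K, size = 300K.
 * P2PHASE(100K, 128K) = 100K, so preamble = 128K - 100K = 28K and
 * size drops to 272K; postamble = P2PHASE(272K, 128K) = 16K leaves
 * 256K, i.e. fullblk = 2, and dmu_xuio_init() above is asked for
 * (1 + 2 + 1) = 4 arc_bufs.
 */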
4629 * Have to fix iov base/len for partial buffers. They
4630 * currently represent full arc_buf's.
4633 /* data begins in the middle of the arc_buf */
4634 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4637 (void) dmu_xuio_add(xuio, abuf,
4638 blksz - preamble, preamble);
4641 for (i = 0; i < fullblk; i++) {
4642 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4645 (void) dmu_xuio_add(xuio, abuf, 0, blksz);
4649 /* data ends in the middle of the arc_buf */
4650 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
4653 (void) dmu_xuio_add(xuio, abuf, 0, postamble);
4658 * Loan out an arc_buf for read if the read size is larger than
4659 * the current file block size. Block alignment is not
4660 * considered. Partial arc_buf will be loaned out for read.
4662 blksz = zp->z_blksz;
4663 if (blksz < zcr_blksz_min)
4664 blksz = zcr_blksz_min;
4665 if (blksz > zcr_blksz_max)
4666 blksz = zcr_blksz_max;
4667 /* avoid potential complexity of dealing with it */
4668 if (blksz > max_blksz) {
4673 maxsize = zp->z_size - uio->uio_loffset;
4687 uio->uio_extflg = UIO_XUIO;
4688 XUIO_XUZC_RW(xuio) = ioflag;
4695 zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
4699 int ioflag = XUIO_XUZC_RW(xuio);
4701 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
4703 i = dmu_xuio_cnt(xuio);
4705 abuf = dmu_xuio_arcbuf(xuio, i);
4707 * if abuf == NULL, it must be a write buffer
4708 * that has been returned in zfs_write().
4711 dmu_return_arcbuf(abuf);
4712 ASSERT(abuf || ioflag == UIO_WRITE);
4715 dmu_xuio_fini(xuio);
4718 #endif /* HAVE_UIO_ZEROCOPY */