/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 */

#include <linux/compat.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_project.h>

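/*
 * Open a file.  The generic VFS open checks are performed first, then the
 * request is passed to zfs_open() with the caller's credentials held
 * across the call.
 */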
static int
zpl_open(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	error = generic_file_open(ip, filp);
	if (error)
		return (error);

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	ASSERT3S(error, <=, 0);

	return (error);
}

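/*
 * Release a file.  A dirty atime is flagged on the inode so it will be
 * written back, then zfs_close() is notified of the release.
 */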
static int
zpl_release(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	if (ITOZ(ip)->z_atime_dirty)
		zfs_mark_inode_dirty(ip);

	crhold(cr);
	error = -zfs_close(ip, filp->f_flags, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	ASSERT3S(error, <=, 0);

	return (error);
}

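/*
 * Iterate over the entries of a directory.  The zpl_dir_context_t carries
 * the emit callback and position; zfs_readdir() does the actual work.
 */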
static int
zpl_iterate(struct file *filp, zpl_dir_context_t *ctx)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_readdir(file_inode(filp), ctx, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	ASSERT3S(error, <=, 0);

	return (error);
}

#if !defined(HAVE_VFS_ITERATE) && !defined(HAVE_VFS_ITERATE_SHARED)
static int
zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	zpl_dir_context_t ctx =
	    ZPL_DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
	int error;

	error = zpl_iterate(filp, &ctx);
	filp->f_pos = ctx.pos;

	return (error);
}
#endif /* !HAVE_VFS_ITERATE && !HAVE_VFS_ITERATE_SHARED */

#if defined(HAVE_FSYNC_WITH_DENTRY)
/*
 * Linux 2.6.x - 2.6.34 API,
 * Through 2.6.34 the nfsd kernel server would pass a NULL 'file struct *'
 * to the fops->fsync() hook.  For this reason, we must be careful not to
 * use filp unconditionally.
 */
static int
zpl_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(dentry->d_inode, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	ASSERT3S(error, <=, 0);

	return (error);
}

#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	struct file *filp = kiocb->ki_filp;

	return (zpl_fsync(filp, file_dentry(filp), datasync));
}
#endif

#elif defined(HAVE_FSYNC_WITHOUT_DENTRY)
/*
 * Linux 2.6.35 - 3.0 API,
 * As of 2.6.35 the dentry argument to the fops->fsync() hook was deemed
 * redundant.  The dentry is still accessible via filp->f_path.dentry,
 * and we are guaranteed that filp will never be NULL.
 */
static int
zpl_fsync(struct file *filp, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(inode, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	ASSERT3S(error, <=, 0);

	return (error);
}

#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	return (zpl_fsync(kiocb->ki_filp, datasync));
}
#endif

#elif defined(HAVE_FSYNC_RANGE)
/*
 * Linux 3.1 - 3.x API,
 * As of 3.1 the responsibility to call filemap_write_and_wait_range() has
 * been pushed down into the .fsync() vfs hook.  Additionally, the i_mutex
 * lock is no longer held by the caller; zfs does not require the lock to
 * be held, so we do not acquire it.
 */
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return (error);

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(inode, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	ASSERT3S(error, <=, 0);

	return (error);
}

#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	return (zpl_fsync(kiocb->ki_filp, kiocb->ki_pos, -1, datasync));
}
#endif

#else
#error "Unsupported fops->fsync() implementation"
#endif

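/*
 * Common read path.  The caller's iovec(s) are wrapped in a uio_t and
 * passed to zfs_read(); on success the file position is advanced by the
 * number of bytes actually read.
 */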
static ssize_t
zpl_read_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count,
    unsigned long nr_segs, loff_t *ppos, uio_seg_t segment, int flags,
    cred_t *cr, size_t skip)
{
	ssize_t read;
	uio_t uio = { { 0 }, 0 };
	int error;
	fstrans_cookie_t cookie;

	uio.uio_iov = (struct iovec *)iovp;
	uio.uio_skip = skip;
	uio.uio_resid = count;
	uio.uio_iovcnt = nr_segs;
	uio.uio_loffset = *ppos;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = segment;

	cookie = spl_fstrans_mark();
	error = -zfs_read(ip, &uio, flags, cr);
	spl_fstrans_unmark(cookie);
	if (error < 0)
		return (error);

	read = count - uio.uio_resid;
	*ppos += read;

	return (read);
}

inline ssize_t
zpl_read_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos,
    uio_seg_t segment, int flags, cred_t *cr)
{
	struct iovec iov;

	iov.iov_base = (void *)buf;
	iov.iov_len = len;

	return (zpl_read_common_iovec(ip, &iov, len, 1, ppos, segment,
	    flags, cr, 0));
}

static ssize_t
zpl_iter_read_common(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, size_t count, uio_seg_t seg, size_t skip)
{
	cred_t *cr = CRED();
	struct file *filp = kiocb->ki_filp;
	ssize_t read;

	crhold(cr);
	read = zpl_read_common_iovec(filp->f_mapping->host, iovp, count,
	    nr_segs, &kiocb->ki_pos, seg, filp->f_flags, cr, skip);
	crfree(cr);

	file_accessed(filp);
	return (read);
}

#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
{
	ssize_t ret;
	uio_seg_t seg = UIO_USERSPACE;

	if (to->type & ITER_KVEC)
		seg = UIO_SYSSPACE;
	if (to->type & ITER_BVEC)
		seg = UIO_BVEC;

	ret = zpl_iter_read_common(kiocb, to->iov, to->nr_segs,
	    iov_iter_count(to), seg, to->iov_offset);
	if (ret > 0)
		iov_iter_advance(to, ret);

	return (ret);
}

#else
static ssize_t
zpl_aio_read(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, loff_t pos)
{
	ssize_t ret;
	size_t count;

	ret = generic_segment_checks(iovp, &nr_segs, &count, VERIFY_WRITE);
	if (ret)
		return (ret);

	return (zpl_iter_read_common(kiocb, iovp, nr_segs, count,
	    UIO_USERSPACE, 0));
}
#endif /* HAVE_VFS_RW_ITERATE */

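/*
 * Common write path.  O_APPEND writes are positioned at the current end of
 * file, the caller's iovec(s) are wrapped in a uio_t, and zfs_write() does
 * the update; on success the file position is advanced by the number of
 * bytes written.
 */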
static ssize_t
zpl_write_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count,
    unsigned long nr_segs, loff_t *ppos, uio_seg_t segment, int flags,
    cred_t *cr, size_t skip)
{
	ssize_t wrote;
	uio_t uio = { { 0 }, 0 };
	int error;
	fstrans_cookie_t cookie;

	if (flags & O_APPEND)
		*ppos = i_size_read(ip);

	uio.uio_iov = (struct iovec *)iovp;
	uio.uio_skip = skip;
	uio.uio_resid = count;
	uio.uio_iovcnt = nr_segs;
	uio.uio_loffset = *ppos;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = segment;

	cookie = spl_fstrans_mark();
	error = -zfs_write(ip, &uio, flags, cr);
	spl_fstrans_unmark(cookie);
	if (error < 0)
		return (error);

	wrote = count - uio.uio_resid;
	*ppos += wrote;

	return (wrote);
}

inline ssize_t
zpl_write_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos,
    uio_seg_t segment, int flags, cred_t *cr)
{
	struct iovec iov;

	iov.iov_base = (void *)buf;
	iov.iov_len = len;

	return (zpl_write_common_iovec(ip, &iov, len, 1, ppos, segment,
	    flags, cr, 0));
}

static ssize_t
zpl_iter_write_common(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, size_t count, uio_seg_t seg, size_t skip)
{
	cred_t *cr = CRED();
	struct file *filp = kiocb->ki_filp;
	ssize_t wrote;

	crhold(cr);
	wrote = zpl_write_common_iovec(filp->f_mapping->host, iovp, count,
	    nr_segs, &kiocb->ki_pos, seg, filp->f_flags, cr, skip);
	crfree(cr);

	return (wrote);
}

#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
{
	size_t count;
	ssize_t ret;
	uio_seg_t seg = UIO_USERSPACE;

#ifndef HAVE_GENERIC_WRITE_CHECKS_KIOCB
	struct file *file = kiocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *ip = mapping->host;
	int isblk = S_ISBLK(ip->i_mode);

	count = iov_iter_count(from);
	ret = generic_write_checks(file, &kiocb->ki_pos, &count, isblk);
	if (ret)
		return (ret);
#else
	/*
	 * XXX - ideally this check should be in the same lock region with
	 * the write operations, so that there's no TOCTTOU race when doing
	 * an append while someone else grows the file.
	 */
	ret = generic_write_checks(kiocb, from);
	if (ret <= 0)
		return (ret);
	count = ret;
#endif

	if (from->type & ITER_KVEC)
		seg = UIO_SYSSPACE;
	if (from->type & ITER_BVEC)
		seg = UIO_BVEC;

	ret = zpl_iter_write_common(kiocb, from->iov, from->nr_segs,
	    count, seg, from->iov_offset);
	if (ret > 0)
		iov_iter_advance(from, ret);

	return (ret);
}

#else
static ssize_t
zpl_aio_write(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, loff_t pos)
{
	struct file *file = kiocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *ip = mapping->host;
	int isblk = S_ISBLK(ip->i_mode);
	size_t count;
	ssize_t ret;

	ret = generic_segment_checks(iovp, &nr_segs, &count, VERIFY_READ);
	if (ret)
		return (ret);

	ret = generic_write_checks(file, &pos, &count, isblk);
	if (ret)
		return (ret);

	return (zpl_iter_write_common(kiocb, iovp, nr_segs, count,
	    UIO_USERSPACE, 0));
}
#endif /* HAVE_VFS_RW_ITERATE */

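/*
 * Direct I/O is routed through the same read and write paths used for
 * buffered I/O; the wrappers below only adapt to whichever .direct_IO
 * prototype the running kernel provides.
 */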
#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_direct_IO_impl(int rw, struct kiocb *kiocb, struct iov_iter *iter)
{
	if (rw == WRITE)
		return (zpl_iter_write(kiocb, iter));
	else
		return (zpl_iter_read(kiocb, iter));
}

#if defined(HAVE_VFS_DIRECT_IO_ITER)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter)
{
	return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_OFFSET)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	ASSERT3S(pos, ==, kiocb->ki_pos);
	return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	ASSERT3S(pos, ==, kiocb->ki_pos);
	return (zpl_direct_IO_impl(rw, kiocb, iter));
}
#else
#error "Unknown direct IO interface"
#endif

#else /* HAVE_VFS_RW_ITERATE */

#if defined(HAVE_VFS_DIRECT_IO_IOVEC)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iovp,
    loff_t pos, unsigned long nr_segs)
{
	if (rw == WRITE)
		return (zpl_aio_write(kiocb, iovp, nr_segs, pos));
	else
		return (zpl_aio_read(kiocb, iovp, nr_segs, pos));
}
#else
#error "Unknown direct IO interface"
#endif

#endif /* HAVE_VFS_RW_ITERATE */

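/*
 * Seek within a file.  SEEK_HOLE and SEEK_DATA are resolved by zfs_holey()
 * while holding the inode lock shared; all other whence values are handled
 * by generic_file_llseek().
 */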
static loff_t
zpl_llseek(struct file *filp, loff_t offset, int whence)
{
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
	fstrans_cookie_t cookie;

	if (whence == SEEK_DATA || whence == SEEK_HOLE) {
		struct inode *ip = filp->f_mapping->host;
		loff_t maxbytes = ip->i_sb->s_maxbytes;
		loff_t error;

		spl_inode_lock_shared(ip);
		cookie = spl_fstrans_mark();
		error = -zfs_holey(ip, whence, &offset);
		spl_fstrans_unmark(cookie);
		if (error == 0)
			error = lseek_execute(filp, ip, offset, maxbytes);
		spl_inode_unlock_shared(ip);

		return (error);
	}
#endif /* SEEK_HOLE && SEEK_DATA */

	return (generic_file_llseek(filp, offset, whence));
}

/*
 * It's worth taking a moment to describe how mmap is implemented
 * for zfs because it differs considerably from other Linux filesystems.
 * However, this issue is handled the same way under OpenSolaris.
 *
 * The issue is that by design zfs bypasses the Linux page cache and
 * leaves all caching up to the ARC.  This has been shown to work
 * well for the common read(2)/write(2) case.  However, mmap(2)
 * is a problem because it relies on being tightly integrated with the
 * page cache.  To handle this we cache mmap'ed files twice, once in
 * the ARC and a second time in the page cache.  The code is careful
 * to keep both copies synchronized.
 *
 * When a file with an mmap'ed region is written to using write(2),
 * both the data in the ARC and existing pages in the page cache
 * are updated.  For a read(2), data will be read first from the page
 * cache, then from the ARC if needed.  Neither a write(2) nor a read(2)
 * will ever result in new pages being added to the page cache.
 *
 * New pages are added to the page cache only via .readpage() which
 * is called when the vfs needs to read a page off disk to back the
 * virtual memory region.  These pages may be modified without
 * notifying the ARC and will be written out periodically via
 * .writepage().  This will occur due to either a sync or the usual
 * page aging behavior.  Note that because a read(2) of an mmap'ed file
 * will always check the page cache first, correct data will still be
 * returned even when the ARC is out of date.
 *
 * While this implementation ensures correct behavior it does have
 * some drawbacks.  The most obvious of which is that it increases
 * the required memory footprint when accessing mmap'ed files.  It
 * also adds additional complexity to the code, keeping both caches
 * synchronized.
 *
 * Longer term it may be possible to cleanly resolve this wart by
 * mapping page cache pages directly onto the ARC buffers.  The
 * Linux address space operations are flexible enough to allow
 * selection of which pages back a particular index.  The trick
 * would be working out the details of which subsystem is in
 * charge, the ARC, the page cache, or both.  It may also prove
 * helpful to move the ARC buffers to a scatter-gather list
 * rather than a vmalloc'ed region.
 */

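/*
 * For example (illustrative only), the double caching described above is
 * what keeps the following userspace sequence coherent:
 *
 *	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *	pwrite(fd, buf, len, 0);	// updates the ARC and any cached pages
 *	memcmp(p, buf, len);		// == 0, the mapping observes the write
 */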
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct inode *ip = filp->f_mapping->host;
	znode_t *zp = ITOZ(ip);
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
	    (size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
	spl_fstrans_unmark(cookie);
	if (error)
		return (error);

	error = generic_file_mmap(filp, vma);
	if (error)
		return (error);

	mutex_enter(&zp->z_lock);
	zp->z_is_mapped = B_TRUE;
	mutex_exit(&zp->z_lock);

	return (error);
}

/*
 * Populate a page with data for the Linux page cache.  This function is
 * only used to support mmap(2).  There will be an identical copy of the
 * data in the ARC which is kept up to date via .write() and .writepage().
 *
 * Currently this function relies on zpl_read_common() and the O_DIRECT
 * flag to read in a page.  This works but the more correct way is to
 * update zfs_fillpage() to be Linux friendly and use that interface.
 */
static int
zpl_readpage(struct file *filp, struct page *pp)
{
	struct inode *ip;
	struct page *pl[1];
	int error = 0;
	fstrans_cookie_t cookie;

	ASSERT(PageLocked(pp));
	ip = pp->mapping->host;
	pl[0] = pp;

	cookie = spl_fstrans_mark();
	error = -zfs_getpage(ip, pl, 1);
	spl_fstrans_unmark(cookie);

	if (error) {
		SetPageError(pp);
		ClearPageUptodate(pp);
	} else {
		ClearPageError(pp);
		SetPageUptodate(pp);
		flush_dcache_page(pp);
	}

	unlock_page(pp);
	return (error);
}

/*
 * Populate a set of pages with data for the Linux page cache.  This
 * function will only be called for read ahead and never for demand
 * paging.  For simplicity, the code relies on read_cache_pages() to
 * correctly lock each page for IO and call zpl_readpage().
 */
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
    struct list_head *pages, unsigned nr_pages)
{
	return (read_cache_pages(mapping, pages,
	    (filler_t *)zpl_readpage, filp));
}

static int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
	struct address_space *mapping = data;
	fstrans_cookie_t cookie;

	ASSERT(PageLocked(pp));
	ASSERT(!PageWriteback(pp));

	cookie = spl_fstrans_mark();
	(void) zfs_putpage(mapping->host, pp, wbc);
	spl_fstrans_unmark(cookie);

	return (0);
}

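/*
 * Write back dirty pages for an address space.  See the comments within
 * the function for why write_cache_pages() is first run in non-SYNC mode
 * and the ZIL commit is then issued once for the whole range.
 */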
static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	znode_t *zp = ITOZ(mapping->host);
	zfsvfs_t *zfsvfs = ITOZSB(mapping->host);
	enum writeback_sync_modes sync_mode;
	int result;

	ZFS_ENTER(zfsvfs);
	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;
	ZFS_EXIT(zfsvfs);
	sync_mode = wbc->sync_mode;

	/*
	 * We don't want to run write_cache_pages() in SYNC mode here, because
	 * that would make putpage() wait for a single page to be committed to
	 * disk every single time, resulting in atrocious performance.  Instead
	 * we run it once in non-SYNC mode so that the ZIL gets all the data,
	 * and then we commit it all in one go.
	 */
	wbc->sync_mode = WB_SYNC_NONE;
	result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	if (sync_mode != wbc->sync_mode) {
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);
		if (zfsvfs->z_log != NULL)
			zil_commit(zfsvfs->z_log, zp->z_id);
		ZFS_EXIT(zfsvfs);

		/*
		 * We need to call write_cache_pages() again (we can't just
		 * return after the commit) because the previous call in
		 * non-SYNC mode does not guarantee that we got all the dirty
		 * pages (see the implementation of write_cache_pages() for
		 * details).  That being said, this is a no-op in most cases.
		 */
		wbc->sync_mode = sync_mode;
		result = write_cache_pages(mapping, wbc, zpl_putpage,
		    mapping);
	}
	return (result);
}

/*
 * Write out dirty pages to the ARC.  This function is only required to
 * support mmap(2).  Mapped pages may be dirtied by memory operations
 * which never call .write().  These dirty pages are kept in sync with
 * the ARC buffers via this hook.
 */
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
	if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;

	return (zpl_putpage(pp, wbc, pp->mapping));
}

/*
 * The only flag combination which matches the behavior of zfs_space()
 * is FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE.  The FALLOC_FL_PUNCH_HOLE
 * flag was introduced in the 2.6.38 kernel.
 */
#if defined(HAVE_FILE_FALLOCATE) || defined(HAVE_INODE_FALLOCATE)
static long
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
{
	int error = -EOPNOTSUPP;

#if defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE)
	cred_t *cr = CRED();
	flock64_t bf;
	loff_t olen;
	fstrans_cookie_t cookie;

	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return (error);

	if (offset < 0 || len <= 0)
		return (-EINVAL);

	spl_inode_lock(ip);
	olen = i_size_read(ip);

	if (offset > olen) {
		spl_inode_unlock(ip);
		return (0);
	}
	if (offset + len > olen)
		len = olen - offset;

	bf.l_type = F_WRLCK;
	bf.l_whence = 0;
	bf.l_start = offset;
	bf.l_len = len;
	bf.l_pid = 0;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_space(ip, F_FREESP, &bf, FWRITE, offset, cr);
	spl_fstrans_unmark(cookie);
	spl_inode_unlock(ip);
	crfree(cr);
#endif /* defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE) */

	ASSERT3S(error, <=, 0);
	return (error);
}
#endif /* defined(HAVE_FILE_FALLOCATE) || defined(HAVE_INODE_FALLOCATE) */

#ifdef HAVE_FILE_FALLOCATE
static long
zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
	return zpl_fallocate_common(file_inode(filp),
	    mode, offset, len);
}
#endif /* HAVE_FILE_FALLOCATE */

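/*
 * The ioctl handlers below translate between the ZFS z_pflags attribute
 * bits and the generic Linux FS_*_FL file attribute flags; only the
 * attributes understood by both interfaces are exposed.
 */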
#define	ZFS_FL_USER_VISIBLE	(FS_FL_USER_VISIBLE | ZFS_PROJINHERIT_FL)
#define	ZFS_FL_USER_MODIFIABLE	(FS_FL_USER_MODIFIABLE | ZFS_PROJINHERIT_FL)

static uint32_t
__zpl_ioctl_getflags(struct inode *ip)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	uint32_t ioctl_flags = 0;

	if (zfs_flags & ZFS_IMMUTABLE)
		ioctl_flags |= FS_IMMUTABLE_FL;

	if (zfs_flags & ZFS_APPENDONLY)
		ioctl_flags |= FS_APPEND_FL;

	if (zfs_flags & ZFS_NODUMP)
		ioctl_flags |= FS_NODUMP_FL;

	if (zfs_flags & ZFS_PROJINHERIT)
		ioctl_flags |= ZFS_PROJINHERIT_FL;

	return (ioctl_flags & ZFS_FL_USER_VISIBLE);
}

/*
 * Map zfs file z_pflags (xvattr_t) to linux file attributes.  Only file
 * attributes common to both Linux and Solaris are mapped.
 */
static int
zpl_ioctl_getflags(struct file *filp, void __user *arg)
{
	uint32_t flags;
	int err;

	flags = __zpl_ioctl_getflags(file_inode(filp));
	err = copy_to_user(arg, &flags, sizeof (flags));

	return (err);
}

/*
 * fchange() is a helper macro to detect if we have been asked to change a
 * flag.  This is ugly, but the requirement that we do this is a consequence
 * of how the Linux file attribute interface was designed.  Another
 * consequence is that concurrent modification of files suffers from a TOCTOU
 * race.  Neither are things we can fix without modifying the kernel-userland
 * interface, which is outside of our jurisdiction.
 */
#define	fchange(f0, f1, b0, b1) (!((f0) & (b0)) != !((f1) & (b1)))

static int
__zpl_ioctl_setflags(struct inode *ip, uint32_t ioctl_flags, xvattr_t *xva)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	xoptattr_t *xoap;

	if (ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL |
	    ZFS_PROJINHERIT_FL))
		return (-EOPNOTSUPP);

	if (ioctl_flags & ~ZFS_FL_USER_MODIFIABLE)
		return (-EACCES);

	if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
	    fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return (-EACCES);

	if (!zpl_inode_owner_or_capable(ip))
		return (-EACCES);

	xva_init(xva);
	xoap = xva_getxoptattr(xva);

	XVA_SET_REQ(xva, XAT_IMMUTABLE);
	if (ioctl_flags & FS_IMMUTABLE_FL)
		xoap->xoa_immutable = B_TRUE;

	XVA_SET_REQ(xva, XAT_APPENDONLY);
	if (ioctl_flags & FS_APPEND_FL)
		xoap->xoa_appendonly = B_TRUE;

	XVA_SET_REQ(xva, XAT_NODUMP);
	if (ioctl_flags & FS_NODUMP_FL)
		xoap->xoa_nodump = B_TRUE;

	XVA_SET_REQ(xva, XAT_PROJINHERIT);
	if (ioctl_flags & ZFS_PROJINHERIT_FL)
		xoap->xoa_projinherit = B_TRUE;

	return (0);
}

static int
zpl_ioctl_setflags(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	uint32_t flags;
	cred_t *cr = CRED();
	xvattr_t xva;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&flags, arg, sizeof (flags)))
		return (-EFAULT);

	err = __zpl_ioctl_setflags(ip, flags, &xva);
	if (err)
		return (err);

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ip, (vattr_t *)&xva, 0, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}

static int
zpl_ioctl_getxattr(struct file *filp, void __user *arg)
{
	zfsxattr_t fsx = { 0 };
	struct inode *ip = file_inode(filp);
	int err;

	fsx.fsx_xflags = __zpl_ioctl_getflags(ip);
	fsx.fsx_projid = ITOZ(ip)->z_projid;
	err = copy_to_user(arg, &fsx, sizeof (fsx));

	return (err);
}

static int
zpl_ioctl_setxattr(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	zfsxattr_t fsx;
	cred_t *cr = CRED();
	xvattr_t xva;
	xoptattr_t *xoap;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&fsx, arg, sizeof (fsx)))
		return (-EFAULT);

	if (!zpl_is_valid_projid(fsx.fsx_projid))
		return (-EINVAL);

	err = __zpl_ioctl_setflags(ip, fsx.fsx_xflags, &xva);
	if (err)
		return (err);

	xoap = xva_getxoptattr(&xva);
	XVA_SET_REQ(&xva, XAT_PROJID);
	xoap->xoa_projid = fsx.fsx_projid;

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ip, (vattr_t *)&xva, 0, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}

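/*
 * Top-level ioctl dispatcher for files and directories.  Unrecognized
 * requests are rejected with ENOTTY.
 */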
static long
zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return (zpl_ioctl_getflags(filp, (void *)arg));
	case FS_IOC_SETFLAGS:
		return (zpl_ioctl_setflags(filp, (void *)arg));
	case ZFS_IOC_FSGETXATTR:
		return (zpl_ioctl_getxattr(filp, (void *)arg));
	case ZFS_IOC_FSSETXATTR:
		return (zpl_ioctl_setxattr(filp, (void *)arg));
	default:
		return (-ENOTTY);
	}
}

#ifdef CONFIG_COMPAT
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	default:
		return (-ENOTTY);
	}
	return (zpl_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)));
}
#endif /* CONFIG_COMPAT */

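/*
 * Register the hooks implemented above with the Linux VFS.  The
 * preprocessor checks select the member names and helpers matching the
 * interfaces provided by the running kernel.
 */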
const struct address_space_operations zpl_address_space_operations = {
	.readpages	= zpl_readpages,
	.readpage	= zpl_readpage,
	.writepage	= zpl_writepage,
	.writepages	= zpl_writepages,
	.direct_IO	= zpl_direct_IO,
};

const struct file_operations zpl_file_operations = {
	.open		= zpl_open,
	.release	= zpl_release,
	.llseek		= zpl_llseek,
#ifdef HAVE_VFS_RW_ITERATE
#ifdef HAVE_NEW_SYNC_READ
	.read		= new_sync_read,
	.write		= new_sync_write,
#endif
	.read_iter	= zpl_iter_read,
	.write_iter	= zpl_iter_write,
#else
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= zpl_aio_read,
	.aio_write	= zpl_aio_write,
#endif
	.mmap		= zpl_mmap,
	.fsync		= zpl_fsync,
#ifdef HAVE_FILE_AIO_FSYNC
	.aio_fsync	= zpl_aio_fsync,
#endif
#ifdef HAVE_FILE_FALLOCATE
	.fallocate	= zpl_fallocate,
#endif /* HAVE_FILE_FALLOCATE */
	.unlocked_ioctl	= zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zpl_compat_ioctl,
#endif
};

const struct file_operations zpl_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
#if defined(HAVE_VFS_ITERATE_SHARED)
	.iterate_shared	= zpl_iterate,
#elif defined(HAVE_VFS_ITERATE)
	.iterate	= zpl_iterate,
#else
	.readdir	= zpl_readdir,
#endif
	.fsync		= zpl_fsync,
	.unlocked_ioctl	= zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zpl_compat_ioctl,
#endif
};