/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2011 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2015 Jörg Thalheim.
 */
30 #include <sys/taskq.h>
32 #include <linux/backing-dev.h>
33 #include <linux/compat.h>
/*
 * Added insert_inode_locked() helper function, prior to this most callers
 * used insert_inode_hash().  The older method doesn't check for collisions
 * in the inode_hashtable but it is still acceptable for use.
 */
#ifndef HAVE_INSERT_INODE_LOCKED
static inline int
insert_inode_locked(struct inode *ip)
{
	/* No collision check is performed by insert_inode_hash(). */
	insert_inode_hash(ip);
	return (0);
}
#endif /* HAVE_INSERT_INODE_LOCKED */
52 * Add truncate_setsize() if it is not exported by the Linux kernel.
54 * Truncate the inode and pages associated with the inode. The pages are
55 * unmapped and removed from cache.
57 #ifndef HAVE_TRUNCATE_SETSIZE
59 truncate_setsize(struct inode *ip, loff_t new)
61 struct address_space *mapping = ip->i_mapping;
63 i_size_write(ip, new);
65 unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
66 truncate_inode_pages(mapping, new);
67 unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
69 #endif /* HAVE_TRUNCATE_SETSIZE */
72 * 2.6.32 - 2.6.33, bdi_setup_and_register() is not available.
73 * 2.6.34 - 3.19, bdi_setup_and_register() takes 3 arguments.
74 * 4.0 - 4.11, bdi_setup_and_register() takes 2 arguments.
75 * 4.12 - x.y, super_setup_bdi_name() new interface.
77 #if defined(HAVE_SUPER_SETUP_BDI_NAME)
78 extern atomic_long_t zfs_bdi_seq;
81 zpl_bdi_setup(struct super_block *sb, char *name)
83 return super_setup_bdi_name(sb, "%.28s-%ld", name,
84 atomic_long_inc_return(&zfs_bdi_seq));
87 zpl_bdi_destroy(struct super_block *sb)
90 #elif defined(HAVE_2ARGS_BDI_SETUP_AND_REGISTER)
92 zpl_bdi_setup(struct super_block *sb, char *name)
94 struct backing_dev_info *bdi;
97 bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
98 error = bdi_setup_and_register(bdi, name);
100 kmem_free(bdi, sizeof (struct backing_dev_info));
109 zpl_bdi_destroy(struct super_block *sb)
111 struct backing_dev_info *bdi = sb->s_bdi;
114 kmem_free(bdi, sizeof (struct backing_dev_info));
117 #elif defined(HAVE_3ARGS_BDI_SETUP_AND_REGISTER)
119 zpl_bdi_setup(struct super_block *sb, char *name)
121 struct backing_dev_info *bdi;
124 bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
125 error = bdi_setup_and_register(bdi, name, BDI_CAP_MAP_COPY);
127 kmem_free(sb->s_bdi, sizeof (struct backing_dev_info));
136 zpl_bdi_destroy(struct super_block *sb)
138 struct backing_dev_info *bdi = sb->s_bdi;
141 kmem_free(bdi, sizeof (struct backing_dev_info));
145 extern atomic_long_t zfs_bdi_seq;
148 zpl_bdi_setup(struct super_block *sb, char *name)
150 struct backing_dev_info *bdi;
153 bdi = kmem_zalloc(sizeof (struct backing_dev_info), KM_SLEEP);
155 bdi->capabilities = BDI_CAP_MAP_COPY;
157 error = bdi_init(bdi);
159 kmem_free(bdi, sizeof (struct backing_dev_info));
163 error = bdi_register(bdi, NULL, "%.28s-%ld", name,
164 atomic_long_inc_return(&zfs_bdi_seq));
167 kmem_free(bdi, sizeof (struct backing_dev_info));
176 zpl_bdi_destroy(struct super_block *sb)
178 struct backing_dev_info *bdi = sb->s_bdi;
181 kmem_free(bdi, sizeof (struct backing_dev_info));
/*
 * 4.14 adds SB_* flag definitions, define them to MS_* equivalents
 * so the same names can be used on older kernels.  Each definition is
 * guarded so a kernel that already provides the SB_* names wins.
 */
#ifndef SB_RDONLY
#define	SB_RDONLY	MS_RDONLY
#endif

#ifndef SB_SILENT
#define	SB_SILENT	MS_SILENT
#endif

#ifndef SB_ACTIVE
#define	SB_ACTIVE	MS_ACTIVE
#endif

#ifndef SB_POSIXACL
#define	SB_POSIXACL	MS_POSIXACL
#endif

#ifndef SB_MANDLOCK
#define	SB_MANDLOCK	MS_MANDLOCK
#endif
/*
 * LOOKUP_RCU flag introduced to distinguish rcu-walk from ref-walk cases.
 * Define it as a no-op value on kernels which lack it.
 */
#ifndef LOOKUP_RCU
#define	LOOKUP_RCU	0x0
#endif /* LOOKUP_RCU */
219 * 3.2-rc1 API change,
220 * Add set_nlink() if it is not exported by the Linux kernel.
222 * i_nlink is read-only in Linux 3.2, but it can be set directly in
225 #ifndef HAVE_SET_NLINK
227 set_nlink(struct inode *inode, unsigned int nlink)
229 inode->i_nlink = nlink;
231 #endif /* HAVE_SET_NLINK */
/*
 * The VFS .create, .mkdir and .mknod callbacks were updated to take a
 * umode_t type rather than an int.  To cleanly handle both definitions
 * the zpl_umode_t type is introduced and set accordingly.
 */
#ifdef HAVE_MKDIR_UMODE_T
typedef umode_t		zpl_umode_t;
#else
typedef int		zpl_umode_t;
#endif
/*
 * The clear_inode() function replaces end_writeback() and introduces an
 * ordering change regarding when the inode_sync_wait() occurs.  See the
 * configure check in config/kernel-clear-inode.m4 for full details.
 */
#if defined(HAVE_EVICT_INODE) && !defined(HAVE_CLEAR_INODE)
#define	clear_inode(ip)		end_writeback(ip)
#endif /* HAVE_EVICT_INODE && !HAVE_CLEAR_INODE */
/*
 * The sget() helper function now takes the mount flags as an argument.
 * zpl_sget() papers over the difference by dropping the flags argument
 * on kernels whose sget() only takes four arguments.
 */
#ifdef HAVE_5ARG_SGET
#define	zpl_sget(type, cmp, set, fl, mtd)	sget(type, cmp, set, fl, mtd)
#else
#define	zpl_sget(type, cmp, set, fl, mtd)	sget(type, cmp, set, mtd)
#endif /* HAVE_5ARG_SGET */
#if defined(SEEK_HOLE) && defined(SEEK_DATA) && !defined(HAVE_LSEEK_EXECUTE)
/*
 * Fallback lseek_execute(): validate the requested offset against the
 * file mode and maximum size, then commit it to filp->f_pos under
 * f_lock.  Returns the new offset or -EINVAL.
 */
static inline loff_t
lseek_execute(struct file *filp, struct inode *inode, loff_t offset,
    loff_t maxsize)
{
	if (offset < 0 && !(filp->f_mode & FMODE_UNSIGNED_OFFSET))
		return (-EINVAL);

	if (offset > maxsize)
		return (-EINVAL);

	if (offset != filp->f_pos) {
		spin_lock(&filp->f_lock);
		filp->f_pos = offset;
		/* NOTE(review): kernel lseek also resets f_version here —
		 * confirm against the in-tree generic_file_llseek(). */
		filp->f_version = 0;
		spin_unlock(&filp->f_lock);
	}

	return (offset);
}
#endif /* SEEK_HOLE && SEEK_DATA && !HAVE_LSEEK_EXECUTE */
290 #if defined(CONFIG_FS_POSIX_ACL)
292 * These functions safely approximates the behavior of posix_acl_release()
293 * which cannot be used because it calls the GPL-only symbol kfree_rcu().
294 * The in-kernel version, which can access the RCU, frees the ACLs after
295 * the grace period expires. Because we're unsure how long that grace
296 * period may be this implementation conservatively delays for 60 seconds.
297 * This is several orders of magnitude larger than expected grace period.
298 * At 60 seconds the kernel will also begin issuing RCU stall warnings.
301 #include <linux/posix_acl.h>
303 #if defined(HAVE_POSIX_ACL_RELEASE) && !defined(HAVE_POSIX_ACL_RELEASE_GPL_ONLY)
304 #define zpl_posix_acl_release(arg) posix_acl_release(arg)
306 void zpl_posix_acl_release_impl(struct posix_acl *);
309 zpl_posix_acl_release(struct posix_acl *acl)
311 if ((acl == NULL) || (acl == ACL_NOT_CACHED))
313 #ifdef HAVE_ACL_REFCOUNT
314 if (refcount_dec_and_test(&acl->a_refcount))
315 zpl_posix_acl_release_impl(acl);
317 if (atomic_dec_and_test(&acl->a_refcount))
318 zpl_posix_acl_release_impl(acl);
321 #endif /* HAVE_POSIX_ACL_RELEASE */
323 #ifdef HAVE_SET_CACHED_ACL_USABLE
324 #define zpl_set_cached_acl(ip, ty, n) set_cached_acl(ip, ty, n)
325 #define zpl_forget_cached_acl(ip, ty) forget_cached_acl(ip, ty)
328 zpl_set_cached_acl(struct inode *ip, int type, struct posix_acl *newer)
330 struct posix_acl *older = NULL;
332 spin_lock(&ip->i_lock);
334 if ((newer != ACL_NOT_CACHED) && (newer != NULL))
335 posix_acl_dup(newer);
338 case ACL_TYPE_ACCESS:
340 rcu_assign_pointer(ip->i_acl, newer);
342 case ACL_TYPE_DEFAULT:
343 older = ip->i_default_acl;
344 rcu_assign_pointer(ip->i_default_acl, newer);
348 spin_unlock(&ip->i_lock);
350 zpl_posix_acl_release(older);
354 zpl_forget_cached_acl(struct inode *ip, int type)
356 zpl_set_cached_acl(ip, type, (struct posix_acl *)ACL_NOT_CACHED);
358 #endif /* HAVE_SET_CACHED_ACL_USABLE */
360 #ifndef HAVE___POSIX_ACL_CHMOD
361 #ifdef HAVE_POSIX_ACL_CHMOD
362 #define __posix_acl_chmod(acl, gfp, mode) posix_acl_chmod(acl, gfp, mode)
363 #define __posix_acl_create(acl, gfp, mode) posix_acl_create(acl, gfp, mode)
366 __posix_acl_chmod(struct posix_acl **acl, int flags, umode_t umode)
368 struct posix_acl *oldacl = *acl;
372 *acl = posix_acl_clone(*acl, flags);
373 zpl_posix_acl_release(oldacl);
378 error = posix_acl_chmod_masq(*acl, mode);
380 zpl_posix_acl_release(*acl);
388 __posix_acl_create(struct posix_acl **acl, int flags, umode_t *umodep)
390 struct posix_acl *oldacl = *acl;
391 mode_t mode = *umodep;
394 *acl = posix_acl_clone(*acl, flags);
395 zpl_posix_acl_release(oldacl);
400 error = posix_acl_create_masq(*acl, &mode);
404 zpl_posix_acl_release(*acl);
410 #endif /* HAVE_POSIX_ACL_CHMOD */
411 #endif /* HAVE___POSIX_ACL_CHMOD */
/*
 * posix_acl_equiv_mode() changed its mode argument from mode_t to
 * umode_t; zpl_equivmode_t matches whichever the kernel expects.
 */
#ifdef HAVE_POSIX_ACL_EQUIV_MODE_UMODE_T
typedef umode_t zpl_equivmode_t;
#else
typedef mode_t zpl_equivmode_t;
#endif /* HAVE_POSIX_ACL_EQUIV_MODE_UMODE_T */
421 * posix_acl_valid() now must be passed a namespace, the namespace from
422 * from super block associated with the given inode is used for this purpose.
424 #ifdef HAVE_POSIX_ACL_VALID_WITH_NS
425 #define zpl_posix_acl_valid(ip, acl) posix_acl_valid(ip->i_sb->s_user_ns, acl)
427 #define zpl_posix_acl_valid(ip, acl) posix_acl_valid(acl)
430 #endif /* CONFIG_FS_POSIX_ACL */
/*
 * The is_owner_or_cap() function was renamed to inode_owner_or_capable().
 */
#ifdef HAVE_INODE_OWNER_OR_CAPABLE
#define	zpl_inode_owner_or_capable(ip)		inode_owner_or_capable(ip)
#else
#define	zpl_inode_owner_or_capable(ip)		is_owner_or_cap(ip)
#endif /* HAVE_INODE_OWNER_OR_CAPABLE */
444 * struct access f->f_dentry->d_inode was replaced by accessor function
447 #ifndef HAVE_FILE_INODE
448 static inline struct inode *file_inode(const struct file *f)
450 return (f->f_dentry->d_inode);
452 #endif /* HAVE_FILE_INODE */
456 * struct access file->f_path.dentry was replaced by accessor function
459 #ifndef HAVE_FILE_DENTRY
460 static inline struct dentry *file_dentry(const struct file *f)
462 return (f->f_path.dentry);
464 #endif /* HAVE_FILE_DENTRY */
466 #ifdef HAVE_KUID_HELPERS
467 static inline uid_t zfs_uid_read_impl(struct inode *ip)
469 #ifdef HAVE_SUPER_USER_NS
470 return (from_kuid(ip->i_sb->s_user_ns, ip->i_uid));
472 return (from_kuid(kcred->user_ns, ip->i_uid));
476 static inline uid_t zfs_uid_read(struct inode *ip)
478 return (zfs_uid_read_impl(ip));
481 static inline gid_t zfs_gid_read_impl(struct inode *ip)
483 #ifdef HAVE_SUPER_USER_NS
484 return (from_kgid(ip->i_sb->s_user_ns, ip->i_gid));
486 return (from_kgid(kcred->user_ns, ip->i_gid));
490 static inline gid_t zfs_gid_read(struct inode *ip)
492 return (zfs_gid_read_impl(ip));
495 static inline void zfs_uid_write(struct inode *ip, uid_t uid)
497 #ifdef HAVE_SUPER_USER_NS
498 ip->i_uid = make_kuid(ip->i_sb->s_user_ns, uid);
500 ip->i_uid = make_kuid(kcred->user_ns, uid);
504 static inline void zfs_gid_write(struct inode *ip, gid_t gid)
506 #ifdef HAVE_SUPER_USER_NS
507 ip->i_gid = make_kgid(ip->i_sb->s_user_ns, gid);
509 ip->i_gid = make_kgid(kcred->user_ns, gid);
514 static inline uid_t zfs_uid_read(struct inode *ip)
519 static inline gid_t zfs_gid_read(struct inode *ip)
524 static inline void zfs_uid_write(struct inode *ip, uid_t uid)
529 static inline void zfs_gid_write(struct inode *ip, gid_t gid)
/*
 * follow_down() was renamed follow_down_one(); map the zpl_ wrappers
 * onto whichever names the running kernel provides.
 */
#ifdef HAVE_FOLLOW_DOWN_ONE
#define	zpl_follow_down_one(path)		follow_down_one(path)
#define	zpl_follow_up(path)			follow_up(path)
#else
#define	zpl_follow_down_one(path)		follow_down(path)
#define	zpl_follow_up(path)			follow_up(path)
#endif
549 #ifndef HAVE_SETATTR_PREPARE
551 setattr_prepare(struct dentry *dentry, struct iattr *ia)
553 return (inode_change_ok(dentry->d_inode, ia));
/*
 * These macros are defined by kernel 4.11.  We define them so that the same
 * code builds under kernels < 4.11 and >= 4.11.  The macros are set to 0 so
 * that it will create obvious failures if they are accidentally used when
 * built against a kernel >= 4.11.
 */
#ifndef STATX_BASIC_STATS
#define	STATX_BASIC_STATS	0
#endif

#ifndef AT_STATX_SYNC_AS_STAT
#define	AT_STATX_SYNC_AS_STAT	0
#endif
/*
 * 4.11 .getattr takes struct path *, < 4.11 takes vfsmount *.
 * ZPL_GETATTR_WRAPPER(func) emits a kernel-facing stub named 'func'
 * which adapts either calling convention onto func##_impl().
 */
#ifdef HAVE_VFSMOUNT_IOPS_GETATTR
#define	ZPL_GETATTR_WRAPPER(func)					\
static int								\
func(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)	\
{									\
	struct path path = { .mnt = mnt, .dentry = dentry };		\
	return (func##_impl(&path, stat, STATX_BASIC_STATS,		\
	    AT_STATX_SYNC_AS_STAT));					\
}
#elif defined(HAVE_PATH_IOPS_GETATTR)
#define	ZPL_GETATTR_WRAPPER(func)					\
static int								\
func(const struct path *path, struct kstat *stat, u32 request_mask,	\
    unsigned int query_flags)						\
{									\
	return (func##_impl(path, stat, request_mask, query_flags));	\
}
#endif
601 * Preferred interface to get the current FS time.
603 #if !defined(HAVE_CURRENT_TIME)
604 static inline struct timespec
605 current_time(struct inode *ip)
607 return (timespec_trunc(current_kernel_time(), ip->i_sb->s_time_gran));
613 * Added iversion interface for managing inode version field.
615 #ifdef HAVE_INODE_SET_IVERSION
616 #include <linux/iversion.h>
619 inode_set_iversion(struct inode *ip, u64 val)
626 * Returns true when called in the context of a 32-bit system call.
629 zpl_is_32bit_api(void)
632 #ifdef HAVE_IN_COMPAT_SYSCALL
633 return (in_compat_syscall());
635 return (is_compat_task());
638 return (BITS_PER_LONG == 32);
642 #endif /* _ZFS_VFS_H */