/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Vnode Implementation.
\*****************************************************************************/

#include <sys/cred.h>
#include <sys/vnode.h>
#include <linux/falloc.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_VNODE

vnode_t *rootdir = (vnode_t *)0xabcd1234;
EXPORT_SYMBOL(rootdir);

static spl_kmem_cache_t *vn_cache;
static spl_kmem_cache_t *vn_file_cache;

static DEFINE_SPINLOCK(vn_file_lock);
static LIST_HEAD(vn_file_list);

vtype_t
vn_mode_to_vtype(mode_t mode)
{
        if (S_ISREG(mode))
                return VREG;

        if (S_ISDIR(mode))
                return VDIR;

        if (S_ISCHR(mode))
                return VCHR;

        if (S_ISBLK(mode))
                return VBLK;

        if (S_ISFIFO(mode))
                return VFIFO;

        if (S_ISLNK(mode))
                return VLNK;

        if (S_ISSOCK(mode))
                return VSOCK;

        return VNON;
} /* vn_mode_to_vtype() */
EXPORT_SYMBOL(vn_mode_to_vtype);

mode_t
vn_vtype_to_mode(vtype_t vtype)
{
        if (vtype == VREG)
                return S_IFREG;

        if (vtype == VDIR)
                return S_IFDIR;

        if (vtype == VCHR)
                return S_IFCHR;

        if (vtype == VBLK)
                return S_IFBLK;

        if (vtype == VFIFO)
                return S_IFIFO;

        if (vtype == VLNK)
                return S_IFLNK;

        if (vtype == VSOCK)
                return S_IFSOCK;

        return VNON;
} /* vn_vtype_to_mode() */
EXPORT_SYMBOL(vn_vtype_to_mode);

vnode_t *
vn_alloc(int flag)
{
        vnode_t *vp;
        SENTRY;

        vp = kmem_cache_alloc(vn_cache, flag);
        if (vp != NULL) {
                vp->v_file = NULL;
                vp->v_type = 0;
        }

        SRETURN(vp);
} /* vn_alloc() */
EXPORT_SYMBOL(vn_alloc);

void
vn_free(vnode_t *vp)
{
        SENTRY;
        kmem_cache_free(vn_cache, vp);
        SEXIT;
} /* vn_free() */
EXPORT_SYMBOL(vn_free);

int
vn_open(const char *path, uio_seg_t seg, int flags, int mode,
        vnode_t **vpp, int x1, void *x2)
{
        struct file *fp;
        struct kstat stat;
        int rc, saved_umask = 0;
        gfp_t saved_gfp;
        vnode_t *vp;
        SENTRY;

        ASSERT(flags & (FWRITE | FREAD));
        ASSERT(seg == UIO_SYSSPACE);
        ASSERT(vpp);
        *vpp = NULL;

        if (!(flags & FCREAT) && (flags & FWRITE))
                flags |= FEXCL;

        /* Note for filp_open() the two low bits must be remapped to mean:
         * 01 - read-only  -> 00 read-only
         * 10 - write-only -> 01 write-only
         * 11 - read-write -> 10 read-write
         */
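        /*
         * For example (illustrative only): with the Solaris-style flag
         * values used by the SPL headers, FREAD|FWRITE (0x3) becomes
         * O_RDWR (0x2) after the decrement below, and FREAD (0x1)
         * becomes O_RDONLY (0x0).
         */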
        flags--;

        if (flags & FCREAT)
                saved_umask = xchg(&current->fs->umask, 0);

        fp = filp_open(path, flags, mode);

        if (flags & FCREAT)
                (void)xchg(&current->fs->umask, saved_umask);

        if (IS_ERR(fp))
                SRETURN(-PTR_ERR(fp));

#ifdef HAVE_2ARGS_VFS_GETATTR
        rc = vfs_getattr(&fp->f_path, &stat);
#else
        rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
        if (rc) {
                filp_close(fp, 0);
                SRETURN(-rc);
        }

        vp = vn_alloc(KM_SLEEP);
        if (!vp) {
                filp_close(fp, 0);
                SRETURN(ENOMEM);
        }

        saved_gfp = mapping_gfp_mask(fp->f_mapping);
        mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));

        mutex_enter(&vp->v_lock);
        vp->v_type = vn_mode_to_vtype(stat.mode);
        vp->v_file = fp;
        vp->v_gfp_mask = saved_gfp;
        *vpp = vp;
        mutex_exit(&vp->v_lock);

        SRETURN(0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);
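
/*
 * A minimal vn_open() usage sketch (not from the original source; it assumes
 * the Solaris-style FREAD flag and UIO_SYSSPACE segment from the SPL headers,
 * and a path chosen purely for illustration):
 *
 *        vnode_t *vp = NULL;
 *        int rc;
 *
 *        rc = vn_open("/etc/hostid", UIO_SYSSPACE, FREAD, 0644, &vp, 0, NULL);
 *        if (rc == 0)
 *                (void) vn_close(vp, FREAD, 0, 0, NULL, NULL);
 *
 * On success the underlying struct file stays referenced until vn_close();
 * errors are returned as positive errno values.
 */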

int
vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
          vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd)
{
        char *realpath;
        int len, rc;
        SENTRY;

        ASSERT(vp == rootdir);

        len = strlen(path) + 2;
        realpath = kmalloc(len, GFP_KERNEL);
        if (!realpath)
                SRETURN(ENOMEM);

        (void)snprintf(realpath, len, "/%s", path);
        rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
        kfree(realpath);

        SRETURN(rc);
} /* vn_openat() */
EXPORT_SYMBOL(vn_openat);

int
vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
        uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp)
{
        loff_t offset;
        mm_segment_t saved_fs;
        struct file *fp;
        int rc;
        SENTRY;

        ASSERT(uio == UIO_WRITE || uio == UIO_READ);
        ASSERT(vp);
        ASSERT(vp->v_file);
        ASSERT(seg == UIO_SYSSPACE);
        ASSERT((ioflag & ~FAPPEND) == 0);
        ASSERT(x2 == RLIM64_INFINITY);

        fp = vp->v_file;

        offset = off;
        if (ioflag & FAPPEND)
                offset = fp->f_pos;

        /* The writable user data segment must be briefly expanded for this
         * process so we can use the user space read/write call paths to
         * access memory allocated by the kernel. */
        saved_fs = get_fs();
        set_fs(get_ds());

        if (uio & UIO_WRITE)
                rc = vfs_write(fp, addr, len, &offset);
        else
                rc = vfs_read(fp, addr, len, &offset);

        set_fs(saved_fs);
        fp->f_pos = offset;

        if (rc < 0)
                SRETURN(-rc);

        if (residp) {
                *residp = len - rc;
        } else {
                if (rc != len)
                        SRETURN(EIO);
        }

        SRETURN(0);
} /* vn_rdwr() */
EXPORT_SYMBOL(vn_rdwr);
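
/*
 * A minimal vn_rdwr() usage sketch (not from the original source; vp is
 * assumed to come from vn_open() and buf is a kernel buffer):
 *
 *        char buf[128];
 *        ssize_t resid = 0;
 *        int rc;
 *
 *        rc = vn_rdwr(UIO_READ, vp, buf, sizeof (buf), 0, UIO_SYSSPACE,
 *            0, RLIM64_INFINITY, NULL, &resid);
 *
 * On success resid holds the number of bytes NOT transferred; passing a
 * NULL residp instead turns any short read or write into an EIO error.
 */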

int
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{
        int rc;
        SENTRY;

        ASSERT(vp);
        ASSERT(vp->v_file);

        mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask);
        rc = filp_close(vp->v_file, 0);
        vn_free(vp);

        SRETURN(-rc);
} /* vn_close() */
EXPORT_SYMBOL(vn_close);

/* vn_seek() does not actually seek; it only performs bounds checking on the
 * proposed offset.  We perform minimal checking and allow vn_rdwr() to catch
 * anything more serious. */
int
vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct)
{
        return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
EXPORT_SYMBOL(vn_seek);

/*
 * kern_path() was introduced in Linux 2.6.28. We duplicate it as a
 * compatibility shim for earlier kernels.
 */
#ifndef HAVE_KERN_PATH
int
kern_path(const char *name, unsigned int flags, struct path *path)
{
        struct nameidata nd;
        int rc = path_lookup(name, flags, &nd);
        if (!rc)
                *path = nd.path;
        return rc;
}
#endif /* HAVE_KERN_PATH */

/*
 * spl_basename() takes a NUL-terminated path string s as input. It returns
 * a char pointer and a length that together describe the basename of the
 * path. If the basename is not "." or "/", the returned pointer refers into
 * the input string itself. While the input string is NUL-terminated, the
 * section referring to the basename is not. spl_basename is dual-licensed
 * GPLv2+ and CC0. Anyone wishing to reuse it in another codebase may pick
 * either license.
 */
static void
spl_basename(const char *s, const char **str, int *len)
{
        size_t i, end;

        ASSERT(str);
        ASSERT(len);

        if (!s || !*s) {
                *str = ".";
                *len = 1;
                return;
        }

        i = strlen(s) - 1;

        while (i && s[i--] == '/');

        if (i == 0) {
                *str = "/";
                *len = 1;
                return;
        }

        end = i;

        for (end = i; i; i--) {
                if (s[i] == '/') {
                        *str = &s[i+1];
                        *len = end - i + 1;
                        return;
                }
        }

        *str = s;
        *len = end + 1;
}
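
/*
 * A few illustrative spl_basename() cases (not from the original source;
 * str and len are the returned pointer and length):
 *
 *        spl_basename("/tmp/foo",  &str, &len);   str -> "foo",  len == 3
 *        spl_basename("/tmp/foo/", &str, &len);   str -> "foo/", len == 3
 *        spl_basename("///",       &str, &len);   str -> "/",    len == 1
 *        spl_basename("",          &str, &len);   str -> ".",    len == 1
 *
 * Only the first len characters of str belong to the basename; the span is
 * not NUL-terminated.
 */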

static struct dentry *
spl_kern_path_locked(const char *name, struct path *path)
{
        struct path parent;
        struct dentry *dentry;
        const char *basename;
        int len;
        int rc;

        ASSERT(name);
        ASSERT(path);

        spl_basename(name, &basename, &len);

        /* We do not accept "." or ".." */
        if (len <= 2 && basename[0] == '.')
                if (len == 1 || basename[1] == '.')
                        return (ERR_PTR(-EACCES));

        rc = kern_path(name, LOOKUP_PARENT, &parent);
        if (rc)
                return (ERR_PTR(rc));

        spl_inode_lock_nested(parent.dentry->d_inode, I_MUTEX_PARENT);

        dentry = lookup_one_len(basename, parent.dentry, len);
        if (IS_ERR(dentry)) {
                spl_inode_unlock(parent.dentry->d_inode);
                path_put(&parent);
        } else {
                *path = parent;
        }

        return (dentry);
}

/* Based on do_unlinkat() from linux/fs/namei.c */
int
vn_remove(const char *path, uio_seg_t seg, int flags)
{
        struct dentry *dentry;
        struct path parent;
        struct inode *inode = NULL;
        int rc = 0;
        SENTRY;

        ASSERT(seg == UIO_SYSSPACE);
        ASSERT(flags == RMFILE);

        dentry = spl_kern_path_locked(path, &parent);
        rc = PTR_ERR(dentry);
        if (!IS_ERR(dentry)) {
                if (parent.dentry->d_name.name[parent.dentry->d_name.len])
                        SGOTO(slashes, rc = 0);

                inode = dentry->d_inode;
                if (inode)
                        atomic_inc(&inode->i_count);
                else
                        SGOTO(slashes, rc = 0);

#ifdef HAVE_2ARGS_VFS_UNLINK
                rc = vfs_unlink(parent.dentry->d_inode, dentry);
#else
                rc = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
#endif /* HAVE_2ARGS_VFS_UNLINK */
exit1:
                dput(dentry);
        } else {
                return (-rc);
        }

        spl_inode_unlock(parent.dentry->d_inode);
        if (inode)
                iput(inode);    /* truncate the inode here */

        path_put(&parent);
        SRETURN(-rc);

slashes:
        rc = !dentry->d_inode ? -ENOENT :
            S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
        SGOTO(exit1, rc);
} /* vn_remove() */
EXPORT_SYMBOL(vn_remove);
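
/*
 * A minimal vn_remove() usage sketch (not from the original source; the path
 * is hypothetical and RMFILE is the only accepted flag, as asserted above):
 *
 *        rc = vn_remove("/tmp/spl-scratch", UIO_SYSSPACE, RMFILE);
 *
 * Errors are returned as positive errno values.
 */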

/* Based on do_rename() from linux/fs/namei.c */
int
vn_rename(const char *oldname, const char *newname, int x1)
{
        struct dentry *old_dir, *new_dir;
        struct dentry *old_dentry, *new_dentry;
        struct dentry *trap;
        struct path old_parent, new_parent;
        int rc = 0;
        SENTRY;

        old_dentry = spl_kern_path_locked(oldname, &old_parent);
        if (IS_ERR(old_dentry))
                SGOTO(exit, rc = PTR_ERR(old_dentry));

        spl_inode_unlock(old_parent.dentry->d_inode);

        new_dentry = spl_kern_path_locked(newname, &new_parent);
        if (IS_ERR(new_dentry))
                SGOTO(exit2, rc = PTR_ERR(new_dentry));

        spl_inode_unlock(new_parent.dentry->d_inode);

        rc = -EXDEV;
        if (old_parent.mnt != new_parent.mnt)
                SGOTO(exit3, rc);

        old_dir = old_parent.dentry;
        new_dir = new_parent.dentry;
        trap = lock_rename(new_dir, old_dir);

        /* source should not be an ancestor of target */
        rc = -EINVAL;
        if (old_dentry == trap)
                SGOTO(exit4, rc);

        /* target should not be an ancestor of source */
        rc = -ENOTEMPTY;
        if (new_dentry == trap)
                SGOTO(exit4, rc);

        /* source must exist */
        rc = -ENOENT;
        if (!old_dentry->d_inode)
                SGOTO(exit4, rc);

        /* unless the source is a directory, trailing slashes give -ENOTDIR */
        if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
                rc = -ENOTDIR;
                if (old_dentry->d_name.name[old_dentry->d_name.len])
                        SGOTO(exit4, rc);
                if (new_dentry->d_name.name[new_dentry->d_name.len])
                        SGOTO(exit4, rc);
        }

#if defined(HAVE_4ARGS_VFS_RENAME)
        rc = vfs_rename(old_dir->d_inode, old_dentry,
            new_dir->d_inode, new_dentry);
#elif defined(HAVE_5ARGS_VFS_RENAME)
        rc = vfs_rename(old_dir->d_inode, old_dentry,
            new_dir->d_inode, new_dentry, NULL);
#else
        rc = vfs_rename(old_dir->d_inode, old_dentry,
            new_dir->d_inode, new_dentry, NULL, 0);
#endif
exit4:
        unlock_rename(new_dir, old_dir);
exit3:
        dput(new_dentry);
        path_put(&new_parent);
exit2:
        dput(old_dentry);
        path_put(&old_parent);
exit:
        SRETURN(-rc);
}
EXPORT_SYMBOL(vn_rename);

int
vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
{
        struct file *fp;
        struct kstat stat;
        int rc;
        SENTRY;

        ASSERT(vp);
        ASSERT(vp->v_file);
        ASSERT(vap);

        fp = vp->v_file;

#ifdef HAVE_2ARGS_VFS_GETATTR
        rc = vfs_getattr(&fp->f_path, &stat);
#else
        rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
        if (rc)
                SRETURN(-rc);

        vap->va_type          = vn_mode_to_vtype(stat.mode);
        vap->va_mode          = stat.mode;
        vap->va_uid           = KUID_TO_SUID(stat.uid);
        vap->va_gid           = KGID_TO_SGID(stat.gid);
        vap->va_fsid          = 0;
        vap->va_nodeid        = stat.ino;
        vap->va_nlink         = stat.nlink;
        vap->va_size          = stat.size;
        vap->va_blksize       = stat.blksize;
        vap->va_atime         = stat.atime;
        vap->va_mtime         = stat.mtime;
        vap->va_ctime         = stat.ctime;
        vap->va_rdev          = stat.rdev;
        vap->va_nblocks       = stat.blocks;

        SRETURN(0);
}
EXPORT_SYMBOL(vn_getattr);
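
/*
 * A minimal vn_getattr() usage sketch (not from the original source; vp is
 * assumed to come from vn_open()):
 *
 *        vattr_t va;
 *        int rc;
 *
 *        rc = vn_getattr(vp, &va, 0, NULL, NULL);
 *
 * On success the vattr_t fields (va_size, va_mode, va_type, ...) mirror the
 * kstat returned by vfs_getattr(); errors are returned as positive errno
 * values.
 */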

int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{
        int datasync = 0;
        SENTRY;

        ASSERT(vp);
        ASSERT(vp->v_file);

        if (flags & FDSYNC)
                datasync = 1;

        SRETURN(-spl_filp_fsync(vp->v_file, datasync));
} /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync);

int vn_space(vnode_t *vp, int cmd, struct flock *bfp, int flag,
    offset_t offset, void *x6, void *x7)
{
        int error = EOPNOTSUPP;
        SENTRY;

        if (cmd != F_FREESP || bfp->l_whence != 0)
                SRETURN(EOPNOTSUPP);

        ASSERT(vp);
        ASSERT(vp->v_file);
        ASSERT(bfp->l_start >= 0 && bfp->l_len > 0);

#ifdef FALLOC_FL_PUNCH_HOLE
        /*
         * When supported by the underlying file system, preferentially
         * use the fallocate() callback to punch a hole in the file.
         */
        error = -spl_filp_fallocate(vp->v_file,
            FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
            bfp->l_start, bfp->l_len);
        if (error == 0)
                SRETURN(0);
#endif

#ifdef HAVE_INODE_TRUNCATE_RANGE
        if (vp->v_file->f_dentry && vp->v_file->f_dentry->d_inode &&
            vp->v_file->f_dentry->d_inode->i_op &&
            vp->v_file->f_dentry->d_inode->i_op->truncate_range) {
                off_t end = bfp->l_start + bfp->l_len;
                /*
                 * Judging from the code in shmem_truncate_range(),
                 * it seems the kernel expects the end offset to be
                 * inclusive and aligned to the end of a page.
                 */
                if (end % PAGE_SIZE != 0) {
                        end &= ~(off_t)(PAGE_SIZE - 1);
                        if (end <= bfp->l_start)
                                SRETURN(0);
                }
                --end;

                vp->v_file->f_dentry->d_inode->i_op->truncate_range(
                        vp->v_file->f_dentry->d_inode,
                        bfp->l_start, end
                );
                SRETURN(0);
        }
#endif

        SRETURN(error);
}
EXPORT_SYMBOL(vn_space);
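
/*
 * A minimal vn_space() usage sketch (not from the original source; it
 * assumes the Solaris-style F_FREESP command and a vnode from vn_open()):
 * free (punch out) 4 KiB starting at a 64 KiB offset.
 *
 *        struct flock bf;
 *
 *        bf.l_whence = 0;
 *        bf.l_start = 65536;
 *        bf.l_len = 4096;
 *        rc = vn_space(vp, F_FREESP, &bf, 0, 0, NULL, NULL);
 *
 * l_whence must be 0 (SEEK_SET) and only the F_FREESP command is handled;
 * anything else returns EOPNOTSUPP.
 */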

/* Function must be called while holding the vn_file_lock */
static file_t *
file_find(int fd)
{
        file_t *fp;

        ASSERT(spin_is_locked(&vn_file_lock));

        list_for_each_entry(fp, &vn_file_list,  f_list) {
                if (fd == fp->f_fd && fp->f_task == current) {
                        ASSERT(atomic_read(&fp->f_ref) != 0);
                        return fp;
                }
        }

        return NULL;
} /* file_find() */

file_t *
vn_getf(int fd)
{
        struct kstat stat;
        struct file *lfp;
        file_t *fp;
        vnode_t *vp;
        int rc = 0;
        SENTRY;

        /* Already open; just take an extra reference */
        spin_lock(&vn_file_lock);

        fp = file_find(fd);
        if (fp) {
                atomic_inc(&fp->f_ref);
                spin_unlock(&vn_file_lock);
                SRETURN(fp);
        }

        spin_unlock(&vn_file_lock);

        /* File was not yet opened; create the object and set it up */
        fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
        if (fp == NULL)
                SGOTO(out, rc);

        mutex_enter(&fp->f_lock);

        fp->f_fd = fd;
        fp->f_task = current;
        fp->f_offset = 0;
        atomic_inc(&fp->f_ref);

        lfp = fget(fd);
        if (lfp == NULL)
                SGOTO(out_mutex, rc);

        vp = vn_alloc(KM_SLEEP);
        if (vp == NULL)
                SGOTO(out_fget, rc);

#ifdef HAVE_2ARGS_VFS_GETATTR
        rc = vfs_getattr(&lfp->f_path, &stat);
#else
        rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
#endif
        if (rc)
                SGOTO(out_vnode, rc);

        mutex_enter(&vp->v_lock);
        vp->v_type = vn_mode_to_vtype(stat.mode);
        vp->v_file = lfp;
        mutex_exit(&vp->v_lock);

        fp->f_vnode = vp;
        fp->f_file = lfp;

        /* Put it on the tracking list */
        spin_lock(&vn_file_lock);
        list_add(&fp->f_list, &vn_file_list);
        spin_unlock(&vn_file_lock);

        mutex_exit(&fp->f_lock);
        SRETURN(fp);

out_vnode:
        vn_free(vp);
out_fget:
        fput(lfp);
out_mutex:
        mutex_exit(&fp->f_lock);
        kmem_cache_free(vn_file_cache, fp);
out:
        SRETURN(NULL);
} /* getf() */
EXPORT_SYMBOL(getf);

static void releasef_locked(file_t *fp)
{
        ASSERT(fp->f_file);
        ASSERT(fp->f_vnode);

        /* Unlinked from list, no refs, safe to free outside mutex */
        fput(fp->f_file);
        vn_free(fp->f_vnode);

        kmem_cache_free(vn_file_cache, fp);
}

void
vn_releasef(int fd)
{
        file_t *fp;
        SENTRY;

        spin_lock(&vn_file_lock);
        fp = file_find(fd);
        if (fp) {
                atomic_dec(&fp->f_ref);
                if (atomic_read(&fp->f_ref) > 0) {
                        spin_unlock(&vn_file_lock);
                        SEXIT;
                        return;
                }

                list_del(&fp->f_list);
                releasef_locked(fp);
        }
        spin_unlock(&vn_file_lock);

        SEXIT;
        return;
} /* releasef() */
EXPORT_SYMBOL(releasef);
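
/*
 * A minimal vn_getf()/vn_releasef() usage sketch (not from the original
 * source): wrap a file descriptor supplied by user space, use it through
 * the returned file_t, then drop the reference.
 *
 *        file_t *fp;
 *
 *        fp = vn_getf(fd);
 *        if (fp != NULL) {
 *                ... use fp->f_vnode or fp->f_file ...
 *                vn_releasef(fd);
 *        }
 *
 * Each successful vn_getf() call takes a reference that must be matched by
 * a vn_releasef() call; the final release unlinks the entry and frees it.
 */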

#ifndef HAVE_SET_FS_PWD
void
#  ifdef HAVE_SET_FS_PWD_WITH_CONST
set_fs_pwd(struct fs_struct *fs, const struct path *path)
#  else
set_fs_pwd(struct fs_struct *fs, struct path *path)
#  endif
{
        struct path old_pwd;

#  ifdef HAVE_FS_STRUCT_SPINLOCK
        spin_lock(&fs->lock);
        old_pwd = fs->pwd;
        fs->pwd = *path;
        path_get(path);
        spin_unlock(&fs->lock);
#  else
        write_lock(&fs->lock);
        old_pwd = fs->pwd;
        fs->pwd = *path;
        path_get(path);
        write_unlock(&fs->lock);
#  endif /* HAVE_FS_STRUCT_SPINLOCK */

        if (old_pwd.dentry)
                path_put(&old_pwd);
}
#endif /* HAVE_SET_FS_PWD */

int
vn_set_pwd(const char *filename)
{
#ifdef HAVE_USER_PATH_DIR
        struct path path;
#else
        struct nameidata nd;
#endif /* HAVE_USER_PATH_DIR */
        mm_segment_t saved_fs;
        int rc;
        SENTRY;

        /*
         * user_path_dir() and __user_walk() both expect 'filename' to be
         * a user space address so we must briefly increase the data segment
         * size to ensure strncpy_from_user() does not fail with -EFAULT.
         */
        saved_fs = get_fs();
        set_fs(get_ds());

# ifdef HAVE_USER_PATH_DIR
        rc = user_path_dir(filename, &path);
        if (rc)
                SGOTO(out, rc);

        rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
        if (rc)
                SGOTO(dput_and_out, rc);

        set_fs_pwd(current->fs, &path);

dput_and_out:
        path_put(&path);
# else
        rc = __user_walk(filename,
                         LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_CHDIR, &nd);
        if (rc)
                SGOTO(out, rc);

        rc = vfs_permission(&nd, MAY_EXEC);
        if (rc)
                SGOTO(dput_and_out, rc);

        set_fs_pwd(current->fs, &nd.path);

dput_and_out:
        path_put(&nd.path);
# endif /* HAVE_USER_PATH_DIR */
out:
        set_fs(saved_fs);

        SRETURN(-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);
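
/*
 * A minimal vn_set_pwd() usage sketch (not from the original source): a
 * caller, e.g. module initialization code, might pin the kernel thread's
 * working directory to the root of the filesystem:
 *
 *        int rc;
 *
 *        rc = vn_set_pwd("/");
 *
 * Errors are returned as positive errno values.
 */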

static int
vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
        struct vnode *vp = buf;

        mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);

        return (0);
} /* vn_cache_constructor() */

static void
vn_cache_destructor(void *buf, void *cdrarg)
{
        struct vnode *vp = buf;

        mutex_destroy(&vp->v_lock);
} /* vn_cache_destructor() */

static int
vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
        file_t *fp = buf;

        atomic_set(&fp->f_ref, 0);
        mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
        INIT_LIST_HEAD(&fp->f_list);

        return (0);
} /* vn_file_cache_constructor() */

static void
vn_file_cache_destructor(void *buf, void *cdrarg)
{
        file_t *fp = buf;

        mutex_destroy(&fp->f_lock);
} /* vn_file_cache_destructor() */

int
spl_vn_init(void)
{
        SENTRY;
        vn_cache = kmem_cache_create("spl_vn_cache",
                                     sizeof(struct vnode), 64,
                                     vn_cache_constructor,
                                     vn_cache_destructor,
                                     NULL, NULL, NULL, KMC_KMEM);

        vn_file_cache = kmem_cache_create("spl_vn_file_cache",
                                          sizeof(file_t), 64,
                                          vn_file_cache_constructor,
                                          vn_file_cache_destructor,
                                          NULL, NULL, NULL, KMC_KMEM);
        SRETURN(0);
} /* spl_vn_init() */

void
spl_vn_fini(void)
{
        file_t *fp, *next_fp;
        int leaked = 0;
        SENTRY;

        spin_lock(&vn_file_lock);

        list_for_each_entry_safe(fp, next_fp, &vn_file_list,  f_list) {
                list_del(&fp->f_list);
                releasef_locked(fp);
                leaked++;
        }

        spin_unlock(&vn_file_lock);

        if (leaked > 0)
                SWARN("Warning %d files leaked\n", leaked);

        kmem_cache_destroy(vn_file_cache);
        kmem_cache_destroy(vn_cache);

        SEXIT;
        return;
} /* spl_vn_fini() */