module/spl/spl-vnode.c
Linux 2.6.36 compat, fs_struct->lock type change
/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Vnode Implementation.
\*****************************************************************************/

#include <sys/vnode.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_VNODE

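/* The Solaris rootdir vnode is only ever compared against (see the ASSERT
 * in vn_openat() below); it is initialized to a poison value and is never
 * meant to be dereferenced. */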
vnode_t *rootdir = (vnode_t *)0xabcd1234;
EXPORT_SYMBOL(rootdir);

static spl_kmem_cache_t *vn_cache;
static spl_kmem_cache_t *vn_file_cache;

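/* Every file_t handle returned by getf() is tracked on vn_file_list so
 * vn_fini() can detect and reclaim leaked handles.  The list and the
 * per-handle reference counts are protected by vn_file_lock. */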
static DEFINE_SPINLOCK(vn_file_lock);
static LIST_HEAD(vn_file_list);

static vtype_t
vn_get_sol_type(umode_t mode)
{
        if (S_ISREG(mode))
                return VREG;

        if (S_ISDIR(mode))
                return VDIR;

        if (S_ISCHR(mode))
                return VCHR;

        if (S_ISBLK(mode))
                return VBLK;

        if (S_ISFIFO(mode))
                return VFIFO;

        if (S_ISLNK(mode))
                return VLNK;

        if (S_ISSOCK(mode))
                return VSOCK;

        return VNON;
} /* vn_get_sol_type() */

vnode_t *
vn_alloc(int flag)
{
        vnode_t *vp;
        SENTRY;

        vp = kmem_cache_alloc(vn_cache, flag);
        if (vp != NULL) {
                vp->v_file = NULL;
                vp->v_type = 0;
        }

        SRETURN(vp);
} /* vn_alloc() */
EXPORT_SYMBOL(vn_alloc);

void
vn_free(vnode_t *vp)
{
        SENTRY;
        kmem_cache_free(vn_cache, vp);
        SEXIT;
} /* vn_free() */
EXPORT_SYMBOL(vn_free);

int
vn_open(const char *path, uio_seg_t seg, int flags, int mode,
        vnode_t **vpp, int x1, void *x2)
{
        struct file *fp;
        struct kstat stat;
        int rc, saved_umask = 0;
        gfp_t saved_gfp;
        vnode_t *vp;
        SENTRY;

        ASSERT(flags & (FWRITE | FREAD));
        ASSERT(seg == UIO_SYSSPACE);
        ASSERT(vpp);
        *vpp = NULL;

        if (!(flags & FCREAT) && (flags & FWRITE))
                flags |= FEXCL;

        /* Note for filp_open() the two low bits must be remapped to mean:
         * 01 - read-only  -> 00 read-only
         * 10 - write-only -> 01 write-only
         * 11 - read-write -> 10 read-write
         */
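        /* For example, a caller passing (FREAD | FWRITE) supplies 0x3;
         * after the decrement below this becomes 0x2, which filp_open()
         * interprets as O_RDWR.  Likewise FREAD (01) maps to O_RDONLY (00)
         * and FWRITE (10) maps to O_WRONLY (01). */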
        flags--;

        if (flags & FCREAT)
                saved_umask = xchg(&current->fs->umask, 0);

        fp = filp_open(path, flags, mode);

        if (flags & FCREAT)
                (void)xchg(&current->fs->umask, saved_umask);

        if (IS_ERR(fp))
                SRETURN(-PTR_ERR(fp));

        rc = vfs_getattr(fp->f_vfsmnt, fp->f_dentry, &stat);
        if (rc) {
                filp_close(fp, 0);
                SRETURN(-rc);
        }

        vp = vn_alloc(KM_SLEEP);
        if (!vp) {
                filp_close(fp, 0);
                SRETURN(ENOMEM);
        }

        saved_gfp = mapping_gfp_mask(fp->f_mapping);
        mapping_set_gfp_mask(fp->f_mapping, saved_gfp & ~(__GFP_IO|__GFP_FS));

        mutex_enter(&vp->v_lock);
        vp->v_type = vn_get_sol_type(stat.mode);
        vp->v_file = fp;
        vp->v_gfp_mask = saved_gfp;
        *vpp = vp;
        mutex_exit(&vp->v_lock);

        SRETURN(0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);

int
vn_openat(const char *path, uio_seg_t seg, int flags, int mode,
          vnode_t **vpp, int x1, void *x2, vnode_t *vp, int fd)
{
        char *realpath;
        int len, rc;
        SENTRY;

        ASSERT(vp == rootdir);

        len = strlen(path) + 2;
        realpath = kmalloc(len, GFP_KERNEL);
        if (!realpath)
                SRETURN(ENOMEM);

        (void)snprintf(realpath, len, "/%s", path);
        rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
        kfree(realpath);

        SRETURN(rc);
} /* vn_openat() */
EXPORT_SYMBOL(vn_openat);

int
vn_rdwr(uio_rw_t uio, vnode_t *vp, void *addr, ssize_t len, offset_t off,
        uio_seg_t seg, int ioflag, rlim64_t x2, void *x3, ssize_t *residp)
{
        loff_t offset;
        mm_segment_t saved_fs;
        struct file *fp;
        int rc;
        SENTRY;

        ASSERT(uio == UIO_WRITE || uio == UIO_READ);
        ASSERT(vp);
        ASSERT(vp->v_file);
        ASSERT(seg == UIO_SYSSPACE);
        ASSERT((ioflag & ~FAPPEND) == 0);
        ASSERT(x2 == RLIM64_INFINITY);

        fp = vp->v_file;

        offset = off;
        if (ioflag & FAPPEND)
                offset = fp->f_pos;

        /* The writable user data segment must be briefly increased for this
         * process so the user space VFS call paths can be used to access
         * memory allocated by the kernel. */
        saved_fs = get_fs();
        set_fs(get_ds());

        if (uio & UIO_WRITE)
                rc = vfs_write(fp, addr, len, &offset);
        else
                rc = vfs_read(fp, addr, len, &offset);

        set_fs(saved_fs);

        if (rc < 0)
                SRETURN(-rc);

        if (residp) {
                *residp = len - rc;
        } else {
                if (rc != len)
                        SRETURN(EIO);
        }

        SRETURN(0);
} /* vn_rdwr() */
EXPORT_SYMBOL(vn_rdwr);

int
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{
        int rc;
        SENTRY;

        ASSERT(vp);
        ASSERT(vp->v_file);

        mapping_set_gfp_mask(vp->v_file->f_mapping, vp->v_gfp_mask);
        rc = filp_close(vp->v_file, 0);
        vn_free(vp);

        SRETURN(-rc);
} /* vn_close() */
EXPORT_SYMBOL(vn_close);
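
/*
 * Illustrative usage sketch: a typical consumer opens a file by absolute
 * path, performs kernel-buffer I/O through vn_rdwr(), and releases the
 * handle with vn_close().  The path and buffer below are hypothetical.
 *
 *     vnode_t *vp;
 *     char buf[512];
 *     int rc;
 *
 *     rc = vn_open("/etc/hostid", UIO_SYSSPACE, FREAD, 0644, &vp, 0, NULL);
 *     if (rc == 0) {
 *             rc = vn_rdwr(UIO_READ, vp, buf, sizeof (buf), 0,
 *                          UIO_SYSSPACE, 0, RLIM64_INFINITY, NULL, NULL);
 *             (void) vn_close(vp, FREAD, 0, 0, NULL, NULL);
 *     }
 */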

/* vn_seek() does not actually seek; it only performs bounds checking on the
 * proposed seek.  We perform minimal checking and allow vn_rdwr() to catch
 * anything more serious. */
int
vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
{
        return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
EXPORT_SYMBOL(vn_seek);

static struct dentry *
vn_lookup_hash(struct nameidata *nd)
{
        return lookup_one_len((const char *)nd->last.name,
                              nd->nd_dentry, nd->last.len);
} /* vn_lookup_hash() */

static void
vn_path_release(struct nameidata *nd)
{
        dput(nd->nd_dentry);
        mntput(nd->nd_mnt);
}

/* Modified do_unlinkat() from linux/fs/namei.c, only uses exported symbols */
int
vn_remove(const char *path, uio_seg_t seg, int flags)
{
        struct dentry *dentry;
        struct nameidata nd;
        struct inode *inode = NULL;
        int rc = 0;
        SENTRY;

        ASSERT(seg == UIO_SYSSPACE);
        ASSERT(flags == RMFILE);

        rc = path_lookup(path, LOOKUP_PARENT, &nd);
        if (rc)
                SGOTO(exit, rc);

        rc = -EISDIR;
        if (nd.last_type != LAST_NORM)
                SGOTO(exit1, rc);

#ifdef HAVE_INODE_I_MUTEX
        mutex_lock_nested(&nd.nd_dentry->d_inode->i_mutex, I_MUTEX_PARENT);
#else
        down(&nd.nd_dentry->d_inode->i_sem);
#endif /* HAVE_INODE_I_MUTEX */
        dentry = vn_lookup_hash(&nd);
        rc = PTR_ERR(dentry);
        if (!IS_ERR(dentry)) {
                /* Why not before? Because we want correct rc value */
                if (nd.last.name[nd.last.len])
                        SGOTO(slashes, rc);

                inode = dentry->d_inode;
                if (inode)
                        atomic_inc(&inode->i_count);
#ifdef HAVE_2ARGS_VFS_UNLINK
                rc = vfs_unlink(nd.nd_dentry->d_inode, dentry);
#else
                rc = vfs_unlink(nd.nd_dentry->d_inode, dentry, nd.nd_mnt);
#endif /* HAVE_2ARGS_VFS_UNLINK */
exit2:
                dput(dentry);
        }
#ifdef HAVE_INODE_I_MUTEX
        mutex_unlock(&nd.nd_dentry->d_inode->i_mutex);
#else
        up(&nd.nd_dentry->d_inode->i_sem);
#endif /* HAVE_INODE_I_MUTEX */
        if (inode)
                iput(inode);    /* truncate the inode here */
exit1:
        vn_path_release(&nd);
exit:
        SRETURN(-rc);

slashes:
        rc = !dentry->d_inode ? -ENOENT :
                S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
        SGOTO(exit2, rc);
} /* vn_remove() */
EXPORT_SYMBOL(vn_remove);

/* Modified do_rename() from linux/fs/namei.c, only uses exported symbols */
int
vn_rename(const char *oldname, const char *newname, int x1)
{
        struct dentry *old_dir, *new_dir;
        struct dentry *old_dentry, *new_dentry;
        struct dentry *trap;
        struct nameidata oldnd, newnd;
        int rc = 0;
        SENTRY;

        rc = path_lookup(oldname, LOOKUP_PARENT, &oldnd);
        if (rc)
                SGOTO(exit, rc);

        rc = path_lookup(newname, LOOKUP_PARENT, &newnd);
        if (rc)
                SGOTO(exit1, rc);

        rc = -EXDEV;
        if (oldnd.nd_mnt != newnd.nd_mnt)
                SGOTO(exit2, rc);

        old_dir = oldnd.nd_dentry;
        rc = -EBUSY;
        if (oldnd.last_type != LAST_NORM)
                SGOTO(exit2, rc);

        new_dir = newnd.nd_dentry;
        if (newnd.last_type != LAST_NORM)
                SGOTO(exit2, rc);

        trap = lock_rename(new_dir, old_dir);

        old_dentry = vn_lookup_hash(&oldnd);

        rc = PTR_ERR(old_dentry);
        if (IS_ERR(old_dentry))
                SGOTO(exit3, rc);

        /* source must exist */
        rc = -ENOENT;
        if (!old_dentry->d_inode)
                SGOTO(exit4, rc);

        /* unless the source is a directory, trailing slashes give -ENOTDIR */
        if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
                rc = -ENOTDIR;
                if (oldnd.last.name[oldnd.last.len])
                        SGOTO(exit4, rc);
                if (newnd.last.name[newnd.last.len])
                        SGOTO(exit4, rc);
        }

        /* source should not be ancestor of target */
        rc = -EINVAL;
        if (old_dentry == trap)
                SGOTO(exit4, rc);

        new_dentry = vn_lookup_hash(&newnd);
        rc = PTR_ERR(new_dentry);
        if (IS_ERR(new_dentry))
                SGOTO(exit4, rc);

        /* target should not be an ancestor of source */
        rc = -ENOTEMPTY;
        if (new_dentry == trap)
                SGOTO(exit5, rc);

#ifdef HAVE_4ARGS_VFS_RENAME
        rc = vfs_rename(old_dir->d_inode, old_dentry,
                        new_dir->d_inode, new_dentry);
#else
        rc = vfs_rename(old_dir->d_inode, old_dentry, oldnd.nd_mnt,
                        new_dir->d_inode, new_dentry, newnd.nd_mnt);
#endif /* HAVE_4ARGS_VFS_RENAME */
exit5:
        dput(new_dentry);
exit4:
        dput(old_dentry);
exit3:
        unlock_rename(new_dir, old_dir);
exit2:
        vn_path_release(&newnd);
exit1:
        vn_path_release(&oldnd);
exit:
        SRETURN(-rc);
}
EXPORT_SYMBOL(vn_rename);

int
vn_getattr(vnode_t *vp, vattr_t *vap, int flags, void *x3, void *x4)
{
        struct file *fp;
        struct kstat stat;
        int rc;
        SENTRY;

        ASSERT(vp);
        ASSERT(vp->v_file);
        ASSERT(vap);

        fp = vp->v_file;

        rc = vfs_getattr(fp->f_vfsmnt, fp->f_dentry, &stat);
        if (rc)
                SRETURN(-rc);

        vap->va_type          = vn_get_sol_type(stat.mode);
        vap->va_mode          = stat.mode;
        vap->va_uid           = stat.uid;
        vap->va_gid           = stat.gid;
        vap->va_fsid          = 0;
        vap->va_nodeid        = stat.ino;
        vap->va_nlink         = stat.nlink;
        vap->va_size          = stat.size;
        vap->va_blocksize     = stat.blksize;
        vap->va_atime.tv_sec  = stat.atime.tv_sec;
        vap->va_atime.tv_usec = stat.atime.tv_nsec / NSEC_PER_USEC;
        vap->va_mtime.tv_sec  = stat.mtime.tv_sec;
        vap->va_mtime.tv_usec = stat.mtime.tv_nsec / NSEC_PER_USEC;
        vap->va_ctime.tv_sec  = stat.ctime.tv_sec;
        vap->va_ctime.tv_usec = stat.ctime.tv_nsec / NSEC_PER_USEC;
        vap->va_rdev          = stat.rdev;
        vap->va_blocks        = stat.blocks;

        SRETURN(0);
}
EXPORT_SYMBOL(vn_getattr);

int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{
        int datasync = 0;
        SENTRY;

        ASSERT(vp);
        ASSERT(vp->v_file);

        if (flags & FDSYNC)
                datasync = 1;

        SRETURN(-spl_filp_fsync(vp->v_file, datasync));
} /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync);

/* Function must be called while holding the vn_file_lock */
static file_t *
file_find(int fd)
{
        file_t *fp;

        ASSERT(spin_is_locked(&vn_file_lock));

        list_for_each_entry(fp, &vn_file_list, f_list) {
                if (fd == fp->f_fd) {
                        ASSERT(atomic_read(&fp->f_ref) != 0);
                        return fp;
                }
        }

        return NULL;
} /* file_find() */

file_t *
vn_getf(int fd)
{
        struct kstat stat;
        struct file *lfp;
        file_t *fp;
        vnode_t *vp;
        int rc = 0;
        SENTRY;

        /* Already open, just take an extra reference */
        spin_lock(&vn_file_lock);

        fp = file_find(fd);
        if (fp) {
                atomic_inc(&fp->f_ref);
                spin_unlock(&vn_file_lock);
                SRETURN(fp);
        }

        spin_unlock(&vn_file_lock);

        /* File was not yet opened, create the object and set it up */
        fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
        if (fp == NULL)
                SGOTO(out, rc);

        mutex_enter(&fp->f_lock);

        fp->f_fd = fd;
        fp->f_offset = 0;
        atomic_inc(&fp->f_ref);

        lfp = fget(fd);
        if (lfp == NULL)
                SGOTO(out_mutex, rc);

        vp = vn_alloc(KM_SLEEP);
        if (vp == NULL)
                SGOTO(out_fget, rc);

        if (vfs_getattr(lfp->f_vfsmnt, lfp->f_dentry, &stat))
                SGOTO(out_vnode, rc);

        mutex_enter(&vp->v_lock);
        vp->v_type = vn_get_sol_type(stat.mode);
        vp->v_file = lfp;
        mutex_exit(&vp->v_lock);

        fp->f_vnode = vp;
        fp->f_file = lfp;

        /* Put it on the tracking list */
        spin_lock(&vn_file_lock);
        list_add(&fp->f_list, &vn_file_list);
        spin_unlock(&vn_file_lock);

        mutex_exit(&fp->f_lock);
        SRETURN(fp);

out_vnode:
        vn_free(vp);
out_fget:
        fput(lfp);
out_mutex:
        mutex_exit(&fp->f_lock);
        kmem_cache_free(vn_file_cache, fp);
out:
        SRETURN(NULL);
} /* getf() */
EXPORT_SYMBOL(getf);

static void releasef_locked(file_t *fp)
{
        ASSERT(fp->f_file);
        ASSERT(fp->f_vnode);

        /* Unlinked from list, no refs, safe to free outside mutex */
        fput(fp->f_file);
        vn_free(fp->f_vnode);

        kmem_cache_free(vn_file_cache, fp);
}

void
vn_releasef(int fd)
{
        file_t *fp;
        SENTRY;

        spin_lock(&vn_file_lock);
        fp = file_find(fd);
        if (fp) {
                atomic_dec(&fp->f_ref);
                if (atomic_read(&fp->f_ref) > 0) {
                        spin_unlock(&vn_file_lock);
                        SEXIT;
                        return;
                }

                list_del(&fp->f_list);
                releasef_locked(fp);
        }
        spin_unlock(&vn_file_lock);

        SEXIT;
        return;
} /* releasef() */
EXPORT_SYMBOL(releasef);
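
/*
 * Illustrative usage sketch: getf() maps a process file descriptor passed
 * in from user space (e.g. by an ioctl handler) to a tracked file_t, and
 * every successful getf() must be balanced by a releasef() on the same fd.
 * The descriptor below is hypothetical.
 *
 *     file_t *fp;
 *
 *     fp = getf(fd);
 *     if (fp != NULL) {
 *             ... perform I/O through fp->f_vnode or fp->f_file ...
 *             releasef(fd);
 *     }
 */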

#ifndef HAVE_SET_FS_PWD
# ifdef HAVE_2ARGS_SET_FS_PWD
/* Used from 2.6.25 - 2.6.31+ */
void
set_fs_pwd(struct fs_struct *fs, struct path *path)
{
        struct path old_pwd;

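        /* In Linux 2.6.36 fs_struct->lock changed from a rwlock_t to a
         * spinlock_t, so take whichever lock type the running kernel
         * provides (detected at configure time as HAVE_FS_STRUCT_SPINLOCK). */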
#  ifdef HAVE_FS_STRUCT_SPINLOCK
        spin_lock(&fs->lock);
        old_pwd = fs->pwd;
        fs->pwd = *path;
        path_get(path);
        spin_unlock(&fs->lock);
#  else
        write_lock(&fs->lock);
        old_pwd = fs->pwd;
        fs->pwd = *path;
        path_get(path);
        write_unlock(&fs->lock);
#  endif /* HAVE_FS_STRUCT_SPINLOCK */

        if (old_pwd.dentry)
                path_put(&old_pwd);
}
# else
/* Used from 2.6.11 - 2.6.24 */
void
set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt, struct dentry *dentry)
{
        struct dentry *old_pwd;
        struct vfsmount *old_pwdmnt;

        write_lock(&fs->lock);
        old_pwd = fs->pwd;
        old_pwdmnt = fs->pwdmnt;
        fs->pwdmnt = mntget(mnt);
        fs->pwd = dget(dentry);
        write_unlock(&fs->lock);

        if (old_pwd) {
                dput(old_pwd);
                mntput(old_pwdmnt);
        }
}
# endif /* HAVE_2ARGS_SET_FS_PWD */
#endif /* HAVE_SET_FS_PWD */

int
vn_set_pwd(const char *filename)
{
#if defined(HAVE_2ARGS_SET_FS_PWD) && defined(HAVE_USER_PATH_DIR)
        struct path path;
#else
        struct nameidata nd;
#endif /* HAVE_2ARGS_SET_FS_PWD */
        mm_segment_t saved_fs;
        int rc;
        SENTRY;

        /*
         * user_path_dir() and __user_walk() both expect 'filename' to be
         * a user space address so we must briefly increase the data segment
         * size to ensure strncpy_from_user() does not fail with -EFAULT.
         */
        saved_fs = get_fs();
        set_fs(get_ds());

#ifdef HAVE_2ARGS_SET_FS_PWD
# ifdef HAVE_USER_PATH_DIR
        rc = user_path_dir(filename, &path);
        if (rc)
                SGOTO(out, rc);

        rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
        if (rc)
                SGOTO(dput_and_out, rc);

        set_fs_pwd(current->fs, &path);

dput_and_out:
        path_put(&path);
# else
        rc = __user_walk(filename,
                         LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_CHDIR, &nd);
        if (rc)
                SGOTO(out, rc);

        rc = vfs_permission(&nd, MAY_EXEC);
        if (rc)
                SGOTO(dput_and_out, rc);

        set_fs_pwd(current->fs, &nd.path);

dput_and_out:
        path_put(&nd.path);
# endif /* HAVE_USER_PATH_DIR */
#else
        rc = __user_walk(filename,
                         LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_CHDIR, &nd);
        if (rc)
                SGOTO(out, rc);

        rc = vfs_permission(&nd, MAY_EXEC);
        if (rc)
                SGOTO(dput_and_out, rc);

        set_fs_pwd(current->fs, nd.nd_mnt, nd.nd_dentry);

dput_and_out:
        vn_path_release(&nd);
#endif /* HAVE_2ARGS_SET_FS_PWD */
out:
        set_fs(saved_fs);

        SRETURN(-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);
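
/*
 * Usage note (illustrative): a caller would typically invoke
 * vn_set_pwd("/") once at module load so that relative path lookups made
 * from kernel context resolve from a known working directory.
 */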

static int
vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
        struct vnode *vp = buf;

        mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);

        return (0);
} /* vn_cache_constructor() */

static void
vn_cache_destructor(void *buf, void *cdrarg)
{
        struct vnode *vp = buf;

        mutex_destroy(&vp->v_lock);
} /* vn_cache_destructor() */

static int
vn_file_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
        file_t *fp = buf;

        atomic_set(&fp->f_ref, 0);
        mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
        INIT_LIST_HEAD(&fp->f_list);

        return (0);
} /* vn_file_cache_constructor() */

static void
vn_file_cache_destructor(void *buf, void *cdrarg)
{
        file_t *fp = buf;

        mutex_destroy(&fp->f_lock);
} /* vn_file_cache_destructor() */

int
vn_init(void)
{
        SENTRY;
        vn_cache = kmem_cache_create("spl_vn_cache",
                                     sizeof(struct vnode), 64,
                                     vn_cache_constructor,
                                     vn_cache_destructor,
                                     NULL, NULL, NULL, 0);

        vn_file_cache = kmem_cache_create("spl_vn_file_cache",
                                          sizeof(file_t), 64,
                                          vn_file_cache_constructor,
                                          vn_file_cache_destructor,
                                          NULL, NULL, NULL, 0);
        SRETURN(0);
} /* vn_init() */

void
vn_fini(void)
{
        file_t *fp, *next_fp;
        int leaked = 0;
        SENTRY;

        spin_lock(&vn_file_lock);

        list_for_each_entry_safe(fp, next_fp, &vn_file_list, f_list) {
                list_del(&fp->f_list);
                releasef_locked(fp);
                leaked++;
        }

        spin_unlock(&vn_file_lock);

        /* Destroy the cache only after dropping vn_file_lock since
         * kmem_cache_destroy() may sleep. */
        kmem_cache_destroy(vn_file_cache);
        vn_file_cache = NULL;

        if (leaked > 0)
                SWARN("Warning %d files leaked\n", leaked);

        kmem_cache_destroy(vn_cache);

        SEXIT;
        return;
} /* vn_fini() */