granicus.if.org Git - zfs/commitdiff
Commit bulk of remaining 2.6.9 and 2.6.26 compat changes.
author    behlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>
          Mon, 11 Aug 2008 22:13:47 +0000 (22:13 +0000)
committer behlendo <behlendo@7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c>
          Mon, 11 Aug 2008 22:13:47 +0000 (22:13 +0000)
git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@155 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c

12 files changed:
autoconf/spl-build.m4
configure.ac
include/sys/kmem.h
include/sys/taskq.h
modules/spl/spl-debug.c
modules/spl/spl-generic.c
modules/spl/spl-kmem.c
modules/spl/spl-taskq.c
modules/spl/spl-thread.c
modules/spl/spl-time.c
modules/spl/spl-vnode.c
modules/splat/splat-vnode.c

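The changes follow the SPL's usual compatibility pattern: each new SPL_AC_* macro in spl-build.m4 probes the kernel headers at configure time and defines a HAVE_* symbol, which the C code then tests with #ifdef to pick the new or the legacy interface. As a minimal sketch of that shape, mirroring the HAVE_INIT_UTSNAME case added below (the function name here is illustrative, not part of the patch):

#include <linux/utsname.h>

/* Sketch only: HAVE_INIT_UTSNAME is defined by the new SPL_AC_INIT_UTSNAME
 * check when init_utsname() exists (2.6.19 and later); older kernels fall
 * back to the global system_utsname. */
static struct new_utsname *
example_utsname(void)
{
#ifdef HAVE_INIT_UTSNAME
        return init_utsname();
#else
        return &system_utsname;
#endif
}

The same HAVE_*-guarded structure appears below for files_fdtable(), kmalloc_node(), monotonic_clock(), and the inode i_mutex/i_sem locking.
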
index 84827cf9cc6642599143d4017a3e8d198db6a2de..e12a2e3ebb279055462f38f6aac7f4ef4be9f2af 100644 (file)
@@ -310,7 +310,7 @@ AC_DEFUN([SPL_CHECK_HEADER],
 ])
 
 dnl #
-dnl # 2.6.x API change
+dnl # 2.6.24 API change,
 dnl # check if uintptr_t typedef is defined
 dnl #
 AC_DEFUN([SPL_AC_TYPE_UINTPTR_T],
@@ -329,7 +329,7 @@ AC_DEFUN([SPL_AC_TYPE_UINTPTR_T],
 ])
 
 dnl #
-dnl # 2.6.19 API change
+dnl # 2.6.19 API change,
 dnl # panic_notifier_list use atomic_notifier operations
 dnl #
 
@@ -350,7 +350,7 @@ AC_DEFUN([SPL_AC_ATOMIC_PANIC_NOTIFIER],
 ])
 
 dnl #
-dnl # 2.6.20 API change
+dnl # 2.6.20 API change,
 dnl # INIT_WORK use 2 args and not store data inside
 dnl #
 AC_DEFUN([SPL_AC_3ARGS_INIT_WORK],
@@ -371,7 +371,7 @@ AC_DEFUN([SPL_AC_3ARGS_INIT_WORK],
 ])
 
 dnl #
-dnl # 2.6.21 api change.
+dnl # 2.6.21 API change,
 dnl # 'register_sysctl_table' use only one argument instead of two
 dnl #
 AC_DEFUN([SPL_AC_2ARGS_REGISTER_SYSCTL],
@@ -390,7 +390,7 @@ AC_DEFUN([SPL_AC_2ARGS_REGISTER_SYSCTL],
 ])
 
 dnl #
-dnl # 2.6.x API change
+dnl # 2.6.23 API change
 dnl # Old set_shrinker API replaced with register_shrinker
 dnl #
 AC_DEFUN([SPL_AC_SET_SHRINKER], [
@@ -409,7 +409,7 @@ AC_DEFUN([SPL_AC_SET_SHRINKER], [
 ])
 
 dnl #
-dnl # 2.6.x API change
+dnl # 2.6.25 API change,
 dnl # struct path entry added to struct nameidata
 dnl #
 AC_DEFUN([SPL_AC_PATH_IN_NAMEIDATA],
@@ -440,7 +440,7 @@ AC_DEFUN([SPL_AC_TASK_CURR], [
 ])
 
 dnl #
-dnl # 2.6.x API change
+dnl # 2.6.19 API change,
 dnl # Use CTL_UNNUMBERED when binary sysctl is not required
 dnl #
 AC_DEFUN([SPL_AC_CTL_UNNUMBERED],
@@ -506,18 +506,6 @@ AC_DEFUN([SPL_AC_CLASS_DEVICE_CREATE], [
                [])
 ])
 
-dnl #
-dnl # 2.6.26 API change, set_normalized_timespec() is exported.
-dnl #
-AC_DEFUN([SPL_AC_CLASS_DEVICE_CREATE], [
-       SPL_CHECK_SYMBOL_EXPORT(
-               [class_device_create],
-               [drivers/base/class.c],
-               [AC_DEFINE(HAVE_CLASS_DEVICE_CREATE, 1,
-               [class_device_create() is available])],
-               [])
-])
-
 dnl #
 dnl # 2.6.26 API change, set_normalized_timespec() is exported.
 dnl #
@@ -540,7 +528,7 @@ AC_DEFUN([SPL_AC_SET_NORMALIZED_TIMESPEC_INLINE], [
                #include <linux/time.h>
        ],[
                void set_normalized_timespec(struct timespec *ts,
-                                            time_t sec, long nsec) { }
+                                            time_t sec, long nsec);
        ],[
                AC_MSG_RESULT(no)
        ],[
@@ -570,17 +558,110 @@ AC_DEFUN([SPL_AC_TIMESPEC_SUB], [
 ])
 
 dnl #
-dnl # 2.6.26 API change
-dnl # Definition of struct fdtable relocated to linux/fdtable.h
+dnl # 2.6.19 API change,
+dnl # check if init_utsname() is available in linux/utsname.h
+dnl #
+AC_DEFUN([SPL_AC_INIT_UTSNAME], [
+       AC_MSG_CHECKING([whether init_utsname() is available])
+       SPL_LINUX_TRY_COMPILE([
+               #include <linux/utsname.h>
+       ],[
+               struct new_utsname *a = init_utsname();
+       ],[
+               AC_MSG_RESULT(yes)
+               AC_DEFINE(HAVE_INIT_UTSNAME, 1, [init_utsname() is available])
+       ],[
+               AC_MSG_RESULT(no)
+       ])
+])
+
+dnl #
+dnl # 2.6.26 API change,
+dnl # definition of struct fdtable relocated to linux/fdtable.h
 dnl #
 AC_DEFUN([SPL_AC_FDTABLE_HEADER], [
        SPL_CHECK_HEADER([linux/fdtable.h], [FDTABLE], [], [])
 ])
 
 dnl #
-dnl # 2.6.18 API change
-dnl # Added linux/uaccess.h
+dnl # 2.6.14 API change,
+dnl # check whether 'files_fdtable()' exists
+dnl #
+AC_DEFUN([SPL_AC_FILES_FDTABLE], [
+       AC_MSG_CHECKING([whether files_fdtable() is available])
+       SPL_LINUX_TRY_COMPILE([
+               #include <linux/sched.h>
+               #include <linux/file.h>
+               #ifdef HAVE_FDTABLE_HEADER
+               #include <linux/fdtable.h>
+               #endif
+       ],[
+               struct files_struct *files = current->files;
+               struct fdtable *fdt = files_fdtable(files);
+       ],[
+               AC_MSG_RESULT(yes)
+               AC_DEFINE(HAVE_FILES_FDTABLE, 1, [files_fdtable() is available])
+       ],[
+               AC_MSG_RESULT(no)
+       ])
+])
+
+dnl #
+dnl # 2.6.18 API change,
+dnl # added linux/uaccess.h
 dnl #
 AC_DEFUN([SPL_AC_UACCESS_HEADER], [
        SPL_CHECK_HEADER([linux/uaccess.h], [UACCESS], [], [])
 ])
+
+dnl #
+dnl # 2.6.12 API change,
+dnl # check whether 'kmalloc_node()' is available.
+dnl #
+AC_DEFUN([SPL_AC_KMALLOC_NODE], [
+       AC_MSG_CHECKING([whether kmalloc_node() is available])
+       SPL_LINUX_TRY_COMPILE([
+               #include <linux/slab.h>
+       ],[
+               void *a = kmalloc_node(1, GFP_KERNEL, 0);
+       ],[
+               AC_MSG_RESULT(yes)
+               AC_DEFINE(HAVE_KMALLOC_NODE, 1, [kmalloc_node() is available])
+       ],[
+               AC_MSG_RESULT(no)
+       ])
+])
+
+dnl #
+dnl # 2.6.9 API change,
+dnl # check whether 'monotonic_clock()' is available; it may
+dnl # be available for some archs but not others.
+dnl #
+AC_DEFUN([SPL_AC_MONOTONIC_CLOCK], [
+       SPL_CHECK_SYMBOL_EXPORT(
+               [monotonic_clock],
+               [],
+               [AC_DEFINE(HAVE_MONOTONIC_CLOCK, 1,
+               [monotonic_clock() is available])],
+               [])
+])
+
+dnl #
+dnl # 2.6.16 API change,
+dnl # check whether 'struct inode' has i_mutex
+dnl #
+AC_DEFUN([SPL_AC_INODE_I_MUTEX], [
+       AC_MSG_CHECKING([whether struct inode has i_mutex])
+       SPL_LINUX_TRY_COMPILE([
+               #include <linux/fs.h>
+               #include <linux/mutex.h>
+       ],[
+               struct inode i;
+               mutex_init(&i.i_mutex);
+       ],[
+               AC_MSG_RESULT(yes)
+               AC_DEFINE(HAVE_INODE_I_MUTEX, 1, [struct inode has i_mutex])
+       ],[
+               AC_MSG_RESULT(no)
+       ])
+])
index 3a082375239aebadd50daba4cacd04599d186389..74b043b326096650d90fc29bea2bd61f6c78d19d 100644 (file)
@@ -57,8 +57,13 @@ SPL_AC_CLASS_DEVICE_CREATE
 SPL_AC_SET_NORMALIZED_TIMESPEC_EXPORT
 SPL_AC_SET_NORMALIZED_TIMESPEC_INLINE
 SPL_AC_TIMESPEC_SUB
+SPL_AC_INIT_UTSNAME
 SPL_AC_FDTABLE_HEADER
+SPL_AC_FILES_FDTABLE
 SPL_AC_UACCESS_HEADER
+SPL_AC_KMALLOC_NODE
+SPL_AC_MONOTONIC_CLOCK
+SPL_AC_INODE_I_MUTEX
 
 TOPDIR=`/bin/pwd`
 
index dd1514fb95429a60ebed657385bf54517b699b34..1ca2c261a3c56cbefa297df3afed674524e6d6d8 100644 (file)
@@ -54,6 +54,13 @@ extern "C" {
 #define KM_VMFLAGS                      GFP_LEVEL_MASK
 #define KM_FLAGS                        __GFP_BITS_MASK
 
+/*
+ * Used internally; the kernel does not need to support this flag.
+ */
+#ifndef __GFP_ZERO
+#define __GFP_ZERO                     0x8000
+#endif
+
 #ifdef DEBUG_KMEM
 extern atomic64_t kmem_alloc_used;
 extern unsigned long kmem_alloc_max;
@@ -113,7 +120,7 @@ __kmem_del_init(spinlock_t *lock,struct hlist_head *table,int bits,void *addr)
         return NULL;
 }
 
-#define __kmem_alloc(size, flags, allocator)                                  \
+#define __kmem_alloc(size, flags, allocator, args...)                         \
 ({      void *_ptr_ = NULL;                                                   \
         kmem_debug_t *_dptr_;                                                 \
         unsigned long _flags_;                                                \
@@ -133,7 +140,7 @@ __kmem_del_init(spinlock_t *lock,struct hlist_head *table,int bits,void *addr)
                                       atomic64_read(&kmem_alloc_used),       \
                                       kmem_alloc_max);                       \
                                                                               \
-                _ptr_ = (void *)allocator((size), (flags));                   \
+                _ptr_ = (void *)allocator((size), (flags), ## args);          \
                 if (_ptr_ == NULL) {                                          \
                         kfree(_dptr_);                                        \
                         __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "          \
@@ -273,7 +280,7 @@ __kmem_del_init(spinlock_t *lock,struct hlist_head *table,int bits,void *addr)
 
 #else /* DEBUG_KMEM_TRACKING */
 
-#define __kmem_alloc(size, flags, allocator)                                  \
+#define __kmem_alloc(size, flags, allocator, args...)                         \
 ({      void *_ptr_ = NULL;                                                   \
                                                                               \
        /* Marked unlikely because we should never be doing this, */          \
@@ -285,7 +292,7 @@ __kmem_del_init(spinlock_t *lock,struct hlist_head *table,int bits,void *addr)
                               atomic64_read(&kmem_alloc_used),               \
                               kmem_alloc_max);                               \
                                                                               \
-        _ptr_ = (void *)allocator((size), (flags));                           \
+        _ptr_ = (void *)allocator((size), (flags), ## args);                  \
         if (_ptr_ == NULL) {                                                  \
                 __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "                  \
                               "kmem_alloc(%d, 0x%x) failed (%ld/"            \
@@ -370,6 +377,14 @@ __kmem_del_init(spinlock_t *lock,struct hlist_head *table,int bits,void *addr)
 #define kmem_alloc(size, flags)         __kmem_alloc((size), (flags), kmalloc)
 #define kmem_zalloc(size, flags)        __kmem_alloc((size), (flags), kzalloc)
 
+#ifdef HAVE_KMALLOC_NODE
+#define kmem_alloc_node(size, flags, node)                                    \
+       __kmem_alloc((size), (flags), kmalloc_node, node)
+#else
+#define kmem_alloc_node(size, flags, node)                                    \
+       __kmem_alloc((size), (flags), kmalloc)
+#endif
+
 #define vmem_alloc(size, flags)         __vmem_alloc((size), (flags))
 #define vmem_zalloc(size, flags)        __vmem_alloc((size), ((flags) | __GFP_ZERO))
 
@@ -379,6 +394,14 @@ __kmem_del_init(spinlock_t *lock,struct hlist_head *table,int bits,void *addr)
 #define kmem_zalloc(size, flags)        kzalloc((size), (flags))
 #define kmem_free(ptr, size)            kfree(ptr)
 
+#ifdef HAVE_KMALLOC_NODE
+#define kmem_alloc_node(size, flags, node)                                    \
+       kmalloc_node((size), (flags), (node))
+#else
+#define kmem_alloc_node(size, flags, node)                                    \
+       kmalloc((size), (flags))
+#endif
+
 #define vmem_alloc(size, flags)         __vmalloc((size), ((flags) |          \
                                        __GFP_HIGHMEM), PAGE_KERNEL)
 #define vmem_zalloc(size, flags)                                              \
@@ -555,14 +578,6 @@ void spl_kmem_fini(void);
 #define kmem_virt(ptr)                 (((ptr) >= (void *)VMALLOC_START) && \
                                         ((ptr) <  (void *)VMALLOC_END))
 
-#ifdef HAVE_KMEM_CACHE_CREATE_DTOR
-#define __kmem_cache_create(name, size, align, flags, ctor, dtor) \
-        kmem_cache_create(name, size, align, flags, ctor, dtor)
-#else
-#define __kmem_cache_create(name, size, align, flags, ctor, dtor) \
-        kmem_cache_create(name, size, align, flags, ctor)
-#endif /* HAVE_KMEM_CACHE_CREATE_DTOR */
-
 #ifdef __cplusplus
 }
 #endif
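
For context on the kmem.h hunk above: __kmem_alloc() now accepts trailing arguments (args...) so that kmem_alloc_node() can forward a NUMA node hint to kmalloc_node() when HAVE_KMALLOC_NODE is set; without it the macro degrades to a plain kmalloc() and the hint is ignored. A minimal, hypothetical caller (the function name and error handling are illustrative only):

#include <sys/kmem.h>

/* Sketch only: allocate a scratch buffer near the given node using the
 * new kmem_alloc_node() wrapper; pre-2.6.12 kernels silently ignore the
 * node argument. Release it with kmem_free() and the same size. */
static void *
example_node_alloc(int node, size_t size)
{
        void *buf = kmem_alloc_node(size, GFP_KERNEL, node);

        if (buf == NULL)
                return NULL;

        return buf;
}

This is the pattern spl_magazine_alloc() in spl-kmem.c adopts further down, replacing its direct kmalloc_node() call.
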
index 11b1cbbfb3ba08843c7d615d63f23fe973dd9412..fbcc5556de83af39077a042b403b4cfbd53431b6 100644 (file)
@@ -37,7 +37,6 @@ extern "C" {
 #include <linux/interrupt.h>
 #include <linux/kthread.h>
 #include <sys/types.h>
-#include <sys/kmem.h>
 
 #define TASKQ_NAMELEN           31
 
@@ -60,14 +59,6 @@ typedef void (task_func_t)(void *);
 #define TQ_NEW                  0x04000000
 #define TQ_ACTIVE               0x80000000
 
-typedef struct task {
-       spinlock_t              t_lock;
-       struct list_head        t_list;
-       taskqid_t               t_id;
-        task_func_t             *t_func;
-        void                    *t_arg;
-} task_t;
-
 typedef struct taskq {
         spinlock_t              tq_lock;       /* protects taskq_t */
         struct task_struct      **tq_threads;  /* thread pointers */
index 1fd38e30a062383f048fbec772c03bbb97a8a28a..99f61d74fa021a171af62a2eacbb644b85de33da 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
 #include <linux/interrupt.h>
+#include <linux/notifier.h>
 #include <sys/sysmacros.h>
 #include <sys/proc.h>
 #include <sys/debug.h>
@@ -375,7 +376,7 @@ spl_debug_dumplog_thread(void *arg)
         spl_debug_dumplog_internal(dp);
        atomic_set(&dp->dp_done, 1);
         wake_up(&dp->dp_waitq);
-        do_exit(0);
+       complete_and_exit(NULL, 0);
 
         return 0; /* Unreachable */
 }
index 7a073ee52d41c574878cc0a0a42c7e6eff825bc0..7a818add8389c98f0df64ccbea173c796b357faa 100644 (file)
@@ -59,7 +59,7 @@ int
 highbit(unsigned long i)
 {
         register int h = 1;
-       ENTRY;
+        ENTRY;
 
         if (i == 0)
                 RETURN(0);
@@ -97,7 +97,11 @@ EXPORT_SYMBOL(ddi_strtoul);
 
 struct new_utsname *__utsname(void)
 {
+#ifdef HAVE_INIT_UTSNAME
        return init_utsname();
+#else
+       return &system_utsname;
+#endif
 }
 EXPORT_SYMBOL(__utsname);
 
index fc238a397f08b4b87149ab8150c685eb85f31368..208f11b8e8bfdfbd741c5d7d9ce292662c694a78 100644 (file)
@@ -370,7 +370,7 @@ spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
                   sizeof(void *) * skc->skc_mag_size;
        ENTRY;
 
-       skm = kmalloc_node(size, GFP_KERNEL, node);
+       skm = kmem_alloc_node(size, GFP_KERNEL, node);
        if (skm) {
                skm->skm_magic = SKM_MAGIC;
                skm->skm_avail = 0;
index 9c820b7c1c253203bc520e7de49f5aa146ff7647..21d63c87521f852a815d32f4af745f22e4d092ce 100644 (file)
@@ -25,6 +25,7 @@
  */
 
 #include <sys/taskq.h>
+#include <sys/kmem.h>
 
 #ifdef DEBUG_SUBSYSTEM
 #undef DEBUG_SUBSYSTEM
 
 #define DEBUG_SUBSYSTEM S_TASKQ
 
+typedef struct spl_task {
+        spinlock_t              t_lock;
+        struct list_head        t_list;
+        taskqid_t               t_id;
+        task_func_t             *t_func;
+        void                    *t_arg;
+} spl_task_t;
+
 /* NOTE: Must be called with tq->tq_lock held, returns a list_t which
  * is not attached to the free, work, or pending taskq lists.
  */
-static task_t *
+static spl_task_t *
 task_alloc(taskq_t *tq, uint_t flags)
 {
-        task_t *t;
+        spl_task_t *t;
         int count = 0;
         ENTRY;
 
         ASSERT(tq);
         ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP));               /* One set */
         ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP))); /* Not both */
-       ASSERT(spin_is_locked(&tq->tq_lock));
+        ASSERT(spin_is_locked(&tq->tq_lock));
 retry:
-        /* Acquire task_t's from free list if available */
+        /* Acquire spl_task_t's from free list if available */
         if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
-                t = list_entry(tq->tq_free_list.next, task_t, t_list);
-               list_del_init(&t->t_list);
-               RETURN(t);
+                t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
+                list_del_init(&t->t_list);
+                RETURN(t);
         }
 
         /* Free list is empty and memory allocs are prohibited */
         if (flags & TQ_NOALLOC)
                 RETURN(NULL);
 
-        /* Hit maximum task_t pool size */
+        /* Hit maximum spl_task_t pool size */
         if (tq->tq_nalloc >= tq->tq_maxalloc) {
                 if (flags & TQ_NOSLEEP)
                         RETURN(NULL);
 
                 /* Sleep periodically polling the free list for an available
-                 * task_t.  If a full second passes and we have not found
+                 * spl_task_t.  If a full second passes and we have not found
                  * one, give up and return NULL to the caller. */
                 if (flags & TQ_SLEEP) {
                         spin_unlock_irq(&tq->tq_lock);
@@ -81,7 +90,7 @@ retry:
         }
 
        spin_unlock_irq(&tq->tq_lock);
-        t = kmem_alloc(sizeof(task_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
+        t = kmem_alloc(sizeof(spl_task_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
         spin_lock_irq(&tq->tq_lock);
 
        if (t) {
@@ -96,11 +105,11 @@ retry:
         RETURN(t);
 }
 
-/* NOTE: Must be called with tq->tq_lock held, expects the task_t
+/* NOTE: Must be called with tq->tq_lock held, expects the spl_task_t
  * to already be removed from the free, work, or pending taskq lists.
  */
 static void
-task_free(taskq_t *tq, task_t *t)
+task_free(taskq_t *tq, spl_task_t *t)
 {
         ENTRY;
 
@@ -109,17 +118,17 @@ task_free(taskq_t *tq, task_t *t)
        ASSERT(spin_is_locked(&tq->tq_lock));
        ASSERT(list_empty(&t->t_list));
 
-        kmem_free(t, sizeof(task_t));
+        kmem_free(t, sizeof(spl_task_t));
         tq->tq_nalloc--;
 
        EXIT;
 }
 
 /* NOTE: Must be called with tq->tq_lock held, either destroys the
- * task_t if too many exist or moves it to the free list for later use.
+ * spl_task_t if too many exist or moves it to the free list for later use.
  */
 static void
-task_done(taskq_t *tq, task_t *t)
+task_done(taskq_t *tq, spl_task_t *t)
 {
        ENTRY;
        ASSERT(tq);
@@ -207,7 +216,7 @@ EXPORT_SYMBOL(__taskq_member);
 taskqid_t
 __taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
 {
-        task_t *t;
+        spl_task_t *t;
        taskqid_t rc = 0;
         ENTRY;
 
@@ -254,7 +263,7 @@ static taskqid_t
 taskq_lowest_id(taskq_t *tq)
 {
        taskqid_t lowest_id = ~0;
-        task_t *t;
+        spl_task_t *t;
        ENTRY;
 
        ASSERT(tq);
@@ -278,7 +287,7 @@ taskq_thread(void *args)
         sigset_t blocked;
        taskqid_t id;
         taskq_t *tq = args;
-        task_t *t;
+        spl_task_t *t;
        ENTRY;
 
         ASSERT(tq);
@@ -306,7 +315,7 @@ taskq_thread(void *args)
 
                remove_wait_queue(&tq->tq_work_waitq, &wait);
                 if (!list_empty(&tq->tq_pend_list)) {
-                        t = list_entry(tq->tq_pend_list.next, task_t, t_list);
+                        t = list_entry(tq->tq_pend_list.next, spl_task_t, t_list);
                         list_del_init(&t->t_list);
                        list_add_tail(&t->t_list, &tq->tq_work_list);
                         tq->tq_nactive++;
@@ -418,7 +427,7 @@ EXPORT_SYMBOL(__taskq_create);
 void
 __taskq_destroy(taskq_t *tq)
 {
-       task_t *t;
+       spl_task_t *t;
        int i, nthreads;
        ENTRY;
 
@@ -438,7 +447,7 @@ __taskq_destroy(taskq_t *tq)
         spin_lock_irq(&tq->tq_lock);
 
         while (!list_empty(&tq->tq_free_list)) {
-               t = list_entry(tq->tq_free_list.next, task_t, t_list);
+               t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
                list_del_init(&t->t_list);
                 task_free(tq, t);
         }
@@ -450,7 +459,7 @@ __taskq_destroy(taskq_t *tq)
         ASSERT(list_empty(&tq->tq_pend_list));
 
         spin_unlock_irq(&tq->tq_lock);
-        kmem_free(tq->tq_threads, nthreads * sizeof(task_t *));
+        kmem_free(tq->tq_threads, nthreads * sizeof(spl_task_t *));
         kmem_free(tq, sizeof(taskq_t));
 
        EXIT;
index 7a9ad4cb3de64b9e0c16be1289c2916c5d0d66bf..62d2fa329db54de8144ed5cb523ba0c2e698e872 100644 (file)
@@ -73,7 +73,7 @@ __thread_exit(void)
 {
        ENTRY;
        EXIT;
-       do_exit(0);
+       complete_and_exit(NULL, 0);
        /* Unreachable */
 }
 EXPORT_SYMBOL(__thread_exit);
index c8cd048d55b48af17bbd8183a25f108ea90e732e..93945ddbc3b1db9a3733f934cc88738c41d012f6 100644 (file)
 #include <sys/sysmacros.h>
 #include <sys/time.h>
 
+#ifdef HAVE_MONOTONIC_CLOCK
+extern unsigned long long monotonic_clock(void);
+#endif
+
 #ifdef DEBUG_SUBSYSTEM
 #undef DEBUG_SUBSYSTEM
 #endif
 void
 __gethrestime(timestruc_t *ts)
 {
-       getnstimeofday((struct timespec *)ts);
-}
-EXPORT_SYMBOL(__gethrestime);
+        struct timeval tv;
 
-int
-__clock_gettime(clock_type_t type, timespec_t *tp)
-{
-       /* Only support CLOCK_REALTIME+__CLOCK_REALTIME0 for now */
-        ASSERT((type == CLOCK_REALTIME) || (type == __CLOCK_REALTIME0));
-
-        getnstimeofday(tp);
-        return 0;
+       do_gettimeofday(&tv);
+       ts->tv_sec = tv.tv_sec;
+       ts->tv_nsec = tv.tv_usec * NSEC_PER_USEC;
 }
-EXPORT_SYMBOL(__clock_gettime);
+EXPORT_SYMBOL(__gethrestime);
 
-/* This function may not be as fast as using monotonic_clock() but it
- * should be much more portable, if performance becomes as issue we can
- * look at using monotonic_clock() for x86_64 and x86 arches.
+/* Use monotonic_clock() by default. It is faster and is available on older
+ * kernels, but few architectures provide it, so we must fall back to
+ * do_posix_clock_monotonic_gettime().
  */
 hrtime_t
 __gethrtime(void) {
+#ifdef HAVE_MONOTONIC_CLOCK
+       unsigned long long res = monotonic_clock();
+
+       /* Deal with signed/unsigned mismatch */
+       return (hrtime_t)(res & ~(1ULL << (BITS_PER_LONG - 1)));
+#else
         timespec_t tv;
         hrtime_t rc;
 
@@ -64,12 +67,13 @@ __gethrtime(void) {
         rc = (NSEC_PER_SEC * (hrtime_t)tv.tv_sec) + (hrtime_t)tv.tv_nsec;
 
         return rc;
+#endif
 }
 EXPORT_SYMBOL(__gethrtime);
 
 /* set_normalized_timespec() API changes
  * 2.6.0  - 2.6.15: Inline function provided by linux/time.h
- * 2.6.16 - 2.6.25: Function prototypedefined but not exported
+ * 2.6.16 - 2.6.25: Function prototype defined but not exported
  * 2.6.26 - 2.6.x:  Function defined and exported
  */
 #if !defined(HAVE_SET_NORMALIZED_TIMESPEC_INLINE) && \
index bf9b3e97c6dfd5a372027cb216a2477e6ea035a6..17336a1a22503e757c0d9778af2ad85741503dc9 100644 (file)
@@ -261,10 +261,10 @@ vn_remove(const char *path, uio_seg_t seg, int flags)
         struct nameidata nd;
         struct inode *inode = NULL;
         int rc = 0;
-       ENTRY;
+        ENTRY;
 
-       ASSERT(seg == UIO_SYSSPACE);
-       ASSERT(flags == RMFILE);
+        ASSERT(seg == UIO_SYSSPACE);
+        ASSERT(flags == RMFILE);
 
         rc = path_lookup(path, LOOKUP_PARENT, &nd);
         if (rc)
@@ -274,7 +274,11 @@ vn_remove(const char *path, uio_seg_t seg, int flags)
         if (nd.last_type != LAST_NORM)
                 GOTO(exit1, rc);
 
+#ifdef HAVE_INODE_I_MUTEX
         mutex_lock_nested(&nd.nd_dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+#else
+        down(&nd.nd_dentry->d_inode->i_sem);
+#endif
         dentry = vn_lookup_hash(&nd);
         rc = PTR_ERR(dentry);
         if (!IS_ERR(dentry)) {
@@ -289,7 +293,11 @@ vn_remove(const char *path, uio_seg_t seg, int flags)
 exit2:
                 dput(dentry);
         }
+#ifdef HAVE_INODE_I_MUTEX
         mutex_unlock(&nd.nd_dentry->d_inode->i_mutex);
+#else
+        up(&nd.nd_dentry->d_inode->i_sem);
+#endif
         if (inode)
                 iput(inode);    /* truncate the inode here */
 exit1:
index 6f97503880c08243c4a727bdeb3421913a5f9807..c85b6165a8e7da406d5878a5a8752bd810992ba9 100644 (file)
@@ -357,6 +357,7 @@ fd_uninstall(int fd)
         struct fdtable *fdt;
 
         spin_lock(&files->file_lock);
+#ifdef HAVE_FILES_FDTABLE
         fdt = files_fdtable(files);
 
         if (fd >= fdt->max_fds)
@@ -368,10 +369,20 @@ fd_uninstall(int fd)
 
         rcu_assign_pointer(fdt->fd[fd], NULL);
         FD_CLR(fd, fdt->close_on_exec);
+#else
+        if (fd >= files->max_fds)
+                goto out_unlock;
+
+        fp = files->fd[fd];
+        if (!fp)
+                goto out_unlock;
 
-        /* Dropping the lock here exposes a minor race but it allows me
-         * to use the existing kernel interfaces for this, and for a test
-         * case I think that's reasonable. */
+        files->fd[fd] = NULL;
+        FD_CLR(fd, files->close_on_exec);
+#endif
+        /* Dropping the lock here exposes a minor race but it allows me
+         * to use the existing kernel interfaces for this, and for a test
+         * case I think that's reasonable. */
         spin_unlock(&files->file_lock);
         put_unused_fd(fd);
         return 0;