AC_MSG_RESULT(no)
])
])
+
+dnl #
+dnl # 2.6.22 API change,
+dnl # check whether 'div64_64()' is available
+dnl #
+AC_DEFUN([SPL_AC_DIV64_64], [
+ AC_MSG_CHECKING([whether div64_64() is available])
+ SPL_LINUX_TRY_COMPILE([
+ #include <asm/div64.h>
+ ],[
+ uint64_t i = div64_64(1ULL, 1ULL);
+ ],[
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_DIV64_64, 1, [div64_64() is available])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+])
SPL_AC_KMALLOC_NODE
SPL_AC_MONOTONIC_CLOCK
SPL_AC_INODE_I_MUTEX
+SPL_AC_DIV64_64
TOPDIR=`/bin/pwd`
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <sys/isa_defs.h> /* for the _LP64 definition */
/* XXX: Serialize everything through global locks. This is
* going to be bad for performance, but for now it's the easiest
return rc;
}
-#if defined(__x86_64__)
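+/* Emulated 32-bit compare-and-swap: if *target equals cmp, store newval
+ * in *target; either way, return the value *target held under the lock. */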
+static __inline__ uint32_t
+atomic_cas_32(volatile uint32_t *target, uint32_t cmp,
+ uint32_t newval)
+{
+ uint32_t rc;
+
+ spin_lock(&atomic32_lock);
+ rc = *target;
+ if (rc == cmp)
+ *target = newval;
+
+ spin_unlock(&atomic32_lock);
+
+ return rc;
+}
+
+#ifdef _LP64
/* XXX: Implement atomic_cas_ptr() in terms of uint64_t's. This
 * is of course only safe and correct for 64-bit arches... but
* for now I'm OK with that.
return (void *)atomic_cas_64((volatile uint64_t *)target,
(uint64_t)cmp, (uint64_t)newval);
}
+#else
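+/* On 32-bit arches a pointer fits in a uint32_t, so implement
+ * atomic_cas_ptr() in terms of the 32-bit compare-and-swap above. */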
+static __inline__ void *
+atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
+{
+ return (void *)atomic_cas_32((volatile uint32_t *)target,
+ (uint32_t)cmp, (uint32_t)newval);
+}
#endif
#ifdef __cplusplus
--- /dev/null
+/*
+ * This file is part of the SPL: Solaris Porting Layer.
+ *
+ * Copyright (c) 2008 Sun Microsystems, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _SPL_DIV64_H
+#define _SPL_DIV64_H
+
+#include <asm/div64.h>
+
+#ifndef HAVE_DIV64_64
+#if BITS_PER_LONG == 32
+
+extern uint64_t spl_div64_64(uint64_t dividend, uint64_t divisor);
+#define div64_64(a,b) spl_div64_64(a,b)
+
+#else /* BITS_PER_LONG == 32 */
+
+static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+{
+ return dividend / divisor;
+}
+
+#endif /* BITS_PER_LONG == 32 */
+#endif /* HAVE_DIV64_64 */
+
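+/* Round x up to the next multiple of y, e.g. roundup64(1000, 512) == 1024. */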
+#define roundup64(x, y) (div64_64((x) + ((y) - 1), (y)) * (y))
+
+#endif /* _SPL_DIV64_H */
}
EXPORT_SYMBOL(highbit);
+/*
+ * Implementation of div64_64(), for 32-bit kernels that don't have it.
+ *
+ * Taken from a 2.6.24 kernel.  Note that when the divisor is wider than
+ * 32 bits the quotient is approximate: both operands are scaled down
+ * until the divisor fits in 32 bits, dropping the divisor's low bits.
+ */
+uint64_t spl_div64_64(uint64_t dividend, uint64_t divisor)
+{
+ uint32_t high, d;
+
+ high = divisor >> 32;
+ if (high) {
+ /* The divisor is wider than 32 bits: shift both operands
+ * right so the divisor fits in 32 bits. fls() returns the
+ * 1-based index of the highest set bit in the top word. */
+ unsigned int shift = fls(high);
+
+ d = divisor >> shift;
+ dividend >>= shift;
+ } else
+ d = divisor;
+
+ /* 64-bit by 32-bit division; do_div() stores the quotient in
+ * dividend and returns the remainder, which we discard. */
+ do_div(dividend, d);
+
+ return dividend;
+}
+EXPORT_SYMBOL(spl_div64_64);
+
int
ddi_strtoul(const char *str, char **nptr, int base, unsigned long *result)
{