AC_MSG_CHECKING([whether div64_64() is available])
SPL_LINUX_TRY_COMPILE([
#include <asm/div64.h>
+ #include <linux/types.h>
],[
uint64_t i = div64_64(1ULL, 1ULL);
],[
--- /dev/null
+#ifndef _SPL_ATOMIC_COMPAT_H
+#define _SPL_ATOMIC_COMPAT_H
+
+#include <asm/atomic.h>
+
+#ifndef HAVE_ATOMIC64_T
+#include <linux/spinlock.h>
+
+/*
+ * Fallback atomic64_t for kernels that lack native 64-bit atomics
+ * (!HAVE_ATOMIC64_T): a 64-bit counter protected by a spinlock.
+ */
+typedef struct {
+ spinlock_t lock;
+ __s64 val;
+} atomic64_t;
+
+/*
+ * Static initializer, mirroring the kernel's ATOMIC64_INIT().
+ * NOTE(review): SPIN_LOCK_UNLOCKED was deprecated and later removed
+ * from the kernel; __SPIN_LOCK_UNLOCKED(name) may be required on
+ * newer kernels -- confirm the supported kernel range.
+ */
+#define ATOMIC64_INIT(i) { .lock = SPIN_LOCK_UNLOCKED, .val = (i) }
+
+/* Atomically add i to v; IRQ-save lock makes it usable from any context. */
+static inline void atomic64_add(__s64 i, atomic64_t *v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&v->lock, flags);
+ v->val += i;
+ spin_unlock_irqrestore(&v->lock, flags);
+}
+
+/* Atomically subtract i from v; IRQ-save lock makes it usable from any context. */
+static inline void atomic64_sub(__s64 i, atomic64_t *v)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&v->lock, flags);
+ v->val -= i;
+ spin_unlock_irqrestore(&v->lock, flags);
+}
+
+/*
+ * Return the current value of v.  The lock guarantees a coherent
+ * 64-bit read on 32-bit machines, where a plain load could tear.
+ */
+static inline __s64 atomic64_read(atomic64_t *v)
+{
+ unsigned long flags;
+ __s64 r;
+
+ spin_lock_irqsave(&v->lock, flags);
+ r = v->val;
+ spin_unlock_irqrestore(&v->lock, flags);
+
+ return r;
+}
+
+/* Atomically set v to i; lock prevents a torn 64-bit store on 32-bit. */
+static inline void atomic64_set(atomic64_t *v, __s64 i)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&v->lock, flags);
+ v->val = i;
+ spin_unlock_irqrestore(&v->lock, flags);
+}
+
+#endif /* HAVE_ATOMIC64_T */
+
+#endif /* _SPL_ATOMIC_COMPAT_H */
+
+++ /dev/null
-/*
- * This file is part of the SPL: Solaris Porting Layer.
- *
- * Copyright (c) 2008 Sun Microsystems, Inc.
- *
- * This is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef _SPL_DIV64_H
-#define _SPL_DIV64_H
-
-#include <asm/div64.h>
-
-#ifndef HAVE_DIV64_64
-#if BITS_PER_LONG == 32
-
-extern uint64_t spl_div64_64(uint64_t dividend, uint64_t divisor);
-#define div64_64(a,b) spl_div64_64(a,b)
-
-#else /* BITS_PER_LONG == 32 */
-
-static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
-{
- return dividend / divisor;
-}
-
-#endif /* BITS_PER_LONG == 32 */
-#endif /* HAVE_DIV64_64 */
-
-#define roundup64(x, y) (div64_64((x) + ((y) - 1), (y)) * (y))
-
-#endif /* _SPL_DIV64_H */
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/ctype.h>
+#include <asm/atomic_compat.h>
#include <sys/types.h>
#include <sys/debug.h>
+
/*
* Memory allocation interfaces
*/
EXPORT_SYMBOL(highbit);
/*
- * Implementation of div64_64(), for kernels that don't have it.
- *
- * Taken from a 2.6.24 kernel.
+ * Implementation of 64 bit division for 32-bit machines.
*/
-uint64_t spl_div64_64(uint64_t dividend, uint64_t divisor)
+#if BITS_PER_LONG == 32
+/*
+ * 64-bit unsigned division for 32-bit machines.  gcc emits calls to
+ * the libgcc routine __udivdi3 for 64-bit '/', which is unavailable
+ * in the kernel, so the SPL must provide and export it.
+ */
+uint64_t __udivdi3(uint64_t dividend, uint64_t divisor)
{
+#ifdef HAVE_DIV64_64
+ return div64_64(dividend, divisor);
+#else
+ /* Taken from a 2.6.24 kernel. */
uint32_t high, d;
high = divisor >> 32;
+ /*
+ * NOTE(review): as shown, 'd' is read uninitialized -- this hunk
+ * appears to elide the 2.6.24 code that sets it (when high != 0,
+ * d = divisor >> fls(high) with dividend shifted to match; else
+ * d = divisor).  Confirm against the full file before applying.
+ */
do_div(dividend, d);
return dividend;
+#endif
}
-EXPORT_SYMBOL(spl_div64_64);
+EXPORT_SYMBOL(__udivdi3);
+
+/*
+ * 64-bit unsigned modulo for 32-bit machines.  gcc emits calls to
+ * the libgcc routine __umoddi3 for 64-bit '%'.  The 64-bit '/'
+ * below itself compiles to a __udivdi3 call, so this reuses the
+ * division routine above.
+ */
+uint64_t __umoddi3(uint64_t dividend, uint64_t divisor)
+{
+ return dividend - divisor * (dividend / divisor);
+}
+EXPORT_SYMBOL(__umoddi3);
+#endif
int
ddi_strtoul(const char *str, char **nptr, int base, unsigned long *result)
*/
#ifdef DEBUG_KMEM
/* Shim layer memory accounting */
-atomic64_t kmem_alloc_used;
+atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
unsigned long kmem_alloc_max = 0;
-atomic64_t vmem_alloc_used;
+atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
unsigned long vmem_alloc_max = 0;
int kmem_warning_flag = 1;
* a serious concern here since it is module unload time. */
if (atomic64_read(&kmem_alloc_used) != 0)
CWARN("kmem leaked %ld/%ld bytes\n",
- atomic_read(&kmem_alloc_used), kmem_alloc_max);
+ atomic64_read(&kmem_alloc_used), kmem_alloc_max);
if (atomic64_read(&vmem_alloc_used) != 0)
CWARN("vmem leaked %ld/%ld bytes\n",
- atomic_read(&vmem_alloc_used), vmem_alloc_max);
+ atomic64_read(&vmem_alloc_used), vmem_alloc_max);
spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
if (write) {
*ppos += *lenp;
} else {
- val = atomic_read((atomic64_t *)table->data);
+ val = atomic64_read((atomic64_t *)table->data);
rc = proc_doulongvec_minmax(&dummy, write, filp,
buffer, lenp, ppos);
}