DEFAULT_INCLUDES += \
-I$(top_srcdir)/lib
-noinst_PROGRAMS = spl
sbin_PROGRAMS = splat
-spl_SOURCES = spl.c
-
splat_SOURCES = splat.c
splat_LDFLAGS = $(top_builddir)/lib/libcommon.la
+++ /dev/null
-/*****************************************************************************\
- * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
- * Copyright (C) 2007 The Regents of the University of California.
- * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
- * UCRL-CODE-235197
- *
- * This file is part of the SPL, Solaris Porting Layer.
- * For details, see <http://zfsonlinux.org/>.
- *
- * The SPL is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * The SPL is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with the SPL. If not, see <http://www.gnu.org/licenses/>.
- *****************************************************************************
- * Solaris Porting Layer (SPL) User Space Interface.
-\*****************************************************************************/
-
-#include <stdlib.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <string.h>
-#include <errno.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <fcntl.h>
-#include "../include/spl-ctl.h"
-
-static int spl_debug_mask = ~0;
-static int spl_debug_subsystem = ~0;
-
-/* all strings nul-terminated; only the struct and hdr need to be freed */
-struct dbg_line {
- struct spl_debug_header *hdr;
- char *file;
- char *fn;
- char *text;
-};
-
-static int
-cmp_rec(const void *p1, const void *p2)
-{
- struct dbg_line *d1 = *(struct dbg_line **)p1;
- struct dbg_line *d2 = *(struct dbg_line **)p2;
-
- if (d1->hdr->ph_sec < d2->hdr->ph_sec)
- return -1;
-
- if (d1->hdr->ph_sec == d2->hdr->ph_sec &&
- d1->hdr->ph_usec < d2->hdr->ph_usec)
- return -1;
-
- if (d1->hdr->ph_sec == d2->hdr->ph_sec &&
- d1->hdr->ph_usec == d2->hdr->ph_usec)
- return 0;
-
- return 1;
-}
-
-static void
-print_rec(struct dbg_line **linev, int used, FILE *out)
-{
- int i;
-
- for (i = 0; i < used; i++) {
- struct dbg_line *line = linev[i];
- struct spl_debug_header *hdr = line->hdr;
-
- fprintf(out, "%08x:%08x:%u:%u.%06llu:%u:%u:%u:(%s:%u:%s()) %s",
- hdr->ph_subsys, hdr->ph_mask, hdr->ph_cpu_id,
- hdr->ph_sec, (unsigned long long)hdr->ph_usec,
- hdr->ph_stack, hdr->ph_pid, hdr->ph_stack, line->file,
- hdr->ph_line_num, line->fn, line->text);
- free(line->hdr);
- free(line);
- }
-
- free(linev);
-}
-
-static int
-add_rec(struct dbg_line *line, struct dbg_line ***linevp, int *lenp, int used)
-{
- struct dbg_line **linev = *linevp;
-
- if (used == *lenp) {
- int nlen = *lenp + 512;
- int nsize = nlen * sizeof(struct dbg_line *);
-
- linev = *linevp ? realloc(*linevp, nsize) : malloc(nsize);
- if (!linev)
- return 0;
- *linevp = linev;
- *lenp = nlen;
- }
- linev[used] = line;
- return 1;
-}
-
-static int
-parse_buffer(FILE *in, FILE *out)
-{
- struct dbg_line *line;
- struct spl_debug_header *hdr;
- char buf[4097], *p;
- unsigned long dropped = 0, kept = 0;
- struct dbg_line **linev = NULL;
- const int phl = sizeof(hdr->ph_len);
- const int phf = sizeof(hdr->ph_flags);
- int rc, linev_len = 0;
-
- while (1) {
- rc = fread(buf, phl + phf, 1, in);
- if (rc <= 0)
- break;
-
- hdr = (void *)buf;
- if (hdr->ph_len == 0)
- break;
- if (hdr->ph_len > 4094) {
- fprintf(stderr, "unexpected large record: %d bytes. "
- "aborting.\n", hdr->ph_len);
- break;
- }
-
- rc = fread(buf + phl + phf, 1, hdr->ph_len - phl - phf, in);
- if (rc <= 0)
- break;
-
- if (hdr->ph_mask &&
- (!(spl_debug_subsystem & hdr->ph_subsys) ||
- (!(spl_debug_mask & hdr->ph_mask)))) {
- dropped++;
- continue;
- }
-
- line = malloc(sizeof(*line));
- if (line == NULL) {
- fprintf(stderr, "malloc failed; printing accumulated "
- "records and exiting.\n");
- break;
- }
-
- line->hdr = malloc(hdr->ph_len + 1);
- if (line->hdr == NULL) {
- free(line);
- fprintf(stderr, "malloc failed; printing accumulated "
- "records and exiting.\n");
- break;
- }
-
- p = (void *)line->hdr;
- memcpy(line->hdr, buf, hdr->ph_len);
- p[hdr->ph_len] = '\0';
-
- p += sizeof(*hdr);
- line->file = p;
- p += strlen(line->file) + 1;
- line->fn = p;
- p += strlen(line->fn) + 1;
- line->text = p;
-
- if (!add_rec(line, &linev, &linev_len, kept)) {
- fprintf(stderr, "malloc failed; printing accumulated "
- "records and exiting.\n");
- break;
- }
- kept++;
- }
-
- if (linev) {
- qsort(linev, kept, sizeof(struct dbg_line *), cmp_rec);
- print_rec(linev, kept, out);
- }
-
- printf("Debug log: %lu lines, %lu kept, %lu dropped.\n",
- dropped + kept, kept, dropped);
- return 0;
-}
-
-int
-main(int argc, char *argv[])
-{
- int fdin, fdout;
- FILE *in, *out = stdout;
- int rc, o_lf = 0;
-
- if (argc > 3 || argc < 2) {
- fprintf(stderr, "usage: %s <input> [output]\n", argv[0]);
- return 0;
- }
-
-#ifdef __USE_LARGEFILE64
- o_lf = O_LARGEFILE;
-#endif
-
- fdin = open(argv[1], O_RDONLY | o_lf);
- if (fdin == -1) {
- fprintf(stderr, "open(%s) failed: %s\n", argv[1],
- strerror(errno));
- return 1;
- }
- in = fdopen(fdin, "r");
- if (in == NULL) {
- fprintf(stderr, "fopen(%s) failed: %s\n", argv[1],
- strerror(errno));
- close(fdin);
- return 1;
- }
- if (argc > 2) {
- fdout = open(argv[2], O_CREAT | O_TRUNC | O_WRONLY | o_lf, 0600);
- if (fdout == -1) {
- fprintf(stderr, "open(%s) failed: %s\n", argv[2],
- strerror(errno));
- fclose(in);
- return 1;
- }
- out = fdopen(fdout, "w");
- if (out == NULL) {
- fprintf(stderr, "fopen(%s) failed: %s\n", argv[2],
- strerror(errno));
- fclose(in);
- close(fdout);
- return 1;
- }
- }
-
- rc = parse_buffer(in, out);
-
- fclose(in);
- if (out != stdout)
- fclose(out);
-
- return rc;
-}
AC_SUBST(KERNELCPPFLAGS)
SPL_AC_DEBUG
- SPL_AC_DEBUG_LOG
SPL_AC_DEBUG_KMEM
SPL_AC_DEBUG_KMEM_TRACKING
SPL_AC_TEST_MODULE
AC_MSG_RESULT([$HAVE_RPMBUILD])
])
- RPM_DEFINE_COMMON='--define "$(DEBUG_SPL) 1" --define "$(DEBUG_LOG) 1" --define "$(DEBUG_KMEM) 1" --define "$(DEBUG_KMEM_TRACKING) 1"'
+ RPM_DEFINE_COMMON='--define "$(DEBUG_SPL) 1" --define "$(DEBUG_KMEM) 1" --define "$(DEBUG_KMEM_TRACKING) 1"'
RPM_DEFINE_UTIL=
RPM_DEFINE_KMOD='--define "kernels $(LINUX_VERSION)"'
RPM_DEFINE_DKMS=
AC_MSG_RESULT([$enable_debug])
])
-dnl #
-dnl # Enabled by default it provides a basic debug log infrastructure.
-dnl # Each subsystem registers itself with a name and logs messages
-dnl # using predefined types. If the debug mask is set to allow the
-dnl # message type, it will be written to the internal log. The log
-dnl # can be dumped to a file by echoing 1 to the 'dump' proc entry;
-dnl # after dumping, the log must be decoded using the spl utility.
-dnl #
-dnl # echo 1 >/proc/sys/kernel/spl/debug/dump
-dnl # spl /tmp/spl-log.xxx.yyy /tmp/spl-log.xxx.yyy.txt
-dnl #
-AC_DEFUN([SPL_AC_DEBUG_LOG], [
- AC_ARG_ENABLE([debug-log],
- [AS_HELP_STRING([--enable-debug-log],
- [Enable basic debug logging @<:@default=yes@:>@])],
- [],
- [enable_debug_log=yes])
-
- AS_IF([test "x$enable_debug_log" = xyes],
- [
- KERNELCPPFLAGS="${KERNELCPPFLAGS} -DDEBUG_LOG"
- DEBUG_LOG="_with_debug_log"
- AC_DEFINE([DEBUG_LOG], [1],
- [Define to 1 to enable basic debug logging])
- ], [
- DEBUG_LOG="_without_debug_log"
- ])
-
- AC_SUBST(DEBUG_LOG)
- AC_MSG_CHECKING([whether basic debug logging is enabled])
- AC_MSG_RESULT([$enable_debug_log])
-])
-
dnl #
dnl # Enabled by default it provides a minimal level of memory tracking.
dnl # A total count of bytes allocated is kept for each alloc and free.
KERNEL_H = \
$(top_srcdir)/include/splat-ctl.h \
$(top_srcdir)/include/spl-ctl.h \
- $(top_srcdir)/include/spl-debug.h \
- $(top_srcdir)/include/spl-trace.h \
$(top_srcdir)/include/strings.h \
$(top_srcdir)/include/unistd.h
+++ /dev/null
-/*****************************************************************************\
- * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
- * Copyright (C) 2007 The Regents of the University of California.
- * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
- * UCRL-CODE-235197
- *
- * This file is part of the SPL, Solaris Porting Layer.
- * For details, see <http://zfsonlinux.org/>.
- *
- * The SPL is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * The SPL is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with the SPL. If not, see <http://www.gnu.org/licenses/>.
-\*****************************************************************************/
-
-/*
- * Available debug functions. These functions should be used by any
- * package which needs to integrate with the SPL log infrastructure.
- *
- * SDEBUG() - Log debug message with specified mask.
- * SDEBUG_LIMIT() - Log just 1 debug message with specified mask.
- * SWARN() - Log a warning message.
- * SERROR() - Log an error message.
- * SEMERG() - Log an emergency error message.
- * SCONSOLE() - Log a generic message to the console.
- *
- * SENTRY - Log entry point to a function.
- * SEXIT - Log exit point from a function.
- * SRETURN(x) - Log return from a function.
- * SGOTO(x, y) - Log goto within a function.
- */
-
-#ifndef _SPL_DEBUG_INTERNAL_H
-#define _SPL_DEBUG_INTERNAL_H
-
-#include <linux/limits.h>
-#include <linux/sched.h>
-
-#define SS_UNDEFINED 0x00000001
-#define SS_ATOMIC 0x00000002
-#define SS_KOBJ 0x00000004
-#define SS_VNODE 0x00000008
-#define SS_TIME 0x00000010
-#define SS_RWLOCK 0x00000020
-#define SS_THREAD 0x00000040
-#define SS_CONDVAR 0x00000080
-#define SS_MUTEX 0x00000100
-#define SS_RNG 0x00000200
-#define SS_TASKQ 0x00000400
-#define SS_KMEM 0x00000800
-#define SS_DEBUG 0x00001000
-#define SS_GENERIC 0x00002000
-#define SS_PROC 0x00004000
-#define SS_MODULE 0x00008000
-#define SS_CRED 0x00010000
-#define SS_KSTAT 0x00020000
-#define SS_XDR 0x00040000
-#define SS_TSD 0x00080000
-#define SS_ZLIB 0x00100000
-#define SS_USER1 0x01000000
-#define SS_USER2 0x02000000
-#define SS_USER3 0x04000000
-#define SS_USER4 0x08000000
-#define SS_USER5 0x10000000
-#define SS_USER6 0x20000000
-#define SS_USER7 0x40000000
-#define SS_USER8 0x80000000
-#define SS_DEBUG_SUBSYS SS_UNDEFINED
-
-#define SD_TRACE 0x00000001
-#define SD_INFO 0x00000002
-#define SD_WARNING 0x00000004
-#define SD_ERROR 0x00000008
-#define SD_EMERG 0x00000010
-#define SD_CONSOLE 0x00000020
-#define SD_IOCTL 0x00000040
-#define SD_DPRINTF 0x00000080
-#define SD_OTHER 0x00000100
-#define SD_CANTMASK (SD_ERROR | SD_EMERG | SD_WARNING | SD_CONSOLE)
-
-/* Debug log support enabled */
-#ifdef DEBUG_LOG
-
-#define __SDEBUG(cdls, subsys, mask, format, a...) \
-do { \
- if (((mask) & SD_CANTMASK) != 0 || \
- ((spl_debug_mask & (mask)) != 0 && \
- (spl_debug_subsys & (subsys)) != 0)) \
- spl_debug_msg(cdls, subsys, mask, __FILE__, \
- __FUNCTION__, __LINE__, format, ## a); \
-} while (0)
-
-#define SDEBUG(mask, format, a...) \
- __SDEBUG(NULL, SS_DEBUG_SUBSYS, mask, format, ## a)
-
-#define __SDEBUG_LIMIT(subsys, mask, format, a...) \
-do { \
- static spl_debug_limit_state_t cdls; \
- \
- __SDEBUG(&cdls, subsys, mask, format, ## a); \
-} while (0)
-
-#define SDEBUG_LIMIT(mask, format, a...) \
- __SDEBUG_LIMIT(SS_DEBUG_SUBSYS, mask, format, ## a)
-
-#define SWARN(fmt, a...) SDEBUG_LIMIT(SD_WARNING, fmt, ## a)
-#define SERROR(fmt, a...) SDEBUG_LIMIT(SD_ERROR, fmt, ## a)
-#define SEMERG(fmt, a...) SDEBUG_LIMIT(SD_EMERG, fmt, ## a)
-#define SCONSOLE(mask, fmt, a...) SDEBUG(SD_CONSOLE | (mask), fmt, ## a)
-
-#define SENTRY SDEBUG(SD_TRACE, "Process entered\n")
-#define SEXIT SDEBUG(SD_TRACE, "Process leaving\n")
-
-#define SRETURN(rc) \
-do { \
- typeof(rc) RETURN__ret = (rc); \
- SDEBUG(SD_TRACE, "Process leaving (rc=%lu : %ld : %lx)\n", \
- (long)RETURN__ret, (long)RETURN__ret, (long)RETURN__ret); \
- return RETURN__ret; \
-} while (0)
-
-#define SGOTO(label, rc) \
-do { \
- long GOTO__ret = (long)(rc); \
- SDEBUG(SD_TRACE,"Process leaving via %s (rc=%lu : %ld : %lx)\n",\
- #label, (unsigned long)GOTO__ret, (signed long)GOTO__ret, \
- (signed long)GOTO__ret); \
- goto label; \
-} while (0)
-
-typedef struct {
- unsigned long cdls_next;
- int cdls_count;
- long cdls_delay;
-} spl_debug_limit_state_t;
-
-/* Global debug variables */
-extern unsigned long spl_debug_subsys;
-extern unsigned long spl_debug_mask;
-extern unsigned long spl_debug_printk;
-extern int spl_debug_mb;
-extern unsigned int spl_debug_binary;
-extern unsigned int spl_debug_catastrophe;
-extern unsigned int spl_debug_panic_on_bug;
-extern char spl_debug_file_path[PATH_MAX];
-extern unsigned int spl_console_ratelimit;
-extern long spl_console_max_delay;
-extern long spl_console_min_delay;
-extern unsigned int spl_console_backoff;
-extern unsigned int spl_debug_stack;
-
-/* Exported debug functions */
-extern int spl_debug_mask2str(char *str, int size, unsigned long mask, int ss);
-extern int spl_debug_str2mask(unsigned long *mask, const char *str, int ss);
-extern unsigned long spl_debug_set_mask(unsigned long mask);
-extern unsigned long spl_debug_get_mask(void);
-extern unsigned long spl_debug_set_subsys(unsigned long mask);
-extern unsigned long spl_debug_get_subsys(void);
-extern int spl_debug_set_mb(int mb);
-extern int spl_debug_get_mb(void);
-extern int spl_debug_dumplog(int flags);
-extern void spl_debug_dumpstack(struct task_struct *tsk);
-extern void spl_debug_bug(char *file, const char *fn, const int line, int fl);
-extern int spl_debug_msg(void *arg, int subsys, int mask, const char *file,
- const char *fn, const int line, const char *format, ...);
-extern int spl_debug_clear_buffer(void);
-extern int spl_debug_mark_buffer(char *text);
-
-int spl_debug_init(void);
-void spl_debug_fini(void);
-
-/* Debug log support disabled */
-#else /* DEBUG_LOG */
-
-#define __SDEBUG(x, y, mask, fmt, a...) ((void)0)
-#define SDEBUG(mask, fmt, a...) ((void)0)
-#define SDEBUG_LIMIT(x, y, fmt, a...) ((void)0)
-#define SWARN(fmt, a...) ((void)0)
-#define SERROR(fmt, a...) ((void)0)
-#define SEMERG(fmt, a...) ((void)0)
-#define SCONSOLE(mask, fmt, a...) ((void)0)
-
-#define SENTRY ((void)0)
-#define SEXIT ((void)0)
-#define SRETURN(x) return (x)
-#define SGOTO(x, y) { ((void)(y)); goto x; }
-
-static inline unsigned long
-spl_debug_set_mask(unsigned long mask) {
- return (0);
-}
-
-static inline unsigned long
-spl_debug_get_mask(void) {
- return (0);
-}
-
-static inline unsigned long
-spl_debug_set_subsys(unsigned long mask) {
- return (0);
-}
-
-static inline unsigned long
-spl_debug_get_subsys(void) {
- return (0);
-}
-
-static inline int
-spl_debug_set_mb(int mb) {
- return (0);
-}
-
-static inline int
-spl_debug_get_mb(void) {
- return (0);
-}
-
-static inline int
-spl_debug_dumplog(int flags)
-{
- return (0);
-}
-
-static inline void
-spl_debug_dumpstack(struct task_struct *tsk)
-{
- return;
-}
-
-static inline void
-spl_debug_bug(char *file, const char *fn, const int line, int fl)
-{
- return;
-}
-
-static inline int
-spl_debug_msg(void *arg, int subsys, int mask, const char *file,
- const char *fn, const int line, const char *format, ...)
-{
- return (0);
-}
-
-static inline int
-spl_debug_clear_buffer(void)
-{
- return (0);
-}
-
-static inline int
-spl_debug_mark_buffer(char *text)
-{
- return (0);
-}
-
-static inline int
-spl_debug_init(void) {
- return (0);
-}
-
-static inline void
-spl_debug_fini(void) {
- return;
-}
-
-#endif /* DEBUG_LOG */
-
-#endif /* SPL_DEBUG_INTERNAL_H */
+++ /dev/null
-/*****************************************************************************\
- * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
- * Copyright (C) 2007 The Regents of the University of California.
- * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
- * UCRL-CODE-235197
- *
- * This file is part of the SPL, Solaris Porting Layer.
- * For details, see <http://zfsonlinux.org/>.
- *
- * The SPL is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * The SPL is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with the SPL. If not, see <http://www.gnu.org/licenses/>.
-\*****************************************************************************/
-
-#ifndef _SPL_TRACE_H
-#define _SPL_TRACE_H
-
-#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
-#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
-#define TRACE_CONSOLE_BUFFER_SIZE 1024
-
-#define SPL_DEFAULT_MAX_DELAY (600 * HZ)
-#define SPL_DEFAULT_MIN_DELAY ((HZ + 1) / 2)
-#define SPL_DEFAULT_BACKOFF 2
-
-#define DL_NOTHREAD 0x0001 /* Do not create a new thread */
-#define DL_SINGLE_CPU 0x0002 /* Collect pages from this CPU*/
-
-typedef struct dumplog_priv {
- wait_queue_head_t dp_waitq;
- pid_t dp_pid;
- int dp_flags;
- atomic_t dp_done;
-} dumplog_priv_t;
-
-/* Three trace data types */
-typedef enum {
- TCD_TYPE_PROC,
- TCD_TYPE_SOFTIRQ,
- TCD_TYPE_IRQ,
- TCD_TYPE_MAX
-} tcd_type_t;
-
-union trace_data_union {
- struct trace_cpu_data {
- /* pages with trace records not yet processed by tracefiled */
- struct list_head tcd_pages;
- /* number of pages on ->tcd_pages */
- unsigned long tcd_cur_pages;
- /* Max number of pages allowed on ->tcd_pages */
- unsigned long tcd_max_pages;
-
- /*
- * preallocated pages to write trace records into. Pages from
- * ->tcd_stock_pages are moved to ->tcd_pages by spl_debug_msg().
- *
- * This list is necessary, because on some platforms it's
- * impossible to perform efficient atomic page allocation in a
- * non-blockable context.
- *
- * Such platforms fill ->tcd_stock_pages "on occasion", when
- * tracing code is entered in blockable context.
- *
- * trace_get_tage_try() tries to get a page from
- * ->tcd_stock_pages first and resorts to atomic page
- * allocation only if this queue is empty. ->tcd_stock_pages
- * is replenished when tracing code is entered in blocking
- * context (darwin-tracefile.c:trace_get_tcd()). We try to
- * maintain TCD_STOCK_PAGES (40 by default) pages in this
- * queue. Atomic allocation is only required if more than
- * TCD_STOCK_PAGES pagesful are consumed by trace records all
- * emitted in non-blocking contexts. Which is quite unlikely.
- */
- struct list_head tcd_stock_pages;
- /* number of pages on ->tcd_stock_pages */
- unsigned long tcd_cur_stock_pages;
-
- unsigned short tcd_shutting_down;
- unsigned short tcd_cpu;
- unsigned short tcd_type;
- /* The factors to share debug memory. */
- unsigned short tcd_pages_factor;
-
- /*
- * This spinlock is needed to workaround the problem of
- * set_cpus_allowed() being GPL-only. Since we cannot
- * schedule a thread on a specific CPU when dumping the
- * pages, we must use the spinlock for mutual exclusion.
- */
- spinlock_t tcd_lock;
- unsigned long tcd_lock_flags;
- } tcd;
- char __pad[L1_CACHE_ALIGN(sizeof(struct trace_cpu_data))];
-};
-
-extern union trace_data_union (*trace_data[TCD_TYPE_MAX])[NR_CPUS];
-
-#define tcd_for_each(tcd, i, j) \
- for (i = 0; i < TCD_TYPE_MAX && trace_data[i]; i++) \
- for (j = 0, ((tcd) = &(*trace_data[i])[j].tcd); \
- j < num_possible_cpus(); j++, (tcd) = &(*trace_data[i])[j].tcd)
-
-#define tcd_for_each_type_lock(tcd, i, cpu) \
- for (i = 0; i < TCD_TYPE_MAX && trace_data[i] && \
- (tcd = &(*trace_data[i])[cpu].tcd) && \
- trace_lock_tcd(tcd); trace_unlock_tcd(tcd), i++)
-
-struct trace_page {
- struct page *page; /* page itself */
- struct list_head linkage; /* Used by trace_data_union */
- unsigned int used; /* number of bytes used within this page */
- unsigned short cpu; /* cpu that owns this page */
- unsigned short type; /* type(context) of this page */
-};
-
-struct page_collection {
- struct list_head pc_pages;
- spinlock_t pc_lock;
- int pc_want_daemon_pages;
-};
-
-#endif /* SPL_TRACE_H */
*
* PANIC() - Panic the node and print message.
* ASSERT() - Assert X is true, if not panic.
- * ASSERTF() - Assert X is true, if not panic and print message.
* ASSERTV() - Wraps a variable declaration which is only used by ASSERT().
* ASSERT3S() - Assert signed X OP Y is true, if not panic.
* ASSERT3U() - Assert unsigned X OP Y is true, if not panic.
*/
#ifndef _SPL_DEBUG_H
-#define _SPL_DEBUG_H
+#define _SPL_DEBUG_H
-#include <spl-debug.h>
-
-#ifdef NDEBUG /* Debugging Disabled */
-
-/* Define SPL_DEBUG_STR to make clear which ASSERT definitions are used */
-#define SPL_DEBUG_STR ""
-
-#define PANIC(fmt, a...) \
- spl_PANIC(__FILE__, __FUNCTION__, __LINE__, fmt, ## a)
-
-#define __ASSERT(x) ((void)0)
-#define ASSERT(x) ((void)0)
-#define ASSERTF(x, y, z...) ((void)0)
-#define ASSERTV(x)
-#define VERIFY(cond) \
- (void)(unlikely(!(cond)) && \
- spl_PANIC(__FILE__, __FUNCTION__, __LINE__, \
- "%s", "VERIFY(" #cond ") failed\n"))
-
-#define VERIFY3_IMPL(LEFT, OP, RIGHT, TYPE, FMT, CAST) \
- (void)((!((TYPE)(LEFT) OP (TYPE)(RIGHT))) && \
- spl_PANIC(__FILE__, __FUNCTION__, __LINE__, \
- "VERIFY3(" #LEFT " " #OP " " #RIGHT ") " \
- "failed (" FMT " " #OP " " FMT ")\n", \
- CAST (LEFT), CAST (RIGHT)))
-
-#define VERIFY3S(x,y,z) VERIFY3_IMPL(x, y, z, int64_t, "%lld", (long long))
-#define VERIFY3U(x,y,z) VERIFY3_IMPL(x, y, z, uint64_t, "%llu", \
- (unsigned long long))
-#define VERIFY3P(x,y,z) VERIFY3_IMPL(x, y, z, uintptr_t, "%p", (void *))
-#define VERIFY0(x) VERIFY3_IMPL(0, ==, x, int64_t, "%lld", (long long))
-
-#define ASSERT3S(x,y,z) ((void)0)
-#define ASSERT3U(x,y,z) ((void)0)
-#define ASSERT3P(x,y,z) ((void)0)
-#define ASSERT0(x) ((void)0)
-
-#else /* Debugging Enabled */
-
-/* Define SPL_DEBUG_STR to make clear which ASSERT definitions are used */
-#define SPL_DEBUG_STR " (DEBUG mode)"
-
-#define PANIC(fmt, a...) \
- spl_PANIC(__FILE__, __FUNCTION__, __LINE__, fmt, ## a)
-
-/* ASSERTION that is safe to use within the debug system */
-#define __ASSERT(cond) \
-do { \
- if (unlikely(!(cond))) { \
- printk(KERN_EMERG "ASSERTION(" #cond ") failed\n"); \
- BUG(); \
- } \
-} while (0)
+/*
+ * Common DEBUG functionality.
+ */
+int spl_panic(const char *file, const char *func, int line,
+ const char *fmt, ...);
+void spl_dumpstack(void);
-/* ASSERTION that will debug log, for use outside the debug system */
-#define ASSERT(cond) \
- (void)(unlikely(!(cond)) && \
- spl_PANIC(__FILE__, __FUNCTION__, __LINE__, \
- "%s", "ASSERTION(" #cond ") failed\n"))
+#define PANIC(fmt, a...) \
+ spl_panic(__FILE__, __FUNCTION__, __LINE__, fmt, ## a)
-#define ASSERTF(cond, fmt, a...) \
+#define VERIFY(cond) \
(void)(unlikely(!(cond)) && \
- spl_PANIC(__FILE__, __FUNCTION__, __LINE__, \
- "ASSERTION(" #cond ") failed: " fmt, ## a))
+ spl_panic(__FILE__, __FUNCTION__, __LINE__, \
+ "%s", "VERIFY(" #cond ") failed\n"))
-#define VERIFY3_IMPL(LEFT, OP, RIGHT, TYPE, FMT, CAST) \
+#define VERIFY3_IMPL(LEFT, OP, RIGHT, TYPE, FMT, CAST) \
(void)((!((TYPE)(LEFT) OP (TYPE)(RIGHT))) && \
- spl_PANIC(__FILE__, __FUNCTION__, __LINE__, \
- "VERIFY3(" #LEFT " " #OP " " #RIGHT ") " \
- "failed (" FMT " " #OP " " FMT ")\n", \
- CAST (LEFT), CAST (RIGHT)))
+ spl_panic(__FILE__, __FUNCTION__, __LINE__, \
+ "VERIFY3(" #LEFT " " #OP " " #RIGHT ") " \
+ "failed (" FMT " " #OP " " FMT ")\n", \
+ CAST (LEFT), CAST (RIGHT)))
-#define VERIFY3S(x,y,z) VERIFY3_IMPL(x, y, z, int64_t, "%lld", (long long))
-#define VERIFY3U(x,y,z) VERIFY3_IMPL(x, y, z, uint64_t, "%llu", \
+#define VERIFY3S(x,y,z) VERIFY3_IMPL(x, y, z, int64_t, "%lld", (long long))
+#define VERIFY3U(x,y,z) VERIFY3_IMPL(x, y, z, uint64_t, "%llu", \
(unsigned long long))
-#define VERIFY3P(x,y,z) VERIFY3_IMPL(x, y, z, uintptr_t, "%p", (void *))
-#define VERIFY0(x) VERIFY3_IMPL(0, ==, x, int64_t, "%lld", (long long))
-
-#define ASSERT3S(x,y,z) VERIFY3S(x, y, z)
-#define ASSERT3U(x,y,z) VERIFY3U(x, y, z)
-#define ASSERT3P(x,y,z) VERIFY3P(x, y, z)
-#define ASSERT0(x) VERIFY0(x)
+#define VERIFY3P(x,y,z) VERIFY3_IMPL(x, y, z, uintptr_t, "%p", (void *))
+#define VERIFY0(x) VERIFY3_IMPL(0, ==, x, int64_t, "%lld", (long long))
-#define ASSERTV(x) x
-#define VERIFY(x) ASSERT(x)
-
-#endif /* NDEBUG */
+#define CTASSERT_GLOBAL(x) _CTASSERT(x, __LINE__)
+#define CTASSERT(x) { _CTASSERT(x, __LINE__); }
+#define _CTASSERT(x, y) __CTASSERT(x, y)
+#define __CTASSERT(x, y) \
+ typedef char __attribute__ ((unused)) \
+ __compile_time_assertion__ ## y[(x) ? 1 : -1]
/*
- * Helpers for the Solaris debug macros above
+ * Debugging disabled (--disable-debug)
*/
-extern int spl_PANIC(char *filename, const char *functionname,
- int lineno, const char *fmt, ...);
+#ifdef NDEBUG
+
+#define SPL_DEBUG_STR ""
+#define ASSERT(x) ((void)0)
+#define ASSERTV(x)
+#define ASSERT3S(x,y,z) ((void)0)
+#define ASSERT3U(x,y,z) ((void)0)
+#define ASSERT3P(x,y,z) ((void)0)
+#define ASSERT0(x) ((void)0)
/*
- * Compile-time assertion. The condition 'x' must be constant.
+ * Debugging enabled (--enable-debug)
*/
-#define CTASSERT_GLOBAL(x) _CTASSERT(x, __LINE__)
-#define CTASSERT(x) { _CTASSERT(x, __LINE__); }
-#define _CTASSERT(x, y) __CTASSERT(x, y)
-#define __CTASSERT(x, y) \
- typedef char __attribute__ ((unused)) \
- __compile_time_assertion__ ## y[(x) ? 1 : -1]
+#else
+
+#define SPL_DEBUG_STR " (DEBUG mode)"
+#define ASSERT(cond) VERIFY(cond)
+#define ASSERTV(x) x
+#define ASSERT3S(x,y,z) VERIFY3S(x, y, z)
+#define ASSERT3U(x,y,z) VERIFY3U(x, y, z)
+#define ASSERT3P(x,y,z) VERIFY3P(x, y, z)
+#define ASSERT0(x) VERIFY0(x)
+
+#endif /* NDEBUG */
#endif /* SPL_DEBUG_H */
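For reviewers of this patch, a minimal usage sketch (not part of the change) of the assertion interface that remains in sys/debug.h above; the helper example_range_check, the variable dbg_value, and the values shown are hypothetical.

#include <sys/types.h>
#include <sys/debug.h>

/* Compile-time check: the build fails if the condition is false. */
CTASSERT_GLOBAL(sizeof (uint64_t) == 8);

/* Hypothetical helper illustrating the run-time checks. */
static void
example_range_check(uint64_t value, uint64_t limit)
{
	/* Declaration referenced only by ASSERT3U; dropped under NDEBUG */
	ASSERTV(uint64_t dbg_value = value);

	/* Compiled out entirely when built with --disable-debug */
	ASSERT3U(dbg_value, <=, limit);

	/* Always compiled in; calls spl_panic() if the check fails */
	VERIFY3U(value, <=, limit);
}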
* ship a kernel with CONFIG_RT_MUTEX_TESTER disabled.
*/
#if !defined(CONFIG_RT_MUTEX_TESTER) && defined(PF_MUTEX_TESTER)
-# define PF_NOFS PF_MUTEX_TESTER
+#define PF_NOFS PF_MUTEX_TESTER
static inline void
sanitize_flags(struct task_struct *p, gfp_t *flags)
{
if (unlikely((p->flags & PF_NOFS) && (*flags & (__GFP_IO|__GFP_FS)))) {
-# ifdef NDEBUG
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "Fixing allocation for "
- "task %s (%d) which used GFP flags 0x%x with PF_NOFS set\n",
- p->comm, p->pid, flags);
- spl_debug_dumpstack(p);
+#ifdef NDEBUG
+ printk(KERN_WARNING "Fixing allocation for task %s (%d) "
+ "which used GFP flags 0x%x with PF_NOFS set\n",
+ p->comm, p->pid, *flags);
+ spl_dumpstack();
*flags &= ~(__GFP_IO|__GFP_FS);
-# else
+#else
PANIC("FATAL allocation for task %s (%d) which used GFP "
- "flags 0x%x with PF_NOFS set\n", p->comm, p->pid, flags);
-# endif /* NDEBUG */
+ "flags 0x%x with PF_NOFS set\n", p->comm, p->pid, *flags);
+#endif /* NDEBUG */
}
}
#else
-# define PF_NOFS 0x00000000
-# define sanitize_flags(p, fl) ((void)0)
+#define PF_NOFS 0x00000000
+#define sanitize_flags(p, fl) ((void)0)
#endif /* !defined(CONFIG_RT_MUTEX_TESTER) && defined(PF_MUTEX_TESTER) */
/*
# Solaris porting layer module
obj-$(CONFIG_SPL) := $(MODULE).o
-$(MODULE)-objs += @top_srcdir@/module/spl/spl-debug.o
$(MODULE)-objs += @top_srcdir@/module/spl/spl-proc.o
$(MODULE)-objs += @top_srcdir@/module/spl/spl-kmem.o
$(MODULE)-objs += @top_srcdir@/module/spl/spl-thread.o
\*****************************************************************************/
#include <sys/condvar.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_CONDVAR
void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
- SENTRY;
ASSERT(cvp);
ASSERT(name == NULL);
ASSERT(type == CV_DEFAULT);
atomic_set(&cvp->cv_waiters, 0);
atomic_set(&cvp->cv_refs, 1);
cvp->cv_mutex = NULL;
-
- SEXIT;
}
EXPORT_SYMBOL(__cv_init);
void
__cv_destroy(kcondvar_t *cvp)
{
- SENTRY;
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
-
- SEXIT;
}
EXPORT_SYMBOL(__cv_destroy);
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
DEFINE_WAIT(wait);
- SENTRY;
ASSERT(cvp);
ASSERT(mp);
finish_wait(&cvp->cv_event, &wait);
atomic_dec(&cvp->cv_refs);
-
- SEXIT;
}
void
{
DEFINE_WAIT(wait);
clock_t time_left;
- SENTRY;
ASSERT(cvp);
ASSERT(mp);
time_left = expire_time - jiffies;
if (time_left <= 0) {
atomic_dec(&cvp->cv_refs);
- SRETURN(-1);
+ return (-1);
}
prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
finish_wait(&cvp->cv_event, &wait);
atomic_dec(&cvp->cv_refs);
- SRETURN(time_left > 0 ? time_left : -1);
+ return (time_left > 0 ? time_left : -1);
}
clock_t
DEFINE_WAIT(wait);
hrtime_t time_left, now;
unsigned long time_left_us;
- SENTRY;
ASSERT(cvp);
ASSERT(mp);
time_left = expire_time - now;
if (time_left <= 0) {
atomic_dec(&cvp->cv_refs);
- SRETURN(-1);
+ return (-1);
}
time_left_us = time_left / NSEC_PER_USEC;
atomic_dec(&cvp->cv_refs);
time_left = expire_time - gethrtime();
- SRETURN(time_left > 0 ? time_left : -1);
+ return (time_left > 0 ? time_left : -1);
}
/*
void
__cv_signal(kcondvar_t *cvp)
{
- SENTRY;
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
atomic_inc(&cvp->cv_refs);
wake_up(&cvp->cv_event);
atomic_dec(&cvp->cv_refs);
- SEXIT;
}
EXPORT_SYMBOL(__cv_signal);
void
__cv_broadcast(kcondvar_t *cvp)
{
- SENTRY;
ASSERT(cvp);
ASSERT(cvp->cv_magic == CV_MAGIC);
atomic_inc(&cvp->cv_refs);
wake_up_all(&cvp->cv_event);
atomic_dec(&cvp->cv_refs);
- SEXIT;
}
EXPORT_SYMBOL(__cv_broadcast);
+++ /dev/null
-/*****************************************************************************\
- * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
- * Copyright (C) 2007 The Regents of the University of California.
- * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
- * UCRL-CODE-235197
- *
- * This file is part of the SPL, Solaris Porting Layer.
- * For details, see <http://zfsonlinux.org/>.
- *
- * The SPL is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * The SPL is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with the SPL. If not, see <http://www.gnu.org/licenses/>.
- *****************************************************************************
- * Solaris Porting Layer (SPL) Debug Implementation.
-\*****************************************************************************/
-
-#include <linux/kmod.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include <linux/kthread.h>
-#include <linux/hardirq.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/proc_compat.h>
-#include <linux/file_compat.h>
-#include <linux/swap.h>
-#include <linux/ratelimit.h>
-#include <sys/sysmacros.h>
-#include <sys/thread.h>
-#include <spl-debug.h>
-#include <spl-trace.h>
-#include <spl-ctl.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_DEBUG
-
-/* Debug log support enabled */
-#ifdef DEBUG_LOG
-
-unsigned long spl_debug_subsys = ~0;
-EXPORT_SYMBOL(spl_debug_subsys);
-module_param(spl_debug_subsys, ulong, 0644);
-MODULE_PARM_DESC(spl_debug_subsys, "Subsystem debugging level mask.");
-
-unsigned long spl_debug_mask = SD_CANTMASK;
-EXPORT_SYMBOL(spl_debug_mask);
-module_param(spl_debug_mask, ulong, 0644);
-MODULE_PARM_DESC(spl_debug_mask, "Debugging level mask.");
-
-unsigned long spl_debug_printk = SD_CANTMASK;
-EXPORT_SYMBOL(spl_debug_printk);
-module_param(spl_debug_printk, ulong, 0644);
-MODULE_PARM_DESC(spl_debug_printk, "Console printk level mask.");
-
-int spl_debug_mb = -1;
-EXPORT_SYMBOL(spl_debug_mb);
-module_param(spl_debug_mb, int, 0644);
-MODULE_PARM_DESC(spl_debug_mb, "Total debug buffer size.");
-
-unsigned int spl_debug_binary = 1;
-EXPORT_SYMBOL(spl_debug_binary);
-
-unsigned int spl_debug_catastrophe;
-EXPORT_SYMBOL(spl_debug_catastrophe);
-
-unsigned int spl_debug_panic_on_bug = 0;
-EXPORT_SYMBOL(spl_debug_panic_on_bug);
-module_param(spl_debug_panic_on_bug, uint, 0644);
-MODULE_PARM_DESC(spl_debug_panic_on_bug, "Panic on BUG");
-
-static char spl_debug_file_name[PATH_MAX];
-char spl_debug_file_path[PATH_MAX] = "/tmp/spl-log";
-
-unsigned int spl_console_ratelimit = 1;
-EXPORT_SYMBOL(spl_console_ratelimit);
-
-long spl_console_max_delay;
-EXPORT_SYMBOL(spl_console_max_delay);
-
-long spl_console_min_delay;
-EXPORT_SYMBOL(spl_console_min_delay);
-
-unsigned int spl_console_backoff = SPL_DEFAULT_BACKOFF;
-EXPORT_SYMBOL(spl_console_backoff);
-
-unsigned int spl_debug_stack;
-EXPORT_SYMBOL(spl_debug_stack);
-
-static int spl_panic_in_progress;
-
-union trace_data_union (*trace_data[TCD_TYPE_MAX])[NR_CPUS] __cacheline_aligned;
-char *trace_console_buffers[NR_CPUS][3];
-struct rw_semaphore trace_sem;
-atomic_t trace_tage_allocated = ATOMIC_INIT(0);
-
-static int spl_debug_dump_all_pages(dumplog_priv_t *dp, char *);
-static void trace_fini(void);
-
-
-/* Memory percentage breakdown by type */
-static unsigned int pages_factor[TCD_TYPE_MAX] = {
- 80, /* 80% pages for TCD_TYPE_PROC */
- 10, /* 10% pages for TCD_TYPE_SOFTIRQ */
- 10 /* 10% pages for TCD_TYPE_IRQ */
-};
-
-const char *
-spl_debug_subsys2str(int subsys)
-{
- switch (subsys) {
- default:
- return NULL;
- case SS_UNDEFINED:
- return "undefined";
- case SS_ATOMIC:
- return "atomic";
- case SS_KOBJ:
- return "kobj";
- case SS_VNODE:
- return "vnode";
- case SS_TIME:
- return "time";
- case SS_RWLOCK:
- return "rwlock";
- case SS_THREAD:
- return "thread";
- case SS_CONDVAR:
- return "condvar";
- case SS_MUTEX:
- return "mutex";
- case SS_RNG:
- return "rng";
- case SS_TASKQ:
- return "taskq";
- case SS_KMEM:
- return "kmem";
- case SS_DEBUG:
- return "debug";
- case SS_GENERIC:
- return "generic";
- case SS_PROC:
- return "proc";
- case SS_MODULE:
- return "module";
- case SS_CRED:
- return "cred";
- case SS_KSTAT:
- return "kstat";
- case SS_XDR:
- return "xdr";
- case SS_TSD:
- return "tsd";
- case SS_ZLIB:
- return "zlib";
- case SS_USER1:
- return "user1";
- case SS_USER2:
- return "user2";
- case SS_USER3:
- return "user3";
- case SS_USER4:
- return "user4";
- case SS_USER5:
- return "user5";
- case SS_USER6:
- return "user6";
- case SS_USER7:
- return "user7";
- case SS_USER8:
- return "user8";
- }
-}
-
-const char *
-spl_debug_dbg2str(int debug)
-{
- switch (debug) {
- default:
- return NULL;
- case SD_TRACE:
- return "trace";
- case SD_INFO:
- return "info";
- case SD_WARNING:
- return "warning";
- case SD_ERROR:
- return "error";
- case SD_EMERG:
- return "emerg";
- case SD_CONSOLE:
- return "console";
- case SD_IOCTL:
- return "ioctl";
- case SD_DPRINTF:
- return "dprintf";
- case SD_OTHER:
- return "other";
- }
-}
-
-int
-spl_debug_mask2str(char *str, int size, unsigned long mask, int is_subsys)
-{
- const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
- spl_debug_dbg2str;
- const char *token;
- int i, bit, len = 0;
-
- if (mask == 0) { /* "0" */
- if (size > 0)
- str[0] = '0';
- len = 1;
- } else { /* space-separated tokens */
- for (i = 0; i < 32; i++) {
- bit = 1 << i;
-
- if ((mask & bit) == 0)
- continue;
-
- token = fn(bit);
- if (token == NULL) /* unused bit */
- continue;
-
- if (len > 0) { /* separator? */
- if (len < size)
- str[len] = ' ';
- len++;
- }
-
- while (*token != 0) {
- if (len < size)
- str[len] = *token;
- token++;
- len++;
- }
- }
- }
-
- /* terminate 'str' */
- if (len < size)
- str[len] = 0;
- else
- str[size - 1] = 0;
-
- return len;
-}
-
-static int
-spl_debug_token2mask(int *mask, const char *str, int len, int is_subsys)
-{
- const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
- spl_debug_dbg2str;
- const char *token;
- int i, j, bit;
-
- /* match against known tokens */
- for (i = 0; i < 32; i++) {
- bit = 1 << i;
-
- token = fn(bit);
- if (token == NULL) /* unused? */
- continue;
-
- /* strcasecmp */
- for (j = 0; ; j++) {
- if (j == len) { /* end of token */
- if (token[j] == 0) {
- *mask = bit;
- return 0;
- }
- break;
- }
-
- if (token[j] == 0)
- break;
-
- if (str[j] == token[j])
- continue;
-
- if (str[j] < 'A' || 'Z' < str[j])
- break;
-
- if (str[j] - 'A' + 'a' != token[j])
- break;
- }
- }
-
- return -EINVAL; /* no match */
-}
-
-int
-spl_debug_str2mask(unsigned long *mask, const char *str, int is_subsys)
-{
- char op = 0;
- int m = 0, matched, n, t;
-
- /* Allow a number for backwards compatibility */
- for (n = strlen(str); n > 0; n--)
- if (!isspace(str[n-1]))
- break;
- matched = n;
-
- if ((t = sscanf(str, "%i%n", &m, &matched)) >= 1 && matched == n) {
- *mask = m;
- return 0;
- }
-
- /* <str> must be a list of debug tokens or numbers separated by
- * whitespace and optionally an operator ('+' or '-'). If an operator
- * appears first in <str>, '*mask' is used as the starting point
- * (relative), otherwise 0 is used (absolute). An operator applies to
- * all following tokens up to the next operator. */
- matched = 0;
- while (*str != 0) {
- while (isspace(*str)) /* skip whitespace */
- str++;
-
- if (*str == 0)
- break;
-
- if (*str == '+' || *str == '-') {
- op = *str++;
-
- /* op on first token == relative */
- if (!matched)
- m = *mask;
-
- while (isspace(*str)) /* skip whitespace */
- str++;
-
- if (*str == 0) /* trailing op */
- return -EINVAL;
- }
-
- /* find token length */
- for (n = 0; str[n] != 0 && !isspace(str[n]); n++);
-
- /* match token */
- if (spl_debug_token2mask(&t, str, n, is_subsys) != 0)
- return -EINVAL;
-
- matched = 1;
- if (op == '-')
- m &= ~t;
- else
- m |= t;
-
- str += n;
- }
-
- if (!matched)
- return -EINVAL;
-
- *mask = m;
- return 0;
-}
-
-static void
-spl_debug_dumplog_internal(dumplog_priv_t *dp)
-{
- void *journal_info;
-
- journal_info = current->journal_info;
- current->journal_info = NULL;
-
- snprintf(spl_debug_file_name, sizeof(spl_debug_file_path) - 1,
- "%s.%ld.%ld", spl_debug_file_path,
- get_seconds(), (long)dp->dp_pid);
- printk("SPL: Dumping log to %s\n", spl_debug_file_name);
- spl_debug_dump_all_pages(dp, spl_debug_file_name);
-
- current->journal_info = journal_info;
-}
-
-static int
-spl_debug_dumplog_thread(void *arg)
-{
- dumplog_priv_t *dp = (dumplog_priv_t *)arg;
-
- spl_debug_dumplog_internal(dp);
- atomic_set(&dp->dp_done, 1);
- wake_up(&dp->dp_waitq);
- complete_and_exit(NULL, 0);
-
- return 0; /* Unreachable */
-}
-
-/* When flag is set do not use a new thread for the debug dump */
-int
-spl_debug_dumplog(int flags)
-{
- struct task_struct *tsk;
- dumplog_priv_t dp;
-
- init_waitqueue_head(&dp.dp_waitq);
- dp.dp_pid = current->pid;
- dp.dp_flags = flags;
- atomic_set(&dp.dp_done, 0);
-
- if (dp.dp_flags & DL_NOTHREAD) {
- spl_debug_dumplog_internal(&dp);
- } else {
-
- tsk = spl_kthread_create(spl_debug_dumplog_thread,(void *)&dp,"spl_debug");
- if (tsk == NULL)
- return -ENOMEM;
-
- wake_up_process(tsk);
- wait_event(dp.dp_waitq, atomic_read(&dp.dp_done));
- }
-
- return 0;
-}
-EXPORT_SYMBOL(spl_debug_dumplog);
-
-static char *
-trace_get_console_buffer(void)
-{
- int cpu = get_cpu();
- int idx;
-
- if (in_irq()) {
- idx = 0;
- } else if (in_softirq()) {
- idx = 1;
- } else {
- idx = 2;
- }
-
- return trace_console_buffers[cpu][idx];
-}
-
-static void
-trace_put_console_buffer(char *buffer)
-{
- put_cpu();
-}
-
-static int
-trace_lock_tcd(struct trace_cpu_data *tcd)
-{
- __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);
-
- spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
-
- return 1;
-}
-
-static void
-trace_unlock_tcd(struct trace_cpu_data *tcd)
-{
- __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);
-
- spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
-}
-
-static struct trace_cpu_data *
-trace_get_tcd(void)
-{
- int cpu;
- struct trace_cpu_data *tcd;
-
- cpu = get_cpu();
- if (in_irq())
- tcd = &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
- else if (in_softirq())
- tcd = &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
- else
- tcd = &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
-
- trace_lock_tcd(tcd);
-
- return tcd;
-}
-
-static void
-trace_put_tcd (struct trace_cpu_data *tcd)
-{
- trace_unlock_tcd(tcd);
-
- put_cpu();
-}
-
-static void
-trace_set_debug_header(struct spl_debug_header *header, int subsys,
- int mask, const int line, unsigned long stack)
-{
- struct timeval tv;
-
- do_gettimeofday(&tv);
-
- header->ph_subsys = subsys;
- header->ph_mask = mask;
- header->ph_cpu_id = smp_processor_id();
- header->ph_sec = (__u32)tv.tv_sec;
- header->ph_usec = tv.tv_usec;
- header->ph_stack = stack;
- header->ph_pid = current->pid;
- header->ph_line_num = line;
-
- return;
-}
-
-static void
-trace_print_to_console(struct spl_debug_header *hdr, int mask, const char *buf,
- int len, const char *file, const char *fn)
-{
- char *prefix = "SPL", *ptype = NULL;
-
- if ((mask & SD_EMERG) != 0) {
- prefix = "SPLError";
- ptype = KERN_EMERG;
- } else if ((mask & SD_ERROR) != 0) {
- prefix = "SPLError";
- ptype = KERN_ERR;
- } else if ((mask & SD_WARNING) != 0) {
- prefix = "SPL";
- ptype = KERN_WARNING;
- } else if ((mask & (SD_CONSOLE | spl_debug_printk)) != 0) {
- prefix = "SPL";
- ptype = KERN_INFO;
- }
-
- if ((mask & SD_CONSOLE) != 0) {
- printk("%s%s: %.*s", ptype, prefix, len, buf);
- } else {
- printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
- hdr->ph_pid, hdr->ph_stack, file,
- hdr->ph_line_num, fn, len, buf);
- }
-
- return;
-}
-
-static int
-trace_max_debug_mb(void)
-{
- return MAX(512, ((totalram_pages >> (20 - PAGE_SHIFT)) * 80) / 100);
-}
-
-static struct trace_page *
-tage_alloc(int gfp)
-{
- struct page *page;
- struct trace_page *tage;
-
- page = alloc_pages(gfp | __GFP_NOWARN, 0);
- if (page == NULL)
- return NULL;
-
- tage = kmalloc(sizeof(*tage), gfp);
- if (tage == NULL) {
- __free_pages(page, 0);
- return NULL;
- }
-
- tage->page = page;
- atomic_inc(&trace_tage_allocated);
-
- return tage;
-}
-
-static void
-tage_free(struct trace_page *tage)
-{
- __ASSERT(tage != NULL);
- __ASSERT(tage->page != NULL);
-
- __free_pages(tage->page, 0);
- kfree(tage);
- atomic_dec(&trace_tage_allocated);
-}
-
-static struct trace_page *
-tage_from_list(struct list_head *list)
-{
- return list_entry(list, struct trace_page, linkage);
-}
-
-static void
-tage_to_tail(struct trace_page *tage, struct list_head *queue)
-{
- __ASSERT(tage != NULL);
- __ASSERT(queue != NULL);
-
- list_move_tail(&tage->linkage, queue);
-}
-
-/* try to return a page that has 'len' bytes left at the end */
-static struct trace_page *
-trace_get_tage_try(struct trace_cpu_data *tcd, unsigned long len)
-{
- struct trace_page *tage;
-
- if (tcd->tcd_cur_pages > 0) {
- __ASSERT(!list_empty(&tcd->tcd_pages));
- tage = tage_from_list(tcd->tcd_pages.prev);
- if (tage->used + len <= PAGE_SIZE)
- return tage;
- }
-
- if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
- if (tcd->tcd_cur_stock_pages > 0) {
- tage = tage_from_list(tcd->tcd_stock_pages.prev);
- tcd->tcd_cur_stock_pages--;
- list_del_init(&tage->linkage);
- } else {
- tage = tage_alloc(GFP_ATOMIC);
- if (tage == NULL) {
- printk(KERN_WARNING
- "failure to allocate a tage (%ld)\n",
- tcd->tcd_cur_pages);
- return NULL;
- }
- }
-
- tage->used = 0;
- tage->cpu = smp_processor_id();
- tage->type = tcd->tcd_type;
- list_add_tail(&tage->linkage, &tcd->tcd_pages);
- tcd->tcd_cur_pages++;
-
- return tage;
- }
-
- return NULL;
-}
-
-/* return a page that has 'len' bytes left at the end */
-static struct trace_page *
-trace_get_tage(struct trace_cpu_data *tcd, unsigned long len)
-{
- struct trace_page *tage;
-
- __ASSERT(len <= PAGE_SIZE);
-
- tage = trace_get_tage_try(tcd, len);
- if (tage)
- return tage;
-
- if (tcd->tcd_cur_pages > 0) {
- tage = tage_from_list(tcd->tcd_pages.next);
- tage->used = 0;
- tage_to_tail(tage, &tcd->tcd_pages);
- }
-
- return tage;
-}
-
-int
-spl_debug_msg(void *arg, int subsys, int mask, const char *file,
- const char *fn, const int line, const char *format, ...)
-{
- spl_debug_limit_state_t *cdls = arg;
- struct trace_cpu_data *tcd = NULL;
- struct spl_debug_header header = { 0, };
- struct trace_page *tage;
- /* string_buf is used only if tcd != NULL, and is always set then */
- char *string_buf = NULL;
- char *debug_buf;
- int known_size;
- int needed = 85; /* average message length */
- int max_nob;
- va_list ap;
- int i;
-
- if (subsys == 0)
- subsys = SS_DEBUG_SUBSYS;
-
- if (mask == 0)
- mask = SD_EMERG;
-
- if (strchr(file, '/'))
- file = strrchr(file, '/') + 1;
-
- tcd = trace_get_tcd();
- trace_set_debug_header(&header, subsys, mask, line, 0);
- if (tcd == NULL)
- goto console;
-
- if (tcd->tcd_shutting_down) {
- trace_put_tcd(tcd);
- tcd = NULL;
- goto console;
- }
-
- known_size = strlen(file) + 1;
- if (fn)
- known_size += strlen(fn) + 1;
-
- if (spl_debug_binary)
- known_size += sizeof(header);
-
- /* '2' used because vsnprintf returns real size required for output
- * _without_ terminating NULL. */
- for (i = 0; i < 2; i++) {
- tage = trace_get_tage(tcd, needed + known_size + 1);
- if (tage == NULL) {
- if (needed + known_size > PAGE_SIZE)
- mask |= SD_ERROR;
-
- trace_put_tcd(tcd);
- tcd = NULL;
- goto console;
- }
-
- string_buf = (char *)page_address(tage->page) +
- tage->used + known_size;
-
- max_nob = PAGE_SIZE - tage->used - known_size;
- if (max_nob <= 0) {
- printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
- mask |= SD_ERROR;
- trace_put_tcd(tcd);
- tcd = NULL;
- goto console;
- }
-
- needed = 0;
- if (format) {
- va_start(ap, format);
- needed += vsnprintf(string_buf, max_nob, format, ap);
- va_end(ap);
- }
-
- if (needed < max_nob)
- break;
- }
-
- header.ph_len = known_size + needed;
- debug_buf = (char *)page_address(tage->page) + tage->used;
-
- if (spl_debug_binary) {
- memcpy(debug_buf, &header, sizeof(header));
- tage->used += sizeof(header);
- debug_buf += sizeof(header);
- }
-
- strcpy(debug_buf, file);
- tage->used += strlen(file) + 1;
- debug_buf += strlen(file) + 1;
-
- if (fn) {
- strcpy(debug_buf, fn);
- tage->used += strlen(fn) + 1;
- debug_buf += strlen(fn) + 1;
- }
-
- __ASSERT(debug_buf == string_buf);
-
- tage->used += needed;
- __ASSERT (tage->used <= PAGE_SIZE);
-
-console:
- if ((mask & spl_debug_printk) == 0) {
- /* no console output requested */
- if (tcd != NULL)
- trace_put_tcd(tcd);
- return 1;
- }
-
- if (cdls != NULL) {
- if (spl_console_ratelimit && cdls->cdls_next != 0 &&
- !time_before(cdls->cdls_next, jiffies)) {
- /* skipping a console message */
- cdls->cdls_count++;
- if (tcd != NULL)
- trace_put_tcd(tcd);
- return 1;
- }
-
- if (time_before(cdls->cdls_next + spl_console_max_delay +
- (10 * HZ), jiffies)) {
- /* last timeout was a long time ago */
- cdls->cdls_delay /= spl_console_backoff * 4;
- } else {
- cdls->cdls_delay *= spl_console_backoff;
-
- if (cdls->cdls_delay < spl_console_min_delay)
- cdls->cdls_delay = spl_console_min_delay;
- else if (cdls->cdls_delay > spl_console_max_delay)
- cdls->cdls_delay = spl_console_max_delay;
- }
-
- /* ensure cdls_next is never zero after it's been seen */
- cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
- }
-
- if (tcd != NULL) {
- trace_print_to_console(&header, mask, string_buf, needed, file, fn);
- trace_put_tcd(tcd);
- } else {
- string_buf = trace_get_console_buffer();
-
- needed = 0;
- if (format != NULL) {
- va_start(ap, format);
- needed += vsnprintf(string_buf,
- TRACE_CONSOLE_BUFFER_SIZE, format, ap);
- va_end(ap);
- }
- trace_print_to_console(&header, mask,
- string_buf, needed, file, fn);
-
- trace_put_console_buffer(string_buf);
- }
-
- if (cdls != NULL && cdls->cdls_count != 0) {
- string_buf = trace_get_console_buffer();
-
- needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
- "Skipped %d previous similar message%s\n",
- cdls->cdls_count, (cdls->cdls_count > 1) ? "s" : "");
-
- trace_print_to_console(&header, mask,
- string_buf, needed, file, fn);
-
- trace_put_console_buffer(string_buf);
- cdls->cdls_count = 0;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(spl_debug_msg);
-
-/* Do the collect_pages job on a single CPU: assumes that all other
- * CPUs have been stopped during a panic. If this isn't true for
- * some arch, this will have to be implemented separately in each arch.
- */
-static void
-collect_pages_from_single_cpu(struct page_collection *pc)
-{
- struct trace_cpu_data *tcd;
- int i, j;
-
- tcd_for_each(tcd, i, j) {
- list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
- tcd->tcd_cur_pages = 0;
- }
-}
-
-static void
-collect_pages_on_all_cpus(struct page_collection *pc)
-{
- struct trace_cpu_data *tcd;
- int i, cpu;
-
- spin_lock(&pc->pc_lock);
- for_each_possible_cpu(cpu) {
- tcd_for_each_type_lock(tcd, i, cpu) {
- list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
- tcd->tcd_cur_pages = 0;
- }
- }
- spin_unlock(&pc->pc_lock);
-}
-
-static void
-collect_pages(dumplog_priv_t *dp, struct page_collection *pc)
-{
- INIT_LIST_HEAD(&pc->pc_pages);
-
- if (spl_panic_in_progress || dp->dp_flags & DL_SINGLE_CPU)
- collect_pages_from_single_cpu(pc);
- else
- collect_pages_on_all_cpus(pc);
-}
-
-static void
-put_pages_back_on_all_cpus(struct page_collection *pc)
-{
- struct trace_cpu_data *tcd;
- struct list_head *cur_head;
- struct trace_page *tage;
- struct trace_page *tmp;
- int i, cpu;
-
- spin_lock(&pc->pc_lock);
-
- for_each_possible_cpu(cpu) {
- tcd_for_each_type_lock(tcd, i, cpu) {
- cur_head = tcd->tcd_pages.next;
-
- list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
- linkage) {
- if (tage->cpu != cpu || tage->type != i)
- continue;
-
- tage_to_tail(tage, cur_head);
- tcd->tcd_cur_pages++;
- }
- }
- }
-
- spin_unlock(&pc->pc_lock);
-}
-
-static void
-put_pages_back(struct page_collection *pc)
-{
- if (!spl_panic_in_progress)
- put_pages_back_on_all_cpus(pc);
-}
-
-static int
-spl_debug_dump_all_pages(dumplog_priv_t *dp, char *filename)
-{
- struct page_collection pc;
- struct file *filp;
- struct trace_page *tage;
- struct trace_page *tmp;
- mm_segment_t oldfs;
- int rc = 0;
-
- down_write(&trace_sem);
-
- filp = spl_filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE,
- 0600, &rc);
- if (filp == NULL) {
- if (rc != -EEXIST)
- printk(KERN_ERR "SPL: Can't open %s for dump: %d\n",
- filename, rc);
- goto out;
- }
-
- spin_lock_init(&pc.pc_lock);
- collect_pages(dp, &pc);
- if (list_empty(&pc.pc_pages)) {
- rc = 0;
- goto close;
- }
-
- oldfs = get_fs();
- set_fs(get_ds());
-
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
- rc = spl_filp_write(filp, page_address(tage->page),
- tage->used, spl_filp_poff(filp));
- if (rc != (int)tage->used) {
- printk(KERN_WARNING "SPL: Wanted to write %u "
- "but wrote %d\n", tage->used, rc);
- put_pages_back(&pc);
- __ASSERT(list_empty(&pc.pc_pages));
- break;
- }
- list_del(&tage->linkage);
- tage_free(tage);
- }
-
- set_fs(oldfs);
-
- rc = spl_filp_fsync(filp, 1);
- if (rc)
- printk(KERN_ERR "SPL: Unable to sync: %d\n", rc);
- close:
- spl_filp_close(filp);
- out:
- up_write(&trace_sem);
-
- return rc;
-}
-
-static void
-spl_debug_flush_pages(void)
-{
- dumplog_priv_t dp;
- struct page_collection pc;
- struct trace_page *tage;
- struct trace_page *tmp;
-
- spin_lock_init(&pc.pc_lock);
- init_waitqueue_head(&dp.dp_waitq);
- dp.dp_pid = current->pid;
- dp.dp_flags = 0;
- atomic_set(&dp.dp_done, 0);
-
- collect_pages(&dp, &pc);
- list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
- list_del(&tage->linkage);
- tage_free(tage);
- }
-}
-
-unsigned long
-spl_debug_set_mask(unsigned long mask) {
- spl_debug_mask = mask;
- return 0;
-}
-EXPORT_SYMBOL(spl_debug_set_mask);
-
-unsigned long
-spl_debug_get_mask(void) {
- return spl_debug_mask;
-}
-EXPORT_SYMBOL(spl_debug_get_mask);
-
-unsigned long
-spl_debug_set_subsys(unsigned long subsys) {
- spl_debug_subsys = subsys;
- return 0;
-}
-EXPORT_SYMBOL(spl_debug_set_subsys);
-
-unsigned long
-spl_debug_get_subsys(void) {
- return spl_debug_subsys;
-}
-EXPORT_SYMBOL(spl_debug_get_subsys);
-
-int
-spl_debug_set_mb(int mb)
-{
- int i, j, pages;
- int limit = trace_max_debug_mb();
- struct trace_cpu_data *tcd;
-
- if (mb < num_possible_cpus()) {
- printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
- "%dMB - lower limit is %d\n", mb, num_possible_cpus());
- return -EINVAL;
- }
-
- if (mb > limit) {
- printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
- "%dMB - upper limit is %d\n", mb, limit);
- return -EINVAL;
- }
-
- mb /= num_possible_cpus();
- pages = mb << (20 - PAGE_SHIFT);
-
- down_write(&trace_sem);
-
- tcd_for_each(tcd, i, j)
- tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
-
- up_write(&trace_sem);
-
- return 0;
-}
-EXPORT_SYMBOL(spl_debug_set_mb);
-
-int
-spl_debug_get_mb(void)
-{
- int i, j;
- struct trace_cpu_data *tcd;
- int total_pages = 0;
-
- down_read(&trace_sem);
-
- tcd_for_each(tcd, i, j)
- total_pages += tcd->tcd_max_pages;
-
- up_read(&trace_sem);
-
- return (total_pages >> (20 - PAGE_SHIFT)) + 1;
-}
-EXPORT_SYMBOL(spl_debug_get_mb);
-
-/*
- * Limit the number of stack traces dumped to not more than 5 every
- * 60 seconds to prevent denial-of-service attacks from debug code.
- */
-DEFINE_RATELIMIT_STATE(dumpstack_ratelimit_state, 60 * HZ, 5);
-
-void
-spl_debug_dumpstack(struct task_struct *tsk)
-{
- if (__ratelimit(&dumpstack_ratelimit_state)) {
- if (tsk == NULL)
- tsk = current;
-
- printk("SPL: Showing stack for process %d\n", tsk->pid);
- dump_stack();
- }
-}
-EXPORT_SYMBOL(spl_debug_dumpstack);
-
-void spl_debug_bug(char *file, const char *func, const int line, int flags)
-{
- spl_debug_catastrophe = 1;
- spl_debug_msg(NULL, 0, SD_EMERG, file, func, line, "SPL PANIC\n");
-
- if (in_interrupt())
- panic("SPL PANIC in interrupt.\n");
-
- if (in_atomic() || irqs_disabled())
- flags |= DL_NOTHREAD;
-
-	/* Ensure all debug pages are dumped by the current cpu */
- if (spl_debug_panic_on_bug)
- spl_panic_in_progress = 1;
-
- spl_debug_dumpstack(NULL);
-
- if (spl_debug_panic_on_bug) {
- spl_debug_dumplog(flags);
- panic("SPL PANIC");
- }
-
- set_task_state(current, TASK_UNINTERRUPTIBLE);
- while (1)
- schedule();
-}
-EXPORT_SYMBOL(spl_debug_bug);
-
-int
-spl_debug_clear_buffer(void)
-{
- spl_debug_flush_pages();
- return 0;
-}
-EXPORT_SYMBOL(spl_debug_clear_buffer);
-
-int
-spl_debug_mark_buffer(char *text)
-{
- SDEBUG(SD_WARNING, "*************************************\n");
- SDEBUG(SD_WARNING, "DEBUG MARKER: %s\n", text);
- SDEBUG(SD_WARNING, "*************************************\n");
-
- return 0;
-}
-EXPORT_SYMBOL(spl_debug_mark_buffer);
-
-static int
-trace_init(int max_pages)
-{
- struct trace_cpu_data *tcd;
- int i, j;
-
- init_rwsem(&trace_sem);
-
- /* initialize trace_data */
- memset(trace_data, 0, sizeof(trace_data));
- for (i = 0; i < TCD_TYPE_MAX; i++) {
- trace_data[i] = kmalloc(sizeof(union trace_data_union) *
- NR_CPUS, GFP_KERNEL);
- if (trace_data[i] == NULL)
- goto out;
- }
-
- tcd_for_each(tcd, i, j) {
- spin_lock_init(&tcd->tcd_lock);
- tcd->tcd_pages_factor = pages_factor[i];
- tcd->tcd_type = i;
- tcd->tcd_cpu = j;
- INIT_LIST_HEAD(&tcd->tcd_pages);
- INIT_LIST_HEAD(&tcd->tcd_stock_pages);
- tcd->tcd_cur_pages = 0;
- tcd->tcd_cur_stock_pages = 0;
- tcd->tcd_max_pages = (max_pages * pages_factor[i]) / 100;
- tcd->tcd_shutting_down = 0;
- }
-
- for (i = 0; i < num_possible_cpus(); i++) {
- for (j = 0; j < 3; j++) {
- trace_console_buffers[i][j] =
- kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
- GFP_KERNEL);
-
- if (trace_console_buffers[i][j] == NULL)
- goto out;
- }
- }
-
- return 0;
-out:
- trace_fini();
- printk(KERN_ERR "SPL: Insufficient memory for debug logs\n");
- return -ENOMEM;
-}
-
-int
-spl_debug_init(void)
-{
- int rc, max = spl_debug_mb;
-
- spl_console_max_delay = SPL_DEFAULT_MAX_DELAY;
- spl_console_min_delay = SPL_DEFAULT_MIN_DELAY;
-
-	/* If spl_debug_mb is set to an invalid value or left uninitialized,
-	 * then size the total buffer at smp_num_cpus * TCD_MAX_PAGES. */
- if (max > (totalram_pages >> (20 - 2 - PAGE_SHIFT)) / 5 ||
- max >= 512 || max < 0) {
- max = TCD_MAX_PAGES;
- } else {
- max = (max / num_online_cpus()) << (20 - PAGE_SHIFT);
- }
-
- rc = trace_init(max);
- if (rc)
- return rc;
-
- return rc;
-}
-
-static void
-trace_cleanup_on_all_cpus(void)
-{
- struct trace_cpu_data *tcd;
- struct trace_page *tage;
- struct trace_page *tmp;
- int i, cpu;
-
- for_each_possible_cpu(cpu) {
- tcd_for_each_type_lock(tcd, i, cpu) {
- tcd->tcd_shutting_down = 1;
-
- list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
- linkage) {
- list_del(&tage->linkage);
- tage_free(tage);
- }
- tcd->tcd_cur_pages = 0;
- }
- }
-}
-
-static void
-trace_fini(void)
-{
- int i, j;
-
- trace_cleanup_on_all_cpus();
-
- for (i = 0; i < num_possible_cpus(); i++) {
- for (j = 0; j < 3; j++) {
- if (trace_console_buffers[i][j] != NULL) {
- kfree(trace_console_buffers[i][j]);
- trace_console_buffers[i][j] = NULL;
- }
- }
- }
-
- for (i = 0; i < TCD_TYPE_MAX && trace_data[i] != NULL; i++) {
- kfree(trace_data[i]);
- trace_data[i] = NULL;
- }
-}
-
-void
-spl_debug_fini(void)
-{
- trace_fini();
-}
-
-#endif /* DEBUG_LOG */
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
-#include <spl-debug.h>
+#include <linux/ratelimit.h>
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
+/*
+ * Limit the number of stack traces dumped to not more than 5 every
+ * 60 seconds to prevent denial-of-service attacks from debug code.
+ */
+DEFINE_RATELIMIT_STATE(dumpstack_ratelimit_state, 60 * HZ, 5);
-#define SS_DEBUG_SUBSYS SS_GENERIC
-
-#ifdef DEBUG_LOG
-static char ce_prefix[CE_IGNORE][10] = { "", "NOTICE: ", "WARNING: ", "" };
-static char ce_suffix[CE_IGNORE][2] = { "", "\n", "\n", "" };
-#endif
+void
+spl_dumpstack(void)
+{
+ if (__ratelimit(&dumpstack_ratelimit_state)) {
+ printk("Showing stack for process %d\n", current->pid);
+ dump_stack();
+ }
+}
+EXPORT_SYMBOL(spl_dumpstack);
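For reference, a minimal sketch of the <linux/ratelimit.h> pattern adopted above; it is not part of the patch, and example_rs and example_log_event are illustrative names.

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* Allow at most 5 messages per 60-second window (illustrative values). */
static DEFINE_RATELIMIT_STATE(example_rs, 60 * HZ, 5);

static void
example_log_event(int err)
{
        /*
         * __ratelimit() returns nonzero while the burst budget remains;
         * further calls within the interval are silently suppressed.
         */
        if (__ratelimit(&example_rs))
                printk(KERN_WARNING "example: error %d\n", err);
}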
int
-spl_PANIC(char *filename, const char *functionname,
- int lineno, const char *fmt, ...) {
+spl_panic(const char *file, const char *func, int line, const char *fmt, ...) {
+ const char *newfile;
char msg[MAXMSGLEN];
va_list ap;
+ newfile = strrchr(file, '/');
+ if (newfile != NULL)
+ newfile = newfile + 1;
+ else
+ newfile = file;
+
va_start(ap, fmt);
- if (vsnprintf(msg, sizeof (msg), fmt, ap) == sizeof (msg))
- msg[sizeof (msg) - 1] = '\0';
+ (void) vsnprintf(msg, sizeof (msg), fmt, ap);
va_end(ap);
-#ifdef NDEBUG
+
printk(KERN_EMERG "%s", msg);
-#else
- spl_debug_msg(NULL, 0, 0,
- filename, functionname, lineno, "%s", msg);
-#endif
- spl_debug_bug(filename, functionname, lineno, 0);
- return 1;
-}
-EXPORT_SYMBOL(spl_PANIC);
+ printk(KERN_EMERG "PANIC at %s:%d:%s()\n", newfile, line, func);
+ spl_dumpstack();
-void
-vpanic(const char *fmt, va_list ap)
-{
- char msg[MAXMSGLEN];
+ /* Halt the thread to facilitate further debugging */
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
+ while (1)
+ schedule();
- vsnprintf(msg, MAXMSGLEN - 1, fmt, ap);
- PANIC("%s", msg);
-} /* vpanic() */
-EXPORT_SYMBOL(vpanic);
+ /* Unreachable */
+ return (1);
+}
+EXPORT_SYMBOL(spl_panic);
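Callers are expected to capture the file, function, and line at the call site. A hedged sketch of such a wrapper is shown below; VERIFY_EXAMPLE is an illustrative name, not the assertion macro the SPL headers actually define.

/* Illustrative wrapper; the real assertion macros live in the SPL headers. */
#define VERIFY_EXAMPLE(cond)                                            \
        do {                                                            \
                if (!(cond))                                            \
                        spl_panic(__FILE__, __func__, __LINE__,         \
                            "VERIFY(%s) failed\n", #cond);              \
        } while (0)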
void
vcmn_err(int ce, const char *fmt, va_list ap)
{
char msg[MAXMSGLEN];
- if (ce == CE_PANIC)
- vpanic(fmt, ap);
+ vsnprintf(msg, MAXMSGLEN - 1, fmt, ap);
- if (ce != CE_NOTE) {
- vsnprintf(msg, MAXMSGLEN - 1, fmt, ap);
+ switch (ce) {
+ case CE_IGNORE:
+ break;
+ case CE_CONT:
+ printk("%s", msg);
+ break;
+ case CE_NOTE:
+ printk(KERN_NOTICE "NOTICE: %s\n", msg);
+ break;
+ case CE_WARN:
+ printk(KERN_WARNING "WARNING: %s\n", msg);
+ break;
+ case CE_PANIC:
+ printk(KERN_EMERG "PANIC: %s\n", msg);
+ spl_dumpstack();
- if (fmt[0] == '!')
- SDEBUG(SD_INFO, "%s%s%s",
- ce_prefix[ce], msg, ce_suffix[ce]);
- else
- SERROR("%s%s%s", ce_prefix[ce], msg, ce_suffix[ce]);
+ /* Halt the thread to facilitate further debugging */
+ set_task_state(current, TASK_UNINTERRUPTIBLE);
+ while (1)
+ schedule();
}
} /* vcmn_err() */
EXPORT_SYMBOL(vcmn_err);
va_end(ap);
} /* cmn_err() */
EXPORT_SYMBOL(cmn_err);
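A hedged usage sketch of the interface above: the CE_* level selects the severity and prefix applied by vcmn_err(). example_report and example_rc are illustrative names.

#include <sys/cmn_err.h>

static void
example_report(int example_rc)
{
        if (example_rc != 0)
                cmn_err(CE_WARN, "operation failed, rc = %d", example_rc);
        else
                cmn_err(CE_NOTE, "operation completed");
}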
-
#include <sys/file.h>
#include <linux/kmod.h>
#include <linux/proc_compat.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_GENERIC
char spl_version[32] = "SPL v" SPL_META_VERSION "-" SPL_META_RELEASE;
EXPORT_SYMBOL(spl_version);
{
int rc = 0;
- if ((rc = spl_debug_init()))
- return rc;
-
if ((rc = spl_kmem_init()))
- SGOTO(out1, rc);
+ goto out1;
if ((rc = spl_mutex_init()))
- SGOTO(out2, rc);
+ goto out2;
if ((rc = spl_rw_init()))
- SGOTO(out3, rc);
+ goto out3;
if ((rc = spl_taskq_init()))
- SGOTO(out4, rc);
+ goto out4;
if ((rc = spl_vn_init()))
- SGOTO(out5, rc);
+ goto out5;
if ((rc = spl_proc_init()))
- SGOTO(out6, rc);
+ goto out6;
if ((rc = spl_kstat_init()))
- SGOTO(out7, rc);
+ goto out7;
if ((rc = spl_tsd_init()))
- SGOTO(out8, rc);
+ goto out8;
if ((rc = spl_zlib_init()))
- SGOTO(out9, rc);
+ goto out9;
printk(KERN_NOTICE "SPL: Loaded module v%s-%s%s\n", SPL_META_VERSION,
SPL_META_RELEASE, SPL_DEBUG_STR);
- SRETURN(rc);
+ return (rc);
out9:
spl_tsd_fini();
out2:
spl_kmem_fini();
out1:
- spl_debug_fini();
-
printk(KERN_NOTICE "SPL: Failed to Load Solaris Porting Layer "
"v%s-%s%s, rc = %d\n", SPL_META_VERSION, SPL_META_RELEASE,
SPL_DEBUG_STR, rc);
+
return rc;
}
static void
spl_fini(void)
{
- SENTRY;
-
printk(KERN_NOTICE "SPL: Unloaded module v%s-%s%s\n",
SPL_META_VERSION, SPL_META_RELEASE, SPL_DEBUG_STR);
spl_zlib_fini();
spl_rw_fini();
spl_mutex_fini();
spl_kmem_fini();
- spl_debug_fini();
}
/* Called when a dependent module is loaded */
\*****************************************************************************/
#include <sys/kmem.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_KMEM
/*
* Within the scope of spl-kmem.c file the kmem_cache_* definitions
struct hlist_node *node;
struct kmem_debug *p;
unsigned long flags;
- SENTRY;
spin_lock_irqsave(lock, flags);
spin_unlock_irqrestore(lock, flags);
- SRETURN(NULL);
+ return (NULL);
}
void *
void *ptr = NULL;
kmem_debug_t *dptr;
unsigned long irq_flags;
- SENTRY;
/* Function may be called with KM_NOSLEEP so failure is possible */
dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
flags & ~__GFP_ZERO);
if (unlikely(dptr == NULL)) {
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
- "kmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
- sizeof(kmem_debug_t), flags, func, line,
- kmem_alloc_used_read(), kmem_alloc_max);
+ printk(KERN_WARNING "debug kmem_alloc(%ld, 0x%x) at %s:%d "
+ "failed (%lld/%llu)\n", sizeof(kmem_debug_t), flags,
+ func, line, kmem_alloc_used_read(), kmem_alloc_max);
} else {
/*
* Marked unlikely because we should never be doing this,
 * we tolerate up to 2 pages but a single page is best.
*/
if (unlikely((size > PAGE_SIZE*2) && !(flags & KM_NODEBUG))) {
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "large "
- "kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
- (unsigned long long) size, flags, func, line,
+ printk(KERN_WARNING "large kmem_alloc(%llu, 0x%x) "
+ "at %s:%d failed (%lld/%llu)\n",
+ (unsigned long long)size, flags, func, line,
kmem_alloc_used_read(), kmem_alloc_max);
- spl_debug_dumpstack(NULL);
+ spl_dumpstack();
}
/*
dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
if (unlikely(dptr->kd_func == NULL)) {
kfree(dptr);
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
- "debug __strdup() at %s:%d failed (%lld/%llu)\n",
- func, line, kmem_alloc_used_read(), kmem_alloc_max);
+ printk(KERN_WARNING "debug __strdup() at %s:%d "
+ "failed (%lld/%llu)\n", func, line,
+ kmem_alloc_used_read(), kmem_alloc_max);
goto out;
}
if (unlikely(ptr == NULL)) {
kfree(dptr->kd_func);
kfree(dptr);
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "kmem_alloc"
- "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ printk(KERN_WARNING "kmem_alloc(%llu, 0x%x) "
+ "at %s:%d failed (%lld/%llu)\n",
(unsigned long long) size, flags, func, line,
kmem_alloc_used_read(), kmem_alloc_max);
goto out;
&kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
list_add_tail(&dptr->kd_list, &kmem_list);
spin_unlock_irqrestore(&kmem_lock, irq_flags);
-
- SDEBUG_LIMIT(SD_INFO,
- "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
- (unsigned long long) size, flags, func, line, ptr,
- kmem_alloc_used_read(), kmem_alloc_max);
}
out:
- SRETURN(ptr);
+ return (ptr);
}
EXPORT_SYMBOL(kmem_alloc_track);
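The "tolerate up to 2 pages" warning above can be suppressed by callers that knowingly make large allocations. A minimal sketch follows, assuming the KM_NODEBUG flag already used elsewhere in this patch; example_alloc_big is an illustrative name.

#include <sys/kmem.h>

static void *
example_alloc_big(size_t size)
{
        /* KM_NODEBUG suppresses the large-allocation warning above. */
        return (kmem_alloc(size, KM_SLEEP | KM_NODEBUG));
}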
kmem_free_track(const void *ptr, size_t size)
{
kmem_debug_t *dptr;
- SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
- dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
-
/* Must exist in hash due to kmem_alloc() */
+ dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
ASSERT(dptr);
/* Size must match */
(unsigned long long) size, dptr->kd_func, dptr->kd_line);
kmem_alloc_used_sub(size);
- SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
- (unsigned long long) size, kmem_alloc_used_read(),
- kmem_alloc_max);
-
kfree(dptr->kd_func);
memset((void *)dptr, 0x5a, sizeof(kmem_debug_t));
memset((void *)ptr, 0x5a, size);
kfree(ptr);
-
- SEXIT;
}
EXPORT_SYMBOL(kmem_free_track);
void *ptr = NULL;
kmem_debug_t *dptr;
unsigned long irq_flags;
- SENTRY;
ASSERT(flags & KM_SLEEP);
dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
flags & ~__GFP_ZERO);
if (unlikely(dptr == NULL)) {
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
- "vmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ printk(KERN_WARNING "debug vmem_alloc(%ld, 0x%x) "
+ "at %s:%d failed (%lld/%llu)\n",
sizeof(kmem_debug_t), flags, func, line,
vmem_alloc_used_read(), vmem_alloc_max);
} else {
dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
if (unlikely(dptr->kd_func == NULL)) {
kfree(dptr);
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
- "debug __strdup() at %s:%d failed (%lld/%llu)\n",
- func, line, vmem_alloc_used_read(), vmem_alloc_max);
+ printk(KERN_WARNING "debug __strdup() at %s:%d "
+ "failed (%lld/%llu)\n", func, line,
+ vmem_alloc_used_read(), vmem_alloc_max);
goto out;
}
if (unlikely(ptr == NULL)) {
kfree(dptr->kd_func);
kfree(dptr);
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "vmem_alloc"
- "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
+ printk(KERN_WARNING "vmem_alloc (%llu, 0x%x) "
+ "at %s:%d failed (%lld/%llu)\n",
(unsigned long long) size, flags, func, line,
vmem_alloc_used_read(), vmem_alloc_max);
goto out;
&vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]);
list_add_tail(&dptr->kd_list, &vmem_list);
spin_unlock_irqrestore(&vmem_lock, irq_flags);
-
- SDEBUG_LIMIT(SD_INFO,
- "vmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
- (unsigned long long) size, flags, func, line,
- ptr, vmem_alloc_used_read(), vmem_alloc_max);
}
out:
- SRETURN(ptr);
+ return (ptr);
}
EXPORT_SYMBOL(vmem_alloc_track);
vmem_free_track(const void *ptr, size_t size)
{
kmem_debug_t *dptr;
- SENTRY;
ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
(unsigned long long) size);
- dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
-
/* Must exist in hash due to vmem_alloc() */
+ dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
ASSERT(dptr);
/* Size must match */
(unsigned long long) size, dptr->kd_func, dptr->kd_line);
vmem_alloc_used_sub(size);
- SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
- (unsigned long long) size, vmem_alloc_used_read(),
- vmem_alloc_max);
-
kfree(dptr->kd_func);
memset((void *)dptr, 0x5a, sizeof(kmem_debug_t));
memset((void *)ptr, 0x5a, size);
vfree(ptr);
-
- SEXIT;
}
EXPORT_SYMBOL(vmem_free_track);
int node_alloc, int node)
{
void *ptr;
- SENTRY;
/*
* Marked unlikely because we should never be doing this,
 * we tolerate up to 2 pages but a single page is best.
*/
if (unlikely((size > PAGE_SIZE * 2) && !(flags & KM_NODEBUG))) {
- SDEBUG(SD_CONSOLE | SD_WARNING,
+ printk(KERN_WARNING
"large kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
- (unsigned long long) size, flags, func, line,
- kmem_alloc_used_read(), kmem_alloc_max);
- spl_debug_dumpstack(NULL);
+ (unsigned long long)size, flags, func, line,
+ (unsigned long long)kmem_alloc_used_read(), kmem_alloc_max);
+ spl_dumpstack();
}
/* Use the correct allocator */
}
if (unlikely(ptr == NULL)) {
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ printk(KERN_WARNING
"kmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
- (unsigned long long) size, flags, func, line,
- kmem_alloc_used_read(), kmem_alloc_max);
+ (unsigned long long)size, flags, func, line,
+ (unsigned long long)kmem_alloc_used_read(), kmem_alloc_max);
} else {
kmem_alloc_used_add(size);
if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
kmem_alloc_max = kmem_alloc_used_read();
-
- SDEBUG_LIMIT(SD_INFO,
- "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
- (unsigned long long) size, flags, func, line, ptr,
- kmem_alloc_used_read(), kmem_alloc_max);
}
- SRETURN(ptr);
+ return (ptr);
}
EXPORT_SYMBOL(kmem_alloc_debug);
void
kmem_free_debug(const void *ptr, size_t size)
{
- SENTRY;
-
- ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
- (unsigned long long) size);
-
+ ASSERT(ptr || size > 0);
kmem_alloc_used_sub(size);
- SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
- (unsigned long long) size, kmem_alloc_used_read(),
- kmem_alloc_max);
kfree(ptr);
-
- SEXIT;
}
EXPORT_SYMBOL(kmem_free_debug);
vmem_alloc_debug(size_t size, int flags, const char *func, int line)
{
void *ptr;
- SENTRY;
ASSERT(flags & KM_SLEEP);
}
if (unlikely(ptr == NULL)) {
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
+ printk(KERN_WARNING
"vmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
- (unsigned long long) size, flags, func, line,
- vmem_alloc_used_read(), vmem_alloc_max);
+ (unsigned long long)size, flags, func, line,
+ (unsigned long long)vmem_alloc_used_read(), vmem_alloc_max);
} else {
vmem_alloc_used_add(size);
if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
vmem_alloc_max = vmem_alloc_used_read();
-
- SDEBUG_LIMIT(SD_INFO, "vmem_alloc(%llu, 0x%x) = %p "
- "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
- vmem_alloc_used_read(), vmem_alloc_max);
}
- SRETURN(ptr);
+ return (ptr);
}
EXPORT_SYMBOL(vmem_alloc_debug);
void
vmem_free_debug(const void *ptr, size_t size)
{
- SENTRY;
-
- ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
- (unsigned long long) size);
-
+ ASSERT(ptr || size > 0);
vmem_alloc_used_sub(size);
- SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
- (unsigned long long) size, vmem_alloc_used_read(),
- vmem_alloc_max);
vfree(ptr);
-
- SEXIT;
}
EXPORT_SYMBOL(vmem_free_debug);
base = kv_alloc(skc, skc->skc_slab_size, flags);
if (base == NULL)
- SRETURN(NULL);
+ return (NULL);
sks = (spl_kmem_slab_t *)base;
sks->sks_magic = SKS_MAGIC;
for (i = 0; i < sks->sks_objs; i++) {
if (skc->skc_flags & KMC_OFFSLAB) {
obj = kv_alloc(skc, offslab_size, flags);
- if (!obj)
- SGOTO(out, rc = -ENOMEM);
+ if (!obj) {
+ rc = -ENOMEM;
+ goto out;
+ }
} else {
obj = base + spl_sks_size(skc) + (i * obj_size);
}
sks = NULL;
}
- SRETURN(sks);
+ return (sks);
}
/*
struct list_head *sks_list, struct list_head *sko_list)
{
spl_kmem_cache_t *skc;
- SENTRY;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_ref == 0);
list_del(&sks->sks_list);
list_add(&sks->sks_list, sks_list);
list_splice_init(&sks->sks_free_list, sko_list);
-
- SEXIT;
}
/*
LIST_HEAD(sko_list);
uint32_t size = 0;
int i = 0;
- SENTRY;
/*
* Move empty slabs and objects which have not been touched in
ASSERT(sks->sks_magic == SKS_MAGIC);
kv_free(skc, sks, skc->skc_slab_size);
}
-
- SEXIT;
}
static spl_kmem_emergency_t *
{
spl_kmem_emergency_t *ske;
int empty;
- SENTRY;
/* Last chance use a partial slab if one now exists */
spin_lock(&skc->skc_lock);
empty = list_empty(&skc->skc_partial_list);
spin_unlock(&skc->skc_lock);
if (!empty)
- SRETURN(-EEXIST);
+ return (-EEXIST);
ske = kmalloc(sizeof(*ske), flags);
if (ske == NULL)
- SRETURN(-ENOMEM);
+ return (-ENOMEM);
ske->ske_obj = kmalloc(skc->skc_obj_size, flags);
if (ske->ske_obj == NULL) {
kfree(ske);
- SRETURN(-ENOMEM);
+ return (-ENOMEM);
}
spin_lock(&skc->skc_lock);
if (unlikely(!empty)) {
kfree(ske->ske_obj);
kfree(ske);
- SRETURN(-EINVAL);
+ return (-EINVAL);
}
*obj = ske->ske_obj;
- SRETURN(0);
+ return (0);
}
/*
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
spl_kmem_emergency_t *ske;
- SENTRY;
spin_lock(&skc->skc_lock);
ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
spin_unlock(&skc->skc_lock);
if (unlikely(ske == NULL))
- SRETURN(-ENOENT);
+ return (-ENOENT);
kfree(ske->ske_obj);
kfree(ske);
- SRETURN(0);
+ return (0);
}
/*
__spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
int i, count = MIN(flush, skm->skm_avail);
- SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
skm->skm_avail -= count;
memmove(skm->skm_objs, &(skm->skm_objs[count]),
sizeof(void *) * skm->skm_avail);
-
- SEXIT;
}
static void
if (skc->skc_flags & KMC_OFFSLAB) {
*objs = spl_kmem_cache_obj_per_slab;
*size = P2ROUNDUP(sizeof(spl_kmem_slab_t), PAGE_SIZE);
- SRETURN(0);
+ return (0);
} else {
sks_size = spl_sks_size(skc);
obj_size = spl_obj_size(skc);
for (*size = PAGE_SIZE; *size <= max_size; *size *= 2) {
*objs = (*size - sks_size) / obj_size;
if (*objs >= spl_kmem_cache_obj_per_slab)
- SRETURN(0);
+ return (0);
}
/*
*size = max_size;
*objs = (*size - sks_size) / obj_size;
if (*objs >= (spl_kmem_cache_obj_per_slab_min))
- SRETURN(0);
+ return (0);
}
- SRETURN(-ENOSPC);
+ return (-ENOSPC);
}
/*
{
uint32_t obj_size = spl_obj_size(skc);
int size;
- SENTRY;
 /* Per-magazine sizes below assume a 4KiB page size */
if (obj_size > (PAGE_SIZE * 256))
else
size = 256;
- SRETURN(size);
+ return (size);
}
/*
spl_kmem_magazine_t *skm;
int size = sizeof(spl_kmem_magazine_t) +
sizeof(void *) * skc->skc_mag_size;
- SENTRY;
skm = kmem_alloc_node(size, KM_SLEEP, cpu_to_node(cpu));
if (skm) {
skm->skm_cpu = cpu;
}
- SRETURN(skm);
+ return (skm);
}
/*
int size = sizeof(spl_kmem_magazine_t) +
sizeof(void *) * skm->skm_size;
- SENTRY;
ASSERT(skm->skm_magic == SKM_MAGIC);
ASSERT(skm->skm_avail == 0);
kmem_free(skm, size);
- SEXIT;
}
/*
spl_magazine_create(spl_kmem_cache_t *skc)
{
int i;
- SENTRY;
if (skc->skc_flags & KMC_NOMAGAZINE)
- SRETURN(0);
+ return (0);
skc->skc_mag_size = spl_magazine_size(skc);
skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
for (i--; i >= 0; i--)
spl_magazine_free(skc->skc_mag[i]);
- SRETURN(-ENOMEM);
+ return (-ENOMEM);
}
}
- SRETURN(0);
+ return (0);
}
/*
{
spl_kmem_magazine_t *skm;
int i;
- SENTRY;
- if (skc->skc_flags & KMC_NOMAGAZINE) {
- SEXIT;
+ if (skc->skc_flags & KMC_NOMAGAZINE)
return;
- }
for_each_online_cpu(i) {
skm = skc->skc_mag[i];
spl_cache_flush(skc, skm, skm->skm_avail);
spl_magazine_free(skm);
}
-
- SEXIT;
}
/*
{
spl_kmem_cache_t *skc;
int rc;
- SENTRY;
- ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
- ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags);
- ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags);
+ /*
+ * Unsupported flags
+ */
+ ASSERT0(flags & KMC_NOMAGAZINE);
+ ASSERT0(flags & KMC_NOHASH);
+ ASSERT0(flags & KMC_QCACHE);
ASSERT(vmp == NULL);
might_sleep();
*/
skc = kmem_zalloc(sizeof(*skc), KM_SLEEP| KM_NODEBUG);
if (skc == NULL)
- SRETURN(NULL);
+ return (NULL);
skc->skc_magic = SKC_MAGIC;
skc->skc_name_size = strlen(name) + 1;
skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, KM_SLEEP);
if (skc->skc_name == NULL) {
kmem_free(skc, sizeof(*skc));
- SRETURN(NULL);
+ return (NULL);
}
strncpy(skc->skc_name, name, skc->skc_name_size);
rc = spl_slab_size(skc,
&skc->skc_slab_objs, &skc->skc_slab_size);
if (rc)
- SGOTO(out, rc);
+ goto out;
rc = spl_magazine_create(skc);
if (rc)
- SGOTO(out, rc);
+ goto out;
} else {
skc->skc_linux_cache = kmem_cache_create(
skc->skc_name, size, align, 0, NULL);
- if (skc->skc_linux_cache == NULL)
- SGOTO(out, rc = ENOMEM);
+ if (skc->skc_linux_cache == NULL) {
+ rc = ENOMEM;
+ goto out;
+ }
kmem_cache_set_allocflags(skc, __GFP_COMP);
skc->skc_flags |= KMC_NOMAGAZINE;
list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
up_write(&spl_kmem_cache_sem);
- SRETURN(skc);
+ return (skc);
out:
kmem_free(skc->skc_name, skc->skc_name_size);
kmem_free(skc, sizeof(*skc));
- SRETURN(NULL);
+ return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
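A hedged usage sketch of the cache interface created above, written against the consumer-facing kmem_cache_* names; example_obj_t and example_cache are illustrative, and the NULL constructor/destructor/reclaim arguments are an assumption about a trivial cache.

#include <sys/kmem.h>

typedef struct example_obj {
        int eo_field;
} example_obj_t;

static spl_kmem_cache_t *example_cache;

static void
example_cache_usage(void)
{
        example_obj_t *obj;

        example_cache = kmem_cache_create("example_cache",
            sizeof (example_obj_t), 0, NULL, NULL, NULL, NULL, NULL, 0);

        obj = kmem_cache_alloc(example_cache, KM_SLEEP);
        obj->eo_field = 1;
        kmem_cache_free(example_cache, obj);

        kmem_cache_destroy(example_cache);
}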
{
DECLARE_WAIT_QUEUE_HEAD(wq);
taskqid_t id;
- SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB));
spin_unlock(&skc->skc_lock);
kmem_free(skc, sizeof(*skc));
-
- SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
int remaining, rc;
- SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
TASK_UNINTERRUPTIBLE);
- SRETURN(rc ? rc : -EAGAIN);
+ return (rc ? rc : -EAGAIN);
}
/*
if (ska == NULL) {
clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
wake_up_all(&skc->skc_waitq);
- SRETURN(-ENOMEM);
+ return (-ENOMEM);
}
atomic_inc(&skc->skc_ref);
rc = -ENOMEM;
}
- SRETURN(rc);
+ return (rc);
}
/*
spl_kmem_slab_t *sks;
int count = 0, rc, refill;
void *obj = NULL;
- SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(skm->skm_magic == SKM_MAGIC);
/* Emergency object for immediate use by caller */
if (rc == 0 && obj != NULL)
- SRETURN(obj);
+ return (obj);
if (rc)
- SGOTO(out, rc);
+ goto out;
 /* Rescheduled to a different CPU, skm is not local */
if (skm != skc->skc_mag[smp_processor_id()])
- SGOTO(out, rc);
+ goto out;
/* Potentially rescheduled to the same CPU but
* allocations may have occurred from this CPU while
spin_unlock(&skc->skc_lock);
out:
- SRETURN(NULL);
+ return (NULL);
}
/*
{
spl_kmem_slab_t *sks = NULL;
spl_kmem_obj_t *sko = NULL;
- SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(spin_is_locked(&skc->skc_lock));
list_add_tail(&sks->sks_list, &skc->skc_partial_list);
skc->skc_slab_alloc--;
}
-
- SEXIT;
}
/*
{
spl_kmem_magazine_t *skm;
void *obj = NULL;
- SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
* the local magazine since this may have changed
* when we need to grow the cache. */
skm = skc->skc_mag[smp_processor_id()];
- ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n",
- skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm,
- skm->skm_size, skm->skm_refill, skm->skm_avail);
+ ASSERT(skm->skm_magic == SKM_MAGIC);
if (likely(skm->skm_avail)) {
/* Object available in CPU cache, use it */
} else {
obj = spl_cache_refill(skc, skm, flags);
if (obj == NULL)
- SGOTO(restart, obj = NULL);
+ goto restart;
}
local_irq_enable();
atomic_dec(&skc->skc_ref);
- SRETURN(obj);
+ return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
{
spl_kmem_magazine_t *skm;
unsigned long flags;
- SENTRY;
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
* are guaranteed to have physical addresses. They must be removed
 * from the tree of emergency objects and then freed.
*/
- if ((skc->skc_flags & KMC_VMEM) && !kmem_virt(obj))
- SGOTO(out, spl_emergency_free(skc, obj));
+ if ((skc->skc_flags & KMC_VMEM) && !kmem_virt(obj)) {
+ spl_emergency_free(skc, obj);
+ goto out;
+ }
local_irq_save(flags);
local_irq_restore(flags);
out:
atomic_dec(&skc->skc_ref);
-
- SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_free);
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
{
- SENTRY;
-
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
if (spl_kmem_cache_expire & KMC_EXPIRE_MEM)
kmem_cache_shrink(skc->skc_linux_cache);
- SGOTO(out, 0);
+ goto out;
}
/*
* Prevent concurrent cache reaping when contended.
*/
if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
- SGOTO(out, 0);
+ goto out;
/*
* When a reclaim function is available it may be invoked repeatedly
wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
atomic_dec(&skc->skc_ref);
-
- SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
{
int i;
- SENTRY;
spin_lock_init(lock);
INIT_LIST_HEAD(list);
for (i = 0; i < size; i++)
INIT_HLIST_HEAD(&kmem_table[i]);
- SRETURN(0);
+ return (0);
}
static void
unsigned long flags;
kmem_debug_t *kd;
char str[17];
- SENTRY;
spin_lock_irqsave(lock, flags);
if (!list_empty(list))
kd->kd_func, kd->kd_line);
spin_unlock_irqrestore(lock, flags);
- SEXIT;
}
#else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
#define spl_kmem_init_tracking(list, lock, size)
spl_kmem_init(void)
{
int rc = 0;
- SENTRY;
#ifdef DEBUG_KMEM
kmem_alloc_used_set(0);
spl_register_shrinker(&spl_kmem_cache_shrinker);
- SRETURN(rc);
+ return (rc);
}
void
spl_kmem_fini(void)
{
- SENTRY;
-
spl_unregister_shrinker(&spl_kmem_cache_shrinker);
taskq_destroy(spl_kmem_cache_taskq);
* at that address to aid in debugging. Performance is not
* a serious concern here since it is module unload time. */
if (kmem_alloc_used_read() != 0)
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
- "kmem leaked %ld/%ld bytes\n",
+ printk(KERN_WARNING "kmem leaked %ld/%llu bytes\n",
kmem_alloc_used_read(), kmem_alloc_max);
-
if (vmem_alloc_used_read() != 0)
- SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
- "vmem leaked %ld/%ld bytes\n",
+ printk(KERN_WARNING "vmem leaked %ld/%llu bytes\n",
vmem_alloc_used_read(), vmem_alloc_max);
spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
#endif /* DEBUG_KMEM */
-
- SEXIT;
}
\*****************************************************************************/
#include <sys/kobj.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_KOBJ
struct _buf *
kobj_open_file(const char *name)
struct _buf *file;
vnode_t *vp;
int rc;
- SENTRY;
file = kmalloc(sizeof(_buf_t), GFP_KERNEL);
if (file == NULL)
- SRETURN((_buf_t *)-1UL);
+ return ((_buf_t *)-1UL);
if ((rc = vn_open(name, UIO_SYSSPACE, FREAD, 0644, &vp, 0, 0))) {
kfree(file);
- SRETURN((_buf_t *)-1UL);
+ return ((_buf_t *)-1UL);
}
file->vp = vp;
- SRETURN(file);
+ return (file);
} /* kobj_open_file() */
EXPORT_SYMBOL(kobj_open_file);
void
kobj_close_file(struct _buf *file)
{
- SENTRY;
VOP_CLOSE(file->vp, 0, 0, 0, 0, 0);
kfree(file);
- SEXIT;
} /* kobj_close_file() */
EXPORT_SYMBOL(kobj_close_file);
int
kobj_read_file(struct _buf *file, char *buf, ssize_t size, offset_t off)
{
- SENTRY;
- SRETURN(vn_rdwr(UIO_READ, file->vp, buf, size, off,
+ return (vn_rdwr(UIO_READ, file->vp, buf, size, off,
UIO_SYSSPACE, 0, RLIM64_INFINITY, 0, NULL));
} /* kobj_read_file() */
EXPORT_SYMBOL(kobj_read_file);
{
vattr_t vap;
int rc;
- SENTRY;
rc = VOP_GETATTR(file->vp, &vap, 0, 0, NULL);
if (rc)
- SRETURN(rc);
+ return (rc);
*size = vap.va_size;
- SRETURN(rc);
+ return (rc);
} /* kobj_get_filesize() */
EXPORT_SYMBOL(kobj_get_filesize);
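A hedged usage sketch of the kobj file helpers above; example_read_config is an illustrative name and the error handling is deliberately simplified.

#include <sys/kobj.h>

static int
example_read_config(const char *name, char *buf, ssize_t buflen)
{
        struct _buf *file;
        uint64_t size;
        int rc;

        file = kobj_open_file(name);
        if (file == (struct _buf *)-1UL)
                return (ENOENT);

        rc = kobj_get_filesize(file, &size);
        if (rc == 0 && size <= (uint64_t)buflen)
                rc = kobj_read_file(file, buf, size, 0);

        kobj_close_file(file);

        return (rc);
}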
#include <linux/seq_file.h>
#include <sys/kstat.h>
-#include <spl-debug.h>
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_KSTAT
#ifndef HAVE_PDE_DATA
#define PDE_DATA(x) (PDE(x)->data)
#endif
kstat_seq_data_addr(kstat_t *ksp, loff_t n)
{
void *rc = NULL;
- SENTRY;
switch (ksp->ks_type) {
case KSTAT_TYPE_RAW:
PANIC("Undefined kstat type %d\n", ksp->ks_type);
}
- SRETURN(rc);
+ return (rc);
}
static void *
loff_t n = *pos;
kstat_t *ksp = (kstat_t *)f->private;
ASSERT(ksp->ks_magic == KS_MAGIC);
- SENTRY;
mutex_enter(ksp->ks_lock);
ksp->ks_snaptime = gethrtime();
if (!n && kstat_seq_show_headers(f))
- SRETURN(NULL);
+ return (NULL);
if (n >= ksp->ks_ndata)
- SRETURN(NULL);
+ return (NULL);
- SRETURN(kstat_seq_data_addr(ksp, n));
+ return (kstat_seq_data_addr(ksp, n));
}
static void *
{
kstat_t *ksp = (kstat_t *)f->private;
ASSERT(ksp->ks_magic == KS_MAGIC);
- SENTRY;
++*pos;
if (*pos >= ksp->ks_ndata)
- SRETURN(NULL);
+ return (NULL);
- SRETURN(kstat_seq_data_addr(ksp, *pos));
+ return (kstat_seq_data_addr(ksp, *pos));
}
static void
int
spl_kstat_init(void)
{
- SENTRY;
mutex_init(&kstat_module_lock, NULL, MUTEX_DEFAULT, NULL);
INIT_LIST_HEAD(&kstat_module_list);
kstat_id = 0;
- SRETURN(0);
+ return (0);
}
void
spl_kstat_fini(void)
{
- SENTRY;
ASSERT(list_empty(&kstat_module_list));
mutex_destroy(&kstat_module_lock);
- SEXIT;
}
#include <linux/seq_file.h>
#include <linux/proc_compat.h>
#include <linux/version.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_PROC
#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
typedef struct ctl_table __no_const spl_ctl_table;
return size;
}
-#ifdef DEBUG_LOG
-static int
-proc_dobitmasks(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- unsigned long *mask = table->data;
- int is_subsys = (mask == &spl_debug_subsys) ? 1 : 0;
- int is_printk = (mask == &spl_debug_printk) ? 1 : 0;
- int size = 512, rc;
- char *str;
- SENTRY;
-
- str = kmem_alloc(size, KM_SLEEP);
- if (str == NULL)
- SRETURN(-ENOMEM);
-
- if (write) {
- rc = proc_copyin_string(str, size, buffer, *lenp);
- if (rc < 0)
- SRETURN(rc);
-
- rc = spl_debug_str2mask(mask, str, is_subsys);
- /* Always print BUG/ASSERT to console, so keep this mask */
- if (is_printk)
- *mask |= SD_EMERG;
-
- *ppos += *lenp;
- } else {
- rc = spl_debug_mask2str(str, size, *mask, is_subsys);
- if (*ppos >= rc)
- rc = 0;
- else
- rc = proc_copyout_string(buffer, *lenp,
- str + *ppos, "\n");
- if (rc >= 0) {
- *lenp = rc;
- *ppos += rc;
- }
- }
-
- kmem_free(str, size);
- SRETURN(rc);
-}
-
-static int
-proc_debug_mb(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- char str[32];
- int rc, len;
- SENTRY;
-
- if (write) {
- rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
- if (rc < 0)
- SRETURN(rc);
-
- rc = spl_debug_set_mb(simple_strtoul(str, NULL, 0));
- *ppos += *lenp;
- } else {
- len = snprintf(str, sizeof(str), "%d", spl_debug_get_mb());
- if (*ppos >= len)
- rc = 0;
- else
- rc = proc_copyout_string(buffer,*lenp,str+*ppos,"\n");
-
- if (rc >= 0) {
- *lenp = rc;
- *ppos += rc;
- }
- }
-
- SRETURN(rc);
-}
-
-static int
-proc_dump_kernel(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- SENTRY;
-
- if (write) {
- spl_debug_dumplog(0);
- *ppos += *lenp;
- } else {
- *lenp = 0;
- }
-
- SRETURN(0);
-}
-
-static int
-proc_force_bug(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- SENTRY;
-
- if (write)
- PANIC("Crashing due to forced panic\n");
- else
- *lenp = 0;
-
- SRETURN(0);
-}
-
-static int
-proc_console_max_delay_cs(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int rc, max_delay_cs;
- spl_ctl_table dummy = *table;
- long d;
- SENTRY;
-
- dummy.data = &max_delay_cs;
- dummy.proc_handler = &proc_dointvec;
-
- if (write) {
- max_delay_cs = 0;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- if (rc < 0)
- SRETURN(rc);
-
- if (max_delay_cs <= 0)
- SRETURN(-EINVAL);
-
- d = (max_delay_cs * HZ) / 100;
- if (d == 0 || d < spl_console_min_delay)
- SRETURN(-EINVAL);
-
- spl_console_max_delay = d;
- } else {
- max_delay_cs = (spl_console_max_delay * 100) / HZ;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- }
-
- SRETURN(rc);
-}
-
-static int
-proc_console_min_delay_cs(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int rc, min_delay_cs;
- spl_ctl_table dummy = *table;
- long d;
- SENTRY;
-
- dummy.data = &min_delay_cs;
- dummy.proc_handler = &proc_dointvec;
-
- if (write) {
- min_delay_cs = 0;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- if (rc < 0)
- SRETURN(rc);
-
- if (min_delay_cs <= 0)
- SRETURN(-EINVAL);
-
- d = (min_delay_cs * HZ) / 100;
- if (d == 0 || d > spl_console_max_delay)
- SRETURN(-EINVAL);
-
- spl_console_min_delay = d;
- } else {
- min_delay_cs = (spl_console_min_delay * 100) / HZ;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- }
-
- SRETURN(rc);
-}
-
-static int
-proc_console_backoff(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int rc, backoff;
- spl_ctl_table dummy = *table;
- SENTRY;
-
- dummy.data = &backoff;
- dummy.proc_handler = &proc_dointvec;
-
- if (write) {
- backoff = 0;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- if (rc < 0)
- SRETURN(rc);
-
- if (backoff <= 0)
- SRETURN(-EINVAL);
-
- spl_console_backoff = backoff;
- } else {
- backoff = spl_console_backoff;
- rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
- }
-
- SRETURN(rc);
-}
-#endif /* DEBUG_LOG */
-
#ifdef DEBUG_KMEM
static int
proc_domemused(struct ctl_table *table, int write,
int rc = 0;
unsigned long min = 0, max = ~0, val;
spl_ctl_table dummy = *table;
- SENTRY;
dummy.data = &val;
dummy.proc_handler = &proc_dointvec;
rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
}
- SRETURN(rc);
+ return (rc);
}
static int
unsigned long min = 0, max = ~0, val = 0, mask;
spl_ctl_table dummy = *table;
spl_kmem_cache_t *skc;
- SENTRY;
dummy.data = &val;
dummy.proc_handler = &proc_dointvec;
rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
}
- SRETURN(rc);
+ return (rc);
}
#endif /* DEBUG_KMEM */
{
int len, rc = 0;
char *end, str[32];
- SENTRY;
if (write) {
/* We can't use proc_doulongvec_minmax() in the write
* leading 0x which confuses the helper function. */
rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
if (rc < 0)
- SRETURN(rc);
+ return (rc);
spl_hostid = simple_strtoul(str, &end, 16);
if (str == end)
- SRETURN(-EINVAL);
+ return (-EINVAL);
} else {
len = snprintf(str, sizeof(str), "%lx", spl_hostid);
}
}
- SRETURN(rc);
+ return (rc);
}
#ifdef DEBUG_KMEM
{
struct list_head *p;
loff_t n = *pos;
- SENTRY;
down_read(&spl_kmem_cache_sem);
if (!n)
while (n--) {
p = p->next;
if (p == &spl_kmem_cache_list)
- SRETURN(NULL);
+ return (NULL);
}
- SRETURN(list_entry(p, spl_kmem_cache_t, skc_list));
+ return (list_entry(p, spl_kmem_cache_t, skc_list));
}
static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
spl_kmem_cache_t *skc = p;
- SENTRY;
++*pos;
- SRETURN((skc->skc_list.next == &spl_kmem_cache_list) ?
+ return ((skc->skc_list.next == &spl_kmem_cache_list) ?
NULL : list_entry(skc->skc_list.next,spl_kmem_cache_t,skc_list));
}
};
#endif /* DEBUG_KMEM */
-#ifdef DEBUG_LOG
-static struct ctl_table spl_debug_table[] = {
- {
- .procname = "subsystem",
- .data = &spl_debug_subsys,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = &proc_dobitmasks
- },
- {
- .procname = "mask",
- .data = &spl_debug_mask,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = &proc_dobitmasks
- },
- {
- .procname = "printk",
- .data = &spl_debug_printk,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = &proc_dobitmasks
- },
- {
- .procname = "mb",
- .mode = 0644,
- .proc_handler = &proc_debug_mb,
- },
- {
- .procname = "binary",
- .data = &spl_debug_binary,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- {
- .procname = "catastrophe",
- .data = &spl_debug_catastrophe,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- },
- {
- .procname = "panic_on_bug",
- .data = &spl_debug_panic_on_bug,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec
- },
- {
- .procname = "path",
- .data = spl_debug_file_path,
- .maxlen = sizeof(spl_debug_file_path),
- .mode = 0644,
- .proc_handler = &proc_dostring,
- },
- {
- .procname = "dump",
- .mode = 0200,
- .proc_handler = &proc_dump_kernel,
- },
- {
- .procname = "force_bug",
- .mode = 0200,
- .proc_handler = &proc_force_bug,
- },
- {
- .procname = "console_ratelimit",
- .data = &spl_console_ratelimit,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- {
- .procname = "console_max_delay_centisecs",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_console_max_delay_cs,
- },
- {
- .procname = "console_min_delay_centisecs",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_console_min_delay_cs,
- },
- {
- .procname = "console_backoff",
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_console_backoff,
- },
- {
- .procname = "stack_max",
- .data = &spl_debug_stack,
- .maxlen = sizeof(int),
- .mode = 0444,
- .proc_handler = &proc_dointvec,
- },
- {0},
-};
-#endif /* DEBUG_LOG */
-
#ifdef DEBUG_KMEM
static struct ctl_table spl_kmem_table[] = {
{
.mode = 0644,
.proc_handler = &proc_dohostid,
},
-#ifdef DEBUG_LOG
- {
- .procname = "debug",
- .mode = 0555,
- .child = spl_debug_table,
- },
-#endif
#ifdef DEBUG_KMEM
{
.procname = "kmem",
spl_proc_init(void)
{
int rc = 0;
- SENTRY;
spl_header = register_sysctl_table(spl_root);
if (spl_header == NULL)
- SRETURN(-EUNATCH);
+ return (-EUNATCH);
proc_spl = proc_mkdir("spl", NULL);
- if (proc_spl == NULL)
- SGOTO(out, rc = -EUNATCH);
+ if (proc_spl == NULL) {
+ rc = -EUNATCH;
+ goto out;
+ }
#ifdef DEBUG_KMEM
proc_spl_kmem = proc_mkdir("kmem", proc_spl);
- if (proc_spl_kmem == NULL)
- SGOTO(out, rc = -EUNATCH);
+ if (proc_spl_kmem == NULL) {
+ rc = -EUNATCH;
+ goto out;
+ }
proc_spl_kmem_slab = proc_create_data("slab", 0444,
proc_spl_kmem, &proc_slab_operations, NULL);
- if (proc_spl_kmem_slab == NULL)
- SGOTO(out, rc = -EUNATCH);
+ if (proc_spl_kmem_slab == NULL) {
+ rc = -EUNATCH;
+ goto out;
+ }
#endif /* DEBUG_KMEM */
proc_spl_kstat = proc_mkdir("kstat", proc_spl);
- if (proc_spl_kstat == NULL)
- SGOTO(out, rc = -EUNATCH);
+ if (proc_spl_kstat == NULL) {
+ rc = -EUNATCH;
+ goto out;
+ }
out:
if (rc) {
remove_proc_entry("kstat", proc_spl);
unregister_sysctl_table(spl_header);
}
- SRETURN(rc);
+ return (rc);
}
void
spl_proc_fini(void)
{
- SENTRY;
-
remove_proc_entry("kstat", proc_spl);
#ifdef DEBUG_KMEM
remove_proc_entry("slab", proc_spl_kmem);
ASSERT(spl_header != NULL);
unregister_sysctl_table(spl_header);
-
- SEXIT;
}
#include <sys/taskq.h>
#include <sys/kmem.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_TASKQ
int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
{
taskq_ent_t *t;
int count = 0;
- SENTRY;
ASSERT(tq);
ASSERT(spin_is_locked(&tq->tq_lock));
ASSERT(!timer_pending(&t->tqent_timer));
list_del_init(&t->tqent_list);
- SRETURN(t);
+ return (t);
}
/* Free list is empty and memory allocations are prohibited */
if (flags & TQ_NOALLOC)
- SRETURN(NULL);
+ return (NULL);
/* Hit maximum taskq_ent_t pool size */
if (tq->tq_nalloc >= tq->tq_maxalloc) {
if (flags & TQ_NOSLEEP)
- SRETURN(NULL);
+ return (NULL);
/*
* Sleep periodically polling the free list for an available
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
schedule_timeout(HZ / 100);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
- if (count < 100)
- SGOTO(retry, count++);
+ if (count < 100) {
+ count++;
+ goto retry;
+ }
}
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
tq->tq_nalloc++;
}
- SRETURN(t);
+ return (t);
}
/*
static void
task_free(taskq_t *tq, taskq_ent_t *t)
{
- SENTRY;
-
ASSERT(tq);
ASSERT(t);
ASSERT(spin_is_locked(&tq->tq_lock));
kmem_free(t, sizeof(taskq_ent_t));
tq->tq_nalloc--;
-
- SEXIT;
}
/*
static void
task_done(taskq_t *tq, taskq_ent_t *t)
{
- SENTRY;
ASSERT(tq);
ASSERT(t);
ASSERT(spin_is_locked(&tq->tq_lock));
} else {
task_free(tq, t);
}
-
- SEXIT;
}
/*
taskqid_t lowest_id = tq->tq_next_id;
taskq_ent_t *t;
taskq_thread_t *tqt;
- SENTRY;
ASSERT(tq);
ASSERT(spin_is_locked(&tq->tq_lock));
lowest_id = MIN(lowest_id, tqt->tqt_id);
}
- SRETURN(lowest_id);
+ return (lowest_id);
}
/*
taskq_thread_t *w;
struct list_head *l;
- SENTRY;
ASSERT(tq);
ASSERT(tqt);
ASSERT(spin_is_locked(&tq->tq_lock));
}
if (l == &tq->tq_active_list)
list_add(&tqt->tqt_active_list, &tq->tq_active_list);
-
- SEXIT;
}
/*
{
struct list_head *l;
taskq_ent_t *t;
- SENTRY;
ASSERT(spin_is_locked(&tq->tq_lock));
t = list_entry(l, taskq_ent_t, tqent_list);
if (t->tqent_id == id)
- SRETURN(t);
+ return (t);
if (t->tqent_id > id)
break;
}
- SRETURN(NULL);
+ return (NULL);
}
/*
taskq_thread_t *tqt;
struct list_head *l;
taskq_ent_t *t;
- SENTRY;
ASSERT(spin_is_locked(&tq->tq_lock));
*active = 0;
t = taskq_find_list(tq, &tq->tq_delay_list, id);
if (t)
- SRETURN(t);
+ return (t);
t = taskq_find_list(tq, &tq->tq_prio_list, id);
if (t)
- SRETURN(t);
+ return (t);
t = taskq_find_list(tq, &tq->tq_pend_list, id);
if (t)
- SRETURN(t);
+ return (t);
list_for_each(l, &tq->tq_active_list) {
tqt = list_entry(l, taskq_thread_t, tqt_active_list);
if (tqt->tqt_id == id) {
t = tqt->tqt_task;
*active = 1;
- SRETURN(t);
+ return (t);
}
}
- SRETURN(NULL);
+ return (NULL);
}
static int
rc = (id < tq->tq_lowest_id);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(rc);
+ return (rc);
}
void
taskq_wait(taskq_t *tq)
{
taskqid_t id;
- SENTRY;
+
ASSERT(tq);
/* Wait for the largest outstanding taskqid */
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
taskq_wait_all(tq, id);
-
- SEXIT;
-
}
EXPORT_SYMBOL(taskq_wait);
{
struct list_head *l;
taskq_thread_t *tqt;
- SENTRY;
ASSERT(tq);
ASSERT(t);
list_for_each(l, &tq->tq_thread_list) {
tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
if (tqt->tqt_thread == (struct task_struct *)t)
- SRETURN(1);
+ return (1);
}
- SRETURN(0);
+ return (0);
}
EXPORT_SYMBOL(taskq_member);
taskq_ent_t *t;
int active = 0;
int rc = ENOENT;
- SENTRY;
ASSERT(tq);
rc = EBUSY;
}
- SRETURN(rc);
+ return (rc);
}
EXPORT_SYMBOL(taskq_cancel_id);
{
taskq_ent_t *t;
taskqid_t rc = 0;
- SENTRY;
ASSERT(tq);
ASSERT(func);
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TQ_ACTIVE))
- SGOTO(out, rc = 0);
+ goto out;
 /* Do not queue the task unless there is an idle thread for it */
ASSERT(tq->tq_nactive <= tq->tq_nthreads);
if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
- SGOTO(out, rc = 0);
+ goto out;
if ((t = task_alloc(tq, flags)) == NULL)
- SGOTO(out, rc = 0);
+ goto out;
spin_lock(&t->tqent_lock);
wake_up(&tq->tq_work_waitq);
out:
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(rc);
+ return (rc);
}
EXPORT_SYMBOL(taskq_dispatch);
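A hedged usage sketch of the dispatch path above; example_worker and example_dispatch are illustrative names.

#include <sys/taskq.h>

static void
example_worker(void *arg)
{
        /* Runs asynchronously in a taskq thread. */
}

static void
example_dispatch(taskq_t *tq)
{
        taskqid_t id;

        id = taskq_dispatch(tq, example_worker, NULL, TQ_SLEEP);
        if (id == 0)
                return;         /* taskq inactive or allocation failed */

        taskq_wait(tq);         /* block until outstanding tasks complete */
}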
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
uint_t flags, clock_t expire_time)
{
- taskq_ent_t *t;
taskqid_t rc = 0;
- SENTRY;
+ taskq_ent_t *t;
ASSERT(tq);
ASSERT(func);
/* Taskq being destroyed and all tasks drained */
if (!(tq->tq_flags & TQ_ACTIVE))
- SGOTO(out, rc = 0);
+ goto out;
if ((t = task_alloc(tq, flags)) == NULL)
- SGOTO(out, rc = 0);
+ goto out;
spin_lock(&t->tqent_lock);
spin_unlock(&t->tqent_lock);
out:
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(rc);
+ return (rc);
}
EXPORT_SYMBOL(taskq_dispatch_delay);
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
taskq_ent_t *t)
{
- SENTRY;
-
ASSERT(tq);
ASSERT(func);
ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
wake_up(&tq->tq_work_waitq);
out:
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SEXIT;
}
EXPORT_SYMBOL(taskq_dispatch_ent);
taskq_t *tq;
taskq_ent_t *t;
struct list_head *pend_list;
- SENTRY;
ASSERT(tqt);
tq = tqt->tqt_tq;
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
- SRETURN(0);
+ return (0);
}
taskq_t *
taskq_t *tq;
taskq_thread_t *tqt;
int rc = 0, i, j = 0;
- SENTRY;
ASSERT(name != NULL);
ASSERT(pri <= maxclsyspri);
tq = kmem_alloc(sizeof(*tq), KM_PUSHPAGE);
if (tq == NULL)
- SRETURN(NULL);
+ return (NULL);
spin_lock_init(&tq->tq_lock);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
tq = NULL;
}
- SRETURN(tq);
+ return (tq);
}
EXPORT_SYMBOL(taskq_create);
struct task_struct *thread;
taskq_thread_t *tqt;
taskq_ent_t *t;
- SENTRY;
ASSERT(tq);
spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
kmem_free(tq, sizeof(taskq_t));
-
- SEXIT;
}
EXPORT_SYMBOL(taskq_destroy);
int
spl_taskq_init(void)
{
- SENTRY;
-
 /* Solaris creates a dynamic taskq of up to 64 threads, however in
 * a Linux environment one thread per core is usually about right */
system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
minclsyspri, 4, 512, TASKQ_PREPOPULATE);
if (system_taskq == NULL)
- SRETURN(1);
+ return (1);
- SRETURN(0);
+ return (0);
}
void
spl_taskq_fini(void)
{
- SENTRY;
taskq_destroy(system_taskq);
- SEXIT;
}
#include <sys/thread.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_THREAD
/*
* Thread interfaces
void
__thread_exit(void)
{
- SENTRY;
- SEXIT;
tsd_exit();
complete_and_exit(NULL, 0);
/* Unreachable */
thread_priv_t *tp;
struct task_struct *tsk;
char *p;
- SENTRY;
/* Option pp is simply ignored */
/* Variable stack size unsupported */
tp = kmem_alloc(sizeof(thread_priv_t), KM_PUSHPAGE);
if (tp == NULL)
- SRETURN(NULL);
+ return (NULL);
tp->tp_magic = TP_MAGIC;
tp->tp_name_size = strlen(name) + 1;
tp->tp_name = kmem_alloc(tp->tp_name_size, KM_PUSHPAGE);
if (tp->tp_name == NULL) {
kmem_free(tp, sizeof(thread_priv_t));
- SRETURN(NULL);
+ return (NULL);
}
strncpy(tp->tp_name, name, tp->tp_name_size);
tsk = spl_kthread_create(thread_generic_wrapper, (void *)tp,
"%s", tp->tp_name);
- if (IS_ERR(tsk)) {
- SERROR("Failed to create thread: %ld\n", PTR_ERR(tsk));
- SRETURN(NULL);
- }
+ if (IS_ERR(tsk))
+ return (NULL);
wake_up_process(tsk);
- SRETURN((kthread_t *)tsk);
+ return ((kthread_t *)tsk);
}
EXPORT_SYMBOL(__thread_create);
#include <sys/kmem.h>
#include <sys/thread.h>
#include <sys/tsd.h>
-#include <spl-debug.h>
-
-#ifdef DEBUG_SUBSYSTEM
-#undef DEBUG_SUBSYSTEM
-#endif
-
-#define DEBUG_SUBSYSTEM SS_TSD
-#define DEBUG_SUBSYSTEM SS_TSD
typedef struct tsd_hash_bin {
spinlock_t hb_lock;
tsd_hash_entry_t *entry;
tsd_hash_bin_t *bin;
ulong_t hash;
- SENTRY;
hash = hash_long((ulong_t)key * (ulong_t)pid, table->ht_bits);
bin = &table->ht_bins[hash];
entry = list_entry(node, tsd_hash_entry_t, he_list);
if ((entry->he_key == key) && (entry->he_pid == pid)) {
spin_unlock(&bin->hb_lock);
- SRETURN(entry);
+ return (entry);
}
}
spin_unlock(&bin->hb_lock);
- SRETURN(NULL);
+ return (NULL);
}
/*
tsd_hash_dtor(struct hlist_head *work)
{
tsd_hash_entry_t *entry;
- SENTRY;
while (!hlist_empty(work)) {
entry = hlist_entry(work->first, tsd_hash_entry_t, he_list);
kmem_free(entry, sizeof(tsd_hash_entry_t));
}
-
- SEXIT;
}
/*
tsd_hash_bin_t *bin;
ulong_t hash;
int rc = 0;
- SENTRY;
ASSERT3P(tsd_hash_search(table, key, pid), ==, NULL);
/* New entry allocate structure, set value, and add to hash */
entry = kmem_alloc(sizeof(tsd_hash_entry_t), KM_PUSHPAGE);
if (entry == NULL)
- SRETURN(ENOMEM);
+ return (ENOMEM);
entry->he_key = key;
entry->he_pid = pid;
spin_unlock(&bin->hb_lock);
spin_unlock(&table->ht_lock);
- SRETURN(rc);
+ return (rc);
}
/*
tsd_hash_bin_t *bin;
ulong_t hash;
int keys_checked = 0;
- SENTRY;
ASSERT3P(table, !=, NULL);
/* Allocate entry to be used as a destructor for this key */
entry = kmem_alloc(sizeof(tsd_hash_entry_t), KM_PUSHPAGE);
if (entry == NULL)
- SRETURN(ENOMEM);
+ return (ENOMEM);
/* Determine next available key value */
spin_lock(&table->ht_lock);
/* Ensure failure when all TSD_KEYS_MAX keys are in use */
if (keys_checked++ >= TSD_KEYS_MAX) {
spin_unlock(&table->ht_lock);
- SRETURN(ENOENT);
+ return (ENOENT);
}
tmp_entry = tsd_hash_search(table, table->ht_key, DTOR_PID);
spin_unlock(&bin->hb_lock);
spin_unlock(&table->ht_lock);
- SRETURN(0);
+ return (0);
}
/*
tsd_hash_entry_t *entry;
tsd_hash_bin_t *bin;
ulong_t hash;
- SENTRY;
/* Allocate entry to be used as the process reference */
entry = kmem_alloc(sizeof(tsd_hash_entry_t), KM_PUSHPAGE);
if (entry == NULL)
- SRETURN(ENOMEM);
+ return (ENOMEM);
spin_lock(&table->ht_lock);
entry->he_key = PID_KEY;
spin_unlock(&bin->hb_lock);
spin_unlock(&table->ht_lock);
- SRETURN(0);
+ return (0);
}
/*
static void
tsd_hash_del(tsd_hash_table_t *table, tsd_hash_entry_t *entry)
{
- SENTRY;
-
ASSERT(spin_is_locked(&table->ht_lock));
hlist_del(&entry->he_list);
list_del_init(&entry->he_key_list);
list_del_init(&entry->he_pid_list);
-
- SEXIT;
}
/*
{
tsd_hash_table_t *table;
int hash, size = (1 << bits);
- SENTRY;
table = kmem_zalloc(sizeof(tsd_hash_table_t), KM_SLEEP);
if (table == NULL)
- SRETURN(NULL);
+ return (NULL);
table->ht_bins = kmem_zalloc(sizeof(tsd_hash_bin_t) * size,
KM_SLEEP | KM_NODEBUG);
if (table->ht_bins == NULL) {
kmem_free(table, sizeof(tsd_hash_table_t));
- SRETURN(NULL);
+ return (NULL);
}
for (hash = 0; hash < size; hash++) {
table->ht_bits = bits;
table->ht_key = 1;
- SRETURN(table);
+ return (table);
}
/*
tsd_hash_bin_t *bin;
tsd_hash_entry_t *entry;
int size, i;
- SENTRY;
ASSERT3P(table, !=, NULL);
spin_lock(&table->ht_lock);
tsd_hash_dtor(&work);
kmem_free(table->ht_bins, sizeof(tsd_hash_bin_t)*(1<<table->ht_bits));
kmem_free(table, sizeof(tsd_hash_table_t));
-
- SEXIT;
}
/*
tsd_hash_entry_t *entry;
pid_t pid;
int rc;
- SENTRY;
table = tsd_hash_table;
pid = curthread->pid;
ASSERT3P(table, !=, NULL);
if ((key == 0) || (key > TSD_KEYS_MAX))
- SRETURN(EINVAL);
+ return (EINVAL);
 /* Entry already exists in the hash table, update its value */
entry = tsd_hash_search(table, key, pid);
if (entry) {
entry->he_value = value;
- SRETURN(0);
+ return (0);
}
 /* Add a process entry to the hash if one does not yet exist */
if (entry == NULL) {
rc = tsd_hash_add_pid(table, pid);
if (rc)
- SRETURN(rc);
+ return (rc);
}
rc = tsd_hash_add(table, key, pid, value);
- SRETURN(rc);
+ return (rc);
}
EXPORT_SYMBOL(tsd_set);
tsd_get(uint_t key)
{
tsd_hash_entry_t *entry;
- SENTRY;
ASSERT3P(tsd_hash_table, !=, NULL);
if ((key == 0) || (key > TSD_KEYS_MAX))
- SRETURN(NULL);
+ return (NULL);
entry = tsd_hash_search(tsd_hash_table, key, curthread->pid);
if (entry == NULL)
- SRETURN(NULL);
+ return (NULL);
- SRETURN(entry->he_value);
+ return (entry->he_value);
}
EXPORT_SYMBOL(tsd_get);
void
tsd_create(uint_t *keyp, dtor_func_t dtor)
{
- SENTRY;
-
ASSERT3P(keyp, !=, NULL);
- if (*keyp) {
- SEXIT;
+ if (*keyp)
return;
- }
(void)tsd_hash_add_key(tsd_hash_table, keyp, dtor);
-
- SEXIT;
}
EXPORT_SYMBOL(tsd_create);
tsd_hash_entry_t *dtor_entry, *entry;
tsd_hash_bin_t *dtor_entry_bin, *entry_bin;
ulong_t hash;
- SENTRY;
table = tsd_hash_table;
ASSERT3P(table, !=, NULL);
dtor_entry = tsd_hash_search(table, *keyp, DTOR_PID);
if (dtor_entry == NULL) {
spin_unlock(&table->ht_lock);
- SEXIT;
return;
}
tsd_hash_dtor(&work);
*keyp = 0;
-
- SEXIT;
}
EXPORT_SYMBOL(tsd_destroy);
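A hedged usage sketch of the thread-specific data interface above; example_key, example_dtor, and example_tsd_usage are illustrative names.

#include <sys/kmem.h>
#include <sys/tsd.h>

static uint_t example_key = 0;

static void
example_dtor(void *value)
{
        kmem_free(value, sizeof (int));
}

static void
example_tsd_usage(void)
{
        int *val;

        tsd_create(&example_key, example_dtor);

        val = kmem_alloc(sizeof (int), KM_SLEEP);
        *val = 42;
        (void) tsd_set(example_key, val);

        val = tsd_get(example_key);     /* per-thread lookup */

        tsd_destroy(&example_key);      /* runs example_dtor on live values */
}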
tsd_hash_entry_t *pid_entry, *entry;
tsd_hash_bin_t *pid_entry_bin, *entry_bin;
ulong_t hash;
- SENTRY;
table = tsd_hash_table;
ASSERT3P(table, !=, NULL);
pid_entry = tsd_hash_search(table, PID_KEY, curthread->pid);
if (pid_entry == NULL) {
spin_unlock(&table->ht_lock);
- SEXIT;
return;
}
spin_unlock(&table->ht_lock);
tsd_hash_dtor(&work);
-
- SEXIT;
}
EXPORT_SYMBOL(tsd_exit);
int
spl_tsd_init(void)
{
- SENTRY;
-
tsd_hash_table = tsd_hash_table_init(TSD_HASH_TABLE_BITS_DEFAULT);
if (tsd_hash_table == NULL)
- SRETURN(1);
+ return (1);
- SRETURN(0);
+ return (0);
}
void
spl_tsd_fini(void)
{
- SENTRY;
tsd_hash_table_fini(tsd_hash_table);
tsd_hash_table = NULL;
- SEXIT;
}
#include <sys/cred.h>
#include <sys/vnode.h>
#include <linux/falloc.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_VNODE
vnode_t *rootdir = (vnode_t *)0xabcd1234;
EXPORT_SYMBOL(rootdir);
vn_alloc(int flag)
{
vnode_t *vp;
- SENTRY;
vp = kmem_cache_alloc(vn_cache, flag);
if (vp != NULL) {
vp->v_type = 0;
}
- SRETURN(vp);
+ return (vp);
} /* vn_alloc() */
EXPORT_SYMBOL(vn_alloc);
void
vn_free(vnode_t *vp)
{
- SENTRY;
kmem_cache_free(vn_cache, vp);
- SEXIT;
} /* vn_free() */
EXPORT_SYMBOL(vn_free);
int rc, saved_umask = 0;
gfp_t saved_gfp;
vnode_t *vp;
- SENTRY;
ASSERT(flags & (FWRITE | FREAD));
ASSERT(seg == UIO_SYSSPACE);
 (void)xchg(&current->fs->umask, saved_umask);
if (IS_ERR(fp))
- SRETURN(-PTR_ERR(fp));
+ return (-PTR_ERR(fp));
#ifdef HAVE_2ARGS_VFS_GETATTR
rc = vfs_getattr(&fp->f_path, &stat);
#endif
if (rc) {
filp_close(fp, 0);
- SRETURN(-rc);
+ return (-rc);
}
vp = vn_alloc(KM_SLEEP);
if (!vp) {
filp_close(fp, 0);
- SRETURN(ENOMEM);
+ return (ENOMEM);
}
saved_gfp = mapping_gfp_mask(fp->f_mapping);
*vpp = vp;
mutex_exit(&vp->v_lock);
- SRETURN(0);
+ return (0);
} /* vn_open() */
EXPORT_SYMBOL(vn_open);
{
char *realpath;
int len, rc;
- SENTRY;
ASSERT(vp == rootdir);
len = strlen(path) + 2;
realpath = kmalloc(len, GFP_KERNEL);
if (!realpath)
- SRETURN(ENOMEM);
+ return (ENOMEM);
(void)snprintf(realpath, len, "/%s", path);
rc = vn_open(realpath, seg, flags, mode, vpp, x1, x2);
kfree(realpath);
- SRETURN(rc);
+ return (rc);
} /* vn_openat() */
EXPORT_SYMBOL(vn_openat);
mm_segment_t saved_fs;
struct file *fp;
int rc;
- SENTRY;
ASSERT(uio == UIO_WRITE || uio == UIO_READ);
ASSERT(vp);
fp->f_pos = offset;
if (rc < 0)
- SRETURN(-rc);
+ return (-rc);
if (residp) {
*residp = len - rc;
} else {
if (rc != len)
- SRETURN(EIO);
+ return (EIO);
}
- SRETURN(0);
+ return (0);
} /* vn_rdwr() */
EXPORT_SYMBOL(vn_rdwr);
vn_close(vnode_t *vp, int flags, int x1, int x2, void *x3, void *x4)
{
int rc;
- SENTRY;
ASSERT(vp);
ASSERT(vp->v_file);
rc = filp_close(vp->v_file, 0);
vn_free(vp);
- SRETURN(-rc);
+ return (-rc);
} /* vn_close() */
EXPORT_SYMBOL(vn_close);
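A hedged usage sketch of the vnode wrappers above; example_read_file is an illustrative name and the 0644 open mode is an assumption.

#include <sys/vnode.h>

static int
example_read_file(const char *path, char *buf, ssize_t len)
{
        vnode_t *vp;
        int rc;

        rc = vn_open(path, UIO_SYSSPACE, FREAD, 0644, &vp, 0, 0);
        if (rc)
                return (rc);

        rc = vn_rdwr(UIO_READ, vp, buf, len, 0, UIO_SYSSPACE,
            0, RLIM64_INFINITY, 0, NULL);

        (void) vn_close(vp, FREAD, 0, 0, 0, 0);

        return (rc);
}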
struct path parent;
struct inode *inode = NULL;
int rc = 0;
- SENTRY;
ASSERT(seg == UIO_SYSSPACE);
ASSERT(flags == RMFILE);
dentry = spl_kern_path_locked(path, &parent);
rc = PTR_ERR(dentry);
if (!IS_ERR(dentry)) {
- if (parent.dentry->d_name.name[parent.dentry->d_name.len])
- SGOTO(slashes, rc = 0);
+ if (parent.dentry->d_name.name[parent.dentry->d_name.len]) {
+ rc = 0;
+ goto slashes;
+ }
inode = dentry->d_inode;
- if (inode)
+ if (inode) {
atomic_inc(&inode->i_count);
- else
- SGOTO(slashes, rc = 0);
+ } else {
+ rc = 0;
+ goto slashes;
+ }
#ifdef HAVE_2ARGS_VFS_UNLINK
rc = vfs_unlink(parent.dentry->d_inode, dentry);
iput(inode); /* truncate the inode here */
path_put(&parent);
- SRETURN(-rc);
+ return (-rc);
slashes:
rc = !dentry->d_inode ? -ENOENT :
S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
- SGOTO(exit1, rc);
+ goto exit1;
} /* vn_remove() */
EXPORT_SYMBOL(vn_remove);
struct dentry *trap;
struct path old_parent, new_parent;
int rc = 0;
- SENTRY;
old_dentry = spl_kern_path_locked(oldname, &old_parent);
- if (IS_ERR(old_dentry))
- SGOTO(exit, rc = PTR_ERR(old_dentry));
+ if (IS_ERR(old_dentry)) {
+ rc = PTR_ERR(old_dentry);
+ goto exit;
+ }
spl_inode_unlock(old_parent.dentry->d_inode);
new_dentry = spl_kern_path_locked(newname, &new_parent);
- if (IS_ERR(new_dentry))
- SGOTO(exit2, rc = PTR_ERR(new_dentry));
+ if (IS_ERR(new_dentry)) {
+ rc = PTR_ERR(new_dentry);
+ goto exit2;
+ }
spl_inode_unlock(new_parent.dentry->d_inode);
rc = -EXDEV;
if (old_parent.mnt != new_parent.mnt)
- SGOTO(exit3, rc);
+ goto exit3;
old_dir = old_parent.dentry;
new_dir = new_parent.dentry;
/* source should not be ancestor of target */
rc = -EINVAL;
if (old_dentry == trap)
- SGOTO(exit4, rc);
+ goto exit4;
/* target should not be an ancestor of source */
rc = -ENOTEMPTY;
if (new_dentry == trap)
- SGOTO(exit4, rc);
+ goto exit4;
/* source must exist */
rc = -ENOENT;
if (!old_dentry->d_inode)
- SGOTO(exit4, rc);
+ goto exit4;
 /* unless the source is a directory, trailing slashes give -ENOTDIR */
if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
rc = -ENOTDIR;
if (old_dentry->d_name.name[old_dentry->d_name.len])
- SGOTO(exit4, rc);
+ goto exit4;
if (new_dentry->d_name.name[new_dentry->d_name.len])
- SGOTO(exit4, rc);
+ goto exit4;
}
#if defined(HAVE_4ARGS_VFS_RENAME)
dput(old_dentry);
path_put(&old_parent);
exit:
- SRETURN(-rc);
+ return (-rc);
}
EXPORT_SYMBOL(vn_rename);
struct file *fp;
struct kstat stat;
int rc;
- SENTRY;
ASSERT(vp);
ASSERT(vp->v_file);
rc = vfs_getattr(fp->f_path.mnt, fp->f_dentry, &stat);
#endif
if (rc)
- SRETURN(-rc);
+ return (-rc);
vap->va_type = vn_mode_to_vtype(stat.mode);
vap->va_mode = stat.mode;
vap->va_rdev = stat.rdev;
vap->va_nblocks = stat.blocks;
- SRETURN(0);
+ return (0);
}
EXPORT_SYMBOL(vn_getattr);
int vn_fsync(vnode_t *vp, int flags, void *x3, void *x4)
{
int datasync = 0;
- SENTRY;
ASSERT(vp);
ASSERT(vp->v_file);
if (flags & FDSYNC)
datasync = 1;
- SRETURN(-spl_filp_fsync(vp->v_file, datasync));
+ return (-spl_filp_fsync(vp->v_file, datasync));
} /* vn_fsync() */
EXPORT_SYMBOL(vn_fsync);
offset_t offset, void *x6, void *x7)
{
int error = EOPNOTSUPP;
- SENTRY;
if (cmd != F_FREESP || bfp->l_whence != 0)
- SRETURN(EOPNOTSUPP);
+ return (EOPNOTSUPP);
ASSERT(vp);
ASSERT(vp->v_file);
FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
bfp->l_start, bfp->l_len);
if (error == 0)
- SRETURN(0);
+ return (0);
#endif
#ifdef HAVE_INODE_TRUNCATE_RANGE
if (end % PAGE_SIZE != 0) {
end &= ~(off_t)(PAGE_SIZE - 1);
if (end <= bfp->l_start)
- SRETURN(0);
+ return (0);
}
--end;
vp->v_file->f_dentry->d_inode,
bfp->l_start, end
);
- SRETURN(0);
+ return (0);
}
#endif
- SRETURN(error);
+ return (error);
}
EXPORT_SYMBOL(vn_space);
file_t *fp;
vnode_t *vp;
int rc = 0;
- SENTRY;
 /* Already open, just take an extra reference */
spin_lock(&vn_file_lock);
if (fp) {
atomic_inc(&fp->f_ref);
spin_unlock(&vn_file_lock);
- SRETURN(fp);
+ return (fp);
}
spin_unlock(&vn_file_lock);
/* File was not yet opened create the object and setup */
fp = kmem_cache_alloc(vn_file_cache, KM_SLEEP);
if (fp == NULL)
- SGOTO(out, rc);
+ goto out;
mutex_enter(&fp->f_lock);
lfp = fget(fd);
if (lfp == NULL)
- SGOTO(out_mutex, rc);
+ goto out_mutex;
vp = vn_alloc(KM_SLEEP);
if (vp == NULL)
- SGOTO(out_fget, rc);
+ goto out_fget;
#ifdef HAVE_2ARGS_VFS_GETATTR
rc = vfs_getattr(&lfp->f_path, &stat);
rc = vfs_getattr(lfp->f_path.mnt, lfp->f_dentry, &stat);
#endif
if (rc)
- SGOTO(out_vnode, rc);
+ goto out_vnode;
mutex_enter(&vp->v_lock);
vp->v_type = vn_mode_to_vtype(stat.mode);
spin_unlock(&vn_file_lock);
mutex_exit(&fp->f_lock);
- SRETURN(fp);
+ return (fp);
out_vnode:
vn_free(vp);
mutex_exit(&fp->f_lock);
kmem_cache_free(vn_file_cache, fp);
out:
- SRETURN(NULL);
+ return (NULL);
} /* getf() */
EXPORT_SYMBOL(getf);
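getf() above unwinds its partial setup through a ladder of labels in reverse order of acquisition (out_vnode, out_fget, out_mutex, out), so each failure point jumps to the label that releases exactly what has already been taken. A self-contained user-space sketch of the same ladder; the handle type and resource names are hypothetical:

#include <stdlib.h>

/* Illustrative only: the reverse-order cleanup ladder used by getf() above. */
struct example_handle {
	char *eh_buf;
	char *eh_name;
};

static struct example_handle *
example_open(size_t len)
{
	struct example_handle *h;

	h = malloc(sizeof (*h));
	if (h == NULL)
		goto out;		/* nothing to unwind yet */

	h->eh_buf = malloc(len);
	if (h->eh_buf == NULL)
		goto out_handle;	/* undo only the handle allocation */

	h->eh_name = malloc(len);
	if (h->eh_name == NULL)
		goto out_buf;		/* undo the buffer, then the handle */

	return (h);			/* success; was: SRETURN(fp); */

out_buf:
	free(h->eh_buf);
out_handle:
	free(h);
out:
	return (NULL);			/* was: SRETURN(NULL); */
}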
vn_releasef(int fd)
{
file_t *fp;
- SENTRY;
spin_lock(&vn_file_lock);
fp = file_find(fd);
atomic_dec(&fp->f_ref);
if (atomic_read(&fp->f_ref) > 0) {
spin_unlock(&vn_file_lock);
- SEXIT;
return;
}
}
spin_unlock(&vn_file_lock);
- SEXIT;
return;
} /* releasef() */
EXPORT_SYMBOL(releasef);
struct path path;
mm_segment_t saved_fs;
int rc;
- SENTRY;
/*
* user_path_dir() and __user_walk() both expect 'filename' to be
rc = user_path_dir(filename, &path);
if (rc)
- SGOTO(out, rc);
+ goto out;
rc = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
if (rc)
- SGOTO(dput_and_out, rc);
+ goto dput_and_out;
vn_set_fs_pwd(current->fs, &path);
out:
set_fs(saved_fs);
- SRETURN(-rc);
+ return (-rc);
} /* vn_set_pwd() */
EXPORT_SYMBOL(vn_set_pwd);
int
spl_vn_init(void)
{
- SENTRY;
vn_cache = kmem_cache_create("spl_vn_cache",
sizeof(struct vnode), 64,
vn_cache_constructor,
vn_file_cache_constructor,
vn_file_cache_destructor,
NULL, NULL, NULL, KMC_KMEM);
- SRETURN(0);
+ return (0);
} /* vn_init() */
void
{
file_t *fp, *next_fp;
int leaked = 0;
- SENTRY;
spin_lock(&vn_file_lock);
spin_unlock(&vn_file_lock);
if (leaked > 0)
- SWARN("Warning %d files leaked\n", leaked);
+ printk(KERN_WARNING "WARNING: %d vnode files leaked\n", leaked);
kmem_cache_destroy(vn_file_cache);
kmem_cache_destroy(vn_cache);
- SEXIT;
return;
} /* vn_fini() */
#include <sys/types.h>
#include <rpc/types.h>
#include <rpc/xdr.h>
-#include <spl-debug.h>
-
-#ifdef SS_DEBUG_SUBSYS
-#undef SS_DEBUG_SUBSYS
-#endif
-
-#define SS_DEBUG_SUBSYS SS_XDR
/*
* SPL's XDR mem implementation.
xdrs->x_ops = &xdrmem_decode_ops;
break;
default:
- SWARN("Invalid op value: %d\n", op);
xdrs->x_ops = NULL; /* Let the caller know we failed */
return;
}
xdrs->x_addr_end = addr + size;
if (xdrs->x_addr_end < xdrs->x_addr) {
- SWARN("Overflow while creating xdrmem: %p, %u\n", addr, size);
xdrs->x_ops = NULL;
}
}
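xdrmem_create() above still guards against the supplied address range wrapping around (x_addr_end ending up below x_addr after adding size); only the SWARN message is dropped. A user-space sketch of the same wraparound test, using uintptr_t so the comparison does not rely on out-of-bounds pointer arithmetic:

#include <stdint.h>
#include <stddef.h>

/* Illustrative only: detect address-range wraparound the way the
 * xdrmem_create() check above does. */
static int
range_wraps(const void *addr, size_t size)
{
	uintptr_t start = (uintptr_t)addr;
	uintptr_t end = start + size;	/* unsigned arithmetic wraps cleanly */

	return (end < start);		/* non-zero when addr + size overflowed */
}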
{
struct xdr_bytesrec *rec = (struct xdr_bytesrec *) info;
- if (req != XDR_GET_BYTES_AVAIL) {
- SWARN("Called with unknown request: %d\n", req);
+ if (req != XDR_GET_BYTES_AVAIL)
return FALSE;
- }
rec->xc_is_last_record = TRUE; /* always TRUE in xdrmem streams */
rec->xc_num_avail = xdrs->x_addr_end - xdrs->x_addr;
#include <sys/kmem.h>
#include <sys/zmod.h>
-#include <spl-debug.h>
-
-#ifdef DEBUG_SUBSYSTEM
-#undef DEBUG_SUBSYSTEM
-#endif
-
-#define DEBUG_SUBSYSTEM SS_ZLIB
static spl_kmem_cache_t *zlib_workspace_cache;
spl_zlib_init(void)
{
int size;
- SENTRY;
size = MAX(spl_zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
zlib_inflate_workspacesize());
size, 0, NULL, NULL, NULL, NULL, NULL,
KMC_VMEM | KMC_NOEMERGENCY);
if (!zlib_workspace_cache)
- SRETURN(1);
+ return (1);
- SRETURN(0);
+ return (0);
}
void
spl_zlib_fini(void)
{
- SENTRY;
kmem_cache_destroy(zlib_workspace_cache);
zlib_workspace_cache = NULL;
- SEXIT;
}
#ifndef _SPLAT_INTERNAL_H
#define _SPLAT_INTERNAL_H
-#include "spl-debug.h"
#include "splat-ctl.h"
#include <sys/mutex.h>
{
kmem_cache_thread_t *kct;
- ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
+ ASSERT3S(id, <, SPLAT_KMEM_THREADS);
ASSERT(kcp->kcp_kct[id] == NULL);
kct = kmem_zalloc(sizeof(kmem_cache_thread_t), KM_SLEEP);
then
echo --enable-debug
fi
- if [[ \${SPL_DKMS_ENABLE_DEBUG_LOG,,} == @(y|yes) ]]
- then
- echo --enable-debug-log
- fi
if [[ \${SPL_DKMS_ENABLE_DEBUG_KMEM,,} == @(y|yes) ]]
then
echo --enable-debug-kmem