1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://github.com/behlendorf/spl/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Debug Implementation.
25 \*****************************************************************************/
27 #include <linux/kmod.h>
29 #include <linux/vmalloc.h>
30 #include <linux/pagemap.h>
31 #include <linux/slab.h>
32 #include <linux/ctype.h>
33 #include <linux/kthread.h>
34 #include <linux/hardirq.h>
35 #include <linux/interrupt.h>
36 #include <linux/spinlock.h>
37 #include <linux/proc_compat.h>
38 #include <linux/file_compat.h>
39 #include <sys/sysmacros.h>
40 #include <spl-debug.h>
41 #include <spl-trace.h>
44 #ifdef SS_DEBUG_SUBSYS
45 #undef SS_DEBUG_SUBSYS
48 #define SS_DEBUG_SUBSYS SS_DEBUG
/* Module-wide tunables and shared state for the SPL debug subsystem.
 * Most are exported and exposed as writable (0644) module parameters.
 * NOTE(review): this listing is elided (gaps in embedded line numbers);
 * blank/omitted lines are not reproduced here. */
/* Bitmask selecting which subsystems emit debug messages (~0 = all). */
50 unsigned long spl_debug_subsys = ~0;
51 EXPORT_SYMBOL(spl_debug_subsys);
52 module_param(spl_debug_subsys, ulong, 0644);
53 MODULE_PARM_DESC(spl_debug_subsys, "Subsystem debugging level mask.");
/* Bitmask of debug message types recorded in the trace buffers. */
55 unsigned long spl_debug_mask = SD_CANTMASK;
56 EXPORT_SYMBOL(spl_debug_mask);
57 module_param(spl_debug_mask, ulong, 0644);
58 MODULE_PARM_DESC(spl_debug_mask, "Debugging level mask.");
/* Bitmask of message types additionally echoed to the console. */
60 unsigned long spl_debug_printk = SD_CANTMASK;
61 EXPORT_SYMBOL(spl_debug_printk);
62 module_param(spl_debug_printk, ulong, 0644);
63 MODULE_PARM_DESC(spl_debug_printk, "Console printk level mask.");
/* Total trace buffer size in MB; -1 means "pick a default" (see init). */
65 int spl_debug_mb = -1;
66 EXPORT_SYMBOL(spl_debug_mb);
67 module_param(spl_debug_mb, int, 0644);
68 MODULE_PARM_DESC(spl_debug_mb, "Total debug buffer size.");
/* Non-zero: prepend a binary spl_debug_header to each logged record. */
70 unsigned int spl_debug_binary = 1;
71 EXPORT_SYMBOL(spl_debug_binary);
/* Set once a fatal debug condition (SPL PANIC) has been hit. */
73 unsigned int spl_debug_catastrophe;
74 EXPORT_SYMBOL(spl_debug_catastrophe);
/* Non-zero: call panic() from spl_debug_bug() instead of continuing. */
76 unsigned int spl_debug_panic_on_bug = 0;
77 EXPORT_SYMBOL(spl_debug_panic_on_bug);
78 module_param(spl_debug_panic_on_bug, uint, 0644);
79 MODULE_PARM_DESC(spl_debug_panic_on_bug, "Panic on BUG");
/* Generated dump file name ("<path>.<secs>.<pid>") and its base path. */
81 static char spl_debug_file_name[PATH_MAX];
82 char spl_debug_file_path[PATH_MAX] = "/tmp/spl-log";
/* Console rate limiting knobs consumed by spl_debug_msg(). */
84 unsigned int spl_console_ratelimit = 1;
85 EXPORT_SYMBOL(spl_console_ratelimit);
87 long spl_console_max_delay;
88 EXPORT_SYMBOL(spl_console_max_delay);
90 long spl_console_min_delay;
91 EXPORT_SYMBOL(spl_console_min_delay);
93 unsigned int spl_console_backoff = SPL_DEFAULT_BACKOFF;
94 EXPORT_SYMBOL(spl_console_backoff);
96 unsigned int spl_debug_stack;
97 EXPORT_SYMBOL(spl_debug_stack);
/* Set when dumping from a panic path; forces single-CPU page collection. */
99 static int spl_panic_in_progress;
/* Per-type, per-CPU trace buffers plus per-CPU console staging buffers
 * (3 per CPU: process / softirq / irq context — see trace_get_console_buffer). */
101 union trace_data_union (*trace_data[TCD_TYPE_MAX])[NR_CPUS] __cacheline_aligned;
102 char *trace_console_buffers[NR_CPUS][3];
103 struct rw_semaphore trace_sem;
104 atomic_t trace_tage_allocated = ATOMIC_INIT(0);
/* Forward declarations for the dump/teardown helpers defined below. */
106 static int spl_debug_dump_all_pages(dumplog_priv_t *dp, char *);
107 static void trace_fini(void);
110 /* Memory percentage breakdown by type */
/* Entries must sum to 100: used as "(pages * factor) / 100" when sizing
 * each TCD type's page quota (see trace_init / spl_debug_set_mb). */
111 static unsigned int pages_factor[TCD_TYPE_MAX] = {
112 80, /* 80% pages for TCD_TYPE_PROC */
113 10, /* 10% pages for TCD_TYPE_SOFTIRQ */
114 10 /* 10% pages for TCD_TYPE_IRQ */
118 spl_debug_subsys2str(int subsys)
183 spl_debug_dbg2str(int debug)
210 spl_debug_mask2str(char *str, int size, unsigned long mask, int is_subsys)
212 const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
217 if (mask == 0) { /* "0" */
221 } else { /* space-separated tokens */
222 for (i = 0; i < 32; i++) {
225 if ((mask & bit) == 0)
229 if (token == NULL) /* unused bit */
232 if (len > 0) { /* separator? */
238 while (*token != 0) {
247 /* terminate 'str' */
257 spl_debug_token2mask(int *mask, const char *str, int len, int is_subsys)
259 const char *(*fn)(int bit) = is_subsys ? spl_debug_subsys2str :
264 /* match against known tokens */
265 for (i = 0; i < 32; i++) {
269 if (token == NULL) /* unused? */
274 if (j == len) { /* end of token */
285 if (str[j] == token[j])
288 if (str[j] < 'A' || 'Z' < str[j])
291 if (str[j] - 'A' + 'a' != token[j])
296 return -EINVAL; /* no match */
300 spl_debug_str2mask(unsigned long *mask, const char *str, int is_subsys)
303 int m = 0, matched, n, t;
305 /* Allow a number for backwards compatibility */
306 for (n = strlen(str); n > 0; n--)
307 if (!isspace(str[n-1]))
311 if ((t = sscanf(str, "%i%n", &m, &matched)) >= 1 && matched == n) {
316 /* <str> must be a list of debug tokens or numbers separated by
317 * whitespace and optionally an operator ('+' or '-'). If an operator
318 * appears first in <str>, '*mask' is used as the starting point
319 * (relative), otherwise 0 is used (absolute). An operator applies to
320 * all following tokens up to the next operator. */
323 while (isspace(*str)) /* skip whitespace */
329 if (*str == '+' || *str == '-') {
332 /* op on first token == relative */
336 while (isspace(*str)) /* skip whitespace */
339 if (*str == 0) /* trailing op */
343 /* find token length */
344 for (n = 0; str[n] != 0 && !isspace(str[n]); n++);
347 if (spl_debug_token2mask(&t, str, n, is_subsys) != 0)
/* Build a unique dump file name and write all trace pages to it.
 * Clears current->journal_info around the dump so the file I/O done by
 * spl_debug_dump_all_pages() cannot be mistaken for journaled fs activity,
 * then restores it. */
367 spl_debug_dumplog_internal(dumplog_priv_t *dp)
371 journal_info = current->journal_info;
372 current->journal_info = NULL;
/* NOTE(review): size argument uses sizeof(spl_debug_file_path) but the
 * destination is spl_debug_file_name — harmless only because both are
 * char[PATH_MAX]; should be sizeof(spl_debug_file_name), and the "- 1"
 * is unnecessary since snprintf() already NUL-terminates within size. */
374 snprintf(spl_debug_file_name, sizeof(spl_debug_file_path) - 1,
375 "%s.%ld.%ld", spl_debug_file_path,
376 get_seconds(), (long)dp->dp_pid);
377 printk("SPL: Dumping log to %s\n", spl_debug_file_name);
378 spl_debug_dump_all_pages(dp, spl_debug_file_name);
380 current->journal_info = journal_info;
/* Kthread entry point: perform the dump, then signal the waiter
 * (spl_debug_dumplog) via dp_done/dp_waitq before exiting. */
384 spl_debug_dumplog_thread(void *arg)
386 dumplog_priv_t *dp = (dumplog_priv_t *)arg;
388 spl_debug_dumplog_internal(dp);
/* Order matters: set the done flag before waking the waiter. */
389 atomic_set(&dp->dp_done, 1);
390 wake_up(&dp->dp_waitq);
391 complete_and_exit(NULL, 0);
393 return 0; /* Unreachable */
396 /* When flag is set do not use a new thread for the debug dump */
/* Dump the trace log either inline (DL_NOTHREAD, required when atomic)
 * or via a "spl_debug" kthread, blocking until the dump completes. */
398 spl_debug_dumplog(int flags)
400 struct task_struct *tsk;
403 init_waitqueue_head(&dp.dp_waitq);
404 dp.dp_pid = current->pid;
406 atomic_set(&dp.dp_done, 0);
408 if (dp.dp_flags & DL_NOTHREAD) {
409 spl_debug_dumplog_internal(&dp);
/* dp lives on this stack, so we must wait for the thread to finish
 * before returning (error-check lines for tsk are elided here). */
412 tsk = kthread_create(spl_debug_dumplog_thread,(void *)&dp,"spl_debug");
416 wake_up_process(tsk);
417 wait_event(dp.dp_waitq, atomic_read(&dp.dp_done));
422 EXPORT_SYMBOL(spl_debug_dumplog);
425 trace_get_console_buffer(void)
432 } else if (in_softirq()) {
438 return trace_console_buffers[cpu][idx];
442 trace_put_console_buffer(char *buffer)
/* Take the per-CPU trace buffer lock; IRQ flags are stashed inside the
 * tcd itself (tcd_lock_flags) for the matching trace_unlock_tcd(). */
448 trace_lock_tcd(struct trace_cpu_data *tcd)
450 __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);
452 spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
/* Release the per-CPU trace buffer lock, restoring the IRQ flags saved
 * by trace_lock_tcd(). */
458 trace_unlock_tcd(struct trace_cpu_data *tcd)
460 __ASSERT(tcd->tcd_type < TCD_TYPE_MAX);
462 spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
465 static struct trace_cpu_data *
469 struct trace_cpu_data *tcd;
473 tcd = &(*trace_data[TCD_TYPE_IRQ])[cpu].tcd;
474 else if (in_softirq())
475 tcd = &(*trace_data[TCD_TYPE_SOFTIRQ])[cpu].tcd;
477 tcd = &(*trace_data[TCD_TYPE_PROC])[cpu].tcd;
485 trace_put_tcd (struct trace_cpu_data *tcd)
487 trace_unlock_tcd(tcd);
/* Populate a record header with origin metadata: subsystem/mask bits,
 * current CPU, wall-clock time, caller stack depth, pid, and line number. */
493 trace_set_debug_header(struct spl_debug_header *header, int subsys,
494 int mask, const int line, unsigned long stack)
498 do_gettimeofday(&tv);
500 header->ph_subsys = subsys;
501 header->ph_mask = mask;
502 header->ph_cpu_id = smp_processor_id();
/* Seconds are truncated to 32 bits for the on-disk record format. */
503 header->ph_sec = (__u32)tv.tv_sec;
504 header->ph_usec = tv.tv_usec;
505 header->ph_stack = stack;
506 header->ph_pid = current->pid;
507 header->ph_line_num = line;
/* Echo a formatted trace record to the kernel console, choosing the
 * printk level from the severity bits in 'mask'.  SD_CONSOLE messages
 * get the short form; everything else includes pid/stack/file/line/fn.
 * NOTE(review): some branch bodies (ptype assignments, early return) are
 * elided from this listing. */
513 trace_print_to_console(struct spl_debug_header *hdr, int mask, const char *buf,
514 int len, const char *file, const char *fn)
516 char *prefix = "SPL", *ptype = NULL;
518 if ((mask & SD_EMERG) != 0) {
521 } else if ((mask & SD_ERROR) != 0) {
524 } else if ((mask & SD_WARNING) != 0) {
526 ptype = KERN_WARNING;
527 } else if ((mask & (SD_CONSOLE | spl_debug_printk)) != 0) {
532 if ((mask & SD_CONSOLE) != 0) {
533 printk("%s%s: %.*s", ptype, prefix, len, buf);
535 printk("%s%s: %d:%d:(%s:%d:%s()) %.*s", ptype, prefix,
536 hdr->ph_pid, hdr->ph_stack, file,
537 hdr->ph_line_num, fn, len, buf);
/* Upper bound for the debug buffer size: 80% of physical RAM in MB,
 * but never less than 512 MB (MAX of the two). */
544 trace_max_debug_mb(void)
546 return MAX(512, ((num_physpages >> (20 - PAGE_SHIFT)) * 80) / 100);
549 static struct trace_page *
/* Allocate one backing page plus its trace_page descriptor; on descriptor
 * allocation failure the page is released (NULL-return lines elided).
 * Bumps the global trace_tage_allocated counter on success. */
553 struct trace_page *tage;
555 page = alloc_pages(gfp | __GFP_NOWARN, 0);
559 tage = kmalloc(sizeof(*tage), gfp);
561 __free_pages(page, 0);
566 atomic_inc(&trace_tage_allocated);
/* Release a trace page: free its backing page, then the descriptor
 * (kfree line elided), and decrement the allocation counter. */
572 tage_free(struct trace_page *tage)
574 __ASSERT(tage != NULL);
575 __ASSERT(tage->page != NULL);
577 __free_pages(tage->page, 0);
579 atomic_dec(&trace_tage_allocated);
582 static struct trace_page *
/* Convert an embedded list_head back to its containing trace_page. */
583 tage_from_list(struct list_head *list)
585 return list_entry(list, struct trace_page, linkage);
/* Move (not add — list_move_tail unlinks first) a trace page to the
 * tail of 'queue'. */
589 tage_to_tail(struct trace_page *tage, struct list_head *queue)
591 __ASSERT(tage != NULL);
592 __ASSERT(queue != NULL);
594 list_move_tail(&tage->linkage, queue);
597 /* try to return a page that has 'len' bytes left at the end */
598 static struct trace_page *
/* Strategy: (1) reuse the current tail page if it still has 'len' bytes
 * free; (2) otherwise, while under the per-tcd quota, take a pre-allocated
 * stock page or fall back to a GFP_ATOMIC allocation (failure path printk
 * partially elided).  Returns NULL when over quota (elided). */
599 trace_get_tage_try(struct trace_cpu_data *tcd, unsigned long len)
601 struct trace_page *tage;
603 if (tcd->tcd_cur_pages > 0) {
604 __ASSERT(!list_empty(&tcd->tcd_pages));
605 tage = tage_from_list(tcd->tcd_pages.prev);
606 if (tage->used + len <= PAGE_SIZE)
610 if (tcd->tcd_cur_pages < tcd->tcd_max_pages) {
611 if (tcd->tcd_cur_stock_pages > 0) {
612 tage = tage_from_list(tcd->tcd_stock_pages.prev);
613 tcd->tcd_cur_stock_pages--;
614 list_del_init(&tage->linkage);
616 tage = tage_alloc(GFP_ATOMIC);
619 "failure to allocate a tage (%ld)\n",
/* Tag the fresh page with its owning CPU/type so put_pages_back_on_all_cpus()
 * can route it home, then append it and grow the page count. */
626 tage->cpu = smp_processor_id();
627 tage->type = tcd->tcd_type;
628 list_add_tail(&tage->linkage, &tcd->tcd_pages);
629 tcd->tcd_cur_pages++;
637 /* return a page that has 'len' bytes left at the end */
638 static struct trace_page *
/* Like trace_get_tage_try(), but on failure recycles the OLDEST page
 * (head of tcd_pages) by moving it to the tail — i.e. the ring overwrites
 * its oldest data rather than dropping the new message (reset of 'used'
 * is elided from this listing). */
639 trace_get_tage(struct trace_cpu_data *tcd, unsigned long len)
641 struct trace_page *tage;
643 __ASSERT(len <= PAGE_SIZE);
645 tage = trace_get_tage_try(tcd, len);
649 if (tcd->tcd_cur_pages > 0) {
650 tage = tage_from_list(tcd->tcd_pages.next);
652 tage_to_tail(tage, &tcd->tcd_pages);
/* Core debug-message sink used by the SDEBUG macros.
 *
 * 'arg' is an optional per-callsite rate-limit state (spl_debug_limit_state_t);
 * 'subsys'/'mask' classify the message; 'file'/'fn'/'line' identify the
 * callsite; 'format'/... is the printf-style payload.
 *
 * Phase 1: format the message directly into a per-CPU trace page (binary
 * header + file + fn + text), retrying once with the size vsnprintf said
 * it needed.  Phase 2: if 'mask' intersects spl_debug_printk, echo it to
 * the console, subject to exponential-backoff rate limiting via 'cdls'.
 * NOTE(review): numerous lines (locals, early returns, branch tails) are
 * elided from this listing. */
659 spl_debug_msg(void *arg, int subsys, int mask, const char *file,
660 const char *fn, const int line, const char *format, ...)
662 spl_debug_limit_state_t *cdls = arg;
663 struct trace_cpu_data *tcd = NULL;
664 struct spl_debug_header header = { 0, };
665 struct trace_page *tage;
666 /* string_buf is used only if tcd != NULL, and is always set then */
667 char *string_buf = NULL;
670 int needed = 85; /* average message length */
676 subsys = SS_DEBUG_SUBSYS;
/* Log only the basename of the source file. */
681 if (strchr(file, '/'))
682 file = strrchr(file, '/') + 1;
684 trace_set_debug_header(&header, subsys, mask, line, 0);
686 tcd = trace_get_tcd();
690 if (tcd->tcd_shutting_down) {
/* Fixed overhead preceding the formatted text in the page. */
696 known_size = strlen(file) + 1;
698 known_size += strlen(fn) + 1;
700 if (spl_debug_binary)
701 known_size += sizeof(header);
703 /* '2' used because vsnprintf returns real size required for output
704 * _without_ terminating NULL. */
705 for (i = 0; i < 2; i++) {
706 tage = trace_get_tage(tcd, needed + known_size + 1);
708 if (needed + known_size > PAGE_SIZE)
716 string_buf = (char *)page_address(tage->page) +
717 tage->used + known_size;
719 max_nob = PAGE_SIZE - tage->used - known_size;
721 printk(KERN_EMERG "negative max_nob: %i\n", max_nob);
730 va_start(ap, format);
731 needed += vsnprintf(string_buf, max_nob, format, ap);
/* Message fit — stop retrying. */
735 if (needed < max_nob)
/* Commit the record: header (optional), file, fn, then the text which
 * was already formatted in place at string_buf. */
739 header.ph_len = known_size + needed;
740 debug_buf = (char *)page_address(tage->page) + tage->used;
742 if (spl_debug_binary) {
743 memcpy(debug_buf, &header, sizeof(header));
744 tage->used += sizeof(header);
745 debug_buf += sizeof(header);
748 strcpy(debug_buf, file);
749 tage->used += strlen(file) + 1;
750 debug_buf += strlen(file) + 1;
753 strcpy(debug_buf, fn);
754 tage->used += strlen(fn) + 1;
755 debug_buf += strlen(fn) + 1;
758 __ASSERT(debug_buf == string_buf);
760 tage->used += needed;
761 __ASSERT (tage->used <= PAGE_SIZE);
764 if ((mask & spl_debug_printk) == 0) {
765 /* no console output requested */
/* Rate limiting: while within the current delay window, drop console
 * output (the skipped-message counter increment is elided). */
772 if (spl_console_ratelimit && cdls->cdls_next != 0 &&
773 !time_before(cdls->cdls_next, jiffies)) {
774 /* skipping a console message */
781 if (time_before(cdls->cdls_next + spl_console_max_delay +
782 (10 * HZ), jiffies)) {
783 /* last timeout was a long time ago */
784 cdls->cdls_delay /= spl_console_backoff * 4;
786 cdls->cdls_delay *= spl_console_backoff;
/* Clamp the delay to [spl_console_min_delay, spl_console_max_delay]. */
788 if (cdls->cdls_delay < spl_console_min_delay)
789 cdls->cdls_delay = spl_console_min_delay;
790 else if (cdls->cdls_delay > spl_console_max_delay)
791 cdls->cdls_delay = spl_console_max_delay;
794 /* ensure cdls_next is never zero after it's been seen */
795 cdls->cdls_next = (jiffies + cdls->cdls_delay) | 1;
799 trace_print_to_console(&header, mask, string_buf, needed, file, fn);
/* Path taken when no tcd was available: format into a per-CPU console
 * staging buffer instead of a trace page. */
802 string_buf = trace_get_console_buffer();
805 if (format != NULL) {
806 va_start(ap, format);
807 needed += vsnprintf(string_buf,
808 TRACE_CONSOLE_BUFFER_SIZE, format, ap);
811 trace_print_to_console(&header, mask,
812 string_buf, needed, file, fn);
814 trace_put_console_buffer(string_buf);
/* Report (and reset) the count of messages suppressed by rate limiting. */
817 if (cdls != NULL && cdls->cdls_count != 0) {
818 string_buf = trace_get_console_buffer();
820 needed = snprintf(string_buf, TRACE_CONSOLE_BUFFER_SIZE,
821 "Skipped %d previous similar message%s\n",
822 cdls->cdls_count, (cdls->cdls_count > 1) ? "s" : "");
824 trace_print_to_console(&header, mask,
825 string_buf, needed, file, fn);
827 trace_put_console_buffer(string_buf);
828 cdls->cdls_count = 0;
833 EXPORT_SYMBOL(spl_debug_msg);
835 /* Do the collect_pages job on a single CPU: assumes that all other
836 * CPUs have been stopped during a panic. If this isn't true for
837 * some arch, this will have to be implemented separately in each arch.
/* Splices every tcd's page list into the collection without taking the
 * tcd locks — safe only under the panic assumption above. */
840 collect_pages_from_single_cpu(struct page_collection *pc)
842 struct trace_cpu_data *tcd;
845 tcd_for_each(tcd, i, j) {
846 list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
847 tcd->tcd_cur_pages = 0;
/* Normal (non-panic) page collection: under pc_lock, walk every possible
 * CPU and splice each locked tcd's pages into the collection, zeroing the
 * per-tcd counts. */
852 collect_pages_on_all_cpus(struct page_collection *pc)
854 struct trace_cpu_data *tcd;
857 spin_lock(&pc->pc_lock);
858 for_each_possible_cpu(cpu) {
859 tcd_for_each_type_lock(tcd, i, cpu) {
860 list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
861 tcd->tcd_cur_pages = 0;
864 spin_unlock(&pc->pc_lock);
/* Gather all trace pages into 'pc', choosing the single-CPU (panic /
 * DL_SINGLE_CPU) or all-CPU strategy. */
868 collect_pages(dumplog_priv_t *dp, struct page_collection *pc)
870 INIT_LIST_HEAD(&pc->pc_pages);
872 if (spl_panic_in_progress || dp->dp_flags & DL_SINGLE_CPU)
873 collect_pages_from_single_cpu(pc);
875 collect_pages_on_all_cpus(pc);
/* Return collected (but unconsumed) pages to their home tcds, matching
 * each page's recorded cpu/type tag; pages are inserted before the tcd's
 * current head so ordering is preserved. */
879 put_pages_back_on_all_cpus(struct page_collection *pc)
881 struct trace_cpu_data *tcd;
882 struct list_head *cur_head;
883 struct trace_page *tage;
884 struct trace_page *tmp;
887 spin_lock(&pc->pc_lock);
889 for_each_possible_cpu(cpu) {
890 tcd_for_each_type_lock(tcd, i, cpu) {
891 cur_head = tcd->tcd_pages.next;
893 list_for_each_entry_safe(tage, tmp, &pc->pc_pages,
/* Skip pages that belong to a different cpu/type pair. */
895 if (tage->cpu != cpu || tage->type != i)
898 tage_to_tail(tage, cur_head);
899 tcd->tcd_cur_pages++;
904 spin_unlock(&pc->pc_lock);
/* Give pages back to their tcds — skipped entirely during a panic, when
 * the buffers are about to be abandoned anyway. */
908 put_pages_back(struct page_collection *pc)
910 if (!spl_panic_in_progress)
911 put_pages_back_on_all_cpus(pc)
/* Write every collected trace page to 'filename' and free the pages.
 * Holds trace_sem for writing across the whole dump.  O_EXCL guarantees
 * a fresh file (name is timestamp+pid unique).  On a short write the
 * remaining pages are put back (elided) — hence the list-empty assert.
 * NOTE(review): several error/cleanup lines are elided from this listing. */
915 spl_debug_dump_all_pages(dumplog_priv_t *dp, char *filename)
917 struct page_collection pc;
919 struct trace_page *tage;
920 struct trace_page *tmp;
924 down_write(&trace_sem);
926 filp = spl_filp_open(filename, O_CREAT|O_EXCL|O_WRONLY|O_LARGEFILE,
930 printk(KERN_ERR "SPL: Can't open %s for dump: %d\n",
935 spin_lock_init(&pc.pc_lock);
936 collect_pages(dp, &pc);
937 if (list_empty(&pc.pc_pages)) {
945 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
946 rc = spl_filp_write(filp, page_address(tage->page),
947 tage->used, spl_filp_poff(filp));
948 if (rc != (int)tage->used) {
949 printk(KERN_WARNING "SPL: Wanted to write %u "
950 "but wrote %d\n", tage->used, rc);
952 __ASSERT(list_empty(&pc.pc_pages));
/* Page fully written: unlink and free it (tage_free call elided). */
955 list_del(&tage->linkage);
961 rc = spl_filp_fsync(filp, 1);
963 printk(KERN_ERR "SPL: Unable to sync: %d\n", rc);
965 spl_filp_close(filp);
967 up_write(&trace_sem);
/* Discard all buffered trace pages without writing them anywhere:
 * collect everything, then free each page (tage_free call elided). */
973 spl_debug_flush_pages(void)
976 struct page_collection pc;
977 struct trace_page *tage;
978 struct trace_page *tmp;
980 spin_lock_init(&pc.pc_lock);
981 init_waitqueue_head(&dp.dp_waitq);
982 dp.dp_pid = current->pid;
984 atomic_set(&dp.dp_done, 0);
986 collect_pages(&dp, &pc);
987 list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
988 list_del(&tage->linkage);
/* Setter for the global debug message mask (spl_debug_mask). */
994 spl_debug_set_mask(unsigned long mask) {
995 spl_debug_mask = mask;
998 EXPORT_SYMBOL(spl_debug_set_mask);
/* Getter for the global debug message mask (spl_debug_mask). */
1001 spl_debug_get_mask(void) {
1002 return spl_debug_mask;
1004 EXPORT_SYMBOL(spl_debug_get_mask);
/* Setter for the global subsystem mask (spl_debug_subsys). */
1007 spl_debug_set_subsys(unsigned long subsys) {
1008 spl_debug_subsys = subsys;
1011 EXPORT_SYMBOL(spl_debug_set_subsys);
/* Getter for the global subsystem mask (spl_debug_subsys). */
1014 spl_debug_get_subsys(void) {
1015 return spl_debug_subsys;
1017 EXPORT_SYMBOL(spl_debug_get_subsys);
/* Resize the total debug buffer to 'mb' megabytes.
 * Rejects values below num_possible_cpus() (each CPU needs at least 1 MB
 * of its share) and above trace_max_debug_mb(); the size is split evenly
 * per CPU and then per TCD type via tcd_pages_factor percentages. */
1020 spl_debug_set_mb(int mb)
1023 int limit = trace_max_debug_mb();
1024 struct trace_cpu_data *tcd;
1026 if (mb < num_possible_cpus()) {
1027 printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
1028 "%dMB - lower limit is %d\n", mb, num_possible_cpus());
1033 printk(KERN_ERR "SPL: Refusing to set debug buffer size to "
1034 "%dMB - upper limit is %d\n", mb, limit);
/* Per-CPU share in pages: MB -> pages via (20 - PAGE_SHIFT). */
1038 mb /= num_possible_cpus();
1039 pages = mb << (20 - PAGE_SHIFT);
1041 down_write(&trace_sem);
1043 tcd_for_each(tcd, i, j)
1044 tcd->tcd_max_pages = (pages * tcd->tcd_pages_factor) / 100;
1046 up_write(&trace_sem);
1050 EXPORT_SYMBOL(spl_debug_set_mb);
/* Report the current total debug buffer size in MB: sum of all tcd page
 * quotas converted to MB, +1 to round up the integer division. */
1053 spl_debug_get_mb(void)
1056 struct trace_cpu_data *tcd;
1057 int total_pages = 0;
1059 down_read(&trace_sem);
1061 tcd_for_each(tcd, i, j)
1062 total_pages += tcd->tcd_max_pages;
1064 up_read(&trace_sem);
1066 return (total_pages >> (20 - PAGE_SHIFT)) + 1;
1068 EXPORT_SYMBOL(spl_debug_get_mb);
/* Print the stack of 'tsk' to the console (NULL-task default handling
 * and the actual dump call are elided from this listing). */
1070 void spl_debug_dumpstack(struct task_struct *tsk)
1072 extern void show_task(struct task_struct *);
1077 printk("SPL: Showing stack for process %d\n", tsk->pid);
1080 EXPORT_SYMBOL(spl_debug_dumpstack);
/* Fatal assertion handler (SPL PANIC).
 * Logs an SD_EMERG record, panics immediately if in interrupt context,
 * forces an inline (no-kthread) dump when atomic or IRQs are disabled,
 * dumps the stack and log, and finally either panics (when
 * spl_debug_panic_on_bug) or parks the task uninterruptibly.
 * NOTE(review): the interrupt-context test guarding line 1088 and the
 * final panic/schedule calls are elided from this listing. */
1082 void spl_debug_bug(char *file, const char *func, const int line, int flags)
1084 spl_debug_catastrophe = 1;
1085 spl_debug_msg(NULL, 0, SD_EMERG, file, func, line, "SPL PANIC\n");
1088 panic("SPL PANIC in interrupt.\n");
1090 if (in_atomic() || irqs_disabled())
1091 flags |= DL_NOTHREAD;
1093 /* Ensure all debug pages and dumped by current cpu */
1094 if (spl_debug_panic_on_bug)
1095 spl_panic_in_progress = 1;
1097 spl_debug_dumpstack(NULL);
1098 spl_debug_dumplog(flags);
1100 if (spl_debug_panic_on_bug)
1103 set_task_state(current, TASK_UNINTERRUPTIBLE);
1107 EXPORT_SYMBOL(spl_debug_bug);
/* Drop all buffered trace data (thin wrapper over spl_debug_flush_pages). */
1110 spl_debug_clear_buffer(void)
1112 spl_debug_flush_pages();
1115 EXPORT_SYMBOL(spl_debug_clear_buffer);
/* Insert a visually distinct marker record containing 'text' into the
 * trace log, to help correlate external events with log position. */
1118 spl_debug_mark_buffer(char *text)
1120 SDEBUG(SD_WARNING, "*************************************\n");
1121 SDEBUG(SD_WARNING, "DEBUG MARKER: %s\n", text);
1122 SDEBUG(SD_WARNING, "*************************************\n");
1126 EXPORT_SYMBOL(spl_debug_mark_buffer);
/* One-time trace infrastructure setup.
 * Allocates the per-type NR_CPUS tcd arrays, initializes every tcd
 * (lock, lists, per-type page quota from pages_factor), and allocates
 * 3 console staging buffers per possible CPU.  On any allocation failure
 * falls through to the error path (cleanup lines elided). */
1129 trace_init(int max_pages)
1131 struct trace_cpu_data *tcd;
1134 init_rwsem(&trace_sem);
1136 /* initialize trace_data */
1137 memset(trace_data, 0, sizeof(trace_data));
1138 for (i = 0; i < TCD_TYPE_MAX; i++) {
1139 trace_data[i] = kmalloc(sizeof(union trace_data_union) *
1140 NR_CPUS, GFP_KERNEL);
1141 if (trace_data[i] == NULL)
1145 tcd_for_each(tcd, i, j) {
1146 spin_lock_init(&tcd->tcd_lock);
1147 tcd->tcd_pages_factor = pages_factor[i];
1150 INIT_LIST_HEAD(&tcd->tcd_pages);
1151 INIT_LIST_HEAD(&tcd->tcd_stock_pages);
1152 tcd->tcd_cur_pages = 0;
1153 tcd->tcd_cur_stock_pages = 0;
/* Quota: this type's percentage of the per-CPU page budget. */
1154 tcd->tcd_max_pages = (max_pages * pages_factor[i]) / 100;
1155 tcd->tcd_shutting_down = 0;
1158 for (i = 0; i < num_possible_cpus(); i++) {
1159 for (j = 0; j < 3; j++) {
1160 trace_console_buffers[i][j] =
1161 kmalloc(TRACE_CONSOLE_BUFFER_SIZE,
1164 if (trace_console_buffers[i][j] == NULL)
1172 printk(KERN_ERR "SPL: Insufficient memory for debug logs\n");
1179 int rc, max = spl_debug_mb;
1181 spl_console_max_delay = SPL_DEFAULT_MAX_DELAY;
1182 spl_console_min_delay = SPL_DEFAULT_MIN_DELAY;
1184 /* If spl_debug_mb is set to an invalid value or uninitialized
1185 * then just make the total buffers smp_num_cpus TCD_MAX_PAGES */
1186 if (max > (num_physpages >> (20 - 2 - PAGE_SHIFT)) / 5 ||
1187 max >= 512 || max < 0) {
1188 max = TCD_MAX_PAGES;
1190 max = (max / num_online_cpus()) << (20 - PAGE_SHIFT);
1193 rc = trace_init(max);
/* Teardown helper: mark every tcd shutting down (so spl_debug_msg stops
 * logging into it), then free all of its pages (tage_free call elided)
 * and reset the counts. */
1201 trace_cleanup_on_all_cpus(void)
1203 struct trace_cpu_data *tcd;
1204 struct trace_page *tage;
1205 struct trace_page *tmp;
1208 for_each_possible_cpu(cpu) {
1209 tcd_for_each_type_lock(tcd, i, cpu) {
1210 tcd->tcd_shutting_down = 1;
1212 list_for_each_entry_safe(tage, tmp, &tcd->tcd_pages,
1214 list_del(&tage->linkage);
1217 tcd->tcd_cur_pages = 0;
1227 trace_cleanup_on_all_cpus();
1229 for (i = 0; i < num_possible_cpus(); i++) {
1230 for (j = 0; j < 3; j++) {
1231 if (trace_console_buffers[i][j] != NULL) {
1232 kfree(trace_console_buffers[i][j]);
1233 trace_console_buffers[i][j] = NULL;
1238 for (i = 0; i < TCD_TYPE_MAX && trace_data[i] != NULL; i++) {
1239 kfree(trace_data[i]);
1240 trace_data[i] = NULL;