atomic_t vmem_alloc_used = ATOMIC_INIT(0);
unsigned long long vmem_alloc_max = 0;
# endif /* _LP64 */
-int kmem_warning_flag = 1;
EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_used);
EXPORT_SYMBOL(vmem_alloc_max);
-EXPORT_SYMBOL(kmem_warning_flag);
/* When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
* but also the location of every alloc and free. When the SPL module is
EXPORT_SYMBOL(vmem_table);
EXPORT_SYMBOL(vmem_list);
# endif
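/* A minimal sketch of the per-allocation tracking record implied by
 * the comment above, assuming a hash table keyed by address plus a
 * global list of live allocations (field names are illustrative, not
 * the authoritative kmem_debug layout):
 *
 *   typedef struct kmem_debug {
 *           struct hlist_node kd_hlist; // hash chain, keyed by kd_addr
 *           struct list_head  kd_list;  // global list of live allocations
 *           void             *kd_addr;  // address returned to the caller
 *           size_t            kd_size;  // size, for the *_used accounting
 *           const char       *kd_func;  // allocating function (kstrdup'd)
 *           int               kd_line;  // allocating line number
 *   } kmem_debug_t;
 */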
-
-int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
-#else
-int kmem_set_warning(int flag) { return 0; }
#endif
-EXPORT_SYMBOL(kmem_set_warning);
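/* With the global kmem_set_warning() toggle removed, call sites that
 * knowingly make large allocations now suppress the console warning
 * per allocation instead. A minimal sketch of the new convention (the
 * buffer and size here are illustrative only):
 *
 *   void *buf = kmem_alloc(PAGE_SIZE * 8, KM_SLEEP | __GFP_NOWARN);
 *   if (buf != NULL)
 *           kmem_free(buf, PAGE_SIZE * 8);
 */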
/*
* Slab allocation interfaces
} else {
/* Marked unlikely because we should never be doing this,
 * we tolerate up to 2 pages but a single page is best. */
- if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag)
+	if (unlikely((size > PAGE_SIZE * 2) && !(flags & __GFP_NOWARN))) {
CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
(unsigned long long) size, flags,
kmem_alloc_used_read(), kmem_alloc_max);
+ spl_debug_dumpstack(NULL);
+ }
/* We use kstrdup() below because the string pointed to by
* __FUNCTION__ might not be available by the time we want
/* Marked unlikely because we should never be doing this,
 * we tolerate up to 2 pages but a single page is best. */
- if (unlikely(size > (PAGE_SIZE * 2)) && kmem_warning_flag)
+ if (unlikely((size > PAGE_SIZE * 2) && !(flags & __GFP_NOWARN))) {
CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
(unsigned long long) size, flags,
kmem_alloc_used_read(), kmem_alloc_max);
+ spl_debug_dumpstack(NULL);
+ }
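/* Both call sites now honor __GFP_NOWARN, and spl_debug_dumpstack(NULL)
 * dumps the current task's stack alongside the warning, so an offending
 * caller can be pinpointed directly instead of toggling a global flag. */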
/* Use the correct allocator */
if (node_alloc) {
if (current_thread_info()->preempt_count || irqs_disabled())
kmem_flags = KM_NOSLEEP;
- /* Allocate new cache memory and initialize. */
- skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc), kmem_flags);
+	/* Allocate memory for a new cache and initialize it. Unfortunately,
+	 * this usually ends up being a large allocation of ~32k because
+	 * we need to allocate enough memory for the worst case number of
+	 * cpus in the magazine, skc_mag[NR_CPUS]. Because of this we
+	 * explicitly pass __GFP_NOWARN to suppress the kmem warning. */
+ skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc),
+ kmem_flags | __GFP_NOWARN);
if (skc == NULL)
RETURN(NULL);
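/* Back-of-the-envelope for the "~32k" above, assuming 8-byte pointers
 * and a kernel configured with NR_CPUS = 4096 (both figures are
 * illustrative):
 *
 *   NR_CPUS * sizeof(spl_kmem_magazine_t *) = 4096 * 8 = 32768 bytes
 *
 * which by itself exceeds the two-page warning threshold, hence the
 * __GFP_NOWARN passed to kmem_zalloc() above. */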
int size = PAGE_SIZE;
int i, count, rc = 0;
- /* We are intentionally going to push kmem_alloc to its max
- * allocation size, so suppress the console warnings for now */
- kmem_set_warning(0);
-
while ((!rc) && (size <= (PAGE_SIZE * 32))) {
count = 0;
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
- ptr[i] = kmem_alloc(size, KM_SLEEP);
+ ptr[i] = kmem_alloc(size, KM_SLEEP | __GFP_NOWARN);
if (ptr[i])
count++;
}
size *= 2;
}
- kmem_set_warning(1);
-
return rc;
}
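/* With a 4k PAGE_SIZE the loop above walks allocation sizes 4k, 8k,
 * 16k, 32k, 64k and 128k; every size beyond two pages would otherwise
 * trigger the large-allocation CWARN, which is why each kmem_alloc()
 * carries __GFP_NOWARN rather than bracketing the whole loop with
 * kmem_set_warning(). */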
int size = PAGE_SIZE;
int i, j, count, rc = 0;
- /* We are intentionally going to push kmem_alloc to its max
- * allocation size, so suppress the console warnings for now */
- kmem_set_warning(0);
-
while ((!rc) && (size <= (PAGE_SIZE * 32))) {
count = 0;
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
- ptr[i] = kmem_zalloc(size, KM_SLEEP);
+ ptr[i] = kmem_zalloc(size, KM_SLEEP | __GFP_NOWARN);
if (ptr[i])
count++;
}
for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
for (j = 0; j < size; j++) {
if (((char *)ptr[i])[j] != '\0') {
			splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
"%d-byte allocation was "
"not zeroed\n", size);
rc = -EFAULT;
size *= 2;
}
- kmem_set_warning(1);
-
return rc;
}
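/* Same pattern as the test above, but using kmem_zalloc() and checking
 * byte-by-byte that each buffer really is zero-filled. A hypothetical
 * single-buffer version of the check:
 *
 *   char *p = kmem_zalloc(size, KM_SLEEP | __GFP_NOWARN);
 *   for (j = 0; p && j < size; j++)
 *           if (p[j] != '\0')
 *                   rc = -EFAULT;
 */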