kmemtrace: trace kfree() calls with NULL or zero-length objects

Impact: also output kfree(NULL) entries

This patch moves the trace_kfree() calls before the ZERO_OR_NULL_PTR
check so that we can trace call-sites that call kfree() with NULL many
times, which might be an indication of a bug.
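As an illustration (hypothetical code, not part of this patch), consider a
caller whose pointer is still NULL on most invocations. Every such kfree(NULL)
is legal but was previously invisible to kmemtrace; with this change each one
is recorded together with its call-site:

#include <linux/slab.h>

/* Hypothetical caller, for illustration only -- not from this patch. */
struct example_ctx {
	void *scratch;	/* lazily allocated, usually still NULL */
};

static void example_reset(struct example_ctx *ctx)
{
	/*
	 * Usually a kfree(NULL): harmless, but if this path is hot it
	 * may hint at a design problem.  With trace_kfree() moved in
	 * front of the ZERO_OR_NULL_PTR() check, kmemtrace now logs
	 * every one of these calls with its call-site (_RET_IP_).
	 */
	kfree(ctx->scratch);
	ctx->scratch = NULL;
}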

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
LKML-Reference: <1237971957.30175.18.camel@penberg-laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Commit 2121db74ba (parent c826e3cd0c)
Authored by Pekka Enberg, 2009-03-25 11:05:57 +02:00; committed by Ingo Molnar
3 changed files with 6 additions and 6 deletions

mm/slab.c

@@ -3773,6 +3773,8 @@ void kfree(const void *objp)
 	struct kmem_cache *c;
 	unsigned long flags;
 
+	trace_kfree(_RET_IP_, objp);
+
 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
 		return;
 	local_irq_save(flags);
@@ -3782,8 +3784,6 @@ void kfree(const void *objp)
 	debug_check_no_obj_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
-
-	trace_kfree(_RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);

mm/slob.c

@@ -514,6 +514,8 @@ void kfree(const void *block)
 {
 	struct slob_page *sp;
 
+	trace_kfree(_RET_IP_, block);
+
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
@@ -524,8 +526,6 @@ void kfree(const void *block)
 		slob_free(m, *m + align);
 	} else
 		put_page(&sp->page);
-
-	trace_kfree(_RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);

mm/slub.c

@@ -2792,6 +2792,8 @@ void kfree(const void *x)
 	struct page *page;
 	void *object = (void *)x;
 
+	trace_kfree(_RET_IP_, x);
+
 	if (unlikely(ZERO_OR_NULL_PTR(x)))
 		return;
@@ -2802,8 +2804,6 @@ void kfree(const void *x)
 		return;
 	}
 	slab_free(page->slab, page, object, _RET_IP_);
-
-	trace_kfree(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kfree);
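For reference, after this change all three allocators share the same shape: the
tracepoint fires unconditionally on entry, and only afterwards does kfree() take
its early return for NULL or ZERO_SIZE_PTR arguments. A condensed sketch
(allocator-specific freeing elided; not literal code from any one of the files):

void kfree(const void *objp)
{
	/* Fires for every call, including kfree(NULL). */
	trace_kfree(_RET_IP_, objp);

	if (unlikely(ZERO_OR_NULL_PTR(objp)))
		return;

	/* ... allocator-specific freeing (SLAB/SLOB/SLUB), elided ... */
}
EXPORT_SYMBOL(kfree);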