projects
/
firefly-linux-kernel-4.4.55.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ryusuke...
[firefly-linux-kernel-4.4.55.git]
/
mm
/
slob.c
diff --git
a/mm/slob.c
b/mm/slob.c
index 7a3411524dacd555e9ce70e287a4232072391e10..a2d4ab32198d85b9773926c27ba7fdd07adfbfe7 100644
(file)
--- a/
mm/slob.c
+++ b/
mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -474,6 +475,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret;
 
 	lockdep_trace_alloc(gfp);
@@ -482,12 +484,16 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
+
 		if (!m)
 			return NULL;
 		*m = size;
-		return (void *)m + align;
+		ret = (void *)m + align;
+
+		trace_kmalloc_node(_RET_IP_, ret,
+				   size, size + align, gfp, node);
 	} else {
-		void *ret;
+		unsigned int order = get_order(size);
 
 		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
 		if (ret) {
@@ -495,8 +501,12 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 			page = virt_to_page(ret);
 			page->private = size;
 		}
-		return ret;
+
+		trace_kmalloc_node(_RET_IP_, ret,
+				   size, PAGE_SIZE << order, gfp, node);
 	}
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
@@ -504,6 +514,8 @@ void kfree(const void *block)
 {
 	struct slob_page *sp;
 
+	trace_kfree(_RET_IP_, block);
+
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
@@ -583,10 +595,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
-	if (c->size < PAGE_SIZE)
+	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-	else
+		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+					    SLOB_UNITS(c->size) * SLOB_UNIT,
+					    flags, node);
+	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
+		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+					    PAGE_SIZE << get_order(c->size),
+					    flags, node);
+	}
 
 	if (c->ctor)
 		c->ctor(b);
@@ -622,6 +641,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
+
+	trace_kmem_cache_free(_RET_IP_, b);
 }
EXPORT_SYMBOL(kmem_cache_free);