Merge branch 'for-4.0-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj...
[firefly-linux-kernel-4.4.55.git] / mm / kmemleak.c
index 8d2fcdfeff7fdb319f58c838cd8b94a6cc59121e..5405aff5a590c370d3c8ca37c0f9a0c19cb46775 100644 (file)
@@ -98,6 +98,7 @@
 #include <asm/processor.h>
 #include <linux/atomic.h>
 
+#include <linux/kasan.h>
 #include <linux/kmemcheck.h>
 #include <linux/kmemleak.h>
 #include <linux/memory_hotplug.h>
@@ -387,7 +388,7 @@ static void dump_object_info(struct kmemleak_object *object)
        pr_notice("  min_count = %d\n", object->min_count);
        pr_notice("  count = %d\n", object->count);
        pr_notice("  flags = 0x%lx\n", object->flags);
-       pr_notice("  checksum = %d\n", object->checksum);
+       pr_notice("  checksum = %u\n", object->checksum);
        pr_notice("  backtrace:\n");
        print_stack_trace(&trace, 4);
 }
@@ -989,6 +990,40 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr)
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
 
+/**
+ * kmemleak_update_trace - update object allocation stack trace
+ * @ptr:       pointer to beginning of the object
+ *
+ * Replace the stored allocation stack trace with the caller's current
+ * one, for cases where the original allocation site is not informative.
+ */
+void __ref kmemleak_update_trace(const void *ptr)
+{
+       struct kmemleak_object *object;
+       unsigned long flags;
+
+       pr_debug("%s(0x%p)\n", __func__, ptr);
+
+       if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
+               return;
+
+       object = find_and_get_object((unsigned long)ptr, 1);
+       if (!object) {
+#ifdef DEBUG
+               kmemleak_warn("Updating stack trace for unknown object at %p\n",
+                             ptr);
+#endif
+               return;
+       }
+
+       spin_lock_irqsave(&object->lock, flags);
+       object->trace_len = __save_stack_trace(object->trace);
+       spin_unlock_irqrestore(&object->lock, flags);
+
+       put_object(object);
+}
+EXPORT_SYMBOL(kmemleak_update_trace);
+
 /**
  * kmemleak_not_leak - mark an allocated object as false positive
  * @ptr:       pointer to beginning of the object
@@ -1079,7 +1114,10 @@ static bool update_checksum(struct kmemleak_object *object)
        if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
                return false;
 
+       kasan_disable_current();
        object->checksum = crc32(0, (void *)object->pointer, object->size);
+       kasan_enable_current();
+
        return object->checksum != old_csum;
 }
 
@@ -1130,7 +1168,9 @@ static void scan_block(void *_start, void *_end,
                                                  BYTES_PER_POINTER))
                        continue;
 
+               kasan_disable_current();
                pointer = *ptr;
+               kasan_enable_current();
 
                object = find_and_get_object(pointer, 1);
                if (!object)
@@ -1300,7 +1340,7 @@ static void kmemleak_scan(void)
        /*
         * Struct page scanning for each node.
         */
-       lock_memory_hotplug();
+       get_online_mems();
        for_each_online_node(i) {
                unsigned long start_pfn = node_start_pfn(i);
                unsigned long end_pfn = node_end_pfn(i);
@@ -1318,7 +1358,7 @@ static void kmemleak_scan(void)
                        scan_block(page, page + 1, NULL, 1);
                }
        }
-       unlock_memory_hotplug();
+       put_online_mems();
 
        /*
         * Scanning the task stacks (may introduce false negatives).