mm: use VM_BUG_ON_MM where possible
author    Sasha Levin <sasha.levin@oracle.com>
          Thu, 9 Oct 2014 22:28:39 +0000 (15:28 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 10 Oct 2014 02:25:58 +0000 (22:25 -0400)
Dump the contents of the relevant mm_struct when we hit the bug condition.

Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
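
For context, VM_BUG_ON_MM() is introduced by a companion patch in the same
series ("mm: introduce VM_BUG_ON_MM") in include/linux/mmdebug.h. A minimal
sketch of its shape, assuming the definition from that series (consult the
tree for the exact macro):

	#ifdef CONFIG_DEBUG_VM
	#define VM_BUG_ON_MM(cond, mm)					\
		do {							\
			if (unlikely(cond)) {				\
				dump_mm(mm);	/* print mm_struct state */ \
				BUG();					\
			}						\
		} while (0)
	#else
	#define VM_BUG_ON_MM(cond, mm)	VM_BUG_ON(cond)
	#endif

The only difference from a bare VM_BUG_ON(cond) is the dump_mm() call before
BUG(), so the oops output carries the mm_struct state needed to debug the
failure; VM_BUG_ON_VMA(), used in the mm/mmap.c hunk below, is the analogous
helper that dumps a vm_area_struct. With CONFIG_DEBUG_VM disabled, both
compile away to nothing at runtime, just like VM_BUG_ON().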
kernel/fork.c
kernel/sys.c
mm/huge_memory.c
mm/mlock.c
mm/mmap.c
mm/pagewalk.c

index a91e47d86de214613fd4a2f04d3e7dea7509a7a7..8c162d102740a1ab806a5fe37ddf70e045c29832 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -601,9 +601,8 @@ static void check_mm(struct mm_struct *mm)
                        printk(KERN_ALERT "BUG: Bad rss-counter state "
                                          "mm:%p idx:%d val:%ld\n", mm, i, x);
        }
-
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
-       VM_BUG_ON(mm->pmd_huge_pte);
+       VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
 #endif
 }
 
index f7030b06001841c84e5096926939336afbc62177..df692fbf1e796813be247e84a0b13f9a24ddb109 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1634,7 +1634,7 @@ static int prctl_set_mm_exe_file_locked(struct mm_struct *mm, unsigned int fd)
        struct inode *inode;
        int err;
 
-       VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+       VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
 
        exe = fdget(fd);
        if (!exe.file)
index c13148cc745fce2c943b6d354d079b896e64c837..74c78aa8bc2fa68454928b09f34a7b97f3419e05 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2048,7 +2048,7 @@ int __khugepaged_enter(struct mm_struct *mm)
                return -ENOMEM;
 
        /* __khugepaged_exit() must not run from under us */
-       VM_BUG_ON(khugepaged_test_exit(mm));
+       VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
        if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
                free_mm_slot(mm_slot);
                return 0;
index d5d09d0786ec3a7900d598012859a7780cb31049..03aa8512723b3120277702d60e032031de0e425f 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -235,7 +235,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
        VM_BUG_ON(end   & ~PAGE_MASK);
        VM_BUG_ON_VMA(start < vma->vm_start, vma);
        VM_BUG_ON_VMA(end   > vma->vm_end, vma);
-       VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+       VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
 
        gup_flags = FOLL_TOUCH | FOLL_MLOCK;
        /*
index c9bc285df255ffd73171fa02eec07582714a267d..16d19b48e2ad749bf03a6fc73181efadf41da9f1 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -410,8 +410,9 @@ static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                struct vm_area_struct *vma;
                vma = rb_entry(nd, struct vm_area_struct, vm_rb);
-               BUG_ON(vma != ignore &&
-                      vma->rb_subtree_gap != vma_compute_subtree_gap(vma));
+               VM_BUG_ON_VMA(vma != ignore &&
+                       vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
+                       vma);
        }
 }
 
@@ -448,7 +449,7 @@ static void validate_mm(struct mm_struct *mm)
                        pr_emerg("map_count %d rb %d\n", mm->map_count, i);
                bug = 1;
        }
-       BUG_ON(bug);
+       VM_BUG_ON_MM(bug, mm);
 }
 #else
 #define validate_mm_rb(root, ignore) do { } while (0)
index 2beeabf502c50f1ff3069981bd5b3a84b7463b6f..ad83195521f2da08e136cc8674d3b37c54e3ab86 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -177,7 +177,7 @@ int walk_page_range(unsigned long addr, unsigned long end,
        if (!walk->mm)
                return -EINVAL;
 
-       VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+       VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);
 
        pgd = pgd_offset(walk->mm, addr);
        do {