diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 33514d88fef9b041cef11c74717091eec4805f80..3630d577e9879e9d6dc6a80912e2eb88d5f1c959 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -827,7 +827,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
        }
-       if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_KERNEL))) {
+       if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_TRANSHUGE))) {
                put_page(page);
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
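
For context: GFP_TRANSHUGE is the allocation mask used for the THP page itself, so charging the memcg with the same mask (rather than GFP_KERNEL) keeps the charge path's reclaim behaviour consistent with the allocation: no retries, no allocation-failure warnings, and no waking of kswapd for a speculative huge page. A rough sketch of the mask; the exact flag composition varies between kernel releases, so treat this as illustrative:

	/*
	 * Illustrative only -- around v3.16, GFP_TRANSHUGE was defined
	 * roughly as below (include/linux/gfp.h); later kernels changed
	 * the composition.
	 */
	#define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP |	\
				 __GFP_NOMEMALLOC | __GFP_NORETRY |	\
				 __GFP_NOWARN | __GFP_NO_KSWAPD)
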
@@ -1132,7 +1132,7 @@ alloc:
                goto out;
        }
 
-       if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))) {
+       if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_TRANSHUGE))) {
                put_page(new_page);
                if (page) {
                        split_huge_page(page);
@@ -1681,7 +1681,7 @@ static void __split_huge_page_refcount(struct page *page,
                           &page_tail->_count);
 
                /* after clearing PageTail the gup refcount can be released */
-               smp_mb();
+               smp_mb__after_atomic();
 
                /*
                 * retain hwpoison flag of the poisoned tail page:
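
The barrier change here is a strength reduction, not a semantic change: smp_mb__after_atomic() orders memory accesses after a preceding atomic RMW operation (here, the atomic_add() into the tail page's _count) and compiles to nothing on architectures such as x86 where atomic RMW instructions are already full barriers, whereas a bare smp_mb() always emits a fence. A minimal standalone sketch of the idiom (hypothetical example, not the surrounding kernel code):

	/* Hypothetical sketch: a non-value-returning atomic RMW does not
	 * imply ordering on all architectures, so pair it with a barrier
	 * that is free where the RMW already serializes (e.g. x86
	 * lock-prefixed instructions).
	 */
	atomic_add(refs, &page_tail->_count);	/* atomic RMW */
	smp_mb__after_atomic();			/* order subsequent accesses */
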
@@ -1775,6 +1775,8 @@ static int __split_huge_page_map(struct page *page,
        if (pmd) {
                pgtable = pgtable_trans_huge_withdraw(mm, pmd);
                pmd_populate(mm, &_pmd, pgtable);
+               if (pmd_write(*pmd))
+                       BUG_ON(page_mapcount(page) != 1);
 
                haddr = address;
                for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
@@ -1784,8 +1786,6 @@ static int __split_huge_page_map(struct page *page,
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                        if (!pmd_write(*pmd))
                                entry = pte_wrprotect(entry);
-                       else
-                               BUG_ON(page_mapcount(page) != 1);
                        if (!pmd_young(*pmd))
                                entry = pte_mkold(entry);
                        if (pmd_numa(*pmd))
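
pmd_write(*pmd) cannot change while the page-table lock is held, so the mapcount sanity check is loop-invariant; this hunk hoists it out of the HPAGE_PMD_NR-iteration loop so it runs once per split instead of once per base page. In simplified form (a sketch of the transformation, not the full kernel code):

	/* Sketch of the hoist: the predicate does not depend on i, so
	 * assert it once before the loop rather than in every iteration.
	 */
	if (pmd_write(*pmd))
		BUG_ON(page_mapcount(page) != 1);	/* writable THP is exclusive */
	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		/* ... set up each pte without re-checking the mapcount ... */
	}
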
@@ -2233,6 +2233,30 @@ static void khugepaged_alloc_sleep(void)
 
 static int khugepaged_node_load[MAX_NUMNODES];
 
+static bool khugepaged_scan_abort(int nid)
+{
+       int i;
+
+       /*
+        * If zone_reclaim_mode is disabled, then no extra effort is made to
+        * allocate memory locally.
+        */
+       if (!zone_reclaim_mode)
+               return false;
+
+       /* If there is a count for this node already, it must be acceptable */
+       if (khugepaged_node_load[nid])
+               return false;
+
+       for (i = 0; i < MAX_NUMNODES; i++) {
+               if (!khugepaged_node_load[i])
+                       continue;
+               if (node_distance(nid, i) > RECLAIM_DISTANCE)
+                       return true;
+       }
+       return false;
+}
+
 #ifdef CONFIG_NUMA
 static int khugepaged_find_target_node(void)
 {
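
khugepaged_scan_abort() mirrors the zone_reclaim_mode locality policy: once base pages have been counted against some node, collapsing is abandoned if the current page's node is further than RECLAIM_DISTANCE from any node already recorded in khugepaged_node_load[], since the resulting huge page would span distant nodes. A toy illustration of how the check fires (hypothetical distances; real values come from the firmware's SLIT, and RECLAIM_DISTANCE defaults to 30):

	/* Hypothetical two-node system: local distance 10,
	 * node_distance(0, 1) == 31, RECLAIM_DISTANCE at its default of 30.
	 */
	khugepaged_node_load[0] = 1;	/* pages already counted on node 0 */
	khugepaged_scan_abort(0);	/* false: node 0 already has a count */
	khugepaged_scan_abort(1);	/* true: 31 > RECLAIM_DISTANCE */
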
@@ -2399,7 +2423,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        if (!new_page)
                return;
 
-       if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)))
+       if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_TRANSHUGE)))
                return;
 
        /*
@@ -2545,6 +2569,8 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
                 * hit record.
                 */
                node = page_to_nid(page);
+               if (khugepaged_scan_abort(node))
+                       goto out_unmap;
                khugepaged_node_load[node]++;
                VM_BUG_ON_PAGE(PageCompound(page), page);
                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
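
The abort check runs before the node's hit is recorded, so a scan either stays within an acceptable distance set or gives up on the pmd entirely. After a successful scan, the load array feeds node selection; simplified from khugepaged_find_target_node() earlier in this file, the preferred collapse node is the one that contributed the most base pages:

	/* Simplified from khugepaged_find_target_node(): pick the node
	 * with the highest hit count as the allocation target.
	 */
	int nid, max_value = 0, target_node = 0;

	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}
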