arm64: rockchip_defconfig: enable CC_STACKPROTECTOR_STRONG
[firefly-linux-kernel-4.4.55.git] mm/hugetlb.c
index ef6963b577fd2920c1a4857f7b74b50e91c98b7a..4434cdd4cd9a6af734a32ff3cae884e9146cd90e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1416,12 +1416,13 @@ static void dissolve_free_huge_page(struct page *page)
 {
        spin_lock(&hugetlb_lock);
        if (PageHuge(page) && !page_count(page)) {
-               struct hstate *h = page_hstate(page);
-               int nid = page_to_nid(page);
-               list_del(&page->lru);
+               struct page *head = compound_head(page);
+               struct hstate *h = page_hstate(head);
+               int nid = page_to_nid(head);
+               list_del(&head->lru);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
-               update_and_free_page(h, page);
+               update_and_free_page(h, head);
        }
        spin_unlock(&hugetlb_lock);
 }
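
After this hunk, dissolve_free_huge_page() resolves the head page before touching any per-page state, so a pfn that lands on a tail page of a free gigantic hugepage still finds the right hstate, per-node counter and lru linkage. For readability, the function as it reads post-patch, reassembled from the hunk above with comments added (a sketch, not an authoritative copy):

static void dissolve_free_huge_page(struct page *page)
{
	spin_lock(&hugetlb_lock);
	if (PageHuge(page) && !page_count(page)) {
		/*
		 * 'page' may be a tail page of a gigantic hugepage;
		 * page_hstate(), page_to_nid() and the lru list entry
		 * are only valid on the head page.
		 */
		struct page *head = compound_head(page);
		struct hstate *h = page_hstate(head);
		int nid = page_to_nid(head);

		list_del(&head->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		update_and_free_page(h, head);
	}
	spin_unlock(&hugetlb_lock);
}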
@@ -1429,7 +1430,8 @@ static void dissolve_free_huge_page(struct page *page)
 /*
  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
  * make specified memory blocks removable from the system.
- * Note that start_pfn should aligned with (minimum) hugepage size.
+ * Note that this will dissolve a free gigantic hugepage completely, if any
+ * part of it lies within the given range.
  */
 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
@@ -1438,7 +1440,6 @@ void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
        if (!hugepages_supported())
                return;
 
-       VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
        for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
                dissolve_free_huge_page(pfn_to_page(pfn));
 }
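
Dropping the VM_BUG_ON() goes hand in hand with the compound_head() change above: a hot-removed range is only guaranteed to be aligned to the memory block size, which can be smaller than a gigantic hugepage, so start_pfn may legitimately point into the middle of one. A worked example of the scan, assuming illustrative x86-64 sizes (4 KiB base pages, 2 MiB minimum hugepage, i.e. minimum_order = 9, and 1 GiB gigantic pages; none of these numbers come from the patch):

/*
 * The loop advances in minimum-hugepage steps: 1 << 9 = 512 pfns,
 * i.e. 2 MiB per step.  A free 1 GiB hugepage spans 262144 pfns;
 * the first step that lands anywhere inside it dissolves the whole
 * page, because dissolve_free_huge_page() now works on the head.
 */
for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
	dissolve_free_huge_page(pfn_to_page(pfn));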
@@ -2170,6 +2171,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
                 * and reducing the surplus.
                 */
                spin_unlock(&hugetlb_lock);
+
+               /* yield cpu to avoid soft lockup */
+               cond_resched();
+
                if (hstate_is_gigantic(h))
                        ret = alloc_fresh_gigantic_page(h, nodes_allowed);
                else
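
set_max_huge_pages() can sit in this loop for a very long time when a large count is requested (each iteration may go through reclaim and compaction), and nothing else forces a reschedule, so a big write to nr_hugepages could trip the soft-lockup watchdog. The cond_resched() yields once per iteration, after hugetlb_lock is dropped, and costs nothing when no reschedule is pending. The shape of the loop after this hunk; the framing outside the hunk (persistent_huge_pages(), the else arm, the lock retake and the signal check) is paraphrased from the 4.4-era function, not part of the diff:

while (count > persistent_huge_pages(h)) {
	/* The allocation below can sleep, so drop the lock. */
	spin_unlock(&hugetlb_lock);

	/* yield cpu to avoid soft lockup */
	cond_resched();

	if (hstate_is_gigantic(h))
		ret = alloc_fresh_gigantic_page(h, nodes_allowed);
	else
		ret = alloc_fresh_huge_page(h, nodes_allowed);
	spin_lock(&hugetlb_lock);
	if (!ret)
		goto out;

	/* Bail for signals. Probably ctrl-c from user */
	if (signal_pending(current))
		goto out;
}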
@@ -4209,7 +4214,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
                if (saddr) {
                        spte = huge_pte_offset(svma->vm_mm, saddr);
                        if (spte) {
-                               mm_inc_nr_pmds(mm);
                                get_page(virt_to_page(spte));
                                break;
                        }
@@ -4224,9 +4228,9 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
        if (pud_none(*pud)) {
                pud_populate(mm, pud,
                                (pmd_t *)((unsigned long)spte & PAGE_MASK));
+               mm_inc_nr_pmds(mm);
        } else {
                put_page(virt_to_page(spte));
-               mm_inc_nr_pmds(mm);
        }
        spin_unlock(ptl);
 out:
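
This rebalances the nr_pmds accounting. The counter is meant to track PMD pages actually wired into the mm's page tables (exit-time checking in check_mm() complains if it is non-zero when the mm is freed), but as the removed lines show, the old code incremented it as soon as a shareable pmd was found and then again on the lost-race path, where another thread had already populated the pud and this call should contribute nothing, so counts leaked whenever the race was lost. After the hunk the increment sits directly next to pud_populate(), the one place the shared pmd really becomes visible to this mm. The tail of huge_pmd_share() post-patch; the ptl lines are paraphrased from the 4.4-era function, not part of the hunk:

	ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
	spin_lock(ptl);
	if (pud_none(*pud)) {
		/* Won the race: install the shared pmd page ... */
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
		mm_inc_nr_pmds(mm);	/* ... and count it exactly once. */
	} else {
		/* Lost the race: drop the reference taken at lookup. */
		put_page(virt_to_page(spte));
	}
	spin_unlock(ptl);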