hugetlb: drop the pgoff argument from prepare_hugepage_range()
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index e6a71c82d204699b6a18226e261a7beda4463ed7..3a19b032c0eb6ecfe887fad15b2adaddf3969b83 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -66,11 +66,8 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
-                                               pgoff_t pgoff)
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-       if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
-               return -EINVAL;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
@@ -78,8 +75,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
        return 0;
 }
 #else
-int prepare_hugepage_range(unsigned long addr, unsigned long len,
-                                               pgoff_t pgoff);
+int prepare_hugepage_range(unsigned long addr, unsigned long len);
 #endif
 
 #ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
@@ -117,7 +113,7 @@ static inline unsigned long hugetlb_total_pages(void)
 #define hugetlb_report_meminfo(buf)            0
 #define hugetlb_report_node_meminfo(n, buf)    0
 #define follow_huge_pmd(mm, addr, pmd, write)  NULL
-#define prepare_hugepage_range(addr,len,pgoff) (-EINVAL)
+#define prepare_hugepage_range(addr,len)       (-EINVAL)
 #define pmd_huge(x)    0
 #define is_hugepage_only_range(mm, addr, len)  0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
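
The check removed in the first hunk, pgoff & (~HPAGE_MASK >> PAGE_SHIFT), rejected file offsets that are not huge-page aligned: pgoff counts PAGE_SIZE units, so shifting the huge-page low-bits mask right by PAGE_SHIFT converts it into page units. After this diff the generic helper validates only the mapping address and length; the offset is no longer this function's concern. What follows is a minimal user-space sketch of the checks that remain, not part of the patch; the 2 MB huge page size, 4 KB base page, and the main() harness are illustrative assumptions, since the kernel derives these values from the architecture headers.

#include <errno.h>
#include <stdio.h>

/* Illustrative values: 2 MB huge pages on a 4 KB base-page system. */
#define HPAGE_SHIFT 21
#define HPAGE_SIZE  (1UL << HPAGE_SHIFT)
#define HPAGE_MASK  (~(HPAGE_SIZE - 1))

/* Mirrors the generic helper as it reads after this diff: the mapping
 * address and length must both be multiples of HPAGE_SIZE. */
static int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* Aligned address and length: accepted. */
	printf("%d\n", prepare_hugepage_range(0x40000000UL, HPAGE_SIZE)); /* 0 */
	/* Length of a single base page: rejected. */
	printf("%d\n", prepare_hugepage_range(0x40000000UL, 4096));       /* -22 */
	/* Misaligned address: rejected. */
	printf("%d\n", prepare_hugepage_range(0x40001000UL, HPAGE_SIZE)); /* -22 */
	return 0;
}

Built with gcc -Wall, the three calls print 0, -22, -22: only the fully aligned request passes, matching the len & ~HPAGE_MASK and addr & ~HPAGE_MASK tests kept in the header.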