static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
- return subpool_inode(vma->vm_file->f_dentry->d_inode);
+ return subpool_inode(file_inode(vma->vm_file));
}
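/*
 * For context (not part of this patch): file_inode() is the VFS accessor
 * that replaces open-coded vma->vm_file->f_dentry->d_inode chains. A
 * minimal sketch of the <linux/fs.h> helper, roughly:
 *
 *	static inline struct inode *file_inode(const struct file *f)
 *	{
 *		return f->f_inode;
 *	}
 */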
for_each_hstate(h) {
char buf[32];
- printk(KERN_INFO "HugeTLB registered %s page size, "
- "pre-allocated %ld pages\n",
+ pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
memfmt(buf, huge_page_size(h)),
h->free_huge_pages);
}
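/*
 * For reference, memfmt() is hugetlb.c's local size formatter used above;
 * a sketch of what it does (pretty-prints a byte count as KB/MB/GB):
 *
 *	static char * __init memfmt(char *buf, unsigned long n)
 *	{
 *		if (n >= (1UL << 30))
 *			sprintf(buf, "%lu GB", n >> 30);
 *		else if (n >= (1UL << 20))
 *			sprintf(buf, "%lu MB", n >> 20);
 *		else
 *			sprintf(buf, "%lu KB", n >> 10);
 *		return buf;
 *	}
 */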
err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
hstate_kobjs, &hstate_attr_group);
if (err)
- printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
- h->name);
+ pr_err("Hugetlb: Unable to add hstate %s", h->name);
}
}
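/*
 * Background for the printk() -> pr_*() conversions in this patch: the
 * pr_* macros from <linux/printk.h> expand to printk() with the matching
 * KERN_* level and prepend pr_fmt(fmt) when the file defines it, e.g.:
 *
 *	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 *	#define pr_info(fmt, ...) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 *
 * so the replacements are behavior-preserving apart from any pr_fmt prefix.
 */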
nhs->hstate_kobjs,
&per_node_hstate_attr_group);
if (err) {
- printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
- " for node %d\n",
- h->name, node->dev.id);
+ pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
+ h->name, node->dev.id);
hugetlb_unregister_node(node);
break;
}
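/*
 * The per-node variant registers the same hstate attribute group under
 * /sys/devices/system/node/nodeN/hugepages/; on failure the whole node
 * is unregistered rather than being left half-populated.
 */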
unsigned long i;
if (size_to_hstate(PAGE_SIZE << order)) {
- printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
+ pr_warning("hugepagesz= specified twice, ignoring\n");
return;
}
BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
mhp = &parsed_hstate->max_huge_pages;
if (mhp == last_mhp) {
- printk(KERN_WARNING "hugepages= specified twice without "
- "interleaving hugepagesz=, ignoring\n");
+ pr_warning("hugepages= specified twice without "
+ "interleaving hugepagesz=, ignoring\n");
return 1;
}
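/*
 * The last_mhp check enforces the boot-command-line grammar: each
 * hugepages= count binds to the most recent hugepagesz=, so pools for
 * several page sizes are requested by interleaving the two, e.g.:
 *
 *	hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * (illustrative command line, not from the patch)
 */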
address = address & huge_page_mask(h);
pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
vma->vm_pgoff;
- mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+ mapping = file_inode(vma->vm_file)->i_mapping;
/*
 * Take the mapping lock for the duration of the table walk.
 */

/*
 * Currently, we are forced to kill the process in the event the
 * original mapper has unmapped pages from the child due to a failed
 * COW. Warn that such a situation has occurred as it may not be obvious
 */
if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
- printk(KERN_WARNING
- "PID %d killed due to inadequate hugepage pool\n",
- current->pid);
+ pr_warning("PID %d killed due to inadequate hugepage pool\n",
+ current->pid);
return ret;
}
return NULL;
}
-int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
- struct page **pages, struct vm_area_struct **vmas,
- unsigned long *position, int *length, int i,
- unsigned int flags)
+long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ struct page **pages, struct vm_area_struct **vmas,
+ unsigned long *position, unsigned long *nr_pages,
+ long i, unsigned int flags)
{
unsigned long pfn_offset;
unsigned long vaddr = *position;
- int remainder = *length;
+ unsigned long remainder = *nr_pages;
struct hstate *h = hstate_vma(vma);
spin_lock(&mm->page_table_lock);
}
}
spin_unlock(&mm->page_table_lock);
- *length = remainder;
+ *nr_pages = remainder;
*position = vaddr;
return i ? i : -EFAULT;
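/*
 * Why int -> long above: with *nr_pages now unsigned long, a
 * get_user_pages() caller pinning a very large hugetlb mapping could
 * exceed INT_MAX pages on 64-bit, so the running index i and the return
 * value move to long to avoid truncating the page count.
 */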
if (!huge_pte_none(huge_ptep_get(ptep))) {
pte = huge_ptep_get_and_clear(mm, address, ptep);
pte = pte_mkhuge(pte_modify(pte, newprot));
+ pte = arch_make_huge_pte(pte, vma, NULL, 0);
set_huge_pte_at(mm, address, ptep, pte);
pages++;
}
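/*
 * arch_make_huge_pte() lets architectures that encode the huge-page size
 * in the PTE re-apply it after pte_modify(); the generic fallback is a
 * no-op, roughly (exact header location varies by kernel version):
 *
 *	static inline pte_t arch_make_huge_pte(pte_t entry,
 *			struct vm_area_struct *vma,
 *			struct page *page, int writable)
 *	{
 *		return entry;
 *	}
 */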