rk_fb: sysfs: make use of vmap/vunmap in pairs.
[firefly-linux-kernel-4.4.55.git] / mm / filemap_xip.c
index 93356cd12828a40eb4d635a9e0e0bea3d6ba0790..28fe26b64f8a746119d8111ad0f3beb9ef4751c2 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <linux/fs.h>
 #include <linux/pagemap.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/uio.h>
 #include <linux/rmap.h>
 #include <linux/mmu_notifier.h>
@@ -167,7 +167,6 @@ __xip_unmap (struct address_space * mapping,
 {
        struct vm_area_struct *vma;
        struct mm_struct *mm;
-       struct prio_tree_iter iter;
        unsigned long address;
        pte_t *pte;
        pte_t pteval;
@@ -184,7 +183,7 @@ __xip_unmap (struct address_space * mapping,
 
 retry:
        mutex_lock(&mapping->i_mmap_mutex);
-       vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+       vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                mm = vma->vm_mm;
                address = vma->vm_start +
                        ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
@@ -193,11 +192,13 @@ retry:
                if (pte) {
                        /* Nuke the page table entry. */
                        flush_cache_page(vma, address, pte_pfn(*pte));
-                       pteval = ptep_clear_flush_notify(vma, address, pte);
+                       pteval = ptep_clear_flush(vma, address, pte);
                        page_remove_rmap(page);
                        dec_mm_counter(mm, MM_FILEPAGES);
                        BUG_ON(pte_dirty(pteval));
                        pte_unmap_unlock(pte, ptl);
+                       /* must invalidate_page _before_ freeing the page */
+                       mmu_notifier_invalidate_page(mm, address);
                        page_cache_release(page);
                }
        }
@@ -263,7 +264,12 @@ found:
                                                        xip_pfn);
                if (err == -ENOMEM)
                        return VM_FAULT_OOM;
-               BUG_ON(err);
+               /*
+                * err == -EBUSY is fine, we've raced against another thread
+                * that faulted-in the same page
+                */
+               if (err != -EBUSY)
+                       BUG_ON(err);
                return VM_FAULT_NOPAGE;
        } else {
                int err, ret = VM_FAULT_OOM;
@@ -299,6 +305,8 @@ out:
 
 static const struct vm_operations_struct xip_file_vm_ops = {
        .fault  = xip_file_fault,
+       .page_mkwrite   = filemap_page_mkwrite,
+       .remap_pages = generic_file_remap_pages,
 };
 
 int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
@@ -307,7 +315,7 @@ int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
 
        file_accessed(file);
        vma->vm_ops = &xip_file_vm_ops;
-       vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
+       vma->vm_flags |= VM_MIXEDMAP;
        return 0;
 }
 EXPORT_SYMBOL_GPL(xip_file_mmap);
@@ -406,8 +414,6 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
        pos = *ppos;
        count = len;
 
-       vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
-
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;
 
@@ -421,7 +427,9 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
        if (ret)
                goto out_backing;
 
-       file_update_time(filp);
+       ret = file_update_time(filp);
+       if (ret)
+               goto out_backing;
 
        ret = __xip_file_write (filp, buf, count, pos, ppos);