diff --git a/mm/filemap.c b/mm/filemap.c
index 327910c2400c6ce36f440383147fdc768cf14692..c33c31d75a2ba5c68bd13a10bd9333a00480b366 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
  *   ->tasklist_lock            (memory_failure, collect_procs_ao)
  */
 
+static int page_cache_tree_insert(struct address_space *mapping,
+                                 struct page *page, void **shadowp)
+{
+       struct radix_tree_node *node;
+       void **slot;
+       int error;
+
+       error = __radix_tree_create(&mapping->page_tree, page->index,
+                                   &node, &slot);
+       if (error)
+               return error;
+       if (*slot) {
+               void *p;
+
+               p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+               if (!radix_tree_exceptional_entry(p))
+                       return -EEXIST;
+               if (shadowp)
+                       *shadowp = p;
+               mapping->nrshadows--;
+               if (node)
+                       workingset_node_shadows_dec(node);
+       }
+       radix_tree_replace_slot(slot, page);
+       mapping->nrpages++;
+       if (node) {
+               workingset_node_pages_inc(node);
+               /*
+                * Don't track node that contains actual pages.
+                *
+                * Avoid acquiring the list_lru lock if already
+                * untracked.  The list_empty() test is safe as
+                * node->private_list is protected by
+                * mapping->tree_lock.
+                */
+               if (!list_empty(&node->private_list))
+                       list_lru_del(&workingset_shadow_nodes,
+                                    &node->private_list);
+       }
+       return 0;
+}
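
For context, the shadowp out-parameter is how the replaced shadow entry reaches refault detection. Roughly how the add_to_page_cache_lru() path consumes it (a simplified sketch; in the real code the insert runs under mapping->tree_lock and the refault check happens after the lock is dropped):

	void *shadow = NULL;
	int error = page_cache_tree_insert(mapping, page, &shadow);

	if (!error && shadow && workingset_refault(shadow)) {
		/* page was evicted only recently: treat as a repeat access */
		SetPageActive(page);
		workingset_activation(page);
	}
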
+
 static void page_cache_tree_delete(struct address_space *mapping,
                                   struct page *page, void *shadow)
 {
@@ -122,6 +164,14 @@ static void page_cache_tree_delete(struct address_space *mapping,
 
        __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);
 
+       if (!node) {
+               /*
+                * We need a node to properly account shadow
+                * entries. Don't plant any without. XXX
+                */
+               shadow = NULL;
+       }
+
        if (shadow) {
                mapping->nrshadows++;
                /*
@@ -331,23 +381,14 @@ int filemap_flush(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_flush);
 
-/**
- * filemap_fdatawait_range - wait for writeback to complete
- * @mapping:           address space structure to wait for
- * @start_byte:                offset in bytes where the range starts
- * @end_byte:          offset in bytes where the range ends (inclusive)
- *
- * Walk the list of under-writeback pages of the given address space
- * in the given range and wait for all of them.
- */
-int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
-                           loff_t end_byte)
+static int __filemap_fdatawait_range(struct address_space *mapping,
+                                    loff_t start_byte, loff_t end_byte)
 {
        pgoff_t index = start_byte >> PAGE_CACHE_SHIFT;
        pgoff_t end = end_byte >> PAGE_CACHE_SHIFT;
        struct pagevec pvec;
        int nr_pages;
-       int ret2, ret = 0;
+       int ret = 0;
 
        if (end_byte < start_byte)
                goto out;
@@ -374,6 +415,29 @@ int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
                cond_resched();
        }
 out:
+       return ret;
+}
+
+/**
+ * filemap_fdatawait_range - wait for writeback to complete
+ * @mapping:           address space structure to wait for
+ * @start_byte:                offset in bytes where the range starts
+ * @end_byte:          offset in bytes where the range ends (inclusive)
+ *
+ * Walk the list of under-writeback pages of the given address space
+ * in the given range and wait for all of them.  Check error status of
+ * the address space and return it.
+ *
+ * Since the error status of the address space is cleared by this function,
+ * callers are responsible for checking the return value and handling and/or
+ * reporting the error.
+ */
+int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
+                           loff_t end_byte)
+{
+       int ret, ret2;
+
+       ret = __filemap_fdatawait_range(mapping, start_byte, end_byte);
        ret2 = filemap_check_errors(mapping);
        if (!ret)
                ret = ret2;
@@ -382,12 +446,39 @@ out:
 }
 EXPORT_SYMBOL(filemap_fdatawait_range);
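
Because the error flags (AS_EIO, AS_ENOSPC) are consumed here, a caller that drops the return value loses the writeback error for good. A minimal caller sketch (hypothetical fsync-style path):

	int err = filemap_fdatawait_range(file->f_mapping, pos,
					  pos + count - 1);
	if (err)
		return err;	/* surfaces -EIO/-ENOSPC to userspace */
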
 
+/**
+ * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
+ * @mapping: address space structure to wait for
+ *
+ * Walk the list of under-writeback pages of the given address space
+ * and wait for all of them.  Unlike filemap_fdatawait(), this function
+ * does not clear error status of the address space.
+ *
+ * Use this function if callers don't handle errors themselves.  Expected
+ * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
+ * fsfreeze(8).
+ */
+void filemap_fdatawait_keep_errors(struct address_space *mapping)
+{
+       loff_t i_size = i_size_read(mapping->host);
+
+       if (i_size == 0)
+               return;
+
+       __filemap_fdatawait_range(mapping, 0, i_size - 1);
+}
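
A sketch of the intended kind of call site (an assumption, modeled on the sync(2) flusher path), where there is no single caller to report the error to, so the flags must stay latched for a later fsync():

	/* system-wide flush: keep AS_EIO/AS_ENOSPC for a later fsync(2) */
	filemap_fdatawait_keep_errors(inode->i_mapping);
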
+
 /**
  * filemap_fdatawait - wait for all under-writeback pages to complete
  * @mapping: address space structure to wait for
  *
  * Walk the list of under-writeback pages of the given address space
- * and wait for all of them.
+ * and wait for all of them.  Check error status of the address space
+ * and return it.
+ *
+ * Since the error status of the address space is cleared by this function,
+ * callers are responsible for checking the return value and handling and/or
+ * reporting the error.
  */
 int filemap_fdatawait(struct address_space *mapping)
 {
@@ -497,9 +588,8 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                memcg = mem_cgroup_begin_page_stat(old);
                spin_lock_irqsave(&mapping->tree_lock, flags);
                __delete_from_page_cache(old, NULL, memcg);
-               error = radix_tree_insert(&mapping->page_tree, offset, new);
+               error = page_cache_tree_insert(mapping, new, NULL);
                BUG_ON(error);
-               mapping->nrpages++;
 
                /*
                 * hugetlb pages do not participate in page cache accounting.
@@ -510,7 +600,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                        __inc_zone_page_state(new, NR_SHMEM);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
                mem_cgroup_end_page_stat(memcg);
-               mem_cgroup_migrate(old, new, true);
+               mem_cgroup_replace_page(old, new);
                radix_tree_preload_end();
                if (freepage)
                        freepage(old);
@@ -521,48 +611,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
-static int page_cache_tree_insert(struct address_space *mapping,
-                                 struct page *page, void **shadowp)
-{
-       struct radix_tree_node *node;
-       void **slot;
-       int error;
-
-       error = __radix_tree_create(&mapping->page_tree, page->index,
-                                   &node, &slot);
-       if (error)
-               return error;
-       if (*slot) {
-               void *p;
-
-               p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
-               if (!radix_tree_exceptional_entry(p))
-                       return -EEXIST;
-               if (shadowp)
-                       *shadowp = p;
-               mapping->nrshadows--;
-               if (node)
-                       workingset_node_shadows_dec(node);
-       }
-       radix_tree_replace_slot(slot, page);
-       mapping->nrpages++;
-       if (node) {
-               workingset_node_pages_inc(node);
-               /*
-                * Don't track node that contains actual pages.
-                *
-                * Avoid acquiring the list_lru lock if already
-                * untracked.  The list_empty() test is safe as
-                * node->private_list is protected by
-                * mapping->tree_lock.
-                */
-               if (!list_empty(&node->private_list))
-                       list_lru_del(&workingset_shadow_nodes,
-                                    &node->private_list);
-       }
-       return 0;
-}
-
 static int __add_to_page_cache_locked(struct page *page,
                                      struct address_space *mapping,
                                      pgoff_t offset, gfp_t gfp_mask,
@@ -1511,6 +1559,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
 
                cond_resched();
 find_page:
+               if (fatal_signal_pending(current)) {
+                       error = -EINTR;
+                       goto out;
+               }
+
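
This makes a large buffered read killable: with a fatal signal pending, read(2) backs out with -EINTR (or a short count, if some bytes were already copied) instead of walking every remaining page. The general pattern for long page-cache loops looks like this (generic sketch, not this function verbatim):

	for (;;) {
		if (fatal_signal_pending(current))
			return -EINTR;	/* only fatal signals: no restart semantics needed */
		/* ... look up and copy one page ... */
		cond_resched();
	}
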
                page = find_get_page(mapping, index);
                if (!page) {
                        page_cache_sync_readahead(mapping,
@@ -1681,7 +1734,7 @@ no_cached_page:
                        goto out;
                }
                error = add_to_page_cache_lru(page, mapping, index,
-                                       GFP_KERNEL & mapping_gfp_mask(mapping));
+                               mapping_gfp_constraint(mapping, GFP_KERNEL));
                if (error) {
                        page_cache_release(page);
                        if (error == -EEXIST) {
@@ -1783,7 +1836,7 @@ static int page_cache_read(struct file *file, pgoff_t offset)
                        return -ENOMEM;
 
                ret = add_to_page_cache_lru(page, mapping, offset,
-                               GFP_KERNEL & mapping_gfp_mask(mapping));
+                               mapping_gfp_constraint(mapping, GFP_KERNEL));
                if (ret == 0)
                        ret = mapping->a_ops->readpage(file, page);
                else if (ret == -EEXIST)
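
mapping_gfp_constraint() expresses the same mask intersection more readably; its definition in include/linux/pagemap.h is essentially:

	static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
						   gfp_t gfp_mask)
	{
		return mapping_gfp_mask(mapping) & gfp_mask;
	}
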
@@ -1807,7 +1860,6 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
                                   struct file *file,
                                   pgoff_t offset)
 {
-       unsigned long ra_pages;
        struct address_space *mapping = file->f_mapping;
 
        /* If we don't want any read-ahead, don't bother */
@@ -1836,10 +1888,9 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
        /*
         * mmap read-around
         */
-       ra_pages = max_sane_readahead(ra->ra_pages);
-       ra->start = max_t(long, 0, offset - ra_pages / 2);
-       ra->size = ra_pages;
-       ra->async_size = ra_pages / 4;
+       ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
+       ra->size = ra->ra_pages;
+       ra->async_size = ra->ra_pages / 4;
        ra_submit(ra, mapping, file);
 }
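
For a concrete feel of the read-around window, assume the default 128KB ra_pages (32 pages of 4KB) and a fault at page index 100:

	ra->start      = max_t(long, 0, 100 - 32 / 2);	/* = 84 */
	ra->size       = 32;
	ra->async_size = 32 / 4;	/* = 8: async readahead starts early */

The max_t() clamp matters for faults within ra_pages/2 of the start of the file, where the window would otherwise begin at a negative index.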
 
@@ -2674,7 +2725,7 @@ EXPORT_SYMBOL(generic_file_write_iter);
  * page is known to the local caching routines.
  *
  * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
+ * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
  *
  */
 int try_to_release_page(struct page *page, gfp_t gfp_mask)
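
A releasepage implementation typically tests these bits before doing anything that could block or recurse into the filesystem; a hedged sketch (hypothetical hook, not from this patch):

	static int example_releasepage(struct page *page, gfp_t gfp_mask)
	{
		if (!(gfp_mask & __GFP_FS))
			return 0;	/* must not re-enter the filesystem */
		if (!gfpflags_allow_blocking(gfp_mask))
			return 0;	/* no __GFP_DIRECT_RECLAIM: must not sleep */
		/* ... tear down private state, then ... */
		return try_to_free_buffers(page);
	}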