/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to replace
 * mpage_readpages() in the general case, not just for encrypted
 * files.  It has some limitations (see below), where it will fall
 * back to block_read_full_page(), but these limitations should only
 * be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - which is a common case:
 * end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 */
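
/*
 * Illustrative example of the trailing-hole case (assuming 1k blocks
 * and 4k pages): a 5k file occupies blocks 0-4, so its second page
 * maps only block 4.  This code reads that one block, zeroes the rest
 * of the page, and needs no fallback.
 */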

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"
#include <trace/events/android_fs.h>

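/*
 * Example call sites (a sketch of how ext4's address_space operations
 * in fs/ext4/inode.c invoke this code; abridged, not verbatim):
 *
 *	static int ext4_readpage(struct file *file, struct page *page)
 *	{
 *		return ext4_mpage_readpages(page->mapping, NULL, page, 1);
 *	}
 *
 *	static int ext4_readpages(struct file *file,
 *				  struct address_space *mapping,
 *				  struct list_head *pages, unsigned nr_pages)
 *	{
 *		return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
 *	}
 *
 * A synchronous ->readpage call passes the locked page directly;
 * readahead passes a list of not-yet-cached pages and a NULL page.
 */
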
/*
 * Call ext4_decrypt on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	struct ext4_crypto_ctx *ctx =
		container_of(work, struct ext4_crypto_ctx, r.work);
	struct bio	*bio	= ctx->r.bio;
	struct bio_vec	*bv;
	int		i;

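	/*
	 * This runs from ext4_read_workqueue, i.e. process context:
	 * decryption can sleep, so it cannot be done directly in the
	 * bio completion handler.
	 */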
	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = ext4_decrypt(page);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	ext4_release_crypto_ctx(ctx);
	bio_put(bio);
#else
	BUG();
#endif
}

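/*
 * Only encrypted reads carry a crypto context in bio->bi_private (set
 * up in ext4_mpage_readpages() below), so a non-NULL bi_private is
 * what identifies them.
 */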
static inline bool ext4_bio_encrypted(struct bio *bio)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	return unlikely(bio->bi_private != NULL);
#else
	return false;
#endif
}

static void
ext4_trace_read_completion(struct bio *bio)
{
	struct page *first_page = bio->bi_io_vec[0].bv_page;

	if (first_page != NULL)
		trace_android_fs_dataread_end(first_page->mapping->host,
					      page_offset(first_page),
					      bio->bi_iter.bi_size);
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	if (trace_android_fs_dataread_start_enabled())
		ext4_trace_read_completion(bio);

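	/*
	 * Encrypted data must be decrypted before the pages can be
	 * marked uptodate.  Decryption can sleep, so defer it to a
	 * workqueue rather than doing it here in completion context.
	 */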
	if (ext4_bio_encrypted(bio)) {
		struct ext4_crypto_ctx *ctx = bio->bi_private;

		if (bio->bi_error) {
			ext4_release_crypto_ctx(ctx);
		} else {
			INIT_WORK(&ctx->r.work, completion_pages);
			ctx->r.bio = bio;
			queue_work(ext4_read_workqueue, &ctx->r.work);
			return;
		}
	}
	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

static void
ext4_submit_bio_read(struct bio *bio)
{
	if (trace_android_fs_dataread_start_enabled()) {
		struct page *first_page = bio->bi_io_vec[0].bv_page;

		if (first_page != NULL) {
			trace_android_fs_dataread_start(
				first_page->mapping->host,
				page_offset(first_page),
				bio->bi_iter.bi_size,
				current->pid,
				current->comm);
		}
	}
	submit_bio(READ, bio);
}

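/*
 * ext4_mpage_readpages - issue reads for a single page or a whole
 * readahead window, batching contiguous blocks into as few bios as
 * possible.
 * @mapping:	address space the pages belong to
 * @pages:	readahead page list (NULL for a single-page ->readpage call)
 * @page:	locked page to read (NULL when @pages is used)
 * @nr_pages:	number of pages to read
 */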
int ext4_mpage_readpages(struct address_space *mapping,
			 struct list_head *pages, struct page *page,
			 unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;

	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

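	/*
	 * Walk each page: either the single page that was passed in,
	 * or successive pages pulled off the readahead list.
	 */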
	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping, page->index,
				  mapping_gfp_constraint(mapping, GFP_KERNEL)))
				goto next_page;
		}
		prefetchw(&page->flags);

		if (page_has_buffers(page))
			goto confused;

		block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the result of the previous
		 * ext4_map_blocks() call first; a single extent commonly
		 * covers several pages.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_CACHE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_CACHE_SIZE);
			if (first_hole == 0) {
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
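		/*
		 * cleancache may still hold a copy of this page from an
		 * earlier eviction; if so, take it and skip the block
		 * I/O.  The "confused" path then just flushes any
		 * pending bio and unlocks the now-uptodate page.
		 */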
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			ext4_submit_bio_read(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct ext4_crypto_ctx *ctx = NULL;

			if (ext4_encrypted_inode(inode) &&
			    S_ISREG(inode->i_mode)) {
				ctx = ext4_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;
			}
			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					ext4_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = ctx;
		}

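		/*
		 * Add the mapped part of the page (everything up to the
		 * first hole, or the whole page) to the bio.
		 */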
		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

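		/*
		 * Submit the bio if this mapping ends on a boundary (the
		 * next block cannot be assumed contiguous and needs fresh
		 * metadata) or the page ends in a hole; otherwise record
		 * the last block and keep growing the bio.
		 */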
		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			ext4_submit_bio_read(bio);
			bio = NULL;
		} else {
			last_block_in_bio = blocks[blocks_per_page - 1];
		}
		goto next_page;
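		/*
		 * Fallback: something unusual was hit (buffers attached,
		 * data after a hole, discontiguous blocks), so let the
		 * buffer_head-based path read this page.
		 */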
	confused:
		if (bio) {
			ext4_submit_bio_read(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		ext4_submit_bio_read(bio);
	return 0;
}