fs/exofs/inode.c
1 /*
2  * Copyright (C) 2005, 2006
3  * Avishay Traeger (avishay@gmail.com)
4  * Copyright (C) 2008, 2009
5  * Boaz Harrosh <bharrosh@panasas.com>
6  *
7  * Copyrights for code taken from ext2:
8  *     Copyright (C) 1992, 1993, 1994, 1995
9  *     Remy Card (card@masi.ibp.fr)
10  *     Laboratoire MASI - Institut Blaise Pascal
11  *     Universite Pierre et Marie Curie (Paris VI)
12  *     from
13  *     linux/fs/minix/inode.c
14  *     Copyright (C) 1991, 1992  Linus Torvalds
15  *
16  * This file is part of exofs.
17  *
18  * exofs is free software; you can redistribute it and/or modify
19  * it under the terms of the GNU General Public License as published by
20  * the Free Software Foundation.  Since it is based on ext2, and the only
21  * valid version of GPL for the Linux kernel is version 2, the only valid
22  * version of GPL for exofs is version 2.
23  *
24  * exofs is distributed in the hope that it will be useful,
25  * but WITHOUT ANY WARRANTY; without even the implied warranty of
26  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
27  * GNU General Public License for more details.
28  *
29  * You should have received a copy of the GNU General Public License
30  * along with exofs; if not, write to the Free Software
31  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
32  */
33
34 #include <linux/slab.h>
35
36 #include "exofs.h"
37
38 #define EXOFS_DBGMSG2(M...) do {} while (0)
39
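/* Cap the number of pages that go into a single I/O by the layout's maximum
 * I/O length.  For example (assuming 4K pages), a max_io_length of 512K
 * limits one ore_io_state to 128 pages regardless of expected_pages.
 */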
40 unsigned exofs_max_io_pages(struct ore_layout *layout,
41                             unsigned expected_pages)
42 {
43         unsigned pages = min_t(unsigned, expected_pages,
44                                layout->max_io_length / PAGE_SIZE);
45
46         return pages;
47 }
48
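/* A page_collect accumulates contiguous pages of one inode until they are
 * handed to the ORE as a single I/O (see read_exec() and write_exec() below).
 */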
49 struct page_collect {
50         struct exofs_sb_info *sbi;
51         struct inode *inode;
52         unsigned expected_pages;
53         struct ore_io_state *ios;
54
55         struct page **pages;
56         unsigned alloc_pages;
57         unsigned nr_pages;
58         unsigned long length;
59         loff_t pg_first; /* keep 64bit also in 32-arches */
60         bool read_4_write; /* This means two things: the read is synchronous
61                             * and the pages should not be unlocked.
62                             */
63         struct page *that_locked_page;
64 };
65
66 static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
67                        struct inode *inode)
68 {
69         struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
70
71         pcol->sbi = sbi;
72         pcol->inode = inode;
73         pcol->expected_pages = expected_pages;
74
75         pcol->ios = NULL;
76         pcol->pages = NULL;
77         pcol->alloc_pages = 0;
78         pcol->nr_pages = 0;
79         pcol->length = 0;
80         pcol->pg_first = -1;
81         pcol->read_4_write = false;
82         pcol->that_locked_page = NULL;
83 }
84
85 static void _pcol_reset(struct page_collect *pcol)
86 {
87         pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);
88
89         pcol->pages = NULL;
90         pcol->alloc_pages = 0;
91         pcol->nr_pages = 0;
92         pcol->length = 0;
93         pcol->pg_first = -1;
94         pcol->ios = NULL;
95         pcol->that_locked_page = NULL;
96
97         /* this is usually the end of the loop, but for writes
98          * it might not end here; don't be left with nothing
99          */
100         if (!pcol->expected_pages)
101                 pcol->expected_pages =
102                                 exofs_max_io_pages(&pcol->sbi->layout, ~0);
103 }
104
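/* Allocate the pages[] array for this collection.  If the full-size
 * allocation fails, retry with progressively halved sizes before giving up.
 */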
105 static int pcol_try_alloc(struct page_collect *pcol)
106 {
107         unsigned pages;
108
109         /* TODO: easily support bio chaining */
110         pages =  exofs_max_io_pages(&pcol->sbi->layout, pcol->expected_pages);
111
112         for (; pages; pages >>= 1) {
113                 pcol->pages = kmalloc(pages * sizeof(struct page *),
114                                       GFP_KERNEL);
115                 if (likely(pcol->pages)) {
116                         pcol->alloc_pages = pages;
117                         return 0;
118                 }
119         }
120
121         EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
122                   pcol->expected_pages);
123         return -ENOMEM;
124 }
125
126 static void pcol_free(struct page_collect *pcol)
127 {
128         kfree(pcol->pages);
129         pcol->pages = NULL;
130
131         if (pcol->ios) {
132                 ore_put_io_state(pcol->ios);
133                 pcol->ios = NULL;
134         }
135 }
136
137 static int pcol_add_page(struct page_collect *pcol, struct page *page,
138                          unsigned len)
139 {
140         if (unlikely(pcol->nr_pages >= pcol->alloc_pages))
141                 return -ENOMEM;
142
143         pcol->pages[pcol->nr_pages++] = page;
144         pcol->length += len;
145         return 0;
146 }
147
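/* PAGE_WAS_NOT_IN_IO is a private "recovered" status: pages that fall past
 * the bytes actually transferred by a successful I/O are reported with this
 * value so update_read_page()/update_write_page() do not treat them as errors.
 */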
148 enum {PAGE_WAS_NOT_IN_IO = 17};
149 static int update_read_page(struct page *page, int ret)
150 {
151         switch (ret) {
152         case 0:
153                 /* Everything is OK */
154                 SetPageUptodate(page);
155                 if (PageError(page))
156                         ClearPageError(page);
157                 break;
158         case -EFAULT:
159                 /* In this case we were trying to read something that wasn't on
160                  * disk yet - return a page full of zeroes.  This should be OK,
161                  * because the object should be empty (if there was a write
162                  * before this read, the read would be waiting with the page
163                  * locked */
164                 clear_highpage(page);
165
166                 SetPageUptodate(page);
167                 if (PageError(page))
168                         ClearPageError(page);
169                 EXOFS_DBGMSG("recovered read error\n");
170                 /* fall through */
171         case PAGE_WAS_NOT_IN_IO:
172                 ret = 0; /* recovered error */
173                 break;
174         default:
175                 SetPageError(page);
176         }
177         return ret;
178 }
179
180 static void update_write_page(struct page *page, int ret)
181 {
182         if (unlikely(ret == PAGE_WAS_NOT_IN_IO))
183                 return; /* don't pass start don't collect $200 */
184
185         if (ret) {
186                 mapping_set_error(page->mapping, ret);
187                 SetPageError(page);
188         }
189         end_page_writeback(page);
190 }
191
192 /* Called at the end of reads, to optionally unlock pages and update their
193  * status.
194  */
195 static int __readpages_done(struct page_collect *pcol)
196 {
197         int i;
198         u64 good_bytes;
199         u64 length = 0;
200         int ret = ore_check_io(pcol->ios, NULL);
201
202         if (likely(!ret)) {
203                 good_bytes = pcol->length;
204                 ret = PAGE_WAS_NOT_IN_IO;
205         } else {
206                 good_bytes = 0;
207         }
208
209         EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
210                      " length=0x%lx nr_pages=%u\n",
211                      pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
212                      pcol->nr_pages);
213
214         for (i = 0; i < pcol->nr_pages; i++) {
215                 struct page *page = pcol->pages[i];
216                 struct inode *inode = page->mapping->host;
217                 int page_stat;
218
219                 if (inode != pcol->inode)
220                         continue; /* osd might add more pages at end */
221
222                 if (likely(length < good_bytes))
223                         page_stat = 0;
224                 else
225                         page_stat = ret;
226
227                 EXOFS_DBGMSG2("    readpages_done(0x%lx, 0x%lx) %s\n",
228                           inode->i_ino, page->index,
229                           page_stat ? "bad_bytes" : "good_bytes");
230
231                 ret = update_read_page(page, page_stat);
232                 if (!pcol->read_4_write)
233                         unlock_page(page);
234                 length += PAGE_SIZE;
235         }
236
237         pcol_free(pcol);
238         EXOFS_DBGMSG2("readpages_done END\n");
239         return ret;
240 }
241
242 /* callback of async reads */
243 static void readpages_done(struct ore_io_state *ios, void *p)
244 {
245         struct page_collect *pcol = p;
246
247         __readpages_done(pcol);
248         atomic_dec(&pcol->sbi->s_curr_pending);
249         kfree(pcol);
250 }
251
252 static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
253 {
254         int i;
255
256         for (i = 0; i < pcol->nr_pages; i++) {
257                 struct page *page = pcol->pages[i];
258
259                 if (rw == READ)
260                         update_read_page(page, ret);
261                 else
262                         update_write_page(page, ret);
263
264                 unlock_page(page);
265         }
266 }
267
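/* ore_get_rw_state() may have trimmed the I/O to fewer pages than were
 * collected.  In that case hand the leftover pages to the next page_collect
 * so that a following read_exec()/write_exec() submits them.
 */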
268 static int _maybe_not_all_in_one_io(struct ore_io_state *ios,
269         struct page_collect *pcol_src, struct page_collect *pcol)
270 {
271         /* length was wrong or offset was not page aligned */
272         BUG_ON(pcol_src->nr_pages < ios->nr_pages);
273
274         if (pcol_src->nr_pages > ios->nr_pages) {
275                 struct page **src_page;
276                 unsigned pages_less = pcol_src->nr_pages - ios->nr_pages;
277                 unsigned long len_less = pcol_src->length - ios->length;
278                 unsigned i;
279                 int ret;
280
281                 /* This IO was trimmed */
282                 pcol_src->nr_pages = ios->nr_pages;
283                 pcol_src->length = ios->length;
284
285                 /* Left over pages are passed to the next io */
286                 pcol->expected_pages += pages_less;
287                 pcol->nr_pages = pages_less;
288                 pcol->length = len_less;
289                 src_page = pcol_src->pages + pcol_src->nr_pages;
290                 pcol->pg_first = (*src_page)->index;
291
292                 ret = pcol_try_alloc(pcol);
293                 if (unlikely(ret))
294                         return ret;
295
296                 for (i = 0; i < pages_less; ++i)
297                         pcol->pages[i] = *src_page++;
298
299                 EXOFS_DBGMSG("Length was adjusted nr_pages=0x%x "
300                         "pages_less=0x%x expected_pages=0x%x "
301                         "next_offset=0x%llx next_len=0x%lx\n",
302                         pcol_src->nr_pages, pages_less, pcol->expected_pages,
303                         pcol->pg_first * PAGE_SIZE, pcol->length);
304         }
305         return 0;
306 }
307
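/* Submit the pages collected so far as a single ORE read.  In the
 * read_4_write case the read is performed synchronously; otherwise
 * readpages_done() completes the pages asynchronously.
 */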
308 static int read_exec(struct page_collect *pcol)
309 {
310         struct exofs_i_info *oi = exofs_i(pcol->inode);
311         struct ore_io_state *ios;
312         struct page_collect *pcol_copy = NULL;
313         int ret;
314
315         if (!pcol->pages)
316                 return 0;
317
318         if (!pcol->ios) {
319                 int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
320                                              pcol->pg_first << PAGE_CACHE_SHIFT,
321                                              pcol->length, &pcol->ios);
322
323                 if (ret)
324                         return ret;
325         }
326
327         ios = pcol->ios;
328         ios->pages = pcol->pages;
329
330         if (pcol->read_4_write) {
331                 ore_read(pcol->ios);
332                 return __readpages_done(pcol);
333         }
334
335         pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
336         if (!pcol_copy) {
337                 ret = -ENOMEM;
338                 goto err;
339         }
340
341         *pcol_copy = *pcol;
342         ios->done = readpages_done;
343         ios->private = pcol_copy;
344
345         /* pages ownership was passed to pcol_copy */
346         _pcol_reset(pcol);
347
348         ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
349         if (unlikely(ret))
350                 goto err;
351
352         EXOFS_DBGMSG2("read_exec(0x%lx) offset=0x%llx length=0x%llx\n",
353                 pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));
354
355         ret = ore_read(ios);
356         if (unlikely(ret))
357                 goto err;
358
359         atomic_inc(&pcol->sbi->s_curr_pending);
360
361         return 0;
362
363 err:
364         if (!pcol_copy) /* Failed before ownership transfer */
365                 pcol_copy = pcol;
366         _unlock_pcol_pages(pcol_copy, ret, READ);
367         pcol_free(pcol_copy);
368         kfree(pcol_copy);
369
370         return ret;
371 }
372
373 /* readpage_strip is called either directly from readpage() or by the VFS from
374  * within read_cache_pages(), to add one more page to be read. It will try to
375  * collect as many contiguous pages as possible. If a discontinuity is
376  * encountered, or it runs out of resources, it will submit the previous segment
377  * and will start a new collection. Eventually the caller must submit the last
378  * segment, if present.
379  */
380 static int readpage_strip(void *data, struct page *page)
381 {
382         struct page_collect *pcol = data;
383         struct inode *inode = pcol->inode;
384         struct exofs_i_info *oi = exofs_i(inode);
385         loff_t i_size = i_size_read(inode);
386         pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
387         size_t len;
388         int ret;
389
390         BUG_ON(!PageLocked(page));
391
392         /* FIXME: Just for debugging, will be removed */
393         if (PageUptodate(page))
394                 EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
395                           page->index);
396
397         pcol->that_locked_page = page;
398
399         if (page->index < end_index)
400                 len = PAGE_CACHE_SIZE;
401         else if (page->index == end_index)
402                 len = i_size & ~PAGE_CACHE_MASK;
403         else
404                 len = 0;
405
406         if (!len || !obj_created(oi)) {
407                 /* the page is out of bounds, or the object doesn't exist yet.
408                  * The current page is cleared and the request is split
409                  */
410                 clear_highpage(page);
411
412                 SetPageUptodate(page);
413                 if (PageError(page))
414                         ClearPageError(page);
415
416                 if (!pcol->read_4_write)
417                         unlock_page(page);
418                 EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
419                              "read_4_write=%d index=0x%lx end_index=0x%lx "
420                              "splitting\n", inode->i_ino, len,
421                              pcol->read_4_write, page->index, end_index);
422
423                 return read_exec(pcol);
424         }
425
426 try_again:
427
428         if (unlikely(pcol->pg_first == -1)) {
429                 pcol->pg_first = page->index;
430         } else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
431                    page->index)) {
432                 /* Discontinuity detected, split the request */
433                 ret = read_exec(pcol);
434                 if (unlikely(ret))
435                         goto fail;
436                 goto try_again;
437         }
438
439         if (!pcol->pages) {
440                 ret = pcol_try_alloc(pcol);
441                 if (unlikely(ret))
442                         goto fail;
443         }
444
445         if (len != PAGE_CACHE_SIZE)
446                 zero_user(page, len, PAGE_CACHE_SIZE - len);
447
448         EXOFS_DBGMSG2("    readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
449                      inode->i_ino, page->index, len);
450
451         ret = pcol_add_page(pcol, page, len);
452         if (ret) {
453                 EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
454                           "this_len=0x%zx nr_pages=%u length=0x%lx\n",
455                           page, len, pcol->nr_pages, pcol->length);
456
457                 /* split the request, and start again with current page */
458                 ret = read_exec(pcol);
459                 if (unlikely(ret))
460                         goto fail;
461
462                 goto try_again;
463         }
464
465         return 0;
466
467 fail:
468         /* SetPageError(page); ??? */
469         unlock_page(page);
470         return ret;
471 }
472
473 static int exofs_readpages(struct file *file, struct address_space *mapping,
474                            struct list_head *pages, unsigned nr_pages)
475 {
476         struct page_collect pcol;
477         int ret;
478
479         _pcol_init(&pcol, nr_pages, mapping->host);
480
481         ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
482         if (ret) {
483                 EXOFS_ERR("read_cache_pages => %d\n", ret);
484                 return ret;
485         }
486
487         ret = read_exec(&pcol);
488         if (unlikely(ret))
489                 return ret;
490
491         return read_exec(&pcol);
492 }
493
494 static int _readpage(struct page *page, bool read_4_write)
495 {
496         struct page_collect pcol;
497         int ret;
498
499         _pcol_init(&pcol, 1, page->mapping->host);
500
501         pcol.read_4_write = read_4_write;
502         ret = readpage_strip(&pcol, page);
503         if (ret) {
504                 EXOFS_ERR("_readpage => %d\n", ret);
505                 return ret;
506         }
507
508         return read_exec(&pcol);
509 }
510
511 /*
512  * We don't need the file
513  */
514 static int exofs_readpage(struct file *file, struct page *page)
515 {
516         return _readpage(page, false);
517 }
518
519 /* Callback for osd_write. All writes are asynchronous */
520 static void writepages_done(struct ore_io_state *ios, void *p)
521 {
522         struct page_collect *pcol = p;
523         int i;
524         u64  good_bytes;
525         u64  length = 0;
526         int ret = ore_check_io(ios, NULL);
527
528         atomic_dec(&pcol->sbi->s_curr_pending);
529
530         if (likely(!ret)) {
531                 good_bytes = pcol->length;
532                 ret = PAGE_WAS_NOT_IN_IO;
533         } else {
534                 good_bytes = 0;
535         }
536
537         EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
538                      " length=0x%lx nr_pages=%u\n",
539                      pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
540                      pcol->nr_pages);
541
542         for (i = 0; i < pcol->nr_pages; i++) {
543                 struct page *page = pcol->pages[i];
544                 struct inode *inode = page->mapping->host;
545                 int page_stat;
546
547                 if (inode != pcol->inode)
548                         continue; /* osd might add more pages to a bio */
549
550                 if (likely(length < good_bytes))
551                         page_stat = 0;
552                 else
553                         page_stat = ret;
554
555                 update_write_page(page, page_stat);
556                 unlock_page(page);
557                 EXOFS_DBGMSG2("    writepages_done(0x%lx, 0x%lx) status=%d\n",
558                              inode->i_ino, page->index, page_stat);
559
560                 length += PAGE_SIZE;
561         }
562
563         pcol_free(pcol);
564         kfree(pcol);
565         EXOFS_DBGMSG2("writepages_done END\n");
566 }
567
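/* "r4w" (read-for-write) hooks handed to the ORE via ios->r4w in
 * write_exec().  get_page returns the page backing a given file offset:
 * the already-locked page being written, a page-cache page, or ZERO_PAGE
 * beyond i_size; *uptodate tells the ORE whether its contents are valid.
 */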
568 static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
569 {
570         struct page_collect *pcol = priv;
571         pgoff_t index = offset / PAGE_SIZE;
572
573         if (!pcol->that_locked_page ||
574             (pcol->that_locked_page->index != index)) {
575                 struct page *page;
576                 loff_t i_size = i_size_read(pcol->inode);
577
578                 if (offset >= i_size) {
579                         *uptodate = true;
580                         EXOFS_DBGMSG("offset >= i_size index=0x%lx\n", index);
581                         return ZERO_PAGE(0);
582                 }
583
584                 page =  find_get_page(pcol->inode->i_mapping, index);
585                 if (!page) {
586                         page = find_or_create_page(pcol->inode->i_mapping,
587                                                    index, GFP_NOFS);
588                         if (unlikely(!page)) {
589                                 EXOFS_DBGMSG("grab_cache_page Failed "
590                                         "index=0x%llx\n", _LLU(index));
591                                 return NULL;
592                         }
593                         unlock_page(page);
594                 }
595                 if (PageDirty(page) || PageWriteback(page))
596                         *uptodate = true;
597                 else
598                         *uptodate = PageUptodate(page);
599                 EXOFS_DBGMSG("index=0x%lx uptodate=%d\n", index, *uptodate);
600                 return page;
601         } else {
602                 EXOFS_DBGMSG("YES that_locked_page index=0x%lx\n",
603                              pcol->that_locked_page->index);
604                 *uptodate = true;
605                 return pcol->that_locked_page;
606         }
607 }
608
609 static void __r4w_put_page(void *priv, struct page *page)
610 {
611         struct page_collect *pcol = priv;
612
613         if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) {
614                 EXOFS_DBGMSG("index=0x%lx\n", page->index);
615                 page_cache_release(page);
616                 return;
617         }
618         EXOFS_DBGMSG("that_locked_page index=0x%lx\n",
619                      ZERO_PAGE(0) == page ? -1 : page->index);
620 }
621
622 static const struct _ore_r4w_op _r4w_op = {
623         .get_page = &__r4w_get_page,
624         .put_page = &__r4w_put_page,
625 };
626
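/* Submit the pages collected by writepage_strip() as a single asynchronous
 * ORE write; writepages_done() ends writeback on each page when the I/O
 * completes.  Ownership of pcol->pages moves to pcol_copy here, just as in
 * read_exec().
 */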
627 static int write_exec(struct page_collect *pcol)
628 {
629         struct exofs_i_info *oi = exofs_i(pcol->inode);
630         struct ore_io_state *ios;
631         struct page_collect *pcol_copy = NULL;
632         int ret;
633
634         if (!pcol->pages)
635                 return 0;
636
637         BUG_ON(pcol->ios);
638         ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
639                                  pcol->pg_first << PAGE_CACHE_SHIFT,
640                                  pcol->length, &pcol->ios);
641         if (unlikely(ret))
642                 goto err;
643
644         pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
645         if (!pcol_copy) {
646                 EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
647                 ret = -ENOMEM;
648                 goto err;
649         }
650
651         *pcol_copy = *pcol;
652
653         ios = pcol->ios;
654         ios->pages = pcol_copy->pages;
655         ios->done = writepages_done;
656         ios->r4w = &_r4w_op;
657         ios->private = pcol_copy;
658
659         /* pages ownership was passed to pcol_copy */
660         _pcol_reset(pcol);
661
662         ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
663         if (unlikely(ret))
664                 goto err;
665
666         EXOFS_DBGMSG2("write_exec(0x%lx) offset=0x%llx length=0x%llx\n",
667                 pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));
668
669         ret = ore_write(ios);
670         if (unlikely(ret)) {
671                 EXOFS_ERR("write_exec: ore_write() Failed\n");
672                 goto err;
673         }
674
675         atomic_inc(&pcol->sbi->s_curr_pending);
676         return 0;
677
678 err:
679         if (!pcol_copy) /* Failed before ownership transfer */
680                 pcol_copy = pcol;
681         _unlock_pcol_pages(pcol_copy, ret, WRITE);
682         pcol_free(pcol_copy);
683         kfree(pcol_copy);
684
685         return ret;
686 }
687
688 /* writepage_strip is called either directly from writepage() or by the VFS from
689  * within write_cache_pages(), to add one more page to be written to storage.
690  * It will try to collect as many contiguous pages as possible. If a
691  * discontinuity is encountered, or it runs out of resources, it will submit the
692  * previous segment and will start a new collection.
693  * Eventually the caller must submit the last segment, if present.
694  */
695 static int writepage_strip(struct page *page,
696                            struct writeback_control *wbc_unused, void *data)
697 {
698         struct page_collect *pcol = data;
699         struct inode *inode = pcol->inode;
700         struct exofs_i_info *oi = exofs_i(inode);
701         loff_t i_size = i_size_read(inode);
702         pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
703         size_t len;
704         int ret;
705
706         BUG_ON(!PageLocked(page));
707
708         ret = wait_obj_created(oi);
709         if (unlikely(ret))
710                 goto fail;
711
712         if (page->index < end_index)
713                 /* in this case, the page is within the limits of the file */
714                 len = PAGE_CACHE_SIZE;
715         else {
716                 len = i_size & ~PAGE_CACHE_MASK;
717
718                 if (page->index > end_index || !len) {
719                         /* in this case, the page is outside the limits
720                          * (truncate in progress)
721                          */
722                         ret = write_exec(pcol);
723                         if (unlikely(ret))
724                                 goto fail;
725                         if (PageError(page))
726                                 ClearPageError(page);
727                         unlock_page(page);
728                         EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
729                                      "outside the limits\n",
730                                      inode->i_ino, page->index);
731                         return 0;
732                 }
733         }
734
735 try_again:
736
737         if (unlikely(pcol->pg_first == -1)) {
738                 pcol->pg_first = page->index;
739         } else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
740                    page->index)) {
741                 /* Discontinuity detected, split the request */
742                 ret = write_exec(pcol);
743                 if (unlikely(ret))
744                         goto fail;
745
746                 EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
747                              inode->i_ino, page->index);
748                 goto try_again;
749         }
750
751         if (!pcol->pages) {
752                 ret = pcol_try_alloc(pcol);
753                 if (unlikely(ret))
754                         goto fail;
755         }
756
757         EXOFS_DBGMSG2("    writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
758                      inode->i_ino, page->index, len);
759
760         ret = pcol_add_page(pcol, page, len);
761         if (unlikely(ret)) {
762                 EXOFS_DBGMSG2("Failed pcol_add_page "
763                              "nr_pages=%u total_length=0x%lx\n",
764                              pcol->nr_pages, pcol->length);
765
766                 /* split the request, next loop will start again */
767                 ret = write_exec(pcol);
768                 if (unlikely(ret)) {
769                         EXOFS_DBGMSG("write_exec failed => %d", ret);
770                         goto fail;
771                 }
772
773                 goto try_again;
774         }
775
776         BUG_ON(PageWriteback(page));
777         set_page_writeback(page);
778
779         return 0;
780
781 fail:
782         EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
783                      inode->i_ino, page->index, ret);
784         set_bit(AS_EIO, &page->mapping->flags);
785         unlock_page(page);
786         return ret;
787 }
788
789 static int exofs_writepages(struct address_space *mapping,
790                        struct writeback_control *wbc)
791 {
792         struct page_collect pcol;
793         long start, end, expected_pages;
794         int ret;
795
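        /* Estimate how many pages this writeout will cover so that
         * pcol_try_alloc() can size the pages[] array; the estimate is
         * clamped to at least 32 pages.
         */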
796         start = wbc->range_start >> PAGE_CACHE_SHIFT;
797         end = (wbc->range_end == LLONG_MAX) ?
798                         start + mapping->nrpages :
799                         wbc->range_end >> PAGE_CACHE_SHIFT;
800
801         if (start || end)
802                 expected_pages = end - start + 1;
803         else
804                 expected_pages = mapping->nrpages;
805
806         if (expected_pages < 32L)
807                 expected_pages = 32L;
808
809         EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
810                      "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
811                      mapping->host->i_ino, wbc->range_start, wbc->range_end,
812                      mapping->nrpages, start, end, expected_pages);
813
814         _pcol_init(&pcol, expected_pages, mapping->host);
815
816         ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
817         if (unlikely(ret)) {
818                 EXOFS_ERR("write_cache_pages => %d\n", ret);
819                 return ret;
820         }
821
822         ret = write_exec(&pcol);
823         if (unlikely(ret))
824                 return ret;
825
826         if (wbc->sync_mode == WB_SYNC_ALL) {
827                 return write_exec(&pcol); /* pump the last remainder */
828         } else if (pcol.nr_pages) {
829                 /* not SYNC; let the remainder join the next writeout */
830                 unsigned i;
831
832                 for (i = 0; i < pcol.nr_pages; i++) {
833                         struct page *page = pcol.pages[i];
834
835                         end_page_writeback(page);
836                         set_page_dirty(page);
837                         unlock_page(page);
838                 }
839         }
840         return 0;
841 }
842
843 /*
844 static int exofs_writepage(struct page *page, struct writeback_control *wbc)
845 {
846         struct page_collect pcol;
847         int ret;
848
849         _pcol_init(&pcol, 1, page->mapping->host);
850
851         ret = writepage_strip(page, NULL, &pcol);
852         if (ret) {
853                 EXOFS_ERR("exofs_writepage => %d\n", ret);
854                 return ret;
855         }
856
857         return write_exec(&pcol);
858 }
859 */
860 /* i_mutex is held, so using inode->i_size directly is safe */
861 static void _write_failed(struct inode *inode, loff_t to)
862 {
863         if (to > inode->i_size)
864                 truncate_pagecache(inode, to, inode->i_size);
865 }
866
867 int exofs_write_begin(struct file *file, struct address_space *mapping,
868                 loff_t pos, unsigned len, unsigned flags,
869                 struct page **pagep, void **fsdata)
870 {
871         int ret = 0;
872         struct page *page;
873
874         page = *pagep;
875         if (page == NULL) {
876                 ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
877                                          fsdata);
878                 if (ret) {
879                         EXOFS_DBGMSG("simple_write_begin failed\n");
880                         goto out;
881                 }
882
883                 page = *pagep;
884         }
885
886         /* read-modify-write: a partial, not-uptodate page must be read in first */
887         if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
888                 loff_t i_size = i_size_read(mapping->host);
889                 pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
890                 size_t rlen;
891
892                 if (page->index < end_index)
893                         rlen = PAGE_CACHE_SIZE;
894                 else if (page->index == end_index)
895                         rlen = i_size & ~PAGE_CACHE_MASK;
896                 else
897                         rlen = 0;
898
899                 if (!rlen) {
900                         clear_highpage(page);
901                         SetPageUptodate(page);
902                         goto out;
903                 }
904
905                 ret = _readpage(page, true);
906                 if (ret) {
907                         /*SetPageError was done by _readpage. Is it ok?*/
908                         unlock_page(page);
909                         EXOFS_DBGMSG("__readpage failed\n");
910                 }
911         }
912 out:
913         if (unlikely(ret))
914                 _write_failed(mapping->host, pos + len);
915
916         return ret;
917 }
918
919 static int exofs_write_begin_export(struct file *file,
920                 struct address_space *mapping,
921                 loff_t pos, unsigned len, unsigned flags,
922                 struct page **pagep, void **fsdata)
923 {
924         *pagep = NULL;
925
926         return exofs_write_begin(file, mapping, pos, len, flags, pagep,
927                                         fsdata);
928 }
929
930 static int exofs_write_end(struct file *file, struct address_space *mapping,
931                         loff_t pos, unsigned len, unsigned copied,
932                         struct page *page, void *fsdata)
933 {
934         struct inode *inode = mapping->host;
935         /* According to the comment in simple_write_end, i_mutex is held */
936         loff_t i_size = inode->i_size;
937         int ret;
938
939         ret = simple_write_end(file, mapping, pos, len, copied, page, fsdata);
940         if (unlikely(ret))
941                 _write_failed(inode, pos + len);
942
943         /* TODO: once simple_write_end marks inode dirty remove */
944         if (i_size != inode->i_size)
945                 mark_inode_dirty(inode);
946         return ret;
947 }
948
949 static int exofs_releasepage(struct page *page, gfp_t gfp)
950 {
951         EXOFS_DBGMSG("page 0x%lx\n", page->index);
952         WARN_ON(1);
953         return 0;
954 }
955
956 static void exofs_invalidatepage(struct page *page, unsigned long offset)
957 {
958         EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
959         WARN_ON(1);
960 }
961
962 const struct address_space_operations exofs_aops = {
963         .readpage       = exofs_readpage,
964         .readpages      = exofs_readpages,
965         .writepage      = NULL,
966         .writepages     = exofs_writepages,
967         .write_begin    = exofs_write_begin_export,
968         .write_end      = exofs_write_end,
969         .releasepage    = exofs_releasepage,
970         .set_page_dirty = __set_page_dirty_nobuffers,
971         .invalidatepage = exofs_invalidatepage,
972
973         /* Not implemented Yet */
974         .bmap           = NULL, /* TODO: use osd's OSD_ACT_READ_MAP */
975         .direct_IO      = NULL, /* TODO: Should be trivial to do */
976
977         /* With these NULL has special meaning or default is not exported */
978         .get_xip_mem    = NULL,
979         .migratepage    = NULL,
980         .launder_page   = NULL,
981         .is_partially_uptodate = NULL,
982         .error_remove_page = NULL,
983 };
984
985 /******************************************************************************
986  * INODE OPERATIONS
987  *****************************************************************************/
988
989 /*
990  * Test whether an inode is a fast symlink.
991  */
992 static inline int exofs_inode_is_fast_symlink(struct inode *inode)
993 {
994         struct exofs_i_info *oi = exofs_i(inode);
995
996         return S_ISLNK(inode->i_mode) && (oi->i_data[0] != 0);
997 }
998
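/* Truncate the OSD object to newsize; only if that succeeds is the in-core
 * inode size (and its page cache) updated via truncate_setsize().
 */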
999 static int _do_truncate(struct inode *inode, loff_t newsize)
1000 {
1001         struct exofs_i_info *oi = exofs_i(inode);
1002         struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
1003         int ret;
1004
1005         inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1006
1007         ret = ore_truncate(&sbi->layout, &oi->oc, (u64)newsize);
1008         if (likely(!ret))
1009                 truncate_setsize(inode, newsize);
1010
1011         EXOFS_DBGMSG("(0x%lx) size=0x%llx ret=>%d\n",
1012                      inode->i_ino, newsize, ret);
1013         return ret;
1014 }
1015
1016 /*
1017  * Set inode attributes - update size attribute on OSD if needed,
1018  *                        otherwise just call generic functions.
1019  */
1020 int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
1021 {
1022         struct inode *inode = dentry->d_inode;
1023         int error;
1024
1025         /* if we are about to modify an object, and it hasn't been
1026          * created yet, wait
1027          */
1028         error = wait_obj_created(exofs_i(inode));
1029         if (unlikely(error))
1030                 return error;
1031
1032         error = inode_change_ok(inode, iattr);
1033         if (unlikely(error))
1034                 return error;
1035
1036         if ((iattr->ia_valid & ATTR_SIZE) &&
1037             iattr->ia_size != i_size_read(inode)) {
1038                 error = _do_truncate(inode, iattr->ia_size);
1039                 if (unlikely(error))
1040                         return error;
1041         }
1042
1043         setattr_copy(inode, iattr);
1044         mark_inode_dirty(inode);
1045         return 0;
1046 }
1047
1048 static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
1049         EXOFS_APAGE_FS_DATA,
1050         EXOFS_ATTR_INODE_FILE_LAYOUT,
1051         0);
1052 static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF(
1053         EXOFS_APAGE_FS_DATA,
1054         EXOFS_ATTR_INODE_DIR_LAYOUT,
1055         0);
1056
1057 /*
1058  * Read the Linux inode info from the OSD, and return it as is. In exofs the
1059  * inode info is in an application specific page/attribute of the osd-object.
1060  */
1061 static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
1062                     struct exofs_fcb *inode)
1063 {
1064         struct exofs_sb_info *sbi = sb->s_fs_info;
1065         struct osd_attr attrs[] = {
1066                 [0] = g_attr_inode_data,
1067                 [1] = g_attr_inode_file_layout,
1068                 [2] = g_attr_inode_dir_layout,
1069         };
1070         struct ore_io_state *ios;
1071         struct exofs_on_disk_inode_layout *layout;
1072         int ret;
1073
1074         ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
1075         if (unlikely(ret)) {
1076                 EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
1077                 return ret;
1078         }
1079
1080         attrs[1].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);
1081         attrs[2].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);
1082
1083         ios->in_attr = attrs;
1084         ios->in_attr_len = ARRAY_SIZE(attrs);
1085
1086         ret = ore_read(ios);
1087         if (unlikely(ret)) {
1088                 EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
1089                           _LLU(oi->one_comp.obj.id), ret);
1090                 memset(inode, 0, sizeof(*inode));
1091                 inode->i_mode = 0040000 | (0777 & ~022);
1092                 /* If the object is lost on the target we might as well
1093                  * enable its deletion.
1094                  */
1095                 if ((ret == -ENOENT) || (ret == -EINVAL))
1096                         ret = 0;
1097                 goto out;
1098         }
1099
1100         ret = extract_attr_from_ios(ios, &attrs[0]);
1101         if (ret) {
1102                 EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
1103                 goto out;
1104         }
1105         WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
1106         memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);
1107
1108         ret = extract_attr_from_ios(ios, &attrs[1]);
1109         if (ret) {
1110                 EXOFS_ERR("%s: extract_attr of inode_file_layout failed\n", __func__);
1111                 goto out;
1112         }
1113         if (attrs[1].len) {
1114                 layout = attrs[1].val_ptr;
1115                 if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
1116                         EXOFS_ERR("%s: unsupported files layout %d\n",
1117                                 __func__, layout->gen_func);
1118                         ret = -ENOTSUPP;
1119                         goto out;
1120                 }
1121         }
1122
1123         ret = extract_attr_from_ios(ios, &attrs[2]);
1124         if (ret) {
1125                 EXOFS_ERR("%s: extract_attr of inode_dir_layout failed\n", __func__);
1126                 goto out;
1127         }
1128         if (attrs[2].len) {
1129                 layout = attrs[2].val_ptr;
1130                 if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
1131                         EXOFS_ERR("%s: unsupported meta-data layout %d\n",
1132                                 __func__, layout->gen_func);
1133                         ret = -ENOTSUPP;
1134                         goto out;
1135                 }
1136         }
1137
1138 out:
1139         ore_put_io_state(ios);
1140         return ret;
1141 }
1142
1143 static void __oi_init(struct exofs_i_info *oi)
1144 {
1145         init_waitqueue_head(&oi->i_wq);
1146         oi->i_flags = 0;
1147 }
1148 /*
1149  * Fill in an inode read from the OSD and set it up for use
1150  */
1151 struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
1152 {
1153         struct exofs_i_info *oi;
1154         struct exofs_fcb fcb;
1155         struct inode *inode;
1156         int ret;
1157
1158         inode = iget_locked(sb, ino);
1159         if (!inode)
1160                 return ERR_PTR(-ENOMEM);
1161         if (!(inode->i_state & I_NEW))
1162                 return inode;
1163         oi = exofs_i(inode);
1164         __oi_init(oi);
1165         exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
1166                          exofs_oi_objno(oi));
1167
1168         /* read the inode from the osd */
1169         ret = exofs_get_inode(sb, oi, &fcb);
1170         if (ret)
1171                 goto bad_inode;
1172
1173         set_obj_created(oi);
1174
1175         /* copy stuff from on-disk struct to in-memory struct */
1176         inode->i_mode = le16_to_cpu(fcb.i_mode);
1177         i_uid_write(inode, le32_to_cpu(fcb.i_uid));
1178         i_gid_write(inode, le32_to_cpu(fcb.i_gid));
1179         set_nlink(inode, le16_to_cpu(fcb.i_links_count));
1180         inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
1181         inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
1182         inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
1183         inode->i_ctime.tv_nsec =
1184                 inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
1185         oi->i_commit_size = le64_to_cpu(fcb.i_size);
1186         i_size_write(inode, oi->i_commit_size);
1187         inode->i_blkbits = EXOFS_BLKSHIFT;
1188         inode->i_generation = le32_to_cpu(fcb.i_generation);
1189
1190         oi->i_dir_start_lookup = 0;
1191
1192         if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
1193                 ret = -ESTALE;
1194                 goto bad_inode;
1195         }
1196
1197         if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1198                 if (fcb.i_data[0])
1199                         inode->i_rdev =
1200                                 old_decode_dev(le32_to_cpu(fcb.i_data[0]));
1201                 else
1202                         inode->i_rdev =
1203                                 new_decode_dev(le32_to_cpu(fcb.i_data[1]));
1204         } else {
1205                 memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
1206         }
1207
1208         inode->i_mapping->backing_dev_info = sb->s_bdi;
1209         if (S_ISREG(inode->i_mode)) {
1210                 inode->i_op = &exofs_file_inode_operations;
1211                 inode->i_fop = &exofs_file_operations;
1212                 inode->i_mapping->a_ops = &exofs_aops;
1213         } else if (S_ISDIR(inode->i_mode)) {
1214                 inode->i_op = &exofs_dir_inode_operations;
1215                 inode->i_fop = &exofs_dir_operations;
1216                 inode->i_mapping->a_ops = &exofs_aops;
1217         } else if (S_ISLNK(inode->i_mode)) {
1218                 if (exofs_inode_is_fast_symlink(inode))
1219                         inode->i_op = &exofs_fast_symlink_inode_operations;
1220                 else {
1221                         inode->i_op = &exofs_symlink_inode_operations;
1222                         inode->i_mapping->a_ops = &exofs_aops;
1223                 }
1224         } else {
1225                 inode->i_op = &exofs_special_inode_operations;
1226                 if (fcb.i_data[0])
1227                         init_special_inode(inode, inode->i_mode,
1228                            old_decode_dev(le32_to_cpu(fcb.i_data[0])));
1229                 else
1230                         init_special_inode(inode, inode->i_mode,
1231                            new_decode_dev(le32_to_cpu(fcb.i_data[1])));
1232         }
1233
1234         unlock_new_inode(inode);
1235         return inode;
1236
1237 bad_inode:
1238         iget_failed(inode);
1239         return ERR_PTR(ret);
1240 }
1241
1242 int __exofs_wait_obj_created(struct exofs_i_info *oi)
1243 {
1244         if (!obj_created(oi)) {
1245                 EXOFS_DBGMSG("!obj_created\n");
1246                 BUG_ON(!obj_2bcreated(oi));
1247                 wait_event(oi->i_wq, obj_created(oi));
1248                 EXOFS_DBGMSG("wait_event done\n");
1249         }
1250         return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
1251 }
1252
1253 /*
1254  * Callback function from exofs_new_inode().  The important thing is that we
1255  * set the obj_created flag so that other methods know that the object exists on
1256  * the OSD.
1257  */
1258 static void create_done(struct ore_io_state *ios, void *p)
1259 {
1260         struct inode *inode = p;
1261         struct exofs_i_info *oi = exofs_i(inode);
1262         struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
1263         int ret;
1264
1265         ret = ore_check_io(ios, NULL);
1266         ore_put_io_state(ios);
1267
1268         atomic_dec(&sbi->s_curr_pending);
1269
1270         if (unlikely(ret)) {
1271                 EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx",
1272                           _LLU(exofs_oi_objno(oi)),
1273                           _LLU(oi->one_comp.obj.partition));
1274                 /*TODO: When the FS is corrupted, creation can fail because the
1275                  * object already exists. Get rid of this asynchronous creation;
1276                  * if the object exists, increment the obj counter and try the
1277                  * next object until we succeed. All these dangling objects will
1278                  * be made into lost files by chkfs.exofs
1279                  */
1280         }
1281
1282         set_obj_created(oi);
1283
1284         wake_up(&oi->i_wq);
1285 }
1286
1287 /*
1288  * Set up a new inode and create an object for it on the OSD
1289  */
1290 struct inode *exofs_new_inode(struct inode *dir, umode_t mode)
1291 {
1292         struct super_block *sb = dir->i_sb;
1293         struct exofs_sb_info *sbi = sb->s_fs_info;
1294         struct inode *inode;
1295         struct exofs_i_info *oi;
1296         struct ore_io_state *ios;
1297         int ret;
1298
1299         inode = new_inode(sb);
1300         if (!inode)
1301                 return ERR_PTR(-ENOMEM);
1302
1303         oi = exofs_i(inode);
1304         __oi_init(oi);
1305
1306         set_obj_2bcreated(oi);
1307
1308         inode->i_mapping->backing_dev_info = sb->s_bdi;
1309         inode_init_owner(inode, dir, mode);
1310         inode->i_ino = sbi->s_nextid++;
1311         inode->i_blkbits = EXOFS_BLKSHIFT;
1312         inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1313         oi->i_commit_size = inode->i_size = 0;
1314         spin_lock(&sbi->s_next_gen_lock);
1315         inode->i_generation = sbi->s_next_generation++;
1316         spin_unlock(&sbi->s_next_gen_lock);
1317         insert_inode_hash(inode);
1318
1319         exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
1320                          exofs_oi_objno(oi));
1321         exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */
1322
1323         mark_inode_dirty(inode);
1324
1325         ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
1326         if (unlikely(ret)) {
1327                 EXOFS_ERR("exofs_new_inode: ore_get_io_state failed\n");
1328                 return ERR_PTR(ret);
1329         }
1330
1331         ios->done = create_done;
1332         ios->private = inode;
1333
1334         ret = ore_create(ios);
1335         if (ret) {
1336                 ore_put_io_state(ios);
1337                 return ERR_PTR(ret);
1338         }
1339         atomic_inc(&sbi->s_curr_pending);
1340
1341         return inode;
1342 }
1343
1344 /*
1345  * struct to pass two arguments to update_inode's callback
1346  */
1347 struct updatei_args {
1348         struct exofs_sb_info    *sbi;
1349         struct exofs_fcb        fcb;
1350 };
1351
1352 /*
1353  * Callback function from exofs_update_inode().
1354  */
1355 static void updatei_done(struct ore_io_state *ios, void *p)
1356 {
1357         struct updatei_args *args = p;
1358
1359         ore_put_io_state(ios);
1360
1361         atomic_dec(&args->sbi->s_curr_pending);
1362
1363         kfree(args);
1364 }
1365
1366 /*
1367  * Write the inode to the OSD.  Just fill up the struct, and set the attribute
1368  * synchronously or asynchronously depending on the do_sync flag.
1369  */
1370 static int exofs_update_inode(struct inode *inode, int do_sync)
1371 {
1372         struct exofs_i_info *oi = exofs_i(inode);
1373         struct super_block *sb = inode->i_sb;
1374         struct exofs_sb_info *sbi = sb->s_fs_info;
1375         struct ore_io_state *ios;
1376         struct osd_attr attr;
1377         struct exofs_fcb *fcb;
1378         struct updatei_args *args;
1379         int ret;
1380
1381         args = kzalloc(sizeof(*args), GFP_KERNEL);
1382         if (!args) {
1383                 EXOFS_DBGMSG("Failed kzalloc of args\n");
1384                 return -ENOMEM;
1385         }
1386
1387         fcb = &args->fcb;
1388
1389         fcb->i_mode = cpu_to_le16(inode->i_mode);
1390         fcb->i_uid = cpu_to_le32(i_uid_read(inode));
1391         fcb->i_gid = cpu_to_le32(i_gid_read(inode));
1392         fcb->i_links_count = cpu_to_le16(inode->i_nlink);
1393         fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
1394         fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
1395         fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
1396         oi->i_commit_size = i_size_read(inode);
1397         fcb->i_size = cpu_to_le64(oi->i_commit_size);
1398         fcb->i_generation = cpu_to_le32(inode->i_generation);
1399
1400         if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
1401                 if (old_valid_dev(inode->i_rdev)) {
1402                         fcb->i_data[0] =
1403                                 cpu_to_le32(old_encode_dev(inode->i_rdev));
1404                         fcb->i_data[1] = 0;
1405                 } else {
1406                         fcb->i_data[0] = 0;
1407                         fcb->i_data[1] =
1408                                 cpu_to_le32(new_encode_dev(inode->i_rdev));
1409                         fcb->i_data[2] = 0;
1410                 }
1411         } else
1412                 memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));
1413
1414         ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
1415         if (unlikely(ret)) {
1416                 EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
1417                 goto free_args;
1418         }
1419
1420         attr = g_attr_inode_data;
1421         attr.val_ptr = fcb;
1422         ios->out_attr_len = 1;
1423         ios->out_attr = &attr;
1424
1425         wait_obj_created(oi);
1426
1427         if (!do_sync) {
1428                 args->sbi = sbi;
1429                 ios->done = updatei_done;
1430                 ios->private = args;
1431         }
1432
1433         ret = ore_write(ios);
1434         if (!do_sync && !ret) {
1435                 atomic_inc(&sbi->s_curr_pending);
1436                 goto out; /* deallocation in updatei_done */
1437         }
1438
1439         ore_put_io_state(ios);
1440 free_args:
1441         kfree(args);
1442 out:
1443         EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n",
1444                      inode->i_ino, do_sync, ret);
1445         return ret;
1446 }
1447
1448 int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
1449 {
1450         /* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
1451         return exofs_update_inode(inode, 1);
1452 }
1453
1454 /*
1455  * Callback function from exofs_evict_inode() - we don't have much cleaning up
1456  * to do.
1457  */
1458 static void delete_done(struct ore_io_state *ios, void *p)
1459 {
1460         struct exofs_sb_info *sbi = p;
1461
1462         ore_put_io_state(ios);
1463
1464         atomic_dec(&sbi->s_curr_pending);
1465 }
1466
1467 /*
1468  * Called when the refcount of an inode reaches zero.  We remove the object
1469  * from the OSD here.  We make sure the object was created before we try and
1470  * delete it.
1471  */
1472 void exofs_evict_inode(struct inode *inode)
1473 {
1474         struct exofs_i_info *oi = exofs_i(inode);
1475         struct super_block *sb = inode->i_sb;
1476         struct exofs_sb_info *sbi = sb->s_fs_info;
1477         struct ore_io_state *ios;
1478         int ret;
1479
1480         truncate_inode_pages(&inode->i_data, 0);
1481
1482         /* TODO: should do better here */
1483         if (inode->i_nlink || is_bad_inode(inode))
1484                 goto no_delete;
1485
1486         inode->i_size = 0;
1487         clear_inode(inode);
1488
1489         /* if we are deleting an obj that hasn't been created yet, wait.
1490          * This also makes sure that create_done cannot be called with an
1491          * already evicted inode.
1492          */
1493         wait_obj_created(oi);
1494         /* ignore the error, attempt a remove anyway */
1495
1496         /* Now Remove the OSD objects */
1497         ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
1498         if (unlikely(ret)) {
1499                 EXOFS_ERR("%s: ore_get_io_state failed\n", __func__);
1500                 return;
1501         }
1502
1503         ios->done = delete_done;
1504         ios->private = sbi;
1505
1506         ret = ore_remove(ios);
1507         if (ret) {
1508                 EXOFS_ERR("%s: ore_remove failed\n", __func__);
1509                 ore_put_io_state(ios);
1510                 return;
1511         }
1512         atomic_inc(&sbi->s_curr_pending);
1513
1514         return;
1515
1516 no_delete:
1517         clear_inode(inode);
1518 }