fs: push sync_filesystem() down to the file system's remount_fs()
fs/gfs2/log.c (firefly-linux-kernel-4.4.55.git)
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/bio.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"

/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
                             unsigned int ssize)
{
        unsigned int blks;
        unsigned int first, second;

        blks = 1;
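        /*
         * The first descriptor block carries a struct gfs2_log_descriptor
         * header; any continuation blocks only carry a struct
         * gfs2_meta_header, so they hold more entries per block.
         */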
        first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

        if (nstruct > first) {
                second = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_meta_header)) / ssize;
                blks += DIV_ROUND_UP(nstruct - first, second);
        }

        return blks;
}

/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */

void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
        bd->bd_tr = NULL;
        list_del_init(&bd->bd_ail_st_list);
        list_del_init(&bd->bd_ail_gl_list);
        atomic_dec(&bd->bd_gl->gl_ail_count);
        brelse(bd->bd_bh);
}

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction whose ail1 list is to be written back
 *
 */

static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
                               struct writeback_control *wbc,
                               struct gfs2_trans *tr)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
        struct gfs2_glock *gl = NULL;
        struct address_space *mapping;
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

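        /*
         * Scan this transaction's ail1 list.  Buffers whose I/O has completed
         * are moved to the ail2 list; dirty buffers trigger writeback of the
         * owning inode's address space.  sd_ail_lock is dropped around
         * generic_writepages(), so return 1 to make the caller restart its
         * scan of the ail1 transactions.
         */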
        list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
                bh = bd->bd_bh;

                gfs2_assert(sdp, bd->bd_tr == tr);

                if (!buffer_busy(bh)) {
                        if (!buffer_uptodate(bh))
                                gfs2_io_error_bh(sdp, bh);
                        list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
                        continue;
                }

                if (!buffer_dirty(bh))
                        continue;
                if (gl == bd->bd_gl)
                        continue;
                gl = bd->bd_gl;
                list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
                mapping = bh->b_page->mapping;
                if (!mapping)
                        continue;
                spin_unlock(&sdp->sd_ail_lock);
                generic_writepages(mapping, wbc);
                spin_lock(&sdp->sd_ail_lock);
                if (wbc->nr_to_write <= 0)
                        break;
                return 1;
        }

        return 0;
}

/**
 * gfs2_ail1_flush - start writeback of some ail1 entries
 * @sdp: The super block
 * @wbc: The writeback control structure
 *
 * Writes back some ail1 entries, according to the limits in the
 * writeback control structure
 */

void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
        struct list_head *head = &sdp->sd_ail1_list;
        struct gfs2_trans *tr;

        trace_gfs2_ail_flush(sdp, wbc, 1);
        spin_lock(&sdp->sd_ail_lock);
restart:
        list_for_each_entry_reverse(tr, head, tr_list) {
                if (wbc->nr_to_write <= 0)
                        break;
                if (gfs2_ail1_start_one(sdp, wbc, tr))
                        goto restart;
        }
        spin_unlock(&sdp->sd_ail_lock);
        trace_gfs2_ail_flush(sdp, wbc, 0);
}

/**
 * gfs2_ail1_start - start writeback of all ail1 entries
 * @sdp: The superblock
 */

static void gfs2_ail1_start(struct gfs2_sbd *sdp)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = LONG_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        return gfs2_ail1_flush(sdp, &wbc);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @tr: the transaction to check
 *
 */

static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
                                         bd_ail_st_list) {
                bh = bd->bd_bh;
                gfs2_assert(sdp, bd->bd_tr == tr);
                if (buffer_busy(bh))
                        continue;
                if (!buffer_uptodate(bh))
                        gfs2_io_error_bh(sdp, bh);
                list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
        }
}

/**
 * gfs2_ail1_empty - Try to empty the ail1 lists
 * @sdp: The superblock
 *
 * Tries to empty the ail1 lists, starting with the oldest first
 */

static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr, *s;
        int ret;

        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
                gfs2_ail1_empty_one(sdp, tr);
                if (list_empty(&tr->tr_ail1_list))
                        list_move(&tr->tr_list, &sdp->sd_ail2_list);
                else
                        break;
        }
        ret = list_empty(&sdp->sd_ail1_list);
        spin_unlock(&sdp->sd_ail_lock);

        return ret;
}

static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        struct gfs2_bufdata *bd;
        struct buffer_head *bh;

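        /*
         * Find the first ail1 buffer that is still locked (i.e. has I/O in
         * flight) and wait for that I/O to finish, dropping sd_ail_lock while
         * sleeping.  Callers repeat the start/wait/empty cycle as needed.
         */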
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
                list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
                        bh = bd->bd_bh;
                        if (!buffer_locked(bh))
                                continue;
                        get_bh(bh);
                        spin_unlock(&sdp->sd_ail_lock);
                        wait_on_buffer(bh);
                        brelse(bh);
                        return;
                }
        }
        spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_ail2_empty_one - Remove all of a transaction's entries from the ail2 list
 * @sdp: the filesystem
 * @tr: the transaction
 *
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        struct list_head *head = &tr->tr_ail2_list;
        struct gfs2_bufdata *bd;

        while (!list_empty(head)) {
                bd = list_entry(head->prev, struct gfs2_bufdata,
                                bd_ail_st_list);
                gfs2_assert(sdp, bd->bd_tr == tr);
                gfs2_remove_from_ail(bd);
        }
}

static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        struct gfs2_trans *tr, *safe;
        unsigned int old_tail = sdp->sd_log_tail;
        int wrap = (new_tail < old_tail);
        int a, b, rm;

        spin_lock(&sdp->sd_ail_lock);

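        /*
         * The journal is circular, so the range of blocks being freed,
         * [old_tail, new_tail), may wrap past the end of the journal.  A
         * transaction on the ail2 list can be removed once its first block
         * (tr_first) falls inside that range.
         */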
        list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
                a = (old_tail <= tr->tr_first);
                b = (tr->tr_first < new_tail);
                rm = (wrap) ? (a || b) : (a && b);
                if (!rm)
                        continue;

                gfs2_ail2_empty_one(sdp, tr);
                list_del(&tr->tr_list);
                gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
                gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
                kfree(tr);
        }

        spin_unlock(&sdp->sd_ail_lock);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * due to the fact that there is a small number of header blocks
 * associated with each log flush. The exact number can't be known until
 * flush time, so we ensure that we have just enough free blocks at all
 * times to avoid running out during a log flush.
 *
 * We no longer flush the log here, instead we wake up logd to do that
 * for us. To avoid the thundering herd and to ensure that we deal fairly
 * with queued waiters, we use an exclusive wait. This means that when we
 * get woken with enough journal space to get our reservation, we need to
 * wake the next waiter on the list.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
        unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
        unsigned wanted = blks + reserved_blks;
        DEFINE_WAIT(wait);
        int did_wait = 0;
        unsigned int free_blocks;

        if (gfs2_assert_warn(sdp, blks) ||
            gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
                return -EINVAL;
retry:
        free_blocks = atomic_read(&sdp->sd_log_blks_free);
        if (unlikely(free_blocks <= wanted)) {
                do {
                        prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        wake_up(&sdp->sd_logd_waitq);
                        did_wait = 1;
                        if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
                                io_schedule();
                        free_blocks = atomic_read(&sdp->sd_log_blks_free);
                } while(free_blocks <= wanted);
                finish_wait(&sdp->sd_log_waitq, &wait);
        }
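        /*
         * Claim the blocks without taking a lock: if another reserver changed
         * sd_log_blks_free since we sampled it, the cmpxchg fails and we
         * re-check the free space from the top.
         */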
        if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
                           free_blocks - blks) != free_blocks)
                goto retry;
        trace_gfs2_log_blocks(sdp, -blks);

        /*
         * If we waited, then so might others, wake them up _after_ we get
         * our share of the log.
         */
        if (unlikely(did_wait))
                wake_up(&sdp->sd_log_waitq);

        down_read(&sdp->sd_log_flush_lock);

        return 0;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 *   Compute the distance (in the journal direction) between two
 *   blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
                                        unsigned int older)
{
        int dist;

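        /*
         * The journal is circular: a negative difference means the newer
         * block has wrapped past the end of the journal, so add back the
         * journal length to get the forward distance.
         */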
        dist = newer - older;
        if (dist < 0)
                dist += sdp->sd_jdesc->jd_blocks;

        return dist;
}

/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex.  We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal.  So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
        unsigned int reserved = 0;
        unsigned int mbuf_limit, metabufhdrs_needed;
        unsigned int dbuf_limit, databufhdrs_needed;
        unsigned int revokes = 0;

        mbuf_limit = buf_limit(sdp);
        metabufhdrs_needed = (sdp->sd_log_commited_buf +
                              (mbuf_limit - 1)) / mbuf_limit;
        dbuf_limit = databuf_limit(sdp);
        databufhdrs_needed = (sdp->sd_log_commited_databuf +
                              (dbuf_limit - 1)) / dbuf_limit;

        if (sdp->sd_log_commited_revoke > 0)
                revokes = gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
                                          sizeof(u64));

        reserved = sdp->sd_log_commited_buf + metabufhdrs_needed +
                sdp->sd_log_commited_databuf + databufhdrs_needed +
                revokes;
        /* One for the overall header */
        if (reserved)
                reserved++;
        return reserved;
}
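
/*
 * Illustrative example using the limits quoted in the comment above
 * (buf_limit = 502, databuf_limit = 251): committing 1000 metadata buffers,
 * 300 journaled data buffers and 10 revokes needs
 *   1000 + DIV_ROUND_UP(1000, 502) = 1002 blocks for metadata,
 *    300 + DIV_ROUND_UP(300, 251)  =  302 blocks for journaled data,
 *      1 block for the revoke entries (they all fit in one descriptor),
 * plus one overall header, i.e. 1306 blocks reserved.
 */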

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
        struct gfs2_trans *tr;
        unsigned int tail;

        spin_lock(&sdp->sd_ail_lock);

        if (list_empty(&sdp->sd_ail1_list)) {
                tail = sdp->sd_log_head;
        } else {
                tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
                                tr_list);
                tail = tr->tr_first;
        }

        spin_unlock(&sdp->sd_ail_lock);

        return tail;
}

static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

        ail2_empty(sdp, new_tail);

        atomic_add(dist, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, dist);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);

        sdp->sd_log_tail = new_tail;
}

static void log_flush_wait(struct gfs2_sbd *sdp)
{
        DEFINE_WAIT(wait);

        if (atomic_read(&sdp->sd_log_in_flight)) {
                do {
                        prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&sdp->sd_log_in_flight))
                                io_schedule();
                } while(atomic_read(&sdp->sd_log_in_flight));
                finish_wait(&sdp->sd_log_flush_wait, &wait);
        }
}

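/*
 * Order the list of ordered-write inodes by on-disk block address
 * (i_no_addr) so that gfs2_ordered_write() issues writeback in disk order.
 */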
static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct gfs2_inode *ipa, *ipb;

        ipa = list_entry(a, struct gfs2_inode, i_ordered);
        ipb = list_entry(b, struct gfs2_inode, i_ordered);

        if (ipa->i_no_addr < ipb->i_no_addr)
                return -1;
        if (ipa->i_no_addr > ipb->i_no_addr)
                return 1;
        return 0;
}

static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;
        LIST_HEAD(written);

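        /*
         * sd_ordered_lock is dropped around filemap_fdatawrite(), so inodes
         * are moved to a private "written" list first and spliced back at the
         * end, leaving them available for gfs2_ordered_wait() to wait on.
         */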
        spin_lock(&sdp->sd_ordered_lock);
        list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
        while (!list_empty(&sdp->sd_log_le_ordered)) {
                ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
                list_move(&ip->i_ordered, &written);
                if (ip->i_inode.i_mapping->nrpages == 0)
                        continue;
                spin_unlock(&sdp->sd_ordered_lock);
                filemap_fdatawrite(ip->i_inode.i_mapping);
                spin_lock(&sdp->sd_ordered_lock);
        }
        list_splice(&written, &sdp->sd_log_le_ordered);
        spin_unlock(&sdp->sd_ordered_lock);
}

static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip;

        spin_lock(&sdp->sd_ordered_lock);
        while (!list_empty(&sdp->sd_log_le_ordered)) {
                ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
                list_del(&ip->i_ordered);
                WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
                if (ip->i_inode.i_mapping->nrpages == 0)
                        continue;
                spin_unlock(&sdp->sd_ordered_lock);
                filemap_fdatawait(ip->i_inode.i_mapping);
                spin_lock(&sdp->sd_ordered_lock);
        }
        spin_unlock(&sdp->sd_ordered_lock);
}

void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

        spin_lock(&sdp->sd_ordered_lock);
        if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
                list_del(&ip->i_ordered);
        spin_unlock(&sdp->sd_ordered_lock);
}

/**
 * log_write_header - Write a journal log header at the current flush head
 * @sdp: The GFS2 superblock
 * @flags: log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT), zero for a normal flush
 *
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
        struct gfs2_log_header *lh;
        unsigned int tail;
        u32 hash;
        int rw = WRITE_FLUSH_FUA | REQ_META;
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        lh = page_address(page);
        clear_page(lh);

        gfs2_ail1_empty(sdp);
        tail = current_tail(sdp);

        lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
        lh->lh_header.__pad0 = cpu_to_be64(0);
        lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
        lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
        lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
        lh->lh_flags = cpu_to_be32(flags);
        lh->lh_tail = cpu_to_be32(tail);
        lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
        hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
        lh->lh_hash = cpu_to_be32(hash);

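        /*
         * The header is normally written with WRITE_FLUSH_FUA so the device
         * orders it after the preceding log writes.  When barriers are
         * disabled, get the same ordering by explicitly waiting for the
         * ordered data and in-flight log I/O before issuing a plain sync
         * write.
         */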
        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
                gfs2_ordered_wait(sdp);
                log_flush_wait(sdp);
                rw = WRITE_SYNC | REQ_META | REQ_PRIO;
        }

        sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
        gfs2_log_write_page(sdp, page);
        gfs2_log_flush_bio(sdp, rw);
        log_flush_wait(sdp);

        if (sdp->sd_log_tail != tail)
                log_pull_tail(sdp, tail);
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
        struct gfs2_trans *tr;

        down_write(&sdp->sd_log_flush_lock);

        /* Log might have been flushed while we waited for the flush lock */
        if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
                up_write(&sdp->sd_log_flush_lock);
                return;
        }
        trace_gfs2_log_flush(sdp, 1);

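        /*
         * Detach the current incore transaction; buffers unpinned by
         * lops_after_commit() below are placed on its ail lists so the tail
         * can be advanced once they have been written back in place.
         */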
        tr = sdp->sd_log_tr;
        if (tr) {
                sdp->sd_log_tr = NULL;
                INIT_LIST_HEAD(&tr->tr_ail1_list);
                INIT_LIST_HEAD(&tr->tr_ail2_list);
        }

        if (sdp->sd_log_num_buf != sdp->sd_log_commited_buf) {
                printk(KERN_INFO "GFS2: log buf %u %u\n", sdp->sd_log_num_buf,
                       sdp->sd_log_commited_buf);
                gfs2_assert_withdraw(sdp, 0);
        }
        if (sdp->sd_log_num_databuf != sdp->sd_log_commited_databuf) {
                printk(KERN_INFO "GFS2: log databuf %u %u\n",
                       sdp->sd_log_num_databuf, sdp->sd_log_commited_databuf);
                gfs2_assert_withdraw(sdp, 0);
        }
        gfs2_assert_withdraw(sdp,
                        sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;
        if (tr)
                tr->tr_first = sdp->sd_log_flush_head;

        gfs2_ordered_write(sdp);
        lops_before_commit(sdp);
        gfs2_log_flush_bio(sdp, WRITE);

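        /*
         * Write a log header if log data was written above, or (using one of
         * the blocks held back from reservations) if the tail needs pulling
         * forward even though the flush generated no new log data.
         */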
        if (sdp->sd_log_head != sdp->sd_log_flush_head) {
                log_write_header(sdp, 0);
        } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
                atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
                trace_gfs2_log_blocks(sdp, -1);
                log_write_header(sdp, 0);
        }
        lops_after_commit(sdp, tr);

        gfs2_log_lock(sdp);
        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_blks_reserved = 0;
        sdp->sd_log_commited_buf = 0;
        sdp->sd_log_commited_databuf = 0;
        sdp->sd_log_commited_revoke = 0;

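        /*
         * If the transaction still has buffers waiting for in-place
         * writeback, keep it on the ail1 list; it will be freed later by
         * ail2_empty() once the tail passes it.  Otherwise free it now.
         */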
        spin_lock(&sdp->sd_ail_lock);
        if (tr && !list_empty(&tr->tr_ail1_list)) {
                list_add(&tr->tr_list, &sdp->sd_ail1_list);
                tr = NULL;
        }
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
        trace_gfs2_log_flush(sdp, 0);
        up_write(&sdp->sd_log_flush_lock);

        kfree(tr);
}

static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int reserved;
        unsigned int unused;

        gfs2_log_lock(sdp);

        sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
        sdp->sd_log_commited_databuf += tr->tr_num_databuf_new -
                tr->tr_num_databuf_rm;
        gfs2_assert_withdraw(sdp, (((int)sdp->sd_log_commited_buf) >= 0) ||
                             (((int)sdp->sd_log_commited_databuf) >= 0));
        sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
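
        /*
         * The transaction reserved tr_reserved blocks up front; recompute
         * what is actually needed now and return the surplus to the pool of
         * free log blocks.
         */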
        reserved = calc_reserved(sdp);
        gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved);
        unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved;
        atomic_add(unused, &sdp->sd_log_blks_free);
        trace_gfs2_log_blocks(sdp, unused);
        gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
                             sdp->sd_jdesc->jd_blocks);
        sdp->sd_log_blks_reserved = reserved;

        if (sdp->sd_log_tr == NULL &&
            (tr->tr_num_buf_new || tr->tr_num_databuf_new)) {
                gfs2_assert_withdraw(sdp, tr->tr_t_gh.gh_gl);
                sdp->sd_log_tr = tr;
                tr->tr_attached = 1;
        }
        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
 * or the total number of used blocks (pinned blocks plus AIL blocks)
 * is greater than thresh2.
 *
 * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
 * journal size.
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        log_refund(sdp, tr);

        if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
            ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
            atomic_read(&sdp->sd_log_thresh2)))
                wake_up(&sdp->sd_logd_waitq);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
        down_write(&sdp->sd_log_flush_lock);

        gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;

        log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);

        gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
        gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
        gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_tail = sdp->sd_log_head;

        up_write(&sdp->sd_log_flush_lock);
}

/**
 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
 *
 */

void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
        gfs2_log_flush(sdp, NULL);
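
        /*
         * Keep starting ail1 writeback and waiting for it until every ail1
         * buffer has been written back in place, then flush the log again so
         * a further header can record the advanced tail.
         */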
        for (;;) {
                gfs2_ail1_start(sdp);
                gfs2_ail1_wait(sdp);
                if (gfs2_ail1_empty(sdp))
                        break;
        }
        gfs2_log_flush(sdp, NULL);
}

static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
{
        return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1));
}

static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
{
        unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
        return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
}

/**
 * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
 * @sdp: Pointer to GFS2 superblock
 *
 * The daemon flushes the log and writes back AIL buffers either periodically
 * (every gt_logd_secs) or when woken because the pinned-block or used-block
 * thresholds have been crossed.
 */

int gfs2_logd(void *data)
{
        struct gfs2_sbd *sdp = data;
        unsigned long t = 1;
        DEFINE_WAIT(wait);

        while (!kthread_should_stop()) {

                if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL);
                }

                if (gfs2_ail_flush_reqd(sdp)) {
                        gfs2_ail1_start(sdp);
                        gfs2_ail1_wait(sdp);
                        gfs2_ail1_empty(sdp);
                        gfs2_log_flush(sdp, NULL);
                }

                if (!gfs2_ail_flush_reqd(sdp))
                        wake_up(&sdp->sd_log_waitq);

                t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;

                try_to_freeze();

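                /*
                 * Sleep until the next periodic flush is due (gt_logd_secs)
                 * or until woken on sd_logd_waitq because one of the
                 * thresholds has been crossed.
                 */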
                do {
                        prepare_to_wait(&sdp->sd_logd_waitq, &wait,
                                        TASK_INTERRUPTIBLE);
                        if (!gfs2_ail_flush_reqd(sdp) &&
                            !gfs2_jrnl_flush_reqd(sdp) &&
                            !kthread_should_stop())
                                t = schedule_timeout(t);
                } while(t && !gfs2_ail_flush_reqd(sdp) &&
                        !gfs2_jrnl_flush_reqd(sdp) &&
                        !kthread_should_stop());
                finish_wait(&sdp->sd_logd_waitq, &wait);
        }

        return 0;
}