/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/backing-dev.h>
#include <linux/bug.h>
#include <linux/module.h>

static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
static void __jbd2_journal_unfile_buffer(struct journal_head *jh);

static struct kmem_cache *transaction_cache;
int __init jbd2_journal_init_transaction_cache(void)
{
        J_ASSERT(!transaction_cache);
        transaction_cache = kmem_cache_create("jbd2_transaction_s",
                                        sizeof(transaction_t),
                                        0,
                                        SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
                                        NULL);
        if (transaction_cache)
                return 0;
        return -ENOMEM;
}

void jbd2_journal_destroy_transaction_cache(void)
{
        if (transaction_cache) {
                kmem_cache_destroy(transaction_cache);
                transaction_cache = NULL;
        }
}

void jbd2_journal_free_transaction(transaction_t *transaction)
{
        if (unlikely(ZERO_OR_NULL_PTR(transaction)))
                return;
        kmem_cache_free(transaction_cache, transaction);
}

/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply allocate and initialise a new transaction.  Create it in
 * RUNNING state and add it to the current journal (which should not
 * have an existing running transaction: we only make a new transaction
 * once we have started to commit the old one).
 *
 * Preconditions:
 *      The journal MUST be locked.  We don't perform atomic mallocs on the
 *      new transaction and we can't block without protecting against other
 *      processes trying to touch the journal while it is in transition.
 */

static transaction_t *
jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
{
        transaction->t_journal = journal;
        transaction->t_state = T_RUNNING;
        transaction->t_start_time = ktime_get();
        transaction->t_tid = journal->j_transaction_sequence++;
        transaction->t_expires = jiffies + journal->j_commit_interval;
        spin_lock_init(&transaction->t_handle_lock);
        atomic_set(&transaction->t_updates, 0);
        atomic_set(&transaction->t_outstanding_credits, 0);
        atomic_set(&transaction->t_handle_count, 0);
        INIT_LIST_HEAD(&transaction->t_inode_list);
        INIT_LIST_HEAD(&transaction->t_private_list);

        /* Set up the commit timer for the new transaction. */
        journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
        add_timer(&journal->j_commit_timer);

        J_ASSERT(journal->j_running_transaction == NULL);
        journal->j_running_transaction = transaction;
        transaction->t_max_wait = 0;
        transaction->t_start = jiffies;

        return transaction;
}

/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */
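
/*
 * Illustrative sketch of the handle lifecycle as a filesystem client
 * might drive it (not code from this file; the minimal error handling
 * shown is one possible pattern, not the only correct one):
 *
 *	handle_t *handle = jbd2_journal_start(journal, 1);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (!err) {
 *		... modify bh->b_data ...
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 *	}
 *	jbd2_journal_stop(handle);
 */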

/*
 * Update transaction's maximum wait time, if debugging is enabled.
 *
 * In order for t_max_wait to be reliable, it must be protected by a
 * lock.  But doing so will mean that start_this_handle() can not be
 * run in parallel on SMP systems, which limits our scalability.  So
 * unless debugging is enabled, we no longer update t_max_wait, which
 * means that maximum wait time reported by the jbd2_run_stats
 * tracepoint will always be zero.
 */
static inline void update_t_max_wait(transaction_t *transaction,
                                     unsigned long ts)
{
#ifdef CONFIG_JBD2_DEBUG
        if (jbd2_journal_enable_debug &&
            time_after(transaction->t_start, ts)) {
                ts = jbd2_time_diff(ts, transaction->t_start);
                spin_lock(&transaction->t_handle_lock);
                if (ts > transaction->t_max_wait)
                        transaction->t_max_wait = ts;
                spin_unlock(&transaction->t_handle_lock);
        }
#endif
}

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */

static int start_this_handle(journal_t *journal, handle_t *handle,
                             gfp_t gfp_mask)
{
        transaction_t   *transaction, *new_transaction = NULL;
        tid_t           tid;
        int             needed, need_to_start;
        int             nblocks = handle->h_buffer_credits;
        unsigned long ts = jiffies;

        if (nblocks > journal->j_max_transaction_buffers) {
                printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n",
                       current->comm, nblocks,
                       journal->j_max_transaction_buffers);
                return -ENOSPC;
        }

alloc_transaction:
        if (!journal->j_running_transaction) {
                new_transaction = kmem_cache_zalloc(transaction_cache,
                                                    gfp_mask);
                if (!new_transaction) {
                        /*
                         * If __GFP_FS is not present, then we may be
                         * being called from inside the fs writeback
                         * layer, so we MUST NOT fail.  Since
                         * __GFP_NOFAIL is going away, we will arrange
                         * to retry the allocation ourselves.
                         */
                        if ((gfp_mask & __GFP_FS) == 0) {
                                congestion_wait(BLK_RW_ASYNC, HZ/50);
                                goto alloc_transaction;
                        }
                        return -ENOMEM;
                }
        }

        jbd_debug(3, "New handle %p going live.\n", handle);

        /*
         * We need to hold j_state_lock until t_updates has been incremented,
         * for proper journal barrier handling
         */
repeat:
        read_lock(&journal->j_state_lock);
        BUG_ON(journal->j_flags & JBD2_UNMOUNT);
        if (is_journal_aborted(journal) ||
            (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
                read_unlock(&journal->j_state_lock);
                jbd2_journal_free_transaction(new_transaction);
                return -EROFS;
        }

        /* Wait on the journal's transaction barrier if necessary */
        if (journal->j_barrier_count) {
                read_unlock(&journal->j_state_lock);
                wait_event(journal->j_wait_transaction_locked,
                                journal->j_barrier_count == 0);
                goto repeat;
        }

        if (!journal->j_running_transaction) {
                read_unlock(&journal->j_state_lock);
                if (!new_transaction)
                        goto alloc_transaction;
                write_lock(&journal->j_state_lock);
                if (!journal->j_running_transaction) {
                        jbd2_get_transaction(journal, new_transaction);
                        new_transaction = NULL;
                }
                write_unlock(&journal->j_state_lock);
                goto repeat;
        }

        transaction = journal->j_running_transaction;

        /*
         * If the current transaction is locked down for commit, wait for the
         * lock to be released.
         */
        if (transaction->t_state == T_LOCKED) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_transaction_locked,
                                        &wait, TASK_UNINTERRUPTIBLE);
                read_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_transaction_locked, &wait);
                goto repeat;
        }

        /*
         * If there is not enough space left in the log to write all potential
         * buffers requested by this operation, we need to stall pending a log
         * checkpoint to free some more log space.
         */
        needed = atomic_add_return(nblocks,
                                   &transaction->t_outstanding_credits);

        if (needed > journal->j_max_transaction_buffers) {
                /*
                 * If the current transaction is already too large, then start
                 * to commit it: we can then go back and attach this handle to
                 * a new transaction.
                 */
                DEFINE_WAIT(wait);

                jbd_debug(2, "Handle %p starting new commit...\n", handle);
                atomic_sub(nblocks, &transaction->t_outstanding_credits);
                prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
                                TASK_UNINTERRUPTIBLE);
                tid = transaction->t_tid;
                need_to_start = !tid_geq(journal->j_commit_request, tid);
                read_unlock(&journal->j_state_lock);
                if (need_to_start)
                        jbd2_log_start_commit(journal, tid);
                schedule();
                finish_wait(&journal->j_wait_transaction_locked, &wait);
                goto repeat;
        }

        /*
         * The commit code assumes that it can get enough log space
         * without forcing a checkpoint.  This is *critical* for
         * correctness: a checkpoint of a buffer which is also
         * associated with a committing transaction creates a deadlock,
         * so commit simply cannot force through checkpoints.
         *
         * We must therefore ensure the necessary space in the journal
         * *before* starting to dirty potentially checkpointed buffers
         * in the new transaction.
         *
         * The worst part is, any transaction currently committing can
         * reduce the free space arbitrarily.  Be careful to account for
         * those buffers when checkpointing.
         */

        /*
         * @@@ AKPM: This seems rather over-defensive.  We're giving commit
         * a _lot_ of headroom: 1/4 of the journal plus the size of
         * the committing transaction.  Really, we only need to give it
         * committing_transaction->t_outstanding_credits plus "enough" for
         * the log control blocks.
         * Also, this test is inconsistent with the matching one in
         * jbd2_journal_extend().
         */
        if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
                jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
                atomic_sub(nblocks, &transaction->t_outstanding_credits);
                read_unlock(&journal->j_state_lock);
                write_lock(&journal->j_state_lock);
                if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
                        __jbd2_log_wait_for_space(journal);
                write_unlock(&journal->j_state_lock);
                goto repeat;
        }

        /*
         * OK, account for the buffers that this operation expects to
         * use and add the handle to the running transaction.
         */
        update_t_max_wait(transaction, ts);
        handle->h_transaction = transaction;
        atomic_inc(&transaction->t_updates);
        atomic_inc(&transaction->t_handle_count);
        jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
                  handle, nblocks,
                  atomic_read(&transaction->t_outstanding_credits),
                  __jbd2_log_space_left(journal));
        read_unlock(&journal->j_state_lock);

        lock_map_acquire(&handle->h_lockdep_map);
        jbd2_journal_free_transaction(new_transaction);
        return 0;
}

static struct lock_class_key jbd2_handle_key;

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
        handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
        if (!handle)
                return NULL;
        memset(handle, 0, sizeof(*handle));
        handle->h_buffer_credits = nblocks;
        handle->h_ref = 1;

        lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle",
                                                &jbd2_handle_key, 0);

        return handle;
}

/**
 * handle_t *jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffers we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space.
 *
 * This function is visible to journal users (like ext3fs), so is not
 * called with the journal already locked.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask)
{
        handle_t *handle = journal_current_handle();
        int err;

        if (!journal)
                return ERR_PTR(-EROFS);

        if (handle) {
                J_ASSERT(handle->h_transaction->t_journal == journal);
                handle->h_ref++;
                return handle;
        }

        handle = new_handle(nblocks);
        if (!handle)
                return ERR_PTR(-ENOMEM);

        current->journal_info = handle;

        err = start_this_handle(journal, handle, gfp_mask);
        if (err < 0) {
                jbd2_free_handle(handle);
                current->journal_info = NULL;
                handle = ERR_PTR(err);
        }
        return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);


handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
        return jbd2__journal_start(journal, nblocks, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_start);
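
/*
 * Usage note (sketch): jbd2_journal_start() above is simply the
 * GFP_NOFS shorthand.  A caller that must also avoid recursing into
 * the block layer during the transaction allocation could, in
 * principle, pass a more restrictive mask to the double-underscore
 * variant, e.g.
 *
 *	handle = jbd2__journal_start(journal, nblocks, GFP_NOIO);
 */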


/**
 * int jbd2_journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee that allocation - this is a best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int result;
        int wanted;

        result = -EIO;
        if (is_handle_aborted(handle))
                goto out;

        result = 1;

        read_lock(&journal->j_state_lock);

        /* Don't extend a locked-down transaction! */
        if (handle->h_transaction->t_state != T_RUNNING) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction not running\n", handle, nblocks);
                goto error_out;
        }

        spin_lock(&transaction->t_handle_lock);
        wanted = atomic_read(&transaction->t_outstanding_credits) + nblocks;

        if (wanted > journal->j_max_transaction_buffers) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction too large\n", handle, nblocks);
                goto unlock;
        }

        if (wanted > __jbd2_log_space_left(journal)) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "insufficient log space\n", handle, nblocks);
                goto unlock;
        }

        handle->h_buffer_credits += nblocks;
        atomic_add(nblocks, &transaction->t_outstanding_credits);
        result = 0;

        jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
        spin_unlock(&transaction->t_handle_lock);
error_out:
        read_unlock(&journal->j_state_lock);
out:
        return result;
}


/**
 * int jbd2_journal_restart() - restart a handle.
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits.
 */
int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        tid_t           tid;
        int             need_to_start, ret;

        /* If we've had an abort of any type, don't even think about
         * actually doing the restart! */
        if (is_handle_aborted(handle))
                return 0;

        /*
         * First unlink the handle from its current transaction, and start the
         * commit on that.
         */
        J_ASSERT(atomic_read(&transaction->t_updates) > 0);
        J_ASSERT(journal_current_handle() == handle);

        read_lock(&journal->j_state_lock);
        spin_lock(&transaction->t_handle_lock);
        atomic_sub(handle->h_buffer_credits,
                   &transaction->t_outstanding_credits);
        if (atomic_dec_and_test(&transaction->t_updates))
                wake_up(&journal->j_wait_updates);
        spin_unlock(&transaction->t_handle_lock);

        jbd_debug(2, "restarting handle %p\n", handle);
        tid = transaction->t_tid;
        need_to_start = !tid_geq(journal->j_commit_request, tid);
        read_unlock(&journal->j_state_lock);
        if (need_to_start)
                jbd2_log_start_commit(journal, tid);

        lock_map_release(&handle->h_lockdep_map);
        handle->h_buffer_credits = nblocks;
        ret = start_this_handle(journal, handle, gfp_mask);
        return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);


int jbd2_journal_restart(handle_t *handle, int nblocks)
{
        return jbd2__journal_restart(handle, nblocks, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);
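
/*
 * A common caller pattern (illustrative sketch, assuming the caller is
 * at a point where a restart is safe, i.e. its updates so far are
 * self-consistent): try to extend the running handle, and fall back to
 * a restart on the documented transaction-full (> 0) return:
 *
 *	err = jbd2_journal_extend(handle, nblocks);
 *	if (err > 0)
 *		err = jbd2_journal_restart(handle, nblocks);
 */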

/**
 * void jbd2_journal_lock_updates() - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
        DEFINE_WAIT(wait);

        write_lock(&journal->j_state_lock);
        ++journal->j_barrier_count;

        /* Wait until there are no running updates */
        while (1) {
                transaction_t *transaction = journal->j_running_transaction;

                if (!transaction)
                        break;

                spin_lock(&transaction->t_handle_lock);
                prepare_to_wait(&journal->j_wait_updates, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (!atomic_read(&transaction->t_updates)) {
                        spin_unlock(&transaction->t_handle_lock);
                        finish_wait(&journal->j_wait_updates, &wait);
                        break;
                }
                spin_unlock(&transaction->t_handle_lock);
                write_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_updates, &wait);
                write_lock(&journal->j_state_lock);
        }
        write_unlock(&journal->j_state_lock);

        /*
         * We have now established a barrier against other normal updates, but
         * we also need to barrier against other jbd2_journal_lock_updates() calls
         * to make sure that we serialise special journal-locked operations
         * too.
         */
        mutex_lock(&journal->j_barrier);
}

/**
 * void jbd2_journal_unlock_updates() - release barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates(journal_t *journal)
{
        J_ASSERT(journal->j_barrier_count != 0);

        mutex_unlock(&journal->j_barrier);
        write_lock(&journal->j_state_lock);
        --journal->j_barrier_count;
        write_unlock(&journal->j_state_lock);
        wake_up(&journal->j_wait_transaction_locked);
}
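
/*
 * Sketch of barrier usage (illustrative): an operation that needs the
 * journal quiesced, such as an online resize or a filesystem freeze,
 * brackets its work with the lock/unlock pair:
 *
 *	jbd2_journal_lock_updates(journal);
 *	... no new handles can start, no updates are running ...
 *	jbd2_journal_unlock_updates(journal);
 */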

static void warn_dirty_buffer(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_WARNING
               "JBD2: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
               "There's a risk of filesystem corruption in case of system "
               "crash.\n",
               bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
}

/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
                        int force_copy)
{
        struct buffer_head *bh;
        transaction_t *transaction;
        journal_t *journal;
        int error;
        char *frozen_buffer = NULL;
        int need_copy = 0;

        if (is_handle_aborted(handle))
                return -EROFS;

        transaction = handle->h_transaction;
        journal = transaction->t_journal;

        jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);

        JBUFFER_TRACE(jh, "entry");
repeat:
        bh = jh2bh(jh);

        /* @@@ Need to check for errors here at some point. */

        lock_buffer(bh);
        jbd_lock_bh_state(bh);

        /* We now hold the buffer lock so it is safe to query the buffer
         * state.  Is the buffer dirty?
         *
         * If so, there are two possibilities.  The buffer may be
         * non-journaled, and undergoing a quite legitimate writeback.
         * Otherwise, it is journaled, and we don't expect dirty buffers
         * in that state (the buffers should be marked JBD_Dirty
         * instead.)  So either the IO is being done under our own
         * control and this is a bug, or it's a third party IO such as
         * dump(8) (which may leave the buffer scheduled for read ---
         * ie. locked but not dirty) or tune2fs (which may actually have
         * the buffer dirtied, ugh.)  */

        if (buffer_dirty(bh)) {
                /*
                 * First question: is this buffer already part of the current
                 * transaction or the existing committing transaction?
                 */
                if (jh->b_transaction) {
                        J_ASSERT_JH(jh,
                                jh->b_transaction == transaction ||
                                jh->b_transaction ==
                                        journal->j_committing_transaction);
                        if (jh->b_next_transaction)
                                J_ASSERT_JH(jh, jh->b_next_transaction ==
                                                        transaction);
                        warn_dirty_buffer(bh);
                }
                /*
                 * In any case we need to clean the dirty flag and we must
                 * do it under the buffer lock to be sure we don't race
                 * with running write-out.
                 */
                JBUFFER_TRACE(jh, "Journalling dirty buffer");
                clear_buffer_dirty(bh);
                set_buffer_jbddirty(bh);
        }

        unlock_buffer(bh);

        error = -EROFS;
        if (is_handle_aborted(handle)) {
                jbd_unlock_bh_state(bh);
                goto out;
        }
        error = 0;

        /*
         * The buffer is already part of this transaction if b_transaction or
         * b_next_transaction points to it
         */
        if (jh->b_transaction == transaction ||
            jh->b_next_transaction == transaction)
                goto done;

        /*
         * this is the first time this transaction is touching this buffer,
         * reset the modified flag
         */
        jh->b_modified = 0;

        /*
         * If there is already a copy-out version of this buffer, then we don't
         * need to make another one
         */
        if (jh->b_frozen_data) {
                JBUFFER_TRACE(jh, "has frozen data");
                J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                jh->b_next_transaction = transaction;
                goto done;
        }

        /* Is there data here we need to preserve? */

        if (jh->b_transaction && jh->b_transaction != transaction) {
                JBUFFER_TRACE(jh, "owned by older transaction");
                J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                J_ASSERT_JH(jh, jh->b_transaction ==
                                        journal->j_committing_transaction);

                /* There is one case we have to be very careful about.
                 * If the committing transaction is currently writing
                 * this buffer out to disk and has NOT made a copy-out,
                 * then we cannot modify the buffer contents at all
                 * right now.  The essence of copy-out is that it is the
                 * extra copy, not the primary copy, which gets
                 * journaled.  If the primary copy is already going to
                 * disk then we cannot do copy-out here. */

                if (jh->b_jlist == BJ_Shadow) {
                        DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
                        wait_queue_head_t *wqh;

                        wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);

                        JBUFFER_TRACE(jh, "on shadow: sleep");
                        jbd_unlock_bh_state(bh);
                        /* commit wakes up all shadow buffers after IO */
                        for ( ; ; ) {
                                prepare_to_wait(wqh, &wait.wait,
                                                TASK_UNINTERRUPTIBLE);
                                if (jh->b_jlist != BJ_Shadow)
                                        break;
                                schedule();
                        }
                        finish_wait(wqh, &wait.wait);
                        goto repeat;
                }

                /* Only do the copy if the currently-owning transaction
                 * still needs it.  If it is on the Forget list, the
                 * committing transaction is past that stage.  The
                 * buffer had better remain locked during the kmalloc,
                 * but that should be true --- we hold the journal lock
                 * still and the buffer is already on the BUF_JOURNAL
                 * list so won't be flushed.
                 *
                 * Subtle point, though: if this is a get_undo_access,
                 * then we will be relying on the frozen_data to contain
                 * the new value of the committed_data record after the
                 * transaction, so we HAVE to force the frozen_data copy
                 * in that case. */

                if (jh->b_jlist != BJ_Forget || force_copy) {
                        JBUFFER_TRACE(jh, "generate frozen data");
                        if (!frozen_buffer) {
                                JBUFFER_TRACE(jh, "allocate memory for buffer");
                                jbd_unlock_bh_state(bh);
                                frozen_buffer =
                                        jbd2_alloc(jh2bh(jh)->b_size,
                                                   GFP_NOFS);
                                if (!frozen_buffer) {
                                        printk(KERN_EMERG
                                               "%s: OOM for frozen_buffer\n",
                                               __func__);
                                        JBUFFER_TRACE(jh, "oom!");
                                        error = -ENOMEM;
                                        jbd_lock_bh_state(bh);
                                        goto done;
                                }
                                goto repeat;
                        }
                        jh->b_frozen_data = frozen_buffer;
                        frozen_buffer = NULL;
                        need_copy = 1;
                }
                jh->b_next_transaction = transaction;
        }


        /*
         * Finally, if the buffer is not journaled right now, we need to make
         * sure it doesn't get written to disk before the caller actually
         * commits the new data
         */
        if (!jh->b_transaction) {
                JBUFFER_TRACE(jh, "no transaction");
                J_ASSERT_JH(jh, !jh->b_next_transaction);
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                spin_lock(&journal->j_list_lock);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
                spin_unlock(&journal->j_list_lock);
        }

done:
        if (need_copy) {
                struct page *page;
                int offset;
                char *source;

                J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
                            "Possible IO failure.\n");
                page = jh2bh(jh)->b_page;
                offset = offset_in_page(jh2bh(jh)->b_data);
                source = kmap_atomic(page);
                /* Fire data frozen trigger just before we copy the data */
                jbd2_buffer_frozen_trigger(jh, source + offset,
                                           jh->b_triggers);
                memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
                kunmap_atomic(source);

                /*
                 * Now that the frozen data is saved off, we need to store
                 * any matching triggers.
                 */
                jh->b_frozen_triggers = jh->b_triggers;
        }
        jbd_unlock_bh_state(bh);

        /*
         * If we are about to journal a buffer, then any revoke pending on it is
         * no longer valid
         */
        jbd2_journal_cancel_revoke(handle, jh);

out:
        if (unlikely(frozen_buffer))    /* It's usually NULL */
                jbd2_free(frozen_buffer, bh->b_size);

        JBUFFER_TRACE(jh, "exit");
        return error;
}

/**
 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh:     bh to be used for metadata writes
 *
 * Returns an error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're write()ing a buffer which is also part of a shared mapping.
 */

int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int rc;

        /* We do not want to get caught playing with fields which the
         * log thread also manipulates.  Make sure that the buffer
         * completes any outstanding IO before proceeding. */
        rc = do_get_write_access(handle, jh, 0);
        jbd2_journal_put_journal_head(jh);
        return rc;
}


/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * int jbd2_journal_get_create_access() - notify intent to use newly created bh
 * @handle: transaction to add the new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int err;

        jbd_debug(5, "journal_head %p\n", jh);
        err = -EROFS;
        if (is_handle_aborted(handle))
                goto out;
        err = 0;

        JBUFFER_TRACE(jh, "entry");
        /*
         * The buffer may already belong to this transaction due to pre-zeroing
         * in the filesystem's new_block code.  It may also be on the previous,
         * committing transaction's lists, but it HAS to be in Forget state in
         * that case: the transaction must have deleted the buffer for it to be
         * reused here.
         */
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
        J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
                jh->b_transaction == NULL ||
                (jh->b_transaction == journal->j_committing_transaction &&
                          jh->b_jlist == BJ_Forget)));

        J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
        J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

        if (jh->b_transaction == NULL) {
                /*
                 * Previous jbd2_journal_forget() could have left the buffer
                 * with jbddirty bit set because it was being committed. When
                 * the commit finished, we've filed the buffer for
                 * checkpointing and marked it dirty. Now we are reallocating
                 * the buffer so the transaction freeing it must have
                 * committed and so it's safe to clear the dirty bit.
                 */
                clear_buffer_dirty(jh2bh(jh));
                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
        } else if (jh->b_transaction == journal->j_committing_transaction) {
                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "set next transaction");
                jh->b_next_transaction = transaction;
        }
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);

        /*
         * akpm: I added this.  ext3_alloc_branch can pick up new indirect
         * blocks which contain freed but then revoked metadata.  We need
         * to cancel the revoke in case we end up freeing it yet again
         * and then reallocating it as data - this would cause a second revoke,
         * which hits an assertion error.
         */
        JBUFFER_TRACE(jh, "cancelling revoke");
        jbd2_journal_cancel_revoke(handle, jh);
out:
        jbd2_journal_put_journal_head(jh);
        return err;
}
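
/*
 * Illustrative sketch (helper names and error handling assumed, not
 * defined here): a filesystem allocating a fresh metadata block would
 * typically do something like
 *
 *	bh = sb_getblk(sb, blocknr);
 *	lock_buffer(bh);
 *	err = jbd2_journal_get_create_access(handle, bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	err = jbd2_journal_dirty_metadata(handle, bh);
 */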

/**
 * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space, we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
        int err;
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        char *committed_data = NULL;

        JBUFFER_TRACE(jh, "entry");

        /*
         * Do this first --- it can drop the journal lock, so we want to
         * make sure that obtaining the committed_data is done
         * atomically wrt. completion of any outstanding commits.
         */
        err = do_get_write_access(handle, jh, 1);
        if (err)
                goto out;

repeat:
        if (!jh->b_committed_data) {
                committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
                if (!committed_data) {
                        printk(KERN_EMERG "%s: No memory for committed data\n",
                                __func__);
                        err = -ENOMEM;
                        goto out;
                }
        }

        jbd_lock_bh_state(bh);
        if (!jh->b_committed_data) {
                /* Copy out the current buffer contents into the
                 * preserved, committed copy. */
                JBUFFER_TRACE(jh, "generate b_committed data");
                if (!committed_data) {
                        jbd_unlock_bh_state(bh);
                        goto repeat;
                }

                jh->b_committed_data = committed_data;
                committed_data = NULL;
                memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
        }
        jbd_unlock_bh_state(bh);
out:
        jbd2_journal_put_journal_head(jh);
        if (unlikely(committed_data))
                jbd2_free(committed_data, bh->b_size);
        return err;
}
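
/*
 * Sketch of an undo-access caller (illustrative, in the spirit of the
 * bitmap example above; bitmap_bh is an assumed name): before clearing
 * bits that a not-yet-committed delete may still depend on, take undo
 * access so the committed copy is preserved:
 *
 *	err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *	... clear bits in bitmap_bh->b_data ...
 *	err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 */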

/**
 * void jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head.  This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
                               struct jbd2_buffer_trigger_type *type)
{
        struct journal_head *jh = bh2jh(bh);

        jh->b_triggers = type;
}
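
/*
 * Illustrative sketch: a client attaches triggers so it can, for
 * example, recompute a block checksum just before commit writeout.
 * The t_frozen/t_abort members are the real jbd2 callbacks (see their
 * invocation below); the callback bodies here are assumed:
 *
 *	static struct jbd2_buffer_trigger_type my_triggers = {
 *		.t_frozen = my_frozen_cb,
 *		.t_abort  = my_abort_cb,
 *	};
 *	jbd2_journal_set_triggers(bh, &my_triggers);
 */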

void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
                                struct jbd2_buffer_trigger_type *triggers)
{
        struct buffer_head *bh = jh2bh(jh);

        if (!triggers || !triggers->t_frozen)
                return;

        triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
}

void jbd2_buffer_abort_trigger(struct journal_head *jh,
                               struct jbd2_buffer_trigger_type *triggers)
{
        if (!triggers || !triggers->t_abort)
                return;

        triggers->t_abort(triggers, jh2bh(jh));
}


/**
 * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * Mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer must have previously had jbd2_journal_get_write_access()
 * called so that it has a valid journal_head attached to the buffer
 * head.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh = bh2jh(bh);
        int ret = 0;

        jbd_debug(5, "journal_head %p\n", jh);
        JBUFFER_TRACE(jh, "entry");
        if (is_handle_aborted(handle))
                goto out;
        if (!buffer_jbd(bh)) {
                ret = -EUCLEAN;
                goto out;
        }

        jbd_lock_bh_state(bh);

        if (jh->b_modified == 0) {
                /*
                 * This buffer has been modified and is becoming part
                 * of the transaction.  This needs to be done once
                 * per transaction -bzzz
                 */
                jh->b_modified = 1;
                J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
                handle->h_buffer_credits--;
        }

        /*
         * fastpath, to avoid expensive locking.  If this buffer is already
         * on the running transaction's metadata list there is nothing to do.
         * Nobody can take it off again because there is a handle open.
         * I _think_ we're OK here with SMP barriers - a mistaken decision will
         * result in this test being false, so we go in and take the locks.
         */
        if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
                JBUFFER_TRACE(jh, "fastpath");
                if (unlikely(jh->b_transaction !=
                             journal->j_running_transaction)) {
                        printk(KERN_EMERG "JBD: %s: "
                               "jh->b_transaction (%llu, %p, %u) != "
                               "journal->j_running_transaction (%p, %u)",
                               journal->j_devname,
                               (unsigned long long) bh->b_blocknr,
                               jh->b_transaction,
                               jh->b_transaction ? jh->b_transaction->t_tid : 0,
                               journal->j_running_transaction,
                               journal->j_running_transaction ?
                               journal->j_running_transaction->t_tid : 0);
                        ret = -EINVAL;
                }
                goto out_unlock_bh;
        }

        set_buffer_jbddirty(bh);

        /*
         * Metadata already on the current transaction list doesn't
         * need to be filed.  Metadata on another transaction's list must
         * be committing, and will be refiled once the commit completes:
         * leave it alone for now.
         */
        if (jh->b_transaction != transaction) {
                JBUFFER_TRACE(jh, "already on other transaction");
                if (unlikely(jh->b_transaction !=
                             journal->j_committing_transaction)) {
                        printk(KERN_EMERG "JBD: %s: "
                               "jh->b_transaction (%llu, %p, %u) != "
                               "journal->j_committing_transaction (%p, %u)",
                               journal->j_devname,
                               (unsigned long long) bh->b_blocknr,
                               jh->b_transaction,
                               jh->b_transaction ? jh->b_transaction->t_tid : 0,
                               journal->j_committing_transaction,
                               journal->j_committing_transaction ?
                               journal->j_committing_transaction->t_tid : 0);
                        ret = -EINVAL;
                }
                if (unlikely(jh->b_next_transaction != transaction)) {
                        printk(KERN_EMERG "JBD: %s: "
                               "jh->b_next_transaction (%llu, %p, %u) != "
                               "transaction (%p, %u)",
                               journal->j_devname,
                               (unsigned long long) bh->b_blocknr,
                               jh->b_next_transaction,
                               jh->b_next_transaction ?
                               jh->b_next_transaction->t_tid : 0,
                               transaction, transaction->t_tid);
                        ret = -EINVAL;
                }
                /* And this case is illegal: we can't reuse another
                 * transaction's data buffer, ever. */
                goto out_unlock_bh;
        }

        /* That test should have eliminated the following case: */
        J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

        JBUFFER_TRACE(jh, "file as BJ_Metadata");
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
        spin_unlock(&journal->j_list_lock);
out_unlock_bh:
        jbd_unlock_bh_state(bh);
out:
        JBUFFER_TRACE(jh, "exit");
        WARN_ON(ret);   /* All errors are bugs, so dump the stack */
        return ret;
}
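
/*
 * Note on credit accounting (sketch): the first
 * jbd2_journal_dirty_metadata() call on a given buffer in a given
 * transaction consumes one of the credits reserved at
 * jbd2_journal_start() time (the h_buffer_credits-- above), so a
 * handle started with nblocks == 2 can dirty at most two distinct
 * buffers:
 *
 *	handle = jbd2_journal_start(journal, 2);
 *	jbd2_journal_get_write_access(handle, bh1);
 *	jbd2_journal_dirty_metadata(handle, bh1);	// credits 2 -> 1
 *	jbd2_journal_get_write_access(handle, bh2);
 *	jbd2_journal_dirty_metadata(handle, bh2);	// credits 1 -> 0
 */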

/*
 * jbd2_journal_release_buffer: undo a get_write_access without any buffer
 * updates, if the update decided in the end that it didn't need access.
 */
void
jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
{
        BUFFER_TRACE(bh, "entry");
}

/**
 * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh:     bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer.  If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable.  Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh;
        int drop_reserve = 0;
        int err = 0;
        int was_modified = 0;

        BUFFER_TRACE(bh, "entry");

        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);

        if (!buffer_jbd(bh))
                goto not_jbd;
        jh = bh2jh(bh);

        /* Critical error: attempting to delete a bitmap buffer, maybe?
         * Don't do any jbd operations, and return an error. */
        if (!J_EXPECT_JH(jh, !jh->b_committed_data,
                         "inconsistent data on disk")) {
                err = -EIO;
                goto not_jbd;
        }

        /* keep track of whether or not this transaction modified us */
        was_modified = jh->b_modified;

        /*
         * The buffer's going from the transaction, we must drop
         * all references -bzzz
         */
        jh->b_modified = 0;

        if (jh->b_transaction == handle->h_transaction) {
                J_ASSERT_JH(jh, !jh->b_frozen_data);

                /* If we are forgetting a buffer which is already part
                 * of this transaction, then we can just drop it from
                 * the transaction immediately. */
                clear_buffer_dirty(bh);
                clear_buffer_jbddirty(bh);

                JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

                /*
                 * we only want to drop a reference if this transaction
                 * modified the buffer
                 */
                if (was_modified)
                        drop_reserve = 1;

                /*
                 * We are no longer going to journal this buffer.
                 * However, the commit of this transaction is still
                 * important to the buffer: the delete that we are now
                 * processing might obsolete an old log entry, so by
                 * committing, we can satisfy the buffer's checkpoint.
                 *
                 * So, if we have a checkpoint on the buffer, we should
                 * now refile the buffer on our BJ_Forget list so that
                 * we know to remove the checkpoint after we commit.
                 */

                if (jh->b_cp_transaction) {
                        __jbd2_journal_temp_unlink_buffer(jh);
                        __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                } else {
                        __jbd2_journal_unfile_buffer(jh);
                        if (!buffer_jbd(bh)) {
                                spin_unlock(&journal->j_list_lock);
                                jbd_unlock_bh_state(bh);
                                __bforget(bh);
                                goto drop;
                        }
                }
        } else if (jh->b_transaction) {
                J_ASSERT_JH(jh, (jh->b_transaction ==
                                 journal->j_committing_transaction));
                /* However, if the buffer is still owned by a prior
                 * (committing) transaction, we can't drop it yet... */
                JBUFFER_TRACE(jh, "belongs to older transaction");
                /* ... but we CAN drop it from the new transaction if we
                 * have also modified it since the original commit. */

                if (jh->b_next_transaction) {
                        J_ASSERT(jh->b_next_transaction == transaction);
                        jh->b_next_transaction = NULL;

                        /*
                         * only drop a reference if this transaction modified
                         * the buffer
                         */
                        if (was_modified)
                                drop_reserve = 1;
                }
        }

not_jbd:
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);
        __brelse(bh);
drop:
        if (drop_reserve) {
                /* no need to reserve log space for this block -bzzz */
                handle->h_buffer_credits++;
        }
        return err;
}
1348
1349 /**
1350  * int jbd2_journal_stop() - complete a transaction
1351  * @handle: transaction to complete.
1352  *
1353  * All done for a particular handle.
1354  *
1355  * There is not much action needed here.  We just return any remaining
1356  * buffer credits to the transaction and remove the handle.  The only
1357  * complication is that we need to start a commit operation if the
1358  * filesystem is marked for synchronous update.
1359  *
1360  * jbd2_journal_stop itself will not usually return an error, but it may
1361  * do so in unusual circumstances.  In particular, expect it to
1362  * return -EIO if a jbd2_journal_abort has been executed since the
1363  * transaction began.
1364  */
1365 int jbd2_journal_stop(handle_t *handle)
1366 {
1367         transaction_t *transaction = handle->h_transaction;
1368         journal_t *journal = transaction->t_journal;
1369         int err, wait_for_commit = 0;
1370         tid_t tid;
1371         pid_t pid;
1372
1373         J_ASSERT(journal_current_handle() == handle);
1374
1375         if (is_handle_aborted(handle))
1376                 err = -EIO;
1377         else {
1378                 J_ASSERT(atomic_read(&transaction->t_updates) > 0);
1379                 err = 0;
1380         }
1381
1382         if (--handle->h_ref > 0) {
1383                 jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
1384                           handle->h_ref);
1385                 return err;
1386         }
1387
1388         jbd_debug(4, "Handle %p going down\n", handle);
1389
1390         /*
1391          * Implement synchronous transaction batching.  If the handle
1392          * was synchronous, don't force a commit immediately.  Let's
1393          * yield and let another thread piggyback onto this
1394          * transaction.  Keep doing that while new threads continue to
1395          * arrive.  It doesn't cost much - we're about to run a commit
1396          * and sleep on IO anyway.  Speeds up many-threaded, many-dir
1397          * operations by 30x or more...
1398          *
1399          * We try to optimize the sleep time against what the
1400          * underlying disk can do, instead of using a static sleep
1401          * time.  This is useful when our storage is so fast that it
1402          * is better to force a flush and wait for the transaction to
1403          * be committed than it is to wait an arbitrary amount of
1404          * time for new writers to join the transaction.  We achieve
1405          * this by measuring how long it takes to commit a
1406          * transaction and comparing that with how long this
1407          * transaction has been running; if run time < commit time,
1408          * we sleep for the delta and commit.  This greatly helps
1409          * very fast disks that would otherwise see slowdowns as
1410          * more threads start doing fsyncs.
1411          *
1412          * But don't do this if this process was the most recent one
1413          * to perform a synchronous write.  We do this to detect the
1414          * case where a single process is doing a stream of sync
1415          * writes.  No point in waiting for joiners in that case.
1416          */
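        /*
         * A worked example of the clamp below, with purely illustrative
         * numbers: if j_average_commit_time is 2,000,000 ns,
         * j_min_batch_time is 0 us and j_max_batch_time is 15,000 us,
         * then
         *
         *      commit_time = min(max(2000000, 1000 * 0), 1000 * 15000)
         *                  = 2,000,000 ns
         *
         * so a handle whose transaction has been running for less than
         * 2 ms sleeps briefly, giving other threads a window to join
         * the transaction before the commit is kicked off.
         */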
1417         pid = current->pid;
1418         if (handle->h_sync && journal->j_last_sync_writer != pid) {
1419                 u64 commit_time, trans_time;
1420
1421                 journal->j_last_sync_writer = pid;
1422
1423                 read_lock(&journal->j_state_lock);
1424                 commit_time = journal->j_average_commit_time;
1425                 read_unlock(&journal->j_state_lock);
1426
1427                 trans_time = ktime_to_ns(ktime_sub(ktime_get(),
1428                                                    transaction->t_start_time));
1429
1430                 commit_time = max_t(u64, commit_time,
1431                                     1000*journal->j_min_batch_time);
1432                 commit_time = min_t(u64, commit_time,
1433                                     1000*journal->j_max_batch_time);
1434
1435                 if (trans_time < commit_time) {
1436                         ktime_t expires = ktime_add_ns(ktime_get(),
1437                                                        commit_time);
1438                         set_current_state(TASK_UNINTERRUPTIBLE);
1439                         schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
1440                 }
1441         }
1442
1443         if (handle->h_sync)
1444                 transaction->t_synchronous_commit = 1;
1445         current->journal_info = NULL;
1446         atomic_sub(handle->h_buffer_credits,
1447                    &transaction->t_outstanding_credits);
1448
1449         /*
1450          * If the handle is marked SYNC, we need to set another commit
1451          * going!  We also want to force a commit if the current
1452          * transaction is occupying too much of the log, or if the
1453          * transaction is too old now.
1454          */
1455         if (handle->h_sync ||
1456             (atomic_read(&transaction->t_outstanding_credits) >
1457              journal->j_max_transaction_buffers) ||
1458             time_after_eq(jiffies, transaction->t_expires)) {
1459                 /* Do this even for aborted journals: an abort still
1460                  * completes the commit thread, it just doesn't write
1461                  * anything to disk. */
1462
1463                 jbd_debug(2, "transaction too old, requesting commit for "
1464                                         "handle %p\n", handle);
1465                 /* This is non-blocking */
1466                 jbd2_log_start_commit(journal, transaction->t_tid);
1467
1468                 /*
1469                  * Special case: JBD2_SYNC synchronous updates require us
1470                  * to wait for the commit to complete.
1471                  */
1472                 if (handle->h_sync && !(current->flags & PF_MEMALLOC))
1473                         wait_for_commit = 1;
1474         }
1475
1476         /*
1477          * Once we drop t_updates, if it goes to zero the transaction
1478          * could start committing on us and eventually disappear.  So
1479          * once we do this, we must not dereference the transaction
1480          * pointer again.
1481          */
1482         tid = transaction->t_tid;
1483         if (atomic_dec_and_test(&transaction->t_updates)) {
1484                 wake_up(&journal->j_wait_updates);
1485                 if (journal->j_barrier_count)
1486                         wake_up(&journal->j_wait_transaction_locked);
1487         }
1488
1489         if (wait_for_commit)
1490                 err = jbd2_log_wait_commit(journal, tid);
1491
1492         lock_map_release(&handle->h_lockdep_map);
1493
1494         jbd2_free_handle(handle);
1495         return err;
1496 }
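/*
 * A minimal usage sketch for the handle lifecycle (hedged: the update
 * calls in the middle depend on the filesystem; the pattern below only
 * mirrors what callers visible in this file, such as
 * jbd2_journal_force_commit(), do):
 *
 *      handle_t *handle = jbd2_journal_start(journal, nblocks);
 *      if (IS_ERR(handle))
 *              return PTR_ERR(handle);
 *      err = jbd2_journal_get_write_access(handle, bh);
 *      ... modify the buffer ...
 *      err = jbd2_journal_dirty_metadata(handle, bh);
 *      err = jbd2_journal_stop(handle);
 *
 * A -EIO return from jbd2_journal_stop() means the journal was aborted
 * while the handle was open.
 */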
1497
1498 /**
1499  * int jbd2_journal_force_commit() - force any uncommitted transactions
1500  * @journal: journal to force
1501  *
1502  * For synchronous operations: force any uncommitted transactions
1503  * to disk.  May seem kludgy, but it reuses all the handle batching
1504  * code in a very simple manner.
1505  */
1506 int jbd2_journal_force_commit(journal_t *journal)
1507 {
1508         handle_t *handle;
1509         int ret;
1510
1511         handle = jbd2_journal_start(journal, 1);
1512         if (IS_ERR(handle)) {
1513                 ret = PTR_ERR(handle);
1514         } else {
1515                 handle->h_sync = 1;
1516                 ret = jbd2_journal_stop(handle);
1517         }
1518         return ret;
1519 }
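/*
 * A hedged sketch of a caller: a filesystem's sync path can flush all
 * journaled state with a single call.  example_get_journal() below is a
 * hypothetical helper standing in for however the fs finds its journal:
 *
 *      static int example_sync_fs(struct super_block *sb, int wait)
 *      {
 *              return jbd2_journal_force_commit(example_get_journal(sb));
 *      }
 */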
1520
1521 /*
1522  *
1523  * List management code snippets: various functions for manipulating the
1524  * transaction buffer lists.
1525  *
1526  */
1527
1528 /*
1529  * Append a buffer to a transaction list, given the transaction's list head
1530  * pointer.
1531  *
1532  * j_list_lock is held.
1533  *
1534  * jbd_lock_bh_state(jh2bh(jh)) is held.
1535  */
1536
1537 static inline void
1538 __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
1539 {
1540         if (!*list) {
1541                 jh->b_tnext = jh->b_tprev = jh;
1542                 *list = jh;
1543         } else {
1544                 /* Insert at the tail of the list to preserve order */
1545                 struct journal_head *first = *list, *last = first->b_tprev;
1546                 jh->b_tprev = last;
1547                 jh->b_tnext = first;
1548                 last->b_tnext = first->b_tprev = jh;
1549         }
1550 }
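/*
 * Shape of the resulting ring (sketch): after adding A, B and C in that
 * order, *list still points at A and the b_tnext/b_tprev links form a
 * circle, so each new entry lands at the tail:
 *
 *      *list --> A <-> B <-> C
 *                ^-----------'
 *
 * i.e. C->b_tnext == A and A->b_tprev == C close the ring.
 */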
1551
1552 /*
1553  * Remove a buffer from a transaction list, given the transaction's list
1554  * head pointer.
1555  *
1556  * Called with j_list_lock held, and the journal may not be locked.
1557  *
1558  * jbd_lock_bh_state(jh2bh(jh)) is held.
1559  */
1560
1561 static inline void
1562 __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
1563 {
1564         if (*list == jh) {
1565                 *list = jh->b_tnext;
1566                 if (*list == jh)
1567                         *list = NULL;
1568         }
1569         jh->b_tprev->b_tnext = jh->b_tnext;
1570         jh->b_tnext->b_tprev = jh->b_tprev;
1571 }
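/*
 * Note on the deletion above: when jh is the list head, the head is
 * advanced first (and set to NULL if jh was the only element).  The
 * unconditional pointer surgery that follows is then safe in every
 * case, because in a one-element ring jh->b_tprev == jh->b_tnext == jh.
 */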
1572
1573 /*
1574  * Remove a buffer from the appropriate transaction list.
1575  *
1576  * Note that this function can *change* the value of
1577  * bh->b_transaction->t_buffers, t_forget, t_iobuf_list, t_shadow_list,
1578  * t_log_list or t_reserved_list.  If the caller is holding onto a copy of one
1579  * of these pointers, it could go bad.  Generally the caller needs to re-read
1580  * the pointer from the transaction_t.
1581  *
1582  * Called under j_list_lock.
1583  */
1584 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
1585 {
1586         struct journal_head **list = NULL;
1587         transaction_t *transaction;
1588         struct buffer_head *bh = jh2bh(jh);
1589
1590         J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
1591         transaction = jh->b_transaction;
1592         if (transaction)
1593                 assert_spin_locked(&transaction->t_journal->j_list_lock);
1594
1595         J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
1596         if (jh->b_jlist != BJ_None)
1597                 J_ASSERT_JH(jh, transaction != NULL);
1598
1599         switch (jh->b_jlist) {
1600         case BJ_None:
1601                 return;
1602         case BJ_Metadata:
1603                 transaction->t_nr_buffers--;
1604                 J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
1605                 list = &transaction->t_buffers;
1606                 break;
1607         case BJ_Forget:
1608                 list = &transaction->t_forget;
1609                 break;
1610         case BJ_IO:
1611                 list = &transaction->t_iobuf_list;
1612                 break;
1613         case BJ_Shadow:
1614                 list = &transaction->t_shadow_list;
1615                 break;
1616         case BJ_LogCtl:
1617                 list = &transaction->t_log_list;
1618                 break;
1619         case BJ_Reserved:
1620                 list = &transaction->t_reserved_list;
1621                 break;
1622         }
1623
1624         __blist_del_buffer(list, jh);
1625         jh->b_jlist = BJ_None;
1626         if (test_clear_buffer_jbddirty(bh))
1627                 mark_buffer_dirty(bh);  /* Expose it to the VM */
1628 }
1629
1630 /*
1631  * Remove buffer from all transactions.
1632  *
1633  * Called with the bh_state lock and j_list_lock held.
1634  *
1635  * jh and bh may already be freed when this function returns.
1636  */
1637 static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
1638 {
1639         __jbd2_journal_temp_unlink_buffer(jh);
1640         jh->b_transaction = NULL;
1641         jbd2_journal_put_journal_head(jh);
1642 }
1643
1644 void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
1645 {
1646         struct buffer_head *bh = jh2bh(jh);
1647
1648         /* Get reference so that buffer cannot be freed before we unlock it */
1649         get_bh(bh);
1650         jbd_lock_bh_state(bh);
1651         spin_lock(&journal->j_list_lock);
1652         __jbd2_journal_unfile_buffer(jh);
1653         spin_unlock(&journal->j_list_lock);
1654         jbd_unlock_bh_state(bh);
1655         __brelse(bh);
1656 }
1657
1658 /*
1659  * Called from jbd2_journal_try_to_free_buffers().
1660  *
1661  * Called under jbd_lock_bh_state(bh)
1662  */
1663 static void
1664 __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
1665 {
1666         struct journal_head *jh;
1667
1668         jh = bh2jh(bh);
1669
1670         if (buffer_locked(bh) || buffer_dirty(bh))
1671                 goto out;
1672
1673         if (jh->b_next_transaction != NULL)
1674                 goto out;
1675
1676         spin_lock(&journal->j_list_lock);
1677         if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
1678                 /* written-back checkpointed metadata buffer */
1679                 JBUFFER_TRACE(jh, "remove from checkpoint list");
1680                 __jbd2_journal_remove_checkpoint(jh);
1681         }
1682         spin_unlock(&journal->j_list_lock);
1683 out:
1684         return;
1685 }
1686
1687 /**
1688  * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
1689  * @journal: journal for operation
1690  * @page: to try and free
1691  * @gfp_mask: we use the mask to detect how hard we should try to release
1692  * the buffers. If __GFP_WAIT and __GFP_FS are set, we wait for the commit
1693  * code to release the buffers.  (As the code below stands, the mask is
1694  * not actually consulted.)
1695  *
1696  * For all the buffers on this page,
1697  * if they are fully written out ordered data, move them onto BUF_CLEAN
1698  * so try_to_free_buffers() can reap them.
1699  *
1700  * This function returns non-zero if we wish try_to_free_buffers()
1701  * to be called. We do this if the page is releasable by try_to_free_buffers().
1702  * We also do it if the page has locked or dirty buffers and the caller wants
1703  * us to perform sync or async writeout.
1704  *
1705  * This complicates JBD locking somewhat.  We aren't protected by the
1706  * BKL here.  We wish to remove the buffer from its committing or
1707  * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
1708  *
1709  * This may *change* the value of transaction_t->t_datalist, so anyone
1710  * who looks at t_datalist needs to lock against this function.
1711  *
1712  * Even worse, someone may be doing a jbd2_journal_dirty_data on this
1713  * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
1714  * will come out of the lock with the buffer dirty, which makes it
1715  * ineligible for release here.
1716  *
1717  * Who else is affected by this?  hmm...  Really the only contender
1718  * is do_get_write_access() - it could be looking at the buffer while
1719  * journal_try_to_free_buffer() is changing its state.  But that
1720  * cannot happen because we never reallocate freed data as metadata
1721  * while the data is part of a transaction.  Yes?
1722  *
1723  * Return 0 on failure, 1 on success
1724  */
1725 int jbd2_journal_try_to_free_buffers(journal_t *journal,
1726                                 struct page *page, gfp_t gfp_mask)
1727 {
1728         struct buffer_head *head;
1729         struct buffer_head *bh;
1730         int ret = 0;
1731
1732         J_ASSERT(PageLocked(page));
1733
1734         head = page_buffers(page);
1735         bh = head;
1736         do {
1737                 struct journal_head *jh;
1738
1739                 /*
1740                  * We take our own ref against the journal_head here to avoid
1741                  * having to add tons of locking around each instance of
1742                  * jbd2_journal_put_journal_head().
1743                  */
1744                 jh = jbd2_journal_grab_journal_head(bh);
1745                 if (!jh)
1746                         continue;
1747
1748                 jbd_lock_bh_state(bh);
1749                 __journal_try_to_free_buffer(journal, bh);
1750                 jbd2_journal_put_journal_head(jh);
1751                 jbd_unlock_bh_state(bh);
1752                 if (buffer_jbd(bh))
1753                         goto busy;
1754         } while ((bh = bh->b_this_page) != head);
1755
1756         ret = try_to_free_buffers(page);
1757
1758 busy:
1759         return ret;
1760 }
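/*
 * Hedged usage sketch: this function is typically wired up behind a
 * filesystem's ->releasepage address_space operation.
 * example_page_journal() is a hypothetical helper standing in for the
 * fs-specific journal lookup:
 *
 *      static int example_releasepage(struct page *page, gfp_t wait)
 *      {
 *              journal_t *journal = example_page_journal(page);
 *
 *              if (!journal)
 *                      return try_to_free_buffers(page);
 *              return jbd2_journal_try_to_free_buffers(journal, page, wait);
 *      }
 */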
1761
1762 /*
1763  * This buffer is no longer needed.  If it is on an older transaction's
1764  * checkpoint list we need to record it on this transaction's forget list
1765  * to pin this buffer (and hence its checkpointing transaction) down until
1766  * this transaction commits.  If the buffer isn't on a checkpoint list, we
1767  * release it.
1768  * Returns non-zero if JBD no longer has an interest in the buffer.
1769  *
1770  * Called under j_list_lock.
1771  *
1772  * Called under jbd_lock_bh_state(bh).
1773  */
1774 static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1775 {
1776         int may_free = 1;
1777         struct buffer_head *bh = jh2bh(jh);
1778
1779         if (jh->b_cp_transaction) {
1780                 JBUFFER_TRACE(jh, "on running+cp transaction");
1781                 __jbd2_journal_temp_unlink_buffer(jh);
1782                 /*
1783                  * We don't want to write the buffer anymore, clear the
1784                  * bit so that we don't confuse checks in
1785                  * __journal_file_buffer
1786                  */
1787                 clear_buffer_dirty(bh);
1788                 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1789                 may_free = 0;
1790         } else {
1791                 JBUFFER_TRACE(jh, "on running transaction");
1792                 __jbd2_journal_unfile_buffer(jh);
1793         }
1794         return may_free;
1795 }
1796
1797 /*
1798  * jbd2_journal_invalidatepage
1799  *
1800  * This code is tricky.  It has a number of cases to deal with.
1801  *
1802  * There are two invariants which this code relies on:
1803  *
1804  * i_size must be updated on disk before we start calling invalidatepage on the
1805  * data.
1806  *
1807  *  This is done in ext3 by defining an ext3_setattr method which
1808  *  updates i_size before truncate gets going.  By maintaining this
1809  *  invariant, we can be sure that it is safe to throw away any buffers
1810  *  attached to the current transaction: once the transaction commits,
1811  *  we know that the data will not be needed.
1812  *
1813  *  Note however that we can *not* throw away data belonging to the
1814  *  previous, committing transaction!
1815  *
1816  * Any disk blocks which *are* part of the previous, committing
1817  * transaction (and which therefore cannot be discarded immediately) are
1818  * not going to be reused in the new running transaction.
1819  *
1820  *  The bitmap committed_data images guarantee this: any block which is
1821  *  allocated in one transaction and removed in the next will be marked
1822  *  as in-use in the committed_data bitmap, so cannot be reused until
1823  *  the next transaction to delete the block commits.  This means that
1824  *  leaving committing buffers dirty is quite safe: the disk blocks
1825  *  cannot be reallocated to a different file and so buffer aliasing is
1826  *  not possible.
1827  *
1828  *
1829  * The above applies mainly to ordered data mode.  In writeback mode we
1830  * don't make guarantees about the order in which data hits disk --- in
1831  * particular we don't guarantee that new dirty data is flushed before
1832  * transaction commit --- so it is always safe just to discard data
1833  * immediately in that mode.  --sct
1834  */
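/*
 * A hedged summary of the ordering the first invariant above implies
 * for a truncate (simplified; the details live in the filesystem, not
 * in this file):
 *
 *      1. update i_size in the on-disk inode (e.g. via the setattr
 *         path) so the shrunken size is journaled first,
 *      2. only then invalidate the truncated pages, which ends up in
 *         jbd2_journal_invalidatepage() below.
 */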
1835
1836 /*
1837  * The journal_unmap_buffer helper function returns zero if the buffer
1838  * concerned remains pinned as an anonymous buffer belonging to an older
1839  * transaction.
1840  *
1841  * We're outside-transaction here.  Either or both of j_running_transaction
1842  * and j_committing_transaction may be NULL.
1843  */
1844 static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
1845 {
1846         transaction_t *transaction;
1847         struct journal_head *jh;
1848         int may_free = 1;
1849         int ret;
1850
1851         BUFFER_TRACE(bh, "entry");
1852
1853         /*
1854          * It is safe to proceed here without the j_list_lock because the
1855          * buffers cannot be stolen by try_to_free_buffers as long as we are
1856          * holding the page lock. --sct
1857          */
1858
1859         if (!buffer_jbd(bh))
1860                 goto zap_buffer_unlocked;
1861
1862         /* OK, we have a data buffer in journaled mode */
1863         write_lock(&journal->j_state_lock);
1864         jbd_lock_bh_state(bh);
1865         spin_lock(&journal->j_list_lock);
1866
1867         jh = jbd2_journal_grab_journal_head(bh);
1868         if (!jh)
1869                 goto zap_buffer_no_jh;
1870
1871         /*
1872          * We cannot remove the buffer from checkpoint lists until the
1873          * transaction adding the inode to the orphan list (call it T)
1874          * is committed.  Otherwise, if the transaction changing the
1875          * buffer were cleaned from the journal before T is committed,
1876          * a crash would cause the correct contents of the buffer to
1877          * be lost.  On the other hand, we have to clear the buffer's
1878          * dirty bit no later than the moment when the transaction
1879          * marking the buffer as freed in the filesystem structures is
1880          * committed, because from that moment on the buffer can be
1881          * reallocated and used by a different page.  Since the block
1882          * hasn't been freed yet but the inode has already been added
1883          * to the orphan list, it is safe for us to add the buffer to
1884          * the BJ_Forget list of the newest transaction.
1885          */
1886         transaction = jh->b_transaction;
1887         if (transaction == NULL) {
1888                 /* First case: not on any transaction.  If it
1889                  * has no checkpoint link, then we can zap it:
1890                  * it's a writeback-mode buffer so we don't care
1891                  * if it hits disk safely. */
1892                 if (!jh->b_cp_transaction) {
1893                         JBUFFER_TRACE(jh, "not on any transaction: zap");
1894                         goto zap_buffer;
1895                 }
1896
1897                 if (!buffer_dirty(bh)) {
1898                         /* bdflush has written it.  We can drop it now */
1899                         goto zap_buffer;
1900                 }
1901
1902                 /* OK, it must be in the journal but still not
1903                  * written fully to disk: it's metadata or
1904                  * journaled data... */
1905
1906                 if (journal->j_running_transaction) {
1907                         /* ... and once the current transaction has
1908                          * committed, the buffer won't be needed any
1909                          * longer. */
1910                         JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
1911                         ret = __dispose_buffer(jh,
1912                                         journal->j_running_transaction);
1913                         jbd2_journal_put_journal_head(jh);
1914                         spin_unlock(&journal->j_list_lock);
1915                         jbd_unlock_bh_state(bh);
1916                         write_unlock(&journal->j_state_lock);
1917                         return ret;
1918                 } else {
1919                         /* There is no currently-running transaction. So the
1920                          * orphan record which we wrote for this file must have
1921                          * passed into commit.  We must attach this buffer to
1922                          * the committing transaction, if it exists. */
1923                         if (journal->j_committing_transaction) {
1924                                 JBUFFER_TRACE(jh, "give to committing trans");
1925                                 ret = __dispose_buffer(jh,
1926                                         journal->j_committing_transaction);
1927                                 jbd2_journal_put_journal_head(jh);
1928                                 spin_unlock(&journal->j_list_lock);
1929                                 jbd_unlock_bh_state(bh);
1930                                 write_unlock(&journal->j_state_lock);
1931                                 return ret;
1932                         } else {
1933                                 /* The orphan record's transaction has
1934                                  * committed.  We can cleanse this buffer */
1935                                 clear_buffer_jbddirty(bh);
1936                                 goto zap_buffer;
1937                         }
1938                 }
1939         } else if (transaction == journal->j_committing_transaction) {
1940                 JBUFFER_TRACE(jh, "on committing transaction");
1941                 /*
1942                  * The buffer is committing; we simply cannot touch
1943                  * it. So we just set j_next_transaction to the
1944                  * running transaction (if there is one) and mark
1945                  * buffer as freed so that commit code knows it should
1946                  * clear dirty bits when it is done with the buffer.
1947                  */
1948                 set_buffer_freed(bh);
1949                 if (journal->j_running_transaction && buffer_jbddirty(bh))
1950                         jh->b_next_transaction = journal->j_running_transaction;
1951                 jbd2_journal_put_journal_head(jh);
1952                 spin_unlock(&journal->j_list_lock);
1953                 jbd_unlock_bh_state(bh);
1954                 write_unlock(&journal->j_state_lock);
1955                 return 0;
1956         } else {
1957                 /* Good, the buffer belongs to the running transaction.
1958                  * We are writing our own transaction's data, not any
1959                  * previous one's, so it is safe to throw it away
1960                  * (remember that we expect the filesystem to have set
1961                  * i_size already for this truncate so recovery will not
1962                  * expose the disk blocks we are discarding here.) */
1963                 J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
1964                 JBUFFER_TRACE(jh, "on running transaction");
1965                 may_free = __dispose_buffer(jh, transaction);
1966         }
1967
1968 zap_buffer:
1969         jbd2_journal_put_journal_head(jh);
1970 zap_buffer_no_jh:
1971         spin_unlock(&journal->j_list_lock);
1972         jbd_unlock_bh_state(bh);
1973         write_unlock(&journal->j_state_lock);
1974 zap_buffer_unlocked:
1975         clear_buffer_dirty(bh);
1976         J_ASSERT_BH(bh, !buffer_jbddirty(bh));
1977         clear_buffer_mapped(bh);
1978         clear_buffer_req(bh);
1979         clear_buffer_new(bh);
1980         clear_buffer_delay(bh);
1981         clear_buffer_unwritten(bh);
1982         bh->b_bdev = NULL;
1983         return may_free;
1984 }
1985
1986 /**
1987  * void jbd2_journal_invalidatepage()
1988  * @journal: journal to use for flush...
1989  * @page:    page to flush
1990  * @offset:  offset in the page from which to start invalidating.
1991  *
1992  * Reap page buffers containing data after offset in page.
1993  *
1994  */
1995 void jbd2_journal_invalidatepage(journal_t *journal,
1996                       struct page *page,
1997                       unsigned long offset)
1998 {
1999         struct buffer_head *head, *bh, *next;
2000         unsigned int curr_off = 0;
2001         int may_free = 1;
2002
2003         /* The page must be locked by the caller */
2004         BUG_ON(!PageLocked(page));
2005         if (!page_has_buffers(page))
2006                 return;
2007
2008         /* We will potentially be playing with lists other than just the
2009          * data lists (especially for journaled data mode), so be
2010          * cautious in our locking. */
2011
2012         head = bh = page_buffers(page);
2013         do {
2014                 unsigned int next_off = curr_off + bh->b_size;
2015                 next = bh->b_this_page;
2016
2017                 if (offset <= curr_off) {
2018                         /* This block lies wholly beyond the truncation point */
2019                         lock_buffer(bh);
2020                         may_free &= journal_unmap_buffer(journal, bh);
2021                         unlock_buffer(bh);
2022                 }
2023                 curr_off = next_off;
2024                 bh = next;
2025
2026         } while (bh != head);
2027
2028         if (!offset) {
2029                 if (may_free && try_to_free_buffers(page))
2030                         J_ASSERT(!page_has_buffers(page));
2031         }
2032 }
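/*
 * Hedged usage sketch: this is meant to back a filesystem's
 * ->invalidatepage address_space operation.  example_page_journal() is
 * again a hypothetical stand-in for the fs-specific journal lookup:
 *
 *      static void example_invalidatepage(struct page *page,
 *                                         unsigned long offset)
 *      {
 *              jbd2_journal_invalidatepage(example_page_journal(page),
 *                                          page, offset);
 *      }
 */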
2033
2034 /*
2035  * File a buffer on the given transaction list.
2036  */
2037 void __jbd2_journal_file_buffer(struct journal_head *jh,
2038                         transaction_t *transaction, int jlist)
2039 {
2040         struct journal_head **list = NULL;
2041         int was_dirty = 0;
2042         struct buffer_head *bh = jh2bh(jh);
2043
2044         J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
2045         assert_spin_locked(&transaction->t_journal->j_list_lock);
2046
2047         J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
2048         J_ASSERT_JH(jh, jh->b_transaction == transaction ||
2049                                 jh->b_transaction == NULL);
2050
2051         if (jh->b_transaction && jh->b_jlist == jlist)
2052                 return;
2053
2054         if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
2055             jlist == BJ_Shadow || jlist == BJ_Forget) {
2056                 /*
2057                  * For metadata buffers, we track dirty bit in buffer_jbddirty
2058                  * instead of buffer_dirty. We should not see a dirty bit set
2059                  * here, because we clear it in do_get_write_access(), but e.g.
2060                  * tune2fs can modify the sb and set the dirty bit at any time,
2061                  * so we try to handle that gracefully.
2062                  */
2063                 if (buffer_dirty(bh))
2064                         warn_dirty_buffer(bh);
2065                 if (test_clear_buffer_dirty(bh) ||
2066                     test_clear_buffer_jbddirty(bh))
2067                         was_dirty = 1;
2068         }
2069
2070         if (jh->b_transaction)
2071                 __jbd2_journal_temp_unlink_buffer(jh);
2072         else
2073                 jbd2_journal_grab_journal_head(bh);
2074         jh->b_transaction = transaction;
2075
2076         switch (jlist) {
2077         case BJ_None:
2078                 J_ASSERT_JH(jh, !jh->b_committed_data);
2079                 J_ASSERT_JH(jh, !jh->b_frozen_data);
2080                 return;
2081         case BJ_Metadata:
2082                 transaction->t_nr_buffers++;
2083                 list = &transaction->t_buffers;
2084                 break;
2085         case BJ_Forget:
2086                 list = &transaction->t_forget;
2087                 break;
2088         case BJ_IO:
2089                 list = &transaction->t_iobuf_list;
2090                 break;
2091         case BJ_Shadow:
2092                 list = &transaction->t_shadow_list;
2093                 break;
2094         case BJ_LogCtl:
2095                 list = &transaction->t_log_list;
2096                 break;
2097         case BJ_Reserved:
2098                 list = &transaction->t_reserved_list;
2099                 break;
2100         }
2101
2102         __blist_add_buffer(list, jh);
2103         jh->b_jlist = jlist;
2104
2105         if (was_dirty)
2106                 set_buffer_jbddirty(bh);
2107 }
2108
2109 void jbd2_journal_file_buffer(struct journal_head *jh,
2110                                 transaction_t *transaction, int jlist)
2111 {
2112         jbd_lock_bh_state(jh2bh(jh));
2113         spin_lock(&transaction->t_journal->j_list_lock);
2114         __jbd2_journal_file_buffer(jh, transaction, jlist);
2115         spin_unlock(&transaction->t_journal->j_list_lock);
2116         jbd_unlock_bh_state(jh2bh(jh));
2117 }
2118
2119 /*
2120  * Remove a buffer from its current buffer list in preparation for
2121  * dropping it from its current transaction entirely.  If the buffer has
2122  * already started to be used by a subsequent transaction, refile the
2123  * buffer on that transaction's metadata list.
2124  *
2125  * Called under j_list_lock
2126  * Called under jbd_lock_bh_state(jh2bh(jh))
2127  *
2128  * jh and bh may already be freed when this function returns.
2129  */
2130 void __jbd2_journal_refile_buffer(struct journal_head *jh)
2131 {
2132         int was_dirty, jlist;
2133         struct buffer_head *bh = jh2bh(jh);
2134
2135         J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
2136         if (jh->b_transaction)
2137                 assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
2138
2139         /* If the buffer is now unused, just drop it. */
2140         if (jh->b_next_transaction == NULL) {
2141                 __jbd2_journal_unfile_buffer(jh);
2142                 return;
2143         }
2144
2145         /*
2146          * It has been modified by a later transaction: add it to the new
2147          * transaction's metadata list.
2148          */
2149
2150         was_dirty = test_clear_buffer_jbddirty(bh);
2151         __jbd2_journal_temp_unlink_buffer(jh);
2152         /*
2153          * We set b_transaction here because b_next_transaction will inherit
2154          * our jh reference and thus __jbd2_journal_file_buffer() must not
2155          * take a new one.
2156          */
2157         jh->b_transaction = jh->b_next_transaction;
2158         jh->b_next_transaction = NULL;
2159         if (buffer_freed(bh))
2160                 jlist = BJ_Forget;
2161         else if (jh->b_modified)
2162                 jlist = BJ_Metadata;
2163         else
2164                 jlist = BJ_Reserved;
2165         __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
2166         J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
2167
2168         if (was_dirty)
2169                 set_buffer_jbddirty(bh);
2170 }
2171
2172 /*
2173  * __jbd2_journal_refile_buffer() with necessary locking added. We take our
2174  * bh reference so that we can safely unlock bh.
2175  *
2176  * The jh and bh may be freed by this call.
2177  */
2178 void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
2179 {
2180         struct buffer_head *bh = jh2bh(jh);
2181
2182         /* Get reference so that buffer cannot be freed before we unlock it */
2183         get_bh(bh);
2184         jbd_lock_bh_state(bh);
2185         spin_lock(&journal->j_list_lock);
2186         __jbd2_journal_refile_buffer(jh);
2187         jbd_unlock_bh_state(bh);
2188         spin_unlock(&journal->j_list_lock);
2189         __brelse(bh);
2190 }
2191
2192 /*
2193  * File the inode in the inode list of the handle's transaction.
2194  */
2195 int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
2196 {
2197         transaction_t *transaction = handle->h_transaction;
2198         journal_t *journal = transaction->t_journal;
2199
2200         if (is_handle_aborted(handle))
2201                 return -EIO;
2202
2203         jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
2204                         transaction->t_tid);
2205
2206         /*
2207          * First check whether the inode is already on the transaction's
2208          * lists without taking the lock. Note that this check is safe
2209          * without the lock as we cannot race with somebody removing inode
2210          * from the transaction. The reason is that we remove inode from the
2211          * transaction only in journal_release_jbd_inode() and when we commit
2212          * the transaction. We are guarded from the first case by holding
2213          * a reference to the inode. We are safe against the second case
2214          * because if jinode->i_transaction == transaction, commit code
2215          * cannot touch the transaction because we hold reference to it,
2216          * and if jinode->i_next_transaction == transaction, commit code
2217          * will only file the inode where we want it.
2218          */
2219         if (jinode->i_transaction == transaction ||
2220             jinode->i_next_transaction == transaction)
2221                 return 0;
2222
2223         spin_lock(&journal->j_list_lock);
2224
2225         if (jinode->i_transaction == transaction ||
2226             jinode->i_next_transaction == transaction)
2227                 goto done;
2228
2229         /*
2230          * We only ever set this variable to 1 so the test is safe. Since
2231          * t_need_data_flush is likely to be set, we do the test to save some
2232          * cacheline bouncing.
2233          */
2234         if (!transaction->t_need_data_flush)
2235                 transaction->t_need_data_flush = 1;
2236         /* On some different transaction's list - should be
2237          * the committing one */
2238         if (jinode->i_transaction) {
2239                 J_ASSERT(jinode->i_next_transaction == NULL);
2240                 J_ASSERT(jinode->i_transaction ==
2241                                         journal->j_committing_transaction);
2242                 jinode->i_next_transaction = transaction;
2243                 goto done;
2244         }
2245         /* Not on any transaction list... */
2246         J_ASSERT(!jinode->i_next_transaction);
2247         jinode->i_transaction = transaction;
2248         list_add(&jinode->i_list, &transaction->t_inode_list);
2249 done:
2250         spin_unlock(&journal->j_list_lock);
2251
2252         return 0;
2253 }
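/*
 * Hedged usage sketch: an ordered-mode filesystem calls this from its
 * write or block-allocation path, inside a running handle, so that the
 * inode's data is flushed before the transaction commits.  The
 * example_jinode field below is hypothetical:
 *
 *      err = jbd2_journal_file_inode(handle, &ei->example_jinode);
 *      if (err)
 *              goto out_stop;
 */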
2254
2255 /*
2256  * File truncate and transaction commit interact with each other in a
2257  * non-trivial way.  If a transaction writing data block A is
2258  * committing, we cannot discard the data by truncate until we have
2259  * written it out.  Otherwise, if we crashed after the transaction
2260  * doing the write has committed but before the transaction doing the
2261  * truncate has committed, we could see stale data in block A.  This
2262  * function is a helper to solve this problem.  It starts writeout of
2263  * the truncated part in case it is in the committing transaction.
2264  *
2265  * Filesystem code must call this function when the inode is journaled
2266  * in ordered mode, before truncation happens and after the inode has
2267  * been placed on the orphan list with the new inode size. The second
2268  * condition avoids the race where someone writes new data and we start
2269  * committing the transaction after this function has been called but
2270  * before a transaction for truncate is started (and furthermore it
2271  * allows us to optimize the case where the addition to orphan list
2272  * happens in the same transaction as write --- we don't have to write
2273  * any data in such case).
2274  */
2275 int jbd2_journal_begin_ordered_truncate(journal_t *journal,
2276                                         struct jbd2_inode *jinode,
2277                                         loff_t new_size)
2278 {
2279         transaction_t *inode_trans, *commit_trans;
2280         int ret = 0;
2281
2282         /* This is a quick check to avoid locking if not necessary */
2283         if (!jinode->i_transaction)
2284                 goto out;
2285         /* The locks here just force reading of recent values; it is
2286          * enough that the transaction was not committing before we started
2287          * the transaction that adds the inode to the orphan list */
2288         read_lock(&journal->j_state_lock);
2289         commit_trans = journal->j_committing_transaction;
2290         read_unlock(&journal->j_state_lock);
2291         spin_lock(&journal->j_list_lock);
2292         inode_trans = jinode->i_transaction;
2293         spin_unlock(&journal->j_list_lock);
2294         if (inode_trans == commit_trans) {
2295                 ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
2296                         new_size, LLONG_MAX);
2297                 if (ret)
2298                         jbd2_journal_abort(journal, ret);
2299         }
2300 out:
2301         return ret;
2302 }
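/*
 * Hedged sketch of the calling sequence described above, as a truncate
 * path might implement it (simplified, with fs-specific steps elided):
 *
 *      1. in a running handle, add the inode to the orphan list with
 *         the new size,
 *      2. call jbd2_journal_begin_ordered_truncate(journal, jinode,
 *         new_size) to push out committing data past new_size,
 *      3. start the handle that actually truncates the blocks.
 */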