firefly-linux-kernel-4.4.55.git: fs/nilfs2/segment.c
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index a5752a589932d936b12cfdda27a6fb718e6cfb51..3b65adaae7e47b9732669c4db8a563cfeeec4d87 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -24,6 +24,7 @@
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
+#include <linux/bitops.h>
 #include <linux/bio.h>
 #include <linux/completion.h>
 #include <linux/blkdev.h>
@@ -76,6 +77,36 @@ enum {
        NILFS_ST_DONE,
 };
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/nilfs2.h>
+
+/*
+ * nilfs_sc_cstage_inc(), nilfs_sc_cstage_set() and nilfs_sc_cstage_get() are
+ * wrapper functions for the stage count (nilfs_sc_info->sc_stage.scnt).
+ * Users of the variable must go through them, because every stage transition
+ * must emit a trace event (trace_nilfs2_collection_stage_transition).
+ *
+ * nilfs_sc_cstage_get() isn't strictly needed for that purpose, since it
+ * doesn't produce a tracepoint event; it is provided only to make the
+ * intention clear.
+ */
+static inline void nilfs_sc_cstage_inc(struct nilfs_sc_info *sci)
+{
+       sci->sc_stage.scnt++;
+       trace_nilfs2_collection_stage_transition(sci);
+}
+
+static inline void nilfs_sc_cstage_set(struct nilfs_sc_info *sci, int next_scnt)
+{
+       sci->sc_stage.scnt = next_scnt;
+       trace_nilfs2_collection_stage_transition(sci);
+}
+
+static inline int nilfs_sc_cstage_get(struct nilfs_sc_info *sci)
+{
+       return sci->sc_stage.scnt;
+}
+
 /* State flags of collection */
 #define NILFS_CF_NODE          0x0001  /* Collecting node blocks */
 #define NILFS_CF_IFILE_STARTED 0x0002  /* IFILE stage has started */
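
The stage-count wrappers above emit trace_nilfs2_collection_stage_transition(), whose definition is pulled in by the CREATE_TRACE_POINTS / #include <trace/events/nilfs2.h> pair added near the top of this hunk. As a rough illustration of how such a tracepoint is declared with the standard TRACE_EVENT() macro, here is a hedged sketch; the field layout and format string are assumptions, and the authoritative definition lives in include/trace/events/nilfs2.h.

/* Sketch only -- not the actual contents of include/trace/events/nilfs2.h. */
TRACE_EVENT(nilfs2_collection_stage_transition,

        TP_PROTO(struct nilfs_sc_info *sci),

        TP_ARGS(sci),

        TP_STRUCT__entry(
                __field(void *, sci)
                __field(int, stage)
        ),

        TP_fast_assign(
                __entry->sci = sci;
                __entry->stage = sci->sc_stage.scnt;
        ),

        TP_printk("sci = %p stage = %d", __entry->sci, __entry->stage)
);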
@@ -183,11 +214,18 @@ int nilfs_transaction_begin(struct super_block *sb,
 {
        struct the_nilfs *nilfs;
        int ret = nilfs_prepare_segment_lock(ti);
+       struct nilfs_transaction_info *trace_ti;
 
        if (unlikely(ret < 0))
                return ret;
-       if (ret > 0)
+       if (ret > 0) {
+               trace_ti = current->journal_info;
+
+               trace_nilfs2_transaction_transition(sb, trace_ti,
+                                   trace_ti->ti_count, trace_ti->ti_flags,
+                                   TRACE_NILFS2_TRANSACTION_BEGIN);
                return 0;
+       }
 
        sb_start_intwrite(sb);
 
@@ -198,6 +236,11 @@ int nilfs_transaction_begin(struct super_block *sb,
                ret = -ENOSPC;
                goto failed;
        }
+
+       trace_ti = current->journal_info;
+       trace_nilfs2_transaction_transition(sb, trace_ti, trace_ti->ti_count,
+                                           trace_ti->ti_flags,
+                                           TRACE_NILFS2_TRANSACTION_BEGIN);
        return 0;
 
  failed:
@@ -230,6 +273,8 @@ int nilfs_transaction_commit(struct super_block *sb)
        ti->ti_flags |= NILFS_TI_COMMIT;
        if (ti->ti_count > 0) {
                ti->ti_count--;
+               trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
+                           ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
                return 0;
        }
        if (nilfs->ns_writer) {
@@ -241,6 +286,9 @@ int nilfs_transaction_commit(struct super_block *sb)
                        nilfs_segctor_do_flush(sci, 0);
        }
        up_read(&nilfs->ns_segctor_sem);
+       trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
+                           ti->ti_flags, TRACE_NILFS2_TRANSACTION_COMMIT);
+
        current->journal_info = ti->ti_save;
 
        if (ti->ti_flags & NILFS_TI_SYNC)
@@ -259,10 +307,15 @@ void nilfs_transaction_abort(struct super_block *sb)
        BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
        if (ti->ti_count > 0) {
                ti->ti_count--;
+               trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
+                           ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
                return;
        }
        up_read(&nilfs->ns_segctor_sem);
 
+       trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
+                   ti->ti_flags, TRACE_NILFS2_TRANSACTION_ABORT);
+
        current->journal_info = ti->ti_save;
        if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
                kmem_cache_free(nilfs_transaction_cachep, ti);
@@ -305,10 +358,12 @@ static void nilfs_transaction_lock(struct super_block *sb,
        ti->ti_count = 0;
        ti->ti_save = cur_ti;
        ti->ti_magic = NILFS_TI_MAGIC;
-       INIT_LIST_HEAD(&ti->ti_garbage);
        current->journal_info = ti;
 
        for (;;) {
+               trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
+                           ti->ti_flags, TRACE_NILFS2_TRANSACTION_TRYLOCK);
+
                down_write(&nilfs->ns_segctor_sem);
                if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
                        break;
@@ -320,6 +375,9 @@ static void nilfs_transaction_lock(struct super_block *sb,
        }
        if (gcflag)
                ti->ti_flags |= NILFS_TI_GC;
+
+       trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
+                           ti->ti_flags, TRACE_NILFS2_TRANSACTION_LOCK);
 }
 
 static void nilfs_transaction_unlock(struct super_block *sb)
@@ -332,8 +390,9 @@ static void nilfs_transaction_unlock(struct super_block *sb)
 
        up_write(&nilfs->ns_segctor_sem);
        current->journal_info = ti->ti_save;
-       if (!list_empty(&ti->ti_garbage))
-               nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
+
+       trace_nilfs2_transaction_transition(sb, ti, ti->ti_count,
+                           ti->ti_flags, TRACE_NILFS2_TRANSACTION_UNLOCK);
 }
 
 static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
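
The TRACE_NILFS2_TRANSACTION_* constants passed as the last argument of trace_nilfs2_transaction_transition() in the transaction hunks above are states of a single transaction tracepoint. A minimal sketch of the state enum they are assumed to come from (the exact declaration in include/trace/events/nilfs2.h may differ):

/* Sketch only; the authoritative enum lives in include/trace/events/nilfs2.h. */
enum nilfs2_transaction_transition_state {
        TRACE_NILFS2_TRANSACTION_BEGIN,
        TRACE_NILFS2_TRANSACTION_COMMIT,
        TRACE_NILFS2_TRANSACTION_ABORT,
        TRACE_NILFS2_TRANSACTION_TRYLOCK,
        TRACE_NILFS2_TRANSACTION_LOCK,
        TRACE_NILFS2_TRANSACTION_UNLOCK,
};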
@@ -665,7 +724,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
 
                bh = head = page_buffers(page);
                do {
-                       if (!buffer_dirty(bh))
+                       if (!buffer_dirty(bh) || buffer_async_write(bh))
                                continue;
                        get_bh(bh);
                        list_add_tail(&bh->b_assoc_buffers, listp);
@@ -699,7 +758,8 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        bh = head = page_buffers(pvec.pages[i]);
                        do {
-                               if (buffer_dirty(bh)) {
+                               if (buffer_dirty(bh) &&
+                                               !buffer_async_write(bh)) {
                                        get_bh(bh);
                                        list_add_tail(&bh->b_assoc_buffers,
                                                      listp);
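
The two hunks above make the dirty-buffer collectors skip buffers whose BH_Async_Write bit is set, i.e. buffers already owned by an in-flight log write; the bit is set in nilfs_segctor_prepare_write() and cleared again in nilfs_abort_logs() and nilfs_segctor_complete_write() further down. A minimal sketch of that lifecycle using the standard buffer_head accessors (the example_* helpers are hypothetical, not part of the patch):

#include <linux/buffer_head.h>

/* Hypothetical helpers illustrating the BH_Async_Write protocol. */
static void example_claim_for_write(struct buffer_head *bh)
{
        set_buffer_async_write(bh);     /* buffer now belongs to a log write */
}

static bool example_is_collectable(struct buffer_head *bh)
{
        /* dirty, but not already queued for an asynchronous log write */
        return buffer_dirty(bh) && !buffer_async_write(bh);
}

static void example_release_after_write(struct buffer_head *bh)
{
        clear_buffer_async_write(bh);   /* log write finished or aborted */
}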
@@ -745,6 +805,15 @@ static void nilfs_dispose_list(struct the_nilfs *nilfs,
        }
 }
 
+static void nilfs_iput_work_func(struct work_struct *work)
+{
+       struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
+                                                sc_iput_work);
+       struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
+
+       nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
+}
+
 static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
                                     struct nilfs_root *root)
 {
@@ -835,9 +904,9 @@ static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
        raw_cp->cp_snapshot_list.ssl_next = 0;
        raw_cp->cp_snapshot_list.ssl_prev = 0;
        raw_cp->cp_inodes_count =
-               cpu_to_le64(atomic_read(&sci->sc_root->inodes_count));
+               cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
        raw_cp->cp_blocks_count =
-               cpu_to_le64(atomic_read(&sci->sc_root->blocks_count));
+               cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
        raw_cp->cp_nblk_inc =
                cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
        raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
@@ -929,7 +998,7 @@ static void nilfs_drop_collected_inodes(struct list_head *head)
                if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
                        continue;
 
-               clear_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
+               clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
                set_bit(NILFS_I_UPDATED, &ii->i_state);
        }
 }
@@ -1054,7 +1123,7 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
        size_t ndone;
        int err = 0;
 
-       switch (sci->sc_stage.scnt) {
+       switch (nilfs_sc_cstage_get(sci)) {
        case NILFS_ST_INIT:
                /* Pre-processes */
                sci->sc_stage.flags = 0;
@@ -1063,7 +1132,7 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
                        sci->sc_nblk_inc = 0;
                        sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
                        if (mode == SC_LSEG_DSYNC) {
-                               sci->sc_stage.scnt = NILFS_ST_DSYNC;
+                               nilfs_sc_cstage_set(sci, NILFS_ST_DSYNC);
                                goto dsync_mode;
                        }
                }
@@ -1071,10 +1140,10 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
                sci->sc_stage.dirty_file_ptr = NULL;
                sci->sc_stage.gc_inode_ptr = NULL;
                if (mode == SC_FLUSH_DAT) {
-                       sci->sc_stage.scnt = NILFS_ST_DAT;
+                       nilfs_sc_cstage_set(sci, NILFS_ST_DAT);
                        goto dat_stage;
                }
-               sci->sc_stage.scnt++;  /* Fall through */
+               nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_GC:
                if (nilfs_doing_gc()) {
                        head = &sci->sc_gc_inodes;
@@ -1095,7 +1164,7 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
                        }
                        sci->sc_stage.gc_inode_ptr = NULL;
                }
-               sci->sc_stage.scnt++;  /* Fall through */
+               nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_FILE:
                head = &sci->sc_dirty_files;
                ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
@@ -1117,10 +1186,10 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
                }
                sci->sc_stage.dirty_file_ptr = NULL;
                if (mode == SC_FLUSH_FILE) {
-                       sci->sc_stage.scnt = NILFS_ST_DONE;
+                       nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                        return 0;
                }
-               sci->sc_stage.scnt++;
+               nilfs_sc_cstage_inc(sci);
                sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
                /* Fall through */
        case NILFS_ST_IFILE:
@@ -1128,7 +1197,7 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
-               sci->sc_stage.scnt++;
+               nilfs_sc_cstage_inc(sci);
                /* Creating a checkpoint */
                err = nilfs_segctor_create_checkpoint(sci);
                if (unlikely(err))
@@ -1139,7 +1208,7 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
-               sci->sc_stage.scnt++;  /* Fall through */
+               nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_SUFILE:
                err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
                                         sci->sc_nfreesegs, &ndone);
@@ -1155,7 +1224,7 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
                                              &nilfs_sc_file_ops);
                if (unlikely(err))
                        break;
-               sci->sc_stage.scnt++;  /* Fall through */
+               nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_DAT:
  dat_stage:
                err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
@@ -1163,10 +1232,10 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
                if (unlikely(err))
                        break;
                if (mode == SC_FLUSH_DAT) {
-                       sci->sc_stage.scnt = NILFS_ST_DONE;
+                       nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                        return 0;
                }
-               sci->sc_stage.scnt++;  /* Fall through */
+               nilfs_sc_cstage_inc(sci);  /* Fall through */
        case NILFS_ST_SR:
                if (mode == SC_LSEG_SR) {
                        /* Appending a super root */
@@ -1176,7 +1245,7 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
                }
                /* End of a logical segment */
                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
-               sci->sc_stage.scnt = NILFS_ST_DONE;
+               nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                return 0;
        case NILFS_ST_DSYNC:
  dsync_mode:
@@ -1189,7 +1258,7 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
                if (unlikely(err))
                        break;
                sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
-               sci->sc_stage.scnt = NILFS_ST_DONE;
+               nilfs_sc_cstage_set(sci, NILFS_ST_DONE);
                return 0;
        case NILFS_ST_DONE:
                return 0;
@@ -1434,22 +1503,25 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
                        goto failed;
 
                /* The current segment is filled up */
-               if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
+               if (mode != SC_LSEG_SR ||
+                   nilfs_sc_cstage_get(sci) < NILFS_ST_CPFILE)
                        break;
 
                nilfs_clear_logs(&sci->sc_segbufs);
 
-               err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
-               if (unlikely(err))
-                       return err;
-
                if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
                        err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
                                                        sci->sc_freesegs,
                                                        sci->sc_nfreesegs,
                                                        NULL);
                        WARN_ON(err); /* do not happen */
+                       sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
                }
+
+               err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
+               if (unlikely(err))
+                       return err;
+
                nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
                sci->sc_stage = prev_stage;
        }
@@ -1592,6 +1664,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 
                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
                                    b_assoc_buffers) {
+                       set_buffer_async_write(bh);
                        if (bh == segbuf->sb_super_root) {
                                if (bh->b_page != bd_page) {
                                        lock_page(bd_page);
@@ -1686,6 +1759,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
 
                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
                                    b_assoc_buffers) {
+                       clear_buffer_async_write(bh);
                        if (bh == segbuf->sb_super_root) {
                                if (bh->b_page != bd_page) {
                                        end_page_writeback(bd_page);
@@ -1774,11 +1848,13 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
                 */
                list_for_each_entry(bh, &segbuf->sb_payload_buffers,
                                    b_assoc_buffers) {
-                       set_buffer_uptodate(bh);
-                       clear_buffer_dirty(bh);
-                       clear_buffer_delay(bh);
-                       clear_buffer_nilfs_volatile(bh);
-                       clear_buffer_nilfs_redirected(bh);
+                       const unsigned long set_bits = (1 << BH_Uptodate);
+                       const unsigned long clear_bits =
+                               (1 << BH_Dirty | 1 << BH_Async_Write |
+                                1 << BH_Delay | 1 << BH_NILFS_Volatile |
+                                1 << BH_NILFS_Redirected);
+
+                       set_mask_bits(&bh->b_state, clear_bits, set_bits);
                        if (bh == segbuf->sb_super_root) {
                                if (bh->b_page != bd_page) {
                                        end_page_writeback(bd_page);
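
The new #include <linux/bitops.h> at the top of the file provides set_mask_bits(), which lets nilfs_segctor_complete_write() clear and set several b_state bits in one atomic update instead of five separate bit operations. A hedged sketch of its semantics as used above (not the kernel's implementation, just the equivalent compare-and-exchange loop):

/* Equivalent of set_mask_bits(addr, clear_bits, set_bits): atomically
 * replace *addr with (*addr & ~clear_bits) | set_bits, done here via a
 * cmpxchg() retry loop. */
static void example_set_mask_bits(unsigned long *addr,
                                  unsigned long clear_bits,
                                  unsigned long set_bits)
{
        unsigned long old, new;

        do {
                old = READ_ONCE(*addr);
                new = (old & ~clear_bits) | set_bits;
        } while (cmpxchg(addr, old, new) != old);
}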
@@ -1824,6 +1900,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
        nilfs_set_next_segment(nilfs, segbuf);
 
        if (update_sr) {
+               nilfs->ns_flushed_device = 0;
                nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
                                       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
 
@@ -1890,8 +1967,9 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
 static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
                                             struct the_nilfs *nilfs)
 {
-       struct nilfs_transaction_info *ti = current->journal_info;
        struct nilfs_inode_info *ii, *n;
+       int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
+       int defer_iput = false;
 
        spin_lock(&nilfs->ns_inode_lock);
        list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
@@ -1902,9 +1980,24 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
                clear_bit(NILFS_I_BUSY, &ii->i_state);
                brelse(ii->i_bh);
                ii->i_bh = NULL;
-               list_move_tail(&ii->i_dirty, &ti->ti_garbage);
+               list_del_init(&ii->i_dirty);
+               if (!ii->vfs_inode.i_nlink || during_mount) {
+                       /*
+                        * Defer calling iput() to avoid deadlocks if
+                        * i_nlink == 0 or mount is not yet finished.
+                        */
+                       list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
+                       defer_iput = true;
+               } else {
+                       spin_unlock(&nilfs->ns_inode_lock);
+                       iput(&ii->vfs_inode);
+                       spin_lock(&nilfs->ns_inode_lock);
+               }
        }
        spin_unlock(&nilfs->ns_inode_lock);
+
+       if (defer_iput)
+               schedule_work(&sci->sc_iput_work);
 }
 
 /*
@@ -1915,7 +2008,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
        struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
        int err;
 
-       sci->sc_stage.scnt = NILFS_ST_INIT;
+       nilfs_sc_cstage_set(sci, NILFS_ST_INIT);
        sci->sc_cno = nilfs->ns_cno;
 
        err = nilfs_segctor_collect_dirty_files(sci, nilfs);
@@ -1943,7 +2036,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
                        goto failed;
 
                /* Avoid empty segment */
-               if (sci->sc_stage.scnt == NILFS_ST_DONE &&
+               if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE &&
                    nilfs_segbuf_empty(sci->sc_curseg)) {
                        nilfs_segctor_abort_construction(sci, nilfs, 1);
                        goto out;
@@ -1957,7 +2050,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
                        nilfs_segctor_fill_in_file_bmap(sci);
 
                if (mode == SC_LSEG_SR &&
-                   sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
+                   nilfs_sc_cstage_get(sci) >= NILFS_ST_CPFILE) {
                        err = nilfs_segctor_fill_in_checkpoint(sci);
                        if (unlikely(err))
                                goto failed_to_write;
@@ -1976,7 +2069,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
                if (unlikely(err))
                        goto failed_to_write;
 
-               if (sci->sc_stage.scnt == NILFS_ST_DONE ||
+               if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE ||
                    nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
                        /*
                         * At this point, we avoid double buffering
@@ -1989,7 +2082,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
                        if (err)
                                goto failed_to_write;
                }
-       } while (sci->sc_stage.scnt != NILFS_ST_DONE);
+       } while (nilfs_sc_cstage_get(sci) != NILFS_ST_DONE);
 
  out:
        nilfs_segctor_drop_written_files(sci, nilfs);
@@ -2185,7 +2278,7 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
        nilfs_transaction_lock(sb, &ti, 0);
 
        ii = NILFS_I(inode);
-       if (test_bit(NILFS_I_INODE_DIRTY, &ii->i_state) ||
+       if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
            nilfs_test_opt(nilfs, STRICT_ORDER) ||
            test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
            nilfs_discontinued(nilfs)) {
@@ -2207,6 +2300,8 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
        sci->sc_dsync_end = end;
 
        err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
+       if (!err)
+               nilfs->ns_flushed_device = 0;
 
        nilfs_transaction_unlock(sb);
        return err;
@@ -2397,7 +2492,6 @@ static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
 static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
 {
        int mode = 0;
-       int err;
 
        spin_lock(&sci->sc_state_lock);
        mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
@@ -2405,7 +2499,7 @@ static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
        spin_unlock(&sci->sc_state_lock);
 
        if (mode) {
-               err = nilfs_segctor_do_construct(sci, mode);
+               nilfs_segctor_do_construct(sci, mode);
 
                spin_lock(&sci->sc_state_lock);
                sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
@@ -2571,6 +2665,8 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
        INIT_LIST_HEAD(&sci->sc_segbufs);
        INIT_LIST_HEAD(&sci->sc_write_logs);
        INIT_LIST_HEAD(&sci->sc_gc_inodes);
+       INIT_LIST_HEAD(&sci->sc_iput_queue);
+       INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
        init_timer(&sci->sc_timer);
 
        sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
@@ -2597,6 +2693,8 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
                ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
                nilfs_transaction_unlock(sci->sc_super);
 
+               flush_work(&sci->sc_iput_work);
+
        } while (ret && retrycount-- > 0);
 }
 
@@ -2621,6 +2719,9 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
                || sci->sc_seq_request != sci->sc_seq_done);
        spin_unlock(&sci->sc_state_lock);
 
+       if (flush_work(&sci->sc_iput_work))
+               flag = true;
+
        if (flag || !nilfs_segctor_confirm(sci))
                nilfs_segctor_write_out(sci);
 
@@ -2630,6 +2731,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
                nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
        }
 
+       if (!list_empty(&sci->sc_iput_queue)) {
+               nilfs_warning(sci->sc_super, __func__,
+                             "iput queue is not empty\n");
+               nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
+       }
+
        WARN_ON(!list_empty(&sci->sc_segbufs));
        WARN_ON(!list_empty(&sci->sc_write_logs));