writeback: make bdi_start_background_writeback() take bdi_writeback instead of backing_dev_info
authorTejun Heo <tj@kernel.org>
Fri, 22 May 2015 21:13:54 +0000 (17:13 -0400)
committerJens Axboe <axboe@fb.com>
Tue, 2 Jun 2015 14:33:36 +0000 (08:33 -0600)
bdi_start_background_writeback() currently takes @bdi and kicks the
root wb (bdi_writeback).  In preparation for cgroup writeback support,
make it take wb instead.

This patch doesn't make any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
fs/fs-writeback.c
include/linux/backing-dev.h
mm/page-writeback.c

index 45baf6c89b99396052d7426af0e08c6fe60ccf88..92aaf641ee22a58e4b944cc0239350677dc45562 100644 (file)
@@ -228,23 +228,23 @@ void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
 }
 
 /**
- * bdi_start_background_writeback - start background writeback
- * @bdi: the backing device to write from
+ * wb_start_background_writeback - start background writeback
+ * @wb: bdi_writeback to write from
  *
  * Description:
  *   This makes sure WB_SYNC_NONE background writeback happens. When
- *   this function returns, it is only guaranteed that for given BDI
+ *   this function returns, it is only guaranteed that for given wb
  *   some IO is happening if we are over background dirty threshold.
  *   Caller need not hold sb s_umount semaphore.
  */
-void bdi_start_background_writeback(struct backing_dev_info *bdi)
+void wb_start_background_writeback(struct bdi_writeback *wb)
 {
        /*
         * We just wake up the flusher thread. It will perform background
         * writeback as soon as there is no other work to do.
         */
-       trace_writeback_wake_background(bdi);
-       wb_wakeup(&bdi->wb);
+       trace_writeback_wake_background(wb->bdi);
+       wb_wakeup(wb);
 }
 
 /*
index f04956c900ec05400fea8f23d12fefc40d721766..9cc11e5b97ca57cb746ce4868e3eeea63ae3715c 100644 (file)
@@ -27,7 +27,7 @@ void bdi_unregister(struct backing_dev_info *bdi);
 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
 void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
                        bool range_cyclic, enum wb_reason reason);
-void bdi_start_background_writeback(struct backing_dev_info *bdi);
+void wb_start_background_writeback(struct bdi_writeback *wb);
 void wb_workfn(struct work_struct *work);
 void wb_wakeup_delayed(struct bdi_writeback *wb);
 
index e3b5c1dddf1dd3b0ac7dd000d91498da1ed4506b..70cf98dc342309313cd816b9a8b9a1777e6282bb 100644 (file)
@@ -1456,7 +1456,7 @@ static void balance_dirty_pages(struct address_space *mapping,
                }
 
                if (unlikely(!writeback_in_progress(wb)))
-                       bdi_start_background_writeback(bdi);
+                       wb_start_background_writeback(wb);
 
                if (!strictlimit)
                        wb_dirty_limits(wb, dirty_thresh, background_thresh,
@@ -1588,7 +1588,7 @@ pause:
                return;
 
        if (nr_reclaimable > background_thresh)
-               bdi_start_background_writeback(bdi);
+               wb_start_background_writeback(wb);
 }
 
 static DEFINE_PER_CPU(int, bdp_ratelimits);