author     Jens Axboe <jens.axboe@oracle.com>    2009-06-25 09:12:10 +0200
committer  Jens Axboe <jens.axboe@oracle.com>    2009-06-25 12:35:19 +0200
commit     1a3d2fd8d960a5e94f05d5c544c590360a850ce1
tree       51b58d8b964f9413b8de5c410e73e762456228eb /fs
parent     9d2f0f36a48889bb8bd0f01278984216947e854c
writeback: switch to per-bdi threads for flushing data
This gets rid of pdflush for bdi writeout and kupdated style cleaning.
This is an experiment to see if we get better writeout behaviour with
per-bdi flushing. Some initial tests look pretty encouraging. A sample
ffsb workload that does random writes to files is about 8% faster here
on a simple SATA drive during the benchmark phase. File layout also seems
a LOT more smooth in vmstat:

 r  b  swpd   free   buff  cache   si  so   bi     bo   in   cs  us sy id wa
 0  1     0 608848   2652 375372    0   0    0  71024  604   24   1 10 48 42
 0  1     0 549644   2712 433736    0   0    0  60692  505   27   1  8 48 44
 1  0     0 476928   2784 505192    0   0    4  29540  553   24   0  9 53 37
 0  1     0 457972   2808 524008    0   0    0  54876  331   16   0  4 38 58
 0  1     0 366128   2928 614284    0   0    4  92168  710   58   0 13 53 34
 0  1     0 295092   3000 684140    0   0    0  62924  572   23   0  9 53 37
 0  1     0 236592   3064 741704    0   0    4  58256  523   17   0  8 48 44
 0  1     0 165608   3132 811464    0   0    0  57460  560   21   0  8 54 38
 0  1     0 102952   3200 873164    0   0    4  74748  540   29   1 10 48 41
 0  1     0  48604   3252 926472    0   0    0  53248  469   29   0  7 47 45

where vanilla tends to fluctuate a lot in the creation phase:

 r  b  swpd   free   buff  cache   si  so   bi     bo   in   cs  us sy id wa
 1  1     0 678716   5792 303380    0   0    0  74064  565   50   1 11 52 36
 1  0     0 662488   5864 319396    0   0    4    352  302  329   0  2 47 51
 0  1     0 599312   5924 381468    0   0    0  78164  516   55   0  9 51 40
 0  1     0 519952   6008 459516    0   0    4  78156  622   56   1 11 52 37
 1  1     0 436640   6092 541632    0   0    0  82244  622   54   0 11 48 41
 0  1     0 436640   6092 541660    0   0    0      8  152   39   0  0 51 49
 0  1     0 332224   6200 644252    0   0    4 102800  728   46   1 13 49 36
 1  0     0 274492   6260 701056    0   0    4  12328  459   49   0  7 50 43
 0  1     0 211220   6324 763356    0   0    0 106940  515   37   1 10 51 39
 1  0     0 160412   6376 813468    0   0    0   8224  415   43   0  6 49 45
 1  1     0  85980   6452 886556    0   0    4 113516  575   39   1 11 54 34
 0  2     0  85968   6452 886620    0   0    0   1640  158  211   0  0 46 54

So apart from seemingly behaving better for buffered writeout, this also
allows us to potentially have more than one bdi thread flushing out data.
This may be useful for NUMA type setups.

A 10 disk test with btrfs performs 26% faster with per-bdi flushing. Other
tests pending. mmap heavy writing also improves considerably.

A separate thread is added to sync the super blocks. In the long term,
adding sync_supers_bdi() functionality could get rid of this thread again.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
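[Editor's note: for illustration only, a minimal sketch of what the super
block sync thread mentioned above could look like. It is not part of this
fs/-limited diff; the name bdi_sync_supers and the timeout policy are
assumptions, patterned on the kupdated-style loop in bdi_writeback_task()
below.]

static int bdi_sync_supers(void *unused)
{
	set_user_nice(current, 0);

	while (!kthread_should_stop()) {
		/* kupdated style: wake up periodically... */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));

		/*
		 * ...and write back dirty super blocks. Adding something
		 * like sync_supers_bdi() would make this thread go away.
		 */
		sync_supers();
	}

	return 0;
}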
Diffstat (limited to 'fs')
-rw-r--r--  fs/buffer.c        |   2
-rw-r--r--  fs/fs-writeback.c  | 293
2 files changed, 172 insertions(+), 123 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index a3ef091a45bd..2a01b2bc27ba 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -281,7 +281,7 @@ static void free_more_memory(void)
struct zone *zone;
int nid;
- wakeup_pdflush(1024);
+ wakeup_flusher_threads(1024);
yield();
for_each_online_node(nid) {
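[Editor's note: wakeup_flusher_threads() itself is outside this fs/-only
diffstat, so its body is not shown here. Based on bdi_start_writeback()
and bdi_writeback_all() added below, a plausible sketch -- an assumption,
not the patch's actual implementation -- is:]

void wakeup_flusher_threads(long nr_pages)
{
	struct backing_dev_info *bdi;

	/* 0 means "flush everything that is currently dirty" */
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
			   global_page_state(NR_UNSTABLE_NFS);

	/* Kick every bdi that has dirty IO, WB_SYNC_NONE style */
	spin_lock(&bdi_lock);
	list_for_each_entry(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		bdi_start_writeback(bdi, NULL, nr_pages, WB_SYNC_NONE);
	}
	spin_unlock(&bdi_lock);
}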
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 021f44269299..f3e4105e1285 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -19,6 +19,8 @@
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
@@ -61,10 +63,170 @@ int writeback_in_progress(struct backing_dev_info *bdi)
*/
static void writeback_release(struct backing_dev_info *bdi)
{
- BUG_ON(!writeback_in_progress(bdi));
+ WARN_ON_ONCE(!writeback_in_progress(bdi));
+ bdi->wb_arg.nr_pages = 0;
+ bdi->wb_arg.sb = NULL;
clear_bit(BDI_pdflush, &bdi->state);
}
+void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
+ long nr_pages, enum writeback_sync_modes sync_mode)
+{
+ /*
+ * This only happens the first time someone kicks this bdi, so put
+ * it out-of-line.
+ */
+ if (unlikely(!bdi->task))
+ wake_up_process(default_backing_dev_info.task);
+
+ if (writeback_acquire(bdi)) {
+ bdi->wb_arg.nr_pages = nr_pages;
+ bdi->wb_arg.sb = sb;
+ bdi->wb_arg.sync_mode = sync_mode;
+
+ if (bdi->task)
+ wake_up_process(bdi->task);
+ }
+}
+
+/*
+ * The maximum number of pages to writeout in a single bdi flush/kupdate
+ * operation. We do this so we don't hold I_SYNC against an inode for
+ * enormous amounts of time, which would block a userspace task which has
+ * been forced to throttle against that inode. Also, the code reevaluates
+ * the dirty state each time it has written this many pages.
+ */
+#define MAX_WRITEBACK_PAGES 1024
+
+static inline bool over_bground_thresh(void)
+{
+ unsigned long background_thresh, dirty_thresh;
+
+ get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+
+ return (global_page_state(NR_FILE_DIRTY) +
+ global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
+}
+
+/*
+ * Explicit flushing or periodic writeback of "old" data.
+ *
+ * Define "old": the first time one of an inode's pages is dirtied, we mark the
+ * dirtying-time in the inode's address_space. So this periodic writeback code
+ * just walks the superblock inode list, writing back any inodes which are
+ * older than a specific point in time.
+ *
+ * Try to run once per dirty_writeback_interval. But if a writeback event
+ * takes longer than dirty_writeback_interval, then leave a
+ * one-second gap.
+ *
+ * older_than_this takes precedence over nr_to_write. So we'll only write back
+ * all dirty pages if they are all attached to "old" mappings.
+ */
+static void bdi_flush(struct backing_dev_info *bdi, int for_kupdate)
+{
+ struct writeback_control wbc = {
+ .bdi = bdi,
+ .sync_mode = bdi->wb_arg.sync_mode,
+ .older_than_this = NULL,
+ .for_kupdate = for_kupdate,
+ .range_cyclic = 1,
+ };
+ unsigned long oldest_jif;
+ long nr_pages = bdi->wb_arg.nr_pages;
+
+ if (wbc.for_kupdate) {
+ wbc.older_than_this = &oldest_jif;
+ oldest_jif = jiffies -
+ msecs_to_jiffies(dirty_expire_interval * 10);
+ }
+
+ for (;;) {
+ if (wbc.sync_mode == WB_SYNC_NONE && nr_pages <= 0 &&
+ !over_bground_thresh())
+ break;
+
+ wbc.more_io = 0;
+ wbc.encountered_congestion = 0;
+ wbc.nr_to_write = MAX_WRITEBACK_PAGES;
+ wbc.pages_skipped = 0;
+ generic_sync_bdi_inodes(bdi->wb_arg.sb, &wbc);
+ nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
+ /*
+ * If we ran out of stuff to write, bail unless more_io got set
+ */
+ if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
+ if (wbc.more_io && !wbc.for_kupdate)
+ continue;
+ break;
+ }
+ }
+}
+
+/*
+ * Handle writeback of dirty data for the device backed by this bdi. Also
+ * wakes up periodically and does kupdated style flushing.
+ */
+int bdi_writeback_task(struct backing_dev_info *bdi)
+{
+ while (!kthread_should_stop()) {
+ unsigned long wait_jiffies;
+ int for_kupdate;
+
+ wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(wait_jiffies);
+ try_to_freeze();
+
+ /*
+ * We get here in two cases:
+ *
+ * schedule_timeout() returned because the dirty writeback
+ * interval has elapsed. If that happens, we will be able
+ * to acquire the writeback lock and will proceed to do
+ * kupdated style writeout.
+ *
+ * Someone called bdi_start_writeback(), which will acquire
+ * the writeback lock. This means our writeback_acquire()
+ * below will fail and we call into bdi_flush() for
+ * pdflush style writeout.
+ *
+ */
+ for_kupdate = writeback_acquire(bdi);
+ if (for_kupdate) {
+ long nr;
+
+ nr = global_page_state(NR_FILE_DIRTY) +
+ global_page_state(NR_UNSTABLE_NFS) +
+ (inodes_stat.nr_inodes - inodes_stat.nr_unused);
+
+ bdi->wb_arg.nr_pages = nr;
+ bdi->wb_arg.sb = NULL;
+ bdi->wb_arg.sync_mode = WB_SYNC_NONE;
+ }
+
+ bdi_flush(bdi, for_kupdate);
+ writeback_release(bdi);
+ }
+
+ return 0;
+}
+
+void bdi_writeback_all(struct super_block *sb, struct writeback_control *wbc)
+{
+ struct backing_dev_info *bdi;
+
+ spin_lock(&bdi_lock);
+
+ list_for_each_entry(bdi, &bdi_list, bdi_list) {
+ if (!bdi_has_dirty_io(bdi))
+ continue;
+ bdi_start_writeback(bdi, sb, wbc->nr_to_write, wbc->sync_mode);
+ }
+
+ spin_unlock(&bdi_lock);
+}
+
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
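[Editor's note: with the hunk above applied, callers have two entry
points: bdi_start_writeback() for one device and bdi_writeback_all() for
every registered bdi. A hypothetical caller, for illustration only -- the
wbc field values here are assumptions, not taken from the patch:]

static void example_writeback_everything(void)
{
	struct writeback_control wbc = {
		.bdi		 = NULL,	 /* not pinned to one device */
		.sync_mode	 = WB_SYNC_NONE, /* don't wait on the IO */
		.older_than_this = NULL,	 /* any dirtying time */
		.range_cyclic	 = 1,
		.nr_to_write	 = LONG_MAX,	 /* as much as possible */
	};

	/* sb == NULL: bdi threads write back inodes of any super block */
	bdi_writeback_all(NULL, &wbc);
}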
@@ -270,46 +432,6 @@ static void queue_io(struct backing_dev_info *bdi,
move_expired_inodes(&bdi->b_dirty, &bdi->b_io, older_than_this);
}
-static int sb_on_inode_list(struct super_block *sb, struct list_head *list)
-{
- struct inode *inode;
- int ret = 0;
-
- spin_lock(&inode_lock);
- list_for_each_entry(inode, list, i_list) {
- if (inode->i_sb == sb) {
- ret = 1;
- break;
- }
- }
- spin_unlock(&inode_lock);
- return ret;
-}
-
-int sb_has_dirty_inodes(struct super_block *sb)
-{
- struct backing_dev_info *bdi;
- int ret = 0;
-
- /*
- * This is REALLY expensive right now, but it'll go away
- * when the bdi writeback is introduced
- */
- mutex_lock(&bdi_lock);
- list_for_each_entry(bdi, &bdi_list, bdi_list) {
- if (sb_on_inode_list(sb, &bdi->b_dirty) ||
- sb_on_inode_list(sb, &bdi->b_io) ||
- sb_on_inode_list(sb, &bdi->b_more_io)) {
- ret = 1;
- break;
- }
- }
- mutex_unlock(&bdi_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(sb_has_dirty_inodes);
-
/*
* Wait for writeback on an inode to complete.
*/
@@ -466,11 +588,11 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
return ret;
}
-static void generic_sync_bdi_inodes(struct backing_dev_info *bdi,
- struct writeback_control *wbc,
- struct super_block *sb,
- int is_blkdev_sb)
+void generic_sync_bdi_inodes(struct super_block *sb,
+ struct writeback_control *wbc)
{
+ const int is_blkdev_sb = sb_is_blkdev_sb(sb);
+ struct backing_dev_info *bdi = wbc->bdi;
const unsigned long start = jiffies; /* livelock avoidance */
spin_lock(&inode_lock);
@@ -521,13 +643,6 @@ static void generic_sync_bdi_inodes(struct backing_dev_info *bdi,
continue; /* Skip a congested blockdev */
}
- if (wbc->bdi && bdi != wbc->bdi) {
- if (!is_blkdev_sb)
- break; /* fs has the wrong queue */
- requeue_io(inode);
- continue; /* blockdev has wrong queue */
- }
-
/*
* Was this inode dirtied after sync_sb_inodes was called?
* This keeps sync from extra jobs and livelock.
@@ -535,16 +650,10 @@ static void generic_sync_bdi_inodes(struct backing_dev_info *bdi,
if (inode_dirtied_after(inode, start))
break;
- /* Is another pdflush already flushing this queue? */
- if (current_is_pdflush() && !writeback_acquire(bdi))
- break;
-
BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
__iget(inode);
pages_skipped = wbc->pages_skipped;
writeback_single_inode(inode, wbc);
- if (current_is_pdflush())
- writeback_release(bdi);
if (wbc->pages_skipped != pages_skipped) {
/*
* writeback is not making progress due to locked
@@ -583,11 +692,6 @@ static void generic_sync_bdi_inodes(struct backing_dev_info *bdi,
* a variety of queues, so all inodes are searched. For other superblocks,
* assume that all inodes are backed by the same queue.
*
- * FIXME: this linear search could get expensive with many filesystems. But
- * how to fix? We need to go from an address_space to all inodes which share
- * a queue with that address_space. (Easy: have a global "dirty superblocks"
- * list).
- *
* The inodes to be written are parked on bdi->b_io. They are moved back onto
* bdi->b_dirty as they are selected for writing. This way, none can be missed
* on the writer throttling path, and we get decent balancing between many
@@ -596,13 +700,10 @@ static void generic_sync_bdi_inodes(struct backing_dev_info *bdi,
void generic_sync_sb_inodes(struct super_block *sb,
struct writeback_control *wbc)
{
- const int is_blkdev_sb = sb_is_blkdev_sb(sb);
- struct backing_dev_info *bdi;
-
- mutex_lock(&bdi_lock);
- list_for_each_entry(bdi, &bdi_list, bdi_list)
- generic_sync_bdi_inodes(bdi, wbc, sb, is_blkdev_sb);
- mutex_unlock(&bdi_lock);
+ if (wbc->bdi)
+ generic_sync_bdi_inodes(sb, wbc);
+ else
+ bdi_writeback_all(sb, wbc);
if (wbc->sync_mode == WB_SYNC_ALL) {
struct inode *inode, *old_inode = NULL;
@@ -658,58 +759,6 @@ static void sync_sb_inodes(struct super_block *sb,
}
/*
- * Start writeback of dirty pagecache data against all unlocked inodes.
- *
- * Note:
- * We don't need to grab a reference to superblock here. If it has non-empty
- * ->b_dirty it hadn't been killed yet and kill_super() won't proceed
- * past sync_inodes_sb() until the ->b_dirty/b_io/b_more_io lists are all
- * empty. Since __sync_single_inode() regains inode_lock before it finally moves
- * inode from superblock lists we are OK.
- *
- * If `older_than_this' is non-zero then only flush inodes which have a
- * flushtime older than *older_than_this.
- *
- * If `bdi' is non-zero then we will scan the first inode against each
- * superblock until we find the matching ones. One group will be the dirty
- * inodes against a filesystem. Then when we hit the dummy blockdev superblock,
- * sync_sb_inodes will seek out the blockdev which matches `bdi'. Maybe not
- * super-efficient but we're about to do a ton of I/O...
- */
-void
-writeback_inodes(struct writeback_control *wbc)
-{
- struct super_block *sb;
-
- might_sleep();
- spin_lock(&sb_lock);
-restart:
- list_for_each_entry_reverse(sb, &super_blocks, s_list) {
- if (sb_has_dirty_inodes(sb)) {
- /* we're making our own get_super here */
- sb->s_count++;
- spin_unlock(&sb_lock);
- /*
- * If we can't get the readlock, there's no sense in
- * waiting around, most of the time the FS is going to
- * be unmounted by the time it is released.
- */
- if (down_read_trylock(&sb->s_umount)) {
- if (sb->s_root)
- sync_sb_inodes(sb, wbc);
- up_read(&sb->s_umount);
- }
- spin_lock(&sb_lock);
- if (__put_super_and_need_restart(sb))
- goto restart;
- }
- if (wbc->nr_to_write <= 0)
- break;
- }
- spin_unlock(&sb_lock);
-}
-
-/*
* writeback and wait upon the filesystem's dirty inodes. The caller will
* do this in two passes - one to write, and one to wait.
*