author    Tejun Heo <tj@kernel.org>        2015-10-13 18:14:19 -0400
committer Jens Axboe <axboe@fb.com>        2015-10-21 08:17:29 -0600
commit    e27c5b9d23168cc2cb8fec147ae7ed1f7a2005c3 (patch)
tree      d7ed4b0682df716b85b14e09e3da6337b2ee9f1d /mm
parent    0dfc70c33409afc232ef0b9ec210535dfbf9bc61 (diff)
writeback: remove broken rbtree_postorder_for_each_entry_safe() usage in cgwb_bdi_destroy()
a20135ffbc44 ("writeback: don't drain bdi_writeback_congested on bdi destruction") added rbtree_postorder_for_each_entry_safe() which is used to remove all entries; however, according to Cody, the iterator isn't safe against operations which may rebalance the tree. Fix it by switching to repeatedly removing rb_first() until empty. Signed-off-by: Tejun Heo <tj@kernel.org> Reported-by: Cody P Schafer <dev@codyps.com> Fixes: a20135ffbc44 ("writeback: don't drain bdi_writeback_congested on bdi destruction") Link: http://lkml.kernel.org/g/1443997973-1700-1-git-send-email-dev@codyps.com Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'mm')
-rw-r--r--   mm/backing-dev.c   10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 9e841399041a..619984fc07ec 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -681,7 +681,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 {
 	struct radix_tree_iter iter;
-	struct bdi_writeback_congested *congested, *congested_n;
+	struct rb_node *rbn;
 	void **slot;
 
 	WARN_ON(test_bit(WB_registered, &bdi->wb.state));
@@ -691,9 +691,11 @@ static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
 		cgwb_kill(*slot);
 
-	rbtree_postorder_for_each_entry_safe(congested, congested_n,
-					&bdi->cgwb_congested_tree, rb_node) {
-		rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree);
+	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
+		struct bdi_writeback_congested *congested =
+			rb_entry(rbn, struct bdi_writeback_congested, rb_node);
+
+		rb_erase(rbn, &bdi->cgwb_congested_tree);
 		congested->bdi = NULL;	/* mark @congested unlinked */
 	}