From 0465337c5599bbe360cdcff452992a1a6b7ed2d4 Mon Sep 17 00:00:00 2001
From: Josef Bacik
Date: Tue, 30 Jun 2020 14:53:02 -0400
Subject: btrfs: reset tree root pointer after error in init_tree_roots

Eric reported an issue where mounting -o recovery with a fuzzed fs
resulted in a kernel panic. This is because we tried to free the tree
node, except it was an error pointer returned from the read. Fix this
by properly resetting tree_root->node to NULL in this case. The panic
was the following:

BTRFS warning (device loop0): failed to read tree root
BUG: kernel NULL pointer dereference, address: 000000000000001f
RIP: 0010:free_extent_buffer+0xe/0x90 [btrfs]
Call Trace:
 free_root_extent_buffers.part.0+0x11/0x30 [btrfs]
 free_root_pointers+0x1a/0xa2 [btrfs]
 open_ctree+0x1776/0x18a5 [btrfs]
 btrfs_mount_root.cold+0x13/0xfa [btrfs]
 ? selinux_fs_context_parse_param+0x37/0x80
 legacy_get_tree+0x27/0x40
 vfs_get_tree+0x25/0xb0
 fc_mount+0xe/0x30
 vfs_kern_mount.part.0+0x71/0x90
 btrfs_mount+0x147/0x3e0 [btrfs]
 ? cred_has_capability+0x7c/0x120
 ? legacy_get_tree+0x27/0x40
 legacy_get_tree+0x27/0x40
 vfs_get_tree+0x25/0xb0
 do_mount+0x735/0xa40
 __x64_sys_mount+0x8e/0xd0
 do_syscall_64+0x4d/0x90
 entry_SYSCALL_64_after_hwframe+0x44/0xa9

Nik says: this is problematic only if we fail on the last iteration of
the loop, as this results in init_tree_roots returning the error value
while tree_root->node is left set to the error pointer (-ERR).
Subsequently the caller does fail_tree_roots, which calls
free_root_pointers on the bogus value.

Reported-by: Eric Sandeen
Fixes: b8522a1e5f42 ("btrfs: Factor out tree roots initialization during mount")
CC: stable@vger.kernel.org # 5.5+
Reviewed-by: Nikolay Borisov
Signed-off-by: Josef Bacik
Reviewed-by: David Sterba
[ add details how the pointer gets dereferenced ]
Signed-off-by: David Sterba
---
 fs/btrfs/disk-io.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'fs/btrfs/disk-io.c')

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f8ec2d8606fd..3d4bb77680e7 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2595,10 +2595,12 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
 		    !extent_buffer_uptodate(tree_root->node)) {
 			handle_error = true;
 
-			if (IS_ERR(tree_root->node))
+			if (IS_ERR(tree_root->node)) {
 				ret = PTR_ERR(tree_root->node);
-			else if (!extent_buffer_uptodate(tree_root->node))
+				tree_root->node = NULL;
+			} else if (!extent_buffer_uptodate(tree_root->node)) {
 				ret = -EUCLEAN;
+			}
 
 			btrfs_warn(fs_info, "failed to read tree root");
 			continue;
--
cgit v1.2.3

From 21cf866145047f8bfecb38ec8d2fed64464c074f Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 1 Jul 2020 11:06:22 +0200
Subject: writeback: remove bdi->congested_fn

Except for pktdvd, the only places setting congested bits are file
systems that allocate their own backing_dev_info structures. And pktdvd
is a deprecated driver that isn't useful in stacked setups either. So
remove the dead congested_fn stacking infrastructure.
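[ Editor's illustration: a minimal user-space sketch of the callback
  stacking pattern this series deletes. The types and names below
  (stacked_dev, member_dev) are invented stand-ins, not kernel
  structures. Every stacked driver in the diff repeats some variant of
  this "poll each member device and OR the congestion bits together"
  loop; once the callback is gone, wb_congested() reads wb->congested
  directly. ]

  #include <stdio.h>

  #define WB_sync_congested   (1 << 0)
  #define WB_async_congested  (1 << 1)

  struct member_dev { int congested_bits; };

  struct stacked_dev {
      struct member_dev *members;
      int nr_members;
  };

  /* The shape of a typical congested_fn: OR together member state */
  static int stacked_congested(struct stacked_dev *d, int bdi_bits)
  {
      int ret = 0;

      for (int i = 0; i < d->nr_members; i++)
          ret |= d->members[i].congested_bits & bdi_bits;
      return ret;
  }

  int main(void)
  {
      struct member_dev m[2] = { { WB_async_congested }, { 0 } };
      struct stacked_dev d = { m, 2 };

      printf("congested bits: %d\n",
             stacked_congested(&d, WB_async_congested));
      return 0;
  }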
Signed-off-by: Christoph Hellwig
Acked-by: Song Liu
Acked-by: David Sterba
[axboe: fixup unused variables in bcache/request.c]
Signed-off-by: Jens Axboe
---
 drivers/block/drbd/drbd_main.c   | 59 ----------------------------------
 drivers/md/bcache/request.c      | 47 ---------------------------
 drivers/md/bcache/super.c        |  1 -
 drivers/md/dm-cache-target.c     | 19 ------------
 drivers/md/dm-clone-target.c     | 15 ---------
 drivers/md/dm-era-target.c       | 15 ---------
 drivers/md/dm-raid.c             | 12 --------
 drivers/md/dm-table.c            | 37 +---------------------
 drivers/md/dm-thin.c             | 16 ----------
 drivers/md/dm.c                  | 33 --------------------
 drivers/md/dm.h                  |  1 -
 drivers/md/md-linear.c           | 24 ---------------
 drivers/md/md-multipath.c        | 23 --------------
 drivers/md/md.c                  | 23 --------------
 drivers/md/md.h                  |  4 ---
 drivers/md/raid0.c               | 16 ----------
 drivers/md/raid1.c               | 31 -------------------
 drivers/md/raid10.c              | 26 ----------------
 drivers/md/raid5.c               | 25 ----------------
 fs/btrfs/disk-io.c               | 23 --------------
 include/linux/backing-dev-defs.h |  4 ---
 include/linux/backing-dev.h      |  4 ---
 include/linux/device-mapper.h    | 11 -------
 23 files changed, 1 insertion(+), 468 deletions(-)

(limited to 'fs/btrfs/disk-io.c')

diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 7c34cc0ad8cc..cb687ccdbd96 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2415,62 +2415,6 @@ static void drbd_cleanup(void)
 	pr_info("module cleanup done.\n");
 }
 
-/**
- * drbd_congested() - Callback for the flusher thread
- * @congested_data:	User data
- * @bdi_bits:		Bits the BDI flusher thread is currently interested in
- *
- * Returns 1<<WB_async_congested and/or 1<<WB_sync_congested if we are congested.
- */
-static int drbd_congested(void *congested_data, int bdi_bits)
-{
-	struct drbd_device *device = congested_data;
-	struct request_queue *q;
-	char reason = '-';
-	int r = 0;
-
-	if (!may_inc_ap_bio(device)) {
-		/* DRBD has frozen IO */
-		r = bdi_bits;
-		reason = 'd';
-		goto out;
-	}
-
-	if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
-		r |= (1 << WB_async_congested);
-		/* Without good local data, we would need to read from remote,
-		 * and that would need the worker thread as well, which is
-		 * currently blocked waiting for that usermode helper to
-		 * finish.
-		 */
-		if (!get_ldev_if_state(device, D_UP_TO_DATE))
-			r |= (1 << WB_sync_congested);
-		else
-			put_ldev(device);
-		r &= bdi_bits;
-		reason = 'c';
-		goto out;
-	}
-
-	if (get_ldev(device)) {
-		q = bdev_get_queue(device->ldev->backing_bdev);
-		r = bdi_congested(q->backing_dev_info, bdi_bits);
-		put_ldev(device);
-		if (r)
-			reason = 'b';
-	}
-
-	if (bdi_bits & (1 << WB_async_congested) &&
-	    test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
-		r |= (1 << WB_async_congested);
-		reason = reason == 'b' ? 'a' : 'n';
-	}
-
-out:
-	device->congestion_reason = reason;
-	return r;
-}
-
 static void drbd_init_workqueue(struct drbd_work_queue* wq)
 {
 	spin_lock_init(&wq->q_lock);
@@ -2825,9 +2769,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	/* we have no partitions. we contain only ourselves. */
 	device->this_bdev->bd_contains = device->this_bdev;
 
-	q->backing_dev_info->congested_fn = drbd_congested;
-	q->backing_dev_info->congested_data = device;
-
 	blk_queue_write_cache(q, true, true);
 	/* Setting the max_hw_sectors to an odd value of 8kibyte here
 	   This triggers a max_bio_size message upon first attach or connect */
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index dd012ebface0..a190bf47076d 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1228,36 +1228,8 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
 	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
 }
 
-static int cached_dev_congested(void *data, int bits)
-{
-	struct bcache_device *d = data;
-	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
-	struct request_queue *q = bdev_get_queue(dc->bdev);
-	int ret = 0;
-
-	if (bdi_congested(q->backing_dev_info, bits))
-		return 1;
-
-	if (cached_dev_get(dc)) {
-		unsigned int i;
-		struct cache *ca;
-
-		for_each_cache(ca, d->c, i) {
-			q = bdev_get_queue(ca->bdev);
-			ret |= bdi_congested(q->backing_dev_info, bits);
-		}
-
-		cached_dev_put(dc);
-	}
-
-	return ret;
-}
-
 void bch_cached_dev_request_init(struct cached_dev *dc)
 {
-	struct gendisk *g = dc->disk.disk;
-
-	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
 	dc->disk.cache_miss = cached_dev_cache_miss;
 	dc->disk.ioctl = cached_dev_ioctl;
 }
@@ -1341,27 +1313,8 @@ static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
 	return -ENOTTY;
 }
 
-static int flash_dev_congested(void *data, int bits)
-{
-	struct bcache_device *d = data;
-	struct request_queue *q;
-	struct cache *ca;
-	unsigned int i;
-	int ret = 0;
-
-	for_each_cache(ca, d->c, i) {
-		q = bdev_get_queue(ca->bdev);
-		ret |= bdi_congested(q->backing_dev_info, bits);
-	}
-
-	return ret;
-}
-
 void bch_flash_dev_request_init(struct bcache_device *d)
 {
-	struct gendisk *g = d->disk;
-
-	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
 	d->cache_miss = flash_dev_cache_miss;
 	d->ioctl = flash_dev_ioctl;
 }
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index de13f6e91696..9e45faa054b6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -885,7 +885,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
 		return -ENOMEM;
 
 	d->disk->queue = q;
-	q->backing_dev_info->congested_data = d;
 	q->limits.max_hw_sectors	= UINT_MAX;
 	q->limits.max_sectors		= UINT_MAX;
 	q->limits.max_segment_size	= UINT_MAX;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9eccced92896..96c93802ee4d 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -421,8 +421,6 @@ struct cache {
 	struct rw_semaphore quiesce_lock;
 
-	struct dm_target_callbacks callbacks;
-
 	/*
 	 * origin_blocks entries, discarded if set.
*/ @@ -2423,20 +2421,6 @@ static void set_cache_size(struct cache *cache, dm_cblock_t size) cache->cache_size = size; } -static int is_congested(struct dm_dev *dev, int bdi_bits) -{ - struct request_queue *q = bdev_get_queue(dev->bdev); - return bdi_congested(q->backing_dev_info, bdi_bits); -} - -static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits) -{ - struct cache *cache = container_of(cb, struct cache, callbacks); - - return is_congested(cache->origin_dev, bdi_bits) || - is_congested(cache->cache_dev, bdi_bits); -} - #define DEFAULT_MIGRATION_THRESHOLD 2048 static int cache_create(struct cache_args *ca, struct cache **result) @@ -2471,9 +2455,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) goto bad; } - cache->callbacks.congested_fn = cache_is_congested; - dm_table_add_target_callbacks(ti->table, &cache->callbacks); - cache->metadata_dev = ca->metadata_dev; cache->origin_dev = ca->origin_dev; cache->cache_dev = ca->cache_dev; diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index 59ed8a67c2e3..bdb255edc200 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -68,7 +68,6 @@ struct hash_table_bucket; struct clone { struct dm_target *ti; - struct dm_target_callbacks callbacks; struct dm_dev *metadata_dev; struct dm_dev *dest_dev; @@ -1518,18 +1517,6 @@ error: DMEMIT("Error"); } -static int clone_is_congested(struct dm_target_callbacks *cb, int bdi_bits) -{ - struct request_queue *dest_q, *source_q; - struct clone *clone = container_of(cb, struct clone, callbacks); - - source_q = bdev_get_queue(clone->source_dev->bdev); - dest_q = bdev_get_queue(clone->dest_dev->bdev); - - return (bdi_congested(dest_q->backing_dev_info, bdi_bits) | - bdi_congested(source_q->backing_dev_info, bdi_bits)); -} - static sector_t get_dev_size(struct dm_dev *dev) { return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT; @@ -1930,8 +1917,6 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto out_with_mempool; mutex_init(&clone->commit_lock); - clone->callbacks.congested_fn = clone_is_congested; - dm_table_add_target_callbacks(ti->table, &clone->callbacks); /* Enable flushes */ ti->num_flush_bios = 1; diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index 566ddbdb16a4..b24e3839bb3a 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1137,7 +1137,6 @@ static int metadata_get_stats(struct era_metadata *md, void *ptr) struct era { struct dm_target *ti; - struct dm_target_callbacks callbacks; struct dm_dev *metadata_dev; struct dm_dev *origin_dev; @@ -1375,18 +1374,6 @@ static void stop_worker(struct era *era) /*---------------------------------------------------------------- * Target methods *--------------------------------------------------------------*/ -static int dev_is_congested(struct dm_dev *dev, int bdi_bits) -{ - struct request_queue *q = bdev_get_queue(dev->bdev); - return bdi_congested(q->backing_dev_info, bdi_bits); -} - -static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits) -{ - struct era *era = container_of(cb, struct era, callbacks); - return dev_is_congested(era->origin_dev, bdi_bits); -} - static void era_destroy(struct era *era) { if (era->md) @@ -1514,8 +1501,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv) ti->flush_supported = true; ti->num_discard_bios = 1; - era->callbacks.congested_fn = era_is_congested; - dm_table_add_target_callbacks(ti->table, &era->callbacks); return 0; } 
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 10e8b2fe787b..d9e270957e18 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -242,7 +242,6 @@ struct raid_set {
 
 	struct mddev md;
 	struct raid_type *raid_type;
-	struct dm_target_callbacks callbacks;
 
 	sector_t array_sectors;
 	sector_t dev_sectors;
@@ -1705,13 +1704,6 @@ static void do_table_event(struct work_struct *ws)
 	dm_table_event(rs->ti->table);
 }
 
-static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
-{
-	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
-
-	return mddev_congested(&rs->md, bits);
-}
-
 /*
  * Make sure a valid takover (level switch) is being requested on @rs
 *
@@ -3248,9 +3240,6 @@ size_check:
 		goto bad_md_start;
 	}
 
-	rs->callbacks.congested_fn = raid_is_congested;
-	dm_table_add_target_callbacks(ti->table, &rs->callbacks);
-
 	/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
 	if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
 		r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
@@ -3310,7 +3299,6 @@ static void raid_dtr(struct dm_target *ti)
 {
 	struct raid_set *rs = ti->private;
 
-	list_del_init(&rs->callbacks.list);
 	md_stop(&rs->md);
 	raid_set_free(rs);
 }
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8277b959e00b..0ea5b7367179 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -64,8 +64,6 @@ struct dm_table {
 	void *event_context;
 
 	struct dm_md_mempools *mempools;
-
-	struct list_head target_callbacks;
 };
 
 /*
@@ -190,7 +188,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&t->devices);
-	INIT_LIST_HEAD(&t->target_callbacks);
 
 	if (!num_targets)
 		num_targets = KEYS_PER_NODE;
@@ -361,7 +358,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
- * it is accessed concurrently inside dm_table_any_congested().
+ * it is accessed concurrently.
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
			struct mapped_device *md)
@@ -2052,38 +2049,6 @@ int dm_table_resume_targets(struct dm_table *t)
 	return 0;
 }
 
-void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
-{
-	list_add(&cb->list, &t->target_callbacks);
-}
-EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
-
-int dm_table_any_congested(struct dm_table *t, int bdi_bits)
-{
-	struct dm_dev_internal *dd;
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_target_callbacks *cb;
-	int r = 0;
-
-	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
-		char b[BDEVNAME_SIZE];
-
-		if (likely(q))
-			r |= bdi_congested(q->backing_dev_info, bdi_bits);
-		else
-			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
-				     dm_device_name(t->md),
-				     bdevname(dd->dm_dev->bdev, b));
-	}
-
-	list_for_each_entry(cb, &t->target_callbacks, list)
-		if (cb->congested_fn)
-			r |= cb->congested_fn(cb, bdi_bits);
-
-	return r;
-}
-
 struct mapped_device *dm_table_get_md(struct dm_table *t)
 {
 	return t->md;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index fe2de2888709..fff4c50df74d 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -326,7 +326,6 @@ struct pool_c {
 	struct pool *pool;
 	struct dm_dev *data_dev;
 	struct dm_dev *metadata_dev;
-	struct dm_target_callbacks callbacks;
 
 	dm_block_t low_water_blocks;
 	struct pool_features requested_pf; /* Features requested during table load */
@@ -2796,18 +2795,6 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 	}
 }
 
-static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
-	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
-	struct request_queue *q;
-
-	if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
-		return 1;
-
-	q = bdev_get_queue(pt->data_dev->bdev);
-	return bdi_congested(q->backing_dev_info, bdi_bits);
-}
-
 static void requeue_bios(struct pool *pool)
 {
 	struct thin_c *tc;
@@ -3420,9 +3407,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	dm_pool_register_pre_commit_callback(pool->pmd,
 					     metadata_pre_commit_callback, pool);
 
-	pt->callbacks.congested_fn = pool_is_congested;
-	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
-
 	mutex_unlock(&dm_thin_pool_table.mutex);
 
 	return 0;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 48bfd41658aa..e2148fcb88bb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1821,31 +1821,6 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
 	return ret;
 }
 
-static int dm_any_congested(void *congested_data, int bdi_bits)
-{
-	int r = bdi_bits;
-	struct mapped_device *md = congested_data;
-	struct dm_table *map;
-
-	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
-		if (dm_request_based(md)) {
-			/*
-			 * With request-based DM we only need to check the
-			 * top-level queue for congestion.
-			 */
-			struct backing_dev_info *bdi = md->queue->backing_dev_info;
-			r = bdi->wb.congested & bdi_bits;
-		} else {
-			map = dm_get_live_table_fast(md);
-			if (map)
-				r = dm_table_any_congested(map, bdi_bits);
-			dm_put_live_table_fast(md);
-		}
-	}
-
-	return r;
-}
-
 /*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
@@ -2284,12 +2259,6 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
 }
 EXPORT_SYMBOL_GPL(dm_get_queue_limits);
 
-static void dm_init_congested_fn(struct mapped_device *md)
-{
-	md->queue->backing_dev_info->congested_data = md;
-	md->queue->backing_dev_info->congested_fn = dm_any_congested;
-}
-
 /*
 * Setup the DM device's queue based on md's type
 */
@@ -2306,12 +2275,10 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
 			return r;
 		}
-		dm_init_congested_fn(md);
 		break;
 	case DM_TYPE_BIO_BASED:
 	case DM_TYPE_DAX_BIO_BASED:
 	case DM_TYPE_NVME_BIO_BASED:
-		dm_init_congested_fn(md);
 		break;
 	case DM_TYPE_NONE:
 		WARN_ON_ONCE(true);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index d7c4f6606b5f..4f5fe664d05a 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -63,7 +63,6 @@ void dm_table_presuspend_targets(struct dm_table *t);
 void dm_table_presuspend_undo_targets(struct dm_table *t);
 void dm_table_postsuspend_targets(struct dm_table *t);
 int dm_table_resume_targets(struct dm_table *t);
-int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 enum dm_queue_mode dm_table_get_type(struct dm_table *t);
 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
 struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 8efada3ee16f..c2ae9125c4c3 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -46,29 +46,6 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
 	return conf->disks + lo;
 }
 
-/*
- * In linear_congested() conf->raid_disks is used as a copy of
- * mddev->raid_disks to iterate conf->disks[], because conf->raid_disks
- * and conf->disks[] are created in linear_conf(), they are always
- * consitent with each other, but mddev->raid_disks does not.
- */
-static int linear_congested(struct mddev *mddev, int bits)
-{
-	struct linear_conf *conf;
-	int i, ret = 0;
-
-	rcu_read_lock();
-	conf = rcu_dereference(mddev->private);
-
-	for (i = 0; i < conf->raid_disks && !ret ; i++) {
-		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
-		ret |= bdi_congested(q->backing_dev_info, bits);
-	}
-
-	rcu_read_unlock();
-	return ret;
-}
-
 static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
 	struct linear_conf *conf;
@@ -322,7 +299,6 @@ static struct md_personality linear_personality =
 	.hot_add_disk	= linear_add,
 	.size		= linear_size,
 	.quiesce	= linear_quiesce,
-	.congested	= linear_congested,
 };
 
 static int __init linear_init (void)
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index 277fdfd9ee54..776bbe542db5 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -151,28 +151,6 @@ static void multipath_status(struct seq_file *seq, struct mddev *mddev)
 	seq_putc(seq, ']');
 }
 
-static int multipath_congested(struct mddev *mddev, int bits)
-{
-	struct mpconf *conf = mddev->private;
-	int i, ret = 0;
-
-	rcu_read_lock();
-	for (i = 0; i < mddev->raid_disks ; i++) {
-		struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			struct request_queue *q = bdev_get_queue(rdev->bdev);
-
-			ret |= bdi_congested(q->backing_dev_info, bits);
-			/* Just like multipath_map, we just check the
-			 * first available device
-			 */
-			break;
-		}
-	}
-	rcu_read_unlock();
-	return ret;
-}
-
 /*
 * Careful, this can execute in IRQ contexts as well!
 */
@@ -478,7 +456,6 @@ static struct md_personality multipath_personality =
 	.hot_add_disk	= multipath_add_disk,
 	.hot_remove_disk= multipath_remove_disk,
 	.size		= multipath_size,
-	.congested	= multipath_congested,
 };
 
 static int __init multipath_init (void)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 77dfe4765c31..96b28f6d025c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -549,26 +549,6 @@ void mddev_resume(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(mddev_resume);
 
-int mddev_congested(struct mddev *mddev, int bits)
-{
-	struct md_personality *pers = mddev->pers;
-	int ret = 0;
-
-	rcu_read_lock();
-	if (mddev->suspended)
-		ret = 1;
-	else if (pers && pers->congested)
-		ret = pers->congested(mddev, bits);
-	rcu_read_unlock();
-	return ret;
-}
-EXPORT_SYMBOL_GPL(mddev_congested);
-static int md_congested(void *data, int bits)
-{
-	struct mddev *mddev = data;
-	return mddev_congested(mddev, bits);
-}
-
 /*
 * Generic flush handling for md
 */
@@ -5965,8 +5945,6 @@ int md_run(struct mddev *mddev)
 			blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
 		else
 			blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
-		mddev->queue->backing_dev_info->congested_data = mddev;
-		mddev->queue->backing_dev_info->congested_fn = md_congested;
 	}
 	if (pers->sync_request) {
 		if (mddev->kobj.sd &&
@@ -6351,7 +6329,6 @@ static int do_md_stop(struct mddev *mddev, int mode,
 
 		__md_stop_writes(mddev);
 		__md_stop(mddev);
-		mddev->queue->backing_dev_info->congested_fn = NULL;
 
 		/* tell userspace to handle 'inactive' */
 		sysfs_notify_dirent_safe(mddev->sysfs_state);
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 612814d07d35..e2f1ad9afc48 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -597,9 +597,6 @@ struct md_personality
 	 * array.
 	 */
 	void *(*takeover) (struct mddev *mddev);
-	/* congested implements bdi.congested_fn().
-	 * Will not be called while array is 'suspended' */
-	int (*congested)(struct mddev *mddev, int bits);
 	/* Changes the consistency policy of an active array. */
 	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
 };
@@ -710,7 +707,6 @@ extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
 extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
 extern void md_finish_reshape(struct mddev *mddev);
 
-extern int mddev_congested(struct mddev *mddev, int bits);
 extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
 extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 			   sector_t sector, int size, struct page *page);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index e9e91c8d8afc..f54a449f97aa 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -29,21 +29,6 @@ module_param(default_layout, int, 0644);
 	 (1L << MD_HAS_PPL) |		\
 	 (1L << MD_HAS_MULTIPLE_PPLS))
 
-static int raid0_congested(struct mddev *mddev, int bits)
-{
-	struct r0conf *conf = mddev->private;
-	struct md_rdev **devlist = conf->devlist;
-	int raid_disks = conf->strip_zone[0].nb_dev;
-	int i, ret = 0;
-
-	for (i = 0; i < raid_disks && !ret ; i++) {
-		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
-
-		ret |= bdi_congested(q->backing_dev_info, bits);
-	}
-	return ret;
-}
-
 /*
 * inform the user of the raid configuration
 */
@@ -818,7 +803,6 @@ static struct md_personality raid0_personality=
 	.size		= raid0_size,
 	.takeover	= raid0_takeover,
 	.quiesce	= raid0_quiesce,
-	.congested	= raid0_congested,
 };
 
 static int __init raid0_init (void)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 2aa2649cca66..960d854c07f8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -786,36 +786,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 	return best_disk;
 }
 
-static int raid1_congested(struct mddev *mddev, int bits)
-{
-	struct r1conf *conf = mddev->private;
-	int i, ret = 0;
-
-	if ((bits & (1 << WB_async_congested)) &&
-	    conf->pending_count >= max_queued_requests)
-		return 1;
-
-	rcu_read_lock();
-	for (i = 0; i < conf->raid_disks * 2; i++) {
-		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			struct request_queue *q = bdev_get_queue(rdev->bdev);
-
-			BUG_ON(!q);
-
-			/* Note the '|| 1' - when read_balance prefers
-			 * non-congested targets, it can be removed
-			 */
-			if ((bits & (1 << WB_async_congested)) || 1)
-				ret |= bdi_congested(q->backing_dev_info, bits);
-			else
-				ret &= bdi_congested(q->backing_dev_info, bits);
-		}
-	}
-	rcu_read_unlock();
-	return ret;
-}
-
 static void flush_bio_list(struct r1conf *conf, struct bio *bio)
 {
 	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
@@ -3396,7 +3366,6 @@ static struct md_personality raid1_personality =
 	.check_reshape	= raid1_reshape,
 	.quiesce	= raid1_quiesce,
 	.takeover	= raid1_takeover,
-	.congested	= raid1_congested,
 };
 
 static int __init raid_init(void)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e45fd56cf584..353288bc4cb7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -848,31 +848,6 @@ static struct md_rdev *read_balance(struct r10conf *conf,
 	return rdev;
 }
 
-static int raid10_congested(struct mddev *mddev, int bits)
-{
-	struct r10conf *conf = mddev->private;
-	int i, ret = 0;
-
-	if ((bits & (1 << WB_async_congested)) &&
-	    conf->pending_count >= max_queued_requests)
-		return 1;
-
-	rcu_read_lock();
-	for (i = 0;
-	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
-		     && ret == 0;
-	     i++) {
-		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			struct request_queue *q = bdev_get_queue(rdev->bdev);
-
-			ret |= bdi_congested(q->backing_dev_info, bits);
-		}
-	}
-	rcu_read_unlock();
-	return ret;
-}
-
 static void flush_pending_writes(struct r10conf *conf)
 {
 	/* Any writes that have been queued but are awaiting
@@ -4929,7 +4904,6 @@ static struct md_personality raid10_personality =
 	.start_reshape	= raid10_start_reshape,
 	.finish_reshape	= raid10_finish_reshape,
 	.update_reshape_pos = raid10_update_reshape_pos,
-	.congested	= raid10_congested,
 };
 
 static int __init raid_init(void)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 8dea4398b191..774ea893d47e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5099,28 +5099,6 @@ static void activate_bit_delay(struct r5conf *conf,
 	}
 }
 
-static int raid5_congested(struct mddev *mddev, int bits)
-{
-	struct r5conf *conf = mddev->private;
-
-	/* No difference between reads and writes.  Just check
-	 * how busy the stripe_cache is
-	 */
-
-	if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
-		return 1;
-
-	/* Also checks whether there is pressure on r5cache log space */
-	if (test_bit(R5C_LOG_TIGHT, &conf->cache_state))
-		return 1;
-	if (conf->quiesce)
-		return 1;
-	if (atomic_read(&conf->empty_inactive_list_nr))
-		return 1;
-
-	return 0;
-}
-
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
 	struct r5conf *conf = mddev->private;
@@ -8427,7 +8405,6 @@ static struct md_personality raid6_personality =
 	.finish_reshape	= raid5_finish_reshape,
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid6_takeover,
-	.congested	= raid5_congested,
 	.change_consistency_policy = raid5_change_consistency_policy,
 };
 static struct md_personality raid5_personality =
@@ -8452,7 +8429,6 @@ static struct md_personality raid5_personality =
 	.finish_reshape	= raid5_finish_reshape,
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid5_takeover,
-	.congested	= raid5_congested,
 	.change_consistency_policy = raid5_change_consistency_policy,
 };
 
@@ -8478,7 +8454,6 @@ static struct md_personality raid4_personality =
 	.finish_reshape	= raid5_finish_reshape,
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid4_takeover,
-	.congested	= raid5_congested,
 	.change_consistency_policy = raid5_change_consistency_policy,
 };
 
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 7c6f0bbb54a5..eb5f2506cede 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1616,27 +1616,6 @@ fail:
 	return ERR_PTR(ret);
 }
 
-static int btrfs_congested_fn(void *congested_data, int bdi_bits)
-{
-	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
-	int ret = 0;
-	struct btrfs_device *device;
-	struct backing_dev_info *bdi;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
-		if (!device->bdev)
-			continue;
-		bdi = device->bdev->bd_bdi;
-		if (bdi_congested(bdi, bdi_bits)) {
-			ret = 1;
-			break;
-		}
-	}
-	rcu_read_unlock();
-	return ret;
-}
-
 /*
 * called by the kthread helper functions to finally call the bio end_io
This is where read checksum verification actually happens @@ -3051,8 +3030,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device goto fail_sb_buffer; } - sb->s_bdi->congested_fn = btrfs_congested_fn; - sb->s_bdi->congested_data = fs_info; sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK; sb->s_bdi->ra_pages = VM_READAHEAD_PAGES; sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 1cec4521e1fb..fff9367a6348 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -33,8 +33,6 @@ enum wb_congested_state { WB_sync_congested, /* The sync queue is getting full */ }; -typedef int (congested_fn)(void *, int); - enum wb_stat_item { WB_RECLAIMABLE, WB_WRITEBACK, @@ -170,8 +168,6 @@ struct backing_dev_info { struct list_head bdi_list; unsigned long ra_pages; /* max readahead in PAGE_SIZE units */ unsigned long io_pages; /* max allowed IO size */ - congested_fn *congested_fn; /* Function pointer if device is md/dm */ - void *congested_data; /* Pointer to aux data for congested func */ struct kref refcnt; /* Reference counter for the structure */ unsigned int capabilities; /* Device capabilities */ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 9173d2c22b4a..0b06b2d26c9a 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -169,10 +169,6 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) { - struct backing_dev_info *bdi = wb->bdi; - - if (bdi->congested_fn) - return bdi->congested_fn(bdi->congested_data, cong_bits); return wb->congested & cong_bits; } diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 8750f2dc5613..d5306d9c29c4 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -322,12 +322,6 @@ struct dm_target { bool discards_supported:1; }; -/* Each target can link one of these into the table */ -struct dm_target_callbacks { - struct list_head list; - int (*congested_fn) (struct dm_target_callbacks *, int); -}; - void *dm_per_bio_data(struct bio *bio, size_t data_size); struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size); unsigned dm_bio_get_target_bio_nr(const struct bio *bio); @@ -477,11 +471,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode, int dm_table_add_target(struct dm_table *t, const char *type, sector_t start, sector_t len, char *params); -/* - * Target_ctr should call this if it needs to add any callbacks. - */ -void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb); - /* * Target can use this to set the table's type. * Can only ever be called from a target's ctr. -- cgit v1.2.3 From 5958253cf65de42493f17f36877a901486a90365 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Wed, 10 Jun 2020 09:04:44 +0800 Subject: btrfs: qgroup: catch reserved space leaks at unmount time Before this patch, qgroup completely relies on per-inode extent io tree to detect reserved data space leak. However previous bug has already shown how release page before btrfs_finish_ordered_io() could lead to leak, and since it's QGROUP_RESERVED bit cleared without triggering qgroup rsv, it can't be detected by per-inode extent io tree. So this patch adds another (and hopefully the final) safety net to catch qgroup data reserved space leak. 
Reviewed-by: Josef Bacik
Signed-off-by: Qu Wenruo
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/disk-io.c |  5 +++++
 fs/btrfs/qgroup.c  | 43 +++++++++++++++++++++++++++++++++++++++++++
 fs/btrfs/qgroup.h  |  1 +
 3 files changed, 49 insertions(+)

(limited to 'fs/btrfs/disk-io.c')

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b1a148058773..a9cf6152d175 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4058,6 +4058,11 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
 	ASSERT(list_empty(&fs_info->delayed_iputs));
 	set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags);
 
+	if (btrfs_check_quota_leak(fs_info)) {
+		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+		btrfs_err(fs_info, "qgroup reserved space leaked");
+	}
+
 	btrfs_free_qgroup_config(fs_info);
 	ASSERT(list_empty(&fs_info->delalloc_roots));
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5bd4089ad0e1..74eb98479109 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -505,6 +505,49 @@ out:
 	return ret < 0 ? ret : 0;
 }
 
+static u64 btrfs_qgroup_subvolid(u64 qgroupid)
+{
+	return (qgroupid & ((1ULL << BTRFS_QGROUP_LEVEL_SHIFT) - 1));
+}
+
+/*
+ * Called in close_ctree() when quota is still enabled.  This verifies we don't
+ * leak some reserved space.
+ *
+ * Return false if no reserved space is left.
+ * Return true if some reserved space is leaked.
+ */
+bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
+{
+	struct rb_node *node;
+	bool ret = false;
+
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+		return ret;
+	/*
+	 * Since we're unmounting, there is no race and no need to grab qgroup
+	 * lock.  And here we don't go post-order to provide a more user
+	 * friendly sorted result.
+	 */
+	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
+		struct btrfs_qgroup *qgroup;
+		int i;
+
+		qgroup = rb_entry(node, struct btrfs_qgroup, node);
+		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
+			if (qgroup->rsv.values[i]) {
+				ret = true;
+				btrfs_warn(fs_info,
+		"qgroup %llu/%llu has unreleased space, type %d rsv %llu",
+					   btrfs_qgroup_level(qgroup->qgroupid),
+					   btrfs_qgroup_subvolid(qgroup->qgroupid),
+					   i, qgroup->rsv.values[i]);
+			}
+		}
+	}
+	return ret;
+}
+
 /*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
 * first two are in single-threaded paths.And for the third one, we have set
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 1bc654459469..3be5198a3719 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -415,5 +415,6 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
 int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
 		struct btrfs_root *root, struct extent_buffer *eb);
 void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
+bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info);
 
 #endif
--
cgit v1.2.3

From 923eb5236597c90b11114926025e0c04e4c1da32 Mon Sep 17 00:00:00 2001
From: Johannes Thumshirn
Date: Tue, 23 Jun 2020 17:40:07 +0900
Subject: btrfs: use free_root_extent_buffer to free root

In btrfs_put_root() we're freeing a btrfs_root's 'node' and
'commit_root' extent buffers manually via free_extent_buffer(), while
free_root_pointers() above uses the free_root_extent_buffers() helper.
free_root_extent_buffers() also NULLs the pointers after freeing, which
mitigates potential double frees.
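[ Editor's illustration: why a free-and-NULL helper is safer than
  open-coded frees, as a stand-alone user-space sketch. The names
  (buffer, free_buffer_safe) are invented stand-ins, not the btrfs
  helpers themselves. ]

  #include <stdlib.h>

  struct buffer { char data[64]; };

  /* Free the buffer and clear the caller's pointer in one step */
  static void free_buffer_safe(struct buffer **buf)
  {
      free(*buf);
      *buf = NULL;    /* a later call on the same slot is a no-op */
  }

  int main(void)
  {
      struct buffer *node = malloc(sizeof(*node));

      free_buffer_safe(&node);
      free_buffer_safe(&node);    /* safe: free(NULL) does nothing */
      return 0;
  }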
Reviewed-by: Anand Jain
Signed-off-by: Johannes Thumshirn
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/disk-io.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'fs/btrfs/disk-io.c')

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a9cf6152d175..b5b7055b7953 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2001,8 +2001,7 @@ void btrfs_put_root(struct btrfs_root *root)
 		if (root->anon_dev)
 			free_anon_bdev(root->anon_dev);
 		btrfs_drew_lock_destroy(&root->snapshot_lock);
-		free_extent_buffer(root->node);
-		free_extent_buffer(root->commit_root);
+		free_root_extent_buffers(root);
 		kfree(root->free_ino_ctl);
 		kfree(root->free_ino_pinned);
 #ifdef CONFIG_BTRFS_DEBUG
--
cgit v1.2.3

From a2570ef330b959eb37e3a437b9884d9cc4b6a3d9 Mon Sep 17 00:00:00 2001
From: David Sterba
Date: Tue, 23 Jun 2020 21:23:54 +0200
Subject: btrfs: remove unused btrfs_root::defrag_trans_start

Last touched in 2013 by commit de78b51a2852 ("btrfs: remove cache only
arguments from defrag path"), which removed the only code that used the
value. Now it's only set but never used for anything, so we can remove
it.

Reviewed-by: Nikolay Borisov
Reviewed-by: Anand Jain
Signed-off-by: David Sterba
---
 fs/btrfs/ctree.h       | 1 -
 fs/btrfs/disk-io.c     | 4 ----
 fs/btrfs/tree-defrag.c | 5 ++---
 3 files changed, 2 insertions(+), 8 deletions(-)

(limited to 'fs/btrfs/disk-io.c')

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index a9cf11f2e8c7..033e411dd811 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1075,7 +1075,6 @@ struct btrfs_root {
 
 	u64 highest_objectid;
 
-	u64 defrag_trans_start;
 	struct btrfs_key defrag_progress;
 	struct btrfs_key defrag_max;
 
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b5b7055b7953..dbf90cd49513 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1141,10 +1141,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 	memset(&root->root_key, 0, sizeof(root->root_key));
 	memset(&root->root_item, 0, sizeof(root->root_item));
 	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
-	if (!dummy)
-		root->defrag_trans_start = fs_info->generation;
-	else
-		root->defrag_trans_start = 0;
 	root->root_key.objectid = objectid;
 	root->anon_dev = 0;
 
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index 16c3a6d2586d..d3f28b8f4ff9 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -133,10 +133,9 @@ out:
 		ret = 0;
 	}
 done:
-	if (ret != -EAGAIN) {
+	if (ret != -EAGAIN)
 		memset(&root->defrag_progress, 0,
 		       sizeof(root->defrag_progress));
-		root->defrag_trans_start = trans->transid;
-	}
+
 	return ret;
 }
--
cgit v1.2.3

From 851fd730a743e072badaf67caf39883e32439431 Mon Sep 17 00:00:00 2001
From: Qu Wenruo
Date: Tue, 16 Jun 2020 10:17:34 +0800
Subject: btrfs: don't allocate anonymous block device for user invisible roots

[BUG]
When a lot of subvolumes are created, there is a user report about
transactions being aborted:

BTRFS: Transaction aborted (error -24)
WARNING: CPU: 17 PID: 17041 at fs/btrfs/transaction.c:1576 create_pending_snapshot+0xbc4/0xd10 [btrfs]
RIP: 0010:create_pending_snapshot+0xbc4/0xd10 [btrfs]
Call Trace:
 create_pending_snapshots+0x82/0xa0 [btrfs]
 btrfs_commit_transaction+0x275/0x8c0 [btrfs]
 btrfs_mksubvol+0x4b9/0x500 [btrfs]
 btrfs_ioctl_snap_create_transid+0x174/0x180 [btrfs]
 btrfs_ioctl_snap_create_v2+0x11c/0x180 [btrfs]
 btrfs_ioctl+0x11a4/0x2da0 [btrfs]
 do_vfs_ioctl+0xa9/0x640
 ksys_ioctl+0x67/0x90
 __x64_sys_ioctl+0x1a/0x20
 do_syscall_64+0x5a/0x110
 entry_SYSCALL_64_after_hwframe+0x44/0xa9
---[ end trace 33f2f83f3d5250e9 ]---
BTRFS: error (device sda1) in create_pending_snapshot:1576: errno=-24 unknown
BTRFS info (device sda1): forced readonly
BTRFS warning (device sda1): Skipping commit of aborted transaction.
BTRFS: error (device sda1) in cleanup_transaction:1831: errno=-24 unknown

[CAUSE]
The error is EMFILE (Too many files open) and comes from the anonymous
block device allocation. The ids are in a shared pool of size 1<<20.

The ids are assigned to live subvolumes, ie. the root structure exists
in memory (eg. after creation or after the root appears in some path).
The pool could be exhausted if the numbers are not reclaimed fast
enough, after subvolume deletion, or if some other system component
uses the anon block devices.

[WORKAROUND]
Since it's not possible to completely solve the problem, we can only
minimize the time the ids are allocated to subvolume roots.

Firstly, we can reduce the use of anon_dev by trees that are not
subvolume roots, like the data reloc tree. This patch adds an extra
check on the root objectid to skip roots that don't need an anon_dev.
Currently that covers the data reloc tree and orphan roots.

Reported-by: Greed Rong
Link: https://lore.kernel.org/linux-btrfs/CA+UqX+NTrZ6boGnWHhSeZmEY5J76CTqmYjO2S+=tHJX7nb9DPw@mail.gmail.com/
CC: stable@vger.kernel.org # 4.4+
Reviewed-by: Josef Bacik
Signed-off-by: Qu Wenruo
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/disk-io.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

(limited to 'fs/btrfs/disk-io.c')

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index dbf90cd49513..c90edf04a9db 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1424,9 +1424,16 @@ static int btrfs_init_fs_root(struct btrfs_root *root)
 	spin_lock_init(&root->ino_cache_lock);
 	init_waitqueue_head(&root->ino_cache_wait);
 
-	ret = get_anon_bdev(&root->anon_dev);
-	if (ret)
-		goto fail;
+	/*
+	 * Don't assign anonymous block device to roots that are not exposed to
+	 * userspace, the id pool is limited to 1M
+	 */
+	if (is_fstree(root->root_key.objectid) &&
+	    btrfs_root_refs(&root->root_item) > 0) {
+		ret = get_anon_bdev(&root->anon_dev);
+		if (ret)
+			goto fail;
+	}
 
 	mutex_lock(&root->objectid_mutex);
 	ret = btrfs_find_highest_objectid(root,
--
cgit v1.2.3

From 2dfb1e43f57dd3aeaa66f7cf05d068db2d4c8788 Mon Sep 17 00:00:00 2001
From: Qu Wenruo
Date: Tue, 16 Jun 2020 10:17:36 +0800
Subject: btrfs: preallocate anon block device at first phase of snapshot creation

[BUG]
When the anonymous block device pool is exhausted, subvolume/snapshot
creation fails with EMFILE (Too many files open). This has been
reported by a user.
The allocation happens in the second phase during transaction commit,
where the only way out is to abort the transaction:

BTRFS: Transaction aborted (error -24)
WARNING: CPU: 17 PID: 17041 at fs/btrfs/transaction.c:1576 create_pending_snapshot+0xbc4/0xd10 [btrfs]
RIP: 0010:create_pending_snapshot+0xbc4/0xd10 [btrfs]
Call Trace:
 create_pending_snapshots+0x82/0xa0 [btrfs]
 btrfs_commit_transaction+0x275/0x8c0 [btrfs]
 btrfs_mksubvol+0x4b9/0x500 [btrfs]
 btrfs_ioctl_snap_create_transid+0x174/0x180 [btrfs]
 btrfs_ioctl_snap_create_v2+0x11c/0x180 [btrfs]
 btrfs_ioctl+0x11a4/0x2da0 [btrfs]
 do_vfs_ioctl+0xa9/0x640
 ksys_ioctl+0x67/0x90
 __x64_sys_ioctl+0x1a/0x20
 do_syscall_64+0x5a/0x110
 entry_SYSCALL_64_after_hwframe+0x44/0xa9
---[ end trace 33f2f83f3d5250e9 ]---
BTRFS: error (device sda1) in create_pending_snapshot:1576: errno=-24 unknown
BTRFS info (device sda1): forced readonly
BTRFS warning (device sda1): Skipping commit of aborted transaction.
BTRFS: error (device sda1) in cleanup_transaction:1831: errno=-24 unknown

[CAUSE]
When the global anonymous block device pool is exhausted, the following
call chain will fail, and lead to a transaction abort:

 btrfs_ioctl_snap_create_v2()
 |- btrfs_ioctl_snap_create_transid()
    |- btrfs_mksubvol()
       |- btrfs_commit_transaction()
          |- create_pending_snapshot()
             |- btrfs_get_fs_root()
                |- btrfs_init_fs_root()
                   |- get_anon_bdev()

[FIX]
Although we can't enlarge the anonymous block device pool, at least we
can preallocate the anon_dev for the subvolume/snapshot in the first
phase, outside of the transaction context and exactly at the moment the
user calls the creation ioctl.

Reported-by: Greed Rong
Link: https://lore.kernel.org/linux-btrfs/CA+UqX+NTrZ6boGnWHhSeZmEY5J76CTqmYjO2S+=tHJX7nb9DPw@mail.gmail.com/
CC: stable@vger.kernel.org # 4.4+
Signed-off-by: Qu Wenruo
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/disk-io.c     | 71 +++++++++++++++++++++++++++++++++++++++-----
 fs/btrfs/disk-io.h     |  2 ++
 fs/btrfs/ioctl.c       | 21 ++++++++++++-
 fs/btrfs/transaction.c |  2 +-
 fs/btrfs/transaction.h |  2 ++
 5 files changed, 89 insertions(+), 9 deletions(-)

(limited to 'fs/btrfs/disk-io.c')
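[ Editor's illustration: the ownership-transfer pattern the fix uses,
  as a stand-alone user-space sketch with made-up names (get_scarce_id,
  put_scarce_id, struct snapshot), not the btrfs API. Allocate the
  scarce id in the failable first phase, hand it over later, and zero
  the local copy once ownership has moved so every path frees it
  exactly once. ]

  #include <stdio.h>
  #include <stdlib.h>

  static int get_scarce_id(int *id)
  {
      *id = 42;        /* pretend the pool allocation succeeded */
      return 0;
  }

  static void put_scarce_id(int id)
  {
      printf("freed id %d\n", id);
  }

  struct snapshot { int id; };

  int main(void)
  {
      int id = 0;
      struct snapshot *snap;

      if (get_scarce_id(&id))        /* phase 1: may fail gracefully */
          return 1;

      snap = malloc(sizeof(*snap));
      if (!snap) {
          put_scarce_id(id);         /* error path: we still own the id */
          return 1;
      }
      snap->id = id;
      id = 0;                        /* ownership moved to snap */

      if (id)                        /* no-op here; guards other exits */
          put_scarce_id(id);
      printf("snapshot got id %d\n", snap->id);
      free(snap);
      return 0;
  }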
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index c90edf04a9db..9b307034d32c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1391,7 +1391,12 @@ alloc_fail:
 	goto out;
 }
 
-static int btrfs_init_fs_root(struct btrfs_root *root)
+/*
+ * Initialize subvolume root in-memory structure
+ *
+ * @anon_dev:	anonymous device to attach to the root, if zero, allocate new
+ */
+static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
 {
 	int ret;
 	unsigned int nofs_flag;
@@ -1430,9 +1435,13 @@ static int btrfs_init_fs_root(struct btrfs_root *root)
 	 */
 	if (is_fstree(root->root_key.objectid) &&
 	    btrfs_root_refs(&root->root_item) > 0) {
-		ret = get_anon_bdev(&root->anon_dev);
-		if (ret)
-			goto fail;
+		if (!anon_dev) {
+			ret = get_anon_bdev(&root->anon_dev);
+			if (ret)
+				goto fail;
+		} else {
+			root->anon_dev = anon_dev;
+		}
 	}
 
 	mutex_lock(&root->objectid_mutex);
@@ -1537,8 +1546,27 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
 
 }
 
-struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
-				     u64 objectid, bool check_ref)
+/*
+ * Get an in-memory reference of a root structure.
+ *
+ * For essential trees like root/extent tree, we grab it from fs_info directly.
+ * For subvolume trees, we check the cached filesystem roots first. If not
+ * found, then read it from disk and add it to cached fs roots.
+ *
+ * Caller should release the root by calling btrfs_put_root() after the usage.
+ *
+ * NOTE: Reloc and log trees can't be read by this function as they share the
+ * same root objectid.
+ *
+ * @objectid:	root id
+ * @anon_dev:	preallocated anonymous block device number for new roots,
+ *		pass 0 for new allocation.
+ * @check_ref:	whether to check root item references, If true, return -ENOENT
+ *		for orphan roots
+ */
+static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
+					     u64 objectid, dev_t anon_dev,
+					     bool check_ref)
 {
 	struct btrfs_root *root;
 	struct btrfs_path *path;
@@ -1567,6 +1595,8 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
 again:
 	root = btrfs_lookup_fs_root(fs_info, objectid);
 	if (root) {
+		/* Shouldn't get preallocated anon_dev for cached roots */
+		ASSERT(!anon_dev);
 		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
 			btrfs_put_root(root);
 			return ERR_PTR(-ENOENT);
@@ -1586,7 +1616,7 @@ again:
 		goto fail;
 	}
 
-	ret = btrfs_init_fs_root(root);
+	ret = btrfs_init_fs_root(root, anon_dev);
 	if (ret)
 		goto fail;
 
@@ -1619,6 +1649,33 @@ fail:
 	return ERR_PTR(ret);
 }
 
+/*
+ * Get in-memory reference of a root structure
+ *
+ * @objectid:	tree objectid
+ * @check_ref:	if set, verify that the tree exists and the item has at least
+ *		one reference
+ */
+struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
+				     u64 objectid, bool check_ref)
+{
+	return btrfs_get_root_ref(fs_info, objectid, 0, check_ref);
+}
+
+/*
+ * Get in-memory reference of a root structure, created as new, optionally pass
+ * the anonymous block device id
+ *
+ * @objectid:	tree objectid
+ * @anon_dev:	if zero, allocate a new anonymous block device or use the
+ *		parameter value
+ */
+struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
+					 u64 objectid, dev_t anon_dev)
+{
+	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
+}
+
 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 {
 	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index bf43245406c4..00dc39d47ed3 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -67,6 +67,8 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
 
 struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
 				     u64 objectid, bool check_ref);
+struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
+					 u64 objectid, dev_t anon_dev);
 
 void btrfs_free_fs_info(struct btrfs_fs_info *fs_info);
 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4adfdfa28e53..ab34179d7cbc 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -566,6 +566,7 @@ static noinline int create_subvol(struct inode *dir,
 	struct inode *inode;
 	int ret;
 	int err;
+	dev_t anon_dev = 0;
 	u64 objectid;
 	u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
 	u64 index = 0;
@@ -578,6 +579,10 @@ static noinline int create_subvol(struct inode *dir,
 	if (ret)
 		goto fail_free;
 
+	ret = get_anon_bdev(&anon_dev);
+	if (ret < 0)
+		goto fail_free;
+
 	/*
 	 * Don't create subvolume whose level is not zero. Or qgroup will be
 	 * screwed up since it assumes subvolume qgroup's level to be 0.
@@ -660,12 +665,15 @@ static noinline int create_subvol(struct inode *dir,
 		goto fail;
 
 	key.offset = (u64)-1;
-	new_root = btrfs_get_fs_root(fs_info, objectid, true);
+	new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev);
 	if (IS_ERR(new_root)) {
+		free_anon_bdev(anon_dev);
 		ret = PTR_ERR(new_root);
 		btrfs_abort_transaction(trans, ret);
 		goto fail;
 	}
+	/* Freeing will be done in btrfs_put_root() of new_root */
+	anon_dev = 0;
 
 	btrfs_record_root_in_trans(trans, new_root);
 
@@ -735,6 +743,8 @@ fail:
 	return ret;
 
 fail_free:
+	if (anon_dev)
+		free_anon_bdev(anon_dev);
 	kfree(root_item);
 	return ret;
 }
@@ -762,6 +772,9 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 	if (!pending_snapshot)
 		return -ENOMEM;
 
+	ret = get_anon_bdev(&pending_snapshot->anon_dev);
+	if (ret < 0)
+		goto free_pending;
 	pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item),
 			GFP_KERNEL);
 	pending_snapshot->path = btrfs_alloc_path();
@@ -823,10 +836,16 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 
 	d_instantiate(dentry, inode);
 	ret = 0;
+	pending_snapshot->anon_dev = 0;
 fail:
+	/* Prevent double freeing of anon_dev */
+	if (ret && pending_snapshot->snap)
+		pending_snapshot->snap->anon_dev = 0;
 	btrfs_put_root(pending_snapshot->snap);
 	btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
 free_pending:
+	if (pending_snapshot->anon_dev)
+		free_anon_bdev(pending_snapshot->anon_dev);
 	kfree(pending_snapshot->root_item);
 	btrfs_free_path(pending_snapshot->path);
 	kfree(pending_snapshot);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index b359d4b17658..3a7c392fbcc9 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1630,7 +1630,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	}
 
 	key.offset = (u64)-1;
-	pending->snap = btrfs_get_fs_root(fs_info, objectid, true);
+	pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
 	if (IS_ERR(pending->snap)) {
 		ret = PTR_ERR(pending->snap);
 		btrfs_abort_transaction(trans, ret);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 6f65fff6cf50..98f7860af5e9 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -151,6 +151,8 @@ struct btrfs_pending_snapshot {
 	struct btrfs_block_rsv block_rsv;
 	/* extra metadata reservation for relocation */
 	int error;
+	/* Preallocated anonymous block device number */
+	dev_t anon_dev;
 	bool readonly;
 	struct list_head list;
 };
--
cgit v1.2.3

From c53e9653605dbf708f5be02902de51831be4b009 Mon Sep 17 00:00:00 2001
From: Qu Wenruo
Date: Mon, 13 Jul 2020 18:50:48 +0800
Subject: btrfs: qgroup: try to flush qgroup space when we get -EDQUOT

[PROBLEM]
There are known problems related to how btrfs handles qgroup reserved
space. One of the most obvious cases is the test case btrfs/153, which
does fallocate, then writes into the preallocated range:

  btrfs/153 1s ... - output mismatch (see xfstests-dev/results//btrfs/153.out.bad)
      --- tests/btrfs/153.out     2019-10-22 15:18:14.068965341 +0800
      +++ xfstests-dev/results//btrfs/153.out.bad 2020-07-01 20:24:40.730000089 +0800
      @@ -1,2 +1,5 @@
       QA output created by 153
      +pwrite: Disk quota exceeded
      +/mnt/scratch/testfile2: Disk quota exceeded
      +/mnt/scratch/testfile2: Disk quota exceeded
       Silence is golden
      ...
      (Run 'diff -u xfstests-dev/tests/btrfs/153.out xfstests-dev/results//btrfs/153.out.bad' to see the entire diff)

[CAUSE]
Since commit c6887cd11149 ("Btrfs: don't do nocow check unless we have
to"), we always reserve space no matter if it's COW or not. Such
behavior change is mostly for performance, and reverting it is not a
good idea anyway.

For a preallocated extent, we already reserve qgroup data space for it,
and since we also reserve data space for qgroup at buffered write time,
it needs twice the space for us to write into the preallocated range.
This leads to the -EDQUOT in the buffered write routine.

And we can't follow the same solution: unlike the data/metadata space
check, qgroup reserved space is shared between data and metadata. The
EDQUOT can happen at the metadata reservation, so doing a NODATACOW
check after a qgroup reservation failure is not a solution.

[FIX]
To solve the problem, we don't return -EDQUOT directly, but every time
we get -EDQUOT, we try to flush qgroup space:

- Flush all inodes of the root
  NODATACOW writes will free the qgroup reserved space at
  run_delalloc_range(). However we don't have the infrastructure to
  only flush NODATACOW inodes, so here we flush all inodes anyway.

- Wait for ordered extents
  This would convert the preallocated metadata space into per-trans
  metadata, which can be freed in a later transaction commit.

- Commit transaction
  This will free all per-trans metadata space.

Also we don't want to trigger the flush multiple times, so here we
introduce a per-root wait list and a new root status, to ensure only
one thread starts the flushing.

Fixes: c6887cd11149 ("Btrfs: don't do nocow check unless we have to")
Reviewed-by: Josef Bacik
Signed-off-by: Qu Wenruo
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/ctree.h   |   3 ++
 fs/btrfs/disk-io.c |   1 +
 fs/btrfs/qgroup.c  | 100 ++++++++++++++++++++++++++++++++++++++++++-----
 3 files changed, 96 insertions(+), 8 deletions(-)

(limited to 'fs/btrfs/disk-io.c')
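[ Editor's illustration: the retry-after-EDQUOT shape described above,
  as a stand-alone user-space sketch. reserve_once() and
  flush_reservations() are invented stand-ins for the reservation and
  flush steps: try once, and only on the quota error run the expensive
  flush and retry exactly once. ]

  #include <errno.h>
  #include <stdio.h>

  static int tries;

  static int reserve_once(void)
  {
      return tries++ ? 0 : -EDQUOT;    /* fail first, succeed on retry */
  }

  static int flush_reservations(void)
  {
      printf("flush delalloc, wait ordered extents, commit transaction\n");
      return 0;
  }

  static int reserve_with_retry(void)
  {
      int ret = reserve_once();

      if (ret != -EDQUOT)
          return ret;    /* success or an unrelated error: done */

      ret = flush_reservations();
      if (ret < 0)
          return ret;
      return reserve_once();    /* a single retry after the flush */
  }

  int main(void)
  {
      printf("result: %d\n", reserve_with_retry());
      return 0;
  }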
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7a4ce885bd21..13a0524cc048 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1012,6 +1012,8 @@ enum {
 	BTRFS_ROOT_DEAD_TREE,
 	/* The root has a log tree. Used only for subvolume roots. */
 	BTRFS_ROOT_HAS_LOG_TREE,
+	/* Qgroup flushing is in progress */
+	BTRFS_ROOT_QGROUP_FLUSHING,
 };
 
 /*
@@ -1164,6 +1166,7 @@ struct btrfs_root {
 	spinlock_t qgroup_meta_rsv_lock;
 	u64 qgroup_meta_rsv_pertrans;
 	u64 qgroup_meta_rsv_prealloc;
+	wait_queue_head_t qgroup_flush_wait;
 
 	/* Number of active swapfiles */
 	atomic_t nr_swapfiles;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9b307034d32c..4360937acc2f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1116,6 +1116,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 	mutex_init(&root->log_mutex);
 	mutex_init(&root->ordered_extent_mutex);
 	mutex_init(&root->delalloc_mutex);
+	init_waitqueue_head(&root->qgroup_flush_wait);
 	init_waitqueue_head(&root->log_writer_wait);
 	init_waitqueue_head(&root->log_commit_wait[0]);
 	init_waitqueue_head(&root->log_commit_wait[1]);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 054f36df60cf..23579dc39fa6 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -3515,17 +3515,58 @@ static int qgroup_unreserve_range(struct btrfs_inode *inode,
 }
 
 /*
- * Reserve qgroup space for range [start, start + len).
+ * Try to free some space for qgroup.
 *
- * This function will either reserve space from related qgroups or doing
- * nothing if the range is already reserved.
+ * For qgroup, there are only 3 ways to free qgroup space:
+ * - Flush nodatacow write
+ *   Any nodatacow write will free its reserved data space at run_delalloc_range().
+ *   In theory, we should only flush nodatacow inodes, but it's not yet
+ *   possible, so we need to flush the whole root.
 *
- * Return 0 for successful reserve
- * Return <0 for error (including -EQUOT)
+ * - Wait for ordered extents
+ *   When ordered extents are finished, their reserved metadata is finally
+ *   converted to per_trans status, which can be freed by later commit
+ *   transaction.
 *
- * NOTE: this function may sleep for memory allocation.
+ * - Commit transaction
+ *   This would free the meta_per_trans space.
+ *   In theory this shouldn't provide much space, but any more qgroup space
+ *   is needed.
 */
-int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
+static int try_flush_qgroup(struct btrfs_root *root)
+{
+	struct btrfs_trans_handle *trans;
+	int ret;
+
+	/*
+	 * We don't want to run flush again and again, so if there is a running
+	 * one, we won't try to start a new flush, but exit directly.
+	 */
+	if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
+		wait_event(root->qgroup_flush_wait,
+			!test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
+		return 0;
+	}
+
+	ret = btrfs_start_delalloc_snapshot(root);
+	if (ret < 0)
+		goto out;
+	btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
+
+	trans = btrfs_join_transaction(root);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto out;
+	}
+
+	ret = btrfs_commit_transaction(trans);
+out:
+	clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
+	wake_up(&root->qgroup_flush_wait);
+	return ret;
+}
+
+static int qgroup_reserve_data(struct btrfs_inode *inode,
 			struct extent_changeset **reserved_ret, u64 start,
 			u64 len)
 {
@@ -3578,6 +3619,34 @@ out:
 	return ret;
 }
 
+/*
+ * Reserve qgroup space for range [start, start + len).
+ *
+ * This function will either reserve space from related qgroups or do nothing
+ * if the range is already reserved.
+ *
+ * Return 0 for successful reservation
+ * Return <0 for error (including -EQUOT)
+ *
+ * NOTE: This function may sleep for memory allocation, dirty page flushing and
+ * commit transaction. So caller should not hold any dirty page locked.
@@ -3578,6 +3619,34 @@ out:
 	return ret;
 }
 
+/*
+ * Reserve qgroup space for range [start, start + len).
+ *
+ * This function will either reserve space from related qgroups or do
+ * nothing if the range is already reserved.
+ *
+ * Return 0 for successful reservation
+ * Return <0 for error (including -EDQUOT)
+ *
+ * NOTE: This function may sleep for memory allocation, dirty page
+ *	 flushing and transaction commit, so the caller must not hold
+ *	 any dirty page locked.
+ */
+int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
+			struct extent_changeset **reserved_ret, u64 start,
+			u64 len)
+{
+	int ret;
+
+	ret = qgroup_reserve_data(inode, reserved_ret, start, len);
+	if (ret <= 0 && ret != -EDQUOT)
+		return ret;
+
+	ret = try_flush_qgroup(inode->root);
+	if (ret < 0)
+		return ret;
+	return qgroup_reserve_data(inode, reserved_ret, start, len);
+}
+
 /* Free ranges specified by @reserved, normally in error path */
 static int qgroup_free_reserved_data(struct btrfs_inode *inode,
 			struct extent_changeset *reserved, u64 start, u64 len)
@@ -3746,7 +3815,7 @@ static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
 	return num_bytes;
 }
 
-int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
+static int qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
 				enum btrfs_qgroup_rsv_type type, bool enforce)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
@@ -3773,6 +3842,21 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
 	return ret;
 }
 
+int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
+				enum btrfs_qgroup_rsv_type type, bool enforce)
+{
+	int ret;
+
+	ret = qgroup_reserve_meta(root, num_bytes, type, enforce);
+	if (ret <= 0 && ret != -EDQUOT)
+		return ret;
+
+	ret = try_flush_qgroup(root);
+	if (ret < 0)
+		return ret;
+	return qgroup_reserve_meta(root, num_bytes, type, enforce);
+}
+
 void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
-- cgit v1.2.3
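The two wrappers added by this patch share one shape: try, flush at most
once, retry once. A self-contained userspace sketch of that shape, with
helper names and the toy quota model invented for illustration:

#include <errno.h>
#include <stdio.h>

/*
 * Hypothetical inner helper standing in for qgroup_reserve_data() /
 * qgroup_reserve_meta(): 0 on success, -EDQUOT when the quota is
 * exhausted, another negative errno on a hard failure.
 */
static int reserve_once(long long *quota_free, long long bytes)
{
	if (bytes > *quota_free)
		return -EDQUOT;
	*quota_free -= bytes;
	return 0;
}

static int try_flush(long long *quota_free)
{
	*quota_free += 1 << 20;	/* pretend flushing reclaimed 1 MiB */
	return 0;
}

/* The retry-after-EDQUOT shape used by both btrfs wrappers. */
static int reserve(long long *quota_free, long long bytes)
{
	int ret = reserve_once(quota_free, bytes);

	if (ret <= 0 && ret != -EDQUOT)	/* success or hard error */
		return ret;

	ret = try_flush(quota_free);	/* expensive, runs at most once */
	if (ret < 0)
		return ret;
	return reserve_once(quota_free, bytes);	/* single retry */
}

int main(void)
{
	long long quota_free = 512 << 10;	/* 512 KiB left */

	/* Fails at first, succeeds after the simulated flush: prints 0. */
	printf("reserve 1 MiB: %d\n", reserve(&quota_free, 1 << 20));
	return 0;
}

A second -EDQUOT after the flush is final; there is deliberately no
loop, so a reservation that can never succeed fails quickly instead of
flushing forever.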
From adca4d945c8dca28a85df45c5b117e6dac2e77f1 Mon Sep 17 00:00:00 2001
From: Qu Wenruo
Date: Mon, 13 Jul 2020 18:50:49 +0800
Subject: btrfs: qgroup: remove ASYNC_COMMIT mechanism in favor of reserve retry-after-EDQUOT

commit a514d63882c3 ("btrfs: qgroup: Commit transaction in advance to
reduce early EDQUOT") tries to reduce early EDQUOT problems by checking
the free qgroup space against a threshold and waking up the commit
kthread to free some space.

The problem with that mechanism is that it can only free qgroup
per-trans metadata space; it can do nothing for data, nor for prealloc
qgroup space.

Now that we have the ability to flush qgroup space and retry the
reservation after EDQUOT, that mechanism can be completely replaced.

So this patch removes the mechanism in favor of retry-after-EDQUOT.

Reviewed-by: Josef Bacik
Signed-off-by: Qu Wenruo
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/ctree.h       |  5 -----
 fs/btrfs/disk-io.c     |  1 -
 fs/btrfs/qgroup.c      | 43 ++-----------------------------------------
 fs/btrfs/transaction.c |  1 -
 fs/btrfs/transaction.h | 14 --------------
 5 files changed, 2 insertions(+), 62 deletions(-)

(limited to 'fs/btrfs/disk-io.c')

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 13a0524cc048..050760dfc56d 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -545,11 +545,6 @@ enum {
 	 * (device replace, resize, device add/delete, balance)
 	 */
 	BTRFS_FS_EXCL_OP,
-	/*
-	 * To info transaction_kthread we need an immediate commit so it
-	 * doesn't need to wait for commit_interval
-	 */
-	BTRFS_FS_NEED_ASYNC_COMMIT,
 	/*
 	 * Indicate that balance has been set up from the ioctl and is in the
 	 * main phase. The fs_info::balance_ctl is initialized.
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4360937acc2f..c850d7f44fbe 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1810,7 +1810,6 @@ static int transaction_kthread(void *arg)
 
 		now = ktime_get_seconds();
 		if (cur->state < TRANS_STATE_COMMIT_START &&
-		    !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) &&
 		    (now < cur->start_time ||
 		     now - cur->start_time < fs_info->commit_interval)) {
 			spin_unlock(&fs_info->trans_lock);
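The qgroup.c hunk below deletes the early-commit heuristic itself.
Stripped of kernel scaffolding, the removed check amounts to the
following standalone sketch; min_t() and SZ_32M are open-coded, and the
function names and main() are invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QGROUP_FREE_RATIO 32
#define QGROUP_FREE_SIZE (32ULL * 1024 * 1024)	/* SZ_32M */

/*
 * The removed early-commit trigger: ask for a commit in advance once
 * free quota drops below min(limit / 32, 32 MiB).
 */
static bool should_commit_early(uint64_t limit, uint64_t reserved,
				uint64_t used)
{
	uint64_t free_bytes = limit - reserved - used;
	uint64_t threshold = limit / QGROUP_FREE_RATIO;

	if (threshold > QGROUP_FREE_SIZE)
		threshold = QGROUP_FREE_SIZE;
	return free_bytes < threshold;
}

int main(void)
{
	/* 1 GiB limit, 1000 MiB accounted: 24 MiB free < 32 MiB, prints 1. */
	printf("%d\n", should_commit_early(1024ULL << 20, 0, 1000ULL << 20));
	return 0;
}

Because the early commit could only ever reclaim per-trans metadata, the
heuristic gave no relief for data or prealloc reservations, which is why
the flush-and-retry mechanism replaces it outright.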
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 23579dc39fa6..c0f350c3a0cf 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -11,7 +11,6 @@
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/btrfs.h>
-#include <linux/sizes.h>
 
 #include "ctree.h"
 #include "transaction.h"
@@ -2895,20 +2894,8 @@ out:
 	return ret;
 }
 
-/*
- * Two limits to commit transaction in advance.
- *
- * For RATIO, it will be 1/RATIO of the remaining limit as threshold.
- * For SIZE, it will be in byte unit as threshold.
- */
-#define QGROUP_FREE_RATIO		32
-#define QGROUP_FREE_SIZE		SZ_32M
-static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
-				const struct btrfs_qgroup *qg, u64 num_bytes)
+static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
 {
-	u64 free;
-	u64 threshold;
-
 	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
 	    qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
 		return false;
@@ -2917,32 +2904,6 @@ static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
 	    qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
 		return false;
 
-	/*
-	 * Even if we passed the check, it's better to check if reservation
-	 * for meta_pertrans is pushing us near limit.
-	 * If there is too much pertrans reservation or it's near the limit,
-	 * let's try commit transaction to free some, using transaction_kthread
-	 */
-	if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
-			      BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
-		if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
-			free = qg->max_excl - qgroup_rsv_total(qg) - qg->excl;
-			threshold = min_t(u64, qg->max_excl / QGROUP_FREE_RATIO,
-					  QGROUP_FREE_SIZE);
-		} else {
-			free = qg->max_rfer - qgroup_rsv_total(qg) - qg->rfer;
-			threshold = min_t(u64, qg->max_rfer / QGROUP_FREE_RATIO,
-					  QGROUP_FREE_SIZE);
-		}
-
-		/*
-		 * Use transaction_kthread to commit transaction, so we no
-		 * longer need to bother nested transaction nor lock context.
-		 */
-		if (free < threshold)
-			btrfs_commit_transaction_locksafe(fs_info);
-	}
-
 	return true;
 }
 
@@ -2990,7 +2951,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
 
 		qg = unode_aux_to_qgroup(unode);
 
-		if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) {
+		if (enforce && !qgroup_check_limits(qg, num_bytes)) {
 			ret = -EDQUOT;
 			goto out;
 		}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 3a7c392fbcc9..efafc286323c 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2351,7 +2351,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
 	 */
 	cur_trans->state = TRANS_STATE_COMPLETED;
 	wake_up(&cur_trans->commit_wait);
-	clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
 
 	spin_lock(&fs_info->trans_lock);
 	list_del_init(&cur_trans->list);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 98f7860af5e9..d60b055b8695 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -210,20 +210,6 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
 				   int wait_for_unblock);
-
-/*
- * Try to commit transaction asynchronously, so this is safe to call
- * even holding a spinlock.
- *
- * It's done by informing transaction_kthread to commit transaction without
- * waiting for commit interval.
- */
-static inline void btrfs_commit_transaction_locksafe(
-		struct btrfs_fs_info *fs_info)
-{
-	set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
-	wake_up_process(fs_info->transaction_kthread);
-}
 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
 void btrfs_throttle(struct btrfs_fs_info *fs_info);
-- cgit v1.2.3