Diffstat (limited to 'fs/btrfs/qgroup.c')
-rw-r--r--	fs/btrfs/qgroup.c	462
1 file changed, 363 insertions(+), 99 deletions(-)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 11f4fffe503e..a59801dc2a34 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -131,8 +131,15 @@ struct btrfs_qgroup_list {
 	struct btrfs_qgroup *member;
 };
 
-#define ptr_to_u64(x) ((u64)(uintptr_t)x)
-#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)
+static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
+{
+	return (u64)(uintptr_t)qg;
+}
+
+static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n)
+{
+	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
+}
 
 static int
 qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
@@ -312,7 +319,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
 		return 0;
 
-	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
+	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
 	if (!fs_info->qgroup_ulist) {
 		ret = -ENOMEM;
 		goto out;
@@ -869,7 +876,7 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
-	fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
+	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
 	if (!fs_info->qgroup_ulist) {
 		ret = -ENOMEM;
 		goto out;
@@ -1012,7 +1019,7 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
 	list_del(&quota_root->dirty_list);
 
 	btrfs_tree_lock(quota_root->node);
-	clean_tree_block(trans, tree_root->fs_info, quota_root->node);
+	clean_tree_block(fs_info, quota_root->node);
 	btrfs_tree_unlock(quota_root->node);
 	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
 
@@ -1031,6 +1038,15 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
 		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
 }
 
+static void report_reserved_underflow(struct btrfs_fs_info *fs_info,
+				      struct btrfs_qgroup *qgroup,
+				      u64 num_bytes)
+{
+	btrfs_warn(fs_info,
+		"qgroup %llu reserved space underflow, have: %llu, to free: %llu",
+		qgroup->qgroupid, qgroup->reserved, num_bytes);
+	qgroup->reserved = 0;
+}
+
 /*
  * The easy accounting, if we are adding/removing the only ref for an extent
  * then this qgroup and all of the parent qgroups get their reference and
@@ -1058,15 +1074,19 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
 	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
 	qgroup->excl += sign * num_bytes;
 	qgroup->excl_cmpr += sign * num_bytes;
-	if (sign > 0)
-		qgroup->reserved -= num_bytes;
+	if (sign > 0) {
+		if (WARN_ON(qgroup->reserved < num_bytes))
+			report_reserved_underflow(fs_info, qgroup, num_bytes);
+		else
+			qgroup->reserved -= num_bytes;
+	}
 
 	qgroup_dirty(fs_info, qgroup);
 
 	/* Get all of the parent groups that contain this qgroup */
 	list_for_each_entry(glist, &qgroup->groups, next_group) {
 		ret = ulist_add(tmp, glist->group->qgroupid,
-				ptr_to_u64(glist->group), GFP_ATOMIC);
+				qgroup_to_aux(glist->group), GFP_ATOMIC);
 		if (ret < 0)
 			goto out;
 	}
@@ -1074,20 +1094,25 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
 	/* Iterate all of the parents and adjust their reference counts */
 	ULIST_ITER_INIT(&uiter);
 	while ((unode = ulist_next(tmp, &uiter))) {
-		qgroup = u64_to_ptr(unode->aux);
+		qgroup = unode_aux_to_qgroup(unode);
 		qgroup->rfer += sign * num_bytes;
 		qgroup->rfer_cmpr += sign * num_bytes;
 		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
 		qgroup->excl += sign * num_bytes;
-		if (sign > 0)
-			qgroup->reserved -= num_bytes;
+		if (sign > 0) {
+			if (WARN_ON(qgroup->reserved < num_bytes))
+				report_reserved_underflow(fs_info, qgroup,
+							  num_bytes);
+			else
+				qgroup->reserved -= num_bytes;
+		}
 		qgroup->excl_cmpr += sign * num_bytes;
 		qgroup_dirty(fs_info, qgroup);
 
 		/* Add any parents of the parents */
 		list_for_each_entry(glist, &qgroup->groups, next_group) {
 			ret = ulist_add(tmp, glist->group->qgroupid,
-					ptr_to_u64(glist->group), GFP_ATOMIC);
+					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
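The hunks above also convert the ptr_to_u64()/u64_to_ptr() macros into type-checked inline helpers. A rough userspace sketch of the same aux round-trip follows; all names below are illustrative stand-ins, not kernel API:

#include <stdint.h>
#include <stdio.h>

struct qgroup {
	unsigned long long qgroupid;
};

struct node {	/* stand-in for struct ulist_node */
	uint64_t aux;
};

static inline uint64_t qgroup_to_aux(struct qgroup *qg)
{
	/* widen through uintptr_t so 32-bit pointers convert cleanly */
	return (uint64_t)(uintptr_t)qg;
}

static inline struct qgroup *aux_to_qgroup(struct node *n)
{
	return (struct qgroup *)(uintptr_t)n->aux;
}

int main(void)
{
	struct qgroup qg = { .qgroupid = 5 };
	struct node n = { .aux = qgroup_to_aux(&qg) };

	printf("qgroupid via aux: %llu\n", aux_to_qgroup(&n)->qgroupid);
	return 0;
}

Unlike the old macros, the inline functions reject a wrong-typed argument at compile time while generating identical code.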
@@ -1149,7 +1174,7 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
 	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
 		return -EINVAL;
 
-	tmp = ulist_alloc(GFP_NOFS);
+	tmp = ulist_alloc(GFP_KERNEL);
 	if (!tmp)
 		return -ENOMEM;
 
@@ -1185,7 +1210,7 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
 	}
 
 	spin_lock(&fs_info->qgroup_lock);
-	ret = add_relation_rb(quota_root->fs_info, src, dst);
+	ret = add_relation_rb(fs_info, src, dst);
 	if (ret < 0) {
 		spin_unlock(&fs_info->qgroup_lock);
 		goto out;
@@ -1198,7 +1223,7 @@ out:
 	return ret;
 }
 
-int __del_qgroup_relation(struct btrfs_trans_handle *trans,
+static int __del_qgroup_relation(struct btrfs_trans_handle *trans,
 			  struct btrfs_fs_info *fs_info, u64 src, u64 dst)
 {
 	struct btrfs_root *quota_root;
@@ -1209,7 +1234,7 @@ int __del_qgroup_relation(struct btrfs_trans_handle *trans,
 	int ret = 0;
 	int err;
 
-	tmp = ulist_alloc(GFP_NOFS);
+	tmp = ulist_alloc(GFP_KERNEL);
 	if (!tmp)
 		return -ENOMEM;
 
@@ -1333,7 +1358,7 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
 	}
 
 	spin_lock(&fs_info->qgroup_lock);
-	del_qgroup_rb(quota_root->fs_info, qgroupid);
+	del_qgroup_rb(fs_info, qgroupid);
 	spin_unlock(&fs_info->qgroup_lock);
 out:
 	mutex_unlock(&fs_info->qgroup_ioctl_lock);
@@ -1439,8 +1464,9 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
 	while (node) {
 		record = rb_entry(node, struct btrfs_qgroup_extent_record,
 				  node);
-		ret = btrfs_find_all_roots(NULL, fs_info, record->bytenr, 0,
-					   &record->old_roots);
+		if (WARN_ON(!record->old_roots))
+			ret = btrfs_find_all_roots(NULL, fs_info,
+					record->bytenr, 0, &record->old_roots);
 		if (ret < 0)
 			break;
 		if (qgroup_to_skip)
@@ -1450,7 +1476,7 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
+int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
 				struct btrfs_delayed_ref_root *delayed_refs,
 				struct btrfs_qgroup_extent_record *record)
 {
@@ -1460,7 +1486,7 @@ int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
 	u64 bytenr = record->bytenr;
 
 	assert_spin_locked(&delayed_refs->lock);
-	trace_btrfs_qgroup_insert_dirty_extent(fs_info, record);
+	trace_btrfs_qgroup_trace_extent(fs_info, record);
 
 	while (*p) {
 		parent_node = *p;
@@ -1479,7 +1505,29 @@ int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
-int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
+int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
+				   struct btrfs_qgroup_extent_record *qrecord)
+{
+	struct ulist *old_root;
+	u64 bytenr = qrecord->bytenr;
+	int ret;
+
+	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Here we don't need to get the lock of
+	 * trans->transaction->delayed_refs, since inserted qrecord won't
+	 * be deleted, only qrecord->node may be modified (new qrecord insert)
+	 *
+	 * So modifying qrecord->old_roots is safe here
+	 */
+	qrecord->old_roots = old_root;
+	return 0;
+}
+
+int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
 		struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
 		gfp_t gfp_flag)
 {
@@ -1502,14 +1550,229 @@ int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
 	record->old_roots = NULL;
 
 	spin_lock(&delayed_refs->lock);
-	ret = btrfs_qgroup_insert_dirty_extent_nolock(fs_info, delayed_refs,
-						      record);
+	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
 	spin_unlock(&delayed_refs->lock);
-	if (ret > 0)
+	if (ret > 0) {
 		kfree(record);
+		return 0;
+	}
+	return btrfs_qgroup_trace_extent_post(fs_info, record);
+}
+
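btrfs_qgroup_trace_extent() is now split into a cheap insertion done under delayed_refs->lock and a costly old-roots lookup done after the lock is dropped; the comment in btrfs_qgroup_trace_extent_post() explains why that is safe. A minimal sketch of the shape of that pattern, with a pthread mutex standing in for the kernel spinlock and every name below illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct record {
	unsigned long long bytenr;
	void *old_roots;
};

static pthread_mutex_t refs_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for the costly btrfs_find_all_roots() backref walk */
static void *find_all_roots(unsigned long long bytenr)
{
	(void)bytenr;
	return malloc(1);
}

static int trace_extent(struct record *rec)
{
	pthread_mutex_lock(&refs_lock);
	/* cheap part: insert rec into the shared tree here */
	pthread_mutex_unlock(&refs_lock);

	/*
	 * Safe without the lock: an inserted record is never removed,
	 * and old_roots is written exactly once, right here.
	 */
	rec->old_roots = find_all_roots(rec->bytenr);
	return rec->old_roots ? 0 : -1;
}

int main(void)
{
	struct record rec = { .bytenr = 4096, .old_roots = NULL };

	printf("trace_extent: %d\n", trace_extent(&rec));
	free(rec.old_roots);
	return 0;
}

The point of the split is lock hold time: the backref walk can touch many tree blocks, so doing it outside the spinlock keeps delayed-ref processing responsive.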
+int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
+				  struct btrfs_fs_info *fs_info,
+				  struct extent_buffer *eb)
+{
+	int nr = btrfs_header_nritems(eb);
+	int i, extent_type, ret;
+	struct btrfs_key key;
+	struct btrfs_file_extent_item *fi;
+	u64 bytenr, num_bytes;
+
+	/* We can be called directly from walk_up_proc() */
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+		return 0;
+
+	for (i = 0; i < nr; i++) {
+		btrfs_item_key_to_cpu(eb, &key, i);
+
+		if (key.type != BTRFS_EXTENT_DATA_KEY)
+			continue;
+
+		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
+		/* filter out non qgroup-accountable extents  */
+		extent_type = btrfs_file_extent_type(eb, fi);
+
+		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+			continue;
+
+		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
+		if (!bytenr)
+			continue;
+
+		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
+
+		ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
+						num_bytes, GFP_NOFS);
+		if (ret)
+			return ret;
+	}
 	return 0;
 }
 
+/*
+ * Walk up the tree from the bottom, freeing leaves and any interior
+ * nodes which have had all slots visited. If a node (leaf or
+ * interior) is freed, the node above it will have it's slot
+ * incremented. The root node will never be freed.
+ *
+ * At the end of this function, we should have a path which has all
+ * slots incremented to the next position for a search. If we need to
+ * read a new node it will be NULL and the node above it will have the
+ * correct slot selected for a later read.
+ *
+ * If we increment the root nodes slot counter past the number of
+ * elements, 1 is returned to signal completion of the search.
+ */
+static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
+{
+	int level = 0;
+	int nr, slot;
+	struct extent_buffer *eb;
+
+	if (root_level == 0)
+		return 1;
+
+	while (level <= root_level) {
+		eb = path->nodes[level];
+		nr = btrfs_header_nritems(eb);
+		path->slots[level]++;
+		slot = path->slots[level];
+		if (slot >= nr || level == 0) {
+			/*
+			 * Don't free the root -  we will detect this
+			 * condition after our loop and return a
+			 * positive value for caller to stop walking the tree.
+			 */
+			if (level != root_level) {
+				btrfs_tree_unlock_rw(eb, path->locks[level]);
+				path->locks[level] = 0;
+
+				free_extent_buffer(eb);
+				path->nodes[level] = NULL;
+				path->slots[level] = 0;
+			}
+		} else {
+			/*
+			 * We have a valid slot to walk back down
+			 * from. Stop here so caller can process these
+			 * new nodes.
+			 */
+			break;
+		}
+
+		level++;
+	}
+
+	eb = path->nodes[root_level];
+	if (path->slots[root_level] >= btrfs_header_nritems(eb))
+		return 1;
+
+	return 0;
+}
+
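A small userspace model of adjust_slots_upwards(), assuming a tree described only by per-level item counts; it mirrors the pop-exhausted-levels logic but none of the locking or extent-buffer management:

#include <stdio.h>

#define MAX_LEVEL 8

struct path {
	int slots[MAX_LEVEL];
	int nritems[MAX_LEVEL];	/* item count of the node at each level */
};

/*
 * Pop exhausted levels, bump the slot above, and return 1 once the
 * root itself has no slots left (search complete).
 */
static int adjust_slots_upwards(struct path *p, int root_level)
{
	int level;

	if (root_level == 0)
		return 1;

	for (level = 0; level <= root_level; level++) {
		p->slots[level]++;
		/* leaves are always released; an interior node with a
		 * valid next slot stops the climb */
		if (p->slots[level] < p->nritems[level] && level != 0)
			break;
		if (level != root_level)
			p->slots[level] = 0;
	}

	return p->slots[root_level] >= p->nritems[root_level];
}

int main(void)
{
	/* root at level 1 with two leaf children */
	struct path p = { .nritems = {1, 2} };
	int visits = 0;

	do {
		visits++;	/* "process" the leaf at the current slot */
	} while (!adjust_slots_upwards(&p, 1));

	printf("visited %d leaves\n", visits);	/* prints 2 */
	return 0;
}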
+int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
+			       struct btrfs_root *root,
+			       struct extent_buffer *root_eb,
+			       u64 root_gen, int root_level)
+{
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	int ret = 0;
+	int level;
+	struct extent_buffer *eb = root_eb;
+	struct btrfs_path *path = NULL;
+
+	BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
+	BUG_ON(root_eb == NULL);
+
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+		return 0;
+
+	if (!extent_buffer_uptodate(root_eb)) {
+		ret = btrfs_read_buffer(root_eb, root_gen);
+		if (ret)
+			goto out;
+	}
+
+	if (root_level == 0) {
+		ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb);
+		goto out;
+	}
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	/*
+	 * Walk down the tree.  Missing extent blocks are filled in as
+	 * we go. Metadata is accounted every time we read a new
+	 * extent block.
+	 *
+	 * When we reach a leaf, we account for file extent items in it,
+	 * walk back up the tree (adjusting slot pointers as we go)
+	 * and restart the search process.
+	 */
+	extent_buffer_get(root_eb); /* For path */
+	path->nodes[root_level] = root_eb;
+	path->slots[root_level] = 0;
+	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
+walk_down:
+	level = root_level;
+	while (level >= 0) {
+		if (path->nodes[level] == NULL) {
+			int parent_slot;
+			u64 child_gen;
+			u64 child_bytenr;
+
+			/*
+			 * We need to get child blockptr/gen from parent before
+			 * we can read it.
+			 */
+			eb = path->nodes[level + 1];
+			parent_slot = path->slots[level + 1];
+			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
+			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
+
+			eb = read_tree_block(fs_info, child_bytenr, child_gen);
+			if (IS_ERR(eb)) {
+				ret = PTR_ERR(eb);
+				goto out;
+			} else if (!extent_buffer_uptodate(eb)) {
+				free_extent_buffer(eb);
+				ret = -EIO;
+				goto out;
+			}
+
+			path->nodes[level] = eb;
+			path->slots[level] = 0;
+
+			btrfs_tree_read_lock(eb);
+			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
+
+			ret = btrfs_qgroup_trace_extent(trans, fs_info,
+							child_bytenr,
+							fs_info->nodesize,
+							GFP_NOFS);
+			if (ret)
+				goto out;
+		}
+
+		if (level == 0) {
+			ret = btrfs_qgroup_trace_leaf_items(trans,fs_info,
+							   path->nodes[level]);
+			if (ret)
+				goto out;
+
+			/* Nonzero return here means we completed our search */
+			ret = adjust_slots_upwards(path, root_level);
+			if (ret)
+				break;
+
+			/* Restart search with new slots */
+			goto walk_down;
+		}
+
+		level--;
+	}
+
+	ret = 0;
+out:
+	btrfs_free_path(path);
+
+	return ret;
+}
+
 #define UPDATE_NEW	0
 #define UPDATE_OLD	1
 /*
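btrfs_qgroup_trace_subtree() then pairs that climb with a walk-down loop that reads and accounts each block exactly once. A toy driver over the same per-level model as the previous sketch; the adjuster is repeated so the sketch stands alone, and the tree shape is hypothetical:

#include <stdio.h>

#define MAX_LEVEL 8

struct path {
	int slots[MAX_LEVEL];
	int nritems[MAX_LEVEL];	/* fanout of the node at each level */
	int present[MAX_LEVEL];	/* stands in for path->nodes[level] != NULL */
};

/* same model as the previous sketch, plus releasing "buffers" */
static int adjust_slots_upwards(struct path *p, int root_level)
{
	int level;

	if (root_level == 0)
		return 1;
	for (level = 0; level <= root_level; level++) {
		p->slots[level]++;
		if (p->slots[level] < p->nritems[level] && level != 0)
			break;
		if (level != root_level) {
			p->present[level] = 0;	/* "free" the buffer */
			p->slots[level] = 0;
		}
	}
	return p->slots[root_level] >= p->nritems[root_level];
}

int main(void)
{
	/* level-2 root, fanout 2; level-1 nodes, fanout 3; root pre-loaded */
	struct path p = { .nritems = {4, 3, 2}, .present = {0, 0, 1} };
	int root_level = 2, level, leaves = 0, reads = 0;

walk_down:
	level = root_level;
	while (level >= 0) {
		if (!p.present[level]) {	/* read + account this block once */
			p.present[level] = 1;
			reads++;
		}
		if (level == 0) {
			leaves++;		/* account items in the leaf */
			if (adjust_slots_upwards(&p, root_level))
				break;
			goto walk_down;
		}
		level--;
	}
	printf("read %d blocks, visited %d leaves\n", reads, leaves);
	return 0;
}

For the 2x3 tree above this prints "read 8 blocks, visited 6 leaves": two interior nodes plus six leaves, each read once even though the walk restarts from the root after every leaf.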
@@ -1535,30 +1798,30 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
 			continue;
 
 		ulist_reinit(tmp);
-		ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
+		ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
 				GFP_ATOMIC);
 		if (ret < 0)
 			return ret;
-		ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
+		ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
 		if (ret < 0)
 			return ret;
 		ULIST_ITER_INIT(&tmp_uiter);
 		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
 			struct btrfs_qgroup_list *glist;
 
-			qg = u64_to_ptr(tmp_unode->aux);
+			qg = unode_aux_to_qgroup(tmp_unode);
 			if (update_old)
 				btrfs_qgroup_update_old_refcnt(qg, seq, 1);
 			else
 				btrfs_qgroup_update_new_refcnt(qg, seq, 1);
 			list_for_each_entry(glist, &qg->groups, next_group) {
 				ret = ulist_add(qgroups, glist->group->qgroupid,
-						ptr_to_u64(glist->group),
+						qgroup_to_aux(glist->group),
 						GFP_ATOMIC);
 				if (ret < 0)
 					return ret;
 				ret = ulist_add(tmp, glist->group->qgroupid,
-						ptr_to_u64(glist->group),
+						qgroup_to_aux(glist->group),
 						GFP_ATOMIC);
 				if (ret < 0)
 					return ret;
@@ -1619,7 +1882,7 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
 	while ((unode = ulist_next(qgroups, &uiter))) {
 		bool dirty = false;
 
-		qg = u64_to_ptr(unode->aux);
+		qg = unode_aux_to_qgroup(unode);
 		cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
 		cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
 
@@ -1706,13 +1969,14 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
 	u64 nr_old_roots = 0;
 	int ret = 0;
 
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
+		return 0;
+
 	if (new_roots)
 		nr_new_roots = new_roots->nnodes;
 	if (old_roots)
 		nr_old_roots = old_roots->nnodes;
 
-	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
-		goto out_free;
 	BUG_ON(!fs_info->quota_root);
 
 	trace_btrfs_qgroup_account_extent(fs_info, bytenr, num_bytes,
@@ -1949,9 +2213,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
 			goto out;
 		}
 
-		rcu_read_lock();
-		level_size = srcroot->nodesize;
-		rcu_read_unlock();
+		level_size = fs_info->nodesize;
 	}
 
 	/*
@@ -2034,8 +2296,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
 	i_qgroups = (u64 *)(inherit + 1);
 	for (i = 0; i < inherit->num_qgroups; ++i) {
 		if (*i_qgroups) {
-			ret = add_relation_rb(quota_root->fs_info, objectid,
-					      *i_qgroups);
+			ret = add_relation_rb(fs_info, objectid, *i_qgroups);
 			if (ret)
 				goto unlock;
 		}
@@ -2086,7 +2347,20 @@ out:
 	return ret;
 }
 
-static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
+static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
+{
+	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
+	    qg->reserved + (s64)qg->rfer + num_bytes > qg->max_rfer)
+		return false;
+
+	if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
+	    qg->reserved + (s64)qg->excl + num_bytes > qg->max_excl)
+		return false;
+
+	return true;
+}
+
+static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce)
 {
 	struct btrfs_root *quota_root;
 	struct btrfs_qgroup *qgroup;
@@ -2125,18 +2399,9 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
 		struct btrfs_qgroup *qg;
 		struct btrfs_qgroup_list *glist;
 
-		qg = u64_to_ptr(unode->aux);
-
-		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
-		    qg->reserved + (s64)qg->rfer + num_bytes >
-		    qg->max_rfer) {
-			ret = -EDQUOT;
-			goto out;
-		}
+		qg = unode_aux_to_qgroup(unode);
 
-		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
-		    qg->reserved + (s64)qg->excl + num_bytes >
-		    qg->max_excl) {
+		if (enforce && !qgroup_check_limits(qg, num_bytes)) {
 			ret = -EDQUOT;
 			goto out;
 		}
@@ -2157,7 +2422,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
 	while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
 		struct btrfs_qgroup *qg;
 
-		qg = u64_to_ptr(unode->aux);
+		qg = unode_aux_to_qgroup(unode);
 
 		qg->reserved += num_bytes;
 	}
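qgroup_check_limits() centralizes the two -EDQUOT checks, and the new enforce flag lets callers skip them deliberately. A self-contained sketch of the check; the field and flag names mirror the patch, everything else is scaffolding:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LIMIT_MAX_RFER	(1 << 0)	/* stand-ins for BTRFS_QGROUP_LIMIT_* */
#define LIMIT_MAX_EXCL	(1 << 1)

struct qgroup {
	uint64_t lim_flags;
	int64_t rfer;		/* signed, as the (s64) casts suggest */
	int64_t excl;
	uint64_t reserved;
	uint64_t max_rfer;
	uint64_t max_excl;
};

static bool qgroup_check_limits(const struct qgroup *qg, uint64_t num_bytes)
{
	if ((qg->lim_flags & LIMIT_MAX_RFER) &&
	    qg->reserved + qg->rfer + num_bytes > qg->max_rfer)
		return false;

	if ((qg->lim_flags & LIMIT_MAX_EXCL) &&
	    qg->reserved + qg->excl + num_bytes > qg->max_excl)
		return false;

	return true;
}

int main(void)
{
	struct qgroup qg = {
		.lim_flags = LIMIT_MAX_RFER,
		.rfer = 900, .reserved = 50, .max_rfer = 1000,
	};
	bool enforce = true;

	/* 50 + 900 + 100 > 1000, so an enforcing reservation fails */
	printf("reserve 100: %s\n",
	       !enforce || qgroup_check_limits(&qg, 100) ? "ok" : "-EDQUOT");
	return 0;
}

Note that pending reservations count against the limit alongside already committed usage, which is why the sum includes both reserved and rfer/excl.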
@@ -2202,9 +2467,12 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
 		struct btrfs_qgroup *qg;
 		struct btrfs_qgroup_list *glist;
 
-		qg = u64_to_ptr(unode->aux);
+		qg = unode_aux_to_qgroup(unode);
 
-		qg->reserved -= num_bytes;
+		if (WARN_ON(qg->reserved < num_bytes))
+			report_reserved_underflow(fs_info, qg, num_bytes);
+		else
+			qg->reserved -= num_bytes;
 
 		list_for_each_entry(glist, &qg->groups, next_group) {
 			ret = ulist_add(fs_info->qgroup_ulist,
@@ -2219,11 +2487,6 @@ out:
 	spin_unlock(&fs_info->qgroup_lock);
 }
 
-static inline void qgroup_free(struct btrfs_root *root, u64 num_bytes)
-{
-	return btrfs_qgroup_free_refroot(root->fs_info, root->objectid,
-					 num_bytes);
-}
 void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
 {
 	if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
@@ -2302,7 +2565,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 		    found.type != BTRFS_METADATA_ITEM_KEY)
 			continue;
 		if (found.type == BTRFS_METADATA_ITEM_KEY)
-			num_bytes = fs_info->extent_root->nodesize;
+			num_bytes = fs_info->nodesize;
 		else
 			num_bytes = found.offset;
 
@@ -2335,10 +2598,6 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 	int err = -ENOMEM;
 	int ret = 0;
 
-	mutex_lock(&fs_info->qgroup_rescan_lock);
-	fs_info->qgroup_rescan_running = true;
-	mutex_unlock(&fs_info->qgroup_rescan_lock);
-
 	path = btrfs_alloc_path();
 	if (!path)
 		goto out;
@@ -2356,9 +2615,9 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 			err = qgroup_rescan_leaf(fs_info, path, trans);
 		}
 		if (err > 0)
-			btrfs_commit_transaction(trans, fs_info->fs_root);
+			btrfs_commit_transaction(trans);
 		else
-			btrfs_end_transaction(trans, fs_info->fs_root);
+			btrfs_end_transaction(trans);
 	}
 
 out:
@@ -2393,7 +2652,7 @@ out:
 		err = ret;
 		btrfs_err(fs_info, "fail to update qgroup status: %d", err);
 	}
-	btrfs_end_transaction(trans, fs_info->quota_root);
+	btrfs_end_transaction(trans);
 
 	if (btrfs_fs_closing(fs_info)) {
 		btrfs_info(fs_info, "qgroup scan paused");
@@ -2449,6 +2708,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
 	       sizeof(fs_info->qgroup_rescan_progress));
 	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
 	init_completion(&fs_info->qgroup_rescan_completion);
+	fs_info->qgroup_rescan_running = true;
 
 	spin_unlock(&fs_info->qgroup_lock);
 	mutex_unlock(&fs_info->qgroup_rescan_lock);
@@ -2512,7 +2772,7 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
 		return PTR_ERR(trans);
 	}
-	ret = btrfs_commit_transaction(trans, fs_info->fs_root);
+	ret = btrfs_commit_transaction(trans);
 	if (ret) {
 		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
 		return ret;
@@ -2586,7 +2846,7 @@ int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
 		return 0;
 
 	changeset.bytes_changed = 0;
-	changeset.range_changed = ulist_alloc(GFP_NOFS);
+	ulist_init(&changeset.range_changed);
 	ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
 			start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
 	trace_btrfs_qgroup_reserve_data(inode, start, len,
@@ -2594,21 +2854,21 @@ int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
 					QGROUP_RESERVE);
 	if (ret < 0)
 		goto cleanup;
-	ret = qgroup_reserve(root, changeset.bytes_changed);
+	ret = qgroup_reserve(root, changeset.bytes_changed, true);
 	if (ret < 0)
 		goto cleanup;
 
-	ulist_free(changeset.range_changed);
+	ulist_release(&changeset.range_changed);
 	return ret;
 
 cleanup:
 	/* cleanup already reserved ranges */
 	ULIST_ITER_INIT(&uiter);
-	while ((unode = ulist_next(changeset.range_changed, &uiter)))
+	while ((unode = ulist_next(&changeset.range_changed, &uiter)))
 		clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
 				 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL,
 				 GFP_NOFS);
-	ulist_free(changeset.range_changed);
+	ulist_release(&changeset.range_changed);
 	return ret;
 }
 
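The changeset conversions in these hunks rely on ulist_init()/ulist_release(), which let a short-lived ulist be embedded on the stack instead of heap-allocated with ulist_alloc() and freed with ulist_free(), removing an allocation failure path. The idiom, sketched with a hypothetical list type rather than the real ulist internals:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical embedded list, standing in for struct ulist */
struct ulist {
	size_t nnodes;
	void *head;
};

static void ulist_init(struct ulist *u)
{
	u->nnodes = 0;
	u->head = NULL;
}

static void ulist_release(struct ulist *u)
{
	/* would free the node chain here, then reset for reuse */
	ulist_init(u);
}

struct extent_changeset {
	uint64_t bytes_changed;
	struct ulist range_changed;	/* embedded: no allocation to fail */
};

int main(void)
{
	struct extent_changeset changeset;

	changeset.bytes_changed = 0;
	ulist_init(&changeset.range_changed);	/* cannot fail, unlike ulist_alloc() */
	/* ... record ranges ... */
	ulist_release(&changeset.range_changed);
	printf("nnodes after release: %zu\n", changeset.range_changed.nnodes);
	return 0;
}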
@@ -2620,23 +2880,22 @@ static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
 	int ret;
 
 	changeset.bytes_changed = 0;
-	changeset.range_changed = ulist_alloc(GFP_NOFS);
-	if (!changeset.range_changed)
-		return -ENOMEM;
-
+	ulist_init(&changeset.range_changed);
 	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
 			start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
 	if (ret < 0)
 		goto out;
 
 	if (free) {
-		qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed);
+		btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
+				BTRFS_I(inode)->root->objectid,
+				changeset.bytes_changed);
 		trace_op = QGROUP_FREE;
 	}
 	trace_btrfs_qgroup_release_data(inode, start, len,
 					changeset.bytes_changed, trace_op);
 out:
-	ulist_free(changeset.range_changed);
+	ulist_release(&changeset.range_changed);
 	return ret;
 }
 
@@ -2675,46 +2934,51 @@ int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
 	return __btrfs_qgroup_release_data(inode, start, len, 0);
 }
 
-int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
+int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
+			      bool enforce)
 {
+	struct btrfs_fs_info *fs_info = root->fs_info;
 	int ret;
 
-	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
 	    !is_fstree(root->objectid) || num_bytes == 0)
 		return 0;
 
-	BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
-	ret = qgroup_reserve(root, num_bytes);
+	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
+	ret = qgroup_reserve(root, num_bytes, enforce);
 	if (ret < 0)
 		return ret;
-	atomic_add(num_bytes, &root->qgroup_meta_rsv);
+	atomic64_add(num_bytes, &root->qgroup_meta_rsv);
 	return ret;
 }
 
 void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
 {
-	int reserved;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	u64 reserved;
 
-	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
 	    !is_fstree(root->objectid))
 		return;
 
-	reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
+	reserved = atomic64_xchg(&root->qgroup_meta_rsv, 0);
 	if (reserved == 0)
 		return;
-	qgroup_free(root, reserved);
+	btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved);
 }
 
 void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
 {
-	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
+	struct btrfs_fs_info *fs_info = root->fs_info;
+
+	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
 	    !is_fstree(root->objectid))
 		return;
 
-	BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
-	WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
-	atomic_sub(num_bytes, &root->qgroup_meta_rsv);
-	qgroup_free(root, num_bytes);
+	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
+	WARN_ON(atomic64_read(&root->qgroup_meta_rsv) < num_bytes);
+	atomic64_sub(num_bytes, &root->qgroup_meta_rsv);
+	btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes);
 }
 
 /*
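qgroup_meta_rsv also grows from atomic_t to atomic64_t, so large per-root metadata reservations cannot wrap a 32-bit counter, and btrfs_qgroup_free_meta_all() drains it with an atomic exchange. The same drain pattern in portable C11 atomics (userspace stand-ins, not the kernel atomic API):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* userspace stand-in for atomic64_t root->qgroup_meta_rsv */
static _Atomic uint64_t qgroup_meta_rsv;

static void reserve_meta(uint64_t bytes)
{
	atomic_fetch_add(&qgroup_meta_rsv, bytes);
}

static uint64_t free_meta_all(void)
{
	/* read-and-zero in one step, like atomic64_xchg(&rsv, 0) */
	return atomic_exchange(&qgroup_meta_rsv, 0);
}

int main(void)
{
	reserve_meta(3ULL << 30);	/* 3 GiB would wrap a 32-bit atomic_t */
	printf("drained %llu bytes\n", (unsigned long long)free_meta_all());
	return 0;
}

The exchange matters for correctness: a plain read followed by a store could lose a concurrent reservation made between the two steps.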
@@ -2729,22 +2993,22 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
 	int ret;
 
 	changeset.bytes_changed = 0;
-	changeset.range_changed = ulist_alloc(GFP_NOFS);
-	if (WARN_ON(!changeset.range_changed))
-		return;
-
+	ulist_init(&changeset.range_changed);
 	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
 			EXTENT_QGROUP_RESERVED, &changeset);
 
 	WARN_ON(ret < 0);
 	if (WARN_ON(changeset.bytes_changed)) {
 		ULIST_ITER_INIT(&iter);
-		while ((unode = ulist_next(changeset.range_changed, &iter))) {
+		while ((unode = ulist_next(&changeset.range_changed, &iter))) {
 			btrfs_warn(BTRFS_I(inode)->root->fs_info,
 				"leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
 				inode->i_ino, unode->val, unode->aux);
 		}
-		qgroup_free(BTRFS_I(inode)->root, changeset.bytes_changed);
+		btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
+				BTRFS_I(inode)->root->objectid,
+				changeset.bytes_changed);
+
 	}
-	ulist_free(changeset.range_changed);
+	ulist_release(&changeset.range_changed);
 }
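Several hunks in this diff guard qgroup->reserved the same way: warn and clamp to zero on a would-be underflow instead of letting the unsigned counter wrap to a huge value. Reduced to its essentials (illustrative code mirroring report_reserved_underflow(), not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

struct qgroup {
	uint64_t qgroupid;
	uint64_t reserved;
};

static void free_reserved(struct qgroup *qg, uint64_t num_bytes)
{
	if (qg->reserved < num_bytes) {
		/* kernel: WARN_ON() plus btrfs_warn(), then clamp */
		fprintf(stderr,
			"qgroup %llu reserved space underflow, have: %llu, to free: %llu\n",
			(unsigned long long)qg->qgroupid,
			(unsigned long long)qg->reserved,
			(unsigned long long)num_bytes);
		qg->reserved = 0;
		return;
	}
	qg->reserved -= num_bytes;
}

int main(void)
{
	struct qgroup qg = { .qgroupid = 5, .reserved = 4096 };

	free_reserved(&qg, 8192);	/* triggers the warning path */
	printf("reserved now: %llu\n", (unsigned long long)qg.reserved);
	return 0;
}

Clamping keeps an accounting bug from turning into a wrapped-around reservation that would block all further writes to the qgroup.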