Diffstat (limited to 'libbcachefs/buckets.c')
-rw-r--r--	libbcachefs/buckets.c	316
1 file changed, 155 insertions(+), 161 deletions(-)
diff --git a/libbcachefs/buckets.c b/libbcachefs/buckets.c
index 401ff825..d72e5951 100644
--- a/libbcachefs/buckets.c
+++ b/libbcachefs/buckets.c
@@ -105,9 +105,9 @@ static void bch2_fs_stats_verify(struct bch_fs *c)
bch_data_types[j],
stats.buckets[j]);
- if ((s64) stats.online_reserved < 0)
+ if ((s64) stats.s.online_reserved < 0)
panic("sectors_online_reserved underflow: %lli\n",
- stats.online_reserved);
+ stats.s.online_reserved);
}
static void bch2_dev_stats_verify(struct bch_dev *ca)
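The (s64) casts in this hunk matter because the accounting counters are unsigned: a double release wraps to a huge value, and only the signed view makes the panic message readable. A tiny standalone illustration (not from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t online_reserved = 0;

	online_reserved -= 8;	/* buggy double release: wraps around */

	printf("unsigned: %llu  signed: %lli\n",
	       (unsigned long long) online_reserved,
	       (long long) online_reserved);
	/* prints: unsigned: 18446744073709551608  signed: -8 */
	return 0;
}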
@@ -227,38 +227,6 @@ struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *c)
return bch2_usage_read_raw(c->usage[0]);
}
-struct fs_usage_sum {
- u64 hidden;
- u64 data;
- u64 cached;
- u64 reserved;
-};
-
-static inline struct fs_usage_sum __fs_usage_sum(struct bch_fs_usage stats)
-{
- struct fs_usage_sum sum = { 0 };
- unsigned i;
-
- /*
- * For superblock and journal we count bucket usage, not sector usage,
- * because any internal fragmentation should _not_ be counted as
- * free space:
- */
- sum.hidden += stats.buckets[BCH_DATA_SB];
- sum.hidden += stats.buckets[BCH_DATA_JOURNAL];
-
- for (i = 0; i < ARRAY_SIZE(stats.replicas); i++) {
- sum.data += stats.replicas[i].data[BCH_DATA_BTREE];
- sum.data += stats.replicas[i].data[BCH_DATA_USER];
- sum.data += stats.replicas[i].ec_data;
- sum.cached += stats.replicas[i].data[BCH_DATA_CACHED];
- sum.reserved += stats.replicas[i].persistent_reserved;
- }
-
- sum.reserved += stats.online_reserved;
- return sum;
-}
-
#define RESERVE_FACTOR 6
static u64 reserve_factor(u64 r)
@@ -271,16 +239,33 @@ static u64 avail_factor(u64 r)
return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
}
-static inline u64 __bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage stats)
+static inline u64 __bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage fs_usage)
{
- struct fs_usage_sum sum = __fs_usage_sum(stats);
+ return fs_usage.s.hidden +
+ fs_usage.s.data +
+ reserve_factor(fs_usage.s.reserved +
+ fs_usage.s.online_reserved);
+}
- return sum.hidden + sum.data + reserve_factor(sum.reserved);
+u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage fs_usage)
+{
+ return min(c->capacity, __bch2_fs_sectors_used(c, fs_usage));
}
-u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage stats)
+struct bch_fs_usage_short
+bch2_fs_usage_read_short(struct bch_fs *c)
{
- return min(c->capacity, __bch2_fs_sectors_used(c, stats));
+ struct bch_fs_usage_summarized usage =
+ bch2_usage_read_raw(&c->usage[0]->s);
+ struct bch_fs_usage_short ret;
+
+ ret.capacity = READ_ONCE(c->capacity) - usage.hidden;
+ ret.used = min(ret.capacity, usage.data +
+ reserve_factor(usage.reserved +
+ usage.online_reserved));
+ ret.nr_inodes = usage.nr_inodes;
+
+ return ret;
}
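bch2_fs_usage_read_short() exists so the common "how full are we" query only has to read the small summarized struct instead of the full per-replica breakdown. A hypothetical caller might look like this (sketch only; the type and field names come from the hunk above, everything else is illustrative):

static void report_usage(struct bch_fs *c)
{
	struct bch_fs_usage_short u = bch2_fs_usage_read_short(c);

	/* capacity already has the sb/journal ("hidden") space taken out */
	pr_info("capacity %llu used %llu free %llu inodes %llu",
		(u64) u.capacity, (u64) u.used,
		(u64) (u.capacity - u.used), (u64) u.nr_inodes);
}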
static inline int is_unavailable_bucket(struct bucket_mark m)
@@ -314,15 +299,14 @@ static bool bucket_became_unavailable(struct bucket_mark old,
}
void bch2_fs_usage_apply(struct bch_fs *c,
- struct bch_fs_usage *stats,
+ struct bch_fs_usage *fs_usage,
struct disk_reservation *disk_res,
struct gc_pos gc_pos)
{
- struct fs_usage_sum sum = __fs_usage_sum(*stats);
- s64 added = sum.data + sum.reserved;
+ s64 added = fs_usage->s.data + fs_usage->s.reserved;
s64 should_not_have_added;
- percpu_rwsem_assert_held(&c->usage_lock);
+ percpu_rwsem_assert_held(&c->mark_lock);
/*
* Not allowed to reduce sectors_available except by getting a
@@ -336,23 +320,30 @@ void bch2_fs_usage_apply(struct bch_fs *c,
}
if (added > 0) {
- disk_res->sectors -= added;
- stats->online_reserved -= added;
+ disk_res->sectors -= added;
+ fs_usage->s.online_reserved -= added;
}
- /* online_reserved not subject to gc: */
- this_cpu_ptr(c->usage[0])->online_reserved +=
- stats->online_reserved;
- stats->online_reserved = 0;
-
- bch2_usage_add(this_cpu_ptr(c->usage[0]), stats);
+ bch2_usage_add(this_cpu_ptr(c->usage[0]), fs_usage);
if (gc_visited(c, gc_pos))
- bch2_usage_add(this_cpu_ptr(c->usage[1]), stats);
+ bch2_usage_add(this_cpu_ptr(c->usage[1]), fs_usage);
bch2_fs_stats_verify(c);
- memset(stats, 0, sizeof(*stats));
+ memset(fs_usage, 0, sizeof(*fs_usage));
+}
+
+static inline void account_bucket(struct bch_fs_usage *fs_usage,
+ struct bch_dev_usage *dev_usage,
+ enum bch_data_type type,
+ int nr, s64 size)
+{
+ if (type == BCH_DATA_SB || type == BCH_DATA_JOURNAL)
+ fs_usage->s.hidden += size;
+
+ fs_usage->buckets[type] += size;
+ dev_usage->buckets[type] += nr;
}
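The effect of the new helper on a single journal bucket, spelled out (illustration only; the 1024-sector bucket size is made up):

account_bucket(fs_usage, dev_usage, BCH_DATA_JOURNAL, 1, 1024);
/*
 * fs_usage->s.hidden                   += 1024  sb/journal space is "hidden"
 * fs_usage->buckets[BCH_DATA_JOURNAL]  += 1024  sectors, filesystem-wide
 * dev_usage->buckets[BCH_DATA_JOURNAL] += 1     whole buckets, per device
 *
 * bch2_dev_usage_update() (next hunk) calls it with -1/-size for the old
 * bucket state and +1/+size for the new one.
 */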
static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
@@ -362,7 +353,7 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
{
struct bch_dev_usage *dev_usage;
- percpu_rwsem_assert_held(&c->usage_lock);
+ percpu_rwsem_assert_held(&c->mark_lock);
bch2_fs_inconsistent_on(old.data_type && new.data_type &&
old.data_type != new.data_type, c,
@@ -372,15 +363,13 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
dev_usage = this_cpu_ptr(ca->usage[gc]);
- if (bucket_type(old)) {
- fs_usage->buckets[bucket_type(old)] -= ca->mi.bucket_size;
- dev_usage->buckets[bucket_type(old)]--;
- }
+ if (bucket_type(old))
+ account_bucket(fs_usage, dev_usage, bucket_type(old),
+ -1, -ca->mi.bucket_size);
- if (bucket_type(new)) {
- fs_usage->buckets[bucket_type(new)] += ca->mi.bucket_size;
- dev_usage->buckets[bucket_type(new)]++;
- }
+ if (bucket_type(new))
+ account_bucket(fs_usage, dev_usage, bucket_type(new),
+ 1, ca->mi.bucket_size);
dev_usage->buckets_alloc +=
(int) new.owned_by_allocator - (int) old.owned_by_allocator;
@@ -409,14 +398,14 @@ void bch2_dev_usage_from_buckets(struct bch_fs *c, struct bch_dev *ca)
struct bucket_array *buckets;
struct bucket *g;
- percpu_down_read_preempt_disable(&c->usage_lock);
+ percpu_down_read_preempt_disable(&c->mark_lock);
fs_usage = this_cpu_ptr(c->usage[0]);
buckets = bucket_array(ca);
for_each_bucket(g, buckets)
if (g->mark.data_type)
bch2_dev_usage_update(c, ca, fs_usage, old, g->mark, false);
- percpu_up_read_preempt_enable(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->mark_lock);
}
#define bucket_data_cmpxchg(c, ca, fs_usage, g, new, expr) \
@@ -431,11 +420,11 @@ static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, struct bucket_mark *old,
bool gc)
{
- struct bch_fs_usage *stats = this_cpu_ptr(c->usage[gc]);
+ struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
struct bucket *g = __bucket(ca, b, gc);
struct bucket_mark new;
- *old = bucket_data_cmpxchg(c, ca, stats, g, new, ({
+ *old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
BUG_ON(!is_available_bucket(new));
new.owned_by_allocator = 1;
@@ -445,13 +434,14 @@ static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
new.gen++;
}));
- stats->replicas[0].data[BCH_DATA_CACHED] -= old->cached_sectors;
+ fs_usage->replicas[0].data[BCH_DATA_CACHED] -= old->cached_sectors;
+ fs_usage->s.cached -= old->cached_sectors;
}
void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, struct bucket_mark *old)
{
- percpu_rwsem_assert_held(&c->usage_lock);
+ percpu_rwsem_assert_held(&c->mark_lock);
__bch2_invalidate_bucket(c, ca, b, old, false);
@@ -464,11 +454,11 @@ static void __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, bool owned_by_allocator,
bool gc)
{
- struct bch_fs_usage *stats = this_cpu_ptr(c->usage[gc]);
+ struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
struct bucket *g = __bucket(ca, b, gc);
struct bucket_mark old, new;
- old = bucket_data_cmpxchg(c, ca, stats, g, new, ({
+ old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
new.owned_by_allocator = owned_by_allocator;
}));
@@ -480,7 +470,7 @@ void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, bool owned_by_allocator,
struct gc_pos pos, unsigned flags)
{
- percpu_rwsem_assert_held(&c->usage_lock);
+ percpu_rwsem_assert_held(&c->mark_lock);
if (!(flags & BCH_BUCKET_MARK_GC))
__bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, false);
@@ -513,7 +503,10 @@ static void __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
checked_add(new.dirty_sectors, sectors);
}));
- fs_usage->replicas[0].data[type] += sectors;
+ if (type == BCH_DATA_BTREE ||
+ type == BCH_DATA_USER)
+ fs_usage->s.data += sectors;
+ fs_usage->replicas[0].data[type] += sectors;
}
void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
@@ -525,7 +518,7 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
type != BCH_DATA_JOURNAL);
if (likely(c)) {
- percpu_rwsem_assert_held(&c->usage_lock);
+ percpu_rwsem_assert_held(&c->mark_lock);
if (!(flags & BCH_BUCKET_MARK_GC))
__bch2_mark_metadata_bucket(c, ca, b, type, sectors,
@@ -550,36 +543,25 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
}
}
-static int __disk_sectors(struct bch_extent_crc_unpacked crc, unsigned sectors)
-{
- if (!sectors)
- return 0;
-
- return max(1U, DIV_ROUND_UP(sectors * crc.compressed_size,
- crc.uncompressed_size));
-}
-
-static s64 ptr_disk_sectors(const struct bkey *k,
- struct extent_ptr_decoded p,
- s64 sectors)
+static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
+ s64 delta)
{
+ if (delta > 0) {
+ /*
+ * marking a new extent, which _will have size_ @delta
+ *
+ * in the bch2_mark_update -> BCH_EXTENT_OVERLAP_MIDDLE
+ * case, we haven't actually created the key we'll be inserting
+ * yet (for the split) - so we don't want to be using
+ * k->size/crc.live_size here:
+ */
+ return __ptr_disk_sectors(p, delta);
+ } else {
+ BUG_ON(-delta > p.crc.live_size);
- if (p.crc.compression_type) {
- unsigned old_sectors, new_sectors;
-
- if (sectors > 0) {
- old_sectors = 0;
- new_sectors = sectors;
- } else {
- old_sectors = k->size;
- new_sectors = k->size + sectors;
- }
-
- sectors = -__disk_sectors(p.crc, old_sectors)
- +__disk_sectors(p.crc, new_sectors);
+ return (s64) __ptr_disk_sectors(p, p.crc.live_size + delta) -
+ (s64) ptr_disk_sectors(p);
}
-
- return sectors;
}
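For compressed extents the delta has to be scaled by the on-disk size. A standalone worked example, assuming __ptr_disk_sectors() behaves like the removed __disk_sectors() above (live sectors scaled by compressed_size/uncompressed_size, rounded up):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* assumed equivalent of __ptr_disk_sectors() for this illustration */
static int64_t disk_sectors(uint64_t live, unsigned compressed,
			    unsigned uncompressed)
{
	return live ? DIV_ROUND_UP(live * compressed, uncompressed) : 0;
}

int main(void)
{
	unsigned uncompressed = 128, compressed = 32;	/* 4:1 compression */
	uint64_t live = 128;				/* crc.live_size */
	int64_t delta = -64;				/* overwrite trims 64 sectors */

	printf("on-disk change: %lld sectors\n",
	       (long long) (disk_sectors(live + delta, compressed, uncompressed) -
			    disk_sectors(live, compressed, uncompressed)));
	/* prints -16: only a quarter of the trimmed sectors existed on disk */
	return 0;
}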
/*
@@ -591,7 +573,7 @@ static void bch2_mark_pointer(struct bch_fs *c,
struct extent_ptr_decoded p,
s64 sectors, enum bch_data_type data_type,
struct bch_fs_usage *fs_usage,
- u64 journal_seq, unsigned flags,
+ unsigned journal_seq, unsigned flags,
bool gc)
{
struct bucket_mark old, new;
@@ -696,8 +678,8 @@ static int bch2_mark_stripe_ptr(struct bch_fs *c,
static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
s64 sectors, enum bch_data_type data_type,
- struct bch_fs_usage *stats,
- u64 journal_seq, unsigned flags,
+ struct bch_fs_usage *fs_usage,
+ unsigned journal_seq, unsigned flags,
bool gc)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
@@ -714,11 +696,13 @@ static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
BUG_ON(!sectors);
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
- s64 disk_sectors = ptr_disk_sectors(k.k, p, sectors);
+ s64 disk_sectors = data_type == BCH_DATA_BTREE
+ ? sectors
+ : ptr_disk_sectors_delta(p, sectors);
s64 adjusted_disk_sectors = disk_sectors;
bch2_mark_pointer(c, p, disk_sectors, data_type,
- stats, journal_seq, flags, gc);
+ fs_usage, journal_seq, flags, gc);
if (!p.ptr.cached)
for (i = 0; i < p.ec_nr; i++) {
@@ -741,13 +725,18 @@ static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
}
replicas = clamp_t(unsigned, replicas,
- 1, ARRAY_SIZE(stats->replicas));
+ 1, ARRAY_SIZE(fs_usage->replicas));
ec_redundancy = clamp_t(unsigned, ec_redundancy,
- 1, ARRAY_SIZE(stats->replicas));
+ 1, ARRAY_SIZE(fs_usage->replicas));
+
+ fs_usage->s.cached += cached_sectors;
+ fs_usage->replicas[0].data[BCH_DATA_CACHED] += cached_sectors;
- stats->replicas[0].data[BCH_DATA_CACHED] += cached_sectors;
- stats->replicas[replicas - 1].data[data_type] += dirty_sectors;
- stats->replicas[ec_redundancy - 1].ec_data += ec_sectors;
+ fs_usage->s.data += dirty_sectors;
+ fs_usage->replicas[replicas - 1].data[data_type] += dirty_sectors;
+
+ fs_usage->s.data += ec_sectors;
+ fs_usage->replicas[ec_redundancy - 1].ec_data += ec_sectors;
return 0;
}
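The summarized s.* fields introduced here are meant to stay equal to the sums of the detailed per-replica counters; this is exactly what the removed __fs_usage_sum() used to compute on demand. A debug check one could imagine, using only fields visible in this patch (illustration, not part of the patch):

u64 data = 0, cached = 0;
unsigned i;

for (i = 0; i < ARRAY_SIZE(fs_usage->replicas); i++) {
	data   += fs_usage->replicas[i].data[BCH_DATA_BTREE];
	data   += fs_usage->replicas[i].data[BCH_DATA_USER];
	data   += fs_usage->replicas[i].ec_data;
	cached += fs_usage->replicas[i].data[BCH_DATA_CACHED];
}

BUG_ON(data   != fs_usage->s.data);
BUG_ON(cached != fs_usage->s.cached);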
@@ -832,8 +821,8 @@ static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
bool inserting, s64 sectors,
- struct bch_fs_usage *stats,
- u64 journal_seq, unsigned flags,
+ struct bch_fs_usage *fs_usage,
+ unsigned journal_seq, unsigned flags,
bool gc)
{
int ret = 0;
@@ -844,24 +833,31 @@ static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
? c->opts.btree_node_size
: -c->opts.btree_node_size,
BCH_DATA_BTREE,
- stats, journal_seq, flags, gc);
+ fs_usage, journal_seq, flags, gc);
break;
case KEY_TYPE_extent:
ret = bch2_mark_extent(c, k, sectors, BCH_DATA_USER,
- stats, journal_seq, flags, gc);
+ fs_usage, journal_seq, flags, gc);
break;
case KEY_TYPE_stripe:
ret = bch2_mark_stripe(c, k, inserting,
- stats, journal_seq, flags, gc);
+ fs_usage, journal_seq, flags, gc);
+ break;
+ case KEY_TYPE_alloc:
+ if (inserting)
+ fs_usage->s.nr_inodes++;
+ else
+ fs_usage->s.nr_inodes--;
break;
case KEY_TYPE_reservation: {
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
sectors *= replicas;
replicas = clamp_t(unsigned, replicas,
- 1, ARRAY_SIZE(stats->replicas));
+ 1, ARRAY_SIZE(fs_usage->replicas));
- stats->replicas[replicas - 1].persistent_reserved += sectors;
+ fs_usage->s.reserved += sectors;
+ fs_usage->replicas[replicas - 1].persistent_reserved += sectors;
break;
}
default:
@@ -875,17 +871,15 @@ int bch2_mark_key_locked(struct bch_fs *c,
struct bkey_s_c k,
bool inserting, s64 sectors,
struct gc_pos pos,
- struct bch_fs_usage *stats,
+ struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags)
{
int ret;
if (!(flags & BCH_BUCKET_MARK_GC)) {
- if (!stats)
- stats = this_cpu_ptr(c->usage[0]);
-
ret = __bch2_mark_key(c, k, inserting, sectors,
- stats, journal_seq, flags, false);
+ fs_usage ?: this_cpu_ptr(c->usage[0]),
+ journal_seq, flags, false);
if (ret)
return ret;
}
@@ -905,15 +899,15 @@ int bch2_mark_key_locked(struct bch_fs *c,
int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
bool inserting, s64 sectors,
struct gc_pos pos,
- struct bch_fs_usage *stats,
+ struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags)
{
int ret;
- percpu_down_read_preempt_disable(&c->usage_lock);
+ percpu_down_read_preempt_disable(&c->mark_lock);
ret = bch2_mark_key_locked(c, k, inserting, sectors,
- pos, stats, journal_seq, flags);
- percpu_up_read_preempt_enable(&c->usage_lock);
+ pos, fs_usage, journal_seq, flags);
+ percpu_up_read_preempt_enable(&c->mark_lock);
return ret;
}
@@ -925,20 +919,20 @@ void bch2_mark_update(struct btree_insert *trans,
struct btree_iter *iter = insert->iter;
struct btree *b = iter->l[0].b;
struct btree_node_iter node_iter = iter->l[0].iter;
- struct bch_fs_usage stats = { 0 };
+ struct bch_fs_usage fs_usage = { 0 };
struct gc_pos pos = gc_pos_btree_node(b);
struct bkey_packed *_k;
if (!btree_node_type_needs_gc(iter->btree_id))
return;
- percpu_down_read_preempt_disable(&c->usage_lock);
+ percpu_down_read_preempt_disable(&c->mark_lock);
if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k), true,
bpos_min(insert->k->k.p, b->key.k.p).offset -
bkey_start_offset(&insert->k->k),
- pos, &stats, trans->journal_res.seq, 0);
+ pos, &fs_usage, trans->journal_res.seq, 0);
while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
KEY_TYPE_discard))) {
@@ -971,7 +965,7 @@ void bch2_mark_update(struct btree_insert *trans,
BUG_ON(sectors <= 0);
bch2_mark_key_locked(c, k, true, sectors,
- pos, &stats, trans->journal_res.seq, 0);
+ pos, &fs_usage, trans->journal_res.seq, 0);
sectors = bkey_start_offset(&insert->k->k) -
k.k->p.offset;
@@ -982,14 +976,14 @@ void bch2_mark_update(struct btree_insert *trans,
}
bch2_mark_key_locked(c, k, false, sectors,
- pos, &stats, trans->journal_res.seq, 0);
+ pos, &fs_usage, trans->journal_res.seq, 0);
bch2_btree_node_iter_advance(&node_iter, b);
}
- bch2_fs_usage_apply(c, &stats, trans->disk_res, pos);
+ bch2_fs_usage_apply(c, &fs_usage, trans->disk_res, pos);
- percpu_up_read_preempt_enable(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->mark_lock);
}
/* Disk reservations: */
@@ -999,19 +993,19 @@ static u64 bch2_recalc_sectors_available(struct bch_fs *c)
int cpu;
for_each_possible_cpu(cpu)
- per_cpu_ptr(c->usage[0], cpu)->available_cache = 0;
+ per_cpu_ptr(c->pcpu, cpu)->sectors_available = 0;
- return avail_factor(bch2_fs_sectors_free(c, bch2_fs_usage_read(c)));
+ return avail_factor(bch2_fs_sectors_free(c));
}
void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
{
- percpu_down_read_preempt_disable(&c->usage_lock);
- this_cpu_sub(c->usage[0]->online_reserved,
+ percpu_down_read_preempt_disable(&c->mark_lock);
+ this_cpu_sub(c->usage[0]->s.online_reserved,
res->sectors);
bch2_fs_stats_verify(c);
- percpu_up_read_preempt_enable(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->mark_lock);
res->sectors = 0;
}
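For context, the reservation counters above are driven by a reserve/consume/release cycle roughly like the following (sketch only, using just the names that appear in this patch; sectors_to_write is a placeholder):

struct disk_reservation res = { 0 };
int ret;

ret = bch2_disk_reservation_add(c, &res, sectors_to_write, 0);
if (ret)
	return ret;		/* not enough space */

/*
 * The write path then marks the new extents; bch2_fs_usage_apply()
 * subtracts the sectors actually added from res.sectors and from
 * s.online_reserved.  Whatever was not used gets handed back:
 */
__bch2_disk_reservation_put(c, &res);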
@@ -1021,15 +1015,15 @@ void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
unsigned sectors, int flags)
{
- struct bch_fs_usage *stats;
+ struct bch_fs_pcpu *pcpu;
u64 old, v, get;
s64 sectors_available;
int ret;
- percpu_down_read_preempt_disable(&c->usage_lock);
- stats = this_cpu_ptr(c->usage[0]);
+ percpu_down_read_preempt_disable(&c->mark_lock);
+ pcpu = this_cpu_ptr(c->pcpu);
- if (sectors <= stats->available_cache)
+ if (sectors <= pcpu->sectors_available)
goto out;
v = atomic64_read(&c->sectors_available);
@@ -1038,22 +1032,22 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
get = min((u64) sectors + SECTORS_CACHE, old);
if (get < sectors) {
- percpu_up_read_preempt_enable(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->mark_lock);
goto recalculate;
}
} while ((v = atomic64_cmpxchg(&c->sectors_available,
old, old - get)) != old);
- stats->available_cache += get;
+ pcpu->sectors_available += get;
out:
- stats->available_cache -= sectors;
- stats->online_reserved += sectors;
- res->sectors += sectors;
+ pcpu->sectors_available -= sectors;
+ this_cpu_add(c->usage[0]->s.online_reserved, sectors);
+ res->sectors += sectors;
bch2_disk_reservations_verify(c, flags);
bch2_fs_stats_verify(c);
- percpu_up_read_preempt_enable(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->mark_lock);
return 0;
recalculate:
@@ -1074,15 +1068,15 @@ recalculate:
return -EINTR;
}
- percpu_down_write(&c->usage_lock);
+ percpu_down_write(&c->mark_lock);
sectors_available = bch2_recalc_sectors_available(c);
if (sectors <= sectors_available ||
(flags & BCH_DISK_RESERVATION_NOFAIL)) {
atomic64_set(&c->sectors_available,
max_t(s64, 0, sectors_available - sectors));
- stats->online_reserved += sectors;
- res->sectors += sectors;
+ this_cpu_add(c->usage[0]->s.online_reserved, sectors);
+ res->sectors += sectors;
ret = 0;
bch2_disk_reservations_verify(c, flags);
@@ -1092,7 +1086,7 @@ recalculate:
}
bch2_fs_stats_verify(c);
- percpu_up_write(&c->usage_lock);
+ percpu_up_write(&c->mark_lock);
if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
up_read(&c->gc_lock);
@@ -1115,7 +1109,7 @@ static void buckets_free_rcu(struct rcu_head *rcu)
int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
struct bucket_array *buckets = NULL, *old_buckets = NULL;
- unsigned long *buckets_dirty = NULL;
+ unsigned long *buckets_nouse = NULL;
unsigned long *buckets_written = NULL;
u8 *oldest_gens = NULL;
alloc_fifo free[RESERVE_NR];
@@ -1145,7 +1139,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
GFP_KERNEL|__GFP_ZERO)) ||
!(oldest_gens = kvpmalloc(nbuckets * sizeof(u8),
GFP_KERNEL|__GFP_ZERO)) ||
- !(buckets_dirty = kvpmalloc(BITS_TO_LONGS(nbuckets) *
+ !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
sizeof(unsigned long),
GFP_KERNEL|__GFP_ZERO)) ||
!(buckets_written = kvpmalloc(BITS_TO_LONGS(nbuckets) *
@@ -1168,7 +1162,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
if (resize) {
down_write(&c->gc_lock);
down_write(&ca->bucket_lock);
- percpu_down_write(&c->usage_lock);
+ percpu_down_write(&c->mark_lock);
}
old_buckets = bucket_array(ca);
@@ -1182,8 +1176,8 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
memcpy(oldest_gens,
ca->oldest_gens,
n * sizeof(u8));
- memcpy(buckets_dirty,
- ca->buckets_dirty,
+ memcpy(buckets_nouse,
+ ca->buckets_nouse,
BITS_TO_LONGS(n) * sizeof(unsigned long));
memcpy(buckets_written,
ca->buckets_written,
@@ -1194,11 +1188,11 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
buckets = old_buckets;
swap(ca->oldest_gens, oldest_gens);
- swap(ca->buckets_dirty, buckets_dirty);
+ swap(ca->buckets_nouse, buckets_nouse);
swap(ca->buckets_written, buckets_written);
if (resize)
- percpu_up_write(&c->usage_lock);
+ percpu_up_write(&c->mark_lock);
spin_lock(&c->freelist_lock);
for (i = 0; i < RESERVE_NR; i++) {
@@ -1233,7 +1227,7 @@ err:
free_fifo(&free_inc);
for (i = 0; i < RESERVE_NR; i++)
free_fifo(&free[i]);
- kvpfree(buckets_dirty,
+ kvpfree(buckets_nouse,
BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
kvpfree(buckets_written,
BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
@@ -1256,7 +1250,7 @@ void bch2_dev_buckets_free(struct bch_dev *ca)
free_fifo(&ca->free[i]);
kvpfree(ca->buckets_written,
BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
- kvpfree(ca->buckets_dirty,
+ kvpfree(ca->buckets_nouse,
BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
kvpfree(ca->oldest_gens, ca->mi.nbuckets * sizeof(u8));
kvpfree(rcu_dereference_protected(ca->buckets[0], 1),