Diffstat (limited to 'libbcachefs/alloc_background.c')
-rw-r--r--  libbcachefs/alloc_background.c  52
1 file changed, 14 insertions, 38 deletions
diff --git a/libbcachefs/alloc_background.c b/libbcachefs/alloc_background.c
index c3efb435..9ff61deb 100644
--- a/libbcachefs/alloc_background.c
+++ b/libbcachefs/alloc_background.c
@@ -373,6 +373,11 @@ static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
}
}
+static inline u64 bucket_clock_freq(u64 capacity)
+{
+ return max(capacity >> 10, 2028ULL);
+}
+
static void bch2_inc_clock_hand(struct io_timer *timer)
{
struct bucket_clock *clock = container_of(timer,
@@ -411,7 +416,7 @@ static void bch2_inc_clock_hand(struct io_timer *timer)
* RW mode (that will be 0 when we're RO, yet we can still service
* reads)
*/
- timer->expire += capacity >> 10;
+ timer->expire += bucket_clock_freq(capacity);
bch2_io_timer_add(&c->io_clock[clock->rw], timer);
}
@@ -423,7 +428,7 @@ static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
clock->hand = 1;
clock->rw = rw;
clock->rescale.fn = bch2_inc_clock_hand;
- clock->rescale.expire = c->capacity >> 10;
+ clock->rescale.expire = bucket_clock_freq(c->capacity);
mutex_init(&clock->lock);
}
@@ -974,6 +979,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
{
struct bch_dev *ca;
u64 capacity = 0, reserved_sectors = 0, gc_reserve;
+ unsigned bucket_size_max = 0;
unsigned long ra_pages = 0;
unsigned i, j;
@@ -1009,14 +1015,9 @@ void bch2_recalc_capacity(struct bch_fs *c)
for (j = 0; j < RESERVE_NONE; j++)
dev_reserve += ca->free[j].size;
- dev_reserve += ca->free_inc.size;
-
- dev_reserve += ARRAY_SIZE(c->write_points);
-
dev_reserve += 1; /* btree write point */
dev_reserve += 1; /* copygc write point */
dev_reserve += 1; /* rebalance write point */
- dev_reserve += WRITE_POINT_COUNT;
dev_reserve *= ca->mi.bucket_size;
@@ -1026,6 +1027,9 @@ void bch2_recalc_capacity(struct bch_fs *c)
ca->mi.first_bucket);
reserved_sectors += dev_reserve * 2;
+
+ bucket_size_max = max_t(unsigned, bucket_size_max,
+ ca->mi.bucket_size);
}
gc_reserve = c->opts.gc_reserve_bytes
@@ -1038,6 +1042,8 @@ void bch2_recalc_capacity(struct bch_fs *c)
c->capacity = capacity - reserved_sectors;
+ c->bucket_size_max = bucket_size_max;
+
if (c->capacity) {
bch2_io_timer_add(&c->io_clock[READ],
&c->bucket_clock[READ].rescale);
@@ -1329,8 +1335,6 @@ not_enough:
* invalidated on disk:
*/
if (invalidating_data) {
- BUG();
- pr_info("holding writes");
pr_debug("invalidating existing data");
set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
} else {
@@ -1390,40 +1394,12 @@ int bch2_fs_allocator_start(struct bch_fs *c)
return bch2_alloc_write(c);
}
-void bch2_fs_allocator_init(struct bch_fs *c)
+void bch2_fs_allocator_background_init(struct bch_fs *c)
{
- struct open_bucket *ob;
- struct write_point *wp;
-
- mutex_init(&c->write_points_hash_lock);
spin_lock_init(&c->freelist_lock);
bch2_bucket_clock_init(c, READ);
bch2_bucket_clock_init(c, WRITE);
- /* open bucket 0 is a sentinal NULL: */
- spin_lock_init(&c->open_buckets[0].lock);
-
- for (ob = c->open_buckets + 1;
- ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
- spin_lock_init(&ob->lock);
- c->open_buckets_nr_free++;
-
- ob->freelist = c->open_buckets_freelist;
- c->open_buckets_freelist = ob - c->open_buckets;
- }
-
- writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
- writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);
-
- for (wp = c->write_points;
- wp < c->write_points + ARRAY_SIZE(c->write_points); wp++) {
- writepoint_init(wp, BCH_DATA_USER);
-
- wp->last_used = sched_clock();
- wp->write_point = (unsigned long) wp;
- hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
- }
-
c->pd_controllers_update_seconds = 5;
INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);
}
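
Note (not part of the patch): a minimal standalone sketch of what the new bucket_clock_freq() clamp does to the IO clock period introduced above. The hand now advances once per max(capacity >> 10, 2028) sectors instead of a bare capacity >> 10, so very small devices still get a sane minimum period. The capacity figures below are illustrative assumptions, not values from the patch.

/* Hedged sketch: mirrors the max(capacity >> 10, 2028ULL) clamp added in the diff. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static inline uint64_t bucket_clock_freq(uint64_t capacity)
{
	return capacity >> 10 > 2028ULL ? capacity >> 10 : 2028ULL;
}

int main(void)
{
	/* Illustrative capacities in 512-byte sectors (assumed, not from the patch): */
	uint64_t small = 1ULL << 20;	/* ~512 MB device: 1024 < 2028, clamp applies */
	uint64_t large = 1ULL << 31;	/* ~1 TB device: period is capacity >> 10 = 2097152 */

	printf("small device: clock hand advances every %" PRIu64 " sectors\n",
	       bucket_clock_freq(small));
	printf("large device: clock hand advances every %" PRIu64 " sectors\n",
	       bucket_clock_freq(large));
	return 0;
}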