author    Kent Overstreet <kent.overstreet@gmail.com>  2022-04-04 13:48:45 -0400
committer Kent Overstreet <kent.overstreet@gmail.com>  2022-04-04 13:48:45 -0400
commit    498874fdb71973c1856f35414bd607e58be16790 (patch)
tree      9befebc09f4ebd856788d5fd5ac4ffecbd7698e9 /libbcachefs
parent    32aabbc4e547592f957de7d4b093986e55981085 (diff)
Update bcachefs sources to 91e6c3e0d5 bcachefs: Gap buffer for journal keys
Diffstat (limited to 'libbcachefs')
-rw-r--r--  libbcachefs/bcachefs.h     |   6
-rw-r--r--  libbcachefs/btree_cache.c  |  13
-rw-r--r--  libbcachefs/buckets.c      |  90
-rw-r--r--  libbcachefs/buckets.h      |   6
-rw-r--r--  libbcachefs/io.c           |  26
-rw-r--r--  libbcachefs/journal.c      |  11
-rw-r--r--  libbcachefs/journal_sb.c   |   3
-rw-r--r--  libbcachefs/recovery.c     | 126
-rw-r--r--  libbcachefs/recovery.h     |   3
-rw-r--r--  libbcachefs/super-io.c     |  11
-rw-r--r--  libbcachefs/util.h         |  25
11 files changed, 213 insertions, 107 deletions
diff --git a/libbcachefs/bcachefs.h b/libbcachefs/bcachefs.h
index a13845a2..ab6df637 100644
--- a/libbcachefs/bcachefs.h
+++ b/libbcachefs/bcachefs.h
@@ -548,6 +548,12 @@ struct journal_keys {
u32 journal_seq;
u32 journal_offset;
} *d;
+ /*
+ * Gap buffer: instead of all the empty space in the array being at the
+ * end of the buffer - from @nr to @size - the empty space is at @gap.
+ * This means that sequential insertions are O(n) instead of O(n^2).
+ */
+ size_t gap;
size_t nr;
size_t size;
u64 journal_seq_base;
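
The new @gap field turns the journal_keys array into a gap buffer: the unused
slots sit at @gap, in the middle of the array, instead of at the end, so the
hole only has to be shuffled when an insertion lands somewhere else. A minimal
standalone sketch of the layout and the index translation (toy int payload;
gap_buf and the helper name are illustrative - the real translation helper,
idx_to_pos(), is added to recovery.c further down):

/* Illustrative sketch only, not the bcachefs implementation. */
#include <stddef.h>
#include <stdio.h>

struct gap_buf {
	int	*d;	/* backing array with @size slots */
	size_t	gap;	/* first slot of the empty region */
	size_t	nr;	/* number of live elements */
	size_t	size;	/* total slots */
};

/* Logical index 0..nr-1 -> physical slot: indices at or past @gap skip the hole: */
static size_t idx_to_pos(const struct gap_buf *b, size_t idx)
{
	size_t gap_size = b->size - b->nr;

	return idx < b->gap ? idx : idx + gap_size;
}

int main(void)
{
	/* Live elements 1 2 3 4 5; slots 2..4 are the gap: */
	int slots[8] = { 1, 2, -1, -1, -1, 3, 4, 5 };
	struct gap_buf b = { .d = slots, .gap = 2, .nr = 5, .size = 8 };

	for (size_t i = 0; i < b.nr; i++)
		printf("idx %zu -> slot %zu -> %d\n",
		       i, idx_to_pos(&b, i), b.d[idx_to_pos(&b, i)]);
	return 0;
}
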
diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c
index 0dcdc30c..8e04129a 100644
--- a/libbcachefs/btree_cache.c
+++ b/libbcachefs/btree_cache.c
@@ -281,7 +281,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
struct btree_cache *bc = &c->btree_cache;
struct btree *b, *t;
unsigned long nr = sc->nr_to_scan;
- unsigned long can_free;
+ unsigned long can_free = 0;
unsigned long touched = 0;
unsigned long freed = 0;
unsigned i, flags;
@@ -305,7 +305,6 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
* succeed, so that inserting keys into the btree can always succeed and
* IO can always make forward progress:
*/
- nr /= btree_pages(c);
can_free = btree_cache_can_free(bc);
nr = min_t(unsigned long, nr, can_free);
@@ -375,13 +374,10 @@ touched:
mutex_unlock(&bc->lock);
out:
- ret = (unsigned long) freed * btree_pages(c);
+ ret = freed;
memalloc_nofs_restore(flags);
out_norestore:
- trace_btree_cache_scan(sc->nr_to_scan,
- sc->nr_to_scan / btree_pages(c),
- btree_cache_can_free(bc),
- ret);
+ trace_btree_cache_scan(sc->nr_to_scan, can_free, ret);
return ret;
}
@@ -395,7 +391,7 @@ static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
if (bch2_btree_shrinker_disabled)
return 0;
- return btree_cache_can_free(bc) * btree_pages(c);
+ return btree_cache_can_free(bc);
}
void bch2_fs_btree_cache_exit(struct bch_fs *c)
@@ -482,7 +478,6 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
bc->shrink.count_objects = bch2_btree_cache_count;
bc->shrink.scan_objects = bch2_btree_cache_scan;
bc->shrink.seeks = 4;
- bc->shrink.batch = btree_pages(c) * 2;
ret = register_shrinker(&bc->shrink);
out:
pr_verbose_init(c->opts, "ret %i", ret);
diff --git a/libbcachefs/buckets.c b/libbcachefs/buckets.c
index 7654ab24..51ed9609 100644
--- a/libbcachefs/buckets.c
+++ b/libbcachefs/buckets.c
@@ -620,13 +620,13 @@ int bch2_mark_alloc(struct btree_trans *trans,
return 0;
}
-void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
- size_t b, enum bch_data_type data_type,
- unsigned sectors, struct gc_pos pos,
- unsigned flags)
+int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, enum bch_data_type data_type,
+ unsigned sectors, struct gc_pos pos,
+ unsigned flags)
{
struct bucket old, new, *g;
- bool overflow;
+ int ret = 0;
BUG_ON(!(flags & BTREE_TRIGGER_GC));
BUG_ON(data_type != BCH_DATA_sb &&
@@ -636,7 +636,7 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
* Backup superblock might be past the end of our normal usable space:
*/
if (b >= ca->mi.nbuckets)
- return;
+ return 0;
percpu_down_read(&c->mark_lock);
g = gc_bucket(ca, b);
@@ -644,37 +644,43 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
bucket_lock(g);
old = *g;
+ if (bch2_fs_inconsistent_on(g->data_type &&
+ g->data_type != data_type, c,
+ "different types of data in same bucket: %s, %s",
+ bch2_data_types[g->data_type],
+ bch2_data_types[data_type])) {
+ ret = -EIO;
+ goto err;
+ }
+
+ if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
+ "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
+ ca->dev_idx, b, g->gen,
+ bch2_data_types[g->data_type ?: data_type],
+ g->dirty_sectors, sectors)) {
+ ret = -EIO;
+ goto err;
+ }
+
+
g->data_type = data_type;
g->dirty_sectors += sectors;
- overflow = g->dirty_sectors < sectors;
-
new = *g;
+err:
bucket_unlock(g);
-
- bch2_fs_inconsistent_on(old.data_type &&
- old.data_type != data_type, c,
- "different types of data in same bucket: %s, %s",
- bch2_data_types[old.data_type],
- bch2_data_types[data_type]);
-
- bch2_fs_inconsistent_on(overflow, c,
- "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
- ca->dev_idx, b, new.gen,
- bch2_data_types[old.data_type ?: data_type],
- old.dirty_sectors, sectors);
-
- bch2_dev_usage_update_m(c, ca, old, new, 0, true);
+ if (!ret)
+ bch2_dev_usage_update_m(c, ca, old, new, 0, true);
percpu_up_read(&c->mark_lock);
+ return ret;
}
static s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
EBUG_ON(sectors < 0);
- return p.crc.compression_type &&
- p.crc.compression_type != BCH_COMPRESSION_TYPE_incompressible
+ return crc_is_compressed(p.crc)
? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
- p.crc.uncompressed_size)
+ p.crc.uncompressed_size)
: sectors;
}
@@ -808,25 +814,22 @@ static int mark_stripe_bucket(struct btree_trans *trans,
old = *g;
ret = check_bucket_ref(c, k, ptr, sectors, data_type,
- new.gen, new.data_type,
- new.dirty_sectors, new.cached_sectors);
- if (ret) {
- bucket_unlock(g);
+ g->gen, g->data_type,
+ g->dirty_sectors, g->cached_sectors);
+ if (ret)
goto err;
- }
- new.dirty_sectors += sectors;
if (data_type)
- new.data_type = data_type;
+ g->data_type = data_type;
+ g->dirty_sectors += sectors;
g->stripe = k.k->p.offset;
g->stripe_redundancy = s->nr_redundant;
-
new = *g;
- bucket_unlock(g);
-
- bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
err:
+ bucket_unlock(g);
+ if (!ret)
+ bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
percpu_up_read(&c->mark_lock);
printbuf_exit(&buf);
return ret;
@@ -872,29 +875,22 @@ static int bch2_mark_pointer(struct btree_trans *trans,
percpu_down_read(&c->mark_lock);
g = PTR_GC_BUCKET(ca, &p.ptr);
-
bucket_lock(g);
old = *g;
bucket_data_type = g->data_type;
-
ret = __mark_pointer(trans, k, &p.ptr, sectors,
data_type, g->gen,
&bucket_data_type,
&g->dirty_sectors,
&g->cached_sectors);
- if (ret) {
- bucket_unlock(g);
- goto err;
- }
-
- g->data_type = bucket_data_type;
+ if (!ret)
+ g->data_type = bucket_data_type;
new = *g;
bucket_unlock(g);
-
- bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
-err:
+ if (!ret)
+ bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
percpu_up_read(&c->mark_lock);
return ret;
diff --git a/libbcachefs/buckets.h b/libbcachefs/buckets.h
index 853bc9dd..656a04b5 100644
--- a/libbcachefs/buckets.h
+++ b/libbcachefs/buckets.h
@@ -194,9 +194,9 @@ bch2_fs_usage_read_short(struct bch_fs *);
void bch2_fs_usage_initialize(struct bch_fs *);
-void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
- size_t, enum bch_data_type, unsigned,
- struct gc_pos, unsigned);
+int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
+ size_t, enum bch_data_type, unsigned,
+ struct gc_pos, unsigned);
int bch2_mark_alloc(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
int bch2_mark_extent(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
diff --git a/libbcachefs/io.c b/libbcachefs/io.c
index 36929451..223344e1 100644
--- a/libbcachefs/io.c
+++ b/libbcachefs/io.c
@@ -1981,22 +1981,28 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
struct printbuf buf = PRINTBUF;
int ret;
- bch2_bkey_val_to_text(&buf, c, k);
- bch2_fs_inconsistent(c, "Attempting to read from stale dirty pointer: %s", buf.buf);
-
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
- POS(ptr.dev, PTR_BUCKET_NR(ca, &ptr)),
+ PTR_BUCKET_POS(c, &ptr),
BTREE_ITER_CACHED);
- ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
- if (ret)
- goto out;
+ pr_buf(&buf, "Attempting to read from stale dirty pointer:");
+ pr_indent_push(&buf, 2);
+ pr_newline(&buf);
bch2_bkey_val_to_text(&buf, c, k);
- bch_err(c, "%s", buf.buf);
- bch_err(c, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
+ pr_newline(&buf);
+
+ pr_buf(&buf, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
+
+ ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
+ if (!ret) {
+ pr_newline(&buf);
+ bch2_bkey_val_to_text(&buf, c, k);
+ }
+
+ bch2_fs_inconsistent(c, "%s", buf.buf);
+
bch2_trans_iter_exit(trans, &iter);
-out:
printbuf_exit(&buf);
}
diff --git a/libbcachefs/journal.c b/libbcachefs/journal.c
index 505e8367..d01b1cd4 100644
--- a/libbcachefs/journal.c
+++ b/libbcachefs/journal.c
@@ -964,6 +964,7 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
int bch2_dev_journal_alloc(struct bch_dev *ca)
{
unsigned nr;
+ int ret;
if (dynamic_fault("bcachefs:add:journal_alloc"))
return -ENOMEM;
@@ -980,7 +981,15 @@ int bch2_dev_journal_alloc(struct bch_dev *ca)
min(1 << 13,
(1 << 24) / ca->mi.bucket_size));
- return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
+ if (ca->fs)
+ mutex_lock(&ca->fs->sb_lock);
+
+ ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
+
+ if (ca->fs)
+ mutex_unlock(&ca->fs->sb_lock);
+
+ return ret;
}
/* startup/shutdown: */
diff --git a/libbcachefs/journal_sb.c b/libbcachefs/journal_sb.c
index 8efe7b7e..506044e3 100644
--- a/libbcachefs/journal_sb.c
+++ b/libbcachefs/journal_sb.c
@@ -186,7 +186,8 @@ int bch2_journal_buckets_to_sb(struct bch_fs *c, struct bch_dev *ca)
struct bch_sb_field_journal_v2 *j;
unsigned i, dst = 0, nr = 1;
- lockdep_assert_held(&c->sb_lock);
+ if (c)
+ lockdep_assert_held(&c->sb_lock);
if (!ja->nr) {
bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal);
diff --git a/libbcachefs/recovery.c b/libbcachefs/recovery.c
index ca92fe84..6a92c1a0 100644
--- a/libbcachefs/recovery.c
+++ b/libbcachefs/recovery.c
@@ -72,58 +72,97 @@ static int journal_key_cmp(const struct journal_key *l, const struct journal_key
return __journal_key_cmp(l->btree_id, l->level, l->k->k.p, r);
}
-size_t bch2_journal_key_search(struct journal_keys *journal_keys,
+static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
+{
+ size_t gap_size = keys->size - keys->nr;
+
+ if (idx >= keys->gap)
+ idx += gap_size;
+ return idx;
+}
+
+static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
+{
+ return keys->d + idx_to_pos(keys, idx);
+}
+
+size_t bch2_journal_key_search(struct journal_keys *keys,
enum btree_id id, unsigned level,
struct bpos pos)
{
- size_t l = 0, r = journal_keys->nr, m;
+ size_t l = 0, r = keys->nr, m;
while (l < r) {
m = l + ((r - l) >> 1);
- if (__journal_key_cmp(id, level, pos, &journal_keys->d[m]) > 0)
+ if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0)
l = m + 1;
else
r = m;
}
- BUG_ON(l < journal_keys->nr &&
- __journal_key_cmp(id, level, pos, &journal_keys->d[l]) > 0);
+ BUG_ON(l < keys->nr &&
+ __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0);
BUG_ON(l &&
- __journal_key_cmp(id, level, pos, &journal_keys->d[l - 1]) <= 0);
+ __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);
- return l;
+ return idx_to_pos(keys, l);
}
struct bkey_i *bch2_journal_keys_peek(struct bch_fs *c, enum btree_id btree_id,
unsigned level, struct bpos pos)
{
struct journal_keys *keys = &c->journal_keys;
- struct journal_key *end = keys->d + keys->nr;
- struct journal_key *k = keys->d +
- bch2_journal_key_search(keys, btree_id, level, pos);
+ size_t idx = bch2_journal_key_search(keys, btree_id, level, pos);
- while (k < end && k->overwritten)
- k++;
+ while (idx < keys->size &&
+ keys->d[idx].overwritten) {
+ idx++;
+ if (idx == keys->gap)
+ idx += keys->size - keys->nr;
+ }
- if (k < end &&
- k->btree_id == btree_id &&
- k->level == level)
- return k->k;
+ if (idx < keys->size &&
+ keys->d[idx].btree_id == btree_id &&
+ keys->d[idx].level == level)
+ return keys->d[idx].k;
return NULL;
}
-static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
+static void journal_iters_fix(struct bch_fs *c)
{
- struct bkey_i *n = iter->keys->d[idx].k;
- struct btree_and_journal_iter *biter =
- container_of(iter, struct btree_and_journal_iter, journal);
-
- if (iter->idx > idx ||
- (iter->idx == idx &&
- biter->last &&
- bpos_cmp(n->k.p, biter->unpacked.p) <= 0))
- iter->idx++;
+ struct journal_keys *keys = &c->journal_keys;
+ /* The key we just inserted is immediately before the gap: */
+ struct journal_key *n = &keys->d[keys->gap - 1];
+ size_t gap_end = keys->gap + (keys->size - keys->nr);
+ struct btree_and_journal_iter *iter;
+
+ /*
+ * If an iterator points one after the key we just inserted,
+ * and the key we just inserted compares >= the iterator's position,
+ * decrement the iterator so it points at the key we just inserted:
+ */
+ list_for_each_entry(iter, &c->journal_iters, journal.list)
+ if (iter->journal.idx == gap_end &&
+ iter->last &&
+ iter->b->c.btree_id == n->btree_id &&
+ iter->b->c.level == n->level &&
+ bpos_cmp(n->k->k.p, iter->unpacked.p) >= 0)
+ iter->journal.idx = keys->gap - 1;
+}
+
+static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
+{
+ struct journal_keys *keys = &c->journal_keys;
+ struct journal_iter *iter;
+ size_t gap_size = keys->size - keys->nr;
+
+ list_for_each_entry(iter, &c->journal_iters, list) {
+ if (iter->idx > old_gap)
+ iter->idx -= gap_size;
+ if (iter->idx >= new_gap)
+ iter->idx += gap_size;
+ }
}
int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
@@ -141,12 +180,11 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
.journal_seq = U32_MAX,
};
struct journal_keys *keys = &c->journal_keys;
- struct journal_iter *iter;
size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);
BUG_ON(test_bit(BCH_FS_RW, &c->flags));
- if (idx < keys->nr &&
+ if (idx < keys->size &&
journal_key_cmp(&n, &keys->d[idx]) == 0) {
if (keys->d[idx].allocated)
kfree(keys->d[idx].k);
@@ -154,6 +192,9 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
return 0;
}
+ if (idx > keys->gap)
+ idx -= keys->size - keys->nr;
+
if (keys->nr == keys->size) {
struct journal_keys new_keys = {
.nr = keys->nr,
@@ -168,15 +209,24 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
return -ENOMEM;
}
+ /* Since @keys was full, there was no gap: */
memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
kvfree(keys->d);
*keys = new_keys;
+
+ /* And now the gap is at the end: */
+ keys->gap = keys->nr;
}
- array_insert_item(keys->d, keys->nr, idx, n);
+ journal_iters_move_gap(c, keys->gap, idx);
+
+ move_gap(keys->d, keys->nr, keys->size, keys->gap, idx);
+ keys->gap = idx;
+
+ keys->nr++;
+ keys->d[keys->gap++] = n;
- list_for_each_entry(iter, &c->journal_iters, list)
- journal_iter_fix(c, iter, idx);
+ journal_iters_fix(c);
return 0;
}
@@ -220,7 +270,7 @@ void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
struct journal_keys *keys = &c->journal_keys;
size_t idx = bch2_journal_key_search(keys, btree, level, pos);
- if (idx < keys->nr &&
+ if (idx < keys->size &&
keys->d[idx].btree_id == btree &&
keys->d[idx].level == level &&
!bpos_cmp(keys->d[idx].k->k.p, pos))
@@ -246,8 +296,11 @@ static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
static void bch2_journal_iter_advance(struct journal_iter *iter)
{
- if (iter->idx < iter->keys->nr)
+ if (iter->idx < iter->keys->size) {
iter->idx++;
+ if (iter->idx == iter->keys->gap)
+ iter->idx += iter->keys->size - iter->keys->nr;
+ }
}
static void bch2_journal_iter_exit(struct journal_iter *iter)
@@ -409,6 +462,9 @@ void bch2_journal_keys_free(struct journal_keys *keys)
{
struct journal_key *i;
+ move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
+ keys->gap = keys->nr;
+
for (i = keys->d; i < keys->d + keys->nr; i++)
if (i->allocated)
kfree(i->k);
@@ -478,6 +534,7 @@ static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
}
keys.nr = dst - keys.d;
+ keys.gap = keys.nr;
err:
return keys;
}
@@ -538,6 +595,9 @@ static int bch2_journal_replay(struct bch_fs *c)
size_t i;
int ret;
+ move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
+ keys->gap = keys->nr;
+
keys_sorted = kvmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL);
if (!keys_sorted)
return -ENOMEM;
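
The insertion path in bch2_journal_key_insert_take() above is where the gap
buffer pays off: instead of shifting the whole tail with array_insert_item(),
it moves the gap to the insertion index once, writes the new key into the gap,
and bumps @nr and @gap, so a run of keys inserted in order never has to
memmove anything. A hedged sketch of that sequence on the same toy int buffer
as the earlier sketch, redeclared here so the example stands alone
(move_gap_to() and insert_at() are illustrative stand-ins for move_gap() and
the bookkeeping in the hunk above):

/* Illustrative sketch only, not the bcachefs implementation. */
#include <assert.h>
#include <stddef.h>
#include <string.h>

struct gap_buf {
	int	*d;	/* backing array with @size slots */
	size_t	gap;	/* first slot of the empty region */
	size_t	nr;	/* number of live elements */
	size_t	size;	/* total slots */
};

/* Slide elements across the hole so the empty region starts at @new_gap: */
static void move_gap_to(struct gap_buf *b, size_t new_gap)
{
	size_t gap_size = b->size - b->nr;
	size_t gap_end  = b->gap + gap_size;

	if (new_gap < b->gap)		/* elements new_gap..gap-1 move right, past the hole */
		memmove(b->d + gap_end - (b->gap - new_gap),
			b->d + new_gap,
			sizeof(b->d[0]) * (b->gap - new_gap));
	else if (new_gap > b->gap)	/* elements after the hole move left, into it */
		memmove(b->d + b->gap,
			b->d + gap_end,
			sizeof(b->d[0]) * (new_gap - b->gap));
	b->gap = new_gap;
}

/* Insert @v at logical index @idx; caller must guarantee there is free space: */
static void insert_at(struct gap_buf *b, size_t idx, int v)
{
	assert(b->nr < b->size);
	move_gap_to(b, idx);
	b->nr++;
	b->d[b->gap++] = v;	/* mirrors keys->d[keys->gap++] = n */
}

int main(void)
{
	int slots[8] = { 10, 20, 30, 0, 0, 0, 0, 0 };
	struct gap_buf b = { .d = slots, .gap = 3, .nr = 3, .size = 8 };

	insert_at(&b, 2, 25);	/* gap moves left to index 2, then advances to 3 */
	insert_at(&b, 3, 27);	/* sequential insert: gap is already here, no memmove */

	assert(slots[0] == 10 && slots[1] == 20 && slots[2] == 25 &&
	       slots[3] == 27 && slots[7] == 30 && b.nr == 5 && b.gap == 4);
	return 0;
}
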
diff --git a/libbcachefs/recovery.h b/libbcachefs/recovery.h
index e6927a91..30580a89 100644
--- a/libbcachefs/recovery.h
+++ b/libbcachefs/recovery.h
@@ -2,9 +2,6 @@
#ifndef _BCACHEFS_RECOVERY_H
#define _BCACHEFS_RECOVERY_H
-#define for_each_journal_key(keys, i) \
- for (i = (keys).d; i < (keys).d + (keys).nr; (i)++)
-
struct journal_iter {
struct list_head list;
enum btree_id btree_id;
diff --git a/libbcachefs/super-io.c b/libbcachefs/super-io.c
index 71abf871..15241a56 100644
--- a/libbcachefs/super-io.c
+++ b/libbcachefs/super-io.c
@@ -21,6 +21,8 @@
#include <linux/backing-dev.h>
#include <linux/sort.h>
+#include <trace/events/bcachefs.h>
+
const char * const bch2_sb_fields[] = {
#define x(name, nr) #name,
BCH_SB_FIELDS()
@@ -797,6 +799,8 @@ int bch2_write_super(struct bch_fs *c)
unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;
int ret = 0;
+ trace_write_super(c, _RET_IP_);
+
if (c->opts.very_degraded)
degraded_flags |= BCH_FORCE_IF_LOST;
@@ -831,6 +835,13 @@ int bch2_write_super(struct bch_fs *c)
if (c->opts.nochanges)
goto out;
+ /*
+ * Defer writing the superblock until filesystem initialization is
+ * complete - don't write out a partly initialized superblock:
+ */
+ if (!BCH_SB_INITIALIZED(c->disk_sb.sb))
+ goto out;
+
for_each_online_member(ca, c, i) {
__set_bit(ca->dev_idx, sb_written.d);
ca->sb_write_error = 0;
diff --git a/libbcachefs/util.h b/libbcachefs/util.h
index 086d941f..98f70a5c 100644
--- a/libbcachefs/util.h
+++ b/libbcachefs/util.h
@@ -806,6 +806,31 @@ do { \
#define array_remove_item(_array, _nr, _pos) \
array_remove_items(_array, _nr, _pos, 1)
+static inline void __move_gap(void *array, size_t element_size,
+ size_t nr, size_t size,
+ size_t old_gap, size_t new_gap)
+{
+ size_t gap_end = old_gap + size - nr;
+
+ if (new_gap < old_gap) {
+ size_t move = old_gap - new_gap;
+
+ memmove(array + element_size * (gap_end - move),
+ array + element_size * (old_gap - move),
+ element_size * move);
+ } else if (new_gap > old_gap) {
+ size_t move = new_gap - old_gap;
+
+ memmove(array + element_size * old_gap,
+ array + element_size * gap_end,
+ element_size * move);
+ }
+}
+
+/* Move the gap in a gap buffer: */
+#define move_gap(_array, _nr, _size, _old_gap, _new_gap) \
+ __move_gap(_array, sizeof(_array[0]), _nr, _size, _old_gap, _new_gap)
+
#define bubble_sort(_base, _nr, _cmp) \
do { \
ssize_t _i, _end; \
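
For reference, the two memmove branches of the new helper can be exercised
directly. The sketch below copies __move_gap() and move_gap() verbatim from
the hunk above so it builds standalone (the void pointer arithmetic relies on
the GCC extension the kernel already assumes) and walks the gap both right and
left across an int array:

#include <assert.h>
#include <stddef.h>
#include <string.h>

static inline void __move_gap(void *array, size_t element_size,
			      size_t nr, size_t size,
			      size_t old_gap, size_t new_gap)
{
	size_t gap_end = old_gap + size - nr;

	if (new_gap < old_gap) {
		size_t move = old_gap - new_gap;

		memmove(array + element_size * (gap_end - move),
			array + element_size * (old_gap - move),
			element_size * move);
	} else if (new_gap > old_gap) {
		size_t move = new_gap - old_gap;

		memmove(array + element_size * old_gap,
			array + element_size * gap_end,
			element_size * move);
	}
}

#define move_gap(_array, _nr, _size, _old_gap, _new_gap)	\
	__move_gap(_array, sizeof(_array[0]), _nr, _size, _old_gap, _new_gap)

int main(void)
{
	/* 5 live elements, 3-slot gap starting at index 2: */
	int d[8] = { 1, 2, 0, 0, 0, 3, 4, 5 };

	/* Gap right: values 3 and 4 slide left into the hole. */
	move_gap(d, 5, 8, 2, 4);
	assert(d[0] == 1 && d[1] == 2 && d[2] == 3 && d[3] == 4 && d[7] == 5);

	/* Gap left: values 2, 3 and 4 slide right, past the hole. */
	move_gap(d, 5, 8, 4, 1);
	assert(d[0] == 1 && d[4] == 2 && d[5] == 3 && d[6] == 4 && d[7] == 5);
	return 0;
}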