summary refs log tree commit diff
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2019-05-24 19:53:37 -0400
committerKent Overstreet <kent.overstreet@gmail.com>2019-06-10 14:08:07 -0400
commitf3c20f4171cae4eac191c4139c727fbfb6b6dc9b (patch)
tree3d707bc2ddb750f3fdb0b0301f01c32ca87a8e37
parent150819b4129ad774ae8079c7a668115c46943577 (diff)
bcachefs: Don't use percpu_down_read_preempt_disable()
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--fs/bcachefs/alloc_background.c24
-rw-r--r--fs/bcachefs/alloc_foreground.c14
-rw-r--r--fs/bcachefs/btree_gc.c19
-rw-r--r--fs/bcachefs/buckets.c37
-rw-r--r--fs/bcachefs/chardev.c2
-rw-r--r--fs/bcachefs/io.c4
-rw-r--r--fs/bcachefs/journal.c8
-rw-r--r--fs/bcachefs/replicas.c16
-rw-r--r--fs/bcachefs/sysfs.c2
9 files changed, 64 insertions, 62 deletions
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index b002f01c8dc2..43dc2f270dc6 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -356,11 +356,11 @@ restart:
old_u = bch2_alloc_unpack(k);
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
g = bucket(ca, b);
m = READ_ONCE(g->mark);
new_u = alloc_mem_to_key(g, m);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
if (!m.dirty)
continue;
@@ -890,7 +890,7 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
b = ca->alloc_heap.data[0].bucket;
/* first, put on free_inc and mark as owned by allocator: */
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
spin_lock(&c->freelist_lock);
verify_not_on_freelist(c, ca, b);
@@ -900,7 +900,7 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
spin_unlock(&c->freelist_lock);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
@@ -916,11 +916,11 @@ retry:
* we have to trust the in memory bucket @m, not the version in the
* btree:
*/
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
g = bucket(ca, b);
m = READ_ONCE(g->mark);
u = alloc_mem_to_key(g, m);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
invalidating_cached_data = m.cached_sectors != 0;
@@ -981,7 +981,7 @@ retry:
size_t b2;
/* remove from free_inc: */
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
spin_lock(&c->freelist_lock);
bch2_mark_alloc_bucket(c, ca, b, false,
@@ -991,7 +991,7 @@ retry:
BUG_ON(b != b2);
spin_unlock(&c->freelist_lock);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
}
return ret;
@@ -1002,7 +1002,7 @@ static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
{
struct bucket_mark m;
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
spin_lock(&c->freelist_lock);
bch2_invalidate_bucket(c, ca, bucket, &m);
@@ -1015,7 +1015,7 @@ static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
bucket_io_clock_reset(c, ca, bucket, READ);
bucket_io_clock_reset(c, ca, bucket, WRITE);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
*flush_seq = max(*flush_seq, bucket_journal_seq(c, m));
@@ -1564,10 +1564,10 @@ static bool bch2_fs_allocator_start_fast(struct bch_fs *c)
test_bit(bu, ca->buckets_nouse)))
continue;
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
bch2_mark_alloc_bucket(c, ca, bu, true,
gc_pos_alloc(c, NULL), 0);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
fifo_push(&ca->free_inc, bu);
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 5fc4eac9c563..a065225c5043 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -101,7 +101,7 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
return;
}
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
spin_lock(&ob->lock);
bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
@@ -110,7 +110,7 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
ob->type = 0;
spin_unlock(&ob->lock);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
spin_lock(&c->freelist_lock);
ob->freelist = c->open_buckets_freelist;
@@ -465,7 +465,7 @@ static int ec_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
open_bucket_for_each(c, &h->blocks, ob, i)
__clear_bit(ob->ptr.dev, devs.d);
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
rcu_read_lock();
if (h->parity.nr < h->redundancy) {
@@ -501,12 +501,12 @@ static int ec_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
}
rcu_read_unlock();
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
return bch2_ec_stripe_new_alloc(c, h);
err:
rcu_read_unlock();
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
return -1;
}
@@ -662,7 +662,7 @@ static int open_bucket_add_buckets(struct bch_fs *c,
if (*nr_effective >= nr_replicas)
return 0;
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
rcu_read_lock();
retry_blocking:
@@ -679,7 +679,7 @@ retry_blocking:
}
rcu_read_unlock();
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
return ret;
}
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 132104058ccb..a458cfe0e92d 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -288,11 +288,11 @@ static int mark_journal_key(struct bch_fs *c, enum btree_id id,
for_each_btree_key(&trans, iter, id, bkey_start_pos(&insert->k),
BTREE_ITER_SLOTS, k, ret) {
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
ret = bch2_mark_overwrite(&trans, iter, k, insert, NULL,
BCH_BUCKET_MARK_GC|
BCH_BUCKET_MARK_NOATOMIC);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
if (!ret)
break;
@@ -368,9 +368,7 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
*/
if (c) {
lockdep_assert_held(&c->sb_lock);
- percpu_down_read_preempt_disable(&c->mark_lock);
- } else {
- preempt_disable();
+ percpu_down_read(&c->mark_lock);
}
for (i = 0; i < layout->nr_superblocks; i++) {
@@ -392,11 +390,8 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
gc_phase(GC_PHASE_SB), flags);
}
- if (c) {
- percpu_up_read_preempt_enable(&c->mark_lock);
- } else {
- preempt_enable();
- }
+ if (c)
+ percpu_up_read(&c->mark_lock);
}
static void bch2_mark_superblocks(struct bch_fs *c)
@@ -436,7 +431,7 @@ static void bch2_mark_allocator_buckets(struct bch_fs *c)
size_t i, j, iter;
unsigned ci;
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
spin_lock(&c->freelist_lock);
gc_pos_set(c, gc_pos_alloc(c, NULL));
@@ -472,7 +467,7 @@ static void bch2_mark_allocator_buckets(struct bch_fs *c)
spin_unlock(&ob->lock);
}
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
}
static void bch2_gc_free(struct bch_fs *c)
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 6623b4659d8b..794adde61de1 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -228,12 +228,12 @@ retry:
if (unlikely(!ret))
return NULL;
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
v = fs_usage_u64s(c);
if (unlikely(u64s != v)) {
u64s = v;
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
kfree(ret);
goto retry;
}
@@ -351,9 +351,9 @@ bch2_fs_usage_read_short(struct bch_fs *c)
{
struct bch_fs_usage_short ret;
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
ret = __bch2_fs_usage_read_short(c);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
return ret;
}
@@ -450,6 +450,7 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
bch2_data_types[old.data_type],
bch2_data_types[new.data_type]);
+ preempt_disable();
dev_usage = this_cpu_ptr(ca->usage[gc]);
if (bucket_type(old))
@@ -473,6 +474,7 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
(int) new.cached_sectors - (int) old.cached_sectors;
dev_usage->sectors_fragmented +=
is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
+ preempt_enable();
if (!is_available_bucket(old) && is_available_bucket(new))
bch2_wake_allocator(ca);
@@ -496,11 +498,9 @@ void bch2_dev_usage_from_buckets(struct bch_fs *c)
buckets = bucket_array(ca);
- preempt_disable();
for_each_bucket(g, buckets)
bch2_dev_usage_update(c, ca, c->usage_base,
old, g->mark, false);
- preempt_enable();
}
}
@@ -682,8 +682,12 @@ void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, bool owned_by_allocator,
struct gc_pos pos, unsigned flags)
{
+ preempt_disable();
+
do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
ca, b, owned_by_allocator);
+
+ preempt_enable();
}
static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k,
@@ -793,12 +797,16 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
BUG_ON(type != BCH_DATA_SB &&
type != BCH_DATA_JOURNAL);
+ preempt_disable();
+
if (likely(c)) {
do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
ca, b, type, sectors);
} else {
__bch2_mark_metadata_bucket(c, ca, b, type, sectors, 0);
}
+
+ preempt_enable();
}
static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
@@ -1149,10 +1157,10 @@ int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
{
int ret;
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
ret = bch2_mark_key_locked(c, k, sectors,
fs_usage, journal_seq, flags);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
return ret;
}
@@ -1665,10 +1673,10 @@ static u64 bch2_recalc_sectors_available(struct bch_fs *c)
void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
{
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
this_cpu_sub(c->usage[0]->online_reserved,
res->sectors);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
res->sectors = 0;
}
@@ -1683,7 +1691,8 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
s64 sectors_available;
int ret;
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
+ preempt_disable();
pcpu = this_cpu_ptr(c->pcpu);
if (sectors <= pcpu->sectors_available)
@@ -1695,7 +1704,8 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
get = min((u64) sectors + SECTORS_CACHE, old);
if (get < sectors) {
- percpu_up_read_preempt_enable(&c->mark_lock);
+ preempt_enable();
+ percpu_up_read(&c->mark_lock);
goto recalculate;
}
} while ((v = atomic64_cmpxchg(&c->sectors_available,
@@ -1708,7 +1718,8 @@ out:
this_cpu_add(c->usage[0]->online_reserved, sectors);
res->sectors += sectors;
- percpu_up_read_preempt_enable(&c->mark_lock);
+ preempt_enable();
+ percpu_up_read(&c->mark_lock);
return 0;
recalculate:
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 0014ff3e9e6d..059eca01ccc4 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -406,7 +406,7 @@ static long bch2_ioctl_usage(struct bch_fs *c,
dst.used = bch2_fs_sectors_used(c, src);
dst.online_reserved = src->online_reserved;
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
for (i = 0; i < BCH_REPLICAS_MAX; i++) {
dst.persistent_reserved[i] =
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index be9f2387d705..ff15e98323b9 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -1768,9 +1768,9 @@ noclone:
bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
bucket_io_clock_reset(c, ca, PTR_BUCKET_NR(ca, &pick.ptr), READ);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
if (likely(!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT)))) {
bio_inc_remaining(&orig->bio);
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index a348e360c4fd..5c3e146e3942 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -821,10 +821,8 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
}
if (c) {
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
spin_lock(&c->journal.lock);
- } else {
- preempt_disable();
}
pos = ja->nr ? (ja->cur_idx + 1) % ja->nr : 0;
@@ -853,9 +851,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
if (c) {
spin_unlock(&c->journal.lock);
- percpu_up_read_preempt_enable(&c->mark_lock);
- } else {
- preempt_enable();
+ percpu_up_read(&c->mark_lock);
}
if (!new_fs)
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index 46ad6f81489d..4818453c015a 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -229,9 +229,9 @@ bool bch2_replicas_marked(struct bch_fs *c,
{
bool marked;
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
marked = bch2_replicas_marked_locked(c, search, check_gc_replicas);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
return marked;
}
@@ -447,9 +447,9 @@ bool bch2_bkey_replicas_marked(struct bch_fs *c,
{
bool marked;
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
marked = bch2_bkey_replicas_marked_locked(c, k, check_gc_replicas);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
return marked;
}
@@ -972,7 +972,7 @@ struct replicas_status __bch2_replicas_status(struct bch_fs *c,
mi = bch2_sb_get_members(c->disk_sb.sb);
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
for_each_cpu_replicas_entry(&c->replicas, e) {
if (e->data_type >= ARRAY_SIZE(ret.replicas))
@@ -999,7 +999,7 @@ struct replicas_status __bch2_replicas_status(struct bch_fs *c,
nr_offline);
}
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
for (i = 0; i < ARRAY_SIZE(ret.replicas); i++)
if (ret.replicas[i].redundancy == INT_MAX)
@@ -1050,14 +1050,14 @@ unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
struct bch_replicas_entry *e;
unsigned i, ret = 0;
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
for_each_cpu_replicas_entry(&c->replicas, e)
for (i = 0; i < e->nr_devs; i++)
if (e->devs[i] == ca->dev_idx)
ret |= 1 << e->data_type;
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
return ret;
}
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 94105d140b95..27646c435e30 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -242,7 +242,7 @@ static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
bch2_fs_usage_to_text(&out, c, fs_usage);
- percpu_up_read_preempt_enable(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
kfree(fs_usage);