author    Kent Overstreet <kent.overstreet@gmail.com>  2018-07-22 06:10:52 -0400
committer Kent Overstreet <kent.overstreet@gmail.com>  2018-07-23 05:14:52 -0400
commit    a58f89546866f51c5cec31b13428694329cc0a17
tree      e87ecbe9d4b424af533d79876a7452e8df0ca35e
parent    3269709fc678104396f2c565d3935033d800c2bf
bcachefs: kill bucket mark sector count saturation
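
Previously the per-bucket dirty and cached sector counts were saturating: saturated_add() clamped them at GC_MAX_SECTORS_USED, a per-device saturated_count accumulated the decrements lost to the clamp, and once enough had accumulated bch2_mark_pointer() woke the gc thread so a full mark pass could recover the true counts (see the comment removed from buckets.h below). This commit drops all of that and replaces saturated_add() with checked_add(), which does the addition at full width, writes the result back, and BUG()s if the stored value no longer matches, i.e. on overflow or underflow of the counter.

What follows is a minimal standalone sketch of the two behaviours, not the kernel code: it uses a hypothetical 16-bit counter, a MAX_SECTORS stand-in for GC_MAX_SECTORS_USED, and assert() in place of BUG_ON().

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_SECTORS ((1U << 15) - 1)	/* stand-in for GC_MAX_SECTORS_USED */

/* Old behaviour: clamp at the cap and keep going. */
static void saturated_add(uint16_t *dst, int src)
{
	assert((int) *dst + src >= 0);

	if (*dst == MAX_SECTORS)
		;				/* already pinned at the cap */
	else if ((unsigned) (*dst + src) <= MAX_SECTORS)
		*dst += src;
	else
		*dst = MAX_SECTORS;		/* silently saturate */
}

/* New behaviour: add at full width, then verify nothing was lost. */
static void checked_add(uint16_t *a, int b)
{
	unsigned res = (unsigned) *a + b;

	*a = res;
	assert(*a == res);			/* kernel version uses BUG_ON() */
}

int main(void)
{
	uint16_t dirty = MAX_SECTORS - 4;

	saturated_add(&dirty, 16);
	printf("saturated: %u\n", (unsigned) dirty);	/* clamped to 32767 */

	dirty = MAX_SECTORS - 4;
	checked_add(&dirty, 16);			/* 32779 still fits in 16 bits */
	printf("checked:   %u\n", (unsigned) dirty);

	return 0;
}

Since the real bucket_mark sector counts are narrower than unsigned (the old (1U << 15) - 1 cap suggests 16-bit fields), the assign-then-compare in checked_add() is what detects a wrapped or truncated result.
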
-rw-r--r--  fs/bcachefs/bcachefs.h           |  1
-rw-r--r--  fs/bcachefs/btree_gc.c           |  3
-rw-r--r--  fs/bcachefs/buckets.c            | 45
-rw-r--r--  fs/bcachefs/buckets.h            |  5
-rw-r--r--  include/trace/events/bcachefs.h  | 25
5 files changed, 10 insertions, 69 deletions
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index a49143787b32..3dbce218205a 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -410,7 +410,6 @@ struct bch_dev {
/* last calculated minimum prio */
u16 max_last_bucket_io[2];
- atomic_long_t saturated_count;
size_t inc_gen_needs_gc;
size_t inc_gen_really_needs_gc;
u64 allocator_journal_seq_flush;
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 96c5e2592d09..da4228439836 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -567,9 +567,6 @@ void bch2_gc(struct bch_fs *c)
bch2_mark_pending_btree_node_frees(c);
bch2_mark_allocator_buckets(c);
- for_each_member_device(ca, c, i)
- atomic_long_set(&ca->saturated_count, 0);
-
/* Indicates that gc is no longer in progress: */
gc_pos_set(c, gc_phase(GC_PHASE_DONE));
c->gc_count++;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index e2843ed9a1cd..80920949d499 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -452,17 +452,11 @@ void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
c->gc_pos.phase == GC_PHASE_DONE);
}
-#define saturated_add(ca, dst, src, max) \
+#define checked_add(a, b) \
do { \
- BUG_ON((int) (dst) + (src) < 0); \
- if ((dst) == (max)) \
- ; \
- else if ((dst) + (src) <= (max)) \
- dst += (src); \
- else { \
- dst = (max); \
- trace_sectors_saturated(ca); \
- } \
+ unsigned _res = (unsigned) (a) + (b); \
+ (a) = _res; \
+ BUG_ON((a) != _res); \
} while (0)
void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
@@ -487,9 +481,9 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
g = bucket(ca, b);
old = bucket_data_cmpxchg(c, ca, g, new, ({
- saturated_add(ca, new.dirty_sectors, sectors,
- GC_MAX_SECTORS_USED);
- new.data_type = type;
+ new.data_type = type;
+ checked_add(new.dirty_sectors, sectors);
+ new.dirty_sectors += sectors;
}));
rcu_read_unlock();
@@ -523,7 +517,6 @@ static void bch2_mark_pointer(struct bch_fs *c,
u64 journal_seq, unsigned flags)
{
struct bucket_mark old, new;
- unsigned saturated;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g = PTR_BUCKET(ca, ptr);
enum bch_data_type data_type = type == S_META
@@ -558,7 +551,6 @@ static void bch2_mark_pointer(struct bch_fs *c,
v = atomic64_read(&g->_mark.v);
do {
new.v.counter = old.v.counter = v;
- saturated = 0;
/*
* Check this after reading bucket mark to guard against
@@ -572,17 +564,10 @@ static void bch2_mark_pointer(struct bch_fs *c,
return;
}
- if (!ptr->cached &&
- new.dirty_sectors == GC_MAX_SECTORS_USED &&
- sectors < 0)
- saturated = -sectors;
-
- if (ptr->cached)
- saturated_add(ca, new.cached_sectors, sectors,
- GC_MAX_SECTORS_USED);
+ if (!ptr->cached)
+ checked_add(new.dirty_sectors, sectors);
else
- saturated_add(ca, new.dirty_sectors, sectors,
- GC_MAX_SECTORS_USED);
+ checked_add(new.cached_sectors, sectors);
if (!new.dirty_sectors &&
!new.cached_sectors) {
@@ -608,16 +593,6 @@ static void bch2_mark_pointer(struct bch_fs *c,
BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) &&
bucket_became_unavailable(c, old, new));
-
- if (saturated &&
- atomic_long_add_return(saturated,
- &ca->saturated_count) >=
- bucket_to_sector(ca, ca->free_inc.size)) {
- if (c->gc_thread) {
- trace_gc_sectors_saturated(c);
- wake_up_process(c->gc_thread);
- }
- }
}
void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 4deb6c37391c..bc0bf849cb57 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -114,11 +114,6 @@ static inline u8 ptr_stale(struct bch_dev *ca,
/* bucket gc marks */
-/* The dirty and cached sector counts saturate. If this occurs,
- * reference counting alone will not free the bucket, and a btree
- * GC must be performed. */
-#define GC_MAX_SECTORS_USED ((1U << 15) - 1)
-
static inline unsigned bucket_sectors_used(struct bucket_mark mark)
{
return mark.dirty_sectors + mark.cached_sectors;
diff --git a/include/trace/events/bcachefs.h b/include/trace/events/bcachefs.h
index 13264b82ed77..2d1b62c09ad1 100644
--- a/include/trace/events/bcachefs.h
+++ b/include/trace/events/bcachefs.h
@@ -43,21 +43,6 @@ DECLARE_EVENT_CLASS(bkey,
__entry->offset, __entry->size)
);
-DECLARE_EVENT_CLASS(bch_dev,
- TP_PROTO(struct bch_dev *ca),
- TP_ARGS(ca),
-
- TP_STRUCT__entry(
- __array(char, uuid, 16 )
- ),
-
- TP_fast_assign(
- memcpy(__entry->uuid, ca->uuid.b, 16);
- ),
-
- TP_printk("%pU", __entry->uuid)
-);
-
DECLARE_EVENT_CLASS(bch_fs,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c),
@@ -360,16 +345,6 @@ DEFINE_EVENT(bch_fs, gc_coalesce_end,
TP_ARGS(c)
);
-DEFINE_EVENT(bch_dev, sectors_saturated,
- TP_PROTO(struct bch_dev *ca),
- TP_ARGS(ca)
-);
-
-DEFINE_EVENT(bch_fs, gc_sectors_saturated,
- TP_PROTO(struct bch_fs *c),
- TP_ARGS(c)
-);
-
DEFINE_EVENT(bch_fs, gc_cannot_inc_gens,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)