author	Kent Overstreet <kent.overstreet@gmail.com>	2016-07-30 00:41:42 -0800
committer	Kent Overstreet <kent.overstreet@gmail.com>	2016-07-30 00:41:42 -0800
commit	2ed905b595ebeb0b2a741a08258fbc91f66b1061 (patch)
tree	92ddee1b1ccea983768f7ad3293d40a49a9395ab
parent	72ac96ca24a1b6c1ad81cab362093bf7310d3337 (diff)
bcache: Compressed extent accounting

Sector accounting previously tracked a single count per allocation type,
which is ambiguous for compressed extents: a key's size is its
uncompressed (logical) size, while its pointers reference compressed
data on disk. Split the cache set stats into a two dimensional array,
indexed by [compressed|uncompressed] x [meta|dirty|cached], so both
views are tracked.

This also means overwriting the middle of a compressed extent can
increase disk usage - the extent is split into two fragments that each
still reference the full compressed payload - so such overwrites now
take a disk reservation and can fail with the new BTREE_INSERT_ENOSPC.
bch_btree_delete_range(), bch_discard() and journal replay grow a
struct disk_reservation argument to plumb this through.
-rw-r--r--	drivers/md/bcache/btree_gc.c	6
-rw-r--r--	drivers/md/bcache/btree_types.h	1
-rw-r--r--	drivers/md/bcache/btree_update.c	9
-rw-r--r--	drivers/md/bcache/btree_update.h	1
-rw-r--r--	drivers/md/bcache/buckets.c	157
-rw-r--r--	drivers/md/bcache/buckets.h	15
-rw-r--r--	drivers/md/bcache/buckets_types.h	24
-rw-r--r--	drivers/md/bcache/extents.c	18
-rw-r--r--	drivers/md/bcache/extents.h	13
-rw-r--r--	drivers/md/bcache/fs-io.c	5
-rw-r--r--	drivers/md/bcache/inode.c	6
-rw-r--r--	drivers/md/bcache/io.c	9
-rw-r--r--	drivers/md/bcache/io.h	3
-rw-r--r--	drivers/md/bcache/journal.c	12
-rw-r--r--	drivers/md/bcache/sysfs.c	22
15 files changed, 201 insertions, 100 deletions
diff --git a/drivers/md/bcache/btree_gc.c b/drivers/md/bcache/btree_gc.c
index 39355378bac4..a7fdec3e65a5 100644
--- a/drivers/md/bcache/btree_gc.c
+++ b/drivers/md/bcache/btree_gc.c
@@ -387,10 +387,8 @@ void bch_gc(struct cache_set *c)
struct bucket_stats_cache_set *p =
per_cpu_ptr(c->bucket_stats_percpu, cpu);
- p->sectors_dirty = 0;
- p->sectors_cached = 0;
- p->sectors_meta = 0;
- p->sectors_persistent_reserved = 0;
+ memset(p->s, 0, sizeof(p->s));
+ p->persistent_reserved = 0;
}
lg_global_unlock(&c->bucket_stats_lock);
diff --git a/drivers/md/bcache/btree_types.h b/drivers/md/bcache/btree_types.h
index 791dddadfae4..2c3ae7020a88 100644
--- a/drivers/md/bcache/btree_types.h
+++ b/drivers/md/bcache/btree_types.h
@@ -175,6 +175,7 @@ enum btree_insert_ret {
/* leaf node needs to be split */
BTREE_INSERT_BTREE_NODE_FULL,
BTREE_INSERT_JOURNAL_RES_FULL,
+ BTREE_INSERT_ENOSPC,
};
#endif /* _BCACHE_BTREE_TYPES_H */
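
The new BTREE_INSERT_ENOSPC status exists because, after this patch, even
a pure overwrite or delete can require new space: punching a hole in the
middle of a compressed extent splits it into two keys that each still
reference the full compressed payload. A minimal sketch - not code from
the patch, and the -EAGAIN mapping is only illustrative - of how the
status folds into an errno; bch_btree_insert_at() in btree_update.c below
does the equivalent for the ENOSPC case:

static int insert_status_to_errno(enum btree_insert_ret ret)
{
	switch (ret) {
	case BTREE_INSERT_OK:
		return 0;
	case BTREE_INSERT_ENOSPC:
		/* disk reservation could not be grown: */
		return -ENOSPC;
	case BTREE_INSERT_BTREE_NODE_FULL:
	case BTREE_INSERT_JOURNAL_RES_FULL:
	default:
		/* handled internally by splitting/flushing, then retried */
		return -EAGAIN;
	}
}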
diff --git a/drivers/md/bcache/btree_update.c b/drivers/md/bcache/btree_update.c
index ac6e8c0395e2..b816c712cd10 100644
--- a/drivers/md/bcache/btree_update.c
+++ b/drivers/md/bcache/btree_update.c
@@ -119,7 +119,8 @@ found:
* Btree nodes are accounted as freed in cache_set_stats when they're
* freed from the index:
*/
- stats->sectors_meta -= c->sb.btree_node_size;
+ stats->s[S_COMPRESSED][S_META] -= c->sb.btree_node_size;
+ stats->s[S_UNCOMPRESSED][S_META] -= c->sb.btree_node_size;
/*
* We're dropping @k from the btree, but it's still live until the
@@ -1657,6 +1658,9 @@ retry:
case BTREE_INSERT_BTREE_NODE_FULL:
split = i->iter;
break;
+ case BTREE_INSERT_ENOSPC:
+ ret = -ENOSPC;
+ break;
}
if (!trans->did_work && (ret || split))
@@ -1918,6 +1922,7 @@ int bch_btree_delete_range(struct cache_set *c, enum btree_id id,
struct bpos start,
struct bpos end,
u64 version,
+ struct disk_reservation *disk_res,
struct extent_insert_hook *hook,
u64 *journal_seq)
{
@@ -1965,7 +1970,7 @@ int bch_btree_delete_range(struct cache_set *c, enum btree_id id,
bch_cut_back(end, &delete.k);
}
- ret = bch_btree_insert_at(&iter, &delete, NULL, hook,
+ ret = bch_btree_insert_at(&iter, &delete, disk_res, hook,
journal_seq, BTREE_INSERT_NOFAIL);
if (ret)
break;
diff --git a/drivers/md/bcache/btree_update.h b/drivers/md/bcache/btree_update.h
index 3e31f685d85b..e8426308ade5 100644
--- a/drivers/md/bcache/btree_update.h
+++ b/drivers/md/bcache/btree_update.h
@@ -257,6 +257,7 @@ int bch_btree_update(struct cache_set *, enum btree_id,
int bch_btree_delete_range(struct cache_set *, enum btree_id,
struct bpos, struct bpos, u64,
+ struct disk_reservation *,
struct extent_insert_hook *, u64 *);
int bch_btree_node_rewrite(struct btree_iter *, struct btree *, struct closure *);
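
The extra struct disk_reservation * argument gives bch_btree_delete_range()
two calling conventions, mirroring the callers updated in inode.c and
fs-io.c below. A rough usage sketch, assuming the surrounding context
those callers have:

static int example_delete(struct cache_set *c, u64 inode_nr,
			  struct bpos start, struct bpos end,
			  struct extent_insert_hook *hook, u64 *journal_seq)
{
	struct disk_reservation disk_res;
	int ret;

	/* dirents/xattrs can't contain compressed extents: no reservation */
	ret = bch_btree_delete_range(c, BTREE_ID_DIRENTS,
				     POS(inode_nr, 0), POS(inode_nr + 1, 0),
				     0, NULL, NULL, NULL);
	if (ret)
		return ret;

	/*
	 * extents: start from an empty reservation; the insert path grows
	 * it with bch_disk_reservation_add() if it has to split a
	 * compressed extent:
	 */
	ret = bch_disk_reservation_get(c, &disk_res, 0, 0);
	if (ret)
		return ret;

	ret = bch_btree_delete_range(c, BTREE_ID_EXTENTS, start, end,
				     0, &disk_res, hook, journal_seq);
	bch_disk_reservation_put(c, &disk_res);
	return ret;
}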
diff --git a/drivers/md/bcache/buckets.c b/drivers/md/bcache/buckets.c
index 3bbf85840fdf..b203b92c88a1 100644
--- a/drivers/md/bcache/buckets.c
+++ b/drivers/md/bcache/buckets.c
@@ -182,10 +182,11 @@ void bch_cache_set_stats_apply(struct cache_set *c,
struct disk_reservation *disk_res,
struct gc_pos gc_pos)
{
- s64 added = stats->sectors_dirty +
- stats->sectors_meta +
- stats->sectors_persistent_reserved +
- stats->sectors_online_reserved;
+ s64 added =
+ stats->s[S_COMPRESSED][S_META] +
+ stats->s[S_COMPRESSED][S_DIRTY] +
+ stats->persistent_reserved +
+ stats->online_reserved;
/*
* Not allowed to reduce sectors_available except by getting a
@@ -194,15 +195,15 @@ void bch_cache_set_stats_apply(struct cache_set *c,
BUG_ON(added > (disk_res ? disk_res->sectors : 0));
if (added > 0) {
- disk_res->sectors -= added;
- stats->sectors_online_reserved -= added;
+ disk_res->sectors -= added;
+ stats->online_reserved -= added;
}
lg_local_lock(&c->bucket_stats_lock);
- /* sectors_online_reserved not subject to gc: */
- this_cpu_ptr(c->bucket_stats_percpu)->sectors_online_reserved +=
- stats->sectors_online_reserved;
- stats->sectors_online_reserved = 0;
+ /* online_reserved not subject to gc: */
+ this_cpu_ptr(c->bucket_stats_percpu)->online_reserved +=
+ stats->online_reserved;
+ stats->online_reserved = 0;
if (!gc_will_visit(c, gc_pos))
bucket_stats_add(this_cpu_ptr(c->bucket_stats_percpu), stats);
@@ -227,18 +228,16 @@ static void bucket_stats_update(struct cache *ca,
c->gc_pos.phase == GC_PHASE_DONE);
if (cache_set_stats) {
- cache_set_stats->sectors_cached +=
+ cache_set_stats->s[S_COMPRESSED][S_CACHED] +=
(int) new.cached_sectors - (int) old.cached_sectors;
- if (old.is_metadata)
- cache_set_stats->sectors_meta -= old.dirty_sectors;
- else
- cache_set_stats->sectors_dirty -= old.dirty_sectors;
+ cache_set_stats->s[S_COMPRESSED]
+ [old.is_metadata ? S_META : S_DIRTY] -=
+ old.dirty_sectors;
- if (new.is_metadata)
- cache_set_stats->sectors_meta += new.dirty_sectors;
- else
- cache_set_stats->sectors_dirty += new.dirty_sectors;
+ cache_set_stats->s[S_COMPRESSED]
+ [new.is_metadata ? S_META : S_DIRTY] +=
+ new.dirty_sectors;
}
preempt_disable();
@@ -289,8 +288,9 @@ static struct bucket_mark bch_bucket_mark_set(struct cache *ca,
* subject to (saturating) overflow - and if they did overflow, the
* cache set stats will now be off. We can tolerate this for
* sectors_cached, but not anything else:
- * */
- stats.sectors_cached = 0;
+ */
+ stats.s[S_COMPRESSED][S_CACHED] = 0;
+ stats.s[S_UNCOMPRESSED][S_CACHED] = 0;
BUG_ON(!bch_is_zero((void *) &stats, sizeof(stats)));
return old;
@@ -357,24 +357,38 @@ do { \
} \
} while (0)
+static int compressed_sectors(union bch_extent_crc *crc, int sectors)
+{
+ return min_t(unsigned, crc_to_64(crc).compressed_size,
+ abs(sectors)) * (sectors < 0 ? -1 : 1);
+}
+
/*
* Checking against gc's position has to be done here, inside the cmpxchg()
* loop, to avoid racing with the start of gc clearing all the marks - GC does
* that with the gc pos seqlock held.
*/
-static void bch_mark_pointer(struct cache_set *c, struct cache *ca,
- const struct bch_extent_ptr *ptr, int sectors,
- bool dirty, bool metadata,
+static void bch_mark_pointer(struct cache_set *c,
+ struct bkey_s_c_extent e,
+ struct cache *ca,
+ union bch_extent_crc *crc,
+ const struct bch_extent_ptr *ptr,
+ int sectors, enum s_alloc type,
bool may_make_unavailable,
struct bucket_stats_cache_set *stats,
bool is_gc, struct gc_pos gc_pos)
{
struct bucket_mark old, new;
- unsigned long bucket_nr = PTR_BUCKET_NR(ca, ptr);
unsigned saturated;
+ struct bucket *g = ca->buckets + PTR_BUCKET_NR(ca, ptr);
+ u32 v = READ_ONCE(g->mark.counter);
+ struct bch_extent_crc64 crc64 = crc_to_64(crc);
+ int disk_sectors = crc64.compression_type
+ ? sectors * crc64.compressed_size / crc64.uncompressed_size
+ : sectors;
- bucket_cmpxchg(&ca->buckets[bucket_nr], old, new,
- may_make_unavailable, NULL, ({
+ do {
+ new.counter = old.counter = v;
saturated = 0;
/*
* cmpxchg() only implies a full barrier on success, not
@@ -390,7 +404,7 @@ static void bch_mark_pointer(struct cache_set *c, struct cache *ca,
* checked the gen
*/
if (ptr_stale(ca, ptr)) {
- BUG_ON(metadata);
+ BUG_ON(type == S_META);
return;
}
@@ -408,35 +422,38 @@ static void bch_mark_pointer(struct cache_set *c, struct cache *ca,
* already stale
*/
if (!may_make_unavailable &&
- (metadata || dirty) &&
+ type != S_CACHED &&
is_available_bucket(old)) {
- BUG_ON(metadata);
+ BUG_ON(type == S_META);
return;
}
BUG_ON((old.dirty_sectors ||
old.cached_sectors) &&
- old.is_metadata != metadata);
+ old.is_metadata != (type == S_META));
- if (dirty &&
+ if (type != S_CACHED &&
new.dirty_sectors == GC_MAX_SECTORS_USED &&
- sectors < 0)
- saturated = -sectors;
+ disk_sectors < 0)
+ saturated = -disk_sectors;
- if (dirty)
- saturated_add(ca, new.dirty_sectors, sectors,
+ if (type == S_CACHED)
+ saturated_add(ca, new.cached_sectors, disk_sectors,
GC_MAX_SECTORS_USED);
else
- saturated_add(ca, new.cached_sectors, sectors,
+ saturated_add(ca, new.dirty_sectors, disk_sectors,
GC_MAX_SECTORS_USED);
if (!new.dirty_sectors &&
!new.cached_sectors)
new.is_metadata = false;
else
- new.is_metadata = metadata;
+ new.is_metadata = (type == S_META);
+ } while ((v = cmpxchg(&g->mark.counter,
+ old.counter,
+ new.counter)) != old.counter);
- }));
+ bucket_stats_update(ca, old, new, may_make_unavailable, NULL);
if (saturated &&
atomic_long_add_return(saturated,
@@ -448,12 +465,18 @@ static void bch_mark_pointer(struct cache_set *c, struct cache *ca,
}
}
out:
- if (metadata)
- stats->sectors_meta += sectors;
- else if (dirty)
- stats->sectors_dirty += sectors;
- else
- stats->sectors_cached += sectors;
+ if (crc_to_64(crc).compression_type == BCH_COMPRESSION_NONE) {
+ stats->s[S_COMPRESSED][type] += sectors;
+ } else if (abs(sectors) == e.k->size) {
+ stats->s[S_COMPRESSED][type] += compressed_sectors(crc, sectors);
+ } else {
+ BUG_ON(sectors > 0);
+
+ stats->s[S_COMPRESSED][type] -= compressed_sectors(crc, e.k->size);
+ stats->s[S_COMPRESSED][type] += compressed_sectors(crc, e.k->size + sectors);
+ }
+
+ stats->s[S_UNCOMPRESSED][type] += sectors;
}
static void bch_mark_extent(struct cache_set *c, struct bkey_s_c_extent e,
@@ -463,19 +486,23 @@ static void bch_mark_extent(struct cache_set *c, struct bkey_s_c_extent e,
bool is_gc, struct gc_pos gc_pos)
{
const struct bch_extent_ptr *ptr;
+ union bch_extent_crc *crc;
struct cache *ca;
+ enum s_alloc type = metadata ? S_META : S_DIRTY;
BUG_ON(metadata && bkey_extent_is_cached(e.k));
BUG_ON(!sectors);
rcu_read_lock();
- extent_for_each_online_device(c, e, ptr, ca) {
+ extent_for_each_online_device_crc(c, e, crc, ptr, ca) {
bool dirty = bch_extent_ptr_is_dirty(c, e, ptr);
trace_bcache_mark_bucket(ca, e.k, ptr, sectors, dirty);
- bch_mark_pointer(c, ca, ptr, sectors, dirty, metadata,
- may_make_unavailable, stats, is_gc, gc_pos);
+ bch_mark_pointer(c, e, ca, crc, ptr, sectors,
+ dirty ? type : S_CACHED,
+ may_make_unavailable,
+ stats, is_gc, gc_pos);
}
rcu_read_unlock();
}
@@ -493,7 +520,7 @@ static void __bch_mark_key(struct cache_set *c, struct bkey_s_c k,
may_make_unavailable, stats, is_gc, gc_pos);
break;
case BCH_RESERVATION:
- stats->sectors_persistent_reserved += sectors;
+ stats->persistent_reserved += sectors;
break;
}
}
@@ -554,7 +581,7 @@ void bch_recalc_sectors_available(struct cache_set *c)
lg_global_lock(&c->bucket_stats_lock);
for_each_possible_cpu(cpu)
- per_cpu_ptr(c->bucket_stats_percpu, cpu)->sectors_available_cache = 0;
+ per_cpu_ptr(c->bucket_stats_percpu, cpu)->available_cache = 0;
atomic64_set(&c->sectors_available,
__recalc_sectors_available(c));
@@ -567,7 +594,7 @@ void bch_disk_reservation_put(struct cache_set *c,
{
if (res->sectors) {
lg_local_lock(&c->bucket_stats_lock);
- this_cpu_sub(c->bucket_stats_percpu->sectors_online_reserved,
+ this_cpu_sub(c->bucket_stats_percpu->online_reserved,
res->sectors);
bch_cache_set_stats_verify(c);
@@ -579,7 +606,7 @@ void bch_disk_reservation_put(struct cache_set *c,
#define SECTORS_CACHE 1024
-int bch_disk_reservation_get(struct cache_set *c,
+int bch_disk_reservation_add(struct cache_set *c,
struct disk_reservation *res,
unsigned sectors, int flags)
{
@@ -588,13 +615,10 @@ int bch_disk_reservation_get(struct cache_set *c,
s64 sectors_available;
int ret;
- res->sectors = sectors;
- res->gen = c->capacity_gen;
-
lg_local_lock(&c->bucket_stats_lock);
stats = this_cpu_ptr(c->bucket_stats_percpu);
- if (sectors >= stats->sectors_available_cache)
+ if (sectors >= stats->available_cache)
goto out;
v = atomic64_read(&c->sectors_available);
@@ -609,10 +633,11 @@ int bch_disk_reservation_get(struct cache_set *c,
} while ((v = atomic64_cmpxchg(&c->sectors_available,
old, new)) != old);
- stats->sectors_available_cache += old - new;
+ stats->available_cache += old - new;
out:
- stats->sectors_available_cache -= sectors;
- stats->sectors_online_reserved += sectors;
+ stats->available_cache -= sectors;
+ stats->online_reserved += sectors;
+ res->sectors += sectors;
bch_cache_set_stats_verify(c);
lg_local_unlock(&c->bucket_stats_lock);
@@ -633,11 +658,11 @@ recalculate:
(flags & BCH_DISK_RESERVATION_NOFAIL)) {
atomic64_set(&c->sectors_available,
max_t(s64, 0, sectors_available - sectors));
- stats->sectors_online_reserved += sectors;
+ stats->online_reserved += sectors;
+ res->sectors += sectors;
ret = 0;
} else {
atomic64_set(&c->sectors_available, sectors_available);
- res->sectors = 0;
ret = -ENOSPC;
}
@@ -648,3 +673,13 @@ recalculate:
return ret;
}
+
+int bch_disk_reservation_get(struct cache_set *c,
+ struct disk_reservation *res,
+ unsigned sectors, int flags)
+{
+ res->sectors = 0;
+ res->gen = c->capacity_gen;
+
+ return bch_disk_reservation_add(c, res, sectors, flags);
+}
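
bch_disk_reservation_get() is now a thin wrapper that zeroes the
reservation and delegates to the new bch_disk_reservation_add(); since
_add() does res->sectors += sectors rather than overwriting it, a live
reservation can be topped up mid operation, which the extent insert path
relies on. A minimal lifecycle sketch (illustrative sizes, error handling
elided):

static void reservation_lifecycle_example(struct cache_set *c)
{
	struct disk_reservation res;

	bch_disk_reservation_get(c, &res, 128, 0);	/* res.sectors == 128 */
	bch_disk_reservation_add(c, &res, 64, 0);	/* res.sectors == 192 */

	/* ... consume some or all of it via bch_cache_set_stats_apply() ... */

	bch_disk_reservation_put(c, &res);	/* releases whatever is left */
}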
diff --git a/drivers/md/bcache/buckets.h b/drivers/md/bcache/buckets.h
index 8e324e14a366..c96a398ca7bc 100644
--- a/drivers/md/bcache/buckets.h
+++ b/drivers/md/bcache/buckets.h
@@ -202,13 +202,13 @@ void bch_cache_set_stats_apply(struct cache_set *,
static inline u64 __cache_set_sectors_used(struct cache_set *c)
{
struct bucket_stats_cache_set stats = __bch_bucket_stats_read_cache_set(c);
+ u64 reserved = stats.persistent_reserved +
+ stats.online_reserved;
- return stats.sectors_meta +
- stats.sectors_dirty +
- stats.sectors_persistent_reserved +
- stats.sectors_online_reserved +
- ((stats.sectors_persistent_reserved +
- stats.sectors_online_reserved) >> 7);
+ return stats.s[S_COMPRESSED][S_META] +
+ stats.s[S_COMPRESSED][S_DIRTY] +
+ reserved +
+ (reserved >> 7);
}
static inline u64 cache_set_sectors_used(struct cache_set *c)
@@ -257,6 +257,9 @@ void bch_disk_reservation_put(struct cache_set *,
#define BCH_DISK_RESERVATION_NOFAIL (1 << 0)
#define BCH_DISK_RESERVATION_GC_LOCK_HELD (1 << 1)
+int bch_disk_reservation_add(struct cache_set *,
+ struct disk_reservation *,
+ unsigned, int);
int bch_disk_reservation_get(struct cache_set *,
struct disk_reservation *,
unsigned, int);
diff --git a/drivers/md/bcache/buckets_types.h b/drivers/md/bcache/buckets_types.h
index c3c0f189c603..256f6fe7f272 100644
--- a/drivers/md/bcache/buckets_types.h
+++ b/drivers/md/bcache/buckets_types.h
@@ -43,13 +43,25 @@ struct bucket_stats_cache {
u64 sectors_meta;
};
+enum s_alloc {
+ S_META,
+ S_DIRTY,
+ S_CACHED,
+ S_ALLOC_NR,
+};
+
+enum s_compressed {
+ S_COMPRESSED,
+ S_UNCOMPRESSED,
+ S_COMPRESSED_NR,
+};
+
struct bucket_stats_cache_set {
- u64 sectors_dirty;
- u64 sectors_cached;
- u64 sectors_meta;
- u64 sectors_persistent_reserved;
- u64 sectors_online_reserved;
- u64 sectors_available_cache;
+ /* all fields are in units of 512 byte sectors: */
+ u64 s[S_COMPRESSED_NR][S_ALLOC_NR];
+ u64 persistent_reserved;
+ u64 online_reserved;
+ u64 available_cache;
};
struct bucket_heap_entry {
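
Every extent is now accounted on both axes: S_UNCOMPRESSED counts logical
(key size) sectors, S_COMPRESSED counts what the data occupies on disk.
A worked example with illustrative numbers - a dirty extent of
uncompressed size 128 compressed down to 32 sectors - following the
arithmetic of compressed_sectors() in buckets.c above:

static void accounting_example(struct bucket_stats_cache_set *stats)
{
	/* marking the whole extent: */
	stats->s[S_UNCOMPRESSED][S_DIRTY] += 128;
	stats->s[S_COMPRESSED][S_DIRTY]   += 32;	/* min(32, 128) */

	/*
	 * dropping 64 of the 128 sectors: logical usage halves, but the
	 * remaining fragment still references all 32 compressed sectors,
	 * so on-disk usage is unchanged:
	 */
	stats->s[S_UNCOMPRESSED][S_DIRTY] -= 64;
	stats->s[S_COMPRESSED][S_DIRTY]   -= 32 - 32; /* min(32, 128) - min(32, 64) */

	/*
	 * dropping another 48, leaving a 16 sector fragment: now the
	 * fragment is smaller than the compressed payload, and its
	 * contribution is capped at its own size:
	 */
	stats->s[S_UNCOMPRESSED][S_DIRTY] -= 48;
	stats->s[S_COMPRESSED][S_DIRTY]   -= 32 - 16; /* min(32, 64) - min(32, 16) */
}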
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index d94ba9b1305d..d8b2f7797a70 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -1249,6 +1249,7 @@ bch_insert_fixup_extent(struct btree_insert_trans *trans,
start_time, nr_done)) == BTREE_INSERT_OK &&
(_k = bch_btree_node_iter_peek_all(node_iter, &b->keys))) {
struct bkey_s k = __bkey_disassemble(f, _k, &unpacked);
+ enum bch_extent_overlap overlap;
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
EBUG_ON(bkey_cmp(iter->pos, k.k->p) >= 0);
@@ -1256,6 +1257,23 @@ bch_insert_fixup_extent(struct btree_insert_trans *trans,
if (bkey_cmp(bkey_start_pos(k.k), insert->k->k.p) >= 0)
break;
+ overlap = bch_extent_overlap(&insert->k->k, k.k);
+ if (k.k->size &&
+ overlap == BCH_EXTENT_OVERLAP_MIDDLE) {
+ unsigned sectors = bkey_extent_is_compressed(c, k.s_c);
+ int res_flags = 0;
+
+ if (flags & BTREE_INSERT_NOFAIL)
+ res_flags |= BCH_DISK_RESERVATION_NOFAIL;
+
+ if (sectors &&
+ bch_disk_reservation_add(c, disk_res, sectors,
+ res_flags)) {
+ ret = BTREE_INSERT_ENOSPC;
+ goto stop;
+ }
+ }
+
/*
* Only call advance pos & call hook for nonzero size extents:
* If hook returned BTREE_HOOK_NO_INSERT, @insert->k no longer
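
This hunk is the consumer of the new reservation plumbing: a
BCH_EXTENT_OVERLAP_MIDDLE overwrite splits k into two fragments that each
still reference the full compressed payload, so accounted disk usage can
grow even though data is being deleted. Illustrative arithmetic, assuming
a 128 sector extent compressed to 32 sectors with 16 sectors overwritten
in the middle:

/*
 * before: one key of 128 sectors -> compressed contribution min(32, 128) = 32
 * after:  fragments [0, 56) and [72, 128)
 *         -> min(32, 56) + min(32, 56) = 64
 *
 * Worst case the split costs bkey_extent_is_compressed() extra sectors,
 * which is exactly what gets reserved before proceeding:
 */
static unsigned split_worst_case(struct cache_set *c, struct bkey_s_c k)
{
	return bkey_extent_is_compressed(c, k);
}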
diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h
index 7bab66a30c97..ee74bfeea3a5 100644
--- a/drivers/md/bcache/extents.h
+++ b/drivers/md/bcache/extents.h
@@ -357,12 +357,13 @@ static inline struct bch_extent_crc64 crc_to_64(const union bch_extent_crc *crc)
}
}
-static inline bool bkey_extent_is_compressed(struct cache_set *c,
- struct bkey_s_c k)
+static inline unsigned bkey_extent_is_compressed(struct cache_set *c,
+ struct bkey_s_c k)
{
struct bkey_s_c_extent e;
const struct bch_extent_ptr *ptr;
const union bch_extent_crc *crc;
+ unsigned ret = 0;
switch (k.k->type) {
case BCH_EXTENT:
@@ -373,11 +374,11 @@ static inline bool bkey_extent_is_compressed(struct cache_set *c,
if (bch_extent_ptr_is_dirty(c, e, ptr) &&
crc_to_64(crc).compression_type != BCH_COMPRESSION_NONE &&
crc_to_64(crc).compressed_size < k.k->size)
-				return true;
- default:
- return false;
+ ret = max_t(unsigned, ret,
+ crc_to_64(crc).compressed_size);
}
+
+ return ret;
}
void extent_adjust_pointers(struct bkey_s_extent, union bch_extent_entry *);
diff --git a/drivers/md/bcache/fs-io.c b/drivers/md/bcache/fs-io.c
index b694b6423d97..fe557c77176d 100644
--- a/drivers/md/bcache/fs-io.c
+++ b/drivers/md/bcache/fs-io.c
@@ -1834,9 +1834,12 @@ static long bch_fpunch(struct inode *inode, loff_t offset, loff_t len)
truncate_pagecache_range(inode, offset, offset + len - 1);
if (discard_start < discard_end) {
+ struct disk_reservation disk_res;
struct i_sectors_hook i_sectors_hook;
int ret;
+ BUG_ON(bch_disk_reservation_get(c, &disk_res, 0, 0));
+
ret = i_sectors_dirty_get(ei, &i_sectors_hook);
if (unlikely(ret))
goto out;
@@ -1845,10 +1848,12 @@ static long bch_fpunch(struct inode *inode, loff_t offset, loff_t len)
POS(ino, discard_start),
POS(ino, discard_end),
0,
+ &disk_res,
&i_sectors_hook.hook,
&ei->journal_seq);
i_sectors_dirty_put(ei, &i_sectors_hook);
+ bch_disk_reservation_put(c, &disk_res);
}
out:
pagecache_block_put(&mapping->add_lock);
diff --git a/drivers/md/bcache/inode.c b/drivers/md/bcache/inode.c
index 29fb04913bac..58821fd75463 100644
--- a/drivers/md/bcache/inode.c
+++ b/drivers/md/bcache/inode.c
@@ -172,7 +172,7 @@ int bch_inode_truncate(struct cache_set *c, u64 inode_nr, u64 new_size,
struct extent_insert_hook *hook, u64 *journal_seq)
{
return bch_discard(c, POS(inode_nr, new_size), POS(inode_nr + 1, 0),
- 0, hook, journal_seq);
+ 0, NULL, hook, journal_seq);
}
int bch_inode_rm(struct cache_set *c, u64 inode_nr)
@@ -187,7 +187,7 @@ int bch_inode_rm(struct cache_set *c, u64 inode_nr)
ret = bch_btree_delete_range(c, BTREE_ID_XATTRS,
POS(inode_nr, 0),
POS(inode_nr + 1, 0),
- 0, NULL, NULL);
+ 0, NULL, NULL, NULL);
if (ret < 0)
return ret;
@@ -202,7 +202,7 @@ int bch_inode_rm(struct cache_set *c, u64 inode_nr)
ret = bch_btree_delete_range(c, BTREE_ID_DIRENTS,
POS(inode_nr, 0),
POS(inode_nr + 1, 0),
- 0, NULL, NULL);
+ 0, NULL, NULL, NULL);
if (ret < 0)
return ret;
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 3e3b74c01c5d..a7e0bc931434 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -521,7 +521,7 @@ static struct bio *__bio_compress(struct cache_set *c,
unsigned order = get_order(LZ4_MEM_COMPRESS);
size_t dst_size = dst->bi_iter.bi_size;
- workmem = alloc_pages(GFP_NOWAIT, order);
+ workmem = alloc_pages(GFP_NOWAIT|__GFP_NOWARN, order);
if (!workmem) {
workmem = mempool_alloc(&c->compression_workspace_pool,
GFP_NOIO);
@@ -780,7 +780,7 @@ static void bch_write_discard(struct closure *cl)
POS(inode, bio->bi_iter.bi_sector),
POS(inode, bio_end_sector(bio)),
op->insert_key.k.version,
- NULL, NULL);
+ &op->res, NULL, NULL);
}
/*
@@ -1499,11 +1499,12 @@ void bch_replace_init(struct bch_replace_info *r, struct bkey_s_c old)
*/
int bch_discard(struct cache_set *c, struct bpos start,
struct bpos end, u64 version,
+ struct disk_reservation *disk_res,
struct extent_insert_hook *hook,
u64 *journal_seq)
{
- return bch_btree_delete_range(c, BTREE_ID_EXTENTS, start, end,
- version, hook, journal_seq);
+ return bch_btree_delete_range(c, BTREE_ID_EXTENTS, start, end, version,
+ disk_res, hook, journal_seq);
}
/* Cache promotion on read */
diff --git a/drivers/md/bcache/io.h b/drivers/md/bcache/io.h
index 3d21ecd0f048..b244161d322a 100644
--- a/drivers/md/bcache/io.h
+++ b/drivers/md/bcache/io.h
@@ -83,7 +83,8 @@ void bch_submit_bbio_replicas(struct bch_write_bio *, struct cache_set *,
const struct bkey_i *, unsigned, bool);
int bch_discard(struct cache_set *, struct bpos, struct bpos,
- u64, struct extent_insert_hook *, u64 *);
+ u64, struct disk_reservation *,
+ struct extent_insert_hook *, u64 *);
void __cache_promote(struct cache_set *, struct bbio *,
struct bkey_s_c, struct bkey_s_c, unsigned);
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 5bdb3d551548..f484b5d49c59 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -1252,12 +1252,22 @@ int bch_journal_replay(struct cache_set *c, struct list_head *list)
j->pin.mask)];
for_each_jset_key(k, _n, entry, &i->j) {
+ struct disk_reservation disk_res;
+
+ /*
+ * We might cause compressed extents to be split, so we
+ * need to pass in a disk_reservation:
+ */
+ BUG_ON(bch_disk_reservation_get(c, &disk_res, 0, 0));
+
trace_bcache_journal_replay_key(&k->k);
ret = bch_btree_insert(c, entry->btree_id, k,
- NULL, NULL, NULL,
+ &disk_res, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_NO_MARK_KEY);
+ bch_disk_reservation_put(c, &disk_res);
+
if (ret)
goto err;
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 596d43571641..1652c09b7fb6 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -575,15 +575,25 @@ static ssize_t show_cache_set_alloc_debug(struct cache_set *c, char *buf)
return scnprintf(buf, PAGE_SIZE,
"capacity:\t\t%llu\n"
- "meta sectors:\t\t%llu\n"
- "dirty sectors:\t\t%llu\n"
+ "compressed:\n"
+ "\tmeta:\t\t%llu\n"
+ "\tdirty:\t\t%llu\n"
+ "\tcached:\t\t%llu\n"
+ "uncompressed:\n"
+ "\tmeta:\t\t%llu\n"
+ "\tdirty:\t\t%llu\n"
+ "\tcached:\t\t%llu\n"
"persistent reserved sectors:\t%llu\n"
"online reserved sectors:\t%llu\n",
c->capacity,
- stats.sectors_meta,
- stats.sectors_dirty,
- stats.sectors_persistent_reserved,
- stats.sectors_online_reserved);
+ stats.s[S_COMPRESSED][S_META],
+ stats.s[S_COMPRESSED][S_DIRTY],
+ stats.s[S_COMPRESSED][S_CACHED],
+ stats.s[S_UNCOMPRESSED][S_META],
+ stats.s[S_UNCOMPRESSED][S_DIRTY],
+ stats.s[S_UNCOMPRESSED][S_CACHED],
+ stats.persistent_reserved,
+ stats.online_reserved);
}
static ssize_t bch_compression_stats(struct cache_set *c, char *buf)
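
With the reworked format string, the alloc_debug file breaks sector
counts out per axis; the rendered output looks along these lines (values
purely illustrative):

capacity:		976773168
compressed:
	meta:		204800
	dirty:		51200000
	cached:		12800000
uncompressed:
	meta:		204800
	dirty:		102400000
	cached:		38400000
persistent reserved sectors:	1024
online reserved sectors:	512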