summary refs log tree commit diff
path: root/include
diff options
context:
space:
mode:
author: Kent Overstreet <kent.overstreet@gmail.com> 2017-03-10 12:40:01 -0900
committer: Kent Overstreet <kent.overstreet@gmail.com> 2017-03-10 12:40:01 -0900
commit: 3bf874183a518cc1bf785d9944de05cf51a177d3 (patch)
tree: 65fb33f65a65996669fb96e5c746f6dd4d22d31c /include
parent: c3844b0b7ff75527a87f52229f15341c24c0d356 (diff)
update bcache sources
Diffstat (limited to 'include')
-rw-r--r-- include/linux/bcache.h        |   2
-rw-r--r-- include/linux/blkdev.h        |   8
-rw-r--r-- include/linux/completion.h    |   5
-rw-r--r-- include/linux/mutex.h         |   3
-rw-r--r-- include/trace/events/bcache.h | 146
5 files changed, 90 insertions, 74 deletions
diff --git a/include/linux/bcache.h b/include/linux/bcache.h
index ac3b8b45..f4c2f275 100644
--- a/include/linux/bcache.h
+++ b/include/linux/bcache.h
@@ -788,7 +788,7 @@ LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
#endif
enum bch_member_state {
- BCH_MEMBER_STATE_ACTIVE = 0,
+ BCH_MEMBER_STATE_RW = 0,
BCH_MEMBER_STATE_RO = 1,
BCH_MEMBER_STATE_FAILED = 2,
BCH_MEMBER_STATE_SPARE = 3,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 217ff094..37a04a32 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -3,6 +3,7 @@
#include <linux/backing-dev.h>
#include <linux/blk_types.h>
+#include <linux/kobject.h>
typedef u64 sector_t;
typedef unsigned fmode_t;
@@ -65,11 +66,18 @@ struct request_queue {
struct gendisk {
};
+struct hd_struct {
+ struct kobject kobj;
+};
+
+#define part_to_dev(part) (part)
+
struct block_device {
char name[BDEVNAME_SIZE];
struct inode *bd_inode;
struct request_queue queue;
void *bd_holder;
+ struct hd_struct *bd_part;
struct gendisk *bd_disk;
struct gendisk __bd_disk;
int bd_fd;
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 71c6b616..1808d21e 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -29,6 +29,11 @@ static inline void init_completion(struct completion *x)
init_waitqueue_head(&x->wait);
}
+static inline void reinit_completion(struct completion *x)
+{
+ x->done = 0;
+}
+
void complete(struct completion *);
void wait_for_completion(struct completion *);
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 964bd338..801f06e1 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -7,6 +7,9 @@ struct mutex {
pthread_mutex_t lock;
};
+#define DEFINE_MUTEX(mutexname) \
+ struct mutex mutexname = { .lock = PTHREAD_MUTEX_INITIALIZER }
+
#define mutex_init(l) pthread_mutex_init(&(l)->lock, NULL)
#define mutex_lock(l) pthread_mutex_lock(&(l)->lock)
#define mutex_trylock(l) (!pthread_mutex_trylock(&(l)->lock))
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 06ce0218..b39fdde7 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -10,8 +10,8 @@ struct bcache_device;
struct bio;
struct bkey;
struct btree;
-struct cache;
-struct cache_set;
+struct bch_dev;
+struct bch_fs;
struct keylist;
struct moving_queue;
@@ -170,7 +170,7 @@ TRACE_EVENT(bcache_read,
);
TRACE_EVENT(bcache_write,
- TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio,
+ TP_PROTO(struct bch_fs *c, u64 inode, struct bio *bio,
bool writeback, bool bypass),
TP_ARGS(c, inode, bio, writeback, bypass),
@@ -202,7 +202,7 @@ TRACE_EVENT(bcache_write,
);
TRACE_EVENT(bcache_write_throttle,
- TP_PROTO(struct cache_set *c, u64 inode, struct bio *bio, u64 delay),
+ TP_PROTO(struct bch_fs *c, u64 inode, struct bio *bio, u64 delay),
TP_ARGS(c, inode, bio, delay),
TP_STRUCT__entry(
@@ -236,7 +236,7 @@ DEFINE_EVENT(bcache_bio, bcache_read_retry,
);
DECLARE_EVENT_CLASS(page_alloc_fail,
- TP_PROTO(struct cache_set *c, u64 size),
+ TP_PROTO(struct bch_fs *c, u64 size),
TP_ARGS(c, size),
TP_STRUCT__entry(
@@ -255,7 +255,7 @@ DECLARE_EVENT_CLASS(page_alloc_fail,
/* Journal */
DECLARE_EVENT_CLASS(cache_set,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c),
TP_STRUCT__entry(
@@ -275,7 +275,7 @@ DEFINE_EVENT(bkey, bcache_journal_replay_key,
);
TRACE_EVENT(bcache_journal_next_bucket,
- TP_PROTO(struct cache *ca, unsigned cur_idx, unsigned last_idx),
+ TP_PROTO(struct bch_dev *ca, unsigned cur_idx, unsigned last_idx),
TP_ARGS(ca, cur_idx, last_idx),
TP_STRUCT__entry(
@@ -295,7 +295,7 @@ TRACE_EVENT(bcache_journal_next_bucket,
);
TRACE_EVENT(bcache_journal_write_oldest,
- TP_PROTO(struct cache_set *c, u64 seq),
+ TP_PROTO(struct bch_fs *c, u64 seq),
TP_ARGS(c, seq),
TP_STRUCT__entry(
@@ -312,7 +312,7 @@ TRACE_EVENT(bcache_journal_write_oldest,
);
TRACE_EVENT(bcache_journal_write_oldest_done,
- TP_PROTO(struct cache_set *c, u64 seq, unsigned written),
+ TP_PROTO(struct bch_fs *c, u64 seq, unsigned written),
TP_ARGS(c, seq, written),
TP_STRUCT__entry(
@@ -332,12 +332,12 @@ TRACE_EVENT(bcache_journal_write_oldest_done,
);
DEFINE_EVENT(cache_set, bcache_journal_full,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(cache_set, bcache_journal_entry_full,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
@@ -349,17 +349,17 @@ DEFINE_EVENT(bcache_bio, bcache_journal_write,
/* Device state changes */
DEFINE_EVENT(cache_set, fs_read_only,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(cache_set, fs_read_only_done,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DECLARE_EVENT_CLASS(cache,
- TP_PROTO(struct cache *ca),
+ TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca),
TP_STRUCT__entry(
@@ -376,22 +376,22 @@ DECLARE_EVENT_CLASS(cache,
);
DEFINE_EVENT(cache, bcache_cache_read_only,
- TP_PROTO(struct cache *ca),
+ TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
DEFINE_EVENT(cache, bcache_cache_read_only_done,
- TP_PROTO(struct cache *ca),
+ TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
DEFINE_EVENT(cache, bcache_cache_read_write,
- TP_PROTO(struct cache *ca),
+ TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
DEFINE_EVENT(cache, bcache_cache_read_write_done,
- TP_PROTO(struct cache *ca),
+ TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
@@ -405,7 +405,7 @@ DEFINE_EVENT(bpos, bkey_pack_pos_fail,
/* Btree */
DECLARE_EVENT_CLASS(btree_node,
- TP_PROTO(struct cache_set *c, struct btree *b),
+ TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b),
TP_STRUCT__entry(
@@ -432,7 +432,7 @@ DECLARE_EVENT_CLASS(btree_node,
);
DEFINE_EVENT(btree_node, bcache_btree_read,
- TP_PROTO(struct cache_set *c, struct btree *b),
+ TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
@@ -457,12 +457,12 @@ TRACE_EVENT(bcache_btree_write,
);
DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
- TP_PROTO(struct cache_set *c, struct btree *b),
+ TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
TRACE_EVENT(bcache_btree_node_alloc_fail,
- TP_PROTO(struct cache_set *c, enum btree_id id),
+ TP_PROTO(struct bch_fs *c, enum btree_id id),
TP_ARGS(c, id),
TP_STRUCT__entry(
@@ -479,12 +479,12 @@ TRACE_EVENT(bcache_btree_node_alloc_fail,
);
DEFINE_EVENT(btree_node, bcache_btree_node_free,
- TP_PROTO(struct cache_set *c, struct btree *b),
+ TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
TRACE_EVENT(bcache_mca_reap,
- TP_PROTO(struct cache_set *c, struct btree *b, int ret),
+ TP_PROTO(struct bch_fs *c, struct btree *b, int ret),
TP_ARGS(c, b, ret),
TP_STRUCT__entry(
@@ -501,7 +501,7 @@ TRACE_EVENT(bcache_mca_reap,
);
TRACE_EVENT(bcache_mca_scan,
- TP_PROTO(struct cache_set *c, unsigned touched, unsigned freed,
+ TP_PROTO(struct bch_fs *c, unsigned touched, unsigned freed,
unsigned can_free, unsigned long nr),
TP_ARGS(c, touched, freed, can_free, nr),
@@ -527,7 +527,7 @@ TRACE_EVENT(bcache_mca_scan,
);
DECLARE_EVENT_CLASS(mca_cannibalize_lock,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c),
TP_STRUCT__entry(
@@ -542,27 +542,27 @@ DECLARE_EVENT_CLASS(mca_cannibalize_lock,
);
DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize_lock_fail,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize_lock,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(cache_set, bcache_mca_cannibalize_unlock,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
TRACE_EVENT(bcache_btree_insert_key,
- TP_PROTO(struct cache_set *c, struct btree *b, struct bkey_i *k),
+ TP_PROTO(struct bch_fs *c, struct btree *b, struct bkey_i *k),
TP_ARGS(c, b, k),
TP_STRUCT__entry(
@@ -594,7 +594,7 @@ TRACE_EVENT(bcache_btree_insert_key,
);
DECLARE_EVENT_CLASS(btree_split,
- TP_PROTO(struct cache_set *c, struct btree *b, unsigned keys),
+ TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
TP_ARGS(c, b, keys),
TP_STRUCT__entry(
@@ -621,24 +621,24 @@ DECLARE_EVENT_CLASS(btree_split,
);
DEFINE_EVENT(btree_split, bcache_btree_node_split,
- TP_PROTO(struct cache_set *c, struct btree *b, unsigned keys),
+ TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
TP_ARGS(c, b, keys)
);
DEFINE_EVENT(btree_split, bcache_btree_node_compact,
- TP_PROTO(struct cache_set *c, struct btree *b, unsigned keys),
+ TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
TP_ARGS(c, b, keys)
);
DEFINE_EVENT(btree_node, bcache_btree_set_root,
- TP_PROTO(struct cache_set *c, struct btree *b),
+ TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
/* Garbage collection */
TRACE_EVENT(bcache_btree_gc_coalesce,
- TP_PROTO(struct cache_set *c, struct btree *b, unsigned nodes),
+ TP_PROTO(struct bch_fs *c, struct btree *b, unsigned nodes),
TP_ARGS(c, b, nodes),
TP_STRUCT__entry(
@@ -665,7 +665,7 @@ TRACE_EVENT(bcache_btree_gc_coalesce,
);
TRACE_EVENT(bcache_btree_gc_coalesce_fail,
- TP_PROTO(struct cache_set *c, int reason),
+ TP_PROTO(struct bch_fs *c, int reason),
TP_ARGS(c, reason),
TP_STRUCT__entry(
@@ -682,7 +682,7 @@ TRACE_EVENT(bcache_btree_gc_coalesce_fail,
);
TRACE_EVENT(bcache_btree_node_alloc_replacement,
- TP_PROTO(struct cache_set *c, struct btree *old, struct btree *b),
+ TP_PROTO(struct bch_fs *c, struct btree *old, struct btree *b),
TP_ARGS(c, old, b),
TP_STRUCT__entry(
@@ -713,57 +713,57 @@ TRACE_EVENT(bcache_btree_node_alloc_replacement,
);
DEFINE_EVENT(btree_node, bcache_btree_gc_rewrite_node,
- TP_PROTO(struct cache_set *c, struct btree *b),
+ TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
DEFINE_EVENT(btree_node, bcache_btree_gc_rewrite_node_fail,
- TP_PROTO(struct cache_set *c, struct btree *b),
+ TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
DEFINE_EVENT(cache_set, bcache_gc_start,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(cache_set, bcache_gc_end,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(cache_set, bcache_gc_coalesce_start,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(cache_set, bcache_gc_coalesce_end,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(cache, bcache_sectors_saturated,
- TP_PROTO(struct cache *ca),
+ TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
DEFINE_EVENT(cache_set, bcache_gc_sectors_saturated,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(cache_set, bcache_gc_cannot_inc_gens,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(cache_set, bcache_gc_periodic,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
TRACE_EVENT(bcache_mark_bucket,
- TP_PROTO(struct cache *ca, const struct bkey *k,
+ TP_PROTO(struct bch_dev *ca, const struct bkey *k,
const struct bch_extent_ptr *ptr,
int sectors, bool dirty),
TP_ARGS(ca, k, ptr, sectors, dirty),
@@ -794,7 +794,7 @@ TRACE_EVENT(bcache_mark_bucket,
/* Allocator */
TRACE_EVENT(bcache_alloc_batch,
- TP_PROTO(struct cache *ca, size_t free, size_t total),
+ TP_PROTO(struct bch_dev *ca, size_t free, size_t total),
TP_ARGS(ca, free, total),
TP_STRUCT__entry(
@@ -814,7 +814,7 @@ TRACE_EVENT(bcache_alloc_batch,
);
TRACE_EVENT(bcache_btree_reserve_get_fail,
- TP_PROTO(struct cache_set *c, size_t required, struct closure *cl),
+ TP_PROTO(struct bch_fs *c, size_t required, struct closure *cl),
TP_ARGS(c, required, cl),
TP_STRUCT__entry(
@@ -834,17 +834,17 @@ TRACE_EVENT(bcache_btree_reserve_get_fail,
);
DEFINE_EVENT(cache, bcache_prio_write_start,
- TP_PROTO(struct cache *ca),
+ TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
DEFINE_EVENT(cache, bcache_prio_write_end,
- TP_PROTO(struct cache *ca),
+ TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
TRACE_EVENT(bcache_invalidate,
- TP_PROTO(struct cache *ca, size_t bucket, unsigned sectors),
+ TP_PROTO(struct bch_dev *ca, size_t bucket, unsigned sectors),
TP_ARGS(ca, bucket, sectors),
TP_STRUCT__entry(
@@ -865,12 +865,12 @@ TRACE_EVENT(bcache_invalidate,
);
DEFINE_EVENT(cache_set, bcache_rescale_prios,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DECLARE_EVENT_CLASS(cache_bucket_alloc,
- TP_PROTO(struct cache *ca, enum alloc_reserve reserve),
+ TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
TP_ARGS(ca, reserve),
TP_STRUCT__entry(
@@ -887,17 +887,17 @@ DECLARE_EVENT_CLASS(cache_bucket_alloc,
);
DEFINE_EVENT(cache_bucket_alloc, bcache_bucket_alloc,
- TP_PROTO(struct cache *ca, enum alloc_reserve reserve),
+ TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
TP_ARGS(ca, reserve)
);
DEFINE_EVENT(cache_bucket_alloc, bcache_bucket_alloc_fail,
- TP_PROTO(struct cache *ca, enum alloc_reserve reserve),
+ TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
TP_ARGS(ca, reserve)
);
TRACE_EVENT(bcache_freelist_empty_fail,
- TP_PROTO(struct cache_set *c, enum alloc_reserve reserve,
+ TP_PROTO(struct bch_fs *c, enum alloc_reserve reserve,
struct closure *cl),
TP_ARGS(c, reserve, cl),
@@ -918,7 +918,7 @@ TRACE_EVENT(bcache_freelist_empty_fail,
);
DECLARE_EVENT_CLASS(open_bucket_alloc,
- TP_PROTO(struct cache_set *c, struct closure *cl),
+ TP_PROTO(struct bch_fs *c, struct closure *cl),
TP_ARGS(c, cl),
TP_STRUCT__entry(
@@ -936,12 +936,12 @@ DECLARE_EVENT_CLASS(open_bucket_alloc,
);
DEFINE_EVENT(open_bucket_alloc, bcache_open_bucket_alloc,
- TP_PROTO(struct cache_set *c, struct closure *cl),
+ TP_PROTO(struct bch_fs *c, struct closure *cl),
TP_ARGS(c, cl)
);
DEFINE_EVENT(open_bucket_alloc, bcache_open_bucket_alloc_fail,
- TP_PROTO(struct cache_set *c, struct closure *cl),
+ TP_PROTO(struct bch_fs *c, struct closure *cl),
TP_ARGS(c, cl)
);
@@ -1026,17 +1026,17 @@ DEFINE_EVENT(moving_io, bcache_copy_collision,
/* Copy GC */
DEFINE_EVENT(page_alloc_fail, bcache_moving_gc_alloc_fail,
- TP_PROTO(struct cache_set *c, u64 size),
+ TP_PROTO(struct bch_fs *c, u64 size),
TP_ARGS(c, size)
);
DEFINE_EVENT(cache, bcache_moving_gc_start,
- TP_PROTO(struct cache *ca),
+ TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
TRACE_EVENT(bcache_moving_gc_end,
- TP_PROTO(struct cache *ca, u64 sectors_moved, u64 keys_moved,
+ TP_PROTO(struct bch_dev *ca, u64 sectors_moved, u64 keys_moved,
u64 buckets_moved),
TP_ARGS(ca, sectors_moved, keys_moved, buckets_moved),
@@ -1060,12 +1060,12 @@ TRACE_EVENT(bcache_moving_gc_end,
);
DEFINE_EVENT(cache, bcache_moving_gc_reserve_empty,
- TP_PROTO(struct cache *ca),
+ TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
DEFINE_EVENT(cache, bcache_moving_gc_no_work,
- TP_PROTO(struct cache *ca),
+ TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
@@ -1077,27 +1077,27 @@ DEFINE_EVENT(bkey, bcache_gc_copy,
/* Tiering */
DEFINE_EVENT(cache_set, bcache_tiering_refill_start,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(cache_set, bcache_tiering_refill_end,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
DEFINE_EVENT(page_alloc_fail, bcache_tiering_alloc_fail,
- TP_PROTO(struct cache_set *c, u64 size),
+ TP_PROTO(struct bch_fs *c, u64 size),
TP_ARGS(c, size)
);
DEFINE_EVENT(cache_set, bcache_tiering_start,
- TP_PROTO(struct cache_set *c),
+ TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
TRACE_EVENT(bcache_tiering_end,
- TP_PROTO(struct cache_set *c, u64 sectors_moved,
+ TP_PROTO(struct bch_fs *c, u64 sectors_moved,
u64 keys_moved),
TP_ARGS(c, sectors_moved, keys_moved),
@@ -1161,7 +1161,7 @@ TRACE_EVENT(bcache_writeback_error,
);
DEFINE_EVENT(page_alloc_fail, bcache_writeback_alloc_fail,
- TP_PROTO(struct cache_set *c, u64 size),
+ TP_PROTO(struct bch_fs *c, u64 size),
TP_ARGS(c, size)
);