Diffstat (limited to 'drivers/md/bcache/backingdev.h')
-rw-r--r--  drivers/md/bcache/backingdev.h  39
1 file changed, 38 insertions(+), 1 deletion(-)
diff --git a/drivers/md/bcache/backingdev.h b/drivers/md/bcache/backingdev.h
index 58362eb7902a..038b532e91d8 100644
--- a/drivers/md/bcache/backingdev.h
+++ b/drivers/md/bcache/backingdev.h
@@ -22,7 +22,11 @@ struct bcache_device {
struct kobject kobj;
struct cache_set *c;
- unsigned int id;
+ struct bch_fs *c2;
+
+ u64 id;
+ struct inode *inode;
+
#define BCACHEDEVNAME_SIZE 12
char name[BCACHEDEVNAME_SIZE];
@@ -49,6 +53,11 @@ struct bcache_device {
unsigned int cmd, unsigned long arg);
};
+static inline bool bcache_dev_is_attached(struct bcache_device *d)
+{
+ return d->c != NULL || d->c2 != NULL;
+}
+
enum stop_on_failure {
BCH_CACHED_DEV_STOP_AUTO = 0,
BCH_CACHED_DEV_STOP_ALWAYS,
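(Note, not part of the patch: bcache_dev_is_attached() gives callers one test for "bound to either backend", whether that is the legacy cache_set via d->c or the new bch_fs via d->c2. A minimal, hypothetical caller sketch; the function name and the -EBUSY policy are illustrative assumptions, only the helper and struct bcache_device come from this header:)

static int example_try_free_device(struct bcache_device *d)
{
	/*
	 * Refuse to tear the device down while it is still attached to
	 * either backend: the legacy cache_set (d->c) or the bch_fs (d->c2).
	 */
	if (bcache_dev_is_attached(d))
		return -EBUSY;

	kobject_put(&d->kobj);
	return 0;
}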
@@ -87,6 +96,9 @@ struct cached_dev {
*/
atomic_t running;
+ struct bio_set bch2_bio_read;
+ mempool_t bch2_io_write;
+
/*
* Writes take a shared lock from start to finish; scanning for dirty
* data to refill the rb tree requires an exclusive lock.
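(Note, not part of the patch: since bch2_bio_read and bch2_io_write are embedded rather than pointers, they would need explicit init/teardown alongside the rest of struct cached_dev. A hedged sketch using the stock bioset_init()/mempool_init_kmalloc_pool() helpers; the function name, the pool depth of 64 and the placeholder element type are assumptions, not taken from this series:)

struct example_bch2_write_op {		/* placeholder for the real per-write state */
	struct bio	*bio;
};

static int example_bch2_pools_init(struct cached_dev *dc)
{
	/* bios cloned for reads routed through the bch2 backend */
	if (bioset_init(&dc->bch2_bio_read, 64, 0, BIOSET_NEED_BVECS))
		return -ENOMEM;

	/*
	 * Per-write state for the bch2 write path; the element type here
	 * only stands in for whatever the write path really allocates.
	 */
	if (mempool_init_kmalloc_pool(&dc->bch2_io_write, 64,
				      sizeof(struct example_bch2_write_op))) {
		bioset_exit(&dc->bch2_bio_read);
		return -ENOMEM;
	}

	return 0;
}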
@@ -225,6 +237,31 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
}
}
+static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
+ unsigned int cache_mode, bool would_skip,
+ unsigned int in_use)
+{
+ if (cache_mode != CACHE_MODE_WRITEBACK ||
+ test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
+ in_use > bch_cutoff_writeback_sync)
+ return false;
+
+ if (bio_op(bio) == REQ_OP_DISCARD)
+ return false;
+
+ if (dc->partial_stripes_expensive &&
+ bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
+ bio_sectors(bio)))
+ return true;
+
+ if (would_skip)
+ return false;
+
+ return (op_is_sync(bio->bi_opf) ||
+ bio->bi_opf & (REQ_META|REQ_PRIO) ||
+ in_use <= bch_cutoff_writeback);
+}
+
static inline void bch_writeback_queue(struct cached_dev *dc)
{
if (!IS_ERR_OR_NULL(dc->writeback_thread))
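(Note, not part of the patch: should_writeback() now takes the cache's utilisation as an explicit in_use argument instead of reading dc->disk.c->gc_stats.in_use itself, which would let the bch_fs backend supply its own figure. A hedged sketch of how the legacy call site might look; struct search and its field names follow bcache's existing request code, and the wrapper itself is purely illustrative:)

static void example_maybe_writeback(struct cached_dev *dc, struct search *s,
				    unsigned int cache_mode)
{
	/* legacy cache_set backend: utilisation comes from the GC stats */
	unsigned int in_use = dc->disk.c->gc_stats.in_use;

	if (should_writeback(dc, s->orig_bio, cache_mode, s->iop.bypass, in_use)) {
		/* cache this write and mark it dirty instead of bypassing */
		s->iop.bypass	 = false;
		s->iop.writeback = true;
	}
}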