summaryrefslogtreecommitdiff
path: root/libbcache/io.h
blob: 302ed2e0944f3300710de878561d71533fd9f14f (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
#ifndef _BCACHE_IO_H
#define _BCACHE_IO_H

#include "io_types.h"

/* Recover the enclosing struct bch_write_bio from its embedded struct bio. */
#define to_wbio(_bio)			\
	container_of((_bio), struct bch_write_bio, bio)

/* Recover the enclosing struct bch_read_bio from its embedded struct bio. */
#define to_rbio(_bio)			\
	container_of((_bio), struct bch_read_bio, bio)

/*
 * Allocate/free the pages backing a bio's bvecs; the "_pool" suffix
 * suggests a per-filesystem page pool — confirm against io.c.
 */
void bch_bio_free_pages_pool(struct bch_fs *, struct bio *);
void bch_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);

/*
 * Flag bits for bch_write_op::flags (see op_journal_seq() below, which
 * reads op->flags). The first group is set by callers of bch_write();
 * the bits after "Internal:" are managed by the write path itself.
 */
enum bch_write_flags {
	BCH_WRITE_ALLOC_NOWAIT		= (1 << 0),
	BCH_WRITE_DISCARD		= (1 << 1),
	BCH_WRITE_CACHED		= (1 << 2),
	BCH_WRITE_FLUSH			= (1 << 3),
	BCH_WRITE_DISCARD_ON_ERROR	= (1 << 4),
	BCH_WRITE_DATA_COMPRESSED	= (1 << 5),

	/* Internal: */
	/* When set, op->journal_seq_p (not op->journal_seq) is the live slot. */
	BCH_WRITE_JOURNAL_SEQ_PTR	= (1 << 6),
	BCH_WRITE_DONE			= (1 << 7),
	BCH_WRITE_LOOPED		= (1 << 8),
};

/*
 * Return the journal sequence number slot for this write op: the
 * externally supplied pointer when BCH_WRITE_JOURNAL_SEQ_PTR is set,
 * otherwise the op's own embedded journal_seq field.
 */
static inline u64 *op_journal_seq(struct bch_write_op *op)
{
	if (op->flags & BCH_WRITE_JOURNAL_SEQ_PTR)
		return op->journal_seq_p;

	return &op->journal_seq;
}

/*
 * Pick a foreground write point for @v by hashing it into the
 * fs-wide write_points array (power-of-two sized, hence ilog2).
 */
static inline struct write_point *foreground_write_point(struct bch_fs *c,
							 unsigned long v)
{
	unsigned long idx = hash_long(v, ilog2(ARRAY_SIZE(c->write_points)));

	return &c->write_points[idx];
}

/*
 * Initialize a write op; the u64 * is presumably the journal_seq pointer
 * consumed via op_journal_seq(), and the unsigned a bch_write_flags mask
 * — confirm against the definition in io.c.
 */
void bch_write_op_init(struct bch_write_op *, struct bch_fs *,
		       struct bch_write_bio *,
		       struct disk_reservation, struct write_point *,
		       struct bpos, u64 *, unsigned);
/* Closure entry point for the write path. */
void bch_write(struct closure *);

struct cache_promote_op;

struct extent_pick_ptr;

/*
 * Read the portion of extent @k covered by @iter into the read bio;
 * the unsigned argument is presumably a bch_read_flags mask.
 */
void bch_read_extent_iter(struct bch_fs *, struct bch_read_bio *,
			  struct bvec_iter, struct bkey_s_c k,
			  struct extent_pick_ptr *, unsigned);

static inline void bch_read_extent(struct bch_fs *c,
				   struct bch_read_bio *orig,
				   struct bkey_s_c k,
				   struct extent_pick_ptr *pick,
				   unsigned flags)
{
	bch_read_extent_iter(c, orig, orig->bio.bi_iter,
			     k, pick, flags);
}

/*
 * Flag bits for the read path (passed as the unsigned @flags argument
 * of bch_read_extent()/bch_read_extent_iter()).
 */
enum bch_read_flags {
	BCH_READ_FORCE_BOUNCE		= 1 << 0,
	BCH_READ_RETRY_IF_STALE		= 1 << 1,
	BCH_READ_PROMOTE		= 1 << 2,
	BCH_READ_IS_LAST		= 1 << 3,
	BCH_READ_MAY_REUSE_BIO		= 1 << 4,
};

/* Top-level read entry point; the u64 is presumably an inode number — confirm in io.c. */
void bch_read(struct bch_fs *, struct bch_read_bio *, u64);

void bch_generic_make_request(struct bio *, struct bch_fs *);
/* Workqueue callbacks (work_struct members embedded in fs/bio structs). */
void bch_bio_submit_work(struct work_struct *);
/* Submit one write bio per replica for the pointers in the bkey. */
void bch_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
			      const struct bkey_i *, bool);

/* Discard the key range [first bpos, second bpos); returns 0 or -errno (verify). */
int bch_discard(struct bch_fs *, struct bpos, struct bpos,
		struct bversion, struct disk_reservation *,
		struct extent_insert_hook *, u64 *);

void bch_read_retry_work(struct work_struct *);
/* Timer callback (unsigned long data is the pre-4.15 timer argument convention). */
void bch_wake_delayed_writes(unsigned long data);

void bch_bio_decompress_work(struct work_struct *);

#endif /* _BCACHE_IO_H */