summaryrefslogtreecommitdiff
path: root/fs/bcachefs/io_types.h
blob: 50856c6ce207bebc06d7469ef080d9fc7088da04 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
#ifndef _BCACHE_IO_TYPES_H
#define _BCACHE_IO_TYPES_H

#include "btree_types.h"
#include "buckets_types.h"
#include "keylist_types.h"

#include <linux/llist.h>
#include <linux/workqueue.h>

/* XXX kill kill kill */
/* XXX kill kill kill */
struct bbio {
	struct cache		*ca;		/* device this bio targets — TODO confirm */
	struct bch_extent_ptr	ptr;		/* extent pointer (on-disk location) for this IO */
	unsigned		submit_time_us;	/* submission timestamp in microseconds (per name); presumably for latency accounting */
	struct bio		bio;		/* embedded bio; kept last — presumably followed by inline bvecs, TODO confirm */
};

/*
 * Per-read state. Wraps an embedded struct bio plus the bookkeeping needed
 * for splitting, bouncing, retries and cache promotion.
 */
struct bch_read_bio {
	/*
	 * Reads will often have to be split, and if the extent being read from
	 * was checksummed or compressed we'll also have to allocate bounce
	 * buffers and copy the data back into the original bio.
	 *
	 * If we didn't have to split, we have to save and restore the original
	 * bi_end_io - @split below indicates which:
	 */
	union {
	struct bch_read_bio	*parent;
	bio_end_io_t		*orig_bi_end_io;
	};

	/*
	 * Saved copy of parent->bi_iter, from submission time - allows us to
	 * resubmit on IO error, and also to copy data back to the original bio
	 * when we're bouncing:
	 */
	struct bvec_iter	parent_iter;

	/*
	 * If we have to retry the read (IO error, checksum failure, read stale
	 * data (raced with allocator), we retry the portion of the parent bio
	 * that failed (i.e. this bio's portion, parent_iter).
	 *
	 * But we need to stash the inode somewhere:
	 */
	u64			inode;

	unsigned		submit_time_us;	/* submission time in microseconds (per name); presumably for latency stats */
	u16			flags;		/* read flags bitmask; bit meanings defined elsewhere */
	u8			bounce:1,	/* set when data was read into a bounce buffer (see comment above) */
				split:1;	/* set if split from a parent read — selects @parent vs @orig_bi_end_io in the union above */

	struct bch_extent_crc64	crc;		/* checksum/compression info for the extent read — TODO confirm */
	struct bch_extent_ptr	ptr;		/* extent pointer this read was issued against */
	struct cache		*ca;		/* device read from — TODO confirm */

	struct cache_promote_op *promote;	/* non-NULL if this read should trigger a cache promotion — verify against callers */

	/* bio_decompress_worker list */
	struct llist_node	list;

	struct bio		bio;		/* embedded bio; kept last — presumably followed by inline bvecs, TODO confirm */
};

/*
 * Return the top-level read bio: if @rbio was split off from a parent,
 * follow the parent link; otherwise @rbio is already the top-level bio.
 */
static inline struct bch_read_bio *
bch_rbio_parent(struct bch_read_bio *rbio)
{
	if (rbio->split)
		return rbio->parent;

	return rbio;
}

/* Per-write bio state, wrapping an embedded struct bbio. */
struct bch_write_bio {
	struct bio		*orig;		/* original caller bio — presumably to complete when this write finishes; TODO confirm */
	unsigned		bounce:1,	/* set if data was copied into a bounce buffer — TODO confirm */
				split:1;	/* set if this bio was split from @orig — TODO confirm */
	struct bbio		bio;		/* embedded bbio (device + ptr + bio); kept last */
};

/*
 * State for a compare-and-replace style index update: the insert hook plus
 * success/failure counters and the key being matched against.
 */
struct bch_replace_info {
	struct extent_insert_hook	hook;
	/* How many insertions succeeded */
	unsigned			successes;
	/* How many insertions failed */
	unsigned			failures;
	BKEY_PADDED(key);	/* key to compare/replace, padded to max key size */
};

/*
 * State for one logical write operation: IO submission, allocation,
 * and the subsequent btree index update.
 */
struct bch_write_op {
	struct closure		cl;		/* tracks outstanding work for this op */
	struct cache_set	*c;		/* filesystem this write belongs to */
	struct workqueue_struct	*io_wq;		/* workqueue used for this op's async work — TODO confirm */
	struct bch_write_bio	*bio;		/* data bio(s) being written */

	unsigned		written; /* sectors */

	short			error;		/* first error encountered, 0 on success — TODO confirm sign convention */

	u16			flags;		/* write flags bitmask; bit meanings defined elsewhere */
	unsigned		compression_type:4;	/* compression to apply (4-bit enum) */
	unsigned		nr_replicas:4;		/* number of replicas to write (4-bit) */

	struct bch_extent_crc64	crc;		/* checksum/compression state for data being written — TODO confirm */

	struct disk_reservation	res;		/* space reservation backing this write */

	struct write_point	*wp;		/* allocation write point to write through */

	/*
	 * While active, the op holds open buckets; while queued/delayed it is
	 * linked with an expiry time instead — presumably the two states are
	 * mutually exclusive, hence the union. TODO confirm.
	 */
	union {
	struct open_bucket	*open_buckets[2];
	struct {
	struct bch_write_op	*next;
	unsigned long		expires;
	};
	};

	/*
	 * If caller wants to flush but hasn't passed us a journal_seq ptr, we
	 * still need to stash the journal_seq somewhere:
	 */
	union {
		u64			*journal_seq_p;
		u64			journal_seq;
	};

	struct extent_insert_hook *insert_hook;	/* optional hook invoked during the index update — TODO confirm */
	int			(*index_update_fn)(struct bch_write_op *);	/* performs the btree index update for this op */

	struct keylist		insert_keys;	/* keys to insert into the index */
	BKEY_PADDED(insert_key);		/* inline storage for a single padded key */
	u64			inline_keys[BKEY_EXTENT_U64s_MAX * 2];	/* inline backing storage for insert_keys — TODO confirm */
};

/*
 * Per-worker context for decompressing completed read bios: work item plus
 * a lock-free list of bios awaiting decompression (see bch_read_bio.list).
 */
struct bio_decompress_worker {
	struct cache_set		*c;		/* filesystem this worker serves */
	struct work_struct		work;		/* work item that drains @bio_list */
	struct llist_head		bio_list;	/* lock-free list of read bios to process */
};

#endif /* _BCACHE_IO_TYPES_H */