#ifndef _BCACHE_SUPER_H
#define _BCACHE_SUPER_H

#include "extents.h"
#include "bcachefs_ioctl.h"

static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return s >> ca->bucket_bits;
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) << ca->bucket_bits;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	return s & (ca->mi.bucket_size - 1);
}
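
/*
 * Worked example (illustrative, not part of the original header):
 * mi.bucket_size is in 512-byte sectors and is assumed here to be a
 * power of two, with ca->bucket_bits == log2(bucket size in sectors).
 * With 128-sector (64k) buckets, bucket_bits == 7:
 *
 *	sector_to_bucket(ca, 300)  == 2		-- 300 >> 7
 *	bucket_to_sector(ca, 2)    == 256	-- 2 << 7
 *	bucket_remainder(ca, 300)  == 44	-- 300 & 127
 */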

static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter)
{
	struct bch_dev *ca = NULL;

	while (*iter < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[*iter],
					    lockdep_is_held(&c->state_lock))))
		(*iter)++;

	return ca;
}

#define __for_each_member_device(ca, c, iter)				\
	for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter))); (iter)++)

#define for_each_member_device_rcu(ca, c, iter)			\
	__for_each_member_device(ca, c, iter)
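
/*
 * Usage sketch (illustrative, not part of the original header): the _rcu
 * variant takes no refcounts, so __bch2_next_dev()'s rcu_dereference_check()
 * requires the caller to hold the RCU read lock (or c->state_lock) for the
 * whole walk:
 *
 *	unsigned i, nr_devs = 0;
 *	struct bch_dev *ca;
 *
 *	rcu_read_lock();
 *	for_each_member_device_rcu(ca, c, i)
 *		nr_devs++;
 *	rcu_read_unlock();
 */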

static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
{
	struct bch_dev *ca;

	rcu_read_lock();
	if ((ca = __bch2_next_dev(c, iter)))
		percpu_ref_get(&ca->ref);
	rcu_read_unlock();

	return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define for_each_member_device(ca, c, iter)				\
	for ((iter) = 0;						\
	     (ca = bch2_get_next_dev(c, &(iter)));			\
	     percpu_ref_put(&ca->ref), (iter)++)
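
/*
 * Early-break sketch (illustrative; target_idx and use_device() are made
 * up): the loop's increment expression does the percpu_ref_put(), so
 * breaking out skips it and the caller must drop the ref itself:
 *
 *	for_each_member_device(ca, c, i)
 *		if (ca->dev_idx == target_idx)
 *			break;
 *	if (ca) {
 *		use_device(ca);		// ref still held here
 *		percpu_ref_put(&ca->ref);
 *	}
 */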

static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       unsigned *iter,
						       int state_mask)
{
	struct bch_dev *ca;

	rcu_read_lock();
	while ((ca = __bch2_next_dev(c, iter)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref)))
		(*iter)++;
	rcu_read_unlock();

	return ca;
}

#define __for_each_online_member(ca, c, iter, state_mask)		\
	for ((iter) = 0;						\
	     (ca = bch2_get_next_online_dev(c, &(iter), state_mask));	\
	     percpu_ref_put(&ca->io_ref), (iter)++)

#define for_each_online_member(ca, c, iter)				\
	__for_each_online_member(ca, c, iter, ~0)

#define for_each_rw_member(ca, c, iter)					\
	__for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_RW)

#define for_each_readable_member(ca, c, iter)				\
	__for_each_online_member(ca, c, iter,				\
		(1 << BCH_MEMBER_STATE_RW)|(1 << BCH_MEMBER_STATE_RO))
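
/*
 * state_mask has one bit per bch_member_state, so these wrappers select
 * which device states to visit. Usage sketch (illustrative, not part of
 * the original header); the macro takes io_ref via percpu_ref_tryget()
 * and drops it for us, so early breaks need a put on io_ref too:
 *
 *	unsigned i, nr_rw = 0;
 *	struct bch_dev *ca;
 *
 *	for_each_rw_member(ca, c, i)
 *		nr_rw++;
 */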

struct bch_fs *bch2_bdev_to_fs(struct block_device *);
struct bch_fs *bch2_uuid_to_fs(uuid_le);
int bch2_congested(struct bch_fs *, int);

void bch2_dev_release(struct kobject *);

bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *,
			    enum bch_member_state, int);
int __bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
			 enum bch_member_state, int);
int bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
		       enum bch_member_state, int);
int bch2_dev_fail(struct bch_dev *, int);

int bch2_dev_remove(struct bch_fs *, struct bch_dev *, int);
int bch2_dev_add(struct bch_fs *, const char *);
int bch2_dev_online(struct bch_fs *, const char *);
int bch2_dev_offline(struct bch_fs *, struct bch_dev *, int);
int bch2_dev_evacuate(struct bch_fs *, struct bch_dev *);

bool bch2_fs_emergency_read_only(struct bch_fs *);
void bch2_fs_read_only(struct bch_fs *);
const char *bch2_fs_read_write(struct bch_fs *);

void bch2_fs_release(struct kobject *);
void bch2_fs_stop(struct bch_fs *);

const char *bch2_fs_start(struct bch_fs *);
const char *bch2_fs_open(char * const *, unsigned, struct bch_opts,
			 struct bch_fs **);
const char *bch2_fs_open_incremental(const char *path);
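
/*
 * Open sketch (hypothetical caller; assumes bch2_opts_empty() from
 * opts.h; error handling elided). Going by the signature, bch2_fs_open()
 * takes an array of device path strings and returns an error string on
 * failure, NULL on success:
 *
 *	char *devs[] = { "/dev/sda", "/dev/sdb" };
 *	struct bch_fs *c;
 *	const char *err = bch2_fs_open(devs, 2, bch2_opts_empty(), &c);
 *
 *	if (err)
 *		pr_err("error opening: %s\n", err);
 */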
extern struct kobj_type bch2_fs_ktype;
extern struct kobj_type bch2_fs_internal_ktype;
extern struct kobj_type bch2_fs_time_stats_ktype;
extern struct kobj_type bch2_fs_opts_dir_ktype;
extern struct kobj_type bch2_dev_ktype;
#endif /* _BCACHE_SUPER_H */