path: root/libbcachefs/btree_update.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_UPDATE_H
#define _BCACHEFS_BTREE_UPDATE_H

#include "btree_iter.h"
#include "journal.h"

struct bch_fs;
struct btree;

void bch2_btree_node_lock_for_insert(struct bch_fs *, struct btree *,
				     struct btree_iter *);
bool bch2_btree_bset_insert_key(struct btree_iter *, struct btree *,
				struct btree_node_iter *, struct bkey_i *);
void bch2_btree_journal_key(struct btree_trans *, struct btree_iter *,
			    struct bkey_i *);

void bch2_deferred_update_free(struct bch_fs *,
			       struct deferred_update *);
struct deferred_update *
bch2_deferred_update_alloc(struct bch_fs *, enum btree_id, unsigned);

#define BTREE_INSERT_ENTRY(_iter, _k)					\
	((struct btree_insert_entry) {					\
		.iter		= (_iter),				\
		.k		= (_k),					\
	})

#define BTREE_INSERT_DEFERRED(_d, _k)					\
	((struct btree_insert_entry) {					\
		.k		= (_k),					\
		.d		= (_d),					\
		.deferred	= true,					\
	})
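
/*
 * Both initializers build the struct btree_insert_entry consumed by
 * bch2_trans_update(), declared further down.  A minimal sketch (@iter,
 * @d and @new_key are the caller's, not defined here):
 *
 *	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &new_key));
 *	bch2_trans_update(&trans, BTREE_INSERT_DEFERRED(d, &new_key));
 */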

enum {
	__BTREE_INSERT_ATOMIC,
	__BTREE_INSERT_NOUNLOCK,
	__BTREE_INSERT_NOFAIL,
	__BTREE_INSERT_NOCHECK_RW,
	__BTREE_INSERT_LAZY_RW,
	__BTREE_INSERT_USE_RESERVE,
	__BTREE_INSERT_USE_ALLOC_RESERVE,
	__BTREE_INSERT_JOURNAL_REPLAY,
	__BTREE_INSERT_JOURNAL_RESERVED,
	__BTREE_INSERT_NOMARK_OVERWRITES,
	__BTREE_INSERT_NOMARK,
	__BTREE_INSERT_MARK_INMEM,
	__BTREE_INSERT_NO_CLEAR_REPLICAS,
	__BTREE_INSERT_BUCKET_INVALIDATE,
	__BTREE_INSERT_NOWAIT,
	__BTREE_INSERT_GC_LOCK_HELD,
	__BCH_HASH_SET_MUST_CREATE,
	__BCH_HASH_SET_MUST_REPLACE,
};

/*
 * Don't drop/retake locks before doing a btree update; instead, return
 * -EINTR if we had to drop locks for any reason
 */
#define BTREE_INSERT_ATOMIC		(1 << __BTREE_INSERT_ATOMIC)

/*
 * Don't drop locks _after_ successfully updating btree:
 */
#define BTREE_INSERT_NOUNLOCK		(1 << __BTREE_INSERT_NOUNLOCK)

/* Don't check for -ENOSPC: */
#define BTREE_INSERT_NOFAIL		(1 << __BTREE_INSERT_NOFAIL)

#define BTREE_INSERT_NOCHECK_RW		(1 << __BTREE_INSERT_NOCHECK_RW)
#define BTREE_INSERT_LAZY_RW		(1 << __BTREE_INSERT_LAZY_RW)

/* for copygc, or when merging btree nodes */
#define BTREE_INSERT_USE_RESERVE	(1 << __BTREE_INSERT_USE_RESERVE)
#define BTREE_INSERT_USE_ALLOC_RESERVE	(1 << __BTREE_INSERT_USE_ALLOC_RESERVE)

/* Insert is for journal replay - don't get journal reservations: */
#define BTREE_INSERT_JOURNAL_REPLAY	(1 << __BTREE_INSERT_JOURNAL_REPLAY)

#define BTREE_INSERT_JOURNAL_RESERVED	(1 << __BTREE_INSERT_JOURNAL_RESERVED)

/* Don't mark overwrites, just the new key: */
#define BTREE_INSERT_NOMARK_OVERWRITES	(1 << __BTREE_INSERT_NOMARK_OVERWRITES)

/* Don't mark the new key at all: */
#define BTREE_INSERT_NOMARK		(1 << __BTREE_INSERT_NOMARK)

/* Don't mark transactionally: */
#define BTREE_INSERT_MARK_INMEM		(1 << __BTREE_INSERT_MARK_INMEM)

#define BTREE_INSERT_NO_CLEAR_REPLICAS	(1 << __BTREE_INSERT_NO_CLEAR_REPLICAS)

#define BTREE_INSERT_BUCKET_INVALIDATE	(1 << __BTREE_INSERT_BUCKET_INVALIDATE)

/* Don't block on allocation failure (for new btree nodes): */
#define BTREE_INSERT_NOWAIT		(1 << __BTREE_INSERT_NOWAIT)
#define BTREE_INSERT_GC_LOCK_HELD	(1 << __BTREE_INSERT_GC_LOCK_HELD)

#define BCH_HASH_SET_MUST_CREATE	(1 << __BCH_HASH_SET_MUST_CREATE)
#define BCH_HASH_SET_MUST_REPLACE	(1 << __BCH_HASH_SET_MUST_REPLACE)
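
/*
 * Illustrative flag usage (a sketch, not copied from a caller in this
 * tree): journal replay avoids taking new journal reservations and must
 * not fail with -ENOSPC, so a commit there might look like:
 *
 *	ret = bch2_trans_commit(&trans, NULL, NULL,
 *				BTREE_INSERT_NOFAIL|
 *				BTREE_INSERT_JOURNAL_REPLAY);
 */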

int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned);

int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
		     struct disk_reservation *, u64 *, int flags);

int bch2_btree_delete_at_range(struct btree_trans *, struct btree_iter *,
			       struct bpos, u64 *);
int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
			    struct bpos, struct bpos, u64 *);
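
/*
 * Deletion sketch (names are illustrative): drop every extent key in a
 * range belonging to inode @inum, recording the journal sequence number
 * so the caller can later flush or wait on it:
 *
 *	u64 journal_seq = 0;
 *	int ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
 *					  POS(inum, start),
 *					  POS(inum, end),
 *					  &journal_seq);
 */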

int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *,
			    __le64, unsigned);
int bch2_btree_node_update_key(struct bch_fs *, struct btree_iter *,
			       struct btree *, struct bkey_i_btree_ptr *);

int bch2_trans_commit(struct btree_trans *,
		      struct disk_reservation *,
		      u64 *, unsigned);

static inline void bch2_trans_update(struct btree_trans *trans,
				     struct btree_insert_entry entry)
{
	EBUG_ON(trans->nr_updates >= trans->nr_iters + 4);

	trans->updates[trans->nr_updates++] = entry;
}
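
/*
 * Typical open-coded commit loop (illustrative sketch; bch2_trans_get_iter()
 * comes from btree_iter.h, and @new_inode is assumed to be a key the caller
 * has already built):
 *
 *	struct btree_trans trans;
 *	struct btree_iter *iter;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES,
 *				   POS(0, inum), BTREE_ITER_INTENT);
 *	do {
 *		bch2_trans_begin(&trans);
 *		bch2_trans_update(&trans,
 *				  BTREE_INSERT_ENTRY(iter, &new_inode.k_i));
 *		ret = bch2_trans_commit(&trans, NULL, NULL,
 *					BTREE_INSERT_ATOMIC);
 *	} while (ret == -EINTR);
 *	bch2_trans_exit(&trans);
 *
 * bch2_trans_do() below wraps this begin/commit/retry pattern.
 */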

#define bch2_trans_do(_c, _journal_seq, _flags, _do)			\
({									\
	struct btree_trans trans;					\
	int _ret;							\
									\
	bch2_trans_init(&trans, (_c), 0, 0);				\
									\
	do {								\
		bch2_trans_begin(&trans);				\
									\
		_ret = (_do) ?:	bch2_trans_commit(&trans, NULL,		\
					(_journal_seq), (_flags));	\
	} while (_ret == -EINTR);					\
									\
	bch2_trans_exit(&trans);					\
	_ret;								\
})
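
/*
 * Example use of bch2_trans_do() (a sketch; do_one_update() stands in for
 * any helper that queues updates on &trans and returns 0 or an error):
 *
 *	ret = bch2_trans_do(c, &journal_seq, BTREE_INSERT_NOFAIL,
 *			    do_one_update(&trans, inum));
 */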

#define __trans_next_update(_trans, _i, _filter)			\
({									\
	while ((_i) < (_trans)->updates + (_trans)->nr_updates && !(_filter))\
		(_i)++;							\
									\
	(_i) < (_trans)->updates + (_trans)->nr_updates;		\
})

#define __trans_for_each_update(_trans, _i, _filter)			\
	for ((_i) = (_trans)->updates;					\
	     __trans_next_update(_trans, _i, _filter);			\
	     (_i)++)

#define trans_for_each_update(trans, i)					\
	__trans_for_each_update(trans, i, true)

#define trans_for_each_update_iter(trans, i)				\
	__trans_for_each_update(trans, i, !(i)->deferred)
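
/*
 * Illustrative walk over the queued updates (@i is a
 * struct btree_insert_entry *); trans_for_each_update_iter() additionally
 * skips deferred entries, which carry no iterator:
 *
 *	struct btree_insert_entry *i;
 *
 *	trans_for_each_update_iter(trans, i)
 *		pr_info("update at %llu:%llu\n",
 *			i->k->k.p.inode, i->k->k.p.offset);
 */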

#endif /* _BCACHEFS_BTREE_UPDATE_H */