summary refs log tree commit diff
path: root/libbcache/alloc_types.h
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2017-03-19 15:56:34 -0800
committerKent Overstreet <kent.overstreet@gmail.com>2017-03-19 17:31:47 -0800
commit5ec39af8eaba49aee7bafa44c661da39e2f40dc3 (patch)
tree1fb1a981602cbf22c7d2b2dba1168c715d7cecb5 /libbcache/alloc_types.h
parentbb1941de5378a7b8122d3575dcbc7d0aeb6326f0 (diff)
Rename from bcache-tools to bcachefs-tools
Diffstat (limited to 'libbcache/alloc_types.h')
-rw-r--r--libbcache/alloc_types.h102
1 file changed, 0 insertions, 102 deletions
diff --git a/libbcache/alloc_types.h b/libbcache/alloc_types.h
deleted file mode 100644
index 1bf48ef9..00000000
--- a/libbcache/alloc_types.h
+++ /dev/null
@@ -1,102 +0,0 @@
-#ifndef _BCACHE_ALLOC_TYPES_H
-#define _BCACHE_ALLOC_TYPES_H
-
-#include <linux/mutex.h>
-
-#include "clock_types.h"
-
-/*
- * There are two of these clocks, one for reads and one for writes:
- *
- * All fields protected by bucket_lock
- */
-struct prio_clock {
- /*
- * "now" in (read/write) IO time - incremented whenever we do X amount
- * of reads or writes.
- *
- * Goes with the bucket read/write prios: when we read or write to a
- * bucket we reset the bucket's prio to the current hand; thus hand -
- * prio = time since bucket was last read/written.
- *
- * The units are some amount (bytes/sectors) of data read/written, and
- * the units can change on the fly if we need to rescale to fit
- * everything in a u16 - your only guarantee is that the units are
- * consistent.
- */
- u16 hand;
- u16 min_prio; /* presumably the smallest live bucket prio, tracked for rescaling — confirm */
-
- int rw; /* selects which clock this is: one for reads, one for writes (see above) */
-
- struct io_timer rescale; /* io_timer (clock_types.h) that triggers the prio rescale mentioned above */
-};
-
-/* There is one reserve for each type of btree, one for prios and gens
- * and one for moving GC */
-enum alloc_reserve {
- RESERVE_PRIO, /* prio/gen writes */
- RESERVE_BTREE, /* btree node allocations */
- RESERVE_METADATA_LAST = RESERVE_BTREE, /* alias: last metadata reserve, tested by allocation_is_metadata() */
- RESERVE_MOVINGGC, /* moving GC */
-
- RESERVE_NONE, /* ordinary allocations with no dedicated reserve — presumably; confirm at callers */
- RESERVE_NR, /* number of reserves; must be last */
-};
-
-/* True iff @id is one of the metadata reserves, i.e. everything up to and
- * including RESERVE_METADATA_LAST in enum alloc_reserve. */
-static inline bool allocation_is_metadata(enum alloc_reserve id)
-{
- return id <= RESERVE_METADATA_LAST;
-}
-
-/* A weighted group of devices to allocate from (used as the per-tier cache
- * group referenced by struct write_point below). NOTE(review): fields are
- * presumably protected by @lock — confirm against the allocator code. */
-struct dev_group {
- spinlock_t lock;
- unsigned nr; /* number of valid entries in d[] */
- unsigned cur_device; /* index into d[] of the device last allocated from — presumably round-robin state; confirm */
- struct {
- u64 weight;
- struct bch_dev *dev;
- } d[BCH_SB_MEMBERS_MAX];
-};
-
-/* Enough for 16 cache devices, 2 tiers and some left over for pipelining */
-#define OPEN_BUCKETS_COUNT 256
-
-#define WRITE_POINT_COUNT 16
-
-/* A bucket currently open for writing: up to BCH_REPLICAS_MAX replica
- * pointers into it, plus bookkeeping for how much space remains. */
-struct open_bucket {
- struct list_head list; /* NOTE(review): which list this threads onto is not visible here — confirm */
- struct mutex lock; /* presumably serializes allocation from this bucket — confirm */
- atomic_t pin; /* reference count — presumably keeps the bucket open while writes are in flight; confirm */
- bool has_full_ptrs;
- /*
- * recalculated every time we allocate from this open_bucket based on
- * how many pointers we're actually going to use:
- */
- unsigned sectors_free;
- unsigned nr_ptrs; /* number of valid entries in ptrs[]/ptr_offset[] */
- struct bch_extent_ptr ptrs[BCH_REPLICAS_MAX];
- unsigned ptr_offset[BCH_REPLICAS_MAX]; /* per-pointer offset paired with ptrs[] */
-};
-
-/* A point that writes are issued through; owns the open_bucket currently
- * being filled. */
-struct write_point {
- struct open_bucket *b; /* open bucket currently being written to, if any — presumably NULL when none; confirm */
-
- /*
- * Throttle writes to this write point if tier 0 is full?
- */
- bool throttle;
-
- /*
- * If not NULL, cache group for tiering, promotion and moving GC -
- * always allocates a single replica
- */
- struct dev_group *group;
-
- /*
- * Otherwise do a normal replicated bucket allocation that could come
- * from any device in tier 0 (foreground write)
- */
-};
-
-#endif /* _BCACHE_ALLOC_TYPES_H */