path: root/include
author		Kent Overstreet <kent.overstreet@gmail.com>	2017-03-03 21:09:52 -0900
committer	Kent Overstreet <kent.overstreet@gmail.com>	2017-03-03 23:17:31 -0900
commit		171ee48e57be78f4e95954c99851553fa523bf91 (patch)
tree		5a54443f074f8f7922eda1451743bb9d2f60368e /include
parent		a5b5eba7f788bb77cf57f9c94f3474a2d439ab0b (diff)
Delete more unused shim code, update bcache code
Diffstat (limited to 'include')
-rw-r--r--	include/linux/bcache.h			74
-rw-r--r--	include/linux/completion.h		62
-rw-r--r--	include/linux/kernel.h			20
-rw-r--r--	include/linux/rhashtable.h		466
-rw-r--r--	include/linux/wait.h			1172
-rw-r--r--	include/linux/zconf.h			57
-rw-r--r--	include/linux/zlib.h			593
-rw-r--r--	include/trace/events/bcache.h		12
8 files changed, 84 insertions, 2372 deletions
diff --git a/include/linux/bcache.h b/include/linux/bcache.h
index 4179f8dd..dbb02742 100644
--- a/include/linux/bcache.h
+++ b/include/linux/bcache.h
@@ -1021,80 +1021,6 @@ enum bch_compression_opts {
BCH_COMPRESSION_NR = 3,
};
-/**
- * BCH_OPT(name, choices, min, max, sb_option, sysfs_writeable)
- *
- * @name - name of mount option, sysfs attribute, and struct cache_set_opts
- * member
- *
- * @choices - array of strings that the user can select from - option is by
- * array index
- *
- * Booleans are special cased; if @choices is bch_bool_opt the mount
- * options name and noname will work as expected.
- *
- * @min, @max
- *
- * @sb_option - name of corresponding superblock option
- *
- * @sysfs_writeable - if true, option will be modifiable at runtime via sysfs
- */
-
-#define BCH_SB_OPTS() \
- BCH_OPT(errors, \
- bch_error_actions, \
- 0, BCH_NR_ERROR_ACTIONS, \
- BCH_SB_ERROR_ACTION, \
- true) \
- BCH_OPT(metadata_replicas, \
- bch_uint_opt, \
- 0, BCH_REPLICAS_MAX, \
- BCH_SB_META_REPLICAS_WANT, \
- false) \
- BCH_OPT(data_replicas, \
- bch_uint_opt, \
- 0, BCH_REPLICAS_MAX, \
- BCH_SB_DATA_REPLICAS_WANT, \
- false) \
- BCH_OPT(metadata_checksum, \
- bch_csum_types, \
- 0, BCH_CSUM_OPT_NR, \
- BCH_SB_META_CSUM_TYPE, \
- true) \
- BCH_OPT(data_checksum, \
- bch_csum_types, \
- 0, BCH_CSUM_OPT_NR, \
- BCH_SB_DATA_CSUM_TYPE, \
- true) \
- BCH_OPT(compression, \
- bch_compression_types, \
- 0, BCH_COMPRESSION_NR, \
- BCH_SB_COMPRESSION_TYPE, \
- true) \
- BCH_OPT(str_hash, \
- bch_str_hash_types, \
- 0, BCH_STR_HASH_NR, \
- BCH_SB_STR_HASH_TYPE, \
- true) \
- BCH_OPT(inodes_32bit, \
- bch_bool_opt, 0, 2, \
- BCH_SB_INODE_32BIT, \
- true) \
- BCH_OPT(gc_reserve_percent, \
- bch_uint_opt, \
- 5, 21, \
- BCH_SB_GC_RESERVE, \
- false) \
- BCH_OPT(root_reserve_percent, \
- bch_uint_opt, \
- 0, 100, \
- BCH_SB_ROOT_RESERVE, \
- false) \
- BCH_OPT(wide_macs, \
- bch_bool_opt, 0, 2, \
- BCH_SB_128_BIT_MACS, \
- true)
-
/* backing device specific stuff: */
struct backingdev_sb {
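
The BCH_SB_OPTS() table deleted above is an X-macro list: each consumer redefines BCH_OPT() and then expands the list to stamp out enums, name strings, or struct fields. A minimal sketch of that pattern follows (illustrative only; the Opt_* and bch_opt_names identifiers below are not taken from this tree):

/* Illustrative expansion of an X-macro option list such as BCH_SB_OPTS(). */
enum bch_opt_id {
#define BCH_OPT(_name, _choices, _min, _max, _sb_opt, _sysfs_rw)	\
	Opt_##_name,
	BCH_SB_OPTS()
#undef BCH_OPT
	bch_opts_nr,
};

static const char * const bch_opt_names[] = {
#define BCH_OPT(_name, _choices, _min, _max, _sb_opt, _sysfs_rw)	\
	#_name,
	BCH_SB_OPTS()
#undef BCH_OPT
};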
diff --git a/include/linux/completion.h b/include/linux/completion.h
index b8bac212..71c6b616 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -10,74 +10,26 @@
#include <linux/wait.h>
-/*
- * struct completion - structure used to maintain state for a "completion"
- *
- * This is the opaque structure used to maintain the state for a "completion".
- * Completions currently use a FIFO to queue threads that have to wait for
- * the "completion" event.
- *
- * See also: complete(), wait_for_completion() (and friends _timeout,
- * _interruptible, _interruptible_timeout, and _killable), init_completion(),
- * reinit_completion(), and macros DECLARE_COMPLETION(),
- * DECLARE_COMPLETION_ONSTACK().
- */
struct completion {
unsigned int done;
wait_queue_head_t wait;
};
-#define COMPLETION_INITIALIZER(work) \
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+#define DECLARE_COMPLETION(work) \
+ struct completion work = { \
+ .done = 0, \
+ .wait = __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) \
+ }
-#define COMPLETION_INITIALIZER_ONSTACK(work) \
- ({ init_completion(&work); work; })
-
-#define DECLARE_COMPLETION(work) \
- struct completion work = COMPLETION_INITIALIZER(work)
#define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
-/**
- * init_completion - Initialize a dynamically allocated completion
- * @x: pointer to completion structure that is to be initialized
- *
- * This inline function will initialize a dynamically created completion
- * structure.
- */
static inline void init_completion(struct completion *x)
{
x->done = 0;
init_waitqueue_head(&x->wait);
}
-/**
- * reinit_completion - reinitialize a completion structure
- * @x: pointer to completion structure that is to be reinitialized
- *
- * This inline function should be used to reinitialize a completion structure so it can
- * be reused. This is especially important after complete_all() is used.
- */
-static inline void reinit_completion(struct completion *x)
-{
- x->done = 0;
-}
-
-extern void wait_for_completion(struct completion *);
-extern void wait_for_completion_io(struct completion *);
-extern int wait_for_completion_interruptible(struct completion *x);
-extern int wait_for_completion_killable(struct completion *x);
-extern unsigned long wait_for_completion_timeout(struct completion *x,
- unsigned long timeout);
-extern unsigned long wait_for_completion_io_timeout(struct completion *x,
- unsigned long timeout);
-extern long wait_for_completion_interruptible_timeout(
- struct completion *x, unsigned long timeout);
-extern long wait_for_completion_killable_timeout(
- struct completion *x, unsigned long timeout);
-extern bool try_wait_for_completion(struct completion *x);
-extern bool completion_done(struct completion *x);
-
-extern void complete(struct completion *);
-extern void complete_all(struct completion *);
+void complete(struct completion *);
+void wait_for_completion(struct completion *);
#endif
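
After this change the userspace completion shim exposes only complete() and wait_for_completion() alongside the initializers. A minimal usage sketch against the retained declarations (the work_done, worker_finished and wait_for_worker names are illustrative):

#include <linux/completion.h>

static DECLARE_COMPLETION(work_done);	/* static initializer kept by this patch */

static void worker_finished(void)
{
	complete(&work_done);		/* signal the completion, waking a waiter */
}

static void wait_for_worker(void)
{
	wait_for_completion(&work_done);	/* block until complete() has been called */
}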
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index ac72858b..741e0ba4 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -194,6 +194,26 @@ static inline int __must_check kstrtol(const char *s, unsigned int base, long *r
int __must_check kstrtouint(const char *s, unsigned int base, unsigned int *res);
int __must_check kstrtoint(const char *s, unsigned int base, int *res);
+static inline int __must_check kstrtou64(const char *s, unsigned int base, u64 *res)
+{
+ return kstrtoull(s, base, res);
+}
+
+static inline int __must_check kstrtos64(const char *s, unsigned int base, s64 *res)
+{
+ return kstrtoll(s, base, res);
+}
+
+static inline int __must_check kstrtou32(const char *s, unsigned int base, u32 *res)
+{
+ return kstrtouint(s, base, res);
+}
+
+static inline int __must_check kstrtos32(const char *s, unsigned int base, s32 *res)
+{
+ return kstrtoint(s, base, res);
+}
+
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
#define VERIFY_OCTAL_PERMISSIONS(perms) \
(BUILD_BUG_ON_ZERO((perms) < 0) + \
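
The new fixed-width kstrto*() wrappers simply forward to the existing parsers, so callers can pass u64/s64/u32/s32 pointers directly. A hedged sketch (the parse_size name and buffer are illustrative):

static int parse_size(const char *buf, u64 *res)
{
	/* base 0 lets the parser accept decimal, 0x... hex, or 0... octal */
	return kstrtou64(buf, 0, res);
}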
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index e5b35edd..f3faea17 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -30,44 +30,15 @@
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
-/*
- * The end of the chain is marked with a special nulls marks which has
- * the following format:
- *
- * +-------+-----------------------------------------------------+-+
- * | Base | Hash |1|
- * +-------+-----------------------------------------------------+-+
- *
- * Base (4 bits) : Reserved to distinguish between multiple tables.
- * Specified via &struct rhashtable_params.nulls_base.
- * Hash (27 bits): Full hash (unmasked) of first element added to bucket
- * 1 (1 bit) : Nulls marker (always set)
- *
- * The remaining bits of the next pointer remain unused for now.
- */
#define RHT_BASE_BITS 4
#define RHT_HASH_BITS 27
#define RHT_BASE_SHIFT RHT_HASH_BITS
-
-/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
struct rhash_head {
struct rhash_head __rcu *next;
};
-/**
- * struct bucket_table - Table of hash buckets
- * @size: Number of hash buckets
- * @rehash: Current bucket being rehashed
- * @hash_rnd: Random seed to fold into hash
- * @locks_mask: Mask to apply before accessing locks[]
- * @locks: Array of spinlocks protecting individual buckets
- * @walkers: List of active walkers
- * @rcu: RCU structure for freeing the table
- * @future_tbl: Table under construction during rehashing
- * @buckets: size * hash buckets
- */
struct bucket_table {
unsigned int size;
unsigned int rehash;
@@ -82,11 +53,6 @@ struct bucket_table {
struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
-/**
- * struct rhashtable_compare_arg - Key for the function rhashtable_compare
- * @ht: Hash table
- * @key: Key to compare against
- */
struct rhashtable_compare_arg {
struct rhashtable *ht;
const void *key;
@@ -97,25 +63,6 @@ typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
const void *obj);
-struct rhashtable;
-
-/**
- * struct rhashtable_params - Hash table construction parameters
- * @nelem_hint: Hint on number of elements, should be 75% of desired size
- * @key_len: Length of key
- * @key_offset: Offset of key in struct to be hashed
- * @head_offset: Offset of rhash_head in struct to be hashed
- * @insecure_max_entries: Maximum number of entries (may be exceeded)
- * @max_size: Maximum size while expanding
- * @min_size: Minimum size while shrinking
- * @nulls_base: Base value to generate nulls marker
- * @insecure_elasticity: Set to true to disable chain length checks
- * @automatic_shrinking: Enable automatic shrinking of tables
- * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
- * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
- * @obj_hashfn: Function to hash object
- * @obj_cmpfn: Function to compare key with object
- */
struct rhashtable_params {
size_t nelem_hint;
size_t key_len;
@@ -133,17 +80,6 @@ struct rhashtable_params {
rht_obj_cmpfn_t obj_cmpfn;
};
-/**
- * struct rhashtable - Hash table handle
- * @tbl: Bucket table
- * @nelems: Number of elements in table
- * @key_len: Key length for hashfn
- * @elasticity: Maximum chain length before rehash
- * @p: Configuration parameters
- * @run_work: Deferred worker to expand/shrink asynchronously
- * @mutex: Mutex to protect current/future table swapping
- * @lock: Spin lock to protect walker list
- */
struct rhashtable {
struct bucket_table __rcu *tbl;
atomic_t nelems;
@@ -155,32 +91,11 @@ struct rhashtable {
spinlock_t lock;
};
-/**
- * struct rhashtable_walker - Hash table walker
- * @list: List entry on list of walkers
- * @tbl: The table that we were walking over
- */
struct rhashtable_walker {
struct list_head list;
struct bucket_table *tbl;
};
-/**
- * struct rhashtable_iter - Hash table iterator, fits into netlink cb
- * @ht: Table to iterate through
- * @p: Current pointer
- * @walker: Associated rhashtable walker
- * @slot: Current slot
- * @skip: Number of entries to skip in slot
- */
-struct rhashtable_iter {
- struct rhashtable *ht;
- struct rhash_head *p;
- struct rhashtable_walker *walker;
- unsigned int slot;
- unsigned int skip;
-};
-
static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
return NULLS_MARKER(ht->p.nulls_base + hash);
@@ -255,11 +170,6 @@ static inline unsigned int rht_head_hashfn(
rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}
-/**
- * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
- * @ht: hash table
- * @tbl: current table
- */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
const struct bucket_table *tbl)
{
@@ -268,11 +178,6 @@ static inline bool rht_grow_above_75(const struct rhashtable *ht,
(!ht->p.max_size || tbl->size < ht->p.max_size);
}
-/**
- * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
- * @ht: hash table
- * @tbl: current table
- */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
const struct bucket_table *tbl)
{
@@ -281,11 +186,6 @@ static inline bool rht_shrink_below_30(const struct rhashtable *ht,
tbl->size > ht->p.min_size;
}
-/**
- * rht_grow_above_100 - returns true if nelems > table-size
- * @ht: hash table
- * @tbl: current table
- */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
const struct bucket_table *tbl)
{
@@ -293,11 +193,6 @@ static inline bool rht_grow_above_100(const struct rhashtable *ht,
(!ht->p.max_size || tbl->size < ht->p.max_size);
}
-/**
- * rht_grow_above_max - returns true if table is above maximum
- * @ht: hash table
- * @tbl: current table
- */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
const struct bucket_table *tbl)
{
@@ -305,206 +200,52 @@ static inline bool rht_grow_above_max(const struct rhashtable *ht,
atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
}
-/* The bucket lock is selected based on the hash and protects mutations
- * on a group of hash buckets.
- *
- * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
- * a single lock always covers both buckets which may both contains
- * entries which link to the same bucket of the old table during resizing.
- * This allows to simplify the locking as locking the bucket in both
- * tables during resize always guarantee protection.
- *
- * IMPORTANT: When holding the bucket lock of both the old and new table
- * during expansions and shrinking, the old bucket lock must always be
- * acquired first.
- */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
unsigned int hash)
{
return &tbl->locks[hash & tbl->locks_mask];
}
-#ifdef CONFIG_PROVE_LOCKING
-int lockdep_rht_mutex_is_held(struct rhashtable *ht);
-int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
-#else
-static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
-{
- return 1;
-}
-
-static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
- u32 hash)
-{
- return 1;
-}
-#endif /* CONFIG_PROVE_LOCKING */
+int rhashtable_insert_rehash(struct rhashtable *, struct bucket_table *);
+struct bucket_table *rhashtable_insert_slow(struct rhashtable *,
+ const void *,
+ struct rhash_head *,
+ struct bucket_table *);
-int rhashtable_init(struct rhashtable *ht,
- const struct rhashtable_params *params);
+int rhashtable_init(struct rhashtable *, const struct rhashtable_params *);
+void rhashtable_destroy(struct rhashtable *);
-struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
- const void *key,
- struct rhash_head *obj,
- struct bucket_table *old_tbl);
-int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
-
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
- gfp_t gfp);
-void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
-void *rhashtable_walk_next(struct rhashtable_iter *iter);
-void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
-
-void rhashtable_free_and_destroy(struct rhashtable *ht,
- void (*free_fn)(void *ptr, void *arg),
- void *arg);
-void rhashtable_destroy(struct rhashtable *ht);
-
-#define rht_dereference(p, ht) \
- rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
-
-#define rht_dereference_rcu(p, ht) \
- rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
-
-#define rht_dereference_bucket(p, tbl, hash) \
- rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))
-
-#define rht_dereference_bucket_rcu(p, tbl, hash) \
- rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
+#define rht_dereference(p, ht) rcu_dereference(p)
+#define rht_dereference_rcu(p, ht) rcu_dereference(p)
+#define rht_dereference_bucket(p, tbl, hash) rcu_dereference(p)
+#define rht_dereference_bucket_rcu(p, tbl, hash) rcu_dereference(p)
#define rht_entry(tpos, pos, member) \
({ tpos = container_of(pos, typeof(*tpos), member); 1; })
-/**
- * rht_for_each_continue - continue iterating over hash chain
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- */
#define rht_for_each_continue(pos, head, tbl, hash) \
for (pos = rht_dereference_bucket(head, tbl, hash); \
!rht_is_a_nulls(pos); \
pos = rht_dereference_bucket((pos)->next, tbl, hash))
-/**
- * rht_for_each - iterate over hash chain
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- */
#define rht_for_each(pos, tbl, hash) \
rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
-/**
- * rht_for_each_entry_continue - continue iterating over hash chain
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- * @member: name of the &struct rhash_head within the hashable struct.
- */
-#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
- for (pos = rht_dereference_bucket(head, tbl, hash); \
- (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
- pos = rht_dereference_bucket((pos)->next, tbl, hash))
-
-/**
- * rht_for_each_entry - iterate over hash chain of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- * @member: name of the &struct rhash_head within the hashable struct.
- */
-#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
- rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
- tbl, hash, member)
-
-/**
- * rht_for_each_entry_safe - safely iterate over hash chain of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @next: the &struct rhash_head to use as next in loop cursor.
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- * @member: name of the &struct rhash_head within the hashable struct.
- *
- * This hash chain list-traversal primitive allows for the looped code to
- * remove the loop cursor from the list.
- */
-#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
- for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
- next = !rht_is_a_nulls(pos) ? \
- rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
- (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
- pos = next, \
- next = !rht_is_a_nulls(pos) ? \
- rht_dereference_bucket(pos->next, tbl, hash) : NULL)
-
-/**
- * rht_for_each_rcu_continue - continue iterating over rcu hash chain
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- *
- * This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu mutation primitives such as rhashtable_insert() as long as the
- * traversal is guarded by rcu_read_lock().
- */
#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
for (({barrier(); }), \
pos = rht_dereference_bucket_rcu(head, tbl, hash); \
!rht_is_a_nulls(pos); \
pos = rcu_dereference_raw(pos->next))
-/**
- * rht_for_each_rcu - iterate over rcu hash chain
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- *
- * This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu mutation primitives such as rhashtable_insert() as long as the
- * traversal is guarded by rcu_read_lock().
- */
#define rht_for_each_rcu(pos, tbl, hash) \
rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
-/**
- * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- * @member: name of the &struct rhash_head within the hashable struct.
- *
- * This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu mutation primitives such as rhashtable_insert() as long as the
- * traversal is guarded by rcu_read_lock().
- */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
for (({barrier(); }), \
pos = rht_dereference_bucket_rcu(head, tbl, hash); \
(!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
-/**
- * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct rhash_head to use as a loop cursor.
- * @tbl: the &struct bucket_table
- * @hash: the hash value / bucket index
- * @member: name of the &struct rhash_head within the hashable struct.
- *
- * This hash chain list-traversal primitive may safely run concurrently with
- * the _rcu mutation primitives such as rhashtable_insert() as long as the
- * traversal is guarded by rcu_read_lock().
- */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
tbl, hash, member)
@@ -518,17 +259,6 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
-/**
- * rhashtable_lookup_fast - search hash table, inlined version
- * @ht: hash table
- * @key: the pointer to the key
- * @params: hash table parameters
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
- *
- * Returns the first entry on which the compare function returned true.
- */
static inline void *rhashtable_lookup_fast(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
@@ -566,7 +296,6 @@ restart:
return NULL;
}
-/* Internal function, please use rhashtable_insert_fast() instead */
static inline int __rhashtable_insert_fast(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params)
@@ -658,50 +387,6 @@ out:
return err;
}
-/**
- * rhashtable_insert_fast - insert object into hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Will take a per bucket spinlock to protect against mutual mutations
- * on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
- *
- * It is safe to call this function from atomic context.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
-static inline int rhashtable_insert_fast(
- struct rhashtable *ht, struct rhash_head *obj,
- const struct rhashtable_params params)
-{
- return __rhashtable_insert_fast(ht, NULL, obj, params);
-}
-
-/**
- * rhashtable_lookup_insert_fast - lookup and insert object into hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
- * This lookup function may only be used for fixed key hash table (key_len
- * parameter set). It will BUG() if used inappropriately.
- *
- * It is safe to call this function from atomic context.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- */
static inline int rhashtable_lookup_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
@@ -714,38 +399,6 @@ static inline int rhashtable_lookup_insert_fast(
params);
}
-/**
- * rhashtable_lookup_insert_key - search and insert object to hash table
- * with explicit key
- * @ht: hash table
- * @key: key
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
- * Lookups may occur in parallel with hashtable mutations and resizing.
- *
- * Will trigger an automatic deferred table resizing if the size grows
- * beyond the watermark indicated by grow_decision() which can be passed
- * to rhashtable_init().
- *
- * Returns zero on success.
- */
-static inline int rhashtable_lookup_insert_key(
- struct rhashtable *ht, const void *key, struct rhash_head *obj,
- const struct rhashtable_params params)
-{
- BUG_ON(!ht->p.obj_hashfn || !key);
-
- return __rhashtable_insert_fast(ht, key, obj, params);
-}
-
-/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
struct rhashtable *ht, struct bucket_table *tbl,
struct rhash_head *obj, const struct rhashtable_params params)
@@ -778,21 +431,6 @@ static inline int __rhashtable_remove_fast(
return err;
}
-/**
- * rhashtable_remove_fast - remove object from hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Since the hash chain is single linked, the removal operation needs to
- * walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
- *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
- *
- * Returns zero on success, -ENOENT if the entry could not be found.
- */
static inline int rhashtable_remove_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
@@ -827,86 +465,4 @@ out:
return err;
}
-/* Internal function, please use rhashtable_replace_fast() instead */
-static inline int __rhashtable_replace_fast(
- struct rhashtable *ht, struct bucket_table *tbl,
- struct rhash_head *obj_old, struct rhash_head *obj_new,
- const struct rhashtable_params params)
-{
- struct rhash_head __rcu **pprev;
- struct rhash_head *he;
- spinlock_t *lock;
- unsigned int hash;
- int err = -ENOENT;
-
- /* Minimally, the old and new objects must have same hash
- * (which should mean identifiers are the same).
- */
- hash = rht_head_hashfn(ht, tbl, obj_old, params);
- if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
- return -EINVAL;
-
- lock = rht_bucket_lock(tbl, hash);
-
- spin_lock_bh(lock);
-
- pprev = &tbl->buckets[hash];
- rht_for_each(he, tbl, hash) {
- if (he != obj_old) {
- pprev = &he->next;
- continue;
- }
-
- rcu_assign_pointer(obj_new->next, obj_old->next);
- rcu_assign_pointer(*pprev, obj_new);
- err = 0;
- break;
- }
-
- spin_unlock_bh(lock);
-
- return err;
-}
-
-/**
- * rhashtable_replace_fast - replace an object in hash table
- * @ht: hash table
- * @obj_old: pointer to hash head inside object being replaced
- * @obj_new: pointer to hash head inside object which is new
- * @params: hash table parameters
- *
- * Replacing an object doesn't affect the number of elements in the hash table
- * or bucket, so we don't need to worry about shrinking or expanding the
- * table here.
- *
- * Returns zero on success, -ENOENT if the entry could not be found,
- * -EINVAL if hash is not the same for the old and new objects.
- */
-static inline int rhashtable_replace_fast(
- struct rhashtable *ht, struct rhash_head *obj_old,
- struct rhash_head *obj_new,
- const struct rhashtable_params params)
-{
- struct bucket_table *tbl;
- int err;
-
- rcu_read_lock();
-
- tbl = rht_dereference_rcu(ht->tbl, ht);
-
- /* Because we have already taken (and released) the bucket
- * lock in old_tbl, if we find that future_tbl is not yet
- * visible then that guarantees the entry to still be in
- * the old tbl if it exists.
- */
- while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
- obj_new, params)) &&
- (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
- ;
-
- rcu_read_unlock();
-
- return err;
-}
-
#endif /* _LINUX_RHASHTABLE_H */
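
The API that survives the trim is rhashtable_init()/rhashtable_destroy() plus the inline fast-path lookup, insert and remove helpers. A minimal caller might look like this (sketch only; struct item and the helper names are illustrative, and error handling is elided):

struct item {
	u32			key;
	struct rhash_head	node;
};

static const struct rhashtable_params item_params = {
	.key_len	= sizeof(u32),			/* fixed-size key */
	.key_offset	= offsetof(struct item, key),
	.head_offset	= offsetof(struct item, node),
};

static int item_add(struct rhashtable *ht, struct item *i)
{
	/* returns -EEXIST if an item with the same key is already present */
	return rhashtable_lookup_insert_fast(ht, &i->node, item_params);
}

static struct item *item_find(struct rhashtable *ht, u32 key)
{
	return rhashtable_lookup_fast(ht, &key, item_params);
}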
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 77cba057..f6f5757a 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -2,20 +2,14 @@
#define _LINUX_WAIT_H
#include <pthread.h>
-
#include <linux/bitmap.h>
#include <linux/list.h>
-#include <linux/lockdep.h>
#include <linux/spinlock.h>
-//#include <uapi/linux/wait.h>
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
-int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
-/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE 0x01
-#define WQ_FLAG_WOKEN 0x02
struct __wait_queue {
unsigned int flags;
@@ -24,37 +18,23 @@ struct __wait_queue {
struct list_head task_list;
};
-struct wait_bit_key {
- void *flags;
- int bit_nr;
-#define WAIT_ATOMIC_T_BIT_NR -1
- unsigned long timeout;
-};
-
-struct wait_bit_queue {
- struct wait_bit_key key;
- wait_queue_t wait;
-};
-
-struct __wait_queue_head {
+typedef struct {
spinlock_t lock;
struct list_head task_list;
-};
-typedef struct __wait_queue_head wait_queue_head_t;
-
-struct task_struct;
+} wait_queue_head_t;
-/*
- * Macros for declaration and initialisaton of the datatypes
- */
-
-#define __WAITQUEUE_INITIALIZER(name, tsk) { \
- .private = tsk, \
- .func = default_wake_function, \
- .task_list = { NULL, NULL } }
+void wake_up(wait_queue_head_t *);
+void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
+void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
+int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
#define DECLARE_WAITQUEUE(name, tsk) \
- wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
+ wait_queue_t name = { \
+ .private = tsk, \
+ .func = default_wake_function, \
+ .task_list = { NULL, NULL } \
+ }
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
@@ -63,178 +43,18 @@ struct task_struct;
#define DECLARE_WAIT_QUEUE_HEAD(name) \
wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
-#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
- { .flags = word, .bit_nr = bit, }
-
-#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
- { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
-
-extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
-
-#define init_waitqueue_head(q) \
- do { \
- static struct lock_class_key __key; \
- \
- __init_waitqueue_head((q), #q, &__key); \
- } while (0)
-
-#ifdef CONFIG_LOCKDEP
-# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
- ({ init_waitqueue_head(&name); name; })
-# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
- wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
-#else
-# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
-#endif
-
-static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
-{
- q->flags = 0;
- q->private = p;
- q->func = default_wake_function;
-}
-
-static inline void
-init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
-{
- q->flags = 0;
- q->private = NULL;
- q->func = func;
-}
-
-/**
- * waitqueue_active -- locklessly test for waiters on the queue
- * @q: the waitqueue to test for waiters
- *
- * returns true if the wait list is not empty
- *
- * NOTE: this function is lockless and requires care, incorrect usage _will_
- * lead to sporadic and non-obvious failure.
- *
- * Use either while holding wait_queue_head_t::lock or when used for wakeups
- * with an extra smp_mb() like:
- *
- * CPU0 - waker CPU1 - waiter
- *
- * for (;;) {
- * @cond = true; prepare_to_wait(&wq, &wait, state);
- * smp_mb(); // smp_mb() from set_current_state()
- * if (waitqueue_active(wq)) if (@cond)
- * wake_up(wq); break;
- * schedule();
- * }
- * finish_wait(&wq, &wait);
- *
- * Because without the explicit smp_mb() it's possible for the
- * waitqueue_active() load to get hoisted over the @cond store such that we'll
- * observe an empty wait list while the waiter might not observe @cond.
- *
- * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
- * which (when the lock is uncontended) are of roughly equal cost.
- */
-static inline int waitqueue_active(wait_queue_head_t *q)
-{
- return !list_empty(&q->task_list);
-}
-
-/**
- * wq_has_sleeper - check if there are any waiting processes
- * @wq: wait queue head
- *
- * Returns true if wq has waiting processes
- *
- * Please refer to the comment for waitqueue_active.
- */
-static inline bool wq_has_sleeper(wait_queue_head_t *wq)
-{
- /*
- * We need to be sure we are in sync with the
- * add_wait_queue modifications to the wait queue.
- *
- * This memory barrier should be paired with one on the
- * waiting side.
- */
- smp_mb();
- return waitqueue_active(wq);
-}
-
-extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
-extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
-extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
-
-static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
-{
- list_add(&new->task_list, &head->task_list);
-}
-
-/*
- * Used for wake-one threads:
- */
-static inline void
-__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
-{
- wait->flags |= WQ_FLAG_EXCLUSIVE;
- __add_wait_queue(q, wait);
-}
-
-static inline void __add_wait_queue_tail(wait_queue_head_t *head,
- wait_queue_t *new)
-{
- list_add_tail(&new->task_list, &head->task_list);
-}
-
-static inline void
-__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
-{
- wait->flags |= WQ_FLAG_EXCLUSIVE;
- __add_wait_queue_tail(q, wait);
-}
-
-static inline void
-__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
+static inline void init_waitqueue_head(wait_queue_head_t *q)
{
- list_del(&old->task_list);
+ spin_lock_init(&q->lock);
+ INIT_LIST_HEAD(&q->task_list);
}
-typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
-void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
-void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
-void __wake_up_bit(wait_queue_head_t *, void *, int);
-int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
-int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
-void wake_up_bit(void *, int);
-void wake_up_atomic_t(atomic_t *);
-int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
-int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
-int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
-int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
-wait_queue_head_t *bit_waitqueue(void *, int);
-
-#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
-#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
-#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
-#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
-#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)
-
-#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
-#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
-#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
-
-/*
- * Wakeup macros to be used to report events to the targets.
- */
-#define wake_up_poll(x, m) \
- __wake_up(x, TASK_NORMAL, 1, (void *) (m))
-#define wake_up_locked_poll(x, m) \
- __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
-#define wake_up_interruptible_poll(x, m) \
- __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
-#define wake_up_interruptible_sync_poll(x, m) \
- __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
+#define DEFINE_WAIT(name) \
+ wait_queue_t name = { \
+ .private = current, \
+ .func = autoremove_wake_function, \
+ .task_list = LIST_HEAD_INIT((name).task_list), \
+ }
#define ___wait_cond_timeout(condition) \
({ \
@@ -244,992 +64,68 @@ wait_queue_head_t *bit_waitqueue(void *, int);
__cond || !__ret; \
})
-#define ___wait_is_interruptible(state) \
- (!__builtin_constant_p(state) || \
- state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
-
-/*
- * The below macro ___wait_event() has an explicit shadow of the __ret
- * variable when used from the wait_event_*() macros.
- *
- * This is so that both can use the ___wait_cond_timeout() construct
- * to wrap the condition.
- *
- * The type inconsistency of the wait_event_*() __ret variable is also
- * on purpose; we use long where we can return timeout values and int
- * otherwise.
- */
-
#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
({ \
- __label__ __out; \
- wait_queue_t __wait; \
- long __ret = ret; /* explicit shadow */ \
- \
- INIT_LIST_HEAD(&__wait.task_list); \
- if (exclusive) \
- __wait.flags = WQ_FLAG_EXCLUSIVE; \
- else \
- __wait.flags = 0; \
+ DEFINE_WAIT(__wait); \
+ long __ret = ret; \
\
for (;;) { \
- long __int = prepare_to_wait_event(&wq, &__wait, state);\
- \
+ prepare_to_wait(&wq, &__wait, state); \
if (condition) \
break; \
- \
- if (___wait_is_interruptible(state) && __int) { \
- __ret = __int; \
- if (exclusive) { \
- abort_exclusive_wait(&wq, &__wait, \
- state, NULL); \
- goto __out; \
- } \
- break; \
- } \
- \
cmd; \
} \
finish_wait(&wq, &__wait); \
-__out: __ret; \
+ __ret; \
})
#define __wait_event(wq, condition) \
(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
schedule())
-/**
- * wait_event - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- */
#define wait_event(wq, condition) \
do { \
- might_sleep(); \
if (condition) \
break; \
__wait_event(wq, condition); \
} while (0)
-#define __io_wait_event(wq, condition) \
- (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
- io_schedule())
-
-/*
- * io_wait_event() -- like wait_event() but with io_schedule()
- */
-#define io_wait_event(wq, condition) \
-do { \
- might_sleep(); \
- if (condition) \
- break; \
- __io_wait_event(wq, condition); \
-} while (0)
-
-#define __wait_event_freezable(wq, condition) \
- ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
- schedule(); try_to_freeze())
-
-/**
- * wait_event_freezable - sleep (or freeze) until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
- * to system load) until the @condition evaluates to true. The
- * @condition is checked each time the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- */
-#define wait_event_freezable(wq, condition) \
-({ \
- int __ret = 0; \
- might_sleep(); \
- if (!(condition)) \
- __ret = __wait_event_freezable(wq, condition); \
- __ret; \
-})
-
#define __wait_event_timeout(wq, condition, timeout) \
___wait_event(wq, ___wait_cond_timeout(condition), \
TASK_UNINTERRUPTIBLE, 0, timeout, \
__ret = schedule_timeout(__ret))
-/**
- * wait_event_timeout - sleep until a condition gets true or a timeout elapses
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @timeout: timeout, in jiffies
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * Returns:
- * 0 if the @condition evaluated to %false after the @timeout elapsed,
- * 1 if the @condition evaluated to %true after the @timeout elapsed,
- * or the remaining jiffies (at least 1) if the @condition evaluated
- * to %true before the @timeout elapsed.
- */
#define wait_event_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
- might_sleep(); \
if (!___wait_cond_timeout(condition)) \
__ret = __wait_event_timeout(wq, condition, timeout); \
__ret; \
})
-#define __wait_event_freezable_timeout(wq, condition, timeout) \
- ___wait_event(wq, ___wait_cond_timeout(condition), \
- TASK_INTERRUPTIBLE, 0, timeout, \
- __ret = schedule_timeout(__ret); try_to_freeze())
-
-/*
- * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
- * increasing load and is freezable.
- */
-#define wait_event_freezable_timeout(wq, condition, timeout) \
-({ \
- long __ret = timeout; \
- might_sleep(); \
- if (!___wait_cond_timeout(condition)) \
- __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
- __ret; \
-})
-
-#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
- (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
- cmd1; schedule(); cmd2)
-/*
- * Just like wait_event_cmd(), except it sets exclusive flag
- */
-#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
-do { \
- if (condition) \
- break; \
- __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2); \
-} while (0)
-
-#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
- (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
- cmd1; schedule(); cmd2)
-
-/**
- * wait_event_cmd - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @cmd1: the command will be executed before sleep
- * @cmd2: the command will be executed after sleep
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- */
-#define wait_event_cmd(wq, condition, cmd1, cmd2) \
-do { \
- if (condition) \
- break; \
- __wait_event_cmd(wq, condition, cmd1, cmd2); \
-} while (0)
-
-#define __wait_event_interruptible(wq, condition) \
- ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
- schedule())
-
-/**
- * wait_event_interruptible - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function will return -ERESTARTSYS if it was interrupted by a
- * signal and 0 if @condition evaluated to true.
- */
-#define wait_event_interruptible(wq, condition) \
-({ \
- int __ret = 0; \
- might_sleep(); \
- if (!(condition)) \
- __ret = __wait_event_interruptible(wq, condition); \
- __ret; \
-})
-
-#define __wait_event_interruptible_timeout(wq, condition, timeout) \
- ___wait_event(wq, ___wait_cond_timeout(condition), \
- TASK_INTERRUPTIBLE, 0, timeout, \
- __ret = schedule_timeout(__ret))
-
-/**
- * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @timeout: timeout, in jiffies
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * Returns:
- * 0 if the @condition evaluated to %false after the @timeout elapsed,
- * 1 if the @condition evaluated to %true after the @timeout elapsed,
- * the remaining jiffies (at least 1) if the @condition evaluated
- * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
- * interrupted by a signal.
- */
-#define wait_event_interruptible_timeout(wq, condition, timeout) \
-({ \
- long __ret = timeout; \
- might_sleep(); \
- if (!___wait_cond_timeout(condition)) \
- __ret = __wait_event_interruptible_timeout(wq, \
- condition, timeout); \
- __ret; \
-})
-
-#define __wait_event_hrtimeout(wq, condition, timeout, state) \
-({ \
- int __ret = 0; \
- struct hrtimer_sleeper __t; \
- \
- hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
- HRTIMER_MODE_REL); \
- hrtimer_init_sleeper(&__t, current); \
- if ((timeout).tv64 != KTIME_MAX) \
- hrtimer_start_range_ns(&__t.timer, timeout, \
- current->timer_slack_ns, \
- HRTIMER_MODE_REL); \
- \
- __ret = ___wait_event(wq, condition, state, 0, 0, \
- if (!__t.task) { \
- __ret = -ETIME; \
- break; \
- } \
- schedule()); \
- \
- hrtimer_cancel(&__t.timer); \
- destroy_hrtimer_on_stack(&__t.timer); \
- __ret; \
-})
-
-/**
- * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @timeout: timeout, as a ktime_t
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function returns 0 if @condition became true, or -ETIME if the timeout
- * elapsed.
- */
-#define wait_event_hrtimeout(wq, condition, timeout) \
-({ \
- int __ret = 0; \
- might_sleep(); \
- if (!(condition)) \
- __ret = __wait_event_hrtimeout(wq, condition, timeout, \
- TASK_UNINTERRUPTIBLE); \
- __ret; \
-})
-
-/**
- * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @timeout: timeout, as a ktime_t
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function returns 0 if @condition became true, -ERESTARTSYS if it was
- * interrupted by a signal, or -ETIME if the timeout elapsed.
- */
-#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
-({ \
- long __ret = 0; \
- might_sleep(); \
- if (!(condition)) \
- __ret = __wait_event_hrtimeout(wq, condition, timeout, \
- TASK_INTERRUPTIBLE); \
- __ret; \
-})
-
-#define __wait_event_interruptible_exclusive(wq, condition) \
- ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
- schedule())
-
-#define wait_event_interruptible_exclusive(wq, condition) \
-({ \
- int __ret = 0; \
- might_sleep(); \
- if (!(condition)) \
- __ret = __wait_event_interruptible_exclusive(wq, condition);\
- __ret; \
-})
-
-#define __wait_event_killable_exclusive(wq, condition) \
- ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
- schedule())
-
-#define wait_event_killable_exclusive(wq, condition) \
-({ \
- int __ret = 0; \
- might_sleep(); \
- if (!(condition)) \
- __ret = __wait_event_killable_exclusive(wq, condition); \
- __ret; \
-})
-
-
-#define __wait_event_freezable_exclusive(wq, condition) \
- ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
- schedule(); try_to_freeze())
-
-#define wait_event_freezable_exclusive(wq, condition) \
-({ \
- int __ret = 0; \
- might_sleep(); \
- if (!(condition)) \
- __ret = __wait_event_freezable_exclusive(wq, condition);\
- __ret; \
-})
-
-
-#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
-({ \
- int __ret = 0; \
- DEFINE_WAIT(__wait); \
- if (exclusive) \
- __wait.flags |= WQ_FLAG_EXCLUSIVE; \
- do { \
- if (likely(list_empty(&__wait.task_list))) \
- __add_wait_queue_tail(&(wq), &__wait); \
- set_current_state(TASK_INTERRUPTIBLE); \
- if (signal_pending(current)) { \
- __ret = -ERESTARTSYS; \
- break; \
- } \
- if (irq) \
- spin_unlock_irq(&(wq).lock); \
- else \
- spin_unlock(&(wq).lock); \
- schedule(); \
- if (irq) \
- spin_lock_irq(&(wq).lock); \
- else \
- spin_lock(&(wq).lock); \
- } while (!(condition)); \
- __remove_wait_queue(&(wq), &__wait); \
- __set_current_state(TASK_RUNNING); \
- __ret; \
-})
-
-
-/**
- * wait_event_interruptible_locked - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
- *
- * It must be called with wq.lock being held. This spinlock is
- * unlocked while sleeping but @condition testing is done while lock
- * is held and when this macro exits the lock is held.
- *
- * The lock is locked/unlocked using spin_lock()/spin_unlock()
- * functions which must match the way they are locked/unlocked outside
- * of this macro.
- *
- * wake_up_locked() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function will return -ERESTARTSYS if it was interrupted by a
- * signal and 0 if @condition evaluated to true.
- */
-#define wait_event_interruptible_locked(wq, condition) \
- ((condition) \
- ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
-
-/**
- * wait_event_interruptible_locked_irq - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
- *
- * It must be called with wq.lock being held. This spinlock is
- * unlocked while sleeping but @condition testing is done while lock
- * is held and when this macro exits the lock is held.
- *
- * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
- * functions which must match the way they are locked/unlocked outside
- * of this macro.
- *
- * wake_up_locked() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function will return -ERESTARTSYS if it was interrupted by a
- * signal and 0 if @condition evaluated to true.
- */
-#define wait_event_interruptible_locked_irq(wq, condition) \
- ((condition) \
- ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
-
-/**
- * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
- *
- * It must be called with wq.lock being held. This spinlock is
- * unlocked while sleeping but @condition testing is done while lock
- * is held and when this macro exits the lock is held.
- *
- * The lock is locked/unlocked using spin_lock()/spin_unlock()
- * functions which must match the way they are locked/unlocked outside
- * of this macro.
- *
- * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
- * set thus when other process waits process on the list if this
- * process is awaken further processes are not considered.
- *
- * wake_up_locked() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function will return -ERESTARTSYS if it was interrupted by a
- * signal and 0 if @condition evaluated to true.
- */
-#define wait_event_interruptible_exclusive_locked(wq, condition) \
- ((condition) \
- ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
-
-/**
- * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
- *
- * It must be called with wq.lock being held. This spinlock is
- * unlocked while sleeping but @condition testing is done while lock
- * is held and when this macro exits the lock is held.
- *
- * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
- * functions which must match the way they are locked/unlocked outside
- * of this macro.
- *
- * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
- * set thus when other process waits process on the list if this
- * process is awaken further processes are not considered.
- *
- * wake_up_locked() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function will return -ERESTARTSYS if it was interrupted by a
- * signal and 0 if @condition evaluated to true.
- */
-#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
- ((condition) \
- ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
-
-
-#define __wait_event_killable(wq, condition) \
- ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
-
-/**
- * wait_event_killable - sleep until a condition gets true
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- *
- * The process is put to sleep (TASK_KILLABLE) until the
- * @condition evaluates to true or a signal is received.
- * The @condition is checked each time the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * The function will return -ERESTARTSYS if it was interrupted by a
- * signal and 0 if @condition evaluated to true.
- */
-#define wait_event_killable(wq, condition) \
-({ \
- int __ret = 0; \
- might_sleep(); \
- if (!(condition)) \
- __ret = __wait_event_killable(wq, condition); \
- __ret; \
-})
-
-
-#define __wait_event_lock_irq(wq, condition, lock, cmd) \
- (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
- spin_unlock_irq(&lock); \
- cmd; \
- schedule(); \
- spin_lock_irq(&lock))
-
-/**
- * wait_event_lock_irq_cmd - sleep until a condition gets true. The
- * condition is checked under the lock. This
- * is expected to be called with the lock
- * taken.
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @lock: a locked spinlock_t, which will be released before cmd
- * and schedule() and reacquired afterwards.
- * @cmd: a command which is invoked outside the critical section before
- * sleep
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * This is supposed to be called while holding the lock. The lock is
- * dropped before invoking the cmd and going to sleep and is reacquired
- * afterwards.
- */
-#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
-do { \
- if (condition) \
- break; \
- __wait_event_lock_irq(wq, condition, lock, cmd); \
-} while (0)
-
-/**
- * wait_event_lock_irq - sleep until a condition gets true. The
- * condition is checked under the lock. This
- * is expected to be called with the lock
- * taken.
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @lock: a locked spinlock_t, which will be released before schedule()
- * and reacquired afterwards.
- *
- * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
- * @condition evaluates to true. The @condition is checked each time
- * the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * This is supposed to be called while holding the lock. The lock is
- * dropped before going to sleep and is reacquired afterwards.
- */
-#define wait_event_lock_irq(wq, condition, lock) \
-do { \
- if (condition) \
- break; \
- __wait_event_lock_irq(wq, condition, lock, ); \
-} while (0)
-
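
A small usage sketch of wait_event_lock_irq(), assuming a producer/consumer list protected by a spinlock (all names here are illustrative, not from this header):

static DEFINE_SPINLOCK(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static LIST_HEAD(demo_list);

static void demo_consume(void)
{
	spin_lock_irq(&demo_lock);
	/* demo_lock is dropped across schedule() and re-taken for the check */
	wait_event_lock_irq(demo_wq, !list_empty(&demo_list), demo_lock);
	/* demo_list is known non-empty here, still under demo_lock */
	spin_unlock_irq(&demo_lock);
}
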
-
-#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
- ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
- spin_unlock_irq(&lock); \
- cmd; \
- schedule(); \
- spin_lock_irq(&lock))
-
-/**
- * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
- * The condition is checked under the lock. This is expected to
- * be called with the lock taken.
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @lock: a locked spinlock_t, which will be released before cmd and
- * schedule() and reacquired afterwards.
- * @cmd: a command which is invoked outside the critical section before
- * sleep
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true or a signal is received. The @condition is
- * checked each time the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * This is supposed to be called while holding the lock. The lock is
- * dropped before invoking the cmd and going to sleep and is reacquired
- * afterwards.
- *
- * The macro will return -ERESTARTSYS if it was interrupted by a signal
- * and 0 if @condition evaluated to true.
- */
-#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
-({ \
- int __ret = 0; \
- if (!(condition)) \
- __ret = __wait_event_interruptible_lock_irq(wq, \
- condition, lock, cmd); \
- __ret; \
-})
-
-/**
- * wait_event_interruptible_lock_irq - sleep until a condition gets true.
- * The condition is checked under the lock. This is expected
- * to be called with the lock taken.
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @lock: a locked spinlock_t, which will be released before schedule()
- * and reacquired afterwards.
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true or a signal is received. The @condition is
- * checked each time the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * This is supposed to be called while holding the lock. The lock is
- * dropped before going to sleep and is reacquired afterwards.
- *
- * The macro will return -ERESTARTSYS if it was interrupted by a signal
- * and 0 if @condition evaluated to true.
- */
-#define wait_event_interruptible_lock_irq(wq, condition, lock) \
-({ \
- int __ret = 0; \
- if (!(condition)) \
- __ret = __wait_event_interruptible_lock_irq(wq, \
- condition, lock,); \
- __ret; \
-})
-
-#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
- lock, timeout) \
- ___wait_event(wq, ___wait_cond_timeout(condition), \
- TASK_INTERRUPTIBLE, 0, timeout, \
- spin_unlock_irq(&lock); \
- __ret = schedule_timeout(__ret); \
- spin_lock_irq(&lock));
-
-/**
- * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
- * true or a timeout elapses. The condition is checked under
- * the lock. This is expected to be called with the lock taken.
- * @wq: the waitqueue to wait on
- * @condition: a C expression for the event to wait for
- * @lock: a locked spinlock_t, which will be released before schedule()
- * and reacquired afterwards.
- * @timeout: timeout, in jiffies
- *
- * The process is put to sleep (TASK_INTERRUPTIBLE) until the
- * @condition evaluates to true or a signal is received. The @condition is
- * checked each time the waitqueue @wq is woken up.
- *
- * wake_up() has to be called after changing any variable that could
- * change the result of the wait condition.
- *
- * This is supposed to be called while holding the lock. The lock is
- * dropped before going to sleep and is reacquired afterwards.
- *
- * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
- * was interrupted by a signal, and the remaining jiffies otherwise
- * if the condition evaluated to true before the timeout elapsed.
- */
-#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
- timeout) \
-({ \
- long __ret = timeout; \
- if (!___wait_cond_timeout(condition)) \
- __ret = __wait_event_interruptible_lock_irq_timeout( \
- wq, condition, lock, timeout); \
- __ret; \
-})
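
A hedged sketch of handling the three possible outcomes of the timeout variant (lock, waitqueue, and condition are placeholders):

static DEFINE_SPINLOCK(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_ready;

static int demo_wait_timeout(void)
{
	long ret;

	spin_lock_irq(&demo_lock);
	ret = wait_event_interruptible_lock_irq_timeout(demo_wq, demo_ready,
							demo_lock, HZ);
	spin_unlock_irq(&demo_lock);

	if (!ret)		/* timeout elapsed, condition still false */
		return -ETIMEDOUT;
	if (ret < 0)		/* -ERESTARTSYS: interrupted by a signal */
		return ret;
	return 0;		/* condition true, ret jiffies remained */
}
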
-
-/*
- * Waitqueues which are removed from the waitqueue_head at wakeup time
- */
-void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
-void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
-long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
-void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
-void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
-long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
-int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
-int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
-int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
-
-#define DEFINE_WAIT_FUNC(name, function) \
- wait_queue_t name = { \
- .private = current, \
- .func = function, \
- .task_list = LIST_HEAD_INIT((name).task_list), \
- }
-
-#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
-
-#define DEFINE_WAIT_BIT(name, word, bit) \
- struct wait_bit_queue name = { \
- .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
- .wait = { \
- .private = current, \
- .func = wake_bit_function, \
- .task_list = \
- LIST_HEAD_INIT((name).wait.task_list), \
- }, \
- }
-
-#define init_wait(wait) \
- do { \
- (wait)->private = current; \
- (wait)->func = autoremove_wake_function; \
- INIT_LIST_HEAD(&(wait)->task_list); \
- (wait)->flags = 0; \
- } while (0)
-
-
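
These helpers back the wait_event_*() macros and can also be used to open-code a wait loop; a rough sketch of the canonical pattern (waitqueue and condition invented for the example):

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static bool demo_cond;

static void demo_wait_open_coded(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&demo_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (demo_cond)
			break;
		schedule();
	}
	finish_wait(&demo_wq, &wait);
}
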
-extern int bit_wait(struct wait_bit_key *, int);
-extern int bit_wait_io(struct wait_bit_key *, int);
-extern int bit_wait_timeout(struct wait_bit_key *, int);
-extern int bit_wait_io_timeout(struct wait_bit_key *, int);
+void wake_up_bit(void *, int);
+void __wait_on_bit(void *, int, unsigned);
+void __wait_on_bit_lock(void *, int, unsigned);
-/**
- * wait_on_bit - wait for a bit to be cleared
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @mode: the task state to sleep in
- *
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that waits on a bit.
- * For instance, if one were to have waiters on a bitflag, one would
- * call wait_on_bit() in the threads waiting for the bit to clear;
- * it is used where one waits for the bit to clear but has no
- * intention of setting it.
- * Returned value will be zero if the bit was cleared, or non-zero
- * if the process received a signal and the mode permitted wakeup
- * on that signal.
- */
static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
- might_sleep();
if (!test_bit(bit, word))
return 0;
- return out_of_line_wait_on_bit(word, bit,
- bit_wait,
- mode);
+ __wait_on_bit(word, bit, mode);
+ return 0;
}
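
A caller-side sketch of the simplified shim above (the flags word and bit number are hypothetical):

#define DEMO_BIT_BUSY	0			/* hypothetical flag bit */
static unsigned long demo_flags;

static void demo_wait_not_busy(void)
{
	/* with this shim the return value is always 0, so it can be ignored */
	wait_on_bit(&demo_flags, DEMO_BIT_BUSY, TASK_UNINTERRUPTIBLE);
}
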
-/**
- * wait_on_bit_io - wait for a bit to be cleared
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @mode: the task state to sleep in
- *
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared. This is similar to wait_on_bit(), but calls
- * io_schedule() instead of schedule() for the actual waiting.
- *
- * Returned value will be zero if the bit was cleared, or non-zero
- * if the process received a signal and the mode permitted wakeup
- * on that signal.
- */
-static inline int
-wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
-{
- might_sleep();
- if (!test_bit(bit, word))
- return 0;
- return out_of_line_wait_on_bit(word, bit,
- bit_wait_io,
- mode);
-}
-
-/**
- * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @mode: the task state to sleep in
- * @timeout: timeout, in jiffies
- *
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared. This is similar to wait_on_bit(), except also takes a
- * timeout parameter.
- *
- * Returned value will be zero if the bit was cleared before the
- * @timeout elapsed, or non-zero if the @timeout elapsed or process
- * received a signal and the mode permitted wakeup on that signal.
- */
-static inline int
-wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
- unsigned long timeout)
-{
- might_sleep();
- if (!test_bit(bit, word))
- return 0;
- return out_of_line_wait_on_bit_timeout(word, bit,
- bit_wait_timeout,
- mode, timeout);
-}
-
-/**
- * wait_on_bit_action - wait for a bit to be cleared
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @action: the function used to sleep, which may take special actions
- * @mode: the task state to sleep in
- *
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared, and allow the waiting action to be specified.
- * This is like wait_on_bit() but allows fine control of how the waiting
- * is done.
- *
- * Returned value will be zero if the bit was cleared, or non-zero
- * if the process received a signal and the mode permitted wakeup
- * on that signal.
- */
-static inline int
-wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
- unsigned mode)
-{
- might_sleep();
- if (!test_bit(bit, word))
- return 0;
- return out_of_line_wait_on_bit(word, bit, action, mode);
-}
-
-/**
- * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @mode: the task state to sleep in
- *
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that waits on a bit
- * when one intends to set it, for instance when trying to lock a bitflag.
- * If one has waiters trying to set a bitflag, each waiting for it to
- * clear before setting it, one would call wait_on_bit_lock() in the
- * threads waiting to be able to set the bit.
- * One uses wait_on_bit_lock() where one is waiting for the bit to
- * clear with the intention of setting it, and when done, clearing it.
- *
- * Returns zero if the bit was (eventually) found to be clear and was
- * set. Returns non-zero if a signal was delivered to the process and
- * the @mode allows that signal to wake the process.
- */
static inline int
wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
- might_sleep();
if (!test_and_set_bit(bit, word))
return 0;
- return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
+ __wait_on_bit_lock(word, bit, mode);
+ return 0;
}
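
And a hedged sketch of how the lock side pairs with wake_up_bit() declared above (the bit name and state word are illustrative):

#define DEMO_BIT_LOCK	0			/* illustrative lock bit */
static unsigned long demo_state;

static void demo_lock_bit(void)
{
	/* sleeps until this caller manages to atomically set the bit */
	wait_on_bit_lock(&demo_state, DEMO_BIT_LOCK, TASK_UNINTERRUPTIBLE);
}

static void demo_unlock_bit(void)
{
	clear_bit(DEMO_BIT_LOCK, &demo_state);
	smp_mb__after_atomic();		/* order the clear before the wakeup */
	wake_up_bit(&demo_state, DEMO_BIT_LOCK);
}
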
-/**
- * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @mode: the task state to sleep in
- *
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared and then to atomically set it. This is similar
- * to wait_on_bit(), but calls io_schedule() instead of schedule()
- * for the actual waiting.
- *
- * Returns zero if the bit was (eventually) found to be clear and was
- * set. Returns non-zero if a signal was delivered to the process and
- * the @mode allows that signal to wake the process.
- */
-static inline int
-wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
-{
- might_sleep();
- if (!test_and_set_bit(bit, word))
- return 0;
- return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
-}
-
-/**
- * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- * @action: the function used to sleep, which may take special actions
- * @mode: the task state to sleep in
- *
- * Use the standard hashed waitqueue table to wait for a bit
- * to be cleared and then to set it, and allow the waiting action
- * to be specified.
- * This is like wait_on_bit() but allows fine control of how the waiting
- * is done.
- *
- * Returns zero if the bit was (eventually) found to be clear and was
- * set. Returns non-zero if a signal was delivered to the process and
- * the @mode allows that signal to wake the process.
- */
-static inline int
-wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
- unsigned mode)
-{
- might_sleep();
- if (!test_and_set_bit(bit, word))
- return 0;
- return out_of_line_wait_on_bit_lock(word, bit, action, mode);
-}
-
-/**
- * wait_on_atomic_t - Wait for an atomic_t to become 0
- * @val: The atomic value being waited on, a kernel virtual address
- * @action: the function used to sleep, which may take special actions
- * @mode: the task state to sleep in
- *
- * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
- * the purpose of getting a waitqueue, but we set the key to a bit number
- * outside of the target 'word'.
- */
-static inline
-int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
-{
- might_sleep();
- if (atomic_read(val) == 0)
- return 0;
- return out_of_line_wait_on_atomic_t(val, action, mode);
-}
+#define wait_on_bit_io(w, b, m) wait_on_bit(w, b, m)
+#define wait_on_bit_lock_io(w, b, m) wait_on_bit_lock(w, b, m)
#endif /* _LINUX_WAIT_H */
diff --git a/include/linux/zconf.h b/include/linux/zconf.h
deleted file mode 100644
index 0beb75e3..00000000
--- a/include/linux/zconf.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* zconf.h -- configuration of the zlib compression library
- * Copyright (C) 1995-1998 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* @(#) $Id$ */
-
-#ifndef _ZCONF_H
-#define _ZCONF_H
-
-/* The memory requirements for deflate are (in bytes):
- (1 << (windowBits+2)) + (1 << (memLevel+9))
- that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
- plus a few kilobytes for small objects. For example, if you want to reduce
- the default memory requirements from 256K to 128K, compile with
- make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
- Of course this will generally degrade compression (there's no free lunch).
-
- The memory requirements for inflate are (in bytes) 1 << windowBits
- that is, 32K for windowBits=15 (default value) plus a few kilobytes
- for small objects.
-*/
-
-/* Maximum value for memLevel in deflateInit2 */
-#ifndef MAX_MEM_LEVEL
-# define MAX_MEM_LEVEL 8
-#endif
-
-/* Maximum value for windowBits in deflateInit2 and inflateInit2.
- * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files
- * created by gzip. (Files created by minigzip can still be extracted by
- * gzip.)
- */
-#ifndef MAX_WBITS
-# define MAX_WBITS 15 /* 32K LZ77 window */
-#endif
-
-/* default windowBits for decompression. MAX_WBITS is for compression only */
-#ifndef DEF_WBITS
-# define DEF_WBITS MAX_WBITS
-#endif
-
-/* default memLevel */
-#if MAX_MEM_LEVEL >= 8
-# define DEF_MEM_LEVEL 8
-#else
-# define DEF_MEM_LEVEL MAX_MEM_LEVEL
-#endif
-
- /* Type declarations */
-
-typedef unsigned char Byte; /* 8 bits */
-typedef unsigned int uInt; /* 16 bits or more */
-typedef unsigned long uLong; /* 32 bits or more */
-typedef void *voidp;
-
-#endif /* _ZCONF_H */
diff --git a/include/linux/zlib.h b/include/linux/zlib.h
index 92dbbd3f..45cfbd87 100644
--- a/include/linux/zlib.h
+++ b/include/linux/zlib.h
@@ -1,593 +1,18 @@
-/* zlib.h -- interface of the 'zlib' general purpose compression library
-
- Copyright (C) 1995-2005 Jean-loup Gailly and Mark Adler
-
- This software is provided 'as-is', without any express or implied
- warranty. In no event will the authors be held liable for any damages
- arising from the use of this software.
-
- Permission is granted to anyone to use this software for any purpose,
- including commercial applications, and to alter it and redistribute it
- freely, subject to the following restrictions:
-
- 1. The origin of this software must not be misrepresented; you must not
- claim that you wrote the original software. If you use this software
- in a product, an acknowledgment in the product documentation would be
- appreciated but is not required.
- 2. Altered source versions must be plainly marked as such, and must not be
- misrepresented as being the original software.
- 3. This notice may not be removed or altered from any source distribution.
-
- Jean-loup Gailly Mark Adler
- jloup@gzip.org madler@alumni.caltech.edu
-
-
- The data format used by the zlib library is described by RFCs (Request for
- Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt
- (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
-*/
-
#ifndef _ZLIB_H
#define _ZLIB_H
-#include <linux/zconf.h>
-
-/* zlib deflate based on ZLIB_VERSION "1.1.3" */
-/* zlib inflate based on ZLIB_VERSION "1.2.3" */
-
-/*
- This is a modified version of zlib for use inside the Linux kernel.
- The main changes are to perform all memory allocation in advance.
-
- Inflation Changes:
- * Z_PACKET_FLUSH is added and used by ppp_deflate. Before returning
- this checks there is no more input data available and the next data
- is a STORED block. It also resets the mode to be read for the next
- data, all as per PPP requirements.
- * Addition of zlib_inflateIncomp which copies incompressible data into
-    the history window and adjusts the accounting without calling
- zlib_inflate itself to inflate the data.
-*/
-
-/*
- The 'zlib' compression library provides in-memory compression and
- decompression functions, including integrity checks of the uncompressed
- data. This version of the library supports only one compression method
- (deflation) but other algorithms will be added later and will have the same
- stream interface.
-
- Compression can be done in a single step if the buffers are large
- enough (for example if an input file is mmap'ed), or can be done by
- repeated calls of the compression function. In the latter case, the
- application must provide more input and/or consume the output
- (providing more output space) before each call.
-
- The compressed data format used by default by the in-memory functions is
- the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped
- around a deflate stream, which is itself documented in RFC 1951.
-
- The library also supports reading and writing files in gzip (.gz) format
- with an interface similar to that of stdio.
-
- The zlib format was designed to be compact and fast for use in memory
- and on communications channels. The gzip format was designed for single-
- file compression on file systems, has a larger header than zlib to maintain
- directory information, and uses a different, slower check method than zlib.
-
- The library does not install any signal handler. The decoder checks
- the consistency of the compressed data, so the library should never
- crash even in case of corrupted input.
-*/
-
-struct internal_state;
-
-typedef struct z_stream_s {
- const Byte *next_in; /* next input byte */
- uLong avail_in; /* number of bytes available at next_in */
- uLong total_in; /* total nb of input bytes read so far */
-
- Byte *next_out; /* next output byte should be put there */
- uLong avail_out; /* remaining free space at next_out */
- uLong total_out; /* total nb of bytes output so far */
-
- char *msg; /* last error message, NULL if no error */
- struct internal_state *state; /* not visible by applications */
-
- void *workspace; /* memory allocated for this stream */
-
- int data_type; /* best guess about the data type: ascii or binary */
- uLong adler; /* adler32 value of the uncompressed data */
- uLong reserved; /* reserved for future use */
-} z_stream;
-
-typedef z_stream *z_streamp;
-
-/*
- The application must update next_in and avail_in when avail_in has
- dropped to zero. It must update next_out and avail_out when avail_out
- has dropped to zero. The application must initialize zalloc, zfree and
- opaque before calling the init function. All other fields are set by the
- compression library and must not be updated by the application.
-
- The opaque value provided by the application will be passed as the first
- parameter for calls of zalloc and zfree. This can be useful for custom
- memory management. The compression library attaches no meaning to the
- opaque value.
-
- zalloc must return NULL if there is not enough memory for the object.
- If zlib is used in a multi-threaded application, zalloc and zfree must be
- thread safe.
-
- On 16-bit systems, the functions zalloc and zfree must be able to allocate
- exactly 65536 bytes, but will not be required to allocate more than this
- if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
- pointers returned by zalloc for objects of exactly 65536 bytes *must*
- have their offset normalized to zero. The default allocation function
- provided by this library ensures this (see zutil.c). To reduce memory
- requirements and avoid any allocation of 64K objects, at the expense of
- compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
-
- The fields total_in and total_out can be used for statistics or
- progress reports. After compression, total_in holds the total size of
- the uncompressed data and may be saved for use in the decompressor
- (particularly if the decompressor wants to decompress everything in
- a single step).
-*/
-
- /* constants */
-
-#define Z_NO_FLUSH 0
-#define Z_PARTIAL_FLUSH 1 /* will be removed, use Z_SYNC_FLUSH instead */
-#define Z_PACKET_FLUSH 2
-#define Z_SYNC_FLUSH 3
-#define Z_FULL_FLUSH 4
-#define Z_FINISH 5
-#define Z_BLOCK 6 /* Only for inflate at present */
-/* Allowed flush values; see deflate() and inflate() below for details */
-
-#define Z_OK 0
-#define Z_STREAM_END 1
-#define Z_NEED_DICT 2
-#define Z_ERRNO (-1)
-#define Z_STREAM_ERROR (-2)
-#define Z_DATA_ERROR (-3)
-#define Z_MEM_ERROR (-4)
-#define Z_BUF_ERROR (-5)
-#define Z_VERSION_ERROR (-6)
-/* Return codes for the compression/decompression functions. Negative
- * values are errors, positive values are used for special but normal events.
- */
-
-#define Z_NO_COMPRESSION 0
-#define Z_BEST_SPEED 1
-#define Z_BEST_COMPRESSION 9
-#define Z_DEFAULT_COMPRESSION (-1)
-/* compression levels */
-
-#define Z_FILTERED 1
-#define Z_HUFFMAN_ONLY 2
-#define Z_DEFAULT_STRATEGY 0
-/* compression strategy; see deflateInit2() below for details */
-
-#define Z_BINARY 0
-#define Z_ASCII 1
-#define Z_UNKNOWN 2
-/* Possible values of the data_type field */
-
-#define Z_DEFLATED 8
-/* The deflate compression method (the only one supported in this version) */
-
- /* basic functions */
-
-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
-/*
-   Returns the number of bytes that need to be allocated for a per-
- stream workspace with the specified parameters. A pointer to this
- number of bytes should be returned in stream->workspace before
- you call zlib_deflateInit() or zlib_deflateInit2(). If you call
- zlib_deflateInit(), specify windowBits = MAX_WBITS and memLevel =
- MAX_MEM_LEVEL here. If you call zlib_deflateInit2(), the windowBits
- and memLevel parameters passed to zlib_deflateInit2() must not
- exceed those passed here.
-*/
-
-/*
-extern int deflateInit (z_streamp strm, int level);
-
- Initializes the internal stream state for compression. The fields
- zalloc, zfree and opaque must be initialized before by the caller.
- If zalloc and zfree are set to NULL, deflateInit updates them to
- use default allocation functions.
-
- The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
- 1 gives best speed, 9 gives best compression, 0 gives no compression at
- all (the input data is simply copied a block at a time).
- Z_DEFAULT_COMPRESSION requests a default compromise between speed and
- compression (currently equivalent to level 6).
-
- deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_STREAM_ERROR if level is not a valid compression level,
- Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
- with the version assumed by the caller (ZLIB_VERSION).
- msg is set to null if there is no error message. deflateInit does not
- perform any compression: this will be done by deflate().
-*/
-
-
-extern int zlib_deflate (z_streamp strm, int flush);
-/*
- deflate compresses as much data as possible, and stops when the input
- buffer becomes empty or the output buffer becomes full. It may introduce some
- output latency (reading input without producing any output) except when
- forced to flush.
-
- The detailed semantics are as follows. deflate performs one or both of the
- following actions:
-
- - Compress more input starting at next_in and update next_in and avail_in
- accordingly. If not all input can be processed (because there is not
- enough room in the output buffer), next_in and avail_in are updated and
- processing will resume at this point for the next call of deflate().
-
- - Provide more output starting at next_out and update next_out and avail_out
- accordingly. This action is forced if the parameter flush is non zero.
- Forcing flush frequently degrades the compression ratio, so this parameter
- should be set only when necessary (in interactive applications).
- Some output may be provided even if flush is not set.
-
- Before the call of deflate(), the application should ensure that at least
- one of the actions is possible, by providing more input and/or consuming
- more output, and updating avail_in or avail_out accordingly; avail_out
- should never be zero before the call. The application can consume the
- compressed output when it wants, for example when the output buffer is full
- (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK
- and with zero avail_out, it must be called again after making room in the
- output buffer because there might be more output pending.
-
- If the parameter flush is set to Z_SYNC_FLUSH, all pending output is
- flushed to the output buffer and the output is aligned on a byte boundary, so
- that the decompressor can get all input data available so far. (In particular
- avail_in is zero after the call if enough output space has been provided
- before the call.) Flushing may degrade compression for some compression
- algorithms and so it should be used only when necessary.
-
- If flush is set to Z_FULL_FLUSH, all output is flushed as with
- Z_SYNC_FLUSH, and the compression state is reset so that decompression can
- restart from this point if previous compressed data has been damaged or if
- random access is desired. Using Z_FULL_FLUSH too often can seriously degrade
- the compression.
-
- If deflate returns with avail_out == 0, this function must be called again
- with the same value of the flush parameter and more output space (updated
- avail_out), until the flush is complete (deflate returns with non-zero
- avail_out).
-
- If the parameter flush is set to Z_FINISH, pending input is processed,
- pending output is flushed and deflate returns with Z_STREAM_END if there
- was enough output space; if deflate returns with Z_OK, this function must be
- called again with Z_FINISH and more output space (updated avail_out) but no
- more input data, until it returns with Z_STREAM_END or an error. After
- deflate has returned Z_STREAM_END, the only possible operations on the
- stream are deflateReset or deflateEnd.
-
- Z_FINISH can be used immediately after deflateInit if all the compression
- is to be done in a single step. In this case, avail_out must be at least
- 0.1% larger than avail_in plus 12 bytes. If deflate does not return
- Z_STREAM_END, then it must be called again as described above.
-
- deflate() sets strm->adler to the adler32 checksum of all input read
- so far (that is, total_in bytes).
-
- deflate() may update data_type if it can make a good guess about
- the input data type (Z_ASCII or Z_BINARY). In doubt, the data is considered
- binary. This field is only for information purposes and does not affect
- the compression algorithm in any manner.
-
- deflate() returns Z_OK if some progress has been made (more input
- processed or more output produced), Z_STREAM_END if all input has been
- consumed and all output has been produced (only when flush is set to
- Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
- if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible
- (for example avail_in or avail_out was zero).
-*/
-
-
-extern int zlib_deflateEnd (z_streamp strm);
-/*
- All dynamically allocated data structures for this stream are freed.
- This function discards any unprocessed input and does not flush any
- pending output.
-
- deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
- stream state was inconsistent, Z_DATA_ERROR if the stream was freed
- prematurely (some input or output was discarded). In the error case,
- msg may be set but then points to a static string (which must not be
- deallocated).
-*/
-
-
-extern int zlib_inflate_workspacesize (void);
-/*
-   Returns the number of bytes that need to be allocated for a per-
- stream workspace. A pointer to this number of bytes should be
- returned in stream->workspace before calling zlib_inflateInit().
-*/
-
-/*
-extern int zlib_inflateInit (z_streamp strm);
-
- Initializes the internal stream state for decompression. The fields
- next_in, avail_in, and workspace must be initialized before by
- the caller. If next_in is not NULL and avail_in is large enough (the exact
- value depends on the compression method), inflateInit determines the
- compression method from the zlib header and allocates all data structures
- accordingly; otherwise the allocation will be deferred to the first call of
- inflate. If zalloc and zfree are set to NULL, inflateInit updates them to
- use default allocation functions.
-
- inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough
- memory, Z_VERSION_ERROR if the zlib library version is incompatible with the
- version assumed by the caller. msg is set to null if there is no error
- message. inflateInit does not perform any decompression apart from reading
- the zlib header if present: this will be done by inflate(). (So next_in and
- avail_in may be modified, but next_out and avail_out are unchanged.)
-*/
-
-
-extern int zlib_inflate (z_streamp strm, int flush);
-/*
- inflate decompresses as much data as possible, and stops when the input
- buffer becomes empty or the output buffer becomes full. It may introduce
- some output latency (reading input without producing any output) except when
- forced to flush.
-
- The detailed semantics are as follows. inflate performs one or both of the
- following actions:
-
- - Decompress more input starting at next_in and update next_in and avail_in
- accordingly. If not all input can be processed (because there is not
- enough room in the output buffer), next_in is updated and processing
- will resume at this point for the next call of inflate().
-
- - Provide more output starting at next_out and update next_out and avail_out
- accordingly. inflate() provides as much output as possible, until there
- is no more input data or no more space in the output buffer (see below
- about the flush parameter).
-
- Before the call of inflate(), the application should ensure that at least
- one of the actions is possible, by providing more input and/or consuming
- more output, and updating the next_* and avail_* values accordingly.
- The application can consume the uncompressed output when it wants, for
- example when the output buffer is full (avail_out == 0), or after each
- call of inflate(). If inflate returns Z_OK and with zero avail_out, it
- must be called again after making room in the output buffer because there
- might be more output pending.
-
- The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH,
- Z_FINISH, or Z_BLOCK. Z_SYNC_FLUSH requests that inflate() flush as much
- output as possible to the output buffer. Z_BLOCK requests that inflate() stop
- if and when it gets to the next deflate block boundary. When decoding the
- zlib or gzip format, this will cause inflate() to return immediately after
- the header and before the first block. When doing a raw inflate, inflate()
- will go ahead and process the first block, and will return when it gets to
- the end of that block, or when it runs out of data.
-
- The Z_BLOCK option assists in appending to or combining deflate streams.
- Also to assist in this, on return inflate() will set strm->data_type to the
- number of unused bits in the last byte taken from strm->next_in, plus 64
- if inflate() is currently decoding the last block in the deflate stream,
- plus 128 if inflate() returned immediately after decoding an end-of-block
- code or decoding the complete header up to just before the first byte of the
- deflate stream. The end-of-block will not be indicated until all of the
- uncompressed data from that block has been written to strm->next_out. The
- number of unused bits may in general be greater than seven, except when
- bit 7 of data_type is set, in which case the number of unused bits will be
- less than eight.
-
- inflate() should normally be called until it returns Z_STREAM_END or an
- error. However if all decompression is to be performed in a single step
- (a single call of inflate), the parameter flush should be set to
- Z_FINISH. In this case all pending input is processed and all pending
- output is flushed; avail_out must be large enough to hold all the
- uncompressed data. (The size of the uncompressed data may have been saved
- by the compressor for this purpose.) The next operation on this stream must
- be inflateEnd to deallocate the decompression state. The use of Z_FINISH
- is never required, but can be used to inform inflate that a faster approach
- may be used for the single inflate() call.
-
- In this implementation, inflate() always flushes as much output as
- possible to the output buffer, and always uses the faster approach on the
- first call. So the only effect of the flush parameter in this implementation
- is on the return value of inflate(), as noted below, or when it returns early
- because Z_BLOCK is used.
-
- If a preset dictionary is needed after this call (see inflateSetDictionary
- below), inflate sets strm->adler to the adler32 checksum of the dictionary
- chosen by the compressor and returns Z_NEED_DICT; otherwise it sets
- strm->adler to the adler32 checksum of all output produced so far (that is,
- total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described
- below. At the end of the stream, inflate() checks that its computed adler32
- checksum is equal to that saved by the compressor and returns Z_STREAM_END
- only if the checksum is correct.
-
- inflate() will decompress and check either zlib-wrapped or gzip-wrapped
- deflate data. The header type is detected automatically. Any information
- contained in the gzip header is not retained, so applications that need that
- information should instead use raw inflate, see inflateInit2() below, or
- inflateBack() and perform their own processing of the gzip header and
- trailer.
-
- inflate() returns Z_OK if some progress has been made (more input processed
- or more output produced), Z_STREAM_END if the end of the compressed data has
- been reached and all uncompressed output has been produced, Z_NEED_DICT if a
- preset dictionary is needed at this point, Z_DATA_ERROR if the input data was
- corrupted (input stream not conforming to the zlib format or incorrect check
- value), Z_STREAM_ERROR if the stream structure was inconsistent (for example
- if next_in or next_out was NULL), Z_MEM_ERROR if there was not enough memory,
- Z_BUF_ERROR if no progress is possible or if there was not enough room in the
- output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and
- inflate() can be called again with more input and more output space to
- continue decompressing. If Z_DATA_ERROR is returned, the application may then
- call inflateSync() to look for a good compression block if a partial recovery
- of the data is desired.
-*/
-
-
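
For illustration, a rough caller-side sketch of the inflate() calling convention described above; the stream is assumed to be initialized and fed by the caller, and all names are placeholders:

static int demo_inflate_chunk(z_stream *strm, Byte *out, uLong out_len)
{
	int ret;

	strm->next_out	= out;
	strm->avail_out	= out_len;

	do {
		ret = zlib_inflate(strm, Z_NO_FLUSH);
	} while (ret == Z_OK && strm->avail_in && strm->avail_out);

	return ret;	/* Z_STREAM_END when done, negative on error */
}
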
-extern int zlib_inflateEnd (z_streamp strm);
-/*
- All dynamically allocated data structures for this stream are freed.
- This function discards any unprocessed input and does not flush any
- pending output.
-
- inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
- was inconsistent. In the error case, msg may be set but then points to a
- static string (which must not be deallocated).
-*/
-
- /* Advanced functions */
-
-/*
- The following functions are needed only in some special applications.
-*/
-
-/*
-extern int deflateInit2 (z_streamp strm,
- int level,
- int method,
- int windowBits,
- int memLevel,
- int strategy);
-
- This is another version of deflateInit with more compression options. The
- fields next_in, zalloc, zfree and opaque must be initialized before by
- the caller.
-
- The method parameter is the compression method. It must be Z_DEFLATED in
- this version of the library.
-
- The windowBits parameter is the base two logarithm of the window size
- (the size of the history buffer). It should be in the range 8..15 for this
- version of the library. Larger values of this parameter result in better
- compression at the expense of memory usage. The default value is 15 if
- deflateInit is used instead.
-
- The memLevel parameter specifies how much memory should be allocated
- for the internal compression state. memLevel=1 uses minimum memory but
- is slow and reduces compression ratio; memLevel=9 uses maximum memory
- for optimal speed. The default value is 8. See zconf.h for total memory
- usage as a function of windowBits and memLevel.
-
- The strategy parameter is used to tune the compression algorithm. Use the
- value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
- filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no
- string match). Filtered data consists mostly of small values with a
- somewhat random distribution. In this case, the compression algorithm is
- tuned to compress them better. The effect of Z_FILTERED is to force more
- Huffman coding and less string matching; it is somewhat intermediate
- between Z_DEFAULT and Z_HUFFMAN_ONLY. The strategy parameter only affects
- the compression ratio but not the correctness of the compressed output even
- if it is not set appropriately.
-
- deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
- memory, Z_STREAM_ERROR if a parameter is invalid (such as an invalid
- method). msg is set to null if there is no error message. deflateInit2 does
- not perform any compression: this will be done by deflate().
-*/
-
-extern int zlib_deflateReset (z_streamp strm);
-/*
- This function is equivalent to deflateEnd followed by deflateInit,
- but does not free and reallocate all the internal compression state.
- The stream will keep the same compression level and any other attributes
- that may have been set by deflateInit2.
-
- deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
- stream state was inconsistent (such as zalloc or state being NULL).
-*/
-
-static inline unsigned long deflateBound(unsigned long s)
-{
- return s + ((s + 7) >> 3) + ((s + 63) >> 6) + 11;
-}
-
-/*
-extern int inflateInit2 (z_streamp strm, int windowBits);
-
- This is another version of inflateInit with an extra parameter. The
- fields next_in, avail_in, zalloc, zfree and opaque must be initialized
- before by the caller.
-
- The windowBits parameter is the base two logarithm of the maximum window
- size (the size of the history buffer). It should be in the range 8..15 for
- this version of the library. The default value is 15 if inflateInit is used
- instead. windowBits must be greater than or equal to the windowBits value
- provided to deflateInit2() while compressing, or it must be equal to 15 if
- deflateInit2() was not used. If a compressed stream with a larger window
- size is given as input, inflate() will return with the error code
- Z_DATA_ERROR instead of trying to allocate a larger window.
-
- windowBits can also be -8..-15 for raw inflate. In this case, -windowBits
- determines the window size. inflate() will then process raw deflate data,
- not looking for a zlib or gzip header, not generating a check value, and not
- looking for any check values for comparison at the end of the stream. This
- is for use with other formats that use the deflate compressed data format
- such as zip. Those formats provide their own check values. If a custom
- format is developed using the raw deflate format for compressed data, it is
- recommended that a check value such as an adler32 or a crc32 be applied to
- the uncompressed data as is done in the zlib, gzip, and zip formats. For
- most applications, the zlib format should be used as is. Note that comments
-   above on the use in deflateInit2() apply to the magnitude of windowBits.
-
- windowBits can also be greater than 15 for optional gzip decoding. Add
- 32 to windowBits to enable zlib and gzip decoding with automatic header
- detection, or add 16 to decode only the gzip format (the zlib format will
- return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is
- a crc32 instead of an adler32.
-
- inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
- memory, Z_STREAM_ERROR if a parameter is invalid (such as a null strm). msg
- is set to null if there is no error message. inflateInit2 does not perform
- any decompression apart from reading the zlib header if present: this will
- be done by inflate(). (So next_in and avail_in may be modified, but next_out
- and avail_out are unchanged.)
-*/
-
-extern int zlib_inflateReset (z_streamp strm);
-/*
- This function is equivalent to inflateEnd followed by inflateInit,
- but does not free and reallocate all the internal decompression state.
- The stream will keep attributes that may have been set by inflateInit2.
-
- inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
- stream state was inconsistent (such as zalloc or state being NULL).
-*/
-
-extern int zlib_inflateIncomp (z_stream *strm);
-/*
- This function adds the data at next_in (avail_in bytes) to the output
- history without performing any output. There must be no pending output,
- and the decompressor must be expecting to see the start of a block.
- Calling this function is equivalent to decompressing a stored block
- containing the data at next_in (except that the data is not output).
-*/
+#include <zlib.h>
-#define zlib_deflateInit(strm, level) \
- zlib_deflateInit2((strm), (level), Z_DEFLATED, MAX_WBITS, \
- DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY)
-#define zlib_inflateInit(strm) \
- zlib_inflateInit2((strm), DEF_WBITS)
+#define zlib_inflate_workspacesize() 0
+#define zlib_deflate_workspacesize(windowBits, memLevel) 0
-extern int zlib_deflateInit2(z_streamp strm, int level, int method,
- int windowBits, int memLevel,
- int strategy);
-extern int zlib_inflateInit2(z_streamp strm, int windowBits);
+#define zlib_inflateInit2 inflateInit2
+#define zlib_inflate inflate
-#if !defined(_Z_UTIL_H) && !defined(NO_DUMMY_DECL)
- struct internal_state {int dummy;}; /* hack for buggy compilers */
-#endif
+#define zlib_deflateInit2 deflateInit2
+#define zlib_deflate deflate
+#define zlib_deflateEnd deflateEnd
-/* Utility function: initialize zlib, unpack binary blob, clean up zlib,
- * return len or negative error code. */
-extern int zlib_inflate_blob(void *dst, unsigned dst_sz, const void *src, unsigned src_sz);
+#define DEF_MEM_LEVEL 8
#endif /* _ZLIB_H */
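
With the userspace shim above, the kernel-style zlib_* names now resolve to libz; a minimal single-shot compression sketch under that assumption (the function, buffer handling, and sizing are illustrative only, and dst must be large enough for the whole output):

#include <string.h>

static int demo_compress(const void *src, unsigned src_len,
			 void *dst, unsigned dst_len)
{
	z_stream strm;
	int ret;

	memset(&strm, 0, sizeof(strm));
	if (zlib_deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
			      MAX_WBITS, DEF_MEM_LEVEL,
			      Z_DEFAULT_STRATEGY) != Z_OK)
		return -1;

	strm.next_in	= (void *) src;
	strm.avail_in	= src_len;
	strm.next_out	= dst;
	strm.avail_out	= dst_len;

	ret = zlib_deflate(&strm, Z_FINISH);	/* single-shot: all input at once */
	zlib_deflateEnd(&strm);

	return ret == Z_STREAM_END ? (int) strm.total_out : -1;
}
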
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 01e4b79d..06ce0218 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -348,12 +348,12 @@ DEFINE_EVENT(bcache_bio, bcache_journal_write,
/* Device state changes */
-DEFINE_EVENT(cache_set, bcache_cache_set_read_only,
+DEFINE_EVENT(cache_set, fs_read_only,
TP_PROTO(struct cache_set *c),
TP_ARGS(c)
);
-DEFINE_EVENT(cache_set, bcache_cache_set_read_only_done,
+DEFINE_EVENT(cache_set, fs_read_only_done,
TP_PROTO(struct cache_set *c),
TP_ARGS(c)
);
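
The rename only changes the generated symbol names; a hedged sketch of the call sites this implies (the surrounding code path is hypothetical):

	/* in the filesystem read-only transition path (illustrative) */
	trace_fs_read_only(c);
	/* ... flush and quiesce writes ... */
	trace_fs_read_only_done(c);
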
@@ -896,7 +896,7 @@ DEFINE_EVENT(cache_bucket_alloc, bcache_bucket_alloc_fail,
TP_ARGS(ca, reserve)
);
-DECLARE_EVENT_CLASS(cache_set_bucket_alloc,
+TRACE_EVENT(bcache_freelist_empty_fail,
TP_PROTO(struct cache_set *c, enum alloc_reserve reserve,
struct closure *cl),
TP_ARGS(c, reserve, cl),
@@ -917,12 +917,6 @@ DECLARE_EVENT_CLASS(cache_set_bucket_alloc,
__entry->cl)
);
-DEFINE_EVENT(cache_set_bucket_alloc, bcache_freelist_empty_fail,
- TP_PROTO(struct cache_set *c, enum alloc_reserve reserve,
- struct closure *cl),
- TP_ARGS(c, reserve, cl)
-);
-
DECLARE_EVENT_CLASS(open_bucket_alloc,
TP_PROTO(struct cache_set *c, struct closure *cl),
TP_ARGS(c, cl),