author     Kent Overstreet <kent.overstreet@gmail.com>   2018-05-17 03:14:09 -0400
committer  Kent Overstreet <kent.overstreet@gmail.com>   2018-05-17 07:24:39 -0400
commit     62e4df2a38081f62fd1bd657459b7ffb2d4f522c (patch)
tree       9b4ed5d3c597e19894ca77299b53057efe071c50 /linux
parent     426e88e41cdcecd007a689daf4fe432bb61303ec (diff)
drop dead code
Diffstat (limited to 'linux')
-rw-r--r--  linux/bio.c                        17
-rw-r--r--  linux/crypto/api.c                216
-rw-r--r--  linux/crypto/blkcipher.c           47
-rw-r--r--  linux/crypto/chacha20_generic.c    43
-rw-r--r--  linux/crypto/internal.h            23
-rw-r--r--  linux/crypto/poly1305_generic.c    30
-rw-r--r--  linux/crypto/sha256_generic.c      28
-rw-r--r--  linux/crypto/shash.c               72
-rw-r--r--  linux/rbtree.c                    615
-rw-r--r--  linux/sched.c                      66
-rw-r--r--  linux/semaphore.c                 256
11 files changed, 116 insertions, 1297 deletions
diff --git a/linux/bio.c b/linux/bio.c
index 79f50dc2..c4cdceaa 100644
--- a/linux/bio.c
+++ b/linux/bio.c
@@ -172,23 +172,6 @@ void bio_free_pages(struct bio *bio)
__free_page(bvec->bv_page);
}
-int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
-{
- int i;
- struct bio_vec *bv;
-
- bio_for_each_segment_all(bv, bio, i) {
- bv->bv_page = alloc_page(gfp_mask);
- if (!bv->bv_page) {
- while (--bv >= bio->bi_io_vec)
- __free_page(bv->bv_page);
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
void bio_advance(struct bio *bio, unsigned bytes)
{
bio_advance_iter(bio, &bio->bi_iter, bytes);
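The dropped bio_alloc_pages() is a textbook unwind-on-failure loop: allocate forward, and on the first failure walk backward, freeing everything already allocated. A minimal standalone sketch of the idiom, with plain malloc() standing in for alloc_page():

#include <stdlib.h>

/* Allocate n slots; on failure, unwind from the failure point back to
 * the start, exactly as the deleted bio_alloc_pages() unwound bv. */
static int alloc_all(void **slots, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                slots[i] = malloc(4096);
                if (!slots[i]) {
                        while (--i >= 0)
                                free(slots[i]);
                        return -1;
                }
        }
        return 0;
}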
diff --git a/linux/crypto/api.c b/linux/crypto/api.c
index 63efee30..2f3ab2b5 100644
--- a/linux/crypto/api.c
+++ b/linux/crypto/api.c
@@ -20,217 +20,95 @@
#include <linux/string.h>
#include <crypto/algapi.h>
-#include "internal.h"
static LIST_HEAD(crypto_alg_list);
static DECLARE_RWSEM(crypto_alg_sem);
-static unsigned crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
-{
- return alg->cra_type->ctxsize(alg, type, mask);
-}
+struct crypto_type {
+};
-unsigned crypto_alg_extsize(struct crypto_alg *alg)
+int crypto_register_alg(struct crypto_alg *alg)
{
- return alg->cra_ctxsize;
+ down_write(&crypto_alg_sem);
+ list_add(&alg->cra_list, &crypto_alg_list);
+ up_write(&crypto_alg_sem);
+
+ return 0;
}
-struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
+static void *crypto_alloc_tfm(const char *name,
+ const struct crypto_type *type)
{
struct crypto_alg *alg;
down_read(&crypto_alg_sem);
list_for_each_entry(alg, &crypto_alg_list, cra_list)
- if (!((alg->cra_flags ^ type) & mask) &&
- !strcmp(alg->cra_name, name))
+ if (alg->cra_type == type && !strcmp(alg->cra_name, name))
goto found;
alg = ERR_PTR(-ENOENT);
found:
up_read(&crypto_alg_sem);
- return alg;
-}
+ if (IS_ERR(alg))
+ return ERR_CAST(alg);
-static void crypto_exit_ops(struct crypto_tfm *tfm)
-{
- if (tfm->exit)
- tfm->exit(tfm);
+ return alg->alloc_tfm() ?: ERR_PTR(-ENOMEM);
}
-static struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg,
- u32 type, u32 mask)
-{
- struct crypto_tfm *tfm = NULL;
- unsigned tfm_size;
- int err = -ENOMEM;
-
- tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
- tfm = kzalloc(tfm_size, GFP_KERNEL);
- if (tfm == NULL)
- return ERR_PTR(-ENOMEM);
-
- tfm->__crt_alg = alg;
-
- err = alg->cra_type->init(tfm, type, mask);
- if (err)
- goto out_free_tfm;
-
- if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
- goto cra_init_failed;
-
- return tfm;
+/* skcipher: */
-cra_init_failed:
- crypto_exit_ops(tfm);
-out_free_tfm:
- kfree(tfm);
- return ERR_PTR(err);
-}
+static const struct crypto_type crypto_skcipher_type2 = {
+};
-struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
+struct crypto_skcipher *crypto_alloc_skcipher(const char *name,
+ u32 type, u32 mask)
{
- struct crypto_alg *alg;
- struct crypto_tfm *tfm;
-
- alg = crypto_alg_mod_lookup(alg_name, type, mask);
- if (IS_ERR(alg)) {
- fprintf(stderr, "unknown cipher %s\n", alg_name);
- return ERR_CAST(alg);
- }
-
- tfm = __crypto_alloc_tfm(alg, type, mask);
- if (IS_ERR(tfm))
- return tfm;
-
- return tfm;
+ return crypto_alloc_tfm(name, &crypto_skcipher_type2);
}
-static void *crypto_create_tfm(struct crypto_alg *alg,
- const struct crypto_type *frontend)
+int crypto_register_skcipher(struct skcipher_alg *alg)
{
- struct crypto_tfm *tfm = NULL;
- unsigned tfmsize;
- unsigned total;
- void *mem;
- int err = -ENOMEM;
-
- tfmsize = frontend->tfmsize;
- total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
-
- mem = kzalloc(total, GFP_KERNEL);
- if (!mem)
- goto out_err;
-
- tfm = mem + tfmsize;
- tfm->__crt_alg = alg;
-
- err = frontend->init_tfm(tfm);
- if (err)
- goto out_free_tfm;
-
- if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
- goto cra_init_failed;
-
- goto out;
-
-cra_init_failed:
- crypto_exit_ops(tfm);
-out_free_tfm:
- kfree(mem);
-out_err:
- mem = ERR_PTR(err);
-out:
- return mem;
-}
+ alg->base.cra_type = &crypto_skcipher_type2;
-static struct crypto_alg *crypto_find_alg(const char *alg_name,
- const struct crypto_type *frontend,
- u32 type, u32 mask)
-{
- if (frontend) {
- type &= frontend->maskclear;
- mask &= frontend->maskclear;
- type |= frontend->type;
- mask |= frontend->maskset;
- }
-
- return crypto_alg_mod_lookup(alg_name, type, mask);
+ return crypto_register_alg(&alg->base);
}
-void *crypto_alloc_tfm(const char *alg_name,
- const struct crypto_type *frontend,
- u32 type, u32 mask)
-{
- struct crypto_alg *alg;
- void *tfm;
-
- alg = crypto_find_alg(alg_name, frontend, type, mask);
- if (IS_ERR(alg))
- return ERR_CAST(alg);
-
- tfm = crypto_create_tfm(alg, frontend);
- if (IS_ERR(tfm))
- return tfm;
+/* shash: */
- return tfm;
-}
+#include <crypto/hash.h>
-void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
+static int shash_finup(struct shash_desc *desc, const u8 *data,
+ unsigned len, u8 *out)
{
- struct crypto_alg *alg;
-
- if (unlikely(!mem))
- return;
-
- alg = tfm->__crt_alg;
-
- if (!tfm->exit && alg->cra_exit)
- alg->cra_exit(tfm);
- crypto_exit_ops(tfm);
- kzfree(mem);
+ return crypto_shash_update(desc, data, len) ?:
+ crypto_shash_final(desc, out);
}
-int crypto_register_alg(struct crypto_alg *alg)
+static int shash_digest(struct shash_desc *desc, const u8 *data,
+ unsigned len, u8 *out)
{
- INIT_LIST_HEAD(&alg->cra_users);
-
- down_write(&crypto_alg_sem);
- list_add(&alg->cra_list, &crypto_alg_list);
- up_write(&crypto_alg_sem);
-
- return 0;
+ return crypto_shash_init(desc) ?:
+ crypto_shash_finup(desc, data, len, out);
}
-/* skcipher: */
+static const struct crypto_type crypto_shash_type = {
+};
-static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
+struct crypto_shash *crypto_alloc_shash(const char *name,
+ u32 type, u32 mask)
{
- struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
- struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
-
- skcipher->setkey = alg->setkey;
- skcipher->encrypt = alg->encrypt;
- skcipher->decrypt = alg->decrypt;
- skcipher->ivsize = alg->ivsize;
- skcipher->keysize = alg->max_keysize;
-
- if (alg->init)
- return alg->init(skcipher);
-
- return 0;
+ return crypto_alloc_tfm(name, &crypto_shash_type);
}
-static const struct crypto_type crypto_skcipher_type2 = {
- .extsize = crypto_alg_extsize,
- .init_tfm = crypto_skcipher_init_tfm,
- .maskclear = ~CRYPTO_ALG_TYPE_MASK,
- .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
- .tfmsize = offsetof(struct crypto_skcipher, base),
-};
-
-struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
- u32 type, u32 mask)
+int crypto_register_shash(struct shash_alg *alg)
{
- return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
+ alg->base.cra_type = &crypto_shash_type;
+
+ if (!alg->finup)
+ alg->finup = shash_finup;
+ if (!alg->digest)
+ alg->digest = shash_digest;
+
+ return crypto_register_alg(&alg->base);
}
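The refactored api.c replaces the old ctxsize/frontend machinery with a much smaller scheme: algorithms sit on one global list under an rwsem, lookup matches on (cra_type, cra_name), and each algorithm supplies its own alloc_tfm() constructor. A standalone sketch of that scheme, with pthread_rwlock_t standing in for the kernel rw_semaphore and the simplified struct layouts as assumptions:

#include <pthread.h>
#include <string.h>

struct alg {
        struct alg *next;
        const char *name;
        const void *type;               /* identity tag, like cra_type */
        void *(*alloc_tfm)(void);       /* per-algorithm constructor */
};

static struct alg *alg_list;
static pthread_rwlock_t alg_sem = PTHREAD_RWLOCK_INITIALIZER;

static void register_alg(struct alg *a)
{
        pthread_rwlock_wrlock(&alg_sem);
        a->next = alg_list;
        alg_list = a;
        pthread_rwlock_unlock(&alg_sem);
}

static void *alloc_tfm(const char *name, const void *type)
{
        struct alg *a;
        void *tfm = NULL;

        pthread_rwlock_rdlock(&alg_sem);
        for (a = alg_list; a; a = a->next)
                if (a->type == type && !strcmp(a->name, name)) {
                        tfm = a->alloc_tfm();   /* may return NULL (ENOMEM) */
                        break;
                }
        pthread_rwlock_unlock(&alg_sem);
        return tfm;
}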
diff --git a/linux/crypto/blkcipher.c b/linux/crypto/blkcipher.c
deleted file mode 100644
index 31f91418..00000000
--- a/linux/crypto/blkcipher.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Block chaining cipher operations.
- *
- * Generic encrypt/decrypt wrapper for ciphers, handles operations across
- * multiple page boundaries by using temporary blocks. In user context,
- * the kernel is given a chance to schedule us once per page.
- *
- * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include <crypto/algapi.h>
-#include "internal.h"
-
-static unsigned crypto_blkcipher_ctxsize(struct crypto_alg *alg,
- u32 type, u32 mask)
-{
- return alg->cra_ctxsize;
-}
-
-static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
- struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
- struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
-
- BUG_ON((mask & CRYPTO_ALG_TYPE_MASK) != CRYPTO_ALG_TYPE_MASK);
-
- crt->setkey = alg->setkey;
- crt->encrypt = alg->encrypt;
- crt->decrypt = alg->decrypt;
- return 0;
-}
-
-const struct crypto_type crypto_blkcipher_type = {
- .ctxsize = crypto_blkcipher_ctxsize,
- .init = crypto_init_blkcipher_ops,
-};
diff --git a/linux/crypto/chacha20_generic.c b/linux/crypto/chacha20_generic.c
index df4c0e04..c6f14945 100644
--- a/linux/crypto/chacha20_generic.c
+++ b/linux/crypto/chacha20_generic.c
@@ -22,14 +22,18 @@
#include <sodium/crypto_stream_chacha20.h>
-struct chacha20_ctx {
- u32 key[8];
+static struct skcipher_alg alg;
+
+struct chacha20_tfm {
+ struct crypto_skcipher tfm;
+ u32 key[8];
};
static int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keysize)
{
- struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha20_tfm *ctx =
+ container_of(tfm, struct chacha20_tfm, tfm);
int i;
if (keysize != CHACHA20_KEY_SIZE)
@@ -43,8 +47,8 @@ static int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int crypto_chacha20_crypt(struct skcipher_request *req)
{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha20_tfm *ctx =
+ container_of(req->tfm, struct chacha20_tfm, tfm.base);
struct scatterlist *sg = req->src;
unsigned nbytes = req->cryptlen;
u32 iv[4];
@@ -78,21 +82,30 @@ static int crypto_chacha20_crypt(struct skcipher_request *req)
return 0;
}
+static void *crypto_chacha20_alloc_tfm(void)
+{
+ struct chacha20_tfm *tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+
+ if (!tfm)
+ return NULL;
+
+ tfm->tfm.base.alg = &alg.base;
+ tfm->tfm.setkey = crypto_chacha20_setkey;
+ tfm->tfm.encrypt = crypto_chacha20_crypt;
+ tfm->tfm.decrypt = crypto_chacha20_crypt;
+ tfm->tfm.ivsize = CHACHA20_IV_SIZE;
+ tfm->tfm.keysize = CHACHA20_KEY_SIZE;
+
+ return tfm;
+}
+
static struct skcipher_alg alg = {
.base.cra_name = "chacha20",
- .base.cra_ctxsize = sizeof(struct chacha20_ctx),
-
- .min_keysize = CHACHA20_KEY_SIZE,
- .max_keysize = CHACHA20_KEY_SIZE,
- .ivsize = CHACHA20_IV_SIZE,
- .chunksize = CHACHA20_BLOCK_SIZE,
- .setkey = crypto_chacha20_setkey,
- .encrypt = crypto_chacha20_crypt,
- .decrypt = crypto_chacha20_crypt,
+ .base.alloc_tfm = crypto_chacha20_alloc_tfm,
};
__attribute__((constructor(110)))
static int chacha20_generic_mod_init(void)
{
- return crypto_register_alg(&alg.base);
+ return crypto_register_skcipher(&alg);
}
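The chacha20 conversion relies on the embedding idiom: the private state wraps the generic tfm as a member, and container_of() walks back from the embedded field to the wrapper, replacing the old crypto_skcipher_ctx() trailing-context trick. A self-contained sketch of the idiom (generic names; the struct layout is illustrative):

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct tfm {                            /* generic handle handed to callers */
        int placeholder;
};

struct chacha_state {                   /* driver-private wrapper */
        struct tfm tfm;                 /* embedded generic part */
        unsigned int key[8];            /* private key material */
};

/* Recover the wrapper from a pointer to its embedded member. */
static unsigned int *state_key(struct tfm *t)
{
        return container_of(t, struct chacha_state, tfm)->key;
}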
diff --git a/linux/crypto/internal.h b/linux/crypto/internal.h
deleted file mode 100644
index 5b21f836..00000000
--- a/linux/crypto/internal.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#ifndef _CRYPTO_INTERNAL_H
-#define _CRYPTO_INTERNAL_H
-
-struct crypto_type;
-struct crypto_alg;
-
-void *crypto_alloc_tfm(const char *, const struct crypto_type *, u32, u32);
-unsigned int crypto_alg_extsize(struct crypto_alg *);
-
-#endif /* _CRYPTO_INTERNAL_H */
-
diff --git a/linux/crypto/poly1305_generic.c b/linux/crypto/poly1305_generic.c
index 5d385d54..acb554c0 100644
--- a/linux/crypto/poly1305_generic.c
+++ b/linux/crypto/poly1305_generic.c
@@ -18,18 +18,19 @@
#include <linux/crypto.h>
#include <crypto/algapi.h>
-#include <crypto/internal/hash.h>
+#include <crypto/hash.h>
#include <crypto/poly1305.h>
+static struct shash_alg poly1305_alg;
+
struct poly1305_desc_ctx {
bool key_done;
crypto_onetimeauth_poly1305_state s;
};
-
static int poly1305_init(struct shash_desc *desc)
{
- struct poly1305_desc_ctx *state = shash_desc_ctx(desc);
+ struct poly1305_desc_ctx *state = (void *) desc->ctx;
state->key_done = false;
return 0;
@@ -38,7 +39,7 @@ static int poly1305_init(struct shash_desc *desc)
static int poly1305_update(struct shash_desc *desc,
const u8 *src, unsigned len)
{
- struct poly1305_desc_ctx *state = shash_desc_ctx(desc);
+ struct poly1305_desc_ctx *state = (void *) desc->ctx;
if (!state->key_done) {
BUG_ON(len != crypto_onetimeauth_poly1305_KEYBYTES);
@@ -52,21 +53,32 @@ static int poly1305_update(struct shash_desc *desc,
static int poly1305_final(struct shash_desc *desc, u8 *out)
{
- struct poly1305_desc_ctx *state = shash_desc_ctx(desc);
+ struct poly1305_desc_ctx *state = (void *) desc->ctx;
return crypto_onetimeauth_poly1305_final(&state->s, out);
}
+static void *poly1305_alloc_tfm(void)
+{
+ struct crypto_shash *tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+
+ if (!tfm)
+ return NULL;
+
+ tfm->base.alg = &poly1305_alg.base;
+ tfm->descsize = sizeof(struct poly1305_desc_ctx);
+ return tfm;
+}
+
static struct shash_alg poly1305_alg = {
.digestsize = crypto_onetimeauth_poly1305_BYTES,
.init = poly1305_init,
.update = poly1305_update,
.final = poly1305_final,
.descsize = sizeof(struct poly1305_desc_ctx),
- .base = {
- .cra_name = "poly1305",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- },
+
+ .base.cra_name = "poly1305",
+ .base.alloc_tfm = poly1305_alloc_tfm,
};
__attribute__((constructor(110)))
diff --git a/linux/crypto/sha256_generic.c b/linux/crypto/sha256_generic.c
index 0bd272f0..9326bfe7 100644
--- a/linux/crypto/sha256_generic.c
+++ b/linux/crypto/sha256_generic.c
@@ -24,13 +24,15 @@
#include <asm/unaligned.h>
#include <linux/crypto.h>
-#include <crypto/internal/hash.h>
+#include <crypto/hash.h>
#include <sodium/crypto_hash_sha256.h>
+static struct shash_alg sha256_alg;
+
static int sha256_init(struct shash_desc *desc)
{
- crypto_hash_sha256_state *state = shash_desc_ctx(desc);
+ crypto_hash_sha256_state *state = (void *) desc->ctx;
return crypto_hash_sha256_init(state);
}
@@ -38,28 +40,38 @@ static int sha256_init(struct shash_desc *desc)
static int sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- crypto_hash_sha256_state *state = shash_desc_ctx(desc);
+ crypto_hash_sha256_state *state = (void *) desc->ctx;
return crypto_hash_sha256_update(state, data, len);
}
static int sha256_final(struct shash_desc *desc, u8 *out)
{
- crypto_hash_sha256_state *state = shash_desc_ctx(desc);
+ crypto_hash_sha256_state *state = (void *) desc->ctx;
return crypto_hash_sha256_final(state, out);
}
+static void *sha256_alloc_tfm(void)
+{
+ struct crypto_shash *tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+
+ if (!tfm)
+ return NULL;
+
+ tfm->base.alg = &sha256_alg.base;
+ tfm->descsize = sizeof(crypto_hash_sha256_state);
+ return tfm;
+}
+
static struct shash_alg sha256_alg = {
.digestsize = crypto_hash_sha256_BYTES,
.init = sha256_init,
.update = sha256_update,
.final = sha256_final,
.descsize = sizeof(crypto_hash_sha256_state),
- .base = {
- .cra_name = "sha256",
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- }
+ .base.cra_name = "sha256",
+ .base.alloc_tfm = sha256_alloc_tfm,
};
__attribute__((constructor(110)))
diff --git a/linux/crypto/shash.c b/linux/crypto/shash.c
deleted file mode 100644
index 4f07a8b8..00000000
--- a/linux/crypto/shash.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Synchronous Cryptographic Hash operations.
- *
- * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/internal/hash.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/printk.h>
-#include <linux/slab.h>
-
-#include "internal.h"
-
-static int shash_finup(struct shash_desc *desc, const u8 *data,
- unsigned len, u8 *out)
-{
- return crypto_shash_update(desc, data, len) ?:
- crypto_shash_final(desc, out);
-}
-
-static int shash_digest(struct shash_desc *desc, const u8 *data,
- unsigned len, u8 *out)
-{
- return crypto_shash_init(desc) ?:
- crypto_shash_finup(desc, data, len, out);
-}
-
-static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_shash *hash = __crypto_shash_cast(tfm);
-
- hash->descsize = crypto_shash_alg(hash)->descsize;
- return 0;
-}
-
-static const struct crypto_type crypto_shash_type = {
- .extsize = crypto_alg_extsize,
- .init_tfm = crypto_shash_init_tfm,
- .maskclear = ~CRYPTO_ALG_TYPE_MASK,
- .maskset = CRYPTO_ALG_TYPE_MASK,
- .type = CRYPTO_ALG_TYPE_SHASH,
- .tfmsize = offsetof(struct crypto_shash, base),
-};
-
-struct crypto_shash *crypto_alloc_shash(const char *alg_name,
- u32 type, u32 mask)
-{
- return crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask);
-}
-
-int crypto_register_shash(struct shash_alg *alg)
-{
- struct crypto_alg *base = &alg->base;
-
- base->cra_type = &crypto_shash_type;
- base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
- base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
-
- if (!alg->finup)
- alg->finup = shash_finup;
- if (!alg->digest)
- alg->digest = shash_digest;
-
- return crypto_register_alg(base);
-}
diff --git a/linux/rbtree.c b/linux/rbtree.c
deleted file mode 100644
index d0e3cbfc..00000000
--- a/linux/rbtree.c
+++ /dev/null
@@ -1,615 +0,0 @@
-/*
- Red Black Trees
- (C) 1999 Andrea Arcangeli <andrea@suse.de>
- (C) 2002 David Woodhouse <dwmw2@infradead.org>
- (C) 2012 Michel Lespinasse <walken@google.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- linux/lib/rbtree.c
-*/
-
-#include <linux/atomic.h>
-#include <linux/rbtree_augmented.h>
-#include <linux/export.h>
-
-/*
- * red-black tree properties: http://en.wikipedia.org/wiki/Rbtree
- *
- * 1) A node is either red or black
- * 2) The root is black
- * 3) All leaves (NULL) are black
- * 4) Both children of every red node are black
- * 5) Every simple path from root to leaves contains the same number
- * of black nodes.
- *
- * 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
- * consecutive red nodes in a path and every red node is therefore followed by
- * a black. So if B is the number of black nodes on every simple path (as per
- * 5), then the longest possible path due to 4 is 2B.
- *
- * We shall indicate color with case, where black nodes are uppercase and red
- * nodes will be lowercase. Unknown color nodes shall be drawn as red within
- * parentheses and have some accompanying text comment.
- */
-
-/*
- * Notes on lockless lookups:
- *
- * All stores to the tree structure (rb_left and rb_right) must be done using
- * WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the
- * tree structure as seen in program order.
- *
- * These two requirements will allow lockless iteration of the tree -- not
- * correct iteration mind you, tree rotations are not atomic so a lookup might
- * miss entire subtrees.
- *
- * But they do guarantee that any such traversal will only see valid elements
- * and that it will indeed complete -- does not get stuck in a loop.
- *
- * It also guarantees that if the lookup returns an element it is the 'correct'
- * one. But not returning an element does _NOT_ mean it's not present.
- *
- * NOTE:
- *
- * Stores to __rb_parent_color are not important for simple lookups so those
- * are left undone as of now. Nor did I check for loops involving parent
- * pointers.
- */
-
-static inline void rb_set_black(struct rb_node *rb)
-{
- rb->__rb_parent_color |= RB_BLACK;
-}
-
-static inline struct rb_node *rb_red_parent(struct rb_node *red)
-{
- return (struct rb_node *)red->__rb_parent_color;
-}
-
-/*
- * Helper function for rotations:
- * - old's parent and color get assigned to new
- * - old gets assigned new as a parent and 'color' as a color.
- */
-static inline void
-__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
- struct rb_root *root, int color)
-{
- struct rb_node *parent = rb_parent(old);
- new->__rb_parent_color = old->__rb_parent_color;
- rb_set_parent_color(old, new, color);
- __rb_change_child(old, new, parent, root);
-}
-
-static __always_inline void
-__rb_insert(struct rb_node *node, struct rb_root *root,
- void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
-{
- struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
-
- while (true) {
- /*
- * Loop invariant: node is red
- *
- * If there is a black parent, we are done.
- * Otherwise, take some corrective action as we don't
- * want a red root or two consecutive red nodes.
- */
- if (!parent) {
- rb_set_parent_color(node, NULL, RB_BLACK);
- break;
- } else if (rb_is_black(parent))
- break;
-
- gparent = rb_red_parent(parent);
-
- tmp = gparent->rb_right;
- if (parent != tmp) { /* parent == gparent->rb_left */
- if (tmp && rb_is_red(tmp)) {
- /*
- * Case 1 - color flips
- *
- * G g
- * / \ / \
- * p u --> P U
- * / /
- * n n
- *
- * However, since g's parent might be red, and
- * 4) does not allow this, we need to recurse
- * at g.
- */
- rb_set_parent_color(tmp, gparent, RB_BLACK);
- rb_set_parent_color(parent, gparent, RB_BLACK);
- node = gparent;
- parent = rb_parent(node);
- rb_set_parent_color(node, parent, RB_RED);
- continue;
- }
-
- tmp = parent->rb_right;
- if (node == tmp) {
- /*
- * Case 2 - left rotate at parent
- *
- * G G
- * / \ / \
- * p U --> n U
- * \ /
- * n p
- *
- * This still leaves us in violation of 4), the
- * continuation into Case 3 will fix that.
- */
- tmp = node->rb_left;
- WRITE_ONCE(parent->rb_right, tmp);
- WRITE_ONCE(node->rb_left, parent);
- if (tmp)
- rb_set_parent_color(tmp, parent,
- RB_BLACK);
- rb_set_parent_color(parent, node, RB_RED);
- augment_rotate(parent, node);
- parent = node;
- tmp = node->rb_right;
- }
-
- /*
- * Case 3 - right rotate at gparent
- *
- * G P
- * / \ / \
- * p U --> n g
- * / \
- * n U
- */
- WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */
- WRITE_ONCE(parent->rb_right, gparent);
- if (tmp)
- rb_set_parent_color(tmp, gparent, RB_BLACK);
- __rb_rotate_set_parents(gparent, parent, root, RB_RED);
- augment_rotate(gparent, parent);
- break;
- } else {
- tmp = gparent->rb_left;
- if (tmp && rb_is_red(tmp)) {
- /* Case 1 - color flips */
- rb_set_parent_color(tmp, gparent, RB_BLACK);
- rb_set_parent_color(parent, gparent, RB_BLACK);
- node = gparent;
- parent = rb_parent(node);
- rb_set_parent_color(node, parent, RB_RED);
- continue;
- }
-
- tmp = parent->rb_left;
- if (node == tmp) {
- /* Case 2 - right rotate at parent */
- tmp = node->rb_right;
- WRITE_ONCE(parent->rb_left, tmp);
- WRITE_ONCE(node->rb_right, parent);
- if (tmp)
- rb_set_parent_color(tmp, parent,
- RB_BLACK);
- rb_set_parent_color(parent, node, RB_RED);
- augment_rotate(parent, node);
- parent = node;
- tmp = node->rb_left;
- }
-
- /* Case 3 - left rotate at gparent */
- WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */
- WRITE_ONCE(parent->rb_left, gparent);
- if (tmp)
- rb_set_parent_color(tmp, gparent, RB_BLACK);
- __rb_rotate_set_parents(gparent, parent, root, RB_RED);
- augment_rotate(gparent, parent);
- break;
- }
- }
-}
-
-/*
- * Inline version for rb_erase() use - we want to be able to inline
- * and eliminate the dummy_rotate callback there
- */
-static __always_inline void
-____rb_erase_color(struct rb_node *parent, struct rb_root *root,
- void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
-{
- struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
-
- while (true) {
- /*
- * Loop invariants:
- * - node is black (or NULL on first iteration)
- * - node is not the root (parent is not NULL)
- * - All leaf paths going through parent and node have a
- * black node count that is 1 lower than other leaf paths.
- */
- sibling = parent->rb_right;
- if (node != sibling) { /* node == parent->rb_left */
- if (rb_is_red(sibling)) {
- /*
- * Case 1 - left rotate at parent
- *
- * P S
- * / \ / \
- * N s --> p Sr
- * / \ / \
- * Sl Sr N Sl
- */
- tmp1 = sibling->rb_left;
- WRITE_ONCE(parent->rb_right, tmp1);
- WRITE_ONCE(sibling->rb_left, parent);
- rb_set_parent_color(tmp1, parent, RB_BLACK);
- __rb_rotate_set_parents(parent, sibling, root,
- RB_RED);
- augment_rotate(parent, sibling);
- sibling = tmp1;
- }
- tmp1 = sibling->rb_right;
- if (!tmp1 || rb_is_black(tmp1)) {
- tmp2 = sibling->rb_left;
- if (!tmp2 || rb_is_black(tmp2)) {
- /*
- * Case 2 - sibling color flip
- * (p could be either color here)
- *
- * (p) (p)
- * / \ / \
- * N S --> N s
- * / \ / \
- * Sl Sr Sl Sr
- *
- * This leaves us violating 5) which
- * can be fixed by flipping p to black
- * if it was red, or by recursing at p.
- * p is red when coming from Case 1.
- */
- rb_set_parent_color(sibling, parent,
- RB_RED);
- if (rb_is_red(parent))
- rb_set_black(parent);
- else {
- node = parent;
- parent = rb_parent(node);
- if (parent)
- continue;
- }
- break;
- }
- /*
- * Case 3 - right rotate at sibling
- * (p could be either color here)
- *
- * (p) (p)
- * / \ / \
- * N S --> N Sl
- * / \ \
- * sl Sr s
- * \
- * Sr
- */
- tmp1 = tmp2->rb_right;
- WRITE_ONCE(sibling->rb_left, tmp1);
- WRITE_ONCE(tmp2->rb_right, sibling);
- WRITE_ONCE(parent->rb_right, tmp2);
- if (tmp1)
- rb_set_parent_color(tmp1, sibling,
- RB_BLACK);
- augment_rotate(sibling, tmp2);
- tmp1 = sibling;
- sibling = tmp2;
- }
- /*
- * Case 4 - left rotate at parent + color flips
- * (p and sl could be either color here.
- * After rotation, p becomes black, s acquires
- * p's color, and sl keeps its color)
- *
- * (p) (s)
- * / \ / \
- * N S --> P Sr
- * / \ / \
- * (sl) sr N (sl)
- */
- tmp2 = sibling->rb_left;
- WRITE_ONCE(parent->rb_right, tmp2);
- WRITE_ONCE(sibling->rb_left, parent);
- rb_set_parent_color(tmp1, sibling, RB_BLACK);
- if (tmp2)
- rb_set_parent(tmp2, parent);
- __rb_rotate_set_parents(parent, sibling, root,
- RB_BLACK);
- augment_rotate(parent, sibling);
- break;
- } else {
- sibling = parent->rb_left;
- if (rb_is_red(sibling)) {
- /* Case 1 - right rotate at parent */
- tmp1 = sibling->rb_right;
- WRITE_ONCE(parent->rb_left, tmp1);
- WRITE_ONCE(sibling->rb_right, parent);
- rb_set_parent_color(tmp1, parent, RB_BLACK);
- __rb_rotate_set_parents(parent, sibling, root,
- RB_RED);
- augment_rotate(parent, sibling);
- sibling = tmp1;
- }
- tmp1 = sibling->rb_left;
- if (!tmp1 || rb_is_black(tmp1)) {
- tmp2 = sibling->rb_right;
- if (!tmp2 || rb_is_black(tmp2)) {
- /* Case 2 - sibling color flip */
- rb_set_parent_color(sibling, parent,
- RB_RED);
- if (rb_is_red(parent))
- rb_set_black(parent);
- else {
- node = parent;
- parent = rb_parent(node);
- if (parent)
- continue;
- }
- break;
- }
- /* Case 3 - right rotate at sibling */
- tmp1 = tmp2->rb_left;
- WRITE_ONCE(sibling->rb_right, tmp1);
- WRITE_ONCE(tmp2->rb_left, sibling);
- WRITE_ONCE(parent->rb_left, tmp2);
- if (tmp1)
- rb_set_parent_color(tmp1, sibling,
- RB_BLACK);
- augment_rotate(sibling, tmp2);
- tmp1 = sibling;
- sibling = tmp2;
- }
- /* Case 4 - left rotate at parent + color flips */
- tmp2 = sibling->rb_right;
- WRITE_ONCE(parent->rb_left, tmp2);
- WRITE_ONCE(sibling->rb_right, parent);
- rb_set_parent_color(tmp1, sibling, RB_BLACK);
- if (tmp2)
- rb_set_parent(tmp2, parent);
- __rb_rotate_set_parents(parent, sibling, root,
- RB_BLACK);
- augment_rotate(parent, sibling);
- break;
- }
- }
-}
-
-/* Non-inline version for rb_erase_augmented() use */
-void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
- void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
-{
- ____rb_erase_color(parent, root, augment_rotate);
-}
-EXPORT_SYMBOL(__rb_erase_color);
-
-/*
- * Non-augmented rbtree manipulation functions.
- *
- * We use dummy augmented callbacks here, and have the compiler optimize them
- * out of the rb_insert_color() and rb_erase() function definitions.
- */
-
-static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
-static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
-static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
-
-static const struct rb_augment_callbacks dummy_callbacks = {
- dummy_propagate, dummy_copy, dummy_rotate
-};
-
-void rb_insert_color(struct rb_node *node, struct rb_root *root)
-{
- __rb_insert(node, root, dummy_rotate);
-}
-EXPORT_SYMBOL(rb_insert_color);
-
-void rb_erase(struct rb_node *node, struct rb_root *root)
-{
- struct rb_node *rebalance;
- rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
- if (rebalance)
- ____rb_erase_color(rebalance, root, dummy_rotate);
-}
-EXPORT_SYMBOL(rb_erase);
-
-/*
- * Augmented rbtree manipulation functions.
- *
- * This instantiates the same __always_inline functions as in the non-augmented
- * case, but this time with user-defined callbacks.
- */
-
-void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
- void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
-{
- __rb_insert(node, root, augment_rotate);
-}
-EXPORT_SYMBOL(__rb_insert_augmented);
-
-/*
- * This function returns the first node (in sort order) of the tree.
- */
-struct rb_node *rb_first(const struct rb_root *root)
-{
- struct rb_node *n;
-
- n = root->rb_node;
- if (!n)
- return NULL;
- while (n->rb_left)
- n = n->rb_left;
- return n;
-}
-EXPORT_SYMBOL(rb_first);
-
-struct rb_node *rb_last(const struct rb_root *root)
-{
- struct rb_node *n;
-
- n = root->rb_node;
- if (!n)
- return NULL;
- while (n->rb_right)
- n = n->rb_right;
- return n;
-}
-EXPORT_SYMBOL(rb_last);
-
-struct rb_node *rb_next(const struct rb_node *node)
-{
- struct rb_node *parent;
-
- if (RB_EMPTY_NODE(node))
- return NULL;
-
- /*
- * If we have a right-hand child, go down and then left as far
- * as we can.
- */
- if (node->rb_right) {
- node = node->rb_right;
- while (node->rb_left)
- node=node->rb_left;
- return (struct rb_node *)node;
- }
-
- /*
- * No right-hand children. Everything down and left is smaller than us,
- * so any 'next' node must be in the general direction of our parent.
- * Go up the tree; any time the ancestor is a right-hand child of its
- * parent, keep going up. First time it's a left-hand child of its
- * parent, said parent is our 'next' node.
- */
- while ((parent = rb_parent(node)) && node == parent->rb_right)
- node = parent;
-
- return parent;
-}
-EXPORT_SYMBOL(rb_next);
-
-struct rb_node *rb_prev(const struct rb_node *node)
-{
- struct rb_node *parent;
-
- if (RB_EMPTY_NODE(node))
- return NULL;
-
- /*
- * If we have a left-hand child, go down and then right as far
- * as we can.
- */
- if (node->rb_left) {
- node = node->rb_left;
- while (node->rb_right)
- node=node->rb_right;
- return (struct rb_node *)node;
- }
-
- /*
- * No left-hand children. Go up till we find an ancestor which
- * is a right-hand child of its parent.
- */
- while ((parent = rb_parent(node)) && node == parent->rb_left)
- node = parent;
-
- return parent;
-}
-EXPORT_SYMBOL(rb_prev);
-
-void rb_replace_node(struct rb_node *victim, struct rb_node *new,
- struct rb_root *root)
-{
- struct rb_node *parent = rb_parent(victim);
-
- /* Copy the pointers/colour from the victim to the replacement */
- *new = *victim;
-
- /* Set the surrounding nodes to point to the replacement */
- if (victim->rb_left)
- rb_set_parent(victim->rb_left, new);
- if (victim->rb_right)
- rb_set_parent(victim->rb_right, new);
- __rb_change_child(victim, new, parent, root);
-}
-EXPORT_SYMBOL(rb_replace_node);
-
-void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
- struct rb_root *root)
-{
- struct rb_node *parent = rb_parent(victim);
-
- /* Copy the pointers/colour from the victim to the replacement */
- *new = *victim;
-
- /* Set the surrounding nodes to point to the replacement */
- if (victim->rb_left)
- rb_set_parent(victim->rb_left, new);
- if (victim->rb_right)
- rb_set_parent(victim->rb_right, new);
-
- /* Set the parent's pointer to the new node last after an RCU barrier
- * so that the pointers onwards are seen to be set correctly when doing
- * an RCU walk over the tree.
- */
- __rb_change_child_rcu(victim, new, parent, root);
-}
-EXPORT_SYMBOL(rb_replace_node_rcu);
-
-static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
-{
- for (;;) {
- if (node->rb_left)
- node = node->rb_left;
- else if (node->rb_right)
- node = node->rb_right;
- else
- return (struct rb_node *)node;
- }
-}
-
-struct rb_node *rb_next_postorder(const struct rb_node *node)
-{
- const struct rb_node *parent;
- if (!node)
- return NULL;
- parent = rb_parent(node);
-
- /* If we're sitting on node, we've already seen our children */
- if (parent && node == parent->rb_left && parent->rb_right) {
- /* If we are the parent's left node, go to the parent's right
- * node then all the way down to the left */
- return rb_left_deepest_node(parent->rb_right);
- } else
- /* Otherwise we are the parent's right node, and the parent
- * should be next */
- return (struct rb_node *)parent;
-}
-EXPORT_SYMBOL(rb_next_postorder);
-
-struct rb_node *rb_first_postorder(const struct rb_root *root)
-{
- if (!root->rb_node)
- return NULL;
-
- return rb_left_deepest_node(root->rb_node);
-}
-EXPORT_SYMBOL(rb_first_postorder);
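rbtree.c above is the stock kernel implementation, deleted here as unused. For reference, a hedged usage sketch of the helpers it provided (rb_first()/rb_next() for in-order walks, rb_insert_color() for rebalancing); struct item and its key field are illustrative:

#include <linux/rbtree.h>

struct item {
        struct rb_node node;
        int key;
};

/* Standard descend-then-link insertion, rebalanced by rb_insert_color(). */
static void item_insert(struct rb_root *root, struct item *new)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {
                parent = *p;
                p = rb_entry(parent, struct item, node)->key < new->key
                        ? &parent->rb_right : &parent->rb_left;
        }
        rb_link_node(&new->node, parent, p);
        rb_insert_color(&new->node, root);
}

/* In-order walk via the deleted rb_first()/rb_next() helpers. */
static void item_walk(struct rb_root *root)
{
        struct rb_node *n;

        for (n = rb_first(root); n; n = rb_next(n))
                (void) rb_entry(n, struct item, node); /* visit in key order */
}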
diff --git a/linux/sched.c b/linux/sched.c
index 52b741fa..c996945e 100644
--- a/linux/sched.c
+++ b/linux/sched.c
@@ -103,72 +103,6 @@ out:
return timeout < 0 ? 0 : timeout;
}
-unsigned long __msecs_to_jiffies(const unsigned int m)
-{
- /*
- * Negative value, means infinite timeout:
- */
- if ((int)m < 0)
- return MAX_JIFFY_OFFSET;
- return _msecs_to_jiffies(m);
-}
-
-u64 nsecs_to_jiffies64(u64 n)
-{
-#if (NSEC_PER_SEC % HZ) == 0
- /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
- return div_u64(n, NSEC_PER_SEC / HZ);
-#elif (HZ % 512) == 0
- /* overflow after 292 years if HZ = 1024 */
- return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
-#else
- /*
- * Generic case - optimized for cases where HZ is a multiple of 3.
- * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
- */
- return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
-#endif
-}
-
-unsigned long nsecs_to_jiffies(u64 n)
-{
- return (unsigned long)nsecs_to_jiffies64(n);
-}
-
-unsigned int jiffies_to_msecs(const unsigned long j)
-{
-#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
- return (MSEC_PER_SEC / HZ) * j;
-#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
- return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
-#else
-# if BITS_PER_LONG == 32
- return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
-# else
- return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
-# endif
-#endif
-}
-
-unsigned int jiffies_to_usecs(const unsigned long j)
-{
- /*
- * Hz usually doesn't go much further than MSEC_PER_SEC.
- * jiffies_to_usecs() and usecs_to_jiffies() depend on that.
- */
- BUILD_BUG_ON(HZ > USEC_PER_SEC);
-
-#if !(USEC_PER_SEC % HZ)
- return (USEC_PER_SEC / HZ) * j;
-#else
-# if BITS_PER_LONG == 32
- return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
-# else
- return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
-# endif
-#endif
-}
-
__attribute__((constructor(101)))
static void sched_init(void)
{
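The deleted jiffies converters mostly reduce to a single division once the divisibility cases are picked at compile time. A worked sketch of the common case from nsecs_to_jiffies64(), under the assumption HZ = 250 (so one jiffy is 4,000,000 ns):

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ           250ULL             /* assumption for the example */

/* Common case: NSEC_PER_SEC % HZ == 0, so one division suffices. */
static uint64_t ns_to_jiffies(uint64_t n)
{
        return n / (NSEC_PER_SEC / HZ); /* 10 ms -> 2 jiffies (truncating) */
}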
diff --git a/linux/semaphore.c b/linux/semaphore.c
deleted file mode 100644
index 6561dd22..00000000
--- a/linux/semaphore.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Copyright (c) 2008 Intel Corporation
- * Author: Matthew Wilcox <willy@linux.intel.com>
- *
- * Distributed under the terms of the GNU GPL, version 2
- *
- * This file implements counting semaphores.
- * A counting semaphore may be acquired 'n' times before sleeping.
- * See mutex.c for single-acquisition sleeping locks which enforce
- * rules which allow code to be debugged more easily.
- */
-
-/*
- * Some notes on the implementation:
- *
- * The spinlock controls access to the other members of the semaphore.
- * down_trylock() and up() can be called from interrupt context, so we
- * have to disable interrupts when taking the lock. It turns out various
- * parts of the kernel expect to be able to use down() on a semaphore in
- * interrupt context when they know it will succeed, so we have to use
- * irqsave variants for down(), down_interruptible() and down_killable()
- * too.
- *
- * The ->count variable represents how many more tasks can acquire this
- * semaphore. If it's zero, there may be tasks waiting on the wait_list.
- */
-
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/semaphore.h>
-#include <linux/spinlock.h>
-
-static noinline void __down(struct semaphore *sem);
-static noinline int __down_interruptible(struct semaphore *sem);
-static noinline int __down_killable(struct semaphore *sem);
-static noinline int __down_timeout(struct semaphore *sem, long timeout);
-static noinline void __up(struct semaphore *sem);
-
-/**
- * down - acquire the semaphore
- * @sem: the semaphore to be acquired
- *
- * Acquires the semaphore. If no more tasks are allowed to acquire the
- * semaphore, calling this function will put the task to sleep until the
- * semaphore is released.
- *
- * Use of this function is deprecated, please use down_interruptible() or
- * down_killable() instead.
- */
-void down(struct semaphore *sem)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(sem->count > 0))
- sem->count--;
- else
- __down(sem);
- raw_spin_unlock_irqrestore(&sem->lock, flags);
-}
-EXPORT_SYMBOL(down);
-
-/**
- * down_interruptible - acquire the semaphore unless interrupted
- * @sem: the semaphore to be acquired
- *
- * Attempts to acquire the semaphore. If no more tasks are allowed to
- * acquire the semaphore, calling this function will put the task to sleep.
- * If the sleep is interrupted by a signal, this function will return -EINTR.
- * If the semaphore is successfully acquired, this function returns 0.
- */
-int down_interruptible(struct semaphore *sem)
-{
- unsigned long flags;
- int result = 0;
-
- raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(sem->count > 0))
- sem->count--;
- else
- result = __down_interruptible(sem);
- raw_spin_unlock_irqrestore(&sem->lock, flags);
-
- return result;
-}
-EXPORT_SYMBOL(down_interruptible);
-
-/**
- * down_killable - acquire the semaphore unless killed
- * @sem: the semaphore to be acquired
- *
- * Attempts to acquire the semaphore. If no more tasks are allowed to
- * acquire the semaphore, calling this function will put the task to sleep.
- * If the sleep is interrupted by a fatal signal, this function will return
- * -EINTR. If the semaphore is successfully acquired, this function returns
- * 0.
- */
-int down_killable(struct semaphore *sem)
-{
- unsigned long flags;
- int result = 0;
-
- raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(sem->count > 0))
- sem->count--;
- else
- result = __down_killable(sem);
- raw_spin_unlock_irqrestore(&sem->lock, flags);
-
- return result;
-}
-EXPORT_SYMBOL(down_killable);
-
-/**
- * down_trylock - try to acquire the semaphore, without waiting
- * @sem: the semaphore to be acquired
- *
- * Try to acquire the semaphore atomically. Returns 0 if the semaphore has
- * been acquired successfully or 1 if it cannot be acquired.
- *
- * NOTE: This return value is inverted from both spin_trylock and
- * mutex_trylock! Be careful about this when converting code.
- *
- * Unlike mutex_trylock, this function can be used from interrupt context,
- * and the semaphore can be released by any task or interrupt.
- */
-int down_trylock(struct semaphore *sem)
-{
- unsigned long flags;
- int count;
-
- raw_spin_lock_irqsave(&sem->lock, flags);
- count = sem->count - 1;
- if (likely(count >= 0))
- sem->count = count;
- raw_spin_unlock_irqrestore(&sem->lock, flags);
-
- return (count < 0);
-}
-EXPORT_SYMBOL(down_trylock);
-
-/**
- * down_timeout - acquire the semaphore within a specified time
- * @sem: the semaphore to be acquired
- * @timeout: how long to wait before failing
- *
- * Attempts to acquire the semaphore. If no more tasks are allowed to
- * acquire the semaphore, calling this function will put the task to sleep.
- * If the semaphore is not released within the specified number of jiffies,
- * this function returns -ETIME. It returns 0 if the semaphore was acquired.
- */
-int down_timeout(struct semaphore *sem, long timeout)
-{
- unsigned long flags;
- int result = 0;
-
- raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(sem->count > 0))
- sem->count--;
- else
- result = __down_timeout(sem, timeout);
- raw_spin_unlock_irqrestore(&sem->lock, flags);
-
- return result;
-}
-EXPORT_SYMBOL(down_timeout);
-
-/**
- * up - release the semaphore
- * @sem: the semaphore to release
- *
- * Release the semaphore. Unlike mutexes, up() may be called from any
- * context and even by tasks which have never called down().
- */
-void up(struct semaphore *sem)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&sem->lock, flags);
- if (likely(list_empty(&sem->wait_list)))
- sem->count++;
- else
- __up(sem);
- raw_spin_unlock_irqrestore(&sem->lock, flags);
-}
-EXPORT_SYMBOL(up);
-
-/* Functions for the contended case */
-
-struct semaphore_waiter {
- struct list_head list;
- struct task_struct *task;
- bool up;
-};
-
-/*
- * Because this function is inlined, the 'state' parameter will be
- * constant, and thus optimised away by the compiler. Likewise the
- * 'timeout' parameter for the cases without timeouts.
- */
-static inline int __sched __down_common(struct semaphore *sem, long state,
- long timeout)
-{
- struct task_struct *task = current;
- struct semaphore_waiter waiter;
-
- list_add_tail(&waiter.list, &sem->wait_list);
- waiter.task = task;
- waiter.up = false;
-
- for (;;) {
- if (unlikely(timeout <= 0))
- goto timed_out;
- __set_task_state(task, state);
- raw_spin_unlock_irq(&sem->lock);
- timeout = schedule_timeout(timeout);
- raw_spin_lock_irq(&sem->lock);
- if (waiter.up)
- return 0;
- }
-
- timed_out:
- list_del(&waiter.list);
- return -1;
-}
-
-static noinline void __sched __down(struct semaphore *sem)
-{
- __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
-}
-
-static noinline int __sched __down_interruptible(struct semaphore *sem)
-{
- return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
-}
-
-static noinline int __sched __down_killable(struct semaphore *sem)
-{
- return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
-}
-
-static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
-{
- return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
-}
-
-static noinline void __sched __up(struct semaphore *sem)
-{
- struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
- struct semaphore_waiter, list);
- list_del(&waiter->list);
- waiter->up = true;
- wake_up_process(waiter->task);
-}
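semaphore.c above was likewise unused; its API is documented by the kernel-doc comments. A hedged usage sketch against that API as declared in <linux/semaphore.h> (the resource naming is illustrative):

#include <linux/semaphore.h>

static struct semaphore resource_sem;

static void resource_setup(void)
{
        sema_init(&resource_sem, 1);    /* count 1: binary semaphore */
}

static int resource_use(void)
{
        int ret = down_interruptible(&resource_sem);

        if (ret)
                return ret;             /* -EINTR if interrupted */
        /* ... resource held; any task may release it ... */
        up(&resource_sem);
        return 0;
}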