Diffstat (limited to 'include')
-rw-r--r--  include/crypto/algapi.h           184
-rw-r--r--  include/crypto/chacha20.h          11
-rw-r--r--  include/crypto/hash.h             115
-rw-r--r--  include/crypto/internal/hash.h      3
-rw-r--r--  include/crypto/poly1305.h          34
-rw-r--r--  include/crypto/sha.h              110
-rw-r--r--  include/crypto/sha1_base.h        107
-rw-r--r--  include/keys/user-type.h            6
-rw-r--r--  include/linux/bcache.h            758
-rw-r--r--  include/linux/crypto.h            800
-rw-r--r--  include/linux/cryptohash.h         20
-rw-r--r--  include/linux/kernel.h              2
-rw-r--r--  include/linux/key.h                50
-rw-r--r--  include/linux/mempool.h             5
-rw-r--r--  include/linux/page.h                7
-rw-r--r--  include/linux/scatterlist.h       111
-rw-r--r--  include/linux/time64.h             18
-rw-r--r--  include/trace/events/bcache.h      44
18 files changed, 765 insertions, 1620 deletions
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 31f453ee..d8bfcc1f 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -13,200 +13,24 @@
#define _CRYPTO_ALGAPI_H
#include <linux/crypto.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-
-struct crypto_aead;
-struct crypto_instance;
-struct module;
-struct rtattr;
-struct seq_file;
-struct sk_buff;
struct crypto_type {
unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
unsigned int (*extsize)(struct crypto_alg *alg);
int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
int (*init_tfm)(struct crypto_tfm *tfm);
- void (*show)(struct seq_file *m, struct crypto_alg *alg);
- struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
- void (*free)(struct crypto_instance *inst);
-
- unsigned int type;
- unsigned int maskclear;
- unsigned int maskset;
- unsigned int tfmsize;
-};
-
-struct crypto_instance {
- struct crypto_alg alg;
-
- struct crypto_template *tmpl;
- struct hlist_node list;
-
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
-struct crypto_template {
- struct list_head list;
- struct hlist_head instances;
- struct module *module;
-
- struct crypto_instance *(*alloc)(struct rtattr **tb);
- void (*free)(struct crypto_instance *inst);
- int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
-
- char name[CRYPTO_MAX_ALG_NAME];
-};
-
-struct scatter_walk {
- struct scatterlist *sg;
- unsigned int offset;
-};
-
-struct blkcipher_walk {
- union {
- struct {
- struct page *page;
- unsigned long offset;
- } phys;
-
- struct {
- u8 *page;
- u8 *addr;
- } virt;
- } src, dst;
- struct scatter_walk in;
- unsigned int nbytes;
-
- struct scatter_walk out;
- unsigned int total;
-
- void *page;
- u8 *buffer;
- u8 *iv;
- unsigned int ivsize;
-
- int flags;
- unsigned int walk_blocksize;
- unsigned int cipher_blocksize;
- unsigned int alignmask;
+ unsigned type;
+ unsigned maskclear;
+ unsigned maskset;
+ unsigned tfmsize;
};
extern const struct crypto_type crypto_blkcipher_type;
-struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
-int crypto_check_attr_type(struct rtattr **tb, u32 type);
-const char *crypto_attr_alg_name(struct rtattr *rta);
-struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
- const struct crypto_type *frontend,
- u32 type, u32 mask);
-
-static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
- u32 type, u32 mask)
-{
- return crypto_attr_alg2(rta, NULL, type, mask);
-}
-
-int crypto_attr_u32(struct rtattr *rta, u32 *num);
-
-/* These functions require the input/output to be aligned as u32. */
-void crypto_inc(u8 *a, unsigned int size);
-void crypto_xor(u8 *dst, const u8 *src, unsigned int size);
-
-int blkcipher_walk_done(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk, int err);
-int blkcipher_walk_virt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk);
-int blkcipher_walk_phys(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk);
-int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- unsigned int blocksize);
-int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- struct crypto_aead *tfm,
- unsigned int blocksize);
-
-static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
-{
- return PTR_ALIGN(crypto_tfm_ctx(tfm),
- crypto_tfm_alg_alignmask(tfm) + 1);
-}
-
-static inline struct crypto_instance *crypto_tfm_alg_instance(
- struct crypto_tfm *tfm)
-{
- return container_of(tfm->__crt_alg, struct crypto_instance, alg);
-}
-
-static inline void *crypto_instance_ctx(struct crypto_instance *inst)
-{
- return inst->__ctx;
-}
-
static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
-static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_ctx_aligned(&tfm->base);
-}
-
-static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
-{
- return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
-}
-
-static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- walk->in.sg = src;
- walk->out.sg = dst;
- walk->total = nbytes;
-}
-
-static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
- u32 type, u32 mask)
-{
- return crypto_attr_alg(tb[1], type, mask);
-}
-
-static inline int crypto_requires_sync(u32 type, u32 mask)
-{
- return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
-}
-
-noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
-
-/**
- * crypto_memneq - Compare two areas of memory without leaking
- * timing information.
- *
- * @a: One area of memory
- * @b: Another area of memory
- * @size: The size of the area.
- *
- * Returns 0 when data is equal, 1 otherwise.
- */
-static inline int crypto_memneq(const void *a, const void *b, size_t size)
-{
- return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
-}
-
-static inline void crypto_yield(u32 flags)
-{
-#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
- if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
- cond_resched();
-#endif
-}
-
#endif /* _CRYPTO_ALGAPI_H */
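
The helpers that survive this trim are still enough for a cipher implementation to reach its per-transform state. A minimal sketch, assuming kernel-style error codes; my_ctx and my_setkey are illustrative names, not symbols from this tree:

/* Hypothetical blkcipher setkey using the retained context helpers */
struct my_ctx {
	u32 key[8];
};

static int my_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned keylen)
{
	struct my_ctx *ctx = crypto_tfm_ctx(tfm);	/* per-tfm context */

	if (keylen != sizeof(ctx->key))
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	return 0;
}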
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
index 20d20f68..1cdc77ba 100644
--- a/include/crypto/chacha20.h
+++ b/include/crypto/chacha20.h
@@ -12,15 +12,4 @@
#define CHACHA20_KEY_SIZE 32
#define CHACHA20_BLOCK_SIZE 64
-struct chacha20_ctx {
- u32 key[8];
-};
-
-void chacha20_block(u32 *state, void *stream);
-void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
-int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keysize);
-int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
-
#endif
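
With the in-kernel ChaCha20 code gone, only the size constants remain here; by analogy with the poly1305.h change below, the cipher itself would presumably come from libsodium. A sketch under that assumption (crypto_stream_chacha20_xor_ic is libsodium's ChaCha20 with an explicit block counter):

/* Sketch only: XOR a buffer with the ChaCha20 keystream in place,
 * assuming a libsodium dependency as introduced for poly1305 below. */
#include <sodium/crypto_stream_chacha20.h>

static void chacha20_crypt(u8 *buf, size_t len,
			   const u8 key[CHACHA20_KEY_SIZE],
			   const u8 nonce[crypto_stream_chacha20_NONCEBYTES])
{
	crypto_stream_chacha20_xor_ic(buf, buf, len, nonce, 0, key);
}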
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 00bd4e7e..97edaa88 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -16,13 +16,6 @@
#include <linux/crypto.h>
#include <linux/string.h>
-struct hash_alg_common {
- unsigned int digestsize;
- unsigned int statesize;
-
- struct crypto_alg base;
-};
-
struct shash_desc {
struct crypto_shash *tfm;
u32 flags;
@@ -37,31 +30,21 @@ struct shash_desc {
struct shash_alg {
int (*init)(struct shash_desc *desc);
- int (*update)(struct shash_desc *desc, const u8 *data,
- unsigned int len);
+ int (*update)(struct shash_desc *desc, const u8 *data, unsigned len);
int (*final)(struct shash_desc *desc, u8 *out);
int (*finup)(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
+ unsigned len, u8 *out);
int (*digest)(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
- int (*export)(struct shash_desc *desc, void *out);
- int (*import)(struct shash_desc *desc, const void *in);
- int (*setkey)(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen);
-
- unsigned int descsize;
-
- /* These fields must match hash_alg_common. */
- unsigned int digestsize
- __attribute__ ((aligned(__alignof__(struct hash_alg_common))));
- unsigned int statesize;
+ unsigned len, u8 *out);
- struct crypto_alg base;
+ unsigned descsize;
+ unsigned digestsize;
+ struct crypto_alg base;
};
struct crypto_shash {
- unsigned int descsize;
- struct crypto_tfm base;
+ unsigned descsize;
+ struct crypto_tfm base;
};
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
@@ -77,27 +60,6 @@ static inline void crypto_free_shash(struct crypto_shash *tfm)
crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
}
-static inline const char *crypto_shash_alg_name(struct crypto_shash *tfm)
-{
- return crypto_tfm_alg_name(crypto_shash_tfm(tfm));
-}
-
-static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm)
-{
- return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
-}
-
-static inline unsigned int crypto_shash_alignmask(
- struct crypto_shash *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
-}
-
-static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
-}
-
static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg)
{
return container_of(alg, struct shash_alg, base);
@@ -108,32 +70,12 @@ static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
}
-static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
+static inline unsigned crypto_shash_digestsize(struct crypto_shash *tfm)
{
return crypto_shash_alg(tfm)->digestsize;
}
-static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm)
-{
- return crypto_shash_alg(tfm)->statesize;
-}
-
-static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm)
-{
- return crypto_tfm_get_flags(crypto_shash_tfm(tfm));
-}
-
-static inline void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags)
-{
- crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags);
-}
-
-static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
-{
- crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
-}
-
-static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
+static inline unsigned crypto_shash_descsize(struct crypto_shash *tfm)
{
return tfm->descsize;
}
@@ -143,39 +85,32 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
return desc->__ctx;
}
-int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen);
-
-int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
-
-static inline int crypto_shash_export(struct shash_desc *desc, void *out)
+static inline int crypto_shash_init(struct shash_desc *desc)
{
- return crypto_shash_alg(desc->tfm)->export(desc, out);
+ return crypto_shash_alg(desc->tfm)->init(desc);
}
-static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
+static inline int crypto_shash_update(struct shash_desc *desc,
+ const u8 *data, unsigned len)
{
- return crypto_shash_alg(desc->tfm)->import(desc, in);
+ return crypto_shash_alg(desc->tfm)->update(desc, data, len);
}
-static inline int crypto_shash_init(struct shash_desc *desc)
+static inline int crypto_shash_final(struct shash_desc *desc, u8 *out)
{
- return crypto_shash_alg(desc->tfm)->init(desc);
+ return crypto_shash_alg(desc->tfm)->final(desc, out);
}
-int crypto_shash_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-int crypto_shash_final(struct shash_desc *desc, u8 *out);
-
-int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out);
+static inline int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
+ unsigned len, u8 *out)
+{
+ return crypto_shash_alg(desc->tfm)->finup(desc, data, len, out);
+}
-static inline void shash_desc_zero(struct shash_desc *desc)
+static inline int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
+ unsigned len, u8 *out)
{
- memzero_explicit(desc,
- sizeof(*desc) + crypto_shash_descsize(desc->tfm));
+ return crypto_shash_alg(desc->tfm)->digest(desc, data, len, out);
}
#endif /* _CRYPTO_HASH_H */
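
After this change every crypto_shash_* entry point is a thin inline wrapper around the shash_alg ops, so callers look exactly as before. A minimal sketch of a one-shot hash, assuming some shash algorithm (here "sha256") is registered in this tree:

/* Sketch: one-shot digest through the inlined shash API */
static int hash_buf(const u8 *data, unsigned len, u8 *out)
{
	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		/* descriptor plus per-algorithm state, sized at runtime */
		char buf[sizeof(struct shash_desc) + crypto_shash_descsize(tfm)];
		struct shash_desc *desc = (void *) buf;

		desc->tfm = tfm;
		desc->flags = 0;
		ret = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return ret;
}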
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 2d85c803..3973047b 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -5,9 +5,6 @@
#include <crypto/hash.h>
int crypto_register_shash(struct shash_alg *alg);
-int crypto_unregister_shash(struct shash_alg *alg);
-int crypto_register_shashes(struct shash_alg *algs, int count);
-int crypto_unregister_shashes(struct shash_alg *algs, int count);
static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
{
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index 894df59b..9fcfbfeb 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -5,37 +5,9 @@
#ifndef _CRYPTO_POLY1305_H
#define _CRYPTO_POLY1305_H
-#include <linux/types.h>
-#include <linux/crypto.h>
+#include <sodium/crypto_onetimeauth_poly1305.h>
-#define POLY1305_BLOCK_SIZE 16
-#define POLY1305_KEY_SIZE 32
-#define POLY1305_DIGEST_SIZE 16
-
-struct poly1305_desc_ctx {
- /* key */
- u32 r[5];
- /* finalize key */
- u32 s[4];
- /* accumulator */
- u32 h[5];
- /* partial buffer */
- u8 buf[POLY1305_BLOCK_SIZE];
- /* bytes used in partial buffer */
- unsigned int buflen;
- /* r key has been set */
- bool rset;
- /* s key has been set */
- bool sset;
-};
-
-int crypto_poly1305_init(struct shash_desc *desc);
-int crypto_poly1305_setkey(struct crypto_shash *tfm,
- const u8 *key, unsigned int keylen);
-unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
- const u8 *src, unsigned int srclen);
-int crypto_poly1305_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen);
-int crypto_poly1305_final(struct shash_desc *desc, u8 *dst);
+#define POLY1305_KEY_SIZE crypto_onetimeauth_poly1305_KEYBYTES
+#define POLY1305_DIGEST_SIZE crypto_onetimeauth_poly1305_BYTES
#endif
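
The in-kernel poly1305 implementation is dropped in favour of libsodium's one-time authenticator, with the old size constants kept as aliases for sodium's. Computing a MAC then reduces to a single call; sketch:

/* Poly1305 MAC via libsodium, using the aliased constants above.
 * The key must be single-use, per poly1305's one-time-key requirement. */
static void poly1305_mac(u8 tag[POLY1305_DIGEST_SIZE],
			 const u8 *msg, size_t len,
			 const u8 key[POLY1305_KEY_SIZE])
{
	crypto_onetimeauth_poly1305(tag, msg, len, key);
}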
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
deleted file mode 100644
index c94d3eb1..00000000
--- a/include/crypto/sha.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Common values for SHA algorithms
- */
-
-#ifndef _CRYPTO_SHA_H
-#define _CRYPTO_SHA_H
-
-#include <linux/types.h>
-
-#define SHA1_DIGEST_SIZE 20
-#define SHA1_BLOCK_SIZE 64
-
-#define SHA224_DIGEST_SIZE 28
-#define SHA224_BLOCK_SIZE 64
-
-#define SHA256_DIGEST_SIZE 32
-#define SHA256_BLOCK_SIZE 64
-
-#define SHA384_DIGEST_SIZE 48
-#define SHA384_BLOCK_SIZE 128
-
-#define SHA512_DIGEST_SIZE 64
-#define SHA512_BLOCK_SIZE 128
-
-#define SHA1_H0 0x67452301UL
-#define SHA1_H1 0xefcdab89UL
-#define SHA1_H2 0x98badcfeUL
-#define SHA1_H3 0x10325476UL
-#define SHA1_H4 0xc3d2e1f0UL
-
-#define SHA224_H0 0xc1059ed8UL
-#define SHA224_H1 0x367cd507UL
-#define SHA224_H2 0x3070dd17UL
-#define SHA224_H3 0xf70e5939UL
-#define SHA224_H4 0xffc00b31UL
-#define SHA224_H5 0x68581511UL
-#define SHA224_H6 0x64f98fa7UL
-#define SHA224_H7 0xbefa4fa4UL
-
-#define SHA256_H0 0x6a09e667UL
-#define SHA256_H1 0xbb67ae85UL
-#define SHA256_H2 0x3c6ef372UL
-#define SHA256_H3 0xa54ff53aUL
-#define SHA256_H4 0x510e527fUL
-#define SHA256_H5 0x9b05688cUL
-#define SHA256_H6 0x1f83d9abUL
-#define SHA256_H7 0x5be0cd19UL
-
-#define SHA384_H0 0xcbbb9d5dc1059ed8ULL
-#define SHA384_H1 0x629a292a367cd507ULL
-#define SHA384_H2 0x9159015a3070dd17ULL
-#define SHA384_H3 0x152fecd8f70e5939ULL
-#define SHA384_H4 0x67332667ffc00b31ULL
-#define SHA384_H5 0x8eb44a8768581511ULL
-#define SHA384_H6 0xdb0c2e0d64f98fa7ULL
-#define SHA384_H7 0x47b5481dbefa4fa4ULL
-
-#define SHA512_H0 0x6a09e667f3bcc908ULL
-#define SHA512_H1 0xbb67ae8584caa73bULL
-#define SHA512_H2 0x3c6ef372fe94f82bULL
-#define SHA512_H3 0xa54ff53a5f1d36f1ULL
-#define SHA512_H4 0x510e527fade682d1ULL
-#define SHA512_H5 0x9b05688c2b3e6c1fULL
-#define SHA512_H6 0x1f83d9abfb41bd6bULL
-#define SHA512_H7 0x5be0cd19137e2179ULL
-
-extern const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE];
-
-extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE];
-
-extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];
-
-struct sha1_state {
- u32 state[SHA1_DIGEST_SIZE / 4];
- u64 count;
- u8 buffer[SHA1_BLOCK_SIZE];
-};
-
-struct sha256_state {
- u32 state[SHA256_DIGEST_SIZE / 4];
- u64 count;
- u8 buf[SHA256_BLOCK_SIZE];
-};
-
-struct sha512_state {
- u64 state[SHA512_DIGEST_SIZE / 8];
- u64 count[2];
- u8 buf[SHA512_BLOCK_SIZE];
-};
-
-struct shash_desc;
-
-extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
-extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-
-extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len);
-
-extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *hash);
-#endif
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
deleted file mode 100644
index 01b002de..00000000
--- a/include/crypto/sha1_base.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * sha1_base.h - core logic for SHA-1 implementations
- *
- * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
-#include <linux/byteorder.h>
-#include <linux/crypto.h>
-#include <linux/module.h>
-
-#include <asm/unaligned.h>
-
-typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);
-
-static inline int sha1_base_init(struct shash_desc *desc)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA1_H0;
- sctx->state[1] = SHA1_H1;
- sctx->state[2] = SHA1_H2;
- sctx->state[3] = SHA1_H3;
- sctx->state[4] = SHA1_H4;
- sctx->count = 0;
-
- return 0;
-}
-
-static inline int sha1_base_do_update(struct shash_desc *desc,
- const u8 *data,
- unsigned int len,
- sha1_block_fn *block_fn)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
- sctx->count += len;
-
- if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) {
- int blocks;
-
- if (partial) {
- int p = SHA1_BLOCK_SIZE - partial;
-
- memcpy(sctx->buffer + partial, data, p);
- data += p;
- len -= p;
-
- block_fn(sctx, sctx->buffer, 1);
- }
-
- blocks = len / SHA1_BLOCK_SIZE;
- len %= SHA1_BLOCK_SIZE;
-
- if (blocks) {
- block_fn(sctx, data, blocks);
- data += blocks * SHA1_BLOCK_SIZE;
- }
- partial = 0;
- }
- if (len)
- memcpy(sctx->buffer + partial, data, len);
-
- return 0;
-}
-
-static inline int sha1_base_do_finalize(struct shash_desc *desc,
- sha1_block_fn *block_fn)
-{
- const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
- struct sha1_state *sctx = shash_desc_ctx(desc);
- __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
- unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
-
- sctx->buffer[partial++] = 0x80;
- if (partial > bit_offset) {
- memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial);
- partial = 0;
-
- block_fn(sctx, sctx->buffer, 1);
- }
-
- memset(sctx->buffer + partial, 0x0, bit_offset - partial);
- *bits = cpu_to_be64(sctx->count << 3);
- block_fn(sctx, sctx->buffer, 1);
-
- return 0;
-}
-
-static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
- __be32 *digest = (__be32 *)out;
- int i;
-
- for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
- put_unaligned_be32(sctx->state[i], digest++);
-
- *sctx = (struct sha1_state){};
- return 0;
-}
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
new file mode 100644
index 00000000..a7a2ee45
--- /dev/null
+++ b/include/keys/user-type.h
@@ -0,0 +1,6 @@
+#ifndef _KEYS_USER_TYPE_H
+#define _KEYS_USER_TYPE_H
+
+#include <linux/key.h>
+
+#endif /* _KEYS_USER_TYPE_H */
diff --git a/include/linux/bcache.h b/include/linux/bcache.h
index f09a44a6..4179f8dd 100644
--- a/include/linux/bcache.h
+++ b/include/linux/bcache.h
@@ -102,9 +102,17 @@ struct bch_val {
__u64 __nothing[0];
};
-struct bkey {
- __u64 _data[0];
+struct bversion {
+#if defined(__LITTLE_ENDIAN)
+ __u64 lo;
+ __u32 hi;
+#elif defined(__BIG_ENDIAN)
+ __u32 hi;
+ __u64 lo;
+#endif
+} __attribute__((packed, aligned(4)));
+struct bkey {
/* Size of combined key and value, in u64s */
__u8 u64s;
@@ -125,13 +133,13 @@ struct bkey {
#if defined(__LITTLE_ENDIAN)
__u8 pad[1];
- __u32 version;
+ struct bversion version;
__u32 size; /* extent size, in sectors */
struct bpos p;
#elif defined(__BIG_ENDIAN)
struct bpos p;
__u32 size; /* extent size, in sectors */
- __u32 version;
+ struct bversion version;
__u8 pad[1];
#endif
@@ -184,7 +192,8 @@ enum bch_bkey_fields {
BKEY_FIELD_OFFSET,
BKEY_FIELD_SNAPSHOT,
BKEY_FIELD_SIZE,
- BKEY_FIELD_VERSION,
+ BKEY_FIELD_VERSION_HI,
+ BKEY_FIELD_VERSION_LO,
BKEY_NR_FIELDS,
};
@@ -200,14 +209,25 @@ enum bch_bkey_fields {
bkey_format_field(OFFSET, p.offset), \
bkey_format_field(SNAPSHOT, p.snapshot), \
bkey_format_field(SIZE, size), \
- bkey_format_field(VERSION, version), \
+ bkey_format_field(VERSION_HI, version.hi), \
+ bkey_format_field(VERSION_LO, version.lo), \
}, \
})
/* bkey with inline value */
struct bkey_i {
- struct bkey k;
- struct bch_val v;
+ __u64 _data[0];
+
+ union {
+ struct {
+ /* Size of combined key and value, in u64s */
+ __u8 u64s;
+ };
+ struct {
+ struct bkey k;
+ struct bch_val v;
+ };
+ };
};
#ifndef __cplusplus
@@ -358,20 +378,47 @@ BKEY_VAL_TYPE(cookie, KEY_TYPE_COOKIE);
* is neither checksummed nor compressed.
*/
+/* 128 bits, sufficient for cryptographic MACs: */
+struct bch_csum {
+ __le64 lo;
+ __le64 hi;
+} __attribute__((packed, aligned(8)));
+
+#define BCH_CSUM_NONE 0U
+#define BCH_CSUM_CRC32C 1U
+#define BCH_CSUM_CRC64 2U
+#define BCH_CSUM_CHACHA20_POLY1305_80 3U
+#define BCH_CSUM_CHACHA20_POLY1305_128 4U
+#define BCH_CSUM_NR 5U
+
+static inline _Bool bch_csum_type_is_encryption(unsigned type)
+{
+ switch (type) {
+ case BCH_CSUM_CHACHA20_POLY1305_80:
+ case BCH_CSUM_CHACHA20_POLY1305_128:
+ return true;
+ default:
+ return false;
+ }
+}
+
enum bch_extent_entry_type {
- BCH_EXTENT_ENTRY_crc32 = 0,
- BCH_EXTENT_ENTRY_ptr = 1,
+ BCH_EXTENT_ENTRY_ptr = 0,
+ BCH_EXTENT_ENTRY_crc32 = 1,
BCH_EXTENT_ENTRY_crc64 = 2,
+ BCH_EXTENT_ENTRY_crc128 = 3,
};
-#define BCH_EXTENT_ENTRY_MAX 3
+#define BCH_EXTENT_ENTRY_MAX 4
+/* Compressed/uncompressed sizes are stored biased by 1: */
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u32 type:1,
+ __u32 type:2,
+ _compressed_size:7,
+ _uncompressed_size:7,
offset:7,
- compressed_size:8,
- uncompressed_size:8,
+ _unused:1,
csum_type:4,
compression_type:4;
__u32 csum;
@@ -379,45 +426,80 @@ struct bch_extent_crc32 {
__u32 csum;
__u32 compression_type:4,
csum_type:4,
- uncompressed_size:8,
- compressed_size:8,
+ _unused:1,
offset:7,
- type:1;
+ _uncompressed_size:7,
+ _compressed_size:7,
+ type:2;
#endif
} __attribute__((packed, aligned(8)));
-#define CRC32_EXTENT_SIZE_MAX (1U << 7)
-
-/* 64k */
-#define BCH_COMPRESSED_EXTENT_MAX 128U
+#define CRC32_SIZE_MAX (1U << 7)
+#define CRC32_NONCE_MAX 0
struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:3,
- offset:17,
- compressed_size:18,
- uncompressed_size:18,
+ _compressed_size:9,
+ _uncompressed_size:9,
+ offset:9,
+ nonce:10,
+ csum_type:4,
+ compression_type:4,
+ csum_hi:16;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u64 csum_hi:16,
+ compression_type:4,
+ csum_type:4,
+ nonce:10,
+ offset:9,
+ _uncompressed_size:9,
+ _compressed_size:9,
+ type:3;
+#endif
+ __u64 csum_lo;
+} __attribute__((packed, aligned(8)));
+
+#define CRC64_SIZE_MAX (1U << 9)
+#define CRC64_NONCE_MAX ((1U << 10) - 1)
+
+struct bch_extent_crc128 {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u64 type:4,
+ _compressed_size:13,
+ _uncompressed_size:13,
+ offset:13,
+ nonce:13,
csum_type:4,
compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 compression_type:4,
csum_type:4,
- uncompressed_size:18,
- compressed_size:18,
- offset:17,
+ nonce:14,
+ offset:13,
+ _uncompressed_size:13,
+ _compressed_size:13,
type:3;
#endif
- __u64 csum;
+ struct bch_csum csum;
} __attribute__((packed, aligned(8)));
-#define CRC64_EXTENT_SIZE_MAX (1U << 17)
+#define CRC128_SIZE_MAX (1U << 13)
+#define CRC128_NONCE_MAX ((1U << 13) - 1)
+
+/*
+ * Max size of an extent that may require bouncing to read or write
+ * (checksummed, compressed): 64k
+ */
+#define BCH_ENCODED_EXTENT_MAX 128U
/*
* @reservation - pointer hasn't been written to, just reserved
*/
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u64 type:2,
+ __u64 type:1,
+ cached:1,
erasure_coded:1,
reservation:1,
offset:44, /* 8 petabytes */
@@ -429,10 +511,25 @@ struct bch_extent_ptr {
offset:44,
reservation:1,
erasure_coded:1,
- type:2;
+ cached:1,
+ type:1;
#endif
} __attribute__((packed, aligned(8)));
+struct bch_extent_reservation {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u64 type:5,
+ unused:23,
+ replicas:4,
+ generation:32;
+#elif defined (__BIG_ENDIAN_BITFIELD)
+ __u64 generation:32,
+ replicas:4,
+ unused:23,
+ type:5;
+#endif
+};
+
union bch_extent_entry {
#if defined(__LITTLE_ENDIAN) || __BITS_PER_LONG == 64
unsigned long type;
@@ -446,6 +543,7 @@ union bch_extent_entry {
#endif
struct bch_extent_crc32 crc32;
struct bch_extent_crc64 crc64;
+ struct bch_extent_crc128 crc128;
struct bch_extent_ptr ptr;
};
@@ -473,9 +571,18 @@ struct bch_extent {
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(extent, BCH_EXTENT);
+struct bch_reservation {
+ struct bch_val v;
+
+ __le32 generation;
+ __u8 nr_replicas;
+ __u8 pad[3];
+} __attribute__((packed, aligned(8)));
+BKEY_VAL_TYPE(reservation, BCH_RESERVATION);
+
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX\
- ((sizeof(struct bch_extent_crc64) + \
+ ((sizeof(struct bch_extent_crc128) + \
sizeof(struct bch_extent_ptr)) / sizeof(u64))
/* Maximum possible size of an entire extent value: */
@@ -506,28 +613,26 @@ enum bch_inode_types {
struct bch_inode {
struct bch_val v;
- __le16 i_mode;
- __le16 pad;
- __le32 i_flags;
-
- /* Nanoseconds */
- __le64 i_atime;
- __le64 i_ctime;
- __le64 i_mtime;
-
- __le64 i_size;
- __le64 i_sectors;
-
- __le32 i_uid;
- __le32 i_gid;
- __le32 i_nlink;
-
- __le32 i_dev;
-
__le64 i_hash_seed;
+ __le32 i_flags;
+ __le16 i_mode;
+ __u8 fields[0];
} __attribute__((packed));
BKEY_VAL_TYPE(inode, BCH_INODE_FS);
+#define BCH_INODE_FIELDS() \
+ BCH_INODE_FIELD(i_atime, 64) \
+ BCH_INODE_FIELD(i_ctime, 64) \
+ BCH_INODE_FIELD(i_mtime, 64) \
+ BCH_INODE_FIELD(i_otime, 64) \
+ BCH_INODE_FIELD(i_size, 64) \
+ BCH_INODE_FIELD(i_sectors, 64) \
+ BCH_INODE_FIELD(i_uid, 32) \
+ BCH_INODE_FIELD(i_gid, 32) \
+ BCH_INODE_FIELD(i_nlink, 32) \
+ BCH_INODE_FIELD(i_generation, 32) \
+ BCH_INODE_FIELD(i_dev, 32)
+
enum {
/*
* User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
@@ -544,9 +649,9 @@ enum {
/* not implemented yet: */
__BCH_INODE_HAS_XATTRS = 7, /* has xattrs in xattr btree */
-};
-LE32_BITMASK(INODE_STR_HASH_TYPE, struct bch_inode, i_flags, 28, 32);
+ /* bits 20+ reserved for packed fields below: */
+};
#define BCH_INODE_SYNC (1 << __BCH_INODE_SYNC)
#define BCH_INODE_IMMUTABLE (1 << __BCH_INODE_IMMUTABLE)
@@ -557,6 +662,9 @@ LE32_BITMASK(INODE_STR_HASH_TYPE, struct bch_inode, i_flags, 28, 32);
#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
#define BCH_INODE_HAS_XATTRS (1 << __BCH_INODE_HAS_XATTRS)
+LE32_BITMASK(INODE_STR_HASH, struct bch_inode, i_flags, 20, 24);
+LE32_BITMASK(INODE_NR_FIELDS, struct bch_inode, i_flags, 24, 32);
+
struct bch_inode_blockdev {
struct bch_val v;
@@ -574,6 +682,7 @@ BKEY_VAL_TYPE(inode_blockdev, BCH_INODE_BLOCKDEV);
/* Thin provisioned volume, or cache for another block device? */
LE64_BITMASK(CACHED_DEV, struct bch_inode_blockdev, i_flags, 0, 1)
+
/* Dirents */
/*
@@ -639,6 +748,7 @@ BKEY_VAL_TYPE(xattr, BCH_XATTR);
* Version 4: Backing device with data offset
* Version 5: All the incompat changes
* Version 6: Cache device UUIDs all in superblock, another incompat bset change
+ * Version 7: Encryption (expanded checksum fields), other random things
*/
#define BCACHE_SB_VERSION_CDEV_V0 0
#define BCACHE_SB_VERSION_BDEV 1
@@ -646,16 +756,15 @@ BKEY_VAL_TYPE(xattr, BCH_XATTR);
#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
#define BCACHE_SB_VERSION_CDEV_V2 5
#define BCACHE_SB_VERSION_CDEV_V3 6
-#define BCACHE_SB_VERSION_CDEV 6
-#define BCACHE_SB_MAX_VERSION 6
+#define BCACHE_SB_VERSION_CDEV_V4 7
+#define BCACHE_SB_VERSION_CDEV 7
+#define BCACHE_SB_MAX_VERSION 7
-#define SB_SECTOR 8
-#define SB_LABEL_SIZE 32
-#define MAX_CACHES_PER_SET 64
-
-#define BDEV_DATA_START_DEFAULT 16 /* sectors */
+#define BCH_SB_SECTOR 8
+#define BCH_SB_LABEL_SIZE 32
+#define BCH_SB_MEMBERS_MAX 64 /* XXX kill */
-struct cache_member {
+struct bch_member {
uuid_le uuid;
__le64 nbuckets; /* device size */
__le16 first_bucket; /* index of first bucket used */
@@ -663,164 +772,257 @@ struct cache_member {
__le32 pad;
__le64 last_mount; /* time_t */
- __le64 f1;
- __le64 f2;
+ __le64 flags[2];
};
-LE64_BITMASK(CACHE_STATE, struct cache_member, f1, 0, 4)
-#define CACHE_ACTIVE 0U
-#define CACHE_RO 1U
-#define CACHE_FAILED 2U
-#define CACHE_SPARE 3U
-#define CACHE_STATE_NR 4U
+LE64_BITMASK(BCH_MEMBER_STATE, struct bch_member, flags[0], 0, 4)
+LE64_BITMASK(BCH_MEMBER_TIER, struct bch_member, flags[0], 4, 8)
+LE64_BITMASK(BCH_MEMBER_HAS_METADATA, struct bch_member, flags[0], 8, 9)
+LE64_BITMASK(BCH_MEMBER_HAS_DATA, struct bch_member, flags[0], 9, 10)
+LE64_BITMASK(BCH_MEMBER_REPLACEMENT, struct bch_member, flags[0], 10, 14)
+LE64_BITMASK(BCH_MEMBER_DISCARD, struct bch_member, flags[0], 14, 15);
-LE64_BITMASK(CACHE_TIER, struct cache_member, f1, 4, 8)
-#define CACHE_TIERS 4U
+#if 0
+LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20);
+LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
+#endif
-LE64_BITMASK(CACHE_REPLICATION_SET, struct cache_member, f1, 8, 16)
+enum bch_member_state {
+ BCH_MEMBER_STATE_ACTIVE = 0,
+ BCH_MEMBER_STATE_RO = 1,
+ BCH_MEMBER_STATE_FAILED = 2,
+ BCH_MEMBER_STATE_SPARE = 3,
+ BCH_MEMBER_STATE_NR = 4,
+};
-LE64_BITMASK(CACHE_HAS_METADATA, struct cache_member, f1, 24, 25)
-LE64_BITMASK(CACHE_HAS_DATA, struct cache_member, f1, 25, 26)
+#define BCH_TIER_MAX 4U
-LE64_BITMASK(CACHE_REPLACEMENT, struct cache_member, f1, 26, 30)
-#define CACHE_REPLACEMENT_LRU 0U
-#define CACHE_REPLACEMENT_FIFO 1U
-#define CACHE_REPLACEMENT_RANDOM 2U
-#define CACHE_REPLACEMENT_NR 3U
+enum cache_replacement {
+ CACHE_REPLACEMENT_LRU = 0,
+ CACHE_REPLACEMENT_FIFO = 1,
+ CACHE_REPLACEMENT_RANDOM = 2,
+ CACHE_REPLACEMENT_NR = 3,
+};
-LE64_BITMASK(CACHE_DISCARD, struct cache_member, f1, 30, 31);
+struct bch_sb_layout {
+ uuid_le magic; /* bcache superblock UUID */
+ __u8 layout_type;
+ __u8 sb_max_size_bits; /* base 2 of 512 byte sectors */
+ __u8 nr_superblocks;
+ __u8 pad[5];
+ __u64 sb_offset[61];
+} __attribute__((packed));
-LE64_BITMASK(CACHE_NR_READ_ERRORS, struct cache_member, f2, 0, 20);
-LE64_BITMASK(CACHE_NR_WRITE_ERRORS, struct cache_member, f2, 20, 40);
+#define BCH_SB_LAYOUT_SECTOR 7
-struct cache_sb {
- __le64 csum;
- __le64 offset; /* sector where this sb was written */
- __le64 version; /* of on disk format */
+struct bch_sb_field {
+ __u64 _data[0];
+ __le32 u64s;
+ __le32 type;
+};
- uuid_le magic; /* bcache superblock UUID */
+enum bch_sb_field_types {
+ BCH_SB_FIELD_journal = 0,
+ BCH_SB_FIELD_members = 1,
+ BCH_SB_FIELD_crypt = 2,
+ BCH_SB_FIELD_NR = 3,
+};
- /* Identifies this disk within the cache set: */
- uuid_le disk_uuid;
+struct bch_sb_field_journal {
+ struct bch_sb_field field;
+ __le64 buckets[0];
+};
- /*
- * Internal cache set UUID - xored with various magic numbers and thus
- * must never change:
- */
- union {
- uuid_le set_uuid;
- __le64 set_magic;
- };
+struct bch_sb_field_members {
+ struct bch_sb_field field;
+ struct bch_member members[0];
+};
+
+/* Crypto: */
- __u8 label[SB_LABEL_SIZE];
+struct nonce {
+ __le32 d[4];
+};
+
+struct bch_key {
+ __le64 key[4];
+};
+
+#define BCH_KEY_MAGIC \
+ (((u64) 'b' << 0)|((u64) 'c' << 8)| \
+ ((u64) 'h' << 16)|((u64) '*' << 24)| \
+ ((u64) '*' << 32)|((u64) 'k' << 40)| \
+ ((u64) 'e' << 48)|((u64) 'y' << 56))
+
+struct bch_encrypted_key {
+ __le64 magic;
+ struct bch_key key;
+};
+
+/*
+ * If this field is present in the superblock, it stores an encryption key which
+ * is used encrypt all other data/metadata. The key will normally be encrypted
+ * with the key userspace provides, but if encryption has been turned off we'll
+ * just store the master key unencrypted in the superblock so we can access the
+ * previously encrypted data.
+ */
+struct bch_sb_field_crypt {
+ struct bch_sb_field field;
__le64 flags;
+ __le64 kdf_flags;
+ struct bch_encrypted_key key;
+};
- /* Incremented each time superblock is written: */
- __le64 seq;
+LE64_BITMASK(BCH_CRYPT_KDF_TYPE, struct bch_sb_field_crypt, flags, 0, 4);
- /*
- * User visible UUID for identifying the cache set the user is allowed
- * to change:
- */
- uuid_le user_uuid;
+enum bch_kdf_types {
+ BCH_KDF_SCRYPT = 0,
+ BCH_KDF_NR = 1,
+};
- __le64 flags2;
- __le64 pad1[5];
+/* stored as base 2 log of scrypt params: */
+LE64_BITMASK(BCH_KDF_SCRYPT_N, struct bch_sb_field_crypt, kdf_flags, 0, 16);
+LE64_BITMASK(BCH_KDF_SCRYPT_R, struct bch_sb_field_crypt, kdf_flags, 16, 32);
+LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48);
- /* Number of cache_member entries: */
- __u8 nr_in_set;
+/*
+ * @offset - sector where this sb was written
+ * @version - on disk format version
+ * @magic - identifies as a bcache superblock (BCACHE_MAGIC)
+ * @seq - incremented each time superblock is written
+ * @uuid - used for generating various magic numbers and identifying
+ * member devices, never changes
+ * @user_uuid - user visible UUID, may be changed
+ * @label - filesystem label
+ * @features - enabled incompatible features
+ */
+struct bch_sb {
+ struct bch_csum csum;
+ __le64 version;
+ uuid_le magic;
+ uuid_le uuid;
+ uuid_le user_uuid;
+ __u8 label[BCH_SB_LABEL_SIZE];
+ __le64 offset;
+ __le64 seq;
- /*
- * Index of this device - for PTR_DEV(), and also this device's
- * slot in the cache_member array:
- */
- __u8 nr_this_dev;
- __le16 pad2[3];
+ __le16 block_size;
+ __u8 dev_idx;
+ __u8 nr_devices;
+ __le32 u64s;
- __le16 block_size; /* sectors */
- __le16 pad3[6];
+ __le64 time_base_lo;
+ __le32 time_base_hi;
+ __le32 time_precision;
+
+ __le64 flags[8];
+ __le64 features[2];
+ __le64 compat[2];
- __le16 u64s; /* size of variable length portion */
+ struct bch_sb_layout layout;
union {
- struct cache_member members[0];
- /*
- * Journal buckets also in the variable length portion, after
- * the member info:
- */
- __le64 _data[0];
+ struct bch_sb_field start[0];
+ __le64 _data[0];
};
-};
+} __attribute__((packed, aligned(8)));
-/* XXX: rename CACHE_SET -> BCH_FS or something? */
+/*
+ * Flags:
+ * BCH_SB_INITIALIZED - set on first mount
+ * BCH_SB_CLEAN - did we shut down cleanly? Just a hint, doesn't affect
+ * behaviour of mount/recovery path:
+ * BCH_SB_INODE_32BIT - limit inode numbers to 32 bits
+ * BCH_SB_128_BIT_MACS - 128 bit macs instead of 80
+ * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
+ * DATA/META_CSUM_TYPE. Also indicates encryption
+ * algorithm in use, if/when we get more than one
+ */
-LE64_BITMASK(CACHE_SET_SYNC, struct cache_sb, flags, 0, 1);
+LE64_BITMASK(BCH_SB_INITIALIZED, struct bch_sb, flags[0], 0, 1);
+LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2);
+LE64_BITMASK(BCH_SB_CSUM_TYPE, struct bch_sb, flags[0], 2, 8);
+LE64_BITMASK(BCH_SB_ERROR_ACTION, struct bch_sb, flags[0], 8, 12);
-LE64_BITMASK(CACHE_SET_ERROR_ACTION, struct cache_sb, flags, 1, 4);
-#define BCH_ON_ERROR_CONTINUE 0U
-#define BCH_ON_ERROR_RO 1U
-#define BCH_ON_ERROR_PANIC 2U
-#define BCH_NR_ERROR_ACTIONS 3U
+LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE, struct bch_sb, flags[0], 12, 28);
-LE64_BITMASK(CACHE_SET_META_REPLICAS_WANT,struct cache_sb, flags, 4, 8);
-LE64_BITMASK(CACHE_SET_DATA_REPLICAS_WANT,struct cache_sb, flags, 8, 12);
+LE64_BITMASK(BCH_SB_GC_RESERVE, struct bch_sb, flags[0], 28, 33);
+LE64_BITMASK(BCH_SB_ROOT_RESERVE, struct bch_sb, flags[0], 33, 40);
-#define BCH_REPLICAS_MAX 4U
+LE64_BITMASK(BCH_SB_META_CSUM_TYPE, struct bch_sb, flags[0], 40, 44);
+LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE, struct bch_sb, flags[0], 44, 48);
-LE64_BITMASK(CACHE_SB_CSUM_TYPE, struct cache_sb, flags, 12, 16);
+LE64_BITMASK(BCH_SB_META_REPLICAS_WANT, struct bch_sb, flags[0], 48, 52);
+LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT, struct bch_sb, flags[0], 52, 56);
-LE64_BITMASK(CACHE_SET_META_PREFERRED_CSUM_TYPE,struct cache_sb, flags, 16, 20);
-#define BCH_CSUM_NONE 0U
-#define BCH_CSUM_CRC32C 1U
-#define BCH_CSUM_CRC64 2U
-#define BCH_CSUM_NR 3U
+LE64_BITMASK(BCH_SB_META_REPLICAS_HAVE, struct bch_sb, flags[0], 56, 60);
+LE64_BITMASK(BCH_SB_DATA_REPLICAS_HAVE, struct bch_sb, flags[0], 60, 64);
-LE64_BITMASK(CACHE_SET_BTREE_NODE_SIZE, struct cache_sb, flags, 20, 36);
+LE64_BITMASK(BCH_SB_STR_HASH_TYPE, struct bch_sb, flags[1], 0, 4);
+LE64_BITMASK(BCH_SB_COMPRESSION_TYPE, struct bch_sb, flags[1], 4, 8);
+LE64_BITMASK(BCH_SB_INODE_32BIT, struct bch_sb, flags[1], 8, 9);
-LE64_BITMASK(CACHE_SET_META_REPLICAS_HAVE,struct cache_sb, flags, 36, 40);
-LE64_BITMASK(CACHE_SET_DATA_REPLICAS_HAVE,struct cache_sb, flags, 40, 44);
+LE64_BITMASK(BCH_SB_128_BIT_MACS, struct bch_sb, flags[1], 9, 10);
+LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE, struct bch_sb, flags[1], 10, 14);
+LE64_BITMASK(BCH_SB_JOURNAL_ENTRY_SIZE, struct bch_sb, flags[1], 14, 20);
-LE64_BITMASK(CACHE_SET_STR_HASH_TYPE,struct cache_sb, flags, 44, 48);
-enum bch_str_hash_type {
- BCH_STR_HASH_CRC32C = 0,
- BCH_STR_HASH_CRC64 = 1,
- BCH_STR_HASH_SIPHASH = 2,
- BCH_STR_HASH_SHA1 = 3,
+/* Features: */
+enum bch_sb_features {
+ BCH_FEATURE_LZ4 = 0,
+ BCH_FEATURE_GZIP = 1,
};
-#define BCH_STR_HASH_NR 4
+/* options: */
-LE64_BITMASK(CACHE_SET_DATA_PREFERRED_CSUM_TYPE, struct cache_sb, flags, 48, 52);
+#define BCH_REPLICAS_MAX 4U
-LE64_BITMASK(CACHE_SET_COMPRESSION_TYPE, struct cache_sb, flags, 52, 56);
-enum {
- BCH_COMPRESSION_NONE = 0,
- BCH_COMPRESSION_LZ4 = 1,
- BCH_COMPRESSION_GZIP = 2,
+#if 0
+#define BCH_ERROR_ACTIONS() \
+ x(BCH_ON_ERROR_CONTINUE, 0, "continue") \
+ x(BCH_ON_ERROR_RO, 1, "remount-ro") \
+ x(BCH_ON_ERROR_PANIC, 2, "panic") \
+ x(BCH_NR_ERROR_ACTIONS, 3, NULL)
+
+enum bch_error_actions {
+#define x(_opt, _nr, _str) _opt = _nr,
+ BCH_ERROR_ACTIONS()
+#undef x
};
+#endif
-#define BCH_COMPRESSION_NR 3U
-
-/* Limit inode numbers to 32 bits: */
-LE64_BITMASK(CACHE_INODE_32BIT, struct cache_sb, flags, 56, 57);
-
-LE64_BITMASK(CACHE_SET_GC_RESERVE, struct cache_sb, flags, 57, 63);
-
-LE64_BITMASK(CACHE_SET_ROOT_RESERVE, struct cache_sb, flags2, 0, 6);
+enum bch_error_actions {
+ BCH_ON_ERROR_CONTINUE = 0,
+ BCH_ON_ERROR_RO = 1,
+ BCH_ON_ERROR_PANIC = 2,
+ BCH_NR_ERROR_ACTIONS = 3,
+};
-/*
- * Did we shut down cleanly? Just a hint, doesn't affect behaviour of
- * mount/recovery path:
- */
-LE64_BITMASK(CACHE_SET_CLEAN, struct cache_sb, flags2, 6, 7);
+enum bch_csum_opts {
+ BCH_CSUM_OPT_NONE = 0,
+ BCH_CSUM_OPT_CRC32C = 1,
+ BCH_CSUM_OPT_CRC64 = 2,
+ BCH_CSUM_OPT_NR = 3,
+};
-LE64_BITMASK(CACHE_SET_JOURNAL_ENTRY_SIZE, struct cache_sb, flags2, 7, 15);
+enum bch_str_hash_opts {
+ BCH_STR_HASH_CRC32C = 0,
+ BCH_STR_HASH_CRC64 = 1,
+ BCH_STR_HASH_SIPHASH = 2,
+ BCH_STR_HASH_NR = 3,
+};
-/* options: */
+enum bch_compression_opts {
+ BCH_COMPRESSION_NONE = 0,
+ BCH_COMPRESSION_LZ4 = 1,
+ BCH_COMPRESSION_GZIP = 2,
+ BCH_COMPRESSION_NR = 3,
+};
/**
- * CACHE_SET_OPT(name, choices, min, max, sb_option, sysfs_writeable)
+ * BCH_OPT(name, choices, min, max, sb_option, sysfs_writeable)
*
* @name - name of mount option, sysfs attribute, and struct cache_set_opts
* member
@@ -838,56 +1040,60 @@ LE64_BITMASK(CACHE_SET_JOURNAL_ENTRY_SIZE, struct cache_sb, flags2, 7, 15);
* @sysfs_writeable - if true, option will be modifiable at runtime via sysfs
*/
-#define CACHE_SET_SB_OPTS() \
- CACHE_SET_OPT(errors, \
- bch_error_actions, \
- 0, BCH_NR_ERROR_ACTIONS, \
- CACHE_SET_ERROR_ACTION, \
- true) \
- CACHE_SET_OPT(metadata_replicas, \
- bch_uint_opt, \
- 0, BCH_REPLICAS_MAX, \
- CACHE_SET_META_REPLICAS_WANT, \
- false) \
- CACHE_SET_OPT(data_replicas, \
- bch_uint_opt, \
- 0, BCH_REPLICAS_MAX, \
- CACHE_SET_DATA_REPLICAS_WANT, \
- false) \
- CACHE_SET_OPT(metadata_checksum, \
- bch_csum_types, \
- 0, BCH_CSUM_NR, \
- CACHE_SET_META_PREFERRED_CSUM_TYPE, \
- true) \
- CACHE_SET_OPT(data_checksum, \
- bch_csum_types, \
- 0, BCH_CSUM_NR, \
- CACHE_SET_DATA_PREFERRED_CSUM_TYPE, \
- true) \
- CACHE_SET_OPT(compression, \
- bch_compression_types, \
- 0, BCH_COMPRESSION_NR, \
- CACHE_SET_COMPRESSION_TYPE, \
- true) \
- CACHE_SET_OPT(str_hash, \
- bch_str_hash_types, \
- 0, BCH_STR_HASH_NR, \
- CACHE_SET_STR_HASH_TYPE, \
- true) \
- CACHE_SET_OPT(inodes_32bit, \
- bch_bool_opt, 0, 2, \
- CACHE_INODE_32BIT, \
- true) \
- CACHE_SET_OPT(gc_reserve_percent, \
- bch_uint_opt, \
- 5, 21, \
- CACHE_SET_GC_RESERVE, \
- false) \
- CACHE_SET_OPT(root_reserve_percent, \
- bch_uint_opt, \
- 0, 21, \
- CACHE_SET_ROOT_RESERVE, \
- false)
+#define BCH_SB_OPTS() \
+ BCH_OPT(errors, \
+ bch_error_actions, \
+ 0, BCH_NR_ERROR_ACTIONS, \
+ BCH_SB_ERROR_ACTION, \
+ true) \
+ BCH_OPT(metadata_replicas, \
+ bch_uint_opt, \
+ 0, BCH_REPLICAS_MAX, \
+ BCH_SB_META_REPLICAS_WANT, \
+ false) \
+ BCH_OPT(data_replicas, \
+ bch_uint_opt, \
+ 0, BCH_REPLICAS_MAX, \
+ BCH_SB_DATA_REPLICAS_WANT, \
+ false) \
+ BCH_OPT(metadata_checksum, \
+ bch_csum_types, \
+ 0, BCH_CSUM_OPT_NR, \
+ BCH_SB_META_CSUM_TYPE, \
+ true) \
+ BCH_OPT(data_checksum, \
+ bch_csum_types, \
+ 0, BCH_CSUM_OPT_NR, \
+ BCH_SB_DATA_CSUM_TYPE, \
+ true) \
+ BCH_OPT(compression, \
+ bch_compression_types, \
+ 0, BCH_COMPRESSION_NR, \
+ BCH_SB_COMPRESSION_TYPE, \
+ true) \
+ BCH_OPT(str_hash, \
+ bch_str_hash_types, \
+ 0, BCH_STR_HASH_NR, \
+ BCH_SB_STR_HASH_TYPE, \
+ true) \
+ BCH_OPT(inodes_32bit, \
+ bch_bool_opt, 0, 2, \
+ BCH_SB_INODE_32BIT, \
+ true) \
+ BCH_OPT(gc_reserve_percent, \
+ bch_uint_opt, \
+ 5, 21, \
+ BCH_SB_GC_RESERVE, \
+ false) \
+ BCH_OPT(root_reserve_percent, \
+ bch_uint_opt, \
+ 0, 100, \
+ BCH_SB_ROOT_RESERVE, \
+ false) \
+ BCH_OPT(wide_macs, \
+ bch_bool_opt, 0, 2, \
+ BCH_SB_128_BIT_MACS, \
+ true)
/* backing device specific stuff: */
@@ -908,7 +1114,7 @@ struct backingdev_sb {
uuid_le set_uuid;
__le64 set_magic;
};
- __u8 label[SB_LABEL_SIZE];
+ __u8 label[BCH_SB_LABEL_SIZE];
__le64 flags;
@@ -947,15 +1153,7 @@ LE64_BITMASK(BDEV_STATE, struct backingdev_sb, flags, 61, 63);
#define BDEV_STATE_DIRTY 2U
#define BDEV_STATE_STALE 3U
-static inline unsigned bch_journal_buckets_offset(struct cache_sb *sb)
-{
- return sb->nr_in_set * (sizeof(struct cache_member) / sizeof(__u64));
-}
-
-static inline unsigned bch_nr_journal_buckets(struct cache_sb *sb)
-{
- return __le16_to_cpu(sb->u64s) - bch_journal_buckets_offset(sb);
-}
+#define BDEV_DATA_START_DEFAULT 16 /* sectors */
static inline _Bool __SB_IS_BDEV(__u64 version)
{
@@ -963,7 +1161,7 @@ static inline _Bool __SB_IS_BDEV(__u64 version)
|| version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
}
-static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
+static inline _Bool SB_IS_BDEV(const struct bch_sb *sb)
{
return __SB_IS_BDEV(sb->version);
}
@@ -981,29 +1179,33 @@ static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
#define BCACHE_STATFS_MAGIC 0xca451a4e
-#define BCACHE_SB_MAGIC 0xca451a4ef67385c6ULL
-#define BCACHE_SB_MAGIC2 0x816dba487ff56582ULL
-#define JSET_MAGIC 0x245235c1a3625032ULL
-#define PSET_MAGIC 0x6750e15f87337f91ULL
-#define BSET_MAGIC 0x90135c78b99e07f5ULL
+#define JSET_MAGIC __cpu_to_le64(0x245235c1a3625032ULL)
+#define PSET_MAGIC __cpu_to_le64(0x6750e15f87337f91ULL)
+#define BSET_MAGIC __cpu_to_le64(0x90135c78b99e07f5ULL)
-static inline __u64 jset_magic(struct cache_sb *sb)
+static inline __le64 __bch_sb_magic(struct bch_sb *sb)
{
- return __le64_to_cpu(sb->set_magic) ^ JSET_MAGIC;
+ __le64 ret;
+ memcpy(&ret, &sb->uuid, sizeof(ret));
+ return ret;
}
-static inline __u64 pset_magic(struct cache_sb *sb)
+static inline __u64 __jset_magic(struct bch_sb *sb)
{
- return __le64_to_cpu(sb->set_magic) ^ PSET_MAGIC;
+ return __le64_to_cpu(__bch_sb_magic(sb) ^ JSET_MAGIC);
}
-static inline __u64 bset_magic(struct cache_sb *sb)
+static inline __u64 __pset_magic(struct bch_sb *sb)
{
- return __le64_to_cpu(sb->set_magic) ^ BSET_MAGIC;
+ return __le64_to_cpu(__bch_sb_magic(sb) ^ PSET_MAGIC);
}
-/* Journal */
+static inline __u64 __bset_magic(struct bch_sb *sb)
+{
+ return __le64_to_cpu(__bch_sb_magic(sb) ^ BSET_MAGIC);
+}
+/* Journal */
#define BCACHE_JSET_VERSION_UUIDv1 1
#define BCACHE_JSET_VERSION_UUID 1 /* Always latest UUID format */
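
__bch_sb_magic() above is just the first 8 bytes of the superblock uuid, so each on-disk structure type gets a per-filesystem magic by XORing that value with its type constant. An illustrative check (jset_magic_ok is not a helper from this tree):

/* A journal entry is only valid for the filesystem whose uuid-derived
 * magic it carries; blocks from another filesystem never match. */
static inline _Bool jset_magic_ok(struct bch_sb *sb, const struct jset *j)
{
	return __le64_to_cpu(j->magic) == __jset_magic(sb);
}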
@@ -1054,24 +1256,29 @@ enum {
* version is for on disk format changes.
*/
struct jset {
- __le64 csum;
+ struct bch_csum csum;
+
__le64 magic;
+ __le64 seq;
__le32 version;
__le32 flags;
- /* Sequence number of oldest dirty journal entry */
- __le64 seq;
- __le64 last_seq;
+ __le32 u64s; /* size of d[] in u64s */
+
+ __u8 encrypted_start[0];
__le16 read_clock;
__le16 write_clock;
- __le32 u64s; /* size of d[] in u64s */
+
+ /* Sequence number of oldest dirty journal entry */
+ __le64 last_seq;
+
union {
struct jset_entry start[0];
__u64 _data[0];
};
-};
+} __attribute__((packed));
LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 4, 5);
@@ -1081,10 +1288,14 @@ LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 4, 5);
/* Bucket prios/gens */
struct prio_set {
- __le64 csum;
+ struct bch_csum csum;
+
__le64 magic;
- __le32 version;
- __le32 flags;
+ __le32 nonce[3];
+ __le16 version;
+ __le16 flags;
+
+ __u8 encrypted_start[0];
__le64 next_bucket;
@@ -1093,7 +1304,7 @@ struct prio_set {
__le16 write_prio;
__u8 gen;
} __attribute__((packed)) data[];
-};
+} __attribute__((packed));
LE32_BITMASK(PSET_CSUM_TYPE, struct prio_set, flags, 0, 4);
@@ -1155,28 +1366,49 @@ struct bset {
LE32_BITMASK(BSET_CSUM_TYPE, struct bset, flags, 0, 4);
-/* Only used in first bset */
-LE32_BITMASK(BSET_BTREE_LEVEL, struct bset, flags, 4, 8);
-
-LE32_BITMASK(BSET_BIG_ENDIAN, struct bset, flags, 8, 9);
+LE32_BITMASK(BSET_BIG_ENDIAN, struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
- struct bset, flags, 9, 10);
+ struct bset, flags, 5, 6);
struct btree_node {
- __le64 csum;
+ struct bch_csum csum;
__le64 magic;
+ /* this flags field is encrypted, unlike bset->flags: */
+ __le64 flags;
+
/* Closed interval: */
struct bpos min_key;
struct bpos max_key;
+ struct bch_extent_ptr ptr;
struct bkey_format format;
+ union {
struct bset keys;
+ struct {
+ __u8 pad[22];
+ __le16 u64s;
+ __u64 _data[0];
+
+ };
+ };
} __attribute__((packed));
+LE64_BITMASK(BTREE_NODE_ID, struct btree_node, flags, 0, 4);
+LE64_BITMASK(BTREE_NODE_LEVEL, struct btree_node, flags, 4, 8);
+
struct btree_node_entry {
- __le64 csum;
+ struct bch_csum csum;
+
+ union {
struct bset keys;
+ struct {
+ __u8 pad[22];
+ __le16 u64s;
+ __u64 _data[0];
+
+ };
+ };
} __attribute__((packed));
/* OBSOLETE */
@@ -1237,7 +1469,7 @@ struct jset_v0 {
__u16 btree_level;
__u16 pad[3];
- __u64 prio_bucket[MAX_CACHES_PER_SET];
+ __u64 prio_bucket[64];
union {
struct bkey start[0];
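
All the LE64_BITMASK()/LE32_BITMASK() users above follow one pattern: the macro generates a getter named after the mask and a SET_ variant, operating on a little-endian word. The macro definition is outside this diff, so the expansion below is an assumed sketch, shown for BCH_MEMBER_TIER (bits 4..8 of bch_member flags[0]):

/* Assumed expansion of LE64_BITMASK(BCH_MEMBER_TIER, struct bch_member,
 * flags[0], 4, 8); the real macro lives outside this diff. */
static inline __u64 BCH_MEMBER_TIER(const struct bch_member *m)
{
	return (__le64_to_cpu(m->flags[0]) >> 4) & ((1ULL << (8 - 4)) - 1);
}

static inline void SET_BCH_MEMBER_TIER(struct bch_member *m, __u64 v)
{
	__u64 f = __le64_to_cpu(m->flags[0]);

	f &= ~(((1ULL << (8 - 4)) - 1) << 4);		/* clear bits 4..7 */
	f |= (v & ((1ULL << (8 - 4)) - 1)) << 4;	/* store new tier */
	m->flags[0] = __cpu_to_le64(f);
}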
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index cb9ad24f..0dbeaaed 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -24,299 +24,81 @@
#include <linux/slab.h>
#include <linux/string.h>
-/*
- * Autoloaded crypto modules should only use a prefixed name to avoid allowing
- * arbitrary modules to be loaded. Loading from userspace may still need the
- * unprefixed names, so retains those aliases as well.
- * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
- * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
- * expands twice on the same line. Instead, use a separate base name for the
- * alias.
- */
-#define MODULE_ALIAS_CRYPTO(name) \
- __MODULE_INFO(alias, alias_userspace, name); \
- __MODULE_INFO(alias, alias_crypto, "crypto-" name)
-
-/*
- * Algorithm masks and types.
- */
#define CRYPTO_ALG_TYPE_MASK 0x0000000f
-#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
-#define CRYPTO_ALG_TYPE_AEAD 0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
-#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
-#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
-#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
-#define CRYPTO_ALG_TYPE_KPP 0x00000008
-#define CRYPTO_ALG_TYPE_RNG 0x0000000c
-#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
-#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
-#define CRYPTO_ALG_TYPE_HASH 0x0000000e
#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
-#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
-
-#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
-#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
-
#define CRYPTO_ALG_ASYNC 0x00000080
-/*
- * Set this bit if and only if the algorithm requires another algorithm of
- * the same type to handle corner cases.
- */
-#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
-
-/*
- * This bit is set for symmetric key ciphers that have already been wrapped
- * with a generic IV generator to prevent them from being wrapped again.
- */
-#define CRYPTO_ALG_GENIV 0x00000200
-
-/*
- * Set if the algorithm is an instance that is build from templates.
- */
-#define CRYPTO_ALG_INSTANCE 0x00000800
-
-/* Set this bit if the algorithm provided is hardware accelerated but
- * not available to userspace via instruction set or so.
- */
-#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000
-
-/*
- * Mark a cipher as a service implementation only usable by another
- * cipher and never by a normal user of the kernel crypto API
- */
-#define CRYPTO_ALG_INTERNAL 0x00002000
-
-/*
- * Transform masks and values (for crt_flags).
- */
-#define CRYPTO_TFM_REQ_MASK 0x000fff00
-#define CRYPTO_TFM_RES_MASK 0xfff00000
-
-#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
-#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
-#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
-#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
-#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
-#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
-#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
-#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
-
-/*
- * Miscellaneous stuff.
- */
#define CRYPTO_MAX_ALG_NAME 64
-/*
- * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
- * declaration) is used to ensure that the crypto_tfm context structure is
- * aligned correctly for the given architecture so that there are no alignment
- * faults for C data types. In particular, this is required on platforms such
- * as arm where pointers are 32-bit aligned but there are data types such as
- * u64 which require 64-bit alignment.
- */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
-
#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
struct scatterlist;
struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;
-struct skcipher_givcrypt_request;
struct blkcipher_desc {
- struct crypto_blkcipher *tfm;
- void *info;
- u32 flags;
-};
-
-struct cipher_desc {
- struct crypto_tfm *tfm;
- void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
- unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
- const u8 *src, unsigned int nbytes);
- void *info;
+ struct crypto_blkcipher *tfm;
+ void *info;
+ u32 flags;
};
struct blkcipher_alg {
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
+ unsigned keylen);
int (*encrypt)(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes);
+ unsigned nbytes);
int (*decrypt)(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes);
-
- const char *geniv;
-
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
-};
-
-struct cipher_alg {
- unsigned int cia_min_keysize;
- unsigned int cia_max_keysize;
- int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
- void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
- void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
-};
-
-struct compress_alg {
- int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen);
- int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
- unsigned int slen, u8 *dst, unsigned int *dlen);
+ unsigned nbytes);
};
-
#define cra_blkcipher cra_u.blkcipher
-#define cra_cipher cra_u.cipher
-#define cra_compress cra_u.compress
struct crypto_alg {
- struct list_head cra_list;
- struct list_head cra_users;
-
- u32 cra_flags;
- unsigned int cra_blocksize;
- unsigned int cra_ctxsize;
- unsigned int cra_alignmask;
-
- int cra_priority;
- atomic_t cra_refcnt;
+ struct list_head cra_list;
+ struct list_head cra_users;
- char cra_name[CRYPTO_MAX_ALG_NAME];
- char cra_driver_name[CRYPTO_MAX_ALG_NAME];
+ u32 cra_flags;
+ unsigned cra_ctxsize;
+ char cra_name[CRYPTO_MAX_ALG_NAME];
const struct crypto_type *cra_type;
union {
struct blkcipher_alg blkcipher;
- struct cipher_alg cipher;
- struct compress_alg compress;
} cra_u;
int (*cra_init)(struct crypto_tfm *tfm);
void (*cra_exit)(struct crypto_tfm *tfm);
- void (*cra_destroy)(struct crypto_alg *alg);
-
- struct module *cra_module;
} CRYPTO_MINALIGN_ATTR;
-/*
- * Algorithm registration interface.
- */
int crypto_register_alg(struct crypto_alg *alg);
-int crypto_unregister_alg(struct crypto_alg *alg);
-int crypto_register_algs(struct crypto_alg *algs, int count);
-int crypto_unregister_algs(struct crypto_alg *algs, int count);
-
-/*
- * Algorithm query interface.
- */
-int crypto_has_alg(const char *name, u32 type, u32 mask);
-
-/*
- * Transforms: user-instantiated objects which encapsulate algorithms
- * and core processing logic. Managed via crypto_alloc_*() and
- * crypto_free_*(), as well as the various helpers below.
- */
struct blkcipher_tfm {
- void *iv;
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
+ unsigned keylen);
int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
+ struct scatterlist *src, unsigned nbytes);
int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
+ struct scatterlist *src, unsigned nbytes);
};
-struct cipher_tfm {
- int (*cit_setkey)(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen);
- void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
- void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
-};
-
-struct compress_tfm {
- int (*cot_compress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
- int (*cot_decompress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-};
-
-#define crt_blkcipher crt_u.blkcipher
-#define crt_cipher crt_u.cipher
-#define crt_compress crt_u.compress
-
struct crypto_tfm {
+ u32 crt_flags;
- u32 crt_flags;
-
- union {
- struct blkcipher_tfm blkcipher;
- struct cipher_tfm cipher;
- struct compress_tfm compress;
- } crt_u;
+ struct blkcipher_tfm crt_blkcipher;
void (*exit)(struct crypto_tfm *tfm);
- struct crypto_alg *__crt_alg;
-
- void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
-struct crypto_blkcipher {
- struct crypto_tfm base;
-};
-
-struct crypto_cipher {
- struct crypto_tfm base;
-};
-
-struct crypto_comp {
- struct crypto_tfm base;
+ struct crypto_alg *__crt_alg;
+ void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};
-enum {
- CRYPTOA_UNSPEC,
- CRYPTOA_ALG,
- CRYPTOA_TYPE,
- CRYPTOA_U32,
- __CRYPTOA_MAX,
-};
-
-#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
-
-/* Maximum number of (rtattr) parameters for each template. */
-#define CRYPTO_MAX_ATTRS 32
-
-struct crypto_attr_alg {
- char name[CRYPTO_MAX_ALG_NAME];
-};
-
-struct crypto_attr_type {
- u32 type;
- u32 mask;
-};
-
-struct crypto_attr_u32 {
- u32 num;
-};
-
-/*
- * Transform user interface.
- */
-
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
@@ -325,110 +107,19 @@ static inline void crypto_free_tfm(struct crypto_tfm *tfm)
return crypto_destroy_tfm(tfm, tfm);
}
-int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
-
-/*
- * Transform helpers which query the underlying algorithm.
- */
-static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_name;
-}
-
-static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_driver_name;
-}
-
-static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_priority;
-}
-
static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}
-static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_blocksize;
-}
-
-static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
-{
- return tfm->__crt_alg->cra_alignmask;
-}
-
-static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
-{
- return tfm->crt_flags;
-}
-
-static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
-{
- tfm->crt_flags |= flags;
-}
-
-static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
-{
- tfm->crt_flags &= ~flags;
-}
-
static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
return tfm->__crt_ctx;
}
-static inline unsigned int crypto_tfm_ctx_alignment(void)
-{
- struct crypto_tfm *tfm;
- return __alignof__(tfm->__crt_ctx);
-}
-
-static inline u32 crypto_skcipher_type(u32 type)
-{
- type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- return type;
-}
-
-static inline u32 crypto_skcipher_mask(u32 mask)
-{
- mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
- mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
- return mask;
-}
-
-/**
- * DOC: Synchronous Block Cipher API
- *
- * The synchronous block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
- *
- * Synchronous calls, have a context in the tfm. But since a single tfm can be
- * used in multiple calls and in parallel, this info should not be changeable
- * (unless a lock is used). This applies, for example, to the symmetric key.
- * However, the IV is changeable, so there is an iv field in blkcipher_tfm
- * structure for synchronous blkcipher api. So, its the only state info that can
- * be kept for synchronous calls without using a big lock across a tfm.
- *
- * The block cipher API allows the use of a complete cipher, i.e. a cipher
- * consisting of a template (a block chaining mode) and a single block cipher
- * primitive (e.g. AES).
- *
- * The plaintext data buffer and the ciphertext data buffer are pointed to
- * by using scatter/gather lists. The cipher operation is performed
- * on all segments of the provided scatter/gather lists.
- *
- * The kernel crypto API supports a cipher operation "in-place" which means that
- * the caller may provide the same scatter/gather list for the plaintext and
- * cipher text. After the completion of the cipher operation, the plaintext
- * data is replaced with the ciphertext data in case of an encryption and vice
- * versa for a decryption. The caller must ensure that the scatter/gather lists
- * for the output data point to sufficiently large buffers, i.e. multiples of
- * the block size of the cipher.
- */
+struct crypto_blkcipher {
+ struct crypto_tfm base;
+};
static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
struct crypto_tfm *tfm)
@@ -443,20 +134,6 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast(
return __crypto_blkcipher_cast(tfm);
}
-/**
- * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * blkcipher cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for a block cipher. The returned struct
- * crypto_blkcipher is the cipher handle that is required for any subsequent
- * API invocation for that block cipher.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
const char *alg_name, u32 type, u32 mask)
{
@@ -467,455 +144,30 @@ static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
}
-static inline struct crypto_tfm *crypto_blkcipher_tfm(
- struct crypto_blkcipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_blkcipher() - zeroize and free the block cipher handle
- * @tfm: cipher handle to be freed
- */
static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
{
- crypto_free_tfm(crypto_blkcipher_tfm(tfm));
-}
-
-/**
- * crypto_has_blkcipher() - Search for the availability of a block cipher
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the block cipher is known to the kernel crypto API; false
- * otherwise
- */
-static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
-}
-
-/**
- * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
- * @tfm: cipher handle
- *
- * Return: The character string holding the name of the cipher
- */
-static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
+ crypto_free_tfm(&tfm->base);
}
static inline struct blkcipher_tfm *crypto_blkcipher_crt(
struct crypto_blkcipher *tfm)
{
- return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
+ return &tfm->base.crt_blkcipher;
}
-static inline struct blkcipher_alg *crypto_blkcipher_alg(
- struct crypto_blkcipher *tfm)
-{
- return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
-}
-
-/**
- * crypto_blkcipher_ivsize() - obtain IV size
- * @tfm: cipher handle
- *
- * The size of the IV for the block cipher referenced by the cipher handle is
- * returned. This IV size may be zero if the cipher does not need an IV.
- *
- * Return: IV size in bytes
- */
-static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
-{
- return crypto_blkcipher_alg(tfm)->ivsize;
-}
-
-/**
- * crypto_blkcipher_blocksize() - obtain block size of cipher
- * @tfm: cipher handle
- *
- * The block size for the block cipher referenced with the cipher handle is
- * returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation.
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_blkcipher_blocksize(
- struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
-}
-
-static inline unsigned int crypto_blkcipher_alignmask(
- struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
-}
-
-static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
-}
-
-static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
-}
-
-static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_blkcipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the block cipher referenced by the cipher
- * handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
- const u8 *key, unsigned int keylen)
+ const u8 *key, unsigned keylen)
{
- return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
- key, keylen);
+ return crypto_blkcipher_crt(tfm)->setkey(&tfm->base, key, keylen);
}
-/**
- * crypto_blkcipher_encrypt() - encrypt plaintext
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * ciphertext
- * @src: scatter/gather list that holds the plaintext
- * @nbytes: number of bytes of the plaintext to encrypt.
- *
- * Encrypt plaintext data using the IV set by the caller with a preceding
- * call of crypto_blkcipher_set_iv.
- *
- * The blkcipher_desc data structure must be filled by the caller and can
- * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
- * with the block cipher handle; desc.flags is filled with either
- * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
- return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * ciphertext
- * @src: scatter/gather list that holds the plaintext
- * @nbytes: number of bytes of the plaintext to encrypt.
- *
- * Encrypt plaintext data with the use of an IV that is solely used for this
- * cipher operation. Any previously set IV is not used.
- *
- * The blkcipher_desc data structure must be filled by the caller and can
- * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
- * with the block cipher handle; desc.info is filled with the IV to be used for
- * the current operation; desc.flags is filled with either
- * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
- unsigned int nbytes)
+ unsigned nbytes)
{
return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}
-/**
- * crypto_blkcipher_decrypt() - decrypt ciphertext
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * plaintext
- * @src: scatter/gather list that holds the ciphertext
- * @nbytes: number of bytes of the ciphertext to decrypt.
- *
- * Decrypt ciphertext data using the IV set by the caller with a preceding
- * call of crypto_blkcipher_set_iv.
- *
- * The blkcipher_desc data structure must be filled by the caller as documented
- * for the crypto_blkcipher_encrypt call above.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- *
- */
-static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
- return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * plaintext
- * @src: scatter/gather list that holds the ciphertext
- * @nbytes: number of bytes of the ciphertext to decrypt.
- *
- * Decrypt ciphertext data with the use of an IV that is solely used for this
- * cipher operation. Any previously set IV is not used.
- *
- * The blkcipher_desc data structure must be filled by the caller as documented
- * for the crypto_blkcipher_encrypt_iv call above.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_set_iv() - set IV for cipher
- * @tfm: cipher handle
- * @src: buffer holding the IV
- * @len: length of the IV in bytes
- *
- * The caller provided IV is set for the block cipher referenced by the cipher
- * handle.
- */
-static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
- const u8 *src, unsigned int len)
-{
- memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
-}
-
-/**
- * crypto_blkcipher_get_iv() - obtain IV from cipher
- * @tfm: cipher handle
- * @dst: buffer filled with the IV
- * @len: length of the buffer dst
- *
- * The caller can obtain the IV set for the block cipher referenced by the
- * cipher handle and store it into the user-provided buffer. If the buffer
- * has an insufficient space, the IV is truncated to fit the buffer.
- */
-static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
- u8 *dst, unsigned int len)
-{
- memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
-}
-
-/**
- * DOC: Single Block Cipher API
- *
- * The single block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
- *
- * Using the single block cipher API calls, operations with the basic cipher
- * primitive can be implemented. These cipher primitives exclude any block
- * chaining operations including IV handling.
- *
- * The purpose of this single block cipher API is to support the implementation
- * of templates or other concepts that only need to perform the cipher operation
- * on one block at a time. Templates invoke the underlying cipher primitive
- * block-wise and process either the input or the output data of these cipher
- * operations.
- */
-
-static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
-{
- return (struct crypto_cipher *)tfm;
-}
-
-static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return __crypto_cipher_cast(tfm);
-}
-
-/**
- * crypto_alloc_cipher() - allocate single block cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * single block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for a single block cipher. The returned struct
- * crypto_cipher is the cipher handle that is required for any subsequent API
- * invocation for that single block cipher.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
- u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_cipher() - zeroize and free the single block cipher handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_cipher(struct crypto_cipher *tfm)
-{
- crypto_free_tfm(crypto_cipher_tfm(tfm));
-}
-
-/**
- * crypto_has_cipher() - Search for the availability of a single block cipher
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * single block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the single block cipher is known to the kernel crypto API;
- * false otherwise
- */
-static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_CIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
-}
-
-static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
-{
- return &crypto_cipher_tfm(tfm)->crt_cipher;
-}
-
-/**
- * crypto_cipher_blocksize() - obtain block size for cipher
- * @tfm: cipher handle
- *
- * The block size for the single block cipher referenced with the cipher handle
- * tfm is returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
-}
-
-static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
-}
-
-static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
-}
-
-static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
-}
-
-static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_cipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the single block cipher referenced by the
- * cipher handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
- key, keylen);
-}
-
-/**
- * crypto_cipher_encrypt_one() - encrypt one block of plaintext
- * @tfm: cipher handle
- * @dst: points to the buffer that will be filled with the ciphertext
- * @src: buffer holding the plaintext to be encrypted
- *
- * Invoke the encryption operation of one block. The caller must ensure that
- * the plaintext and ciphertext buffers are at least one block in size.
- */
-static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src)
-{
- crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
- dst, src);
-}
-
-/**
- * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
- * @tfm: cipher handle
- * @dst: points to the buffer that will be filled with the plaintext
- * @src: buffer holding the ciphertext to be decrypted
- *
- * Invoke the decryption operation of one block. The caller must ensure that
- * the plaintext and ciphertext buffers are at least one block in size.
- */
-static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src)
-{
- crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
- dst, src);
-}
-
#endif /* _LINUX_CRYPTO_H */
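With the stored-IV helpers and the implicit-IV encrypt/decrypt wrappers gone from this header, the per-call-IV entry points are the only way left to drive a blkcipher. A minimal sketch of the surviving interface, assuming a "chacha20" blkcipher is registered in this tree and that struct blkcipher_desc keeps its tfm/info/flags fields; key, iv and buffer contents are placeholders:

	static int encrypt_inplace(const u8 *key, unsigned keylen,
				   u8 *iv, void *buf, unsigned len)
	{
		struct crypto_blkcipher *tfm;
		struct blkcipher_desc desc;
		struct scatterlist sg;
		int ret;

		tfm = crypto_alloc_blkcipher("chacha20", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_blkcipher_setkey(tfm, key, keylen);
		if (ret)
			goto out;

		sg_init_one(&sg, buf, len);	/* src == dst: in-place */

		desc.tfm   = tfm;
		desc.info  = iv;	/* per-call IV; there is no stored IV now */
		desc.flags = 0;

		ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);
	out:
		crypto_free_blkcipher(tfm);
		return ret;
	}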
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
deleted file mode 100644
index 8dfcb83b..00000000
--- a/include/linux/cryptohash.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __CRYPTOHASH_H
-#define __CRYPTOHASH_H
-
-#include <linux/types.h>
-
-#define SHA_DIGEST_WORDS 5
-#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8)
-#define SHA_WORKSPACE_WORDS 16
-
-void sha_init(__u32 *buf);
-void sha_transform(__u32 *digest, const char *data, __u32 *W);
-
-#define MD5_DIGEST_WORDS 4
-#define MD5_MESSAGE_BYTES 64
-
-void md5_transform(__u32 *hash, __u32 const *in);
-
-__u32 half_md4_transform(__u32 buf[4], __u32 const in[8]);
-
-#endif
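For reference, the interface deleted above was block-at-a-time with caller-owned state: five words of digest, a sixteen-word scratch area, and one 64-byte block per call. A sketch against the removed declarations (block contents are a placeholder):

	static void sha1_one_block(u32 digest[SHA_DIGEST_WORDS],
				   const char block[SHA_MESSAGE_BYTES])
	{
		u32 ws[SHA_WORKSPACE_WORDS];	/* caller-provided scratch */

		sha_init(digest);		/* load the SHA-1 initial values */
		sha_transform(digest, block, ws); /* consume one 64-byte block */
	}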
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 2233350b..ac72858b 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -207,6 +207,4 @@ int __must_check kstrtoint(const char *s, unsigned int base, int *res);
BUILD_BUG_ON_ZERO((perms) & 2) + \
(perms))
-#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
-
#endif
diff --git a/include/linux/key.h b/include/linux/key.h
new file mode 100644
index 00000000..adc12a9e
--- /dev/null
+++ b/include/linux/key.h
@@ -0,0 +1,51 @@
+#ifndef _LINUX_KEY_H
+#define _LINUX_KEY_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+#include <linux/sysctl.h>
+#include <linux/rwsem.h>
+#include <linux/atomic.h>
+
+#include <stdlib.h>
+#include <keyutils.h>
+
+struct key;
+
+struct user_key_payload {
+ size_t datalen; /* length of this data */
+ char data[0]; /* actual data */
+};
+
+struct key {
+ atomic_t usage; /* number of references */
+ key_serial_t serial; /* key serial number */
+ struct rw_semaphore sem; /* change vs change sem */
+ struct user_key_payload payload;
+};
+
+static inline const struct user_key_payload *user_key_payload(const struct key *key)
+{
+ return &key->payload;
+}
+
+static inline void key_put(struct key *key)
+{
+ if (atomic_dec_and_test(&key->usage))
+ free(key);
+}
+
+static inline struct key *__key_get(struct key *key)
+{
+ atomic_inc(&key->usage);
+ return key;
+}
+
+static inline struct key *key_get(struct key *key)
+{
+ return key ? __key_get(key) : key;
+}
+
+#endif /* _LINUX_KEY_H */
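The lifetime rules above are the usual get/put pairing: key_get() tolerates NULL, key_put() frees when the count reaches zero, and sem guards the payload against concurrent change. A sketch of a reader following those rules:

	static void read_payload(struct key *k)
	{
		const struct user_key_payload *ukp;

		k = key_get(k);			/* NULL-safe reference */
		if (!k)
			return;

		down_read(&k->sem);		/* payload vs change */
		ukp = user_key_payload(k);
		/* ... use ukp->data[0 .. ukp->datalen) ... */
		up_read(&k->sem);

		key_put(k);			/* frees at refcount zero */
	}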
diff --git a/include/linux/mempool.h b/include/linux/mempool.h
index c2789f93..ddf6f941 100644
--- a/include/linux/mempool.h
+++ b/include/linux/mempool.h
@@ -14,6 +14,11 @@ typedef struct mempool_s {
size_t elem_size;
} mempool_t;
+static inline bool mempool_initialized(mempool_t *pool)
+{
+ return true;
+}
+
extern int mempool_resize(mempool_t *pool, int new_min_nr);
static inline void mempool_free(void *element, mempool_t *pool)
diff --git a/include/linux/page.h b/include/linux/page.h
index c99d9de3..8d6413ce 100644
--- a/include/linux/page.h
+++ b/include/linux/page.h
@@ -5,8 +5,11 @@
struct page;
-#define virt_to_page(kaddr) ((struct page *) (kaddr))
-#define page_address(kaddr) ((void *) (kaddr))
+#define virt_to_page(p) \
+ ((struct page *) (((unsigned long) (p)) & PAGE_MASK))
+#define offset_in_page(p) ((unsigned long) (p) & ~PAGE_MASK)
+
+#define page_address(p) ((void *) (p))
#define kmap_atomic(page) page_address(page)
#define kunmap_atomic(addr) do {} while (0)
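Since a "page" in this shim is just the page-aligned part of a kernel virtual address, splitting a pointer with these macros and recombining it must round-trip exactly; a sketch:

	static void *split_and_rejoin(void *p)
	{
		struct page *pg   = virt_to_page(p);	/* p & PAGE_MASK */
		unsigned long off = offset_in_page(p);	/* p & ~PAGE_MASK */

		return (char *) page_address(pg) + off;	/* == p */
	}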
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
new file mode 100644
index 00000000..04bf59df
--- /dev/null
+++ b/include/linux/scatterlist.h
@@ -0,0 +1,111 @@
+#ifndef _LINUX_SCATTERLIST_H
+#define _LINUX_SCATTERLIST_H
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/mm.h>
+
+struct scatterlist {
+ unsigned long page_link;
+ unsigned int offset;
+ unsigned int length;
+};
+
+#define sg_is_chain(sg) ((sg)->page_link & 0x01)
+#define sg_is_last(sg) ((sg)->page_link & 0x02)
+#define sg_chain_ptr(sg) \
+ ((struct scatterlist *) ((sg)->page_link & ~0x03))
+
+static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
+{
+ unsigned long page_link = sg->page_link & 0x3;
+
+ /*
+ * In order for the low bit stealing approach to work, pages
+ * must be aligned at a 32-bit boundary as a minimum.
+ */
+ BUG_ON((unsigned long) page & 0x03);
+ sg->page_link = page_link | (unsigned long) page;
+}
+
+static inline void sg_set_page(struct scatterlist *sg, struct page *page,
+ unsigned int len, unsigned int offset)
+{
+ sg_assign_page(sg, page);
+ sg->offset = offset;
+ sg->length = len;
+}
+
+static inline struct page *sg_page(struct scatterlist *sg)
+{
+ return (struct page *)((sg)->page_link & ~0x3);
+}
+
+static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
+ unsigned int buflen)
+{
+ sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
+}
+
+static inline struct scatterlist *sg_next(struct scatterlist *sg)
+{
+ if (sg_is_last(sg))
+ return NULL;
+
+ sg++;
+ if (unlikely(sg_is_chain(sg)))
+ sg = sg_chain_ptr(sg);
+
+ return sg;
+}
+
+#define for_each_sg(sglist, sg, nr, __i) \
+ for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
+
+static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
+ struct scatterlist *sgl)
+{
+ /*
+ * offset and length are unused for chain entry. Clear them.
+ */
+ prv[prv_nents - 1].offset = 0;
+ prv[prv_nents - 1].length = 0;
+
+ /*
+ * Set lowest bit to indicate a link pointer, and make sure to clear
+ * the termination bit if it happens to be set.
+ */
+ prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
+}
+
+static inline void sg_mark_end(struct scatterlist *sg)
+{
+ sg->page_link |= 0x02;
+ sg->page_link &= ~0x01;
+}
+
+static inline void sg_unmark_end(struct scatterlist *sg)
+{
+ sg->page_link &= ~0x02;
+}
+
+static inline void *sg_virt(struct scatterlist *sg)
+{
+ return page_address(sg_page(sg)) + sg->offset;
+}
+
+static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
+{
+ memset(sgl, 0, sizeof(*sgl) * nents);
+ sg_mark_end(&sgl[nents - 1]);
+}
+
+static inline void sg_init_one(struct scatterlist *sg, const void *buf,
+ unsigned int buflen)
+{
+ sg_init_table(sg, 1);
+ sg_set_buf(sg, buf, buflen);
+}
+
+#endif /* _LINUX_SCATTERLIST_H */
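A sketch of how these helpers compose: build a two-entry table, chain a single-entry table onto it, and walk the logical list with for_each_sg (buffers are placeholders):

	static unsigned sum_lengths(void *a, unsigned alen, void *b, unsigned blen)
	{
		struct scatterlist first[2], second[1], *sg;
		unsigned i, total = 0;

		sg_init_table(first, 2);
		sg_set_buf(&first[0], a, alen);
		sg_init_one(second, b, blen);

		/* first[1] becomes a chain entry pointing at second[] */
		sg_chain(first, 2, second);

		/* visits first[0], hops the chain, ends at second[0] */
		for_each_sg(first, sg, 2, i)
			total += sg->length;

		return total;
	}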
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 2e1ad82e..2d9f8291 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -38,6 +38,19 @@ struct itimerspec64 {
#define KTIME_MAX ((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+static inline struct timespec ns_to_timespec(const u64 nsec)
+{
+ return (struct timespec) {
+ .tv_sec = nsec / NSEC_PER_SEC,
+ .tv_nsec = nsec % NSEC_PER_SEC,
+ };
+}
+
+static inline s64 timespec_to_ns(const struct timespec *ts)
+{
+ return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+}
+
#if __BITS_PER_LONG == 64
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
@@ -61,11 +74,6 @@ static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
# define ns_to_timespec64 ns_to_timespec
# define timespec64_add_ns timespec_add_ns
-static inline s64 timespec_to_ns(const struct timespec *ts)
-{
- return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
-}
-
#else
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
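timespec_to_ns() moves out of the 64-bit-only branch and gains an ns_to_timespec() counterpart; the two are exact inverses for values representable in both forms. A sketch of the round trip:

	static s64 ns_round_trip(u64 ns)
	{
		struct timespec ts = ns_to_timespec(ns);	/* split into sec/nsec */

		return timespec_to_ns(&ts);	/* == ns for values below KTIME_MAX */
	}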
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index d4968c54..01e4b79d 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -185,7 +185,7 @@ TRACE_EVENT(bcache_write,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
__entry->inode = inode;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
@@ -215,7 +215,7 @@ TRACE_EVENT(bcache_write_throttle,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
__entry->inode = inode;
__entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9;
@@ -245,7 +245,7 @@ DECLARE_EVENT_CLASS(page_alloc_fail,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
__entry->size = size;
),
@@ -263,7 +263,7 @@ DECLARE_EVENT_CLASS(cache_set,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
),
TP_printk("%pU", __entry->uuid)
@@ -285,7 +285,7 @@ TRACE_EVENT(bcache_journal_next_bucket,
),
TP_fast_assign(
- memcpy(__entry->uuid, ca->disk_sb.sb->disk_uuid.b, 16);
+ memcpy(__entry->uuid, ca->uuid.b, 16);
__entry->cur_idx = cur_idx;
__entry->last_idx = last_idx;
),
@@ -304,7 +304,7 @@ TRACE_EVENT(bcache_journal_write_oldest,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
__entry->seq = seq;
),
@@ -322,7 +322,7 @@ TRACE_EVENT(bcache_journal_write_oldest_done,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
__entry->seq = seq;
__entry->written = written;
),
@@ -368,7 +368,7 @@ DECLARE_EVENT_CLASS(cache,
),
TP_fast_assign(
- memcpy(__entry->uuid, ca->disk_sb.sb->disk_uuid.b, 16);
+ memcpy(__entry->uuid, ca->uuid.b, 16);
__entry->tier = ca->mi.tier;
),
@@ -418,7 +418,7 @@ DECLARE_EVENT_CLASS(btree_node,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
__entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
__entry->level = b->level;
__entry->id = b->btree_id;
@@ -471,7 +471,7 @@ TRACE_EVENT(bcache_btree_node_alloc_fail,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
__entry->id = id;
),
@@ -514,7 +514,7 @@ TRACE_EVENT(bcache_mca_scan,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
__entry->touched = touched;
__entry->freed = freed;
__entry->can_free = can_free;
@@ -535,7 +535,7 @@ DECLARE_EVENT_CLASS(mca_cannibalize_lock,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
),
TP_printk("%pU", __entry->uuid)
@@ -675,7 +675,7 @@ TRACE_EVENT(bcache_btree_gc_coalesce_fail,
TP_fast_assign(
__entry->reason = reason;
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+		memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
),
TP_printk("%pU: %u", __entry->uuid, __entry->reason)
@@ -696,7 +696,7 @@ TRACE_EVENT(bcache_btree_node_alloc_replacement,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
__entry->old_bucket = PTR_BUCKET_NR_TRACE(c,
&old->key, 0);
__entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
@@ -778,7 +778,7 @@ TRACE_EVENT(bcache_mark_bucket,
),
TP_fast_assign(
- memcpy(__entry->uuid, ca->disk_sb.sb->disk_uuid.b, 16);
+ memcpy(__entry->uuid, ca->uuid.b, 16);
__entry->inode = k->p.inode;
__entry->offset = k->p.offset;
__entry->sectors = sectors;
@@ -804,7 +804,7 @@ TRACE_EVENT(bcache_alloc_batch,
),
TP_fast_assign(
- memcpy(__entry->uuid, ca->disk_sb.sb->disk_uuid.b, 16);
+ memcpy(__entry->uuid, ca->uuid.b, 16);
__entry->free = free;
__entry->total = total;
),
@@ -824,7 +824,7 @@ TRACE_EVENT(bcache_btree_reserve_get_fail,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
__entry->required = required;
__entry->cl = cl;
),
@@ -879,7 +879,7 @@ DECLARE_EVENT_CLASS(cache_bucket_alloc,
),
TP_fast_assign(
- memcpy(__entry->uuid, ca->disk_sb.sb->disk_uuid.b, 16);
+ memcpy(__entry->uuid, ca->uuid.b, 16);
__entry->reserve = reserve;
),
@@ -908,7 +908,7 @@ DECLARE_EVENT_CLASS(cache_set_bucket_alloc,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
__entry->reserve = reserve;
__entry->cl = cl;
),
@@ -933,7 +933,7 @@ DECLARE_EVENT_CLASS(open_bucket_alloc,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
__entry->cl = cl;
),
@@ -1054,7 +1054,7 @@ TRACE_EVENT(bcache_moving_gc_end,
),
TP_fast_assign(
- memcpy(__entry->uuid, ca->disk_sb.sb->disk_uuid.b, 16);
+ memcpy(__entry->uuid, ca->uuid.b, 16);
__entry->sectors_moved = sectors_moved;
__entry->keys_moved = keys_moved;
__entry->buckets_moved = buckets_moved;
@@ -1114,7 +1114,7 @@ TRACE_EVENT(bcache_tiering_end,
),
TP_fast_assign(
- memcpy(__entry->uuid, c->disk_sb.user_uuid.b, 16);
+ memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
__entry->sectors_moved = sectors_moved;
__entry->keys_moved = keys_moved;
),