author		Kent Overstreet <kent.overstreet@gmail.com>	2017-01-08 00:13:18 -0900
committer	Kent Overstreet <kent.overstreet@gmail.com>	2017-01-20 09:07:08 -0900
commit		b33fc8298f7e13226b9895abc57c9bfce5e3fa2d (patch)
tree		a3d2a5a909b6372f7777c1c5c18cef5f81d123a9 /include/linux/crypto.h
parent		7f4191a202ea4558ca2d5eb8a47daea33c9999c7 (diff)
bcache in userspace; userspace fsck
Diffstat (limited to 'include/linux/crypto.h')
-rw-r--r--	include/linux/crypto.h	| 921
1 file changed, 921 insertions, 0 deletions
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
new file mode 100644
index 00000000..cb9ad24f
--- /dev/null
+++ b/include/linux/crypto.h
@@ -0,0 +1,921 @@
+/*
+ * Scatterlist Cryptographic API.
+ *
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
+ * and Nettle, by Niels Möller.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _LINUX_CRYPTO_H
+#define _LINUX_CRYPTO_H
+
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+/*
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
+ * arbitrary modules to be loaded. Loading from userspace may still need the
+ * unprefixed names, so those aliases are retained as well.
+ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
+ * expands twice on the same line. Instead, use a separate base name for the
+ * alias.
+ */
+#define MODULE_ALIAS_CRYPTO(name) \
+ __MODULE_INFO(alias, alias_userspace, name); \
+ __MODULE_INFO(alias, alias_crypto, "crypto-" name)
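+
+/*
+ * For example, a module implementing AES would declare
+ *
+ *	MODULE_ALIAS_CRYPTO("aes");
+ *
+ * which emits both the "aes" and the "crypto-aes" aliases.
+ */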
+
+/*
+ * Algorithm masks and types.
+ */
+#define CRYPTO_ALG_TYPE_MASK 0x0000000f
+#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
+#define CRYPTO_ALG_TYPE_AEAD 0x00000003
+#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
+#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
+#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
+#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
+#define CRYPTO_ALG_TYPE_KPP 0x00000008
+#define CRYPTO_ALG_TYPE_RNG 0x0000000c
+#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
+#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
+#define CRYPTO_ALG_TYPE_HASH 0x0000000e
+#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
+#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
+
+#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
+#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
+#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
+
+#define CRYPTO_ALG_ASYNC 0x00000080
+
+/*
+ * Set this bit if and only if the algorithm requires another algorithm of
+ * the same type to handle corner cases.
+ */
+#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
+
+/*
+ * This bit is set for symmetric key ciphers that have already been wrapped
+ * with a generic IV generator to prevent them from being wrapped again.
+ */
+#define CRYPTO_ALG_GENIV 0x00000200
+
+/*
+ * Set if the algorithm is an instance that is built from templates.
+ */
+#define CRYPTO_ALG_INSTANCE 0x00000800
+
+/*
+ * Set this bit if the algorithm provided is hardware accelerated but not
+ * available to userspace via an instruction set or similar mechanism.
+ */
+#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000
+
+/*
+ * Mark a cipher as a service implementation only usable by another
+ * cipher and never by a normal user of the kernel crypto API.
+ */
+#define CRYPTO_ALG_INTERNAL 0x00002000
+
+/*
+ * Transform masks and values (for crt_flags).
+ */
+#define CRYPTO_TFM_REQ_MASK 0x000fff00
+#define CRYPTO_TFM_RES_MASK 0xfff00000
+
+#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
+#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
+#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
+#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
+#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
+#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
+#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
+#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
+
+/*
+ * Miscellaneous stuff.
+ */
+#define CRYPTO_MAX_ALG_NAME 64
+
+/*
+ * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
+ * declaration) is used to ensure that the crypto_tfm context structure is
+ * aligned correctly for the given architecture so that there are no alignment
+ * faults for C data types. In particular, this is required on platforms such
+ * as arm where pointers are 32-bit aligned but there are data types such as
+ * u64 which require 64-bit alignment.
+ */
+#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
+
+#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
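+
+/*
+ * For instance, a transform context obtained with crypto_tfm_ctx() (defined
+ * below) is guaranteed CRYPTO_MINALIGN alignment, so a hypothetical context
+ * such as
+ *
+ *	struct example_ctx {
+ *		u64 counter;
+ *	};
+ *
+ * can be accessed without alignment faults even on 32-bit platforms.
+ */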
+
+struct scatterlist;
+struct crypto_blkcipher;
+struct crypto_tfm;
+struct crypto_type;
+struct skcipher_givcrypt_request;
+
+struct blkcipher_desc {
+ struct crypto_blkcipher *tfm;
+ void *info;
+ u32 flags;
+};
+
+struct cipher_desc {
+ struct crypto_tfm *tfm;
+ void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+ unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
+ const u8 *src, unsigned int nbytes);
+ void *info;
+};
+
+struct blkcipher_alg {
+ int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes);
+ int (*decrypt)(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes);
+
+ const char *geniv;
+
+ unsigned int min_keysize;
+ unsigned int max_keysize;
+ unsigned int ivsize;
+};
+
+struct cipher_alg {
+ unsigned int cia_min_keysize;
+ unsigned int cia_max_keysize;
+ int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+ void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+ void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+};
+
+struct compress_alg {
+ int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen);
+ int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen);
+};
+
+
+#define cra_blkcipher cra_u.blkcipher
+#define cra_cipher cra_u.cipher
+#define cra_compress cra_u.compress
+
+struct crypto_alg {
+ struct list_head cra_list;
+ struct list_head cra_users;
+
+ u32 cra_flags;
+ unsigned int cra_blocksize;
+ unsigned int cra_ctxsize;
+ unsigned int cra_alignmask;
+
+ int cra_priority;
+ atomic_t cra_refcnt;
+
+ char cra_name[CRYPTO_MAX_ALG_NAME];
+ char cra_driver_name[CRYPTO_MAX_ALG_NAME];
+
+ const struct crypto_type *cra_type;
+
+ union {
+ struct blkcipher_alg blkcipher;
+ struct cipher_alg cipher;
+ struct compress_alg compress;
+ } cra_u;
+
+ int (*cra_init)(struct crypto_tfm *tfm);
+ void (*cra_exit)(struct crypto_tfm *tfm);
+ void (*cra_destroy)(struct crypto_alg *alg);
+
+ struct module *cra_module;
+} CRYPTO_MINALIGN_ATTR;
+
+/*
+ * Algorithm registration interface.
+ */
+int crypto_register_alg(struct crypto_alg *alg);
+int crypto_unregister_alg(struct crypto_alg *alg);
+int crypto_register_algs(struct crypto_alg *algs, int count);
+int crypto_unregister_algs(struct crypto_alg *algs, int count);
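+
+/*
+ * A driver typically registers its algorithm from module init and checks the
+ * result; a minimal sketch (fields abridged, names hypothetical):
+ *
+ *	static struct crypto_alg example_alg = {
+ *		.cra_name	 = "example",
+ *		.cra_driver_name = "example-generic",
+ *		.cra_priority	 = 100,
+ *		...
+ *	};
+ *
+ *	err = crypto_register_alg(&example_alg);
+ */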
+
+/*
+ * Algorithm query interface.
+ */
+int crypto_has_alg(const char *name, u32 type, u32 mask);
+
+/*
+ * Transforms: user-instantiated objects which encapsulate algorithms
+ * and core processing logic. Managed via crypto_alloc_*() and
+ * crypto_free_*(), as well as the various helpers below.
+ */
+
+struct blkcipher_tfm {
+ void *iv;
+ int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes);
+ int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes);
+};
+
+struct cipher_tfm {
+ int (*cit_setkey)(struct crypto_tfm *tfm,
+ const u8 *key, unsigned int keylen);
+ void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+ void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
+};
+
+struct compress_tfm {
+ int (*cot_compress)(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+ int (*cot_decompress)(struct crypto_tfm *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
+};
+
+#define crt_blkcipher crt_u.blkcipher
+#define crt_cipher crt_u.cipher
+#define crt_compress crt_u.compress
+
+struct crypto_tfm {
+
+ u32 crt_flags;
+
+ union {
+ struct blkcipher_tfm blkcipher;
+ struct cipher_tfm cipher;
+ struct compress_tfm compress;
+ } crt_u;
+
+ void (*exit)(struct crypto_tfm *tfm);
+
+ struct crypto_alg *__crt_alg;
+
+ void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+struct crypto_blkcipher {
+ struct crypto_tfm base;
+};
+
+struct crypto_cipher {
+ struct crypto_tfm base;
+};
+
+struct crypto_comp {
+ struct crypto_tfm base;
+};
+
+enum {
+ CRYPTOA_UNSPEC,
+ CRYPTOA_ALG,
+ CRYPTOA_TYPE,
+ CRYPTOA_U32,
+ __CRYPTOA_MAX,
+};
+
+#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
+
+/* Maximum number of (rtattr) parameters for each template. */
+#define CRYPTO_MAX_ATTRS 32
+
+struct crypto_attr_alg {
+ char name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct crypto_attr_type {
+ u32 type;
+ u32 mask;
+};
+
+struct crypto_attr_u32 {
+ u32 num;
+};
+
+/*
+ * Transform user interface.
+ */
+
+struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
+void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
+
+static inline void crypto_free_tfm(struct crypto_tfm *tfm)
+{
+ return crypto_destroy_tfm(tfm, tfm);
+}
+
+int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
+
+/*
+ * Transform helpers which query the underlying algorithm.
+ */
+static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_name;
+}
+
+static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_driver_name;
+}
+
+static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_priority;
+}
+
+static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
+}
+
+static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_blocksize;
+}
+
+static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_alg->cra_alignmask;
+}
+
+static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
+{
+ return tfm->crt_flags;
+}
+
+static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
+{
+ tfm->crt_flags |= flags;
+}
+
+static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
+{
+ tfm->crt_flags &= ~flags;
+}
+
+static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
+{
+ return tfm->__crt_ctx;
+}
+
+static inline unsigned int crypto_tfm_ctx_alignment(void)
+{
+ struct crypto_tfm *tfm;
+ return __alignof__(tfm->__crt_ctx);
+}
+
+static inline u32 crypto_skcipher_type(u32 type)
+{
+ type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+ return type;
+}
+
+static inline u32 crypto_skcipher_mask(u32 mask)
+{
+ mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
+ return mask;
+}
+
+/**
+ * DOC: Synchronous Block Cipher API
+ *
+ * The synchronous block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto).
+ *
+ * Synchronous calls have a context in the tfm. But since a single tfm can be
+ * used in multiple calls and in parallel, this info should not be changeable
+ * (unless a lock is used). This applies, for example, to the symmetric key.
+ * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
+ * structure for the synchronous blkcipher API. It is the only state info that
+ * can be kept for synchronous calls without using a big lock across a tfm.
+ *
+ * The block cipher API allows the use of a complete cipher, i.e. a cipher
+ * consisting of a template (a block chaining mode) and a single block cipher
+ * primitive (e.g. AES).
+ *
+ * The plaintext data buffer and the ciphertext data buffer are pointed to
+ * by using scatter/gather lists. The cipher operation is performed
+ * on all segments of the provided scatter/gather lists.
+ *
+ * The kernel crypto API supports a cipher operation "in-place" which means that
+ * the caller may provide the same scatter/gather list for the plaintext and
+ * ciphertext. After the completion of the cipher operation, the plaintext
+ * data is replaced with the ciphertext data in case of an encryption and vice
+ * versa for a decryption. The caller must ensure that the scatter/gather lists
+ * for the output data point to sufficiently large buffers, i.e. multiples of
+ * the block size of the cipher.
+ */
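+
+/*
+ * A minimal usage sketch of the handle life cycle (error handling elided;
+ * assumes "cbc(aes)" is available and a caller-provided 16-byte key, i.e.
+ * AES-128):
+ *
+ *	struct crypto_blkcipher *tfm;
+ *
+ *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+ *	crypto_blkcipher_setkey(tfm, key, 16);
+ *	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
+ *	...	(encrypt/decrypt, see below)
+ *	crypto_free_blkcipher(tfm);
+ */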
+
+static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
+ struct crypto_tfm *tfm)
+{
+ return (struct crypto_blkcipher *)tfm;
+}
+
+static inline struct crypto_blkcipher *crypto_blkcipher_cast(
+ struct crypto_tfm *tfm)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
+ return __crypto_blkcipher_cast(tfm);
+}
+
+/**
+ * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a block cipher. The returned struct
+ * crypto_blkcipher is the cipher handle that is required for any subsequent
+ * API invocation for that block cipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
+ const char *alg_name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
+}
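+
+/*
+ * A typical allocation checks the returned handle before use, e.g. (a sketch,
+ * assuming "cbc(aes)" is available):
+ *
+ *	struct crypto_blkcipher *tfm;
+ *
+ *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+ *	if (IS_ERR(tfm))
+ *		return PTR_ERR(tfm);
+ */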
+
+static inline struct crypto_tfm *crypto_blkcipher_tfm(
+ struct crypto_blkcipher *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_blkcipher() - zeroize and free the block cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
+{
+ crypto_free_tfm(crypto_blkcipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_blkcipher() - Search for the availability of a block cipher
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the block cipher is known to the kernel crypto API; false
+ * otherwise
+ */
+static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_BLKCIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return crypto_has_alg(alg_name, type, mask);
+}
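+
+/*
+ * A sketch of probing for an algorithm before committing to it (assuming
+ * "cbc(aes)" is the algorithm of interest):
+ *
+ *	if (!crypto_has_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC))
+ *		return -ENOENT;
+ */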
+
+/**
+ * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
+ * @tfm: cipher handle
+ *
+ * Return: The character string holding the name of the cipher
+ */
+static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
+{
+ return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
+}
+
+static inline struct blkcipher_tfm *crypto_blkcipher_crt(
+ struct crypto_blkcipher *tfm)
+{
+ return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
+}
+
+static inline struct blkcipher_alg *crypto_blkcipher_alg(
+ struct crypto_blkcipher *tfm)
+{
+ return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
+}
+
+/**
+ * crypto_blkcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the block cipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
+{
+ return crypto_blkcipher_alg(tfm)->ivsize;
+}
+
+/**
+ * crypto_blkcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the block cipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_blkcipher_blocksize(
+ struct crypto_blkcipher *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_blkcipher_alignmask(
+ struct crypto_blkcipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
+}
+
+static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
+}
+
+static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_blkcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the block cipher referenced by the cipher
+ * handle.
+ *
+ * Note: the key length determines the cipher variant. Many block ciphers
+ * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256.
+ * Providing a 16-byte key for an AES cipher handle selects AES-128.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
+ key, keylen);
+}
+
+/**
+ * crypto_blkcipher_encrypt() - encrypt plaintext
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * ciphertext
+ * @src: scatter/gather list that holds the plaintext
+ * @nbytes: number of bytes of the plaintext to encrypt.
+ *
+ * Encrypt plaintext data using the IV set by the caller with a preceding
+ * call to crypto_blkcipher_set_iv().
+ *
+ * The blkcipher_desc data structure must be filled by the caller and can
+ * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
+ * with the block cipher handle; desc.flags is filled with either
+ * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
+ return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
+}
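+
+/*
+ * A sketch of one in-place encryption call over a buffer "buf" whose length
+ * is a multiple of the cipher block size (error handling elided; requires
+ * <linux/scatterlist.h>):
+ *
+ *	struct blkcipher_desc desc = {
+ *		.tfm	= tfm,
+ *		.flags	= CRYPTO_TFM_REQ_MAY_SLEEP,
+ *	};
+ *	struct scatterlist sg;
+ *
+ *	sg_init_one(&sg, buf, nbytes);
+ *	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, nbytes);
+ */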
+
+/**
+ * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * ciphertext
+ * @src: scatter/gather list that holds the plaintext
+ * @nbytes: number of bytes of the plaintext to encrypt.
+ *
+ * Encrypt plaintext data with the use of an IV that is solely used for this
+ * cipher operation. Any previously set IV is not used.
+ *
+ * The blkcipher_desc data structure must be filled by the caller and can
+ * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
+ * with the block cipher handle; desc.info is filled with the IV to be used for
+ * the current operation; desc.flags is filled with either
+ * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
+}
+
+/**
+ * crypto_blkcipher_decrypt() - decrypt ciphertext
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * plaintext
+ * @src: scatter/gather list that holds the ciphertext
+ * @nbytes: number of bytes of the ciphertext to decrypt.
+ *
+ * Decrypt ciphertext data using the IV set by the caller with a preceding
+ * call to crypto_blkcipher_set_iv().
+ *
+ * The blkcipher_desc data structure must be filled by the caller as documented
+ * for the crypto_blkcipher_encrypt call above.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ *
+ */
+static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
+ return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
+}
+
+/**
+ * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ * plaintext
+ * @src: scatter/gather list that holds the ciphertext
+ * @nbytes: number of bytes of the ciphertext to decrypt.
+ *
+ * Decrypt ciphertext data with the use of an IV that is solely used for this
+ * cipher operation. Any previously set IV is not used.
+ *
+ * The blkcipher_desc data structure must be filled by the caller as documented
+ * for the crypto_blkcipher_encrypt_iv call above.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
+}
+
+/**
+ * crypto_blkcipher_set_iv() - set IV for cipher
+ * @tfm: cipher handle
+ * @src: buffer holding the IV
+ * @len: length of the IV in bytes
+ *
+ * The caller provided IV is set for the block cipher referenced by the cipher
+ * handle.
+ */
+static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
+ const u8 *src, unsigned int len)
+{
+ memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
+}
+
+/**
+ * crypto_blkcipher_get_iv() - obtain IV from cipher
+ * @tfm: cipher handle
+ * @dst: buffer filled with the IV
+ * @len: length of the buffer dst
+ *
+ * The caller can obtain the IV set for the block cipher referenced by the
+ * cipher handle and store it into the user-provided buffer. If the buffer
+ * has insufficient space, the IV is truncated to fit the buffer.
+ */
+static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
+ u8 *dst, unsigned int len)
+{
+ memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
+}
+
+/**
+ * DOC: Single Block Cipher API
+ *
+ * The single block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
+ *
+ * The single block cipher API calls implement operations with the basic
+ * cipher primitive. These cipher primitives exclude any block chaining
+ * operations, including IV handling.
+ *
+ * The purpose of this single block cipher API is to support the implementation
+ * of templates or other concepts that only need to perform the cipher operation
+ * on one block at a time. Templates invoke the underlying cipher primitive
+ * block-wise and process either the input or the output data of these cipher
+ * operations.
+ */
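+
+/*
+ * A minimal sketch of a single-block operation (error handling elided;
+ * assumes the "aes" primitive is available and a caller-provided 16-byte
+ * key):
+ *
+ *	struct crypto_cipher *tfm;
+ *	u8 block[16];
+ *
+ *	tfm = crypto_alloc_cipher("aes", 0, 0);
+ *	crypto_cipher_setkey(tfm, key, 16);
+ *	crypto_cipher_encrypt_one(tfm, block, block);
+ *	crypto_free_cipher(tfm);
+ */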
+
+static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
+{
+ return (struct crypto_cipher *)tfm;
+}
+
+static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
+{
+ BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
+ return __crypto_cipher_cast(tfm);
+}
+
+/**
+ * crypto_alloc_cipher() - allocate single block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a single block cipher. The returned struct
+ * crypto_cipher is the cipher handle that is required for any subsequent API
+ * invocation for that single block cipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
+ u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
+}
+
+static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_cipher() - zeroize and free the single block cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_cipher(struct crypto_cipher *tfm)
+{
+ crypto_free_tfm(crypto_cipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_cipher() - Search for the availability of a single block cipher
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the single block cipher is known to the kernel crypto API;
+ * false otherwise
+ */
+static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return crypto_has_alg(alg_name, type, mask);
+}
+
+static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
+{
+ return &crypto_cipher_tfm(tfm)->crt_cipher;
+}
+
+/**
+ * crypto_cipher_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the single block cipher referenced with the cipher handle
+ * tfm is returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
+}
+
+static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
+}
+
+static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
+}
+
+static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_cipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the single block cipher referenced by the
+ * cipher handle.
+ *
+ * Note: the key length determines the cipher variant. Many block ciphers
+ * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256.
+ * Providing a 16-byte key for an AES cipher handle selects AES-128.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
+ key, keylen);
+}
+
+/**
+ * crypto_cipher_encrypt_one() - encrypt one block of plaintext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the ciphertext
+ * @src: buffer holding the plaintext to be encrypted
+ *
+ * Invoke the encryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
+static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src)
+{
+ crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
+ dst, src);
+}
+
+/**
+ * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the plaintext
+ * @src: buffer holding the ciphertext to be decrypted
+ *
+ * Invoke the decryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
+static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src)
+{
+ crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
+ dst, src);
+}
+
+#endif /* _LINUX_CRYPTO_H */
+