From 8279dd748f9704b811e528b31304e2fab026abc5 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Wed, 6 Jul 2005 13:51:00 -0700 Subject: [CRYPTO] Don't check for NULL before kfree() Checking a pointer for NULL before calling kfree() on it is redundant. This patch removes such checks from crypto/ Signed-off-by: Jesper Juhl Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- crypto/cipher.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'crypto/cipher.c') diff --git a/crypto/cipher.c b/crypto/cipher.c index f434ce7c2d0b..69264497b48c 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -336,6 +336,5 @@ out: void crypto_exit_cipher_ops(struct crypto_tfm *tfm) { - if (tfm->crt_cipher.cit_iv) - kfree(tfm->crt_cipher.cit_iv); + kfree(tfm->crt_cipher.cit_iv); } -- cgit v1.2.3 From c774e93e2152d0be2612739418689e6e6400f4eb Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 6 Jul 2005 13:51:31 -0700 Subject: [CRYPTO] Add plumbing for multi-block operations The VIA Padlock device is able to perform much better when multiple blocks are fed to it at once. As this device offers an exceptional throughput rate it is worthwhile to optimise the infrastructure specifically for it. We shift the existing page-sized fast path down to the CBC/ECB functions. We can then replace the CBC/ECB functions with functions provided by the underlying algorithm that performs the multi-block operations. As a side-effect this improves the performance of large cipher operations for all existing algorithm implementations. I've measured the gain to be around 5% for 3DES and 15% for AES. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- crypto/cipher.c | 246 ++++++++++++++++++++++++++++++++------------------- crypto/scatterwalk.c | 4 +- crypto/scatterwalk.h | 6 +- 3 files changed, 161 insertions(+), 95 deletions(-) (limited to 'crypto/cipher.c') diff --git a/crypto/cipher.c b/crypto/cipher.c index 69264497b48c..c4243345b154 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -4,6 +4,7 @@ * Cipher operations. 
* * Copyright (c) 2002 James Morris + * Copyright (c) 2005 Herbert Xu * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -22,9 +23,13 @@ #include "internal.h" #include "scatterwalk.h" -typedef void (cryptfn_t)(void *, u8 *, const u8 *); -typedef void (procfn_t)(struct crypto_tfm *, u8 *, - u8*, cryptfn_t, void *); +struct cipher_desc { + struct crypto_tfm *tfm; + void (*crfn)(void *ctx, u8 *dst, const u8 *src); + unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst, + const u8 *src, unsigned int nbytes); + void *info; +}; static inline void xor_64(u8 *a, const u8 *b) { @@ -39,63 +44,57 @@ static inline void xor_128(u8 *a, const u8 *b) ((u32 *)a)[2] ^= ((u32 *)b)[2]; ((u32 *)a)[3] ^= ((u32 *)b)[3]; } - -static inline void *prepare_src(struct scatter_walk *walk, int bsize, - void *tmp, int in_place) + +static unsigned int crypt_slow(const struct cipher_desc *desc, + struct scatter_walk *in, + struct scatter_walk *out, unsigned int bsize) { - void *src = walk->data; - int n = bsize; + u8 src[bsize]; + u8 dst[bsize]; + unsigned int n; - if (unlikely(scatterwalk_across_pages(walk, bsize))) { - src = tmp; - n = scatterwalk_copychunks(src, walk, bsize, 0); - } - scatterwalk_advance(walk, n); - return src; -} + n = scatterwalk_copychunks(src, in, bsize, 0); + scatterwalk_advance(in, n); -static inline void *prepare_dst(struct scatter_walk *walk, int bsize, - void *tmp, int in_place) -{ - void *dst = walk->data; + desc->prfn(desc, dst, src, bsize); - if (unlikely(scatterwalk_across_pages(walk, bsize)) || in_place) - dst = tmp; - return dst; -} + n = scatterwalk_copychunks(dst, out, bsize, 1); + scatterwalk_advance(out, n); -static inline void complete_src(struct scatter_walk *walk, int bsize, - void *src, int in_place) -{ + return bsize; } -static inline void complete_dst(struct scatter_walk *walk, int bsize, - void *dst, int in_place) +static inline unsigned int crypt_fast(const struct cipher_desc *desc, + struct scatter_walk *in, + struct scatter_walk *out, + unsigned int nbytes) { - int n = bsize; + u8 *src, *dst; + + src = in->data; + dst = scatterwalk_samebuf(in, out) ? src : out->data; + + nbytes = desc->prfn(desc, dst, src, nbytes); + + scatterwalk_advance(in, nbytes); + scatterwalk_advance(out, nbytes); - if (unlikely(scatterwalk_across_pages(walk, bsize))) - n = scatterwalk_copychunks(dst, walk, bsize, 1); - else if (in_place) - memcpy(walk->data, dst, bsize); - scatterwalk_advance(walk, n); + return nbytes; } /* * Generic encrypt/decrypt wrapper for ciphers, handles operations across * multiple page boundaries by using temporary blocks. In user context, - * the kernel is given a chance to schedule us once per block. + * the kernel is given a chance to schedule us once per page. 
*/ -static int crypt(struct crypto_tfm *tfm, +static int crypt(const struct cipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes, cryptfn_t crfn, - procfn_t prfn, void *info) + unsigned int nbytes) { struct scatter_walk walk_in, walk_out; + struct crypto_tfm *tfm = desc->tfm; const unsigned int bsize = crypto_tfm_alg_blocksize(tfm); - u8 tmp_src[bsize]; - u8 tmp_dst[bsize]; if (!nbytes) return 0; @@ -109,29 +108,20 @@ static int crypt(struct crypto_tfm *tfm, scatterwalk_start(&walk_out, dst); for(;;) { - u8 *src_p, *dst_p; - int in_place; + unsigned int n; scatterwalk_map(&walk_in, 0); scatterwalk_map(&walk_out, 1); - in_place = scatterwalk_samebuf(&walk_in, &walk_out); + n = scatterwalk_clamp(&walk_in, nbytes); + n = scatterwalk_clamp(&walk_out, n); - do { - src_p = prepare_src(&walk_in, bsize, tmp_src, - in_place); - dst_p = prepare_dst(&walk_out, bsize, tmp_dst, - in_place); + if (likely(n >= bsize)) + n = crypt_fast(desc, &walk_in, &walk_out, n); + else + n = crypt_slow(desc, &walk_in, &walk_out, bsize); - prfn(tfm, dst_p, src_p, crfn, info); - - complete_src(&walk_in, bsize, src_p, in_place); - complete_dst(&walk_out, bsize, dst_p, in_place); - - nbytes -= bsize; - } while (nbytes && - !scatterwalk_across_pages(&walk_in, bsize) && - !scatterwalk_across_pages(&walk_out, bsize)); + nbytes -= n; scatterwalk_done(&walk_in, 0, nbytes); scatterwalk_done(&walk_out, 1, nbytes); @@ -143,30 +133,78 @@ static int crypt(struct crypto_tfm *tfm, } } -static void cbc_process_encrypt(struct crypto_tfm *tfm, u8 *dst, u8 *src, - cryptfn_t fn, void *info) +static unsigned int cbc_process_encrypt(const struct cipher_desc *desc, + u8 *dst, const u8 *src, + unsigned int nbytes) { - u8 *iv = info; + struct crypto_tfm *tfm = desc->tfm; + void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block; + int bsize = crypto_tfm_alg_blocksize(tfm); + + void (*fn)(void *, u8 *, const u8 *) = desc->crfn; + u8 *iv = desc->info; + unsigned int done = 0; + + do { + xor(iv, src); + fn(crypto_tfm_ctx(tfm), dst, iv); + memcpy(iv, dst, bsize); - tfm->crt_u.cipher.cit_xor_block(iv, src); - fn(crypto_tfm_ctx(tfm), dst, iv); - memcpy(iv, dst, crypto_tfm_alg_blocksize(tfm)); + src += bsize; + dst += bsize; + } while ((done += bsize) < nbytes); + + return done; } -static void cbc_process_decrypt(struct crypto_tfm *tfm, u8 *dst, u8 *src, - cryptfn_t fn, void *info) +static unsigned int cbc_process_decrypt(const struct cipher_desc *desc, + u8 *dst, const u8 *src, + unsigned int nbytes) { - u8 *iv = info; + struct crypto_tfm *tfm = desc->tfm; + void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block; + int bsize = crypto_tfm_alg_blocksize(tfm); + + u8 stack[src == dst ? bsize : 0]; + u8 *buf = stack; + u8 **dst_p = src == dst ? 
&buf : &dst; + + void (*fn)(void *, u8 *, const u8 *) = desc->crfn; + u8 *iv = desc->info; + unsigned int done = 0; + + do { + u8 *tmp_dst = *dst_p; - fn(crypto_tfm_ctx(tfm), dst, src); - tfm->crt_u.cipher.cit_xor_block(dst, iv); - memcpy(iv, src, crypto_tfm_alg_blocksize(tfm)); + fn(crypto_tfm_ctx(tfm), tmp_dst, src); + xor(tmp_dst, iv); + memcpy(iv, src, bsize); + if (tmp_dst != dst) + memcpy(dst, tmp_dst, bsize); + + src += bsize; + dst += bsize; + } while ((done += bsize) < nbytes); + + return done; } -static void ecb_process(struct crypto_tfm *tfm, u8 *dst, u8 *src, - cryptfn_t fn, void *info) +static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst, + const u8 *src, unsigned int nbytes) { - fn(crypto_tfm_ctx(tfm), dst, src); + struct crypto_tfm *tfm = desc->tfm; + int bsize = crypto_tfm_alg_blocksize(tfm); + void (*fn)(void *, u8 *, const u8 *) = desc->crfn; + unsigned int done = 0; + + do { + fn(crypto_tfm_ctx(tfm), dst, src); + + src += bsize; + dst += bsize; + } while ((done += bsize) < nbytes); + + return done; } static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) @@ -185,9 +223,13 @@ static int ecb_encrypt(struct crypto_tfm *tfm, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - return crypt(tfm, dst, src, nbytes, - tfm->__crt_alg->cra_cipher.cia_encrypt, - ecb_process, NULL); + struct cipher_desc desc; + + desc.tfm = tfm; + desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt; + desc.prfn = ecb_process; + + return crypt(&desc, dst, src, nbytes); } static int ecb_decrypt(struct crypto_tfm *tfm, @@ -195,9 +237,13 @@ static int ecb_decrypt(struct crypto_tfm *tfm, struct scatterlist *src, unsigned int nbytes) { - return crypt(tfm, dst, src, nbytes, - tfm->__crt_alg->cra_cipher.cia_decrypt, - ecb_process, NULL); + struct cipher_desc desc; + + desc.tfm = tfm; + desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt; + desc.prfn = ecb_process; + + return crypt(&desc, dst, src, nbytes); } static int cbc_encrypt(struct crypto_tfm *tfm, @@ -205,9 +251,14 @@ static int cbc_encrypt(struct crypto_tfm *tfm, struct scatterlist *src, unsigned int nbytes) { - return crypt(tfm, dst, src, nbytes, - tfm->__crt_alg->cra_cipher.cia_encrypt, - cbc_process_encrypt, tfm->crt_cipher.cit_iv); + struct cipher_desc desc; + + desc.tfm = tfm; + desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt; + desc.prfn = cbc_process_encrypt; + desc.info = tfm->crt_cipher.cit_iv; + + return crypt(&desc, dst, src, nbytes); } static int cbc_encrypt_iv(struct crypto_tfm *tfm, @@ -215,9 +266,14 @@ static int cbc_encrypt_iv(struct crypto_tfm *tfm, struct scatterlist *src, unsigned int nbytes, u8 *iv) { - return crypt(tfm, dst, src, nbytes, - tfm->__crt_alg->cra_cipher.cia_encrypt, - cbc_process_encrypt, iv); + struct cipher_desc desc; + + desc.tfm = tfm; + desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt; + desc.prfn = cbc_process_encrypt; + desc.info = iv; + + return crypt(&desc, dst, src, nbytes); } static int cbc_decrypt(struct crypto_tfm *tfm, @@ -225,9 +281,14 @@ static int cbc_decrypt(struct crypto_tfm *tfm, struct scatterlist *src, unsigned int nbytes) { - return crypt(tfm, dst, src, nbytes, - tfm->__crt_alg->cra_cipher.cia_decrypt, - cbc_process_decrypt, tfm->crt_cipher.cit_iv); + struct cipher_desc desc; + + desc.tfm = tfm; + desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt; + desc.prfn = cbc_process_decrypt; + desc.info = tfm->crt_cipher.cit_iv; + + return crypt(&desc, dst, src, nbytes); } static int cbc_decrypt_iv(struct crypto_tfm *tfm, @@ -235,9 
+296,14 @@ static int cbc_decrypt_iv(struct crypto_tfm *tfm, struct scatterlist *src, unsigned int nbytes, u8 *iv) { - return crypt(tfm, dst, src, nbytes, - tfm->__crt_alg->cra_cipher.cia_decrypt, - cbc_process_decrypt, iv); + struct cipher_desc desc; + + desc.tfm = tfm; + desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt; + desc.prfn = cbc_process_decrypt; + desc.info = iv; + + return crypt(&desc, dst, src, nbytes); } static int nocrypt(struct crypto_tfm *tfm, diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 50c9461e8cc6..47ac90e615f4 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -100,7 +100,7 @@ void scatterwalk_done(struct scatter_walk *walk, int out, int more) int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out) { - do { + while (nbytes > walk->len_this_page) { memcpy_dir(buf, walk->data, walk->len_this_page, out); buf += walk->len_this_page; nbytes -= walk->len_this_page; @@ -108,7 +108,7 @@ int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, scatterwalk_unmap(walk, out); scatterwalk_pagedone(walk, out, 1); scatterwalk_map(walk, out); - } while (nbytes > walk->len_this_page); + } memcpy_dir(buf, walk->data, nbytes, out); return nbytes; diff --git a/crypto/scatterwalk.h b/crypto/scatterwalk.h index 02aa56c649b4..5495bb970816 100644 --- a/crypto/scatterwalk.h +++ b/crypto/scatterwalk.h @@ -40,10 +40,10 @@ static inline int scatterwalk_samebuf(struct scatter_walk *walk_in, walk_in->offset == walk_out->offset; } -static inline int scatterwalk_across_pages(struct scatter_walk *walk, - unsigned int nbytes) +static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, + unsigned int nbytes) { - return nbytes > walk->len_this_page; + return nbytes > walk->len_this_page ? walk->len_this_page : nbytes; } static inline void scatterwalk_advance(struct scatter_walk *walk, -- cgit v1.2.3 From 40725181b74be6b0e3bdc8c05bd1e0b9873ec5cc Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 6 Jul 2005 13:51:52 -0700 Subject: [CRYPTO] Add support for low-level multi-block operations This patch adds hooks for cipher algorithms to implement multi-block ECB/CBC operations directly. This is expected to provide significant performance boosts to the VIA Padlock. It could also be used for improving software implementations such as AES where operating on multiple blocks at a time may enable certain optimisations. Signed-off-by: Herbert Xu Signed-off-by: David S.
Miller --- crypto/cipher.c | 38 ++++++++++++++++++-------------------- crypto/internal.h | 5 ----- include/linux/crypto.h | 28 +++++++++++++++++++++++++++- 3 files changed, 45 insertions(+), 26 deletions(-) (limited to 'crypto/cipher.c') diff --git a/crypto/cipher.c b/crypto/cipher.c index c4243345b154..54c4a560070d 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -23,14 +23,6 @@ #include "internal.h" #include "scatterwalk.h" -struct cipher_desc { - struct crypto_tfm *tfm; - void (*crfn)(void *ctx, u8 *dst, const u8 *src); - unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst, - const u8 *src, unsigned int nbytes); - void *info; -}; - static inline void xor_64(u8 *a, const u8 *b) { ((u32 *)a)[0] ^= ((u32 *)b)[0]; @@ -224,10 +216,11 @@ static int ecb_encrypt(struct crypto_tfm *tfm, struct scatterlist *src, unsigned int nbytes) { struct cipher_desc desc; + struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; desc.tfm = tfm; - desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt; - desc.prfn = ecb_process; + desc.crfn = cipher->cia_encrypt; + desc.prfn = cipher->cia_encrypt_ecb ?: ecb_process; return crypt(&desc, dst, src, nbytes); } @@ -238,10 +231,11 @@ static int ecb_decrypt(struct crypto_tfm *tfm, unsigned int nbytes) { struct cipher_desc desc; + struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; desc.tfm = tfm; - desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt; - desc.prfn = ecb_process; + desc.crfn = cipher->cia_decrypt; + desc.prfn = cipher->cia_decrypt_ecb ?: ecb_process; return crypt(&desc, dst, src, nbytes); } @@ -252,10 +246,11 @@ static int cbc_encrypt(struct crypto_tfm *tfm, unsigned int nbytes) { struct cipher_desc desc; + struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; desc.tfm = tfm; - desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt; - desc.prfn = cbc_process_encrypt; + desc.crfn = cipher->cia_encrypt; + desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt; desc.info = tfm->crt_cipher.cit_iv; return crypt(&desc, dst, src, nbytes); @@ -267,10 +262,11 @@ static int cbc_encrypt_iv(struct crypto_tfm *tfm, unsigned int nbytes, u8 *iv) { struct cipher_desc desc; + struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; desc.tfm = tfm; - desc.crfn = tfm->__crt_alg->cra_cipher.cia_encrypt; - desc.prfn = cbc_process_encrypt; + desc.crfn = cipher->cia_encrypt; + desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt; desc.info = iv; return crypt(&desc, dst, src, nbytes); @@ -282,10 +278,11 @@ static int cbc_decrypt(struct crypto_tfm *tfm, unsigned int nbytes) { struct cipher_desc desc; + struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; desc.tfm = tfm; - desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt; - desc.prfn = cbc_process_decrypt; + desc.crfn = cipher->cia_decrypt; + desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt; desc.info = tfm->crt_cipher.cit_iv; return crypt(&desc, dst, src, nbytes); @@ -297,10 +294,11 @@ static int cbc_decrypt_iv(struct crypto_tfm *tfm, unsigned int nbytes, u8 *iv) { struct cipher_desc desc; + struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; desc.tfm = tfm; - desc.crfn = tfm->__crt_alg->cra_cipher.cia_decrypt; - desc.prfn = cbc_process_decrypt; + desc.crfn = cipher->cia_decrypt; + desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt; desc.info = iv; return crypt(&desc, dst, src, nbytes); diff --git a/crypto/internal.h b/crypto/internal.h index 964b9a60ca24..5ed383f7dce6 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -42,11 +42,6 @@ static inline void 
crypto_yield(struct crypto_tfm *tfm) cond_resched(); } -static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm) -{ - return (void *)&tfm[1]; -} - struct crypto_alg *crypto_alg_lookup(const char *name); /* A far more intelligent version of this is planned. For now, just diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 387da6a3e58c..26ce01c25745 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -61,6 +61,15 @@ #define CRYPTO_DIR_DECRYPT 0 struct scatterlist; +struct crypto_tfm; + +struct cipher_desc { + struct crypto_tfm *tfm; + void (*crfn)(void *ctx, u8 *dst, const u8 *src); + unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst, + const u8 *src, unsigned int nbytes); + void *info; +}; /* * Algorithms: modular crypto algorithm implementations, managed @@ -73,6 +82,19 @@ struct cipher_alg { unsigned int keylen, u32 *flags); void (*cia_encrypt)(void *ctx, u8 *dst, const u8 *src); void (*cia_decrypt)(void *ctx, u8 *dst, const u8 *src); + + unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc, + u8 *dst, const u8 *src, + unsigned int nbytes); + unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc, + u8 *dst, const u8 *src, + unsigned int nbytes); + unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc, + u8 *dst, const u8 *src, + unsigned int nbytes); + unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc, + u8 *dst, const u8 *src, + unsigned int nbytes); }; struct digest_alg { @@ -136,7 +158,6 @@ static inline int crypto_alg_available(const char *name, u32 flags) * and core processing logic. Managed via crypto_alloc_tfm() and * crypto_free_tfm(), as well as the various helpers below. */ -struct crypto_tfm; struct cipher_tfm { void *cit_iv; @@ -266,6 +287,11 @@ static inline unsigned int crypto_tfm_alg_digestsize(struct crypto_tfm *tfm) return tfm->__crt_alg->cra_digest.dia_digestsize; } +static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm) +{ + return (void *)&tfm[1]; +} + /* * API wrappers. */ -- cgit v1.2.3 From 95477377995aefa2ec1654a9a3777bd57ea99146 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 6 Jul 2005 13:52:09 -0700 Subject: [CRYPTO] Add alignmask for low-level cipher implementations The VIA Padlock device requires the input and output buffers to be aligned on 16-byte boundaries. This patch adds the alignmask attribute for low-level cipher implementations to indicate their alignment requirements. The mid-level crypt() function will copy the input/output buffers if they are not aligned correctly before they are passed to the low-level implementation. Strictly speaking, some of the software implementations require the buffers to be aligned on 4-byte boundaries as they do 32-bit loads. However, it is not clear whether it is better to copy the buffers or pay the penalty for unaligned loads/stores. Signed-off-by: Herbert Xu Signed-off-by: David S. 
Miller --- crypto/api.c | 6 ++++++ crypto/cipher.c | 43 ++++++++++++++++++++++++++++++++++++------- crypto/scatterwalk.h | 6 ++++++ include/linux/crypto.h | 1 + 4 files changed, 49 insertions(+), 7 deletions(-) (limited to 'crypto/cipher.c') diff --git a/crypto/api.c b/crypto/api.c index 394169a8577d..f55856b21992 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -168,6 +168,12 @@ int crypto_register_alg(struct crypto_alg *alg) { int ret = 0; struct crypto_alg *q; + + if (alg->cra_alignmask & (alg->cra_alignmask + 1)) + return -EINVAL; + + if (alg->cra_alignmask > PAGE_SIZE) + return -EINVAL; down_write(&crypto_alg_sem); diff --git a/crypto/cipher.c b/crypto/cipher.c index 54c4a560070d..85eb12f8e564 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -41,8 +41,10 @@ static unsigned int crypt_slow(const struct cipher_desc *desc, struct scatter_walk *in, struct scatter_walk *out, unsigned int bsize) { - u8 src[bsize]; - u8 dst[bsize]; + unsigned int alignmask = desc->tfm->__crt_alg->cra_alignmask; + u8 buffer[bsize * 2 + alignmask]; + u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); + u8 *dst = src + bsize; unsigned int n; n = scatterwalk_copychunks(src, in, bsize, 0); @@ -59,15 +61,24 @@ static unsigned int crypt_slow(const struct cipher_desc *desc, static inline unsigned int crypt_fast(const struct cipher_desc *desc, struct scatter_walk *in, struct scatter_walk *out, - unsigned int nbytes) + unsigned int nbytes, u8 *tmp) { u8 *src, *dst; src = in->data; dst = scatterwalk_samebuf(in, out) ? src : out->data; + if (tmp) { + memcpy(tmp, in->data, nbytes); + src = tmp; + dst = tmp; + } + nbytes = desc->prfn(desc, dst, src, nbytes); + if (tmp) + memcpy(out->data, tmp, nbytes); + scatterwalk_advance(in, nbytes); scatterwalk_advance(out, nbytes); @@ -87,6 +98,8 @@ static int crypt(const struct cipher_desc *desc, struct scatter_walk walk_in, walk_out; struct crypto_tfm *tfm = desc->tfm; const unsigned int bsize = crypto_tfm_alg_blocksize(tfm); + unsigned int alignmask = tfm->__crt_alg->cra_alignmask; + unsigned long buffer = 0; if (!nbytes) return 0; @@ -100,16 +113,27 @@ static int crypt(const struct cipher_desc *desc, scatterwalk_start(&walk_out, dst); for(;;) { - unsigned int n; + unsigned int n = nbytes; + u8 *tmp = NULL; + + if (!scatterwalk_aligned(&walk_in, alignmask) || + !scatterwalk_aligned(&walk_out, alignmask)) { + if (!buffer) { + buffer = __get_free_page(GFP_ATOMIC); + if (!buffer) + n = 0; + } + tmp = (u8 *)buffer; + } scatterwalk_map(&walk_in, 0); scatterwalk_map(&walk_out, 1); - n = scatterwalk_clamp(&walk_in, nbytes); + n = scatterwalk_clamp(&walk_in, n); n = scatterwalk_clamp(&walk_out, n); if (likely(n >= bsize)) - n = crypt_fast(desc, &walk_in, &walk_out, n); + n = crypt_fast(desc, &walk_in, &walk_out, n, tmp); else n = crypt_slow(desc, &walk_in, &walk_out, bsize); @@ -119,10 +143,15 @@ static int crypt(const struct cipher_desc *desc, scatterwalk_done(&walk_out, 1, nbytes); if (!nbytes) - return 0; + break; crypto_yield(tfm); } + + if (buffer) + free_page(buffer); + + return 0; } static unsigned int cbc_process_encrypt(const struct cipher_desc *desc, diff --git a/crypto/scatterwalk.h b/crypto/scatterwalk.h index 5495bb970816..e79925c474a3 100644 --- a/crypto/scatterwalk.h +++ b/crypto/scatterwalk.h @@ -55,6 +55,12 @@ static inline void scatterwalk_advance(struct scatter_walk *walk, walk->len_this_segment -= nbytes; } +static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk, + unsigned int alignmask) +{ + return !(walk->offset & alignmask); +} + void 
scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg); int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out); void scatterwalk_map(struct scatter_walk *walk, int out); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 26ce01c25745..ac9d49beecd3 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -124,6 +124,7 @@ struct crypto_alg { u32 cra_flags; unsigned int cra_blocksize; unsigned int cra_ctxsize; + unsigned int cra_alignmask; const char cra_name[CRYPTO_MAX_ALG_NAME]; union { -- cgit v1.2.3 From fbdae9f3e7fb57c07cb0d973f113eb25da2e8ff2 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 6 Jul 2005 13:53:29 -0700 Subject: [CRYPTO] Ensure cit_iv is aligned correctly This patch ensures that cit_iv is aligned according to cra_alignmask by allocating it as part of the tfm structure. As a side effect the crypto layer will also guarantee that the tfm ctx area has enough space to be aligned by cra_alignmask. This allows us to remove the extra space reservation from the Padlock driver. Signed-off-by: Herbert Xu Signed-off-by: David S. Miller --- crypto/api.c | 32 +++++++++++++++++++++++++++++--- crypto/cipher.c | 15 +++++++++------ crypto/internal.h | 28 ++++++++++++++++++++++++++++ drivers/crypto/padlock-aes.c | 3 +-- include/linux/crypto.h | 5 +++++ 5 files changed, 72 insertions(+), 11 deletions(-) (limited to 'crypto/cipher.c') diff --git a/crypto/api.c b/crypto/api.c index 0b583d24f7fa..2d8d828c0ca2 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -125,20 +125,46 @@ static void crypto_exit_ops(struct crypto_tfm *tfm) } } +static unsigned int crypto_ctxsize(struct crypto_alg *alg, int flags) +{ + unsigned int len; + + switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { + default: + BUG(); + + case CRYPTO_ALG_TYPE_CIPHER: + len = crypto_cipher_ctxsize(alg, flags); + break; + + case CRYPTO_ALG_TYPE_DIGEST: + len = crypto_digest_ctxsize(alg, flags); + break; + + case CRYPTO_ALG_TYPE_COMPRESS: + len = crypto_compress_ctxsize(alg, flags); + break; + } + + return len + alg->cra_alignmask; +} + struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags) { struct crypto_tfm *tfm = NULL; struct crypto_alg *alg; + unsigned int tfm_size; alg = crypto_alg_mod_lookup(name); if (alg == NULL) goto out; - - tfm = kmalloc(sizeof(*tfm) + alg->cra_ctxsize, GFP_KERNEL); + + tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, flags); + tfm = kmalloc(tfm_size, GFP_KERNEL); if (tfm == NULL) goto out_put; - memset(tfm, 0, sizeof(*tfm) + alg->cra_ctxsize); + memset(tfm, 0, tfm_size); tfm->__crt_alg = alg; diff --git a/crypto/cipher.c b/crypto/cipher.c index 85eb12f8e564..d3295ce14a57 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -41,7 +41,7 @@ static unsigned int crypt_slow(const struct cipher_desc *desc, struct scatter_walk *in, struct scatter_walk *out, unsigned int bsize) { - unsigned int alignmask = desc->tfm->__crt_alg->cra_alignmask; + unsigned int alignmask = crypto_tfm_alg_alignmask(desc->tfm); u8 buffer[bsize * 2 + alignmask]; u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); u8 *dst = src + bsize; @@ -98,7 +98,7 @@ static int crypt(const struct cipher_desc *desc, struct scatter_walk walk_in, walk_out; struct crypto_tfm *tfm = desc->tfm; const unsigned int bsize = crypto_tfm_alg_blocksize(tfm); - unsigned int alignmask = tfm->__crt_alg->cra_alignmask; + unsigned int alignmask = crypto_tfm_alg_alignmask(tfm); unsigned long buffer = 0; if (!nbytes) @@ -399,6 +399,8 @@ int crypto_init_cipher_ops(struct crypto_tfm 
*tfm) } if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) { + unsigned int align; + unsigned long addr; switch (crypto_tfm_alg_blocksize(tfm)) { case 8: @@ -418,9 +420,11 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm) } ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm); - ops->cit_iv = kmalloc(ops->cit_ivsize, GFP_KERNEL); - if (ops->cit_iv == NULL) - ret = -ENOMEM; + align = crypto_tfm_alg_alignmask(tfm) + 1; + addr = (unsigned long)crypto_tfm_ctx(tfm); + addr = ALIGN(addr, align); + addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align); + ops->cit_iv = (void *)addr; } out: @@ -429,5 +433,4 @@ out: void crypto_exit_cipher_ops(struct crypto_tfm *tfm) { - kfree(tfm->crt_cipher.cit_iv); } diff --git a/crypto/internal.h b/crypto/internal.h index 83b1b6d6d92b..68612874b5fd 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -16,6 +16,7 @@ #include #include #include +#include #include extern enum km_type crypto_km_types[]; @@ -61,6 +62,33 @@ static inline void crypto_init_proc(void) { } #endif +static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg, + int flags) +{ + return alg->cra_ctxsize; +} + +static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg, + int flags) +{ + unsigned int len = alg->cra_ctxsize; + + switch (flags & CRYPTO_TFM_MODE_MASK) { + case CRYPTO_TFM_MODE_CBC: + len = ALIGN(len, alg->cra_alignmask + 1); + len += alg->cra_blocksize; + break; + } + + return len; +} + +static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg, + int flags) +{ + return alg->cra_ctxsize; +} + int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags); int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags); int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags); diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index d2745ff4699c..c5b58fae95f2 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -465,8 +465,7 @@ static struct crypto_alg aes_alg = { .cra_name = "aes", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = AES_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct aes_ctx) + - PADLOCK_ALIGNMENT, + .cra_ctxsize = sizeof(struct aes_ctx), .cra_alignmask = PADLOCK_ALIGNMENT - 1, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), diff --git a/include/linux/crypto.h b/include/linux/crypto.h index ac9d49beecd3..5e2bcc636a02 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -288,6 +288,11 @@ static inline unsigned int crypto_tfm_alg_digestsize(struct crypto_tfm *tfm) return tfm->__crt_alg->cra_digest.dia_digestsize; } +static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_alignmask; +} + static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm) { return (void *)&tfm[1]; -- cgit v1.2.3 From 915e8561d559abba1b81934e31e54a3f850fa7bf Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 6 Jul 2005 13:53:47 -0700 Subject: [CRYPTO] Handle unaligned iv from encrypt_iv/decrypt_iv Even though cit_iv is now always aligned, the user can still supply an unaligned iv through crypto_cipher_encrypt_iv/crypto_cipher_decrypt_iv. This patch will check the alignment of the user-supplied iv and copy it if necessary. Signed-off-by: Herbert Xu Signed-off-by: David S. 
Miller --- crypto/cipher.c | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) (limited to 'crypto/cipher.c') diff --git a/crypto/cipher.c b/crypto/cipher.c index d3295ce14a57..1c92c6bb138b 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -154,6 +154,31 @@ static int crypt(const struct cipher_desc *desc, return 0; } +static int crypt_iv_unaligned(struct cipher_desc *desc, + struct scatterlist *dst, + struct scatterlist *src, + unsigned int nbytes) +{ + struct crypto_tfm *tfm = desc->tfm; + unsigned int alignmask = crypto_tfm_alg_alignmask(tfm); + u8 *iv = desc->info; + + if (unlikely(((unsigned long)iv & alignmask))) { + unsigned int ivsize = tfm->crt_cipher.cit_ivsize; + u8 buffer[ivsize + alignmask]; + u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1); + int err; + + desc->info = memcpy(tmp, iv, ivsize); + err = crypt(desc, dst, src, nbytes); + memcpy(iv, tmp, ivsize); + + return err; + } + + return crypt(desc, dst, src, nbytes); +} + static unsigned int cbc_process_encrypt(const struct cipher_desc *desc, u8 *dst, const u8 *src, unsigned int nbytes) @@ -298,7 +323,7 @@ static int cbc_encrypt_iv(struct crypto_tfm *tfm, desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt; desc.info = iv; - return crypt(&desc, dst, src, nbytes); + return crypt_iv_unaligned(&desc, dst, src, nbytes); } static int cbc_decrypt(struct crypto_tfm *tfm, @@ -330,7 +355,7 @@ static int cbc_decrypt_iv(struct crypto_tfm *tfm, desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt; desc.info = iv; - return crypt(&desc, dst, src, nbytes); + return crypt_iv_unaligned(&desc, dst, src, nbytes); } static int nocrypt(struct crypto_tfm *tfm, -- cgit v1.2.3
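
Taken together, this series gives a cipher driver two levers: the cia_encrypt_ecb/cia_decrypt_ecb and cia_encrypt_cbc/cia_decrypt_cbc hooks for processing many blocks per call, and cra_alignmask for declaring the buffer alignment that the mid-level crypt() routine will enforce by bouncing misaligned data through a temporary page. The sketch below shows how a driver might wire these up against the 2005-era API added above. It is illustrative only: the "example" algorithm, struct example_ctx and the example_hw_* helper are hypothetical placeholders, and a real driver would supply genuine block functions (and decrypt/CBC hooks) in their place.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/crypto.h>

#define EXAMPLE_BLOCK_SIZE	16
#define EXAMPLE_ALIGNMENT	16

struct example_ctx {
	u8 key[32];
	unsigned int keylen;
};

/* Hypothetical single-block primitive standing in for real hardware/software. */
static void example_hw_encrypt(struct example_ctx *ctx, u8 *dst, const u8 *src)
{
	memcpy(dst, src, EXAMPLE_BLOCK_SIZE);	/* placeholder "cipher" */
}

static int example_setkey(void *ctx_arg, const u8 *key,
			  unsigned int keylen, u32 *flags)
{
	struct example_ctx *ctx = ctx_arg;

	if (keylen > sizeof(ctx->key)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;
	return 0;
}

/* Single-block entry point; used by crypt_slow() and as the generic fallback. */
static void example_encrypt(void *ctx, u8 *dst, const u8 *src)
{
	example_hw_encrypt(ctx, dst, src);
}

/*
 * Multi-block ECB fast path.  crypt() only calls this with at least one
 * block of data and with src/dst satisfying cra_alignmask; loop over the
 * whole blocks and report how many bytes were actually consumed.
 */
static unsigned int example_encrypt_ecb(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes)
{
	struct example_ctx *ctx = crypto_tfm_ctx(desc->tfm);
	unsigned int bsize = crypto_tfm_alg_blocksize(desc->tfm);
	unsigned int done = 0;

	do {
		example_hw_encrypt(ctx, dst, src);
		src += bsize;
		dst += bsize;
		done += bsize;
	} while (nbytes - done >= bsize);

	return done;
}

static struct crypto_alg example_alg = {
	.cra_name	= "example",
	.cra_flags	= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize	= EXAMPLE_BLOCK_SIZE,
	.cra_ctxsize	= sizeof(struct example_ctx),
	/* Must be 2^n - 1; crypt() bounces any buffer that violates it. */
	.cra_alignmask	= EXAMPLE_ALIGNMENT - 1,
	.cra_module	= THIS_MODULE,
	.cra_list	= LIST_HEAD_INIT(example_alg.cra_list),
	.cra_u		= { .cipher = {
		.cia_min_keysize	= 16,
		.cia_max_keysize	= 32,
		.cia_setkey		= example_setkey,
		.cia_encrypt		= example_encrypt,
		.cia_decrypt		= example_encrypt,	/* placeholder; a real cipher differs */
		.cia_encrypt_ecb	= example_encrypt_ecb,	/* multi-block fast path */
	} }
};

static int __init example_init(void)
{
	return crypto_register_alg(&example_alg);
}

static void __exit example_exit(void)
{
	crypto_unregister_alg(&example_alg);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Any hook left NULL (here the decrypt and CBC variants) simply falls back to the generic per-block ecb_process()/cbc_process_*() paths. Since the alignmask patch, crypto_register_alg() also rejects a cra_alignmask that is not of the form 2^n - 1, so a bad mask fails at registration time rather than at runtime.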