From 0d6ecb2e43d6b15699cea1fbd7ce0c981694b9b4 Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Mon, 14 Oct 2019 19:45:15 -0700
Subject: crypto: powerpc - don't unnecessarily use atomic scatterwalk

The PowerPC SPE implementations of AES modes only disable preemption
during the actual encryption/decryption, not during the scatterwalk
functions. It's therefore unnecessary to request an atomic scatterwalk.
So don't do so.

Signed-off-by: Eric Biggers
Signed-off-by: Herbert Xu
---
 arch/powerpc/crypto/aes-spe-glue.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c
index 3a4ca7d32477..319f1dbb3a70 100644
--- a/arch/powerpc/crypto/aes-spe-glue.c
+++ b/arch/powerpc/crypto/aes-spe-glue.c
@@ -186,7 +186,6 @@ static int ppc_ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	unsigned int ubytes;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
@@ -214,7 +213,6 @@ static int ppc_ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	unsigned int ubytes;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
@@ -242,7 +240,6 @@ static int ppc_cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	unsigned int ubytes;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
@@ -270,7 +267,6 @@ static int ppc_cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	unsigned int ubytes;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 
@@ -298,7 +294,6 @@ static int ppc_ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	unsigned int pbytes, ubytes;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
 
@@ -329,7 +324,6 @@ static int ppc_xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	int err;
 	u32 *twk;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 	twk = ctx->key_twk;
 
@@ -360,7 +354,6 @@ static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	int err;
 	u32 *twk;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
 	twk = ctx->key_twk;
--
cgit v1.2.3
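For reference, the reason the scatterwalk never runs with preemption disabled
is visible in the driver's own helpers: only the calls into the SPE assembler
are bracketed by spe_begin()/spe_end(). A minimal sketch of that pairing,
paraphrased from aes-spe-glue.c (not part of the patch above):

	static void spe_begin(void)
	{
		/* disable preemption and make the SPE unit usable in kernel mode */
		preempt_disable();
		enable_kernel_spe();
	}

	static void spe_end(void)
	{
		disable_kernel_spe();
		/* re-enable preemption */
		preempt_enable();
	}

Since the walk itself runs entirely outside this window, it is allowed to
sleep, and the MAY_SLEEP flag no longer needs to be cleared.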
From 8255e65df961fd0c9b7d86317e915606751562a4 Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Mon, 14 Oct 2019 19:45:16 -0700
Subject: crypto: powerpc - don't set ivsize for AES-ECB

Set the ivsize for the "ecb-ppc-spe" algorithm to 0, since ECB mode
doesn't take an IV.

This fixes a failure in the extra crypto self-tests:

    alg: skcipher: ivsize for ecb-ppc-spe (16) doesn't match generic impl (0)

Signed-off-by: Eric Biggers
Signed-off-by: Herbert Xu
---
 arch/powerpc/crypto/aes-spe-glue.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c
index 319f1dbb3a70..4189d2644f74 100644
--- a/arch/powerpc/crypto/aes-spe-glue.c
+++ b/arch/powerpc/crypto/aes-spe-glue.c
@@ -415,7 +415,6 @@ static struct crypto_alg aes_algs[] = { {
 		.blkcipher = {
 			.min_keysize	=	AES_MIN_KEY_SIZE,
 			.max_keysize	=	AES_MAX_KEY_SIZE,
-			.ivsize		=	AES_BLOCK_SIZE,
 			.setkey		=	ppc_aes_setkey,
 			.encrypt	=	ppc_ecb_encrypt,
 			.decrypt	=	ppc_ecb_decrypt,
--
cgit v1.2.3
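The failing check comes from the extra run-time self-tests
(CONFIG_CRYPTO_MANAGER_EXTRA_TESTS), which instantiate the generic
implementation of the same algorithm and compare its properties against the
driver under test. A simplified paraphrase of the ivsize consistency check
(the real code lives in crypto/testmgr.c; the names here are illustrative,
not the literal testmgr code):

	/* simplified sketch of the extra self-test's property comparison */
	if (crypto_skcipher_ivsize(tfm) != crypto_skcipher_ivsize(generic_tfm)) {
		pr_err("alg: skcipher: ivsize for %s (%u) doesn't match generic impl (%u)\n",
		       driver, crypto_skcipher_ivsize(tfm),
		       crypto_skcipher_ivsize(generic_tfm));
		return -EINVAL;
	}

With .ivsize left unset (0) for "ecb-ppc-spe", the driver now matches the
generic "ecb(aes)" template and the test passes.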
From 7f725f41f62750832817047e44892ce92d65e6aa Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Mon, 14 Oct 2019 19:45:17 -0700
Subject: crypto: powerpc - convert SPE AES algorithms to skcipher API

Convert the glue code for the PowerPC SPE implementations of AES-ECB,
AES-CBC, AES-CTR, and AES-XTS from the deprecated "blkcipher" API to the
"skcipher" API. This is needed in order for the blkcipher API to be
removed.

Tested with:

	export ARCH=powerpc CROSS_COMPILE=powerpc-linux-gnu-
	make mpc85xx_defconfig
	cat >> .config << EOF
	# CONFIG_MODULES is not set
	# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
	CONFIG_DEBUG_KERNEL=y
	CONFIG_CRYPTO_MANAGER_EXTRA_TESTS=y
	CONFIG_CRYPTO_AES=y
	CONFIG_CRYPTO_CBC=y
	CONFIG_CRYPTO_CTR=y
	CONFIG_CRYPTO_ECB=y
	CONFIG_CRYPTO_XTS=y
	CONFIG_CRYPTO_AES_PPC_SPE=y
	EOF
	make olddefconfig
	make -j32
	qemu-system-ppc -M mpc8544ds -cpu e500 -nographic \
		-kernel arch/powerpc/boot/zImage \
		-append cryptomgr.fuzz_iterations=1000

Note that xts-ppc-spe still fails the comparison tests due to the lack
of ciphertext stealing support. This is not addressed by this patch.

This patch also cleans up the code by making ->encrypt() and ->decrypt()
call a common function for each of ECB, CBC, and XTS, and by using a
clearer way to compute the length to process at each step; see the
side-by-side comparison after this patch.

Signed-off-by: Eric Biggers
Signed-off-by: Herbert Xu
---
 arch/powerpc/crypto/aes-spe-glue.c | 381 ++++++++++++++++---------------------
 crypto/Kconfig                     |   1 +
 2 files changed, 166 insertions(+), 216 deletions(-)

diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c
index 4189d2644f74..f828f8bcd0c6 100644
--- a/arch/powerpc/crypto/aes-spe-glue.c
+++ b/arch/powerpc/crypto/aes-spe-glue.c
@@ -17,6 +17,7 @@
 #include <asm/byteorder.h>
 #include <asm/switch_to.h>
 #include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
 #include <crypto/xts.h>
 
 /*
@@ -118,13 +119,19 @@ static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
 	return 0;
 }
 
-static int ppc_xts_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+static int ppc_aes_setkey_skcipher(struct crypto_skcipher *tfm,
+				   const u8 *in_key, unsigned int key_len)
+{
+	return ppc_aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
+static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 		   unsigned int key_len)
 {
-	struct ppc_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err;
 
-	err = xts_check_key(tfm, in_key, key_len);
+	err = xts_verify_key(tfm, in_key, key_len);
 	if (err)
 		return err;
 
@@ -133,7 +140,7 @@ static int ppc_xts_setkey(struct crypto_tfm *tfm, const u8 *in_key,
 	if (key_len != AES_KEYSIZE_128 &&
 	    key_len != AES_KEYSIZE_192 &&
 	    key_len != AES_KEYSIZE_256) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 
@@ -178,201 +185,154 @@ static void ppc_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	spe_end();
 }
 
-static int ppc_ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_ecb_crypt(struct skcipher_request *req, bool enc)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
-		ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, ctx->rounds, nbytes);
+		if (enc)
+			ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_enc, ctx->rounds, nbytes);
+		else
+			ppc_decrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_dec, ctx->rounds, nbytes);
 		spe_end();
 
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_ecb_encrypt(struct skcipher_request *req)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
-	int err;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
-
-		spe_begin();
-		ppc_decrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, ctx->rounds, nbytes);
-		spe_end();
-
-		err = blkcipher_walk_done(desc, &walk, ubytes);
-	}
+	return ppc_ecb_crypt(req, true);
+}
 
-	return err;
+static int ppc_ecb_decrypt(struct skcipher_request *req)
+{
+	return ppc_ecb_crypt(req, false);
 }
 
-static int ppc_cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_cbc_crypt(struct skcipher_request *req, bool enc)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
-		ppc_encrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, ctx->rounds, nbytes, walk.iv);
+		if (enc)
+			ppc_encrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_enc, ctx->rounds, nbytes,
+					walk.iv);
+		else
+			ppc_decrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_dec, ctx->rounds, nbytes,
+					walk.iv);
 		spe_end();
 
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_cbc_encrypt(struct skcipher_request *req)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
-	int err;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
-
-		spe_begin();
-		ppc_decrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, ctx->rounds, nbytes, walk.iv);
-		spe_end();
-
-		err = blkcipher_walk_done(desc, &walk, ubytes);
-	}
+	return ppc_cbc_crypt(req, true);
+}
 
-	return err;
+static int ppc_cbc_decrypt(struct skcipher_request *req)
+{
+	return ppc_cbc_crypt(req, false);
 }
 
-static int ppc_ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			 struct scatterlist *src, unsigned int nbytes)
+static int ppc_ctr_crypt(struct skcipher_request *req)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int pbytes, ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	while ((pbytes = walk.nbytes)) {
-		pbytes = pbytes > MAX_BYTES ? MAX_BYTES : pbytes;
-		pbytes = pbytes == nbytes ?
-			 nbytes : pbytes & ~(AES_BLOCK_SIZE - 1);
-		ubytes = walk.nbytes - pbytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
 		ppc_crypt_ctr(walk.dst.virt.addr, walk.src.virt.addr,
-			      ctx->key_enc, ctx->rounds, pbytes , walk.iv);
+			      ctx->key_enc, ctx->rounds, nbytes, walk.iv);
 		spe_end();
 
-		nbytes -= pbytes;
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_xts_crypt(struct skcipher_request *req, bool enc)
 {
-	struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 	u32 *twk;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 	twk = ctx->key_twk;
 
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
-		ppc_encrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, ctx->rounds, nbytes, walk.iv, twk);
+		if (enc)
+			ppc_encrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_enc, ctx->rounds, nbytes,
+					walk.iv, twk);
+		else
+			ppc_decrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_dec, ctx->rounds, nbytes,
+					walk.iv, twk);
 		spe_end();
 
 		twk = NULL;
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_xts_encrypt(struct skcipher_request *req)
 {
-	struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
-	int err;
-	u32 *twk;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-	twk = ctx->key_twk;
-
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
-
-		spe_begin();
-		ppc_decrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, ctx->rounds, nbytes, walk.iv, twk);
-		spe_end();
-
-		twk = NULL;
-		err = blkcipher_walk_done(desc, &walk, ubytes);
-	}
+	return ppc_xts_crypt(req, true);
+}
 
-	return err;
+static int ppc_xts_decrypt(struct skcipher_request *req)
+{
+	return ppc_xts_crypt(req, false);
 }
 
 /*
@@ -381,9 +341,9 @@ static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 * This improves IPsec thoughput by another few percent. Additionally we assume
 * that AES context is always aligned to at least 8 bytes because it is created
 * with kmalloc() in the crypto infrastructure
- *
 */
-static struct crypto_alg aes_algs[] = { {
+
+static struct crypto_alg aes_cipher_alg = {
 	.cra_name		=	"aes",
 	.cra_driver_name	=	"aes-ppc-spe",
 	.cra_priority		=	300,
@@ -401,95 +361,84 @@ static struct crypto_alg aes_algs[] = { {
 			.cia_decrypt		=	ppc_aes_decrypt
 		}
 	}
-}, {
-	.cra_name		=	"ecb(aes)",
-	.cra_driver_name	=	"ecb-ppc-spe",
-	.cra_priority		=	300,
-	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize		=	AES_BLOCK_SIZE,
-	.cra_ctxsize		=	sizeof(struct ppc_aes_ctx),
-	.cra_alignmask		=	0,
-	.cra_type		=	&crypto_blkcipher_type,
-	.cra_module		=	THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize	=	AES_MIN_KEY_SIZE,
-			.max_keysize	=	AES_MAX_KEY_SIZE,
-			.setkey		=	ppc_aes_setkey,
-			.encrypt	=	ppc_ecb_encrypt,
-			.decrypt	=	ppc_ecb_decrypt,
-		}
-	}
-}, {
-	.cra_name		=	"cbc(aes)",
-	.cra_driver_name	=	"cbc-ppc-spe",
-	.cra_priority		=	300,
-	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize		=	AES_BLOCK_SIZE,
-	.cra_ctxsize		=	sizeof(struct ppc_aes_ctx),
-	.cra_alignmask		=	0,
-	.cra_type		=	&crypto_blkcipher_type,
-	.cra_module		=	THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize	=	AES_MIN_KEY_SIZE,
-			.max_keysize	=	AES_MAX_KEY_SIZE,
-			.ivsize		=	AES_BLOCK_SIZE,
-			.setkey		=	ppc_aes_setkey,
-			.encrypt	=	ppc_cbc_encrypt,
-			.decrypt	=	ppc_cbc_decrypt,
-		}
-	}
-}, {
-	.cra_name		=	"ctr(aes)",
-	.cra_driver_name	=	"ctr-ppc-spe",
-	.cra_priority		=	300,
-	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize		=	1,
-	.cra_ctxsize		=	sizeof(struct ppc_aes_ctx),
-	.cra_alignmask		=	0,
-	.cra_type		=	&crypto_blkcipher_type,
-	.cra_module		=	THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize	=	AES_MIN_KEY_SIZE,
-			.max_keysize	=	AES_MAX_KEY_SIZE,
-			.ivsize		=	AES_BLOCK_SIZE,
-			.setkey		=	ppc_aes_setkey,
-			.encrypt	=	ppc_ctr_crypt,
-			.decrypt	=	ppc_ctr_crypt,
-		}
-	}
-}, {
-	.cra_name		=	"xts(aes)",
-	.cra_driver_name	=	"xts-ppc-spe",
-	.cra_priority		=	300,
-	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize		=	AES_BLOCK_SIZE,
-	.cra_ctxsize		=	sizeof(struct ppc_xts_ctx),
-	.cra_alignmask		=	0,
-	.cra_type		=	&crypto_blkcipher_type,
-	.cra_module		=	THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize	=	AES_MIN_KEY_SIZE * 2,
-			.max_keysize	=	AES_MAX_KEY_SIZE * 2,
-			.ivsize		=	AES_BLOCK_SIZE,
-			.setkey		=	ppc_xts_setkey,
-			.encrypt	=	ppc_xts_encrypt,
-			.decrypt	=	ppc_xts_decrypt,
-		}
+};
+
+static struct skcipher_alg aes_skcipher_algs[] = {
+	{
+		.base.cra_name		=	"ecb(aes)",
+		.base.cra_driver_name	=	"ecb-ppc-spe",
+		.base.cra_priority	=	300,
+		.base.cra_blocksize	=	AES_BLOCK_SIZE,
+		.base.cra_ctxsize	=	sizeof(struct ppc_aes_ctx),
+		.base.cra_module	=	THIS_MODULE,
+		.min_keysize		=	AES_MIN_KEY_SIZE,
+		.max_keysize		=	AES_MAX_KEY_SIZE,
+		.setkey			=	ppc_aes_setkey_skcipher,
+		.encrypt		=	ppc_ecb_encrypt,
+		.decrypt		=	ppc_ecb_decrypt,
+	}, {
+		.base.cra_name		=	"cbc(aes)",
+		.base.cra_driver_name	=	"cbc-ppc-spe",
+		.base.cra_priority	=	300,
+		.base.cra_blocksize	=	AES_BLOCK_SIZE,
+		.base.cra_ctxsize	=	sizeof(struct ppc_aes_ctx),
+		.base.cra_module	=	THIS_MODULE,
+		.min_keysize		=	AES_MIN_KEY_SIZE,
+		.max_keysize		=	AES_MAX_KEY_SIZE,
+		.ivsize			=	AES_BLOCK_SIZE,
+		.setkey			=	ppc_aes_setkey_skcipher,
+		.encrypt		=	ppc_cbc_encrypt,
+		.decrypt		=	ppc_cbc_decrypt,
+	}, {
+		.base.cra_name		=	"ctr(aes)",
+		.base.cra_driver_name	=	"ctr-ppc-spe",
+		.base.cra_priority	=	300,
+		.base.cra_blocksize	=	1,
+		.base.cra_ctxsize	=	sizeof(struct ppc_aes_ctx),
+		.base.cra_module	=	THIS_MODULE,
+		.min_keysize		=	AES_MIN_KEY_SIZE,
+		.max_keysize		=	AES_MAX_KEY_SIZE,
+		.ivsize			=	AES_BLOCK_SIZE,
+		.setkey			=	ppc_aes_setkey_skcipher,
+		.encrypt		=	ppc_ctr_crypt,
+		.decrypt		=	ppc_ctr_crypt,
+		.chunksize		=	AES_BLOCK_SIZE,
+	}, {
+		.base.cra_name		=	"xts(aes)",
+		.base.cra_driver_name	=	"xts-ppc-spe",
+		.base.cra_priority	=	300,
+		.base.cra_blocksize	=	AES_BLOCK_SIZE,
+		.base.cra_ctxsize	=	sizeof(struct ppc_xts_ctx),
+		.base.cra_module	=	THIS_MODULE,
+		.min_keysize		=	AES_MIN_KEY_SIZE * 2,
+		.max_keysize		=	AES_MAX_KEY_SIZE * 2,
+		.ivsize			=	AES_BLOCK_SIZE,
+		.setkey			=	ppc_xts_setkey,
+		.encrypt		=	ppc_xts_encrypt,
+		.decrypt		=	ppc_xts_decrypt,
 	}
-} };
+};
 
 static int __init ppc_aes_mod_init(void)
 {
-	return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
+	int err;
+
+	err = crypto_register_alg(&aes_cipher_alg);
+	if (err)
+		return err;
+
+	err = crypto_register_skciphers(aes_skcipher_algs,
+					ARRAY_SIZE(aes_skcipher_algs));
+	if (err)
+		crypto_unregister_alg(&aes_cipher_alg);
+	return err;
 }
 
 static void __exit ppc_aes_mod_fini(void)
 {
-	crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+	crypto_unregister_alg(&aes_cipher_alg);
+	crypto_unregister_skciphers(aes_skcipher_algs,
+				    ARRAY_SIZE(aes_skcipher_algs));
 }
 
 module_init(ppc_aes_mod_init);
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 8c38c2b7f8e7..320548b4dfa9 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1125,6 +1125,7 @@ config CRYPTO_AES_SPARC64
 config CRYPTO_AES_PPC_SPE
 	tristate "AES cipher algorithms (PPC SPE)"
 	depends on PPC && SPE
+	select CRYPTO_BLKCIPHER
 	help
 	  AES cipher algorithms (FIPS-197). Additionally the acceleration
 	  for popular block cipher modes ECB, CBC, CTR and XTS is supported.
--
cgit v1.2.3
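To make the "clearer way to compute the length" concrete, here are the two
per-step computations side by side, lifted from the hunks above. The old
loop derived ubytes, the number of bytes to leave unprocessed; the new loop
derives nbytes, the number of bytes to process, directly. CTR differs only
in that it rounds down solely when more data follows, so a final partial
block is still handled:

	/* old blkcipher loop: compute the bytes to leave undone */
	ubytes = nbytes > MAX_BYTES ?
		 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
	nbytes -= ubytes;

	/* new skcipher loop: compute the bytes to process */
	nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
	nbytes = round_down(nbytes, AES_BLOCK_SIZE);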
From d0be0720576439da2cefc16e648a61a7aebcf34f Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel
Date: Tue, 15 Oct 2019 10:14:12 +0200
Subject: crypto: powerpc/spe-xts - implement support for ciphertext stealing

Add the logic to deal with input sizes that are not a round multiple of
the AES block size, as described by the XTS spec. This brings the SPE
implementation in line with other kernel drivers that have been updated
recently to take this into account.

Cc: Eric Biggers
Signed-off-by: Ard Biesheuvel
Signed-off-by: Herbert Xu
---
 arch/powerpc/crypto/aes-spe-glue.c | 81 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 79 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c
index f828f8bcd0c6..1fad5d4c658d 100644
--- a/arch/powerpc/crypto/aes-spe-glue.c
+++ b/arch/powerpc/crypto/aes-spe-glue.c
@@ -19,6 +19,8 @@
 #include <crypto/algapi.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/xts.h>
+#include <crypto/gf128mul.h>
+#include <crypto/scatterwalk.h>
 
 /*
  * MAX_BYTES defines the number of bytes that are allowed to be processed
@@ -327,12 +329,87 @@ static int ppc_xts_crypt(struct skcipher_request *req, bool enc)
 
 static int ppc_xts_encrypt(struct skcipher_request *req)
 {
-	return ppc_xts_crypt(req, true);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int tail = req->cryptlen % AES_BLOCK_SIZE;
+	int offset = req->cryptlen - tail - AES_BLOCK_SIZE;
+	struct skcipher_request subreq;
+	u8 b[2][AES_BLOCK_SIZE];
+	int err;
+
+	if (req->cryptlen < AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (tail) {
+		subreq = *req;
+		skcipher_request_set_crypt(&subreq, req->src, req->dst,
+					   req->cryptlen - tail, req->iv);
+		req = &subreq;
+	}
+
+	err = ppc_xts_crypt(req, true);
+	if (err || !tail)
+		return err;
+
+	scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE, 0);
+	memcpy(b[1], b[0], tail);
+	scatterwalk_map_and_copy(b[0], req->src, offset + AES_BLOCK_SIZE, tail, 0);
+
+	spe_begin();
+	ppc_encrypt_xts(b[0], b[0], ctx->key_enc, ctx->rounds, AES_BLOCK_SIZE,
+			req->iv, NULL);
+	spe_end();
+
+	scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE + tail, 1);
+
+	return 0;
 }
 
 static int ppc_xts_decrypt(struct skcipher_request *req)
 {
-	return ppc_xts_crypt(req, false);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int tail = req->cryptlen % AES_BLOCK_SIZE;
+	int offset = req->cryptlen - tail - AES_BLOCK_SIZE;
+	struct skcipher_request subreq;
+	u8 b[3][AES_BLOCK_SIZE];
+	le128 twk;
+	int err;
+
+	if (req->cryptlen < AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (tail) {
+		subreq = *req;
+		skcipher_request_set_crypt(&subreq, req->src, req->dst,
+					   offset, req->iv);
+		req = &subreq;
+	}
+
+	err = ppc_xts_crypt(req, false);
+	if (err || !tail)
+		return err;
+
+	scatterwalk_map_and_copy(b[1], req->src, offset, AES_BLOCK_SIZE + tail, 0);
+
+	spe_begin();
+	if (!offset)
+		ppc_encrypt_ecb(req->iv, req->iv, ctx->key_twk, ctx->rounds,
+				AES_BLOCK_SIZE);
+
+	gf128mul_x_ble(&twk, (le128 *)req->iv);
+
+	ppc_decrypt_xts(b[1], b[1], ctx->key_dec, ctx->rounds, AES_BLOCK_SIZE,
+			(u8 *)&twk, NULL);
+	memcpy(b[0], b[2], tail);
+	memcpy(b[0] + tail, b[1] + tail, AES_BLOCK_SIZE - tail);
+	ppc_decrypt_xts(b[0], b[0], ctx->key_dec, ctx->rounds, AES_BLOCK_SIZE,
+			req->iv, NULL);
+	spe_end();
+
+	scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE + tail, 1);
+
+	return 0;
 }
 
 /*
--
cgit v1.2.3
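Two details of the stealing code above are easy to miss. First, in the
decrypt path, b is declared as u8 b[3][AES_BLOCK_SIZE], and the
scatterwalk_map_and_copy() call reads AES_BLOCK_SIZE + tail bytes into b[1];
because the rows are contiguous, the tail deliberately spills into b[2].
Second, the encrypt-side splice can be pictured on plain buffers as below.
This is a hedged sketch, not code from the patch: encrypt_one_block() is a
hypothetical stand-in for one tweaked AES block encryption.

	#include <string.h>

	typedef unsigned char u8;

	/* stand-in for one tweaked AES block encryption (hypothetical) */
	void encrypt_one_block(u8 block[16]);

	/* C_last: last full ciphertext block from the bulk pass;
	 * p_tail: remaining plaintext bytes (0 < tail < 16);
	 * out:    receives the final 16 + tail ciphertext bytes. */
	static void xts_steal_encrypt(const u8 C_last[16], const u8 *p_tail,
				      int tail, u8 *out)
	{
		u8 b[2][16];

		memcpy(b[0], C_last, 16);
		memcpy(b[1], b[0], tail);   /* stolen bytes become the short final block */
		memcpy(b[0], p_tail, tail); /* splice the plaintext tail in */
		encrypt_one_block(b[0]);    /* re-encrypt: the new last full block */
		memcpy(out, b[0], 16);
		memcpy(out + 16, b[1], tail);
	}

This mirrors ppc_xts_encrypt() above, where the same splice is performed with
two scatterwalk_map_and_copy() calls and a single-block ppc_encrypt_xts()
invocation under spe_begin()/spe_end().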