Diffstat (limited to 'arch/s390/crypto')
-rw-r--r-- | arch/s390/crypto/Kconfig          |  10
-rw-r--r-- | arch/s390/crypto/Makefile         |   1
-rw-r--r-- | arch/s390/crypto/aes_s390.c       | 120
-rw-r--r-- | arch/s390/crypto/hmac_s390.c      | 359
-rw-r--r-- | arch/s390/crypto/paes_s390.c      |   9
-rw-r--r-- | arch/s390/crypto/sha.h            |   1
-rw-r--r-- | arch/s390/crypto/sha3_256_s390.c  |  11
-rw-r--r-- | arch/s390/crypto/sha3_512_s390.c  |  11
-rw-r--r-- | arch/s390/crypto/sha_common.c     |  20
9 files changed, 528 insertions(+), 14 deletions(-)
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
index 06ee706b0d78..d3eb3a233693 100644
--- a/arch/s390/crypto/Kconfig
+++ b/arch/s390/crypto/Kconfig
@@ -132,4 +132,14 @@ config CRYPTO_CHACHA_S390
 
 	  It is available as of z13.
 
+config CRYPTO_HMAC_S390
+	tristate "Keyed-hash message authentication code: HMAC"
+	depends on S390
+	select CRYPTO_HASH
+	help
+	  s390 specific HMAC hardware support for SHA224, SHA256, SHA384 and
+	  SHA512.
+
+	  Architecture: s390
+
 endmenu
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index 1b1cc478fa94..a0cb96937c3d 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_CRYPTO_CHACHA_S390) += chacha_s390.o
 obj-$(CONFIG_S390_PRNG) += prng.o
 obj-$(CONFIG_CRYPTO_GHASH_S390) += ghash_s390.o
 obj-$(CONFIG_CRYPTO_CRC32_S390) += crc32-vx_s390.o
+obj-$(CONFIG_CRYPTO_HMAC_S390) += hmac_s390.o
 obj-y += arch_random.o
 
 crc32-vx_s390-y := crc32-vx.o crc32le-vx.o crc32be-vx.o
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index c6fe5405de4a..8cc02d6e0d0f 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -51,8 +51,13 @@ struct s390_aes_ctx {
 };
 
 struct s390_xts_ctx {
-	u8 key[32];
-	u8 pcc_key[32];
+	union {
+		u8 keys[64];
+		struct {
+			u8 key[32];
+			u8 pcc_key[32];
+		};
+	};
 	int key_len;
 	unsigned long fc;
 	struct crypto_skcipher *fallback;
@@ -526,6 +531,108 @@ static struct skcipher_alg xts_aes_alg = {
 	.decrypt		= xts_aes_decrypt,
 };
 
+static int fullxts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
+			       unsigned int key_len)
+{
+	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
+	unsigned long fc;
+	int err;
+
+	err = xts_fallback_setkey(tfm, in_key, key_len);
+	if (err)
+		return err;
+
+	/* Pick the correct function code based on the key length */
+	fc = (key_len == 32) ? CPACF_KM_XTS_128_FULL :
+	     (key_len == 64) ? CPACF_KM_XTS_256_FULL : 0;
+
+	/* Check if the function code is available */
+	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+	if (!xts_ctx->fc)
+		return 0;
+
+	/* Store double-key */
+	memcpy(xts_ctx->keys, in_key, key_len);
+	xts_ctx->key_len = key_len;
+	return 0;
+}
+
+static int fullxts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
+	unsigned int offset, nbytes, n;
+	struct skcipher_walk walk;
+	int ret;
+	struct {
+		__u8 key[64];
+		__u8 tweak[16];
+		__u8 nap[16];
+	} fxts_param = {
+		.nap = {0},
+	};
+
+	if (req->cryptlen < AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
+		struct skcipher_request *subreq = skcipher_request_ctx(req);
+
+		*subreq = *req;
+		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
+		return (modifier & CPACF_DECRYPT) ?
+			crypto_skcipher_decrypt(subreq) :
+			crypto_skcipher_encrypt(subreq);
+	}
+
+	ret = skcipher_walk_virt(&walk, req, false);
+	if (ret)
+		return ret;
+
+	offset = xts_ctx->key_len & 0x20;
+	memcpy(fxts_param.key + offset, xts_ctx->keys, xts_ctx->key_len);
+	memcpy(fxts_param.tweak, req->iv, AES_BLOCK_SIZE);
+	fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */
+
+	while ((nbytes = walk.nbytes) != 0) {
+		/* only use complete blocks */
+		n = nbytes & ~(AES_BLOCK_SIZE - 1);
+		cpacf_km(xts_ctx->fc | modifier, fxts_param.key + offset,
+			 walk.dst.virt.addr, walk.src.virt.addr, n);
+		ret = skcipher_walk_done(&walk, nbytes - n);
+	}
+	memzero_explicit(&fxts_param, sizeof(fxts_param));
+	return ret;
+}
+
+static int fullxts_aes_encrypt(struct skcipher_request *req)
+{
+	return fullxts_aes_crypt(req, 0);
+}
+
+static int fullxts_aes_decrypt(struct skcipher_request *req)
+{
+	return fullxts_aes_crypt(req, CPACF_DECRYPT);
+}
+
+static struct skcipher_alg fullxts_aes_alg = {
+	.base.cra_name		= "xts(aes)",
+	.base.cra_driver_name	= "full-xts-aes-s390",
+	.base.cra_priority	= 403,	/* aes-xts-s390 + 1 */
+	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
+	.base.cra_blocksize	= AES_BLOCK_SIZE,
+	.base.cra_ctxsize	= sizeof(struct s390_xts_ctx),
+	.base.cra_module	= THIS_MODULE,
+	.init			= xts_fallback_init,
+	.exit			= xts_fallback_exit,
+	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
+	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
+	.ivsize			= AES_BLOCK_SIZE,
+	.setkey			= fullxts_aes_set_key,
+	.encrypt		= fullxts_aes_encrypt,
+	.decrypt		= fullxts_aes_decrypt,
+};
+
 static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 			   unsigned int key_len)
 {
@@ -955,7 +1062,7 @@ static struct aead_alg gcm_aes_aead = {
 };
 
 static struct crypto_alg *aes_s390_alg;
-static struct skcipher_alg *aes_s390_skcipher_algs[4];
+static struct skcipher_alg *aes_s390_skcipher_algs[5];
 static int aes_s390_skciphers_num;
 static struct aead_alg *aes_s390_aead_alg;
 
@@ -1012,6 +1119,13 @@ static int __init aes_s390_init(void)
 			goto out_err;
 	}
 
+	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128_FULL) ||
+	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256_FULL)) {
+		ret = aes_s390_register_skcipher(&fullxts_aes_alg);
+		if (ret)
+			goto out_err;
+	}
+
 	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
 	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
 		ret = aes_s390_register_skcipher(&xts_aes_alg);
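A note on the double-key placement above: the KM full-XTS parameter block always carries a 64-byte key field, and fullxts_aes_crypt() right-aligns shorter keys in that field via offset = xts_ctx->key_len & 0x20. The following stand-alone sketch (illustrative only, not part of the patch) just evaluates that arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned int key_lens[] = { 32, 64 };	/* XTS-128, XTS-256 double-keys */

		for (int i = 0; i < 2; i++) {
			unsigned int key_len = key_lens[i];
			unsigned int offset = key_len & 0x20;	/* same trick as in the patch */

			printf("key_len=%u -> key occupies param.key[%u..%u]\n",
			       key_len, offset, offset + key_len - 1);
		}
		return 0;
	}

For a 32-byte XTS-128 double-key the offset evaluates to 32, so cpacf_km() is handed the address of the first valid key byte; for a 64-byte XTS-256 double-key it evaluates to 0 and the whole field is used.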
diff --git a/arch/s390/crypto/hmac_s390.c b/arch/s390/crypto/hmac_s390.c
new file mode 100644
index 000000000000..bba9a818dfdc
--- /dev/null
+++ b/arch/s390/crypto/hmac_s390.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2024
+ *
+ * s390 specific HMAC support.
+ */
+
+#define KMSG_COMPONENT	"hmac_s390"
+#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt
+
+#include <asm/cpacf.h>
+#include <crypto/sha2.h>
+#include <crypto/internal/hash.h>
+#include <linux/cpufeature.h>
+#include <linux/module.h>
+
+/*
+ * KMAC param block layout for sha2 function codes:
+ * The layout of the param block for the KMAC instruction depends on the
+ * blocksize of the used hashing sha2-algorithm function codes. The param block
+ * contains the hash chaining value (cv), the input message bit-length (imbl)
+ * and the hmac-secret (key). To prevent code duplication, the sizes of all
+ * these are calculated based on the blocksize.
+ *
+ * param-block:
+ * +-------+
+ * | cv    |
+ * +-------+
+ * | imbl  |
+ * +-------+
+ * | key   |
+ * +-------+
+ *
+ * sizes:
+ * part | sh2-alg | calculation | size | type
+ * -----+---------+-------------+------+--------
+ * cv   | 224/256 | blocksize/2 |   32 |  u64[8]
+ *      | 384/512 |             |   64 | u128[8]
+ * imbl | 224/256 | blocksize/8 |    8 |     u64
+ *      | 384/512 |             |   16 |    u128
+ * key  | 224/256 | blocksize   |   64 |  u8[64]
+ *      | 384/512 |             |  128 | u8[128]
+ */
+
+#define MAX_DIGEST_SIZE		SHA512_DIGEST_SIZE
+#define MAX_IMBL_SIZE		sizeof(u128)
+#define MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
+
+#define SHA2_CV_SIZE(bs)	((bs) >> 1)
+#define SHA2_IMBL_SIZE(bs)	((bs) >> 3)
+
+#define SHA2_IMBL_OFFSET(bs)	(SHA2_CV_SIZE(bs))
+#define SHA2_KEY_OFFSET(bs)	(SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs))
+
+struct s390_hmac_ctx {
+	u8 key[MAX_BLOCK_SIZE];
+};
+
+union s390_kmac_gr0 {
+	unsigned long reg;
+	struct {
+		unsigned long		: 48;
+		unsigned long ikp	:  1;
+		unsigned long iimp	:  1;
+		unsigned long ccup	:  1;
+		unsigned long		:  6;
+		unsigned long fc	:  7;
+	};
+};
+
+struct s390_kmac_sha2_ctx {
+	u8 param[MAX_DIGEST_SIZE + MAX_IMBL_SIZE + MAX_BLOCK_SIZE];
+	union s390_kmac_gr0 gr0;
+	u8 buf[MAX_BLOCK_SIZE];
+	unsigned int buflen;
+};
+
+/*
+ * kmac_sha2_set_imbl - sets the input message bit-length based on the blocksize
+ */
+static inline void kmac_sha2_set_imbl(u8 *param, unsigned int buflen,
+				      unsigned int blocksize)
+{
+	u8 *imbl = param + SHA2_IMBL_OFFSET(blocksize);
+
+	switch (blocksize) {
+	case SHA256_BLOCK_SIZE:
+		*(u64 *)imbl = (u64)buflen * BITS_PER_BYTE;
+		break;
+	case SHA512_BLOCK_SIZE:
+		*(u128 *)imbl = (u128)buflen * BITS_PER_BYTE;
+		break;
+	default:
+		break;
+	}
+}
+
+static int hash_key(const u8 *in, unsigned int inlen,
+		    u8 *digest, unsigned int digestsize)
+{
+	unsigned long func;
+	union {
+		struct sha256_paramblock {
+			u32 h[8];
+			u64 mbl;
+		} sha256;
+		struct sha512_paramblock {
+			u64 h[8];
+			u128 mbl;
+		} sha512;
+	} __packed param;
+
+#define PARAM_INIT(x, y, z)			\
+	param.sha##x.h[0] = SHA##y ## _H0;	\
+	param.sha##x.h[1] = SHA##y ## _H1;	\
+	param.sha##x.h[2] = SHA##y ## _H2;	\
+	param.sha##x.h[3] = SHA##y ## _H3;	\
+	param.sha##x.h[4] = SHA##y ## _H4;	\
+	param.sha##x.h[5] = SHA##y ## _H5;	\
+	param.sha##x.h[6] = SHA##y ## _H6;	\
+	param.sha##x.h[7] = SHA##y ## _H7;	\
+	param.sha##x.mbl = (z)
+
+	switch (digestsize) {
+	case SHA224_DIGEST_SIZE:
+		func = CPACF_KLMD_SHA_256;
+		PARAM_INIT(256, 224, inlen * 8);
+		break;
+	case SHA256_DIGEST_SIZE:
+		func = CPACF_KLMD_SHA_256;
+		PARAM_INIT(256, 256, inlen * 8);
+		break;
+	case SHA384_DIGEST_SIZE:
+		func = CPACF_KLMD_SHA_512;
+		PARAM_INIT(512, 384, inlen * 8);
+		break;
+	case SHA512_DIGEST_SIZE:
+		func = CPACF_KLMD_SHA_512;
+		PARAM_INIT(512, 512, inlen * 8);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+#undef PARAM_INIT
+
+	cpacf_klmd(func, &param, in, inlen);
+
+	memcpy(digest, &param, digestsize);
+
+	return 0;
+}
+
+static int s390_hmac_sha2_setkey(struct crypto_shash *tfm,
+				 const u8 *key, unsigned int keylen)
+{
+	struct s390_hmac_ctx *tfm_ctx = crypto_shash_ctx(tfm);
+	unsigned int ds = crypto_shash_digestsize(tfm);
+	unsigned int bs = crypto_shash_blocksize(tfm);
+
+	memset(tfm_ctx, 0, sizeof(*tfm_ctx));
+
+	if (keylen > bs)
+		return hash_key(key, keylen, tfm_ctx->key, ds);
+
+	memcpy(tfm_ctx->key, key, keylen);
+	return 0;
+}
+
+static int s390_hmac_sha2_init(struct shash_desc *desc)
+{
+	struct s390_hmac_ctx *tfm_ctx = crypto_shash_ctx(desc->tfm);
+	struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+	unsigned int bs = crypto_shash_blocksize(desc->tfm);
+
+	memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
+	       tfm_ctx->key, bs);
+
+	ctx->buflen = 0;
+	ctx->gr0.reg = 0;
+	switch (crypto_shash_digestsize(desc->tfm)) {
+	case SHA224_DIGEST_SIZE:
+		ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_224;
+		break;
+	case SHA256_DIGEST_SIZE:
+		ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_256;
+		break;
+	case SHA384_DIGEST_SIZE:
+		ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_384;
+		break;
+	case SHA512_DIGEST_SIZE:
+		ctx->gr0.fc = CPACF_KMAC_HMAC_SHA_512;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int s390_hmac_sha2_update(struct shash_desc *desc,
+				 const u8 *data, unsigned int len)
+{
+	struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+	unsigned int bs = crypto_shash_blocksize(desc->tfm);
+	unsigned int offset, n;
+
+	/* check current buffer */
+	offset = ctx->buflen % bs;
+	ctx->buflen += len;
+	if (offset + len < bs)
+		goto store;
+
+	/* process one stored block */
+	if (offset) {
+		n = bs - offset;
+		memcpy(ctx->buf + offset, data, n);
+		ctx->gr0.iimp = 1;
+		_cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs);
+		data += n;
+		len -= n;
+		offset = 0;
+	}
+	/* process as many blocks as possible */
+	if (len >= bs) {
+		n = (len / bs) * bs;
+		ctx->gr0.iimp = 1;
+		_cpacf_kmac(&ctx->gr0.reg, ctx->param, data, n);
+		data += n;
+		len -= n;
+	}
+store:
+	/* store incomplete block in buffer */
+	if (len)
+		memcpy(ctx->buf + offset, data, len);
+
+	return 0;
+}
+
+static int s390_hmac_sha2_final(struct shash_desc *desc, u8 *out)
+{
+	struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+	unsigned int bs = crypto_shash_blocksize(desc->tfm);
+
+	ctx->gr0.iimp = 0;
+	kmac_sha2_set_imbl(ctx->param, ctx->buflen, bs);
+	_cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, ctx->buflen % bs);
+	memcpy(out, ctx->param, crypto_shash_digestsize(desc->tfm));
+
+	return 0;
+}
+
+static int s390_hmac_sha2_digest(struct shash_desc *desc,
+				 const u8 *data, unsigned int len, u8 *out)
+{
+	struct s390_kmac_sha2_ctx *ctx = shash_desc_ctx(desc);
+	unsigned int ds = crypto_shash_digestsize(desc->tfm);
+	int rc;
+
+	rc = s390_hmac_sha2_init(desc);
+	if (rc)
+		return rc;
+
+	ctx->gr0.iimp = 0;
+	kmac_sha2_set_imbl(ctx->param, len,
+			   crypto_shash_blocksize(desc->tfm));
+	_cpacf_kmac(&ctx->gr0.reg, ctx->param, data, len);
+	memcpy(out, ctx->param, ds);
+
+	return 0;
+}
+
+#define S390_HMAC_SHA2_ALG(x) {					\
+	.fc = CPACF_KMAC_HMAC_SHA_##x,				\
+	.alg = {						\
+		.init = s390_hmac_sha2_init,			\
+		.update = s390_hmac_sha2_update,		\
+		.final = s390_hmac_sha2_final,			\
+		.digest = s390_hmac_sha2_digest,		\
+		.setkey = s390_hmac_sha2_setkey,		\
+		.descsize = sizeof(struct s390_kmac_sha2_ctx),	\
+		.halg = {					\
+			.digestsize = SHA##x##_DIGEST_SIZE,	\
+			.base = {				\
+				.cra_name = "hmac(sha" #x ")",	\
+				.cra_driver_name = "hmac_s390_sha" #x,	\
+				.cra_blocksize = SHA##x##_BLOCK_SIZE,	\
+				.cra_priority = 400,		\
+				.cra_ctxsize = sizeof(struct s390_hmac_ctx), \
+				.cra_module = THIS_MODULE,	\
+			},					\
+		},						\
+	},							\
+}
+
+static struct s390_hmac_alg {
+	bool registered;
+	unsigned int fc;
+	struct shash_alg alg;
+} s390_hmac_algs[] = {
+	S390_HMAC_SHA2_ALG(224),
+	S390_HMAC_SHA2_ALG(256),
+	S390_HMAC_SHA2_ALG(384),
+	S390_HMAC_SHA2_ALG(512),
+};
+
+static __always_inline void _s390_hmac_algs_unregister(void)
+{
+	struct s390_hmac_alg *hmac;
+	int i;
+
+	for (i = ARRAY_SIZE(s390_hmac_algs) - 1; i >= 0; i--) {
+		hmac = &s390_hmac_algs[i];
+		if (!hmac->registered)
+			continue;
+		crypto_unregister_shash(&hmac->alg);
+	}
+}
+
+static int __init hmac_s390_init(void)
+{
+	struct s390_hmac_alg *hmac;
+	int i, rc = -ENODEV;
+
+	if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256))
+		return -ENODEV;
+	if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512))
+		return -ENODEV;
+
+	for (i = 0; i < ARRAY_SIZE(s390_hmac_algs); i++) {
+		hmac = &s390_hmac_algs[i];
+		if (!cpacf_query_func(CPACF_KMAC, hmac->fc))
+			continue;
+
+		rc = crypto_register_shash(&hmac->alg);
+		if (rc) {
+			pr_err("unable to register %s\n",
+			       hmac->alg.halg.base.cra_name);
+			goto out;
+		}
+		hmac->registered = true;
+		pr_debug("registered %s\n", hmac->alg.halg.base.cra_name);
+	}
+	return rc;
+out:
+	_s390_hmac_algs_unregister();
+	return rc;
+}
+
+static void __exit hmac_s390_exit(void)
+{
+	_s390_hmac_algs_unregister();
+}
+
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, hmac_s390_init);
+module_exit(hmac_s390_exit);
+
+MODULE_DESCRIPTION("S390 HMAC driver");
+MODULE_LICENSE("GPL");
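The SHA2_* size/offset macros above encode the table from the param-block comment. A small host-side sketch (illustrative only; the macros are copied from the patch, the block-size constants are the usual values from <crypto/sha2.h>) that evaluates them for both sha2 block sizes:

	#include <stdio.h>

	#define SHA256_BLOCK_SIZE	64
	#define SHA512_BLOCK_SIZE	128

	/* copied from the patch */
	#define SHA2_CV_SIZE(bs)	((bs) >> 1)
	#define SHA2_IMBL_SIZE(bs)	((bs) >> 3)
	#define SHA2_IMBL_OFFSET(bs)	(SHA2_CV_SIZE(bs))
	#define SHA2_KEY_OFFSET(bs)	(SHA2_CV_SIZE(bs) + SHA2_IMBL_SIZE(bs))

	int main(void)
	{
		unsigned int bs[] = { SHA256_BLOCK_SIZE, SHA512_BLOCK_SIZE };

		for (int i = 0; i < 2; i++)
			printf("bs=%3u: cv=%2u bytes, imbl=%2u bytes at %u, key at %u\n",
			       bs[i], SHA2_CV_SIZE(bs[i]), SHA2_IMBL_SIZE(bs[i]),
			       SHA2_IMBL_OFFSET(bs[i]), SHA2_KEY_OFFSET(bs[i]));
		return 0;
	}

This prints cv=32/imbl=8/key at 40 for the 64-byte blocksize and cv=64/imbl=16/key at 80 for the 128-byte one, matching the sizes column of the comment.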
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 99ea3f12c5d2..ef4491ccbbf8 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -133,8 +133,8 @@ static inline int __paes_keyblob2pkey(struct key_blob *kb,
 			if (msleep_interruptible(1000))
 				return -EINTR;
 		}
-		ret = pkey_keyblob2pkey(kb->key, kb->keylen,
-					pk->protkey, &pk->len, &pk->type);
+		ret = pkey_key2protkey(kb->key, kb->keylen,
+				       pk->protkey, &pk->len, &pk->type);
 	}
 
 	return ret;
@@ -802,7 +802,10 @@ out_err:
 module_init(paes_s390_init);
 module_exit(paes_s390_fini);
 
-MODULE_ALIAS_CRYPTO("paes");
+MODULE_ALIAS_CRYPTO("ecb(paes)");
+MODULE_ALIAS_CRYPTO("cbc(paes)");
+MODULE_ALIAS_CRYPTO("ctr(paes)");
+MODULE_ALIAS_CRYPTO("xts(paes)");
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
 MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h
index 65ea12fc87a1..2bb22db54c31 100644
--- a/arch/s390/crypto/sha.h
+++ b/arch/s390/crypto/sha.h
@@ -25,6 +25,7 @@ struct s390_sha_ctx {
 	u32 state[CPACF_MAX_PARMBLOCK_SIZE / sizeof(u32)];
 	u8 buf[SHA_MAX_BLOCK_SIZE];
 	int func;		/* KIMD function to use */
+	int first_message_part;
 };
 
 struct shash_desc;
diff --git a/arch/s390/crypto/sha3_256_s390.c b/arch/s390/crypto/sha3_256_s390.c
index e1350e033a32..a84ef692f572 100644
--- a/arch/s390/crypto/sha3_256_s390.c
+++ b/arch/s390/crypto/sha3_256_s390.c
@@ -21,9 +21,11 @@ static int sha3_256_init(struct shash_desc *desc)
 {
 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
 
-	memset(sctx->state, 0, sizeof(sctx->state));
+	if (!test_facility(86)) /* msa 12 */
+		memset(sctx->state, 0, sizeof(sctx->state));
 	sctx->count = 0;
 	sctx->func = CPACF_KIMD_SHA3_256;
+	sctx->first_message_part = 1;
 
 	return 0;
 }
@@ -36,6 +38,7 @@ static int sha3_256_export(struct shash_desc *desc, void *out)
 	octx->rsiz = sctx->count;
 	memcpy(octx->st, sctx->state, sizeof(octx->st));
 	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
+	octx->partial = sctx->first_message_part;
 
 	return 0;
 }
@@ -48,6 +51,7 @@ static int sha3_256_import(struct shash_desc *desc, const void *in)
 	sctx->count = ictx->rsiz;
 	memcpy(sctx->state, ictx->st, sizeof(ictx->st));
 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+	sctx->first_message_part = ictx->partial;
 	sctx->func = CPACF_KIMD_SHA3_256;
 
 	return 0;
@@ -61,6 +65,7 @@ static int sha3_224_import(struct shash_desc *desc, const void *in)
 	sctx->count = ictx->rsiz;
 	memcpy(sctx->state, ictx->st, sizeof(ictx->st));
 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+	sctx->first_message_part = ictx->partial;
 	sctx->func = CPACF_KIMD_SHA3_224;
 
 	return 0;
@@ -88,9 +93,11 @@ static int sha3_224_init(struct shash_desc *desc)
 {
 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
 
-	memset(sctx->state, 0, sizeof(sctx->state));
+	if (!test_facility(86)) /* msa 12 */
+		memset(sctx->state, 0, sizeof(sctx->state));
 	sctx->count = 0;
 	sctx->func = CPACF_KIMD_SHA3_224;
+	sctx->first_message_part = 1;
 
 	return 0;
 }
diff --git a/arch/s390/crypto/sha3_512_s390.c b/arch/s390/crypto/sha3_512_s390.c
index 06c142ed9bb1..07528fc98ff7 100644
--- a/arch/s390/crypto/sha3_512_s390.c
+++ b/arch/s390/crypto/sha3_512_s390.c
@@ -20,9 +20,11 @@ static int sha3_512_init(struct shash_desc *desc)
 {
 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
 
-	memset(sctx->state, 0, sizeof(sctx->state));
+	if (!test_facility(86)) /* msa 12 */
+		memset(sctx->state, 0, sizeof(sctx->state));
 	sctx->count = 0;
 	sctx->func = CPACF_KIMD_SHA3_512;
+	sctx->first_message_part = 1;
 
 	return 0;
 }
@@ -37,6 +39,7 @@ static int sha3_512_export(struct shash_desc *desc, void *out)
 
 	memcpy(octx->st, sctx->state, sizeof(octx->st));
 	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
+	octx->partial = sctx->first_message_part;
 
 	return 0;
 }
@@ -52,6 +55,7 @@ static int sha3_512_import(struct shash_desc *desc, const void *in)
 
 	memcpy(sctx->state, ictx->st, sizeof(ictx->st));
 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+	sctx->first_message_part = ictx->partial;
 	sctx->func = CPACF_KIMD_SHA3_512;
 
 	return 0;
@@ -68,6 +72,7 @@ static int sha3_384_import(struct shash_desc *desc, const void *in)
 
 	memcpy(sctx->state, ictx->st, sizeof(ictx->st));
 	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+	sctx->first_message_part = ictx->partial;
 	sctx->func = CPACF_KIMD_SHA3_384;
 
 	return 0;
@@ -97,9 +102,11 @@ static int sha3_384_init(struct shash_desc *desc)
 {
 	struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
 
-	memset(sctx->state, 0, sizeof(sctx->state));
+	if (!test_facility(86)) /* msa 12 */
+		memset(sctx->state, 0, sizeof(sctx->state));
 	sctx->count = 0;
 	sctx->func = CPACF_KIMD_SHA3_384;
+	sctx->first_message_part = 1;
 
 	return 0;
 }
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index 686fe7aa192f..961d7d522af1 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -18,6 +18,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 	struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
 	unsigned int bsize = crypto_shash_blocksize(desc->tfm);
 	unsigned int index, n;
+	int fc;
 
 	/* how much is already in the buffer? */
 	index = ctx->count % bsize;
@@ -26,10 +27,16 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 	if ((index + len) < bsize)
 		goto store;
 
+	fc = ctx->func;
+	if (ctx->first_message_part)
+		fc |= test_facility(86) ? CPACF_KIMD_NIP : 0;
+
 	/* process one stored block */
 	if (index) {
 		memcpy(ctx->buf + index, data, bsize - index);
-		cpacf_kimd(ctx->func, ctx->state, ctx->buf, bsize);
+		cpacf_kimd(fc, ctx->state, ctx->buf, bsize);
+		ctx->first_message_part = 0;
+		fc &= ~CPACF_KIMD_NIP;
 		data += bsize - index;
 		len -= bsize - index;
 		index = 0;
@@ -38,7 +45,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 	/* process as many blocks as possible */
 	if (len >= bsize) {
 		n = (len / bsize) * bsize;
-		cpacf_kimd(ctx->func, ctx->state, data, n);
+		cpacf_kimd(fc, ctx->state, data, n);
+		ctx->first_message_part = 0;
 		data += n;
 		len -= n;
 	}
@@ -75,7 +83,7 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
 	unsigned int bsize = crypto_shash_blocksize(desc->tfm);
 	u64 bits;
 	unsigned int n;
-	int mbl_offset;
+	int mbl_offset, fc;
 
 	n = ctx->count % bsize;
 	bits = ctx->count * 8;
@@ -109,7 +117,11 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
 		return -EINVAL;
 	}
 
-	cpacf_klmd(ctx->func, ctx->state, ctx->buf, n);
+	fc = ctx->func;
+	fc |= test_facility(86) ? CPACF_KLMD_DUFOP : 0;
+	if (ctx->first_message_part)
+		fc |= CPACF_KLMD_NIP;
+	cpacf_klmd(fc, ctx->state, ctx->buf, n);
 
 	/* copy digest to out */
 	memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
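For reference, the flag handling that the sha_common.c hunks introduce: on machines with facility 86 (MSA 12) the first KIMD call of a message carries the NIP flag, every later call runs without it, and the closing KLMD call adds DUFOP (plus NIP if no intermediate block was ever processed). A minimal stand-alone sketch of that logic; the bit values and the base function code are placeholders, not the real CPACF encodings from asm/cpacf.h:

	#include <stdio.h>

	#define KIMD_NIP	0x8000	/* placeholder, not the real bit */
	#define KLMD_NIP	0x8000	/* placeholder, not the real bit */
	#define KLMD_DUFOP	0x4000	/* placeholder, not the real bit */

	struct sha_ctx {
		int func;		/* base KIMD/KLMD function code */
		int first_message_part;
	};

	/* mirrors the fc computation added to s390_sha_update() */
	static int update_fc(const struct sha_ctx *ctx, int facility86)
	{
		int fc = ctx->func;

		if (ctx->first_message_part)
			fc |= facility86 ? KIMD_NIP : 0;
		return fc;
	}

	/* mirrors the fc computation added to s390_sha_final() */
	static int final_fc(const struct sha_ctx *ctx, int facility86)
	{
		int fc = ctx->func;

		fc |= facility86 ? KLMD_DUFOP : 0;
		if (ctx->first_message_part)
			fc |= KLMD_NIP;
		return fc;
	}

	int main(void)
	{
		struct sha_ctx ctx = { .func = 0x21, .first_message_part = 1 };

		printf("first update:  fc=%#x\n", update_fc(&ctx, 1));
		ctx.first_message_part = 0;	/* cleared after the first block */
		printf("later updates: fc=%#x\n", update_fc(&ctx, 1));
		printf("final:         fc=%#x\n", final_fc(&ctx, 1));
		return 0;
	}

This is also why the sha3 init functions above skip the memset on MSA 12: with NIP set, the hardware initializes the parameter block itself on the first call.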