68 files changed, 1540 insertions, 548 deletions
diff --git a/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml b/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml index c147900f9041..6da674666d45 100644 --- a/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml +++ b/Documentation/devicetree/bindings/rng/brcm,bcm2835.yaml @@ -28,6 +28,12 @@ properties: clock-names: const: ipsec + resets: + maxItems: 1 + + reset-names: + const: ipsec + interrupts: maxItems: 1 @@ -35,6 +41,18 @@ required: - compatible - reg +if: + properties: + compatible: + enum: + - brcm,bcm6368-rng +then: + required: + - clocks + - clock-names + - resets + - reset-names + additionalProperties: false examples: @@ -58,4 +76,7 @@ examples: clocks = <&periph_clk 18>; clock-names = "ipsec"; + + resets = <&periph_rst 4>; + reset-names = "ipsec"; }; diff --git a/arch/arm/crypto/aes-cipher-core.S b/arch/arm/crypto/aes-cipher-core.S index 472e56d09eea..1da3f41359aa 100644 --- a/arch/arm/crypto/aes-cipher-core.S +++ b/arch/arm/crypto/aes-cipher-core.S @@ -99,28 +99,6 @@ __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op, \oldcpsr .endm - .macro __rev, out, in - .if __LINUX_ARM_ARCH__ < 6 - lsl t0, \in, #24 - and t1, \in, #0xff00 - and t2, \in, #0xff0000 - orr \out, t0, \in, lsr #24 - orr \out, \out, t1, lsl #8 - orr \out, \out, t2, lsr #8 - .else - rev \out, \in - .endif - .endm - - .macro __adrl, out, sym, c - .if __LINUX_ARM_ARCH__ < 7 - ldr\c \out, =\sym - .else - movw\c \out, #:lower16:\sym - movt\c \out, #:upper16:\sym - .endif - .endm - .macro do_crypt, round, ttab, ltab, bsz push {r3-r11, lr} @@ -133,10 +111,10 @@ ldr r7, [in, #12] #ifdef CONFIG_CPU_BIG_ENDIAN - __rev r4, r4 - __rev r5, r5 - __rev r6, r6 - __rev r7, r7 + rev_l r4, t0 + rev_l r5, t0 + rev_l r6, t0 + rev_l r7, t0 #endif eor r4, r4, r8 @@ -144,7 +122,7 @@ eor r6, r6, r10 eor r7, r7, r11 - __adrl ttab, \ttab + mov_l ttab, \ttab /* * Disable interrupts and prefetch the 1024-byte 'ft' or 'it' table into * L1 cache, assuming cacheline size >= 32. This is a hardening measure @@ -180,7 +158,7 @@ 2: .ifb \ltab add ttab, ttab, #1 .else - __adrl ttab, \ltab + mov_l ttab, \ltab // Prefetch inverse S-box for final round; see explanation above .set i, 0 .rept 256 / 64 @@ -194,10 +172,10 @@ \round r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b, rounds #ifdef CONFIG_CPU_BIG_ENDIAN - __rev r4, r4 - __rev r5, r5 - __rev r6, r6 - __rev r7, r7 + rev_l r4, t0 + rev_l r5, t0 + rev_l r6, t0 + rev_l r7, t0 #endif ldr out, [sp] diff --git a/arch/arm/crypto/blake2b-neon-glue.c b/arch/arm/crypto/blake2b-neon-glue.c index 34d73200e7fa..4b59d027ba4a 100644 --- a/arch/arm/crypto/blake2b-neon-glue.c +++ b/arch/arm/crypto/blake2b-neon-glue.c @@ -85,8 +85,8 @@ static int __init blake2b_neon_mod_init(void) static void __exit blake2b_neon_mod_exit(void) { - return crypto_unregister_shashes(blake2b_neon_algs, - ARRAY_SIZE(blake2b_neon_algs)); + crypto_unregister_shashes(blake2b_neon_algs, + ARRAY_SIZE(blake2b_neon_algs)); } module_init(blake2b_neon_mod_init); diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/crypto/blake2s-core.S index bed897e9a181..86345751bbf3 100644 --- a/arch/arm/crypto/blake2s-core.S +++ b/arch/arm/crypto/blake2s-core.S @@ -8,6 +8,7 @@ */ #include <linux/linkage.h> +#include <asm/assembler.h> // Registers used to hold message words temporarily. 
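The __rev and __adrl macros removed above are replaced by the shared rev_l and mov_l helpers (the blake2s-core.S hunk pulls in asm/assembler.h for them). For clarity, a minimal C sketch of the 32-bit byte swap that rev_l performs on pre-ARMv6 cores, mirroring the deleted lsl/and/orr sequence; the helper name is illustrative, the real macro is assembly:

#include <linux/types.h>

/* out = (in << 24) | (in >> 24) | ((in & 0xff00) << 8) | ((in & 0xff0000) >> 8) */
static inline u32 rev32_sketch(u32 in)
{
	u32 t0 = in << 24;			/* byte 0 -> byte 3 */
	u32 t1 = (in & 0x0000ff00) << 8;	/* byte 1 -> byte 2 */
	u32 t2 = (in & 0x00ff0000) >> 8;	/* byte 2 -> byte 1 */

	return t0 | t1 | t2 | (in >> 24);	/* byte 3 -> byte 0 */
}

On ARMv6+ the macro collapses to a single rev instruction, which is why the per-file open-coded variants could be consolidated.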
There aren't // enough ARM registers to hold the whole message block, so we have to @@ -38,6 +39,23 @@ #endif .endm +.macro _le32_bswap a, tmp +#ifdef __ARMEB__ + rev_l \a, \tmp +#endif +.endm + +.macro _le32_bswap_8x a, b, c, d, e, f, g, h, tmp + _le32_bswap \a, \tmp + _le32_bswap \b, \tmp + _le32_bswap \c, \tmp + _le32_bswap \d, \tmp + _le32_bswap \e, \tmp + _le32_bswap \f, \tmp + _le32_bswap \g, \tmp + _le32_bswap \h, \tmp +.endm + // Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals. // (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two // columns/diagonals. s0-s1 are the word offsets to the message words the first @@ -180,8 +198,10 @@ ENTRY(blake2s_compress_arch) tst r1, #3 bne .Lcopy_block_misaligned ldmia r1!, {r2-r9} + _le32_bswap_8x r2, r3, r4, r5, r6, r7, r8, r9, r14 stmia r12!, {r2-r9} ldmia r1!, {r2-r9} + _le32_bswap_8x r2, r3, r4, r5, r6, r7, r8, r9, r14 stmia r12, {r2-r9} .Lcopy_block_done: str r1, [sp, #68] // Update message pointer @@ -268,6 +288,7 @@ ENTRY(blake2s_compress_arch) 1: #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ldr r3, [r1], #4 + _le32_bswap r3, r4 #else ldrb r3, [r1, #0] ldrb r4, [r1, #1] diff --git a/arch/arm/crypto/chacha-scalar-core.S b/arch/arm/crypto/chacha-scalar-core.S index 2985b80a45b5..083fe1ab96d0 100644 --- a/arch/arm/crypto/chacha-scalar-core.S +++ b/arch/arm/crypto/chacha-scalar-core.S @@ -41,32 +41,15 @@ X14 .req r12 X15 .req r14 -.macro __rev out, in, t0, t1, t2 -.if __LINUX_ARM_ARCH__ >= 6 - rev \out, \in -.else - lsl \t0, \in, #24 - and \t1, \in, #0xff00 - and \t2, \in, #0xff0000 - orr \out, \t0, \in, lsr #24 - orr \out, \out, \t1, lsl #8 - orr \out, \out, \t2, lsr #8 -.endif -.endm - -.macro _le32_bswap x, t0, t1, t2 +.macro _le32_bswap_4x a, b, c, d, tmp #ifdef __ARMEB__ - __rev \x, \x, \t0, \t1, \t2 + rev_l \a, \tmp + rev_l \b, \tmp + rev_l \c, \tmp + rev_l \d, \tmp #endif .endm -.macro _le32_bswap_4x a, b, c, d, t0, t1, t2 - _le32_bswap \a, \t0, \t1, \t2 - _le32_bswap \b, \t0, \t1, \t2 - _le32_bswap \c, \t0, \t1, \t2 - _le32_bswap \d, \t0, \t1, \t2 -.endm - .macro __ldrd a, b, src, offset #if __LINUX_ARM_ARCH__ >= 6 ldrd \a, \b, [\src, #\offset] @@ -200,7 +183,7 @@ add X1, X1, r9 add X2, X2, r10 add X3, X3, r11 - _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10 + _le32_bswap_4x X0, X1, X2, X3, r8 ldmia r12!, {r8-r11} eor X0, X0, r8 eor X1, X1, r9 @@ -216,7 +199,7 @@ ldmia r12!, {X0-X3} add X6, r10, X6, ror #brot add X7, r11, X7, ror #brot - _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10 + _le32_bswap_4x X4, X5, X6, X7, r8 eor X4, X4, X0 eor X5, X5, X1 eor X6, X6, X2 @@ -231,7 +214,7 @@ add r1, r1, r9 // x9 add r6, r6, r10 // x10 add r7, r7, r11 // x11 - _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10 + _le32_bswap_4x r0, r1, r6, r7, r8 ldmia r12!, {r8-r11} eor r0, r0, r8 // x8 eor r1, r1, r9 // x9 @@ -245,7 +228,7 @@ add r3, r9, r3, ror #drot // x13 add r4, r10, r4, ror #drot // x14 add r5, r11, r5, ror #drot // x15 - _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11 + _le32_bswap_4x r2, r3, r4, r5, r9 ldr r9, [sp, #72] // load LEN eor r2, r2, r0 // x12 eor r3, r3, r1 // x13 @@ -301,7 +284,7 @@ add X1, X1, r9 add X2, X2, r10 add X3, X3, r11 - _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10 + _le32_bswap_4x X0, X1, X2, X3, r8 stmia r14!, {X0-X3} // Save keystream for x4-x7 @@ -311,7 +294,7 @@ add X5, r9, X5, ror #brot add X6, r10, X6, ror #brot add X7, r11, X7, ror #brot - _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10 + _le32_bswap_4x X4, X5, X6, X7, r8 add r8, sp, #64 stmia r14!, {X4-X7} @@ -323,7 +306,7 @@ add r1, 
r1, r9 // x9 add r6, r6, r10 // x10 add r7, r7, r11 // x11 - _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10 + _le32_bswap_4x r0, r1, r6, r7, r8 stmia r14!, {r0,r1,r6,r7} __ldrd r8, r9, sp, 144 __ldrd r10, r11, sp, 152 @@ -331,7 +314,7 @@ add r3, r9, r3, ror #drot // x13 add r4, r10, r4, ror #drot // x14 add r5, r11, r5, ror #drot // x15 - _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11 + _le32_bswap_4x r2, r3, r4, r5, r9 stmia r14, {r2-r5} // Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c index b1e577cbf00c..88e8ea73bfa7 100644 --- a/arch/powerpc/crypto/sha1-spe-glue.c +++ b/arch/powerpc/crypto/sha1-spe-glue.c @@ -107,7 +107,7 @@ static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data, src += bytes; len -= bytes; - }; + } memcpy((char *)sctx->buffer, src, len); return 0; diff --git a/crypto/aegis.h b/crypto/aegis.h index 6920ebe77679..6ef9c174c973 100644 --- a/crypto/aegis.h +++ b/crypto/aegis.h @@ -21,9 +21,28 @@ union aegis_block { u8 bytes[AEGIS_BLOCK_SIZE]; }; +struct aegis_state; + +extern int aegis128_have_aes_insn; + #define AEGIS_BLOCK_ALIGN (__alignof__(union aegis_block)) #define AEGIS_ALIGNED(p) IS_ALIGNED((uintptr_t)p, AEGIS_BLOCK_ALIGN) +bool crypto_aegis128_have_simd(void); +void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg); +void crypto_aegis128_init_simd(struct aegis_state *state, + const union aegis_block *key, + const u8 *iv); +void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst, + const u8 *src, unsigned int size); +int crypto_aegis128_final_simd(struct aegis_state *state, + union aegis_block *tag_xor, + unsigned int assoclen, + unsigned int cryptlen, + unsigned int authsize); + static __always_inline void crypto_aegis_block_xor(union aegis_block *dst, const union aegis_block *src) { diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c index 89dc1c559689..c4f1bfa1d04f 100644 --- a/crypto/aegis128-core.c +++ b/crypto/aegis128-core.c @@ -58,21 +58,6 @@ static bool aegis128_do_simd(void) return false; } -bool crypto_aegis128_have_simd(void); -void crypto_aegis128_update_simd(struct aegis_state *state, const void *msg); -void crypto_aegis128_init_simd(struct aegis_state *state, - const union aegis_block *key, - const u8 *iv); -void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); -void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst, - const u8 *src, unsigned int size); -int crypto_aegis128_final_simd(struct aegis_state *state, - union aegis_block *tag_xor, - unsigned int assoclen, - unsigned int cryptlen, - unsigned int authsize); - static void crypto_aegis128_update(struct aegis_state *state) { union aegis_block tmp; diff --git a/crypto/aegis128-neon.c b/crypto/aegis128-neon.c index 94d591a002a4..a7856915ec85 100644 --- a/crypto/aegis128-neon.c +++ b/crypto/aegis128-neon.c @@ -30,7 +30,7 @@ bool crypto_aegis128_have_simd(void) return IS_ENABLED(CONFIG_ARM64); } -void crypto_aegis128_init_simd(union aegis_block *state, +void crypto_aegis128_init_simd(struct aegis_state *state, const union aegis_block *key, const u8 *iv) { @@ -39,14 +39,14 @@ void crypto_aegis128_init_simd(union aegis_block *state, kernel_neon_end(); } -void crypto_aegis128_update_simd(union aegis_block *state, const void *msg) +void 
crypto_aegis128_update_simd(struct aegis_state *state, const void *msg) { kernel_neon_begin(); crypto_aegis128_update_neon(state, msg); kernel_neon_end(); } -void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst, +void crypto_aegis128_encrypt_chunk_simd(struct aegis_state *state, u8 *dst, const u8 *src, unsigned int size) { kernel_neon_begin(); @@ -54,7 +54,7 @@ void crypto_aegis128_encrypt_chunk_simd(union aegis_block *state, u8 *dst, kernel_neon_end(); } -void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst, +void crypto_aegis128_decrypt_chunk_simd(struct aegis_state *state, u8 *dst, const u8 *src, unsigned int size) { kernel_neon_begin(); @@ -62,7 +62,7 @@ void crypto_aegis128_decrypt_chunk_simd(union aegis_block *state, u8 *dst, kernel_neon_end(); } -int crypto_aegis128_final_simd(union aegis_block *state, +int crypto_aegis128_final_simd(struct aegis_state *state, union aegis_block *tag_xor, unsigned int assoclen, unsigned int cryptlen, diff --git a/crypto/api.c b/crypto/api.c index ed08cbd5b9d3..c4eda56cff89 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -562,7 +562,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) { struct crypto_alg *alg; - if (unlikely(!mem)) + if (IS_ERR_OR_NULL(mem)) return; alg = tfm->__crt_alg; diff --git a/crypto/ecc.c b/crypto/ecc.c index 589831d82063..884fe05fc270 100644 --- a/crypto/ecc.c +++ b/crypto/ecc.c @@ -24,6 +24,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#include <crypto/ecc_curve.h> #include <linux/module.h> #include <linux/random.h> #include <linux/slab.h> @@ -42,6 +43,13 @@ typedef struct { u64 m_high; } uint128_t; +/* Returns curv25519 curve param */ +const struct ecc_curve *ecc_get_curve25519(void) +{ + return &ecc_25519; +} +EXPORT_SYMBOL(ecc_get_curve25519); + const struct ecc_curve *ecc_get_curve(unsigned int curve_id) { switch (curve_id) { diff --git a/crypto/ecc.h b/crypto/ecc.h index 3bee655d437d..46aa9bc03ddc 100644 --- a/crypto/ecc.h +++ b/crypto/ecc.h @@ -26,6 +26,8 @@ #ifndef _CRYPTO_ECC_H #define _CRYPTO_ECC_H +#include <crypto/ecc_curve.h> + /* One digit is u64 qword. */ #define ECC_CURVE_NIST_P192_DIGITS 3 #define ECC_CURVE_NIST_P256_DIGITS 4 @@ -36,44 +38,9 @@ #define ECC_MAX_BYTES (ECC_MAX_DIGITS << ECC_DIGITS_TO_BYTES_SHIFT) -/** - * struct ecc_point - elliptic curve point in affine coordinates - * - * @x: X coordinate in vli form. - * @y: Y coordinate in vli form. - * @ndigits: Length of vlis in u64 qwords. - */ -struct ecc_point { - u64 *x; - u64 *y; - u8 ndigits; -}; - #define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits } /** - * struct ecc_curve - definition of elliptic curve - * - * @name: Short name of the curve. - * @g: Generator point of the curve. - * @p: Prime number, if Barrett's reduction is used for this curve - * pre-calculated value 'mu' is appended to the @p after ndigits. - * Use of Barrett's reduction is heuristically determined in - * vli_mmod_fast(). - * @n: Order of the curve group. - * @a: Curve parameter a. - * @b: Curve parameter b. 
- */ -struct ecc_curve { - char *name; - struct ecc_point g; - u64 *p; - u64 *n; - u64 *a; - u64 *b; -}; - -/** * ecc_swap_digits() - Copy ndigits from big endian array to native array * @in: Input array * @out: Output array diff --git a/crypto/ecc_curve_defs.h b/crypto/ecc_curve_defs.h index b327732f6ef5..9719934c9428 100644 --- a/crypto/ecc_curve_defs.h +++ b/crypto/ecc_curve_defs.h @@ -86,4 +86,21 @@ static struct ecc_curve nist_p384 = { .b = nist_p384_b }; +/* curve25519 */ +static u64 curve25519_g_x[] = { 0x0000000000000009, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000 }; +static u64 curve25519_p[] = { 0xffffffffffffffed, 0xffffffffffffffff, + 0xffffffffffffffff, 0x7fffffffffffffff }; +static u64 curve25519_a[] = { 0x000000000001DB41, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000 }; +static const struct ecc_curve ecc_25519 = { + .name = "curve25519", + .g = { + .x = curve25519_g_x, + .ndigits = 4, + }, + .p = curve25519_p, + .a = curve25519_a, +}; + #endif diff --git a/crypto/ecdh.c b/crypto/ecdh.c index 96f80c8f8e30..04a427b8c956 100644 --- a/crypto/ecdh.c +++ b/crypto/ecdh.c @@ -23,33 +23,16 @@ static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm) return kpp_tfm_ctx(tfm); } -static unsigned int ecdh_supported_curve(unsigned int curve_id) -{ - switch (curve_id) { - case ECC_CURVE_NIST_P192: return ECC_CURVE_NIST_P192_DIGITS; - case ECC_CURVE_NIST_P256: return ECC_CURVE_NIST_P256_DIGITS; - default: return 0; - } -} - static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, unsigned int len) { struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); struct ecdh params; - unsigned int ndigits; if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0 || - params.key_size > sizeof(ctx->private_key)) + params.key_size > sizeof(u64) * ctx->ndigits) return -EINVAL; - ndigits = ecdh_supported_curve(params.curve_id); - if (!ndigits) - return -EINVAL; - - ctx->curve_id = params.curve_id; - ctx->ndigits = ndigits; - if (!params.key || !params.key_size) return ecc_gen_privkey(ctx->curve_id, ctx->ndigits, ctx->private_key); @@ -140,13 +123,24 @@ static unsigned int ecdh_max_size(struct crypto_kpp *tfm) return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1); } -static struct kpp_alg ecdh = { +static int ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm) +{ + struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); + + ctx->curve_id = ECC_CURVE_NIST_P192; + ctx->ndigits = ECC_CURVE_NIST_P192_DIGITS; + + return 0; +} + +static struct kpp_alg ecdh_nist_p192 = { .set_secret = ecdh_set_secret, .generate_public_key = ecdh_compute_value, .compute_shared_secret = ecdh_compute_value, .max_size = ecdh_max_size, + .init = ecdh_nist_p192_init_tfm, .base = { - .cra_name = "ecdh", + .cra_name = "ecdh-nist-p192", .cra_driver_name = "ecdh-generic", .cra_priority = 100, .cra_module = THIS_MODULE, @@ -154,14 +148,48 @@ static struct kpp_alg ecdh = { }, }; +static int ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) +{ + struct ecdh_ctx *ctx = ecdh_get_ctx(tfm); + + ctx->curve_id = ECC_CURVE_NIST_P256; + ctx->ndigits = ECC_CURVE_NIST_P256_DIGITS; + + return 0; +} + +static struct kpp_alg ecdh_nist_p256 = { + .set_secret = ecdh_set_secret, + .generate_public_key = ecdh_compute_value, + .compute_shared_secret = ecdh_compute_value, + .max_size = ecdh_max_size, + .init = ecdh_nist_p256_init_tfm, + .base = { + .cra_name = "ecdh-nist-p256", + .cra_driver_name = "ecdh-generic", + .cra_priority = 100, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct ecdh_ctx), + }, +}; + +static bool 
ecdh_nist_p192_registered; + static int ecdh_init(void) { - return crypto_register_kpp(&ecdh); + int ret; + + ret = crypto_register_kpp(&ecdh_nist_p192); + ecdh_nist_p192_registered = ret == 0; + + return crypto_register_kpp(&ecdh_nist_p256); } static void ecdh_exit(void) { - crypto_unregister_kpp(&ecdh); + if (ecdh_nist_p192_registered) + crypto_unregister_kpp(&ecdh_nist_p192); + crypto_unregister_kpp(&ecdh_nist_p256); } subsys_initcall(ecdh_init); diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c index fca63b559f65..f18f9028f912 100644 --- a/crypto/ecdh_helper.c +++ b/crypto/ecdh_helper.c @@ -10,7 +10,7 @@ #include <crypto/ecdh.h> #include <crypto/kpp.h> -#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + 2 * sizeof(short)) +#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + sizeof(short)) static inline u8 *ecdh_pack_data(void *dst, const void *src, size_t sz) { @@ -46,7 +46,6 @@ int crypto_ecdh_encode_key(char *buf, unsigned int len, return -EINVAL; ptr = ecdh_pack_data(ptr, &secret, sizeof(secret)); - ptr = ecdh_pack_data(ptr, ¶ms->curve_id, sizeof(params->curve_id)); ptr = ecdh_pack_data(ptr, ¶ms->key_size, sizeof(params->key_size)); ecdh_pack_data(ptr, params->key, params->key_size); @@ -70,7 +69,6 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len, if (unlikely(len < secret.len)) return -EINVAL; - ptr = ecdh_unpack_data(¶ms->curve_id, ptr, sizeof(params->curve_id)); ptr = ecdh_unpack_data(¶ms->key_size, ptr, sizeof(params->key_size)); if (secret.len != crypto_ecdh_key_len(params)) return -EINVAL; diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index 236c87547a17..45f98b750053 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -272,6 +272,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, u32 *k = ctx->expkey; u8 *k8 = (u8 *)k; u32 r0, r1, r2, r3, r4; + __le32 *lk; int i; /* Copy key, add padding */ @@ -283,22 +284,32 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key, while (i < SERPENT_MAX_KEY_SIZE) k8[i++] = 0; + lk = (__le32 *)k; + k[0] = le32_to_cpu(lk[0]); + k[1] = le32_to_cpu(lk[1]); + k[2] = le32_to_cpu(lk[2]); + k[3] = le32_to_cpu(lk[3]); + k[4] = le32_to_cpu(lk[4]); + k[5] = le32_to_cpu(lk[5]); + k[6] = le32_to_cpu(lk[6]); + k[7] = le32_to_cpu(lk[7]); + /* Expand key using polynomial */ - r0 = le32_to_cpu(k[3]); - r1 = le32_to_cpu(k[4]); - r2 = le32_to_cpu(k[5]); - r3 = le32_to_cpu(k[6]); - r4 = le32_to_cpu(k[7]); - - keyiter(le32_to_cpu(k[0]), r0, r4, r2, 0, 0); - keyiter(le32_to_cpu(k[1]), r1, r0, r3, 1, 1); - keyiter(le32_to_cpu(k[2]), r2, r1, r4, 2, 2); - keyiter(le32_to_cpu(k[3]), r3, r2, r0, 3, 3); - keyiter(le32_to_cpu(k[4]), r4, r3, r1, 4, 4); - keyiter(le32_to_cpu(k[5]), r0, r4, r2, 5, 5); - keyiter(le32_to_cpu(k[6]), r1, r0, r3, 6, 6); - keyiter(le32_to_cpu(k[7]), r2, r1, r4, 7, 7); + r0 = k[3]; + r1 = k[4]; + r2 = k[5]; + r3 = k[6]; + r4 = k[7]; + + keyiter(k[0], r0, r4, r2, 0, 0); + keyiter(k[1], r1, r0, r3, 1, 1); + keyiter(k[2], r2, r1, r4, 2, 2); + keyiter(k[3], r3, r2, r0, 3, 3); + keyiter(k[4], r4, r3, r1, 4, 4); + keyiter(k[5], r0, r4, r2, 5, 5); + keyiter(k[6], r1, r0, r3, 6, 6); + keyiter(k[7], r2, r1, r4, 7, 7); keyiter(k[0], r3, r2, r0, 8, 8); keyiter(k[1], r4, r3, r1, 9, 9); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 367aba992548..10c5b3b01ec4 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1168,11 +1168,6 @@ static inline int check_shash_op(const char *op, int err, return err; } -static inline const void *sg_data(struct 
scatterlist *sg) -{ - return page_address(sg_page(sg)) + sg->offset; -} - /* Test one hash test vector in one configuration, using the shash API */ static int test_shash_vec_cfg(const struct hash_testvec *vec, const char *vec_name, @@ -1230,7 +1225,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, return 0; if (cfg->nosimd) crypto_disable_simd_for_test(); - err = crypto_shash_digest(desc, sg_data(&tsgl->sgl[0]), + err = crypto_shash_digest(desc, sg_virt(&tsgl->sgl[0]), tsgl->sgl[0].length, result); if (cfg->nosimd) crypto_reenable_simd_for_test(); @@ -1266,7 +1261,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, cfg->finalization_type == FINALIZATION_TYPE_FINUP) { if (divs[i]->nosimd) crypto_disable_simd_for_test(); - err = crypto_shash_finup(desc, sg_data(&tsgl->sgl[i]), + err = crypto_shash_finup(desc, sg_virt(&tsgl->sgl[i]), tsgl->sgl[i].length, result); if (divs[i]->nosimd) crypto_reenable_simd_for_test(); @@ -1278,7 +1273,7 @@ static int test_shash_vec_cfg(const struct hash_testvec *vec, } if (divs[i]->nosimd) crypto_disable_simd_for_test(); - err = crypto_shash_update(desc, sg_data(&tsgl->sgl[i]), + err = crypto_shash_update(desc, sg_virt(&tsgl->sgl[i]), tsgl->sgl[i].length); if (divs[i]->nosimd) crypto_reenable_simd_for_test(); @@ -4904,11 +4899,20 @@ static const struct alg_test_desc alg_test_descs[] = { } }, { #endif - .alg = "ecdh", +#ifndef CONFIG_CRYPTO_FIPS + .alg = "ecdh-nist-p192", + .test = alg_test_kpp, + .fips_allowed = 1, + .suite = { + .kpp = __VECS(ecdh_p192_tv_template) + } + }, { +#endif + .alg = "ecdh-nist-p256", .test = alg_test_kpp, .fips_allowed = 1, .suite = { - .kpp = __VECS(ecdh_tv_template) + .kpp = __VECS(ecdh_p256_tv_template) } }, { .alg = "ecdsa-nist-p192", diff --git a/crypto/testmgr.h b/crypto/testmgr.h index de9bb4226b8b..34e4a3db3991 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -2685,19 +2685,17 @@ static const struct kpp_testvec curve25519_tv_template[] = { } }; -static const struct kpp_testvec ecdh_tv_template[] = { - { #ifndef CONFIG_CRYPTO_FIPS +static const struct kpp_testvec ecdh_p192_tv_template[] = { + { .secret = #ifdef __LITTLE_ENDIAN "\x02\x00" /* type */ - "\x20\x00" /* len */ - "\x01\x00" /* curve_id */ + "\x1e\x00" /* len */ "\x18\x00" /* key_size */ #else "\x00\x02" /* type */ - "\x00\x20" /* len */ - "\x00\x01" /* curve_id */ + "\x00\x1e" /* len */ "\x00\x18" /* key_size */ #endif "\xb5\x05\xb1\x71\x1e\xbf\x8c\xda" @@ -2725,18 +2723,20 @@ static const struct kpp_testvec ecdh_tv_template[] = { .b_public_size = 48, .expected_a_public_size = 48, .expected_ss_size = 24 - }, { + } +}; #endif + +static const struct kpp_testvec ecdh_p256_tv_template[] = { + { .secret = #ifdef __LITTLE_ENDIAN "\x02\x00" /* type */ - "\x28\x00" /* len */ - "\x02\x00" /* curve_id */ + "\x26\x00" /* len */ "\x20\x00" /* key_size */ #else "\x00\x02" /* type */ - "\x00\x28" /* len */ - "\x00\x02" /* curve_id */ + "\x00\x26" /* len */ "\x00\x20" /* key_size */ #endif "\x24\xd1\x21\xeb\xe5\xcf\x2d\x83" @@ -2774,25 +2774,21 @@ static const struct kpp_testvec ecdh_tv_template[] = { .secret = #ifdef __LITTLE_ENDIAN "\x02\x00" /* type */ - "\x08\x00" /* len */ - "\x02\x00" /* curve_id */ + "\x06\x00" /* len */ "\x00\x00", /* key_size */ #else "\x00\x02" /* type */ - "\x00\x08" /* len */ - "\x00\x02" /* curve_id */ + "\x00\x06" /* len */ "\x00\x00", /* key_size */ #endif .b_secret = #ifdef __LITTLE_ENDIAN "\x02\x00" /* type */ - "\x28\x00" /* len */ - "\x02\x00" /* curve_id */ + "\x26\x00" /* len */ "\x20\x00" /* key_size */ 
#else "\x00\x02" /* type */ - "\x00\x28" /* len */ - "\x00\x02" /* curve_id */ + "\x00\x26" /* len */ "\x00\x20" /* key_size */ #endif "\x24\xd1\x21\xeb\xe5\xcf\x2d\x83" diff --git a/drivers/char/hw_random/ba431-rng.c b/drivers/char/hw_random/ba431-rng.c index 410b50b05e21..5b7ca0416490 100644 --- a/drivers/char/hw_random/ba431-rng.c +++ b/drivers/char/hw_random/ba431-rng.c @@ -170,7 +170,6 @@ static int ba431_trng_init(struct hwrng *rng) static int ba431_trng_probe(struct platform_device *pdev) { struct ba431_trng *ba431; - struct resource *res; int ret; ba431 = devm_kzalloc(&pdev->dev, sizeof(*ba431), GFP_KERNEL); @@ -179,8 +178,7 @@ static int ba431_trng_probe(struct platform_device *pdev) ba431->dev = &pdev->dev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ba431->base = devm_ioremap_resource(&pdev->dev, res); + ba431->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ba431->base)) return PTR_ERR(ba431->base); @@ -193,7 +191,7 @@ static int ba431_trng_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ba431); - ret = hwrng_register(&ba431->rng); + ret = devm_hwrng_register(&pdev->dev, &ba431->rng); if (ret) { dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret); return ret; @@ -204,15 +202,6 @@ static int ba431_trng_probe(struct platform_device *pdev) return 0; } -static int ba431_trng_remove(struct platform_device *pdev) -{ - struct ba431_trng *ba431 = platform_get_drvdata(pdev); - - hwrng_unregister(&ba431->rng); - - return 0; -} - static const struct of_device_id ba431_trng_dt_ids[] = { { .compatible = "silex-insight,ba431-rng", .data = NULL }, { /* sentinel */ } @@ -225,7 +214,6 @@ static struct platform_driver ba431_trng_driver = { .of_match_table = ba431_trng_dt_ids, }, .probe = ba431_trng_probe, - .remove = ba431_trng_remove, }; module_platform_driver(ba431_trng_driver); diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index 1a7c43b43c6b..e7dd457e9b22 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -13,6 +13,7 @@ #include <linux/platform_device.h> #include <linux/printk.h> #include <linux/clk.h> +#include <linux/reset.h> #define RNG_CTRL 0x0 #define RNG_STATUS 0x4 @@ -32,6 +33,7 @@ struct bcm2835_rng_priv { void __iomem *base; bool mask_interrupts; struct clk *clk; + struct reset_control *reset; }; static inline struct bcm2835_rng_priv *to_rng_priv(struct hwrng *rng) @@ -88,11 +90,13 @@ static int bcm2835_rng_init(struct hwrng *rng) int ret = 0; u32 val; - if (!IS_ERR(priv->clk)) { - ret = clk_prepare_enable(priv->clk); - if (ret) - return ret; - } + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + ret = reset_control_reset(priv->reset); + if (ret) + return ret; if (priv->mask_interrupts) { /* mask the interrupt */ @@ -115,8 +119,7 @@ static void bcm2835_rng_cleanup(struct hwrng *rng) /* disable rng hardware */ rng_writel(priv, 0, RNG_CTRL); - if (!IS_ERR(priv->clk)) - clk_disable_unprepare(priv->clk); + clk_disable_unprepare(priv->clk); } struct bcm2835_rng_of_data { @@ -155,9 +158,13 @@ static int bcm2835_rng_probe(struct platform_device *pdev) return PTR_ERR(priv->base); /* Clock is optional on most platforms */ - priv->clk = devm_clk_get(dev, NULL); - if (PTR_ERR(priv->clk) == -EPROBE_DEFER) - return -EPROBE_DEFER; + priv->clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + priv->reset = devm_reset_control_get_optional_exclusive(dev, NULL); + if (IS_ERR(priv->reset)) + return 
PTR_ERR(priv->reset); priv->rng.name = pdev->name; priv->rng.init = bcm2835_rng_init; diff --git a/drivers/char/hw_random/cctrng.c b/drivers/char/hw_random/cctrng.c index 7a293f2147a0..302ffa354c2f 100644 --- a/drivers/char/hw_random/cctrng.c +++ b/drivers/char/hw_random/cctrng.c @@ -486,7 +486,6 @@ static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata) static int cctrng_probe(struct platform_device *pdev) { - struct resource *req_mem_cc_regs = NULL; struct cctrng_drvdata *drvdata; struct device *dev = &pdev->dev; int rc = 0; @@ -510,27 +509,16 @@ static int cctrng_probe(struct platform_device *pdev) drvdata->circ.buf = (char *)drvdata->data_buf; - /* Get device resources */ - /* First CC registers space */ - req_mem_cc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - /* Map registers space */ - drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs); + drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(drvdata->cc_base)) { dev_err(dev, "Failed to ioremap registers"); return PTR_ERR(drvdata->cc_base); } - dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name, - req_mem_cc_regs); - dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n", - &req_mem_cc_regs->start, drvdata->cc_base); - /* Then IRQ */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "Failed getting IRQ resource\n"); + if (irq < 0) return irq; - } /* parse sampling rate from device tree */ rc = cc_trng_parse_sampling_ratio(drvdata); @@ -585,7 +573,7 @@ static int cctrng_probe(struct platform_device *pdev) atomic_set(&drvdata->pending_hw, 1); /* registration of the hwrng device */ - rc = hwrng_register(&drvdata->rng); + rc = devm_hwrng_register(dev, &drvdata->rng); if (rc) { dev_err(dev, "Could not register hwrng device.\n"); goto post_pm_err; @@ -618,8 +606,6 @@ static int cctrng_remove(struct platform_device *pdev) dev_dbg(dev, "Releasing cctrng resources...\n"); - hwrng_unregister(&drvdata->rng); - cc_trng_pm_fini(drvdata); cc_trng_clk_fini(drvdata); diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 5cc5fc504968..4380c23587be 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -30,8 +30,7 @@ #include <linux/of_address.h> #include <linux/interrupt.h> #include <linux/clk.h> - -#include <asm/io.h> +#include <linux/io.h> #define RNG_REG_STATUS_RDY (1 << 0) diff --git a/drivers/char/hw_random/pic32-rng.c b/drivers/char/hw_random/pic32-rng.c index e8210c1715cf..99c8bd0859a1 100644 --- a/drivers/char/hw_random/pic32-rng.c +++ b/drivers/char/hw_random/pic32-rng.c @@ -96,7 +96,7 @@ static int pic32_rng_probe(struct platform_device *pdev) priv->rng.name = pdev->name; priv->rng.read = pic32_rng_read; - ret = hwrng_register(&priv->rng); + ret = devm_hwrng_register(&pdev->dev, &priv->rng); if (ret) goto err_register; @@ -113,7 +113,6 @@ static int pic32_rng_remove(struct platform_device *pdev) { struct pic32_rng *rng = platform_get_drvdata(pdev); - hwrng_unregister(&rng->rng); writel(0, rng->base + RNGCON); clk_disable_unprepare(rng->clk); return 0; diff --git a/drivers/char/hw_random/xiphera-trng.c b/drivers/char/hw_random/xiphera-trng.c index 7bdab8c8a6a8..2a9fea72b2e0 100644 --- a/drivers/char/hw_random/xiphera-trng.c +++ b/drivers/char/hw_random/xiphera-trng.c @@ -63,14 +63,12 @@ static int xiphera_trng_probe(struct platform_device *pdev) int ret; struct xiphera_trng *trng; struct device *dev = &pdev->dev; - struct resource *res; trng = devm_kzalloc(dev, sizeof(*trng), GFP_KERNEL); if 
(!trng) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - trng->mem = devm_ioremap_resource(dev, res); + trng->mem = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(trng->mem)) return PTR_ERR(trng->mem); diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c index 709905ec4680..ef224d5e4903 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c @@ -288,8 +288,7 @@ static int sun4i_ss_pm_suspend(struct device *dev) { struct sun4i_ss_ctx *ss = dev_get_drvdata(dev); - if (ss->reset) - reset_control_assert(ss->reset); + reset_control_assert(ss->reset); clk_disable_unprepare(ss->ssclk); clk_disable_unprepare(ss->busclk); @@ -314,12 +313,10 @@ static int sun4i_ss_pm_resume(struct device *dev) goto err_enable; } - if (ss->reset) { - err = reset_control_deassert(ss->reset); - if (err) { - dev_err(ss->dev, "Cannot deassert reset control\n"); - goto err_enable; - } + err = reset_control_deassert(ss->reset); + if (err) { + dev_err(ss->dev, "Cannot deassert reset control\n"); + goto err_enable; } return err; @@ -401,12 +398,10 @@ static int sun4i_ss_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "clock ahb_ss acquired\n"); ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb"); - if (IS_ERR(ss->reset)) { - if (PTR_ERR(ss->reset) == -EPROBE_DEFER) - return PTR_ERR(ss->reset); + if (IS_ERR(ss->reset)) + return PTR_ERR(ss->reset); + if (!ss->reset) dev_info(&pdev->dev, "no reset control found\n"); - ss->reset = NULL; - } /* * Check that clock have the correct rates given in the datasheet diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c index cfde9ee4356b..cd1baee424a1 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c @@ -99,6 +99,7 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src, dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE); if (dma_mapping_error(ce->dev, dma_iv)) { dev_err(ce->dev, "Cannot DMA MAP IV\n"); + err = -EFAULT; goto err_iv; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 11cbcbc83a7b..0b9aa24a5edd 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -438,8 +438,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) kfree(pad); memcpy(areq->result, result, algt->alg.hash.halg.digestsize); - kfree(result); theend: + kfree(result); crypto_finalize_hash_request(engine, breq, err); return 0; } diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c index 08a1473b2145..3191527928e4 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c @@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE); if (dma_mapping_error(ss->dev, dma_iv)) { dev_err(ss->dev, "Cannot DMA MAP IV\n"); - return -EFAULT; + err = -EFAULT; + goto err_free; } dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE); @@ -167,6 +168,7 @@ err_iv: memcpy(ctx->seed, d + dlen, ctx->slen); } memzero_explicit(d, todo); +err_free: kfree(d); return err; diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c 
b/drivers/crypto/amlogic/amlogic-gxl-core.c index 5bbeff433c8c..6e7ae896717c 100644 --- a/drivers/crypto/amlogic/amlogic-gxl-core.c +++ b/drivers/crypto/amlogic/amlogic-gxl-core.c @@ -217,9 +217,6 @@ static int meson_crypto_probe(struct platform_device *pdev) struct meson_dev *mc; int err, i; - if (!pdev->dev.of_node) - return -ENODEV; - mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); if (!mc) return -ENOMEM; diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c index 9bd8e5167be3..515946c99394 100644 --- a/drivers/crypto/atmel-ecc.c +++ b/drivers/crypto/atmel-ecc.c @@ -34,7 +34,6 @@ static struct atmel_ecc_driver_data driver_data; * of the user to not call set_secret() while * generate_public_key() or compute_shared_secret() are in flight. * @curve_id : elliptic curve id - * @n_sz : size in bytes of the n prime * @do_fallback: true when the device doesn't support the curve or when the user * wants to use its own private key. */ @@ -43,7 +42,6 @@ struct atmel_ecdh_ctx { struct crypto_kpp *fallback; const u8 *public_key; unsigned int curve_id; - size_t n_sz; bool do_fallback; }; @@ -51,7 +49,6 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq, int status) { struct kpp_request *req = areq; - struct atmel_ecdh_ctx *ctx = work_data->ctx; struct atmel_i2c_cmd *cmd = &work_data->cmd; size_t copied, n_sz; @@ -59,7 +56,7 @@ static void atmel_ecdh_done(struct atmel_i2c_work_data *work_data, void *areq, goto free_work_data; /* might want less than we've got */ - n_sz = min_t(size_t, ctx->n_sz, req->dst_len); + n_sz = min_t(size_t, ATMEL_ECC_NIST_P256_N_SIZE, req->dst_len); /* copy the shared secret */ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, n_sz), @@ -73,14 +70,6 @@ free_work_data: kpp_request_complete(req, status); } -static unsigned int atmel_ecdh_supported_curve(unsigned int curve_id) -{ - if (curve_id == ECC_CURVE_NIST_P256) - return ATMEL_ECC_NIST_P256_N_SIZE; - - return 0; -} - /* * A random private key is generated and stored in the device. The device * returns the pair public key. 
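With curve_id dropped from the packed ecdh secret (see the ecdh_helper.c and testmgr.h hunks), the curve is selected purely by the tfm's algorithm name ("ecdh-nist-p192" / "ecdh-nist-p256"). A minimal consumer-side sketch under that assumption; the function name is illustrative and error handling is trimmed to the essentials:

#include <crypto/ecdh.h>
#include <crypto/kpp.h>
#include <linux/err.h>
#include <linux/slab.h>

static int ecdh_p256_set_key_example(char *key, unsigned short key_size)
{
	struct ecdh params = { .key = key, .key_size = key_size };
	struct crypto_kpp *tfm;
	unsigned int len;
	char *buf;
	int ret;

	/* The curve is chosen by algorithm name, not by a curve_id field. */
	tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	len = crypto_ecdh_key_len(&params);
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* Packs only type/len/key_size/key, matching the new test vectors. */
	ret = crypto_ecdh_encode_key(buf, len, &params);
	if (!ret)
		ret = crypto_kpp_set_secret(tfm, buf, len);

	kfree(buf);
out_free_tfm:
	crypto_free_kpp(tfm);
	return ret;
}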
@@ -104,8 +93,7 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, return -EINVAL; } - ctx->n_sz = atmel_ecdh_supported_curve(params.curve_id); - if (!ctx->n_sz || params.key_size) { + if (params.key_size) { /* fallback to ecdh software implementation */ ctx->do_fallback = true; return crypto_kpp_set_secret(ctx->fallback, buf, len); @@ -125,7 +113,6 @@ static int atmel_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, goto free_cmd; ctx->do_fallback = false; - ctx->curve_id = params.curve_id; atmel_i2c_init_genkey_cmd(cmd, DATA_SLOT_2); @@ -263,6 +250,7 @@ static int atmel_ecdh_init_tfm(struct crypto_kpp *tfm) struct crypto_kpp *fallback; struct atmel_ecdh_ctx *ctx = kpp_tfm_ctx(tfm); + ctx->curve_id = ECC_CURVE_NIST_P256; ctx->client = atmel_ecc_i2c_client_alloc(); if (IS_ERR(ctx->client)) { pr_err("tfm - i2c_client binding failed\n"); @@ -306,7 +294,7 @@ static unsigned int atmel_ecdh_max_size(struct crypto_kpp *tfm) return ATMEL_ECC_PUBKEY_SIZE; } -static struct kpp_alg atmel_ecdh = { +static struct kpp_alg atmel_ecdh_nist_p256 = { .set_secret = atmel_ecdh_set_secret, .generate_public_key = atmel_ecdh_generate_public_key, .compute_shared_secret = atmel_ecdh_compute_shared_secret, @@ -315,7 +303,7 @@ static struct kpp_alg atmel_ecdh = { .max_size = atmel_ecdh_max_size, .base = { .cra_flags = CRYPTO_ALG_NEED_FALLBACK, - .cra_name = "ecdh", + .cra_name = "ecdh-nist-p256", .cra_driver_name = "atmel-ecdh", .cra_priority = ATMEL_ECC_PRIORITY, .cra_module = THIS_MODULE, @@ -340,14 +328,14 @@ static int atmel_ecc_probe(struct i2c_client *client, &driver_data.i2c_client_list); spin_unlock(&driver_data.i2c_list_lock); - ret = crypto_register_kpp(&atmel_ecdh); + ret = crypto_register_kpp(&atmel_ecdh_nist_p256); if (ret) { spin_lock(&driver_data.i2c_list_lock); list_del(&i2c_priv->i2c_client_list_node); spin_unlock(&driver_data.i2c_list_lock); dev_err(&client->dev, "%s alg registration failed\n", - atmel_ecdh.base.cra_driver_name); + atmel_ecdh_nist_p256.base.cra_driver_name); } else { dev_info(&client->dev, "atmel ecc algorithms registered in /proc/crypto\n"); } @@ -365,7 +353,7 @@ static int atmel_ecc_remove(struct i2c_client *client) return -EBUSY; } - crypto_unregister_kpp(&atmel_ecdh); + crypto_unregister_kpp(&atmel_ecdh_nist_p256); spin_lock(&driver_data.i2c_list_lock); list_del(&i2c_priv->i2c_client_list_node); diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c index 711b1acdd4e0..06ee42e8a245 100644 --- a/drivers/crypto/cavium/cpt/cptpf_main.c +++ b/drivers/crypto/cavium/cpt/cptpf_main.c @@ -10,7 +10,6 @@ #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/printk.h> -#include <linux/version.h> #include "cptpf.h" diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index cb9b4c4e371e..da3872c48308 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -21,6 +21,7 @@ #include <linux/ccp.h> #include <linux/firmware.h> #include <linux/gfp.h> +#include <linux/cpufeature.h> #include <asm/smp.h> @@ -972,6 +973,11 @@ int sev_dev_init(struct psp_device *psp) struct sev_device *sev; int ret = -ENOMEM; + if (!boot_cpu_has(X86_FEATURE_SEV)) { + dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n"); + return 0; + } + sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL); if (!sev) goto e_err; diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 843192666dc3..c45adb15ce8d 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ 
b/drivers/crypto/hisilicon/Kconfig @@ -65,6 +65,7 @@ config CRYPTO_DEV_HISI_HPRE depends on UACCE || UACCE=n depends on ARM64 || (COMPILE_TEST && 64BIT) depends on ACPI + select CRYPTO_LIB_CURVE25519_GENERIC select CRYPTO_DEV_HISI_QM select CRYPTO_DH select CRYPTO_RSA diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h index 181c109b19f7..92892e373cd0 100644 --- a/drivers/crypto/hisilicon/hpre/hpre.h +++ b/drivers/crypto/hisilicon/hpre/hpre.h @@ -10,6 +10,14 @@ #define HPRE_PF_DEF_Q_NUM 64 #define HPRE_PF_DEF_Q_BASE 0 +/* + * type used in qm sqc DW6. + * 0 - Algorithm which has been supported in V2, like RSA, DH and so on; + * 1 - ECC algorithm in V3. + */ +#define HPRE_V2_ALG_TYPE 0 +#define HPRE_V3_ECC_ALG_TYPE 1 + enum { HPRE_CLUSTER0, HPRE_CLUSTER1, @@ -75,6 +83,9 @@ enum hpre_alg_type { HPRE_ALG_KG_CRT = 0x3, HPRE_ALG_DH_G2 = 0x4, HPRE_ALG_DH = 0x5, + HPRE_ALG_ECC_MUL = 0xD, + /* shared by x25519 and x448, but x448 is not supported now */ + HPRE_ALG_CURVE25519_MUL = 0x10, }; struct hpre_sqe { @@ -92,8 +103,8 @@ struct hpre_sqe { __le32 rsvd1[_HPRE_SQE_ALIGN_EXT]; }; -struct hisi_qp *hpre_create_qp(void); -int hpre_algs_register(void); -void hpre_algs_unregister(void); +struct hisi_qp *hpre_create_qp(u8 type); +int hpre_algs_register(struct hisi_qm *qm); +void hpre_algs_unregister(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index a87f9904087a..53068d2a19cf 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -1,7 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ #include <crypto/akcipher.h> +#include <crypto/curve25519.h> #include <crypto/dh.h> +#include <crypto/ecc_curve.h> +#include <crypto/ecdh.h> #include <crypto/internal/akcipher.h> #include <crypto/internal/kpp.h> #include <crypto/internal/rsa.h> @@ -36,6 +39,13 @@ struct hpre_ctx; #define HPRE_DFX_SEC_TO_US 1000000 #define HPRE_DFX_US_TO_NS 1000 +/* size in bytes of the n prime */ +#define HPRE_ECC_NIST_P192_N_SIZE 24 +#define HPRE_ECC_NIST_P256_N_SIZE 32 + +/* size in bytes */ +#define HPRE_ECC_HW256_KSZ_B 32 + typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe); struct hpre_rsa_ctx { @@ -61,14 +71,35 @@ struct hpre_dh_ctx { * else if base if the counterpart public key we * compute the shared secret * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1] + * low address: d--->n, please refer to Hisilicon HPRE UM */ - char *xa_p; /* low address: d--->n, please refer to Hisilicon HPRE UM */ + char *xa_p; dma_addr_t dma_xa_p; char *g; /* m */ dma_addr_t dma_g; }; +struct hpre_ecdh_ctx { + /* low address: p->a->k->b */ + unsigned char *p; + dma_addr_t dma_p; + + /* low address: x->y */ + unsigned char *g; + dma_addr_t dma_g; +}; + +struct hpre_curve25519_ctx { + /* low address: p->a->k */ + unsigned char *p; + dma_addr_t dma_p; + + /* gx coordinate */ + unsigned char *g; + dma_addr_t dma_g; +}; + struct hpre_ctx { struct hisi_qp *qp; struct hpre_asym_request **req_list; @@ -80,7 +111,11 @@ struct hpre_ctx { union { struct hpre_rsa_ctx rsa; struct hpre_dh_ctx dh; + struct hpre_ecdh_ctx ecdh; + struct hpre_curve25519_ctx curve25519; }; + /* for ecc algorithms */ + unsigned int curve_id; }; struct hpre_asym_request { @@ -91,6 +126,8 @@ struct hpre_asym_request { union { struct akcipher_request *rsa; struct kpp_request *dh; + struct kpp_request *ecdh; + struct kpp_request *curve25519; } areq; int err; int req_id; @@ -152,12 
+189,12 @@ static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req) } } -static struct hisi_qp *hpre_get_qp_and_start(void) +static struct hisi_qp *hpre_get_qp_and_start(u8 type) { struct hisi_qp *qp; int ret; - qp = hpre_create_qp(); + qp = hpre_create_qp(type); if (!qp) { pr_err("Can not create hpre qp!\n"); return ERR_PTR(-ENODEV); @@ -413,7 +450,6 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp) struct hpre_sqe *sqe = resp; struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)]; - if (unlikely(!req)) { atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value); return; @@ -422,11 +458,11 @@ static void hpre_alg_cb(struct hisi_qp *qp, void *resp) req->cb(ctx, resp); } -static int hpre_ctx_init(struct hpre_ctx *ctx) +static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type) { struct hisi_qp *qp; - qp = hpre_get_qp_and_start(); + qp = hpre_get_qp_and_start(type); if (IS_ERR(qp)) return PTR_ERR(qp); @@ -674,7 +710,7 @@ static int hpre_dh_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - return hpre_ctx_init(ctx); + return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); } static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) @@ -1100,7 +1136,7 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) return PTR_ERR(ctx->rsa.soft_tfm); } - ret = hpre_ctx_init(ctx); + ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); if (ret) crypto_free_akcipher(ctx->rsa.soft_tfm); @@ -1115,6 +1151,728 @@ static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm) crypto_free_akcipher(ctx->rsa.soft_tfm); } +static void hpre_key_to_big_end(u8 *data, int len) +{ + int i, j; + u8 tmp; + + for (i = 0; i < len / 2; i++) { + j = len - i - 1; + tmp = data[j]; + data[j] = data[i]; + data[i] = tmp; + } +} + +static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all, + bool is_ecdh) +{ + struct device *dev = HPRE_DEV(ctx); + unsigned int sz = ctx->key_sz; + unsigned int shift = sz << 1; + + if (is_clear_all) + hisi_qm_stop_qp(ctx->qp); + + if (is_ecdh && ctx->ecdh.p) { + /* ecdh: p->a->k->b */ + memzero_explicit(ctx->ecdh.p + shift, sz); + dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p); + ctx->ecdh.p = NULL; + } else if (!is_ecdh && ctx->curve25519.p) { + /* curve25519: p->a->k */ + memzero_explicit(ctx->curve25519.p + shift, sz); + dma_free_coherent(dev, sz << 2, ctx->curve25519.p, + ctx->curve25519.dma_p); + ctx->curve25519.p = NULL; + } + + hpre_ctx_clear(ctx, is_clear_all); +} + +static unsigned int hpre_ecdh_supported_curve(unsigned short id) +{ + switch (id) { + case ECC_CURVE_NIST_P192: + case ECC_CURVE_NIST_P256: + return HPRE_ECC_HW256_KSZ_B; + default: + break; + } + + return 0; +} + +static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits) +{ + unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64); + u8 i = 0; + + while (i < ndigits - 1) { + memcpy(addr + sizeof(u64) * i, ¶m[i], sizeof(u64)); + i++; + } + + memcpy(addr + sizeof(u64) * i, ¶m[ndigits - 1], sz); + hpre_key_to_big_end((u8 *)addr, cur_sz); +} + +static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params, + unsigned int cur_sz) +{ + unsigned int shifta = ctx->key_sz << 1; + unsigned int shiftb = ctx->key_sz << 2; + void *p = ctx->ecdh.p + ctx->key_sz - cur_sz; + void *a = ctx->ecdh.p + shifta - cur_sz; + void *b = ctx->ecdh.p + shiftb - cur_sz; + void *x = ctx->ecdh.g + ctx->key_sz - cur_sz; + void *y = ctx->ecdh.g + shifta - cur_sz; + const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id); + char *n; + + if (unlikely(!curve)) + 
return -EINVAL; + + n = kzalloc(ctx->key_sz, GFP_KERNEL); + if (!n) + return -ENOMEM; + + fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits); + fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits); + fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits); + fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits); + fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits); + fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits); + + if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) { + kfree(n); + return -EINVAL; + } + + kfree(n); + return 0; +} + +static unsigned int hpre_ecdh_get_curvesz(unsigned short id) +{ + switch (id) { + case ECC_CURVE_NIST_P192: + return HPRE_ECC_NIST_P192_N_SIZE; + case ECC_CURVE_NIST_P256: + return HPRE_ECC_NIST_P256_N_SIZE; + default: + break; + } + + return 0; +} + +static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params) +{ + struct device *dev = HPRE_DEV(ctx); + unsigned int sz, shift, curve_sz; + int ret; + + ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id); + if (!ctx->key_sz) + return -EINVAL; + + curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id); + if (!curve_sz || params->key_size > curve_sz) + return -EINVAL; + + sz = ctx->key_sz; + + if (!ctx->ecdh.p) { + ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p, + GFP_KERNEL); + if (!ctx->ecdh.p) + return -ENOMEM; + } + + shift = sz << 2; + ctx->ecdh.g = ctx->ecdh.p + shift; + ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift; + + ret = hpre_ecdh_fill_curve(ctx, params, curve_sz); + if (ret) { + dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret); + dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p); + ctx->ecdh.p = NULL; + return ret; + } + + return 0; +} + +static bool hpre_key_is_zero(char *key, unsigned short key_sz) +{ + int i; + + for (i = 0; i < key_sz; i++) + if (key[i]) + return false; + + return true; +} + +static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, + unsigned int len) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct device *dev = HPRE_DEV(ctx); + unsigned int sz, sz_shift; + struct ecdh params; + int ret; + + if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0) { + dev_err(dev, "failed to decode ecdh key!\n"); + return -EINVAL; + } + + if (hpre_key_is_zero(params.key, params.key_size)) { + dev_err(dev, "Invalid hpre key!\n"); + return -EINVAL; + } + + hpre_ecc_clear_ctx(ctx, false, true); + + ret = hpre_ecdh_set_param(ctx, ¶ms); + if (ret < 0) { + dev_err(dev, "failed to set hpre param, ret = %d!\n", ret); + return ret; + } + + sz = ctx->key_sz; + sz_shift = (sz << 1) + sz - params.key_size; + memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size); + + return 0; +} + +static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx, + struct hpre_asym_request *req, + struct scatterlist *dst, + struct scatterlist *src) +{ + struct device *dev = HPRE_DEV(ctx); + struct hpre_sqe *sqe = &req->req; + dma_addr_t dma; + + dma = le64_to_cpu(sqe->in); + if (unlikely(!dma)) + return; + + if (src && req->src) + dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma); + + dma = le64_to_cpu(sqe->out); + if (unlikely(!dma)) + return; + + if (req->dst) + dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma); + if (dst) + dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE); +} + +static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp) +{ + unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id); + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; + struct hpre_asym_request *req = NULL; + 
struct kpp_request *areq; + u64 overtime_thrhld; + char *p; + int ret; + + ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); + areq = req->areq.ecdh; + areq->dst_len = ctx->key_sz << 1; + + overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value); + if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) + atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); + + p = sg_virt(areq->dst); + memmove(p, p + ctx->key_sz - curve_sz, curve_sz); + memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz); + + hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src); + kpp_request_complete(areq, ret); + + atomic64_inc(&dfx[HPRE_RECV_CNT].value); +} + +static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx, + struct kpp_request *req) +{ + struct hpre_asym_request *h_req; + struct hpre_sqe *msg; + int req_id; + void *tmp; + + if (req->dst_len < ctx->key_sz << 1) { + req->dst_len = ctx->key_sz << 1; + return -EINVAL; + } + + tmp = kpp_request_ctx(req); + h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + h_req->cb = hpre_ecdh_cb; + h_req->areq.ecdh = req; + msg = &h_req->req; + memset(msg, 0, sizeof(*msg)); + msg->key = cpu_to_le64(ctx->ecdh.dma_p); + + msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT); + msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1; + h_req->ctx = ctx; + + req_id = hpre_add_req_to_ctx(h_req); + if (req_id < 0) + return -EBUSY; + + msg->tag = cpu_to_le16((u16)req_id); + return 0; +} + +static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len) +{ + struct hpre_sqe *msg = &hpre_req->req; + struct hpre_ctx *ctx = hpre_req->ctx; + struct device *dev = HPRE_DEV(ctx); + unsigned int tmpshift; + dma_addr_t dma = 0; + void *ptr; + int shift; + + /* Src_data include gx and gy. 
*/ + shift = ctx->key_sz - (len >> 1); + if (unlikely(shift < 0)) + return -EINVAL; + + ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL); + if (unlikely(!ptr)) + return -ENOMEM; + + tmpshift = ctx->key_sz << 1; + scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0); + memcpy(ptr + shift, ptr + tmpshift, len >> 1); + memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1); + + hpre_req->src = ptr; + msg->in = cpu_to_le64(dma); + return 0; +} + +static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len) +{ + struct hpre_sqe *msg = &hpre_req->req; + struct hpre_ctx *ctx = hpre_req->ctx; + struct device *dev = HPRE_DEV(ctx); + dma_addr_t dma = 0; + + if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) { + dev_err(dev, "data or data length is illegal!\n"); + return -EINVAL; + } + + hpre_req->dst = NULL; + dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, dma))) { + dev_err(dev, "dma map data err!\n"); + return -ENOMEM; + } + + msg->out = cpu_to_le64(dma); + return 0; +} + +static int hpre_ecdh_compute_value(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct device *dev = HPRE_DEV(ctx); + void *tmp = kpp_request_ctx(req); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_sqe *msg = &hpre_req->req; + int ret; + + ret = hpre_ecdh_msg_request_set(ctx, req); + if (unlikely(ret)) { + dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret); + return ret; + } + + if (req->src) { + ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len); + if (unlikely(ret)) { + dev_err(dev, "failed to init src data, ret = %d!\n", ret); + goto clear_all; + } + } else { + msg->in = cpu_to_le64(ctx->ecdh.dma_g); + } + + ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len); + if (unlikely(ret)) { + dev_err(dev, "failed to init dst data, ret = %d!\n", ret); + goto clear_all; + } + + msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL); + ret = hpre_send(ctx, msg); + if (likely(!ret)) + return -EINPROGRESS; + +clear_all: + hpre_rm_req_from_ctx(hpre_req); + hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); + return ret; +} + +static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + /* max size is the pub_key_size, include x and y */ + return ctx->key_sz << 1; +} + +static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + ctx->curve_id = ECC_CURVE_NIST_P192; + + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); +} + +static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + ctx->curve_id = ECC_CURVE_NIST_P256; + + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); +} + +static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + hpre_ecc_clear_ctx(ctx, true, true); +} + +static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf, + unsigned int len) +{ + u8 secret[CURVE25519_KEY_SIZE] = { 0 }; + unsigned int sz = ctx->key_sz; + const struct ecc_curve *curve; + unsigned int shift = sz << 1; + void *p; + + /* + * The key from 'buf' is in little-endian, we should preprocess it as + * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64", + * then convert it to big 
endian. Only in this way, the result can be + * the same as the software curve-25519 that exists in crypto. + */ + memcpy(secret, buf, len); + curve25519_clamp_secret(secret); + hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE); + + p = ctx->curve25519.p + sz - len; + + curve = ecc_get_curve25519(); + + /* fill curve parameters */ + fill_curve_param(p, curve->p, len, curve->g.ndigits); + fill_curve_param(p + sz, curve->a, len, curve->g.ndigits); + memcpy(p + shift, secret, len); + fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits); + memzero_explicit(secret, CURVE25519_KEY_SIZE); +} + +static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf, + unsigned int len) +{ + struct device *dev = HPRE_DEV(ctx); + unsigned int sz = ctx->key_sz; + unsigned int shift = sz << 1; + + /* p->a->k->gx */ + if (!ctx->curve25519.p) { + ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2, + &ctx->curve25519.dma_p, + GFP_KERNEL); + if (!ctx->curve25519.p) + return -ENOMEM; + } + + ctx->curve25519.g = ctx->curve25519.p + shift + sz; + ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz; + + hpre_curve25519_fill_curve(ctx, buf, len); + + return 0; +} + +static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf, + unsigned int len) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct device *dev = HPRE_DEV(ctx); + int ret = -EINVAL; + + if (len != CURVE25519_KEY_SIZE || + !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) { + dev_err(dev, "key is null or key len is not 32bytes!\n"); + return ret; + } + + /* Free old secret if any */ + hpre_ecc_clear_ctx(ctx, false, false); + + ctx->key_sz = CURVE25519_KEY_SIZE; + ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE); + if (ret) { + dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret); + hpre_ecc_clear_ctx(ctx, false, false); + return ret; + } + + return 0; +} + +static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx, + struct hpre_asym_request *req, + struct scatterlist *dst, + struct scatterlist *src) +{ + struct device *dev = HPRE_DEV(ctx); + struct hpre_sqe *sqe = &req->req; + dma_addr_t dma; + + dma = le64_to_cpu(sqe->in); + if (unlikely(!dma)) + return; + + if (src && req->src) + dma_free_coherent(dev, ctx->key_sz, req->src, dma); + + dma = le64_to_cpu(sqe->out); + if (unlikely(!dma)) + return; + + if (req->dst) + dma_free_coherent(dev, ctx->key_sz, req->dst, dma); + if (dst) + dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE); +} + +static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp) +{ + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; + struct hpre_asym_request *req = NULL; + struct kpp_request *areq; + u64 overtime_thrhld; + int ret; + + ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); + areq = req->areq.curve25519; + areq->dst_len = ctx->key_sz; + + overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value); + if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) + atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); + + hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE); + + hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src); + kpp_request_complete(areq, ret); + + atomic64_inc(&dfx[HPRE_RECV_CNT].value); +} + +static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx, + struct kpp_request *req) +{ + struct hpre_asym_request *h_req; + struct hpre_sqe *msg; + int req_id; + void *tmp; + + if (unlikely(req->dst_len < ctx->key_sz)) { + req->dst_len = ctx->key_sz; + return -EINVAL; + } + + 
tmp = kpp_request_ctx(req); + h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + h_req->cb = hpre_curve25519_cb; + h_req->areq.curve25519 = req; + msg = &h_req->req; + memset(msg, 0, sizeof(*msg)); + msg->key = cpu_to_le64(ctx->curve25519.dma_p); + + msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT); + msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1; + h_req->ctx = ctx; + + req_id = hpre_add_req_to_ctx(h_req); + if (req_id < 0) + return -EBUSY; + + msg->tag = cpu_to_le16((u16)req_id); + return 0; +} + +static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len) +{ + struct hpre_sqe *msg = &hpre_req->req; + struct hpre_ctx *ctx = hpre_req->ctx; + struct device *dev = HPRE_DEV(ctx); + u8 p[CURVE25519_KEY_SIZE] = { 0 }; + const struct ecc_curve *curve; + dma_addr_t dma = 0; + u8 *ptr; + + if (len != CURVE25519_KEY_SIZE) { + dev_err(dev, "src_data len is not 32 bytes, len = %u!\n", len); + return -EINVAL; + } + + ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL); + if (unlikely(!ptr)) + return -ENOMEM; + + scatterwalk_map_and_copy(ptr, data, 0, len, 0); + + if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) { + dev_err(dev, "gx is null!\n"); + goto err; + } + + /* + * Src_data (gx) is in little-endian order; the MSB of the final byte + * must be masked as described in RFC 7748, and the value converted to + * big-endian form before hisi_hpre can use it. + */ + ptr[31] &= 0x7f; + hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE); + + curve = ecc_get_curve25519(); + + fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits); + if (memcmp(ptr, p, ctx->key_sz) >= 0) { + dev_err(dev, "gx is out of p!\n"); + goto err; + } + + hpre_req->src = ptr; + msg->in = cpu_to_le64(dma); + return 0; + +err: + dma_free_coherent(dev, ctx->key_sz, ptr, dma); + return -EINVAL; +} + +static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req, + struct scatterlist *data, unsigned int len) +{ + struct hpre_sqe *msg = &hpre_req->req; + struct hpre_ctx *ctx = hpre_req->ctx; + struct device *dev = HPRE_DEV(ctx); + dma_addr_t dma = 0; + + if (!data || !sg_is_last(data) || len != ctx->key_sz) { + dev_err(dev, "data or data length is illegal!\n"); + return -EINVAL; + } + + hpre_req->dst = NULL; + dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, dma))) { + dev_err(dev, "dma map data err!\n"); + return -ENOMEM; + } + + msg->out = cpu_to_le64(dma); + return 0; +} + +static int hpre_curve25519_compute_value(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct device *dev = HPRE_DEV(ctx); + void *tmp = kpp_request_ctx(req); + struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ); + struct hpre_sqe *msg = &hpre_req->req; + int ret; + + ret = hpre_curve25519_msg_request_set(ctx, req); + if (unlikely(ret)) { + dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret); + return ret; + } + + if (req->src) { + ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len); + if (unlikely(ret)) { + dev_err(dev, "failed to init src data, ret = %d!\n", + ret); + goto clear_all; + } + } else { + msg->in = cpu_to_le64(ctx->curve25519.dma_g); + } + + ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len); + if (unlikely(ret)) { + dev_err(dev, "failed to init dst data, ret = %d!\n", ret); + goto clear_all; + } + + msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | 
HPRE_ALG_CURVE25519_MUL); + ret = hpre_send(ctx, msg); + if (likely(!ret)) + return -EINPROGRESS; + +clear_all: + hpre_rm_req_from_ctx(hpre_req); + hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); + return ret; +} + +static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + return ctx->key_sz; +} + +static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); +} + +static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + hpre_ecc_clear_ctx(ctx, true, false); +} + static struct akcipher_alg rsa = { .sign = hpre_rsa_dec, .verify = hpre_rsa_enc, @@ -1154,7 +1912,82 @@ static struct kpp_alg dh = { }; #endif -int hpre_algs_register(void) +static struct kpp_alg ecdh_nist_p192 = { + .set_secret = hpre_ecdh_set_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, + .max_size = hpre_ecdh_max_size, + .init = hpre_ecdh_nist_p192_init_tfm, + .exit = hpre_ecdh_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "ecdh-nist-p192", + .cra_driver_name = "hpre-ecdh-nist-p192", + .cra_module = THIS_MODULE, + }, +}; + +static struct kpp_alg ecdh_nist_p256 = { + .set_secret = hpre_ecdh_set_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, + .max_size = hpre_ecdh_max_size, + .init = hpre_ecdh_nist_p256_init_tfm, + .exit = hpre_ecdh_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "ecdh-nist-p256", + .cra_driver_name = "hpre-ecdh-nist-p256", + .cra_module = THIS_MODULE, + }, +}; + +static struct kpp_alg curve25519_alg = { + .set_secret = hpre_curve25519_set_secret, + .generate_public_key = hpre_curve25519_compute_value, + .compute_shared_secret = hpre_curve25519_compute_value, + .max_size = hpre_curve25519_max_size, + .init = hpre_curve25519_init_tfm, + .exit = hpre_curve25519_exit_tfm, + .reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ, + .base = { + .cra_ctxsize = sizeof(struct hpre_ctx), + .cra_priority = HPRE_CRYPTO_ALG_PRI, + .cra_name = "curve25519", + .cra_driver_name = "hpre-curve25519", + .cra_module = THIS_MODULE, + }, +}; + +static int hpre_register_ecdh(void) +{ + int ret; + + ret = crypto_register_kpp(&ecdh_nist_p192); + if (ret) + return ret; + + ret = crypto_register_kpp(&ecdh_nist_p256); + if (ret) { + crypto_unregister_kpp(&ecdh_nist_p192); + return ret; + } + + return 0; +} + +static void hpre_unregister_ecdh(void) +{ + crypto_unregister_kpp(&ecdh_nist_p256); + crypto_unregister_kpp(&ecdh_nist_p192); +} + +int hpre_algs_register(struct hisi_qm *qm) { int ret; @@ -1164,17 +1997,41 @@ int hpre_algs_register(void) return ret; #ifdef CONFIG_CRYPTO_DH ret = crypto_register_kpp(&dh); - if (ret) + if (ret) { crypto_unregister_akcipher(&rsa); + return ret; + } #endif + if (qm->ver >= QM_HW_V3) { + ret = hpre_register_ecdh(); + if (ret) + goto reg_err; + ret = crypto_register_kpp(&curve25519_alg); + if (ret) { + hpre_unregister_ecdh(); + goto reg_err; + } + } + return 0; + +reg_err: +#ifdef CONFIG_CRYPTO_DH + crypto_unregister_kpp(&dh); +#endif + crypto_unregister_akcipher(&rsa); return ret; } -void 
hpre_algs_unregister(void) +void hpre_algs_unregister(struct hisi_qm *qm) { - crypto_unregister_akcipher(&rsa); + if (qm->ver >= QM_HW_V3) { + crypto_unregister_kpp(&curve25519_alg); + hpre_unregister_ecdh(); + } + #ifdef CONFIG_CRYPTO_DH crypto_unregister_kpp(&dh); #endif + crypto_unregister_akcipher(&rsa); } diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index e7a2c70eb9cf..87e8f4d60474 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -226,13 +226,20 @@ static u32 vfs_num; module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); -struct hisi_qp *hpre_create_qp(void) +struct hisi_qp *hpre_create_qp(u8 type) { int node = cpu_to_node(smp_processor_id()); struct hisi_qp *qp = NULL; int ret; - ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, 0, node, &qp); + if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE) + return NULL; + + /* + * type 0 - RSA/DH algorithms supported in V2; + * type 1 - ECC algorithms supported in V3. + */ + ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp); if (!ret) return qp; @@ -1075,4 +1082,5 @@ module_exit(hpre_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>"); +MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>"); MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator"); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 13cb4216561a..bc231742ad36 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -4084,7 +4084,7 @@ int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list) mutex_unlock(&qm_list->lock); if (flag) { - ret = qm_list->register_to_crypto(); + ret = qm_list->register_to_crypto(qm); if (ret) { mutex_lock(&qm_list->lock); list_del(&qm->list); @@ -4115,7 +4115,7 @@ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list) mutex_unlock(&qm_list->lock); if (list_empty(&qm_list->list)) - qm_list->unregister_from_crypto(); + qm_list->unregister_from_crypto(qm); } EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister); diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h index 54967c6b9c78..f91110fcf6a4 100644 --- a/drivers/crypto/hisilicon/qm.h +++ b/drivers/crypto/hisilicon/qm.h @@ -199,8 +199,8 @@ struct hisi_qm_err_ini { struct hisi_qm_list { struct mutex lock; struct list_head list; - int (*register_to_crypto)(void); - void (*unregister_from_crypto)(void); + int (*register_to_crypto)(struct hisi_qm *qm); + void (*unregister_from_crypto)(struct hisi_qm *qm); }; struct hisi_qm { diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 08491912afd5..f7eba9bd312b 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -4,8 +4,6 @@ #ifndef __HISI_SEC_V2_H #define __HISI_SEC_V2_H -#include <linux/list.h> - #include "../qm.h" #include "sec_crypto.h" @@ -50,7 +48,7 @@ struct sec_req { int err_type; int req_id; - int flag; + u32 flag; /* Status of the SEC request */ bool fake_busy; @@ -139,6 +137,7 @@ struct sec_ctx { bool pbuf_supported; struct sec_cipher_ctx c_ctx; struct sec_auth_ctx a_ctx; + struct device *dev; }; enum sec_endian { @@ -183,6 +182,6 @@ struct sec_dev { void sec_destroy_qps(struct hisi_qp **qps, int qp_num); struct hisi_qp **sec_create_qps(void); -int sec_register_to_crypto(void); -void sec_unregister_from_crypto(void); +int sec_register_to_crypto(struct 
hisi_qm *qm); +void sec_unregister_from_crypto(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 2eaa516b3231..c0efa611f2be 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -43,7 +43,6 @@ #define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH) #define SEC_SGL_SGE_NR 128 -#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev) #define SEC_CIPHER_AUTH 0xfe #define SEC_AUTH_CIPHER 0x1 #define SEC_MAX_MAC_LEN 64 @@ -96,7 +95,7 @@ static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) 0, QM_Q_DEPTH, GFP_ATOMIC); mutex_unlock(&qp_ctx->req_lock); if (unlikely(req_id < 0)) { - dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n"); + dev_err(req->ctx->dev, "alloc req id fail!\n"); return req_id; } @@ -112,7 +111,7 @@ static void sec_free_req_id(struct sec_req *req) int req_id = req->req_id; if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) { - dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n"); + dev_err(req->ctx->dev, "free request id invalid!\n"); return; } @@ -138,7 +137,7 @@ static int sec_aead_verify(struct sec_req *req) aead_req->cryptlen + aead_req->assoclen - authsize); if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) { - dev_err(SEC_CTX_DEV(req->ctx), "aead verify failure!\n"); + dev_err(req->ctx->dev, "aead verify failure!\n"); return -EBADMSG; } @@ -177,7 +176,7 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) if (unlikely(req->err_type || done != SEC_SQE_DONE || (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG) || (ctx->alg_type == SEC_AEAD && flag != SEC_SQE_AEAD_FLAG))) { - dev_err(SEC_CTX_DEV(ctx), + dev_err_ratelimited(ctx->dev, "err_type[%d],done[%d],flag[%d]\n", req->err_type, done, flag); err = -EIO; @@ -326,8 +325,8 @@ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res) static int sec_alg_resource_alloc(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); struct sec_alg_res *res = qp_ctx->res; + struct device *dev = ctx->dev; int ret; ret = sec_alloc_civ_resource(dev, res); @@ -360,7 +359,7 @@ alloc_fail: static void sec_alg_resource_free(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; sec_free_civ_resource(dev, qp_ctx->res); @@ -373,7 +372,7 @@ static void sec_alg_resource_free(struct sec_ctx *ctx, static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, int qp_ctx_id, int alg_type) { - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; struct sec_qp_ctx *qp_ctx; struct hisi_qp *qp; int ret = -ENOMEM; @@ -428,7 +427,7 @@ err_destroy_idr: static void sec_release_qp_ctx(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; hisi_qm_stop_qp(qp_ctx->qp); sec_alg_resource_free(ctx, qp_ctx); @@ -452,6 +451,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm); ctx->sec = sec; + ctx->dev = &sec->qm.pdev->dev; ctx->hlf_q_num = sec->ctx_q_num >> 1; ctx->pbuf_supported = ctx->sec->iommu_used; @@ -476,11 +476,9 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) err_sec_release_qp_ctx: for (i = i - 1; i >= 0; i--) sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); - kfree(ctx->qp_ctx); err_destroy_qps: sec_destroy_qps(ctx->qps, sec->ctx_q_num); - return ret; } @@ -499,7 +497,7 @@ static int 
sec_cipher_init(struct sec_ctx *ctx) { struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; - c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, &c_ctx->c_key_dma, GFP_KERNEL); if (!c_ctx->c_key) return -ENOMEM; @@ -512,7 +510,7 @@ static void sec_cipher_uninit(struct sec_ctx *ctx) struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE); - dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, c_ctx->c_key, c_ctx->c_key_dma); } @@ -520,7 +518,7 @@ static int sec_auth_init(struct sec_ctx *ctx) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - a_ctx->a_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, &a_ctx->a_key_dma, GFP_KERNEL); if (!a_ctx->a_key) return -ENOMEM; @@ -533,7 +531,7 @@ static void sec_auth_uninit(struct sec_ctx *ctx) struct sec_auth_ctx *a_ctx = &ctx->a_ctx; memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE); - dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE, + dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, a_ctx->a_key, a_ctx->a_key_dma); } @@ -546,7 +544,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm) crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req)); ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm); if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { - dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n"); + pr_err("get error skcipher iv size!\n"); return -EINVAL; } @@ -633,12 +631,13 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct device *dev = ctx->dev; int ret; if (c_mode == SEC_CMODE_XTS) { ret = xts_verify_key(tfm, key, keylen); if (ret) { - dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n"); + dev_err(dev, "xts mode key err!\n"); return ret; } } @@ -659,7 +658,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, } if (ret) { - dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n"); + dev_err(dev, "set sec key err!\n"); return ret; } @@ -691,7 +690,7 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; int copy_size, pbuf_length; int req_id = req->req_id; @@ -701,9 +700,8 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, copy_size = c_req->c_len; pbuf_length = sg_copy_to_buffer(src, sg_nents(src), - qp_ctx->res[req_id].pbuf, - copy_size); - + qp_ctx->res[req_id].pbuf, + copy_size); if (unlikely(pbuf_length != copy_size)) { dev_err(dev, "copy src data to pbuf error!\n"); return -EINVAL; @@ -727,7 +725,7 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; int copy_size, pbuf_length; int req_id = req->req_id; @@ -739,7 +737,6 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), qp_ctx->res[req_id].pbuf, copy_size); - if (unlikely(pbuf_length != copy_size)) dev_err(dev, "copy pbuf data to dst error!\n"); } 
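The SEC_CTX_DEV() removals in these hunks all follow one pattern: resolve the device pointer once when the context is initialised and cache it in ctx->dev, instead of chasing the ctx->sec->qm.pdev->dev chain at every call site. A minimal sketch of the idea, with a trimmed, hypothetical context struct:

#include <linux/device.h>
#include <linux/pci.h>

struct toy_ctx {
	struct device *dev;		/* cached at init time */
};

static void toy_ctx_init(struct toy_ctx *ctx, struct pci_dev *pdev)
{
	/* Walk the pointer chain once, not on every error print. */
	ctx->dev = &pdev->dev;
}

static void toy_report(struct toy_ctx *ctx)
{
	dev_err(ctx->dev, "something went wrong!\n");
}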
@@ -751,7 +748,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, struct sec_aead_req *a_req = &req->aead_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; struct sec_alg_res *res = &qp_ctx->res[req->req_id]; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; int ret; if (req->use_pbuf) { @@ -806,7 +803,7 @@ static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req, struct scatterlist *src, struct scatterlist *dst) { struct sec_cipher_req *c_req = &req->c_req; - struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; if (req->use_pbuf) { sec_cipher_pbuf_unmap(ctx, req, dst); @@ -891,6 +888,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, { struct sec_ctx *ctx = crypto_aead_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + struct device *dev = ctx->dev; struct crypto_authenc_keys keys; int ret; @@ -904,13 +902,13 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, ret = sec_aead_aes_set_key(c_ctx, &keys); if (ret) { - dev_err(SEC_CTX_DEV(ctx), "set sec cipher key err!\n"); + dev_err(dev, "set sec cipher key err!\n"); goto bad_key; } ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys); if (ret) { - dev_err(SEC_CTX_DEV(ctx), "set sec auth key err!\n"); + dev_err(dev, "set sec auth key err!\n"); goto bad_key; } @@ -1062,7 +1060,7 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size, cryptlen - iv_size); if (unlikely(sz != iv_size)) - dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n"); + dev_err(req->ctx->dev, "copy output iv error!\n"); } static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, @@ -1160,7 +1158,7 @@ static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req) ret = sec_skcipher_bd_fill(ctx, req); if (unlikely(ret)) { - dev_err(SEC_CTX_DEV(ctx), "skcipher bd fill is error!\n"); + dev_err(ctx->dev, "skcipher bd fill is error!\n"); return ret; } @@ -1194,7 +1192,7 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) a_req->assoclen); if (unlikely(sz != authsize)) { - dev_err(SEC_CTX_DEV(req->ctx), "copy out mac err!\n"); + dev_err(c->dev, "copy out mac err!\n"); err = -EINVAL; } } @@ -1259,7 +1257,7 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req) ret = ctx->req_op->bd_send(ctx, req); if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) || (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { - dev_err_ratelimited(SEC_CTX_DEV(ctx), "send sec request failed!\n"); + dev_err_ratelimited(ctx->dev, "send sec request failed!\n"); goto err_send_req; } @@ -1325,7 +1323,7 @@ static int sec_aead_init(struct crypto_aead *tfm) ctx->alg_type = SEC_AEAD; ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm); if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { - dev_err(SEC_CTX_DEV(ctx), "get error aead iv size!\n"); + dev_err(ctx->dev, "get error aead iv size!\n"); return -EINVAL; } @@ -1374,7 +1372,7 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); if (IS_ERR(auth_ctx->hash_tfm)) { - dev_err(SEC_CTX_DEV(ctx), "aead alloc shash error!\n"); + dev_err(ctx->dev, "aead alloc shash error!\n"); sec_aead_exit(tfm); return PTR_ERR(auth_ctx->hash_tfm); } @@ -1408,7 +1406,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) { struct skcipher_request *sk_req = sreq->c_req.sk_req; - 
struct device *dev = SEC_CTX_DEV(ctx); + struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; if (unlikely(!sk_req->src || !sk_req->dst)) { @@ -1531,14 +1529,15 @@ static struct skcipher_alg sec_skciphers[] = { static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) { - u8 c_alg = ctx->c_ctx.c_alg; struct aead_request *req = sreq->aead_req.aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(req); size_t authsize = crypto_aead_authsize(tfm); + struct device *dev = ctx->dev; + u8 c_alg = ctx->c_ctx.c_alg; if (unlikely(!req->src || !req->dst || !req->cryptlen || req->assoclen > SEC_MAX_AAD_LEN)) { - dev_err(SEC_CTX_DEV(ctx), "aead input param error!\n"); + dev_err(dev, "aead input param error!\n"); return -EINVAL; } @@ -1550,7 +1549,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) /* Support AES only */ if (unlikely(c_alg != SEC_CALG_AES)) { - dev_err(SEC_CTX_DEV(ctx), "aead crypto alg error!\n"); + dev_err(dev, "aead crypto alg error!\n"); return -EINVAL; } if (sreq->c_req.encrypt) @@ -1559,7 +1558,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) sreq->c_req.c_len = req->cryptlen - authsize; if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { - dev_err(SEC_CTX_DEV(ctx), "aead crypto length error!\n"); + dev_err(dev, "aead crypto length error!\n"); return -EINVAL; } @@ -1634,7 +1633,7 @@ static struct aead_alg sec_aeads[] = { AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE), }; -int sec_register_to_crypto(void) +int sec_register_to_crypto(struct hisi_qm *qm) { int ret; @@ -1651,7 +1650,7 @@ int sec_register_to_crypto(void) return ret; } -void sec_unregister_from_crypto(void) +void sec_unregister_from_crypto(struct hisi_qm *qm) { crypto_unregister_skciphers(sec_skciphers, ARRAY_SIZE(sec_skciphers)); diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h index b2786e17d8fe..9c78edac56a4 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.h +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h @@ -64,7 +64,6 @@ enum sec_addr_type { }; struct sec_sqe_type2 { - /* * mac_len: 0~4 bits * a_key_len: 5~10 bits @@ -120,7 +119,6 @@ struct sec_sqe_type2 { /* c_pad_len_field: 0~1 bits */ __le16 c_pad_len_field; - __le64 long_a_data_len; __le64 a_ivin_addr; __le64 a_key_addr; @@ -211,6 +209,6 @@ struct sec_sqe { struct sec_sqe_type2 type2; }; -int sec_register_to_crypto(void); -void sec_unregister_from_crypto(void); +int sec_register_to_crypto(struct hisi_qm *qm); +void sec_unregister_from_crypto(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index dc68ba76f65e..78a604394140 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -35,15 +35,13 @@ #define SEC_CTX_Q_NUM_MAX 32 #define SEC_CTRL_CNT_CLR_CE 0x301120 -#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0) -#define SEC_ENGINE_PF_CFG_OFF 0x300000 -#define SEC_ACC_COMMON_REG_OFF 0x1000 +#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0) #define SEC_CORE_INT_SOURCE 0x301010 #define SEC_CORE_INT_MASK 0x301000 #define SEC_CORE_INT_STATUS 0x301008 #define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14 -#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFF) -#define SEC_ECC_ADDR(err) ((err) >> 0) +#define SEC_ECC_NUM 16 +#define SEC_ECC_MASH 0xFF #define SEC_CORE_INT_DISABLE 0x0 #define SEC_CORE_INT_ENABLE 0x1ff #define SEC_CORE_INT_CLEAR 0x1ff @@ -55,23 +53,23 @@ #define SEC_RAS_CE_ENB_MSK 0x88 #define SEC_RAS_FE_ENB_MSK 0x0 #define 
SEC_RAS_NFE_ENB_MSK 0x177 -#define SEC_RAS_DISABLE 0x0 -#define SEC_MEM_START_INIT_REG 0x0100 -#define SEC_MEM_INIT_DONE_REG 0x0104 +#define SEC_RAS_DISABLE 0x0 +#define SEC_MEM_START_INIT_REG 0x301100 +#define SEC_MEM_INIT_DONE_REG 0x301104 -#define SEC_CONTROL_REG 0x0200 +#define SEC_CONTROL_REG 0x301200 #define SEC_TRNG_EN_SHIFT 8 #define SEC_CLK_GATE_ENABLE BIT(3) #define SEC_CLK_GATE_DISABLE (~BIT(3)) #define SEC_AXI_SHUTDOWN_ENABLE BIT(12) #define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF -#define SEC_INTERFACE_USER_CTRL0_REG 0x0220 -#define SEC_INTERFACE_USER_CTRL1_REG 0x0224 -#define SEC_SAA_EN_REG 0x0270 -#define SEC_BD_ERR_CHK_EN_REG0 0x0380 -#define SEC_BD_ERR_CHK_EN_REG1 0x0384 -#define SEC_BD_ERR_CHK_EN_REG3 0x038c +#define SEC_INTERFACE_USER_CTRL0_REG 0x301220 +#define SEC_INTERFACE_USER_CTRL1_REG 0x301224 +#define SEC_SAA_EN_REG 0x301270 +#define SEC_BD_ERR_CHK_EN_REG0 0x301380 +#define SEC_BD_ERR_CHK_EN_REG1 0x301384 +#define SEC_BD_ERR_CHK_EN_REG3 0x30138c #define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15)) #define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7)) @@ -95,9 +93,6 @@ #define SEC_SQE_MASK_OFFSET 64 #define SEC_SQE_MASK_LEN 48 -#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \ - SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF) - struct sec_hw_error { u32 int_msk; const char *msg; @@ -117,16 +112,43 @@ static struct hisi_qm_list sec_devices = { }; static const struct sec_hw_error sec_hw_errors[] = { - {.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"}, - {.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"}, - {.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"}, - {.int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint"}, - {.int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint"}, - {.int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint"}, - {.int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint"}, - {.int_msk = BIT(7), .msg = "sec_bd_err_rint"}, - {.int_msk = BIT(8), .msg = "sec_chain_buff_err_rint"}, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "sec_axi_rresp_err_rint" + }, + { + .int_msk = BIT(1), + .msg = "sec_axi_bresp_err_rint" + }, + { + .int_msk = BIT(2), + .msg = "sec_ecc_2bit_err_rint" + }, + { + .int_msk = BIT(3), + .msg = "sec_ecc_1bit_err_rint" + }, + { + .int_msk = BIT(4), + .msg = "sec_req_trng_timeout_rint" + }, + { + .int_msk = BIT(5), + .msg = "sec_fsm_hbeat_rint" + }, + { + .int_msk = BIT(6), + .msg = "sec_channel_req_rng_timeout_rint" + }, + { + .int_msk = BIT(7), + .msg = "sec_bd_err_rint" + }, + { + .int_msk = BIT(8), + .msg = "sec_chain_buff_err_rint" + }, + {} }; static const char * const sec_dbg_file_name[] = { @@ -277,9 +299,7 @@ static u8 sec_get_endian(struct hisi_qm *qm) "cannot access a register in VF!\n"); return SEC_LE; } - reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF + - SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG); - + reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); /* BD little endian mode */ if (!(reg & BIT(0))) return SEC_LE; @@ -299,13 +319,13 @@ static int sec_engine_init(struct hisi_qm *qm) u32 reg; /* disable clock gate control */ - reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); reg &= SEC_CLK_GATE_DISABLE; - writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); - writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG)); + writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG); - ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG), + ret = 
readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG, reg, reg & 0x1, SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); if (ret) { @@ -313,40 +333,40 @@ static int sec_engine_init(struct hisi_qm *qm) return ret; } - reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); reg |= (0x1 << SEC_TRNG_EN_SHIFT); - writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); - reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG)); + reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); reg |= SEC_USER0_SMMU_NORMAL; - writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG)); + writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); - reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); + reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); reg &= SEC_USER1_SMMU_MASK; if (qm->use_sva && qm->ver == QM_HW_V2) reg |= SEC_USER1_SMMU_SVA; else reg |= SEC_USER1_SMMU_NORMAL; - writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG)); + writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); writel(SEC_SINGLE_PORT_MAX_TRANS, qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); - writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG)); + writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); /* Enable sm4 extra mode, as ctr/ecb */ writel_relaxed(SEC_BD_ERR_CHK_EN0, - SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0)); + qm->io_base + SEC_BD_ERR_CHK_EN_REG0); /* Enable sm4 xts mode multiple iv */ writel_relaxed(SEC_BD_ERR_CHK_EN1, - SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1)); + qm->io_base + SEC_BD_ERR_CHK_EN_REG1); writel_relaxed(SEC_BD_ERR_CHK_EN3, - SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3)); + qm->io_base + SEC_BD_ERR_CHK_EN_REG3); /* config endian */ - reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG)); + reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); reg |= sec_get_endian(qm); - writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); return 0; } @@ -406,7 +426,7 @@ static void sec_hw_error_enable(struct hisi_qm *qm) return; } - val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); + val = readl(qm->io_base + SEC_CONTROL_REG); /* clear SEC hw error source if having */ writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE); @@ -422,14 +442,14 @@ static void sec_hw_error_enable(struct hisi_qm *qm) /* enable SEC block master OOO when m-bit error occur */ val = val | SEC_AXI_SHUTDOWN_ENABLE; - writel(val, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel(val, qm->io_base + SEC_CONTROL_REG); } static void sec_hw_error_disable(struct hisi_qm *qm) { u32 val; - val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); + val = readl(qm->io_base + SEC_CONTROL_REG); /* disable RAS int */ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG); @@ -442,7 +462,7 @@ static void sec_hw_error_disable(struct hisi_qm *qm) /* disable SEC block master OOO when m-bit error occur */ val = val & SEC_AXI_SHUTDOWN_DISABLE; - writel(val, SEC_ADDR(qm, SEC_CONTROL_REG)); + writel(val, qm->io_base + SEC_CONTROL_REG); } static u32 sec_current_qm_read(struct sec_debug_file *file) @@ -712,7 +732,8 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) err_val = readl(qm->io_base + SEC_CORE_SRAM_ECC_ERR_INFO); dev_err(dev, "multi ecc sram num=0x%x\n", - SEC_ECC_NUM(err_val)); + ((err_val) >> SEC_ECC_NUM) & + SEC_ECC_MASH); } } errs++; @@ -733,9 +754,9 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm) { u32 val; - val = readl(SEC_ADDR(qm, SEC_CONTROL_REG)); - 
writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG)); - writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG)); + val = readl(qm->io_base + SEC_CONTROL_REG); + writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG); + writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG); } static const struct hisi_qm_err_ini sec_err_ini = { diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index 92397f993e23..9ed74611f722 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -62,6 +62,6 @@ struct hisi_zip_sqe { }; int zip_create_qps(struct hisi_qp **qps, int ctx_num, int node); -int hisi_zip_register_to_crypto(void); -void hisi_zip_unregister_from_crypto(void); +int hisi_zip_register_to_crypto(struct hisi_qm *qm); +void hisi_zip_unregister_from_crypto(struct hisi_qm *qm); #endif diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 08b4660b014c..41f69662024a 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -665,7 +665,7 @@ static struct acomp_alg hisi_zip_acomp_gzip = { } }; -int hisi_zip_register_to_crypto(void) +int hisi_zip_register_to_crypto(struct hisi_qm *qm) { int ret; @@ -684,7 +684,7 @@ int hisi_zip_register_to_crypto(void) return ret; } -void hisi_zip_unregister_from_crypto(void) +void hisi_zip_unregister_from_crypto(struct hisi_qm *qm) { crypto_unregister_acomp(&hisi_zip_acomp_gzip); crypto_unregister_acomp(&hisi_zip_acomp_zlib); diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c index b6b25d994af3..2ef312866338 100644 --- a/drivers/crypto/keembay/keembay-ocs-aes-core.c +++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c @@ -1649,8 +1649,10 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev) /* Initialize crypto engine */ aes_dev->engine = crypto_engine_alloc_init(dev, true); - if (!aes_dev->engine) + if (!aes_dev->engine) { + rc = -ENOMEM; goto list_del; + } rc = crypto_engine_start(aes_dev->engine); if (rc) { diff --git a/drivers/crypto/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/keembay/keembay-ocs-hcu-core.c index c4b97b4160e9..322c51a6936f 100644 --- a/drivers/crypto/keembay/keembay-ocs-hcu-core.c +++ b/drivers/crypto/keembay/keembay-ocs-hcu-core.c @@ -1220,8 +1220,10 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev) /* Initialize crypto engine */ hcu_dev->engine = crypto_engine_alloc_init(dev, 1); - if (!hcu_dev->engine) + if (!hcu_dev->engine) { + rc = -ENOMEM; goto list_del; + } rc = crypto_engine_start(hcu_dev->engine); if (rc) { diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c index 13c65deda8e9..446f611726df 100644 --- a/drivers/crypto/nx/nx-common-powernv.c +++ b/drivers/crypto/nx/nx-common-powernv.c @@ -932,8 +932,10 @@ static int __init nx_powernv_probe_vas(struct device_node *pn) ret = find_nx_device_tree(dn, chip_id, vasid, NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip); - if (ret) + if (ret) { + of_node_put(dn); return ret; + } } if (!ct_842 || !ct_gzip) { diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c index 8b090b7ae8c6..a1b77bd7a894 100644 --- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c @@ -169,7 +169,7 @@ out: * @msg: Message to send * @vf_nr: VF number to which the message will be sent * - * Function sends a 
messge from the PF to a VF + * Function sends a message from the PF to a VF * * Return: 0 on success, error code otherwise. */ diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c index 2c98fb63f7b7..e85bd62d134a 100644 --- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c +++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c @@ -8,7 +8,7 @@ * adf_vf2pf_init() - send init msg to PF * @accel_dev: Pointer to acceleration VF device. * - * Function sends an init messge from the VF to a PF + * Function sends an init message from the VF to a PF * * Return: 0 on success, error code otherwise. */ @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(adf_vf2pf_init); * adf_vf2pf_shutdown() - send shutdown msg to PF * @accel_dev: Pointer to acceleration VF device. * - * Function sends a shutdown messge from the VF to a PF + * Function sends a shutdown message from the VF to a PF * * Return: void */ diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index ff78c73c47e3..f998ed58457c 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -718,8 +718,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, int n = sg_nents(sgl); struct qat_alg_buf_list *bufl; struct qat_alg_buf_list *buflout = NULL; - dma_addr_t blp; - dma_addr_t bloutp = 0; + dma_addr_t blp = DMA_MAPPING_ERROR; + dma_addr_t bloutp = DMA_MAPPING_ERROR; struct scatterlist *sg; size_t sz_out, sz = struct_size(bufl, bufers, n + 1); @@ -731,9 +731,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, if (unlikely(!bufl)) return -ENOMEM; - blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, blp))) - goto err_in; + for_each_sg(sgl, sg, n, i) + bufl->bufers[i].addr = DMA_MAPPING_ERROR; for_each_sg(sgl, sg, n, i) { int y = sg_nctr; @@ -750,6 +749,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, sg_nctr++; } bufl->num_bufs = sg_nctr; + blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, blp))) + goto err_in; qat_req->buf.bl = bufl; qat_req->buf.blp = blp; qat_req->buf.sz = sz; @@ -764,10 +766,11 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, dev_to_node(&GET_DEV(inst->accel_dev))); if (unlikely(!buflout)) goto err_in; - bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, bloutp))) - goto err_out; + bufers = buflout->bufers; + for_each_sg(sglout, sg, n, i) + bufers[i].addr = DMA_MAPPING_ERROR; + for_each_sg(sglout, sg, n, i) { int y = sg_nctr; @@ -784,6 +787,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, } buflout->num_bufs = sg_nctr; buflout->num_mapped_bufs = sg_nctr; + bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, bloutp))) + goto err_out; qat_req->buf.blout = buflout; qat_req->buf.bloutp = bloutp; qat_req->buf.sz_out = sz_out; @@ -795,17 +801,21 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, return 0; err_out: + if (!dma_mapping_error(dev, bloutp)) + dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); + n = sg_nents(sglout); for (i = 0; i < n; i++) if (!dma_mapping_error(dev, buflout->bufers[i].addr)) dma_unmap_single(dev, buflout->bufers[i].addr, buflout->bufers[i].len, DMA_BIDIRECTIONAL); - if (!dma_mapping_error(dev, bloutp)) - dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE); kfree(buflout); err_in: + if 
(!dma_mapping_error(dev, blp)) + dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); + n = sg_nents(sgl); for (i = 0; i < n; i++) if (!dma_mapping_error(dev, bufl->bufers[i].addr)) @@ -813,8 +823,6 @@ err_in: bufl->bufers[i].len, DMA_BIDIRECTIONAL); - if (!dma_mapping_error(dev, blp)) - dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); kfree(bufl); dev_err(dev, "Failed to map buf for dma\n"); diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h index cffa9fc628ff..850f257d00f3 100644 --- a/drivers/crypto/qce/cipher.h +++ b/drivers/crypto/qce/cipher.h @@ -40,7 +40,6 @@ struct qce_cipher_reqctx { struct scatterlist result_sg; struct sg_table dst_tbl; struct scatterlist *dst_sg; - struct sg_table src_tbl; struct scatterlist *src_sg; unsigned int cryptlen; struct skcipher_request fallback_req; // keep at the end diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c index a73db2a5637f..dceb9579d87a 100644 --- a/drivers/crypto/qce/common.c +++ b/drivers/crypto/qce/common.c @@ -140,8 +140,7 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size) return cfg; } -static int qce_setup_regs_ahash(struct crypto_async_request *async_req, - u32 totallen, u32 offset) +static int qce_setup_regs_ahash(struct crypto_async_request *async_req) { struct ahash_request *req = ahash_request_cast(async_req); struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm); @@ -295,19 +294,18 @@ static void qce_xtskey(struct qce_device *qce, const u8 *enckey, { u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0}; unsigned int xtsklen = enckeylen / (2 * sizeof(u32)); - unsigned int xtsdusize; qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2, enckeylen / 2); qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen); - /* xts du size 512B */ - xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen); - qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize); + /* Set the data unit size to cryptlen. Anything else causes the + * crypto engine to return incorrect results. 
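	 * The removed code capped the DU size at QCE_SECTOR_SIZE (512
	 * bytes); requests whose length is not a whole number of sectors
	 * are now steered to the fallback cipher in qce_skcipher_crypt()
	 * instead.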
+ */ + qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen); } -static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, - u32 totallen, u32 offset) +static int qce_setup_regs_skcipher(struct crypto_async_request *async_req) { struct skcipher_request *req = skcipher_request_cast(async_req); struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); @@ -367,7 +365,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg); qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen); - qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff); + qce_write(qce, REG_ENCR_SEG_START, 0); if (IS_CTR(flags)) { qce_write(qce, REG_CNTR_MASK, ~0); @@ -376,7 +374,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, qce_write(qce, REG_CNTR_MASK2, ~0); } - qce_write(qce, REG_SEG_SIZE, totallen); + qce_write(qce, REG_SEG_SIZE, rctx->cryptlen); /* get little endianness */ config = qce_config_reg(qce, 1); @@ -388,17 +386,16 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req, } #endif -int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, - u32 offset) +int qce_start(struct crypto_async_request *async_req, u32 type) { switch (type) { #ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER case CRYPTO_ALG_TYPE_SKCIPHER: - return qce_setup_regs_skcipher(async_req, totallen, offset); + return qce_setup_regs_skcipher(async_req); #endif #ifdef CONFIG_CRYPTO_DEV_QCE_SHA case CRYPTO_ALG_TYPE_AHASH: - return qce_setup_regs_ahash(async_req, totallen, offset); + return qce_setup_regs_ahash(async_req); #endif default: return -EINVAL; diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h index 85ba16418a04..3bc244bcca2d 100644 --- a/drivers/crypto/qce/common.h +++ b/drivers/crypto/qce/common.h @@ -94,7 +94,6 @@ struct qce_alg_template { void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len); int qce_check_status(struct qce_device *qce, u32 *status); void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step); -int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen, - u32 offset); +int qce_start(struct crypto_async_request *async_req, u32 type); #endif /* _COMMON_H_ */ diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 61c418c12345..8e6fcf2c21cc 100644 --- a/drivers/crypto/qce/sha.c +++ b/drivers/crypto/qce/sha.c @@ -12,9 +12,15 @@ #include "core.h" #include "sha.h" -/* crypto hw padding constant for first operation */ -#define SHA_PADDING 64 -#define SHA_PADDING_MASK (SHA_PADDING - 1) +struct qce_sha_saved_state { + u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE]; + u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE]; + __be32 byte_count[2]; + unsigned int pending_buflen; + unsigned int flags; + u64 count; + bool first_blk; +}; static LIST_HEAD(ahash_algs); @@ -107,7 +113,7 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req) qce_dma_issue_pending(&qce->dma); - ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0); + ret = qce_start(async_req, tmpl->crypto_alg_type); if (ret) goto error_terminate; @@ -139,97 +145,37 @@ static int qce_ahash_init(struct ahash_request *req) static int qce_ahash_export(struct ahash_request *req, void *out) { - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct qce_sha_reqctx *rctx = ahash_request_ctx(req); - unsigned long flags = rctx->flags; - unsigned int digestsize = crypto_ahash_digestsize(ahash); - unsigned int blocksize = - 
crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); - - if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { - struct sha1_state *out_state = out; - - out_state->count = rctx->count; - qce_cpu_to_be32p_array((__be32 *)out_state->state, - rctx->digest, digestsize); - memcpy(out_state->buffer, rctx->buf, blocksize); - } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { - struct sha256_state *out_state = out; - - out_state->count = rctx->count; - qce_cpu_to_be32p_array((__be32 *)out_state->state, - rctx->digest, digestsize); - memcpy(out_state->buf, rctx->buf, blocksize); - } else { - return -EINVAL; - } + struct qce_sha_saved_state *export_state = out; - return 0; -} - -static int qce_import_common(struct ahash_request *req, u64 in_count, - const u32 *state, const u8 *buffer, bool hmac) -{ - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct qce_sha_reqctx *rctx = ahash_request_ctx(req); - unsigned int digestsize = crypto_ahash_digestsize(ahash); - unsigned int blocksize; - u64 count = in_count; - - blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); - rctx->count = in_count; - memcpy(rctx->buf, buffer, blocksize); - - if (in_count <= blocksize) { - rctx->first_blk = 1; - } else { - rctx->first_blk = 0; - /* - * For HMAC, there is a hardware padding done when first block - * is set. Therefore the byte_count must be incremened by 64 - * after the first block operation. - */ - if (hmac) - count += SHA_PADDING; - } - - rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK); - rctx->byte_count[1] = (__force __be32)(count >> 32); - qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state, - digestsize); - rctx->buflen = (unsigned int)(in_count & (blocksize - 1)); + memcpy(export_state->pending_buf, rctx->buf, rctx->buflen); + memcpy(export_state->partial_digest, rctx->digest, sizeof(rctx->digest)); + export_state->byte_count[0] = rctx->byte_count[0]; + export_state->byte_count[1] = rctx->byte_count[1]; + export_state->pending_buflen = rctx->buflen; + export_state->count = rctx->count; + export_state->first_blk = rctx->first_blk; + export_state->flags = rctx->flags; return 0; } static int qce_ahash_import(struct ahash_request *req, const void *in) { - struct qce_sha_reqctx *rctx; - unsigned long flags; - bool hmac; - int ret; - - ret = qce_ahash_init(req); - if (ret) - return ret; - - rctx = ahash_request_ctx(req); - flags = rctx->flags; - hmac = IS_SHA_HMAC(flags); - - if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) { - const struct sha1_state *state = in; - - ret = qce_import_common(req, state->count, state->state, - state->buffer, hmac); - } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) { - const struct sha256_state *state = in; + struct qce_sha_reqctx *rctx = ahash_request_ctx(req); + const struct qce_sha_saved_state *import_state = in; - ret = qce_import_common(req, state->count, state->state, - state->buf, hmac); - } + memset(rctx, 0, sizeof(*rctx)); + rctx->count = import_state->count; + rctx->buflen = import_state->pending_buflen; + rctx->first_blk = import_state->first_blk; + rctx->flags = import_state->flags; + rctx->byte_count[0] = import_state->byte_count[0]; + rctx->byte_count[1] = import_state->byte_count[1]; + memcpy(rctx->buf, import_state->pending_buf, rctx->buflen); + memcpy(rctx->digest, import_state->partial_digest, sizeof(rctx->digest)); - return ret; + return 0; } static int qce_ahash_update(struct ahash_request *req) @@ -270,6 +216,25 @@ static int qce_ahash_update(struct ahash_request *req) /* calculate how many bytes will be hashed 
later */ hash_later = total % blocksize; + + /* + * At this point there is more than one block of data. If the + * data available is an exact multiple of the block size, hold the + * last block back so qce_ahash_final can send it (with the last + * block bit set) if this really is the end of the stream; otherwise + * the held-back block is simply sent as part of the next update. + * Without this, an exactly block-aligned stream would yield a wrong + * digest: qce_ahash_final would see rctx->buflen == 0 and return + * without doing anything, so no digest would be copied to the + * destination result buffer. qce_ahash_final cannot simply be + * allowed to proceed with rctx->buflen == 0, because the crypto + * engine BAM does not allow zero-length transfers. + */ + if (!hash_later) + hash_later = blocksize; + if (hash_later) { unsigned int src_offset = req->nbytes - hash_later; scatterwalk_map_and_copy(rctx->buf, req->src, src_offset, @@ -450,7 +415,7 @@ static const struct qce_ahash_def ahash_def[] = { .drv_name = "sha1-qce", .digestsize = SHA1_DIGEST_SIZE, .blocksize = SHA1_BLOCK_SIZE, - .statesize = sizeof(struct sha1_state), + .statesize = sizeof(struct qce_sha_saved_state), .std_iv = std_iv_sha1, }, { @@ -459,7 +424,7 @@ static const struct qce_ahash_def ahash_def[] = { .drv_name = "sha256-qce", .digestsize = SHA256_DIGEST_SIZE, .blocksize = SHA256_BLOCK_SIZE, - .statesize = sizeof(struct sha256_state), + .statesize = sizeof(struct qce_sha_saved_state), .std_iv = std_iv_sha256, }, { @@ -468,7 +433,7 @@ static const struct qce_ahash_def ahash_def[] = { .drv_name = "hmac-sha1-qce", .digestsize = SHA1_DIGEST_SIZE, .blocksize = SHA1_BLOCK_SIZE, - .statesize = sizeof(struct sha1_state), + .statesize = sizeof(struct qce_sha_saved_state), .std_iv = std_iv_sha1, }, { @@ -477,7 +442,7 @@ static const struct qce_ahash_def ahash_def[] = { .drv_name = "hmac-sha256-qce", .digestsize = SHA256_DIGEST_SIZE, .blocksize = SHA256_BLOCK_SIZE, - .statesize = sizeof(struct sha256_state), + .statesize = sizeof(struct qce_sha_saved_state), .std_iv = std_iv_sha256, }, }; diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c index a2d3da0ad95f..c0a0d8c4fce1 100644 --- a/drivers/crypto/qce/skcipher.c +++ b/drivers/crypto/qce/skcipher.c @@ -8,6 +8,7 @@ #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/types.h> +#include <linux/errno.h> #include <crypto/aes.h> #include <crypto/internal/des.h> #include <crypto/internal/skcipher.h> @@ -143,7 +144,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req) qce_dma_issue_pending(&qce->dma); - ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0); + ret = qce_start(async_req, tmpl->crypto_alg_type); if (ret) goto error_terminate; @@ -167,16 +168,33 @@ static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk); struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); unsigned long flags = to_cipher_tmpl(ablk)->alg_flags; + unsigned int __keylen; int ret; if (!key || !keylen) return -EINVAL; - switch (IS_XTS(flags) ? keylen >> 1 : keylen) { + /* + * AES-XTS with key1 == key2 is not supported by the crypto engine. + * Revisit to request a fallback cipher in this case. 
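	 * Note that xts_verify_key() only rejects equal key halves when
	 * fips_enabled is set, so the explicit memcmp() below is still
	 * needed for the engine.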
+ */ + if (IS_XTS(flags)) { + __keylen = keylen >> 1; + if (!memcmp(key, key + __keylen, __keylen)) + return -ENOKEY; + } else { + __keylen = keylen; + } + + switch (__keylen) { case AES_KEYSIZE_128: case AES_KEYSIZE_256: memcpy(ctx->enc_key, key, keylen); break; + case AES_KEYSIZE_192: + break; + default: + return -EINVAL; } ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); @@ -204,12 +222,27 @@ static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key, unsigned int keylen) { struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk); + u32 _key[6]; int err; err = verify_skcipher_des3_key(ablk, key); if (err) return err; + /* + * The crypto engine does not support any two keys + * being the same for triple des algorithms. The + * verify_skcipher_des3_key does not check for all the + * below conditions. Return -ENOKEY in case any two keys + * are the same. Revisit to see if a fallback cipher + * is needed to handle this condition. + */ + memcpy(_key, key, DES3_EDE_KEY_SIZE); + if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) || + !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) || + !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5]))) + return -ENOKEY; + ctx->enc_keylen = keylen; memcpy(ctx->enc_key, key, keylen); return 0; @@ -221,6 +254,7 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); + unsigned int blocksize = crypto_skcipher_blocksize(tfm); int keylen; int ret; @@ -228,14 +262,31 @@ static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen; - /* qce is hanging when AES-XTS request len > QCE_SECTOR_SIZE and - * is not a multiple of it; pass such requests to the fallback + /* CE does not handle 0 length messages */ + if (!req->cryptlen) + return 0; + + /* + * ECB and CBC algorithms require message lengths to be + * multiples of block size. 
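	 * CTR is a stream mode, and XTS requests of odd length are
	 * diverted to the fallback cipher further down, so only ECB and
	 * CBC need this check.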
+ */ + if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags)) + if (!IS_ALIGNED(req->cryptlen, blocksize)) + return -EINVAL; + + /* + * Conditions for requesting a fallback cipher: + * AES-192 (not supported by crypto engine (CE)) + * AES-XTS request with len <= 512 bytes (not recommended to use CE) + * AES-XTS request with len > QCE_SECTOR_SIZE and + * is not a multiple of it. (Revisit this condition to check whether + * it is needed in all versions of CE.) */ if (IS_AES(rctx->flags) && - (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) || - req->cryptlen <= aes_sw_max_len) || - (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE && - req->cryptlen % QCE_SECTOR_SIZE))) { + ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) || + (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) || + (req->cryptlen > QCE_SECTOR_SIZE && + req->cryptlen % QCE_SECTOR_SIZE))))) { skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); skcipher_request_set_callback(&rctx->fallback_req, req->base.flags, @@ -307,7 +358,7 @@ static const struct qce_skcipher_def skcipher_def[] = { .name = "ecb(aes)", .drv_name = "ecb-aes-qce", .blocksize = AES_BLOCK_SIZE, - .ivsize = AES_BLOCK_SIZE, + .ivsize = 0, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, }, diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 682c8a450a57..8ed08130196f 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -401,7 +401,7 @@ static const struct samsung_aes_variant exynos_aes_data = { static const struct samsung_aes_variant exynos5433_slim_aes_data = { .aes_offset = 0x400, .hash_offset = 0x800, - .clk_names = { "pclk", "aclk", }, + .clk_names = { "aclk", "pclk", }, }; static const struct of_device_id s5p_sss_dt_match[] = { diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index fcde59c65a81..cb3d6b1c655d 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -165,6 +165,8 @@ static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req) * crypto_free_acomp() -- free ACOMPRESS tfm handle * * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_acomp(struct crypto_acomp *tfm) { diff --git a/include/crypto/aead.h b/include/crypto/aead.h index fcc12c593ef8..e728469c4ccc 100644 --- a/include/crypto/aead.h +++ b/include/crypto/aead.h @@ -185,6 +185,8 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) /** * crypto_free_aead() - zeroize and free aead handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_aead(struct crypto_aead *tfm) { diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 1d3aa252caba..5764b46bd1ec 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -174,6 +174,8 @@ static inline struct crypto_akcipher *crypto_akcipher_reqtfm( * crypto_free_akcipher() - free AKCIPHER tfm handle * * @tfm: AKCIPHER tfm handle allocated with crypto_alloc_akcipher() + * + * If @tfm is a NULL or error pointer, this function does nothing. 
*/ static inline void crypto_free_akcipher(struct crypto_akcipher *tfm) { diff --git a/include/crypto/ecc_curve.h b/include/crypto/ecc_curve.h new file mode 100644 index 000000000000..70964781eb68 --- /dev/null +++ b/include/crypto/ecc_curve.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 HiSilicon */ + +#ifndef _CRYPTO_ECC_CURVE_H +#define _CRYPTO_ECC_CURVE_H + +#include <linux/types.h> + +/** + * struct ecc_point - elliptic curve point in affine coordinates + * + * @x: X coordinate in vli form. + * @y: Y coordinate in vli form. + * @ndigits: Length of vlis in u64 qwords. + */ +struct ecc_point { + u64 *x; + u64 *y; + u8 ndigits; +}; + +/** + * struct ecc_curve - definition of elliptic curve + * + * @name: Short name of the curve. + * @g: Generator point of the curve. + * @p: Prime number, if Barrett's reduction is used for this curve + * pre-calculated value 'mu' is appended to the @p after ndigits. + * Use of Barrett's reduction is heuristically determined in + * vli_mmod_fast(). + * @n: Order of the curve group. + * @a: Curve parameter a. + * @b: Curve parameter b. + */ +struct ecc_curve { + char *name; + struct ecc_point g; + u64 *p; + u64 *n; + u64 *a; + u64 *b; +}; + +/** + * ecc_get_curve() - get elliptic curve + * @curve_id: Curve ID, as defined in 'include/crypto/ecdh.h'. + * + * Returns the curve if successful, NULL otherwise. + */ +const struct ecc_curve *ecc_get_curve(unsigned int curve_id); + +/** + * ecc_get_curve25519() - get the curve25519 curve + * + * Returns the curve25519 curve definition. + */ +const struct ecc_curve *ecc_get_curve25519(void); + +#endif diff --git a/include/crypto/ecdh.h b/include/crypto/ecdh.h index e4ba1de961e4..a9f98078d29c 100644 --- a/include/crypto/ecdh.h +++ b/include/crypto/ecdh.h @@ -30,12 +30,10 @@ /** * struct ecdh - define an ECDH private key * - * @curve_id: ECC curve the key is based on. * @key: Private ECDH key * @key_size: Size of the private ECDH key */ struct ecdh { - unsigned short curve_id; char *key; unsigned short key_size; }; diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 13f8a6a54ca8..b2bc1e46e86a 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -281,6 +281,8 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm) /** * crypto_free_ahash() - zeroize and free the ahash handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_ahash(struct crypto_ahash *tfm) { @@ -724,6 +726,8 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) /** * crypto_free_shash() - zeroize and free the message digest handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_shash(struct crypto_shash *tfm) { diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index 88b591215d5c..cccceadc164b 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -154,6 +154,8 @@ static inline void crypto_kpp_set_flags(struct crypto_kpp *tfm, u32 flags) * crypto_free_kpp() - free KPP tfm handle * * @tfm: KPP tfm handle allocated with crypto_alloc_kpp() + * + * If @tfm is a NULL or error pointer, this function does nothing. 
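 *
 * Illustrative call sequence (algorithm name as used by the Bluetooth
 * changes later in this patch):
 *
 *	tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_kpp(tfm);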
*/ static inline void crypto_free_kpp(struct crypto_kpp *tfm) { diff --git a/include/crypto/rng.h b/include/crypto/rng.h index 8b4b844b4eef..17bb3673d3c1 100644 --- a/include/crypto/rng.h +++ b/include/crypto/rng.h @@ -111,6 +111,8 @@ static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm) /** * crypto_free_rng() - zeroize and free RNG handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_rng(struct crypto_rng *tfm) { diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 6a733b171a5d..ef0fc9ed4342 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -196,6 +196,8 @@ static inline struct crypto_tfm *crypto_skcipher_tfm( /** * crypto_free_skcipher() - zeroize and free cipher handle * @tfm: cipher handle to be freed + * + * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_skcipher(struct crypto_skcipher *tfm) { diff --git a/net/bluetooth/ecdh_helper.c b/net/bluetooth/ecdh_helper.c index 3226fe02e875..989401f116e9 100644 --- a/net/bluetooth/ecdh_helper.c +++ b/net/bluetooth/ecdh_helper.c @@ -126,8 +126,6 @@ int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32]) int err; struct ecdh p = {0}; - p.curve_id = ECC_CURVE_NIST_P256; - if (private_key) { tmp = kmalloc(32, GFP_KERNEL); if (!tmp) diff --git a/net/bluetooth/selftest.c b/net/bluetooth/selftest.c index f71c6fa65fb3..f49604d44b87 100644 --- a/net/bluetooth/selftest.c +++ b/net/bluetooth/selftest.c @@ -205,7 +205,7 @@ static int __init test_ecdh(void) calltime = ktime_get(); - tfm = crypto_alloc_kpp("ecdh", 0, 0); + tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0); if (IS_ERR(tfm)) { BT_ERR("Unable to create ECDH crypto context"); err = PTR_ERR(tfm); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index b0c1ee110eff..21e445993f39 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -1386,7 +1386,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) goto zfree_smp; } - smp->tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0); + smp->tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0); if (IS_ERR(smp->tfm_ecdh)) { BT_ERR("Unable to create ECDH crypto context"); goto free_shash; @@ -3281,7 +3281,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid) return ERR_CAST(tfm_cmac); } - tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0); + tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0); if (IS_ERR(tfm_ecdh)) { BT_ERR("Unable to create ECDH crypto context"); crypto_free_shash(tfm_cmac); @@ -3806,7 +3806,7 @@ int __init bt_selftest_smp(void) return PTR_ERR(tfm_cmac); } - tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0); + tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0); if (IS_ERR(tfm_ecdh)) { BT_ERR("Unable to create ECDH crypto context"); crypto_free_shash(tfm_cmac);
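Taken together, the ECDH API changes move curve selection out of struct ecdh and into the algorithm name. A hedged sketch of what a caller looks like after this patch (error handling trimmed to the essentials; mirrors the Bluetooth usage above, with a hypothetical function name):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <crypto/ecdh.h>
#include <crypto/kpp.h>

static int ecdh_set_privkey_example(const u8 private_key[32])
{
	struct crypto_kpp *tfm;
	struct ecdh p = {};	/* note: no .curve_id field any more */
	unsigned int buf_len;
	char *buf;
	int err;

	/* The curve is chosen by name, not by a field in struct ecdh. */
	tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	p.key = (char *)private_key;
	p.key_size = 32;

	buf_len = crypto_ecdh_key_len(&p);
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto free_tfm;
	}

	err = crypto_ecdh_encode_key(buf, buf_len, &p);
	if (!err)
		err = crypto_kpp_set_secret(tfm, buf, buf_len);

	kfree(buf);
free_tfm:
	crypto_free_kpp(tfm);
	return err;
}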