Diffstat (limited to 'include/vdso')
-rw-r--r--  include/vdso/datapage.h  | 27 ++++++++++++++++++++-------
-rw-r--r--  include/vdso/getrandom.h | 46 ++++++++++++++++++++++++++++++++++++++++++++++++++
-rw-r--r--  include/vdso/math64.h    | 38 ++++++++++++++++++++++++++++++++++++++
3 files changed, 104 insertions, 7 deletions
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
index 5d5c0b8efff2..b85f24cac3f5 100644
--- a/include/vdso/datapage.h
+++ b/include/vdso/datapage.h
@@ -19,12 +19,6 @@
 #include <vdso/time32.h>
 #include <vdso/time64.h>
 
-#ifdef CONFIG_ARM64
-#include <asm/page-def.h>
-#else
-#include <asm/page.h>
-#endif
-
 #ifdef CONFIG_ARCH_HAS_VDSO_DATA
 #include <asm/vdso/data.h>
 #else
@@ -67,6 +61,7 @@ struct vdso_timestamp {
  * @seq:		timebase sequence counter
  * @clock_mode:		clock mode
  * @cycle_last:		timebase at clocksource init
+ * @max_cycles:		maximum cycles which won't overflow 64bit multiplication
  * @mask:		clocksource mask
  * @mult:		clocksource multiplier
  * @shift:		clocksource shift
@@ -82,6 +77,10 @@ struct vdso_timestamp {
  * vdso_data will be accessed by 64 bit and compat code at the same time
  * so we should be careful before modifying this structure.
  *
+ * The ordering of the struct members is optimized to have fast access to the
+ * often required struct members which are related to CLOCK_REALTIME and
+ * CLOCK_MONOTONIC. This information is stored in the first cache lines.
+ *
  * @basetime is used to store the base time for the system wide time getter
  * VVAR page.
  *
@@ -98,6 +97,9 @@ struct vdso_data {
 
 	s32			clock_mode;
 	u64			cycle_last;
+#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT
+	u64			max_cycles;
+#endif
 	u64			mask;
 	u32			mult;
 	u32			shift;
@@ -115,6 +117,16 @@ struct vdso_data {
 	struct arch_vdso_data	arch_data;
 };
 
+/**
+ * struct vdso_rng_data - vdso RNG state information
+ * @generation:	counter representing the number of RNG reseeds
+ * @is_ready:	boolean signaling whether the RNG is initialized
+ */
+struct vdso_rng_data {
+	u64	generation;
+	u8	is_ready;
+};
+
 /*
  * We use the hidden visibility to prevent the compiler from generating a GOT
  * relocation. Not only is going through a GOT useless (the entry couldn't and
@@ -126,13 +138,14 @@ struct vdso_data {
  */
 extern struct vdso_data _vdso_data[CS_BASES]	__attribute__((visibility("hidden")));
 extern struct vdso_data _timens_data[CS_BASES]	__attribute__((visibility("hidden")));
+extern struct vdso_rng_data _vdso_rng_data __attribute__((visibility("hidden")));
 
 /**
  * union vdso_data_store - Generic vDSO data page
  */
 union vdso_data_store {
 	struct vdso_data	data[CS_BASES];
-	u8			page[PAGE_SIZE];
+	u8			page[1U << CONFIG_PAGE_SHIFT];
 };
 
 /*
diff --git a/include/vdso/getrandom.h b/include/vdso/getrandom.h
new file mode 100644
index 000000000000..a8b7c14b0ae0
--- /dev/null
+++ b/include/vdso/getrandom.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef _VDSO_GETRANDOM_H
+#define _VDSO_GETRANDOM_H
+
+#include <linux/types.h>
+
+#define CHACHA_KEY_SIZE		32
+#define CHACHA_BLOCK_SIZE	64
+
+/**
+ * struct vgetrandom_state - State used by vDSO getrandom().
+ *
+ * @batch:	One and a half ChaCha20 blocks of buffered RNG output.
+ *
+ * @key:	Key to be used for generating next batch.
+ *
+ * @batch_key:	Union of the prior two members, which is exactly two full
+ *		ChaCha20 blocks in size, so that @batch and @key can be filled
+ *		together.
+ *
+ * @generation:	Snapshot of @rng_info->generation in the vDSO data page at
+ *		the time @key was generated.
+ *
+ * @pos:	Offset into @batch of the next available random byte.
+ *
+ * @in_use:	Reentrancy guard for reusing a state within the same thread
+ *		due to signal handlers.
+ */
+struct vgetrandom_state {
+	union {
+		struct {
+			u8	batch[CHACHA_BLOCK_SIZE * 3 / 2];
+			u32	key[CHACHA_KEY_SIZE / sizeof(u32)];
+		};
+		u8	batch_key[CHACHA_BLOCK_SIZE * 2];
+	};
+	u64	generation;
+	u8	pos;
+	bool	in_use;
+};
+
+#endif /* _VDSO_GETRANDOM_H */
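The union layout is the subtle part of struct vgetrandom_state: @batch holds one and a half ChaCha20 blocks (96 bytes) of buffered output, @key holds a 32-byte key, and together they exactly fill the two full ChaCha20 blocks (128 bytes) of @batch_key, which is what lets a single ChaCha20 pass refill the output batch and the forward-secret next key at once. A minimal standalone sketch of that size arithmetic, restating the struct with <stdint.h> types in place of the kernel's u8/u32/u64 (the copy below is illustrative, not the header itself):

/* Standalone layout check for the vgetrandom_state union (illustrative copy). */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHACHA_KEY_SIZE		32
#define CHACHA_BLOCK_SIZE	64

struct vgetrandom_state {
	union {
		struct {
			uint8_t		batch[CHACHA_BLOCK_SIZE * 3 / 2];	 /* 96 bytes */
			uint32_t	key[CHACHA_KEY_SIZE / sizeof(uint32_t)]; /* 32 bytes */
		};
		uint8_t	batch_key[CHACHA_BLOCK_SIZE * 2];			 /* 128 bytes */
	};
	uint64_t	generation;
	uint8_t		pos;
	bool		in_use;
};

int main(void)
{
	struct vgetrandom_state s;

	/* @batch plus @key must exactly overlay @batch_key... */
	static_assert(sizeof(s.batch) + sizeof(s.key) == sizeof(s.batch_key),
		      "batch + key must fill batch_key exactly");
	/* ...and @batch_key must be exactly two full ChaCha20 blocks. */
	static_assert(sizeof(s.batch_key) == 2 * CHACHA_BLOCK_SIZE,
		      "batch_key must be two ChaCha20 blocks");
	printf("batch=%zu key=%zu batch_key=%zu\n",
	       sizeof(s.batch), sizeof(s.key), sizeof(s.batch_key));
	return 0;
}

Since @pos indexes into the 96-byte @batch, a u8 suffices; @generation mirrors vdso_rng_data.generation from datapage.h above, so a consumer can detect a reseed and rekey before handing out more bytes.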
diff --git a/include/vdso/math64.h b/include/vdso/math64.h
index 7da703ee5561..22ae212f8b28 100644
--- a/include/vdso/math64.h
+++ b/include/vdso/math64.h
@@ -21,4 +21,42 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return ret;
 }
 
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_add_u64_shr
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#else
+
+#ifndef mul_u64_u32_add_u64_shr
+#ifndef mul_u32_u32
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+	return (u64)a * b;
+}
+#define mul_u32_u32 mul_u32_u32
+#endif
+static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
+{
+	u32 ah = a >> 32, al = a;
+	bool ovf;
+	u64 ret;
+
+	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
+	ret >>= shift;
+	if (ovf && shift)
+		ret += 1ULL << (64 - shift);
+	if (ah)
+		ret += mul_u32_u32(ah, mul) << (32 - shift);
+
+	return ret;
+}
+#endif /* mul_u64_u32_add_u64_shr */
+
+#endif
+
 #endif /* __VDSO_MATH64_H */
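On architectures without 128-bit arithmetic, the fallback above splits a into 32-bit halves: al * mul plus b can carry out of 64 bits, and the lost carry is re-added after the shift as 1 << (64 - shift), while ah * mul enters the sum pre-scaled by 2^32, hence the << (32 - shift). That last shift also means the fallback assumes shift <= 32, which holds for the clocksource mult/shift pairs this helper serves. A standalone cross-check of the two paths, assuming a GCC- or Clang-style compiler on a 64-bit host so that both __int128 and __builtin_add_overflow are available:

/* Cross-check the generic fallback against the __int128 reference. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t ref(uint64_t a, uint32_t mul, uint64_t b, unsigned int shift)
{
	/* Full 128-bit product, truncated to 64 bits after the shift. */
	return (uint64_t)((((unsigned __int128)a * mul) + b) >> shift);
}

static uint64_t fallback(uint64_t a, uint32_t mul, uint64_t b, unsigned int shift)
{
	uint32_t ah = a >> 32, al = a;
	uint64_t ret;
	bool ovf;

	/* Low 32 bits of a times mul, plus b, tracking the 64-bit carry-out. */
	ovf = __builtin_add_overflow((uint64_t)al * mul, b, &ret);
	ret >>= shift;
	if (ovf && shift)
		ret += 1ULL << (64 - shift);	/* re-inject the lost carry */
	if (ah)
		ret += ((uint64_t)ah * mul) << (32 - shift);

	return ret;
}

int main(void)
{
	/* Edge-ish cases: carry out of the low product, mixed bits, shift 0. */
	const struct { uint64_t a, b; uint32_t mul; unsigned int shift; } t[] = {
		{ 0xffffffffffffffffULL, 0xffffffffffffffffULL, 0xffffffffu, 32 },
		{ 0x123456789abcdef0ULL, 0xfedcba9876543210ULL, 0x89abcdefu, 24 },
		{ 0xffffffffULL, 1, 0xffffffffu, 0 },
	};

	for (unsigned int i = 0; i < sizeof(t) / sizeof(t[0]); i++) {
		uint64_t r = ref(t[i].a, t[i].mul, t[i].b, t[i].shift);
		uint64_t f = fallback(t[i].a, t[i].mul, t[i].b, t[i].shift);

		printf("%016llx %s %016llx\n", (unsigned long long)r,
		       r == f ? "==" : "!=", (unsigned long long)f);
	}
	return 0;
}

Compiled with e.g. cc -O2 cross_check.c (a hypothetical filename), all three lines should print '==': both paths agree modulo 2^64, matching the (u64) truncation in the __int128 variant.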