Skip to content

Commit 37beb42

Browse files
ryanhrobkees
authored and committed
randomize_kstack: Maintain kstack_offset per task
kstack_offset was previously maintained per-cpu, but this caused a couple of issues. So let's instead make it per-task. Issue 1: add_random_kstack_offset() and choose_random_kstack_offset() expected and required to be called with interrupts and preemption disabled so that it could manipulate per-cpu state. But arm64, loongarch and risc-v are calling them with interrupts and preemption enabled. I don't _think_ this causes any functional issues, but it's certainly unexpected and could lead to manipulating the wrong cpu's state, which could cause a minor performance degradation due to bouncing the cache lines. By maintaining the state per-task those functions can safely be called in preemptible context. Issue 2: add_random_kstack_offset() is called before executing the syscall and expands the stack using a previously chosen random offset. choose_random_kstack_offset() is called after executing the syscall and chooses and stores a new random offset for the next syscall. With per-cpu storage for this offset, an attacker could force cpu migration during the execution of the syscall and prevent the offset from being updated for the original cpu such that it is predictable for the next syscall on that cpu. By maintaining the state per-task, this problem goes away because the per-task random offset is updated after the syscall regardless of which cpu it is executing on. Fixes: 39218ff ("stack: Optionally randomize kernel stack offset each syscall") Closes: https://lore.kernel.org/all/dd8c37bc-795f-4c7a-9086-69e584d8ab24@arm.com/ Cc: stable@vger.kernel.org Acked-by: Mark Rutland <mark.rutland@arm.com> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com> Link: https://patch.msgid.link/20260303150840.3789438-2-ryan.roberts@arm.com Signed-off-by: Kees Cook <kees@kernel.org>
1 parent 11439c4 commit 37beb42

4 files changed

Lines changed: 21 additions & 12 deletions

File tree

include/linux/randomize_kstack.h

Lines changed: 15 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -9,7 +9,6 @@
99

1010
DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
1111
randomize_kstack_offset);
12-
DECLARE_PER_CPU(u32, kstack_offset);
1312

1413
/*
1514
* Do not use this anywhere else in the kernel. This is used here because
@@ -50,15 +49,14 @@ DECLARE_PER_CPU(u32, kstack_offset);
5049
* add_random_kstack_offset - Increase stack utilization by previously
5150
* chosen random offset
5251
*
53-
* This should be used in the syscall entry path when interrupts and
54-
* preempt are disabled, and after user registers have been stored to
55-
* the stack. For testing the resulting entropy, please see:
56-
* tools/testing/selftests/lkdtm/stack-entropy.sh
52+
* This should be used in the syscall entry path after user registers have been
53+
* stored to the stack. Preemption may be enabled. For testing the resulting
54+
* entropy, please see: tools/testing/selftests/lkdtm/stack-entropy.sh
5755
*/
5856
#define add_random_kstack_offset() do { \
5957
if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
6058
&randomize_kstack_offset)) { \
61-
u32 offset = raw_cpu_read(kstack_offset); \
59+
u32 offset = current->kstack_offset; \
6260
u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset)); \
6361
/* Keep allocation even after "ptr" loses scope. */ \
6462
asm volatile("" :: "r"(ptr) : "memory"); \
@@ -69,9 +67,9 @@ DECLARE_PER_CPU(u32, kstack_offset);
6967
* choose_random_kstack_offset - Choose the random offset for the next
7068
* add_random_kstack_offset()
7169
*
72-
* This should only be used during syscall exit when interrupts and
73-
* preempt are disabled. This position in the syscall flow is done to
74-
* frustrate attacks from userspace attempting to learn the next offset:
70+
* This should only be used during syscall exit. Preemption may be enabled. This
71+
* position in the syscall flow is done to frustrate attacks from userspace
72+
* attempting to learn the next offset:
7573
* - Maximize the timing uncertainty visible from userspace: if the
7674
* offset is chosen at syscall entry, userspace has much more control
7775
* over the timing between choosing offsets. "How long will we be in
@@ -85,14 +83,20 @@ DECLARE_PER_CPU(u32, kstack_offset);
8583
#define choose_random_kstack_offset(rand) do { \
8684
if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
8785
&randomize_kstack_offset)) { \
88-
u32 offset = raw_cpu_read(kstack_offset); \
86+
u32 offset = current->kstack_offset; \
8987
offset = ror32(offset, 5) ^ (rand); \
90-
raw_cpu_write(kstack_offset, offset); \
88+
current->kstack_offset = offset; \
9189
} \
9290
} while (0)
91+
92+
static inline void random_kstack_task_init(struct task_struct *tsk)
93+
{
94+
tsk->kstack_offset = 0;
95+
}
9396
#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
9497
#define add_random_kstack_offset() do { } while (0)
9598
#define choose_random_kstack_offset(rand) do { } while (0)
99+
#define random_kstack_task_init(tsk) do { } while (0)
96100
#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
97101

98102
#endif

include/linux/sched.h

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1592,6 +1592,10 @@ struct task_struct {
15921592
unsigned long prev_lowest_stack;
15931593
#endif
15941594

1595+
#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
1596+
u32 kstack_offset;
1597+
#endif
1598+
15951599
#ifdef CONFIG_X86_MCE
15961600
void __user *mce_vaddr;
15971601
__u64 mce_kflags;

init/main.c

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -833,7 +833,6 @@ static inline void initcall_debug_enable(void)
833833
#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
834834
DEFINE_STATIC_KEY_MAYBE_RO(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
835835
randomize_kstack_offset);
836-
DEFINE_PER_CPU(u32, kstack_offset);
837836

838837
static int __init early_randomize_kstack_offset(char *buf)
839838
{

kernel/fork.c

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -95,6 +95,7 @@
9595
#include <linux/thread_info.h>
9696
#include <linux/kstack_erase.h>
9797
#include <linux/kasan.h>
98+
#include <linux/randomize_kstack.h>
9899
#include <linux/scs.h>
99100
#include <linux/io_uring.h>
100101
#include <linux/io_uring_types.h>
@@ -2233,6 +2234,7 @@ __latent_entropy struct task_struct *copy_process(
22332234
if (retval)
22342235
goto bad_fork_cleanup_io;
22352236

2237+
random_kstack_task_init(p);
22362238
stackleak_task_init(p);
22372239

22382240
if (pid != &init_struct_pid) {

0 commit comments

Comments
 (0)