Skip to content

Commit d568788

Browse files
committed
Merge tag 'hardening-v7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull hardening updates from Kees Cook:

 - randomize_kstack: Improve implementation across arches (Ryan Roberts)

 - lkdtm/fortify: Drop unneeded FORTIFY_STR_OBJECT test

 - refcount: Remove unused __signed_wrap function annotations

* tag 'hardening-v7.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  lkdtm/fortify: Drop unneeded FORTIFY_STR_OBJECT test
  refcount: Remove unused __signed_wrap function annotations
  randomize_kstack: Unify random source across arches
  randomize_kstack: Maintain kstack_offset per task
2 parents cea4a90 + cf2f06f commit d568788

17 files changed

Lines changed: 49 additions & 154 deletions

File tree

arch/Kconfig

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1518,9 +1518,8 @@ config HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
15181518
def_bool n
15191519
help
15201520
An arch should select this symbol if it can support kernel stack
1521-
offset randomization with calls to add_random_kstack_offset()
1522-
during syscall entry and choose_random_kstack_offset() during
1523-
syscall exit. Careful removal of -fstack-protector-strong and
1521+
offset randomization with a call to add_random_kstack_offset()
1522+
during syscall entry. Careful removal of -fstack-protector-strong and
15241523
-fstack-protector should also be applied to the entry code and
15251524
closely examined, as the artificial stack bump looks like an array
15261525
to the compiler, so it will attempt to add canary checks regardless

arch/arm64/kernel/syscall.c

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -52,17 +52,6 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
5252
}
5353

5454
syscall_set_return_value(current, regs, 0, ret);
55-
56-
/*
57-
* This value will get limited by KSTACK_OFFSET_MAX(), which is 10
58-
* bits. The actual entropy will be further reduced by the compiler
59-
* when applying stack alignment constraints: the AAPCS mandates a
60-
* 16-byte aligned SP at function boundaries, which will remove the
61-
* 4 low bits from any entropy chosen here.
62-
*
63-
* The resulting 6 bits of entropy is seen in SP[9:4].
64-
*/
65-
choose_random_kstack_offset(get_random_u16());
6655
}
6756

6857
static inline bool has_syscall_work(unsigned long flags)

arch/loongarch/kernel/syscall.c

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -79,16 +79,5 @@ void noinstr __no_stack_protector do_syscall(struct pt_regs *regs)
7979
regs->regs[7], regs->regs[8], regs->regs[9]);
8080
}
8181

82-
/*
83-
* This value will get limited by KSTACK_OFFSET_MAX(), which is 10
84-
* bits. The actual entropy will be further reduced by the compiler
85-
* when applying stack alignment constraints: 16-bytes (i.e. 4-bits)
86-
* aligned, which will remove the 4 low bits from any entropy chosen
87-
* here.
88-
*
89-
* The resulting 6 bits of entropy is seen in SP[9:4].
90-
*/
91-
choose_random_kstack_offset(get_cycles());
92-
9382
syscall_exit_to_user_mode(regs);
9483
}

arch/powerpc/kernel/syscall.c

Lines changed: 2 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,6 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
2020

2121
kuap_lock();
2222

23-
add_random_kstack_offset();
24-
2523
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
2624
BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
2725

@@ -30,6 +28,8 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
3028
CT_WARN_ON(ct_state() == CT_STATE_KERNEL);
3129
user_exit_irqoff();
3230

31+
add_random_kstack_offset();
32+
3333
BUG_ON(regs_is_unrecoverable(regs));
3434
BUG_ON(!user_mode(regs));
3535
BUG_ON(arch_irq_disabled_regs(regs));
@@ -173,17 +173,5 @@ notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
173173
}
174174
#endif
175175

176-
/*
177-
* Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
178-
* so the maximum stack offset is 1k bytes (10 bits).
179-
*
180-
* The actual entropy will be further reduced by the compiler when
181-
* applying stack alignment constraints: the powerpc architecture
182-
* may have two kinds of stack alignment (16-bytes and 8-bytes).
183-
*
184-
* So the resulting 6 or 7 bits of entropy is seen in SP[9:4] or SP[9:3].
185-
*/
186-
choose_random_kstack_offset(mftb());
187-
188176
return ret;
189177
}

arch/riscv/kernel/traps.c

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -344,18 +344,6 @@ void do_trap_ecall_u(struct pt_regs *regs)
344344
syscall_handler(regs, syscall);
345345
}
346346

347-
/*
348-
* Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
349-
* so the maximum stack offset is 1k bytes (10 bits).
350-
*
351-
* The actual entropy will be further reduced by the compiler when
352-
* applying stack alignment constraints: 16-byte (i.e. 4-bit) aligned
353-
* for RV32I or RV64I.
354-
*
355-
* The resulting 6 bits of entropy is seen in SP[9:4].
356-
*/
357-
choose_random_kstack_offset(get_random_u16());
358-
359347
syscall_exit_to_user_mode(regs);
360348
} else {
361349
irqentry_state_t state = irqentry_nmi_enter(regs);

arch/s390/include/asm/entry-common.h

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -51,14 +51,6 @@ static __always_inline void arch_exit_to_user_mode(void)
5151

5252
#define arch_exit_to_user_mode arch_exit_to_user_mode
5353

54-
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
55-
unsigned long ti_work)
56-
{
57-
choose_random_kstack_offset(get_tod_clock_fast());
58-
}
59-
60-
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
61-
6254
static __always_inline bool arch_in_rcu_eqs(void)
6355
{
6456
if (IS_ENABLED(CONFIG_KVM))

arch/s390/kernel/syscall.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -97,8 +97,8 @@ void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
9797
{
9898
unsigned long nr;
9999

100-
add_random_kstack_offset();
101100
enter_from_user_mode(regs);
101+
add_random_kstack_offset();
102102
regs->psw = get_lowcore()->svc_old_psw;
103103
regs->int_code = get_lowcore()->svc_int_code;
104104
update_timer_sys();

arch/x86/entry/syscall_32.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -247,7 +247,6 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
247247
{
248248
int nr = syscall_32_enter(regs);
249249

250-
add_random_kstack_offset();
251250
/*
252251
* Subtlety here: if ptrace pokes something larger than 2^31-1 into
253252
* orig_ax, the int return value truncates it. This matches
@@ -256,6 +255,7 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
256255
nr = syscall_enter_from_user_mode(regs, nr);
257256
instrumentation_begin();
258257

258+
add_random_kstack_offset();
259259
do_syscall_32_irqs_on(regs, nr);
260260

261261
instrumentation_end();
@@ -268,7 +268,6 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
268268
int nr = syscall_32_enter(regs);
269269
int res;
270270

271-
add_random_kstack_offset();
272271
/*
273272
* This cannot use syscall_enter_from_user_mode() as it has to
274273
* fetch EBP before invoking any of the syscall entry work
@@ -277,6 +276,7 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
277276
enter_from_user_mode(regs);
278277

279278
instrumentation_begin();
279+
add_random_kstack_offset();
280280
local_irq_enable();
281281
/* Fetch EBP from where the vDSO stashed it. */
282282
if (IS_ENABLED(CONFIG_X86_64)) {

arch/x86/entry/syscall_64.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,10 +86,10 @@ static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
8686
/* Returns true to return using SYSRET, or false to use IRET */
8787
__visible noinstr bool do_syscall_64(struct pt_regs *regs, int nr)
8888
{
89-
add_random_kstack_offset();
9089
nr = syscall_enter_from_user_mode(regs, nr);
9190

9291
instrumentation_begin();
92+
add_random_kstack_offset();
9393

9494
if (!do_syscall_x64(regs, nr) && !do_syscall_x32(regs, nr) && nr != -1) {
9595
/* Invalid system call, but still a system call. */

arch/x86/include/asm/entry-common.h

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -82,18 +82,6 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
8282
current_thread_info()->status &= ~(TS_COMPAT | TS_I386_REGS_POKED);
8383
#endif
8484

85-
/*
86-
* This value will get limited by KSTACK_OFFSET_MAX(), which is 10
87-
* bits. The actual entropy will be further reduced by the compiler
88-
* when applying stack alignment constraints (see cc_stack_align4/8 in
89-
* arch/x86/Makefile), which will remove the 3 (x86_64) or 2 (ia32)
90-
* low bits from any entropy chosen here.
91-
*
92-
* Therefore, final stack offset entropy will be 7 (x86_64) or
93-
* 8 (ia32) bits.
94-
*/
95-
choose_random_kstack_offset(rdtsc());
96-
9785
/* Avoid unnecessary reads of 'x86_ibpb_exit_to_user' */
9886
if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) &&
9987
this_cpu_read(x86_ibpb_exit_to_user)) {

0 commit comments

Comments (0)