Skip to content

Commit 24b8f8d

Browse files
guilhermepiccoli authored and kees committed
pstore/ftrace: Factor KASLR offset in the core kernel instruction addresses
The pstore ftrace frontend works by purely collecting the instruction address, saving it on the persistent area through the backend and when the log is read, on next boot for example, the address is then resolved by using the regular printk symbol lookup (%pS for example). Problem: if we are running a relocatable kernel with KASLR enabled, this is a recipe for failure in the symbol resolution on next boots, since the addresses are offset'ed by the KASLR address. So, naturally the way to go is factor the KASLR address out of instruction address collection, and adding the fresh offset when resolving the symbol on future boots. Problem #2: modules also have varying addresses that float based on module base address and potentially the module ordering in memory, meaning factoring KASLR offset for them is useless. So, let's hereby only take KASLR offset into account for core kernel addresses, leaving module ones as is. And we have yet a 3rd complexity: not necessarily the check range for core kernel addresses holds true on future boots, since the module base address will vary. With that, the choice was to mark the addresses as being core vs module based on its MSB. And with that... ...we have the 4th challenge here: for some "simple" architectures, the CPU number is saved bit-encoded on the instruction pointer, to allow bigger timestamps - this is set through the PSTORE_CPU_IN_IP define for such architectures. Hence, the approach here is to skip such architectures (at least in a first moment). Finished? No. On top of all previous complexities, we have one extra pain point: kaslr_offset() is inlined and fully "resolved" at boot-time, after kernel decompression, through ELF relocation mechanism. Once the offset is known, it's patched to the kernel text area, wherever it is used. The mechanism, and its users, are only built-in - incompatible with module usage. 
Though there are possibly some hacks (as computing the offset using some kallsym lookup), the choice here is to restrict this optimization to the (hopefully common) case of CONFIG_PSTORE=y. TL;DR: let's factor KASLR offsets on pstore/ftrace for core kernel addresses, only when PSTORE is built-in and leaving module addresses out, as well as architectures that define PSTORE_CPU_IN_IP. Signed-off-by: Guilherme G. Piccoli <gpiccoli@igalia.com> Link: https://patch.msgid.link/20260410205848.2607169-1-gpiccoli@igalia.com Signed-off-by: Kees Cook <kees@kernel.org>
1 parent 421a41c commit 24b8f8d

3 files changed

Lines changed: 32 additions & 4 deletions

File tree

fs/pstore/ftrace.c

Lines changed: 26 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,11 +18,35 @@
1818
#include <linux/cache.h>
1919
#include <linux/slab.h>
2020
#include <asm/barrier.h>
21+
#include <asm/setup.h>
2122
#include "internal.h"
2223

2324
/* This doesn't need to be atomic: speed is chosen over correctness here. */
2425
static u64 pstore_ftrace_stamp;
2526

27+
static inline unsigned long adjust_ip(unsigned long ip)
28+
{
29+
#if defined(CONFIG_RANDOMIZE_BASE) && !defined(PSTORE_CPU_IN_IP) && IS_BUILTIN(CONFIG_PSTORE)
30+
if (core_kernel_text(ip))
31+
return ip - kaslr_offset();
32+
33+
__clear_bit(BITS_PER_LONG - 1, &ip);
34+
#endif
35+
return ip;
36+
}
37+
38+
inline unsigned long decode_ip(unsigned long ip)
39+
{
40+
#if defined(CONFIG_RANDOMIZE_BASE) && !defined(PSTORE_CPU_IN_IP) && IS_BUILTIN(CONFIG_PSTORE)
41+
if (test_bit(BITS_PER_LONG - 1, &ip))
42+
return ip + kaslr_offset();
43+
44+
__set_bit(BITS_PER_LONG - 1, &ip);
45+
46+
#endif
47+
return ip;
48+
}
49+
2650
static void notrace pstore_ftrace_call(unsigned long ip,
2751
unsigned long parent_ip,
2852
struct ftrace_ops *op,
@@ -47,8 +71,8 @@ static void notrace pstore_ftrace_call(unsigned long ip,
4771

4872
local_irq_save(flags);
4973

50-
rec.ip = ip;
51-
rec.parent_ip = parent_ip;
74+
rec.ip = adjust_ip(ip);
75+
rec.parent_ip = adjust_ip(parent_ip);
5276
pstore_ftrace_write_timestamp(&rec, pstore_ftrace_stamp++);
5377
pstore_ftrace_encode_cpu(&rec, raw_smp_processor_id());
5478
psinfo->write(&record);

fs/pstore/inode.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -105,17 +105,19 @@ static int pstore_ftrace_seq_show(struct seq_file *s, void *v)
105105
struct pstore_private *ps = s->private;
106106
struct pstore_ftrace_seq_data *data = v;
107107
struct pstore_ftrace_record *rec;
108+
unsigned long ip, parent_ip;
108109

109110
if (!data)
110111
return 0;
111112

112113
rec = (struct pstore_ftrace_record *)(ps->record->buf + data->off);
113114

115+
ip = decode_ip(rec->ip);
116+
parent_ip = decode_ip(rec->parent_ip);
114117
seq_printf(s, "CPU:%d ts:%llu %08lx %08lx %ps <- %pS\n",
115118
pstore_ftrace_decode_cpu(rec),
116119
pstore_ftrace_read_timestamp(rec),
117-
rec->ip, rec->parent_ip, (void *)rec->ip,
118-
(void *)rec->parent_ip);
120+
ip, parent_ip, (void *)ip, (void *)parent_ip);
119121

120122
return 0;
121123
}

fs/pstore/internal.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,13 +9,15 @@
99
extern unsigned int kmsg_bytes;
1010

1111
#ifdef CONFIG_PSTORE_FTRACE
12+
extern unsigned long decode_ip(unsigned long ip);
1213
extern void pstore_register_ftrace(void);
1314
extern void pstore_unregister_ftrace(void);
1415
ssize_t pstore_ftrace_combine_log(char **dest_log, size_t *dest_log_size,
1516
const char *src_log, size_t src_log_size);
1617
#else
1718
static inline void pstore_register_ftrace(void) {}
1819
static inline void pstore_unregister_ftrace(void) {}
20+
static inline unsigned long decode_ip(unsigned long ip) { return ip; }
1921
static inline ssize_t
2022
pstore_ftrace_combine_log(char **dest_log, size_t *dest_log_size,
2123
const char *src_log, size_t src_log_size)

0 commit comments

Comments
 (0)