Skip to content

Commit 744b02f

Browse files
kaihuang authored and hansendc committed
x86/kexec: Consolidate relocate_kernel() function parameters
During kexec, the kernel jumps to the new kernel in relocate_kernel(), which is implemented in assembly and both 32-bit and 64-bit have their own version. Currently, for both 32-bit and 64-bit, the last two parameters of the relocate_kernel() are both 'unsigned int' but actually they only convey a boolean, i.e., one bit information. The 'unsigned int' has enough space to carry two bits information therefore there's no need to pass the two booleans in two separate 'unsigned int'. Consolidate the last two function parameters of relocate_kernel() into a single 'unsigned int' and pass flags instead. Only consolidate the 64-bit version albeit the similar optimization can be done for the 32-bit version too. Don't bother changing the 32-bit version while it is working (since assembly code change is required). Signed-off-by: Kai Huang <kai.huang@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com> Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de> Reviewed-by: David Woodhouse <dwmw@amazon.co.uk> Link: https://lore.kernel.org/all/20250901160930.1785244-2-pbonzini%40redhat.com
1 parent 01fb93a commit 744b02f

3 files changed

Lines changed: 38 additions & 21 deletions

File tree

arch/x86/include/asm/kexec.h

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,15 @@
1313
# define KEXEC_DEBUG_EXC_HANDLER_SIZE 6 /* PUSHI, PUSHI, 2-byte JMP */
1414
#endif
1515

16+
#ifdef CONFIG_X86_64
17+
18+
#include <linux/bits.h>
19+
20+
#define RELOC_KERNEL_PRESERVE_CONTEXT BIT(0)
21+
#define RELOC_KERNEL_HOST_MEM_ENC_ACTIVE BIT(1)
22+
23+
#endif
24+
1625
# define KEXEC_CONTROL_PAGE_SIZE 4096
1726
# define KEXEC_CONTROL_CODE_MAX_SIZE 2048
1827

@@ -121,8 +130,7 @@ typedef unsigned long
121130
relocate_kernel_fn(unsigned long indirection_page,
122131
unsigned long pa_control_page,
123132
unsigned long start_address,
124-
unsigned int preserve_context,
125-
unsigned int host_mem_enc_active);
133+
unsigned int flags);
126134
#endif
127135
extern relocate_kernel_fn relocate_kernel;
128136
#define ARCH_HAS_KIMAGE_ARCH

arch/x86/kernel/machine_kexec_64.c

Lines changed: 13 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -384,16 +384,10 @@ void __nocfi machine_kexec(struct kimage *image)
384384
{
385385
unsigned long reloc_start = (unsigned long)__relocate_kernel_start;
386386
relocate_kernel_fn *relocate_kernel_ptr;
387-
unsigned int host_mem_enc_active;
387+
unsigned int relocate_kernel_flags;
388388
int save_ftrace_enabled;
389389
void *control_page;
390390

391-
/*
392-
* This must be done before load_segments() since if call depth tracking
393-
* is used then GS must be valid to make any function calls.
394-
*/
395-
host_mem_enc_active = cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT);
396-
397391
#ifdef CONFIG_KEXEC_JUMP
398392
if (image->preserve_context)
399393
save_processor_state();
@@ -427,6 +421,17 @@ void __nocfi machine_kexec(struct kimage *image)
427421
*/
428422
relocate_kernel_ptr = control_page + (unsigned long)relocate_kernel - reloc_start;
429423

424+
relocate_kernel_flags = 0;
425+
if (image->preserve_context)
426+
relocate_kernel_flags |= RELOC_KERNEL_PRESERVE_CONTEXT;
427+
428+
/*
429+
* This must be done before load_segments() since if call depth tracking
430+
* is used then GS must be valid to make any function calls.
431+
*/
432+
if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
433+
relocate_kernel_flags |= RELOC_KERNEL_HOST_MEM_ENC_ACTIVE;
434+
430435
/*
431436
* The segment registers are funny things, they have both a
432437
* visible and an invisible part. Whenever the visible part is
@@ -443,8 +448,7 @@ void __nocfi machine_kexec(struct kimage *image)
443448
image->start = relocate_kernel_ptr((unsigned long)image->head,
444449
virt_to_phys(control_page),
445450
image->start,
446-
image->preserve_context,
447-
host_mem_enc_active);
451+
relocate_kernel_flags);
448452

449453
#ifdef CONFIG_KEXEC_JUMP
450454
if (image->preserve_context)

arch/x86/kernel/relocate_kernel_64.S

Lines changed: 15 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -66,8 +66,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
6666
* %rdi indirection_page
6767
* %rsi pa_control_page
6868
* %rdx start address
69-
* %rcx preserve_context
70-
* %r8 host_mem_enc_active
69+
* %rcx flags: RELOC_KERNEL_*
7170
*/
7271

7372
/* Save the CPU context, used for jumping back */
@@ -111,7 +110,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
111110
/* save indirection list for jumping back */
112111
movq %rdi, pa_backup_pages_map(%rip)
113112

114-
/* Save the preserve_context to %r11 as swap_pages clobbers %rcx. */
113+
/* Save the flags to %r11 as swap_pages clobbers %rcx. */
115114
movq %rcx, %r11
116115

117116
/* setup a new stack at the end of the physical control page */
@@ -129,9 +128,8 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
129128
/*
130129
* %rdi indirection page
131130
* %rdx start address
132-
* %r8 host_mem_enc_active
133131
* %r9 page table page
134-
* %r11 preserve_context
132+
* %r11 flags: RELOC_KERNEL_*
135133
* %r13 original CR4 when relocate_kernel() was invoked
136134
*/
137135

@@ -204,7 +202,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
204202
* entries that will conflict with the now unencrypted memory
205203
* used by kexec. Flush the caches before copying the kernel.
206204
*/
207-
testq %r8, %r8
205+
testb $RELOC_KERNEL_HOST_MEM_ENC_ACTIVE, %r11b
208206
jz .Lsme_off
209207
wbinvd
210208
.Lsme_off:
@@ -220,7 +218,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
220218
movq %cr3, %rax
221219
movq %rax, %cr3
222220

223-
testq %r11, %r11 /* preserve_context */
221+
testb $RELOC_KERNEL_PRESERVE_CONTEXT, %r11b
224222
jnz .Lrelocate
225223

226224
/*
@@ -273,7 +271,13 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
273271
ANNOTATE_NOENDBR
274272
andq $PAGE_MASK, %r8
275273
lea PAGE_SIZE(%r8), %rsp
276-
movl $1, %r11d /* Ensure preserve_context flag is set */
274+
/*
275+
* Ensure RELOC_KERNEL_PRESERVE_CONTEXT flag is set so that
276+
* swap_pages() can swap pages correctly. Note all other
277+
* RELOC_KERNEL_* flags passed to relocate_kernel() are not
278+
* restored.
279+
*/
280+
movl $RELOC_KERNEL_PRESERVE_CONTEXT, %r11d
277281
call swap_pages
278282
movq kexec_va_control_page(%rip), %rax
279283
0: addq $virtual_mapped - 0b, %rax
@@ -321,7 +325,7 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
321325
UNWIND_HINT_END_OF_STACK
322326
/*
323327
* %rdi indirection page
324-
* %r11 preserve_context
328+
* %r11 flags: RELOC_KERNEL_*
325329
*/
326330
movq %rdi, %rcx /* Put the indirection_page in %rcx */
327331
xorl %edi, %edi
@@ -357,7 +361,8 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
357361
movq %rdi, %rdx /* Save destination page to %rdx */
358362
movq %rsi, %rax /* Save source page to %rax */
359363

360-
testq %r11, %r11 /* Only actually swap for ::preserve_context */
364+
/* Only actually swap for ::preserve_context */
365+
testb $RELOC_KERNEL_PRESERVE_CONTEXT, %r11b
361366
jz .Lnoswap
362367

363368
/* copy source page to swap page */

0 commit comments

Comments
 (0)