Skip to content

Commit 3fde528

Browse files
ardbiesheuvel authored and liuw committed
x86/hyperv: Use __naked attribute to fix stackless C function
hv_crash_c_entry() is a C function that is entered without a stack, and this is only allowed for functions that have the __naked attribute, which informs the compiler that it must not emit the usual prologue and epilogue or emit any other kind of instrumentation that relies on a stack frame. So split up the function, and set the __naked attribute on the initial part that sets up the stack, GDT, IDT and other pieces that are needed for ordinary C execution. Given that function calls are not permitted either, use the existing long return coded in an asm() block to call the second part of the function, which is an ordinary function that is permitted to call other functions as usual. Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com> # asm parts, not hv parts Reviewed-by: Mukesh Rathor <mrathor@linux.microsoft.com> Acked-by: Uros Bizjak <ubizjak@gmail.com> Cc: Wei Liu <wei.liu@kernel.org> Cc: linux-hyperv@vger.kernel.org Fixes: 94212d3 ("x86/hyperv: Implement hypervisor RAM collection into vmcore") Signed-off-by: Ard Biesheuvel <ardb@kernel.org> Signed-off-by: Wei Liu <wei.liu@kernel.org>
1 parent edd20cb commit 3fde528

1 file changed

Lines changed: 43 additions & 39 deletions

File tree

arch/x86/hyperv/hv_crash.c

Lines changed: 43 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -107,14 +107,12 @@ static void __noreturn hv_panic_timeout_reboot(void)
107107
cpu_relax();
108108
}
109109

110-
/* This cannot be inlined as it needs stack */
111-
static noinline __noclone void hv_crash_restore_tss(void)
110+
static void hv_crash_restore_tss(void)
112111
{
113112
load_TR_desc();
114113
}
115114

116-
/* This cannot be inlined as it needs stack */
117-
static noinline void hv_crash_clear_kernpt(void)
115+
static void hv_crash_clear_kernpt(void)
118116
{
119117
pgd_t *pgd;
120118
p4d_t *p4d;
@@ -125,6 +123,25 @@ static noinline void hv_crash_clear_kernpt(void)
125123
native_p4d_clear(p4d);
126124
}
127125

126+
127+
static void __noreturn hv_crash_handle(void)
128+
{
129+
hv_crash_restore_tss();
130+
hv_crash_clear_kernpt();
131+
132+
/* we are now fully in devirtualized normal kernel mode */
133+
__crash_kexec(NULL);
134+
135+
hv_panic_timeout_reboot();
136+
}
137+
138+
/*
139+
* __naked functions do not permit function calls, not even to __always_inline
140+
* functions that only contain asm() blocks themselves. So use a macro instead.
141+
*/
142+
#define hv_wrmsr(msr, val) \
143+
asm volatile("wrmsr" :: "c"(msr), "a"((u32)val), "d"((u32)(val >> 32)) : "memory")
144+
128145
/*
129146
* This is the C entry point from the asm glue code after the disable hypercall.
130147
* We enter here in IA32-e long mode, ie, full 64bit mode running on kernel
@@ -133,51 +150,38 @@ static noinline void hv_crash_clear_kernpt(void)
133150
* available. We restore kernel GDT, and rest of the context, and continue
134151
* to kexec.
135152
*/
136-
static asmlinkage void __noreturn hv_crash_c_entry(void)
153+
static void __naked hv_crash_c_entry(void)
137154
{
138-
struct hv_crash_ctxt *ctxt = &hv_crash_ctxt;
139-
140155
/* first thing, restore kernel gdt */
141-
native_load_gdt(&ctxt->gdtr);
156+
asm volatile("lgdt %0" : : "m" (hv_crash_ctxt.gdtr));
142157

143-
asm volatile("movw %%ax, %%ss" : : "a"(ctxt->ss));
144-
asm volatile("movq %0, %%rsp" : : "m"(ctxt->rsp));
158+
asm volatile("movw %0, %%ss\n\t"
159+
"movq %1, %%rsp"
160+
:: "m"(hv_crash_ctxt.ss), "m"(hv_crash_ctxt.rsp));
145161

146-
asm volatile("movw %%ax, %%ds" : : "a"(ctxt->ds));
147-
asm volatile("movw %%ax, %%es" : : "a"(ctxt->es));
148-
asm volatile("movw %%ax, %%fs" : : "a"(ctxt->fs));
149-
asm volatile("movw %%ax, %%gs" : : "a"(ctxt->gs));
162+
asm volatile("movw %0, %%ds" : : "m"(hv_crash_ctxt.ds));
163+
asm volatile("movw %0, %%es" : : "m"(hv_crash_ctxt.es));
164+
asm volatile("movw %0, %%fs" : : "m"(hv_crash_ctxt.fs));
165+
asm volatile("movw %0, %%gs" : : "m"(hv_crash_ctxt.gs));
150166

151-
native_wrmsrq(MSR_IA32_CR_PAT, ctxt->pat);
152-
asm volatile("movq %0, %%cr0" : : "r"(ctxt->cr0));
167+
hv_wrmsr(MSR_IA32_CR_PAT, hv_crash_ctxt.pat);
168+
asm volatile("movq %0, %%cr0" : : "r"(hv_crash_ctxt.cr0));
153169

154-
asm volatile("movq %0, %%cr8" : : "r"(ctxt->cr8));
155-
asm volatile("movq %0, %%cr4" : : "r"(ctxt->cr4));
156-
asm volatile("movq %0, %%cr2" : : "r"(ctxt->cr4));
170+
asm volatile("movq %0, %%cr8" : : "r"(hv_crash_ctxt.cr8));
171+
asm volatile("movq %0, %%cr4" : : "r"(hv_crash_ctxt.cr4));
172+
asm volatile("movq %0, %%cr2" : : "r"(hv_crash_ctxt.cr2));
157173

158-
native_load_idt(&ctxt->idtr);
159-
native_wrmsrq(MSR_GS_BASE, ctxt->gsbase);
160-
native_wrmsrq(MSR_EFER, ctxt->efer);
174+
asm volatile("lidt %0" : : "m" (hv_crash_ctxt.idtr));
175+
hv_wrmsr(MSR_GS_BASE, hv_crash_ctxt.gsbase);
176+
hv_wrmsr(MSR_EFER, hv_crash_ctxt.efer);
161177

162178
/* restore the original kernel CS now via far return */
163-
asm volatile("movzwq %0, %%rax\n\t"
164-
"pushq %%rax\n\t"
165-
"pushq $1f\n\t"
166-
"lretq\n\t"
167-
"1:nop\n\t" : : "m"(ctxt->cs) : "rax");
168-
169-
/* We are in asmlinkage without stack frame, hence make C function
170-
* calls which will buy stack frames.
171-
*/
172-
hv_crash_restore_tss();
173-
hv_crash_clear_kernpt();
174-
175-
/* we are now fully in devirtualized normal kernel mode */
176-
__crash_kexec(NULL);
177-
178-
hv_panic_timeout_reboot();
179+
asm volatile("pushq %q0\n\t"
180+
"pushq %q1\n\t"
181+
"lretq"
182+
:: "r"(hv_crash_ctxt.cs), "r"(hv_crash_handle));
179183
}
180-
/* Tell gcc we are using lretq long jump in the above function intentionally */
184+
/* Tell objtool we are using lretq long jump in the above function intentionally */
181185
STACK_FRAME_NON_STANDARD(hv_crash_c_entry);
182186

183187
static void hv_mark_tss_not_busy(void)

0 commit comments

Comments (0)