Diffstat (limited to '0039-x86-entry-Introduce-EFRAME_-constants.patch')
-rw-r--r-- | 0039-x86-entry-Introduce-EFRAME_-constants.patch | 314
1 file changed, 314 insertions, 0 deletions
diff --git a/0039-x86-entry-Introduce-EFRAME_-constants.patch b/0039-x86-entry-Introduce-EFRAME_-constants.patch
new file mode 100644
index 0000000..c280286
--- /dev/null
+++ b/0039-x86-entry-Introduce-EFRAME_-constants.patch
@@ -0,0 +1,314 @@
+From e691f99f17198906f813b85dcabafe5addb9a57a Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Sat, 27 Jan 2024 17:52:09 +0000
+Subject: [PATCH 39/67] x86/entry: Introduce EFRAME_* constants
+
+restore_all_guest() does a lot of manipulation of the stack after popping the
+GPRs, and uses raw %rsp displacements to do so.  Also, almost all entrypaths
+use raw %rsp displacements prior to pushing GPRs.
+
+Provide better mnemonics, to aid readability and reduce the chance of errors
+when editing.
+
+No functional change.  The resulting binary is identical.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+(cherry picked from commit 37541208f119a9c552c6c6c3246ea61be0d44035)
+---
+ xen/arch/x86/x86_64/asm-offsets.c  | 17 ++++++++
+ xen/arch/x86/x86_64/compat/entry.S |  2 +-
+ xen/arch/x86/x86_64/entry.S        | 70 +++++++++++++++---------
+ 3 files changed, 53 insertions(+), 36 deletions(-)
+
+diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
+index 287dac101a..31fa63b77f 100644
+--- a/xen/arch/x86/x86_64/asm-offsets.c
++++ b/xen/arch/x86/x86_64/asm-offsets.c
+@@ -51,6 +51,23 @@ void __dummy__(void)
+     OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es);
+     BLANK();
+ 
++    /*
++     * EFRAME_* is for the entry/exit logic where %rsp is pointing at
++     * UREGS_error_code and GPRs are still/already guest values.
++     */
++#define OFFSET_EF(sym, mem)                                             \
++    DEFINE(sym, offsetof(struct cpu_user_regs, mem) -                   \
++                offsetof(struct cpu_user_regs, error_code))
++
++    OFFSET_EF(EFRAME_entry_vector, entry_vector);
++    OFFSET_EF(EFRAME_rip, rip);
++    OFFSET_EF(EFRAME_cs, cs);
++    OFFSET_EF(EFRAME_eflags, eflags);
++    OFFSET_EF(EFRAME_rsp, rsp);
++    BLANK();
++
++#undef OFFSET_EF
++
+     OFFSET(VCPU_processor, struct vcpu, processor);
+     OFFSET(VCPU_domain, struct vcpu, domain);
+     OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);
+diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
+index 253bb1688c..7c211314d8 100644
+--- a/xen/arch/x86/x86_64/compat/entry.S
++++ b/xen/arch/x86/x86_64/compat/entry.S
+@@ -15,7 +15,7 @@ ENTRY(entry_int82)
+         ENDBR64
+         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
+         pushq $0
+-        movl $HYPERCALL_VECTOR, 4(%rsp)
++        movl $HYPERCALL_VECTOR, EFRAME_entry_vector(%rsp)
+         SAVE_ALL compat=1 /* DPL1 gate, restricted to 32bit PV guests only. */
+ 
+         SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
+diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
+index 585b0c9551..412cbeb3ec 100644
+--- a/xen/arch/x86/x86_64/entry.S
++++ b/xen/arch/x86/x86_64/entry.S
+@@ -190,15 +190,15 @@ restore_all_guest:
+         SPEC_CTRL_EXIT_TO_PV /* Req: a=spec_ctrl %rsp=regs/cpuinfo, Clob: cd */
+ 
+         RESTORE_ALL
+-        testw $TRAP_syscall,4(%rsp)
++        testw $TRAP_syscall, EFRAME_entry_vector(%rsp)
+         jz    iret_exit_to_guest
+ 
+-        movq  24(%rsp),%r11           # RFLAGS
++        mov   EFRAME_eflags(%rsp), %r11
+         andq  $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), %r11
+         orq   $X86_EFLAGS_IF,%r11
+ 
+         /* Don't use SYSRET path if the return address is not canonical. */
+-        movq  8(%rsp),%rcx
++        mov   EFRAME_rip(%rsp), %rcx
+         sarq  $47,%rcx
+         incl  %ecx
+         cmpl  $1,%ecx
+@@ -213,20 +213,20 @@ restore_all_guest:
+         ALTERNATIVE "", rag_clrssbsy, X86_FEATURE_XEN_SHSTK
+ #endif
+ 
+-        movq  8(%rsp), %rcx           # RIP
+-        cmpw  $FLAT_USER_CS32,16(%rsp)# CS
+-        movq  32(%rsp),%rsp           # RSP
++        mov   EFRAME_rip(%rsp), %rcx
++        cmpw  $FLAT_USER_CS32, EFRAME_cs(%rsp)
++        mov   EFRAME_rsp(%rsp), %rsp
+         je    1f
+         sysretq
+ 1:      sysretl
+ 
+         ALIGN
+ .Lrestore_rcx_iret_exit_to_guest:
+-        movq  8(%rsp), %rcx           # RIP
++        mov   EFRAME_rip(%rsp), %rcx
+         /* No special register assumptions. */
+ iret_exit_to_guest:
+-        andl  $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), 24(%rsp)
+-        orl   $X86_EFLAGS_IF,24(%rsp)
++        andl  $~(X86_EFLAGS_IOPL | X86_EFLAGS_VM), EFRAME_eflags(%rsp)
++        orl   $X86_EFLAGS_IF, EFRAME_eflags(%rsp)
+         addq  $8,%rsp
+ .Lft0:  iretq
+         _ASM_PRE_EXTABLE(.Lft0, handle_exception)
+@@ -257,7 +257,7 @@ ENTRY(lstar_enter)
+         pushq $FLAT_KERNEL_CS64
+         pushq %rcx
+         pushq $0
+-        movl  $TRAP_syscall, 4(%rsp)
++        movl  $TRAP_syscall, EFRAME_entry_vector(%rsp)
+         SAVE_ALL
+ 
+         SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
+@@ -294,7 +294,7 @@ ENTRY(cstar_enter)
+         pushq $FLAT_USER_CS32
+         pushq %rcx
+         pushq $0
+-        movl  $TRAP_syscall, 4(%rsp)
++        movl  $TRAP_syscall, EFRAME_entry_vector(%rsp)
+         SAVE_ALL
+ 
+         SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
+@@ -335,7 +335,7 @@ GLOBAL(sysenter_eflags_saved)
+         pushq $3 /* ring 3 null cs */
+         pushq $0 /* null rip */
+         pushq $0
+-        movl  $TRAP_syscall, 4(%rsp)
++        movl  $TRAP_syscall, EFRAME_entry_vector(%rsp)
+         SAVE_ALL
+ 
+         SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
+@@ -389,7 +389,7 @@ ENTRY(int80_direct_trap)
+         ENDBR64
+         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
+         pushq $0
+-        movl  $0x80, 4(%rsp)
++        movl  $0x80, EFRAME_entry_vector(%rsp)
+         SAVE_ALL
+ 
+         SPEC_CTRL_ENTRY_FROM_PV /* Req: %rsp=regs/cpuinfo, %rdx=0, Clob: acd */
+@@ -649,7 +649,7 @@ ret_from_intr:
+         .section .init.text, "ax", @progbits
+ ENTRY(early_page_fault)
+         ENDBR64
+-        movl  $TRAP_page_fault, 4(%rsp)
++        movl  $TRAP_page_fault, EFRAME_entry_vector(%rsp)
+         SAVE_ALL
+         movq  %rsp, %rdi
+         call  do_early_page_fault
+@@ -716,7 +716,7 @@ ENTRY(common_interrupt)
+ 
+ ENTRY(page_fault)
+         ENDBR64
+-        movl  $TRAP_page_fault,4(%rsp)
++        movl  $TRAP_page_fault, EFRAME_entry_vector(%rsp)
+         /* No special register assumptions. */
+ GLOBAL(handle_exception)
+         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
+@@ -892,90 +892,90 @@ FATAL_exception_with_ints_disabled:
+ ENTRY(divide_error)
+         ENDBR64
+         pushq $0
+-        movl  $TRAP_divide_error,4(%rsp)
++        movl  $TRAP_divide_error, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+ ENTRY(coprocessor_error)
+         ENDBR64
+         pushq $0
+-        movl  $TRAP_copro_error,4(%rsp)
++        movl  $TRAP_copro_error, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+ ENTRY(simd_coprocessor_error)
+         ENDBR64
+         pushq $0
+-        movl  $TRAP_simd_error,4(%rsp)
++        movl  $TRAP_simd_error, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+ ENTRY(device_not_available)
+         ENDBR64
+         pushq $0
+-        movl  $TRAP_no_device,4(%rsp)
++        movl  $TRAP_no_device, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+ ENTRY(debug)
+         ENDBR64
+         pushq $0
+-        movl  $TRAP_debug,4(%rsp)
++        movl  $TRAP_debug, EFRAME_entry_vector(%rsp)
+         jmp   handle_ist_exception
+ 
+ ENTRY(int3)
+         ENDBR64
+         pushq $0
+-        movl  $TRAP_int3,4(%rsp)
++        movl  $TRAP_int3, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+ ENTRY(overflow)
+         ENDBR64
+         pushq $0
+-        movl  $TRAP_overflow,4(%rsp)
++        movl  $TRAP_overflow, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+ ENTRY(bounds)
+         ENDBR64
+         pushq $0
+-        movl  $TRAP_bounds,4(%rsp)
++        movl  $TRAP_bounds, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+ ENTRY(invalid_op)
+         ENDBR64
+         pushq $0
+-        movl  $TRAP_invalid_op,4(%rsp)
++        movl  $TRAP_invalid_op, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+ ENTRY(invalid_TSS)
+         ENDBR64
+-        movl  $TRAP_invalid_tss,4(%rsp)
++        movl  $TRAP_invalid_tss, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+ ENTRY(segment_not_present)
+         ENDBR64
+-        movl  $TRAP_no_segment,4(%rsp)
++        movl  $TRAP_no_segment, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+ ENTRY(stack_segment)
+         ENDBR64
+-        movl  $TRAP_stack_error,4(%rsp)
++        movl  $TRAP_stack_error, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+ ENTRY(general_protection)
+         ENDBR64
+-        movl  $TRAP_gp_fault,4(%rsp)
++        movl  $TRAP_gp_fault, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+ ENTRY(alignment_check)
+         ENDBR64
+-        movl  $TRAP_alignment_check,4(%rsp)
++        movl  $TRAP_alignment_check, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+ ENTRY(entry_CP)
+         ENDBR64
+-        movl  $X86_EXC_CP, 4(%rsp)
++        movl  $X86_EXC_CP, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+ ENTRY(double_fault)
+         ENDBR64
+-        movl  $TRAP_double_fault,4(%rsp)
++        movl  $TRAP_double_fault, EFRAME_entry_vector(%rsp)
+         /* Set AC to reduce chance of further SMAP faults */
+         ALTERNATIVE "", stac, X86_FEATURE_XEN_SMAP
+         SAVE_ALL
+@@ -1001,7 +1001,7 @@ ENTRY(double_fault)
+ ENTRY(nmi)
+         ENDBR64
+         pushq $0
+-        movl  $TRAP_nmi,4(%rsp)
++        movl  $TRAP_nmi, EFRAME_entry_vector(%rsp)
+ handle_ist_exception:
+         ALTERNATIVE "", clac, X86_FEATURE_XEN_SMAP
+         SAVE_ALL
+@@ -1134,7 +1134,7 @@ handle_ist_exception:
+ ENTRY(machine_check)
+         ENDBR64
+         pushq $0
+-        movl  $TRAP_machine_check,4(%rsp)
++        movl  $TRAP_machine_check, EFRAME_entry_vector(%rsp)
+         jmp   handle_ist_exception
+ 
+ /* No op trap handler.  Required for kexec crash path. */
+@@ -1171,7 +1171,7 @@ autogen_stubs: /* Automatically generated stubs. */
+ 1:
+         ENDBR64
+         pushq $0
+-        movb  $vec,4(%rsp)
++        movb  $vec, EFRAME_entry_vector(%rsp)
+         jmp   common_interrupt
+ 
+         entrypoint 1b
+@@ -1185,7 +1185,7 @@ autogen_stubs: /* Automatically generated stubs. */
+         test  $8,%spl          /* 64bit exception frames are 16 byte aligned, but the word */
+         jz    2f               /* size is 8 bytes.  Check whether the processor gave us an */
+         pushq $0               /* error code, and insert an empty one if not. */
+-2:      movb  $vec,4(%rsp)
++2:      movb  $vec, EFRAME_entry_vector(%rsp)
+         jmp   handle_exception
+ 
+         entrypoint 1b
+-- 
+2.44.0
+
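Note on the OFFSET_EF() arithmetic: it rebases offsetof() results so that position 0 is error_code rather than the start of struct cpu_user_regs, matching where %rsp points on the affected entry/exit paths. The standalone C sketch below illustrates the same computation; it is not Xen source, and struct fake_regs is a hypothetical stand-in whose field sizes are illustrative assumptions chosen to mirror the tail of struct cpu_user_regs. Under those assumptions, the printed values reproduce the raw displacements the patch replaces (4, 8, 16, 24, 32).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the tail of struct cpu_user_regs, from
 * error_code onwards; sizes/padding here are illustrative assumptions. */
struct fake_regs {
    /* ... the GPRs would precede this point ... */
    uint32_t error_code;    /* %rsp points here on the affected paths */
    uint32_t entry_vector;
    uint64_t rip;
    uint64_t cs;            /* selector plus adjacent padding, folded */
    uint64_t eflags;
    uint64_t rsp;
    uint64_t ss;
};

/* Same arithmetic as the patch's OFFSET_EF(): rebase each offset so
 * that error_code, not the struct start, is position 0. */
#define EFRAME(mem) \
    (offsetof(struct fake_regs, mem) - offsetof(struct fake_regs, error_code))

int main(void)
{
    /* Expected: 4, 8, 16, 24, 32 - the pre-patch %rsp displacements. */
    printf("EFRAME_entry_vector = %zu\n", EFRAME(entry_vector));
    printf("EFRAME_rip          = %zu\n", EFRAME(rip));
    printf("EFRAME_cs           = %zu\n", EFRAME(cs));
    printf("EFRAME_eflags       = %zu\n", EFRAME(eflags));
    printf("EFRAME_rsp          = %zu\n", EFRAME(rsp));
    return 0;
}

Rebasing on error_code is what lets the entry paths, which have not pushed the GPRs yet, and the exit path, which has already popped them, share one set of names.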
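Separately, the sarq $47 / incl / cmpl $1 sequence retained around the new EFRAME_rip load is a compact canonical-address test for the guest return address (per the in-line comment, the SYSRET path must be avoided when RIP is not canonical). A C rendering of the same test, illustrative only with an invented function name, assuming arithmetic right shift of signed values as on mainstream compilers:

#include <stdbool.h>
#include <stdint.h>

/* Shifting a 48-bit-canonical address right by 47 (arithmetic) yields 0
 * for the user half or -1 for the kernel half; adding 1 maps both to
 * {0, 1}, so an unsigned compare against 1 flags every non-canonical
 * value.  The 32-bit incl/cmpl in the assembly is equivalent, since the
 * shifted value always fits in 32 bits. */
static bool is_canonical_addr(uint64_t addr)
{
    return (uint64_t)(((int64_t)addr >> 47) + 1) <= 1;
}

With 4-level paging this matches the hardware rule that bits 63:47 of a canonical address are all equal.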