diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S
index 240327e422..c93a23ed64 100644
--- a/xen/arch/x86/boot/head.S
+++ b/xen/arch/x86/boot/head.S
@@ -351,10 +351,12 @@ cs32_switch:
         jmp     *%edi
 
 /*
- * Entry point for TrenchBoot Secure Launch on Intel TXT platforms.
+ * Entry point for TrenchBoot Secure Launch, common for Intel TXT and
+ * AMD Secure Startup, but state is slightly different.
  *
+ * On Intel:
  * CPU is in 32b protected mode with paging disabled. On entry:
- * - %ebx = %eip = MLE entry point,
+ * - %ebx = %eip = this entry point,
  * - stack pointer is undefined,
  * - CS is flat 4GB code segment,
  * - DS, ES, SS, FS and GS are undefined.
@@ -368,13 +370,34 @@ cs32_switch:
  * - trying to enter real mode results in reset
  * - APs must be brought up by MONITOR or GETSEC[WAKEUP], depending on
  *   which is supported by a given SINIT ACM
+ *
+ * On AMD (as implemented by TrenchBoot's SKL):
+ * CPU is in 32b protected mode with paging disabled. On entry:
+ * - %ebx = %eip = this entry point,
+ * - %ebp holds base address of SKL
+ * - stack pointer is treated as undefined for parity with TXT,
+ * - CS is flat 4GB code segment,
+ * - DS, ES, SS are flat 4GB data segments, but treated as undefined for
+ *   parity with TXT.
+ *
+ * Additional restrictions:
+ * - interrupts (including NMIs and SMIs) are disabled and must be
+ *   enabled later
+ * - APs must be brought up by SIPI without an INIT
  */
 slaunch_stub_entry:
         /* Calculate the load base address. */
         mov     %ebx, %esi
         sub     $sym_offs(slaunch_stub_entry), %esi
 
-        /* Mark Secure Launch boot protocol and jump to common entry. */
+        /* On AMD, %ebp holds the base address of SLB, save it for later. */
+        mov     %ebp, %ebx
+
+        /*
+         * Mark Secure Launch boot protocol and jump to common entry. Note that
+         * all general purpose registers except %ebx and %esi are clobbered
+         * between here and .Lslaunch_proto.
+         */
         mov     $SLAUNCH_BOOTLOADER_MAGIC, %eax
         jmp     .Lset_stack
 
@@ -497,10 +520,12 @@ __start:
 
         sub     $8, %esp
         push    %esp                /* pointer to output structure */
+        push    %ebx                /* Slaunch parameter on AMD */
         lea     sym_offs(__2M_rwdata_end), %ecx   /* end of target image */
         lea     sym_offs(_start), %edx            /* target base address */
         mov     %esi, %eax          /* load base address */
-        /* slaunch_early_tests(load/eax, tgt/edx, tgt_end/ecx, ret/stk) using fastcall. */
+        /* slaunch_early_tests(load/eax, tgt/edx, tgt_end/ecx,
+           slaunch/stk, ret/stk) using fastcall. */
         call    slaunch_early_tests
 
         /* Move outputs of slaunch_early_tests() from stack into registers. */
diff --git a/xen/arch/x86/boot/slaunch_early.c b/xen/arch/x86/boot/slaunch_early.c
index e818415a2d..79f315be31 100644
--- a/xen/arch/x86/boot/slaunch_early.c
+++ b/xen/arch/x86/boot/slaunch_early.c
@@ -18,6 +18,8 @@
 #include <xen/types.h>
 #include <asm/intel_txt.h>
 #include <asm/slaunch.h>
+#include <xen/slr_table.h>
+#include <asm/x86-vendors.h>
 
 struct early_tests_results
 {
@@ -25,6 +27,21 @@ struct early_tests_results
     uint32_t slrt_pa;
 } __packed;
 
+static bool is_intel_cpu(void)
+{
+    /*
+     * asm/processor.h can't be included in early code, which means neither
+     * cpuid() function nor boot_cpu_data can be used here.
+     */
+    uint32_t eax, ebx, ecx, edx;
+    asm volatile ( "cpuid"
+                   : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+                   : "0" (0), "c" (0) );
+    return ebx == X86_VENDOR_INTEL_EBX
+           && ecx == X86_VENDOR_INTEL_ECX
+           && edx == X86_VENDOR_INTEL_EDX;
+}
+
 static void verify_pmr_ranges(struct txt_os_mle_data *os_mle,
                               struct txt_os_sinit_data *os_sinit,
                               uint32_t load_base_addr, uint32_t tgt_base_addr,
@@ -104,6 +121,7 @@ static void verify_pmr_ranges(struct txt_os_mle_data *os_mle,
 void slaunch_early_tests(uint32_t load_base_addr,
                          uint32_t tgt_base_addr,
                          uint32_t tgt_end_addr,
+                         uint32_t slaunch_param,
                          struct early_tests_results *result)
 {
     void *txt_heap;
@@ -111,6 +129,35 @@ void slaunch_early_tests(uint32_t load_base_addr,
     struct txt_os_mle_data *os_mle;
     struct txt_os_sinit_data *os_sinit;
     uint32_t size = tgt_end_addr - tgt_base_addr;
 
+    if ( !is_intel_cpu() )
+    {
+        /*
+         * Not an Intel CPU. Currently the only other option is AMD with SKINIT
+         * and secure-kernel-loader.
+         */
+        struct slr_table *slrt;
+        struct slr_entry_dl_info *dl_info;
+
+        const uint16_t *sl_header = (void *)slaunch_param;
+        /*
+         * The fourth 16-bit integer of SKL's header is an offset to
+         * bootloader's data, which is SLRT.
+         */
+        result->slrt_pa = slaunch_param + sl_header[3];
+        slrt = (struct slr_table *)result->slrt_pa;
+
+        result->mbi_pa = 0;
+        dl_info = (struct slr_entry_dl_info *)
+            slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_DL_INFO);
+        /* Basic checks only, SKL checked and consumed the rest. */
+        if ( dl_info != NULL
+             && dl_info->hdr.size == sizeof(*dl_info)
+             && dl_info->bl_context.bootloader == SLR_BOOTLOADER_GRUB )
+            result->mbi_pa = dl_info->bl_context.context;
+
+        return;
+    }
+
     /* Clear the TXT error registers for a clean start of day */
     write_txt_reg(TXTCR_ERRORCODE, 0);