Skip to content

Commit c9e12eb

Browse files
committed
x86: Secure Launch SMP bringup support
On Intel, the APs are left in a well documented state after TXT performs the late launch. Specifically they cannot have #INIT asserted on them so a standard startup via INIT/SIPI/SIPI cannot be performed. Instead the early SL stub code uses MONITOR and MWAIT to park the APs. The realmode/init.c code updates the jump address for the waiting APs with the location of the Secure Launch entry point in the rmpiggy image. The rmpiggy image is a payload contained in the kernel used to start the APs (in 16b or 32b modes). It is loaded at runtime so its location and entry point must be updated in the long jump for the waiting APs by the running kernel. As the APs are woken up by writing the monitor, the APs jump to the Secure Launch entry point in the rmpiggy which mimics what the real mode code would do then jumps to the standard rmpiggy protected mode entry point. Signed-off-by: Ross Philipson <ross.philipson@oracle.com>
1 parent 9511992 commit c9e12eb

6 files changed

Lines changed: 117 additions & 2 deletions

File tree

arch/x86/include/asm/realmode.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,9 @@ struct real_mode_header {
3838
#ifdef CONFIG_X86_64
3939
u32 machine_real_restart_seg;
4040
#endif
41+
#ifdef CONFIG_SECURE_LAUNCH
42+
u32 sl_trampoline_start32;
43+
#endif
4144
};
4245

4346
/* This must match data at realmode/rm/trampoline_{32,64}.S */

arch/x86/kernel/slaunch.c

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -507,3 +507,29 @@ void __init slaunch_setup(void)
507507
if (boot_cpu_has(X86_FEATURE_SMX))
508508
slaunch_setup_txt();
509509
}
510+
511+
/*
512+
* After a launch, the APs are woken up, enter the DRTM and are left to
513+
* wait for a wakeup call on a MONITOR address. The block where they are
514+
* idle has a long jump to the AP startup code in the mainline kernel.
515+
* This address has to be calculated at runtime and "fixed up" to point
516+
* to the SL startup location in the rmpiggy SMP startup image. This image
517+
* is loaded into separate memory at kernel start time.
518+
*/
519+
void __init slaunch_fixup_ap_wake_vector(void)
520+
{
521+
struct sl_ap_wake_info *ap_wake_info;
522+
u32 *ap_jmp_ptr;
523+
524+
if (!slaunch_is_txt_launch())
525+
return;
526+
527+
ap_wake_info = slaunch_get_ap_wake_info();
528+
529+
ap_jmp_ptr = (u32 *)__va(ap_wake_info->ap_wake_block +
530+
ap_wake_info->ap_jmp_offset);
531+
532+
*ap_jmp_ptr = real_mode_header->sl_trampoline_start32;
533+
534+
pr_info("TXT AP startup vector address updated\n");
535+
}

arch/x86/kernel/smpboot.c

Lines changed: 45 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,7 @@
6161
#include <linux/cpuhotplug.h>
6262
#include <linux/mc146818rtc.h>
6363
#include <linux/acpi.h>
64+
#include <linux/slaunch.h>
6465

6566
#include <asm/acpi.h>
6667
#include <asm/cacheinfo.h>
@@ -833,6 +834,45 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
833834
return 0;
834835
}
835836

837+
#if (IS_ENABLED(CONFIG_SECURE_LAUNCH))

/*
 * TXT AP startup is quite different than normal. The APs cannot have #INIT
 * asserted on them or receive SIPIs. The early Secure Launch code has parked
 * the APs using MONITOR/MWAIT in the safe AP wake block area (details in
 * sl_stub.S). The SMP boot will wake the APs by writing the MONITOR
 * associated with the AP and have them jump to the protected mode code in
 * the rmpiggy where the rest of the SMP boot of the AP will proceed normally.
 *
 * Intel Trusted Execution Technology (TXT) Software Development Guide
 * Section 2.3 - MLE Initialization
 */
static void slaunch_wakeup_cpu_from_txt(int cpu, int apicid)
{
	struct sl_ap_stack_and_monitor *stack_monitor;
	struct sl_ap_wake_info *ap_wake_info;
	unsigned int i;

	ap_wake_info = slaunch_get_ap_wake_info();

	/* The per-AP stack/monitor array lives inside the AP wake block. */
	stack_monitor = (struct sl_ap_stack_and_monitor *)__va(ap_wake_info->ap_wake_block +
							       ap_wake_info->ap_stacks_offset);

	/*
	 * Find the parked AP with a matching APIC ID and write its MONITOR
	 * word to wake it.
	 *
	 * BUGFIX: the original loop was
	 *     for (unsigned int i = SL_MAX_CPUS - 1; i >= 0; i--)
	 * An unsigned value is always >= 0, so the condition never fails;
	 * with no matching apicid, i-- wraps to UINT_MAX and the loop reads
	 * stack_monitor[] out of bounds forever. Use the "i-- > 0" countdown
	 * idiom, which visits indices SL_MAX_CPUS-1 .. 0 and then stops.
	 */
	for (i = SL_MAX_CPUS; i-- > 0; ) {
		if (stack_monitor[i].apicid == apicid) {
			stack_monitor[i].monitor = 1;
			break;
		}
	}
}

#else

static inline void slaunch_wakeup_cpu_from_txt(int cpu, int apicid)
{
}

#endif /* IS_ENABLED(CONFIG_SECURE_LAUNCH) */
875+
836876
/*
837877
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
838878
* (ie clustered apic addressing mode), this is a LOGICAL apic ID.
@@ -842,7 +882,7 @@ int common_cpu_up(unsigned int cpu, struct task_struct *idle)
842882
static int do_boot_cpu(u32 apicid, unsigned int cpu, struct task_struct *idle)
843883
{
844884
unsigned long start_ip = real_mode_header->trampoline_start;
845-
int ret;
885+
int ret = 0;
846886

847887
#ifdef CONFIG_X86_64
848888
/* If 64-bit wakeup method exists, use the 64-bit mode trampoline IP */
@@ -887,12 +927,15 @@ static int do_boot_cpu(u32 apicid, unsigned int cpu, struct task_struct *idle)
887927

888928
/*
889929
 * Wake up a CPU in different cases:
930+
* - Intel TXT DRTM launch uses its own method to wake the APs
890931
* - Use a method from the APIC driver if one defined, with wakeup
891932
* straight to 64-bit mode preferred over wakeup to RM.
892933
* Otherwise,
893934
* - Use an INIT boot APIC message
894935
*/
895-
if (apic->wakeup_secondary_cpu_64)
936+
if (slaunch_is_txt_launch())
937+
slaunch_wakeup_cpu_from_txt(cpu, apicid);
938+
else if (apic->wakeup_secondary_cpu_64)
896939
ret = apic->wakeup_secondary_cpu_64(apicid, start_ip, cpu);
897940
else if (apic->wakeup_secondary_cpu)
898941
ret = apic->wakeup_secondary_cpu(apicid, start_ip, cpu);

arch/x86/realmode/init.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
#include <linux/memblock.h>
55
#include <linux/cc_platform.h>
66
#include <linux/pgtable.h>
7+
#include <linux/slaunch.h>
78

89
#include <asm/set_memory.h>
910
#include <asm/realmode.h>
@@ -213,6 +214,13 @@ void __init init_real_mode(void)
213214

214215
setup_real_mode();
215216
set_real_mode_permissions();
217+
218+
/*
219+
* If Secure Launch is active, it will use the rmpiggy to do the TXT AP
220+
* startup. Secure Launch has its own entry stub in the rmpiggy and this prepares
221+
* it for SMP boot.
222+
*/
223+
slaunch_fixup_ap_wake_vector();
216224
}
217225

218226
static int __init do_init_real_mode(void)

arch/x86/realmode/rm/header.S

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,9 @@ SYM_DATA_START(real_mode_header)
3737
#ifdef CONFIG_X86_64
3838
.long __KERNEL32_CS
3939
#endif
40+
#ifdef CONFIG_SECURE_LAUNCH
41+
.long pa_sl_trampoline_start32
42+
#endif
4043
SYM_DATA_END(real_mode_header)
4144

4245
/* End signature, used to verify integrity */

arch/x86/realmode/rm/trampoline_64.S

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -122,6 +122,38 @@ SYM_CODE_END(sev_es_trampoline_start)
122122

123123
.section ".text32","ax"
124124
.code32
125+
#ifdef CONFIG_SECURE_LAUNCH
	.balign	4
/*
 * 32-bit protected-mode entry point for APs woken out of the TXT
 * MONITOR/MWAIT park. Reached via the long jump patched in by
 * slaunch_fixup_ap_wake_vector(); exported to the kernel through
 * real_mode_header->sl_trampoline_start32 (see header.S).
 */
SYM_CODE_START(sl_trampoline_start32)
	/*
	 * The early secure launch stub AP wakeup code has taken care of all
	 * the vagaries of launching out of TXT. This bit just mimics what the
	 * 16b entry code does and jumps off to the real startup_32.
	 */
	cli
	wbinvd

	/*
	 * The %ebx provided is not terribly useful since it is the physical
	 * address of tb_trampoline_start and not the base of the image.
	 * Use pa_real_mode_base, which is fixed up, to get a run time
	 * base register to use for offsets to location that do not have
	 * pa_ symbols.
	 */
	movl	$pa_real_mode_base, %ebx

	/* Grab a per-CPU stack, same serialization as the 16-bit path. */
	LOCK_AND_LOAD_REALMODE_ESP lock_pa=1

	/* Load the trampoline GDT/IDT, addressed relative to %ebx. */
	lgdt	tr_gdt(%ebx)
	lidt	tr_idt(%ebx)

	/* startup_32 expects the data-segment selector in %dx. */
	movw	$__KERNEL_DS, %dx	# Data segment descriptor

	/* Jump to where the 16b code would have jumped */
	ljmpl	$__KERNEL32_CS, $pa_startup_32
SYM_CODE_END(sl_trampoline_start32)
#endif
156+
125157
.balign 4
126158
SYM_CODE_START(startup_32)
127159
movl %edx, %ss

0 commit comments

Comments
 (0)