Skip to content

Commit 2b6a5fb

Browse files
bp3tk0v authored and gregkh committed
x86/bugs: Rename MDS machinery to something more generic
Commit f9af88a upstream.

It will be used by other x86 mitigations. No functional changes.

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 8a7ac27 commit 2b6a5fb

8 files changed

Lines changed: 36 additions & 36 deletions

File tree

Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -157,9 +157,7 @@ This is achieved by using the otherwise unused and obsolete VERW instruction in
157157
combination with a microcode update. The microcode clears the affected CPU
158158
buffers when the VERW instruction is executed.
159159

160-
Kernel reuses the MDS function to invoke the buffer clearing:
161-
162-
mds_clear_cpu_buffers()
160+
Kernel does the buffer clearing with x86_clear_cpu_buffers().
163161

164162
On MDS affected CPUs, the kernel already invokes CPU buffer clear on
165163
kernel/userspace, hypervisor/guest and C-state (idle) transitions. No

Documentation/arch/x86/mds.rst

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ enters a C-state.
9393

9494
The kernel provides a function to invoke the buffer clearing:
9595

96-
mds_clear_cpu_buffers()
96+
x86_clear_cpu_buffers()
9797

9898
Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
9999
Other than CFLAGS.ZF, this macro doesn't clobber any registers.
@@ -185,9 +185,9 @@ Mitigation points
185185
idle clearing would be a window dressing exercise and is therefore not
186186
activated.
187187

188-
The invocation is controlled by the static key mds_idle_clear which is
189-
switched depending on the chosen mitigation mode and the SMT state of
190-
the system.
188+
The invocation is controlled by the static key cpu_buf_idle_clear which is
189+
switched depending on the chosen mitigation mode and the SMT state of the
190+
system.
191191

192192
The buffer clear is only invoked before entering the C-State to prevent
193193
that stale data from the idling CPU from spilling to the Hyper-Thread

arch/x86/entry/entry.S

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -31,20 +31,20 @@ EXPORT_SYMBOL_GPL(entry_ibpb);
3131

3232
/*
3333
* Define the VERW operand that is disguised as entry code so that
34-
* it can be referenced with KPTI enabled. This ensure VERW can be
34+
* it can be referenced with KPTI enabled. This ensures VERW can be
3535
* used late in exit-to-user path after page tables are switched.
3636
*/
3737
.pushsection .entry.text, "ax"
3838

3939
.align L1_CACHE_BYTES, 0xcc
40-
SYM_CODE_START_NOALIGN(mds_verw_sel)
40+
SYM_CODE_START_NOALIGN(x86_verw_sel)
4141
UNWIND_HINT_UNDEFINED
4242
ANNOTATE_NOENDBR
4343
.word __KERNEL_DS
4444
.align L1_CACHE_BYTES, 0xcc
45-
SYM_CODE_END(mds_verw_sel);
45+
SYM_CODE_END(x86_verw_sel);
4646
/* For KVM */
47-
EXPORT_SYMBOL_GPL(mds_verw_sel);
47+
EXPORT_SYMBOL_GPL(x86_verw_sel);
4848

4949
.popsection
5050

arch/x86/include/asm/irqflags.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -44,13 +44,13 @@ static __always_inline void native_irq_enable(void)
4444

4545
static __always_inline void native_safe_halt(void)
4646
{
47-
mds_idle_clear_cpu_buffers();
47+
x86_idle_clear_cpu_buffers();
4848
asm volatile("sti; hlt": : :"memory");
4949
}
5050

5151
static __always_inline void native_halt(void)
5252
{
53-
mds_idle_clear_cpu_buffers();
53+
x86_idle_clear_cpu_buffers();
5454
asm volatile("hlt": : :"memory");
5555
}
5656

arch/x86/include/asm/mwait.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ static __always_inline void __monitorx(const void *eax, unsigned long ecx,
4444

4545
static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
4646
{
47-
mds_idle_clear_cpu_buffers();
47+
x86_idle_clear_cpu_buffers();
4848

4949
/* "mwait %eax, %ecx;" */
5050
asm volatile(".byte 0x0f, 0x01, 0xc9;"
@@ -89,7 +89,8 @@ static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
8989

9090
static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
9191
{
92-
mds_idle_clear_cpu_buffers();
92+
x86_idle_clear_cpu_buffers();
93+
9394
/* "mwait %eax, %ecx;" */
9495
asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
9596
:: "a" (eax), "c" (ecx));

arch/x86/include/asm/nospec-branch.h

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -324,22 +324,22 @@
324324
.endm
325325

326326
/*
327-
* Macro to execute VERW instruction that mitigate transient data sampling
328-
* attacks such as MDS. On affected systems a microcode update overloaded VERW
329-
* instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
330-
*
327+
* Macro to execute VERW insns that mitigate transient data sampling
328+
* attacks such as MDS or TSA. On affected systems a microcode update
329+
* overloaded VERW insns to also clear the CPU buffers. VERW clobbers
330+
* CFLAGS.ZF.
331331
* Note: Only the memory operand variant of VERW clears the CPU buffers.
332332
*/
333333
.macro CLEAR_CPU_BUFFERS
334334
#ifdef CONFIG_X86_64
335-
ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
335+
ALTERNATIVE "", "verw x86_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
336336
#else
337337
/*
338338
* In 32bit mode, the memory operand must be a %cs reference. The data
339339
* segments may not be usable (vm86 mode), and the stack segment may not
340340
* be flat (ESPFIX32).
341341
*/
342-
ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
342+
ALTERNATIVE "", "verw %cs:x86_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
343343
#endif
344344
.endm
345345

@@ -592,24 +592,24 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
592592
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
593593
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
594594

595-
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
595+
DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
596596

597597
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
598598

599599
DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
600600

601-
extern u16 mds_verw_sel;
601+
extern u16 x86_verw_sel;
602602

603603
#include <asm/segment.h>
604604

605605
/**
606-
* mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
606+
* x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
607607
*
608608
* This uses the otherwise unused and obsolete VERW instruction in
609609
* combination with microcode which triggers a CPU buffer flush when the
610610
* instruction is executed.
611611
*/
612-
static __always_inline void mds_clear_cpu_buffers(void)
612+
static __always_inline void x86_clear_cpu_buffers(void)
613613
{
614614
static const u16 ds = __KERNEL_DS;
615615

@@ -626,14 +626,15 @@ static __always_inline void mds_clear_cpu_buffers(void)
626626
}
627627

628628
/**
629-
* mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
629+
* x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
630+
* vulnerability
630631
*
631632
* Clear CPU buffers if the corresponding static key is enabled
632633
*/
633-
static __always_inline void mds_idle_clear_cpu_buffers(void)
634+
static __always_inline void x86_idle_clear_cpu_buffers(void)
634635
{
635-
if (static_branch_likely(&mds_idle_clear))
636-
mds_clear_cpu_buffers();
636+
if (static_branch_likely(&cpu_buf_idle_clear))
637+
x86_clear_cpu_buffers();
637638
}
638639

639640
#endif /* __ASSEMBLY__ */

arch/x86/kernel/cpu/bugs.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -122,9 +122,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
122122
/* Control unconditional IBPB in switch_mm() */
123123
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
124124

125-
/* Control MDS CPU buffer clear before idling (halt, mwait) */
126-
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
127-
EXPORT_SYMBOL_GPL(mds_idle_clear);
125+
/* Control CPU buffer clear before idling (halt, mwait) */
126+
DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
127+
EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
128128

129129
/*
130130
* Controls whether l1d flush based mitigations are enabled,
@@ -445,7 +445,7 @@ static void __init mmio_select_mitigation(void)
445445
* is required irrespective of SMT state.
446446
*/
447447
if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
448-
static_branch_enable(&mds_idle_clear);
448+
static_branch_enable(&cpu_buf_idle_clear);
449449

450450
/*
451451
* Check if the system has the right microcode.
@@ -2082,10 +2082,10 @@ static void update_mds_branch_idle(void)
20822082
return;
20832083

20842084
if (sched_smt_active()) {
2085-
static_branch_enable(&mds_idle_clear);
2085+
static_branch_enable(&cpu_buf_idle_clear);
20862086
} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
20872087
(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
2088-
static_branch_disable(&mds_idle_clear);
2088+
static_branch_disable(&cpu_buf_idle_clear);
20892089
}
20902090
}
20912091

arch/x86/kvm/vmx/vmx.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7263,7 +7263,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
72637263
vmx_l1d_flush(vcpu);
72647264
else if (static_branch_unlikely(&mmio_stale_data_clear) &&
72657265
kvm_arch_has_assigned_device(vcpu->kvm))
7266-
mds_clear_cpu_buffers();
7266+
x86_clear_cpu_buffers();
72677267

72687268
vmx_disable_fb_clear(vmx);
72697269

0 commit comments

Comments (0)