
Commit 1fec416

tlendacky authored and gregkh committed
x86/sev: Evict cache lines during SNP memory validation
Commit 7b306df upstream.

An SNP cache coherency vulnerability requires a cache line eviction mitigation when validating memory after a page state change to private. The specific mitigation is to touch the first and last byte of each 4K page that is being validated. There is no need to perform the mitigation when performing a page state change to shared and rescinding validation.

CPUID Fn8000001F_EBX[31] defines the COHERENCY_SFW_NO bit that, when set, indicates that the software mitigation for this vulnerability is not needed.

Implement the mitigation and invoke it when validating memory (making it private) and the COHERENCY_SFW_NO bit is not set, indicating the SNP guest is vulnerable.

Co-developed-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent a47974c commit 1fec416
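For illustration, the mitigation described in the message reduces to two steps: check CPUID Fn8000001F_EBX[31] (COHERENCY_SFW_NO), and if that bit is clear, read the first and last byte of every 4K page that was just validated. Below is a minimal user-space sketch of that idea, not the kernel implementation (see the sev_evict_cache() and get_cpuflags() hunks further down); the names EXAMPLE_PAGE_SIZE, coherency_sfw_no() and evict_cache_example() are made up for the example, and it assumes a compiler that provides cpuid.h.

#include <cpuid.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SIZE 4096

/* CPUID Fn8000001F_EBX[31]: when set, the software mitigation is not needed. */
static int coherency_sfw_no(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx))
                return 0;
        return !!(ebx & (1u << 31));
}

/*
 * Read the first and last byte of each 4K page. Per the commit message,
 * touching those two bytes is the cache eviction mitigation for a page
 * that was just validated (made private).
 */
static void evict_cache_example(void *va, int npages)
{
        volatile uint8_t val;
        uint8_t *bytes = va;
        int i;

        for (i = 0; i < npages; i++) {
                val = bytes[i * EXAMPLE_PAGE_SIZE];
                val = bytes[i * EXAMPLE_PAGE_SIZE + EXAMPLE_PAGE_SIZE - 1];
        }
        (void)val;
}

A caller would perform these reads only after a page state change to private and skip them when coherency_sfw_no() returns non-zero, mirroring the validate && !cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO) check in the arch/x86/kernel/sev.c hunk below.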

6 files changed

Lines changed: 50 additions & 1 deletion


arch/x86/boot/compressed/sev.c

Lines changed: 7 additions & 0 deletions
@@ -164,6 +164,13 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
 	 */
 	if (op == SNP_PAGE_STATE_PRIVATE && pvalidate(paddr, RMP_PG_SIZE_4K, 1))
 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+
+	/*
+	 * If validating memory (making it private) and affected by the
+	 * cache-coherency vulnerability, perform the cache eviction mitigation.
+	 */
+	if (op == SNP_PAGE_STATE_PRIVATE && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
+		sev_evict_cache((void *)paddr, 1);
 }
 
 void snp_set_page_private(unsigned long paddr)

arch/x86/boot/cpuflags.c

Lines changed: 13 additions & 0 deletions
@@ -124,5 +124,18 @@ void get_cpuflags(void)
 			cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
 			      &cpu.flags[1]);
 		}
+
+		if (max_amd_level >= 0x8000001f) {
+			u32 ebx;
+
+			/*
+			 * The X86_FEATURE_COHERENCY_SFW_NO feature bit is in
+			 * the virtualization flags entry (word 8) and set by
+			 * scattered.c, so the bit needs to be explicitly set.
+			 */
+			cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
+			if (ebx & BIT(31))
+				set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
+		}
 	}
 }

arch/x86/include/asm/cpufeatures.h

Lines changed: 1 addition & 0 deletions
@@ -230,6 +230,7 @@
 #define X86_FEATURE_FLEXPRIORITY	( 8*32+ 2) /* Intel FlexPriority */
 #define X86_FEATURE_EPT			( 8*32+ 3) /* Intel Extended Page Table */
 #define X86_FEATURE_VPID		( 8*32+ 4) /* Intel Virtual Processor ID */
+#define X86_FEATURE_COHERENCY_SFW_NO	( 8*32+ 5) /* "" SNP cache coherency software work around not needed */
 
 #define X86_FEATURE_VMMCALL		( 8*32+15) /* Prefer VMMCALL to VMCALL */
 #define X86_FEATURE_XENPV		( 8*32+16) /* "" Xen paravirtual guest */

arch/x86/kernel/cpu/scattered.c

Lines changed: 1 addition & 0 deletions
@@ -45,6 +45,7 @@ static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_CPB,		CPUID_EDX,  9, 0x80000007, 0 },
 	{ X86_FEATURE_PROC_FEEDBACK,	CPUID_EDX, 11, 0x80000007, 0 },
 	{ X86_FEATURE_MBA,		CPUID_EBX,  6, 0x80000008, 0 },
+	{ X86_FEATURE_COHERENCY_SFW_NO, CPUID_EBX, 31, 0x8000001f, 0 },
 	{ X86_FEATURE_TSA_SQ_NO,	CPUID_ECX,  1, 0x80000021, 0 },
 	{ X86_FEATURE_TSA_L1_NO,	CPUID_ECX,  2, 0x80000021, 0 },
 	{ X86_FEATURE_PERFMON_V2,	CPUID_EAX,  0, 0x80000022, 0 },

arch/x86/kernel/sev-shared.c

Lines changed: 18 additions & 0 deletions
@@ -1064,3 +1064,21 @@ static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
 			RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
 	}
 }
+
+static inline void sev_evict_cache(void *va, int npages)
+{
+	volatile u8 val __always_unused;
+	u8 *bytes = va;
+	int page_idx;
+
+	/*
+	 * For SEV guests, a read from the first/last cache-lines of a 4K page
+	 * using the guest key is sufficient to cause a flush of all cache-lines
+	 * associated with that 4K page without incurring all the overhead of a
+	 * full CLFLUSH sequence.
+	 */
+	for (page_idx = 0; page_idx < npages; page_idx++) {
+		val = bytes[page_idx * PAGE_SIZE];
+		val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
+	}
+}

arch/x86/kernel/sev.c

Lines changed: 10 additions & 1 deletion
@@ -676,10 +676,12 @@ static u64 __init get_jump_table_addr(void)
 
 static void pvalidate_pages(unsigned long vaddr, unsigned long npages, bool validate)
 {
-	unsigned long vaddr_end;
+	unsigned long vaddr_begin, vaddr_end;
 	int rc;
 
 	vaddr = vaddr & PAGE_MASK;
+
+	vaddr_begin = vaddr;
 	vaddr_end = vaddr + (npages << PAGE_SHIFT);
 
 	while (vaddr < vaddr_end) {
@@ -689,6 +691,13 @@ static void pvalidate_pages(unsigned long vaddr, unsigned long npages, bool vali
 
 		vaddr = vaddr + PAGE_SIZE;
 	}
+
+	/*
+	 * If validating memory (making it private) and affected by the
+	 * cache-coherency vulnerability, perform the cache eviction mitigation.
+	 */
+	if (validate && !cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
+		sev_evict_cache((void *)vaddr_begin, npages);
 }
 
 static void __head early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
