Commit 1fb8739

tlendacky authored and gregkh committed
x86/sev: Evict cache lines during SNP memory validation
Commit 7b306df upstream.

An SNP cache coherency vulnerability requires a cache line eviction
mitigation when validating memory after a page state change to private.
The specific mitigation is to touch the first and last byte of each 4K
page that is being validated. There is no need to perform the mitigation
when performing a page state change to shared and rescinding validation.

CPUID Fn8000001F_EBX[31] defines the COHERENCY_SFW_NO CPUID bit that,
when set, indicates that the software mitigation for this vulnerability
is not needed.

Implement the mitigation and invoke it when validating memory (making it
private) and the COHERENCY_SFW_NO bit is not set, indicating the SNP
guest is vulnerable.

Co-developed-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent fdf6959 commit 1fb8739
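As a rough illustration of the mitigation described in the commit message, here is a minimal user-space C sketch (not part of the commit) of the CPUID probe and the first/last-byte touch pattern. The helper names coherency_sfw_no() and evict_cache_sketch() are hypothetical, the 4K page size is an assumption, and the reads only have the intended eviction effect when executed inside an affected SNP guest; the kernel's real implementation is the sev_evict_cache() helper in the sev-shared.c hunk below.

    #include <cpuid.h>	/* __get_cpuid() from GCC/Clang */
    #include <stddef.h>
    #include <stdint.h>

    #define SKETCH_PAGE_SIZE 4096	/* assumed 4K page size */

    /* Return 1 if CPUID Fn8000001F_EBX[31] (COHERENCY_SFW_NO) is set,
     * i.e. the software mitigation is not needed on this system. */
    static int coherency_sfw_no(void)
    {
    	unsigned int eax, ebx, ecx, edx;

    	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx))
    		return 0;	/* leaf unavailable: assume mitigation is needed */

    	return !!(ebx & (1u << 31));
    }

    /* Touch the first and last byte of each 4K page, mirroring the
     * pattern used by the kernel's sev_evict_cache() below. */
    static void evict_cache_sketch(void *va, int npages)
    {
    	volatile uint8_t val;
    	uint8_t *bytes = va;
    	int i;

    	for (i = 0; i < npages; i++) {
    		val = bytes[(size_t)i * SKETCH_PAGE_SIZE];
    		val = bytes[(size_t)i * SKETCH_PAGE_SIZE + SKETCH_PAGE_SIZE - 1];
    	}
    	(void)val;	/* the volatile reads are the point; silence unused warnings */
    }

A caller would invoke evict_cache_sketch() only when coherency_sfw_no() returns 0, matching the !has_cpuflag()/!cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO) guards in the hunks below.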

6 files changed

Lines changed: 68 additions & 1 deletion

arch/x86/boot/compressed/sev.c

Lines changed: 7 additions & 0 deletions
@@ -165,6 +165,13 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
 	 */
 	if (op == SNP_PAGE_STATE_PRIVATE && pvalidate(paddr, RMP_PG_SIZE_4K, 1))
 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+
+	/*
+	 * If validating memory (making it private) and affected by the
+	 * cache-coherency vulnerability, perform the cache eviction mitigation.
+	 */
+	if (op == SNP_PAGE_STATE_PRIVATE && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
+		sev_evict_cache((void *)paddr, 1);
 }
 
 void snp_set_page_private(unsigned long paddr)

arch/x86/boot/cpuflags.c

Lines changed: 13 additions & 0 deletions
@@ -115,5 +115,18 @@ void get_cpuflags(void)
 			cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
 			      &cpu.flags[1]);
 		}
+
+		if (max_amd_level >= 0x8000001f) {
+			u32 ebx;
+
+			/*
+			 * The X86_FEATURE_COHERENCY_SFW_NO feature bit is in
+			 * the virtualization flags entry (word 8) and set by
+			 * scattered.c, so the bit needs to be explicitly set.
+			 */
+			cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
+			if (ebx & BIT(31))
+				set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
+		}
 	}
 }

arch/x86/include/asm/cpufeatures.h

Lines changed: 1 addition & 0 deletions
@@ -228,6 +228,7 @@
 #define X86_FEATURE_FLEXPRIORITY	( 8*32+ 1) /* Intel FlexPriority */
 #define X86_FEATURE_EPT			( 8*32+ 2) /* Intel Extended Page Table */
 #define X86_FEATURE_VPID		( 8*32+ 3) /* Intel Virtual Processor ID */
+#define X86_FEATURE_COHERENCY_SFW_NO	( 8*32+ 4) /* "" SNP cache coherency software work around not needed */
 
 #define X86_FEATURE_VMMCALL		( 8*32+15) /* Prefer VMMCALL to VMCALL */
 #define X86_FEATURE_XENPV		( 8*32+16) /* "" Xen paravirtual guest */

arch/x86/kernel/cpu/scattered.c

Lines changed: 1 addition & 0 deletions
@@ -46,6 +46,7 @@ static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_CPB,		CPUID_EDX,  9, 0x80000007, 0 },
 	{ X86_FEATURE_PROC_FEEDBACK,	CPUID_EDX, 11, 0x80000007, 0 },
 	{ X86_FEATURE_MBA,		CPUID_EBX,  6, 0x80000008, 0 },
+	{ X86_FEATURE_COHERENCY_SFW_NO, CPUID_EBX, 31, 0x8000001f, 0 },
 	{ X86_FEATURE_SMBA,		CPUID_EBX,  2, 0x80000020, 0 },
 	{ X86_FEATURE_BMEC,		CPUID_EBX,  3, 0x80000020, 0 },
 	{ X86_FEATURE_TSA_SQ_NO,	CPUID_ECX,  1, 0x80000021, 0 },

arch/x86/kernel/sev-shared.c

Lines changed: 36 additions & 0 deletions
@@ -1068,6 +1068,24 @@ static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
 	}
 }
 
+static inline void sev_evict_cache(void *va, int npages)
+{
+	volatile u8 val __always_unused;
+	u8 *bytes = va;
+	int page_idx;
+
+	/*
+	 * For SEV guests, a read from the first/last cache-lines of a 4K page
+	 * using the guest key is sufficient to cause a flush of all cache-lines
+	 * associated with that 4K page without incurring all the overhead of a
+	 * full CLFLUSH sequence.
+	 */
+	for (page_idx = 0; page_idx < npages; page_idx++) {
+		val = bytes[page_idx * PAGE_SIZE];
+		val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
+	}
+}
+
 static void pvalidate_pages(struct snp_psc_desc *desc)
 {
 	struct psc_entry *e;
@@ -1100,6 +1118,24 @@ static void pvalidate_pages(struct snp_psc_desc *desc)
 			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
 		}
 	}
+
+	/*
+	 * If not affected by the cache-coherency vulnerability there is no need
+	 * to perform the cache eviction mitigation.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
+		return;
+
+	for (i = 0; i <= desc->hdr.end_entry; i++) {
+		e = &desc->entries[i];
+
+		/*
+		 * If validating memory (making it private) perform the cache
+		 * eviction mitigation.
+		 */
+		if (e->operation == SNP_PAGE_STATE_PRIVATE)
+			sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
+	}
 }
 
 static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)

arch/x86/kernel/sev.c

Lines changed: 10 additions & 1 deletion
@@ -688,12 +688,14 @@ static void __head
 early_set_pages_state(unsigned long vaddr, unsigned long paddr,
 		      unsigned long npages, enum psc_op op)
 {
-	unsigned long paddr_end;
+	unsigned long vaddr_begin, paddr_end;
 	u64 val;
 	int ret;
 
 	vaddr = vaddr & PAGE_MASK;
 
+	vaddr_begin = vaddr;
+
 	paddr = paddr & PAGE_MASK;
 	paddr_end = paddr + (npages << PAGE_SHIFT);
 
@@ -736,6 +738,13 @@ early_set_pages_state(unsigned long vaddr, unsigned long paddr,
 		paddr += PAGE_SIZE;
 	}
 
+	/*
+	 * If validating memory (making it private) and affected by the
+	 * cache-coherency vulnerability, perform the cache eviction mitigation.
+	 */
+	if (op == SNP_PAGE_STATE_PRIVATE && !cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
+		sev_evict_cache((void *)vaddr_begin, npages);
+
 	return;
 
 e_term:
