Skip to content

Commit ee0ba4b

Browse files
Lai Jiangshan authored
and bysui committed
PVM: Make PGD preloading mandatory
The code to preload the user PGD's shadow pagetable shows improved performance. Make it mandatory and remove the module parameter, which allows simplifying a lot of the other code that checks the hw CR3s. Signed-off-by: Lai Jiangshan <jiangshan.ljs@antgroup.com> Link: #17
1 parent 24879a4 commit ee0ba4b

3 files changed

Lines changed: 41 additions & 65 deletions

File tree

arch/x86/kvm/mmu/mmu.c

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -4562,24 +4562,6 @@ static void nonpaging_init_context(struct kvm_mmu *context)
45624562
context->sync_spte = NULL;
45634563
}
45644564

4565-
static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
4566-
union kvm_mmu_page_role role)
4567-
{
4568-
struct kvm_mmu_page *sp;
4569-
4570-
if (!VALID_PAGE(root->hpa))
4571-
return false;
4572-
4573-
if (!role.direct && pgd != root->pgd)
4574-
return false;
4575-
4576-
sp = root_to_sp(root->hpa);
4577-
if (WARN_ON_ONCE(!sp))
4578-
return false;
4579-
4580-
return role.word == sp->role.word;
4581-
}
4582-
45834565
/*
45844566
* Find out if a previously cached root matching the new pgd/role is available,
45854567
* and insert the current root as the MRU in the cache.

arch/x86/kvm/mmu/spte.h

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -249,6 +249,24 @@ static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
249249
return spte_to_child_sp(root);
250250
}
251251

252+
static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
253+
union kvm_mmu_page_role role)
254+
{
255+
struct kvm_mmu_page *sp;
256+
257+
if (!VALID_PAGE(root->hpa))
258+
return false;
259+
260+
if (!role.direct && pgd != root->pgd)
261+
return false;
262+
263+
sp = root_to_sp(root->hpa);
264+
if (WARN_ON_ONCE(!sp))
265+
return false;
266+
267+
return role.word == sp->role.word;
268+
}
269+
252270
static inline bool is_mmio_spte(u64 spte)
253271
{
254272
return (spte & shadow_mmio_mask) == shadow_mmio_value &&

arch/x86/kvm/pvm/pvm.c

Lines changed: 23 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -26,16 +26,14 @@
2626
#include "trace.h"
2727
#include "x86.h"
2828
#include "pvm.h"
29+
#include "mmu/spte.h"
2930

3031
MODULE_AUTHOR("AntGroup");
3132
MODULE_LICENSE("GPL");
3233

3334
static bool __read_mostly enable_cpuid_intercept = 0;
3435
module_param_named(cpuid_intercept, enable_cpuid_intercept, bool, 0444);
3536

36-
static bool __read_mostly enable_pgtbl_preload = 0;
37-
module_param_named(pgtbl_preload, enable_pgtbl_preload, bool, 0444);
38-
3937
static bool __read_mostly is_intel;
4038

4139
static unsigned long host_idt_base;
@@ -744,53 +742,28 @@ static void pvm_flush_hwtlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
744742
put_cpu();
745743
}
746744

747-
static bool check_switch_cr3(struct vcpu_pvm *pvm, u64 switch_host_cr3)
745+
static u64 get_switch_hw_cr3(struct vcpu_pvm *pvm)
748746
{
749-
u64 root = pvm->vcpu.arch.mmu->prev_roots[0].hpa;
750-
751-
if (pvm->vcpu.arch.mmu->prev_roots[0].pgd != pvm->msr_switch_cr3)
752-
return false;
753-
if (!VALID_PAGE(root))
754-
return false;
755-
if (root != (switch_host_cr3 & CR3_ADDR_MASK))
756-
return false;
757-
758-
if (static_cpu_has(X86_FEATURE_PCID)) {
759-
if (host_pcid_owner(switch_host_cr3 & X86_CR3_PCID_MASK) != pvm)
760-
return false;
761-
if (host_pcid_root(switch_host_cr3 & X86_CR3_PCID_MASK) != root)
762-
return false;
763-
}
764-
765-
return true;
766-
}
767-
768-
static void pvm_pgtbl_preload_for_guest_with_host_pcid(struct vcpu_pvm *pvm, u64 *switch_host_cr3)
769-
{
770-
u32 host_pcid;
771-
u64 hw_cr3;
772-
u64 prev_root_hpa = pvm->vcpu.arch.mmu->prev_roots[0].hpa;
747+
struct kvm_mmu *mmu = pvm->vcpu.arch.mmu;
748+
u64 cr3 = is_smod(pvm) ? pvm->vcpu.arch.cr3 : pvm->msr_switch_cr3;
749+
int i;
773750

774-
if (enable_pgtbl_preload &&
775-
VALID_PAGE(prev_root_hpa) &&
776-
pvm->vcpu.arch.mmu->prev_roots[0].pgd == pvm->msr_switch_cr3 &&
777-
*switch_host_cr3 != pvm->msr_switch_cr3) {
778-
host_pcid = host_pcid_find(pvm, prev_root_hpa);
779-
if (host_pcid) {
780-
hw_cr3 = prev_root_hpa | host_pcid;
781-
this_cpu_write(cpu_tss_rw.tss_ex.umod_cr3, hw_cr3 | CR3_NOFLUSH);
782-
*switch_host_cr3 = hw_cr3 | CR3_NOFLUSH;
751+
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
752+
if (is_root_usable(&mmu->prev_roots[i], cr3, mmu->root_role)) {
753+
if (i != 0)
754+
swap(mmu->prev_roots[0], mmu->prev_roots[i]);
755+
return mmu->prev_roots[0].hpa;
783756
}
784757
}
785758

786-
return;
759+
return INVALID_PAGE;
787760
}
788761

789762
static void pvm_set_host_cr3_for_guest(struct vcpu_pvm *pvm)
790763
{
791764
u64 hw_cr3 = pvm->vcpu.arch.mmu->root.hpa;
792765
u64 enter_hw_cr3 = hw_cr3;
793-
u64 switch_host_cr3;
766+
u64 switch_hw_cr3 = get_switch_hw_cr3(pvm);
794767

795768
if (static_cpu_has(X86_FEATURE_PCID)) {
796769
bool flush = false;
@@ -800,21 +773,27 @@ static void pvm_set_host_cr3_for_guest(struct vcpu_pvm *pvm)
800773
if (!flush)
801774
enter_hw_cr3 |= CR3_NOFLUSH;
802775
hw_cr3 |= host_pcid | CR3_NOFLUSH;
776+
777+
if (switch_hw_cr3 != INVALID_PAGE) {
778+
host_pcid = host_pcid_find(pvm, switch_hw_cr3);
779+
if (!host_pcid)
780+
switch_hw_cr3 = INVALID_PAGE;
781+
else
782+
switch_hw_cr3 |= host_pcid | CR3_NOFLUSH;
783+
}
803784
}
804785

805786
this_cpu_write(cpu_tss_rw.tss_ex.enter_cr3, enter_hw_cr3);
806787

807788
if (is_smod(pvm)) {
808789
this_cpu_write(cpu_tss_rw.tss_ex.smod_cr3, hw_cr3);
809-
switch_host_cr3 = this_cpu_read(cpu_tss_rw.tss_ex.umod_cr3);
810-
if (static_cpu_has(X86_FEATURE_PCID))
811-
pvm_pgtbl_preload_for_guest_with_host_pcid(pvm, &switch_host_cr3);
790+
this_cpu_write(cpu_tss_rw.tss_ex.umod_cr3, switch_hw_cr3);
812791
} else {
813792
this_cpu_write(cpu_tss_rw.tss_ex.umod_cr3, hw_cr3);
814-
switch_host_cr3 = this_cpu_read(cpu_tss_rw.tss_ex.smod_cr3);
793+
this_cpu_write(cpu_tss_rw.tss_ex.smod_cr3, switch_hw_cr3);
815794
}
816795

817-
if (check_switch_cr3(pvm, switch_host_cr3))
796+
if (switch_hw_cr3 != INVALID_PAGE)
818797
pvm->switch_flags &= ~SWITCH_FLAGS_NO_DS_CR3;
819798
else
820799
pvm->switch_flags |= SWITCH_FLAGS_NO_DS_CR3;
@@ -1902,9 +1881,6 @@ static int handle_hc_load_pagetables(struct kvm_vcpu *vcpu, unsigned long flags,
19021881
if (cr4 != vcpu->arch.cr4) {
19031882
vcpu->arch.cr4 = cr4;
19041883
kvm_mmu_reset_context(vcpu);
1905-
} else if (enable_pgtbl_preload) {
1906-
// try to preload user_pgd.
1907-
kvm_mmu_new_pgd(vcpu, user_pgd);
19081884
}
19091885

19101886
kvm_mmu_new_pgd(vcpu, pgd);

0 commit comments

Comments
 (0)