Commit 74b566e

Junaid Shahid authored and bonzini committed
kvm: x86: Refactor mmu_free_roots()
Extract the logic to free a root page in a separate function to avoid code
duplication in mmu_free_roots(). Also, change it to an exported function,
i.e. kvm_mmu_free_roots().

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent a780a3e commit 74b566e
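
The practical point of the export is that code outside arch/x86/kvm/mmu.c can now free the active roots directly. Below is a minimal sketch of such a caller, using only symbols that appear in this diff; the function name and call site are hypothetical, invented purely for illustration, and assume the usual KVM headers are included.

	/*
	 * Hypothetical caller outside mmu.c (illustration only, not part of
	 * this commit). kvm_mmu_free_roots() is the function exported by
	 * this patch.
	 */
	static void example_drop_shadow_roots(struct kvm_vcpu *vcpu)
	{
		/* Drop the root page(s) of the current MMU context. */
		kvm_mmu_free_roots(vcpu);

		/*
		 * After the call, root_hpa is INVALID_PAGE; this is the same
		 * invariant that kvm_mmu_unload() asserts in the diff below.
		 */
		WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
	}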

File tree: 2 files changed, +31, -34 lines


arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions

@@ -1277,6 +1277,7 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+void kvm_mmu_free_roots(struct kvm_vcpu *vcpu);
 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                            struct x86_exception *exception);
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,

arch/x86/kvm/mmu.c

Lines changed: 30 additions & 34 deletions

@@ -222,7 +222,6 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
 static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
 
 static void mmu_spte_set(u64 *sptep, u64 spte);
-static void mmu_free_roots(struct kvm_vcpu *vcpu);
 
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
 {
@@ -3342,51 +3341,48 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
         return RET_PF_RETRY;
 }
 
-
-static void mmu_free_roots(struct kvm_vcpu *vcpu)
+static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
+                               struct list_head *invalid_list)
 {
-        int i;
         struct kvm_mmu_page *sp;
-        LIST_HEAD(invalid_list);
 
-        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+        if (!VALID_PAGE(*root_hpa))
                 return;
 
-        if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL &&
-            (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL ||
-             vcpu->arch.mmu.direct_map)) {
-                hpa_t root = vcpu->arch.mmu.root_hpa;
+        sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK);
+        --sp->root_count;
+        if (!sp->root_count && sp->role.invalid)
+                kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 
-                spin_lock(&vcpu->kvm->mmu_lock);
-                sp = page_header(root);
-                --sp->root_count;
-                if (!sp->root_count && sp->role.invalid) {
-                        kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
-                        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
-                }
-                spin_unlock(&vcpu->kvm->mmu_lock);
-                vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+        *root_hpa = INVALID_PAGE;
+}
+
+void kvm_mmu_free_roots(struct kvm_vcpu *vcpu)
+{
+        int i;
+        LIST_HEAD(invalid_list);
+        struct kvm_mmu *mmu = &vcpu->arch.mmu;
+
+        if (!VALID_PAGE(mmu->root_hpa))
                 return;
-        }
 
         spin_lock(&vcpu->kvm->mmu_lock);
-        for (i = 0; i < 4; ++i) {
-                hpa_t root = vcpu->arch.mmu.pae_root[i];
 
-                if (root) {
-                        root &= PT64_BASE_ADDR_MASK;
-                        sp = page_header(root);
-                        --sp->root_count;
-                        if (!sp->root_count && sp->role.invalid)
-                                kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
-                                                         &invalid_list);
-                }
-                vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
+        if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
+            (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
+                mmu_free_root_page(vcpu->kvm, &mmu->root_hpa, &invalid_list);
+        } else {
+                for (i = 0; i < 4; ++i)
+                        if (mmu->pae_root[i] != 0)
+                                mmu_free_root_page(vcpu->kvm, &mmu->pae_root[i],
+                                                   &invalid_list);
+                mmu->root_hpa = INVALID_PAGE;
         }
+
         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
         spin_unlock(&vcpu->kvm->mmu_lock);
-        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
 
 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
 {
@@ -3950,7 +3946,7 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
 
 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
 {
-        mmu_free_roots(vcpu);
+        kvm_mmu_free_roots(vcpu);
 }
 
 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
@@ -4663,7 +4659,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load);
 
 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
 {
-        mmu_free_roots(vcpu);
+        kvm_mmu_free_roots(vcpu);
         WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
