Skip to content

Commit a1a3912

Browse files
committed
KVM: MMU: propagate alloc_workqueue failure

If kvm->arch.tdp_mmu_zap_wq cannot be created, the failure has to be
propagated up to kvm_mmu_init_vm and kvm_arch_init_vm. kvm_arch_init_vm
also has to undo all the initialization, so group all the MMU
initialization code at the beginning and handle cleaning up of
kvm_page_track_init.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent b1e34d3 commit a1a3912

File tree

5 files changed

+32
-17
lines changed

5 files changed

+32
-17
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1584,7 +1584,7 @@ void kvm_mmu_module_exit(void);
15841584

15851585
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
15861586
int kvm_mmu_create(struct kvm_vcpu *vcpu);
1587-
void kvm_mmu_init_vm(struct kvm *kvm);
1587+
int kvm_mmu_init_vm(struct kvm *kvm);
15881588
void kvm_mmu_uninit_vm(struct kvm *kvm);
15891589

15901590
void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);

arch/x86/kvm/mmu/mmu.c

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5768,17 +5768,24 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
57685768
kvm_mmu_zap_all_fast(kvm);
57695769
}
57705770

5771-
void kvm_mmu_init_vm(struct kvm *kvm)
5771+
int kvm_mmu_init_vm(struct kvm *kvm)
57725772
{
57735773
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5774+
int r;
57745775

5776+
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
5777+
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
5778+
INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
57755779
spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
57765780

5777-
kvm_mmu_init_tdp_mmu(kvm);
5781+
r = kvm_mmu_init_tdp_mmu(kvm);
5782+
if (r < 0)
5783+
return r;
57785784

57795785
node->track_write = kvm_mmu_pte_write;
57805786
node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
57815787
kvm_page_track_register_notifier(kvm, node);
5788+
return 0;
57825789
}
57835790

57845791
void kvm_mmu_uninit_vm(struct kvm *kvm)

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 10 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -14,21 +14,24 @@ static bool __read_mostly tdp_mmu_enabled = true;
1414
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
1515

1616
/* Initializes the TDP MMU for the VM, if enabled. */
17-
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
17+
int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
1818
{
19+
struct workqueue_struct *wq;
20+
1921
if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
20-
return false;
22+
return 0;
23+
24+
wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
25+
if (!wq)
26+
return -ENOMEM;
2127

2228
/* This should not be changed for the lifetime of the VM. */
2329
kvm->arch.tdp_mmu_enabled = true;
24-
2530
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
2631
spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
2732
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
28-
kvm->arch.tdp_mmu_zap_wq =
29-
alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
30-
31-
return true;
33+
kvm->arch.tdp_mmu_zap_wq = wq;
34+
return 1;
3235
}
3336

3437
/* Arbitrarily returns true so that this may be used in if statements. */

arch/x86/kvm/mmu/tdp_mmu.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
7272
u64 *spte);
7373

7474
#ifdef CONFIG_X86_64
75-
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
75+
int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
7676
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
7777
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
7878

@@ -93,7 +93,7 @@ static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
9393
return sp && is_tdp_mmu_page(sp) && sp->root_count;
9494
}
9595
#else
96-
static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
96+
static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }
9797
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
9898
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
9999
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }

arch/x86/kvm/x86.c

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -11629,12 +11629,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1162911629

1163011630
ret = kvm_page_track_init(kvm);
1163111631
if (ret)
11632-
return ret;
11632+
goto out;
11633+
11634+
ret = kvm_mmu_init_vm(kvm);
11635+
if (ret)
11636+
goto out_page_track;
1163311637

1163411638
INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
11635-
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
11636-
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
11637-
INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages);
1163811639
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
1163911640
atomic_set(&kvm->arch.noncoherent_dma_count, 0);
1164011641

@@ -11666,10 +11667,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1166611667

1166711668
kvm_apicv_init(kvm);
1166811669
kvm_hv_init_vm(kvm);
11669-
kvm_mmu_init_vm(kvm);
1167011670
kvm_xen_init_vm(kvm);
1167111671

1167211672
return static_call(kvm_x86_vm_init)(kvm);
11673+
11674+
out_page_track:
11675+
kvm_page_track_cleanup(kvm);
11676+
out:
11677+
return ret;
1167311678
}
1167411679

1167511680
int kvm_arch_post_init_vm(struct kvm *kvm)

0 commit comments

Comments (0)