kvm: Convert kvm_lock to a mutex
commit 0d9ce16 upstream.

It doesn't seem as if there is any particular need for kvm_lock to be a
spinlock, so convert the lock to a mutex so that sleepable functions (in
particular cond_resched()) can be called while holding it.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Junaid Shahid authored and gregkh committed Nov 12, 2019
1 parent a991063 commit 30d8d8d
Showing 6 changed files with 28 additions and 30 deletions.
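Illustrative sketch (not part of the commit): the point of the conversion is that a mutex holder runs in a sleepable context. The hypothetical helper below assumes the usual KVM declarations (struct kvm, vm_list and kvm_lock from linux/kvm_host.h) and a made-up function name; only the locking pattern mirrors the patch. Under the old spinlock the cond_resched() call would not be allowed.

static void walk_vm_list_example(void)	/* hypothetical name, for illustration only */
{
	struct kvm *kvm;

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		/* per-VM work that may take a while */
		cond_resched();	/* may sleep: fine under a mutex, invalid under a spinlock */
	}
	mutex_unlock(&kvm_lock);
}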
4 changes: 1 addition & 3 deletions Documentation/virtual/kvm/locking.txt
@@ -15,8 +15,6 @@ The acquisition orders for mutexes are as follows:
 
 On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
 
-For spinlocks, kvm_lock is taken outside kvm->mmu_lock.
-
 Everything else is a leaf: no other lock is taken inside the critical
 sections.
 
@@ -169,7 +167,7 @@ which time it will be set using the Dirty tracking mechanism described above.
 ------------
 
 Name:		kvm_lock
-Type:		spinlock_t
+Type:		mutex
 Arch:		any
 Protects:	- vm_list
 
4 changes: 2 additions & 2 deletions arch/s390/kvm/kvm-s390.c
@@ -2110,13 +2110,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
 	if (!kvm->arch.sca)
 		goto out_err;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	sca_offset += 16;
 	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
 		sca_offset = 0;
 	kvm->arch.sca = (struct bsca_block *)
 			((char *) kvm->arch.sca + sca_offset);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	sprintf(debug_name, "kvm-%u", current->pid);
 
4 changes: 2 additions & 2 deletions arch/x86/kvm/mmu.c
@@ -5819,7 +5819,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	int nr_to_scan = sc->nr_to_scan;
 	unsigned long freed = 0;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		int idx;
@@ -5869,7 +5869,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		break;
 	}
 
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return freed;
 }
 
14 changes: 7 additions & 7 deletions arch/x86/kvm/x86.c
@@ -6529,7 +6529,7 @@ static void kvm_hyperv_tsc_notifier(void)
 	struct kvm_vcpu *vcpu;
 	int cpu;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_make_mclock_inprogress_request(kvm);
 
@@ -6555,7 +6555,7 @@ static void kvm_hyperv_tsc_notifier(void)
 
 		spin_unlock(&ka->pvclock_gtod_sync_lock);
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 }
 #endif
 
@@ -6613,17 +6613,17 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 
 	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (vcpu->cpu != freq->cpu)
 				continue;
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
-			if (vcpu->cpu != smp_processor_id())
+			if (vcpu->cpu != raw_smp_processor_id())
 				send_ipi = 1;
 		}
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	if (freq->old < freq->new && send_ipi) {
 		/*
@@ -6749,12 +6749,12 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
 	struct kvm_vcpu *vcpu;
 	int i;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_for_each_vcpu(i, vcpu, kvm)
 			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 	atomic_set(&kvm_guest_has_master_clock, 0);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 }
 
 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
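Note on the one change above that is not a plain lock substitution: kvmclock_cpufreq_notifier() also moves from smp_processor_id() to raw_smp_processor_id(). A plausible reading (an assumption; the commit message does not spell it out) is that holding a mutex leaves preemption enabled, and plain smp_processor_id() warns in preemptible context when CONFIG_DEBUG_PREEMPT is set, while the raw variant skips that check. Hypothetical sketch of the pattern, with a made-up helper name:

static bool needs_clock_update_ipi(struct kvm_vcpu *vcpu)	/* hypothetical helper */
{
	/*
	 * Preemption stays enabled while kvm_lock (now a mutex) is held,
	 * so smp_processor_id() could trip the CONFIG_DEBUG_PREEMPT
	 * "using smp_processor_id() in preemptible" check here;
	 * raw_smp_processor_id() reads the CPU number without that check.
	 */
	return vcpu->cpu != raw_smp_processor_id();
}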
2 changes: 1 addition & 1 deletion include/linux/kvm_host.h
@@ -141,7 +141,7 @@ static inline bool is_error_page(struct page *page)
 
 extern struct kmem_cache *kvm_vcpu_cache;
 
-extern spinlock_t kvm_lock;
+extern struct mutex kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_io_range {
30 changes: 15 additions & 15 deletions virt/kvm/kvm_main.c
@@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
  *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_MUTEX(kvm_lock);
 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
 
@@ -685,9 +685,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	if (r)
 		goto out_err;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	preempt_notifier_inc();
 
@@ -733,9 +733,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
 	kvm_destroy_vm_debugfs(kvm);
 	kvm_arch_sync_events(kvm);
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
 	for (i = 0; i < KVM_NR_BUSES; i++) {
 		struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
@@ -3831,13 +3831,13 @@ static int vm_stat_get(void *_offset, u64 *val)
 	u64 tmp_val;
 
 	*val = 0;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
 		*val += tmp_val;
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return 0;
 }
 
@@ -3850,12 +3850,12 @@ static int vm_stat_clear(void *_offset, u64 val)
 	if (val)
 		return -EINVAL;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vm_stat_clear_per_vm((void *)&stat_tmp, 0);
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	return 0;
 }
@@ -3870,13 +3870,13 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 	u64 tmp_val;
 
 	*val = 0;
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
 		*val += tmp_val;
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 	return 0;
 }
 
@@ -3889,12 +3889,12 @@ static int vcpu_stat_clear(void *_offset, u64 val)
 	if (val)
 		return -EINVAL;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		stat_tmp.kvm = kvm;
 		vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
 	}
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	return 0;
 }
@@ -3915,7 +3915,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 	if (!kvm_dev.this_device || !kvm)
 		return;
 
-	spin_lock(&kvm_lock);
+	mutex_lock(&kvm_lock);
 	if (type == KVM_EVENT_CREATE_VM) {
 		kvm_createvm_count++;
 		kvm_active_vms++;
@@ -3924,7 +3924,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 	}
 	created = kvm_createvm_count;
 	active = kvm_active_vms;
-	spin_unlock(&kvm_lock);
+	mutex_unlock(&kvm_lock);
 
 	env = kzalloc(sizeof(*env), GFP_KERNEL);
 	if (!env)
