KVM: split kvm_arch_set_memory_region into prepare and commit
Required for SRCU conversion later.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
matosatti committed Mar 1, 2010
1 parent fef9cce commit f7784b8
Showing 6 changed files with 82 additions and 47 deletions.
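The point of the split is easiest to see in the virt/kvm/kvm_main.c hunk at the end: the arch hook that can fail now runs before the new slot is installed, and the arch work that must run afterwards becomes a void commit hook, so the old rollback path (*memslot = old) goes away. The sketch below is a small, compilable toy model of that flow, not kernel code; the struct layouts, the set_memory_region() driver, and the 4 KiB page-shift are simplified stand-ins for illustration only.

/* Toy model of the prepare/commit split (not kernel code). */
#include <stdio.h>

struct kvm { int nmemslots; };
struct kvm_memory_slot { unsigned long base_gfn, npages, userspace_addr; };
struct kvm_userspace_memory_region { unsigned int slot; unsigned long memory_size; };

/* Fallible half: validate/allocate before anything becomes visible. */
static int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                          struct kvm_memory_slot *memslot,
                                          struct kvm_memory_slot old,
                                          struct kvm_userspace_memory_region *mem,
                                          int user_alloc)
{
    (void)kvm; (void)memslot; (void)old; (void)mem; (void)user_alloc;
    return 0;   /* e.g. powerpc: nothing to check */
}

/* Infallible half: flush/notify after the new slot is already installed. */
static void kvm_arch_commit_memory_region(struct kvm *kvm,
                                          struct kvm_userspace_memory_region *mem,
                                          struct kvm_memory_slot old,
                                          int user_alloc)
{
    (void)kvm; (void)mem; (void)old; (void)user_alloc;
}

/* Simplified analogue of __kvm_set_memory_region() after this patch. */
static int set_memory_region(struct kvm *kvm, struct kvm_memory_slot *slots,
                             struct kvm_userspace_memory_region *mem,
                             int user_alloc)
{
    struct kvm_memory_slot old = slots[mem->slot];
    struct kvm_memory_slot new = old;
    int r;

    new.npages = mem->memory_size >> 12;    /* assumes 4 KiB pages */

    r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
    if (r)
        return r;       /* slot not published yet: nothing to roll back */

    slots[mem->slot] = new;     /* publish (under mmu_lock in the real code) */

    kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
    return 0;
}

int main(void)
{
    struct kvm kvm = { .nmemslots = 1 };
    struct kvm_memory_slot slots[1] = { { 0, 0, 0 } };
    struct kvm_userspace_memory_region mem = { .slot = 0, .memory_size = 1UL << 20 };

    printf("set_memory_region -> %d\n",
           set_memory_region(&kvm, slots, &mem, 1));
    return 0;
}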
16 changes: 12 additions & 4 deletions arch/ia64/kvm/kvm-ia64.c
@@ -1578,15 +1578,15 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
     return r;
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-        struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+        struct kvm_memory_slot *memslot,
         struct kvm_memory_slot old,
+        struct kvm_userspace_memory_region *mem,
         int user_alloc)
 {
     unsigned long i;
     unsigned long pfn;
-    int npages = mem->memory_size >> PAGE_SHIFT;
-    struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+    int npages = memslot->npages;
     unsigned long base_gfn = memslot->base_gfn;
 
     if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
@@ -1610,6 +1610,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
     return 0;
 }
 
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+        struct kvm_userspace_memory_region *mem,
+        struct kvm_memory_slot old,
+        int user_alloc)
+{
+    return;
+}
+
 void kvm_arch_flush_shadow(struct kvm *kvm)
 {
     kvm_flush_remote_tlbs(kvm);
18 changes: 14 additions & 4 deletions arch/powerpc/kvm/powerpc.c
@@ -165,14 +165,24 @@ long kvm_arch_dev_ioctl(struct file *filp,
     return -EINVAL;
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-                               struct kvm_userspace_memory_region *mem,
-                               struct kvm_memory_slot old,
-                               int user_alloc)
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                   struct kvm_memory_slot *memslot,
+                                   struct kvm_memory_slot old,
+                                   struct kvm_userspace_memory_region *mem,
+                                   int user_alloc)
 {
     return 0;
 }
 
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+        struct kvm_userspace_memory_region *mem,
+        struct kvm_memory_slot old,
+        int user_alloc)
+{
+    return;
+}
+
+
 void kvm_arch_flush_shadow(struct kvm *kvm)
 {
 }
25 changes: 16 additions & 9 deletions arch/s390/kvm/kvm-s390.c
@@ -690,14 +690,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 }
 
 /* Section: memory related */
-int kvm_arch_set_memory_region(struct kvm *kvm,
-                struct kvm_userspace_memory_region *mem,
-                struct kvm_memory_slot old,
-                int user_alloc)
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                   struct kvm_memory_slot *memslot,
+                   struct kvm_memory_slot old,
+                   struct kvm_userspace_memory_region *mem,
+                   int user_alloc)
 {
-    int i;
-    struct kvm_vcpu *vcpu;
-
     /* A few sanity checks. We can have exactly one memory slot which has
        to start at guest virtual zero and which has to be located at a
        page boundary in userland and which has to end at a page boundary.
@@ -720,14 +718,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
     if (!user_alloc)
         return -EINVAL;
 
+    return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                struct kvm_userspace_memory_region *mem,
+                struct kvm_memory_slot old,
+                int user_alloc)
+{
+    int i;
+    struct kvm_vcpu *vcpu;
+
     /* request update of sie control block for all available vcpus */
     kvm_for_each_vcpu(i, vcpu, kvm) {
         if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
             continue;
         kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
     }
-
-    return 0;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
51 changes: 29 additions & 22 deletions arch/x86/kvm/x86.c
@@ -5228,13 +5228,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
     kfree(kvm);
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-                struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                struct kvm_memory_slot *memslot,
                 struct kvm_memory_slot old,
+                struct kvm_userspace_memory_region *mem,
                 int user_alloc)
 {
-    int npages = mem->memory_size >> PAGE_SHIFT;
-    struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot];
+    int npages = memslot->npages;
 
     /*To keep backward compatibility with older userspace,
      *x86 needs to hanlde !user_alloc case.
@@ -5254,26 +5254,35 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
         if (IS_ERR((void *)userspace_addr))
             return PTR_ERR((void *)userspace_addr);
 
-        /* set userspace_addr atomically for kvm_hva_to_rmapp */
-        spin_lock(&kvm->mmu_lock);
         memslot->userspace_addr = userspace_addr;
-        spin_unlock(&kvm->mmu_lock);
-    } else {
-        if (!old.user_alloc && old.rmap) {
-            int ret;
-
-            down_write(&current->mm->mmap_sem);
-            ret = do_munmap(current->mm, old.userspace_addr,
-                    old.npages * PAGE_SIZE);
-            up_write(&current->mm->mmap_sem);
-            if (ret < 0)
-                printk(KERN_WARNING
-                       "kvm_vm_ioctl_set_memory_region: "
-                       "failed to munmap memory\n");
-        }
     }
 }
 
+
+    return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                struct kvm_userspace_memory_region *mem,
+                struct kvm_memory_slot old,
+                int user_alloc)
+{
+
+    int npages = mem->memory_size >> PAGE_SHIFT;
+
+    if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
+        int ret;
+
+        down_write(&current->mm->mmap_sem);
+        ret = do_munmap(current->mm, old.userspace_addr,
+                old.npages * PAGE_SIZE);
+        up_write(&current->mm->mmap_sem);
+        if (ret < 0)
+            printk(KERN_WARNING
+                   "kvm_vm_ioctl_set_memory_region: "
+                   "failed to munmap memory\n");
+    }
+
     spin_lock(&kvm->mmu_lock);
     if (!kvm->arch.n_requested_mmu_pages) {
         unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
@@ -5282,8 +5291,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 
     kvm_mmu_slot_remove_write_access(kvm, mem->slot);
     spin_unlock(&kvm->mmu_lock);
-
-    return 0;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
7 changes: 6 additions & 1 deletion include/linux/kvm_host.h
@@ -253,7 +253,12 @@ int kvm_set_memory_region(struct kvm *kvm,
 int __kvm_set_memory_region(struct kvm *kvm,
                 struct kvm_userspace_memory_region *mem,
                 int user_alloc);
-int kvm_arch_set_memory_region(struct kvm *kvm,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                struct kvm_memory_slot *memslot,
+                struct kvm_memory_slot old,
+                struct kvm_userspace_memory_region *mem,
+                int user_alloc);
+void kvm_arch_commit_memory_region(struct kvm *kvm,
                 struct kvm_userspace_memory_region *mem,
                 struct kvm_memory_slot old,
                 int user_alloc);
12 changes: 5 additions & 7 deletions virt/kvm/kvm_main.c
@@ -663,20 +663,18 @@ int __kvm_set_memory_region(struct kvm *kvm,
     if (!npages)
         kvm_arch_flush_shadow(kvm);
 
+    r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
+    if (r)
+        goto out_free;
+
     spin_lock(&kvm->mmu_lock);
     if (mem->slot >= kvm->memslots->nmemslots)
         kvm->memslots->nmemslots = mem->slot + 1;
 
     *memslot = new;
     spin_unlock(&kvm->mmu_lock);
 
-    r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
-    if (r) {
-        spin_lock(&kvm->mmu_lock);
-        *memslot = old;
-        spin_unlock(&kvm->mmu_lock);
-        goto out_free;
-    }
+    kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
 
     kvm_free_physmem_slot(&old, npages ? &new : NULL);
     /* Slot deletion case: we have to update the current slot */
