KVM: set_memory_region: Drop user_alloc from set_memory_region()

Apart from ia64's stale KVM_SET_MEMORY_REGION support, user_alloc is only
used for sanity checks in __kvm_set_memory_region(), and those can easily
be changed to use the slot id instead.

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Takuya Yoshikawa 2013-02-27 19:43:00 +09:00 committed by Marcelo Tosatti
parent 462fce4606
commit 47ae31e257
4 changed files with 13 additions and 39 deletions
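
Why the slot id can stand in for user_alloc: the ioctl path
(kvm_vm_ioctl_set_memory_region() in virt/kvm/kvm_main.c below) only accepts
slot ids below KVM_USER_MEM_SLOTS, while the in-kernel callers that used to
pass false (such as vmx_set_tss_addr()) install private slots at or above that
bound, so the slot id already carries the same information. The standalone
sketch below illustrates the idea; it is not kernel code, and is_user_slot(),
struct region and the bound value 125 are made up for the example.

/* Illustration only: the slot-id test that replaces the user_alloc flag. */
#include <stdbool.h>
#include <stdio.h>

#define KVM_USER_MEM_SLOTS 125  /* illustrative bound, not the real config value */

struct region {
    unsigned int slot;
};

/* Stands in for the dropped user_alloc argument. */
static bool is_user_slot(const struct region *mem)
{
    return mem->slot < KVM_USER_MEM_SLOTS;
}

int main(void)
{
    struct region from_ioctl  = { .slot = 3 };                   /* userspace-installed slot */
    struct region tss_private = { .slot = KVM_USER_MEM_SLOTS };  /* kernel-internal slot */

    printf("ioctl slot:   user_alloc equivalent = %d\n", is_user_slot(&from_ioctl));
    printf("private slot: user_alloc equivalent = %d\n", is_user_slot(&tss_private));
    return 0;
}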

arch/ia64/kvm/kvm-ia64.c

@@ -942,24 +942,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
    int r = -ENOTTY;

    switch (ioctl) {
-   case KVM_SET_MEMORY_REGION: {
-       struct kvm_memory_region kvm_mem;
-       struct kvm_userspace_memory_region kvm_userspace_mem;
-
-       r = -EFAULT;
-       if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
-           goto out;
-       kvm_userspace_mem.slot = kvm_mem.slot;
-       kvm_userspace_mem.flags = kvm_mem.flags;
-       kvm_userspace_mem.guest_phys_addr =
-                   kvm_mem.guest_phys_addr;
-       kvm_userspace_mem.memory_size = kvm_mem.memory_size;
-       r = kvm_vm_ioctl_set_memory_region(kvm,
-                   &kvm_userspace_mem, false);
-       if (r)
-           goto out;
-       break;
-   }
    case KVM_CREATE_IRQCHIP:
        r = -EFAULT;
        r = kvm_ioapic_init(kvm);

arch/x86/kvm/vmx.c

@@ -3694,7 +3694,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
    kvm_userspace_mem.flags = 0;
    kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
    kvm_userspace_mem.memory_size = PAGE_SIZE;
-   r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false);
+   r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
    if (r)
        goto out;

@@ -3724,7 +3724,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
    kvm_userspace_mem.guest_phys_addr =
        kvm->arch.ept_identity_map_addr;
    kvm_userspace_mem.memory_size = PAGE_SIZE;
-   r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false);
+   r = __kvm_set_memory_region(kvm, &kvm_userspace_mem);
    if (r)
        goto out;

@@ -4364,7 +4364,7 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
        .flags = 0,
    };

-   ret = kvm_set_memory_region(kvm, &tss_mem, false);
+   ret = kvm_set_memory_region(kvm, &tss_mem);
    if (ret)
        return ret;
    kvm->arch.tss_addr = addr;

include/linux/kvm_host.h

@@ -453,11 +453,9 @@ id_to_memslot(struct kvm_memslots *slots, int id)
}

int kvm_set_memory_region(struct kvm *kvm,
-                         struct kvm_userspace_memory_region *mem,
-                         bool user_alloc);
+                         struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
-                           struct kvm_userspace_memory_region *mem,
-                           bool user_alloc);
+                           struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);

@@ -553,9 +551,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
-                                  struct
-                                  kvm_userspace_memory_region *mem,
-                                  bool user_alloc);
+                                  struct kvm_userspace_memory_region *mem);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

virt/kvm/kvm_main.c

@@ -745,8 +745,7 @@ enum kvm_mr_change {
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
-                           struct kvm_userspace_memory_region *mem,
-                           bool user_alloc)
+                           struct kvm_userspace_memory_region *mem)
{
    int r;
    gfn_t base_gfn;

@@ -767,7 +766,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
    if (mem->guest_phys_addr & (PAGE_SIZE - 1))
        goto out;
    /* We can read the guest memory with __xxx_user() later on. */
-   if (user_alloc &&
+   if ((mem->slot < KVM_USER_MEM_SLOTS) &&
        ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
         !access_ok(VERIFY_WRITE,
                    (void __user *)(unsigned long)mem->userspace_addr,

@@ -932,26 +931,23 @@ out:
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
-                         struct kvm_userspace_memory_region *mem,
-                         bool user_alloc)
+                         struct kvm_userspace_memory_region *mem)
{
    int r;

    mutex_lock(&kvm->slots_lock);
-   r = __kvm_set_memory_region(kvm, mem, user_alloc);
+   r = __kvm_set_memory_region(kvm, mem);
    mutex_unlock(&kvm->slots_lock);
    return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
-                                  struct
-                                  kvm_userspace_memory_region *mem,
-                                  bool user_alloc)
+                                  struct kvm_userspace_memory_region *mem)
{
    if (mem->slot >= KVM_USER_MEM_SLOTS)
        return -EINVAL;

-   return kvm_set_memory_region(kvm, mem, user_alloc);
+   return kvm_set_memory_region(kvm, mem);
}

int kvm_get_dirty_log(struct kvm *kvm,

@@ -2198,7 +2194,7 @@ static long kvm_vm_ioctl(struct file *filp,
                       sizeof kvm_userspace_mem))
            goto out;

-       r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, true);
+       r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
        break;
    }
    case KVM_GET_DIRTY_LOG: {
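
For context only (not part of this patch): a minimal user-space sketch of how
a VMM reaches the path patched above, assuming a Linux host with /dev/kvm and
<linux/kvm.h>; no vCPU is created and error handling is reduced to early
returns. The slot id passed here must stay below KVM_USER_MEM_SLOTS, or
kvm_vm_ioctl_set_memory_region() returns -EINVAL as shown in the diff.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm_fd = open("/dev/kvm", O_RDWR);
    if (kvm_fd < 0)
        return 1;

    int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
    if (vm_fd < 0)
        return 1;

    /* Page-aligned backing memory; __kvm_set_memory_region() checks alignment. */
    void *backing = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (backing == MAP_FAILED)
        return 1;

    struct kvm_userspace_memory_region region = {
        .slot            = 0,          /* must be < KVM_USER_MEM_SLOTS */
        .flags           = 0,
        .guest_phys_addr = 0x0,
        .memory_size     = 0x1000,
        .userspace_addr  = (unsigned long)backing,
    };

    /* Travels through kvm_vm_ioctl() to kvm_vm_ioctl_set_memory_region(). */
    if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0) {
        perror("KVM_SET_USER_MEMORY_REGION");
        return 1;
    }
    return 0;
}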