KVM: Add kernel-internal memory slots

Reserve a few memory slots for kernel-internal use.  This is useful when
you have to register a memory region and want to be sure it was not
registered from userspace, and when you want to register a memory region
that will not be visible to userspace.

Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Author: Izik Eidus, 2007-10-24 23:57:46 +02:00 (committed by Avi Kivity)
commit e0d62c7f48, parent 210c7c4d7f
2 changed files with 26 additions and 2 deletions
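
For illustration, a minimal sketch (not part of this commit) of how in-kernel code could claim one of the new private slots.  The function name, guest physical address and size below are made up for the example; the point is that kvm_set_memory_region() now accepts slot numbers up to KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS, while the KVM_SET_USER_MEMORY_REGION ioctl path still rejects anything at or above KVM_MEMORY_SLOTS, so a private slot can neither be observed nor overwritten from userspace.

/* Hypothetical in-kernel caller, illustration only; not in this commit. */
static int example_register_private_slot(struct kvm *kvm)
{
        struct kvm_userspace_memory_region region = {
                .slot            = KVM_MEMORY_SLOTS,     /* first private slot */
                .flags           = 0,
                .guest_phys_addr = 0xfffbc000ULL,        /* example GPA, page aligned */
                .memory_size     = PAGE_SIZE,
        };

        /* user_alloc == 0: the region is kernel-allocated, not user-backed. */
        return kvm_set_memory_region(kvm, &region, 0);
}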


@@ -41,6 +41,8 @@
 #define KVM_MAX_VCPUS 4
 #define KVM_ALIAS_SLOTS 4
 #define KVM_MEMORY_SLOTS 8
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
 #define KVM_PERMILLE_MMU_PAGES 20
 #define KVM_MIN_ALLOC_MMU_PAGES 64
 #define KVM_NUM_MMU_PAGES 1024
@@ -361,7 +363,8 @@ struct kvm {
        int naliases;
        struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
        int nmemslots;
-       struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
+       struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
+                                       KVM_PRIVATE_MEM_SLOTS];
        /*
         * Hash table of struct kvm_mmu_page.
         */
@@ -529,6 +532,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
+int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

 enum emulation_result {


@@ -660,7 +660,7 @@ int kvm_set_memory_region(struct kvm *kvm,
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
-       if (mem->slot >= KVM_MEMORY_SLOTS)
+       if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;
@@ -797,6 +797,8 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                          kvm_userspace_memory_region *mem,
                                          int user_alloc)
 {
+       if (mem->slot >= KVM_MEMORY_SLOTS)
+               return -EINVAL;
        return kvm_set_memory_region(kvm, mem, user_alloc);
 }
@@ -1010,6 +1012,22 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
        return __gfn_to_memslot(kvm, gfn);
 }

+int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
+{
+       int i;
+
+       gfn = unalias_gfn(kvm, gfn);
+       for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
+               struct kvm_memory_slot *memslot = &kvm->memslots[i];
+
+               if (gfn >= memslot->base_gfn
+                   && gfn < memslot->base_gfn + memslot->npages)
+                       return 1;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
        struct kvm_memory_slot *slot;
@@ -3087,6 +3105,8 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
        struct page *page;

        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+       if (!kvm_is_visible_gfn(kvm, pgoff))
+               return NOPAGE_SIGBUS;
        page = gfn_to_page(kvm, pgoff);
        if (is_error_page(page)) {
                kvm_release_page(page);
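
A further usage sketch, assumed rather than taken from this commit: kvm_is_visible_gfn() scans only the first KVM_MEMORY_SLOTS slots, so a gfn that lives in a private slot still resolves through gfn_to_memslot() for the kernel's own use yet reports as invisible, which is what lets kvm_vm_nopage() above refuse to map it for userspace.

/* Illustration only: a gfn the kernel can reach but userspace cannot see. */
static int example_gfn_is_private(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn) != NULL &&
               !kvm_is_visible_gfn(kvm, gfn);
}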