KVM: Let host know whether the guest can handle async PF in non-userspace context.

If the guest can detect that it is running in a non-preemptable context, it can
handle async PFs at any time, so let the host know that it may send async PFs
even when the guest cpu is not in userspace.

Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Gleb Natapov, 2010-10-14 11:22:55 +02:00; committed by Avi Kivity
commit 6adba52742 (parent 6c047cd982)
5 changed files with 11 additions and 4 deletions

Documentation/kvm/msr.txt

@@ -154,9 +154,10 @@ MSR_KVM_SYSTEM_TIME: 0x12
 MSR_KVM_ASYNC_PF_EN: 0x4b564d02
 	data: Bits 63-6 hold 64-byte aligned physical address of a
 	64 byte memory area which must be in guest RAM and must be
-	zeroed. Bits 5-1 are reserved and should be zero. Bit 0 is 1
+	zeroed. Bits 5-2 are reserved and should be zero. Bit 0 is 1
 	when asynchronous page faults are enabled on the vcpu 0 when
-	disabled.
+	disabled. Bit 1 is 1 if asynchronous page faults can be injected
+	when vcpu is in cpl == 0.
 	First 4 byte of 64 byte memory location will be written to by
 	the hypervisor at the time of asynchronous page fault (APF)

arch/x86/include/asm/kvm_host.h

@@ -422,6 +422,7 @@ struct kvm_vcpu_arch {
 		struct gfn_to_hva_cache data;
 		u64 msr_val;
 		u32 id;
+		bool send_user_only;
 	} apf;
 };

arch/x86/include/asm/kvm_para.h

@@ -38,6 +38,7 @@
 #define KVM_MAX_MMU_OP_BATCH           32
 
 #define KVM_ASYNC_PF_ENABLED			(1 << 0)
+#define KVM_ASYNC_PF_SEND_ALWAYS		(1 << 1)
 
 /* Operations for KVM_HC_MMU_OP */
 #define KVM_MMU_OP_WRITE_PTE		1

arch/x86/kernel/kvm.c

@@ -449,6 +449,9 @@ void __cpuinit kvm_guest_cpu_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
 		u64 pa = __pa(&__get_cpu_var(apf_reason));
 
+#ifdef CONFIG_PREEMPT
+		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
+#endif
 		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
 		__get_cpu_var(apf_reason).enabled = 1;
 		printk(KERN_INFO"KVM setup async PF for cpu %d\n",

arch/x86/kvm/x86.c

@@ -1429,8 +1429,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 {
 	gpa_t gpa = data & ~0x3f;
 
-	/* Bits 1:5 are resrved, Should be zero */
-	if (data & 0x3e)
+	/* Bits 2:5 are resrved, Should be zero */
+	if (data & 0x3c)
 		return 1;
 
 	vcpu->arch.apf.msr_val = data;
@@ -1444,6 +1444,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
 		return 1;
 
+	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
 	kvm_async_pf_wakeup_all(vcpu);
 	return 0;
 }
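For context only (this check is not part of the hunks above; the exact call site and helper names are illustrative), the host-side injection path can then combine the new send_user_only flag with the vcpu's current privilege level before queueing an async PF:

	/*
	 * The guest asked for userspace-only delivery and the vcpu is
	 * currently at cpl != 3, so do not inject an async PF here;
	 * fall back to handling the fault synchronously.
	 */
	if (vcpu->arch.apf.send_user_only && kvm_x86_ops->get_cpl(vcpu) != 3)
		return false;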