mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-11-07 04:09:21 +00:00
KVM: arm/arm64: Close VMID generation race
commit f0cf47d939d0b4b4f660c5aaa4276fa3488f3391 upstream. Before entering the guest, we check whether our VMID is still part of the current generation. In order to avoid taking a lock, we start with checking that the generation is still current, and only if not current do we take the lock, recheck, and update the generation and VMID. This leaves open a small race: A vcpu can bump up the global generation number as well as the VM's, but has not updated the VMID itself yet. At that point another vcpu from the same VM comes in, checks the generation (and finds it not needing anything), and jumps into the guest. At this point, we end up with two vcpus belonging to the same VM running with two different VMIDs. Eventually, the VMID used by the second vcpu will get reassigned, and things will really go wrong... A simple solution would be to drop this initial check, and always take the lock. This is likely to cause performance issues. A middle ground is to convert the spinlock to a rwlock, and only take the read lock on the fast path. If the check fails at that point, drop it and acquire the write lock, rechecking the condition. This ensures that the above scenario doesn't occur. Reported-by: Mark Rutland <mark.rutland@arm.com> Tested-by: Shannon Zhao <zhaoshenglong@huawei.com> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com> [bwh: Backported to 3.16: adjust filename, context] Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
This commit is contained in:
parent
b2373da5e6
commit
aeea6f0c5d
1 changed file with 10 additions and 5 deletions
|
@ -59,7 +59,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
|
||||||
/* The VMID used in the VTTBR */
|
/* The VMID used in the VTTBR */
|
||||||
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
|
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
|
||||||
static u8 kvm_next_vmid;
|
static u8 kvm_next_vmid;
|
||||||
static DEFINE_SPINLOCK(kvm_vmid_lock);
|
static DEFINE_RWLOCK(kvm_vmid_lock);
|
||||||
|
|
||||||
static bool vgic_present;
|
static bool vgic_present;
|
||||||
|
|
||||||
|
@ -408,11 +408,16 @@ static void update_vttbr(struct kvm *kvm)
|
||||||
{
|
{
|
||||||
phys_addr_t pgd_phys;
|
phys_addr_t pgd_phys;
|
||||||
u64 vmid;
|
u64 vmid;
|
||||||
|
bool new_gen;
|
||||||
|
|
||||||
if (!need_new_vmid_gen(kvm))
|
read_lock(&kvm_vmid_lock);
|
||||||
|
new_gen = need_new_vmid_gen(kvm);
|
||||||
|
read_unlock(&kvm_vmid_lock);
|
||||||
|
|
||||||
|
if (!new_gen)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
spin_lock(&kvm_vmid_lock);
|
write_lock(&kvm_vmid_lock);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We need to re-check the vmid_gen here to ensure that if another vcpu
|
* We need to re-check the vmid_gen here to ensure that if another vcpu
|
||||||
|
@ -420,7 +425,7 @@ static void update_vttbr(struct kvm *kvm)
|
||||||
* use the same vmid.
|
* use the same vmid.
|
||||||
*/
|
*/
|
||||||
if (!need_new_vmid_gen(kvm)) {
|
if (!need_new_vmid_gen(kvm)) {
|
||||||
spin_unlock(&kvm_vmid_lock);
|
write_unlock(&kvm_vmid_lock);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -453,7 +458,7 @@ static void update_vttbr(struct kvm *kvm)
|
||||||
kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
|
kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
|
||||||
kvm->arch.vttbr |= vmid;
|
kvm->arch.vttbr |= vmid;
|
||||||
|
|
||||||
spin_unlock(&kvm_vmid_lock);
|
write_unlock(&kvm_vmid_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
|
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
|
||||||
|
|
Loading…
Reference in a new issue