Mirror of https://github.com/followmsi/android_kernel_google_msm.git (synced 2024-11-06 23:17:41 +00:00)
kvm: fix excessive pages un-pinning in kvm_iommu_map error path.
commit 3d32e4dbe7 upstream.

The third parameter of kvm_unpin_pages() when called from kvm_iommu_map_pages() is wrong: it should be the number of pages to un-pin, not the page size.

This error was facilitated by an inconsistent API: kvm_pin_pages() takes a size, but kvm_unpin_pages() takes a number of pages, so fix the problem by matching the two.

This was introduced by commit 350b8bd ("kvm: iommu: fix the third parameter of kvm_iommu_put_pages (CVE-2014-3601)"), which fixes the lack of un-pinning for pages intended to be un-pinned (i.e. a memory leak) but unfortunately potentially aggravated the number of pages we un-pin that should have stayed pinned. As far as I understand though, the same practical mitigations apply.

This issue was found during review of Red Hat 6.6 patches to prepare Ksplice rebootless updates.

Thanks to Vegard for his time on a late Friday evening to help me in understanding this code.

Fixes: 350b8bd ("kvm: iommu: fix the third parameter of... (CVE-2014-3601)")
Signed-off-by: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Signed-off-by: Vegard Nossum <vegard.nossum@oracle.com>
Signed-off-by: Jamie Iles <jamie.iles@oracle.com>
Reviewed-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[lizf: Backported to 3.4: adjust context]
Signed-off-by: Zefan Li <lizefan@huawei.com>
This commit is contained in:
parent c7ea5dfa84
commit ee7b626722
1 changed file with 4 additions and 4 deletions
virt/kvm/iommu.c
@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 				gfn_t base_gfn, unsigned long npages);
 
 static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
-			   gfn_t gfn, unsigned long size)
+			   gfn_t gfn, unsigned long npages)
 {
 	gfn_t end_gfn;
 	pfn_t pfn;
 
 	pfn     = gfn_to_pfn_memslot(kvm, slot, gfn);
-	end_gfn = gfn + (size >> PAGE_SHIFT);
+	end_gfn = gfn + npages;
 	gfn    += 1;
 
 	if (is_error_pfn(pfn))
@@ -117,7 +117,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 	 * Pin all pages we are about to map in memory. This is
 	 * important because we unmap and unpin in 4kb steps later.
 	 */
-	pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
+	pfn = kvm_pin_pages(kvm, slot, gfn, page_size >> PAGE_SHIFT);
 	if (is_error_pfn(pfn)) {
 		gfn += 1;
 		continue;
@@ -129,7 +129,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 		if (r) {
 			printk(KERN_ERR "kvm_iommu_map_address:"
 			       "iommu failed to map pfn=%llx\n", pfn);
-			kvm_unpin_pages(kvm, pfn, page_size);
+			kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
 			goto unmap_pages;
 		}
 
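For context on why the third argument must be a page count: the un-pin path releases the pinned range one 4 KiB page at a time, mirroring the "we unmap and unpin in 4kb steps later" comment in the diff above. Below is a minimal, stand-alone sketch of that shape; pfn_t, struct kvm and kvm_release_pfn_clean() are stubbed so the sketch compiles outside the kernel and are not the real definitions.

#include <stdio.h>

/* Stand-in declarations so this sketch builds outside the kernel tree;
 * the real types and helpers come from the KVM headers, not from here. */
typedef unsigned long long pfn_t;
struct kvm;

static void kvm_release_pfn_clean(pfn_t pfn)
{
	/* Stub: the real helper drops the reference taken when the page was pinned. */
	printf("released pfn 0x%llx\n", pfn);
}

/* Approximate shape of the un-pin helper after the API is made symmetric with
 * kvm_pin_pages(): the third argument counts pages, not bytes. */
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
	unsigned long i;

	(void)kvm;
	for (i = 0; i < npages; ++i)
		kvm_release_pfn_clean(pfn + i);
}

int main(void)
{
	/* Un-pin a 4-page range; for a 2 MiB mapping this count would be 512,
	 * whereas passing the byte size would have requested 2097152 un-pins. */
	kvm_unpin_pages(NULL, 0x1000ULL, 4);
	return 0;
}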