iommu: leave sizing calculations to domain->ops->map

Running a loop and handling the sizing calculations in iommu_map()
causes a severe performance hit. In any case, the individual IOMMU
drivers already take care of sizing themselves, so hand this task
off to ops->map.
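
To illustrate, a minimal sketch of the kind of driver ->map callback
that already handles sizing internally is shown below. Everything in
it (the example_* names and the 4K/1M block choice) is hypothetical
and not taken from the msm_iommu driver:

/* hypothetical stand-in for the driver's real page table write */
static void example_install_pte(struct iommu_domain *domain,
                                unsigned long iova, phys_addr_t paddr,
                                size_t pgsize, int prot)
{
        /* write one PTE of size 'pgsize' covering iova -> paddr */
}

/*
 * Hypothetical driver ->map callback: it walks the requested range
 * itself and picks a larger block size whenever the alignment of
 * iova, paddr and the remaining size allows it, which is what makes
 * the generic splitting loop in iommu_map() redundant.
 */
static int example_iommu_map(struct iommu_domain *domain, unsigned long iova,
                             phys_addr_t paddr, size_t size, int prot)
{
        while (size) {
                size_t pgsize = SZ_4K;

                /* use a 1M block when everything is 1M aligned */
                if (!((iova | paddr | size) & (SZ_1M - 1)))
                        pgsize = SZ_1M;

                example_install_pte(domain, iova, paddr, pgsize, prot);

                iova  += pgsize;
                paddr += pgsize;
                size  -= pgsize;
        }

        return 0;
}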

Some numbers were collected to show the benefit of removing the
redundant code. These numbers were collected with the ops defined
by the msm_iommu driver.

Default:
    size       iommu_map  iommu_unmap

      4K            6 us         2 us
     64K           11 us         0 us
      2M          436 us         3 us
     12M         1829 us        11 us
     20M         2843 us        17 us

With this patch:
    size       iommu_map  iommu_unmap

      4K            6 us         3 us
     64K            3 us         0 us
      2M           38 us         2 us
     12M          214 us        11 us
     20M          351 us        18 us

Change-Id: Id4e3cb4070cc6ec4d94d8323225fa5625804df4c
Signed-off-by: Chintan Pandya <cpandya@codeaurora.org>


@@ -785,78 +785,12 @@ EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
               phys_addr_t paddr, size_t size, int prot)
 {
-        unsigned long orig_iova = iova;
-        unsigned int min_pagesz;
-        size_t orig_size = size;
-        int ret = 0;
-
-        if (unlikely(domain->ops->unmap == NULL ||
-                     domain->ops->pgsize_bitmap == 0UL))
+        if (unlikely(domain->ops->map_range == NULL))
                 return -ENODEV;
 
-        /* find out the minimum page size supported */
-        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+        BUG_ON((iova | paddr | size) & (~PAGE_MASK));
 
-        /*
-         * both the virtual address and the physical one, as well as
-         * the size of the mapping, must be aligned (at least) to the
-         * size of the smallest page supported by the hardware
-         */
-        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-                pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
-                        "0x%x\n", iova, (unsigned long)paddr,
-                        (unsigned long)size, min_pagesz);
-                return -EINVAL;
-        }
-
-        pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
-                        (unsigned long)paddr, (unsigned long)size);
-
-        while (size) {
-                unsigned long pgsize, addr_merge = iova | paddr;
-                unsigned int pgsize_idx;
-
-                /* Max page size that still fits into 'size' */
-                pgsize_idx = __fls(size);
-
-                /* need to consider alignment requirements ? */
-                if (likely(addr_merge)) {
-                        /* Max page size allowed by both iova and paddr */
-                        unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-                        pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-                }
-
-                /* build a mask of acceptable page sizes */
-                pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
-                /* throw away page sizes not supported by the hardware */
-                pgsize &= domain->ops->pgsize_bitmap;
-
-                /* make sure we're still sane */
-                BUG_ON(!pgsize);
-
-                /* pick the biggest page */
-                pgsize_idx = __fls(pgsize);
-                pgsize = 1UL << pgsize_idx;
-
-                pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
-                        (unsigned long)paddr, pgsize);
-
-                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
-                if (ret)
-                        break;
-
-                iova += pgsize;
-                paddr += pgsize;
-                size -= pgsize;
-        }
-
-        /* unroll mapping in case something went wrong */
-        if (ret)
-                iommu_unmap(domain, orig_iova, orig_size - size);
-
-        return ret;
+        return domain->ops->map(domain, iova, paddr, size, prot);
 }
 EXPORT_SYMBOL_GPL(iommu_map);
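
For reference, the page-size selection that the removed loop performed
can be exercised on its own. The sketch below is plain userspace C; the
4K/64K/1M/16M bitmap is only an assumed example value, not read from any
driver:

#include <stdio.h>

/* assumed example bitmap: 4K | 64K | 1M | 16M */
#define PGSIZE_BITMAP (0x1000UL | 0x10000UL | 0x100000UL | 0x1000000UL)

/* index of the highest set bit, like the kernel's __fls() */
static unsigned int fls_idx(unsigned long v)
{
        return 8 * sizeof(v) - 1 - __builtin_clzl(v);
}

/*
 * Same selection the removed loop made on every iteration: the largest
 * supported page size that still fits in 'size' and respects the
 * alignment of iova and paddr.
 */
static unsigned long pick_pgsize(unsigned long iova, unsigned long paddr,
                                 unsigned long size)
{
        unsigned long addr_merge = iova | paddr;
        unsigned int pgsize_idx = fls_idx(size);
        unsigned long pgsize;

        if (addr_merge) {
                unsigned int align_idx = __builtin_ctzl(addr_merge); /* __ffs() */

                if (align_idx < pgsize_idx)
                        pgsize_idx = align_idx;
        }

        pgsize = (1UL << (pgsize_idx + 1)) - 1; /* all sizes up to the limit */
        pgsize &= PGSIZE_BITMAP;                /* keep only supported sizes */

        return 1UL << fls_idx(pgsize);          /* pick the biggest one */
}

int main(void)
{
        /* a 2M request that is only 64K aligned gets split into 64K pages */
        printf("pgsize = %#lx\n", pick_pgsize(0x10000, 0x50000, 0x200000));

        /* a fully 1M-aligned 2M request can use 1M pages */
        printf("pgsize = %#lx\n", pick_pgsize(0x100000, 0x500000, 0x200000));
        return 0;
}

Compiled with any C compiler, the two calls print pgsize = 0x10000 and
pgsize = 0x100000 respectively.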