mirror of
https://github.com/followmsi/android_kernel_google_msm.git
synced 2024-11-06 23:17:41 +00:00
intel-iommu: Performance improvement for dma_pte_free_pagetable()
As with other functions, batch the CPU data cache flushes and don't keep recalculating PTE addresses.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
This commit is contained in:
parent
3d7b0e4154
commit
f3a0a52fff
1 changed file with 18 additions and 11 deletions
|
@ -797,7 +797,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
|
||||||
unsigned long last_pfn)
|
unsigned long last_pfn)
|
||||||
{
|
{
|
||||||
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
|
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
|
||||||
struct dma_pte *pte;
|
struct dma_pte *first_pte, *pte;
|
||||||
int total = agaw_to_level(domain->agaw);
|
int total = agaw_to_level(domain->agaw);
|
||||||
int level;
|
int level;
|
||||||
unsigned long tmp;
|
unsigned long tmp;
|
||||||
|
@ -805,25 +805,32 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
|
||||||
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
|
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
|
||||||
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
|
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
|
||||||
|
|
||||||
/* we don't need lock here, nobody else touches the iova range */
|
/* We don't need lock here; nobody else touches the iova range */
|
||||||
level = 2;
|
level = 2;
|
||||||
while (level <= total) {
|
while (level <= total) {
|
||||||
tmp = align_to_level(start_pfn, level);
|
tmp = align_to_level(start_pfn, level);
|
||||||
|
|
||||||
/* Only clear this pte/pmd if we're asked to clear its
|
/* If we can't even clear one PTE at this level, we're done */
|
||||||
_whole_ range */
|
|
||||||
if (tmp + level_size(level) - 1 > last_pfn)
|
if (tmp + level_size(level) - 1 > last_pfn)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
while (tmp + level_size(level) - 1 <= last_pfn) {
|
while (tmp + level_size(level) - 1 <= last_pfn) {
|
||||||
pte = dma_pfn_level_pte(domain, tmp, level);
|
first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
|
||||||
if (pte) {
|
if (!pte) {
|
||||||
free_pgtable_page(
|
tmp = align_to_level(tmp + 1, level + 1);
|
||||||
phys_to_virt(dma_pte_addr(pte)));
|
continue;
|
||||||
dma_clear_pte(pte);
|
|
||||||
domain_flush_cache(domain, pte, sizeof(*pte));
|
|
||||||
}
|
}
|
||||||
tmp += level_size(level);
|
while (tmp + level_size(level) - 1 <= last_pfn &&
|
||||||
|
(unsigned long)pte >> VTD_PAGE_SHIFT ==
|
||||||
|
(unsigned long)first_pte >> VTD_PAGE_SHIFT) {
|
||||||
|
free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
|
||||||
|
dma_clear_pte(pte);
|
||||||
|
pte++;
|
||||||
|
tmp += level_size(level);
|
||||||
|
}
|
||||||
|
domain_flush_cache(domain, first_pte,
|
||||||
|
(void *)pte - (void *)first_pte);
|
||||||
|
|
||||||
}
|
}
|
||||||
level++;
|
level++;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue