drivers: iommu: Add iommu_map_sg for msm_iommu
iommu_map_sg is the newer, preferred API. Add a wrapper around the
existing map_range API for map_sg. Once all clients have been
successfully converted, map_range can be removed.

Change-Id: Ib77c86f6b12b00b2bd83a4938465dc685faea624
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
This commit is contained in:
parent
6243093b11
commit
b7c64d2138
|
@ -1069,6 +1069,28 @@ static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static size_t msm_iommu_map_sg(struct iommu_domain *domain, unsigned long va,
|
||||
struct scatterlist *sg, unsigned int nr_entries,
|
||||
int prot)
|
||||
{
|
||||
int ret, i;
|
||||
struct scatterlist *tmp;
|
||||
unsigned long len = 0;
|
||||
|
||||
/*
|
||||
* Longer term work: convert over to generic page table management
|
||||
* which means we can work on scattergather lists and the whole range
|
||||
*/
|
||||
for_each_sg(sg, tmp, nr_entries, i)
|
||||
len += tmp->length;
|
||||
|
||||
ret = msm_iommu_map_range(domain, va, sg, len, prot);
|
||||
if (ret)
|
||||
return 0;
|
||||
else
|
||||
return len;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IOMMU_LPAE
|
||||
static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
|
||||
{
|
||||
|
@ -1549,6 +1571,7 @@ static struct iommu_ops msm_iommu_ops = {
|
|||
.unmap = msm_iommu_unmap,
|
||||
.map_range = msm_iommu_map_range,
|
||||
.unmap_range = msm_iommu_unmap_range,
|
||||
.map_sg = msm_iommu_map_sg,
|
||||
.iova_to_phys = msm_iommu_iova_to_phys,
|
||||
.domain_has_cap = msm_iommu_domain_has_cap,
|
||||
.get_pt_base_addr = msm_iommu_get_pt_base_addr,
|
||||
|
|
|
@ -894,6 +894,27 @@ fail:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static size_t msm_iommu_map_sg(struct iommu_domain *domain, unsigned long va,
|
||||
struct scatterlist *sg, unsigned int nr_entries,
|
||||
int prot)
|
||||
{
|
||||
int ret, i;
|
||||
struct scatterlist *tmp;
|
||||
unsigned long len = 0;
|
||||
|
||||
/*
|
||||
* Longer term work: convert over to generic page table management
|
||||
* which means we can work on scattergather lists and the whole range
|
||||
*/
|
||||
for_each_sg(sg, tmp, nr_entries, i)
|
||||
len += tmp->length;
|
||||
|
||||
ret = msm_iommu_map_range(domain, va, sg, len, prot);
|
||||
if (ret)
|
||||
return 0;
|
||||
else
|
||||
return len;
|
||||
}
|
||||
|
||||
static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
|
||||
unsigned int len)
|
||||
|
@ -971,6 +992,7 @@ static struct iommu_ops msm_iommu_ops = {
|
|||
.map = msm_iommu_map,
|
||||
.unmap = msm_iommu_unmap,
|
||||
.map_range = msm_iommu_map_range,
|
||||
.map_sg = msm_iommu_map_sg,
|
||||
.unmap_range = msm_iommu_unmap_range,
|
||||
.iova_to_phys = msm_iommu_iova_to_phys,
|
||||
.domain_has_cap = msm_iommu_domain_has_cap,
|
||||
|
|
Loading…
Reference in New Issue