Merge "iommu: msm: make iommu map and unmap atomic"

Author: Linux Build Service Account
Date:   2015-05-03 21:16:53 -07:00, committed by Gerrit - the friendly Code Review server
commit b02aac101b
4 changed files with 133 additions and 70 deletions


@@ -47,11 +47,17 @@
 #define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
 #endif
 
-#define IOMMU_MSEC_STEP		10
-#define IOMMU_MSEC_TIMEOUT	5000
+#define IOMMU_USEC_STEP		10
+#define IOMMU_USEC_TIMEOUT	500
 
+/*
+ * msm_iommu_spin_lock protects anything that can race with map
+ * and unmap. msm_iommu_lock for everything else.
+ */
 static DEFINE_MUTEX(msm_iommu_lock);
+static DEFINE_SPINLOCK(msm_iommu_spin_lock);
+
 struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS];
 
 static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
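The new comment is the heart of the change: attach/detach and other slow paths keep the mutex, while anything that can race with map/unmap moves under a spinlock. A minimal sketch of that split, using hypothetical demo_* names rather than the driver's real functions:

	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	/* Slow paths (attach/detach, probe) may sleep: use a mutex. */
	static DEFINE_MUTEX(demo_lock);
	/* map/unmap may be called from atomic context: use a spinlock. */
	static DEFINE_SPINLOCK(demo_spin_lock);

	static int demo_map(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_spin_lock, flags);
		/* ... update page tables; nothing here may sleep ... */
		spin_unlock_irqrestore(&demo_spin_lock, flags);
		return 0;
	}

	static void demo_attach(void)
	{
		mutex_lock(&demo_lock);
		/* ... clock/regulator handling that may sleep ... */
		mutex_unlock(&demo_lock);
	}

spin_lock_irqsave() rather than plain spin_lock() keeps the fast paths safe even when a caller maps or unmaps from interrupt context.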
@@ -105,25 +111,21 @@ static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
 {
 	int ret;
 
-	ret = clk_prepare_enable(drvdata->pclk);
+	ret = clk_enable(drvdata->pclk);
 	if (ret)
 		goto fail;
 
-	ret = clk_prepare_enable(drvdata->clk);
+	ret = clk_enable(drvdata->clk);
 	if (ret)
 		goto fail1;
 
-	if (drvdata->aclk) {
-		ret = clk_prepare_enable(drvdata->aclk);
-		if (ret)
-			goto fail2;
-	}
+	ret = clk_enable(drvdata->aclk);
+	if (ret)
+		goto fail2;
 
-	if (drvdata->aiclk) {
-		ret = clk_prepare_enable(drvdata->aiclk);
-		if (ret)
-			goto fail3;
-	}
+	ret = clk_enable(drvdata->aiclk);
+	if (ret)
+		goto fail3;
 
 	if (drvdata->clk_reg_virt) {
 		unsigned int value;
@@ -137,24 +139,21 @@ static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
 	return 0;
 
 fail3:
-	if (drvdata->aclk)
-		clk_disable_unprepare(drvdata->aclk);
+	clk_disable(drvdata->aclk);
 fail2:
-	clk_disable_unprepare(drvdata->clk);
+	clk_disable(drvdata->clk);
 fail1:
-	clk_disable_unprepare(drvdata->pclk);
+	clk_disable(drvdata->pclk);
 fail:
 	return ret;
 }
 
 static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
 {
-	if (drvdata->aiclk)
-		clk_disable_unprepare(drvdata->aiclk);
-	if (drvdata->aclk)
-		clk_disable_unprepare(drvdata->aclk);
-	clk_disable_unprepare(drvdata->clk);
-	clk_disable_unprepare(drvdata->pclk);
+	clk_disable(drvdata->aiclk);
+	clk_disable(drvdata->aclk);
+	clk_disable(drvdata->clk);
+	clk_disable(drvdata->pclk);
 }
 
 static void _iommu_lock_acquire(unsigned int need_extra_lock)
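__enable_clocks()/__disable_clocks() drop the clk_prepare_enable()/clk_disable_unprepare() pairs because clk_prepare() and clk_unprepare() may sleep; the patch moves those steps to probe/remove (second file below) and leaves only the atomic-safe clk_enable()/clk_disable() on this path. The NULL checks on aclk/aiclk can go too, since the clk API treats a NULL clk as a no-op. A rough sketch of the split, with hypothetical demo_* names:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Sketch only: demo_clks is hypothetical, not the driver's struct. */
	struct demo_clks {
		struct clk *pclk;
	};

	/* Probe context (may sleep): get and prepare the clock once. */
	static int demo_probe_clocks(struct device *dev, struct demo_clks *c)
	{
		c->pclk = devm_clk_get(dev, "iface_clk");
		if (IS_ERR(c->pclk))
			return PTR_ERR(c->pclk);

		return clk_prepare(c->pclk);	/* the sleeping half, done up front */
	}

	/* Fast path (may hold a spinlock): only the atomic-safe half. */
	static int demo_clocks_on(struct demo_clks *c)
	{
		return clk_enable(c->pclk);	/* safe in atomic context */
	}

	static void demo_clocks_off(struct demo_clks *c)
	{
		clk_disable(c->pclk);		/* safe in atomic context */
	}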
@@ -177,11 +176,11 @@ struct iommu_access_ops iommu_access_ops_v1 = {
 	.iommu_lock_release = _iommu_lock_release,
 };
 
-static BLOCKING_NOTIFIER_HEAD(msm_iommu_notifier_list);
+static ATOMIC_NOTIFIER_HEAD(msm_iommu_notifier_list);
 
 void msm_iommu_register_notify(struct notifier_block *nb)
 {
-	blocking_notifier_chain_register(&msm_iommu_notifier_list, nb);
+	atomic_notifier_chain_register(&msm_iommu_notifier_list, nb);
 }
 EXPORT_SYMBOL(msm_iommu_register_notify);
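The notifier chain changes for the same reason: blocking_notifier_call_chain() takes an rwsem and may sleep, while atomic_notifier_call_chain() is RCU-based and may be invoked with spinlocks held (its callbacks must not sleep). A minimal registration/call sketch, with hypothetical demo_* names:

	#include <linux/notifier.h>

	static ATOMIC_NOTIFIER_HEAD(demo_chain);

	static int demo_event_cb(struct notifier_block *nb, unsigned long action,
				 void *data)
	{
		/* runs in atomic context: must not sleep */
		return NOTIFY_OK;
	}

	static struct notifier_block demo_nb = {
		.notifier_call = demo_event_cb,
	};

	static void demo_setup(void)
	{
		atomic_notifier_chain_register(&demo_chain, &demo_nb);
		/* safe to fire even while holding a spinlock */
		atomic_notifier_call_chain(&demo_chain, 0, NULL);
	}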
@@ -217,18 +216,14 @@ static void __dump_vbif_state(void __iomem *base, void __iomem *vbif_base)
 
 static int __check_vbif_state(struct msm_iommu_drvdata const *drvdata)
 {
-	phys_addr_t addr = (phys_addr_t) (drvdata->phys_base
-			- (phys_addr_t) 0x4000);
-	void __iomem *base = ioremap(addr, 0x1000);
 	int ret = 0;
 
-	if (base) {
-		__dump_vbif_state(drvdata->base, base);
-		__halt_vbif_xin(base);
-		__dump_vbif_state(drvdata->base, base);
-		iounmap(base);
+	if (drvdata->vbif_base) {
+		__dump_vbif_state(drvdata->base, drvdata->vbif_base);
+		__halt_vbif_xin(drvdata->vbif_base);
+		__dump_vbif_state(drvdata->base, drvdata->vbif_base);
 	} else {
-		pr_err("%s: Unable to ioremap\n", __func__);
+		pr_err("%s: failed to get vbif state\n", __func__);
 		ret = -ENOMEM;
 	}
 
 	return ret;
@@ -248,9 +243,9 @@ static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
 
 	pr_err("Checking if IOMMU halt completed for %s\n", name);
 
-	res = readl_tight_poll_timeout(
+	res = readl_poll_timeout_noirq(
 			GLB_REG(MICRO_MMU_CTRL, base), val,
-			(val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 5000000);
+			(val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 10000, 50);
 
 	if (res) {
 		pr_err("Timed out (again) waiting for IOMMU halt to complete for %s\n",
@@ -271,7 +266,7 @@ static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
 	pr_err("Timed out waiting for TLB SYNC to complete for %s (client: %s)\n",
 		name, priv->client_name);
-	blocking_notifier_call_chain(&msm_iommu_notifier_list, TLB_SYNC_TIMEOUT,
+	atomic_notifier_call_chain(&msm_iommu_notifier_list, TLB_SYNC_TIMEOUT,
 				(void *) priv->client_name);
 	res = __check_vbif_state(drvdata);
 	if (res)
@@ -279,8 +274,8 @@ static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
 
 	pr_err("Checking if TLB sync completed for %s\n", name);
 
-	res = readl_tight_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
-				(val & CB_TLBSTATUS_SACTIVE) == 0, 5000000);
+	res = readl_poll_timeout_noirq(CTX_REG(CB_TLBSTATUS, base, ctx), val,
+				(val & CB_TLBSTATUS_SACTIVE) == 0, 10000, 50);
 	if (res) {
 		pr_err("Timed out (again) waiting for TLB SYNC to complete for %s\n",
 			name);
@@ -356,8 +351,8 @@ static void __sync_tlb(struct msm_iommu_drvdata *iommu_drvdata, int ctx,
 	SET_TLBSYNC(base, ctx, 0);
 	/* No barrier needed due to read dependency */
 
-	res = readl_tight_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
-			(val & CB_TLBSTATUS_SACTIVE) == 0, 5000000);
+	res = readl_poll_timeout_noirq(CTX_REG(CB_TLBSTATUS, base, ctx), val,
+			(val & CB_TLBSTATUS_SACTIVE) == 0, 10000, 50);
 	if (res)
 		check_tlb_sync_state(iommu_drvdata, ctx, priv);
 }
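All three register polls switch from readl_tight_poll_timeout() to readl_poll_timeout_noirq(). Neither helper is a mainline macro, so the exact meaning of the new 10000/50 arguments is not visible from this diff; the pattern, though, is clearly a busy-wait with udelay() instead of a sleeping wait, so it stays legal under the new spinlock. A hand-rolled equivalent under that assumption (demo_poll_idle and the timeout/step interpretation are mine, not the macro's definition):

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/io.h>

	static int demo_poll_idle(void __iomem *reg, u32 mask, u32 want)
	{
		int budget_us = 10000;	/* assumed total budget in microseconds */

		while ((readl(reg) & mask) != want) {
			if (budget_us <= 0)
				return -ETIMEDOUT;
			udelay(50);	/* busy-wait: no msleep() while a spinlock is held */
			budget_us -= 50;
		}
		return 0;
	}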
@@ -756,8 +751,10 @@ fail_nomem:
 static void msm_iommu_domain_destroy(struct iommu_domain *domain)
 {
 	struct msm_iommu_priv *priv;
+	unsigned long flags;
 
 	mutex_lock(&msm_iommu_lock);
+	spin_lock_irqsave(&msm_iommu_spin_lock, flags);
 	priv = domain->priv;
 	domain->priv = NULL;
 
@@ -765,6 +762,7 @@ static void msm_iommu_domain_destroy(struct iommu_domain *domain)
 		msm_iommu_pagetable_free(&priv->pt);
 
 	kfree(priv);
+	spin_unlock_irqrestore(&msm_iommu_spin_lock, flags);
 	mutex_unlock(&msm_iommu_lock);
 }
 
@@ -777,6 +775,7 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	int ret = 0;
 	int is_secure;
 	bool set_m2v = false;
+	unsigned long flags;
 
 	mutex_lock(&msm_iommu_lock);
 
@@ -798,17 +797,22 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	if (ctx_drvdata->attach_count > 1)
 		goto already_attached;
 
+	spin_lock_irqsave(&msm_iommu_spin_lock, flags);
 	if (!list_empty(&ctx_drvdata->attached_elm)) {
 		ret = -EBUSY;
+		spin_unlock_irqrestore(&msm_iommu_spin_lock, flags);
 		goto unlock;
 	}
 
 	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
 		if (tmp_drvdata == ctx_drvdata) {
 			ret = -EBUSY;
+			spin_unlock_irqrestore(&msm_iommu_spin_lock, flags);
 			goto unlock;
 		}
+	spin_unlock_irqrestore(&msm_iommu_spin_lock, flags);
 
 	is_secure = iommu_drvdata->sec_id != -1;
 
 	ret = __enable_regulators(iommu_drvdata);
@@ -859,7 +863,10 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	__disable_clocks(iommu_drvdata);
 
+	spin_lock_irqsave(&msm_iommu_spin_lock, flags);
 	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
+	spin_unlock_irqrestore(&msm_iommu_spin_lock, flags);
+
 	ctx_drvdata->attached_domain = domain;
 	++iommu_drvdata->ctx_attach_count;
 
@@ -881,6 +888,7 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
 	struct msm_iommu_ctx_drvdata *ctx_drvdata;
 	int ret;
 	int is_secure;
+	unsigned long flags;
 
 	if (!dev)
 		return;
@@ -933,7 +941,10 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
 	__disable_regulators(iommu_drvdata);
 
+	spin_lock_irqsave(&msm_iommu_spin_lock, flags);
 	list_del_init(&ctx_drvdata->attached_elm);
+	spin_unlock_irqrestore(&msm_iommu_spin_lock, flags);
+
 	ctx_drvdata->attached_domain = NULL;
 	BUG_ON(iommu_drvdata->ctx_attach_count == 0);
 	--iommu_drvdata->ctx_attach_count;
@@ -946,9 +957,9 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 {
 	struct msm_iommu_priv *priv;
 	int ret = 0;
+	unsigned long flags;
 
-	mutex_lock(&msm_iommu_lock);
+	spin_lock_irqsave(&msm_iommu_spin_lock, flags);
 
 	priv = domain->priv;
 	if (!priv) {
 		ret = -EINVAL;
@@ -960,7 +971,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
 		goto fail;
 
 fail:
-	mutex_unlock(&msm_iommu_lock);
+	spin_unlock_irqrestore(&msm_iommu_spin_lock, flags);
 	return ret;
 }
 
@@ -969,9 +980,9 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 {
 	struct msm_iommu_priv *priv;
 	int ret = -ENODEV;
+	unsigned long flags;
 
-	mutex_lock(&msm_iommu_lock);
+	spin_lock_irqsave(&msm_iommu_spin_lock, flags);
 
 	priv = domain->priv;
 	if (!priv)
 		goto fail;
@@ -984,8 +995,7 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 	msm_iommu_pagetable_free_tables(&priv->pt, va, len);
 fail:
-	mutex_unlock(&msm_iommu_lock);
-
+	spin_unlock_irqrestore(&msm_iommu_spin_lock, flags);
 	/* the IOMMU API requires us to return how many bytes were unmapped */
 	len = ret ? 0 : len;
 	return len;
@@ -997,9 +1007,9 @@ static int msm_iommu_map_range(struct iommu_domain *domain, unsigned long va,
 {
 	int ret;
 	struct msm_iommu_priv *priv;
+	unsigned long flags;
 
-	mutex_lock(&msm_iommu_lock);
+	spin_lock_irqsave(&msm_iommu_spin_lock, flags);
 
 	priv = domain->priv;
 	if (!priv) {
 		ret = -EINVAL;
@@ -1007,11 +1017,9 @@ static int msm_iommu_map_range(struct iommu_domain *domain, unsigned long va,
 	}
 
 	ret = msm_iommu_pagetable_map_range(&priv->pt, va, sg, len, prot);
-	if (ret)
-		goto fail;
 
 fail:
-	mutex_unlock(&msm_iommu_lock);
+	spin_unlock_irqrestore(&msm_iommu_spin_lock, flags);
 	return ret;
 }
 
@@ -1020,16 +1028,16 @@ static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned long va,
 				 size_t len)
 {
 	struct msm_iommu_priv *priv;
+	unsigned long flags;
 
-	mutex_lock(&msm_iommu_lock);
+	spin_lock_irqsave(&msm_iommu_spin_lock, flags);
 
 	priv = domain->priv;
 	msm_iommu_pagetable_unmap_range(&priv->pt, va, len);
 
 	__flush_iotlb(domain);
 
 	msm_iommu_pagetable_free_tables(&priv->pt, va, len);
-	mutex_unlock(&msm_iommu_lock);
+	spin_unlock_irqrestore(&msm_iommu_spin_lock, flags);
 
 	return 0;
 }
@@ -1089,6 +1097,7 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
 	phys_addr_t ret = 0;
 	int ctx;
 	int i;
+	unsigned long flags;
 
 	mutex_lock(&msm_iommu_lock);
 
@@ -1096,12 +1105,17 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
 	if (list_empty(&priv->list_attached))
 		goto fail;
 
+	spin_lock_irqsave(&msm_iommu_spin_lock, flags);
 	ctx_drvdata = list_entry(priv->list_attached.next,
 				 struct msm_iommu_ctx_drvdata, attached_elm);
+	spin_unlock_irqrestore(&msm_iommu_spin_lock, flags);
+
 	iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
 
 	if (iommu_drvdata->model == MMU_500) {
+		spin_lock_irqsave(&msm_iommu_spin_lock, flags);
 		ret = msm_iommu_iova_to_phys_soft(domain, va);
+		spin_unlock_irqrestore(&msm_iommu_spin_lock, flags);
 		mutex_unlock(&msm_iommu_lock);
 		return ret;
 	}
@@ -1115,15 +1129,16 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
 		goto fail;
 	}
 
+	spin_lock_irqsave(&msm_iommu_spin_lock, flags);
 	SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
 	mb();
-	for (i = 0; i < IOMMU_MSEC_TIMEOUT; i += IOMMU_MSEC_STEP)
+	for (i = 0; i < IOMMU_USEC_TIMEOUT; i += IOMMU_USEC_STEP)
 		if (GET_CB_ATSR_ACTIVE(base, ctx) == 0)
 			break;
 		else
-			msleep(IOMMU_MSEC_STEP);
+			udelay(IOMMU_USEC_STEP);
 
-	if (i >= IOMMU_MSEC_TIMEOUT) {
+	if (i >= IOMMU_USEC_TIMEOUT) {
 		pr_err("%s: iova to phys timed out on %pa for %s (%s)\n",
 			__func__, &va, iommu_drvdata->name, ctx_drvdata->name);
 		ret = 0;
@@ -1131,6 +1146,8 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
 	}
 
 	par = GET_PAR(base, ctx);
+	spin_unlock_irqrestore(&msm_iommu_spin_lock, flags);
+
 	__disable_clocks(iommu_drvdata);
 
 	if (par & CB_PAR_F) {

@@ -313,6 +313,13 @@ static int msm_iommu_probe(struct platform_device *pdev)
 	drvdata->phys_base = r->start;
 
+	if (IS_ENABLED(CONFIG_MSM_IOMMU_VBIF_CHECK)) {
+		drvdata->vbif_base =
+			ioremap(drvdata->phys_base - (phys_addr_t) 0x4000,
+				0x1000);
+		WARN_ON_ONCE(!drvdata->vbif_base);
+	}
+
 	r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 					 "smmu_local_base");
 	if (r) {
@@ -342,27 +349,55 @@ static int msm_iommu_probe(struct platform_device *pdev)
 	}
 
 	drvdata->pclk = devm_clk_get(&pdev->dev, "iface_clk");
-	if (IS_ERR(drvdata->pclk))
-		return PTR_ERR(drvdata->pclk);
+	if (IS_ERR(drvdata->pclk)) {
+		ret = PTR_ERR(drvdata->pclk);
+		drvdata->pclk = NULL;
+		goto fail;
+	}
+
+	ret = clk_prepare(drvdata->pclk);
+	if (ret)
+		return ret;
 
 	drvdata->clk = devm_clk_get(&pdev->dev, "core_clk");
-	if (IS_ERR(drvdata->clk))
-		return PTR_ERR(drvdata->clk);
+	if (IS_ERR(drvdata->clk)) {
+		ret = PTR_ERR(drvdata->clk);
+		drvdata->clk = NULL;
+		goto fail;
+	}
+
+	ret = clk_prepare(drvdata->clk);
+	if (ret)
+		goto fail;
 
 	needs_alt_core_clk = of_property_read_bool(pdev->dev.of_node,
 						   "qcom,needs-alt-core-clk");
 	if (needs_alt_core_clk) {
 		drvdata->aclk = devm_clk_get(&pdev->dev, "alt_core_clk");
-		if (IS_ERR(drvdata->aclk))
-			return PTR_ERR(drvdata->aclk);
+		if (IS_ERR(drvdata->aclk)) {
+			ret = PTR_ERR(drvdata->aclk);
+			drvdata->aclk = NULL;
+			goto fail;
+		}
+
+		ret = clk_prepare(drvdata->aclk);
+		if (ret)
+			goto fail;
 	}
 
 	needs_alt_iface_clk = of_property_read_bool(pdev->dev.of_node,
 						    "qcom,needs-alt-iface-clk");
 	if (needs_alt_iface_clk) {
 		drvdata->aiclk = devm_clk_get(&pdev->dev, "alt_iface_clk");
-		if (IS_ERR(drvdata->aiclk))
-			return PTR_ERR(drvdata->aiclk);
+		if (IS_ERR(drvdata->aiclk)) {
+			ret = PTR_ERR(drvdata->aiclk);
+			drvdata->aiclk = NULL;
+			goto fail;
+		}
+
+		ret = clk_prepare(drvdata->aiclk);
+		if (ret)
+			goto fail;
 	}
 
 	if (!of_property_read_u32(pdev->dev.of_node,
@@ -450,8 +485,14 @@ static int msm_iommu_probe(struct platform_device *pdev)
 	ret = of_platform_populate(pdev->dev.of_node, msm_iommu_ctx_match_table,
 				NULL, &pdev->dev);
-	if (ret)
+fail:
+	if (ret) {
+		clk_unprepare(drvdata->clk);
+		clk_unprepare(drvdata->pclk);
+		clk_unprepare(drvdata->aclk);
+		clk_unprepare(drvdata->aiclk);
 		pr_err("Failed to create iommu context device\n");
+	}
 
 	return ret;
 }
@@ -466,6 +507,10 @@ static int msm_iommu_remove(struct platform_device *pdev)
 	drv = platform_get_drvdata(pdev);
 	if (drv) {
 		__put_bus_vote_client(drv);
+		clk_unprepare(drv->clk);
+		clk_unprepare(drv->pclk);
+		clk_unprepare(drv->aclk);
+		clk_unprepare(drv->aiclk);
 		msm_iommu_remove_drv(drv);
 		platform_set_drvdata(pdev, NULL);
 	}
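The probe changes all follow one rule: anything that can sleep — ioremap() of the VBIF window, clk_prepare() — happens once at probe time, so the atomic map/unmap and error-dump paths only touch precomputed state (and the unconditional clk_unprepare() calls in the fail and remove paths work because a NULL clk is a no-op). A sketch of the ioremap half of that rule; struct demo_dev and the constants are hypothetical:

	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/kernel.h>

	struct demo_dev {
		phys_addr_t phys_base;
		void __iomem *vbif_base;
	};

	#define DEMO_VBIF_OFFSET 0x4000
	#define DEMO_VBIF_SIZE   0x1000

	/* Probe context: ioremap() may sleep, so do it here, once. */
	static int demo_probe_vbif(struct demo_dev *d)
	{
		d->vbif_base = ioremap(d->phys_base - DEMO_VBIF_OFFSET,
				       DEMO_VBIF_SIZE);
		return d->vbif_base ? 0 : -ENOMEM;
	}

	/* Error path, possibly atomic: no ioremap()/iounmap() here. */
	static void demo_dump_vbif(struct demo_dev *d)
	{
		if (d->vbif_base)
			pr_err("VBIF reg0: 0x%x\n", readl_relaxed(d->vbif_base));
	}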


@@ -206,7 +206,7 @@ static u32 *make_second_level(struct msm_iommu_pt *pt, u32 *fl_pte,
 				      u32 *fl_pte_shadow)
 {
 	u32 *sl;
-	sl = (u32 *) __get_free_pages(GFP_KERNEL,
+	sl = (u32 *) __get_free_pages(GFP_ATOMIC,
 					get_order(SZ_4K));
 
 	if (!sl) {
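make_second_level() can now run under msm_iommu_spin_lock, and GFP_KERNEL allocations may sleep to reclaim memory, which is forbidden there; GFP_ATOMIC allocates from reserves without sleeping but fails more readily, so the caller's existing `if (!sl)` check carries the weight. Sketched standalone (demo_alloc_second_level is a hypothetical helper):

	#include <linux/gfp.h>
	#include <linux/sizes.h>

	static u32 *demo_alloc_second_level(void)
	{
		/* GFP_ATOMIC: never sleeps, so legal under a spinlock;
		 * callers must tolerate a NULL return. */
		return (u32 *)__get_free_pages(GFP_ATOMIC, get_order(SZ_4K));
	}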


@@ -114,6 +114,7 @@ struct msm_iommu_drvdata {
 	void __iomem *glb_base;
 	void __iomem *cb_base;
 	void __iomem *smmu_local_base;
+	void __iomem *vbif_base;
 	int ncb;
 	int ttbr_split;
 	struct clk *clk;