iommu: msm: Introduce AARCH64 page table format

Some clients can generate 48/49-bit virtual addresses. Support those
clients by adding the AARCH64 page table format.

Change-Id: Ic8d9a12e990f13ffebd6be6c81506d6bcc421f05
Signed-off-by: Chintan Pandya <cpandya@codeaurora.org>
parent 18d8c93132
commit f879574c6c
@@ -103,6 +103,19 @@ config IOMMU_LPAE
	  If unsure, say N here.

config IOMMU_AARCH64
	bool "Enable support for AARCH64 in IOMMU"
	depends on (MSM_IOMMU && (!IOMMU_LPAE))
	help
	  Enables AARCH64 format page tables for IOMMU. This allows clients of
	  IOMMUs to use Virtual and physical addresses up-to 48 bits. This will
	  also support the clients who can't generate addresses of more than 32
	  bits. Presently, this config assumes that if SMMU global space is
	  programmed by some secure environment, they configure all the CBs of
	  all the SMMUs as AARCH64 formatted.

	  If unsure, say N here.

config MSM_IOMMU_VBIF_CHECK
	bool "Enable support for VBIF check when IOMMU gets stuck"
	depends on MSM_IOMMU
@@ -13,8 +13,12 @@ endif
ifdef CONFIG_IOMMU_LPAE
obj-$(CONFIG_MSM_IOMMU_V1) += msm_iommu_pagetable_lpae.o
else
ifdef CONFIG_IOMMU_AARCH64
obj-$(CONFIG_MSM_IOMMU_V1) += msm_iommu_pagetable_aarch64.o
else
obj-$(CONFIG_MSM_IOMMU_V1) += msm_iommu_pagetable.o
endif
endif
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
@@ -33,13 +33,14 @@
#include <linux/notifier.h>
#include <linux/qcom_iommu.h>
#include <asm/sizes.h>
#include <soc/qcom/scm.h>

#include "msm_iommu_hw-v1.h"
#include "msm_iommu_priv.h"
#include "msm_iommu_perfmon.h"
#include "msm_iommu_pagetable.h"

#ifdef CONFIG_IOMMU_LPAE
#if defined(CONFIG_IOMMU_LPAE) || defined(CONFIG_IOMMU_AARCH64)
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_2M | SZ_32M | SZ_1G)
#else

@@ -50,6 +51,8 @@
#define IOMMU_USEC_STEP		10
#define IOMMU_USEC_TIMEOUT	500

/* commands for SCM_SVC_SMMU_PROGRAM */
#define SMMU_CHANGE_PAGETABLE_FORMAT	0X01

/*
 * msm_iommu_spin_lock protects anything that can race with map

@@ -528,31 +531,15 @@ static void __release_smg(void __iomem *base)
		SET_SMR_VALID(base, i, 0);
}

#ifdef CONFIG_IOMMU_LPAE
static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
			       unsigned int asid)
#if defined(CONFIG_IOMMU_LPAE)
static inline phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
{
	SET_CB_TTBR0_ASID(base, ctx_num, asid);
}
#else
static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
			       unsigned int asid)
{
	SET_CB_CONTEXTIDR_ASID(base, ctx_num, asid);
}
#endif

static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
				  struct msm_iommu_ctx_drvdata *curr_ctx,
				  struct msm_iommu_priv *priv)
{
	void __iomem *cb_base = iommu_drvdata->cb_base;

	curr_ctx->asid = curr_ctx->num;
	msm_iommu_set_ASID(cb_base, curr_ctx->num, curr_ctx->asid);
	phys_addr_t phy;
	/* Upper 28 bits from PAR, lower 12 from VA */
	phy = (par & 0x0000FFFFF000ULL) | (va & 0x000000000FFFULL);
	return phy;
}

#ifdef CONFIG_IOMMU_LPAE
static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
{
	SET_CB_TTBCR_EAE(base, ctx, 1); /* Extended Address Enable (EAE) */

@@ -575,14 +562,112 @@ static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
	SET_CB_TTBCR_IRGN0(base, ctx, 1); /* inner cachable*/
	SET_CB_TTBCR_T0SZ(base, ctx, 0); /* 0GB-4GB */


	SET_CB_TTBCR_SH1(base, ctx, 3); /* Inner shareable */
	SET_CB_TTBCR_ORGN1(base, ctx, 1); /* outer cachable*/
	SET_CB_TTBCR_IRGN1(base, ctx, 1); /* inner cachable*/
	SET_CB_TTBCR_T1SZ(base, ctx, 0); /* TTBR1 not used */
}

#else
static void __set_cb_format(struct msm_iommu_drvdata *iommu_drvdata,
			    struct msm_iommu_ctx_drvdata *ctx_drvdata)
{
}

static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
			       unsigned int asid)
{
	SET_CB_TTBR0_ASID(base, ctx_num, asid);
}
#elif defined(CONFIG_IOMMU_AARCH64)
static inline phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
{
	phys_addr_t phy;
	/* Upper 48 bits from PAR, lower 12 from VA */
	phy = (par & 0xFFFFFFFFF000ULL) | (va & 0x000000000FFFULL);
	return phy;
}

static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
{
	/*
	 * TCR2 presently sets PA size as 32-bits. When entire platform
	 * gets more physical size, we need to change for SMMU too.
	 * Change CB_TCR2_PA in that case.
	 */
	SET_CB_TCR2_SEP(base, ctx, 7); /* bit[48] as sign bit */
}

static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx)
{
	SET_CB_MAIR0(base, ctx, msm_iommu_get_mair0());
	SET_CB_MAIR1(base, ctx, msm_iommu_get_mair1());
}

static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
{
	/*
	 * Configure page tables as inner-cacheable and shareable to reduce
	 * the TLB miss penalty.
	 */
	SET_CB_TTBCR_SH0(base, ctx, 3); /* Inner shareable */
	SET_CB_TTBCR_ORGN0(base, ctx, 1); /* outer cachable*/
	SET_CB_TTBCR_IRGN0(base, ctx, 1); /* inner cachable*/
	SET_CB_TTBCR_T0SZ(base, ctx, 16); /* 48-bit VA */

	SET_CB_TTBCR_SH1(base, ctx, 3); /* Inner shareable */
	SET_CB_TTBCR_ORGN1(base, ctx, 1); /* outer cachable*/
	SET_CB_TTBCR_IRGN1(base, ctx, 1); /* inner cachable*/
	SET_CB_TTBCR_T1SZ(base, ctx, 63); /*TTBR1 not used */
}

static void __set_cb_format(struct msm_iommu_drvdata *iommu_drvdata,
			    struct msm_iommu_ctx_drvdata *ctx_drvdata)
{
	struct scm_desc desc = {0};
	unsigned int ret = 0;

	if (iommu_drvdata->sec_id != -1) {
		desc.args[0] = iommu_drvdata->sec_id;
		desc.args[1] = ctx_drvdata->num;
		desc.args[2] = 1; /* Enable */
		desc.arginfo = SCM_ARGS(3, SCM_VAL, SCM_VAL, SCM_VAL);

		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_SMMU_PROGRAM,
				SMMU_CHANGE_PAGETABLE_FORMAT), &desc);

		/* At this stage, we cannot afford to fail because we have
		 * chosen AARCH64 format at compile time and we have nothing
		 * to fallback on.
		 */
		if (ret) {
			pr_err("Format change failed for CB %d with ret %d\n",
			       ctx_drvdata->num, ret);
			BUG();
		}
	} else {
		/* Set page table format as AARCH64 */
		SET_CBA2R_VA64(iommu_drvdata->base, ctx_drvdata->num, 1);
	}
}

static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
			       unsigned int asid)
{
	SET_CB_TTBR0_ASID(base, ctx_num, asid);
}
#else /* v7S format */
static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
{
	phys_addr_t phy;

	/* We are dealing with a supersection */
	if (par & CB_PAR_SS)
		phy = (par & 0x0000FF000000ULL) | (va & 0x000000FFFFFFULL);
	else /* Upper 20 bits from PAR, lower 12 from VA */
		phy = (par & 0x0000FFFFF000ULL) | (va & 0x000000000FFFULL);

	return phy;
}

static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
{
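The AArch64 msm_iommu_get_phy_from_PAR() variant in the hunk above splices the page-frame bits reported in the PAR register onto the page offset of the translated VA. A minimal standalone sketch of that bit manipulation, outside the driver (the masks come from the hunk above; the function name and sample numbers are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Same splice as the driver: bits [47:12] from PAR, bits [11:0] from the VA. */
static uint64_t par_to_phys(uint64_t va, uint64_t par)
{
	return (par & 0xFFFFFFFFF000ULL) | (va & 0xFFFULL);
}

int main(void)
{
	/* Hypothetical values: PAR reports frame 0x1234567000, VA offset 0xABC. */
	printf("0x%llx\n",
	       (unsigned long long)par_to_phys(0x0000004000000ABCULL,
					       0x0000001234567000ULL));
	return 0;	/* prints 0x1234567abc */
}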
@@ -608,8 +693,28 @@ static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
	SET_CB_TTBR0_RGN(base, ctx, 1); /* WB, WA */
}

static void __set_cb_format(struct msm_iommu_drvdata *iommu_drvdata,
			    struct msm_iommu_ctx_drvdata *ctx_drvdata)
{
}

static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
			       unsigned int asid)
{
	SET_CB_CONTEXTIDR_ASID(base, ctx_num, asid);
}
#endif

static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
				  struct msm_iommu_ctx_drvdata *curr_ctx,
				  struct msm_iommu_priv *priv)
{
	void __iomem *cb_base = iommu_drvdata->cb_base;

	curr_ctx->asid = curr_ctx->num;
	msm_iommu_set_ASID(cb_base, curr_ctx->num, curr_ctx->asid);
}

static int program_m2v_table(struct device *dev, void __iomem *base)
{
	struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_get_drvdata(dev);
@@ -702,9 +807,9 @@ static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,

		/* Do not downgrade memory attributes */
		SET_CBAR_MEMATTR(base, ctx, 0x0A);

	}

	__set_cb_format(iommu_drvdata, ctx_drvdata);
	msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, priv);

	/* Ensure that ASID assignment has completed before we use
@@ -1063,29 +1168,6 @@ static size_t msm_iommu_map_sg(struct iommu_domain *domain, unsigned long va,
	return len;
}

#ifdef CONFIG_IOMMU_LPAE
static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
{
	phys_addr_t phy;
	/* Upper 28 bits from PAR, lower 12 from VA */
	phy = (par & 0xFFFFFFF000ULL) | (va & 0x00000FFF);
	return phy;
}
#else
static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
{
	phys_addr_t phy;

	/* We are dealing with a supersection */
	if (par & CB_PAR_SS)
		phy = (par & 0xFF000000) | (va & 0x00FFFFFF);
	else /* Upper 20 bits from PAR, lower 12 from VA */
		phy = (par & 0xFFFFF000) | (va & 0x00000FFF);

	return phy;
}
#endif

static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
					  phys_addr_t va)
{
@@ -1184,7 +1266,7 @@ static int msm_iommu_domain_has_cap(struct iommu_domain *domain,
	return 0;
}

#ifdef CONFIG_IOMMU_LPAE
#if defined(CONFIG_IOMMU_LPAE) || defined(CONFIG_IOMMU_AARCH64)
static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[])
{
	pr_err("MAIR0 = %08x MAIR1 = %08x\n",
@@ -1,4 +1,4 @@
/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -114,13 +114,46 @@ struct device *msm_iommu_get_ctx(const char *ctx_name)
}
EXPORT_SYMBOL(msm_iommu_get_ctx);

#ifdef CONFIG_ARM
#ifdef CONFIG_IOMMU_LPAE
#ifdef CONFIG_ARM_LPAE
/*
 * If CONFIG_ARM_LPAE AND CONFIG_IOMMU_LPAE are enabled we can use the MAIR
 * register directly
 * Selecting NMRR, PRRR, MAIR0 and MAIR1 for SMMU has a dependency on
 * the SMMU page table formate and a CPU mode. To simplify that, refer
 * the table below.
 *
 *              +-----------+-------------+-------+
 *              | ARM       | ARM_LPAE    | ARM64 |
 * +------------+-----------+-------------+-------+
 * | SMMUv7S    | RCP15_PRRR| PRRR        | PRRR  |
 * |            | RCP15_NMRR| NMRR        | NMRR  |
 * +------------+-----------+-------------+-------+
 * | SMMUv7L    | MAIR0     | RCP15_MAIR0 | MAIR0 |
 * |            | MAIR1     | RCP15_MAIR1 | MAIR1 |
 * +------------+-----------+-------------+-------+
 * | SMMUv8L    | MAIR0     | RCP15_MAIR0 | MAIR0 |
 * |            | MAIR1     | RCP15_MAIR1 | MAIR1 |
 * +------------+-----------+-------------+-------+
 */

#ifdef CONFIG_ARM64
u32 msm_iommu_get_mair0(void)
{
	return MAIR0_VALUE;
}

u32 msm_iommu_get_mair1(void)
{
	return MAIR1_VALUE;
}

u32 msm_iommu_get_prrr(void)
{
	return PRRR_VALUE;
}

u32 msm_iommu_get_nmrr(void)
{
	return NMRR_VALUE;
}
#elif defined(CONFIG_ARM_LPAE)
u32 msm_iommu_get_mair0(void)
{
	unsigned int mair0;
@@ -136,28 +169,7 @@ u32 msm_iommu_get_mair1(void)
	RCP15_MAIR1(mair1);
	return mair1;
}
#else
/*
 * However, If CONFIG_ARM_LPAE is not enabled but CONFIG_IOMMU_LPAE is enabled
 * we'll just use the hard coded values directly..
 */
u32 msm_iommu_get_mair0(void)
{
	return MAIR0_VALUE;
}

u32 msm_iommu_get_mair1(void)
{
	return MAIR1_VALUE;
}
#endif

#else
#ifdef CONFIG_ARM_LPAE
/*
 * If CONFIG_ARM_LPAE is enabled AND CONFIG_IOMMU_LPAE is disabled
 * we must use the hardcoded values.
 */
u32 msm_iommu_get_prrr(void)
{
	return PRRR_VALUE;
@@ -168,12 +180,15 @@ u32 msm_iommu_get_nmrr(void)
	return NMRR_VALUE;
}
#else
/*
 * If both CONFIG_ARM_LPAE AND CONFIG_IOMMU_LPAE are disabled
 * we can use the registers directly.
 */
#define RCP15_PRRR(reg)	MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg)	MRC(reg, p15, 0, c10, c2, 1)
u32 msm_iommu_get_mair0(void)
{
	return MAIR0_VALUE;
}

u32 msm_iommu_get_mair1(void)
{
	return MAIR1_VALUE;
}

u32 msm_iommu_get_prrr(void)
{
@@ -191,16 +206,3 @@ u32 msm_iommu_get_nmrr(void)
	return nmrr;
}
#endif
#endif
#endif
#ifdef CONFIG_ARM64
u32 msm_iommu_get_prrr(void)
{
	return PRRR_VALUE;
}

u32 msm_iommu_get_nmrr(void)
{
	return NMRR_VALUE;
}
#endif
@@ -1,4 +1,4 @@
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -167,9 +167,11 @@ do { \
/* Global register space 1 setters / getters */
#define SET_CBAR_N(b, N, v)	SET_GLOBAL_REG_N(CBAR, N, (b), (v))
#define SET_CBFRSYNRA_N(b, N, v) SET_GLOBAL_REG_N(CBFRSYNRA, N, (b), (v))
#define SET_CBA2R_N(b, N, v)	SET_GLOBAL_REG_N(CBA2R, N, (b), (v))

#define GET_CBAR_N(b, N)	GET_GLOBAL_REG_N(CBAR, N, (b))
#define GET_CBFRSYNRA_N(b, N)	GET_GLOBAL_REG_N(CBFRSYNRA, N, (b))
#define GET_CBA2R_N(b, N)	GET_GLOBAL_REG_N(CBA2R, N, (b))

/* Implementation defined register setters/getters */
#define SET_MICRO_MMU_CTRL_HALT_REQ(b, v) \
@@ -194,6 +196,7 @@ do { \
#define SET_SCTLR(b, c, v)	SET_CTX_REG(CB_SCTLR, (b), (c), (v))
#define SET_ACTLR(b, c, v)	SET_CTX_REG(CB_ACTLR, (b), (c), (v))
#define SET_RESUME(b, c, v)	SET_CTX_REG(CB_RESUME, (b), (c), (v))
#define SET_TCR2(b, c, v)	SET_CTX_REG(CB_TCR2, (b), (c), (v))
#define SET_TTBCR(b, c, v)	SET_CTX_REG(CB_TTBCR, (b), (c), (v))
#define SET_CONTEXTIDR(b, c, v)	SET_CTX_REG(CB_CONTEXTIDR, (b), (c), (v))
#define SET_PRRR(b, c, v)	SET_CTX_REG(CB_PRRR, (b), (c), (v))
@@ -223,6 +226,7 @@ do { \
#define GET_RESUME(b, c)	GET_CTX_REG(CB_RESUME, (b), (c))
#define GET_TTBR0(b, c)		GET_CTX_REG(CB_TTBR0, (b), (c))
#define GET_TTBR1(b, c)		GET_CTX_REG(CB_TTBR1, (b), (c))
#define GET_TCR2(b, c)		GET_CTX_REG(CB_TCR2, (b), (c))
#define GET_TTBCR(b, c)		GET_CTX_REG(CB_TTBCR, (b), (c))
#define GET_CONTEXTIDR(b, c)	GET_CTX_REG(CB_CONTEXTIDR, (b), (c))
#define GET_PRRR(b, c)		GET_CTX_REG(CB_PRRR, (b), (c))
@@ -602,6 +606,12 @@ do { \

#define GET_CBFRSYNRA_SID(b, n)	GET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID)

/* Context Bank Attribute 2 Register: CBA2R_N */
#define SET_CBA2R_VA64(b, n, v)	SET_GLOBAL_FIELD_N(b, n, CBA2R, VA64, v)
#define SET_CBA2R_MONC(b, n, v)	SET_GLOBAL_FIELD_N(b, n, CBA2R, MONC, v)
#define GET_CBA2R_VA64(b, n)	GET_GLOBAL_FIELD_N(b, n, CBA2R, VA64)
#define GET_CBA2R_MOC(b, n)	GET_GLOBAL_FIELD_N(b, n, CBA2R, MONC)

/* Stage 1 Context Bank Format Fields */
#define SET_CB_ACTLR_REQPRIORITY (b, c, v) \
		SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY, v)
@@ -936,7 +946,7 @@ do { \
#define GET_CB_TTBCR_NSCFG1(b, c) \
		GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1)

#ifdef CONFIG_IOMMU_LPAE
#if defined(CONFIG_IOMMU_LPAE) || defined(CONFIG_IOMMU_AARCH64)

/* LPAE format */

@@ -951,6 +961,15 @@ do { \
#define GET_CB_TTBR0_ADDR(b, c)	GET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ADDR)
#define GET_CB_TTBR0(b, c)	GET_CTX_REG_Q(CB_TTBR0, (b), (c))

/* Translation Control Register 2: CB_TCR2 */
#define SET_CB_TCR2_PA(b, c, v)	SET_CONTEXT_FIELD(b, c, CB_TCR2, PA, v)
#define SET_CB_TCR2_AS(b, c, v)	SET_CONTEXT_FIELD(b, c, CB_TCR2, AS, v)
#define SET_CB_TCR2_SEP(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TCR2, SEP, v)

#define GET_CB_TCR2_PA(b, c)	GET_CONTEXT_FIELD(b, c, CB_TCR2, PA)
#define GET_CB_TCR2_AS(b, c)	GET_CONTEXT_FIELD(b, c, CB_TCR2, AS)
#define GET_CB_TCR2_SEP(b, c)	GET_CONTEXT_FIELD(b, c, CB_TCR2, SEP)

/* Translation Table Base Control Register: CB_TTBCR */
#define SET_CB_TTBCR_T0SZ(b, c, v)	SET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ, v)
#define SET_CB_TTBCR_T1SZ(b, c, v)	SET_CONTEXT_FIELD(b, c, CB_TTBCR, T1SZ, v)
@@ -1065,6 +1084,7 @@ do { \
/* Global Register Space 1 */
#define CBAR		(0x1000)
#define CBFRSYNRA	(0x1400)
#define CBA2R		(0x1800)

/* Implementation defined Register Space */
#define MICRO_MMU_CTRL	(0x2000)
@@ -1097,6 +1117,7 @@ do { \
#define CB_SCTLR	(0x000)
#define CB_ACTLR	(0x004)
#define CB_RESUME	(0x008)
#define CB_TCR2		(0x010)
#define CB_TTBR0	(0x020)
#define CB_TTBR1	(0x028)
#define CB_TTBCR	(0x030)
@@ -1317,6 +1338,10 @@ do { \
/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
#define CBFRSYNRA_SID	(CBFRSYNRA_SID_MASK << CBFRSYNRA_SID_SHIFT)

/* Context Bank Attribute 2 Register: CBA2R */
#define CBA2R_VA64	(CBA2R_VA64_MASK << CBA2R_VA64_SHIFT)
#define CBA2R_MONC	(CBA2R_MONC_MASK << CBA2R_MONC_SHIFT)

/* Performance Monitoring Register Fields */

/* Stage 1 Context Bank Format Fields */
@@ -1482,12 +1507,16 @@ do { \
#define CB_TLBSTATUS_SACTIVE	(CB_TLBSTATUS_SACTIVE_MASK << \
						CB_TLBSTATUS_SACTIVE_SHIFT)

/* Translation Control Register 2: CB_TCR2 */
#define CB_TCR2_PA	(CB_TCR2_PA_MASK << CB_TTBCR_PA_SHIFT)
#define CB_TCR2_AS	(CB_TCR2_AS_MASK << CB_TTBCR_AS_SHIFT)
#define CB_TCR2_SEP	(CB_TCR2_SEP_MASK << CB_TTBCR_SEP_SHIFT)

/* Translation Table Base Control Register: CB_TTBCR */
#define CB_TTBCR_EAE	(CB_TTBCR_EAE_MASK << CB_TTBCR_EAE_SHIFT)

#define CB_TTBR0_ADDR	(CB_TTBR0_ADDR_MASK << CB_TTBR0_ADDR_SHIFT)

#ifdef CONFIG_IOMMU_LPAE
#if defined(CONFIG_IOMMU_LPAE) || defined(CONFIG_IOMMU_AARCH64)
/* Translation Table Base Register: CB_TTBR */
#define CB_TTBR0_ASID	(CB_TTBR0_ASID_MASK << CB_TTBR0_ASID_SHIFT)
#define CB_TTBR1_ASID	(CB_TTBR1_ASID_MASK << CB_TTBR1_ASID_SHIFT)
@@ -1723,6 +1752,10 @@ do { \
/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
#define CBFRSYNRA_SID_MASK	0x7FFF

/* Context Bank Attribute 2 Register: CBA2R */
#define CBA2R_VA64_MASK		0x1
#define CBA2R_MONC_MASK		0x1

/* Implementation defined register space masks */
#define MICRO_MMU_CTRL_RESERVED_MASK	0x03
#define MICRO_MMU_CTRL_HALT_REQ_MASK	0x01
@@ -1885,9 +1918,19 @@ do { \
/* TLB Status: CB_TLBSTATUS */
#define CB_TLBSTATUS_SACTIVE_MASK	0x01

/* Translation Control Register 2: CB_TCR2 */
#define CB_TCR2_PA_MASK			0x07
#define CB_TCR2_AS_MASK			0x01
#define CB_TCR2_SEP_MASK		0x07

/* Translation Table Base Control Register: CB_TTBCR */
#if defined(CONFIG_IOMMU_AARCH64)
#define CB_TTBCR_T0SZ_MASK		0x03F
#define CB_TTBCR_T1SZ_MASK		0x03F
#else
#define CB_TTBCR_T0SZ_MASK		0x07
#define CB_TTBCR_T1SZ_MASK		0x07
#endif
#define CB_TTBCR_EPD0_MASK		0x01
#define CB_TTBCR_EPD1_MASK		0x01
#define CB_TTBCR_IRGN0_MASK		0x03
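The widened 6-bit T0SZ/T1SZ masks above make room for the AArch64 encoding, in which TnSZ is 64 minus the virtual-address width rather than the small LPAE-style value. A small illustration (not part of the patch) of the values this commit programs in msm_iommu_setup_pg_l2_redirect():

#include <stdio.h>

int main(void)
{
	/* Region covered by TTBRn is 2^(64 - TnSZ) bytes in the AArch64 regime. */
	printf("T0SZ for a 48-bit VA space: %d\n", 64 - 48);	/* 16 */
	printf("T1SZ used to park TTBR1:    %d\n", 63);	/* 2^(64-63) = 2 bytes */
	return 0;
}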
@@ -1902,7 +1945,7 @@ do { \
#define CB_TTBCR_EAE_MASK		0x01

/* Translation Table Base Register 0/1: CB_TTBR */
#ifdef CONFIG_IOMMU_LPAE
#if defined(CONFIG_IOMMU_LPAE) || defined(CONFIG_IOMMU_AARCH64)
#define CB_TTBR0_ADDR_MASK		0x7FFFFFFFFULL
#define CB_TTBR0_ASID_MASK		0xFF
#define CB_TTBR1_ASID_MASK		0xFF
@@ -2118,6 +2161,10 @@ do { \
/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
#define CBFRSYNRA_SID_SHIFT	0

/* Context Bank Attribute 2 Register: CBA2R */
#define CBA2R_VA64_SHIFT	0
#define CBA2R_MONC_SHIFT	1

/* Implementation defined register space shift */
#define MICRO_MMU_CTRL_RESERVED_SHIFT	0x00
#define MICRO_MMU_CTRL_HALT_REQ_SHIFT	0x02
@@ -2280,6 +2327,11 @@ do { \
/* TLB Status: CB_TLBSTATUS */
#define CB_TLBSTATUS_SACTIVE_SHIFT	0

/* Translation Control Register 2: CB_TCR2 */
#define CB_TCR2_PA_SHIFT	0
#define CB_TCR2_AS_SHIFT	4
#define CB_TCR2_SEP_SHIFT	15

/* Translation Table Base Control Register: CB_TTBCR */
#define CB_TTBCR_T0SZ_SHIFT	0
#define CB_TTBCR_T1SZ_SHIFT	16
@@ -2297,7 +2349,7 @@ do { \
#define CB_TTBCR_SH1_SHIFT	28

/* Translation Table Base Register 0/1: CB_TTBR */
#ifdef CONFIG_IOMMU_LPAE
#if defined(CONFIG_IOMMU_LPAE) || defined(CONFIG_IOMMU_AARCH64)
#define CB_TTBR0_ADDR_SHIFT	5
#define CB_TTBR0_ASID_SHIFT	48
#define CB_TTBR1_ASID_SHIFT	48
@@ -0,0 +1,927 @@
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>

#include "msm_iommu_priv.h"
#include <trace/events/kmem.h>
#include "msm_iommu_pagetable.h"

#define NUM_PT_LEVEL	4
#define NUM_FL_PTE	512	/* First level */
#define NUM_SL_PTE	512	/* Second level */
#define NUM_TL_PTE	512	/* Third level */
#define NUM_LL_PTE	512	/* Fourth (Last) level */

#define PTE_SIZE	8

#define FL_ALIGN	SZ_4K

/* First-level/second-level page table bits */
#define FL_SHIFT	39
#define FL_OFFSET(va)	(((va) & 0xFF8000000000ULL) >> FL_SHIFT)

/* Second-level page table bits */
#define SL_SHIFT	30
#define SL_OFFSET(va)	(((va) & 0x7FC0000000ULL) >> SL_SHIFT)

/* Third-level page table bits */
#define TL_SHIFT	21
#define TL_OFFSET(va)	(((va) & 0x3FE00000ULL) >> TL_SHIFT)

/* Fourth-level (Last level) page table bits */
#define LL_SHIFT	12
#define LL_OFFSET(va)	(((va) & 0x1FF000ULL) >> LL_SHIFT)
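
/*
 * Example of how a 48-bit VA is split by the helpers above (4 KB granule,
 * 9 bits of index per level); the sample address is arbitrary:
 *
 *   va = 0xABCD12345000
 *   FL_OFFSET(va) = (va >> 39) & 0x1FF = 0x157   (first level)
 *   SL_OFFSET(va) = (va >> 30) & 0x1FF = 0x134   (second level)
 *   TL_OFFSET(va) = (va >> 21) & 0x1FF = 0x091   (third level)
 *   LL_OFFSET(va) = (va >> 12) & 0x1FF = 0x145   (fourth/last level)
 */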

#define FLSL_BASE_MASK		(0xFFFFFFFFF000ULL)
#define FLSL_1G_BLOCK_MASK	(0xFFFFC0000000ULL)
#define FLSL_BLOCK_MASK		(0xFFFFE00000ULL)
#define FLSL_TYPE_BLOCK		(1 << 0)
#define FLSL_TYPE_TABLE		(3 << 0)
#define FLSL_PTE_TYPE_MASK	(3 << 0)
#define FLSL_APTABLE_RO		(2 << 61)
#define FLSL_APTABLE_RW		(0 << 61)

#define FL_TYPE_SECT		(2 << 0)
#define FL_SUPERSECTION		(1 << 18)
#define FL_AP0			(1 << 10)
#define FL_AP1			(1 << 11)
#define FL_AP2			(1 << 15)
#define FL_SHARED		(1 << 16)
#define FL_BUFFERABLE		(1 << 2)
#define FL_CACHEABLE		(1 << 3)
#define FL_TEX0			(1 << 12)
#define FL_NG			(1 << 17)

#define LL_TYPE_PAGE		(3 << 0)
#define LL_PAGE_MASK		(0xFFFFFFFFF000ULL)
#define LL_ATTR_INDEX_MASK	(0x7)
#define LL_ATTR_INDEX_SHIFT	(0x2)
#define LL_NS			(0x1 << 5)
#define LL_AP_RO		(0x3 << 6)	/* Access Permission: R */
#define LL_AP_RW		(0x1 << 6)	/* Access Permission: RW */
#define LL_AP_PR_RW		(0x0 << 6)	/* Privileged Mode RW */
#define LL_AP_PR_RO		(0x2 << 6)	/* Privileged Mode R */
#define LL_SH_ISH		(0x3 << 8)	/* Inner shareable */
#define LL_SH_OSH		(0x2 << 8)	/* Outer shareable */
#define LL_SH_NSH		(0x0 << 8)	/* Non-shareable */
#define LL_AF			(0x1 << 10)	/* Access Flag */
#define LL_NG			(0x1 << 11)	/* Non-Global */
#define LL_CH			(0x1ULL << 52)	/* Contiguous hint */
#define LL_PXN			(0x1ULL << 53)	/* Privilege Execute Never */
#define LL_XN			(0x1ULL << 54)	/* Execute Never */

/* normal non-cacheable */
#define PTE_MT_BUFFERABLE	(1 << 2)
/* normal inner write-alloc */
#define PTE_MT_WRITEALLOC	(7 << 2)

#define PTE_MT_MASK		(7 << 2)

#define FOLLOW_TO_NEXT_TABLE(pte) ((u64 *) __va(((*pte) & FLSL_BASE_MASK)))
#define SUB_LEVEL_MAPPING_NOT_REQUIRED	1

static void __msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt,
					      unsigned long va, size_t len,
					      u32 silent);

static inline void clean_pte(u64 *start, u64 *end, s32 redirect)
{
	if (!redirect)
		dmac_flush_range(start, end);
}

s32 msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
{
	pt->fl_table = (u64 *) get_zeroed_page(GFP_ATOMIC);
	if (!pt->fl_table)
		return -ENOMEM;

	clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);
	return 0;
}

/*
 * Everything in this page table (and sub-level tables) will be
 * cleared and freed
 */
static void free_pagetable_level(u64 phys, int level, int loop)
{
	u64 *table = phys_to_virt(phys);
	int i;

	if (level > NUM_PT_LEVEL)
		return;
	else if (level == NUM_PT_LEVEL || !loop)
		goto free_this_level;

	for (i = 0; i < NUM_FL_PTE; ++i) {
		if ((table[i] & FLSL_TYPE_TABLE) == FLSL_TYPE_TABLE) {
			u64 p = table[i] & FLSL_BASE_MASK;
			if (p)
				free_pagetable_level(p, level + 1, 1);
		}
	}

free_this_level:
	free_page((unsigned long)table);
}

/*
 * Free the page tables at all the level irrespective of whether
 * mapping exists or not. This is to be called at domain_destroy
 */
void msm_iommu_pagetable_free(struct msm_iommu_pt *pt)
{
	u64 *fl_table = pt->fl_table;

	free_pagetable_level(virt_to_phys(fl_table), 1, 1);
	pt->fl_table = 0;
}

static bool is_table_empty(u64 *table)
{
	int i;

	for (i = 0; i < NUM_FL_PTE; i++)
		if (table[i] != 0)
			return false;

	return true;
}

void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va,
				     size_t len)
{
	/*
	 * We free the page tables at the time of msm_iommu_pagetable_free.
	 * So, we really don't need to do anything here.
	 */
}

static inline u32 __get_cache_attr(void)
{
	return PTE_MT_WRITEALLOC;
}

/*
 * Get the IOMMU attributes for the ARM AARCH64 long descriptor format page
 * table entry bits. The only upper attribute bits we currently use is the
 * contiguous bit which is set when we actually have a contiguous mapping.
 * Lower attribute bits specify memory attributes and the protection
 * (Read/Write/Execute).
 */
static void __get_attr(int prot, u64 *upper_attr, u64 *lower_attr)
{
	u32 attr_idx = PTE_MT_BUFFERABLE;

	*upper_attr = 0;
	*lower_attr = 0;

	if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
		prot |= IOMMU_READ | IOMMU_WRITE;
		WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
	}

	if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
		prot |= IOMMU_READ;
		WARN_ONCE(1, "Write-only unsupported; falling back to RW\n");
	}

	if (prot & IOMMU_CACHE)
		attr_idx = __get_cache_attr();

	*lower_attr |= attr_idx;
	*lower_attr |= LL_NG | LL_AF;
	*lower_attr |= (prot & IOMMU_CACHE) ? LL_SH_ISH : LL_SH_NSH;
	if (prot & IOMMU_PRIV)
		*lower_attr |= (prot & IOMMU_WRITE) ? LL_AP_PR_RW : LL_AP_PR_RO;
	else
		*lower_attr |= (prot & IOMMU_WRITE) ? LL_AP_RW : LL_AP_RO;
}
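
/*
 * Worked example (values derived from the definitions above): a mapping
 * requested with IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE ends up with
 * lower_attr = PTE_MT_WRITEALLOC | LL_NG | LL_AF | LL_SH_ISH | LL_AP_RW
 * and upper_attr = 0; the contiguous hint (LL_CH) is OR-ed in later only
 * by the 64 KB and 32 MB block helpers.
 */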

static u64 *make_next_level_table(s32 redirect, u64 *pte)
{
	u64 *next_level_table = (u64 *)get_zeroed_page(GFP_ATOMIC);

	if (!next_level_table) {
		pr_err("Could not allocate next level table\n");
		goto fail;
	}
	clean_pte(next_level_table, next_level_table + NUM_FL_PTE, redirect);

	/* Leave APTable bits 0 to let next level decide access permissions */
	*pte = (((phys_addr_t)__pa(next_level_table)) &
			FLSL_BASE_MASK) | FLSL_TYPE_TABLE;
	clean_pte(pte, pte + 1, redirect);
fail:
	return next_level_table;
}

static inline s32 ll_4k_map(u64 *ll_pte, phys_addr_t pa,
			    u64 upper_attr, u64 lower_attr, s32 redirect)
{
	s32 ret = 0;

	if (*ll_pte) {
		ret = -EBUSY;
		pr_err("%s: Busy ll_pte %p -> %lx\n",
		       __func__, ll_pte, (unsigned long) *ll_pte);
		goto fail;
	}

	*ll_pte = upper_attr | (pa & LL_PAGE_MASK) | lower_attr | LL_TYPE_PAGE;
	clean_pte(ll_pte, ll_pte + 1, redirect);
fail:
	return ret;
}

static inline s32 ll_64k_map(u64 *ll_pte, phys_addr_t pa,
			     u64 upper_attr, u64 lower_attr, s32 redirect)
{
	s32 ret = 0;
	s32 i;

	for (i = 0; i < 16; ++i) {
		if (*(ll_pte+i)) {
			ret = -EBUSY;
			pr_err("%s: Busy ll_pte %p -> %lx\n",
			       __func__, ll_pte, (unsigned long) *ll_pte);
			goto fail;
		}
	}

	/* Add Contiguous hint LL_CH */
	upper_attr |= LL_CH;

	for (i = 0; i < 16; ++i)
		*(ll_pte+i) = upper_attr | (pa & LL_PAGE_MASK) |
			      lower_attr | LL_TYPE_PAGE;
	clean_pte(ll_pte, ll_pte + 16, redirect);
fail:
	return ret;
}

static inline s32 tl_2m_map(u64 *tl_pte, phys_addr_t pa,
			    u64 upper_attr, u64 lower_attr, s32 redirect)
{
	s32 ret = 0;

	if (*tl_pte) {
		ret = -EBUSY;
		pr_err("%s: Busy tl_pte %p -> %lx\n",
		       __func__, tl_pte, (unsigned long) *tl_pte);
		goto fail;
	}

	*tl_pte = upper_attr | (pa & FLSL_BLOCK_MASK) |
		  lower_attr | FLSL_TYPE_BLOCK;
	clean_pte(tl_pte, tl_pte + 1, redirect);
fail:
	return ret;
}

static inline s32 tl_32m_map(u64 *tl_pte, phys_addr_t pa,
			     u64 upper_attr, u64 lower_attr, s32 redirect)
{
	s32 i;
	s32 ret = 0;

	for (i = 0; i < 16; ++i) {
		if (*(tl_pte+i)) {
			ret = -EBUSY;
			pr_err("%s: Busy tl_pte %p -> %lx\n",
			       __func__, tl_pte, (unsigned long) *tl_pte);
			goto fail;
		}
	}

	/* Add Contiguous hint TL_CH */
	upper_attr |= LL_CH;

	for (i = 0; i < 16; ++i)
		*(tl_pte+i) = upper_attr | (pa & FLSL_BLOCK_MASK) |
			      lower_attr | FLSL_TYPE_BLOCK;
	clean_pte(tl_pte, tl_pte + 16, redirect);
fail:
	return ret;
}

static inline s32 sl_1G_map(u64 *sl_pte, phys_addr_t pa,
			    u64 upper_attr, u64 lower_attr, s32 redirect)
{
	s32 ret = 0;

	if (*sl_pte) {
		ret = -EBUSY;
		pr_err("%s: Busy sl_pte %p -> %lx\n",
		       __func__, sl_pte, (unsigned long) *sl_pte);
		goto fail;
	}

	*sl_pte = upper_attr | (pa & FLSL_1G_BLOCK_MASK) |
		  lower_attr | FLSL_TYPE_BLOCK;

	clean_pte(sl_pte, sl_pte + 1, redirect);
fail:
	return ret;
}

static inline s32 handle_1st_lvl(struct msm_iommu_pt *pt, u64 *fl_pte,
				 phys_addr_t pa, size_t len, u64 upper_attr,
				 u64 lower_attr)
{
	s32 ret = 0;

	/* Need a 2nd level page table */
	if (*fl_pte == 0)
		if (!make_next_level_table(pt->redirect, fl_pte))
			ret = -ENOMEM;

	if (!ret)
		if ((*fl_pte & FLSL_TYPE_TABLE) != FLSL_TYPE_TABLE)
			ret = -EBUSY;

	return ret;
}

static inline s32 handle_2nd_lvl(struct msm_iommu_pt *pt, u64 *sl_pte,
				 phys_addr_t pa, size_t chunk_size,
				 u64 upper_attr, u64 lower_attr)
{
	s32 ret = 0;

	if (chunk_size == SZ_1G) {
		ret = sl_1G_map(sl_pte, pa, upper_attr, lower_attr,
				pt->redirect);

		if (!ret)
			return SUB_LEVEL_MAPPING_NOT_REQUIRED;
	}

	/* Need a 3rd level page table */
	if (*sl_pte == 0)
		if (!make_next_level_table(pt->redirect, sl_pte))
			ret = -ENOMEM;

	if (!ret)
		if ((*sl_pte & FLSL_TYPE_TABLE) != FLSL_TYPE_TABLE)
			ret = -EBUSY;

	return ret;
}

static inline s32 handle_3rd_lvl(struct msm_iommu_pt *pt, u64 *tl_pte,
				 phys_addr_t pa, size_t chunk_size,
				 u64 upper_attr, u64 lower_attr)
{
	s32 ret = 0;

	if (chunk_size == SZ_32M) {
		ret = tl_32m_map(tl_pte, pa, upper_attr, lower_attr,
				 pt->redirect);
		if (!ret)
			return SUB_LEVEL_MAPPING_NOT_REQUIRED;
	} else if (chunk_size == SZ_2M) {
		ret = tl_2m_map(tl_pte, pa, upper_attr, lower_attr,
				pt->redirect);
		if (!ret)
			return SUB_LEVEL_MAPPING_NOT_REQUIRED;
	}

	/* Need a 4th level page table */
	if (*tl_pte == 0)
		if (!make_next_level_table(pt->redirect, tl_pte))
			ret = -ENOMEM;

	if (!ret)
		if ((*tl_pte & FLSL_TYPE_TABLE) != FLSL_TYPE_TABLE)
			ret = -EBUSY;

	return ret;
}

static inline s32 handle_4th_lvl(struct msm_iommu_pt *pt, u64 *ll_pte,
				 phys_addr_t pa, size_t chunk_size,
				 u64 upper_attr, u64 lower_attr)
{
	s32 ret = 0;

	if (chunk_size == SZ_64K)
		ret = ll_64k_map(ll_pte, pa, upper_attr, lower_attr,
				 pt->redirect);
	else if (chunk_size == SZ_4K)
		ret = ll_4k_map(ll_pte, pa, upper_attr, lower_attr,
				pt->redirect);

	return ret;
}

static phys_addr_t __get_phys_sg(void *cookie)
{
	struct scatterlist *sg = cookie;
	struct page *page = sg_page(sg);

	BUG_ON(page == NULL);

	return sg_phys(sg);
}

static inline size_t __get_length_sg(void *cookie, unsigned int total)
{
	struct scatterlist *sg = cookie;

	return sg->length;
}

static inline int __get_next_sg(void *old, void **new)
{
	struct scatterlist *sg = old;
	*new = sg_next(sg);
	return 0;
}

static inline phys_addr_t __get_phys_bare(void *cookie)
{
	return (phys_addr_t)cookie;
}

static inline size_t __get_length_bare(void *cookie, unsigned int total)
{
	return total;
}

static inline int __get_next_bare(void *old, void **new)
{
	/* Put something here in hopes of catching errors... */
	*new = (void *)-1;
	return -EINVAL;
}

struct msm_iommu_map_ops {
	phys_addr_t (*get_phys)(void *cookie);
	size_t (*get_length)(void *cookie, unsigned int total);
	int (*get_next)(void *old, void **new);
};

static struct msm_iommu_map_ops regular_ops = {
	.get_phys = __get_phys_bare,
	.get_length = __get_length_bare,
	.get_next = __get_next_bare,
};

static struct msm_iommu_map_ops sg_ops = {
	.get_phys = __get_phys_sg,
	.get_length = __get_length_sg,
	.get_next = __get_next_sg,
};

#ifdef CONFIG_IOMMU_FORCE_4K_MAPPINGS
static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
				   int align)
{
	if (align == SZ_4K)
		return IS_ALIGNED(va | pa | len, align) && (len >= align);
	else
		return 0;
}
#else
static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
				   int align)
{
	return IS_ALIGNED(va | pa | len, align) && (len >= align);
}
#endif

static int __msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt,
					   unsigned long va, void *cookie,
					   struct msm_iommu_map_ops *ops,
					   size_t len, int prot)
{
	phys_addr_t pa;
	u64 offset = 0;
	u64 *fl_pte;
	u64 *sl_pte;
	u64 *tl_pte;
	u64 *ll_pte;
	u32 fl_offset;
	u32 sl_offset;
	u32 tl_offset;
	u32 ll_offset;
	u64 *sl_table = NULL;
	u64 *tl_table = NULL;
	u64 *ll_table = NULL;
	u64 chunk_size, chunk_offset = 0;
	s32 ret = 0;
	u64 up_at;
	u64 lo_at;
	unsigned long va_to_map = va;

	BUG_ON(len & (SZ_4K - 1));

	if (!pt->fl_table) {
		pr_err("Null page table\n");
		ret = -EINVAL;
		goto fail;
	}

	__get_attr(prot, &up_at, &lo_at);

	pa = ops->get_phys(cookie);

	while (offset < len) {
		u64 chunk_left = ops->get_length(cookie, len) - chunk_offset;

		chunk_size = SZ_4K;
		if (is_fully_aligned(va_to_map, pa, chunk_left, SZ_1G))
			chunk_size = SZ_1G;
		else if (is_fully_aligned(va_to_map, pa, chunk_left, SZ_32M))
			chunk_size = SZ_32M;
		else if (is_fully_aligned(va_to_map, pa, chunk_left, SZ_2M))
			chunk_size = SZ_2M;
		else if (is_fully_aligned(va_to_map, pa, chunk_left, SZ_64K))
			chunk_size = SZ_64K;

		trace_iommu_map_range(va_to_map, pa,
				      ops->get_length(cookie, len),
				      chunk_size);

		/* First level */
		fl_offset = FL_OFFSET(va_to_map);
		fl_pte = pt->fl_table + fl_offset;
		ret = handle_1st_lvl(pt, fl_pte, pa, chunk_size, up_at, lo_at);

		if (ret)
			goto fail;

		/* Second level */
		sl_table = FOLLOW_TO_NEXT_TABLE(fl_pte);
		sl_offset = SL_OFFSET(va_to_map);
		sl_pte = sl_table + sl_offset;
		ret = handle_2nd_lvl(pt, sl_pte, pa, chunk_size, up_at, lo_at);

		if (ret < 0)
			goto fail;

		if (ret == SUB_LEVEL_MAPPING_NOT_REQUIRED)
			goto proceed_further;

		/* Third level */
		tl_table = FOLLOW_TO_NEXT_TABLE(sl_pte);
		tl_offset = TL_OFFSET(va_to_map);
		tl_pte = tl_table + tl_offset;
		ret = handle_3rd_lvl(pt, tl_pte, pa, chunk_size, up_at, lo_at);

		if (ret < 0)
			goto fail;

		if (ret == SUB_LEVEL_MAPPING_NOT_REQUIRED)
			goto proceed_further;

		/* Fourth level */
		ll_table = FOLLOW_TO_NEXT_TABLE(tl_pte);
		ll_offset = LL_OFFSET(va_to_map);
		ll_pte = ll_table + ll_offset;
		ret = handle_4th_lvl(pt, ll_pte, pa, chunk_size, up_at, lo_at);

		if (ret)
			goto fail;

proceed_further:
		offset += chunk_size;
		chunk_offset += chunk_size;
		va_to_map += chunk_size;
		pa += chunk_size;

		if (chunk_offset >= ops->get_length(cookie, len)
			&& offset < len) {
			chunk_offset = 0;
			if (ops->get_next(cookie, &cookie))
				break;
			pa = ops->get_phys(cookie);
		}

		ret = 0;
	}
fail:
	if (ret && offset > 0) {
		pr_err("Something_wrong in mapping\n");
		__msm_iommu_pagetable_unmap_range(pt, va, offset, 1);
	}
	return ret;
}
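
/*
 * Example of the chunk-size selection above: mapping 6 MB at va = pa =
 * 0x40200000 is cut into 2 MB blocks (third level), while a run starting
 * at 0x40001000 falls back to 4 KB and then 64 KB chunks until the
 * alignment improves, since is_fully_aligned() checks va, pa and the
 * remaining length together.
 */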

static u64 clear_4th_level(u64 va, u64 *ll_pte, u64 len, u32 redirect,
			   u32 silent)
{
	u64 start_offset = LL_OFFSET(va);
	u64 offset, end_offset;
	u64 *pte = ll_pte;
	u64 num_pte;
	u64 chunk_size;

	if ((len / SZ_4K) + start_offset < NUM_LL_PTE)
		end_offset = start_offset + LL_OFFSET(len);
	else
		end_offset = NUM_LL_PTE;

	/* Clear multiple PTEs in the same loop */
	for (offset = start_offset; offset < end_offset; offset++) {
		if (*pte == 0) {
			if (!silent)
				pr_err("Last level PTE is 0 at 0x%p\n", pte);
			return 0;
		}

		*pte = 0;
		pte++;
	}

	num_pte = end_offset - start_offset;
	clean_pte(ll_pte, ll_pte + num_pte, redirect);
	chunk_size = SZ_4K * num_pte;

	return chunk_size;
}

static u64 clear_3rd_level(u64 va, u64 *tl_pte, u64 len, u32 redirect,
			   u32 silent)
{
	u64 chunk_size = 0;
	u64 type = 0;
	u64 *ll_table = NULL;
	u64 *ll_pte;
	u32 ll_offset;

	if (*tl_pte == 0) {
		if (!silent)
			pr_err("Third level PTE is 0 at 0x%p\n", tl_pte);
		return 0;
	}

	type = *tl_pte & FLSL_PTE_TYPE_MASK;
	if (type == FLSL_TYPE_BLOCK) {
		if (len < SZ_2M)
			BUG();

		*tl_pte = 0;
		clean_pte(tl_pte, tl_pte + 1, redirect);
		return SZ_2M;
	} else if (type == FLSL_TYPE_TABLE) {
		ll_table = FOLLOW_TO_NEXT_TABLE(tl_pte);
		ll_offset = LL_OFFSET(va);
		ll_pte = ll_table + ll_offset;
		chunk_size = clear_4th_level(va, ll_pte, len,
					     redirect, silent);

		if (is_table_empty(ll_table)) {
			u64 p = (*tl_pte) & FLSL_BASE_MASK;
			if (p) {
				free_pagetable_level(p, 4, 0);
				*tl_pte = 0;
				clean_pte(tl_pte, tl_pte + 1, redirect);
			}
		}
	} else {
		pr_err("Third level PTE is corrupted at 0x%p -> 0x%lx\n",
		       tl_pte, (unsigned long)*tl_pte);
	}

	return chunk_size;
}

static u64 clear_2nd_level(u64 va, u64 *sl_pte, u64 len, u32 redirect,
			   u32 silent)
{
	u64 chunk_size = 0;
	u64 type = 0;
	u64 *tl_table = NULL;
	u64 *tl_pte;
	u32 tl_offset;

	if (*sl_pte == 0) {
		if (!silent)
			pr_err("Second level PTE is 0 at 0x%p\n", sl_pte);
		return 0;
	}

	type = *sl_pte & FLSL_PTE_TYPE_MASK;
	if (type == FLSL_TYPE_BLOCK) {
		if (len < SZ_1G)
			BUG();

		*sl_pte = 0;
		clean_pte(sl_pte, sl_pte + 1, redirect);
		return SZ_1G;
	} else if (type == FLSL_TYPE_TABLE) {
		tl_table = FOLLOW_TO_NEXT_TABLE(sl_pte);
		tl_offset = TL_OFFSET(va);
		tl_pte = tl_table + tl_offset;
		chunk_size = clear_3rd_level(va, tl_pte, len, redirect,
					     silent);

		if (is_table_empty(tl_table)) {
			u64 p = (*sl_pte) & FLSL_BASE_MASK;
			if (p) {
				free_pagetable_level(p, 3, 0);
				*sl_pte = 0;
				clean_pte(sl_pte, sl_pte + 1, redirect);
			}
		}
	} else {
		pr_err("Second level PTE is corrupted at 0x%p -> 0x%lx\n",
		       sl_pte, (unsigned long)*sl_pte);
	}

	return chunk_size;
}

static u64 clear_1st_level(u64 va, u64 *fl_pte, u64 len, u32 redirect,
			   u32 silent)
{
	u64 chunk_size = 0;
	u64 type = 0;
	u64 *sl_table = NULL;
	u64 *sl_pte;
	u32 sl_offset;

	if (*fl_pte == 0) {
		if (!silent)
			pr_err("First level PTE is 0 at 0x%p\n", fl_pte);

		return 0;
	}

	type = *fl_pte & FLSL_PTE_TYPE_MASK;
	if (type == FLSL_TYPE_BLOCK) {
		if (!silent)
			pr_err("First level PTE has BLOCK mapping at 0x%p\n",
			       fl_pte);
		return 0;
	} else if (type == FLSL_TYPE_TABLE) {
		sl_table = FOLLOW_TO_NEXT_TABLE(fl_pte);
		sl_offset = SL_OFFSET(va);
		sl_pte = sl_table + sl_offset;
		chunk_size = clear_2nd_level(va, sl_pte, len, redirect,
					     silent);

		if (is_table_empty(sl_table)) {
			u64 p = (*fl_pte) & FLSL_BASE_MASK;
			if (p) {
				free_pagetable_level(p, 2, 0);
				*fl_pte = 0;
				clean_pte(fl_pte, fl_pte + 1, redirect);
			}
		}
	} else {
		pr_err("First level PTE is corrupted at 0x%p -> 0x%lx\n",
		       fl_pte, (unsigned long)*fl_pte);
	}

	return chunk_size;
}

static u64 clear_in_chunks(struct msm_iommu_pt *pt, u64 va, u64 len, u32 silent)
{
	u64 *fl_pte;
	u32 fl_offset;

	fl_offset = FL_OFFSET(va);
	fl_pte = pt->fl_table + fl_offset;

	return clear_1st_level(va, fl_pte, len, pt->redirect, silent);
}

static void __msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt,
					      unsigned long va, size_t len,
					      u32 silent)
{
	u64 offset = 0;
	u64 va_to_unmap, left_to_unmap;
	u64 chunk_size = 0;

	BUG_ON(len & (SZ_4K - 1));

	while (offset < len) {
		left_to_unmap = len - offset;
		va_to_unmap = va + offset;
		chunk_size = clear_in_chunks(pt, va_to_unmap, left_to_unmap,
					     silent);

		if (!chunk_size) {
			WARN_ON(1);
			return;
		}

		offset += chunk_size;
	}
}

int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned long va,
				  struct scatterlist *sg, size_t len, int prot)
{
	return __msm_iommu_pagetable_map_range(pt, va, sg, &sg_ops, len, prot);
}

void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned long va,
				     size_t len)
{
	__msm_iommu_pagetable_unmap_range(pt, va, len, 0);
}

int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
			    phys_addr_t pa, size_t len, int prot)
{
	s32 ret;

	ret = __msm_iommu_pagetable_map_range(pt, va, (void *) pa, &regular_ops,
					      len, prot);

	return ret;
}

size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
				 size_t len)
{
	msm_iommu_pagetable_unmap_range(pt, va, len);
	return len;
}

static phys_addr_t get_phys_from_va(unsigned long va, u64 *table, int level)
{
	u64 type;
	u64 mask;		/* For single mapping */
	u64 section_mask;	/* For section mapping */
	u64 *pte;

	if (level <= NUM_PT_LEVEL) {
		switch (level) {
		case 1:
			pte = table + FL_OFFSET(va);
			break;
		case 2:
			pte = table + SL_OFFSET(va);
			mask = 0xFFFFC0000000ULL;
			break;
		case 3:
			pte = table + TL_OFFSET(va);
			mask = 0xFFFFFFE00000ULL;
			section_mask = 0xFFFFFE000000ULL;
			break;
		case 4:
			pte = table + LL_OFFSET(va);
			mask = 0xFFFFFFFFF000ULL;
			section_mask = 0xFFFFFFFF0000ULL;
			break;

		default:
			pte = NULL;
			return 0;
		}

		type = *pte & FLSL_PTE_TYPE_MASK;

		if (type == FLSL_TYPE_BLOCK || level == NUM_PT_LEVEL) {
			if ((*pte & LL_CH) == LL_CH) {
				return (*pte & section_mask) |
					(va & ~section_mask);
			} else {
				return (*pte & mask) |
					(va & ~mask);
			}
		} else if (type == FLSL_TYPE_TABLE) {
			return get_phys_from_va(va, FOLLOW_TO_NEXT_TABLE(pte),
						level + 1);
		}
	}

	return 0;
}

phys_addr_t msm_iommu_iova_to_phys_soft(struct iommu_domain *domain,
					dma_addr_t va)
{
	struct msm_iommu_priv *priv = domain->priv;
	struct msm_iommu_pt *pt = &priv->pt;

	return get_phys_from_va(va, pt->fl_table, 1);
}

void __init msm_iommu_pagetable_init(void)
{
}
@@ -1,4 +1,4 @@
/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -34,7 +34,7 @@
 * sl_table_shadow uses the same concept as fl_table_shadow but for LPAE 2nd
 * level page tables.
 */
#ifdef CONFIG_IOMMU_LPAE
#if defined(CONFIG_IOMMU_LPAE) || defined(CONFIG_IOMMU_AARCH64)
struct msm_iommu_pt {
	u64 *fl_table;
	u64 **sl_table_shadow;
@@ -27,6 +27,7 @@
#define SCM_SVC_HDCP		0x11
#define SCM_SVC_MDTP		0x12
#define SCM_SVC_LMH		0x13
#define SCM_SVC_SMMU_PROGRAM	0x15
#define SCM_SVC_TZSCHEDULER	0xFC

#define SCM_FUSE_READ		0x7