Merge "msm: kgsl: Add content protection changes for A430"

Linux Build Service Account authored 2014-08-15 04:51:23 -07:00 · committed by Gerrit - the friendly Code Review server
commit 75c142f2ad
12 changed files with 293 additions and 80 deletions

View file

@ -19,9 +19,8 @@
msm_gpu: qcom,kgsl-3d0@fdb00000 {
label = "kgsl-3d0";
compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
reg = <0xfdb00000 0x20000
0xfdb20000 0x10000>;
reg-names = "kgsl_3d0_reg_memory" , "kgsl_3d0_shader_memory";
reg = <0xfdb00000 0x40000>;
reg-names = "kgsl_3d0_reg_memory";
interrupts = <0 33 0>;
interrupt-names = "kgsl_3d0_irq";
qcom,id = <0>;
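
The hunk above folds the separate 64KB shader window into the main register block, leaving a single 256KB (0x40000) region named "kgsl_3d0_reg_memory". A minimal sketch of the corresponding lookup, assuming only the standard platform-device APIs (none of this is code from the patch):

#include <linux/io.h>
#include <linux/platform_device.h>

/* Hedged sketch, not from this patch: map the single merged register window
 * now that "kgsl_3d0_shader_memory" no longer exists as its own resource. */
static void __iomem *kgsl_map_regs(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, "kgsl_3d0_reg_memory");

	if (!res)
		return NULL;

	return devm_ioremap(&pdev->dev, res->start, resource_size(res));
}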

View file

@ -440,6 +440,8 @@ enum a4xx_rb_perfctr_rb_sel {
#define A4XX_RBBM_PPD_EPOCH_INTRA_TH_2 0x1bb
#define A4XX_RBBM_PPD_EPOCH_INTER_TH_HI_CLR_TH 0x1bc
#define A4XX_RBBM_PPD_EPOCH_INTER_TH_LO 0x1bd
/* SECVID registers */
#define A4XX_RBBM_SECVID_TRUST_CONFIG 0xf000
#define A4XX_RBBM_SECVID_TRUST_CONTROL 0xf400
#define A4XX_RBBM_SECVID_TSB_TRUSTED_BASE 0xf800
#define A4XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0xf801
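
The four SECVID registers introduced here drive the trusted surface block: TRUST_CONFIG and TRUST_CONTROL switch the GPU in and out of secure access, while TSB_TRUSTED_BASE and TSB_TRUSTED_SIZE describe the single protected window that secure surfaces must live in (programmed later in this series from kgsl_iommu_start()). A hedged illustration of the range check that window implies; the helper is invented for this note:

#include <linux/types.h>

/* Illustrative only: the TSB registers carve out one [base, base + size)
 * range; a secure context is expected to keep its surfaces inside it. */
static inline bool in_trusted_window(u32 gpuaddr, u32 tsb_base, u32 tsb_size)
{
	return gpuaddr >= tsb_base && gpuaddr < tsb_base + tsb_size;
}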

View file

@ -179,7 +179,8 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
.minor = 0,
.patchid = ANY_ID,
.features = ADRENO_USES_OCMEM | ADRENO_WARM_START |
ADRENO_USE_BOOTSTRAP | ADRENO_SPTP_PC | ADRENO_PPD,
ADRENO_USE_BOOTSTRAP | ADRENO_SPTP_PC | ADRENO_PPD |
ADRENO_CONTENT_PROTECTION,
.pm4fw_name = "a420_pm4.fw",
.pfpfw_name = "a420_pfp.fw",
.gpudev = &adreno_a4xx_gpudev,

View file

@ -584,9 +584,19 @@ done:
}
static int adreno_of_get_iommu(struct device_node *parent,
static inline struct adreno_device *adreno_get_dev(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(adreno_match_table, &pdev->dev);
return of_id ? (struct adreno_device *) of_id->data : NULL;
}
static int adreno_of_get_iommu(struct platform_device *pdev,
struct kgsl_device_platform_data *pdata)
{
struct device_node *parent = pdev->dev.of_node;
struct adreno_device *adreno_dev = adreno_get_dev(pdev);
int result = -EINVAL;
struct device_node *node, *child;
struct kgsl_device_iommu_data *data = NULL;
@ -598,6 +608,10 @@ static int adreno_of_get_iommu(struct device_node *parent,
if (node == NULL)
return -EINVAL;
if (adreno_dev)
adreno_dev->dev.mmu.secured =
of_property_read_bool(node, "qcom,iommu-secure-id");
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data == NULL) {
result = -ENOMEM;
@ -718,7 +732,7 @@ static int adreno_of_get_pdata(struct platform_device *pdev)
goto err;
}
ret = adreno_of_get_iommu(pdev->dev.of_node, pdata);
ret = adreno_of_get_iommu(pdev, pdata);
if (ret)
goto err;
@ -784,14 +798,6 @@ adreno_ocmem_free(struct adreno_device *adreno_dev)
}
#endif
static inline struct adreno_device *adreno_get_dev(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(adreno_match_table, &pdev->dev);
return of_id ? (struct adreno_device *) of_id->data : NULL;
}
int adreno_probe(struct platform_device *pdev)
{
struct kgsl_device *device;
@ -817,6 +823,17 @@ int adreno_probe(struct platform_device *pdev)
/* Identify the specific GPU */
adreno_identify_gpu(adreno_dev);
/*
* qcom,iommu-secure-id is used to identify MMUs that can handle secure
* content but that is only part of the story - the GPU also has to be
* able to handle secure content. Unfortunately in a classic catch-22
* we cannot identify the GPU until after the DT is parsed. tl;dr -
* check the GPU capabilities here and modify mmu->secured accordingly
*/
if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
device->mmu.secured = false;
status = kgsl_device_platform_probe(device);
if (status) {
device->pdev = NULL;
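
Net effect of the adreno.c changes: adreno_of_get_iommu() now receives the platform device so it can mark the MMU secured when the IOMMU node carries "qcom,iommu-secure-id", and adreno_probe() clears that flag again once the GPU is identified if the core lacks ADRENO_CONTENT_PROTECTION (the catch-22 described in the comment above). A condensed, hedged sketch of that two-gate decision; the helper name is invented here:

/* Hedged summary - the real logic is split across adreno_of_get_iommu()
 * and adreno_probe() as shown in the hunks above. */
static void adreno_decide_secure(struct adreno_device *adreno_dev,
				struct device_node *iommu_node)
{
	struct kgsl_mmu *mmu = &adreno_dev->dev.mmu;

	/* Gate 1: the DT advertises a secure IOMMU context bank */
	mmu->secured = of_property_read_bool(iommu_node,
						"qcom,iommu-secure-id");

	/* Gate 2: the GPU core itself must support content protection */
	if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
		mmu->secured = false;
}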

View file

@ -81,6 +81,8 @@
#define ADRENO_PPD BIT(6)
/* The microcode supports register to register copy and compare */
#define ADRENO_HAS_REG_TO_REG_CMDS BIT(7)
/* The GPU supports content protection */
#define ADRENO_CONTENT_PROTECTION BIT(8)
/* Flags to control command packet settings */
#define KGSL_CMD_FLAGS_NONE 0
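
ADRENO_CONTENT_PROTECTION extends the per-core feature bitmask that adreno_probe() queries through ADRENO_FEATURE(). The macro itself is not part of this diff; the definition below is an assumption that matches how it is used above:

/* Assumed shape of the feature test - the real macro lives in adreno.h */
#define ADRENO_FEATURE(_dev, _bit) \
	((_dev)->gpucore->features & (_bit))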

View file

@ -717,6 +717,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
unsigned int gpuaddr = rb->device->memstore.gpuaddr;
bool profile_ready;
struct adreno_context *drawctxt = rb->drawctxt_active;
bool secured_ctxt = false;
if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base) &&
!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
@ -736,8 +737,11 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
* here. As a result, any other code that accesses this variable
* must also use device->mutex.
*/
if (drawctxt)
if (drawctxt) {
drawctxt->internal_timestamp = rb->timestamp;
if (drawctxt->base.flags & KGSL_CONTEXT_SECURE)
secured_ctxt = true;
}
/*
* If in stream ib profiling is enabled and there are counters
@ -760,6 +764,8 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
/* internal ib command identifier for the ringbuffer */
total_sizedwords += (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) ? 2 : 0;
total_sizedwords += (secured_ctxt) ? 26 : 0;
/* Add two dwords for the CP_INTERRUPT */
total_sizedwords +=
(drawctxt || (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) ? 2 : 0;
@ -835,6 +841,33 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
KGSL_MEMSTORE_RB_OFFSET(rb, soptimestamp);
*ringcmds++ = timestamp;
if (secured_ctxt) {
*ringcmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*ringcmds++ = 0x00000000;
/*
* The two commands will stall the PFP until the PFP-ME-AHB
* is drained and the GPU is idle. As soon as this happens,
* the PFP will start moving again.
*/
*ringcmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
*ringcmds++ = 0x00000000;
/*
* Below commands are processed by ME. GPU will be
* idle when they are processed. But the PFP will continue
* to fetch instructions at the same time.
*/
*ringcmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
*ringcmds++ = 0;
*ringcmds++ = cp_type3_packet(CP_WIDE_REG_WRITE, 2);
*ringcmds++ = A4XX_RBBM_SECVID_TRUST_CONTROL;
*ringcmds++ = 1;
*ringcmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
*ringcmds++ = 1;
/* Stall PFP until all above commands are complete */
*ringcmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
*ringcmds++ = 0x00000000;
}
if (flags & KGSL_CMD_FLAGS_PMODE) {
/* disable protected mode error checking */
*ringcmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
@ -905,6 +938,22 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
*ringcmds++ = 0x00000000;
}
if (secured_ctxt) {
*ringcmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*ringcmds++ = 0x00000000;
*ringcmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
*ringcmds++ = 0x00000000;
*ringcmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
*ringcmds++ = 0;
*ringcmds++ = cp_type3_packet(CP_WIDE_REG_WRITE, 2);
*ringcmds++ = A4XX_RBBM_SECVID_TRUST_CONTROL;
*ringcmds++ = 0;
*ringcmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
*ringcmds++ = 1;
*ringcmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
*ringcmds++ = 0x00000000;
}
adreno_ringbuffer_submit(rb);
return 0;
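
Both blocks added to adreno_ringbuffer_addcmds() emit the same 13-dword sequence: drain the pipeline, drop CP protected mode, write A4XX_RBBM_SECVID_TRUST_CONTROL through CP_WIDE_REG_WRITE (1 on entry, 0 on exit), restore protected mode, and fence with CP_WAIT_FOR_ME. Two invocations account for the 26 dwords added to total_sizedwords. A hedged helper form of the sequence the driver open-codes:

/* Hedged sketch: emit the secure-mode toggle; 'secure' selects the value
 * written to A4XX_RBBM_SECVID_TRUST_CONTROL. Returns the advanced pointer. */
static unsigned int *cp_secure_mode(unsigned int *cmds, bool secure)
{
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);	/* drain the pipe */
	*cmds++ = 0x00000000;
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);	/* stall PFP until ME/AHB drain */
	*cmds++ = 0x00000000;
	*cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
	*cmds++ = 0;					/* open protected registers */
	*cmds++ = cp_type3_packet(CP_WIDE_REG_WRITE, 2);
	*cmds++ = A4XX_RBBM_SECVID_TRUST_CONTROL;
	*cmds++ = secure ? 1 : 0;
	*cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
	*cmds++ = 1;					/* re-enable protection */
	*cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);	/* fence the register write */
	*cmds++ = 0x00000000;
	return cmds;
}
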
@ -1214,7 +1263,6 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
struct kgsl_context *context;
struct adreno_context *drawctxt;
bool use_preamble = true;
bool secured_ctxt = false;
bool cmdbatch_profiling = false;
int flags = KGSL_CMD_FLAGS_NONE;
int ret;
@ -1284,23 +1332,17 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
/*
* Worst case size:
* 2 - start of IB identifier
* 6 - secure IB start
* 2 - cmdbatch profiling
* 1 - skip preamble
* 3 * numibs - 3 per IB
* 2 - cmdbatch profiling
* 6 - secure IB end
* 2 - end of IB identifier
*/
if (context->flags & KGSL_CONTEXT_SECURE)
secured_ctxt = true;
if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING &&
adreno_is_a4xx(adreno_dev) && profile_buffer)
cmdbatch_profiling = true;
cmds = link = kzalloc(sizeof(unsigned int) * (numibs * 3 + 5 +
(secured_ctxt ? 14 : 0) +
(cmdbatch_profiling ? 4 : 0)),
GFP_KERNEL);
if (!link) {
@ -1311,16 +1353,6 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
*cmds++ = cp_nop_packet(1);
*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
if (secured_ctxt) {
*cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
*cmds++ = 0;
*cmds++ = cp_type3_packet(CP_WIDE_REG_WRITE, 2);
*cmds++ = A4XX_RBBM_SECVID_TRUST_CONTROL;
*cmds++ = 1;
*cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
*cmds++ = 1;
}
/*
* Add cmds to read the GPU ticks at the start of the cmdbatch and
* write it into the appropriate cmdbatch profiling buffer offset
@ -1364,16 +1396,6 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
gpu_ticks_retired);
}
if (secured_ctxt) {
*cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
*cmds++ = 0;
*cmds++ = cp_type3_packet(CP_WIDE_REG_WRITE, 2);
*cmds++ = A4XX_RBBM_SECVID_TRUST_CONTROL;
*cmds++ = 0;
*cmds++ = cp_type3_packet(CP_SET_PROTECTED_MODE, 1);
*cmds++ = 1;
}
*cmds++ = cp_nop_packet(1);
*cmds++ = KGSL_END_OF_IB_IDENTIFIER;
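
Because the SECVID toggle now lives in adreno_ringbuffer_addcmds(), the per-cmdbatch command list no longer carries the secure enter/exit packets, which is why both the worst-case comment and the kzalloc() sizing lose their secure terms. A hedged restatement of the remaining accounting:

/* Hedged restatement of the sizing above; names mirror the locals in
 * adreno_ringbuffer_submitcmd(). */
static unsigned int submitcmd_worst_case_dwords(unsigned int numibs,
						bool cmdbatch_profiling)
{
	/* 2 start-of-IB id + 1 skip-preamble + 3 per IB + 2 end-of-IB id,
	 * plus 2 + 2 profiling dwords when enabled. The old secure IB
	 * start/end entries (6 + 6) are gone on purpose. */
	return numibs * 3 + 5 + (cmdbatch_profiling ? 4 : 0);
}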

View file

@ -3042,6 +3042,9 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
break;
case KGSL_MEM_ENTRY_ION:
if (kgsl_mmu_is_secured(&dev_priv->device->mmu) &&
(param->flags & KGSL_MEMFLAGS_SECURE))
entry->memdesc.priv &= ~KGSL_MEMDESC_GUARD_PAGE;
result = kgsl_setup_ion(entry, private->pagetable, data,
dev_priv->device);
break;
@ -3401,6 +3404,11 @@ long kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
if (result)
return result;
if (param->flags & KGSL_MEMFLAGS_SECURE) {
entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
entry->memdesc.priv &= ~KGSL_MEMDESC_GUARD_PAGE;
}
result = kgsl_mem_entry_attach_process(entry, dev_priv);
if (result != 0)
goto err;
@ -3436,6 +3444,9 @@ long kgsl_ioctl_gpumem_alloc_id(struct kgsl_device_private *dev_priv,
if (result != 0)
goto err;
if (param->flags & KGSL_MEMFLAGS_SECURE)
entry->memdesc.priv |= KGSL_MEMDESC_SECURE;
result = kgsl_mem_entry_attach_process(entry, dev_priv);
if (result != 0)
goto err;
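
From userspace the feature is reached through the existing memory ioctls: importing an ION buffer or allocating GPU memory with KGSL_MEMFLAGS_SECURE marks the memdesc KGSL_MEMDESC_SECURE and, on the gpumem_alloc path, also clears the guard-page bit. A hedged usage sketch, assuming the struct and ioctl names from msm_kgsl.h (not shown in this diff):

#include <sys/ioctl.h>
#include <linux/msm_kgsl.h>

/* Hedged sketch of a secure allocation request from userspace */
static int alloc_secure_buffer(int kgsl_fd, size_t size, unsigned int *id)
{
	struct kgsl_gpumem_alloc_id param = {
		.size = size,
		.flags = KGSL_MEMFLAGS_SECURE,
	};

	if (ioctl(kgsl_fd, IOCTL_KGSL_GPUMEM_ALLOC_ID, &param))
		return -1;

	*id = param.id;	/* kernel-assigned buffer id */
	return 0;
}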

View file

@ -129,6 +129,8 @@ struct kgsl_memdesc_ops {
#define KGSL_MEMDESC_PRIVATE BIT(7)
/* Memory is accessible in privileged mode */
#define KGSL_MEMDESC_PRIVILEGED BIT(8)
/* The memdesc is TZ locked for content protection */
#define KGSL_MEMDESC_TZ_LOCKED BIT(9)
/* shared memory allocation */
struct kgsl_memdesc {
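
KGSL_MEMDESC_TZ_LOCKED distinguishes "requested secure" from "actually handed to TZ": the SECURE bit is set when the allocation is made, but TZ_LOCKED is set only after the scm_call() lock succeeds, and only then does the free path need to unlock before releasing the CMA memory. A hedged lifecycle sketch mirroring kgsl_sharedmem.c further down (not literal driver code):

/* Hedged sketch of how the two priv bits interact at free time */
static void free_secure_memdesc(struct kgsl_device *device,
				struct kgsl_memdesc *memdesc)
{
	if (memdesc->priv & KGSL_MEMDESC_SECURE) {
		/* only buffers TZ actually accepted need to be unlocked */
		if (memdesc->priv & KGSL_MEMDESC_TZ_LOCKED)
			kgsl_cma_unlock_secure(device, memdesc);
	}
	/* ...then the normal dma_free_coherent() path runs */
}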

View file

@ -649,6 +649,44 @@ static void *kgsl_iommu_create_pagetable(void)
return NULL;
}
/*
* kgsl_iommu_create_secure_pagetable - Create a secure IOMMU pagetable
*
* Allocate memory to hold a pagetable and allocate the secure IOMMU
* domain which is the actual IOMMU pagetable
* Return - void
*/
static void *kgsl_iommu_create_secure_pagetable(void)
{
int domain_num;
struct kgsl_iommu_pt *iommu_pt;
struct msm_iova_layout kgsl_secure_layout = {
/* we manage VA space ourselves, so partitions aren't needed */
.partitions = NULL,
.npartitions = 0,
.client_name = "kgsl_secure",
.domain_flags = 0,
.is_secure = 1,
};
iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
if (!iommu_pt)
return NULL;
domain_num = msm_register_domain(&kgsl_secure_layout);
if (domain_num >= 0) {
iommu_pt->domain = msm_get_iommu_domain(domain_num);
if (iommu_pt->domain)
return iommu_pt;
}
KGSL_CORE_ERR("Failed to create secure iommu domain\n");
kfree(iommu_pt);
return NULL;
}
/*
* kgsl_detach_pagetable_iommu_domain - Detach the IOMMU unit from a
* pagetable
@ -1273,7 +1311,6 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
int status = 0;
struct kgsl_iommu *iommu;
struct platform_device *pdev = mmu->device->pdev;
struct kgsl_device *device = mmu->device;
size_t secured_pool_sz = 0;
atomic_set(&mmu->fault, 0);
@ -1289,20 +1326,14 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
if (status)
goto done;
if (mmu->secured)
secured_pool_sz = KGSL_IOMMU_SECURE_MEM_SIZE;
if (KGSL_MMU_USE_PER_PROCESS_PT &&
of_property_match_string(pdev->dev.of_node, "clock-names",
"gtcu_iface_clk") >= 0)
iommu->gtcu_iface_clk = clk_get(&pdev->dev, "gtcu_iface_clk");
if (mmu->secured) {
kgsl_regwrite(device, A4XX_RBBM_SECVID_TSB_CONTROL, 0x0);
kgsl_regwrite(device, A4XX_RBBM_SECVID_TSB_TRUSTED_BASE,
KGSL_IOMMU_SECURE_MEM_BASE);
kgsl_regwrite(device, A4XX_RBBM_SECVID_TSB_TRUSTED_SIZE,
KGSL_IOMMU_SECURE_MEM_SIZE);
secured_pool_sz = KGSL_IOMMU_SECURE_MEM_SIZE;
}
mmu->pt_base = KGSL_MMU_MAPPED_MEM_BASE;
mmu->pt_size = (KGSL_MMU_MAPPED_MEM_SIZE - secured_pool_sz);
@ -1538,7 +1569,12 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
if (!iommu_unit->dev[j].attached)
/*
* 1) HLOS cannot program secure context bank.
* 2) If context bank is not attached skip.
*/
if ((!iommu_unit->dev[j].attached) ||
(KGSL_IOMMU_CONTEXT_SECURE == j))
continue;
/*
@ -1575,6 +1611,15 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
if (mmu->secured) {
kgsl_regwrite(mmu->device, A4XX_RBBM_SECVID_TRUST_CONFIG, 0x2);
kgsl_regwrite(mmu->device, A4XX_RBBM_SECVID_TSB_CONTROL, 0x1);
kgsl_regwrite(mmu->device, A4XX_RBBM_SECVID_TSB_TRUSTED_BASE,
KGSL_IOMMU_SECURE_MEM_BASE);
kgsl_regwrite(mmu->device, A4XX_RBBM_SECVID_TSB_TRUSTED_SIZE,
KGSL_IOMMU_SECURE_MEM_SIZE);
}
done:
return status;
}
@ -1729,8 +1774,15 @@ static void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
struct kgsl_iommu_unit *iommu_unit =
&iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
if (!iommu_unit->dev[j].attached)
/*
* 1) HLOS cannot program secure context bank.
* 2) If context bank is not attached skip.
*/
if ((!iommu_unit->dev[j].attached) ||
(KGSL_IOMMU_CONTEXT_SECURE == j))
continue;
if (iommu_unit->dev[j].fault) {
_iommu_lock(iommu);
KGSL_IOMMU_SET_CTX_REG(iommu,
@ -2022,8 +2074,15 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
if (!iommu_unit->dev[j].attached)
/*
* 1) HLOS cannot program secure context bank.
* 2) If context bank is not attached skip.
*/
if ((!iommu_unit->dev[j].attached) ||
(KGSL_IOMMU_CONTEXT_SECURE == j))
continue;
sctlr_val = KGSL_IOMMU_GET_CTX_REG(iommu,
iommu_unit,
iommu_unit->dev[j].ctx_id,
@ -2067,8 +2126,15 @@ static void kgsl_iommu_set_pagefault(struct kgsl_mmu *mmu)
/* Loop through all IOMMU devices to check for fault */
for (i = 0; i < iommu->unit_count; i++) {
for (j = 0; j < iommu->iommu_units[i].dev_count; j++) {
if (!iommu->iommu_units[i].dev[j].attached)
/*
* 1) HLOS cannot program secure context bank.
* 2) If context bank is not attached skip.
*/
if ((!iommu->iommu_units[i].dev[j].attached) ||
(KGSL_IOMMU_CONTEXT_SECURE == j))
continue;
fsr = KGSL_IOMMU_GET_CTX_REG(iommu,
(&(iommu->iommu_units[i])),
iommu->iommu_units[i].dev[j].ctx_id, FSR);
@ -2137,6 +2203,7 @@ struct kgsl_mmu_pt_ops iommu_pt_ops = {
.mmu_map = kgsl_iommu_map,
.mmu_unmap = kgsl_iommu_unmap,
.mmu_create_pagetable = kgsl_iommu_create_pagetable,
.mmu_create_secure_pagetable = kgsl_iommu_create_secure_pagetable,
.mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
.get_ptbase = kgsl_iommu_get_ptbase,
};
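
Four loops in kgsl_iommu.c gain the same guard: the secure context bank belongs to TZ, so HLOS must never program its registers, and detached banks are skipped as before. The patch open-codes the test each time; a hedged helper form for clarity:

/* Hedged helper equivalent of the repeated check above */
static bool kgsl_iommu_ctx_programmable(struct kgsl_iommu_unit *iommu_unit,
					int j)
{
	/* 1) HLOS cannot program the secure context bank.
	 * 2) A detached context bank has nothing to program. */
	return iommu_unit->dev[j].attached && (j != KGSL_IOMMU_CONTEXT_SECURE);
}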

View file

@ -491,8 +491,6 @@ int kgsl_mmu_init(struct kgsl_device *device)
{
int status = 0;
struct kgsl_mmu *mmu = &device->mmu;
struct platform_device *pdev = device->pdev;
mmu->device = device;
/*
@ -512,9 +510,6 @@ int kgsl_mmu_init(struct kgsl_device *device)
kgsl_sharedmem_set(device, &mmu->setstate_memory, 0, 0,
mmu->setstate_memory.size);
mmu->secured = of_property_read_bool(pdev->dev.of_node,
"qcom,secure-context");
if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) {
mmu->mmu_ops = &kgsl_iommu_ops;
status = mmu->mmu_ops->mmu_init(mmu);
@ -602,12 +597,19 @@ kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu,
if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
pagetable->pt_ops = &iommu_pt_ops;
pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
if (!pagetable->priv)
goto err;
if (mmu->secured && (KGSL_MMU_SECURE_PT == name))
pagetable->priv =
pagetable->pt_ops->mmu_create_secure_pagetable();
else {
pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
if (pagetable->priv) {
status = kgsl_map_global_pt_entries(pagetable);
if (status)
goto err;
}
}
status = kgsl_map_global_pt_entries(pagetable);
if (status)
if (!pagetable->priv)
goto err;
spin_lock_irqsave(&kgsl_driver.ptlock, flags);
@ -721,7 +723,8 @@ kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
* back to user region if that fails. All memory allocated by the user
* goes into the user region first.
*/
if ((KGSL_MEMFLAGS_USERMEM_MASK & memdesc->flags) != 0) {
if (((KGSL_MEMFLAGS_USERMEM_MASK | KGSL_MEMFLAGS_SECURE)
& memdesc->flags) != 0) {
unsigned int page_align = ilog2(PAGE_SIZE);
if (kgsl_memdesc_get_align(memdesc) > 0)
@ -736,6 +739,9 @@ kgsl_mmu_get_gpuaddr(struct kgsl_pagetable *pagetable,
}
}
if (((KGSL_MEMFLAGS_SECURE) & memdesc->flags) && (!memdesc->gpuaddr))
return -ENOMEM;
bit = bitmap_find_next_zero_area(pagetable->mem_bitmap,
KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT, 1,
(unsigned int) (size >> PAGE_SHIFT), 0);
@ -939,13 +945,7 @@ int kgsl_mmu_gpuaddr_in_range(struct kgsl_pagetable *pt, unsigned int gpuaddr)
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
return (gpuaddr != 0);
if (kgsl_mmu_is_secured(pt->mmu)) {
if (gpuaddr >= KGSL_IOMMU_SECURE_MEM_BASE && gpuaddr <
(KGSL_IOMMU_SECURE_MEM_BASE + KGSL_IOMMU_SECURE_MEM_SIZE))
return 1;
else
return 0;
} else if (gpuaddr > 0 && gpuaddr < KGSL_MMU_GLOBAL_MEM_BASE)
if (gpuaddr > 0 && gpuaddr < KGSL_MMU_GLOBAL_MEM_BASE)
return 1;
return 0;
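
kgsl_mmu_createpagetableobject() now forks on the pagetable name: the secure pagetable comes from mmu_create_secure_pagetable() and deliberately skips kgsl_map_global_pt_entries(), since HLOS-owned global buffers do not belong in the secure domain. A hedged distillation of that branch; the helper name is invented here:

/* Hedged sketch of the backend selection added above */
static void *create_pt_backend(struct kgsl_mmu *mmu, unsigned int name,
				struct kgsl_pagetable *pagetable)
{
	if (mmu->secured && name == KGSL_MMU_SECURE_PT)
		/* secure PT: no HLOS global entries are mapped into it */
		return pagetable->pt_ops->mmu_create_secure_pagetable();

	return pagetable->pt_ops->mmu_create_pagetable();
}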

View file

@ -124,6 +124,7 @@ struct kgsl_mmu_pt_ops {
int (*mmu_unmap) (struct kgsl_pagetable *pt,
struct kgsl_memdesc *memdesc);
void *(*mmu_create_pagetable) (void);
void *(*mmu_create_secure_pagetable) (void);
void (*mmu_destroy_pagetable) (struct kgsl_pagetable *);
phys_addr_t (*get_ptbase) (struct kgsl_pagetable *);
};

View file

@ -23,6 +23,7 @@
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
#include "kgsl_device.h"
#include "kgsl_log.h"
static DEFINE_MUTEX(kernel_map_global_lock);
@ -81,6 +82,9 @@ struct mem_entry_stats {
mem_entry_max_show), \
}
static int kgsl_cma_unlock_secure(struct kgsl_device *device,
struct kgsl_memdesc *memdesc);
/**
* Given a kobj, find the process structure attached to it
*/
@ -481,7 +485,14 @@ static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
static void kgsl_cma_coherent_free(struct kgsl_memdesc *memdesc)
{
if (memdesc->hostptr) {
kgsl_driver.stats.coherent -= memdesc->size;
if (memdesc->priv & KGSL_MEMDESC_SECURE) {
kgsl_driver.stats.secure -= memdesc->size;
if (memdesc->priv & KGSL_MEMDESC_TZ_LOCKED)
kgsl_cma_unlock_secure(
memdesc->pagetable->mmu->device, memdesc);
} else
kgsl_driver.stats.coherent -= memdesc->size;
dma_free_coherent(memdesc->dev, memdesc->size,
memdesc->hostptr, memdesc->physaddr);
}
@ -912,12 +923,16 @@ int kgsl_cma_alloc_secure(struct kgsl_device *device,
int result = 0;
struct cp2_lock_req request;
unsigned int resp;
unsigned int *chunk_list = NULL;
struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
if (size == 0)
return -EINVAL;
memdesc->size = ALIGN(size, SZ_1M);
/* Align size to 1M boundaries */
size = ALIGN(size, SZ_1M);
memdesc->size = size;
memdesc->pagetable = pagetable;
memdesc->ops = &kgsl_cma_ops;
memdesc->dev = device->dev->parent;
@ -938,10 +953,21 @@ int kgsl_cma_alloc_secure(struct kgsl_device *device,
* Flush the virt addr range before sending the memory to the
* secure environment to ensure the data is actually present
* in RAM
*
* Chunk_list holds the physical address of secure memory.
* Pass in the virtual address of chunk_list to flush.
* Chunk_list size is 1 because secure memory is physically
* contiguous.
*/
dmac_flush_range(memdesc->hostptr, memdesc->hostptr + memdesc->size);
chunk_list = kzalloc(sizeof(unsigned int), GFP_KERNEL);
if (!chunk_list) {
result = -ENOMEM;
goto err;
}
chunk_list[0] = memdesc->physaddr;
dmac_flush_range((void *)chunk_list, (void *)chunk_list + 1);
request.chunks.chunk_list = memdesc->physaddr;
request.chunks.chunk_list = virt_to_phys(chunk_list);
request.chunks.chunk_list_size = 1;
request.chunks.chunk_size = memdesc->size;
request.mem_usage = 0;
@ -952,8 +978,11 @@ int kgsl_cma_alloc_secure(struct kgsl_device *device,
result = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
&request, sizeof(request), &resp, sizeof(resp));
if (result) {
KGSL_DRV_ERR(device, "Secure buffer allocation failed\n");
if (result == 0)
memdesc->priv |= KGSL_MEMDESC_TZ_LOCKED;
else {
KGSL_DRV_ERR(device, "Secure buffer size %zx failed pt %d\n",
memdesc->size, pagetable->name);
goto err;
}
@ -962,9 +991,69 @@ int kgsl_cma_alloc_secure(struct kgsl_device *device,
kgsl_driver.stats.secure_max);
err:
kfree(chunk_list);
if (result)
kgsl_sharedmem_free(memdesc);
return result;
}
EXPORT_SYMBOL(kgsl_cma_alloc_secure);
/**
* kgsl_cma_unlock_secure() - Unlock secure memory by calling TZ
* @device: kgsl device pointer
* @memdesc: memory descriptor
*/
static int kgsl_cma_unlock_secure(struct kgsl_device *device,
struct kgsl_memdesc *memdesc)
{
int result = 0;
struct cp2_lock_req request;
unsigned int resp;
unsigned int *chunk_list;
struct kgsl_pagetable *pagetable = device->mmu.securepagetable;
if (!memdesc->size) {
KGSL_DRV_ERR(device, "Secure buffer invalid size 0\n");
return -EINVAL;
}
if (!IS_ALIGNED(memdesc->size, SZ_1M)) {
KGSL_DRV_ERR(device,
"Secure buffer size %zx must be %x aligned",
memdesc->size, SZ_1M);
return -EINVAL;
}
/*
* Flush the phys addr range before sending the memory to the
* secure environment to ensure the data is actually present
* in RAM
*/
chunk_list = kzalloc(sizeof(unsigned int), GFP_KERNEL);
if (!chunk_list)
return -ENOMEM;
chunk_list[0] = memdesc->physaddr;
dmac_flush_range((void *)chunk_list, (void *)chunk_list + 1);
request.chunks.chunk_list = virt_to_phys(chunk_list);
request.chunks.chunk_list_size = 1;
request.chunks.chunk_size = memdesc->size;
request.mem_usage = 0;
request.lock = 0;
kmap_flush_unused();
kmap_atomic_flush_unused();
result = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
&request, sizeof(request), &resp, sizeof(resp));
kfree(chunk_list);
if (result)
KGSL_DRV_ERR(device,
"Secure buffer unlock size %zx failed pt %d\n",
memdesc->size, pagetable->name);
return result;
}
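
kgsl_cma_alloc_secure() and the new kgsl_cma_unlock_secure() build the same cp2_lock_req: a one-entry chunk list holding the buffer's physical address, flushed to RAM and handed to TZ by physical address, with only the lock field differing. A hedged sketch of that shared construction; the lock value of 1 on the alloc path is an assumption, since that assignment falls outside the hunks shown:

/* Hedged sketch of the shared TZ lock/unlock request */
static int kgsl_tz_mem_protect(struct kgsl_memdesc *memdesc, unsigned int lock)
{
	struct cp2_lock_req request;
	unsigned int resp;
	unsigned int *chunk_list;
	int result;

	/* one physically contiguous chunk covering the whole buffer */
	chunk_list = kzalloc(sizeof(unsigned int), GFP_KERNEL);
	if (!chunk_list)
		return -ENOMEM;
	chunk_list[0] = memdesc->physaddr;
	dmac_flush_range(chunk_list, chunk_list + 1);	/* flush the one-entry list */

	request.chunks.chunk_list = virt_to_phys(chunk_list);
	request.chunks.chunk_list_size = 1;
	request.chunks.chunk_size = memdesc->size;
	request.mem_usage = 0;
	request.lock = lock;	/* assumed: 1 to lock, 0 to unlock */

	result = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
			&request, sizeof(request), &resp, sizeof(resp));
	kfree(chunk_list);
	return result;
}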