msm: kgsl: Restructure IOMMU clock management

Restructure IOMMU clock management code to enable/disable clocks
based on iommu unit instead of context. In hardware IOMMU clocks
are associated with each IOMMU unit. There is no granularity of
clock control for each iommu context. Turning on the clocks for
one context in an IOMMU unit automatically turns on the clocks for
all the contexts in that IOMMU unit. Reduce the number of clock
enable/disable calls and structure the software design as per
hardware design by doing clock management per IOMMU unit.

Change-Id: Ib54dbb7a31d6b30b65c07ef130bee149e3e587d8
Signed-off-by: Tarun Karra <tkarra@codeaurora.org>
This commit is contained in:
Tarun Karra 2014-04-22 16:16:11 -07:00 committed by Carter Cooper
parent 0972d34e21
commit cc62b3e282
5 changed files with 118 additions and 137 deletions

View file

@ -901,10 +901,7 @@ static int adreno_iommu_setstate(struct kgsl_device *device,
}
adreno_ctx = ADRENO_CONTEXT(context);
result = kgsl_mmu_enable_clk(&device->mmu,
KGSL_IOMMU_CONTEXT_USER);
if (result)
goto done;
kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_MAX_UNITS);
pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
device->mmu.hwpagetable);
@ -939,21 +936,19 @@ static int adreno_iommu_setstate(struct kgsl_device *device,
* This returns the per context timestamp but we need to
* use the global timestamp for iommu clock disablement
*/
adreno_ringbuffer_issuecmds(device, adreno_ctx, KGSL_CMD_FLAGS_PMODE,
&link[0], sizedwords);
result = adreno_ringbuffer_issuecmds(device, adreno_ctx,
KGSL_CMD_FLAGS_PMODE, &link[0], sizedwords);
/*
* On error disable the IOMMU clock right away otherwise turn it off
* after the command has been retired
*/
if (result)
kgsl_mmu_disable_clk(&device->mmu,
KGSL_IOMMU_CONTEXT_USER);
kgsl_mmu_disable_clk(&device->mmu, KGSL_IOMMU_MAX_UNITS);
else
kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts,
KGSL_IOMMU_CONTEXT_USER);
KGSL_IOMMU_MAX_UNITS);
done:
kgsl_context_put(context);
return result;
}

View file

@ -453,36 +453,29 @@ done:
/*
* kgsl_iommu_disable_clk - Disable iommu clocks
* @mmu - Pointer to mmu structure
* @unit - Iommu unit
*
* Disables iommu clocks
* Disables iommu clocks for an iommu unit
* Return - void
*/
static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu, int ctx_id)
static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu, int unit)
{
struct kgsl_iommu *iommu = mmu->priv;
struct msm_iommu_drvdata *iommu_drvdata;
int i, j;
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
if (ctx_id != iommu_unit->dev[j].ctx_id)
continue;
atomic_dec(&iommu_unit->dev[j].clk_enable_count);
BUG_ON(
atomic_read(&iommu_unit->dev[j].clk_enable_count) < 0);
/*
* the clock calls have a refcount so call them on every
* enable/disable call
*/
iommu_drvdata = dev_get_drvdata(
iommu_unit->dev[j].dev->parent);
if (iommu_drvdata->aclk)
clk_disable_unprepare(iommu_drvdata->aclk);
if (iommu_drvdata->clk)
clk_disable_unprepare(iommu_drvdata->clk);
clk_disable_unprepare(iommu_drvdata->pclk);
}
/* Turn off the clks for IOMMU unit requested */
if ((unit != i) && (unit != KGSL_IOMMU_MAX_UNITS))
continue;
atomic_dec(&iommu_unit->clk_enable_count);
BUG_ON(atomic_read(&iommu_unit->clk_enable_count) < 0);
for (j = (KGSL_IOMMU_MAX_CLKS - 1); j >= 0; j--)
if (iommu_unit->clks[j])
clk_disable_unprepare(iommu_unit->clks[j]);
}
}
@ -504,12 +497,10 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
{
struct kgsl_iommu_disable_clk_param *param = data;
if ((0 <= timestamp_cmp(ts, param->ts)) ||
(KGSL_EVENT_CANCELLED == type))
kgsl_iommu_disable_clk(param->mmu, param->ctx_id);
else
/* something went wrong with the event handling mechanism */
BUG_ON(1);
kgsl_iommu_disable_clk(param->mmu, param->unit);
/* Free param we are done using it */
kfree(param);
}
/*
@ -519,8 +510,7 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
* @ts_valid - Indicates whether ts parameter is valid, if this parameter
* is false then it means that the calling function wants to disable the
* IOMMU clocks immediately without waiting for any timestamp
* @ctx_id: Context id of the IOMMU context for which clocks are to be
* turned off
* @unit: IOMMU unit for which clocks are to be turned off
*
* Creates an event to disable the IOMMU clocks on timestamp and if event
* already exists then updates the timestamp of disabling the IOMMU clocks
@ -530,7 +520,7 @@ static void kgsl_iommu_clk_disable_event(struct kgsl_device *device, void *data,
*/
static void
kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu,
unsigned int ts, int ctx_id)
unsigned int ts, int unit)
{
struct kgsl_iommu_disable_clk_param *param;
@ -540,7 +530,7 @@ kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu,
return;
}
param->mmu = mmu;
param->ctx_id = ctx_id;
param->unit = unit;
param->ts = ts;
if (kgsl_add_event(mmu->device, KGSL_MEMSTORE_GLOBAL,
@ -551,71 +541,66 @@ kgsl_iommu_disable_clk_on_ts(struct kgsl_mmu *mmu,
}
}
/*
* kgsl_iommu_clk_prepare_enable - Enable iommu clock
* @clk - clock to enable
*
* Prepare-enables the clock. Retries 3 times on enable failure; on the
* 4th failure returns an error.
* Return: 0 on success else 1 on error
*/
static int kgsl_iommu_clk_prepare_enable(struct clk *clk)
{
int num_retries = 4;
while (num_retries--) {
if (!clk_prepare_enable(clk))
return 0;
}
return 1;
}
/*
* kgsl_iommu_enable_clk - Enable iommu clocks
* @mmu - Pointer to mmu structure
* @ctx_id - The context bank whose clocks are to be turned on
* @unit - The iommu unit whose clocks are to be turned on
*
* Enables iommu clocks of a given context
* Enables iommu clocks of a given iommu unit
* Return: void. A clock enable failure is treated as fatal (BUG()).
*/
static int kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
int ctx_id)
static void kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
int unit)
{
int ret = 0;
int i, j;
struct kgsl_iommu *iommu = mmu->priv;
struct msm_iommu_drvdata *iommu_drvdata;
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
if (ctx_id != iommu_unit->dev[j].ctx_id)
continue;
iommu_drvdata =
dev_get_drvdata(iommu_unit->dev[j].dev->parent);
ret = clk_prepare_enable(iommu_drvdata->pclk);
if (ret)
goto done;
if (iommu_drvdata->clk) {
ret = clk_prepare_enable(iommu_drvdata->clk);
if (ret) {
clk_disable_unprepare(
iommu_drvdata->pclk);
goto done;
}
}
if (iommu_drvdata->aclk) {
ret = clk_prepare_enable(iommu_drvdata->aclk);
if (ret) {
if (iommu_drvdata->clk)
clk_disable_unprepare(
iommu_drvdata->clk);
clk_disable_unprepare(
iommu_drvdata->pclk);
goto done;
}
}
atomic_inc(&iommu_unit->dev[j].clk_enable_count);
/* Turn on the clks for IOMMU unit requested */
if ((unit != i) && (unit != KGSL_IOMMU_MAX_UNITS))
continue;
for (j = 0; j < KGSL_IOMMU_MAX_CLKS; j++) {
if (iommu_unit->clks[j])
if (kgsl_iommu_clk_prepare_enable(
iommu_unit->clks[j]))
goto done;
}
atomic_inc(&iommu_unit->clk_enable_count);
}
return;
done:
if (ret) {
struct kgsl_iommu_unit *iommu_unit;
if (iommu->unit_count == i)
i--;
iommu_unit = &iommu->iommu_units[i];
do {
for (j--; j >= 0; j--)
kgsl_iommu_disable_clk(mmu, ctx_id);
i--;
if (i >= 0) {
iommu_unit = &iommu->iommu_units[i];
j = iommu_unit->dev_count;
}
} while (i >= 0);
}
return ret;
/*
* Any Clock enable failure should be fatal,
* System usually crashes when enabling clock fails
* BUG_ON here to catch the system in bad state for
* further debug
*/
KGSL_CORE_ERR("IOMMU clk enable failed\n");
BUG();
}
/*
@ -770,6 +755,7 @@ static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
struct kgsl_iommu_pt *iommu_pt;
struct kgsl_iommu *iommu = mmu->priv;
struct msm_iommu_drvdata *drvdata = 0;
int i, j, ret = 0;
/*
@ -801,6 +787,14 @@ static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
"iommu pt %p attached to dev %p, ctx_id %d\n",
iommu_pt->domain, iommu_unit->dev[j].dev,
iommu_unit->dev[j].ctx_id);
/* Init IOMMU unit clks here */
if (!drvdata) {
drvdata = dev_get_drvdata(
iommu_unit->dev[j].dev->parent);
iommu_unit->clks[0] = drvdata->pclk;
iommu_unit->clks[1] = drvdata->clk;
iommu_unit->clks[2] = drvdata->aclk;
}
}
}
}
@ -841,9 +835,7 @@ static int _get_iommu_ctxs(struct kgsl_mmu *mmu,
KGSL_CORE_ERR("Context name invalid\n");
return -EINVAL;
}
atomic_set(
&(iommu_unit->dev[iommu_unit->dev_count].clk_enable_count),
0);
atomic_set(&(iommu_unit->clk_enable_count), 0);
iommu_unit->dev[iommu_unit->dev_count].dev =
msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
@ -1638,17 +1630,9 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
mmu->hwpagetable = NULL;
goto done;
}
status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
if (status) {
KGSL_CORE_ERR("clk enable failed\n");
goto done;
}
status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
if (status) {
kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
KGSL_CORE_ERR("clk enable failed\n");
goto done;
}
kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
/* Get the lsb value of pagetables set in the IOMMU ttbr0 register as
* that value should not change when we change pagetables, so while
* changing pagetables we can use this lsb value of the pagetable w/o
@ -1700,8 +1684,7 @@ static int kgsl_iommu_start(struct kgsl_mmu *mmu)
KGSL_IOMMU_SETSTATE_NOP_OFFSET,
cp_nop_packet(1), sizeof(unsigned int));
kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
mmu->flags |= KGSL_FLAGS_STARTED;
done:
@ -1813,12 +1796,12 @@ void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
int i, j;
if (atomic_read(&mmu->fault)) {
kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
for (i = 0; i < iommu->unit_count; i++) {
struct kgsl_iommu_unit *iommu_unit =
&iommu->iommu_units[i];
for (j = 0; j < iommu_unit->dev_count; j++) {
if (iommu_unit->dev[j].fault) {
kgsl_iommu_enable_clk(mmu, j);
_iommu_lock();
KGSL_IOMMU_SET_CTX_REG(iommu,
iommu_unit,
@ -1828,12 +1811,12 @@ void kgsl_iommu_pagefault_resume(struct kgsl_mmu *mmu)
iommu_unit,
iommu_unit->dev[j].ctx_id,
FSR, 0);
kgsl_iommu_disable_clk(mmu, j);
_iommu_unlock();
iommu_unit->dev[j].fault = 0;
}
}
}
kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
atomic_set(&mmu->fault, 0);
}
}
@ -1905,11 +1888,11 @@ kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu)
if (in_interrupt())
return 0;
/* Return the current pt base by reading IOMMU pt_base register */
kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
pt_base = KGSL_IOMMU_GET_CTX_REG(iommu, (&iommu->iommu_units[0]),
KGSL_IOMMU_CONTEXT_USER,
TTBR0);
kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
return pt_base & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
}
@ -1936,11 +1919,7 @@ static int kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
mmu->hwpagetable);
phys_addr_t pt_val;
ret = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
if (ret) {
KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
return ret;
}
kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
/* For v0 SMMU GPU needs to be idle for tlb invalidate as well */
/* naming mismatch for iommu */
@ -2027,7 +2006,8 @@ unlock:
msm_iommu_unlock();
/* Disable smmu clock */
kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
kgsl_iommu_disable_clk(mmu, KGSL_IOMMU_MAX_UNITS);
return ret;
}

View file

@ -91,10 +91,16 @@ struct kgsl_iommu_register_list {
* Max number of iommu units that the gpu core can have
* On APQ8064, KGSL can control a maximum of 2 IOMMU units.
*/
#define KGSL_IOMMU_MAX_UNITS 2
enum kgsl_iommu_units {
KGSL_IOMMU_UNIT_0 = 0,
KGSL_IOMMU_UNIT_1 = 1,
KGSL_IOMMU_MAX_UNITS = 2,
};
/* Max number of iommu contexts per IOMMU unit */
#define KGSL_IOMMU_MAX_DEVS_PER_UNIT 2
/* Max number of iommu clks per IOMMU unit */
#define KGSL_IOMMU_MAX_CLKS 3
/* Macros to read/write IOMMU registers */
#define KGSL_IOMMU_SET_CTX_REG_LL(iommu, iommu_unit, ctx, REG, val) \
@ -143,7 +149,6 @@ struct kgsl_iommu_register_list {
* are on, else the clocks are off
* fault: Flag when set indicates that this iommu device has caused a page
* fault
* @clk_enable_count: The ref count of clock enable calls
*/
struct kgsl_iommu_device {
struct device *dev;
@ -153,7 +158,6 @@ struct kgsl_iommu_device {
bool clk_enabled;
struct kgsl_device *kgsldev;
int fault;
atomic_t clk_enable_count;
};
/*
@ -169,6 +173,8 @@ struct kgsl_iommu_device {
* @iommu_halt_enable: Valid only on IOMMU-v1, when set indicates that the iommu
* unit supports halting of the IOMMU, which can be enabled while programming
* the IOMMU registers for synchronization
* @clk_enable_count: The ref count of clock enable calls
* @clks: iommu unit clks
*/
struct kgsl_iommu_unit {
struct kgsl_iommu_device dev[KGSL_IOMMU_MAX_DEVS_PER_UNIT];
@ -176,6 +182,8 @@ struct kgsl_iommu_unit {
struct kgsl_memdesc reg_map;
unsigned int ahb_base;
int iommu_halt_enable;
atomic_t clk_enable_count;
struct clk *clks[KGSL_IOMMU_MAX_CLKS];
};
/*
@ -222,13 +230,13 @@ struct kgsl_iommu_pt {
* struct kgsl_iommu_disable_clk_param - Parameter struct for disable clk event
* @mmu: The mmu pointer
* @rb_level: the rb level in which the timestamp of the event belongs to
* @ctx_id: The IOMMU context whose clock is to be turned off
* @unit: The IOMMU unit whose clock is to be turned off
* @ts: Timestamp on which clock is to be disabled
*/
struct kgsl_iommu_disable_clk_param {
struct kgsl_mmu *mmu;
int rb_level;
int ctx_id;
int unit;
unsigned int ts;
};

View file

@ -149,11 +149,11 @@ struct kgsl_mmu_ops {
(struct kgsl_mmu *mmu);
void (*mmu_disable_clk_on_ts)
(struct kgsl_mmu *mmu,
uint32_t ts, int ctx_id);
int (*mmu_enable_clk)
(struct kgsl_mmu *mmu, int ctx_id);
uint32_t ts, int unit);
void (*mmu_enable_clk)
(struct kgsl_mmu *mmu, int unit);
void (*mmu_disable_clk)
(struct kgsl_mmu *mmu, int ctx_id);
(struct kgsl_mmu *mmu, int unit);
phys_addr_t (*mmu_get_default_ttbr0)(struct kgsl_mmu *mmu,
unsigned int unit_id,
enum kgsl_iommu_context_id ctx_id);
@ -322,27 +322,25 @@ static inline phys_addr_t kgsl_mmu_get_default_ttbr0(struct kgsl_mmu *mmu,
return 0;
}
static inline int kgsl_mmu_enable_clk(struct kgsl_mmu *mmu,
int ctx_id)
static inline void kgsl_mmu_enable_clk(struct kgsl_mmu *mmu, int unit)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_enable_clk)
return mmu->mmu_ops->mmu_enable_clk(mmu, ctx_id);
mmu->mmu_ops->mmu_enable_clk(mmu, unit);
else
return 0;
return;
}
static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu, int ctx_id)
static inline void kgsl_mmu_disable_clk(struct kgsl_mmu *mmu, int unit)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk)
mmu->mmu_ops->mmu_disable_clk(mmu, ctx_id);
mmu->mmu_ops->mmu_disable_clk(mmu, unit);
}
static inline void kgsl_mmu_disable_clk_on_ts(struct kgsl_mmu *mmu,
unsigned int ts,
int ctx_id)
unsigned int ts, int unit)
{
if (mmu->mmu_ops && mmu->mmu_ops->mmu_disable_clk_on_ts)
mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ctx_id);
mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, unit);
}
static inline unsigned int kgsl_mmu_get_int_mask(void)

View file

@ -393,7 +393,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
}
EXPORT_SYMBOL(hci_le_start_enc);
void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
void hci_le_ltk_reply(struct hci_conn *conn, __u8 ltk[16])
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_le_ltk_reply cp;
@ -403,7 +403,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
memset(&cp, 0, sizeof(cp));
cp.handle = cpu_to_le16(conn->handle);
memcpy(cp.ltk, ltk, sizeof(ltk));
memcpy(cp.ltk, ltk, sizeof(cp.ltk));
hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
}