msm: kgsl: Check if GPU is hung when reserving space in rb

We execute a busy loop when allocating space in the ringbuffer. If the
GPU is hung, this loop can spin indefinitely. Bound the loop with a wait
timeout; if the timeout elapses, report a GPU hang and attempt recovery
instead of waiting forever.

Change-Id: I035c39063cbfa25380702720f929df5319e73e61
Signed-off-by: Shubhraprakash Das <sadas@codeaurora.org>
Author: Shubhraprakash Das 2012-06-08 16:33:03 -06:00
Committer: Stephen Boyd
commit 06f2ea67a5 (parent 9c9bb87b87)
13 changed files with 159 additions and 75 deletions
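
The core of the change is the bounded wait loop added to adreno_ringbuffer_waitspace() in drivers/gpu/msm/adreno_ringbuffer.c below. Here is the same pattern as a minimal, self-contained userspace sketch; ring_t, read_rptr(), now_ms() and try_recover() are hypothetical stand-ins for illustration, not kgsl functions:

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

typedef struct { uint32_t rptr, wptr; } ring_t;

/* Monotonic wall clock in milliseconds. */
static uint64_t now_ms(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000u + (uint64_t)(ts.tv_nsec / 1000000);
}

/* Pretend to read the GPU's read pointer; a real driver reads a register. */
static uint32_t read_rptr(ring_t *rb) { return rb->rptr; }

/* Pretend recovery always fails, as it would on a truly wedged GPU. */
static bool try_recover(void) { return false; }

/* Returns 0 once numcmds dwords are free, -1 if the GPU hung and could
 * not be recovered. Mirrors the shape of adreno_ringbuffer_waitspace(). */
static int wait_for_space(ring_t *rb, uint32_t numcmds, uint64_t timeout_ms)
{
	uint64_t deadline = now_ms() + timeout_ms;

	for (;;) {
		uint32_t freecmds = read_rptr(rb) - rb->wptr;

		/* ring empty (0) or enough room: done */
		if (freecmds == 0 || freecmds > numcmds)
			return 0;

		if (now_ms() > deadline) {
			fprintf(stderr, "timed out: rptr 0x%x wptr 0x%x\n",
				rb->rptr, rb->wptr);
			if (!try_recover())
				return -1;	/* hung, unrecoverable */
			deadline = now_ms() + timeout_ms; /* recovered: re-arm */
		}
	}
}

int main(void)
{
	ring_t rb = { .rptr = 8, .wptr = 4 };	/* only 4 dwords free */
	return wait_for_space(&rb, 16, 100) ? 1 : 0; /* times out after ~100 ms */
}

The in-kernel version expresses the deadline with jiffies and time_after(), logs through KGSL_DRV_ERR(), retries via adreno_dump_and_recover(), and calls BUG() when recovery fails, since the driver cannot make forward progress at that point.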

drivers/gpu/msm/adreno.c

@@ -246,6 +246,7 @@ error:
 }
 
 static void adreno_iommu_setstate(struct kgsl_device *device,
+                        unsigned int context_id,
                         uint32_t flags)
 {
         unsigned int pt_val, reg_pt_val;
@@ -256,11 +257,17 @@ static void adreno_iommu_setstate(struct kgsl_device *device,
         struct kgsl_memdesc **reg_map_desc;
         void *reg_map_array = NULL;
         int num_iommu_units, i;
+        struct kgsl_context *context;
+        struct adreno_context *adreno_ctx = NULL;
 
         if (!adreno_dev->drawctxt_active)
                 return kgsl_mmu_device_setstate(&device->mmu, flags);
         num_iommu_units = kgsl_mmu_get_reg_map_desc(&device->mmu,
                         &reg_map_array);
+
+        context = idr_find(&device->context_idr, context_id);
+        adreno_ctx = context->devctxt;
+
         reg_map_desc = reg_map_array;
 
         if (kgsl_mmu_enable_clk(&device->mmu,
@@ -375,7 +382,6 @@ static void adreno_iommu_setstate(struct kgsl_device *device,
         sizedwords += (cmds - &link[0]);
 
         if (sizedwords) {
-                unsigned int ts;
                 /*
                  * add an interrupt at the end of commands so that the smmu
                  * disable clock off function will get called
@@ -383,9 +389,13 @@ static void adreno_iommu_setstate(struct kgsl_device *device,
                 *cmds++ = cp_type3_packet(CP_INTERRUPT, 1);
                 *cmds++ = CP_INT_CNTL__RB_INT_MASK;
                 sizedwords += 2;
-                ts = adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+                /* This returns the per context timestamp but we need to
+                 * use the global timestamp for iommu clock disablement */
+                adreno_ringbuffer_issuecmds(device, adreno_ctx,
+                        KGSL_CMD_FLAGS_PMODE,
                         &link[0], sizedwords);
-                kgsl_mmu_disable_clk_on_ts(&device->mmu, ts, true);
+                kgsl_mmu_disable_clk_on_ts(&device->mmu,
+                        adreno_dev->ringbuffer.timestamp[KGSL_MEMSTORE_GLOBAL], true);
         }
 done:
         if (num_iommu_units)
@@ -393,6 +403,7 @@ done:
 }
 
 static void adreno_gpummu_setstate(struct kgsl_device *device,
+                        unsigned int context_id,
                         uint32_t flags)
 {
         struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -400,6 +411,8 @@ static void adreno_gpummu_setstate(struct kgsl_device *device,
         unsigned int *cmds = &link[0];
         int sizedwords = 0;
         unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
+        struct kgsl_context *context;
+        struct adreno_context *adreno_ctx = NULL;
 
         /*
          * Fix target freeze issue by adding TLB flush for each submit
@@ -414,6 +427,9 @@ static void adreno_gpummu_setstate(struct kgsl_device *device,
          * easier to filter out the mmu accesses from the dump
          */
         if (!kgsl_cff_dump_enable && adreno_dev->drawctxt_active) {
+                context = idr_find(&device->context_idr, context_id);
+                adreno_ctx = context->devctxt;
+
                 if (flags & KGSL_MMUFLAGS_PTUPDATE) {
                         /* wait for graphics pipe to be idle */
                         *cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
@@ -486,7 +502,8 @@ static void adreno_gpummu_setstate(struct kgsl_device *device,
                         sizedwords += 2;
                 }
 
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+                adreno_ringbuffer_issuecmds(device, adreno_ctx,
+                        KGSL_CMD_FLAGS_PMODE,
                         &link[0], sizedwords);
         } else {
                 kgsl_mmu_device_setstate(&device->mmu, flags);
@@ -494,13 +511,14 @@ static void adreno_gpummu_setstate(struct kgsl_device *device,
 }
 
 static void adreno_setstate(struct kgsl_device *device,
+                        unsigned int context_id,
                         uint32_t flags)
 {
         /* call the mmu specific handler */
         if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype())
-                return adreno_gpummu_setstate(device, flags);
+                return adreno_gpummu_setstate(device, context_id, flags);
         else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
-                return adreno_iommu_setstate(device, flags);
+                return adreno_iommu_setstate(device, context_id, flags);
 }
 
 static unsigned int
@@ -896,8 +914,7 @@ done:
         return ret;
 }
 
-static int
-adreno_dump_and_recover(struct kgsl_device *device)
+int adreno_dump_and_recover(struct kgsl_device *device)
 {
         int result = -ETIMEDOUT;
@@ -937,6 +954,7 @@ adreno_dump_and_recover(struct kgsl_device *device)
 done:
         return result;
 }
+EXPORT_SYMBOL(adreno_dump_and_recover);
 
 static int adreno_getproperty(struct kgsl_device *device,
                         enum kgsl_property_type type,
@@ -1325,6 +1343,7 @@ static int kgsl_check_interrupt_timestamp(struct kgsl_device *device,
         int status;
         unsigned int ref_ts, enableflag;
         unsigned int context_id;
+        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 
         mutex_lock(&device->mutex);
         context_id = _get_context_id(context);
@@ -1370,8 +1389,15 @@ static int kgsl_check_interrupt_timestamp(struct kgsl_device *device,
                  * get an interrupt */
                 cmds[0] = cp_type3_packet(CP_NOP, 1);
                 cmds[1] = 0;
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-                        &cmds[0], 2);
+
+                if (adreno_dev->drawctxt_active)
+                        adreno_ringbuffer_issuecmds(device,
+                                adreno_dev->drawctxt_active,
+                                KGSL_CMD_FLAGS_NONE, &cmds[0], 2);
+                else
+                        /* We would never call this function if there
+                         * were no active contexts running */
+                        BUG();
         }
 }
 unlock:

drivers/gpu/msm/adreno.h

@@ -99,7 +99,8 @@ struct adreno_gpudev {
         int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
         void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
         void (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
-        void (*ctxt_draw_workaround)(struct adreno_device *);
+        void (*ctxt_draw_workaround)(struct adreno_device *,
+                                        struct adreno_context *);
         irqreturn_t (*irq_handler)(struct adreno_device *);
         void (*irq_control)(struct adreno_device *, int);
         void * (*snapshot)(struct adreno_device *, void *, int *, int);
@@ -143,6 +144,8 @@ struct kgsl_memdesc *adreno_find_ctxtmem(struct kgsl_device *device,
 void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
                 int hang);
 
+int adreno_dump_and_recover(struct kgsl_device *device);
+
 static inline int adreno_is_a200(struct adreno_device *adreno_dev)
 {
         return (adreno_dev->gpurev == ADRENO_REV_A200);

drivers/gpu/msm/adreno_a2xx.c

@@ -1450,7 +1450,8 @@ done:
         return ret;
 }
 
-static void a2xx_drawctxt_workaround(struct adreno_device *adreno_dev)
+static void a2xx_drawctxt_draw_workaround(struct adreno_device *adreno_dev,
+                                        struct adreno_context *context)
 {
         struct kgsl_device *device = &adreno_dev->dev;
         unsigned int cmd[11];
@@ -1497,7 +1498,7 @@ static void a2xx_drawctxt_workaround(struct adreno_device *adreno_dev)
                         | adreno_dev->pix_shader_start;
         }
 
-        adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+        adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_PMODE,
                 &cmd[0], cmds - cmd);
 }
@@ -1516,12 +1517,13 @@ static void a2xx_drawctxt_save(struct adreno_device *adreno_dev,
 
         if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
                 /* save registers and constants. */
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+                adreno_ringbuffer_issuecmds(device, context,
+                        KGSL_CMD_FLAGS_NONE,
                         context->reg_save, 3);
 
                 if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
                         /* save shader partitioning and instructions. */
-                        adreno_ringbuffer_issuecmds(device,
+                        adreno_ringbuffer_issuecmds(device, context,
                                 KGSL_CMD_FLAGS_PMODE,
                                 context->shader_save, 3);
@@ -1529,7 +1531,8 @@ static void a2xx_drawctxt_save(struct adreno_device *adreno_dev,
                          * fixup shader partitioning parameter for
                          *  SET_SHADER_BASES.
                          */
-                        adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+                        adreno_ringbuffer_issuecmds(device, context,
+                                KGSL_CMD_FLAGS_NONE,
                                 context->shader_fixup, 3);
 
                         context->flags |= CTXT_FLAGS_SHADER_RESTORE;
@@ -1541,19 +1544,21 @@ static void a2xx_drawctxt_save(struct adreno_device *adreno_dev,
                 /* save gmem.
                  * (note: changes shader. shader must already be saved.)
                  */
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+                adreno_ringbuffer_issuecmds(device, context,
+                        KGSL_CMD_FLAGS_PMODE,
                         context->context_gmem_shadow.gmem_save, 3);
 
                 /* Restore TP0_CHICKEN */
                 if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
-                        adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+                        adreno_ringbuffer_issuecmds(device, context,
+                                KGSL_CMD_FLAGS_NONE,
                                 context->chicken_restore, 3);
                 }
                 adreno_dev->gpudev->ctx_switches_since_last_draw = 0;
 
                 context->flags |= CTXT_FLAGS_GMEM_RESTORE;
         } else if (adreno_is_a2xx(adreno_dev))
-                a2xx_drawctxt_workaround(adreno_dev);
+                a2xx_drawctxt_draw_workaround(adreno_dev, context);
 }
 
 static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
@@ -1564,7 +1569,8 @@ static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
 
         if (context == NULL) {
                 /* No context - set the default pagetable and that's it */
-                kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable);
+                kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+                        adreno_dev->drawctxt_active->id);
                 return;
         }
@@ -1576,8 +1582,9 @@ static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
         cmds[3] = device->memstore.gpuaddr +
                 KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
         cmds[4] = context->id;
-        adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE, cmds, 5);
-        kgsl_mmu_setstate(&device->mmu, context->pagetable);
+        adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+                cmds, 5);
+        kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);
 
 #ifndef CONFIG_MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
         kgsl_cffdump_syncmem(NULL, &context->gpustate,
@@ -1589,12 +1596,14 @@ static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
          * (note: changes shader. shader must not already be restored.)
          */
         if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+                adreno_ringbuffer_issuecmds(device, context,
+                        KGSL_CMD_FLAGS_PMODE,
                         context->context_gmem_shadow.gmem_restore, 3);
 
                 if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
                         /* Restore TP0_CHICKEN */
-                        adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+                        adreno_ringbuffer_issuecmds(device, context,
+                                KGSL_CMD_FLAGS_NONE,
                                 context->chicken_restore, 3);
                 }
@@ -1604,12 +1613,12 @@ static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
         if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
                 /* restore registers and constants. */
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-                        context->reg_restore, 3);
+                adreno_ringbuffer_issuecmds(device, context,
+                        KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
 
                 /* restore shader instructions & partitioning. */
                 if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
-                        adreno_ringbuffer_issuecmds(device,
+                        adreno_ringbuffer_issuecmds(device, context,
                                 KGSL_CMD_FLAGS_NONE,
                                 context->shader_restore, 3);
                 }
@@ -1618,8 +1627,8 @@ static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
         if (adreno_is_a20x(adreno_dev)) {
                 cmds[0] = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
                 cmds[1] = context->bin_base_offset;
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-                        cmds, 2);
+                adreno_ringbuffer_issuecmds(device, context,
+                        KGSL_CMD_FLAGS_NONE, cmds, 2);
         }
 }
@@ -2011,7 +2020,7 @@ struct adreno_gpudev adreno_a2xx_gpudev = {
         .ctxt_create = a2xx_drawctxt_create,
         .ctxt_save = a2xx_drawctxt_save,
         .ctxt_restore = a2xx_drawctxt_restore,
-        .ctxt_draw_workaround = a2xx_drawctxt_workaround,
+        .ctxt_draw_workaround = a2xx_drawctxt_draw_workaround,
         .irq_handler = a2xx_irq_handler,
         .irq_control = a2xx_irq_control,
         .snapshot = a2xx_snapshot,

drivers/gpu/msm/adreno_a3xx.c

@@ -2226,16 +2226,17 @@ static void a3xx_drawctxt_save(struct adreno_device *adreno_dev,
 
         if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
                 /* Fixup self modifying IBs for save operations */
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-                        context->save_fixup, 3);
+                adreno_ringbuffer_issuecmds(device, context,
+                        KGSL_CMD_FLAGS_NONE, context->save_fixup, 3);
 
                 /* save registers and constants. */
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+                adreno_ringbuffer_issuecmds(device, context,
+                        KGSL_CMD_FLAGS_NONE,
                         context->regconstant_save, 3);
 
                 if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
                         /* Save shader instructions */
-                        adreno_ringbuffer_issuecmds(device,
+                        adreno_ringbuffer_issuecmds(device, context,
                                 KGSL_CMD_FLAGS_PMODE, context->shader_save, 3);
 
                         context->flags |= CTXT_FLAGS_SHADER_RESTORE;
@@ -2249,7 +2250,8 @@ static void a3xx_drawctxt_save(struct adreno_device *adreno_dev,
                  * already be saved.)
                  */
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+                adreno_ringbuffer_issuecmds(device, context,
+                        KGSL_CMD_FLAGS_PMODE,
                         context->context_gmem_shadow.
                         gmem_save, 3);
 
                 context->flags |= CTXT_FLAGS_GMEM_RESTORE;
@@ -2264,7 +2266,8 @@ static void a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
 
         if (context == NULL) {
                 /* No context - set the default pagetable and that's it */
-                kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable);
+                kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
+                        adreno_dev->drawctxt_active->id);
                 return;
         }
@@ -2276,8 +2279,9 @@ static void a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
         cmds[3] = device->memstore.gpuaddr +
                 KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
         cmds[4] = context->id;
-        adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE, cmds, 5);
-        kgsl_mmu_setstate(&device->mmu, context->pagetable);
+        adreno_ringbuffer_issuecmds(device, context, KGSL_CMD_FLAGS_NONE,
+                cmds, 5);
+        kgsl_mmu_setstate(&device->mmu, context->pagetable, context->id);
 
         /*
          * Restore GMEM. (note: changes shader.
@@ -2285,29 +2289,34 @@ static void a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
          */
         if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
+                adreno_ringbuffer_issuecmds(device, context,
+                        KGSL_CMD_FLAGS_PMODE,
                         context->context_gmem_shadow.
                         gmem_restore, 3);
                 context->flags &= ~CTXT_FLAGS_GMEM_RESTORE;
         }
 
         if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
-                        context->reg_restore, 3);
+                adreno_ringbuffer_issuecmds(device, context,
+                        KGSL_CMD_FLAGS_NONE, context->reg_restore, 3);
 
                 /* Fixup self modifying IBs for restore operations */
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+                adreno_ringbuffer_issuecmds(device, context,
+                        KGSL_CMD_FLAGS_NONE,
                         context->restore_fixup, 3);
 
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+                adreno_ringbuffer_issuecmds(device, context,
+                        KGSL_CMD_FLAGS_NONE,
                         context->constant_restore, 3);
 
                 if (context->flags & CTXT_FLAGS_SHADER_RESTORE)
-                        adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+                        adreno_ringbuffer_issuecmds(device, context,
+                                KGSL_CMD_FLAGS_NONE,
                                 context->shader_restore, 3);
 
                 /* Restore HLSQ_CONTROL_0 register */
-                adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE,
+                adreno_ringbuffer_issuecmds(device, context,
+                        KGSL_CMD_FLAGS_NONE,
                         context->hlsqcontrol_restore, 3);
         }
 }

drivers/gpu/msm/adreno_drawctxt.c

@@ -274,7 +274,7 @@ void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
                         if (adreno_dev->gpudev->ctxt_draw_workaround &&
                                 adreno_is_a225(adreno_dev))
                                 adreno_dev->gpudev->ctxt_draw_workaround(
-                                        adreno_dev);
+                                        adreno_dev, drawctxt);
                         return;
                 }

drivers/gpu/msm/adreno_ringbuffer.c

@@ -53,6 +53,9 @@ adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
         unsigned int freecmds;
         unsigned int *cmds;
         uint cmds_gpu;
+        struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
+        unsigned long wait_timeout = msecs_to_jiffies(adreno_dev->wait_timeout);
+        unsigned long wait_time;
 
         /* if wptr ahead, fill the remaining with NOPs */
         if (wptr_ahead) {
@@ -79,13 +82,27 @@ adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
                 rb->wptr = 0;
         }
 
+        wait_time = jiffies + wait_timeout;
         /* wait for space in ringbuffer */
-        do {
+        while (1) {
                 GSL_RB_GET_READPTR(rb, &rb->rptr);
                 freecmds = rb->rptr - rb->wptr;
-        } while ((freecmds != 0) && (freecmds <= numcmds));
+
+                if (freecmds == 0 || freecmds > numcmds)
+                        break;
+
+                if (time_after(jiffies, wait_time)) {
+                        KGSL_DRV_ERR(rb->device,
+                        "Timed out while waiting for freespace in ringbuffer "
+                        "rptr: 0x%x, wptr: 0x%x\n", rb->rptr, rb->wptr);
+                        if (!adreno_dump_and_recover(rb->device))
+                                wait_time = jiffies + wait_timeout;
+                        else
+                                /* GPU is hung and we cannot recover */
+                                BUG();
+                }
+        }
 }
 
 unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
@@ -439,15 +456,13 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
         unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
         unsigned int gpuaddr = rb->device->memstore.gpuaddr;
 
-        if (context != NULL) {
-                /*
-                 * if the context was not created with per context timestamp
-                 * support, we must use the global timestamp since issueibcmds
-                 * will be returning that one.
-                 */
-                if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
-                        context_id = context->id;
-        }
+        /*
+         * if the context was not created with per context timestamp
+         * support, we must use the global timestamp since issueibcmds
+         * will be returning that one.
+         */
+        if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
+                context_id = context->id;
 
         /* reserve space to temporarily turn off protected mode
          *  error checking if needed
@@ -460,7 +475,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
                 total_sizedwords += 7;
 
         total_sizedwords += 2; /* scratchpad ts for recovery */
-        if (context) {
+        if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) {
                 total_sizedwords += 3; /* sop timestamp */
                 total_sizedwords += 4; /* eop timestamp */
                 total_sizedwords += 3; /* global timestamp without cache
@@ -470,6 +485,15 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
         }
 
         ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
+
+        /* GPU may hang during space allocation, if that's the case the current
+         * context may have hung the GPU */
+        if (context->flags & CTXT_FLAGS_GPU_HANG) {
+                KGSL_CTXT_WARN(rb->device,
+                "Context %p caused a gpu hang. Will not accept commands for context %d\n",
+                context, context->id);
+                return rb->timestamp[context_id];
+        }
 
         rcmd_gpu = rb->buffer_desc.gpuaddr
                 + sizeof(uint)*(rb->wptr-total_sizedwords);
@@ -525,7 +549,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
                 GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
         }
 
-        if (context) {
+        if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) {
                 /* start-of-pipeline timestamp */
                 GSL_RB_WRITE(ringcmds, rcmd_gpu,
                         cp_type3_packet(CP_MEM_WRITE, 2));
@@ -593,6 +617,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
 unsigned int
 adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+                        struct adreno_context *drawctxt,
                         unsigned int flags,
                         unsigned int *cmds,
                         int sizedwords)
@@ -603,7 +628,7 @@ adreno_ringbuffer_issuecmds(struct kgsl_device *device,
 
         if (device->state & KGSL_STATE_HUNG)
                 return kgsl_readtimestamp(device, KGSL_MEMSTORE_GLOBAL,
                                 KGSL_TIMESTAMP_RETIRED);
-        return adreno_ringbuffer_addcmds(rb, NULL, flags, cmds, sizedwords);
+        return adreno_ringbuffer_addcmds(rb, drawctxt, flags, cmds, sizedwords);
 }
 
 static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
@@ -870,7 +895,7 @@ adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
         *cmds++ = cp_nop_packet(1);
         *cmds++ = KGSL_END_OF_IB_IDENTIFIER;
 
-        kgsl_setstate(&device->mmu,
+        kgsl_setstate(&device->mmu, context->id,
                 kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
                         device->id));

drivers/gpu/msm/adreno_ringbuffer.h

@@ -104,6 +104,7 @@ void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb);
 void adreno_ringbuffer_close(struct adreno_ringbuffer *rb);
 
 unsigned int adreno_ringbuffer_issuecmds(struct kgsl_device *device,
+                                        struct adreno_context *drawctxt,
                                         unsigned int flags,
                                         unsigned int *cmdaddr,
                                         int sizedwords);

drivers/gpu/msm/kgsl_device.h

@@ -97,7 +97,8 @@ struct kgsl_functable {
         /* Optional functions - these functions are not mandatory. The
            driver will check that the function pointer is not NULL before
            calling the hook */
-        void (*setstate) (struct kgsl_device *device, uint32_t flags);
+        void (*setstate) (struct kgsl_device *device, unsigned int context_id,
+                        uint32_t flags);
         int (*drawctxt_create) (struct kgsl_device *device,
                 struct kgsl_pagetable *pagetable, struct kgsl_context *context,
                 uint32_t flags);
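
The comment above describes the convention this hunk relies on: setstate is an optional hook, so callers (see kgsl_setstate() in kgsl_mmu.c below) test the pointer for NULL and fall back to a generic path. A minimal sketch of that dispatch pattern, with hypothetical types rather than the real kgsl structs:

#include <stdio.h>

struct device;

/* Optional hook: NULL means "no device override, use the fallback". */
struct functable {
	void (*setstate)(struct device *dev, unsigned int context_id,
			unsigned int flags);
};

struct device {
	const struct functable *ftbl;
};

static void generic_setstate(struct device *dev, unsigned int flags)
{
	printf("generic setstate, flags 0x%x\n", flags);
}

static void setstate(struct device *dev, unsigned int context_id,
		unsigned int flags)
{
	if (dev->ftbl->setstate)	/* device override takes precedence */
		dev->ftbl->setstate(dev, context_id, flags);
	else
		generic_setstate(dev, flags);	/* fallback path */
}

int main(void)
{
	static const struct functable no_hook = { .setstate = NULL };
	struct device dev = { .ftbl = &no_hook };

	setstate(&dev, 0, 0x1);	/* takes the fallback */
	return 0;
}

Threading context_id through this hook is exactly what the patch does across the setstate call chain, so the MMU code can look up the draw context that triggered the state change.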

drivers/gpu/msm/kgsl_gpummu.c

@@ -485,7 +485,8 @@ static void kgsl_gpummu_default_setstate(struct kgsl_mmu *mmu,
 }
 
 static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
-                                struct kgsl_pagetable *pagetable)
+                                struct kgsl_pagetable *pagetable,
+                                unsigned int context_id)
 {
         if (mmu->flags & KGSL_FLAGS_STARTED) {
                 /* page table not current, then setup mmu to use new
@@ -499,7 +500,7 @@ static void kgsl_gpummu_setstate(struct kgsl_mmu *mmu,
                         kgsl_mmu_pt_get_flags(pagetable, mmu->device->id);
 
                 /* call device specific set page table */
-                kgsl_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH |
+                kgsl_setstate(mmu, context_id, KGSL_MMUFLAGS_TLBFLUSH |
                         KGSL_MMUFLAGS_PTUPDATE);
         }
 }
@@ -583,7 +584,7 @@ static int kgsl_gpummu_start(struct kgsl_mmu *mmu)
         kgsl_regwrite(mmu->device, MH_MMU_VA_RANGE,
                         (KGSL_PAGETABLE_BASE |
                         (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
-        kgsl_setstate(mmu, KGSL_MMUFLAGS_TLBFLUSH);
+        kgsl_setstate(mmu, KGSL_MEMSTORE_GLOBAL, KGSL_MMUFLAGS_TLBFLUSH);
 
         mmu->flags |= KGSL_FLAGS_STARTED;
         return 0;

drivers/gpu/msm/kgsl_iommu.c

@@ -617,7 +617,8 @@ static int kgsl_iommu_get_pt_lsb(struct kgsl_mmu *mmu,
 }
 
 static void kgsl_iommu_setstate(struct kgsl_mmu *mmu,
-                                struct kgsl_pagetable *pagetable)
+                                struct kgsl_pagetable *pagetable,
+                                unsigned int context_id)
 {
         if (mmu->flags & KGSL_FLAGS_STARTED) {
                 struct kgsl_iommu *iommu = mmu->priv;
@@ -634,7 +635,8 @@ static void kgsl_iommu_setstate(struct kgsl_mmu *mmu,
                                 flags |= KGSL_MMUFLAGS_TLBFLUSH;
                         flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
                                                 mmu->device->id);
-                        kgsl_setstate(mmu, KGSL_MMUFLAGS_PTUPDATE | flags);
+                        kgsl_setstate(mmu, context_id,
+                                KGSL_MMUFLAGS_PTUPDATE | flags);
                 }
         }
 }

drivers/gpu/msm/kgsl_mmu.c

@@ -543,13 +543,14 @@ void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
 }
 EXPORT_SYMBOL(kgsl_mmu_putpagetable);
 
-void kgsl_setstate(struct kgsl_mmu *mmu, uint32_t flags)
+void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+                        uint32_t flags)
 {
         struct kgsl_device *device = mmu->device;
         if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
                 return;
         else if (device->ftbl->setstate)
-                device->ftbl->setstate(device, flags);
+                device->ftbl->setstate(device, context_id, flags);
         else if (mmu->mmu_ops->mmu_device_setstate)
                 mmu->mmu_ops->mmu_device_setstate(mmu, flags);
 }

drivers/gpu/msm/kgsl_mmu.h

@@ -125,7 +125,8 @@ struct kgsl_mmu_ops {
         int (*mmu_start) (struct kgsl_mmu *mmu);
         void (*mmu_stop) (struct kgsl_mmu *mmu);
         void (*mmu_setstate) (struct kgsl_mmu *mmu,
-                struct kgsl_pagetable *pagetable);
+                struct kgsl_pagetable *pagetable,
+                unsigned int context_id);
         void (*mmu_device_setstate) (struct kgsl_mmu *mmu,
                 uint32_t flags);
         void (*mmu_pagefault) (struct kgsl_mmu *mmu);
@@ -193,7 +194,8 @@ int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
 int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
                 struct kgsl_memdesc *memdesc);
 unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
-void kgsl_setstate(struct kgsl_mmu *mmu, uint32_t flags);
+void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
+                uint32_t flags);
 int kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base);
 int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
                 enum kgsl_deviceid id);
@@ -219,10 +221,11 @@ static inline unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_mmu *mmu)
 }
 
 static inline void kgsl_mmu_setstate(struct kgsl_mmu *mmu,
-                        struct kgsl_pagetable *pagetable)
+                        struct kgsl_pagetable *pagetable,
+                        unsigned int context_id)
 {
         if (mmu->mmu_ops && mmu->mmu_ops->mmu_setstate)
-                mmu->mmu_ops->mmu_setstate(mmu, pagetable);
+                mmu->mmu_ops->mmu_setstate(mmu, pagetable, context_id);
 }
 
 static inline void kgsl_mmu_device_setstate(struct kgsl_mmu *mmu,

drivers/gpu/msm/z180.c

@@ -444,11 +444,13 @@ z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
                         (ctrl & KGSL_CONTEXT_CTX_SWITCH)) {
                         KGSL_CMD_INFO(device, "context switch %d -> %d\n",
                                 context->id, z180_dev->ringbuffer.prevctx);
-                        kgsl_mmu_setstate(&device->mmu, pagetable);
+                        kgsl_mmu_setstate(&device->mmu, pagetable,
+                                KGSL_MEMSTORE_GLOBAL);
                         cnt = PACKETSIZE_STATESTREAM;
                         ofs = 0;
                 }
                 kgsl_setstate(&device->mmu,
+                        KGSL_MEMSTORE_GLOBAL,
                         kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
                                 device->id));
@@ -861,7 +863,8 @@ z180_drawctxt_destroy(struct kgsl_device *device,
         if (z180_dev->ringbuffer.prevctx == context->id) {
                 z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
                 device->mmu.hwpagetable = device->mmu.defaultpagetable;
-                kgsl_setstate(&device->mmu, KGSL_MMUFLAGS_PTUPDATE);
+                kgsl_setstate(&device->mmu, KGSL_MEMSTORE_GLOBAL,
+                                KGSL_MMUFLAGS_PTUPDATE);
         }
 }