Mirror of https://github.com/followmsi/android_kernel_google_msm.git (synced 2024-11-06 23:17:41 +00:00)
msm: kgsl: embed kgsl_context struct in adreno_context struct
Having a separately allocated struct for the device-specific context makes ownership unclear, which could lead to reference counting problems or invalid pointers. Also, duplicate members were starting to appear in adreno_context because there was no safe way to reach the kgsl_context from some parts of the adreno code. This can now be done via container_of().

This change alters the lifecycle of context->id, which is now freed when the context reference count hits zero rather than in kgsl_context_detach(). It also changes the context creation and destruction sequence. The device-specific code must allocate a structure containing a struct kgsl_context and pass a pointer to it to kgsl_context_init() before doing any device-specific initialization. There is also a separate drawctxt_detach() callback for device-specific cleanup; this is distinct from freeing memory, which is done by the drawctxt_destroy() callback.

Change-Id: I7d238476a3bfec98fd8dbc28971cf3187a81dac2
Signed-off-by: Jeremy Gebben <jgebben@codeaurora.org>
Signed-off-by: Carter Cooper <ccooper@codeaurora.org>
parent b8692c4700
commit c10fe0b567
8 changed files with 33 additions and 43 deletions
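For orientation before the diff: the heart of the change is that struct adreno_context now embeds the generic struct kgsl_context as a member named base (visible below in expressions like drawctxt->base.device), and the adreno code gets back to its own context with container_of(). A minimal sketch of that pattern, with non-essential members reduced to placeholders and the macro body assumed rather than quoted:

    #include <linux/kernel.h>    /* container_of() */

    /* Device-specific context embeds the generic KGSL context instead of
     * being allocated and tracked separately. */
    struct adreno_context {
        struct kgsl_context base;    /* generic context; id and refcount live here */
        uint32_t flags;              /* illustrative adreno-specific state */
        /* ... remaining adreno-specific members ... */
    };

    /* Recover the wrapping adreno_context from a generic kgsl_context pointer;
     * this is what the ADRENO_CONTEXT(context) uses in the diff rely on. */
    #define ADRENO_CONTEXT(ctx) container_of((ctx), struct adreno_context, base)

Going the other direction needs no lookup at all: from a struct adreno_context *drawctxt, the owning device is simply drawctxt->base.device, which is why the kgsl_cffdump_syncmem() and kgsl_sharedmem_set() call sites below can drop their dev_priv and local device plumbing.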
@@ -814,8 +814,6 @@ static int adreno_iommu_setstate(struct kgsl_device *device,
 	adreno_ctx = ADRENO_CONTEXT(context);

 	result = kgsl_mmu_enable_clk(&device->mmu,
 				KGSL_IOMMU_CONTEXT_USER);
 	if (result)

@@ -894,7 +892,6 @@ static int adreno_gpummu_setstate(struct kgsl_device *device,
 	context = kgsl_context_get(device, context_id);
 	if (context == NULL)
 		return -EINVAL;
 	adreno_ctx = ADRENO_CONTEXT(context);

 	if (flags & KGSL_MMUFLAGS_PTUPDATE) {

@@ -2048,8 +2045,6 @@ static bool adreno_hw_isidle(struct kgsl_device *device)
 	} else if (adreno_is_a3xx(adreno_dev)) {
 		if (!(reg_rbbm_status & 0x80000000))
 			return true;
 	} else {
 		BUG();
 	}

 	return false;

@@ -2100,11 +2095,12 @@ int adreno_idle(struct kgsl_device *device)
 	if (adreno_is_a2xx(adreno_dev))
 		kgsl_cffdump_regpoll(device,
-			adreno_dev->gpudev->reg_rbbm_status << 2, 0x110, 0x110);
+			adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
+			0x110, 0x110);
 	else
 		kgsl_cffdump_regpoll(device,
-			adreno_dev->gpudev->reg_rbbm_status << 2, 0,
-			0x80000000);
+			adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
+			0x00000000, 0x80000000);

 	while (time_before(jiffies, wait)) {
 		if (adreno_isidle(device))

@@ -1350,7 +1350,6 @@ static int a2xx_create_gmem_shadow(struct adreno_device *adreno_dev,
 		struct adreno_context *drawctxt)
 {
 	int result;
-	struct kgsl_device *device = &adreno_dev->dev;

 	calc_gmemsize(&drawctxt->context_gmem_shadow, adreno_dev->gmem_size);
 	tmp_ctx.gmem_base = adreno_dev->gmem_base;

@@ -1365,7 +1364,7 @@ static int a2xx_create_gmem_shadow(struct adreno_device *adreno_dev,
 	drawctxt->flags |= CTXT_FLAGS_GMEM_SHADOW;

 	/* blank out gmem shadow. */
-	kgsl_sharedmem_set(device,
+	kgsl_sharedmem_set(drawctxt->base.device,
 		&drawctxt->context_gmem_shadow.gmemshadow, 0, 0,
 		drawctxt->context_gmem_shadow.size);

@@ -1390,7 +1389,7 @@ static int a2xx_create_gmem_shadow(struct adreno_device *adreno_dev,
 	kgsl_cache_range_op(&drawctxt->context_gmem_shadow.gmemshadow,
 		KGSL_CACHE_OP_FLUSH);

-	kgsl_cffdump_syncmem(drawctxt->dev_priv,
+	kgsl_cffdump_syncmem(drawctxt->base.device,
 		&drawctxt->context_gmem_shadow.gmemshadow,
 		drawctxt->context_gmem_shadow.gmemshadow.gpuaddr,
 		drawctxt->context_gmem_shadow.gmemshadow.size, false);

@@ -1402,7 +1401,6 @@ static int a2xx_drawctxt_create(struct adreno_device *adreno_dev,
 		struct adreno_context *drawctxt)
 {
 	int ret;
-	struct kgsl_device *device = &adreno_dev->dev;

 	/*
 	 * Allocate memory for the GPU state and the context commands.

@@ -1417,8 +1415,8 @@ static int a2xx_drawctxt_create(struct adreno_device *adreno_dev,
 	if (ret)
 		return ret;

-	kgsl_sharedmem_set(device, &drawctxt->gpustate, 0,
-		0, _context_size(adreno_dev));
+	kgsl_sharedmem_set(drawctxt->base.device, &drawctxt->gpustate,
+		0, 0, _context_size(adreno_dev));

 	tmp_ctx.cmd = tmp_ctx.start
 	    = (unsigned int *)((char *)drawctxt->gpustate.hostptr + CMD_OFFSET);

@@ -1442,8 +1440,8 @@ static int a2xx_drawctxt_create(struct adreno_device *adreno_dev,
 	kgsl_cache_range_op(&drawctxt->gpustate,
 		KGSL_CACHE_OP_FLUSH);

-	kgsl_cffdump_syncmem(drawctxt->dev_priv, &drawctxt->gpustate,
-		drawctxt->gpustate.gpuaddr,
+	kgsl_cffdump_syncmem(drawctxt->base.device,
+		&drawctxt->gpustate, drawctxt->gpustate.gpuaddr,
 		drawctxt->gpustate.size, false);

 done:

@@ -1518,7 +1516,7 @@ static int a2xx_drawctxt_save(struct adreno_device *adreno_dev,
 		return 0;

 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
-		kgsl_cffdump_syncmem(context->dev_priv, &context->gpustate,
+		kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
 			context->reg_save[1],
 			context->reg_save[2] << 2, true);
 		/* save registers and constants. */

@@ -1530,7 +1528,7 @@ static int a2xx_drawctxt_save(struct adreno_device *adreno_dev,
 			return ret;

 	if (context->flags & CTXT_FLAGS_SHADER_SAVE) {
-		kgsl_cffdump_syncmem(context->dev_priv,
+		kgsl_cffdump_syncmem(context->base.device,
 			&context->gpustate,
 			context->shader_save[1],
 			context->shader_save[2] << 2, true);

@@ -1539,7 +1537,7 @@ static int a2xx_drawctxt_save(struct adreno_device *adreno_dev,
 			KGSL_CMD_FLAGS_PMODE,
 			context->shader_save, 3);

-		kgsl_cffdump_syncmem(context->dev_priv,
+		kgsl_cffdump_syncmem(context->base.device,
 			&context->gpustate,
 			context->shader_fixup[1],
 			context->shader_fixup[2] << 2, true);

@@ -1560,7 +1558,7 @@ static int a2xx_drawctxt_save(struct adreno_device *adreno_dev,

 	if ((context->flags & CTXT_FLAGS_GMEM_SAVE) &&
 	    (context->flags & CTXT_FLAGS_GMEM_SHADOW)) {
-		kgsl_cffdump_syncmem(context->dev_priv, &context->gpustate,
+		kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
 			context->context_gmem_shadow.gmem_save[1],
 			context->context_gmem_shadow.gmem_save[2] << 2, true);
 		/* save gmem.

@@ -1572,7 +1570,7 @@ static int a2xx_drawctxt_save(struct adreno_device *adreno_dev,
 		if (ret)
 			return ret;
-		kgsl_cffdump_syncmem(context->dev_priv, &context->gpustate,
+		kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
 			context->chicken_restore[1],
 			context->chicken_restore[2] << 2, true);

@@ -1635,7 +1633,7 @@ static int a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
 	 * (note: changes shader. shader must not already be restored.)
 	 */
 	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
-		kgsl_cffdump_syncmem(context->dev_priv, &context->gpustate,
+		kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
 			context->context_gmem_shadow.gmem_restore[1],
 			context->context_gmem_shadow.gmem_restore[2] << 2,
 			true);

@@ -1647,7 +1645,7 @@ static int a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
 		return ret;

 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
-		kgsl_cffdump_syncmem(context->dev_priv,
+		kgsl_cffdump_syncmem(context->base.device,
 			&context->gpustate,
 			context->chicken_restore[1],
 			context->chicken_restore[2] << 2, true);

@@ -1664,7 +1662,7 @@ static int a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
 	}

 	if (!(context->flags & CTXT_FLAGS_PREAMBLE)) {
-		kgsl_cffdump_syncmem(context->dev_priv, &context->gpustate,
+		kgsl_cffdump_syncmem(context->base.device, &context->gpustate,
 			context->reg_restore[1],
 			context->reg_restore[2] << 2, true);

@@ -1676,7 +1674,7 @@ static int a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
 	/* restore shader instructions & partitioning. */
 	if (context->flags & CTXT_FLAGS_SHADER_RESTORE) {
-		kgsl_cffdump_syncmem(context->dev_priv,
+		kgsl_cffdump_syncmem(context->base.device,
 			&context->gpustate,
 			context->shader_restore[1],
 			context->shader_restore[2] << 2, true);

@@ -2426,7 +2426,7 @@ static int a3xx_drawctxt_save(struct adreno_device *adreno_dev,
 	 * already be saved.)
 	 */

-		kgsl_cffdump_syncmem(context->dev_priv,
+		kgsl_cffdump_syncmem(context->base.device,
 			&context->gpustate,
 			context->context_gmem_shadow.gmem_save[1],
 			context->context_gmem_shadow.gmem_save[2] << 2, true);

@@ -2487,7 +2487,7 @@ static int a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
 	 */

 	if (context->flags & CTXT_FLAGS_GMEM_RESTORE) {
-		kgsl_cffdump_syncmem(context->dev_priv,
+		kgsl_cffdump_syncmem(context->base.device,
 			&context->gpustate,
 			context->context_gmem_shadow.gmem_restore[1],
 			context->context_gmem_shadow.gmem_restore[2] << 2,

@@ -375,17 +375,14 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device,

 /**
  * adreno_drawctxt_create - create a new adreno draw context
- * @device - KGSL device to create the context on
- * @pagetable - Pagetable for the context
- * @context- Generic KGSL context structure
- * @flags - flags for the context (passed from user space)
+ * @dev_priv: the owner of the context
+ * @flags: flags for the context (passed from user space)
  *
- * Create a new draw context for the 3D core. Return 0 on success,
- * or error code on failure.
+ * Create and return a new draw context for the 3D core.
  */
 struct kgsl_context *
 adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
 			uint32_t *flags)
 {
 	struct adreno_context *drawctxt;
 	struct kgsl_device *device = dev_priv->device;

@@ -393,7 +390,6 @@ adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
 	int ret;

 	drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);

 	if (drawctxt == NULL)
 		return ERR_PTR(-ENOMEM);

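The new creation sequence from the commit message, sketched as the device-side create callback. Only the pieces visible in the hunks above and below (the kzalloc of the embedding struct, the ERR_PTR return, and kgsl_context_init()) are taken from the diff; the error-path layout and the device-specific initialization are placeholders:

    struct kgsl_context *
    adreno_drawctxt_create(struct kgsl_device_private *dev_priv, uint32_t *flags)
    {
        struct adreno_context *drawctxt;
        int ret;

        /* Allocate the device-specific struct that embeds struct kgsl_context. */
        drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
        if (drawctxt == NULL)
            return ERR_PTR(-ENOMEM);

        /* Initialize the common members before any device-specific setup;
         * once this succeeds the context is reference counted. */
        ret = kgsl_context_init(dev_priv, &drawctxt->base);
        if (ret) {
            kfree(drawctxt);    /* init failed: a plain free is sufficient */
            return ERR_PTR(ret);
        }

        /* ... device-specific initialization goes here; if it fails, clean up
         * with kgsl_context_put(&drawctxt->base) rather than kfree() ... */

        return &drawctxt->base;
    }

The asymmetry between the two error paths comes straight from the kgsl_context_init() kerneldoc below: once initialization succeeds, the reference count owns the memory.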
@@ -410,7 +410,7 @@ static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
  * function to initialize the common members of its context struct.
  * If this function succeeds, reference counting is active in the context
  * struct and the caller should kgsl_context_put() it on error.
- * If it fails, the caller should just free the context structer
+ * If it fails, the caller should just free the context structure
  * it passed in.
  */
 int kgsl_context_init(struct kgsl_device_private *dev_priv,

@@ -454,9 +454,8 @@ int kgsl_context_init(struct kgsl_device_private *dev_priv,
 	context->pid = dev_priv->process_priv->pid;

 	ret = kgsl_sync_timeline_create(context);
-	if (ret) {
+	if (ret)
 		goto fail_free_id;
-	}

 	/* Initialize the pending event list */
 	INIT_LIST_HEAD(&context->events);

@@ -514,11 +513,12 @@ int kgsl_context_detach(struct kgsl_context *context)
 	/*
 	 * Cancel events after the device-specific context is
-	 * destroyed, to avoid possibly freeing memory while
+	 * detached, to avoid possibly freeing memory while
 	 * it is still in use by the GPU.
 	 */
 	kgsl_context_cancel_events(device, context);

 	kgsl_context_put(context);

 	return ret;

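On the teardown side, the commit message separates device-specific cleanup (a drawctxt_detach() callback) from freeing memory (drawctxt_destroy(), driven by the reference count), and the hunk above shows events being cancelled only after detach. A rough sketch of that ordering, not the full function: the cancel/put/return lines mirror the diff, while the callback invocation through the device function table is assumed here:

    int kgsl_context_detach(struct kgsl_context *context)
    {
        struct kgsl_device *device = context->device;
        int ret;

        /* Device-specific cleanup first (assumed: adreno's drawctxt_detach callback). */
        ret = device->ftbl->drawctxt_detach(context);

        /*
         * Cancel events only after the device-specific context is detached,
         * to avoid freeing memory that the GPU may still be using.
         */
        kgsl_context_cancel_events(device, context);

        /* Drop the initial reference; context->id and the memory itself are
         * released (via drawctxt_destroy) only when the count reaches zero. */
        kgsl_context_put(context);

        return ret;
    }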
@@ -414,7 +414,6 @@ void kgsl_cffdump_syncmem(struct kgsl_device *device,
 		struct kgsl_memdesc *memdesc, uint gpuaddr,
 		uint sizebytes, bool clean_cache)
 {
-	struct kgsl_device *device = dev_priv->device;
 	const void *src;

 	if (!device->cff_dump_enable)

@@ -74,7 +74,7 @@ static inline void kgsl_cffdump_close(struct kgsl_device *device)
 	return;
 }

-static inline void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv,
+static inline void kgsl_cffdump_syncmem(struct kgsl_device *device,
 		struct kgsl_memdesc *memdesc, uint physaddr, uint sizebytes,
 		bool clean_cache)
 {

@@ -16,6 +16,7 @@
 #include <linux/slab.h>
 #include <linux/idr.h>
 #include <linux/pm_qos.h>
+#include <linux/sched.h>

 #include "kgsl.h"
 #include "kgsl_mmu.h"

@@ -526,7 +527,7 @@ kgsl_context_put(struct kgsl_context *context)
  *
  * Check if a context has been destroyed by userspace and is only waiting
  * for reference counts to go away. This check is used to weed out
- * contexts that shouldn't use the gpu, so NULL is considered detached.
+ * contexts that shouldn't use the gpu so NULL is considered detached.
  */
 static inline bool kgsl_context_detached(struct kgsl_context *context)
 {