Mirror of https://github.com/followmsi/android_kernel_google_msm.git (synced 2024-11-06 23:17:41 +00:00)
msm: kgsl: Add IB submission gate for device suspend
Add IB submission gate to stop the system from making requests while
suspend is requested. This will force users to wait for the system to
resume before making more requests.

Change-Id: I2460ef9b4237b586a31d4cea64b6dd74d30b08d9
Signed-off-by: Carter Cooper <ccooper@codeaurora.org>
parent 7a8fee7870
commit ec7e738fc5
6 changed files with 58 additions and 91 deletions
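The gate added by this patch is an ordinary kernel struct completion embedded in struct kgsl_device. Re-arming it with INIT_COMPLETION() (the pre-3.13 spelling of reinit_completion()) makes every later wait_for_completion() block, while complete_all() releases all current and future waiters until the gate is re-armed. The sketch below is a condensed illustration of the three touch points the diff adds (drain, submit, start), not the driver code itself; the helper names are invented for clarity.

#include <linux/completion.h>

/* Stand-in for device->cmdbatch_gate; a completion starts closed (count 0) */
static DECLARE_COMPLETION(cmdbatch_gate);

/* adreno_drain(): close the gate while a suspend request is pending */
static void gate_close(void)
{
        INIT_COMPLETION(cmdbatch_gate);         /* new waiters will block */
}

/* adreno_ringbuffer_issueibcmds(): hold new IB submissions at the gate */
static void gate_wait(void)
{
        wait_for_completion(&cmdbatch_gate);
}

/* adreno_dispatcher_start(): reopen the gate once the device is running */
static void gate_open(void)
{
        complete_all(&cmdbatch_gate);           /* release current and future waiters */
}

Because complete_all() marks the completion done for every waiter, an open gate costs submitters only a quick check; adreno_drain() re-arms it so the next submission sleeps until adreno_dispatcher_start() runs again on resume.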
@@ -1743,7 +1743,7 @@ static int adreno_start(struct kgsl_device *device)
                 goto error_rb_stop;

         /* Start the dispatcher */
-        adreno_dispatcher_start(adreno_dev);
+        adreno_dispatcher_start(device);

         device->reset_counter++;

@@ -2411,14 +2411,13 @@ int adreno_idle(struct kgsl_device *device)
  * adreno_drain() - Drain the dispatch queue
  * @device: Pointer to the KGSL device structure for the GPU
  *
- * Tell the dispatcher to pause - this has the effect of draining the inflight
- * command batches
+ * Drain the dispatcher of existing command batches. This halts
+ * additional commands from being issued until the gate is completed.
  */
 static int adreno_drain(struct kgsl_device *device)
 {
-        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+        INIT_COMPLETION(device->cmdbatch_gate);

-        adreno_dispatcher_pause(adreno_dev);
         return 0;
 }

@@ -2817,13 +2816,6 @@ static unsigned int adreno_gpuid(struct kgsl_device *device,
         return (0x0003 << 16) | ((int) adreno_dev->gpurev);
 }

-static void adreno_resume(struct kgsl_device *device)
-{
-        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
-        adreno_dispatcher_resume(adreno_dev);
-}
-
 static const struct kgsl_functable adreno_functable = {
         /* Mandatory functions */
         .regread = adreno_regread,
@@ -2855,7 +2847,7 @@ static const struct kgsl_functable adreno_functable = {
         .setproperty = adreno_setproperty,
         .postmortem_dump = adreno_dump,
         .drawctxt_sched = adreno_drawctxt_sched,
-        .resume = adreno_resume,
+        .resume = adreno_dispatcher_start,
 };

 static struct platform_driver adreno_platform_driver = {
@@ -126,7 +126,7 @@ enum coresight_debug_reg {
  */
 struct adreno_dispatcher {
         struct mutex mutex;
-        unsigned int state;
+        unsigned long priv;
         struct timer_list timer;
         struct timer_list fault_timer;
         unsigned int inflight;
@@ -140,6 +140,10 @@ struct adreno_dispatcher {
         struct kobject kobj;
 };

+enum adreno_dispatcher_flags {
+        ADRENO_DISPATCHER_POWER = 0,
+};
+
 struct adreno_gpudev;

 struct adreno_device {
@@ -481,7 +485,7 @@ struct kgsl_memdesc *adreno_find_ctxtmem(struct kgsl_device *device,
 void *adreno_snapshot(struct kgsl_device *device, void *snapshot, int *remain,
                 int hang);

-void adreno_dispatcher_start(struct adreno_device *adreno_dev);
+void adreno_dispatcher_start(struct kgsl_device *device);
 int adreno_dispatcher_init(struct adreno_device *adreno_dev);
 void adreno_dispatcher_close(struct adreno_device *adreno_dev);
 int adreno_dispatcher_idle(struct adreno_device *adreno_dev,
@@ -495,7 +499,6 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,

 void adreno_dispatcher_schedule(struct kgsl_device *device);
 void adreno_dispatcher_pause(struct adreno_device *adreno_dev);
-void adreno_dispatcher_resume(struct adreno_device *adreno_dev);
 void adreno_dispatcher_queue_context(struct kgsl_device *device,
                 struct adreno_context *drawctxt);
 int adreno_reset(struct kgsl_device *device);
@@ -22,9 +22,6 @@
 #include "adreno_ringbuffer.h"
 #include "adreno_trace.h"

-#define ADRENO_DISPATCHER_ACTIVE 0
-#define ADRENO_DISPATCHER_PAUSE 1
-
 #define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))

 /* Number of commands that can be queued in a context before it sleeps */
@@ -258,7 +255,9 @@ static int sendcmd(struct adreno_device *adreno_dev,

         dispatcher->inflight++;

-        if (dispatcher->inflight == 1) {
+        if (dispatcher->inflight == 1 &&
+                !test_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv)) {
+
                 /* Time to make the donuts. Turn on the GPU */
                 ret = kgsl_active_count_get(device);
                 if (ret) {
@@ -266,6 +265,8 @@ static int sendcmd(struct adreno_device *adreno_dev,
                         mutex_unlock(&device->mutex);
                         return ret;
                 }
+
+                set_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
         }

         ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch);
@@ -278,8 +279,10 @@ static int sendcmd(struct adreno_device *adreno_dev,
         if (dispatcher->inflight == 1) {
                 if (ret == 0)
                         fault_detect_read(device);
-                else
+                else {
                         kgsl_active_count_put(device);
+                        clear_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
+                }
         }

         mutex_unlock(&device->mutex);
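With the ADRENO_DISPATCHER_PAUSE/ACTIVE state machine removed, the one piece of state sendcmd() still tracks is whether the dispatcher holds an active-count reference on the device, and that is what the new ADRENO_DISPATCHER_POWER bit records: it is set only after a successful kgsl_active_count_get() and cleared next to the matching kgsl_active_count_put(), keeping the get/put pair balanced whether the first submission fails or the queue later drains to zero. A minimal sketch of the idiom follows; the names are illustrative, and the driver's separate test_bit()/clear_bit() under device->mutex is folded into one test_and_clear_bit() here for brevity.

#include <linux/bitops.h>

#define EXAMPLE_POWER_BIT       0       /* plays the role of ADRENO_DISPATCHER_POWER */

struct example_dispatcher {
        unsigned long priv;             /* flag word, as in struct adreno_dispatcher */
        unsigned int inflight;
};

/* Take the power reference exactly once, when the first command goes in flight */
static int example_power_get(struct example_dispatcher *d, int (*get)(void))
{
        int ret = 0;

        if (d->inflight == 1 && !test_bit(EXAMPLE_POWER_BIT, &d->priv)) {
                ret = get();
                if (ret == 0)
                        set_bit(EXAMPLE_POWER_BIT, &d->priv);
        }
        return ret;
}

/* Drop it exactly once, when submission fails or the pipeline empties */
static void example_power_put(struct example_dispatcher *d, void (*put)(void))
{
        if (test_and_clear_bit(EXAMPLE_POWER_BIT, &d->priv))
                put();
}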
@@ -342,9 +345,6 @@ static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
                 int ret;
                 struct kgsl_cmdbatch *cmdbatch;

-                if (dispatcher->state != ADRENO_DISPATCHER_ACTIVE)
-                        break;
-
                 if (adreno_gpu_fault(adreno_dev) != 0)
                         break;

@@ -427,8 +427,7 @@ static int _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
         int ret;

         /* Leave early if the dispatcher isn't in a happy state */
-        if ((dispatcher->state != ADRENO_DISPATCHER_ACTIVE) ||
-                adreno_gpu_fault(adreno_dev) != 0)
+        if (adreno_gpu_fault(adreno_dev) != 0)
                 return 0;

         plist_head_init(&requeue);
@@ -437,8 +436,7 @@ static int _adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
         while (dispatcher->inflight < _dispatcher_inflight) {

                 /* Stop doing things if the dispatcher is paused or faulted */
-                if ((dispatcher->state != ADRENO_DISPATCHER_ACTIVE) ||
-                        adreno_gpu_fault(adreno_dev) != 0)
+                if (adreno_gpu_fault(adreno_dev) != 0)
                         break;

                 spin_lock(&dispatcher->plist_lock);
@@ -1156,10 +1154,6 @@ replay:
                 }
         }

-        mutex_lock(&device->mutex);
-        kgsl_active_count_put(device);
-        mutex_unlock(&device->mutex);
-
         kfree(replay);

         return 1;
@@ -1327,19 +1321,16 @@ static void adreno_dispatcher_work(struct work_struct *work)
          */
         if (!fault_handled && dispatcher_do_fault(device))
                 goto done;
-        /*
-         * Decrement the active count to 0 - this will allow the system to go
-         * into suspend even if there are queued command batches
-         */

-        mutex_lock(&device->mutex);
+        /*
+         * If inflight went to 0, queue back up the event processor to catch
+         * stragglers
+         */
         if (count && dispatcher->inflight == 0) {
-                del_timer_sync(&dispatcher->fault_timer);
-                kgsl_active_count_put(device);
-                /* Queue back up the event processor to catch stragglers */
+                mutex_lock(&device->mutex);
                 queue_work(device->work_queue, &device->ts_expired_ws);
+                mutex_unlock(&device->mutex);
         }
-        mutex_unlock(&device->mutex);

         /* Dispatch new commands if we have the room */
         if (dispatcher->inflight < _dispatcher_inflight)
@@ -1353,12 +1344,29 @@ done:

                 /* Update the timeout timer for the next command batch */
                 mod_timer(&dispatcher->timer, cmdbatch->expires);
-        }

-        /* Before leaving update the pwrscale information */
-        mutex_lock(&device->mutex);
-        kgsl_pwrscale_idle(device);
-        mutex_unlock(&device->mutex);
+                /* There are still things in flight - update the idle counts */
+                mutex_lock(&device->mutex);
+                kgsl_pwrscale_idle(device);
+                mutex_unlock(&device->mutex);
+        } else {
+                /* There is nothing left in the pipeline. Shut 'er down boys */
+                mutex_lock(&device->mutex);
+
+                /*
+                 * Stop the fault timer before decrementing the active count to
+                 * avoid reading the hardware registers while we are trying to
+                 * turn clocks off
+                 */
+                del_timer_sync(&dispatcher->fault_timer);
+
+                if (test_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv)) {
+                        kgsl_active_count_put(device);
+                        clear_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
+                }
+
+                mutex_unlock(&device->mutex);
+        }

         mutex_unlock(&dispatcher->mutex);
 }
@@ -1451,55 +1459,18 @@ void adreno_dispatcher_irq_fault(struct kgsl_device *device)
         adreno_dispatcher_schedule(device);
 }

-/**
- * adreno_dispatcher_pause() - stop the dispatcher
- * @adreno_dev: pointer to the adreno device structure
- *
- * Pause the dispather so it doesn't accept any new commands
- */
-void adreno_dispatcher_pause(struct adreno_device *adreno_dev)
-{
-        struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
-        /*
-         * This will probably get called while holding other mutexes so don't
-         * take the dispatcher mutex. The biggest penalty is that another
-         * command might be submitted while we are in here but thats okay
-         * because whoever is waiting for the drain will just have another
-         * command batch to wait for
-         */
-
-        dispatcher->state = ADRENO_DISPATCHER_PAUSE;
-}
-
-/**
- * adreno_dispatcher_resume() - resume the dispatcher
- * @adreno_dev: pointer to the adreno device structure
- *
- * Set the dispatcher active so it can start accepting commands again
- */
-void adreno_dispatcher_resume(struct adreno_device *adreno_dev)
-{
-        struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
-        dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
-        adreno_dispatcher_schedule(&adreno_dev->dev);
-}
-
 /**
  * adreno_dispatcher_start() - activate the dispatcher
  * @adreno_dev: pointer to the adreno device structure
  *
  * Set the disaptcher active and start the loop once to get things going
  */
-void adreno_dispatcher_start(struct adreno_device *adreno_dev)
+void adreno_dispatcher_start(struct kgsl_device *device)
 {
-        struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
-        dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
+        complete_all(&device->cmdbatch_gate);

         /* Schedule the work loop to get things going */
-        adreno_dispatcher_schedule(&adreno_dev->dev);
+        adreno_dispatcher_schedule(device);
 }

 /**
@@ -1514,8 +1485,6 @@ void adreno_dispatcher_stop(struct adreno_device *adreno_dev)

         del_timer_sync(&dispatcher->timer);
         del_timer_sync(&dispatcher->fault_timer);
-
-        dispatcher->state = ADRENO_DISPATCHER_PAUSE;
 }

 /**
@@ -1688,8 +1657,6 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev)
         plist_head_init(&dispatcher->pending);
         spin_lock_init(&dispatcher->plist_lock);

-        dispatcher->state = ADRENO_DISPATCHER_ACTIVE;
-
         ret = kobject_init_and_add(&dispatcher->kobj, &ktype_dispatcher,
                 &device->dev->kobj, "dispatch");

@@ -626,7 +626,7 @@ error_vfree:
 end:
         /* Restart the dispatcher after a manually triggered dump */
         if (manual)
-                adreno_dispatcher_start(adreno_dev);
+                adreno_dispatcher_start(device);

         return result;
 }
@@ -1006,6 +1006,9 @@ adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
         /* For now everybody has the same priority */
         cmdbatch->priority = ADRENO_CONTEXT_DEFAULT_PRIORITY;

+        /* wait for the suspend gate */
+        wait_for_completion(&device->cmdbatch_gate);
+
         /* Queue the command in the ringbuffer */
         ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch,
                 timestamp);
@@ -242,6 +242,7 @@ struct kgsl_device {
         struct kgsl_mh mh;
         struct kgsl_mmu mmu;
         struct completion hwaccess_gate;
+        struct completion cmdbatch_gate;
         const struct kgsl_functable *ftbl;
         struct work_struct idle_check_ws;
         struct timer_list idle_timer;
@@ -314,6 +315,7 @@ void kgsl_check_fences(struct work_struct *work);

 #define KGSL_DEVICE_COMMON_INIT(_dev) \
         .hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
+        .cmdbatch_gate = COMPLETION_INITIALIZER((_dev).cmdbatch_gate),\
         .idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
                         kgsl_idle_check),\
         .ts_expired_ws = __WORK_INITIALIZER((_dev).ts_expired_ws,\
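One more note on initialization: COMPLETION_INITIALIZER() (like init_completion() for objects set up at runtime) leaves the completion count at zero, so cmdbatch_gate starts closed and only opens once adreno_start() reaches adreno_dispatcher_start() and calls complete_all(). A hedged sketch of the equivalent runtime setup, using a stand-in struct rather than the driver's:

#include <linux/completion.h>

struct example_device {
        struct completion hwaccess_gate;
        struct completion cmdbatch_gate;
};

/* Runtime equivalent of the static COMPLETION_INITIALIZER() entries above */
static void example_init_gates(struct example_device *device)
{
        init_completion(&device->hwaccess_gate);
        init_completion(&device->cmdbatch_gate);        /* gate starts closed */
}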