mirror of
https://github.com/S3NEO/android_kernel_samsung_msm8226.git
synced 2024-11-07 03:47:13 +00:00
msm: kgsl: Add support for KGSL_CMDBATCH_MARKER
Sometimes the user mode driver assigns a timestamp and goes to all the work of constructing a command before discovering that it doesn't need to be executed on the GPU. If that happens the driver can set the MARKER bit and let the kernel figure out how to keep timestamps moving forward in a linear fashion. If no commands are ahead of the marker the dispatcher will discard it in software (and update the timestamps accordingly). If there are commands ahead of it the marker will wait patiently for those to expire. If new commands come in after the marker, the marker can be dispatched with NOPs in place of the IBs so the timestamp accounting stays correct. Change-Id: Ic0dedbada4006e3cf9d4698a419be93b1620d35a Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org> Signed-off-by: Sunil Khatri <sunilkh@codeaurora.org>
This commit is contained in:
parent
8edf8418a5
commit
fe0a2966b7
6 changed files with 211 additions and 67 deletions
|
@ -18,6 +18,7 @@
|
|||
#include <linux/err.h>
|
||||
|
||||
#include "kgsl.h"
|
||||
#include "kgsl_sharedmem.h"
|
||||
#include "adreno.h"
|
||||
#include "adreno_ringbuffer.h"
|
||||
#include "adreno_trace.h"
|
||||
|
@ -166,63 +167,134 @@ static int _check_context_queue(struct adreno_context *drawctxt)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* _retire_marker() - Retire a marker command batch without sending it to the
|
||||
* hardware
|
||||
* @cmdbatch: Pointer to the cmdbatch to retire
|
||||
*
|
||||
* In some cases marker commands can be retired by the software without going to
|
||||
* the GPU. In those cases, update the memstore from the CPU, kick off the
|
||||
* event engine to handle expired events and destroy the command batch.
|
||||
*/
|
||||
static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
|
||||
{
|
||||
struct kgsl_context *context = cmdbatch->context;
|
||||
struct kgsl_device *device = context->device;
|
||||
|
||||
/*
|
||||
* Write the start and end timestamp to the memstore to keep the
|
||||
* accounting sane
|
||||
*/
|
||||
kgsl_sharedmem_writel(device, &device->memstore,
|
||||
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
|
||||
cmdbatch->timestamp);
|
||||
|
||||
kgsl_sharedmem_writel(device, &device->memstore,
|
||||
KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
|
||||
cmdbatch->timestamp);
|
||||
|
||||
|
||||
/* Retire pending GPU events for the object */
|
||||
kgsl_process_event_group(device, &context->events);
|
||||
|
||||
trace_adreno_cmdbatch_retired(cmdbatch, -1);
|
||||
kgsl_cmdbatch_destroy(cmdbatch);
|
||||
}
|
||||
|
||||
/*
|
||||
* return true if this is a marker command and the dependent timestamp has
|
||||
* retired
|
||||
*/
|
||||
static bool _marker_expired(struct kgsl_cmdbatch *cmdbatch)
|
||||
{
|
||||
return (cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
|
||||
kgsl_check_timestamp(cmdbatch->device, cmdbatch->context,
|
||||
cmdbatch->marker_timestamp);
|
||||
}
|
||||
|
||||
static inline void _pop_cmdbatch(struct adreno_context *drawctxt)
|
||||
{
|
||||
drawctxt->cmdqueue_head = CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
|
||||
ADRENO_CONTEXT_CMDQUEUE_SIZE);
|
||||
drawctxt->queued--;
|
||||
}
|
||||
|
||||
static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)
|
||||
{
|
||||
struct kgsl_cmdbatch *cmdbatch = NULL;
|
||||
bool pending = false;
|
||||
|
||||
if (drawctxt->cmdqueue_head == drawctxt->cmdqueue_tail)
|
||||
return NULL;
|
||||
|
||||
cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
|
||||
|
||||
/* Check to see if this is a marker we can skip over */
|
||||
if (cmdbatch->flags & KGSL_CMDBATCH_MARKER) {
|
||||
if (_marker_expired(cmdbatch)) {
|
||||
_pop_cmdbatch(drawctxt);
|
||||
_retire_marker(cmdbatch);
|
||||
|
||||
/* Get the next thing in the queue */
|
||||
return _get_cmdbatch(drawctxt);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the marker isn't expired but the SKIP bit is set
|
||||
* then there are real commands following this one in
|
||||
* the queue. This means that we need to dispatch the
|
||||
* command so that we can keep the timestamp accounting
|
||||
* correct. If skip isn't set then we block this queue
|
||||
* until the dependent timestamp expires
|
||||
*/
|
||||
|
||||
if (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv))
|
||||
pending = true;
|
||||
}
|
||||
|
||||
spin_lock(&cmdbatch->lock);
|
||||
if (!list_empty(&cmdbatch->synclist))
|
||||
pending = true;
|
||||
spin_unlock(&cmdbatch->lock);
|
||||
|
||||
/*
|
||||
* If changes are pending and the canary timer hasn't been
|
||||
* started yet, start it
|
||||
*/
|
||||
if (pending) {
|
||||
/*
|
||||
* If syncpoints are pending start the canary timer if
|
||||
* it hasn't already been started
|
||||
*/
|
||||
if (!timer_pending(&cmdbatch->timer))
|
||||
mod_timer(&cmdbatch->timer, jiffies + (5 * HZ));
|
||||
|
||||
return ERR_PTR(-EAGAIN);
|
||||
}
|
||||
|
||||
/*
|
||||
* Otherwise, delete the timer to make sure it is good
|
||||
* and dead before queuing the buffer
|
||||
*/
|
||||
del_timer_sync(&cmdbatch->timer);
|
||||
|
||||
_pop_cmdbatch(drawctxt);
|
||||
return cmdbatch;
|
||||
}
|
||||
|
||||
/**
|
||||
* adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
|
||||
* @drawctxt: Pointer to the adreno draw context
|
||||
*
|
||||
* Dequeue a new command batch from the context list
|
||||
*/
|
||||
static inline struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
|
||||
static struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
|
||||
struct adreno_context *drawctxt)
|
||||
{
|
||||
struct kgsl_cmdbatch *cmdbatch = NULL;
|
||||
int pending;
|
||||
struct kgsl_cmdbatch *cmdbatch;
|
||||
|
||||
spin_lock(&drawctxt->lock);
|
||||
if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
|
||||
cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
|
||||
|
||||
/*
|
||||
* Don't dequeue a cmdbatch that is still waiting for other
|
||||
* events
|
||||
*/
|
||||
|
||||
spin_lock(&cmdbatch->lock);
|
||||
pending = list_empty(&cmdbatch->synclist) ? 0 : 1;
|
||||
|
||||
/*
|
||||
* If changes are pending and the canary timer hasn't been
|
||||
* started yet, start it
|
||||
*/
|
||||
if (pending) {
|
||||
/*
|
||||
* If syncpoints are pending start the canary timer if
|
||||
* it hasn't already been started
|
||||
*/
|
||||
if (!timer_pending(&cmdbatch->timer))
|
||||
mod_timer(&cmdbatch->timer, jiffies + (5 * HZ));
|
||||
spin_unlock(&cmdbatch->lock);
|
||||
} else {
|
||||
/*
|
||||
* Otherwise, delete the timer to make sure it is good
|
||||
* and dead before queuing the buffer
|
||||
*/
|
||||
spin_unlock(&cmdbatch->lock);
|
||||
del_timer_sync(&cmdbatch->timer);
|
||||
}
|
||||
|
||||
if (pending) {
|
||||
cmdbatch = ERR_PTR(-EAGAIN);
|
||||
goto done;
|
||||
}
|
||||
|
||||
drawctxt->cmdqueue_head =
|
||||
CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
|
||||
ADRENO_CONTEXT_CMDQUEUE_SIZE);
|
||||
drawctxt->queued--;
|
||||
}
|
||||
|
||||
done:
|
||||
cmdbatch = _get_cmdbatch(drawctxt);
|
||||
spin_unlock(&drawctxt->lock);
|
||||
|
||||
return cmdbatch;
|
||||
|
@ -361,7 +433,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
|
|||
return ret;
|
||||
}
|
||||
|
||||
trace_adreno_cmdbatch_submitted(cmdbatch, dispatcher->inflight);
|
||||
trace_adreno_cmdbatch_submitted(cmdbatch, (int) dispatcher->inflight);
|
||||
|
||||
dispatcher->cmdqueue[dispatcher->tail] = cmdbatch;
|
||||
dispatcher->tail = (dispatcher->tail + 1) %
|
||||
|
@ -735,6 +807,32 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
|
|||
|
||||
cmdbatch->timestamp = *timestamp;
|
||||
|
||||
if (cmdbatch->flags & KGSL_CMDBATCH_MARKER) {
|
||||
|
||||
/*
|
||||
* See if we can fastpath this thing - if nothing is queued
|
||||
* and nothing is inflight retire without bothering the GPU
|
||||
*/
|
||||
|
||||
if (!drawctxt->queued && kgsl_check_timestamp(cmdbatch->device,
|
||||
cmdbatch->context, drawctxt->queued_timestamp)) {
|
||||
trace_adreno_cmdbatch_queued(cmdbatch,
|
||||
drawctxt->queued);
|
||||
|
||||
_retire_marker(cmdbatch);
|
||||
spin_unlock(&drawctxt->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Remember the last queued timestamp - the marker will block
|
||||
* until that timestamp is expired (unless another command
|
||||
* comes along and forces the marker to execute)
|
||||
*/
|
||||
|
||||
cmdbatch->marker_timestamp = drawctxt->queued_timestamp;
|
||||
}
|
||||
|
||||
/* SYNC commands have timestamp 0 and will get optimized out anyway */
|
||||
if (!(cmdbatch->flags & KGSL_CONTEXT_SYNC))
|
||||
drawctxt->queued_timestamp = *timestamp;
|
||||
|
@ -754,6 +852,24 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
|
|||
drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) %
|
||||
ADRENO_CONTEXT_CMDQUEUE_SIZE;
|
||||
|
||||
/*
|
||||
* If this is a real command then we need to force any markers queued
|
||||
* before it to dispatch to keep time linear - set the skip bit so
|
||||
* the commands get NOPed.
|
||||
*/
|
||||
|
||||
if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)) {
|
||||
unsigned int i = drawctxt->cmdqueue_head;
|
||||
|
||||
while (i != drawctxt->cmdqueue_tail) {
|
||||
if (drawctxt->cmdqueue[i]->flags & KGSL_CMDBATCH_MARKER)
|
||||
set_bit(CMDBATCH_FLAG_SKIP,
|
||||
&drawctxt->cmdqueue[i]->priv);
|
||||
|
||||
i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
drawctxt->queued++;
|
||||
trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued);
|
||||
|
||||
|
@ -1460,7 +1576,7 @@ static void adreno_dispatcher_work(struct work_struct *work)
|
|||
}
|
||||
|
||||
trace_adreno_cmdbatch_retired(cmdbatch,
|
||||
dispatcher->inflight - 1);
|
||||
(int) (dispatcher->inflight - 1));
|
||||
|
||||
/* Reduce the number of inflight command batches */
|
||||
dispatcher->inflight--;
|
||||
|
|
|
@ -51,17 +51,21 @@ DECLARE_EVENT_CLASS(adreno_cmdbatch_template,
|
|||
TP_STRUCT__entry(
|
||||
__field(unsigned int, id)
|
||||
__field(unsigned int, timestamp)
|
||||
__field(unsigned int, inflight)
|
||||
__field(int, inflight)
|
||||
__field(unsigned int, flags)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->id = cmdbatch->context->id;
|
||||
__entry->timestamp = cmdbatch->timestamp;
|
||||
__entry->inflight = inflight;
|
||||
__entry->flags = cmdbatch->flags;
|
||||
),
|
||||
TP_printk(
|
||||
"ctx=%u ts=%u inflight=%u",
|
||||
"ctx=%u ts=%u inflight=%d flags=%s",
|
||||
__entry->id, __entry->timestamp,
|
||||
__entry->inflight
|
||||
__entry->inflight,
|
||||
__entry->flags ? __print_flags(__entry->flags, "|",
|
||||
{ KGSL_CMDBATCH_MARKER, "MARKER" }) : "none"
|
||||
)
|
||||
);
|
||||
|
||||
|
@ -76,22 +80,26 @@ TRACE_EVENT(adreno_cmdbatch_retired,
|
|||
TP_STRUCT__entry(
|
||||
__field(unsigned int, id)
|
||||
__field(unsigned int, timestamp)
|
||||
__field(unsigned int, inflight)
|
||||
__field(int, inflight)
|
||||
__field(unsigned int, recovery)
|
||||
__field(unsigned int, flags)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->id = cmdbatch->context->id;
|
||||
__entry->timestamp = cmdbatch->timestamp;
|
||||
__entry->inflight = inflight;
|
||||
__entry->recovery = cmdbatch->fault_recovery;
|
||||
__entry->flags = cmdbatch->flags;
|
||||
),
|
||||
TP_printk(
|
||||
"ctx=%u ts=%u inflight=%u recovery=%s",
|
||||
"ctx=%u ts=%u inflight=%d recovery=%s flags=%s",
|
||||
__entry->id, __entry->timestamp,
|
||||
__entry->inflight,
|
||||
__entry->recovery ?
|
||||
__print_flags(__entry->recovery, "|",
|
||||
ADRENO_FT_TYPES) : "none"
|
||||
ADRENO_FT_TYPES) : "none",
|
||||
__entry->flags ? __print_flags(__entry->flags, "|",
|
||||
{ KGSL_CMDBATCH_MARKER, "MARKER" }) : "none"
|
||||
)
|
||||
);
|
||||
|
||||
|
|
|
@ -2272,7 +2272,7 @@ static struct kgsl_cmdbatch *_kgsl_cmdbatch_create(struct kgsl_device *device,
|
|||
if (IS_ERR(cmdbatch))
|
||||
return cmdbatch;
|
||||
|
||||
if (!(flags & KGSL_CMDBATCH_SYNC)) {
|
||||
if (!(flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))) {
|
||||
struct kgsl_ibdesc ibdesc;
|
||||
void __user *uptr = (void __user *) cmdlist;
|
||||
|
||||
|
@ -2332,7 +2332,7 @@ static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
|
|||
long result = -EINVAL;
|
||||
|
||||
/* The legacy functions don't support synchronization commands */
|
||||
if (param->flags & KGSL_CMDBATCH_SYNC)
|
||||
if ((param->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER)))
|
||||
return -EINVAL;
|
||||
|
||||
/* Get the context */
|
||||
|
@ -2392,14 +2392,21 @@ static long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
|
|||
|
||||
long result = -EINVAL;
|
||||
|
||||
/* The number of IBs are completely ignored for sync commands */
|
||||
if (!(param->flags & KGSL_CMDBATCH_SYNC)) {
|
||||
if (param->numcmds == 0 || param->numcmds > KGSL_MAX_NUMIBS)
|
||||
return -EINVAL;
|
||||
} else if (param->numcmds != 0) {
|
||||
/*
|
||||
* The SYNC bit is supposed to identify a dummy sync object so warn the
|
||||
* user if they specified any IBs with it. A MARKER command can either
|
||||
* have IBs or not but if the command has 0 IBs it is automatically
|
||||
* assumed to be a marker. If none of the above make sure that the user
|
||||
* specified a sane number of IBs
|
||||
*/
|
||||
|
||||
if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds)
|
||||
KGSL_DEV_ERR_ONCE(device,
|
||||
"Commands specified with the SYNC flag. They will be ignored\n");
|
||||
}
|
||||
else if (param->numcmds > KGSL_MAX_NUMIBS)
|
||||
return -EINVAL;
|
||||
else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0)
|
||||
param->flags |= KGSL_CMDBATCH_MARKER;
|
||||
|
||||
context = kgsl_context_get_owner(dev_priv, param->context_id);
|
||||
if (context == NULL)
|
||||
|
|
|
@ -237,6 +237,8 @@ struct kgsl_memobj_node {
|
|||
* @memlist: List of all memory used in this command batch
|
||||
* @synclist: List of context/timestamp tuples to wait for before issuing
|
||||
* @timer: a timer used to track possible sync timeouts for this cmdbatch
|
||||
* @marker_timestamp: For markers, the timestamp of the last "real" command that
|
||||
* was queued
|
||||
*
|
||||
* This struture defines an atomic batch of command buffers issued from
|
||||
* userspace.
|
||||
|
@ -256,6 +258,7 @@ struct kgsl_cmdbatch {
|
|||
struct list_head memlist;
|
||||
struct list_head synclist;
|
||||
struct timer_list timer;
|
||||
unsigned int marker_timestamp;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -603,6 +606,9 @@ void kgsl_cancel_event(struct kgsl_device *device,
|
|||
kgsl_event_func func, void *priv);
|
||||
int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group,
|
||||
unsigned int timestamp, kgsl_event_func func, void *priv);
|
||||
void kgsl_process_event_group(struct kgsl_device *device,
|
||||
struct kgsl_event_group *group);
|
||||
|
||||
void kgsl_process_events(struct work_struct *work);
|
||||
|
||||
static inline struct kgsl_device_platform_data *
|
||||
|
|
|
@ -56,16 +56,21 @@ static void _kgsl_event_worker(struct work_struct *work)
|
|||
}
|
||||
|
||||
/**
|
||||
* retire_events() - Handle all the retired events in a group
|
||||
* kgsl_process_event_group() - Handle all the retired events in a group
|
||||
* @device: Pointer to a KGSL device
|
||||
* @group: Pointer to a GPU events group to process
|
||||
*/
|
||||
static void retire_events(struct kgsl_device *device,
|
||||
void kgsl_process_event_group(struct kgsl_device *device,
|
||||
struct kgsl_event_group *group)
|
||||
{
|
||||
struct kgsl_event *event, *tmp;
|
||||
unsigned int timestamp;
|
||||
struct kgsl_context *context = group->context;
|
||||
struct kgsl_context *context;
|
||||
|
||||
if (group == NULL)
|
||||
return;
|
||||
|
||||
context = group->context;
|
||||
|
||||
_kgsl_context_get(context);
|
||||
|
||||
|
@ -91,6 +96,7 @@ out:
|
|||
spin_unlock(&group->lock);
|
||||
kgsl_context_put(context);
|
||||
}
|
||||
EXPORT_SYMBOL(kgsl_process_event_group);
|
||||
|
||||
/**
|
||||
* kgsl_cancel_events_timestamp() - Cancel pending events for a given timestamp
|
||||
|
@ -244,7 +250,7 @@ void kgsl_process_events(struct work_struct *work)
|
|||
|
||||
read_lock(&group_lock);
|
||||
list_for_each_entry(group, &group_list, group)
|
||||
retire_events(device, group);
|
||||
kgsl_process_event_group(device, group);
|
||||
read_unlock(&group_lock);
|
||||
}
|
||||
EXPORT_SYMBOL(kgsl_process_events);
|
||||
|
|
|
@ -61,6 +61,7 @@
|
|||
* cmdbatch only flags as needed
|
||||
*/
|
||||
#define KGSL_CMDBATCH_MEMLIST 0x00000001
|
||||
#define KGSL_CMDBATCH_MARKER 0x00000002
|
||||
#define KGSL_CMDBATCH_SUBMIT_IB_LIST KGSL_CONTEXT_SUBMIT_IB_LIST /* 0x004 */
|
||||
#define KGSL_CMDBATCH_CTX_SWITCH KGSL_CONTEXT_CTX_SWITCH /* 0x008 */
|
||||
#define KGSL_CMDBATCH_END_OF_FRAME KGSL_CONTEXT_END_OF_FRAME /* 0x100 */
|
||||
|
|
Loading…
Reference in a new issue