block: row: Insert dispatch_quantum into struct row_queue

There is really no point in keeping the dispatch quantum
of a queue outside of it. Moving it into the row_queue
structure spares an extra level of indirection on every access.
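
For illustration, a minimal sketch of the layout before and after this
patch (the *_before/*_after tags are hypothetical, used here only to
show both shapes side by side; the field names come from the diff):

	/* Before: disp_quantum lives in an anonymous wrapper beside the
	 * queue, so reaching queue state costs an extra ".rqueue" step. */
	struct row_data_before {
		struct {
			struct row_queue rqueue;
			int disp_quantum;
		} row_queues[ROWQ_MAX_PRIO];
	};
	/* access: rd->row_queues[i].rqueue.nr_dispatched */

	/* After: disp_quantum is a member of struct row_queue itself. */
	struct row_data_after {
		struct row_queue row_queues[ROWQ_MAX_PRIO];
	};
	/* access: rd->row_queues[i].nr_dispatched */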

Change-Id: Ic77571818b643e71f9aafbb2ca93d0a92158b199
Signed-off-by: Tatyana Brokhman <tlinder@codeaurora.org>
Author:    Tatyana Brokhman <tlinder@codeaurora.org>
Date:      2013-01-12 16:21:12 +02:00
Committer: Stephen Boyd
parent     98e48591e3
commit     fa56654524


@@ -105,6 +105,8 @@ struct rowq_idling_data {
  *	the current dispatch cycle
  * @slice: number of requests to dispatch in a cycle
  * @nr_req: number of requests in queue
+ * @disp_quantum: number of requests this queue may
+ *	dispatch in a dispatch cycle
  * @idle_data: data for idling on queues
  *
  */
@@ -117,6 +119,7 @@ struct row_queue {
 	unsigned int slice;
 	unsigned int nr_req;
+	int disp_quantum;
 	/* used only for READ queues */
 	struct rowq_idling_data idle_data;
@@ -141,8 +144,7 @@ struct idling_data {
 /**
  * struct row_queue - Per block device rqueue structure
  * @dispatch_queue: dispatch rqueue
- * @row_queues: array of priority request queues with
- *	dispatch quantum per rqueue
+ * @row_queues: array of priority request queues
  * @curr_queue: index in the row_queues array of the
  *	currently serviced rqueue
  * @read_idle: data for idling after READ request
@@ -155,10 +157,7 @@ struct idling_data {
 struct row_data {
 	struct request_queue *dispatch_queue;
-	struct {
-		struct row_queue rqueue;
-		int disp_quantum;
-	} row_queues[ROWQ_MAX_PRIO];
+	struct row_queue row_queues[ROWQ_MAX_PRIO];
 	enum row_queue_prio curr_queue;
@@ -198,8 +197,7 @@ static inline void __maybe_unused row_dump_queues_stat(struct row_data *rd)
 {
 	int i;
-	row_log(rd->dispatch_queue, " Queues status (curr_queue=%d):",
-		rd->curr_queue);
+	row_log(rd->dispatch_queue, " Queues status:");
 	for (i = 0; i < ROWQ_MAX_PRIO; i++)
 		row_log(rd->dispatch_queue,
 			"queue%d: dispatched= %d, nr_req=%d", i,
@@ -226,7 +224,7 @@ static void kick_queue(struct work_struct *work)
 	row_log_rowq(rd, rd->curr_queue, "Performing delayed work");
 	/* Mark idling process as done */
-	rd->row_queues[rd->curr_queue].rqueue.idle_data.begin_idling = false;
+	rd->row_queues[rd->curr_queue].idle_data.begin_idling = false;
 	if (!(rd->nr_reqs[0] + rd->nr_reqs[1]))
 		row_log(rd->dispatch_queue, "No requests in scheduler");
@@ -251,7 +249,7 @@ static inline void row_restart_disp_cycle(struct row_data *rd)
 	int i;
 	for (i = 0; i < ROWQ_MAX_PRIO; i++)
-		rd->row_queues[i].rqueue.nr_dispatched = 0;
+		rd->row_queues[i].nr_dispatched = 0;
 	rd->curr_queue = ROWQ_PRIO_HIGH_READ;
 	row_log(rd->dispatch_queue, "Restarting cycle");
@@ -355,7 +353,7 @@ static bool row_urgent_pending(struct request_queue *q)
 	for (i = 0; i < ROWQ_MAX_PRIO; i++)
 		if (urgent_queues[i] && row_rowq_unserved(rd, i) &&
-		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
+		    !list_empty(&rd->row_queues[i].fifo)) {
 			row_log_rowq(rd, i,
 				"Urgent request pending (curr=%i)",
 				rd->curr_queue);
@@ -394,13 +392,13 @@ static void row_dispatch_insert(struct row_data *rd)
 {
 	struct request *rq;
-	rq = rq_entry_fifo(rd->row_queues[rd->curr_queue].rqueue.fifo.next);
+	rq = rq_entry_fifo(rd->row_queues[rd->curr_queue].fifo.next);
 	row_remove_request(rd->dispatch_queue, rq);
 	elv_dispatch_add_tail(rd->dispatch_queue, rq);
-	rd->row_queues[rd->curr_queue].rqueue.nr_dispatched++;
+	rd->row_queues[rd->curr_queue].nr_dispatched++;
 	row_clear_rowq_unserved(rd, rd->curr_queue);
 	row_log_rowq(rd, rd->curr_queue, " Dispatched request nr_disp = %d",
-		rd->row_queues[rd->curr_queue].rqueue.nr_dispatched);
+		rd->row_queues[rd->curr_queue].nr_dispatched);
 }
 
 /*
@@ -426,7 +424,7 @@ static int row_choose_queue(struct row_data *rd)
 	 * Loop over all queues to find the next queue that is not empty.
 	 * Stop when you get back to curr_queue
 	 */
-	while (list_empty(&rd->row_queues[rd->curr_queue].rqueue.fifo)
+	while (list_empty(&rd->row_queues[rd->curr_queue].fifo)
 	       && rd->curr_queue != prev_curr_queue) {
 		/* Mark rqueue as unserved */
 		row_mark_rowq_unserved(rd, rd->curr_queue);
@@ -458,10 +456,10 @@ static int row_dispatch_requests(struct request_queue *q, int force)
 	 */
 	for (i = 0; i < currq; i++) {
 		if (row_rowq_unserved(rd, i) &&
-		    !list_empty(&rd->row_queues[i].rqueue.fifo)) {
+		    !list_empty(&rd->row_queues[i].fifo)) {
 			row_log_rowq(rd, currq,
 				" Preemting for unserved rowq%d. (nr_req=%u)",
-				i, rd->row_queues[currq].rqueue.nr_req);
+				i, rd->row_queues[currq].nr_req);
 			rd->curr_queue = i;
 			row_dispatch_insert(rd);
 			ret = 1;
@@ -469,9 +467,9 @@ static int row_dispatch_requests(struct request_queue *q, int force)
 		}
 	}
-	if (rd->row_queues[currq].rqueue.nr_dispatched >=
+	if (rd->row_queues[currq].nr_dispatched >=
 	    rd->row_queues[currq].disp_quantum) {
-		rd->row_queues[currq].rqueue.nr_dispatched = 0;
+		rd->row_queues[currq].nr_dispatched = 0;
 		row_log_rowq(rd, currq, "Expiring rqueue");
 		ret = row_choose_queue(rd);
 		if (ret)
@@ -480,7 +478,7 @@ static int row_dispatch_requests(struct request_queue *q, int force)
 	}
 	/* Dispatch from curr_queue */
-	if (list_empty(&rd->row_queues[currq].rqueue.fifo)) {
+	if (list_empty(&rd->row_queues[currq].fifo)) {
 		/* check idling */
 		if (delayed_work_pending(&rd->read_idle.idle_work)) {
 			if (force) {
@@ -496,7 +494,7 @@ static int row_dispatch_requests(struct request_queue *q, int force)
 		}
 		if (!force && queue_idling_enabled[currq] &&
-		    rd->row_queues[currq].rqueue.idle_data.begin_idling) {
+		    rd->row_queues[currq].idle_data.begin_idling) {
 			if (!queue_delayed_work(rd->read_idle.idle_workqueue,
 					&rd->read_idle.idle_work,
 					rd->read_idle.idle_time)) {
@@ -543,12 +541,12 @@ static int row_init_queue(struct request_queue *q)
 		return -ENOMEM;
 	for (i = 0; i < ROWQ_MAX_PRIO; i++) {
-		INIT_LIST_HEAD(&rdata->row_queues[i].rqueue.fifo);
+		INIT_LIST_HEAD(&rdata->row_queues[i].fifo);
 		rdata->row_queues[i].disp_quantum = queue_quantum[i];
-		rdata->row_queues[i].rqueue.rdata = rdata;
-		rdata->row_queues[i].rqueue.prio = i;
-		rdata->row_queues[i].rqueue.idle_data.begin_idling = false;
-		rdata->row_queues[i].rqueue.idle_data.last_insert_time =
+		rdata->row_queues[i].rdata = rdata;
+		rdata->row_queues[i].prio = i;
+		rdata->row_queues[i].idle_data.begin_idling = false;
+		rdata->row_queues[i].idle_data.last_insert_time =
 			ktime_set(0, 0);
 	}
@@ -588,7 +586,7 @@ static void row_exit_queue(struct elevator_queue *e)
 	int i;
 	for (i = 0; i < ROWQ_MAX_PRIO; i++)
-		BUG_ON(!list_empty(&rd->row_queues[i].rqueue.fifo));
+		BUG_ON(!list_empty(&rd->row_queues[i].fifo));
 	(void)cancel_delayed_work_sync(&rd->read_idle.idle_work);
 	BUG_ON(delayed_work_pending(&rd->read_idle.idle_work));
 	destroy_workqueue(rd->read_idle.idle_workqueue);