block, bfq: add Early Queue Merge (EQM) to BFQ-v7r8 for 3.10.8+

A set of processes may happen to perform interleaved reads, i.e., read
requests whose union would give rise to a sequential read pattern. There
are two typical cases: in the first case, processes read fixed-size
chunks of data at a fixed distance from each other; in the second case,
processes read variable-size chunks at variable distances. The latter
case occurs, for example, with QEMU, which splits the I/O generated by
the guest into multiple chunks and lets these chunks be served by a pool
of cooperating processes, iteratively assigning the next chunk of I/O to
the first available process. CFQ uses actual queue merging for the first
type of processes, whereas it uses preemption to get a sequential read
pattern out of the read requests performed by the second type of
processes. In the end it uses two different mechanisms to achieve the
same goal: boosting the throughput with interleaved I/O.

This patch introduces Early Queue Merge (EQM), a unified mechanism to
get a sequential read pattern with both types of processes. The main
idea is to check newly arrived requests against the next request of the
active queue, both in the case of an actual request insert and in the
case of a request merge. By doing so, both types of processes can be
handled by just merging their queues. EQM is therefore simpler and more
compact than the pair of mechanisms used in CFQ.
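
To make the idea concrete, the following is a minimal sketch of the EQM
check performed at request insertion/merge time. It is not the code of
this patch: bfqq_close_to() and bfq_merge_bfqqs() are simplified
stand-ins for the real cooperation-detection and queue-merging helpers,
whose actual signatures in BFQ differ.

/*
 * Sketch: if a newly arrived request rq, queued in bfqq, is close to
 * the next request of the in-service queue, merge the two queues so
 * that their requests are served as a single sequential stream.
 */
static struct bfq_queue *bfq_early_merge_check(struct bfq_data *bfqd,
                                               struct bfq_queue *bfqq,
                                               struct request *rq)
{
        struct bfq_queue *in_serv = bfqd->in_service_queue;

        if (in_serv == NULL || in_serv == bfqq)
                return bfqq;

        /* Would rq continue the sequential pattern of the active queue? */
        if (bfqq_close_to(in_serv, blk_rq_pos(rq)))
                return bfq_merge_bfqqs(bfqd, bfqq, in_serv);

        return bfqq;
}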

Finally, EQM also preserves the typical low-latency properties of BFQ,
by properly restoring the weight-raising state of a queue when it gets
back to a non-merged state.
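
As a rough illustration (a simplified sketch under assumed helper names,
not the exact code of this patch), the weight-raising state can be
parked in the bfq_io_cq when the sync queue is merged, and restored when
the queue is split again, using the new fields documented in bfq.h
below:

/*
 * Sketch: save the per-queue state that must survive a merge into the
 * per-process bfq_io_cq, and restore it when the queue is split back.
 */
static void bfq_save_bfqq_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
{
        bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
        bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
        bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);

        /* Snapshot of the weight-raising time the queue still had. */
        if (bfqq->wr_coeff > 1)
                bic->wr_time_left = bfqq->wr_cur_max_time -
                        (jiffies - bfqq->last_wr_start_finish);
}

static void bfq_resume_bfqq_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
{
        if (bic->saved_idle_window)
                bfq_mark_bfqq_idle_window(bfqq);
        if (bic->saved_IO_bound)
                bfq_mark_bfqq_IO_bound(bfqq);

        /* Resume weight raising for the time that was left at merge time. */
        if (bic->wr_time_left) {
                bfqq->wr_cur_max_time = bic->wr_time_left;
                bfqq->last_wr_start_finish = jiffies;
                bic->wr_time_left = 0;
        }
}

The state lives in the bfq_io_cq rather than in the shared bfq_queue, so
that it is still available when the process gets its own queue back.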

Change-Id: I31d48c463273603c6c49ec675c7a524a6937da2a
Signed-off-by: Mauro Andreolini <mauro.andreolini@unimore.it>
Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
Author:    Mauro Andreolini, 2015-06-06 23:44:03 +02:00
Committer: LuK1337
parent ac922cd71c
commit 78b11b701c
3 changed files with 626 additions and 298 deletions

File diff suppressed because it is too large.

@@ -1085,34 +1085,6 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
 	return bfqq;
 }
 
-/*
- * Forced extraction of the given queue.
- */
-static void bfq_get_next_queue_forced(struct bfq_data *bfqd,
-				      struct bfq_queue *bfqq)
-{
-	struct bfq_entity *entity;
-	struct bfq_sched_data *sd;
-
-	BUG_ON(bfqd->in_service_queue != NULL);
-
-	entity = &bfqq->entity;
-	/*
-	 * Bubble up extraction/update from the leaf to the root.
-	 */
-	for_each_entity(entity) {
-		sd = entity->sched_data;
-
-		bfq_update_budget(entity);
-		bfq_update_vtime(bfq_entity_service_tree(entity));
-		bfq_active_extract(bfq_entity_service_tree(entity), entity);
-		sd->in_service_entity = entity;
-		sd->next_in_service = NULL;
-		entity->service = 0;
-	}
-
-	return;
-}
-
 static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
 {
 	if (bfqd->in_service_bic != NULL) {


@@ -218,18 +218,21 @@ struct bfq_group;
  *                      idle @bfq_queue with no outstanding requests, then
  *                      the task associated with the queue it is deemed as
  *                      soft real-time (see the comments to the function
- *                      bfq_bfqq_softrt_next_start()).
+ *                      bfq_bfqq_softrt_next_start())
  * @last_idle_bklogged: time of the last transition of the @bfq_queue from
  *                      idle to backlogged
  * @service_from_backlogged: cumulative service received from the @bfq_queue
  *                           since the last transition from idle to
  *                           backlogged
+ * @bic: pointer to the bfq_io_cq owning the bfq_queue, set to %NULL if the
+ *       queue is shared
  *
- * A bfq_queue is a leaf request queue; it can be associated with an io_context
- * or more, if it is async or shared between cooperating processes. @cgroup
- * holds a reference to the cgroup, to be sure that it does not disappear while
- * a bfqq still references it (mostly to avoid races between request issuing and
- * task migration followed by cgroup destruction).
+ * A bfq_queue is a leaf request queue; it can be associated with an
+ * io_context or more, if it is async or shared between cooperating
+ * processes. @cgroup holds a reference to the cgroup, to be sure that it
+ * does not disappear while a bfqq still references it (mostly to avoid
+ * races between request issuing and task migration followed by cgroup
+ * destruction).
  * All the fields are protected by the queue lock of the containing bfqd.
  */
 struct bfq_queue {
@@ -269,6 +272,7 @@ struct bfq_queue {
 	unsigned int requests_within_timer;
 
 	pid_t pid;
+	struct bfq_io_cq *bic;
 
 	/* weight-raising fields */
 	unsigned long wr_cur_max_time;
@@ -298,12 +302,42 @@ struct bfq_ttime {
  * @icq: associated io_cq structure
  * @bfqq: array of two process queues, the sync and the async
  * @ttime: associated @bfq_ttime struct
+ * @wr_time_left: snapshot of the time left before weight raising ends
+ *                for the sync queue associated to this process; this
+ *                snapshot is taken to remember this value while the weight
+ *                raising is suspended because the queue is merged with a
+ *                shared queue, and is used to set @raising_cur_max_time
+ *                when the queue is split from the shared queue and its
+ *                weight is raised again
+ * @saved_idle_window: same purpose as the previous field for the idle
+ *                     window
+ * @saved_IO_bound: same purpose as the previous two fields for the I/O
+ *                  bound classification of a queue
+ * @saved_in_large_burst: same purpose as the previous fields for the
+ *                        value of the field keeping the queue's belonging
+ *                        to a large burst
+ * @was_in_burst_list: true if the queue belonged to a burst list
+ *                     before its merge with another cooperating queue
+ * @cooperations: counter of consecutive successful queue merges underwent
+ *                by any of the process' @bfq_queues
+ * @failed_cooperations: counter of consecutive failed queue merges of any
+ *                       of the process' @bfq_queues
  */
 struct bfq_io_cq {
 	struct io_cq icq; /* must be the first member */
 	struct bfq_queue *bfqq[2];
 	struct bfq_ttime ttime;
 	int ioprio;
+
+	unsigned int wr_time_left;
+	bool saved_idle_window;
+	bool saved_IO_bound;
+	bool saved_in_large_burst;
+	bool was_in_burst_list;
+
+	unsigned int cooperations;
+	unsigned int failed_cooperations;
 };
 
 enum bfq_device_speed {
@@ -536,7 +570,7 @@ enum bfqq_state_flags {
 	BFQ_BFQQ_FLAG_idle_window,	/* slice idling enabled */
 	BFQ_BFQQ_FLAG_sync,		/* synchronous queue */
 	BFQ_BFQQ_FLAG_budget_new,	/* no completion with this budget */
-	BFQ_BFQQ_FLAG_IO_bound,		/*
+	BFQ_BFQQ_FLAG_IO_bound,		/*
 					 * bfqq has timed-out at least once
 					 * having consumed at most 2/10 of
 					 * its budget
@@ -549,12 +583,13 @@ enum bfqq_state_flags {
 					 * bfqq has proved to be slow and
 					 * seeky until budget timeout
 					 */
-	BFQ_BFQQ_FLAG_softrt_update,	/*
+	BFQ_BFQQ_FLAG_softrt_update,	/*
 					 * may need softrt-next-start
 					 * update
 					 */
 	BFQ_BFQQ_FLAG_coop,		/* bfqq is shared */
-	BFQ_BFQQ_FLAG_split_coop,	/* shared bfqq will be splitted */
+	BFQ_BFQQ_FLAG_split_coop,	/* shared bfqq will be split */
+	BFQ_BFQQ_FLAG_just_split,	/* queue has just been split */
 };
 
 #define BFQ_BFQQ_FNS(name) \
@@ -583,6 +618,7 @@ BFQ_BFQQ_FNS(in_large_burst);
 BFQ_BFQQ_FNS(constantly_seeky);
 BFQ_BFQQ_FNS(coop);
 BFQ_BFQQ_FNS(split_coop);
+BFQ_BFQQ_FNS(just_split);
 BFQ_BFQQ_FNS(softrt_update);
 #undef BFQ_BFQQ_FNS