block: row: change hrtimer_cancel to hrtimer_try_to_cancel

Calling hrtimer_cancel with interrupts disabled can result in a livelock.
When flushing plug list in the block layer interrupts are disabled and an
hrtimer is used when adding requests from that plug list to the scheduler.
In this code flow, if the hrtimer (which is used for idling) is set, it's
being canceled by calling hrtimer_cancel. hrtimer_cancel will perform
the following in an endless loop:
1. try to cancel the timer
2. if that fails - call cpu_relax() and retry
The cancellation can fail if the timer callback has already started. Since
interrupts are disabled, the callback can never complete, so the loop never exits.
This patch reduces the time the hrtimer lock is held while
interrupts are disabled by calling hrtimer_try_to_cancel. The latter will
try to cancel the timer just once and return with an error code if it fails.

CRs-fixed: 499887
Change-Id: I25f79c357426d72ad67c261ce7cb503ae97dc7b9
Signed-off-by: Tatyana Brokhman <tlinder@codeaurora.org>
This commit is contained in:
Tatyana Brokhman 2013-07-02 14:43:13 +03:00 committed by Stephen Boyd
parent 2c9ebd38ea
commit ca231e14f3
1 changed file with 21 additions and 17 deletions

View File

@ -350,11 +350,14 @@ static void row_add_request(struct request_queue *q,
if (row_queues_def[rqueue->prio].idling_enabled) {
if (rd->rd_idle_data.idling_queue_idx == rqueue->prio &&
hrtimer_active(&rd->rd_idle_data.hr_timer)) {
(void)hrtimer_cancel(&rd->rd_idle_data.hr_timer);
row_log_rowq(rd, rqueue->prio,
"Canceled delayed work on %d",
rd->rd_idle_data.idling_queue_idx);
rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
if (hrtimer_try_to_cancel(
&rd->rd_idle_data.hr_timer) >= 0) {
row_log_rowq(rd, rqueue->prio,
"Canceled delayed work on %d",
rd->rd_idle_data.idling_queue_idx);
rd->rd_idle_data.idling_queue_idx =
ROWQ_MAX_PRIO;
}
}
diff_ms = ktime_to_ms(ktime_sub(ktime_get(),
rqueue->idle_data.last_insert_time));
@ -583,14 +586,14 @@ static int row_get_ioprio_class_to_serve(struct row_data *rd, int force)
for (i = 0; i < ROWQ_REG_PRIO_IDX; i++) {
if (!list_empty(&rd->row_queues[i].fifo)) {
if (hrtimer_active(&rd->rd_idle_data.hr_timer)) {
(void)hrtimer_cancel(
&rd->rd_idle_data.hr_timer);
row_log_rowq(rd,
rd->rd_idle_data.idling_queue_idx,
if (hrtimer_try_to_cancel(
&rd->rd_idle_data.hr_timer) >= 0) {
row_log(rd->dispatch_queue,
"Canceling delayed work on %d. RT pending",
rd->rd_idle_data.idling_queue_idx);
rd->rd_idle_data.idling_queue_idx =
ROWQ_MAX_PRIO;
rd->rd_idle_data.idling_queue_idx);
rd->rd_idle_data.idling_queue_idx =
ROWQ_MAX_PRIO;
}
}
if (row_regular_req_pending(rd) &&
@ -726,11 +729,12 @@ static int row_dispatch_requests(struct request_queue *q, int force)
int ret = 0, currq, ioprio_class_to_serve, start_idx, end_idx;
if (force && hrtimer_active(&rd->rd_idle_data.hr_timer)) {
(void)hrtimer_cancel(&rd->rd_idle_data.hr_timer);
row_log_rowq(rd, rd->rd_idle_data.idling_queue_idx,
"Canceled delayed work on %d - forced dispatch",
rd->rd_idle_data.idling_queue_idx);
rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
if (hrtimer_try_to_cancel(&rd->rd_idle_data.hr_timer) >= 0) {
row_log(rd->dispatch_queue,
"Canceled delayed work on %d - forced dispatch",
rd->rd_idle_data.idling_queue_idx);
rd->rd_idle_data.idling_queue_idx = ROWQ_MAX_PRIO;
}
}
if (rd->pending_urgent_rq) {