Mirror of https://github.com/S3NEO/android_kernel_samsung_msm8226.git (synced 2024-11-07 03:47:13 +00:00)
block: urgent request: remove unnecessary urgent marking
An urgent request is marked by the scheduler in rq->cmd_flags with the
REQ_URGENT flag. There is no need to add an additional marking by the
block layer.

Change-Id: I05d5e9539d2f6c1bfa80240b0671db197a5d3b3f
Signed-off-by: Tatyana Brokhman <tlinder@codeaurora.org>
Parent: fe6fd2f844
Commit: 2c41518357
4 changed files with 5 additions and 20 deletions
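The premise of the change is that the I/O scheduler already records urgency on the request itself, so a second, block-layer-private marking is redundant. A minimal sketch of that premise follows; the dispatch hook and the helper names in it are illustrative placeholders, not code from this tree. Only the rq->cmd_flags |= REQ_URGENT line reflects what the schedulers actually do.

/*
 * Illustrative sketch only: how a scheduler conveys urgency through
 * cmd_flags. pick_next_request() and queue_has_urgent_work() are
 * hypothetical placeholders, not functions from this kernel.
 */
static struct request *example_dispatch(struct request_queue *q)
{
	struct request *rq = pick_next_request(q);	/* hypothetical */

	if (rq && queue_has_urgent_work(q))		/* hypothetical */
		rq->cmd_flags |= REQ_URGENT;	/* the only marking needed */

	return rq;
}

With the request marked this way, the block layer can simply test rq->cmd_flags & REQ_URGENT instead of copying the information into a separate per-request atomic flag.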
block/blk-core.c

@@ -2131,13 +2131,9 @@ struct request *blk_fetch_request(struct request_queue *q)
 
 	rq = blk_peek_request(q);
 	if (rq) {
-		/*
-		 * Assumption: the next request fetched from scheduler after we
-		 * notified "urgent request pending" - will be the urgent one
-		 */
-		if (q->notified_urgent && !q->dispatched_urgent) {
+		if (rq->cmd_flags & REQ_URGENT) {
+			WARN_ON(q->dispatched_urgent);
 			q->dispatched_urgent = true;
-			(void)blk_mark_rq_urgent(rq);
 		}
 		blk_start_request(rq);
 	}
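With this hunk applied, blk_fetch_request() in block/blk-core.c trusts the scheduler's own marking instead of assuming that the next fetched request is the urgent one. A reconstruction of the resulting function, assembled from the hunk above (a sketch rather than an authoritative listing):

struct request *blk_fetch_request(struct request_queue *q)
{
	struct request *rq;

	rq = blk_peek_request(q);
	if (rq) {
		/* The scheduler flagged the request itself; no heuristic needed. */
		if (rq->cmd_flags & REQ_URGENT) {
			WARN_ON(q->dispatched_urgent);
			q->dispatched_urgent = true;
		}
		blk_start_request(rq);
	}
	return rq;
}

The WARN_ON documents the invariant that only one urgent request is in flight at a time; q->dispatched_urgent is cleared again on completion in the elevator.c hunk below.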
block/blk.h (11 lines changed)
@@ -39,7 +39,6 @@ void __generic_unplug_device(struct request_queue *);
  */
 enum rq_atomic_flags {
 	REQ_ATOM_COMPLETE = 0,
-	REQ_ATOM_URGENT = 1,
 };
 
 /*
@@ -56,16 +55,6 @@ static inline void blk_clear_rq_complete(struct request *rq)
 	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 }
 
-static inline int blk_mark_rq_urgent(struct request *rq)
-{
-	return test_and_set_bit(REQ_ATOM_URGENT, &rq->atomic_flags);
-}
-
-static inline void blk_clear_rq_urgent(struct request *rq)
-{
-	clear_bit(REQ_ATOM_URGENT, &rq->atomic_flags);
-}
-
 /*
  * Internal elevator interface
  */
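The two deleted helpers were direct counterparts of the REQ_ATOM_COMPLETE accessors that remain in block/blk.h. For comparison, the surviving pair reads as follows (quoted from kernels of this vintage for context; it is not part of the diff):

static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

REQ_ATOM_COMPLETE is used to claim completion of a request atomically, so it needs its own bit; urgency is already carried in rq->cmd_flags, which is why the urgent pair can be dropped.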
block/elevator.c

@@ -814,10 +814,10 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (test_bit(REQ_ATOM_URGENT, &rq->atomic_flags)) {
+	if (rq->cmd_flags & REQ_URGENT) {
 		q->notified_urgent = false;
+		WARN_ON(!q->dispatched_urgent);
 		q->dispatched_urgent = false;
-		blk_clear_rq_urgent(rq);
 	}
 	/*
 	 * request is released from the driver, io must be done
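On the completion side, elv_completed_request() in block/elevator.c now keys its urgent bookkeeping off the same cmd_flags bit. A sketch of how the touched portion of the function reads with the hunk applied, reconstructed from the lines above (the accounting code that follows is elided):

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (rq->cmd_flags & REQ_URGENT) {
		/* The dispatched urgent request has completed; reset the
		 * queue-level urgent bookkeeping. */
		q->notified_urgent = false;
		WARN_ON(!q->dispatched_urgent);
		q->dispatched_urgent = false;
	}
	/*
	 * request is released from the driver, io must be done
	 */
	/* ... accounting and elevator callbacks continue here ... */
}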
@@ -1191,7 +1191,7 @@ static bool test_urgent_pending(struct request_queue *q)
 void test_iosched_add_urgent_req(struct test_request *test_rq)
 {
 	spin_lock_irq(&ptd->lock);
-	blk_mark_rq_urgent(test_rq->rq);
+	test_rq->rq->cmd_flags |= REQ_URGENT;
 	list_add_tail(&test_rq->queuelist, &ptd->urgent_queue);
 	ptd->urgent_count++;
 	spin_unlock_irq(&ptd->lock);
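The final hunk is in the test-iosched test driver (its file path is not visible in this capture), which alongside blk_fetch_request() was the other user of blk_mark_rq_urgent(). Reconstructed from the hunk, the helper now marks urgency the same way the schedulers do:

void test_iosched_add_urgent_req(struct test_request *test_rq)
{
	spin_lock_irq(&ptd->lock);
	/* Set the flag the schedulers use; blk_mark_rq_urgent() no longer exists. */
	test_rq->rq->cmd_flags |= REQ_URGENT;
	list_add_tail(&test_rq->queuelist, &ptd->urgent_queue);
	ptd->urgent_count++;
	spin_unlock_irq(&ptd->lock);
}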