diff --git a/block/blk-core.c b/block/blk-core.c
index fe1c7e009ca..04604cfc745 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2131,13 +2131,9 @@ struct request *blk_fetch_request(struct request_queue *q)
 
 	rq = blk_peek_request(q);
 	if (rq) {
-		/*
-		 * Assumption: the next request fetched from scheduler after we
-		 * notified "urgent request pending" - will be the urgent one
-		 */
-		if (q->notified_urgent && !q->dispatched_urgent) {
+		if (rq->cmd_flags & REQ_URGENT) {
+			WARN_ON(q->dispatched_urgent);
 			q->dispatched_urgent = true;
-			(void)blk_mark_rq_urgent(rq);
 		}
 		blk_start_request(rq);
 	}
diff --git a/block/blk.h b/block/blk.h
index a52209fdbc1..d45be871329 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -39,7 +39,6 @@ void __generic_unplug_device(struct request_queue *);
  */
 enum rq_atomic_flags {
 	REQ_ATOM_COMPLETE = 0,
-	REQ_ATOM_URGENT = 1,
 };
 
 /*
@@ -56,16 +55,6 @@ static inline void blk_clear_rq_complete(struct request *rq)
 	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 }
 
-static inline int blk_mark_rq_urgent(struct request *rq)
-{
-	return test_and_set_bit(REQ_ATOM_URGENT, &rq->atomic_flags);
-}
-
-static inline void blk_clear_rq_urgent(struct request *rq)
-{
-	clear_bit(REQ_ATOM_URGENT, &rq->atomic_flags);
-}
-
 /*
  * Internal elevator interface
  */
diff --git a/block/elevator.c b/block/elevator.c
index efec4576b67..27adf7c0372 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -814,10 +814,10 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;
 
-	if (test_bit(REQ_ATOM_URGENT, &rq->atomic_flags)) {
+	if (rq->cmd_flags & REQ_URGENT) {
 		q->notified_urgent = false;
+		WARN_ON(!q->dispatched_urgent);
 		q->dispatched_urgent = false;
-		blk_clear_rq_urgent(rq);
 	}
 	/*
 	 * request is released from the driver, io must be done
diff --git a/block/test-iosched.c b/block/test-iosched.c
index c4cfb1795be..2ba216040fb 100644
--- a/block/test-iosched.c
+++ b/block/test-iosched.c
@@ -1191,7 +1191,7 @@ static bool test_urgent_pending(struct request_queue *q)
 void test_iosched_add_urgent_req(struct test_request *test_rq)
 {
 	spin_lock_irq(&ptd->lock);
-	blk_mark_rq_urgent(test_rq->rq);
+	test_rq->rq->cmd_flags |= REQ_URGENT;
 	list_add_tail(&test_rq->queuelist, &ptd->urgent_queue);
 	ptd->urgent_count++;
 	spin_unlock_irq(&ptd->lock);