/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/sched/sysctl.h>

#include "blk.h"

/*
 * for max sense size
 */
#include <scsi/scsi_cmnd.h>
|
|
|
|
|
|
|
|
/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 *
 * Default end_io handler installed by blk_execute_rq().  Wakes up the
 * task sleeping in blk_execute_rq() through the completion stashed in
 * @rq->end_io_data, after dropping the extra request reference that
 * blk_execute_rq() took.
 */
static void blk_end_sync_rq(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	/* drop the extra reference taken by blk_execute_rq() */
	__blk_put_request(rq->q, rq);

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}
|
|
|
|
|
|
|
|
/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q: queue to insert the request in
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 * @done: I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
	bool is_pm_resume;

	/* callers must not hold interrupts disabled: we take queue_lock irq-safe */
	WARN_ON(irqs_disabled());

	rq->rq_disk = bd_disk;
	rq->end_io = done;
	/*
	 * need to check this before __blk_run_queue(), because rq can
	 * be freed before that returns.
	 */
	is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;

	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_dying(q))) {
		/* queue is being torn down: fail the request immediately */
		rq->errors = -ENXIO;
		if (rq->end_io)
			rq->end_io(rq, rq->errors);
		spin_unlock_irq(q->queue_lock);
		return;
	}

	__elv_add_request(q, rq, where);
	__blk_run_queue(q);
	/* the queue is stopped so it won't be run */
	if (is_pm_resume)
		__blk_run_queue_uncond(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
|
|
|
|
|
|
|
|
/**
 * blk_execute_rq - insert a request into queue for execution
 * @q: queue to insert the request in
 * @bd_disk: matching gendisk
 * @rq: request to insert
 * @at_head: insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 *
 * Return: 0 on success, -EIO if the request completed with errors.
 */
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	char sense[SCSI_SENSE_BUFFERSIZE];
	int err = 0;
	unsigned long hang_check;

	/*
	 * we need an extra reference to the request, so we can look at
	 * it after io completion
	 */
	rq->ref_count++;

	/*
	 * Supply an on-stack sense buffer if the caller didn't provide one.
	 * It is only valid for the duration of this call, which is fine
	 * because we wait for completion below before returning.
	 */
	if (!rq->sense) {
		memset(sense, 0, sizeof(sense));
		rq->sense = sense;
		rq->sense_len = 0;
	}

	/* blk_end_sync_rq() will complete(&wait) and drop the extra ref */
	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		/* wake every half-timeout so the hung-task watchdog stays quiet */
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion_io(&wait);

	if (rq->errors)
		err = -EIO;

	return err;
}
EXPORT_SYMBOL(blk_execute_rq);
|