Mirror of https://github.com/followmsi/android_kernel_google_msm.git (synced 2024-11-06 23:17:41 +00:00)
bio: first step in sanitizing the bio->bi_rw flag testing
Get rid of any functions that test for these bits and make callers use bio_rw_flagged() directly. Then it is at least directly apparent what variable and flag they check.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
commit 1f98a13f62
parent e7e503aedb
17 changed files with 54 additions and 60 deletions
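The conversion is mechanical: the bio_rw_flagged() helper in include/linux/bio.h becomes the single way to test a BIO_RW_* bit, and each wrapper (bio_barrier(), bio_sync(), bio_rw_ahead(), ...) is expanded at its call site. A minimal sketch of the before/after pattern, abbreviated from the bio.h and blk-core.c hunks below:

    /* include/linux/bio.h: the one test helper that remains */
    static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
    {
            return (bio->bi_rw & (1 << flag)) != 0;
    }

    /* callers now name both the variable and the flag, e.g. in init_request_from_bio(): */
    if (bio_rw_flagged(bio, BIO_RW_SYNCIO))         /* was: if (bio_sync(bio)) */
            req->cmd_flags |= REQ_RW_SYNC;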
@@ -1114,24 +1114,24 @@ void init_request_from_bio(struct request *req, struct bio *bio)
          * Inherit FAILFAST from bio (for read-ahead, and explicit
          * FAILFAST). FAILFAST flags are identical for req and bio.
          */
-        if (bio_rw_ahead(bio))
+        if (bio_rw_flagged(bio, BIO_RW_AHEAD))
                 req->cmd_flags |= REQ_FAILFAST_MASK;
         else
                 req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
 
-        if (unlikely(bio_discard(bio))) {
+        if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
                 req->cmd_flags |= REQ_DISCARD;
-                if (bio_barrier(bio))
+                if (bio_rw_flagged(bio, BIO_RW_BARRIER))
                         req->cmd_flags |= REQ_SOFTBARRIER;
                 req->q->prepare_discard_fn(req->q, req);
-        } else if (unlikely(bio_barrier(bio)))
+        } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
                 req->cmd_flags |= REQ_HARDBARRIER;
 
-        if (bio_sync(bio))
+        if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
                 req->cmd_flags |= REQ_RW_SYNC;
-        if (bio_rw_meta(bio))
+        if (bio_rw_flagged(bio, BIO_RW_META))
                 req->cmd_flags |= REQ_RW_META;
-        if (bio_noidle(bio))
+        if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
                 req->cmd_flags |= REQ_NOIDLE;
 
         req->errors = 0;
@@ -1155,12 +1155,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
         int el_ret;
         unsigned int bytes = bio->bi_size;
         const unsigned short prio = bio_prio(bio);
-        const int sync = bio_sync(bio);
-        const int unplug = bio_unplug(bio);
+        const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+        const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
         const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
         int rw_flags;
 
-        if (bio_barrier(bio) && bio_has_data(bio) &&
+        if (bio_rw_flagged(bio, BIO_RW_BARRIER) && bio_has_data(bio) &&
             (q->next_ordered == QUEUE_ORDERED_NONE)) {
                 bio_endio(bio, -EOPNOTSUPP);
                 return 0;
@@ -1174,7 +1174,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
         spin_lock_irq(q->queue_lock);
 
-        if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
+        if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
                 goto get_rq;
 
         el_ret = elv_merge(q, &req, bio);
@@ -1470,7 +1470,8 @@ static inline void __generic_make_request(struct bio *bio)
                 if (bio_check_eod(bio, nr_sectors))
                         goto end_io;
 
-                if (bio_discard(bio) && !q->prepare_discard_fn) {
+                if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+                    !q->prepare_discard_fn) {
                         err = -EOPNOTSUPP;
                         goto end_io;
                 }
@@ -257,7 +257,7 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic,
  */
 static inline int cfq_bio_sync(struct bio *bio)
 {
-        if (bio_data_dir(bio) == READ || bio_sync(bio))
+        if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
                 return 1;
 
         return 0;
@@ -79,7 +79,8 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
         /*
          * Don't merge file system requests and discard requests
          */
-        if (bio_discard(bio) != bio_discard(rq->bio))
+        if (bio_rw_flagged(bio, BIO_RW_DISCARD) !=
+            bio_rw_flagged(rq->bio, BIO_RW_DISCARD))
                 return 0;
 
         /*
@@ -475,7 +475,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
         pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
         if (bio_rw(bio) == WRITE) {
-                int barrier = bio_barrier(bio);
+                bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
                 struct file *file = lo->lo_backing_file;
 
                 if (barrier) {
@@ -1129,7 +1129,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
         if (error == -EOPNOTSUPP)
                 goto out;
 
-        if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+        if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
                 goto out;
 
         if (unlikely(error)) {
@@ -285,7 +285,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
         if (!error)
                 return 0; /* I/O complete */
 
-        if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+        if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
                 return error;
 
         if (error == -EOPNOTSUPP)
@@ -586,7 +586,7 @@ static void dec_pending(struct dm_io *io, int error)
                  */
                 spin_lock_irqsave(&md->deferred_lock, flags);
                 if (__noflush_suspending(md)) {
-                        if (!bio_barrier(io->bio))
+                        if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
                                 bio_list_add_head(&md->deferred,
                                                   io->bio);
                 } else
@@ -598,7 +598,7 @@ static void dec_pending(struct dm_io *io, int error)
                 io_error = io->error;
                 bio = io->bio;
 
-                if (bio_barrier(bio)) {
+                if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
                         /*
                          * There can be just one barrier request so we use
                          * a per-device variable for error reporting.
@@ -1209,7 +1209,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
         ci.map = dm_get_table(md);
         if (unlikely(!ci.map)) {
-                if (!bio_barrier(bio))
+                if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
                         bio_io_error(bio);
                 else
                         if (!md->barrier_error)
@@ -1321,7 +1321,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
          * we have to queue this io for later.
          */
         if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-            unlikely(bio_barrier(bio))) {
+            unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
                 up_read(&md->io_lock);
 
                 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -1344,7 +1344,7 @@ static int dm_make_request(struct request_queue *q, struct bio *bio)
 {
         struct mapped_device *md = q->queuedata;
 
-        if (unlikely(bio_barrier(bio))) {
+        if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
                 bio_endio(bio, -EOPNOTSUPP);
                 return 0;
         }
@@ -2164,7 +2164,7 @@ static void dm_wq_work(struct work_struct *work)
                 if (dm_request_based(md))
                         generic_make_request(c);
                 else {
-                        if (bio_barrier(c))
+                        if (bio_rw_flagged(c, BIO_RW_BARRIER))
                                 process_barrier(md, c);
                         else
                                 __split_and_process_bio(md, c);
@@ -288,7 +288,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
         sector_t start_sector;
         int cpu;
 
-        if (unlikely(bio_barrier(bio))) {
+        if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
                 bio_endio(bio, -EOPNOTSUPP);
                 return 0;
         }
@@ -90,7 +90,7 @@ static void multipath_end_request(struct bio *bio, int error)
 
         if (uptodate)
                 multipath_end_bh_io(mp_bh, 0);
-        else if (!bio_rw_ahead(bio)) {
+        else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
                 /*
                  * oops, IO error:
                  */
@@ -144,7 +144,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
         const int rw = bio_data_dir(bio);
         int cpu;
 
-        if (unlikely(bio_barrier(bio))) {
+        if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
                 bio_endio(bio, -EOPNOTSUPP);
                 return 0;
         }
@@ -448,7 +448,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
         const int rw = bio_data_dir(bio);
         int cpu;
 
-        if (unlikely(bio_barrier(bio))) {
+        if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
                 bio_endio(bio, -EOPNOTSUPP);
                 return 0;
         }
@@ -782,8 +782,9 @@ static int make_request(struct request_queue *q, struct bio * bio)
         struct bio_list bl;
         struct page **behind_pages = NULL;
         const int rw = bio_data_dir(bio);
-        const int do_sync = bio_sync(bio);
-        int cpu, do_barriers;
+        const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+        int cpu;
+        bool do_barriers;
         mdk_rdev_t *blocked_rdev;
 
         /*
@@ -797,7 +798,8 @@ static int make_request(struct request_queue *q, struct bio * bio)
 
         md_write_start(mddev, bio); /* wait on superblock update early */
 
-        if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
+        if (unlikely(!mddev->barriers_work &&
+                     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
                 if (rw == WRITE)
                         md_write_end(mddev);
                 bio_endio(bio, -EOPNOTSUPP);
@@ -925,7 +927,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
         atomic_set(&r1_bio->remaining, 0);
         atomic_set(&r1_bio->behind_remaining, 0);
 
-        do_barriers = bio_barrier(bio);
+        do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
         if (do_barriers)
                 set_bit(R1BIO_Barrier, &r1_bio->state);
 
@@ -1600,7 +1602,7 @@ static void raid1d(mddev_t *mddev)
                          * We already have a nr_pending reference on these rdevs.
                          */
                         int i;
-                        const int do_sync = bio_sync(r1_bio->master_bio);
+                        const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
                         clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
                         clear_bit(R1BIO_Barrier, &r1_bio->state);
                         for (i=0; i < conf->raid_disks; i++)
@@ -1654,7 +1656,7 @@ static void raid1d(mddev_t *mddev)
                                        (unsigned long long)r1_bio->sector);
                                 raid_end_bio_io(r1_bio);
                         } else {
-                                const int do_sync = bio_sync(r1_bio->master_bio);
+                                const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
                                 r1_bio->bios[r1_bio->read_disk] =
                                         mddev->ro ? IO_BLOCKED : NULL;
                                 r1_bio->read_disk = disk;
@@ -796,12 +796,12 @@ static int make_request(struct request_queue *q, struct bio * bio)
         int i;
         int chunk_sects = conf->chunk_mask + 1;
         const int rw = bio_data_dir(bio);
-        const int do_sync = bio_sync(bio);
+        const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
         struct bio_list bl;
         unsigned long flags;
         mdk_rdev_t *blocked_rdev;
 
-        if (unlikely(bio_barrier(bio))) {
+        if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
                 bio_endio(bio, -EOPNOTSUPP);
                 return 0;
         }
@@ -1610,7 +1610,7 @@ static void raid10d(mddev_t *mddev)
                         raid_end_bio_io(r10_bio);
                         bio_put(bio);
                 } else {
-                        const int do_sync = bio_sync(r10_bio->master_bio);
+                        const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
                         bio_put(bio);
                         rdev = conf->mirrors[mirror].rdev;
                         if (printk_ratelimit())
@@ -3606,7 +3606,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
         const int rw = bio_data_dir(bi);
         int cpu, remaining;
 
-        if (unlikely(bio_barrier(bi))) {
+        if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
                 bio_endio(bi, -EOPNOTSUPP);
                 return 0;
         }
@@ -112,8 +112,9 @@ static int dst_request(struct request_queue *q, struct bio *bio)
                  * I worked with.
                  *
                  * Empty barriers are not allowed anyway, see 51fd77bd9f512
-                 * for example, although later it was changed to bio_discard()
-                 * only, which does not work in this case.
+                 * for example, although later it was changed to
+                 * bio_rw_flagged(bio, BIO_RW_DISCARD) only, which does not
+                 * work in this case.
                  */
                 //err = -EOPNOTSUPP;
                 err = 0;
@@ -260,7 +260,7 @@ loop_lock:
                 num_run++;
                 batch_run++;
 
-                if (bio_sync(cur))
+                if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
                         num_sync_run++;
 
                 if (need_resched()) {
@@ -2903,7 +2903,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
         bio->bi_rw |= rw;
 
         spin_lock(&device->io_lock);
-        if (bio_sync(bio))
+        if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
                 pending_bios = &device->pending_sync_bios;
         else
                 pending_bios = &device->pending_bios;
@@ -177,28 +177,17 @@ enum bio_rw_flags {
         BIO_RW_NOIDLE,
 };
 
+/*
+ * First four bits must match between bio->bi_rw and rq->cmd_flags, make
+ * that explicit here.
+ */
+#define BIO_RW_RQ_MASK          0xf
+
 static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
 {
         return (bio->bi_rw & (1 << flag)) != 0;
 }
 
-/*
- * Old defines, these should eventually be replaced by direct usage of
- * bio_rw_flagged()
- */
-#define bio_barrier(bio)        bio_rw_flagged(bio, BIO_RW_BARRIER)
-#define bio_sync(bio)           bio_rw_flagged(bio, BIO_RW_SYNCIO)
-#define bio_unplug(bio)         bio_rw_flagged(bio, BIO_RW_UNPLUG)
-#define bio_failfast_dev(bio)   bio_rw_flagged(bio, BIO_RW_FAILFAST_DEV)
-#define bio_failfast_transport(bio)     \
-        bio_rw_flagged(bio, BIO_RW_FAILFAST_TRANSPORT)
-#define bio_failfast_driver(bio)        \
-        bio_rw_flagged(bio, BIO_RW_FAILFAST_DRIVER)
-#define bio_rw_ahead(bio)       bio_rw_flagged(bio, BIO_RW_AHEAD)
-#define bio_rw_meta(bio)        bio_rw_flagged(bio, BIO_RW_META)
-#define bio_discard(bio)        bio_rw_flagged(bio, BIO_RW_DISCARD)
-#define bio_noidle(bio)         bio_rw_flagged(bio, BIO_RW_NOIDLE)
-
 /*
  * upper 16 bits of bi_rw define the io priority of this bio
  */
@@ -222,7 +211,7 @@ static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
 #define bio_offset(bio)         bio_iovec((bio))->bv_offset
 #define bio_segments(bio)       ((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)        ((bio)->bi_size >> 9)
-#define bio_empty_barrier(bio)  (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
+#define bio_empty_barrier(bio)  (bio_rw_flagged(bio, BIO_RW_BARRIER) && !bio_has_data(bio) && !bio_rw_flagged(bio, BIO_RW_DISCARD))
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
@@ -86,7 +86,7 @@ enum {
 };
 
 /*
- * request type modified bits. first two bits match BIO_RW* bits, important
+ * request type modified bits. first four bits match BIO_RW* bits, important
  */
 enum rq_flag_bits {
         __REQ_RW,       /* not set, read. set, write */