block: Add bio_for_each_segment_all()
__bio_for_each_segment() iterates bvecs from the specified index instead of bio->bi_idx. Currently, the only usage is to walk all the bvecs after the bio has been advanced, by specifying index 0.

For immutable bvecs, we need to split these apart; bio_for_each_segment() is going to have a different implementation. This will also help document the intent of code that's using it - bio_for_each_segment_all() is only legal to use for code that owns the bio.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
CC: Neil Brown <neilb@suse.de>
CC: Boaz Harrosh <bharrosh@panasas.com>
parent 6bc454d150
commit d74c6d514f

7 changed files with 25 additions and 14 deletions
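To make the ownership rule concrete, here is a minimal sketch, not part of this commit: a hypothetical completion handler (my_end_io is an invented name) for code that allocated the bio itself and therefore owns every bvec. Such code may walk all segments from index 0 with the new bio_for_each_segment_all(); a driver that might receive a split bio must instead use bio_for_each_segment(), which starts at bio->bi_idx.

        /* Hedged sketch (hypothetical my_end_io, not from this commit):
         * the allocator of the bio owns every bvec, so iterating all of
         * them with bio_for_each_segment_all() is legal here.
         */
        static void my_end_io(struct bio *bio, int err)
        {
                struct bio_vec *bvec;
                int i;

                /* Owner-only walk: visits bvecs 0..bi_vcnt-1 regardless of bi_idx. */
                bio_for_each_segment_all(bvec, bio, i)
                        put_page(bvec->bv_page);

                bio_put(bio);
        }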
@@ -952,7 +952,7 @@ static struct bio *bio_clone_range(struct bio *bio_src,
 	/* Find first affected segment... */
 
 	resid = offset;
-	__bio_for_each_segment(bv, bio_src, idx, 0) {
+	bio_for_each_segment(bv, bio_src, idx) {
 		if (resid < bv->bv_len)
 			break;
 		resid -= bv->bv_len;
@@ -1291,7 +1291,7 @@ read_again:
 				 * know the original bi_idx, so we just free
 				 * them all
 				 */
-				__bio_for_each_segment(bvec, mbio, j, 0)
+				bio_for_each_segment_all(bvec, mbio, j)
 					bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
 			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
 				atomic_inc(&r1_bio->behind_remaining);
fs/bio.c | 12 ++++++------
@@ -961,7 +961,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 	int iov_idx = 0;
 	unsigned int iov_off = 0;
 
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		char *bv_addr = page_address(bvec->bv_page);
 		unsigned int bv_len = iovecs[i].bv_len;
@@ -1143,7 +1143,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 	return bio;
 cleanup:
 	if (!map_data)
-		bio_for_each_segment(bvec, bio, i)
+		bio_for_each_segment_all(bvec, bio, i)
 			__free_page(bvec->bv_page);
 
 	bio_put(bio);
@@ -1357,7 +1357,7 @@ static void __bio_unmap_user(struct bio *bio)
 	/*
 	 * make sure we dirty pages we wrote to
 	 */
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		if (bio_data_dir(bio) == READ)
 			set_page_dirty_lock(bvec->bv_page);
@@ -1463,7 +1463,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
 	int i;
 	char *p = bmd->sgvecs[0].iov_base;
 
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		char *addr = page_address(bvec->bv_page);
 		int len = bmd->iovecs[i].bv_len;
@@ -1503,7 +1503,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 	if (!reading) {
 		void *p = data;
 
-		bio_for_each_segment(bvec, bio, i) {
+		bio_for_each_segment_all(bvec, bio, i) {
 			char *addr = page_address(bvec->bv_page);
 
 			memcpy(addr, p, bvec->bv_len);
@@ -1789,7 +1789,7 @@ sector_t bio_sector_offset(struct bio *bio, unsigned short index,
 	if (index >= bio->bi_idx)
 		index = bio->bi_vcnt - 1;
 
-	__bio_for_each_segment(bv, bio, i, 0) {
+	bio_for_each_segment_all(bv, bio, i) {
 		if (i == index) {
 			if (offset > bv->bv_offset)
 				sectors += (offset - bv->bv_offset) / sector_sz;
@@ -401,7 +401,7 @@ static void _clear_bio(struct bio *bio)
 	struct bio_vec *bv;
 	unsigned i;
 
-	__bio_for_each_segment(bv, bio, i, 0) {
+	bio_for_each_segment_all(bv, bio, i) {
 		unsigned this_count = bv->bv_len;
 
 		if (likely(PAGE_SIZE == this_count))
@@ -432,7 +432,7 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
 		if (!bio)
 			continue;
 
-		__bio_for_each_segment(bv, bio, i, 0) {
+		bio_for_each_segment_all(bv, bio, i) {
 			struct page *page = bv->bv_page;
 
 			SetPageUptodate(page);
@@ -137,16 +137,27 @@ static inline int bio_has_allocated_vec(struct bio *bio)
 #define bio_io_error(bio) bio_endio((bio), -EIO)
 
 /*
- * drivers should not use the __ version unless they _really_ want to
- * run through the entire bio and not just pending pieces
+ * drivers should not use the __ version unless they _really_ know what
+ * they're doing
  */
 #define __bio_for_each_segment(bvl, bio, i, start_idx)			\
 	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
 	     i < (bio)->bi_vcnt;					\
 	     bvl++, i++)
 
+/*
+ * drivers should _never_ use the all version - the bio may have been split
+ * before it got to the driver and the driver won't own all of it
+ */
+#define bio_for_each_segment_all(bvl, bio, i)				\
+	for (i = 0;							\
+	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
+	     i++)
+
 #define bio_for_each_segment(bvl, bio, i)				\
-	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
+	for (i = (bio)->bi_idx;						\
+	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
+	     i++)
 
 /*
  * get a reference to a bio, so it won't disappear. the intended use is
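Note the shape of the rewritten loops: the condition uses the C comma operator, so `bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt` first refreshes the cursor and then tests the bound, meaning the body only ever runs with a valid bvl. Below is a minimal userspace sketch of the same pattern; the names (vec, vec_for_each_all) are illustrative, not kernel code:

        #include <stdio.h>

        struct vec { unsigned len; };

        /* Same shape as bio_for_each_segment_all(): the comma expression
         * sets the cursor first, then the bound check decides whether the
         * body runs.  When i == n, &(arr)[i] is a one-past-the-end pointer,
         * which is computed but never dereferenced. */
        #define vec_for_each_all(v, arr, i, n) \
                for (i = 0; v = &(arr)[i], i < (n); i++)

        int main(void)
        {
                struct vec segs[3] = { {512}, {1024}, {2048} };
                struct vec *v;
                int i;

                vec_for_each_all(v, segs, i, 3)
                        printf("segment %d: %u bytes\n", i, v->len);
                return 0;
        }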
@@ -134,7 +134,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 	/*
 	 * free up bounce indirect pages used
 	 */
-	__bio_for_each_segment(bvec, bio, i, 0) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		org_vec = bio_orig->bi_io_vec + i;
 		if (bvec->bv_page == org_vec->bv_page)
 			continue;