block: Use accessor functions for queue limits
Convert all external users of queue limits to using wrapper functions
instead of poking the request queue variables directly.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

parent e1defc4ff0
commit ae03bf639a

25 changed files with 147 additions and 97 deletions
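The change is mechanical: every external read of a queue limit field (max_sectors, max_hw_sectors, max_phys_segments, max_hw_segments, max_segment_size, seg_boundary_mask, bounce_pfn) is replaced by the corresponding queue_*() inline accessor, and the few direct writes go through the blk_queue_*() setters. The accessors themselves are introduced in the final header hunk of this diff. A minimal sketch of the resulting call pattern, using a hypothetical driver-side helper for illustration only:

/* Hypothetical example, not part of this commit: shows the accessor
 * pattern applied tree-wide by the diff below. */
static int example_check_io(struct request_queue *q, unsigned int nr_sects,
			    unsigned short nr_segs)
{
	/* was: nr_sects > q->max_hw_sectors */
	if (nr_sects > queue_max_hw_sectors(q))
		return -EIO;

	/* was: nr_segs > q->max_phys_segments || nr_segs > q->max_hw_segments */
	if (nr_segs > queue_max_phys_segments(q) ||
	    nr_segs > queue_max_hw_segments(q))
		return -EIO;

	return 0;
}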
@@ -388,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev,
 		bio->bi_sector = sector;
 
-		if (nr_sects > q->max_hw_sectors) {
-			bio->bi_size = q->max_hw_sectors << 9;
-			nr_sects -= q->max_hw_sectors;
-			sector += q->max_hw_sectors;
+		if (nr_sects > queue_max_hw_sectors(q)) {
+			bio->bi_size = queue_max_hw_sectors(q) << 9;
+			nr_sects -= queue_max_hw_sectors(q);
+			sector += queue_max_hw_sectors(q);
 		} else {
 			bio->bi_size = nr_sects << 9;
 			nr_sects = 0;

@@ -1437,11 +1437,11 @@ static inline void __generic_make_request(struct bio *bio)
 			goto end_io;
 		}
 
-		if (unlikely(nr_sectors > q->max_hw_sectors)) {
+		if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
 			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 				bdevname(bio->bi_bdev, b),
 				bio_sectors(bio),
-				q->max_hw_sectors);
+				queue_max_hw_sectors(q));
 			goto end_io;
 		}
 

@@ -1608,8 +1608,8 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-	if (blk_rq_sectors(rq) > q->max_sectors ||
-	    blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
+	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
+	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
 		return -EIO;
 	}

@@ -1621,8 +1621,8 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 	 * limitation.
 	 */
 	blk_recalc_rq_segments(rq);
-	if (rq->nr_phys_segments > q->max_phys_segments ||
-	    rq->nr_phys_segments > q->max_hw_segments) {
+	if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
+	    rq->nr_phys_segments > queue_max_hw_segments(q)) {
 		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
 		return -EIO;
 	}

@@ -115,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 	struct bio *bio = NULL;
 	int ret;
 
-	if (len > (q->max_hw_sectors << 9))
+	if (len > (queue_max_hw_sectors(q) << 9))
 		return -EINVAL;
 	if (!len)
 		return -EINVAL;

@@ -292,7 +292,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	struct bio *bio;
 	int ret;
 
-	if (len > (q->max_hw_sectors << 9))
+	if (len > (queue_max_hw_sectors(q) << 9))
 		return -EINVAL;
 	if (!len || !kbuf)
 		return -EINVAL;

@@ -32,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		 * never considered part of another segment, since that
 		 * might change with the bounce page.
 		 */
-		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+		high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
 		if (high || highprv)
 			goto new_segment;
 		if (cluster) {
-			if (seg_size + bv->bv_len > q->max_segment_size)
+			if (seg_size + bv->bv_len
+			    > queue_max_segment_size(q))
 				goto new_segment;
 			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
 				goto new_segment;

@@ -91,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 		return 0;
 
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
-	    q->max_segment_size)
+	    queue_max_segment_size(q))
 		return 0;
 
 	if (!bio_has_data(bio))

@@ -134,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 			int nbytes = bvec->bv_len;
 
 			if (bvprv && cluster) {
-				if (sg->length + nbytes > q->max_segment_size)
+				if (sg->length + nbytes > queue_max_segment_size(q))
 					goto new_segment;
 
 				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))

@@ -205,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
-	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+	if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
+	    req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;

@@ -227,9 +228,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	unsigned short max_sectors;
 
 	if (unlikely(blk_pc_request(req)))
-		max_sectors = q->max_hw_sectors;
+		max_sectors = queue_max_hw_sectors(q);
 	else
-		max_sectors = q->max_sectors;
+		max_sectors = queue_max_sectors(q);
 
 	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
 		req->cmd_flags |= REQ_NOMERGE;

@@ -251,9 +252,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 	unsigned short max_sectors;
 
 	if (unlikely(blk_pc_request(req)))
-		max_sectors = q->max_hw_sectors;
+		max_sectors = queue_max_hw_sectors(q);
 	else
-		max_sectors = q->max_sectors;
+		max_sectors = queue_max_sectors(q);
 
 
 	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {

@@ -287,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	/*
 	 * Will it become too large?
 	 */
-	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
+	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;

@@ -299,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		total_phys_segments--;
 	}
 
-	if (total_phys_segments > q->max_phys_segments)
+	if (total_phys_segments > queue_max_phys_segments(q))
 		return 0;
 
-	if (total_phys_segments > q->max_hw_segments)
+	if (total_phys_segments > queue_max_hw_segments(q))
 		return 0;
 
 	/* Merge is OK... */

@@ -219,6 +219,15 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 }
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
+{
+	if (BLK_DEF_MAX_SECTORS > max_sectors)
+		q->max_hw_sectors = BLK_DEF_MAX_SECTORS;
+	else
+		q->max_hw_sectors = max_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_hw_sectors);
+
 /**
  * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q: the request queue for the device

@@ -395,11 +404,11 @@ int blk_queue_dma_drain(struct request_queue *q,
 			       dma_drain_needed_fn *dma_drain_needed,
 			       void *buf, unsigned int size)
 {
-	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
-	--q->max_hw_segments;
-	--q->max_phys_segments;
+	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
+	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
 	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;

@@ -95,7 +95,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 {
-	int max_sectors_kb = q->max_sectors >> 1;
+	int max_sectors_kb = queue_max_sectors(q) >> 1;
 
 	return queue_var_show(max_sectors_kb, (page));
 }

@@ -109,7 +109,7 @@ static ssize_t
 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
 	unsigned long max_sectors_kb,
-		max_hw_sectors_kb = q->max_hw_sectors >> 1,
+		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
 			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
 	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 

@@ -117,7 +117,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 		return -EINVAL;
 
 	spin_lock_irq(q->queue_lock);
-	q->max_sectors = max_sectors_kb << 1;
+	blk_queue_max_sectors(q, max_sectors_kb << 1);
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;

@@ -125,7 +125,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 {
-	int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
 
 	return queue_var_show(max_hw_sectors_kb, (page));
 }

@@ -766,7 +766,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		return compat_put_int(arg, bdev_logical_block_size(bdev));
 	case BLKSECTGET:
 		return compat_put_ushort(arg,
-					 bdev_get_queue(bdev)->max_sectors);
+					 queue_max_sectors(bdev_get_queue(bdev)));
 	case BLKRASET: /* compatible, but no compat_ptr (!) */
 	case BLKFRASET:
 		if (!capable(CAP_SYS_ADMIN))

@@ -152,10 +152,10 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
 		bio->bi_private = &wait;
 		bio->bi_sector = start;
 
-		if (len > q->max_hw_sectors) {
-			bio->bi_size = q->max_hw_sectors << 9;
-			len -= q->max_hw_sectors;
-			start += q->max_hw_sectors;
+		if (len > queue_max_hw_sectors(q)) {
+			bio->bi_size = queue_max_hw_sectors(q) << 9;
+			len -= queue_max_hw_sectors(q);
+			start += queue_max_hw_sectors(q);
 		} else {
 			bio->bi_size = len << 9;
 			len = 0;

@@ -313,7 +313,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case BLKSSZGET: /* get block device hardware sector size */
 		return put_int(arg, bdev_logical_block_size(bdev));
 	case BLKSECTGET:
-		return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
+		return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
 	case BLKRASET:
 	case BLKFRASET:
 		if(!capable(CAP_SYS_ADMIN))

@@ -75,7 +75,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p)
 
 static int sg_get_reserved_size(struct request_queue *q, int __user *p)
 {
-	unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
+	unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);
 
 	return put_user(val, p);
 }

@@ -89,8 +89,8 @@ static int sg_set_reserved_size(struct request_queue *q, int __user *p)
 
 	if (size < 0)
 		return -EINVAL;
-	if (size > (q->max_sectors << 9))
-		size = q->max_sectors << 9;
+	if (size > (queue_max_sectors(q) << 9))
+		size = queue_max_sectors(q) << 9;
 
 	q->sg_reserved_size = size;
 	return 0;

@@ -264,7 +264,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	if (hdr->cmd_len > BLK_MAX_CDB)
 		return -EINVAL;
 
-	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
+	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
 		return -EIO;
 
 	if (hdr->dxfer_len)

@@ -991,13 +991,15 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
  */
 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
-	if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
+	if ((pd->settings.size << 9) / CD_FRAMESIZE
+	    <= queue_max_phys_segments(q)) {
 		/*
 		 * The cdrom device can handle one segment/frame
 		 */
 		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
 		return 0;
-	} else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
+	} else if ((pd->settings.size << 9) / PAGE_SIZE
+		   <= queue_max_phys_segments(q)) {
 		/*
 		 * We can handle this case at the expense of some extra memory
 		 * copies during write operations

@@ -2101,8 +2101,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 	nr = nframes;
 	if (cdi->cdda_method == CDDA_BPC_SINGLE)
 		nr = 1;
-	if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9))
-		nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW;
+	if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
+		nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
 
 	len = nr * CD_FRAMESIZE_RAW;
 

@@ -510,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	 *        combine_restrictions_low()
 	 */
 	rs->max_sectors =
-		min_not_zero(rs->max_sectors, q->max_sectors);
+		min_not_zero(rs->max_sectors, queue_max_sectors(q));
 
 	/*
 	 * Check if merge fn is supported.

@@ -525,25 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 
 	rs->max_phys_segments =
 		min_not_zero(rs->max_phys_segments,
-			     q->max_phys_segments);
+			     queue_max_phys_segments(q));
 
 	rs->max_hw_segments =
-		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+		min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
 
 	rs->logical_block_size = max(rs->logical_block_size,
 				     queue_logical_block_size(q));
 
 	rs->max_segment_size =
-		min_not_zero(rs->max_segment_size, q->max_segment_size);
+		min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
 
 	rs->max_hw_sectors =
-		min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+		min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
 
 	rs->seg_boundary_mask =
 		min_not_zero(rs->seg_boundary_mask,
-			     q->seg_boundary_mask);
+			     queue_segment_boundary(q));
 
-	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
 
 	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }

@@ -914,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	 * restrictions.
 	 */
 	blk_queue_max_sectors(q, t->limits.max_sectors);
-	q->max_phys_segments = t->limits.max_phys_segments;
-	q->max_hw_segments = t->limits.max_hw_segments;
-	q->logical_block_size = t->limits.logical_block_size;
-	q->max_segment_size = t->limits.max_segment_size;
-	q->max_hw_sectors = t->limits.max_hw_sectors;
-	q->seg_boundary_mask = t->limits.seg_boundary_mask;
-	q->bounce_pfn = t->limits.bounce_pfn;
+	blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
+	blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
+	blk_queue_logical_block_size(q, t->limits.logical_block_size);
+	blk_queue_max_segment_size(q, t->limits.max_segment_size);
+	blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
+	blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
+	blk_queue_bounce_limit(q, t->limits.bounce_pfn);
 
 	if (t->limits.no_cluster)
 		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);

@@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->num_sectors = rdev->sectors;

@@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * merge_bvec_fn will be involved in multipath.)
 			 */
 			if (q->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+			    queue_max_sectors(q) > (PAGE_SIZE>>9))
 				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			conf->working_disks++;

@@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev)
 		 * violating it, not that we ever expect a device with
 		 * a merge_bvec_fn to be involved in multipath */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!test_bit(Faulty, &rdev->flags))

@@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev)
 		 */
 
 		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		if (!smallest || (rdev1->sectors < smallest->sectors))

@@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * a one page request is never in violation.
 			 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+			    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			p->head_position = 0;

@@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->head_position = 0;

@@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			 * a one page request is never in violation.
 			 */
 			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-			    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-				mddev->queue->max_sectors = (PAGE_SIZE>>9);
+			    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+				blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 			p->head_position = 0;
 			rdev->raid_disk = mirror;

@@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev)
 		 * a one page request is never in violation.
 		 */
 		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
-			mddev->queue->max_sectors = (PAGE_SIZE>>9);
+		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->head_position = 0;
 	}

@@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi)
 {
 	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
-	if ((bi->bi_size>>9) > q->max_sectors)
+	if ((bi->bi_size>>9) > queue_max_sectors(q))
 		return 0;
 	blk_recount_segments(q, bi);
-	if (bi->bi_phys_segments > q->max_phys_segments)
+	if (bi->bi_phys_segments > queue_max_phys_segments(q))
 		return 0;
 
 	if (q->merge_bvec_fn)

@@ -289,8 +289,8 @@ sg_open(struct inode *inode, struct file *filp)
 	if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
 		sdp->sgdebug = 0;
 		q = sdp->device->request_queue;
-		sdp->sg_tablesize = min(q->max_hw_segments,
-					q->max_phys_segments);
+		sdp->sg_tablesize = min(queue_max_hw_segments(q),
+					queue_max_phys_segments(q));
 	}
 	if ((sfp = sg_add_sfp(sdp, dev)))
 		filp->private_data = sfp;

@@ -909,7 +909,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
 		if (val < 0)
 			return -EINVAL;
 		val = min_t(int, val,
-			    sdp->device->request_queue->max_sectors * 512);
+			    queue_max_sectors(sdp->device->request_queue) * 512);
 		if (val != sfp->reserve.bufflen) {
 			if (sg_res_in_use(sfp) || sfp->mmap_called)
 				return -EBUSY;

@@ -919,7 +919,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
 		return 0;
 	case SG_GET_RESERVED_SIZE:
 		val = min_t(int, sfp->reserve.bufflen,
-			    sdp->device->request_queue->max_sectors * 512);
+			    queue_max_sectors(sdp->device->request_queue) * 512);
 		return put_user(val, ip);
 	case SG_SET_COMMAND_Q:
 		result = get_user(val, ip);

@@ -1059,7 +1059,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
 			return -ENODEV;
 		return scsi_ioctl(sdp->device, cmd_in, p);
 	case BLKSECTGET:
-		return put_user(sdp->device->request_queue->max_sectors * 512,
+		return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
 				ip);
 	case BLKTRACESETUP:
 		return blk_trace_setup(sdp->device->request_queue,

@@ -1377,7 +1377,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
 	sdp->device = scsidp;
 	INIT_LIST_HEAD(&sdp->sfds);
 	init_waitqueue_head(&sdp->o_excl_wait);
-	sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
+	sdp->sg_tablesize = min(queue_max_hw_segments(q),
+				queue_max_phys_segments(q));
 	sdp->index = k;
 	kref_init(&sdp->d_ref);
 

@@ -2055,7 +2056,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
 		sg_big_buff = def_reserved_size;
 
 	bufflen = min_t(int, sg_big_buff,
-			sdp->device->request_queue->max_sectors * 512);
+			queue_max_sectors(sdp->device->request_queue) * 512);
 	sg_build_reserve(sfp, bufflen);
 	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
 			   sfp->reserve.bufflen, sfp->reserve.k_use_sg));

@@ -3983,8 +3983,8 @@ static int st_probe(struct device *dev)
 		return -ENODEV;
 	}
 
-	i = min(SDp->request_queue->max_hw_segments,
-		SDp->request_queue->max_phys_segments);
+	i = min(queue_max_hw_segments(SDp->request_queue),
+		queue_max_phys_segments(SDp->request_queue));
 	if (st_max_sg_segs < i)
 		i = st_max_sg_segs;
 	buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);

@@ -132,7 +132,7 @@ static int slave_configure(struct scsi_device *sdev)
 
 		if (us->fflags & US_FL_MAX_SECTORS_MIN)
 			max_sectors = PAGE_CACHE_SIZE >> 9;
-		if (sdev->request_queue->max_sectors > max_sectors)
+		if (queue_max_sectors(sdev->request_queue) > max_sectors)
 			blk_queue_max_sectors(sdev->request_queue,
 					      max_sectors);
 	} else if (sdev->type == TYPE_TAPE) {

@@ -483,7 +483,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
 
-	return sprintf(buf, "%u\n", sdev->request_queue->max_sectors);
+	return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue));
 }
 
 /* Input routine for the sysfs max_sectors file */

fs/bio.c (19 changed lines)

@@ -499,11 +499,11 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	struct request_queue *q = bdev_get_queue(bdev);
 	int nr_pages;
 
-	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (nr_pages > q->max_phys_segments)
-		nr_pages = q->max_phys_segments;
-	if (nr_pages > q->max_hw_segments)
-		nr_pages = q->max_hw_segments;
+	nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (nr_pages > queue_max_phys_segments(q))
+		nr_pages = queue_max_phys_segments(q);
+	if (nr_pages > queue_max_hw_segments(q))
+		nr_pages = queue_max_hw_segments(q);
 
 	return nr_pages;
 }

@@ -562,8 +562,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 		 * make this too complex.
 		 */
 
-		while (bio->bi_phys_segments >= q->max_phys_segments
-		       || bio->bi_phys_segments >= q->max_hw_segments) {
+		while (bio->bi_phys_segments >= queue_max_phys_segments(q)
+		       || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
 
 			if (retried_segments)
 				return 0;

@@ -634,7 +634,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
 		    unsigned int len, unsigned int offset)
 {
-	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
+	return __bio_add_page(q, bio, page, len, offset,
+			      queue_max_hw_sectors(q));
 }
 
 /**

@@ -654,7 +655,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
+	return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
 }
 
 struct bio_map_data {

@@ -279,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio)
 #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
 	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
-	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
+	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
 #define BIO_SEG_BOUNDARY(q, b1, b2) \
 	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
 

@@ -898,6 +898,7 @@ extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);

@@ -988,6 +989,41 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+	return q->bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+	return q->seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+	return q->max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+	return q->max_hw_sectors;
+}
+
+static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+{
+	return q->max_hw_segments;
+}
+
+static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+{
+	return q->max_phys_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+	return q->max_segment_size;
+}
+
 static inline unsigned short queue_logical_block_size(struct request_queue *q)
 {
 	int retval = 512;

@@ -192,7 +192,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 		/*
 		 * is destination page below bounce pfn?
 		 */
-		if (page_to_pfn(page) <= q->bounce_pfn)
+		if (page_to_pfn(page) <= queue_bounce_pfn(q))
 			continue;
 
 		/*

@@ -284,7 +284,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 	 * don't waste time iterating over bio segments
 	 */
 	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (q->bounce_pfn >= blk_max_pfn)
+		if (queue_bounce_pfn(q) >= blk_max_pfn)
 			return;
 		pool = page_pool;
 	} else {