block: ioctl support for sanitize in eMMC 4.5

Add a new ioctl to support the sanitize operation on eMMC
cards, version 4.5.
The BLKSANITIZE ioctl lets a user-space application trigger
this operation on a block device.
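
For illustration, a user-space caller could invoke the new ioctl
roughly as follows. This is a minimal sketch, not part of the patch;
the device path is an example, and it assumes headers carrying the
BLKSANITIZE definition from this change are installed:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>	/* BLKSANITIZE, with this patch applied */

	int main(void)
	{
		/* Example eMMC device node */
		int fd = open("/dev/mmcblk0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* BLKSANITIZE takes no argument; it applies to the whole device */
		if (ioctl(fd, BLKSANITIZE) < 0)
			perror("BLKSANITIZE");	/* e.g. EOPNOTSUPP if unsupported */

		close(fd);
		return 0;
	}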

Change-Id: I79aa4163e7753a75bed5a26a9a92de902b4b9c21
Signed-off-by: Yaniv Gardi <ygardi@codeaurora.org>
Signed-off-by: Maya Erez <merez@codeaurora.org>
(cherry picked from commit 73937f5face75e05ec2a72966d04f4e20aa18379)

Conflicts:

	block/blk-core.c
	block/elevator.c
	include/linux/blk_types.h
	include/linux/blkdev.h
	include/linux/fs.h
Author:    Maya Erez
Date:      2012-05-24 23:33:05 +03:00
Committer: Stephen Boyd
parent     ad9f4504bc
commit     e7b054e8c1
9 changed files with 126 additions and 6 deletions

block/blk-core.c

@@ -1546,7 +1546,7 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
-	if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+	if (unlikely(!(bio->bi_rw & (REQ_DISCARD | REQ_SANITIZE)) &&
		     nr_sectors > queue_max_hw_sectors(q))) {
 		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 		       bdevname(bio->bi_bdev, b),
@@ -1594,6 +1594,14 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
+	if ((bio->bi_rw & REQ_SANITIZE) &&
+	    (!blk_queue_sanitize(q))) {
+		pr_info("%s - got a SANITIZE request but the queue "
+		       "doesn't support sanitize requests", __func__);
+		err = -EOPNOTSUPP;
+		goto end_io;
+	}
+
 	if (blk_throtl_bio(q, bio))
 		return false;	/* throttled, will be resubmitted later */
@@ -1699,7 +1707,8 @@ void submit_bio(int rw, struct bio *bio)
 	 * If it's a regular read/write or a barrier with data attached,
 	 * go through the normal accounting stuff before submission.
 	 */
-	if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
+	if (bio_has_data(bio) &&
+	    (!(rw & (REQ_DISCARD | REQ_SANITIZE)))) {
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {
@@ -1745,7 +1754,7 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-	if (rq->cmd_flags & REQ_DISCARD)
+	if (rq->cmd_flags & (REQ_DISCARD | REQ_SANITIZE))
 		return 0;
 
 	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||

block/blk-lib.c

@@ -114,6 +114,57 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
 
+/**
+ * blkdev_issue_sanitize - queue a sanitize request
+ * @bdev:	blockdev to issue sanitize for
+ * @gfp_mask:	memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ *    Issue a sanitize request for the specified block device
+ */
+int blkdev_issue_sanitize(struct block_device *bdev, gfp_t gfp_mask)
+{
+	DECLARE_COMPLETION_ONSTACK(wait);
+	struct request_queue *q = bdev_get_queue(bdev);
+	int type = REQ_WRITE | REQ_SANITIZE;
+	struct bio_batch bb;
+	struct bio *bio;
+	int ret = 0;
+
+	if (!q)
+		return -ENXIO;
+
+	if (!blk_queue_sanitize(q)) {
+		pr_err("%s - card doesn't support sanitize", __func__);
+		return -EOPNOTSUPP;
+	}
+
+	bio = bio_alloc(gfp_mask, 1);
+	if (!bio)
+		return -ENOMEM;
+
+	atomic_set(&bb.done, 1);
+	bb.flags = 1 << BIO_UPTODATE;
+	bb.wait = &wait;
+
+	bio->bi_end_io = bio_batch_end_io;
+	bio->bi_bdev = bdev;
+	bio->bi_private = &bb;
+
+	atomic_inc(&bb.done);
+	submit_bio(type, bio);
+
+	/* Wait for bios in-flight */
+	if (!atomic_dec_and_test(&bb.done))
+		wait_for_completion(&wait);
+
+	if (!test_bit(BIO_UPTODATE, &bb.flags))
+		ret = -EIO;
+
+	return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_sanitize);
+
 /**
  * blkdev_issue_zeroout - generate number of zero filed write bios
  * @bdev:	blockdev to issue
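
For context, blkdev_issue_sanitize() reuses the bio_batch completion
helper and its bio_batch_end_io() callback defined earlier in
block/blk-lib.c (the same machinery blkdev_issue_discard() uses). At
the time of this patch the struct looks roughly like this; reproduced
here only as a reader aid, it is not part of the diff:

	struct bio_batch {
		atomic_t		done;	/* 1 + number of in-flight bios */
		unsigned long		flags;	/* BIO_UPTODATE cleared on I/O error */
		struct completion	*wait;	/* signalled when done drops to zero */
	};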

block/blk-merge.c

@@ -382,6 +382,12 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
 		return 0;
 
+	/*
+	 * Don't merge file system requests and sanitize requests
+	 */
+	if ((req->cmd_flags & REQ_SANITIZE) != (next->cmd_flags & REQ_SANITIZE))
+		return 0;
+
 	/*
 	 * not contiguous
 	 */

block/elevator.c

@@ -72,7 +72,43 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
  */
 bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
 {
-	if (!blk_rq_merge_ok(rq, bio))
+	if (!rq_mergeable(rq))
+		return 0;
+
+	/*
+	 * Don't merge file system requests and discard requests
+	 */
+	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
+		return 0;
+
+	/*
+	 * Don't merge discard requests and secure discard requests
+	 */
+	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+		return 0;
+
+	/*
+	 * Don't merge sanitize requests
+	 */
+	if ((bio->bi_rw & REQ_SANITIZE) != (rq->bio->bi_rw & REQ_SANITIZE))
+		return 0;
+
+	/*
+	 * different data direction or already started, don't merge
+	 */
+	if (bio_data_dir(bio) != rq_data_dir(rq))
+		return 0;
+
+	/*
+	 * must be same device and not a special request
+	 */
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+		return 0;
+
+	/*
+	 * only merge integrity protected bio into ditto rq
+	 */
+	if (bio_integrity(bio) != blk_integrity_rq(rq))
 		return 0;
 
 	if (!elv_iosched_allow_merge(rq, bio))
@@ -592,7 +628,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 	if (rq->cmd_flags & REQ_SOFTBARRIER) {
 		/* barriers are scheduling boundary, update end_sector */
 		if (rq->cmd_type == REQ_TYPE_FS ||
-		    (rq->cmd_flags & REQ_DISCARD)) {
+		    (rq->cmd_flags & (REQ_DISCARD | REQ_SANITIZE))) {
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}

block/ioctl.c

@@ -132,6 +132,11 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
 	return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
 }
 
+static int blk_ioctl_sanitize(struct block_device *bdev)
+{
+	return blkdev_issue_sanitize(bdev, GFP_KERNEL);
+}
+
 static int put_ushort(unsigned long arg, unsigned short val)
 {
 	return put_user(val, (unsigned short __user *)arg);
@@ -234,6 +239,10 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		set_device_ro(bdev, n);
 		return 0;
 
+	case BLKSANITIZE:
+		ret = blk_ioctl_sanitize(bdev);
+		break;
+
 	case BLKDISCARD:
 	case BLKSECDISCARD: {
 		uint64_t range[2];

include/linux/blk_types.h

@@ -150,6 +150,7 @@ enum rq_flag_bits {
 	__REQ_FLUSH_SEQ,	/* request for flush sequence */
 	__REQ_IO_STAT,		/* account I/O stat */
 	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
+	__REQ_SANITIZE,		/* sanitize */
 	__REQ_NR_BITS,		/* stops here */
 };
@@ -161,13 +162,15 @@ enum rq_flag_bits {
 #define REQ_META		(1 << __REQ_META)
 #define REQ_PRIO		(1 << __REQ_PRIO)
 #define REQ_DISCARD		(1 << __REQ_DISCARD)
+#define REQ_SANITIZE		(1 << __REQ_SANITIZE)
 #define REQ_NOIDLE		(1 << __REQ_NOIDLE)
 
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
 	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
-	 REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
+	 REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE | \
+	 REQ_SANITIZE)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
 #define REQ_RAHEAD		(1 << __REQ_RAHEAD)

include/linux/blkdev.h

@@ -421,6 +421,7 @@ struct request_queue {
 #define QUEUE_FLAG_ADD_RANDOM  16	/* Contributes to random pool */
 #define QUEUE_FLAG_SECDISCARD  17	/* supports SECDISCARD */
 #define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
+#define QUEUE_FLAG_SANITIZE    19	/* supports SANITIZE */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -500,6 +501,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
+#define blk_queue_sanitize(q)	test_bit(QUEUE_FLAG_SANITIZE, &(q)->queue_flags)
 #define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
 	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
@@ -953,6 +955,7 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_sanitize(struct block_device *bdev, gfp_t gfp_mask);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			sector_t nr_sects, gfp_t gfp_mask);
 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
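
A driver has to opt in before BLKSANITIZE will reach it:
generic_make_request_checks() above fails REQ_SANITIZE bios with
-EOPNOTSUPP unless QUEUE_FLAG_SANITIZE is set on the queue. A minimal
sketch of the driver side, assuming an MMC block driver's queue setup
where q is its request_queue; the mmc_card_can_sanitize() gate is
hypothetical, not part of this patch:

	/* During queue init, before the queue is live (hence _unlocked) */
	if (mmc_card_can_sanitize(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SANITIZE, q);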

include/linux/fs.h

@@ -324,6 +324,7 @@ struct inodes_stat_t {
 #define BLKDISCARDZEROES _IO(0x12,124)
 #define BLKSECDISCARD _IO(0x12,125)
 #define BLKROTATIONAL _IO(0x12,126)
+#define BLKSANITIZE _IO(0x12, 127)
 
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */

kernel/trace/blktrace.c

@@ -1788,6 +1788,8 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 		rwbs[i++] = 'W';
 	else if (rw & REQ_DISCARD)
 		rwbs[i++] = 'D';
+	else if (rw & REQ_SANITIZE)
+		rwbs[i++] = 'Z';
 	else if (bytes)
 		rwbs[i++] = 'R';
 	else