dm: Request based dm-crypt

dm-crypt is a bio-based device-mapper module. It operates on
512-byte units, which is not an efficient way to drive hardware
crypto engines. dm-req-crypt is introduced to address this: it
works on requests, which carry up to 512KB of data for unmerged
requests.
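
For reference, a mapping would be created with a dmsetup table of
the usual form (names and sizes below are placeholders; the cipher
and key fields are positional per req_crypt_ctr(), and the actual
key is taken from the hardware pipe, see QCRYPTO_CTX_USE_PIPE_KEY):

  dmsetup create encdev --table \
      "0 <num_sectors> req-crypt aes-xts-plain <key> 0 <dev_path> 0"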

Change-Id: I7d6a63d516dc2dbe80f46c06dd0722847d55bc9f
Signed-off-by: Dinesh K Garg <dineshg@codeaurora.org>
Dinesh K Garg 2013-12-20 16:37:10 -08:00
parent a36eecd518
commit e6304b0351
7 changed files with 789 additions and 3 deletions


@@ -1355,8 +1355,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
elv_completed_request(q, req);
-/* this is a bio leak */
-WARN_ON(req->bio != NULL);
+/* this is a bio leak if the bio is not tagged with BIO_DONTFREE */
+WARN_ON(req->bio && !bio_flagged(req->bio, BIO_DONTFREE));
/*
* Request may not have originated from ll_rw_blk. if not,
@@ -2410,6 +2410,15 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
blk_account_io_completion(req, nr_bytes);
total_bytes = 0;
+/*
+ * If the bio is flagged, request-based dm needs to perform post
+ * processing, so do not end the bios or the request here; the DM
+ * layer takes care of that.
+ */
+if (bio_flagged(req->bio, BIO_DONTFREE))
+return false;
while (req->bio) {
struct bio *bio = req->bio;
unsigned bio_bytes = min(bio->bi_size, nr_bytes);


@@ -235,6 +235,23 @@ config DM_CRYPT
If unsure, say N.
+config DM_REQ_CRYPT
+tristate "Crypt target support"
+depends on BLK_DEV_DM
+select XTS
+select CRYPTO_XTS
+---help---
+This request-based device-mapper target allows you to create a device
+that transparently encrypts the data on it. You'll need to activate
+the ciphers you're going to use in the cryptoapi configuration.
+dm-req-crypt operates on whole requests (larger payloads) to make
+better use of crypto hardware.
+To compile this code as a module, choose M here: the module will
+be called dm-req-crypt.
+If unsure, say N.
config DM_SNAPSHOT
tristate "Snapshot target"
depends on BLK_DEV_DM


@@ -51,6 +51,7 @@ obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
+obj-$(CONFIG_DM_REQ_CRYPT) += dm-req-crypt.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o

drivers/md/dm-req-crypt.c (new file)

@@ -0,0 +1,751 @@
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <linux/atomic.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/algapi.h>
#include <mach/qcrypto.h>
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "req-crypt"
#define MAX_SG_LIST 1024
#define REQ_DM_512_KB (512*1024)
#define MAX_ENCRYPTION_BUFFERS 1
#define MIN_IOS 16
#define MIN_POOL_PAGES 32
#define KEY_SIZE_XTS 32
#define AES_XTS_IV_LEN 16
#define DM_REQ_CRYPT_ERROR -1
struct req_crypt_result {
struct completion completion;
int err;
};
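/*
 * Module-wide state: the underlying device, work queue, mempools and the
 * cipher handle below are shared globals set up in req_crypt_ctr(), so a
 * single req-crypt mapping at a time is assumed.
 */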
struct dm_dev *dev;
static struct kmem_cache *_req_crypt_io_pool;
sector_t start_sector_orig;
struct workqueue_struct *req_crypt_queue;
mempool_t *req_io_pool;
mempool_t *req_page_pool;
struct crypto_ablkcipher *tfm;
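/*
 * Per-request context: allocated from req_io_pool in req_crypt_map(),
 * carried via map_context->ptr to the endio and worker-queue paths, and
 * freed once the cloned request has been completed.
 */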
struct req_dm_crypt_io {
struct work_struct work;
struct request *cloned_request;
int error;
atomic_t pending;
struct timespec start_time;
};
static void req_crypt_cipher_complete
(struct crypto_async_request *req, int err);
static void req_crypt_inc_pending(struct req_dm_crypt_io *io)
{
atomic_inc(&io->pending);
}
static void req_crypt_dec_pending_encrypt(struct req_dm_crypt_io *io)
{
int error = 0;
struct request *clone = NULL;
if (io) {
error = io->error;
if (io->cloned_request) {
clone = io->cloned_request;
} else {
DMERR("%s io->cloned_request is NULL\n",
__func__);
/*
* If Clone is NULL we cannot do anything,
* this should never happen
*/
BUG();
}
} else {
DMERR("%s io is NULL\n", __func__);
/*
* If Clone is NULL we cannot do anything,
* this should never happen
*/
BUG();
}
atomic_dec(&io->pending);
if (error < 0)
dm_kill_unmapped_request(clone, error);
else
dm_dispatch_request(clone);
}
static void req_crypt_dec_pending_decrypt(struct req_dm_crypt_io *io)
{
int error = 0;
struct request *clone = NULL;
if (io) {
error = io->error;
if (io->cloned_request) {
clone = io->cloned_request;
} else {
DMERR("%s io->cloned_request is NULL\n",
__func__);
/*
* If Clone is NULL we cannot do anything,
* this should never happen
*/
BUG();
}
} else {
DMERR("%s io is NULL\n",
__func__);
/*
* If Clone is NULL we cannot do anything,
* this should never happen
*/
BUG();
}
/* Should never get here if io or Clone is NULL */
dm_end_request(clone, error);
atomic_dec(&io->pending);
mempool_free(io, req_io_pool);
}
/*
 * The callback that will be called by the worker queue to perform decryption
 * for reads and then use the dm functions to complete the bios and the request.
 */
static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
{
struct request *clone = NULL;
int error = 0;
int total_sg_len = 0, rc = 0, total_bytes_in_req = 0;
struct ablkcipher_request *req = NULL;
struct req_crypt_result result;
struct scatterlist *req_sg_read = NULL;
int err = 0;
struct req_iterator iter2;
struct bio_vec *bvec = NULL;
u8 IV[AES_XTS_IV_LEN];
if (io) {
error = io->error;
if (io->cloned_request) {
clone = io->cloned_request;
} else {
DMERR("%s io->cloned_request is NULL\n",
__func__);
error = DM_REQ_CRYPT_ERROR;
goto submit_request;
}
} else {
DMERR("%s io is NULL\n",
__func__);
error = DM_REQ_CRYPT_ERROR;
goto submit_request;
}
req_crypt_inc_pending(io);
if (error != 0) {
err = error;
goto submit_request;
}
req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
DMERR("%s ablkcipher request allocation failed\n", __func__);
err = DM_REQ_CRYPT_ERROR;
goto ablkcipher_req_alloc_failure;
}
ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req_crypt_cipher_complete, &result);
init_completion(&result.completion);
qcrypto_cipher_set_flag(req,
QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
crypto_ablkcipher_clear_flags(tfm, ~0);
crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
req_sg_read = kzalloc(sizeof(struct scatterlist) *
MAX_SG_LIST, GFP_KERNEL);
if (!req_sg_read) {
DMERR("%s req_sg_read allocation failed\n",
__func__);
err = DM_REQ_CRYPT_ERROR;
goto ablkcipher_req_alloc_failure;
}
total_sg_len = blk_rq_map_sg(clone->q, clone, req_sg_read);
if ((total_sg_len <= 0) || (total_sg_len > MAX_SG_LIST)) {
DMERR("%s Request Error%d", __func__, total_sg_len);
err = DM_REQ_CRYPT_ERROR;
goto ablkcipher_req_alloc_failure;
}
/* total bytes in the request (to be decrypted) */
bvec = NULL;
rq_for_each_segment(bvec, clone, iter2) {
total_bytes_in_req = total_bytes_in_req + bvec->bv_len;
}
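/*
 * The IV is seeded with the starting sector of the request; decryption is
 * done in place, with the same scatterlist used as source and destination.
 */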
memset(IV, 0, AES_XTS_IV_LEN);
memcpy(IV, &clone->__sector, sizeof(sector_t));
ablkcipher_request_set_crypt(req, req_sg_read, req_sg_read,
total_bytes_in_req, (void *) IV);
rc = crypto_ablkcipher_decrypt(req);
switch (rc) {
case 0:
break;
case -EBUSY:
/*
* Let's make this a synchronous request by also
* waiting while the operation is still in progress
*/
case -EINPROGRESS:
wait_for_completion_io(&result.completion);
if (result.err) {
DMERR("%s error = %d encrypting the request\n",
__func__, result.err);
err = DM_REQ_CRYPT_ERROR;
}
break;
default:
err = DM_REQ_CRYPT_ERROR;
break;
}
ablkcipher_req_alloc_failure:
if (req)
ablkcipher_request_free(req);
kfree(req_sg_read);
submit_request:
io->error = err;
req_crypt_dec_pending_decrypt(io);
}
/*
 * The callback that will be called by the worker queue to perform encryption
 * for writes and submit the request using the elevator.
 */
static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
{
struct request *clone = NULL;
struct bio *bio_src = NULL;
unsigned int total_sg_len_req_in = 0, total_sg_len_req_out = 0,
total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0;
struct req_iterator iter;
struct ablkcipher_request *req = NULL;
struct req_crypt_result result;
struct bio_vec *bvec = NULL;
struct scatterlist *req_sg_in = NULL;
struct scatterlist *req_sg_out = NULL;
int copy_bio_sector_to_req = 0;
gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
struct page *page = NULL;
u8 IV[AES_XTS_IV_LEN];
int remaining_size = 0;
if (io) {
if (io->cloned_request) {
clone = io->cloned_request;
} else {
DMERR("%s io->cloned_request is NULL\n",
__func__);
error = DM_REQ_CRYPT_ERROR;
goto submit_request;
}
} else {
DMERR("%s io is NULL\n",
__func__);
error = DM_REQ_CRYPT_ERROR;
goto submit_request;
}
req_crypt_inc_pending(io);
req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
DMERR("%s ablkcipher request allocation failed\n",
__func__);
error = DM_REQ_CRYPT_ERROR;
goto ablkcipher_req_alloc_failure;
}
ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
req_crypt_cipher_complete, &result);
init_completion(&result.completion);
qcrypto_cipher_set_flag(req,
QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
crypto_ablkcipher_clear_flags(tfm, ~0);
crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
req_sg_in = kzalloc(sizeof(struct scatterlist) * MAX_SG_LIST,
GFP_KERNEL);
if (!req_sg_in) {
DMERR("%s req_sg_in allocation failed\n",
__func__);
error = DM_REQ_CRYPT_ERROR;
goto ablkcipher_req_alloc_failure;
}
req_sg_out = kzalloc(sizeof(struct scatterlist) * MAX_SG_LIST,
GFP_KERNEL);
if (!req_sg_out) {
DMERR("%s req_sg_out allocation failed\n",
__func__);
error = DM_REQ_CRYPT_ERROR;
goto ablkcipher_req_alloc_failure;
}
total_sg_len_req_in = blk_rq_map_sg(clone->q, clone, req_sg_in);
if ((total_sg_len_req_in <= 0) ||
(total_sg_len_req_in > MAX_SG_LIST)) {
DMERR("%s Request Error%d", __func__, total_sg_len_req_in);
error = DM_REQ_CRYPT_ERROR;
goto ablkcipher_req_alloc_failure;
}
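/*
 * req_sg_in above still references the original plaintext pages. The loop
 * below swaps each segment's page for one from req_page_pool, so that
 * req_sg_out (mapped after the loop) describes freshly allocated pages for
 * the ciphertext; these pages are released in req_crypt_endio().
 */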
rq_for_each_segment(bvec, clone, iter) {
try_again:
if (bvec->bv_len > remaining_size) {
page = NULL;
page = mempool_alloc(req_page_pool, gfp_mask);
if (!page) {
DMERR("%s Crypt page alloc failed", __func__);
congestion_wait(BLK_RW_ASYNC, HZ/100);
goto try_again;
}
bvec->bv_page = page;
bvec->bv_offset = 0;
total_bytes_in_req = total_bytes_in_req + bvec->bv_len;
remaining_size = PAGE_SIZE - bvec->bv_len;
if (remaining_size < 0)
BUG();
} else {
bvec->bv_page = page;
bvec->bv_offset = PAGE_SIZE - remaining_size;
remaining_size = remaining_size - bvec->bv_len;
total_bytes_in_req = total_bytes_in_req + bvec->bv_len;
}
}
total_sg_len_req_out = blk_rq_map_sg(clone->q, clone, req_sg_out);
if ((total_sg_len_req_out <= 0) ||
(total_sg_len_req_out > MAX_SG_LIST)) {
DMERR("%s Request Error %d", __func__, total_sg_len_req_out);
error = DM_REQ_CRYPT_ERROR;
goto ablkcipher_req_alloc_failure;
}
memset(IV, 0, AES_XTS_IV_LEN);
memcpy(IV, &clone->__sector, sizeof(sector_t));
ablkcipher_request_set_crypt(req, req_sg_in, req_sg_out,
total_bytes_in_req, (void *) IV);
rc = crypto_ablkcipher_encrypt(req);
switch (rc) {
case 0:
break;
case -EBUSY:
/*
* Let's make this a synchronous request by also
* waiting while the operation is still in progress
*/
case -EINPROGRESS:
wait_for_completion_interruptible(&result.completion);
if (result.err) {
DMERR("%s error = %d encrypting the request\n",
__func__, result.err);
error = DM_REQ_CRYPT_ERROR;
goto ablkcipher_req_alloc_failure;
}
break;
default:
error = DM_REQ_CRYPT_ERROR;
goto ablkcipher_req_alloc_failure;
}
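/*
 * The data pages were replaced above, so refresh clone->buffer from the
 * first bio and run the bios through blk_queue_bounce() in case the newly
 * allocated highmem pages need bouncing for this queue.
 */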
__rq_for_each_bio(bio_src, clone) {
if (copy_bio_sector_to_req == 0) {
clone->buffer = bio_data(bio_src);
copy_bio_sector_to_req++;
}
blk_queue_bounce(clone->q, &bio_src);
}
ablkcipher_req_alloc_failure:
if (req)
ablkcipher_request_free(req);
kfree(req_sg_in);
kfree(req_sg_out);
submit_request:
io->error = error;
req_crypt_dec_pending_encrypt(io);
}
/* Work-queue callback: dispatch the cloned request to the read or write path */
static void req_cryptd_crypt(struct work_struct *work)
{
struct req_dm_crypt_io *io =
container_of(work, struct req_dm_crypt_io, work);
if (rq_data_dir(io->cloned_request) == WRITE)
req_cryptd_crypt_write_convert(io);
else if (rq_data_dir(io->cloned_request) == READ)
req_cryptd_crypt_read_convert(io);
else
DMERR("%s received non-write request for Clone %u\n",
__func__, (unsigned int)io->cloned_request);
}
static void req_cryptd_queue_crypt(struct req_dm_crypt_io *io)
{
INIT_WORK(&io->work, req_cryptd_crypt);
queue_work(req_crypt_queue, &io->work);
}
/*
* Cipher complete callback, this is triggered by the Linux crypto api once
* the operation is done. This signals the waiting thread that the crypto
* operation is complete.
*/
static void req_crypt_cipher_complete(struct crypto_async_request *req, int err)
{
struct req_crypt_result *res = req->data;
if (err == -EINPROGRESS)
return;
res->err = err;
complete(&res->completion);
}
/*
 * If bio->bi_bdev points to a partition, remap the location to the whole disk
 */
static inline void req_crypt_blk_partition_remap(struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
if (bio_sectors(bio) && bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part;
bio->bi_sector += p->start_sect;
bio->bi_bdev = bdev->bd_contains;
}
}
/*
 * The endio function is called from ksoftirqd context (atomic).
 * For write operations, the new pages allocated from the mempool
 * are freed and returned here. For read operations, decryption is
 * still required; since this is called in atomic context, the
 * request is sent to a worker queue, which completes the decryption
 * and frees the request once done.
 */
static int req_crypt_endio(struct dm_target *ti, struct request *clone,
int error, union map_info *map_context)
{
int err = 0;
struct req_iterator iter1;
struct bio_vec *bvec = NULL;
struct req_dm_crypt_io *req_io = map_context->ptr;
/* For a write request, release the pages allocated during encryption,
 * free the io context and return; reads need decryption below. */
bvec = NULL;
if (rq_data_dir(clone) == WRITE) {
rq_for_each_segment(bvec, clone, iter1) {
if (bvec->bv_offset == 0) {
mempool_free(bvec->bv_page, req_page_pool);
bvec->bv_page = NULL;
} else
bvec->bv_page = NULL;
}
mempool_free(req_io, req_io_pool);
goto submit_request;
} else if (rq_data_dir(clone) == READ) {
req_io->error = error;
req_cryptd_queue_crypt(req_io);
err = DM_ENDIO_INCOMPLETE;
goto submit_request;
}
submit_request:
return err;
}
/*
 * This function is called with interrupts disabled.
 * It remaps the clone onto the underlying device.
 * For a write request it hands the clone to the worker queue, which
 * encrypts the data and submits the request directly via the elevator.
 * For a read request no pre-processing is required; the request
 * is returned to dm once the mapping is done.
 */
static int req_crypt_map(struct dm_target *ti, struct request *clone,
union map_info *map_context)
{
struct req_dm_crypt_io *req_io = NULL;
int error = DM_MAPIO_REMAPPED, copy_bio_sector_to_req = 0;
struct bio *bio_src = NULL;
req_io = mempool_alloc(req_io_pool, GFP_NOWAIT);
if (!req_io) {
DMERR("%s req_io allocation failed\n", __func__);
error = DM_REQ_CRYPT_ERROR;
goto submit_request;
}
/* Save the clone in the req_io; the worker-queue
 * callback will pick it up from there.
 */
req_io->cloned_request = clone;
map_context->ptr = req_io;
atomic_set(&req_io->pending, 0);
/* Get the queue of the underlying original device */
clone->q = bdev_get_queue(dev->bdev);
clone->rq_disk = dev->bdev->bd_disk;
__rq_for_each_bio(bio_src, clone) {
bio_src->bi_bdev = dev->bdev;
/* Currently the way req-dm works is that once the underlying
 * device driver completes the request, it calls into the
 * block layer, which completes the bios (clones) and
 * then the cloned request. This is undesirable for req-dm-crypt,
 * hence the BIO_DONTFREE flag: it ensures that the
 * block layer does not complete the cloned bios before completing
 * the request. When the crypt endio is called, post-processing
 * is done and then the dm layer completes the bios (clones)
 * and frees them.
 */
bio_src->bi_flags |= 1 << BIO_DONTFREE;
/*
* If this device has partitions, remap block n
* of partition p to block n+start(p) of the disk.
*/
req_crypt_blk_partition_remap(bio_src);
if (copy_bio_sector_to_req == 0) {
clone->__sector = bio_src->bi_sector;
clone->buffer = bio_data(bio_src);
copy_bio_sector_to_req++;
}
blk_queue_bounce(clone->q, &bio_src);
}
if (rq_data_dir(clone) == READ) {
error = DM_MAPIO_REMAPPED;
goto submit_request;
} else if (rq_data_dir(clone) == WRITE) {
req_cryptd_queue_crypt(req_io);
error = DM_MAPIO_SUBMITTED;
goto submit_request;
} else {
error = DM_REQ_CRYPT_ERROR;
DMERR("%s Unknown request\n", __func__);
}
submit_request:
return error;
}
static void req_crypt_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
}
static void req_crypt_dtr(struct dm_target *ti)
{
if (req_crypt_queue)
destroy_workqueue(req_crypt_queue);
if (req_io_pool)
mempool_destroy(req_io_pool);
if (req_page_pool)
mempool_destroy(req_page_pool);
if (tfm)
crypto_free_ablkcipher(tfm);
}
/*
* Construct an encryption mapping:
* <cipher> <key> <iv_offset> <dev_path> <start>
*/
static int req_crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
unsigned long long tmpll;
char dummy;
if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &dev)) {
DMERR(" %s Device Lookup failed\n", __func__);
return DM_REQ_CRYPT_ERROR;
}
if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
DMERR("%s Invalid device sector\n", __func__);
return DM_REQ_CRYPT_ERROR;
}
start_sector_orig = tmpll;
req_crypt_queue = alloc_workqueue("req_cryptd",
WQ_HIGHPRI |
WQ_CPU_INTENSIVE|
WQ_MEM_RECLAIM,
1);
if (!req_crypt_queue) {
DMERR("%s req_crypt_queue not allocated\n", __func__);
return DM_REQ_CRYPT_ERROR;
}
/* Allocate the block cipher transform and keep the handle */
tfm = crypto_alloc_ablkcipher("xts(aes)", 0, 0);
if (IS_ERR(tfm)) {
DMERR("%s ablkcipher tfm allocation failed : error = %lu\n",
__func__, PTR_ERR(tfm));
return DM_REQ_CRYPT_ERROR;
}
req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool);
if (!req_io_pool) {
DMERR("%s req_io_pool not allocated\n", __func__);
return DM_REQ_CRYPT_ERROR;
}
req_page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
if (!req_page_pool) {
DMERR("%s req_page_pool not allocated\n", __func__);
return DM_REQ_CRYPT_ERROR;
}
return 0;
}
static void req_crypt_postsuspend(struct dm_target *ti)
{
}
static int req_crypt_preresume(struct dm_target *ti)
{
return 0;
}
static void req_crypt_resume(struct dm_target *ti)
{
}
/* Message interface
* key set <key>
* key wipe
*/
static int req_crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
return 0;
}
static int req_crypt_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
return fn(ti, dev, start_sector_orig, ti->len, data);
}
static struct target_type req_crypt_target = {
.name = "req-crypt",
.version = {1, 0, 0},
.module = THIS_MODULE,
.ctr = req_crypt_ctr,
.dtr = req_crypt_dtr,
.map_rq = req_crypt_map,
.rq_end_io = req_crypt_endio,
.status = req_crypt_status,
.postsuspend = req_crypt_postsuspend,
.preresume = req_crypt_preresume,
.resume = req_crypt_resume,
.message = req_crypt_message,
.iterate_devices = req_crypt_iterate_devices,
};
static int __init req_dm_crypt_init(void)
{
int r;
_req_crypt_io_pool = KMEM_CACHE(req_dm_crypt_io, 0);
if (!_req_crypt_io_pool)
return -ENOMEM;
r = dm_register_target(&req_crypt_target);
if (r < 0)
DMERR("register failed %d", r);
return r;
}
static void __exit req_dm_crypt_exit(void)
{
kmem_cache_destroy(_req_crypt_io_pool);
dm_unregister_target(&req_crypt_target);
}
module_init(req_dm_crypt_init);
module_exit(req_dm_crypt_exit);
MODULE_DESCRIPTION(DM_NAME " target for request based transparent encryption / decryption");
MODULE_LICENSE("GPL v2");


@@ -737,7 +737,7 @@ static void free_rq_clone(struct request *clone)
* Complete the clone and the original request.
* Must be called without queue lock.
*/
-static void dm_end_request(struct request *clone, int error)
+void dm_end_request(struct request *clone, int error)
{
int rw = rq_data_dir(clone);
struct dm_rq_target_io *tio = clone->end_io_data;


@@ -120,6 +120,13 @@ struct bio {
#define BIO_RESET_BITS 13
#define BIO_OWNS_VEC 13 /* bio_free() should free bvec */
+/*
+ * Added for request-based dm, which needs to perform post-processing.
+ * This flag ensures blk_update_request() does not free the bios or the
+ * request; that is done at the dm level.
+ */
+#define BIO_DONTFREE 14
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
/*


@@ -594,5 +594,6 @@ void dm_dispatch_request(struct request *rq);
void dm_requeue_unmapped_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);
+void dm_end_request(struct request *clone, int error);
#endif /* _LINUX_DEVICE_MAPPER_H */