bsg: address various review comments

This addresses most of the comments made by Andrew. The two remaining
are conversion to idr and dynamic major.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Jens Axboe 2007-07-17 08:52:29 +02:00
parent a5fcaa2106
commit 25fd164303
1 changed file with 44 additions and 58 deletions

@@ -33,7 +33,7 @@
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>
static char bsg_version[] = "block layer sg (bsg) 0.4";
const static char bsg_version[] = "block layer sg (bsg) 0.4";
struct bsg_device {
request_queue_t *queue;
@@ -68,8 +68,6 @@ enum {
#define dprintk(fmt, args...)
#endif
#define list_entry_bc(entry) list_entry((entry), struct bsg_command, list)
/*
* just for testing
*/
@@ -78,9 +76,9 @@ enum {
static DEFINE_MUTEX(bsg_mutex);
static int bsg_device_nr, bsg_minor_idx;
#define BSG_LIST_SIZE (8)
#define bsg_list_idx(minor) ((minor) & (BSG_LIST_SIZE - 1))
static struct hlist_head bsg_device_list[BSG_LIST_SIZE];
#define BSG_LIST_ARRAY_SIZE 8
#define bsg_list_idx(minor) ((minor) & (BSG_LIST_ARRAY_SIZE - 1))
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
static struct class *bsg_class;
static LIST_HEAD(bsg_class_list);
@@ -128,7 +126,7 @@ static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
bd->queued_cmds++;
spin_unlock_irq(&bd->lock);
bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
if (unlikely(!bc)) {
spin_lock_irq(&bd->lock);
bd->queued_cmds--;
@@ -136,7 +134,6 @@ static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
goto out;
}
memset(bc, 0, sizeof(*bc));
bc->bd = bd;
INIT_LIST_HEAD(&bc->list);
dprintk("%s: returning free cmd %p\n", bd->name, bc);
@@ -146,22 +143,12 @@ out:
return bc;
}
static inline void
bsg_del_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
bd->done_cmds--;
list_del(&bc->list);
}
static inline void
bsg_add_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
bd->done_cmds++;
list_add_tail(&bc->list, &bd->done_list);
wake_up(&bd->wq_done);
}
static inline int bsg_io_schedule(struct bsg_device *bd, int state)
static int bsg_io_schedule(struct bsg_device *bd)
{
DEFINE_WAIT(wait);
int ret = 0;
@@ -186,14 +173,11 @@ static inline int bsg_io_schedule(struct bsg_device *bd, int state)
goto unlock;
}
prepare_to_wait(&bd->wq_done, &wait, state);
prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&bd->lock);
io_schedule();
finish_wait(&bd->wq_done, &wait);
if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
ret = -ERESTARTSYS;
return ret;
unlock:
spin_unlock_irq(&bd->lock);
@@ -272,7 +256,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
request_queue_t *q = bd->queue;
struct request *rq, *next_rq = NULL;
int ret, rw = 0; /* shut up gcc */
int ret, rw;
unsigned int dxfer_len;
void *dxferp = NULL;
@@ -354,9 +338,11 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
spin_lock_irqsave(&bd->lock, flags);
list_del(&bc->list);
bsg_add_done_cmd(bd, bc);
list_move_tail(&bc->list, &bd->done_list);
bd->done_cmds++;
spin_unlock_irqrestore(&bd->lock, flags);
wake_up(&bd->wq_done);
}
/*
@@ -387,14 +373,15 @@ static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}
static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
struct bsg_command *bc = NULL;
spin_lock_irq(&bd->lock);
if (bd->done_cmds) {
bc = list_entry_bc(bd->done_list.next);
bsg_del_done_cmd(bd, bc);
bc = list_entry(bd->done_list.next, struct bsg_command, list);
list_del(&bc->list);
bd->done_cmds--;
}
spin_unlock_irq(&bd->lock);
@@ -450,8 +437,8 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
hdr->response_len = 0;
if (rq->sense_len && hdr->response) {
int len = min((unsigned int) hdr->max_response_len,
rq->sense_len);
int len = min_t(unsigned int, hdr->max_response_len,
rq->sense_len);
ret = copy_to_user((void*)(unsigned long)hdr->response,
rq->sense, len);
@@ -486,7 +473,7 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
*/
ret = 0;
do {
ret = bsg_io_schedule(bd, TASK_UNINTERRUPTIBLE);
ret = bsg_io_schedule(bd);
/*
* look for -ENODATA specifically -- we'll sometimes get
* -ERESTARTSYS when we've taken a signal, but we can't
@@ -523,7 +510,7 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
return ret;
}
static ssize_t
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
const struct iovec *iov, ssize_t *bytes_read)
{
@@ -550,7 +537,7 @@ __bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
bc->bidi_bio);
if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
ret = -EFAULT;
bsg_free_command(bc);
@@ -582,6 +569,9 @@ static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}
/*
* Check if the error is a "real" error that we should return.
*/
static inline int err_block_err(int ret)
{
if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
@@ -610,8 +600,8 @@ bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
return bytes_read;
}
static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
size_t count, ssize_t *bytes_read)
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
size_t count, ssize_t *bytes_written)
{
struct bsg_command *bc;
struct request *rq;
@@ -655,7 +645,7 @@ static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
rq = NULL;
nr_commands--;
buf += sizeof(struct sg_io_v4);
*bytes_read += sizeof(struct sg_io_v4);
*bytes_written += sizeof(struct sg_io_v4);
}
if (bc)
@@ -668,7 +658,7 @@ static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
struct bsg_device *bd = file->private_data;
ssize_t bytes_read;
ssize_t bytes_written;
int ret;
dprintk("%s: write %Zd bytes\n", bd->name, count);
@@ -676,18 +666,18 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
bsg_set_block(bd, file);
bsg_set_write_perm(bd, file);
bytes_read = 0;
ret = __bsg_write(bd, buf, count, &bytes_read);
*ppos = bytes_read;
bytes_written = 0;
ret = __bsg_write(bd, buf, count, &bytes_written);
*ppos = bytes_written;
/*
* return bytes written on non-fatal errors
*/
if (!bytes_read || (bytes_read && err_block_err(ret)))
bytes_read = ret;
if (!bytes_written || (bytes_written && err_block_err(ret)))
bytes_written = ret;
dprintk("%s: returning %Zd\n", bd->name, bytes_read);
return bytes_read;
dprintk("%s: returning %Zd\n", bd->name, bytes_written);
return bytes_written;
}
static struct bsg_device *bsg_alloc_device(void)
@@ -746,7 +736,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
struct request_queue *rq,
struct file *file)
{
struct bsg_device *bd = NULL;
struct bsg_device *bd;
#ifdef BSG_DEBUG
unsigned char buf[32];
#endif
@@ -762,7 +752,8 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
atomic_set(&bd->ref_count, 1);
bd->minor = iminor(inode);
mutex_lock(&bsg_mutex);
hlist_add_head(&bd->dev_list, &bsg_device_list[bsg_list_idx(bd->minor)]);
hlist_add_head(&bd->dev_list,
&bsg_device_list[bd->minor & (BSG_LIST_ARRAY_SIZE - 1)]);
strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
dprintk("bound to <%s>, max queue %d\n",
@@ -774,12 +765,13 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
static struct bsg_device *__bsg_get_device(int minor)
{
struct hlist_head *list = &bsg_device_list[bsg_list_idx(minor)];
struct hlist_head *list;
struct bsg_device *bd = NULL;
struct hlist_node *entry;
mutex_lock(&bsg_mutex);
list = &bsg_device_list[minor & (BSG_LIST_ARRAY_SIZE - 1)];
hlist_for_each(entry, list) {
bd = hlist_entry(entry, struct bsg_device, dev_list);
if (bd->minor == minor) {
@@ -858,16 +850,11 @@ static unsigned int bsg_poll(struct file *file, poll_table *wait)
return mask;
}
static int
bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct bsg_device *bd = file->private_data;
int __user *uarg = (int __user *) arg;
if (!bd)
return -ENXIO;
switch (cmd) {
/*
* our own ioctls
@@ -944,7 +931,7 @@ static struct file_operations bsg_fops = {
.poll = bsg_poll,
.open = bsg_open,
.release = bsg_release,
.ioctl = bsg_ioctl,
.unlocked_ioctl = bsg_ioctl,
.owner = THIS_MODULE,
};
@@ -952,8 +939,7 @@ void bsg_unregister_queue(struct request_queue *q)
{
struct bsg_class_device *bcd = &q->bsg_dev;
if (!bcd->class_dev)
return;
WARN_ON(!bcd->class_dev);
mutex_lock(&bsg_mutex);
sysfs_remove_link(&q->kobj, "bsg");
@@ -1069,7 +1055,7 @@ static int __init bsg_init(void)
return -ENOMEM;
}
for (i = 0; i < BSG_LIST_SIZE; i++)
for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
INIT_HLIST_HEAD(&bsg_device_list[i]);
bsg_class = class_create(THIS_MODULE, "bsg");