[PATCH] Fix soft lockup with iSeries viocd driver

Fix soft lockup with iSeries viocd driver, caused by eventually calling
end_that_request_first() with nr_bytes 0.

Some versions of hald do an SG_IO ioctl on the viocd device which becomes a
request with hard_nr_sectors and hard_cur_sectors set to zero.  Passing zero
as the number of sectors to end_request() (which calls
end_that_request_first()) causes an infinite loop when the bio is being freed.

This patch makes sure that the zero is never passed.  It only requires some
number larger than the request size to terminate the loop.

The lockup is triggered by hald, interrogating the device.

Signed-off-by: Tony Breeds <tony@bakeyournoodle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Tony Breeds 2007-03-05 00:30:14 -08:00 committed by Linus Torvalds
parent 5fdc2abe39
commit 1ad7c31107

View file

@ -376,6 +376,25 @@ static int send_request(struct request *req)
return 0;
}
/*
 * Fully complete a request, guaranteeing end_that_request_first() is
 * never passed a sector count of zero (which makes it loop forever
 * while freeing the bio — the soft lockup this patch fixes).
 */
static void viocd_end_request(struct request *req, int uptodate)
{
int nsectors = req->hard_nr_sectors;
/*
* Make sure it's fully ended, and ensure that we process
* at least one sector.
*/
if (blk_pc_request(req))
/* Packet (SG_IO) requests carry a byte count, not sectors:
 * round data_len up to whole 512-byte sectors. */
nsectors = (req->data_len + 511) >> 9;
if (!nsectors)
/* Any value >= the request size ends the loop; 1 suffices
 * for the zero-sector SG_IO case seen from hald. */
nsectors = 1;
/* A non-zero return would mean the request was not fully
 * completed, which must not happen with nsectors >= size. */
if (end_that_request_first(req, uptodate, nsectors))
BUG();
add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req);
end_that_request_last(req, uptodate);
}
static int rwreq;
@ -385,11 +404,11 @@ static void do_viocd_request(request_queue_t *q)
while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
if (!blk_fs_request(req))
end_request(req, 0);
viocd_end_request(req, 0);
else if (send_request(req) < 0) {
printk(VIOCD_KERN_WARNING
"unable to send message to OS/400!");
end_request(req, 0);
viocd_end_request(req, 0);
} else
rwreq++;
}
@ -601,9 +620,9 @@ return_complete:
"with rc %d:0x%04X: %s\n",
req, event->xRc,
bevent->sub_result, err->msg);
end_request(req, 0);
viocd_end_request(req, 0);
} else
end_request(req, 1);
viocd_end_request(req, 1);
/* restart handling of incoming requests */
spin_unlock_irqrestore(&viocd_reqlock, flags);