mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-10-31 18:09:19 +00:00
raid5: make_request use batch stripe release
make_request() does stripe release for every stripe and the stripe usually has count 1, which makes previous release_stripe() optimization not work. In my test, this release_stripe() becomes the heaviest place to take conf->device_lock after previous patches applied. Below patch makes stripe release batch. All the stripes will be released in unplug. The STRIPE_ON_UNPLUG_LIST bit is to protect concurrent access to the stripe lru. Signed-off-by: Shaohua Li <shli@fusionio.com> Signed-off-by: NeilBrown <neilb@suse.de>
This commit is contained in:
parent
74018dc306
commit
8811b5968f
2 changed files with 60 additions and 3 deletions
|
@@ -471,7 +471,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
|
|||
} else {
|
||||
if (atomic_read(&sh->count)) {
|
||||
BUG_ON(!list_empty(&sh->lru)
|
||||
&& !test_bit(STRIPE_EXPANDING, &sh->state));
|
||||
&& !test_bit(STRIPE_EXPANDING, &sh->state)
|
||||
&& !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state));
|
||||
} else {
|
||||
if (!test_bit(STRIPE_HANDLE, &sh->state))
|
||||
atomic_inc(&conf->active_stripes);
|
||||
|
@@ -3988,6 +3989,62 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf)
|
|||
return sh;
|
||||
}
|
||||
|
||||
struct raid5_plug_cb {
|
||||
struct blk_plug_cb cb;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
|
||||
{
|
||||
struct raid5_plug_cb *cb = container_of(
|
||||
blk_cb, struct raid5_plug_cb, cb);
|
||||
struct stripe_head *sh;
|
||||
struct mddev *mddev = cb->cb.data;
|
||||
struct r5conf *conf = mddev->private;
|
||||
|
||||
if (cb->list.next && !list_empty(&cb->list)) {
|
||||
spin_lock_irq(&conf->device_lock);
|
||||
while (!list_empty(&cb->list)) {
|
||||
sh = list_first_entry(&cb->list, struct stripe_head, lru);
|
||||
list_del_init(&sh->lru);
|
||||
/*
|
||||
* avoid race release_stripe_plug() sees
|
||||
* STRIPE_ON_UNPLUG_LIST clear but the stripe
|
||||
* is still in our list
|
||||
*/
|
||||
smp_mb__before_clear_bit();
|
||||
clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state);
|
||||
__release_stripe(conf, sh);
|
||||
}
|
||||
spin_unlock_irq(&conf->device_lock);
|
||||
}
|
||||
kfree(cb);
|
||||
}
|
||||
|
||||
static void release_stripe_plug(struct mddev *mddev,
|
||||
struct stripe_head *sh)
|
||||
{
|
||||
struct blk_plug_cb *blk_cb = blk_check_plugged(
|
||||
raid5_unplug, mddev,
|
||||
sizeof(struct raid5_plug_cb));
|
||||
struct raid5_plug_cb *cb;
|
||||
|
||||
if (!blk_cb) {
|
||||
release_stripe(sh);
|
||||
return;
|
||||
}
|
||||
|
||||
cb = container_of(blk_cb, struct raid5_plug_cb, cb);
|
||||
|
||||
if (cb->list.next == NULL)
|
||||
INIT_LIST_HEAD(&cb->list);
|
||||
|
||||
if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state))
|
||||
list_add_tail(&sh->lru, &cb->list);
|
||||
else
|
||||
release_stripe(sh);
|
||||
}
|
||||
|
||||
static void make_request(struct mddev *mddev, struct bio * bi)
|
||||
{
|
||||
struct r5conf *conf = mddev->private;
|
||||
|
@@ -4116,8 +4173,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
|
|||
if ((bi->bi_rw & REQ_SYNC) &&
|
||||
!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
|
||||
atomic_inc(&conf->preread_active_stripes);
|
||||
mddev_check_plugged(mddev);
|
||||
release_stripe(sh);
|
||||
release_stripe_plug(mddev, sh);
|
||||
} else {
|
||||
/* cannot get stripe for read-ahead, just give-up */
|
||||
clear_bit(BIO_UPTODATE, &bi->bi_flags);
|
||||
|
|
|
@@ -319,6 +319,7 @@ enum {
|
|||
STRIPE_BIOFILL_RUN,
|
||||
STRIPE_COMPUTE_RUN,
|
||||
STRIPE_OPS_REQ_PENDING,
|
||||
STRIPE_ON_UNPLUG_LIST,
|
||||
};
|
||||
|
||||
/*
|
||||
|
|
Loading…
Reference in a new issue