block: queue work on power efficient wq
Block layer uses workqueues for multiple purposes. There is no real dependency of scheduling these on the cpu which scheduled them. On an idle system, it is observed that an idle cpu wakes up many times just to service this work. It would be better if we can schedule it on a cpu which the scheduler believes to be the most appropriate one. This patch replaces normal workqueues with power efficient versions. Cc: Jens Axboe <axboe@kernel.dk> Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
parent
fd6e9ff26f
commit
d0dcfbd319
|
@ -3344,7 +3344,8 @@ int __init blk_dev_init(void)
|
||||||
|
|
||||||
/* used for unplugging and affects IO latency/throughput - HIGHPRI */
|
/* used for unplugging and affects IO latency/throughput - HIGHPRI */
|
||||||
kblockd_workqueue = alloc_workqueue("kblockd",
|
kblockd_workqueue = alloc_workqueue("kblockd",
|
||||||
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
|
WQ_MEM_RECLAIM | WQ_HIGHPRI |
|
||||||
|
WQ_POWER_EFFICIENT, 0);
|
||||||
if (!kblockd_workqueue)
|
if (!kblockd_workqueue)
|
||||||
panic("Failed to create kblockd\n");
|
panic("Failed to create kblockd\n");
|
||||||
|
|
||||||
|
|
|
@ -144,7 +144,8 @@ void put_io_context(struct io_context *ioc)
|
||||||
if (atomic_long_dec_and_test(&ioc->refcount)) {
|
if (atomic_long_dec_and_test(&ioc->refcount)) {
|
||||||
spin_lock_irqsave(&ioc->lock, flags);
|
spin_lock_irqsave(&ioc->lock, flags);
|
||||||
if (!hlist_empty(&ioc->icq_list))
|
if (!hlist_empty(&ioc->icq_list))
|
||||||
schedule_work(&ioc->release_work);
|
queue_work(system_power_efficient_wq,
|
||||||
|
&ioc->release_work);
|
||||||
else
|
else
|
||||||
free_ioc = true;
|
free_ioc = true;
|
||||||
spin_unlock_irqrestore(&ioc->lock, flags);
|
spin_unlock_irqrestore(&ioc->lock, flags);
|
||||||
|
|
|
@ -1522,9 +1522,11 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
|
||||||
intv = disk_events_poll_jiffies(disk);
|
intv = disk_events_poll_jiffies(disk);
|
||||||
set_timer_slack(&ev->dwork.timer, intv / 4);
|
set_timer_slack(&ev->dwork.timer, intv / 4);
|
||||||
if (check_now)
|
if (check_now)
|
||||||
queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
|
queue_delayed_work(system_freezable_power_efficient_wq,
|
||||||
|
&ev->dwork, 0);
|
||||||
else if (intv)
|
else if (intv)
|
||||||
queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
|
queue_delayed_work(system_freezable_power_efficient_wq,
|
||||||
|
&ev->dwork, intv);
|
||||||
out_unlock:
|
out_unlock:
|
||||||
spin_unlock_irqrestore(&ev->lock, flags);
|
spin_unlock_irqrestore(&ev->lock, flags);
|
||||||
}
|
}
|
||||||
|
@ -1567,7 +1569,8 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
|
||||||
spin_lock_irq(&ev->lock);
|
spin_lock_irq(&ev->lock);
|
||||||
ev->clearing |= mask;
|
ev->clearing |= mask;
|
||||||
if (!ev->block)
|
if (!ev->block)
|
||||||
mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
|
mod_delayed_work(system_freezable_power_efficient_wq,
|
||||||
|
&ev->dwork, 0);
|
||||||
spin_unlock_irq(&ev->lock);
|
spin_unlock_irq(&ev->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1667,7 +1670,8 @@ static void disk_check_events(struct disk_events *ev,
|
||||||
|
|
||||||
intv = disk_events_poll_jiffies(disk);
|
intv = disk_events_poll_jiffies(disk);
|
||||||
if (!ev->block && intv)
|
if (!ev->block && intv)
|
||||||
queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
|
queue_delayed_work(system_freezable_power_efficient_wq,
|
||||||
|
&ev->dwork, intv);
|
||||||
|
|
||||||
spin_unlock_irq(&ev->lock);
|
spin_unlock_irq(&ev->lock);
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue