blkcg: make root blkcg allocation use %GFP_KERNEL
Currently, blkcg_activate_policy() depends on %GFP_ATOMIC allocation
from __blkg_lookup_create() for root blkcg creation. This could make
policy activation fail unnecessarily.

Make blkg_alloc() take @gfp_mask, __blkg_lookup_create() take an
optional @new_blkg for a preallocated blkg, and blkcg_activate_policy()
preload the radix tree and preallocate the blkg with %GFP_KERNEL before
trying to create the root blkg.

v2: __blkg_lookup_create() was returning %NULL on blkg alloc failure
    instead of an ERR_PTR() value. Fixed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 13589864be
commit 159749937a

1 changed file with 43 additions and 16 deletions
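The patch applies a standard kernel pattern: do every allocation that may sleep (%GFP_KERNEL) before entering the atomic section, then hand the preallocated object to the insert path, which either links it in or frees it ("@new_blkg is always consumed on return"). radix_tree_preload(GFP_KERNEL) serves the same purpose for the radix tree itself, stocking per-CPU node pools so that the later insert under the queue spinlock needs no allocation. Below is a minimal userspace sketch of the @new_blkg convention, with hypothetical names (lookup_create, table_slot) and a pthread mutex standing in for the spinlock; it illustrates the pattern only, it is not the kernel code.

/* Sketch of "preallocate outside the lock, consume inside".
 * Hypothetical userspace analogue: malloc() stands in for
 * kzalloc_node() and a mutex for q->queue_lock. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int key; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *table_slot;		/* one-slot "table" for brevity */

/*
 * Look up @key and create an entry if missing, using @new if the caller
 * preallocated one.  Like __blkg_lookup_create(), @new is always
 * consumed: it is either linked into the table or freed before return.
 */
static struct node *lookup_create(int key, struct node *new)
{
	struct node *n;

	pthread_mutex_lock(&table_lock);
	n = table_slot;
	if (n && n->key == key)
		goto out_free;		/* hit: drop the preallocation */

	if (!new) {
		/* fallback allocation under the lock; in the kernel this
		 * is GFP_ATOMIC and may fail under memory pressure */
		new = malloc(sizeof(*new));
		if (!new) {
			n = NULL;
			goto out_unlock;
		}
	}
	new->key = key;
	table_slot = n = new;
	new = NULL;			/* consumed by the table */
out_free:
	free(new);			/* free(NULL) is a no-op */
out_unlock:
	pthread_mutex_unlock(&table_lock);
	return n;
}

int main(void)
{
	/* the blocking allocation happens here, outside the lock */
	struct node *pre = malloc(sizeof(*pre));
	struct node *n = lookup_create(42, pre);

	printf("key %d installed\n", n ? n->key : -1);
	return 0;
}

The invariant that @new is always consumed keeps ownership simple: the caller never needs to know whether its preallocation was actually used.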
block/blk-cgroup.c

@@ -91,16 +91,18 @@ static void blkg_free(struct blkcg_gq *blkg)
  * blkg_alloc - allocate a blkg
  * @blkcg: block cgroup the new blkg is associated with
  * @q: request_queue the new blkg is associated with
+ * @gfp_mask: allocation mask to use
  *
  * Allocate a new blkg assocating @blkcg and @q.
  */
-static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
+static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
+				   gfp_t gfp_mask)
 {
 	struct blkcg_gq *blkg;
 	int i;
 
 	/* alloc and init base part */
-	blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
+	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
 	if (!blkg)
 		return NULL;
 
@@ -117,7 +119,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
 			continue;
 
 		/* alloc per-policy data and attach it to blkg */
-		pd = kzalloc_node(pol->pd_size, GFP_ATOMIC, q->node);
+		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
 		if (!pd) {
 			blkg_free(blkg);
 			return NULL;
@@ -175,8 +177,13 @@ struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blkg_lookup);
 
+/*
+ * If @new_blkg is %NULL, this function tries to allocate a new one as
+ * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
+ */
 static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
-					     struct request_queue *q)
+					     struct request_queue *q,
+					     struct blkcg_gq *new_blkg)
 {
 	struct blkcg_gq *blkg;
 	int ret;
@@ -188,18 +195,24 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 	blkg = __blkg_lookup(blkcg, q);
 	if (blkg) {
 		rcu_assign_pointer(blkcg->blkg_hint, blkg);
-		return blkg;
+		goto out_free;
 	}
 
 	/* blkg holds a reference to blkcg */
-	if (!css_tryget(&blkcg->css))
-		return ERR_PTR(-EINVAL);
+	if (!css_tryget(&blkcg->css)) {
+		blkg = ERR_PTR(-EINVAL);
+		goto out_free;
+	}
 
 	/* allocate */
-	ret = -ENOMEM;
-	blkg = blkg_alloc(blkcg, q);
-	if (unlikely(!blkg))
-		goto err_put;
+	if (!new_blkg) {
+		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
+		if (unlikely(!new_blkg)) {
+			blkg = ERR_PTR(-ENOMEM);
+			goto out_put;
+		}
+	}
+	blkg = new_blkg;
 
 	/* insert */
 	spin_lock(&blkcg->lock);
@@ -212,10 +225,13 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 
 	if (!ret)
 		return blkg;
-err_put:
+
+	blkg = ERR_PTR(ret);
+out_put:
 	css_put(&blkcg->css);
-	blkg_free(blkg);
-	return ERR_PTR(ret);
+out_free:
+	blkg_free(new_blkg);
+	return blkg;
 }
 
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
@@ -227,7 +243,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	 */
 	if (unlikely(blk_queue_bypass(q)))
 		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
-	return __blkg_lookup_create(blkcg, q);
+	return __blkg_lookup_create(blkcg, q, NULL);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
@@ -726,19 +742,30 @@ int blkcg_activate_policy(struct request_queue *q,
 	struct blkcg_gq *blkg;
 	struct blkg_policy_data *pd, *n;
 	int cnt = 0, ret;
+	bool preloaded;
 
 	if (blkcg_policy_enabled(q, pol))
 		return 0;
 
+	/* preallocations for root blkg */
+	blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
+	if (!blkg)
+		return -ENOMEM;
+
+	preloaded = !radix_tree_preload(GFP_KERNEL);
+
 	blk_queue_bypass_start(q);
 
 	/* make sure the root blkg exists and count the existing blkgs */
 	spin_lock_irq(q->queue_lock);
 
 	rcu_read_lock();
-	blkg = __blkg_lookup_create(&blkcg_root, q);
+	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
 	rcu_read_unlock();
 
+	if (preloaded)
+		radix_tree_preload_end();
+
 	if (IS_ERR(blkg)) {
 		ret = PTR_ERR(blkg);
 		goto out_unlock;
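The v2 fix in the commit message matters because callers check the result with IS_ERR()/PTR_ERR(), as the blkcg_activate_policy() hunk above shows; a %NULL return would pass that check and later be dereferenced. A self-contained sketch of the kernel's errno-in-pointer convention, simplified from <linux/err.h> for userspace:

/* Simplified rendition of the kernel's ERR_PTR()/IS_ERR() convention:
 * small negative errno values are encoded at the very top of the
 * address space, where no valid object can live. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *p = ERR_PTR(-EINVAL);

	if (IS_ERR(p))
		printf("error: %ld\n", PTR_ERR(p));	/* prints -22 */

	/* NULL is NOT an IS_ERR() value -- which is exactly why the v2
	 * fix converts the alloc-failure return from %NULL to
	 * ERR_PTR(-ENOMEM). */
	printf("IS_ERR(NULL) = %d\n", IS_ERR(NULL));	/* prints 0 */
	return 0;
}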