mirror of
https://github.com/followmsi/android_kernel_google_msm.git
synced 2024-11-06 23:17:41 +00:00
slab/mempolicy: always use local policy from interrupt context
commit e7b691b085
upstream.
slab_node() could access current->mempolicy from interrupt context.
However, there's a race condition during exit where the mempolicy
is first freed and then the pointer zeroed.
Using this from interrupts seems bogus anyway. The interrupt
will interrupt a random process and therefore get a random
mempolicy. Many times, this will be idle's, which no one can change.
Just disable this here and always use local for slab
from interrupts. I also cleaned up the callers of slab_node a bit
which always passed the same argument.
I believe the original mempolicy code did that in fact,
so it's likely a regression.
v2: send version with correct logic
v3: simplify. fix typo.
Reported-by: Arun Sharma <asharma@fb.com>
Cc: penberg@kernel.org
Cc: cl@linux.com
Signed-off-by: Andi Kleen <ak@linux.intel.com>
[tdmackey@twitter.com: Rework control flow based on feedback from
cl@linux.com, fix logic, and cleanup current task_struct reference]
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: David Mackey <tdmackey@twitter.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Zefan Li <lizefan@huawei.com>
This commit is contained in:
parent
ab22539512
commit
69db2d4044
4 changed files with 11 additions and 5 deletions
|
@ -205,7 +205,7 @@ extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
|
|||
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
|
||||
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
|
||||
const nodemask_t *mask);
|
||||
extern unsigned slab_node(struct mempolicy *policy);
|
||||
extern unsigned slab_node(void);
|
||||
|
||||
extern enum zone_type policy_zone;
|
||||
|
||||
|
|
|
@ -1609,8 +1609,14 @@ static unsigned interleave_nodes(struct mempolicy *policy)
|
|||
* task can change it's policy. The system default policy requires no
|
||||
* such protection.
|
||||
*/
|
||||
unsigned slab_node(struct mempolicy *policy)
|
||||
unsigned slab_node(void)
|
||||
{
|
||||
struct mempolicy *policy;
|
||||
|
||||
if (in_interrupt())
|
||||
return numa_node_id();
|
||||
|
||||
policy = current->mempolicy;
|
||||
if (!policy || policy->flags & MPOL_F_LOCAL)
|
||||
return numa_node_id();
|
||||
|
||||
|
|
|
@ -3336,7 +3336,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
|
|||
if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
|
||||
nid_alloc = cpuset_slab_spread_node();
|
||||
else if (current->mempolicy)
|
||||
nid_alloc = slab_node(current->mempolicy);
|
||||
nid_alloc = slab_node();
|
||||
if (nid_alloc != nid_here)
|
||||
return ____cache_alloc_node(cachep, flags, nid_alloc);
|
||||
return NULL;
|
||||
|
@ -3368,7 +3368,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
|
|||
|
||||
retry_cpuset:
|
||||
cpuset_mems_cookie = get_mems_allowed();
|
||||
zonelist = node_zonelist(slab_node(current->mempolicy), flags);
|
||||
zonelist = node_zonelist(slab_node(), flags);
|
||||
|
||||
retry:
|
||||
/*
|
||||
|
|
|
@ -1617,7 +1617,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
|
|||
|
||||
do {
|
||||
cpuset_mems_cookie = get_mems_allowed();
|
||||
zonelist = node_zonelist(slab_node(current->mempolicy), flags);
|
||||
zonelist = node_zonelist(slab_node(), flags);
|
||||
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
|
||||
struct kmem_cache_node *n;
|
||||
|
||||
|
|
Loading…
Reference in a new issue