mm: Fix boot crash in mm_alloc()

Thomas Gleixner reports that we now have a boot crash triggered by
CONFIG_CPUMASK_OFFSTACK=y:

    BUG: unable to handle kernel NULL pointer dereference at   (null)
    IP: [<c11ae035>] find_next_bit+0x55/0xb0
    Call Trace:
     [<c11addda>] cpumask_any_but+0x2a/0x70
     [<c102396b>] flush_tlb_mm+0x2b/0x80
     [<c1022705>] pud_populate+0x35/0x50
     [<c10227ba>] pgd_alloc+0x9a/0xf0
     [<c103a3fc>] mm_init+0xec/0x120
     [<c103a7a3>] mm_alloc+0x53/0xd0

which was introduced by commit de03c72cfc ("mm: convert
mm->cpu_vm_cpumask into cpumask_var_t"), and is due to wrong ordering of
mm_init() vs mm_init_cpumask().

Thomas wrote a patch to just fix the ordering of initialization, but I
hate the new double allocation in the fork path, so I ended up instead
doing some more radical surgery to clean it all up.

Reported-by: Thomas Gleixner <tglx@linutronix.de>
Reported-by: Ingo Molnar <mingo@elte.hu>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 6345d24daf (parent cab0d85c8d)
Linus Torvalds, 2011-05-29 11:32:28 -07:00

4 changed files with 23 additions and 36 deletions

include/linux/mm_types.h

@@ -264,6 +264,8 @@ struct mm_struct {
 
 	struct linux_binfmt *binfmt;
 
+	cpumask_var_t cpu_vm_mask_var;
+
 	/* Architecture-specific MM context */
 	mm_context_t context;
@@ -311,10 +313,18 @@ struct mm_struct {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
 #endif
-	cpumask_var_t cpu_vm_mask_var;
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	struct cpumask cpumask_allocation;
+#endif
 };
 
+static inline void mm_init_cpumask(struct mm_struct *mm)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
+#endif
+}
+
 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
 static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 {

include/linux/sched.h

@@ -2194,7 +2194,6 @@ static inline void mmdrop(struct mm_struct * mm)
 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
 		__mmdrop(mm);
 }
-extern int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm);
 
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);

init/main.c

@@ -487,6 +487,7 @@ asmlinkage void __init start_kernel(void)
 	printk(KERN_NOTICE "%s", linux_banner);
 	setup_arch(&command_line);
 	mm_init_owner(&init_mm, &init_task);
+	mm_init_cpumask(&init_mm);
 	setup_command_line(command_line);
 	setup_nr_cpu_ids();
 	setup_per_cpu_areas();
@@ -510,7 +511,6 @@ asmlinkage void __init start_kernel(void)
 	sort_main_extable();
 	trap_init();
 	mm_init();
-	BUG_ON(mm_init_cpumask(&init_mm, 0));
 
 	/*
 	 * Set up the scheduler prior starting any interrupts (such as the

kernel/fork.c

@@ -484,20 +484,6 @@ static void mm_init_aio(struct mm_struct *mm)
 #endif
 }
 
-int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm)
-{
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	if (!alloc_cpumask_var(&mm->cpu_vm_mask_var, GFP_KERNEL))
-		return -ENOMEM;
-
-	if (oldmm)
-		cpumask_copy(mm_cpumask(mm), mm_cpumask(oldmm));
-	else
-		memset(mm_cpumask(mm), 0, cpumask_size());
-#endif
-	return 0;
-}
-
 static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 {
 	atomic_set(&mm->mm_users, 1);
@@ -538,17 +524,8 @@ struct mm_struct * mm_alloc(void)
 		return NULL;
 
 	memset(mm, 0, sizeof(*mm));
-	mm = mm_init(mm, current);
-	if (!mm)
-		return NULL;
-
-	if (mm_init_cpumask(mm, NULL)) {
-		mm_free_pgd(mm);
-		free_mm(mm);
-		return NULL;
-	}
-
-	return mm;
+	mm_init_cpumask(mm);
+	return mm_init(mm, current);
 }
 
 /*
@@ -559,7 +536,6 @@ struct mm_struct * mm_alloc(void)
 void __mmdrop(struct mm_struct *mm)
 {
 	BUG_ON(mm == &init_mm);
-	free_cpumask_var(mm->cpu_vm_mask_var);
 	mm_free_pgd(mm);
 	destroy_context(mm);
 	mmu_notifier_mm_destroy(mm);
@@ -753,6 +729,7 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
 		goto fail_nomem;
 
 	memcpy(mm, oldmm, sizeof(*mm));
+	mm_init_cpumask(mm);
 
 	/* Initializing for Swap token stuff */
 	mm->token_priority = 0;
@@ -765,9 +742,6 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
 	if (!mm_init(mm, tsk))
 		goto fail_nomem;
 
-	if (mm_init_cpumask(mm, oldmm))
-		goto fail_nocpumask;
-
 	if (init_new_context(tsk, mm))
 		goto fail_nocontext;
 
@@ -794,9 +768,6 @@ fail_nomem:
 	return NULL;
 
 fail_nocontext:
-	free_cpumask_var(mm->cpu_vm_mask_var);
-
-fail_nocpumask:
 	/*
 	 * If init_new_context() failed, we cannot use mmput() to free the mm
 	 * because it calls destroy_context()
@@ -1591,6 +1562,13 @@ void __init proc_caches_init(void)
 	fs_cachep = kmem_cache_create("fs_cache",
 			sizeof(struct fs_struct), 0,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
+	/*
+	 * FIXME! The "sizeof(struct mm_struct)" currently includes the
+	 * whole struct cpumask for the OFFSTACK case. We could change
+	 * this to *only* allocate as much of it as required by the
+	 * maximum number of CPU's we can ever have. The cpumask_allocation
+	 * is at the end of the structure, exactly for that reason.
+	 */
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);