mirror of
https://github.com/followmsi/android_kernel_google_msm.git
synced 2024-11-06 23:17:41 +00:00
[PATCH] coredump: some code relocations
This is a preparation for the next patch. No functional changes. Basically, this patch moves '->flags & SIGNAL_GROUP_EXIT' check into zap_threads(), and 'complete(vfork_done)' into coredump_wait outside of ->mmap_sem protected area. Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru> Cc: Roland McGrath <roland@redhat.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
7b1c6154fa
commit
dcf560c593
1 changed files with 39 additions and 29 deletions
68
fs/exec.c
68
fs/exec.c
|
@@ -1393,20 +1393,22 @@ static void zap_process(struct task_struct *start)
|
||||||
unlock_task_sighand(start, &flags);
|
unlock_task_sighand(start, &flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void zap_threads(struct mm_struct *mm)
|
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
|
||||||
|
int exit_code)
|
||||||
{
|
{
|
||||||
struct task_struct *g, *p;
|
struct task_struct *g, *p;
|
||||||
struct task_struct *tsk = current;
|
int err = -EAGAIN;
|
||||||
struct completion *vfork_done = tsk->vfork_done;
|
|
||||||
|
|
||||||
/*
|
spin_lock_irq(&tsk->sighand->siglock);
|
||||||
* Make sure nobody is waiting for us to release the VM,
|
if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
|
||||||
* otherwise we can deadlock when we wait on each other
|
tsk->signal->flags = SIGNAL_GROUP_EXIT;
|
||||||
*/
|
tsk->signal->group_exit_code = exit_code;
|
||||||
if (vfork_done) {
|
tsk->signal->group_stop_count = 0;
|
||||||
tsk->vfork_done = NULL;
|
err = 0;
|
||||||
complete(vfork_done);
|
|
||||||
}
|
}
|
||||||
|
spin_unlock_irq(&tsk->sighand->siglock);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
for_each_process(g) {
|
for_each_process(g) {
|
||||||
|
@@ -1420,22 +1422,43 @@ static void zap_threads(struct mm_struct *mm)
|
||||||
} while ((p = next_thread(p)) != g);
|
} while ((p = next_thread(p)) != g);
|
||||||
}
|
}
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
|
|
||||||
|
return mm->core_waiters;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void coredump_wait(struct mm_struct *mm)
|
static int coredump_wait(int exit_code)
|
||||||
{
|
{
|
||||||
DECLARE_COMPLETION(startup_done);
|
struct task_struct *tsk = current;
|
||||||
|
struct mm_struct *mm = tsk->mm;
|
||||||
|
struct completion startup_done;
|
||||||
|
struct completion *vfork_done;
|
||||||
int core_waiters;
|
int core_waiters;
|
||||||
|
|
||||||
|
init_completion(&mm->core_done);
|
||||||
|
init_completion(&startup_done);
|
||||||
mm->core_startup_done = &startup_done;
|
mm->core_startup_done = &startup_done;
|
||||||
|
|
||||||
zap_threads(mm);
|
core_waiters = zap_threads(tsk, mm, exit_code);
|
||||||
core_waiters = mm->core_waiters;
|
|
||||||
up_write(&mm->mmap_sem);
|
up_write(&mm->mmap_sem);
|
||||||
|
|
||||||
|
if (unlikely(core_waiters < 0))
|
||||||
|
goto fail;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Make sure nobody is waiting for us to release the VM,
|
||||||
|
* otherwise we can deadlock when we wait on each other
|
||||||
|
*/
|
||||||
|
vfork_done = tsk->vfork_done;
|
||||||
|
if (vfork_done) {
|
||||||
|
tsk->vfork_done = NULL;
|
||||||
|
complete(vfork_done);
|
||||||
|
}
|
||||||
|
|
||||||
if (core_waiters)
|
if (core_waiters)
|
||||||
wait_for_completion(&startup_done);
|
wait_for_completion(&startup_done);
|
||||||
|
fail:
|
||||||
BUG_ON(mm->core_waiters);
|
BUG_ON(mm->core_waiters);
|
||||||
|
return core_waiters;
|
||||||
}
|
}
|
||||||
|
|
||||||
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
|
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
|
||||||
|
@@ -1469,22 +1492,9 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
|
||||||
}
|
}
|
||||||
mm->dumpable = 0;
|
mm->dumpable = 0;
|
||||||
|
|
||||||
retval = -EAGAIN;
|
retval = coredump_wait(exit_code);
|
||||||
spin_lock_irq(&current->sighand->siglock);
|
if (retval < 0)
|
||||||
if (!(current->signal->flags & SIGNAL_GROUP_EXIT)) {
|
|
||||||
current->signal->flags = SIGNAL_GROUP_EXIT;
|
|
||||||
current->signal->group_exit_code = exit_code;
|
|
||||||
current->signal->group_stop_count = 0;
|
|
||||||
retval = 0;
|
|
||||||
}
|
|
||||||
spin_unlock_irq(&current->sighand->siglock);
|
|
||||||
if (retval) {
|
|
||||||
up_write(&mm->mmap_sem);
|
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
|
||||||
|
|
||||||
init_completion(&mm->core_done);
|
|
||||||
coredump_wait(mm);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Clear any false indication of pending signals that might
|
* Clear any false indication of pending signals that might
|
||||||
|
|
Loading…
Reference in a new issue