[PATCH] copy_process: cleanup bad_fork_cleanup_signal

__exit_signal() does important cleanups atomically under ->siglock.  It is
also called from copy_process's error path.  This is not good; for example,
it is the reason we can't move __unhash_process() under ->siglock.

We should not mix these two paths; just look at the ugly 'if (p->sighand)'
check under the 'bad_fork_cleanup_sighand:' label.  For the copy_process()
case it is sufficient to just back out copy_signal(), nothing more.

Again, nobody can see this task yet.  In the CLONE_THREAD case we just
decrement signal->count; otherwise nobody can see this ->signal and we can
free it locklessly.

This patch assumes it is safe to call exit_thread_group_keys() without
holding tasklist_lock.
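
That assumption is plausible because exit_thread_group_keys() only drops
references that the signal_struct itself holds.  Roughly (a sketch assuming
the 2.6.16-era CONFIG_KEYS helper in include/linux/key.h; the exact
definition is not part of this patch):

	/* Sketch only; exact form assumed.  key_put(NULL) is a no-op. */
	#define exit_thread_group_keys(tg)			\
	do {							\
		key_put((tg)->session_keyring);			\
		key_put((tg)->process_keyring);			\
	} while (0)

Since the half-built task was never added to the task list, no other CPU can
reach this signal_struct or its keyrings, so there is nothing for
tasklist_lock to serialize against in the fork error path.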

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Acked-by: David Howells <dhowells@redhat.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Author: Oleg Nesterov <oleg@tv-sign.ru>
Date:   2006-03-28 16:11:16 -08:00
Committed by: Linus Torvalds
commit 6b3934ef52 (parent 7001510d0c)
4 changed files with 21 additions and 20 deletions

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1149,7 +1149,7 @@ extern void flush_thread(void);
 extern void exit_thread(void);
 
 extern void exit_files(struct task_struct *);
-extern void exit_signal(struct task_struct *);
+extern void __cleanup_signal(struct signal_struct *);
 extern void __exit_signal(struct task_struct *);
 extern void __exit_sighand(struct task_struct *);
 extern void exit_itimers(struct signal_struct *);

--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -210,7 +210,6 @@ extern kmem_cache_t *names_cachep;
 extern kmem_cache_t *files_cachep;
 extern kmem_cache_t *filp_cachep;
 extern kmem_cache_t *fs_cachep;
-extern kmem_cache_t *signal_cachep;
 extern kmem_cache_t *sighand_cachep;
 extern kmem_cache_t *bio_cachep;
 

--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -84,7 +84,7 @@ static kmem_cache_t *task_struct_cachep;
 #endif
 
 /* SLAB cache for signal_struct structures (tsk->signal) */
-kmem_cache_t *signal_cachep;
+static kmem_cache_t *signal_cachep;
 
 /* SLAB cache for sighand_struct structures (tsk->sighand) */
 kmem_cache_t *sighand_cachep;
@@ -872,6 +872,22 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk
 	return 0;
 }
 
+void __cleanup_signal(struct signal_struct *sig)
+{
+	exit_thread_group_keys(sig);
+	kmem_cache_free(signal_cachep, sig);
+}
+
+static inline void cleanup_signal(struct task_struct *tsk)
+{
+	struct signal_struct *sig = tsk->signal;
+
+	atomic_dec(&sig->live);
+
+	if (atomic_dec_and_test(&sig->count))
+		__cleanup_signal(sig);
+}
+
 static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
@@ -1206,10 +1222,9 @@ bad_fork_cleanup_mm:
 	if (p->mm)
 		mmput(p->mm);
 bad_fork_cleanup_signal:
-	exit_signal(p);
+	cleanup_signal(p);
 bad_fork_cleanup_sighand:
-	if (p->sighand)
-		__exit_sighand(p);
+	__exit_sighand(p);
 bad_fork_cleanup_fs:
 	exit_fs(p); /* blocking */
 bad_fork_cleanup_files:

--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -395,23 +395,10 @@ void __exit_signal(struct task_struct *tsk)
 	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
 	flush_sigqueue(&tsk->pending);
 	if (sig) {
-		/*
-		 * We are cleaning up the signal_struct here.
-		 */
-		exit_thread_group_keys(sig);
-		kmem_cache_free(signal_cachep, sig);
+		__cleanup_signal(sig);
 	}
 }
 
-void exit_signal(struct task_struct *tsk)
-{
-	atomic_dec(&tsk->signal->live);
-
-	write_lock_irq(&tasklist_lock);
-	__exit_signal(tsk);
-	write_unlock_irq(&tasklist_lock);
-}
-
 /*
  * Flush all handlers for a task.
  */