mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git (synced 2024-10-31 18:09:19 +00:00)
mm, oom: fold oom_kill_task() into oom_kill_process()
oom_kill_task() has a single caller, so fold it into its parent function, oom_kill_process(). Slightly reduces the number of lines in the oom killer.

Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent 2a1c9b1fc0
commit 647f2bdf4a
1 changed file with 38 additions and 47 deletions
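For readers new to this kind of cleanup, the change is a textbook fold of a single-caller helper: the helper's body moves into its only call site and the separate definition disappears. A minimal userspace C sketch of the same transformation (toy functions invented for illustration, not the kernel code itself):

#include <stdio.h>

/* Before the fold: report_kill() has exactly one caller, so the
 * extra function adds indirection without enabling any reuse. */
static void report_kill(int pid)
{
	printf("Killed process %d\n", pid);
}

static void oom_kill_before(int pid)
{
	report_kill(pid);	/* the single call site */
}

/* After the fold: the helper body lives directly in its only
 * caller and the separate definition is deleted. */
static void oom_kill_after(int pid)
{
	printf("Killed process %d\n", pid);
}

int main(void)
{
	oom_kill_before(42);
	oom_kill_after(42);
	return 0;
}

The diff below does exactly this for oom_kill_task(), moving its body to the end of oom_kill_process() and renaming p to victim to fit the caller's variables.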
mm/oom_kill.c
@@ -434,52 +434,6 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 }
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
-static void oom_kill_task(struct task_struct *p)
-{
-	struct task_struct *q;
-	struct mm_struct *mm;
-
-	p = find_lock_task_mm(p);
-	if (!p)
-		return;
-
-	/* mm cannot be safely dereferenced after task_unlock(p) */
-	mm = p->mm;
-
-	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
-		task_pid_nr(p), p->comm, K(p->mm->total_vm),
-		K(get_mm_counter(p->mm, MM_ANONPAGES)),
-		K(get_mm_counter(p->mm, MM_FILEPAGES)));
-	task_unlock(p);
-
-	/*
-	 * Kill all user processes sharing p->mm in other thread groups, if any.
-	 * They don't get access to memory reserves or a higher scheduler
-	 * priority, though, to avoid depletion of all memory or task
-	 * starvation. This prevents mm->mmap_sem livelock when an oom killed
-	 * task cannot exit because it requires the semaphore and its contended
-	 * by another thread trying to allocate memory itself. That thread will
-	 * now get access to memory reserves since it has a pending fatal
-	 * signal.
-	 */
-	for_each_process(q)
-		if (q->mm == mm && !same_thread_group(q, p) &&
-		    !(q->flags & PF_KTHREAD)) {
-			if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
-				continue;
-
-			task_lock(q);	/* Protect ->comm from prctl() */
-			pr_err("Kill process %d (%s) sharing same memory\n",
-				task_pid_nr(q), q->comm);
-			task_unlock(q);
-			force_sig(SIGKILL, q);
-		}
-
-	set_tsk_thread_flag(p, TIF_MEMDIE);
-	force_sig(SIGKILL, p);
-}
-#undef K
-
 static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 			     unsigned int points, unsigned long totalpages,
 			     struct mem_cgroup *memcg, nodemask_t *nodemask,
@@ -488,6 +442,7 @@ static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	struct task_struct *victim = p;
 	struct task_struct *child;
 	struct task_struct *t = p;
+	struct mm_struct *mm;
 	unsigned int victim_points = 0;
 
 	if (printk_ratelimit())
@@ -531,8 +486,44 @@ static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 		}
 	} while_each_thread(p, t);
 
-	oom_kill_task(victim);
+	victim = find_lock_task_mm(victim);
+	if (!victim)
+		return;
+
+	/* mm cannot safely be dereferenced after task_unlock(victim) */
+	mm = victim->mm;
+	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
+		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
+		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
+		K(get_mm_counter(victim->mm, MM_FILEPAGES)));
+	task_unlock(victim);
+
+	/*
+	 * Kill all user processes sharing victim->mm in other thread groups, if
+	 * any. They don't get access to memory reserves, though, to avoid
+	 * depletion of all memory. This prevents mm->mmap_sem livelock when an
+	 * oom killed thread cannot exit because it requires the semaphore and
+	 * its contended by another thread trying to allocate memory itself.
+	 * That thread will now get access to memory reserves since it has a
+	 * pending fatal signal.
+	 */
+	for_each_process(p)
+		if (p->mm == mm && !same_thread_group(p, victim) &&
+		    !(p->flags & PF_KTHREAD)) {
+			if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+				continue;
+
+			task_lock(p);	/* Protect ->comm from prctl() */
+			pr_err("Kill process %d (%s) sharing same memory\n",
+				task_pid_nr(p), p->comm);
+			task_unlock(p);
+			force_sig(SIGKILL, p);
+		}
+
+	set_tsk_thread_flag(victim, TIF_MEMDIE);
+	force_sig(SIGKILL, victim);
 }
+#undef K
 
 /*
  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
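A side note on the K() macro the folded code keeps using: it converts a page count to kilobytes. A page is 2^PAGE_SHIFT bytes and a kilobyte is 2^10 bytes, so shifting left by PAGE_SHIFT - 10 multiplies the count by page_size / 1024; writing it as a shift keeps the conversion cheap and exact for any PAGE_SHIFT >= 10. A standalone sketch, assuming 4 KiB pages (PAGE_SHIFT = 12; the kernel takes the real value from its architecture headers):

#include <stdio.h>

/* Assumption for illustration: 4 KiB pages, i.e. PAGE_SHIFT = 12. */
#define PAGE_SHIFT 12
#define K(x) ((x) << (PAGE_SHIFT - 10))

int main(void)
{
	unsigned long pages = 300;

	/* 300 pages * 4096 bytes / 1024 = 1200 kB */
	printf("%lu pages = %lukB\n", pages, K(pages));
	return 0;
}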
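The for_each_process loop in the new code applies one filter to every task: it must share the victim's mm, sit in a different thread group, not be a kernel thread, and not be marked unkillable via oom_score_adj. A userspace restatement of that predicate over a toy task structure (the struct and its field names are illustrative, not the kernel's task_struct):

#include <stdbool.h>
#include <stdio.h>

#define OOM_SCORE_ADJ_MIN	(-1000)	/* "never OOM kill me" */
#define PF_KTHREAD		0x00200000	/* kernel thread flag */

/* Toy stand-in for task_struct; the fields mirror what the loop
 * actually inspects. */
struct task {
	const void *mm;		/* address space; shared by CLONE_VM tasks */
	int tgid;		/* thread group id */
	unsigned int flags;
	int oom_score_adj;
};

/* Same filter as the folded loop: kill q only if it shares the
 * victim's address space, belongs to another thread group, is not
 * a kernel thread, and has not been marked OOM-immune. */
static bool should_also_kill(const struct task *q, const struct task *victim)
{
	return q->mm == victim->mm &&
	       q->tgid != victim->tgid &&
	       !(q->flags & PF_KTHREAD) &&
	       q->oom_score_adj != OOM_SCORE_ADJ_MIN;
}

int main(void)
{
	int shared_mm = 0;	/* stands in for a shared mm_struct */
	struct task victim  = { &shared_mm, 100, 0, 0 };
	struct task sibling = { &shared_mm, 200, 0, 0 };

	printf("kill sibling too? %d\n", should_also_kill(&sibling, &victim));
	return 0;
}

Killing these mm-sharers is what breaks the mmap_sem livelock described in the comment: once they carry a pending fatal signal, a thread stuck allocating memory gains access to reserves and can make progress.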