Memory controller: make page_referenced() cgroup aware
Make page_referenced() cgroup aware. Without this patch, page_referenced()
can cause a page to be skipped while reclaiming pages. This patch ensures
that other cgroups do not hold pages in a particular cgroup hostage. It is
required to ensure that shared pages are freed from a cgroup when they are
not actively referenced from the cgroup that brought them in.

Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8697d33194
commit bed7161a51

5 changed files with 40 additions and 10 deletions
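At its core the change adds an ownership filter to the reference-counting
walk: when reclaim runs on behalf of a cgroup, mappings whose mm belongs to
a different cgroup are skipped, so their references no longer keep the page
on the active list. Below is a minimal userspace sketch of that filter, with
simplified struct definitions and a hypothetical owner_of() helper standing
in for the kernel's mm_cgroup(); it is an illustration of the test the patch
adds, not kernel code.

#include <stdio.h>

struct mem_cgroup { int id; };                       /* opaque in the kernel */
struct mm_struct { struct mem_cgroup *mem_cgroup; };
struct vm_area_struct { struct mm_struct *vm_mm; };

/* hypothetical stand-in for the kernel's mm_cgroup() accessor */
static struct mem_cgroup *owner_of(struct mm_struct *mm)
{
        return mm->mem_cgroup;
}

/*
 * The test the patch adds to page_referenced_anon() and
 * page_referenced_file(): count a reference only for global reclaim
 * (mem_cont == NULL) or when the mapping's mm belongs to the cgroup
 * being reclaimed.
 */
static int counts_for(struct vm_area_struct *vma, struct mem_cgroup *mem_cont)
{
        if (mem_cont && owner_of(vma->vm_mm) != mem_cont)
                return 0;       /* reference from a foreign cgroup: skip */
        return 1;
}

int main(void)
{
        struct mem_cgroup a = { 1 }, b = { 2 };
        struct mm_struct mm_a = { &a }, mm_b = { &b };
        struct vm_area_struct va = { &mm_a }, vb = { &mm_b };

        printf("%d %d %d\n",
               counts_for(&va, &a),     /* 1: same cgroup, counted    */
               counts_for(&vb, &a),     /* 0: foreign cgroup, skipped */
               counts_for(&vb, NULL));  /* 1: global reclaim, counted */
        return 0;
}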
include/linux/memcontrol.h

@@ -43,6 +43,7 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        int active);
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm);
+extern struct mem_cgroup *mm_cgroup(struct mm_struct *mm);

 static inline void mem_cgroup_uncharge_page(struct page *page)
 {
@@ -93,6 +94,11 @@ static inline int mem_cgroup_cache_charge(struct page *page,
        return 0;
 }

+static inline struct mem_cgroup *mm_cgroup(struct mm_struct *mm)
+{
+       return NULL;
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */

 #endif /* _LINUX_MEMCONTROL_H */
include/linux/rmap.h

@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/spinlock.h>
+#include <linux/memcontrol.h>

 /*
  * The anon_vma heads a list of private "related" vmas, to scan if
@@ -86,7 +87,7 @@ static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
 /*
  * Called from mm/vmscan.c to handle paging out
  */
-int page_referenced(struct page *, int is_locked);
+int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt);
 int try_to_unmap(struct page *, int ignore_refs);

 /*
@@ -114,7 +115,7 @@ int page_mkclean(struct page *);
 #define anon_vma_prepare(vma)  (0)
 #define anon_vma_link(vma)     do {} while (0)

-#define page_referenced(page,l) TestClearPageReferenced(page)
+#define page_referenced(page,l,cnt) TestClearPageReferenced(page)
 #define try_to_unmap(page, refs) SWAP_FAIL

 static inline int page_mkclean(struct page *page)
mm/memcontrol.c

@@ -110,6 +110,11 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
                                struct mem_cgroup, css);
 }

+inline struct mem_cgroup *mm_cgroup(struct mm_struct *mm)
+{
+       return rcu_dereference(mm->mem_cgroup);
+}
+
 void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
 {
        struct mem_cgroup *mem;
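mm_cgroup() is deliberately a bare rcu_dereference(), so a caller that needs
a stable answer would bracket the read with the RCU read lock. A kernel-style
sketch of such a caller follows; mm_in_cgroup() is a hypothetical helper for
illustration, not part of the patch.

/*
 * Hypothetical helper (not from the patch): test whether an mm is
 * owned by the cgroup being reclaimed.  mm->mem_cgroup is
 * RCU-managed, so the rcu_dereference() inside mm_cgroup() is kept
 * inside an RCU read-side critical section.
 */
static int mm_in_cgroup(struct mm_struct *mm, struct mem_cgroup *mem_cont)
{
        int same;

        rcu_read_lock();
        same = mm_cgroup(mm) == mem_cont;
        rcu_read_unlock();

        return same;
}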
mm/rmap.c (30 changed lines)

@@ -302,7 +302,8 @@ out:
        return referenced;
 }

-static int page_referenced_anon(struct page *page)
+static int page_referenced_anon(struct page *page,
+                               struct mem_cgroup *mem_cont)
 {
        unsigned int mapcount;
        struct anon_vma *anon_vma;
@@ -315,6 +316,13 @@ static int page_referenced_anon(struct page *page)

        mapcount = page_mapcount(page);
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+               /*
+                * If we are reclaiming on behalf of a cgroup, skip
+                * counting on behalf of references from different
+                * cgroups
+                */
+               if (mem_cont && (mm_cgroup(vma->vm_mm) != mem_cont))
+                       continue;
                referenced += page_referenced_one(page, vma, &mapcount);
                if (!mapcount)
                        break;
@@ -335,7 +343,8 @@ static int page_referenced_anon(struct page *page)
  *
  * This function is only called from page_referenced for object-based pages.
  */
-static int page_referenced_file(struct page *page)
+static int page_referenced_file(struct page *page,
+                               struct mem_cgroup *mem_cont)
 {
        unsigned int mapcount;
        struct address_space *mapping = page->mapping;
@@ -368,6 +377,13 @@ static int page_referenced_file(struct page *page)
        mapcount = page_mapcount(page);

        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+               /*
+                * If we are reclaiming on behalf of a cgroup, skip
+                * counting on behalf of references from different
+                * cgroups
+                */
+               if (mem_cont && (mm_cgroup(vma->vm_mm) != mem_cont))
+                       continue;
                if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
                                  == (VM_LOCKED|VM_MAYSHARE)) {
                        referenced++;
@@ -390,7 +406,8 @@ static int page_referenced_file(struct page *page)
  * Quick test_and_clear_referenced for all mappings to a page,
  * returns the number of ptes which referenced the page.
  */
-int page_referenced(struct page *page, int is_locked)
+int page_referenced(struct page *page, int is_locked,
+                       struct mem_cgroup *mem_cont)
 {
        int referenced = 0;

@@ -402,14 +419,15 @@ int page_referenced(struct page *page, int is_locked)

        if (page_mapped(page) && page->mapping) {
                if (PageAnon(page))
-                       referenced += page_referenced_anon(page);
+                       referenced += page_referenced_anon(page, mem_cont);
                else if (is_locked)
-                       referenced += page_referenced_file(page);
+                       referenced += page_referenced_file(page, mem_cont);
                else if (TestSetPageLocked(page))
                        referenced++;
                else {
                        if (page->mapping)
-                               referenced += page_referenced_file(page);
+                               referenced +=
+                                       page_referenced_file(page, mem_cont);
                        unlock_page(page);
                }
        }
mm/vmscan.c

@@ -503,7 +503,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        goto keep_locked;
                }

-               referenced = page_referenced(page, 1);
+               referenced = page_referenced(page, 1, sc->mem_cgroup);
                /* In active use or really unfreeable? Activate it. */
                if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
                                        referenced && page_mapping_inuse(page))
@@ -1057,7 +1057,7 @@ force_reclaim_mapped:
                if (page_mapped(page)) {
                        if (!reclaim_mapped ||
                            (total_swap_pages == 0 && PageAnon(page)) ||
-                           page_referenced(page, 0)) {
+                           page_referenced(page, 0, sc->mem_cgroup)) {
                                list_add(&page->lru, &l_active);
                                continue;
                        }
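With sc->mem_cgroup wired through, the same reclaim paths serve both modes:
global reclaim passes NULL, which makes the new filter a no-op, while cgroup
reclaim passes the cgroup under pressure. A caller-side sketch (the page and
memcg variables are hypothetical, for illustration only):

/* global reclaim: no filter, every mapping's references count */
referenced = page_referenced(page, 0, NULL);

/* cgroup reclaim: only references from mms owned by 'memcg' count */
referenced = page_referenced(page, 0, memcg);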