Mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
mm: Enhance per process reclaim to consider shared pages
Some pages can be shared by several processes (for example, libc). In that case it is wasteful to reclaim them right away. This patch makes the VM keep such pages in memory until the last task that maps them tries to reclaim them, so a shared page is swapped out only after every mapping task has asked for it to be reclaimed. This feature does not handle non-linear mappings on ramfs, because walking them is time-consuming, gives no guarantee of reclaim, and is not a common case.

Change-Id: I7e5f34f2e947f5db6d405867fe2ad34863ca40f7
Signed-off-by: Sangseok Lee <sangseok.lee@lge.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Patch-mainline: linux-mm @ 9 May 2013 16:21:27
[vinmenon@codeaurora.org: trivial merge conflict fixes]
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
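As context for the diff below, here is a minimal, self-contained user-space sketch (plain C, not kernel code; all names are made up for illustration) of the rule the patch enforces: reclaiming a shared page from one task only drops that task's own mapping, and the page actually leaves memory only when the last task that maps it asks for reclaim. In the patch itself this rule is carried by the new vma argument threaded from reclaim_pages_from_list() down into try_to_unmap().

/*
 * Illustrative sketch only: models a page shared by several tasks and
 * shows that it stays "resident" until the last mapper reclaims it.
 */
#include <stdio.h>

struct fake_page {
        int mapcount;   /* how many tasks still map this page */
        int resident;   /* 1 while the page stays in memory   */
};

/* One task asks to reclaim the page from its own address space. */
static void reclaim_from_one_task(struct fake_page *page)
{
        if (page->mapcount > 1) {
                /* shared: drop only this task's mapping, keep the page */
                page->mapcount--;
                return;
        }
        /* last mapper: now it is worth swapping the page out */
        page->mapcount = 0;
        page->resident = 0;
}

int main(void)
{
        struct fake_page libc_page = { .mapcount = 3, .resident = 1 };

        for (int task = 0; task < 3; task++) {
                reclaim_from_one_task(&libc_page);
                printf("after task %d: mapcount=%d resident=%d\n",
                       task, libc_page.mapcount, libc_page.resident);
        }
        return 0;
}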
This commit is contained in:
parent c5280d96c8
commit ddecf36b47

9 changed files with 76 additions and 30 deletions
@@ -1208,7 +1208,7 @@ cont:
                         break;
         }
         pte_unmap_unlock(pte - 1, ptl);
-        reclaim_pages_from_list(&page_list);
+        reclaim_pages_from_list(&page_list, vma);
         if (addr != end)
                 goto cont;
 
@@ -75,7 +75,8 @@ struct page *ksm_might_need_to_copy(struct page *page,
 
 int page_referenced_ksm(struct page *page,
                         struct mem_cgroup *memcg, unsigned long *vm_flags);
-int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
+int try_to_unmap_ksm(struct page *page,
+                enum ttu_flags flags, struct vm_area_struct *vma);
 int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
                 struct vm_area_struct *, unsigned long, void *), void *arg);
 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
@@ -115,7 +116,8 @@ static inline int page_referenced_ksm(struct page *page,
         return 0;
 }
 
-static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
+static inline int try_to_unmap_ksm(struct page *page,
+                enum ttu_flags flags, struct vm_area_struct *target_vma)
 {
         return 0;
 }
@@ -12,7 +12,8 @@
 
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern unsigned long reclaim_pages_from_list(struct list_head *page_list);
+extern unsigned long reclaim_pages_from_list(struct list_head *page_list,
+                                        struct vm_area_struct *vma);
 
 /*
  * The anon_vma heads a list of private "related" vmas, to scan if
@@ -192,7 +193,8 @@ int page_referenced_one(struct page *, struct vm_area_struct *,
 
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
-int try_to_unmap(struct page *, enum ttu_flags flags);
+int try_to_unmap(struct page *, enum ttu_flags flags,
+                        struct vm_area_struct *vma);
 int try_to_unmap_one(struct page *, struct vm_area_struct *,
                         unsigned long address, enum ttu_flags flags);
 
@@ -259,7 +261,7 @@ static inline int page_referenced(struct page *page, int is_locked,
         return 0;
 }
 
-#define try_to_unmap(page, refs) SWAP_FAIL
+#define try_to_unmap(page, refs, vma) SWAP_FAIL
 
 static inline int page_mkclean(struct page *page)
 {
@@ -254,10 +254,8 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern unsigned long vma_address(struct page *page,
                                  struct vm_area_struct *vma);
-#endif
 #else /* !CONFIG_MMU */
 static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
 {
mm/ksm.c | 9
@@ -2003,7 +2003,8 @@ out:
         return referenced;
 }
 
-int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
+int try_to_unmap_ksm(struct page *page, enum ttu_flags flags,
+                        struct vm_area_struct *target_vma)
 {
         struct stable_node *stable_node;
         struct rmap_item *rmap_item;
@@ -2016,6 +2017,12 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
         stable_node = page_stable_node(page);
         if (!stable_node)
                 return SWAP_FAIL;
+
+        if (target_vma) {
+                unsigned long address = vma_address(page, target_vma);
+                ret = try_to_unmap_one(page, target_vma, address, flags);
+                goto out;
+        }
 again:
         hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                 struct anon_vma *anon_vma = rmap_item->anon_vma;
@@ -970,7 +970,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
         if (kill)
                 collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED);
 
-        ret = try_to_unmap(ppage, ttu);
+        ret = try_to_unmap(ppage, ttu, NULL);
         if (ret != SWAP_SUCCESS)
                 printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
                                 pfn, page_mapcount(ppage));
@@ -834,7 +834,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
         }
 
         /* Establish migration ptes or remove ptes */
-        try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+        try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS,
+                        NULL);
 
 skip_unmap:
         if (!page_mapped(page))
@@ -970,7 +971,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
         if (PageAnon(hpage))
                 anon_vma = page_get_anon_vma(hpage);
 
-        try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+        try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS,
+                        NULL);
 
         if (!page_mapped(hpage))
                 rc = move_to_new_page(new_hpage, hpage, 1, mode);
mm/rmap.c | 51
@@ -1451,13 +1451,16 @@ bool is_vma_temporary_stack(struct vm_area_struct *vma)
 
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
- * rmap method
+ * rmap method if @vma is NULL
  * @page: the page to unmap/unlock
  * @flags: action and flags
+ * @target_vma: vma for unmapping a @page
  *
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the anon_vma struct it points to.
  *
+ * If @target_vma isn't NULL, this function unmap a page from the vma
+ *
  * This function is only called from try_to_unmap/try_to_munlock for
  * anonymous pages.
  * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
@@ -1465,12 +1468,19 @@ bool is_vma_temporary_stack(struct vm_area_struct *vma)
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * 'LOCKED.
  */
-static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
+static int try_to_unmap_anon(struct page *page, enum ttu_flags flags,
+                                struct vm_area_struct *target_vma)
 {
+        int ret = SWAP_AGAIN;
+        unsigned long address;
         struct anon_vma *anon_vma;
         pgoff_t pgoff;
         struct anon_vma_chain *avc;
-        int ret = SWAP_AGAIN;
+
+        if (target_vma) {
+                address = vma_address(page, target_vma);
+                return try_to_unmap_one(page, target_vma, address, flags);
+        }
 
         anon_vma = page_lock_anon_vma_read(page);
         if (!anon_vma)
@@ -1479,7 +1489,6 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
         pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
         anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
                 struct vm_area_struct *vma = avc->vma;
-                unsigned long address;
 
                 /*
                  * During exec, a temporary VMA is setup and later moved.
|
||||||
* try_to_unmap_file - unmap/unlock file page using the object-based rmap method
|
* try_to_unmap_file - unmap/unlock file page using the object-based rmap method
|
||||||
* @page: the page to unmap/unlock
|
* @page: the page to unmap/unlock
|
||||||
* @flags: action and flags
|
* @flags: action and flags
|
||||||
|
* @target_vma: vma for unmapping @page
|
||||||
*
|
*
|
||||||
* Find all the mappings of a page using the mapping pointer and the vma chains
|
* Find all the mappings of a page using the mapping pointer and the vma chains
|
||||||
* contained in the address_space struct it points to.
|
* contained in the address_space struct it points to.
|
||||||
|
@@ -1518,7 +1528,8 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * 'LOCKED.
  */
-static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
+static int try_to_unmap_file(struct page *page, enum ttu_flags flags,
+                                struct vm_area_struct *target_vma)
 {
         struct address_space *mapping = page->mapping;
         pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
@@ -1528,17 +1539,27 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
         unsigned long max_nl_cursor = 0;
         unsigned long max_nl_size = 0;
         unsigned int mapcount;
+        unsigned long address;
 
         if (PageHuge(page))
                 pgoff = page->index << compound_order(page);
 
         mutex_lock(&mapping->i_mmap_mutex);
+        if (target_vma) {
+                /* We don't handle non-linear vma on ramfs */
+                if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
+                        goto out;
+                address = vma_address(page, target_vma);
+                ret = try_to_unmap_one(page, target_vma, address, flags);
+                goto out;
+        } else {
         vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
-                unsigned long address = vma_address(page, vma);
+                address = vma_address(page, vma);
                 ret = try_to_unmap_one(page, vma, address, flags);
                 if (ret != SWAP_AGAIN || !page_mapped(page))
                         goto out;
         }
+        }
 
         if (list_empty(&mapping->i_mmap_nonlinear))
                 goto out;
@@ -1618,9 +1639,12 @@ out:
  * try_to_unmap - try to remove all page table mappings to a page
  * @page: the page to get unmapped
  * @flags: action and flags
+ * @vma : target vma for reclaim
  *
  * Tries to remove all the page table entries which are mapping this
  * page, used in the pageout path. Caller must hold the page lock.
+ * If @vma is not NULL, this function try to remove @page from only @vma
+ * without peeking all mapped vma for @page.
  * Return values are:
  *
  * SWAP_SUCCESS - we succeeded in removing all mappings
@@ -1628,7 +1652,8 @@ out:
  * SWAP_FAIL - the page is unswappable
  * SWAP_MLOCK - page is mlocked.
  */
-int try_to_unmap(struct page *page, enum ttu_flags flags)
+int try_to_unmap(struct page *page, enum ttu_flags flags,
+                        struct vm_area_struct *vma)
 {
         int ret;
 
@@ -1636,11 +1661,11 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
         VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));
 
         if (unlikely(PageKsm(page)))
-                ret = try_to_unmap_ksm(page, flags);
+                ret = try_to_unmap_ksm(page, flags, vma);
         else if (PageAnon(page))
-                ret = try_to_unmap_anon(page, flags);
+                ret = try_to_unmap_anon(page, flags, vma);
         else
-                ret = try_to_unmap_file(page, flags);
+                ret = try_to_unmap_file(page, flags, vma);
         if (ret != SWAP_MLOCK && !page_mapped(page))
                 ret = SWAP_SUCCESS;
         return ret;
@@ -1666,11 +1691,11 @@ int try_to_munlock(struct page *page)
         VM_BUG_ON(!PageLocked(page) || PageLRU(page));
 
         if (unlikely(PageKsm(page)))
-                return try_to_unmap_ksm(page, TTU_MUNLOCK);
+                return try_to_unmap_ksm(page, TTU_MUNLOCK, NULL);
         else if (PageAnon(page))
-                return try_to_unmap_anon(page, TTU_MUNLOCK);
+                return try_to_unmap_anon(page, TTU_MUNLOCK, NULL);
         else
-                return try_to_unmap_file(page, TTU_MUNLOCK);
+                return try_to_unmap_file(page, TTU_MUNLOCK, NULL);
 }
 
 void __put_anon_vma(struct anon_vma *anon_vma)
mm/vmscan.c | 14
@@ -95,6 +95,13 @@ struct scan_control {
          * are scanned.
          */
         nodemask_t *nodemask;
+
+        /*
+         * Reclaim pages from a vma. If the page is shared by other tasks
+         * it is zapped from a vma without reclaim so it ends up remaining
+         * on memory until last task zap it.
+         */
+        struct vm_area_struct *target_vma;
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -974,7 +981,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                  * processes. Try to unmap it here.
                  */
                 if (page_mapped(page) && mapping) {
-                        switch (try_to_unmap(page, ttu_flags)) {
+                        switch (try_to_unmap(page,
+                                        ttu_flags, sc->target_vma)) {
                         case SWAP_FAIL:
                                 goto activate_locked;
                         case SWAP_AGAIN:
@@ -1175,7 +1183,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 }
 
 #ifdef CONFIG_PROCESS_RECLAIM
-unsigned long reclaim_pages_from_list(struct list_head *page_list)
+unsigned long reclaim_pages_from_list(struct list_head *page_list,
+                                        struct vm_area_struct *vma)
 {
         struct scan_control sc = {
                 .gfp_mask = GFP_KERNEL,
@@ -1183,6 +1192,7 @@ unsigned long reclaim_pages_from_list(struct list_head *page_list)
                 .may_writepage = 1,
                 .may_unmap = 1,
                 .may_swap = 1,
+                .target_vma = vma,
         };
 
         unsigned long nr_reclaimed;