lowmemorykiller: Don't count swap cache pages twice

The lowmem_shrink function discounts all of the swap cache pages from
the file cache count. The zone-aware code also discounts all of the
file cache pages from certain zones, and that per-zone count still
includes the zone's swap cache pages. As a result, some swap cache
pages are discounted twice, which can make the low memory killer
unnecessarily aggressive.

Fix the low memory killer to discount the swap cache pages only once.
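
To see where the double count comes from, here is the arithmetic as a
hedged C sketch; the numbers and variable names are made up for the
example and are not the driver's actual values or locals:

/*
 * Suppose one zone being discounted holds:
 *   file  = 1000  pages in NR_FILE_PAGES (this count includes swap cache)
 *   shmem =  100  pages in NR_SHMEM
 *   swpc  =   50  swap cache pages (already inside `file`)
 *
 * lowmem_shrink() has already subtracted every swap cache page from
 * other_file globally. If the zone-aware path then subtracts the
 * zone's full file count:
 *
 *   other_file -= file - shmem;           // 900 removed: the 50 swap
 *                                         // cache pages go out twice
 *
 * With the fix, the zone's swap cache pages are excluded:
 *
 *   other_file -= file - shmem - swpc;    // 850 removed: each page is
 *                                         // discounted exactly once
 */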

Change-Id: I650bbfbf0fbbabd01d82bdb3502b57ff59c3e14f
Signed-off-by: Liam Mark <lmark@codeaurora.org>
Signed-off-by: Kevin F. Haggerty <haggertk@lineageos.org>

--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c

@@ -144,7 +144,8 @@ void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
 			if (other_file != NULL)
 				*other_file -= zone_page_state(zone,
 							       NR_FILE_PAGES)
-					      - zone_page_state(zone, NR_SHMEM);
+					      - zone_page_state(zone, NR_SHMEM)
+					      - zone_page_state(zone, NR_SWAPCACHE);
 		} else if (zone_idx < classzone_idx) {
 			if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0)) {
 				if (!use_cma_pages) {
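
After this change, the amount subtracted for each such zone is the
zone's non-shmem, non-swap-cache file pages, matching what is left in
other_file after the global discounts. A hypothetical helper (not part
of the driver, shown only to make the intent explicit) would read:

/* Hypothetical helper -- not in the driver -- expressing the
 * per-zone discount after this change. */
static unsigned long zone_lmk_file_pages(struct zone *zone)
{
	return zone_page_state(zone, NR_FILE_PAGES)
	       - zone_page_state(zone, NR_SHMEM)
	       - zone_page_state(zone, NR_SWAPCACHE);
}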

--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h

@@ -158,6 +158,7 @@ enum zone_stat_item {
 	NR_CMA_ACTIVE_FILE,
 	NR_CMA_UNEVICTABLE,
 #endif
+	NR_SWAPCACHE,
 	NR_VM_ZONE_STAT_ITEMS };
 /*
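
Zone stat items are reported by position through vmstat_text[], so
NR_SWAPCACHE is added here immediately before NR_VM_ZONE_STAT_ITEMS
and the matching "nr_swapcache" string is added at the same position
in mm/vmstat.c below; the enum and the string array must stay in
lockstep.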

--- a/mm/swap_state.c
+++ b/mm/swap_state.c

@@ -97,6 +97,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
 	if (likely(!error)) {
 		address_space->nrpages++;
 		__inc_zone_page_state(page, NR_FILE_PAGES);
+		__inc_zone_page_state(page, NR_SWAPCACHE);
 		INC_CACHE_INFO(add_total);
 	}
 	spin_unlock_irq(&address_space->tree_lock);
@@ -149,6 +150,7 @@ void __delete_from_swap_cache(struct page *page)
 	ClearPageSwapCache(page);
 	address_space->nrpages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
+	__dec_zone_page_state(page, NR_SWAPCACHE);
 	INC_CACHE_INFO(del_total);
 }
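
__add_to_swap_cache() and __delete_from_swap_cache() are the points
where pages enter and leave the swap cache, and both already adjust
NR_FILE_PAGES under the address space's tree_lock. Updating
NR_SWAPCACHE at the same two points, under the same lock, keeps it an
exact subset count of NR_FILE_PAGES, which is what makes the per-zone
subtraction in tune_lmk_zone_param() safe.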

--- a/mm/vmstat.c
+++ b/mm/vmstat.c

@@ -733,6 +733,7 @@ const char * const vmstat_text[] = {
 	"nr_cma_active_file",
 	"nr_cma_unevictable",
 #endif
+	"nr_swapcache",
 	"nr_dirty_threshold",
 	"nr_dirty_background_threshold",