ANDROID: Remove conflicting Samsung options for upstream changes
In order to bring lowmemorykiller in sync with Google sources, the following Samsung-specific changes have been removed: SEC_TIMEOUT_LOW_MEMORY_KILLER, SEC_DEBUG_LMK_MEMINFO, and SEC_DEBUG_LMK_COUNT_INFO. These options are not used upstream and conflict with the upstream changes.
This commit is contained in:
parent
9697139d52
commit
d132d6387f
|
@ -536,7 +536,6 @@ CONFIG_STAGING=y
|
|||
CONFIG_ANDROID=y
|
||||
CONFIG_ASHMEM=y
|
||||
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
|
||||
CONFIG_SEC_TIMEOUT_LOW_MEMORY_KILLER=y
|
||||
CONFIG_ANDROID_INTF_ALARM_DEV=y
|
||||
CONFIG_ION=y
|
||||
CONFIG_ION_MSM=y
|
||||
|
|
|
@ -535,7 +535,6 @@ CONFIG_STAGING=y
|
|||
CONFIG_ANDROID=y
|
||||
CONFIG_ASHMEM=y
|
||||
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
|
||||
CONFIG_SEC_TIMEOUT_LOW_MEMORY_KILLER=y
|
||||
CONFIG_ANDROID_INTF_ALARM_DEV=y
|
||||
CONFIG_ION=y
|
||||
CONFIG_ION_MSM=y
|
||||
|
|
|
@ -536,7 +536,6 @@ CONFIG_STAGING=y
|
|||
CONFIG_ANDROID=y
|
||||
CONFIG_ASHMEM=y
|
||||
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
|
||||
CONFIG_SEC_TIMEOUT_LOW_MEMORY_KILLER=y
|
||||
CONFIG_ANDROID_INTF_ALARM_DEV=y
|
||||
CONFIG_ION=y
|
||||
CONFIG_ION_MSM=y
|
||||
|
|
|
@ -535,7 +535,6 @@ CONFIG_STAGING=y
|
|||
CONFIG_ANDROID=y
|
||||
CONFIG_ASHMEM=y
|
||||
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
|
||||
CONFIG_SEC_TIMEOUT_LOW_MEMORY_KILLER=y
|
||||
CONFIG_ANDROID_INTF_ALARM_DEV=y
|
||||
CONFIG_ION=y
|
||||
CONFIG_ION_MSM=y
|
||||
|
|
|
@ -69,28 +69,6 @@ config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
|
|||
/sys/module/lowmemorykiller/parameters/adj and convert them
|
||||
to oom_score_adj values.
|
||||
|
||||
config SEC_TIMEOUT_LOW_MEMORY_KILLER
|
||||
bool "Android timeout Low Memory Killer"
|
||||
default n
|
||||
help
|
||||
This enables 'Android-Style oom-killer'
|
||||
The way to select victim by oom-killer provided by
|
||||
linux kernel is totally different from android policy.
|
||||
Hence, it makes more sense that we select the oom victim
|
||||
as android does when LMK is invoked.
|
||||
|
||||
config SEC_DEBUG_LMK_MEMINFO
|
||||
bool "Show Meminfo when lmk invoked"
|
||||
default n
|
||||
help
|
||||
Show memory information when lmk kills process
|
||||
|
||||
config SEC_DEBUG_LMK_COUNT_INFO
|
||||
bool "Record LMK execution count"
|
||||
default n
|
||||
help
|
||||
Show LMK execution count information when lmk invoked
|
||||
|
||||
config ANDROID_INTF_ALARM_DEV
|
||||
bool "Android alarm driver"
|
||||
depends on RTC_CLASS
|
||||
|
|
|
@ -68,6 +68,11 @@ static unsigned long lowmem_deathpending_timeout;
|
|||
pr_info(x); \
|
||||
} while (0)
|
||||
|
||||
#if defined(CONFIG_ZSWAP)
|
||||
extern u64 zswap_pool_pages;
|
||||
extern atomic_t zswap_stored_pages;
|
||||
#endif
|
||||
|
||||
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
|
||||
{
|
||||
struct task_struct *tsk;
|
||||
|
@ -117,7 +122,8 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
|
|||
struct task_struct *p;
|
||||
short oom_score_adj;
|
||||
|
||||
if (tsk->flags & PF_KTHREAD)
|
||||
if (tsk->flags & PF_KTHREAD ||
|
||||
tsk->state & TASK_UNINTERRUPTIBLE)
|
||||
continue;
|
||||
|
||||
p = find_lock_task_mm(tsk);
|
||||
|
@ -136,6 +142,15 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
|
|||
continue;
|
||||
}
|
||||
tasksize = get_mm_rss(p->mm);
|
||||
#if defined(CONFIG_ZSWAP)
|
||||
if (atomic_read(&zswap_stored_pages)) {
|
||||
lowmem_print(3, "shown tasksize : %d\n", tasksize);
|
||||
tasksize += (int)zswap_pool_pages * get_mm_counter(p->mm, MM_SWAPENTS)
|
||||
/ atomic_read(&zswap_stored_pages);
|
||||
lowmem_print(3, "real tasksize : %d\n", tasksize);
|
||||
}
|
||||
#endif
|
||||
|
||||
task_unlock(p);
|
||||
if (tasksize <= 0)
|
||||
continue;
|
||||
|
|
|
@ -2598,10 +2598,6 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
|
|||
return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SEC_TIMEOUT_LOW_MEMORY_KILLER
|
||||
extern int timeout_lmk(void);
|
||||
#endif
|
||||
|
||||
static inline struct page *
|
||||
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
|
||||
struct zonelist *zonelist, enum zone_type high_zoneidx,
|
||||
|
@ -2616,9 +2612,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
|
|||
bool sync_migration = false;
|
||||
bool deferred_compaction = false;
|
||||
bool contended_compaction = false;
|
||||
#ifdef CONFIG_SEC_TIMEOUT_LOW_MEMORY_KILLER
|
||||
unsigned long lmk_timeout = jiffies + HZ/4;
|
||||
#endif
|
||||
/*
|
||||
* In the slowpath, we sanity check order to avoid ever trying to
|
||||
* reclaim >= MAX_ORDER areas which will never succeed. Callers may
|
||||
|
@ -2735,71 +2728,54 @@ rebalance:
|
|||
if (page)
|
||||
goto got_pg;
|
||||
|
||||
pages_reclaimed += did_some_progress;
|
||||
|
||||
if (boot_mode == 1)
|
||||
goto no_OOMK;
|
||||
if (!(gfp_mask & __GFP_FS) || (gfp_mask & __GFP_NORETRY))
|
||||
goto no_OOMK;
|
||||
|
||||
/*
|
||||
* If we failed to make any progress reclaiming, then we are
|
||||
* running out of options and have to consider going OOM
|
||||
*/
|
||||
#ifdef CONFIG_SEC_TIMEOUT_LOW_MEMORY_KILLER
|
||||
if (!did_some_progress || time_after(jiffies, lmk_timeout)) {
|
||||
if (!(gfp_mask & __GFP_NOFAIL)) {
|
||||
if ((order > PAGE_ALLOC_COSTLY_ORDER)
|
||||
|| (high_zoneidx < ZONE_NORMAL)
|
||||
|| (gfp_mask & __GFP_THISNODE))
|
||||
goto no_OOMK;
|
||||
}
|
||||
pr_info("time's up pages_reclaimed:%lu, order:%d, gfp:0x%x\n",
|
||||
pages_reclaimed, order, gfp_mask);
|
||||
if (timeout_lmk()) {
|
||||
lmk_timeout = jiffies + HZ/4;
|
||||
goto no_OOMK;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
if (!did_some_progress) {
|
||||
if (oom_killer_disabled)
|
||||
goto nopage;
|
||||
/* Coredumps can quickly deplete all memory reserves */
|
||||
if ((current->flags & PF_DUMPCORE) &&
|
||||
!(gfp_mask & __GFP_NOFAIL))
|
||||
goto nopage;
|
||||
|
||||
page = __alloc_pages_may_oom(gfp_mask, order,
|
||||
zonelist, high_zoneidx,
|
||||
nodemask, preferred_zone,
|
||||
migratetype);
|
||||
if (page)
|
||||
goto got_pg;
|
||||
|
||||
if (!(gfp_mask & __GFP_NOFAIL)) {
|
||||
/*
|
||||
* The oom killer is not called for high-order
|
||||
* allocations that may fail, so if no progress
|
||||
* is being made, there are no other options and
|
||||
* retrying is unlikely to help.
|
||||
*/
|
||||
if (order > PAGE_ALLOC_COSTLY_ORDER)
|
||||
if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
|
||||
if (oom_killer_disabled)
|
||||
goto nopage;
|
||||
/*
|
||||
* The oom killer is not called for lowmem
|
||||
* allocations to prevent needlessly killing
|
||||
* innocent tasks.
|
||||
*/
|
||||
if (high_zoneidx < ZONE_NORMAL)
|
||||
/* Coredumps can quickly deplete all memory reserves */
|
||||
if ((current->flags & PF_DUMPCORE) &&
|
||||
!(gfp_mask & __GFP_NOFAIL))
|
||||
goto nopage;
|
||||
|
||||
page = __alloc_pages_may_oom(gfp_mask, order,
|
||||
zonelist, high_zoneidx,
|
||||
nodemask, preferred_zone,
|
||||
migratetype);
|
||||
if (page)
|
||||
goto got_pg;
|
||||
|
||||
if (!(gfp_mask & __GFP_NOFAIL)) {
|
||||
/*
|
||||
* The oom killer is not called for high-order
|
||||
* allocations that may fail, so if no progress
|
||||
* is being made, there are no other options and
|
||||
* retrying is unlikely to help.
|
||||
*/
|
||||
if (order > PAGE_ALLOC_COSTLY_ORDER)
|
||||
goto nopage;
|
||||
/*
|
||||
* The oom killer is not called for lowmem
|
||||
* allocations to prevent needlessly killing
|
||||
* innocent tasks.
|
||||
*/
|
||||
if (high_zoneidx < ZONE_NORMAL)
|
||||
goto nopage;
|
||||
}
|
||||
|
||||
goto restart;
|
||||
}
|
||||
|
||||
goto restart;
|
||||
}
|
||||
no_OOMK:
|
||||
|
||||
/* Check if we should retry the allocation */
|
||||
pages_reclaimed += did_some_progress;
|
||||
if (should_alloc_retry(gfp_mask, order, did_some_progress,
|
||||
pages_reclaimed)) {
|
||||
/* Wait for some write requests to complete then retry */
|
||||
|
|
Loading…
Reference in New Issue