mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
mm: frontswap: split out __frontswap_unuse_pages
An attempt at making frontswap_shrink shorter and more readable. This patch splits out the walk over the swap list that finds an entry with enough pages to unuse. Also, assert that the internal __frontswap_unuse_pages() is called under the swap lock, since that code previously ran directly inside the lock.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 96253444db
commit f116695a50
1 changed file with 39 additions and 20 deletions
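The helper introduced below must only run with swap_lock held, which the patch makes explicit with assert_spin_locked(). As a minimal illustration of that contract, here is a standalone userspace C sketch (all names here are hypothetical stand-ins, not kernel API): a helper that asserts the caller took the lock, walks a list for an entry with enough pages, and reports its result through out-parameters and a 0/-errno return, the same shape as __frontswap_unuse_pages().

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the swap list and swap_lock. */
struct entry { unsigned long pages; };

static struct entry entries[] = { { 100 }, { 400 }, { 50 } };
static const int nentries = 3;
static bool lock_held;          /* models swap_lock being taken */

/*
 * Must be entered with the lock held; the assert mirrors the
 * patch's assert_spin_locked(&swap_lock).  On success, returns 0
 * and reports through the out-parameters which entry was picked;
 * otherwise returns a negative errno, as __frontswap_unuse_pages
 * does.
 */
static int unuse_pages(unsigned long total, unsigned long *unused, int *id)
{
        int i;

        assert(lock_held);
        for (i = 0; i < nentries; i++) {
                if (entries[i].pages >= total) {
                        *unused = total;
                        *id = i;
                        return 0;
                }
        }
        return -EINVAL;
}

int main(void)
{
        unsigned long unused;
        int id;

        lock_held = true;               /* "spin_lock(&swap_lock)" */
        if (unuse_pages(300, &unused, &id) == 0)
                printf("entry %d can give up %lu pages\n", id, unused);
        lock_held = false;              /* "spin_unlock(&swap_lock)" */
        return 0;
}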
mm/frontswap.c
@@ -230,6 +230,41 @@ static unsigned long __frontswap_curr_pages(void)
         return totalpages;
 }
 
+static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
+                                int *swapid)
+{
+        int ret = -EINVAL;
+        struct swap_info_struct *si = NULL;
+        int si_frontswap_pages;
+        unsigned long total_pages_to_unuse = total;
+        unsigned long pages = 0, pages_to_unuse = 0;
+        int type;
+
+        assert_spin_locked(&swap_lock);
+        for (type = swap_list.head; type >= 0; type = si->next) {
+                si = swap_info[type];
+                si_frontswap_pages = atomic_read(&si->frontswap_pages);
+                if (total_pages_to_unuse < si_frontswap_pages) {
+                        pages = pages_to_unuse = total_pages_to_unuse;
+                } else {
+                        pages = si_frontswap_pages;
+                        pages_to_unuse = 0; /* unuse all */
+                }
+                /* ensure there is enough RAM to fetch pages from frontswap */
+                if (security_vm_enough_memory_mm(current->mm, pages)) {
+                        ret = -ENOMEM;
+                        continue;
+                }
+                vm_unacct_memory(pages);
+                *unused = pages_to_unuse;
+                *swapid = type;
+                ret = 0;
+                break;
+        }
+
+        return ret;
+}
+
 /*
  * Frontswap, like a true swap device, may unnecessarily retain pages
  * under certain circumstances; "shrink" frontswap is essentially a
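One detail of the loop above that is easy to misread: pages_to_unuse == 0 is a sentinel meaning "unuse everything on this device" (the value is later passed to try_to_unuse(), for which 0 means no limit), while pages always holds the number of pages to account against available memory. A self-contained sketch of just that decision, with illustrative names:

#include <stdio.h>

/*
 * Mirrors the per-device choice in __frontswap_unuse_pages():
 * either take exactly the remaining budget from this device, or
 * take the whole device, using 0 as the "unuse all" sentinel.
 * Names are illustrative, not kernel API.
 */
static void choose_pages(unsigned long budget, unsigned long device_pages,
                         unsigned long *pages, unsigned long *pages_to_unuse)
{
        if (budget < device_pages) {
                *pages = *pages_to_unuse = budget;
        } else {
                *pages = device_pages;  /* account for every page here */
                *pages_to_unuse = 0;    /* sentinel: unuse all */
        }
}

int main(void)
{
        unsigned long pages, to_unuse;

        choose_pages(100, 400, &pages, &to_unuse);
        printf("budget 100, device 400 -> account %lu, unuse %lu\n",
               pages, to_unuse);        /* account 100, unuse 100 */

        choose_pages(400, 100, &pages, &to_unuse);
        printf("budget 400, device 100 -> account %lu, unuse %lu (0 = all)\n",
               pages, to_unuse);        /* account 100, unuse 0 = all */
        return 0;
}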
@@ -240,11 +275,9 @@ static unsigned long __frontswap_curr_pages(void)
  */
 void frontswap_shrink(unsigned long target_pages)
 {
-        struct swap_info_struct *si = NULL;
-        int si_frontswap_pages;
         unsigned long total_pages = 0, total_pages_to_unuse;
-        unsigned long pages = 0, pages_to_unuse = 0;
-        int type;
+        unsigned long pages_to_unuse = 0;
+        int type, ret;
         bool locked = false;
 
         /*
@@ -258,22 +291,8 @@ void frontswap_shrink(unsigned long target_pages)
         if (total_pages <= target_pages)
                 goto out;
         total_pages_to_unuse = total_pages - target_pages;
-        for (type = swap_list.head; type >= 0; type = si->next) {
-                si = swap_info[type];
-                si_frontswap_pages = atomic_read(&si->frontswap_pages);
-                if (total_pages_to_unuse < si_frontswap_pages) {
-                        pages = pages_to_unuse = total_pages_to_unuse;
-                } else {
-                        pages = si_frontswap_pages;
-                        pages_to_unuse = 0; /* unuse all */
-                }
-                /* ensure there is enough RAM to fetch pages from frontswap */
-                if (security_vm_enough_memory_mm(current->mm, pages))
-                        continue;
-                vm_unacct_memory(pages);
-                break;
-        }
-        if (type < 0)
+        ret = __frontswap_unuse_pages(total_pages_to_unuse, &pages_to_unuse, &type);
+        if (ret < 0)
                 goto out;
         locked = false;
         spin_unlock(&swap_lock);
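Taken together, the two hunks leave frontswap_shrink() with a straight-line shape: take swap_lock, size the job, delegate the list walk to the helper, drop the lock, then do the slow unuse work unlocked (the locked flag in the real function tells a shared exit path whether the lock still needs dropping, which is why it is cleared just before the unlock). A compilable userspace outline of that flow, reusing the hypothetical stand-ins from the first sketch; this is an illustration, not the verbatim kernel function:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins; see the first sketch. */
static bool lock_held;
static unsigned long frontswap_total = 500;

/* Stub helper: pretend device 0 always has enough pages. */
static int unuse_pages(unsigned long total, unsigned long *unused, int *id)
{
        assert(lock_held);      /* the contract the patch asserts */
        *unused = total;
        *id = 0;
        return 0;
}

/*
 * Post-patch control flow of frontswap_shrink(), reduced to its
 * shape: lock, size the job, delegate the list walk, unlock, then
 * do the slow unuse work outside the lock.
 */
static void shrink(unsigned long target_pages)
{
        unsigned long total_pages, pages_to_unuse = 0;
        int type, ret;

        lock_held = true;                       /* spin_lock(&swap_lock) */
        total_pages = frontswap_total;          /* __frontswap_curr_pages() */
        if (total_pages <= target_pages)
                goto out;
        ret = unuse_pages(total_pages - target_pages, &pages_to_unuse, &type);
        if (ret < 0)
                goto out;
        lock_held = false;                      /* spin_unlock(&swap_lock) */
        /* stands in for try_to_unuse(type, true, pages_to_unuse) */
        printf("unuse %lu pages from device %d\n", pages_to_unuse, type);
        return;
out:
        lock_held = false;                      /* spin_unlock(&swap_lock) */
}

int main(void)
{
        shrink(200);
        return 0;
}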