mm/huge_memory.c: use new hashtable implementation
Switch hugemem to use the new hashtable implementation. This reduces the amount of generic unrelated code in the hugemem.

This also removes the dynamic allocation of the hash table. The upside is that we save a pointer dereference when accessing the hashtable, but we lose 8KB if CONFIG_TRANSPARENT_HUGEPAGE is enabled but the processor doesn't support hugepages.

Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent a9aacbccf3
commit 43b5fbbd28
1 changed file with 9 additions and 45 deletions
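For context, the <linux/hashtable.h> API adopted below replaces an open-coded bucket array with a table sized at compile time plus generic add/lookup/delete helpers. What follows is a minimal, hypothetical sketch of that pattern (my_hash, my_item and the helper functions are illustrative names, not part of this patch); the calls mirror those in the diff, including the hlist_node cursor that hash_for_each_possible still took in this kernel generation:

#include <linux/hashtable.h>

#define MY_HASH_BITS 10                 /* 2^10 = 1024 buckets, fixed at compile time */
static DEFINE_HASHTABLE(my_hash, MY_HASH_BITS);

struct my_item {
        unsigned long key;
        struct hlist_node hash;         /* hash linkage embedded in the object */
};

static void my_insert(struct my_item *item, unsigned long key)
{
        item->key = key;
        hash_add(my_hash, &item->hash, key);    /* helper derives the bucket from the key */
}

static struct my_item *my_lookup(unsigned long key)
{
        struct my_item *item;
        struct hlist_node *node;

        /* walk only the one bucket that 'key' hashes to */
        hash_for_each_possible(my_hash, item, node, hash, key)
                if (item->key == key)
                        return item;

        return NULL;
}

static void my_remove(struct my_item *item)
{
        hash_del(&item->hash);          /* counterpart of the open-coded hlist_del() */
}

This is also where the commit message's 8KB figure comes from: MM_SLOTS_HASH_BITS is 10, so DEFINE_HASHTABLE reserves 2^10 = 1024 struct hlist_head buckets, i.e. 8KB with 8-byte pointers, allocated statically rather than via kzalloc().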
mm/huge_memory.c
@@ -20,6 +20,7 @@
 #include <linux/mman.h>
 #include <linux/pagemap.h>
 #include <linux/migrate.h>
+#include <linux/hashtable.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -62,12 +63,11 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
 
 static int khugepaged(void *none);
-static int mm_slots_hash_init(void);
 static int khugepaged_slab_init(void);
 static void khugepaged_slab_free(void);
 
-#define MM_SLOTS_HASH_HEADS 1024
-static struct hlist_head *mm_slots_hash __read_mostly;
+#define MM_SLOTS_HASH_BITS 10
+static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
 static struct kmem_cache *mm_slot_cache __read_mostly;
 
 /**
@@ -634,12 +634,6 @@ static int __init hugepage_init(void)
        if (err)
                goto out;
 
-       err = mm_slots_hash_init();
-       if (err) {
-               khugepaged_slab_free();
-               goto out;
-       }
-
        register_shrinker(&huge_zero_page_shrinker);
 
        /*
@@ -1908,12 +1902,6 @@ static int __init khugepaged_slab_init(void)
        return 0;
 }
 
-static void __init khugepaged_slab_free(void)
-{
-       kmem_cache_destroy(mm_slot_cache);
-       mm_slot_cache = NULL;
-}
-
 static inline struct mm_slot *alloc_mm_slot(void)
 {
        if (!mm_slot_cache)     /* initialization failed */
@@ -1926,47 +1914,23 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
        kmem_cache_free(mm_slot_cache, mm_slot);
 }
 
-static int __init mm_slots_hash_init(void)
-{
-       mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
-                               GFP_KERNEL);
-       if (!mm_slots_hash)
-               return -ENOMEM;
-       return 0;
-}
-
-#if 0
-static void __init mm_slots_hash_free(void)
-{
-       kfree(mm_slots_hash);
-       mm_slots_hash = NULL;
-}
-#endif
-
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
        struct mm_slot *mm_slot;
-       struct hlist_head *bucket;
        struct hlist_node *node;
 
-       bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
-                               % MM_SLOTS_HASH_HEADS];
-       hlist_for_each_entry(mm_slot, node, bucket, hash) {
+       hash_for_each_possible(mm_slots_hash, mm_slot, node, hash, (unsigned long)mm)
                if (mm == mm_slot->mm)
                        return mm_slot;
-       }
+
        return NULL;
 }
 
 static void insert_to_mm_slots_hash(struct mm_struct *mm,
                                    struct mm_slot *mm_slot)
 {
-       struct hlist_head *bucket;
-
-       bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
-                               % MM_SLOTS_HASH_HEADS];
        mm_slot->mm = mm;
-       hlist_add_head(&mm_slot->hash, bucket);
+       hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
 }
 
 static inline int khugepaged_test_exit(struct mm_struct *mm)
@@ -2035,7 +1999,7 @@ void __khugepaged_exit(struct mm_struct *mm)
        spin_lock(&khugepaged_mm_lock);
        mm_slot = get_mm_slot(mm);
        if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
-               hlist_del(&mm_slot->hash);
+               hash_del(&mm_slot->hash);
                list_del(&mm_slot->mm_node);
                free = 1;
        }
@@ -2484,7 +2448,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 
        if (khugepaged_test_exit(mm)) {
                /* free mm_slot */
-               hlist_del(&mm_slot->hash);
+               hash_del(&mm_slot->hash);
                list_del(&mm_slot->mm_node);
 
                /*