mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-10-31 18:09:19 +00:00
mm: make is_vmalloc_addr work properly.
There was a typo in the config guard for CONFIG_ENABLE_VMALLOC_SAVING which meant that the code was never actually being compiled. As a result, it was never noticed that the code had major flaws. Fix the code to actually work as intended. Change-Id: Ief3c00d16cf54e3b945ffb1bfde6b1fea2fa142e Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
This commit is contained in:
parent
cb544cb068
commit
acce1041cd
1 changed file with 42 additions and 31 deletions
73
mm/vmalloc.c
73
mm/vmalloc.c
|
@ -204,36 +204,6 @@ static int vmap_page_range(unsigned long start, unsigned long end,
|
|||
return ret;
|
||||
}
|
||||
|
||||
#ifdef ENABLE_VMALLOC_SAVING
|
||||
int is_vmalloc_addr(const void *x)
|
||||
{
|
||||
struct rb_node *n;
|
||||
struct vmap_area *va;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock(&vmap_area_lock);
|
||||
|
||||
for (n = rb_first(vmap_area_root); n; rb_next(n)) {
|
||||
va = rb_entry(n, struct vmap_area, rb_node);
|
||||
if (x >= va->va_start && x < va->va_end) {
|
||||
ret = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock(&vmap_area_lock);
|
||||
return ret;
|
||||
}
|
||||
#else
|
||||
int is_vmalloc_addr(const void *x)
|
||||
{
|
||||
unsigned long addr = (unsigned long)x;
|
||||
|
||||
return addr >= VMALLOC_START && addr < VMALLOC_END;
|
||||
}
|
||||
#endif
|
||||
EXPORT_SYMBOL(is_vmalloc_addr);
|
||||
|
||||
int is_vmalloc_or_module_addr(const void *x)
|
||||
{
|
||||
/*
|
||||
|
@ -299,9 +269,9 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
|
|||
#define VM_LAZY_FREEING 0x02
|
||||
#define VM_VM_AREA 0x04
|
||||
|
||||
static DEFINE_SPINLOCK(vmap_area_lock);
|
||||
/* Export for kexec only */
|
||||
LIST_HEAD(vmap_area_list);
|
||||
static DEFINE_SPINLOCK(vmap_area_lock);
|
||||
static struct rb_root vmap_area_root = RB_ROOT;
|
||||
|
||||
/* The vmap cache globals are protected by vmap_area_lock */
|
||||
|
@ -312,6 +282,47 @@ static unsigned long cached_align;
|
|||
|
||||
static unsigned long vmap_area_pcpu_hole;
|
||||
|
||||
#ifdef CONFIG_ENABLE_VMALLOC_SAVING
|
||||
int is_vmalloc_addr(const void *x)
|
||||
{
|
||||
struct vmap_area *va;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock(&vmap_area_lock);
|
||||
list_for_each_entry(va, &vmap_area_list, list) {
|
||||
if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
|
||||
continue;
|
||||
|
||||
if (!(va->flags & VM_VM_AREA))
|
||||
continue;
|
||||
|
||||
if (va->vm == NULL)
|
||||
continue;
|
||||
|
||||
if (va->vm->flags & VM_LOWMEM)
|
||||
continue;
|
||||
|
||||
if ((unsigned long)x >= va->va_start &&
|
||||
(unsigned long)x < va->va_end) {
|
||||
ret = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock(&vmap_area_lock);
|
||||
return ret;
|
||||
}
|
||||
#else
|
||||
int is_vmalloc_addr(const void *x)
|
||||
{
|
||||
unsigned long addr = (unsigned long)x;
|
||||
|
||||
return addr >= VMALLOC_START && addr < VMALLOC_END;
|
||||
}
|
||||
#endif
|
||||
EXPORT_SYMBOL(is_vmalloc_addr);
|
||||
|
||||
|
||||
|
||||
static struct vmap_area *__find_vmap_area(unsigned long addr)
|
||||
{
|
||||
struct rb_node *n = vmap_area_root.rb_node;
|
||||
|
|
Loading…
Reference in a new issue