mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-10-31 18:09:19 +00:00
mm: Update is_vmalloc_addr to account for vmalloc savings
is_vmalloc_addr currently assumes that all vmalloc addresses lie between VMALLOC_START and VMALLOC_END. This may not be the case when interleaving vmalloc and lowmem. Update is_vmalloc_addr to check for this properly. Change-Id: I5def3d6ae1a4de59ea36f095b8c73649a37b1f36 Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
This commit is contained in:
parent
850103c46e
commit
f3fff09b29
4 changed files with 41 additions and 7 deletions
|
@ -1575,7 +1575,7 @@ static void __init map_lowmem(void)
|
|||
vm->addr = (void *)(vaddr & PAGE_MASK);
|
||||
vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
|
||||
vm->phys_addr = __pfn_to_phys(pfn);
|
||||
vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
|
||||
vm->flags = VM_LOWMEM | VM_ARM_STATIC_MAPPING;
|
||||
vm->flags |= VM_ARM_MTYPE(type);
|
||||
vm->caller = map_lowmem;
|
||||
vm_area_add_early(vm++);
|
||||
|
|
|
@ -308,16 +308,16 @@ unsigned long vmalloc_to_pfn(const void *addr);
|
|||
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
|
||||
* is no special casing required.
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
extern int is_vmalloc_addr(const void *x);
|
||||
#else
|
||||
/*
 * !CONFIG_MMU stub: without an MMU there is no vmalloc address range,
 * so no address can be a vmalloc address.
 *
 * Note: this definition sits in the #else branch of #ifdef CONFIG_MMU,
 * so the inner "#ifdef CONFIG_MMU" range check the patch left here was
 * dead code and has been removed.
 */
static inline int is_vmalloc_addr(const void *x)
{
	return 0;
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
extern int is_vmalloc_or_module_addr(const void *x);
|
||||
#else
|
||||
|
|
|
@ -16,6 +16,7 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
|
|||
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
|
||||
#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
|
||||
#define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
|
||||
#define VM_LOWMEM 0x00000040 /* Tracking of direct mapped lowmem */
|
||||
/* bits [20..32] reserved for arch specific ioremap internals */
|
||||
|
||||
/*
|
||||
|
|
33
mm/vmalloc.c
33
mm/vmalloc.c
|
@ -204,6 +204,36 @@ static int vmap_page_range(unsigned long start, unsigned long end,
|
|||
return ret;
|
||||
}
|
||||
|
||||
#ifdef ENABLE_VMALLOC_SAVING
|
||||
int is_vmalloc_addr(const void *x)
|
||||
{
|
||||
struct rb_node *n;
|
||||
struct vmap_area *va;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock(&vmap_area_lock);
|
||||
|
||||
for (n = rb_first(vmap_area_root); n; rb_next(n)) {
|
||||
va = rb_entry(n, struct vmap_area, rb_node);
|
||||
if (x >= va->va_start && x < va->va_end) {
|
||||
ret = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock(&vmap_area_lock);
|
||||
return ret;
|
||||
}
|
||||
#else
|
||||
int is_vmalloc_addr(const void *x)
|
||||
{
|
||||
unsigned long addr = (unsigned long)x;
|
||||
|
||||
return addr >= VMALLOC_START && addr < VMALLOC_END;
|
||||
}
|
||||
#endif
|
||||
EXPORT_SYMBOL(is_vmalloc_addr);
|
||||
|
||||
int is_vmalloc_or_module_addr(const void *x)
|
||||
{
|
||||
/*
|
||||
|
@ -2695,6 +2725,9 @@ static int s_show(struct seq_file *m, void *p)
|
|||
if (v->flags & VM_VPAGES)
|
||||
seq_printf(m, " vpages");
|
||||
|
||||
if (v->flags & VM_LOWMEM)
|
||||
seq_printf(m, " lowmem");
|
||||
|
||||
show_numa_info(m, v);
|
||||
seq_putc(m, '\n');
|
||||
return 0;
|
||||
|
|
Loading…
Reference in a new issue