mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-09-21 20:04:01 +00:00
msm: Allow lowmem to be non contiguous and mixed.
Any image that is expected to have a lifetime of the entire system can give its
virtual address space back for use in vmalloc.

Change-Id: I81ce848cd37e8573d706fa5d1aa52147b3c8da12
Signed-off-by: Neeti Desai <neetid@codeaurora.org>
This commit is contained in:
parent
56624b4526
commit
679ece4533
4 changed files with 67 additions and 2 deletions
|
@ -91,6 +91,8 @@ void __init add_static_vm_early(struct static_vm *svm)
|
|||
void *vaddr;
|
||||
|
||||
vm = &svm->vm;
|
||||
if (vm_area_check_early(vm))
|
||||
return;
|
||||
vm_area_add_early(vm);
|
||||
vaddr = vm->addr;
|
||||
|
||||
|
|
|
@ -1459,12 +1459,21 @@ extern char __init_data[];
|
|||
static void __init map_lowmem(void)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
struct vm_struct *vm;
|
||||
phys_addr_t start;
|
||||
phys_addr_t end;
|
||||
unsigned long vaddr;
|
||||
unsigned long pfn;
|
||||
unsigned long length;
|
||||
unsigned int type;
|
||||
int nr = 0;
|
||||
|
||||
/* Map all the lowmem memory banks. */
|
||||
for_each_memblock(memory, reg) {
|
||||
phys_addr_t start = reg->base;
|
||||
phys_addr_t end = start + reg->size;
|
||||
struct map_desc map;
|
||||
nr++;
|
||||
start = reg->base;
|
||||
end = start + reg->size;
|
||||
|
||||
if (end > arm_lowmem_limit)
|
||||
end = arm_lowmem_limit;
|
||||
|
@ -1516,6 +1525,32 @@ static void __init map_lowmem(void)
|
|||
|
||||
create_mapping(&map);
|
||||
}
|
||||
|
||||
vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
|
||||
start = reg->base;
|
||||
end = start + reg->size;
|
||||
|
||||
if (end > arm_lowmem_limit)
|
||||
end = arm_lowmem_limit;
|
||||
if (start >= end)
|
||||
break;
|
||||
|
||||
pfn = __phys_to_pfn(start);
|
||||
vaddr = __phys_to_virt(start);
|
||||
length = end - start;
|
||||
type = MT_MEMORY;
|
||||
|
||||
vm->addr = (void *)(vaddr & PAGE_MASK);
|
||||
vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
|
||||
vm->phys_addr = __pfn_to_phys(pfn);
|
||||
vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
|
||||
vm->flags |= VM_ARM_MTYPE(type);
|
||||
vm->caller = map_lowmem;
|
||||
vm_area_add_early(vm++);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -146,6 +146,7 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
|
|||
extern struct list_head vmap_area_list;
|
||||
extern __init void vm_area_add_early(struct vm_struct *vm);
|
||||
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
|
||||
extern __init int vm_area_check_early(struct vm_struct *vm);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
# ifdef CONFIG_MMU
|
||||
|
|
27
mm/vmalloc.c
27
mm/vmalloc.c
|
@ -1136,6 +1136,33 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
|
|||
EXPORT_SYMBOL(vm_map_ram);
|
||||
|
||||
static struct vm_struct *vmlist __initdata;
|
||||
|
||||
/**
|
||||
* vm_area_check_early - check if vmap area is already mapped
|
||||
* @vm: vm_struct to be checked
|
||||
*
|
||||
* This function is used to check if the vmap area has been
|
||||
* mapped already. @vm->addr, @vm->size and @vm->flags should
|
||||
* contain proper values.
|
||||
*
|
||||
*/
|
||||
int __init vm_area_check_early(struct vm_struct *vm)
|
||||
{
|
||||
struct vm_struct *tmp, **p;
|
||||
|
||||
BUG_ON(vmap_initialized);
|
||||
for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
|
||||
if (tmp->addr >= vm->addr) {
|
||||
if (tmp->addr < vm->addr + vm->size)
|
||||
return 1;
|
||||
} else {
|
||||
if (tmp->addr + tmp->size > vm->addr)
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* vm_area_add_early - add vmap area early during boot
|
||||
* @vm: vm_struct to add
|
||||
|
|
Loading…
Reference in a new issue