arm64: Allow remapping lowmem as 4K pages

For certain debug features, lowmem needs to be mapped with pages
instead of sections. Add a config option to allow remapping lowmem
as 4K pages.

Change-Id: I50179311facd91b97ecde720da38ec7e47512e95
Signed-off-by: Neeti Desai <neetid@codeaurora.org>
Author: Neeti Desai <neetid@codeaurora.org>
Date:   2014-06-25 17:17:35 -07:00
Commit: b74ddb13e3
Parent: 3536f4b9d6

2 changed files with 82 additions and 0 deletions
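Not part of the commit, just a standalone arithmetic sketch of what
"pages instead of sections" means here, assuming the common arm64
configuration of a 4K granule, where one PMD-level section maps 2MB:

	/* Illustrative user-space sketch only, not kernel code. */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)	/* 4K page */
	#define PMD_SHIFT	21
	#define PMD_SIZE	(1UL << PMD_SHIFT)	/* 2M section */

	int main(void)
	{
		/* Number of PTEs a single split section turns into. */
		printf("PTEs per section: %lu\n", PMD_SIZE / PAGE_SIZE);
		return 0;
	}

This prints 512 (PTRS_PER_PTE for a 4K granule): each section split by
this patch costs one page-sized PTE table, and covering the same 2MB can
then take up to 512 TLB entries instead of one, which is the performance
caveat noted in the Kconfig help text below.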

arch/arm64/Kconfig.debug

@@ -34,4 +34,14 @@ config DEBUG_SET_MODULE_RONX
	  against certain classes of kernel exploits.
	  If in doubt, say "N".

config FORCE_PAGES
	bool "Force lowmem to be mapped with 4K pages"
	help
	  There are some advanced debug features that can only be used when
	  memory is mapped with pages instead of sections. Enable this option
	  to always map lowmem with pages. This may have a performance cost
	  due to increased TLB pressure.

	  If unsure, say N.

endmenu
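Not part of the commit: to exercise the new option, one would set
CONFIG_FORCE_PAGES=y in the target defconfig (the option lands in the
arm64 "Kernel hacking" menu this file defines), after which
remap_pages() in the mmu.c hunk below rewrites the lowmem section
mappings at boot.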

arch/arm64/mm/mmu.c

@@ -419,6 +419,77 @@ static void __init map_mem(void)
	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

#ifdef CONFIG_FORCE_PAGES
static noinline void split_pmd(pmd_t *pmd, unsigned long addr,
				unsigned long end, unsigned long pfn)
{
	pte_t *pte, *start_pte;

	start_pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
	pte = start_pte;
	/* Populate a full PTE table covering [addr, end) with 4K entries. */
	do {
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	/* Replace the section mapping: point the PMD at the new table. */
	set_pmd(pmd, __pmd((__pa(start_pte)) | PMD_TYPE_TABLE));
}
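/*
 * Walk the kernel's linear mapping of every memblock region and hand
 * each PMD that is not already a valid PTE table to split_pmd() above.
 */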
static noinline void __init remap_pages(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		phys_addr_t phys_pgd = reg->base;
		phys_addr_t phys_end = reg->base + reg->size;
		unsigned long addr_pgd = (unsigned long)__va(phys_pgd);
		unsigned long end = (unsigned long)__va(phys_end);
		pmd_t *pmd = NULL;
		pud_t *pud = NULL;
		pgd_t *pgd = NULL;
		unsigned long next_pud, next_pmd, next_pgd;
		unsigned long addr_pmd, addr_pud;
		phys_addr_t phys_pud, phys_pmd;

		if (phys_pgd >= phys_end)
			break;

		pgd = pgd_offset(&init_mm, addr_pgd);
		do {
			next_pgd = pgd_addr_end(addr_pgd, end);
			pud = pud_offset(pgd, addr_pgd);
			addr_pud = addr_pgd;
			phys_pud = phys_pgd;
			do {
				next_pud = pud_addr_end(addr_pud, next_pgd);
				pmd = pmd_offset(pud, addr_pud);
				addr_pmd = addr_pud;
				phys_pmd = phys_pud;
				do {
					next_pmd = pmd_addr_end(addr_pmd,
								next_pud);
					if (pmd_none(*pmd) || pmd_bad(*pmd))
						split_pmd(pmd, addr_pmd,
							next_pmd,
							__phys_to_pfn(phys_pmd));
					pmd++;
					phys_pmd += next_pmd - addr_pmd;
				} while (addr_pmd = next_pmd,
					addr_pmd < next_pud);
				phys_pud += next_pud - addr_pud;
			} while (pud++, addr_pud = next_pud,
				addr_pud < next_pgd);
			phys_pgd += next_pgd - addr_pgd;
		} while (pgd++, addr_pgd = next_pgd, addr_pgd < end);
	}
}
#else
static void __init remap_pages(void)
{
}
#endif

/*
 * paging_init() sets up the page tables, initialises the zone memory
@@ -431,6 +502,7 @@ void __init paging_init(void)
	init_mem_pgprot();
	map_mem();
	dma_contiguous_remap();
	remap_pages();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a