arm64: implement CONFIG_STRICT_MEMORY_RWX for arm64

In order to improve security, protect the sections
the kernel uses for the 1-to-1 mapping of memory
as strictly as possible (i.e. text is not writable,
data is not executable, and so on).

Change-Id: I6ef6837502b8af4f914512f95ff902276e191351
Signed-off-by: Larry Bassel <lbassel@codeaurora.org>
Larry Bassel 2014-03-18 16:02:49 -07:00
parent 72bd72cf80
commit 029e7622bd
6 changed files with 67 additions and 1 deletion
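For orientation, here is a sketch (illustrative only, not part of the patch) of
the permission map these diffs establish over the kernel's section-mapped
linear region. The boundaries are the linker symbols used below, and the table
is derived from the get_pmd_prot_sect_kernel() checks in the mmu.c hunk:

#include <stdio.h>

/* Illustrative userspace table only -- not part of the patch. */
struct region {
	const char *range;
	const char *perms;
};

static const struct region layout[] = {
	{ "below _stext (.head.text, other memory)", "writable, never executable (PXN)" },
	{ "_stext .. __start_rodata",                "read-only, executable" },
	{ "__start_rodata .. __init_begin",          "read-only, PXN" },
	{ "__init_begin .. __init_data_begin",       "read-only, executable (init text)" },
	{ "__init_data_begin and above",             "writable, PXN (init data, data)" },
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(layout) / sizeof(layout[0]); i++)
		printf("%-42s %s\n", layout[i].range, layout[i].perms);
	return 0;
}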

arch/arm64/kernel/entry.S

@@ -139,7 +139,12 @@ tsk	.req	x28		// current thread_info
  * Interrupt handling.
  */
 	.macro	irq_handler
+#ifdef CONFIG_STRICT_MEMORY_RWX
+	ldr	x1, =handle_arch_irq
+	ldr	x1, [x1]
+#else
 	ldr	x1, handle_arch_irq
+#endif
 	mov	x0, sp
 	blr	x1
 	.endm
@@ -676,5 +681,9 @@ ENTRY(sys_rt_sigreturn_wrapper)
 	b	sys_rt_sigreturn
 ENDPROC(sys_rt_sigreturn_wrapper)
 
+#ifdef CONFIG_STRICT_MEMORY_RWX
+	.data
+#endif
+
 ENTRY(handle_arch_irq)
 	.quad	0
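Why the hunk above moves handle_arch_irq into .data: the symbol is really a
function pointer that the interrupt-controller driver installs at boot, and
with strict RWX the text section it used to sit in becomes read-only. Once it
lives in .data, the single PC-relative literal load is replaced by loading the
variable's address and then dereferencing it. A loose userspace C analogy (all
names here are hypothetical, for illustration only):

#include <stdio.h>

typedef void (*irq_handler_t)(void);

/*
 * Like handle_arch_irq, this pointer must be writable at runtime, so it
 * cannot live in a read-only, executable section once strict RWX is on.
 */
static irq_handler_t handle_irq;	/* .bss/.data: writable, non-executable */

static void gic_handler(void)
{
	puts("irq!");
}

int main(void)
{
	handle_irq = gic_handler;	/* the boot-time write RO text would forbid */
	handle_irq();			/* two steps: fetch the pointer, branch to it */
	return 0;
}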

arch/arm64/kernel/head.S

@@ -207,7 +207,11 @@ CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
 	msr	vttbr_el2, xzr
 
 	/* Hypervisor stub */
+#ifndef CONFIG_STRICT_MEMORY_RWX
 	adr	x0, __hyp_stub_vectors
+#else
+	ldr	x0, =__hyp_stub_vectors
+#endif
 	msr	vbar_el2, x0
 
 	/* spsr */
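The adr/ldr swap above is about reach: adr encodes a signed 21-bit PC-relative
offset (roughly +/-1 MiB), while ldr x0, =sym loads the full 64-bit address
from a literal pool. With the image now padded to 1<<SECTION_SHIFT boundaries,
__hyp_stub_vectors can end up outside adr's range. A sketch of that range
check (the addresses are made up):

#include <stdio.h>
#include <stdint.h>

/* adr reaches +/-1 MiB from the instruction; beyond that, use ldr =sym. */
static int adr_can_reach(uint64_t pc, uint64_t target)
{
	int64_t off = (int64_t)(target - pc);

	return off >= -(1 << 20) && off < (1 << 20);
}

int main(void)
{
	uint64_t pc   = 0xffffffc000080200ULL;	/* hypothetical */
	uint64_t near = pc + 0x0ff0;		/* same vicinity */
	uint64_t far  = pc + (2ULL << 20);	/* past 2 MiB of section padding */

	printf("near: %s\n", adr_can_reach(pc, near) ? "adr ok" : "need ldr =sym");
	printf("far:  %s\n", adr_can_reach(pc, far)  ? "adr ok" : "need ldr =sym");
	return 0;
}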

arch/arm64/kernel/vmlinux.lds.S

@@ -8,6 +8,9 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
+#ifdef CONFIG_STRICT_MEMORY_RWX
+#include <asm/pgtable.h>
+#endif
 
 #define ARM_EXIT_KEEP(x)
 #define ARM_EXIT_DISCARD(x)	x
@@ -52,6 +55,9 @@ SECTIONS
 		_text = .;
 		HEAD_TEXT
 	}
+#ifdef CONFIG_STRICT_MEMORY_RWX
+	. = ALIGN(1<<SECTION_SHIFT);
+#endif
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
 		__exception_text_start = .;
@@ -68,19 +74,31 @@ SECTIONS
 		*(.got)			/* Global offset table		*/
 	}
 
+#ifdef CONFIG_STRICT_MEMORY_RWX
+	. = ALIGN(1<<SECTION_SHIFT);
+#endif
 	RO_DATA(PAGE_SIZE)
 	EXCEPTION_TABLE(8)
 	NOTES
 	_etext = .;			/* End of text and rodata section */
 
+#ifdef CONFIG_STRICT_MEMORY_RWX
+	. = ALIGN(1<<SECTION_SHIFT);
+#else
 	. = ALIGN(PAGE_SIZE);
+#endif
 	__init_begin = .;
 
 	INIT_TEXT_SECTION(8)
 	.exit.text : {
 		ARM_EXIT_KEEP(EXIT_TEXT)
 	}
+#ifdef CONFIG_STRICT_MEMORY_RWX
+	. = ALIGN(1<<SECTION_SHIFT);
+	__init_data_begin = .;
+#else
 	. = ALIGN(16);
+#endif
 	.init.data : {
 		INIT_DATA
 		INIT_SETUP(16)
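Each ALIGN(1<<SECTION_SHIFT) above pads a permission boundary out to a
section, i.e. one PMD block mapping, so a whole block can carry a single set
of permissions. Assuming 4 KiB pages and the usual arm64 layout, SECTION_SHIFT
works out to 21, i.e. 2 MiB sections; a quick check of that arithmetic (the
macros mirror the kernel's under those assumptions):

#include <stdio.h>

#define PAGE_SHIFT	12				/* 4 KiB pages */
#define SECTION_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))	/* 512 entries/level -> 21 */

int main(void)
{
	unsigned long section = 1UL << SECTION_SHIFT;
	unsigned long addr = 0xffffffc000512345UL;	/* hypothetical */

	printf("section size: %lu MiB\n", section >> 20);		/* 2 MiB */
	printf("ALIGN(addr):  %#lx\n", (addr + section - 1) & ~(section - 1));
	return 0;
}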

arch/arm64/mm/init.c

@@ -343,11 +343,21 @@ void __init mem_init(void)
 	}
 }
 
+#ifdef CONFIG_STRICT_MEMORY_RWX
+void free_initmem(void)
+{
+	poison_init_mem(__init_data_begin, __init_end - __init_data_begin);
+	free_reserved_area(PAGE_ALIGN((unsigned long)&__init_data_begin),
+			   ((unsigned long)&__init_end) & PAGE_MASK,
+			   0, "unused kernel");
+}
+#else
 void free_initmem(void)
 {
 	poison_init_mem(__init_begin, __init_end - __init_begin);
 	free_initmem_default(0);
 }
+#endif
 
 #ifdef CONFIG_BLK_DEV_INITRD
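Note the asymmetry in the strict-RWX free_initmem() above: only
[__init_data_begin, __init_end) is handed back to the page allocator. The init
text stays resident, since it sits inside read-only section mappings that this
patch never splits or re-permissions, and freed pages must be writable through
the linear map. The rounding mirrors the calls above (the symbol addresses
below are made up, for illustration only):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long init_data_begin = 0xffffffc000a00008UL;	/* hypothetical */
	unsigned long init_end        = 0xffffffc000c00000UL;	/* hypothetical */

	/* round the start up and the end down, as free_initmem() does */
	unsigned long start = PAGE_ALIGN(init_data_begin);
	unsigned long end   = init_end & PAGE_MASK;

	printf("freeing %lu KiB of init data\n", (end - start) >> 10);
	return 0;
}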

arch/arm64/mm/mmu.c

@@ -185,6 +185,26 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
+#ifdef CONFIG_STRICT_MEMORY_RWX
+pmdval_t get_pmd_prot_sect_kernel(unsigned long addr)
+{
+	if (addr >= (unsigned long)__init_data_begin)
+		return prot_sect_kernel | PMD_SECT_PXN;
+	if (addr >= (unsigned long)__init_begin)
+		return prot_sect_kernel | PMD_SECT_RDONLY;
+	if (addr >= (unsigned long)__start_rodata)
+		return prot_sect_kernel | PMD_SECT_RDONLY | PMD_SECT_PXN;
+	if (addr >= (unsigned long)_stext)
+		return prot_sect_kernel | PMD_SECT_RDONLY;
+	return prot_sect_kernel | PMD_SECT_PXN;
+}
+#else
+pmdval_t get_pmd_prot_sect_kernel(unsigned long addr)
+{
+	return prot_sect_kernel;
+}
+#endif
+
 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 				  unsigned long end, phys_addr_t phys)
 {
@@ -204,7 +224,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 		next = pmd_addr_end(addr, end);
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0)
-			set_pmd(pmd, __pmd(phys | prot_sect_kernel));
+			set_pmd(pmd,
+				__pmd(phys | get_pmd_prot_sect_kernel(addr)));
 		else
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
 		phys += next - addr;
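get_pmd_prot_sect_kernel() classifies a section's base address by testing
thresholds from the top of the image downward, so the first match wins and
everything below _stext falls through to writable/no-exec. A standalone mock
of that classification (the symbol values are hypothetical):

#include <stdio.h>

/* mock linker symbols, ascending as in the kernel image (made-up values) */
static const unsigned long _stext            = 0x100000;
static const unsigned long __start_rodata    = 0x400000;
static const unsigned long __init_begin      = 0x600000;
static const unsigned long __init_data_begin = 0x800000;

/* mirrors the descending threshold checks in the hunk above */
static const char *prot_for(unsigned long addr)
{
	if (addr >= __init_data_begin)
		return "prot_sect_kernel | PXN";		  /* init data: RW, no exec */
	if (addr >= __init_begin)
		return "prot_sect_kernel | RDONLY";	  /* init text: RO, exec */
	if (addr >= __start_rodata)
		return "prot_sect_kernel | RDONLY | PXN"; /* rodata: RO, no exec */
	if (addr >= _stext)
		return "prot_sect_kernel | RDONLY";	  /* text: RO, exec */
	return "prot_sect_kernel | PXN";		  /* below text: RW, no exec */
}

int main(void)
{
	unsigned long samples[] = { 0x0, 0x200000, 0x500000, 0x700000, 0x900000 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%#lx -> %s\n", samples[i], prot_for(samples[i]));
	return 0;
}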

include/linux/mm.h

@@ -18,6 +18,10 @@
 #include <linux/bit_spinlock.h>
 #include <linux/shrinker.h>
 
+#ifdef CONFIG_STRICT_MEMORY_RWX
+extern char __init_data_begin[];
+#endif
+
 struct mempolicy;
 struct anon_vma;
 struct anon_vma_chain;