mm: slub: add kernel address sanitizer support for slub allocator
With this patch, KASAN is able to catch bugs in memory allocated by SLUB. Initially, all objects in a newly allocated slab page are marked as redzone. Later, when a SLUB object is allocated, the number of bytes requested by the caller is marked as accessible, and the rest of the object (including SLUB's metadata) is marked as redzone (inaccessible).

We also mark an object as accessible when ksize() is called for it. There are places in the kernel where ksize() is called to find out the size of the actually allocated area. Such callers may validly access the whole allocation, so it has to be marked accessible.

Code in slub.c and slab_common.c may validly access an object's metadata, so instrumentation for these files is disabled.

Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Signed-off-by: Dmitry Chernenkov <dmitryc@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Konstantin Serebryany <kcc@google.com>
Signed-off-by: Andrey Konovalov <adech.fo@gmail.com>
Cc: Yuri Gribov <tetra2005@gmail.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[dkeitel@codeaurora.org: resolve merge conflicts; also remove pieces that do not apply to the 3.10 version of the kernel]
Git-commit: 0316bec22ec95ea2faca6406437b0b5950553b7c
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Signed-off-by: David Keitel <dkeitel@codeaurora.org>
Change-Id: I306a4d3851670d8a237c6da1b7244eee24bc1d8e
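For illustration only (this sketch is not part of the patch; the helper name kasan_demo_oob is hypothetical), the following shows the kind of bug this instrumentation catches, and why callers of ksize() must see the whole allocation as accessible:

/*
 * Illustrative sketch, assuming a kernel built with CONFIG_KASAN and SLUB.
 * Bytes between the requested size and the end of the slab object stay
 * poisoned as redzone, so the off-by-one write below is reported.
 */
#include <linux/slab.h>

static void kasan_demo_oob(void)
{
	char *p = kmalloc(17, GFP_KERNEL);

	if (!p)
		return;

	p[17] = 'x';	/* one byte past the requested size: lands in the redzone */

	/*
	 * ksize() tells the caller how much memory was really allocated, so
	 * after this call the whole allocation is marked accessible and a
	 * write up to ksize(p) - 1 is no longer reported.
	 */
	p[ksize(p) - 1] = 'y';

	kfree(p);
}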
This commit is contained in:
parent 4701295283
commit 7fccfb53db

10 changed files with 171 additions and 2 deletions
include/linux/kasan.h

@@ -37,6 +37,18 @@ void kasan_unpoison_shadow(const void *address, size_t size);
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
+void kasan_poison_slab(struct page *page);
+void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
+void kasan_poison_object_data(struct kmem_cache *cache, void *object);
+
+void kasan_kmalloc_large(const void *ptr, size_t size);
+void kasan_kfree_large(const void *ptr);
+void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size);
+void kasan_krealloc(const void *object, size_t new_size);
+
+void kasan_slab_alloc(struct kmem_cache *s, void *object);
+void kasan_slab_free(struct kmem_cache *s, void *object);
+
 #else /* CONFIG_KASAN */
 
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -47,6 +59,21 @@ static inline void kasan_disable_current(void) {}
 static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
+static inline void kasan_poison_slab(struct page *page) {}
+static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
+					void *object) {}
+static inline void kasan_poison_object_data(struct kmem_cache *cache,
+					void *object) {}
+
+static inline void kasan_kmalloc_large(void *ptr, size_t size) {}
+static inline void kasan_kfree_large(const void *ptr) {}
+static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
+				size_t size) {}
+static inline void kasan_krealloc(const void *object, size_t new_size) {}
+
+static inline void kasan_slab_alloc(struct kmem_cache *s, void *object) {}
+static inline void kasan_slab_free(struct kmem_cache *s, void *object) {}
+
 #endif /* CONFIG_KASAN */
 
 #endif /* LINUX_KASAN_H */

include/linux/slab.h

@@ -94,6 +94,8 @@
 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
 				(unsigned long)ZERO_SIZE_PTR)
 
+#include <linux/kmemleak.h>
+#include <linux/kasan.h>
 
 struct mem_cgroup;
 /*

lib/Kconfig.kasan

@@ -5,6 +5,7 @@ if HAVE_ARCH_KASAN
 
 config KASAN
 	bool "KASan: runtime memory debugger"
+	depends on SLUB_DEBUG
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.

mm/Makefile

@@ -2,6 +2,9 @@
 # Makefile for the linux memory manager.
 #
 
+KASAN_SANITIZE_slab_common.o := n
+KASAN_SANITIZE_slub.o := n
+
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= fremap.o highmem.o madvise.o memory.o mincore.o \
 			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \

mm/kasan/kasan.c

@@ -31,6 +31,7 @@
 #include <linux/kasan.h>
 
 #include "kasan.h"
+#include "../slab.h"
 
 /*
  * Poisons the shadow memory for 'size' bytes starting from 'addr'.
@@ -268,6 +269,103 @@ void kasan_free_pages(struct page *page, unsigned int order)
 				KASAN_FREE_PAGE);
 }
 
+void kasan_poison_slab(struct page *page)
+{
+	kasan_poison_shadow(page_address(page),
+			PAGE_SIZE << compound_order(page),
+			KASAN_KMALLOC_REDZONE);
+}
+
+void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+{
+	kasan_unpoison_shadow(object, cache->object_size);
+}
+
+void kasan_poison_object_data(struct kmem_cache *cache, void *object)
+{
+	kasan_poison_shadow(object,
+			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
+			KASAN_KMALLOC_REDZONE);
+}
+
+void kasan_slab_alloc(struct kmem_cache *cache, void *object)
+{
+	kasan_kmalloc(cache, object, cache->object_size);
+}
+
+void kasan_slab_free(struct kmem_cache *cache, void *object)
+{
+	unsigned long size = cache->object_size;
+	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+
+	/* RCU slabs could be legally used after free within the RCU period */
+	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+		return;
+
+	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
+}
+
+void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
+{
+	unsigned long redzone_start;
+	unsigned long redzone_end;
+
+	if (unlikely(object == NULL))
+		return;
+
+	redzone_start = round_up((unsigned long)(object + size),
+				KASAN_SHADOW_SCALE_SIZE);
+	redzone_end = round_up((unsigned long)object + cache->object_size,
+				KASAN_SHADOW_SCALE_SIZE);
+
+	kasan_unpoison_shadow(object, size);
+	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+		KASAN_KMALLOC_REDZONE);
+}
+EXPORT_SYMBOL(kasan_kmalloc);
+
+void kasan_kmalloc_large(const void *ptr, size_t size)
+{
+	struct page *page;
+	unsigned long redzone_start;
+	unsigned long redzone_end;
+
+	if (unlikely(ptr == NULL))
+		return;
+
+	page = virt_to_page(ptr);
+	redzone_start = round_up((unsigned long)(ptr + size),
+				KASAN_SHADOW_SCALE_SIZE);
+	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+
+	kasan_unpoison_shadow(ptr, size);
+	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+		KASAN_PAGE_REDZONE);
+}
+
+void kasan_krealloc(const void *object, size_t size)
+{
+	struct page *page;
+
+	if (unlikely(object == ZERO_SIZE_PTR))
+		return;
+
+	page = virt_to_head_page(object);
+
+	if (unlikely(!PageSlab(page)))
+		kasan_kmalloc_large(object, size);
+	else
+		kasan_kmalloc(page->slab_cache, object, size);
+}
+
+void kasan_kfree_large(const void *ptr)
+{
+	struct page *page = virt_to_page(ptr);
+
+	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
+			KASAN_FREE_PAGE);
+}
+
 #define DEFINE_ASAN_LOAD_STORE(size)				\
 void __asan_load##size(unsigned long addr)			\
 {								\

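For concreteness, here is a worked example of the redzone arithmetic kasan_kmalloc() above performs. The numbers are assumed for illustration and are not taken from the patch: a cache with object_size = 32, a request for 10 bytes, KASAN_SHADOW_SCALE_SIZE = 8, and an 8-byte-aligned object.

/*
 * Assumed example values: object_size = 32, size = 10, shadow scale = 8.
 *
 *   redzone_start = round_up((unsigned long)(object + 10), 8) = object + 16
 *   redzone_end   = round_up((unsigned long)object + 32, 8)   = object + 32
 *
 * kasan_unpoison_shadow(object, 10) makes bytes [0, 10) accessible: the
 * shadow byte covering [0, 8) becomes 0 and the shadow byte covering
 * [8, 16) becomes 2 (only the first 2 bytes of that granule are valid).
 * kasan_poison_shadow() then marks [16, 32) with KASAN_KMALLOC_REDZONE,
 * so any access at offset >= 10 inside the object is reported.
 */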
mm/kasan/kasan.h

@@ -7,6 +7,11 @@
 #define KASAN_SHADOW_MASK       (KASAN_SHADOW_SCALE_SIZE - 1)
 
 #define KASAN_FREE_PAGE         0xFF  /* page was freed */
+#define KASAN_FREE_PAGE         0xFF  /* page was freed */
+#define KASAN_PAGE_REDZONE      0xFE  /* redzone for kmalloc_large allocations */
+#define KASAN_KMALLOC_REDZONE   0xFC  /* redzone inside slub object */
+#define KASAN_KMALLOC_FREE      0xFB  /* object was freed (kmem_cache_free/kfree) */
+
 struct kasan_access_info {
 	const void *access_addr;

mm/kasan/report.c

@@ -24,6 +24,7 @@
 #include <linux/kasan.h>
 
 #include "kasan.h"
+#include "../slab.h"
 
 /* Shadow layout customization. */
 #define SHADOW_BYTES_PER_BLOCK 1
@@ -55,8 +56,11 @@ static void print_error_description(struct kasan_access_info *info)
 
 	switch (shadow_val) {
 	case KASAN_FREE_PAGE:
+	case KASAN_KMALLOC_FREE:
 		bug_type = "use after free";
 		break;
+	case KASAN_PAGE_REDZONE:
+	case KASAN_KMALLOC_REDZONE:
 	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
 		bug_type = "out of bounds access";
 		break;
@@ -77,6 +81,23 @@ static void print_address_description(struct kasan_access_info *info)
 	if ((addr >= (void *)PAGE_OFFSET) &&
 		(addr < high_memory)) {
 		struct page *page = virt_to_head_page(addr);
+
+		if (PageSlab(page)) {
+			void *object;
+			struct kmem_cache *cache = page->slab_cache;
+			void *last_object;
+
+			object = virt_to_obj(cache, page_address(page), addr);
+			last_object = page_address(page) +
+				page->objects * cache->size;
+
+			if (unlikely(object > last_object))
+				object = last_object; /* we hit into padding */
+
+			object_err(cache, page, object,
+				"kasan: bad access detected");
+			return;
+		}
 		dump_page(page, "kasan: bad access detected");
 	}
 

mm/slab.c

@@ -3668,6 +3668,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, cachep->size,
 			   flags, nodeid);
+	kasan_kmalloc(s, ret, size);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);

mm/slab_common.c

@@ -497,7 +497,6 @@ void __init create_kmalloc_caches(unsigned long flags)
 }
 #endif /* !CONFIG_SLOB */
 
-
 #ifdef CONFIG_SLABINFO
 void print_slabinfo_header(struct seq_file *m)
 {

mm/slub.c (14 changed lines)

@@ -970,6 +970,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void
 	flags &= gfp_allowed_mask;
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
 	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
+	kasan_slab_alloc(s, object);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -993,6 +994,7 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 #endif
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(x, s->object_size);
+	kasan_slab_free(s, x);
 }
 
 /*
@@ -1373,8 +1375,11 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
 	setup_object_debug(s, page, object);
-	if (unlikely(s->ctor))
+	if (unlikely(s->ctor)) {
+		kasan_unpoison_object_data(s, object);
 		s->ctor(object);
+		kasan_poison_object_data(s, object);
+	}
 }
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1406,6 +1411,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 		memset(start, POISON_INUSE, PAGE_SIZE << order);
 
 	last = start;
+	kasan_poison_slab(page);
 	for_each_object(p, s, start, page->objects) {
 		setup_object(s, page, last);
 		set_freepointer(s, last, p);
@@ -2454,6 +2460,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	kasan_kmalloc(s, ret, size);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -2488,6 +2495,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
+
+	kasan_kmalloc(s, ret, size);
 	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -2875,6 +2884,7 @@ static void early_kmem_cache_node_alloc(int node)
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
 	init_tracking(kmem_cache_node, n);
 #endif
+	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
 	init_kmem_cache_node(n);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
@@ -3272,6 +3282,8 @@ void *__kmalloc(size_t size, gfp_t flags)
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
+	kasan_kmalloc(s, ret, size);
+
 	return ret;
 }
 EXPORT_SYMBOL(__kmalloc);