Mirror of https://github.com/followmsi/android_kernel_google_msm.git (synced 2024-11-06 23:17:41 +00:00)
Commit 262de8b688:
CMA pages are designed to be used as fallback for movable allocations
and cannot be used for non-movable allocations. If CMA pages are
utilized poorly, non-movable allocations may end up getting starved if
all regular movable pages are allocated and the only pages left are
CMA. Always using CMA pages first creates unacceptable performance
problems. As a midway alternative, use CMA pages for certain userspace
allocations. The userspace pages can be migrated or dropped quickly,
giving decent utilization.

Change-Id: I6165dda01b705309eebabc6dfa67146b7a95c174
CRs-Fixed: 452508
[lauraa@codeaurora.org: Missing CONFIG_CMA guards, add commit text]
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
302 lines · 7.5 KiB · C
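
The patch's effect shows up near the end of this header: with CONFIG_CMA enabled, alloc_zeroed_user_highpage_movable() adds __GFP_CMA to the movable userspace allocation so the page allocator may satisfy it from a CMA region. As a rough sketch of the call path, using a hypothetical fault-path wrapper (the name below is invented for illustration; in mainline the analogous caller is do_anonymous_page() in mm/memory.c):

        /* Hypothetical caller, for illustration only. */
        static struct page *example_anon_fault_alloc(struct vm_area_struct *vma,
                                                     unsigned long address)
        {
                /* Zeroed, movable user page; may come from a CMA region when
                 * CONFIG_CMA=y, and can be migrated out again later. */
                return alloc_zeroed_user_highpage_movable(vma, address);
        }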

#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

#include <asm/kmap_types.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0UL

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
        might_sleep();
        return page_address(page);
}

static inline void kunmap(struct page *page)
{
}

static inline void *kmap_atomic(struct page *page)
{
        pagefault_disable();
        return page_address(page);
}
#define kmap_atomic_prot(page, prot)    kmap_atomic(page)

static inline void __kunmap_atomic(void *addr)
{
        pagefault_enable();
}

#define kmap_atomic_pfn(pfn)    kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)        virt_to_page(ptr)

#define kmap_flush_unused()     do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */
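
/*
 * Illustrative sketch (hypothetical, not part of the original header): the
 * canonical atomic-mapping pattern. With CONFIG_HIGHMEM unset it reduces to
 * page_address() bracketed by pagefault_disable()/pagefault_enable().
 */
#if 0
static void example_fill_page(struct page *page, int c)
{
        void *kaddr = kmap_atomic(page);        /* map the page; no sleeping */

        memset(kaddr, c, PAGE_SIZE);
        kunmap_atomic(kaddr);                   /* pass the address, not the page */
}
#endif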

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)

DECLARE_PER_CPU(int, __kmap_atomic_idx);

static inline int kmap_atomic_idx_push(void)
{
        int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;

#ifdef CONFIG_DEBUG_HIGHMEM
        WARN_ON_ONCE(in_irq() && !irqs_disabled());
        BUG_ON(idx > KM_TYPE_NR);
#endif
        return idx;
}

static inline int kmap_atomic_idx(void)
{
        return __this_cpu_read(__kmap_atomic_idx) - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
#ifdef CONFIG_DEBUG_HIGHMEM
        int idx = __this_cpu_dec_return(__kmap_atomic_idx);

        BUG_ON(idx < 0);
#else
        __this_cpu_dec(__kmap_atomic_idx);
#endif
}

#endif
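
/*
 * Editorial note, based on typical arch implementations (e.g. ARM's):
 * an architecture's kmap_atomic() claims the next per-CPU fixmap slot via
 * kmap_atomic_idx_push(), and its __kunmap_atomic() releases it with
 * kmap_atomic_idx_pop(), so atomic mappings nest LIFO per CPU.
 */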

/*
 * NOTE:
 * kmap_atomic() and kunmap_atomic() with two arguments are deprecated.
 * We only keep them for backward compatibility; any use of them now
 * triggers a warning.
 */

#define PASTE(a, b) a ## b
#define PASTE2(a, b) PASTE(a, b)

#define NARG_(_2, _1, n, ...) n
#define NARG(...) NARG_(__VA_ARGS__, 2, 1, :)

static inline void __deprecated *kmap_atomic_deprecated(struct page *page,
                                                        enum km_type km)
{
        return kmap_atomic(page);
}

#define kmap_atomic1(...) kmap_atomic(__VA_ARGS__)
#define kmap_atomic2(...) kmap_atomic_deprecated(__VA_ARGS__)
#define kmap_atomic(...) PASTE2(kmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__))
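
/*
 * Expansion example (editorial): NARG() counts the arguments, so
 *      kmap_atomic(page)               -> kmap_atomic1(page)  (real function)
 *      kmap_atomic(page, KM_USER0)     -> kmap_atomic2(...)   (deprecated
 * wrapper, whose __deprecated marking makes the compiler warn at the call
 * site). KM_USER0 here stands for any old enum km_type slot.
 */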

static inline void __deprecated __kunmap_atomic_deprecated(void *addr,
                                                           enum km_type km)
{
        __kunmap_atomic(addr);
}

/*
 * Prevent people trying to call kunmap_atomic() as if it were kunmap().
 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
 */
#define kunmap_atomic_deprecated(addr, km)                      \
do {                                                            \
        BUILD_BUG_ON(__same_type((addr), struct page *));       \
        __kunmap_atomic_deprecated(addr, km);                   \
} while (0)

#define kunmap_atomic_withcheck(addr)                           \
do {                                                            \
        BUILD_BUG_ON(__same_type((addr), struct page *));       \
        __kunmap_atomic(addr);                                  \
} while (0)

#define kunmap_atomic1(...) kunmap_atomic_withcheck(__VA_ARGS__)
#define kunmap_atomic2(...) kunmap_atomic_deprecated(__VA_ARGS__)
#define kunmap_atomic(...) PASTE2(kunmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__))
/**** End of C pre-processor tricks for deprecated macros ****/

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *addr = kmap_atomic(page);
        clear_user_page(addr, vaddr, page);
        kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move, like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
                        struct vm_area_struct *vma,
                        unsigned long vaddr)
{
        struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
                        vma, vaddr);

        if (page)
                clear_user_highpage(page, vaddr);

        return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
                                        unsigned long vaddr)
{
#ifndef CONFIG_CMA
        return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
#else
        return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
                                                vaddr);
#endif
}

#ifdef CONFIG_CMA
static inline struct page *
alloc_zeroed_user_highpage_movable_cma(struct vm_area_struct *vma,
                                        unsigned long vaddr)
{
        return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
                                                vaddr);
}
#endif
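
/*
 * Note: __GFP_CMA is carried by this tree's CMA patches (see the commit
 * message above); it is not a mainline GFP flag. It marks the allocation
 * as eligible to be satisfied from a CMA region, which is safe here
 * because these userspace pages remain movable.
 */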

static inline void clear_highpage(struct page *page)
{
        void *kaddr = kmap_atomic(page);
        clear_page(kaddr);
        kunmap_atomic(kaddr);
}

static inline void zero_user_segments(struct page *page,
        unsigned start1, unsigned end1,
        unsigned start2, unsigned end2)
{
        void *kaddr = kmap_atomic(page);

        BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

        if (end1 > start1)
                memset(kaddr + start1, 0, end1 - start1);

        if (end2 > start2)
                memset(kaddr + start2, 0, end2 - start2);

        kunmap_atomic(kaddr);
        flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
        unsigned start, unsigned end)
{
        zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
        unsigned start, unsigned size)
{
        zero_user_segments(page, start, start + size, 0, 0);
}

static inline void __deprecated memclear_highpage_flush(struct page *page,
                        unsigned int offset, unsigned int size)
{
        zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from);
        vto = kmap_atomic(to);
        copy_user_page(vto, vfrom, vaddr, to);
        kunmap_atomic(vto);
        kunmap_atomic(vfrom);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from);
        vto = kmap_atomic(to);
        copy_page(vto, vfrom);
        kunmap_atomic(vto);
        kunmap_atomic(vfrom);
}

#endif /* _LINUX_HIGHMEM_H */