mirror of
https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git
synced 2024-10-31 18:09:19 +00:00
vmalloc: add un/map_kernel_range_noflush()
Impact: two more public map/unmap functions Implement map_kernel_range_noflush() and unmap_kernel_range_noflush(). These functions respectively map and unmap an address range in the kernel VM area but don't do any vcache or TLB flushing. These will be used by the new percpu allocator. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Nick Piggin <nickpiggin@yahoo.com.au>
This commit is contained in:
parent
f0aa661790
commit
8fc4898500
2 changed files with 67 additions and 3 deletions
|
@ -91,6 +91,9 @@ extern struct vm_struct *remove_vm_area(const void *addr);
|
|||
|
||||
/* Map @pages into @area's address range with protection @prot. */
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
			struct page ***pages);

/*
 * Map/unmap a kernel VM address range WITHOUT any vcache or TLB
 * flushing; the caller is responsible for flushing (see the kernel-doc
 * on the definitions).  Added for use by the new percpu allocator.
 */
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);

/* Unmap a kernel VM address range, flushing vcache and TLB. */
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
|
||||
|
||||
/* Allocate/destroy a 'vmalloc' VM area. */
|
||||
|
|
67
mm/vmalloc.c
67
mm/vmalloc.c
|
@ -153,8 +153,8 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
|
|||
*
|
||||
* Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
|
||||
*/
|
||||
static int vmap_page_range(unsigned long start, unsigned long end,
|
||||
pgprot_t prot, struct page **pages)
|
||||
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
|
||||
pgprot_t prot, struct page **pages)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
unsigned long next;
|
||||
|
@ -170,13 +170,22 @@ static int vmap_page_range(unsigned long start, unsigned long end,
|
|||
if (err)
|
||||
break;
|
||||
} while (pgd++, addr = next, addr != end);
|
||||
flush_cache_vmap(start, end);
|
||||
|
||||
if (unlikely(err))
|
||||
return err;
|
||||
return nr;
|
||||
}
|
||||
|
||||
static int vmap_page_range(unsigned long start, unsigned long end,
|
||||
pgprot_t prot, struct page **pages)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = vmap_page_range_noflush(start, end, prot, pages);
|
||||
flush_cache_vmap(start, end);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int is_vmalloc_or_module_addr(const void *x)
|
||||
{
|
||||
/*
|
||||
|
@ -1033,6 +1042,58 @@ void __init vmalloc_init(void)
|
|||
vmap_initialized = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* map_kernel_range_noflush - map kernel VM area with the specified pages
|
||||
* @addr: start of the VM area to map
|
||||
* @size: size of the VM area to map
|
||||
* @prot: page protection flags to use
|
||||
* @pages: pages to map
|
||||
*
|
||||
* Map PFN_UP(@size) pages at @addr. The VM area @addr and @size
|
||||
* specify should have been allocated using get_vm_area() and its
|
||||
* friends.
|
||||
*
|
||||
* NOTE:
|
||||
* This function does NOT do any cache flushing. The caller is
|
||||
* responsible for calling flush_cache_vmap() on to-be-mapped areas
|
||||
* before calling this function.
|
||||
*
|
||||
* RETURNS:
|
||||
* The number of pages mapped on success, -errno on failure.
|
||||
*/
|
||||
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
|
||||
pgprot_t prot, struct page **pages)
|
||||
{
|
||||
return vmap_page_range_noflush(addr, addr + size, prot, pages);
|
||||
}
|
||||
|
||||
/**
 * unmap_kernel_range_noflush - unmap a kernel VM area, no flushing
 * @addr: start address of the area to unmap
 * @size: size of the area to unmap
 *
 * Tears down PFN_UP(@size) page mappings starting at @addr.  The range
 * described by @addr and @size must belong to an area obtained from
 * get_vm_area() or one of its relatives.
 *
 * NOTE:
 * Neither the vcache nor the TLB is flushed here; the caller must
 * invoke flush_cache_vunmap() on the range before calling this
 * function and flush_tlb_kernel_range() afterwards.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	vunmap_page_range(addr, end);
}
|
||||
|
||||
/**
|
||||
* unmap_kernel_range - unmap kernel VM area and flush cache and TLB
|
||||
* @addr: start of the VM area to unmap
|
||||
* @size: size of the VM area to unmap
|
||||
*
|
||||
* Similar to unmap_kernel_range_noflush() but flushes vcache before
|
||||
* the unmapping and tlb after.
|
||||
*/
|
||||
void unmap_kernel_range(unsigned long addr, unsigned long size)
|
||||
{
|
||||
unsigned long end = addr + size;
|
||||
|
|
Loading…
Reference in a new issue