Mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git (synced 2024-10-31 18:09:19 +00:00)
BACKPORT: mm/zsmalloc: adjust order of functions
(cherry picked from commit 66cdef663cd7a97aff6bbbf41a81a0205dc81ba2)

Currently the functions in zsmalloc.c are not arranged in a readable and reasonable sequence. As more and more functions are added, this becomes inconvenient. For example, with the current functions:

        void zs_init()
        {
        }

        static void get_maxobj_per_zspage()
        {
        }

suppose we want to add a func_1() that is called from zs_init(), and this new func_1() uses get_maxobj_per_zspage(), which is defined below zs_init():

        void func_1()
        {
                get_maxobj_per_zspage()
        }

        void zs_init()
        {
                func_1()
        }

        static void get_maxobj_per_zspage()
        {
        }

This will not compile, so we must add a declaration:

        static void get_maxobj_per_zspage();

before func_1() unless get_maxobj_per_zspage() is moved above func_1().

In addition, putting the module_[init|exit] functions at the bottom of the file follows the usual convention. So this patch adjusts the function sequence as:

        /* helper functions */
        ...
        obj_location_to_handle()
        ...

        /* Some exported functions */
        ...

        zs_map_object()
        zs_unmap_object()

        zs_malloc()
        zs_free()

        zs_init()
        zs_exit()

Bug: 25951511
Change-Id: I68377a213ade041b34e99a4280ebd57a933dfa83
Signed-off-by: Ganesh Mahendran <opensource.ganesh@gmail.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
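To make the compile failure concrete, here is a small standalone sketch using placeholder names (setup() and helper(), not the real zsmalloc symbols): with the definitions in this order, the build only succeeds because of the forward declaration, which is exactly the kind of declaration the patch avoids by reordering the functions instead.

/*
 * Standalone illustration of the ordering problem (placeholder names,
 * not the real zsmalloc functions).  setup() calls helper(), but
 * helper() is defined further down the file, so a forward declaration
 * is needed.
 */
#include <stdio.h>

static int helper(void);        /* forward declaration; remove it and the
                                 * compiler warns or errors about an
                                 * implicit declaration of helper() */

static int setup(void)
{
        return helper();        /* caller sits above the callee */
}

static int helper(void)
{
        return 42;
}

int main(void)
{
        printf("%d\n", setup());
        return 0;
}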
This commit is contained in:
parent caeae3dbac
commit bc7a7b9717

1 changed file with 224 additions and 224 deletions
mm/zsmalloc.c | 448 (224 additions, 224 deletions)
@@ -885,19 +885,6 @@ static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};

static void zs_unregister_cpu_notifier(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	__unregister_cpu_notifier(&zs_cpu_nb);

	cpu_notifier_register_done();
}

static int zs_register_cpu_notifier(void)
{
	int cpu, uninitialized_var(ret);

@@ -915,6 +902,19 @@ static int zs_register_cpu_notifier(void)
	return notifier_to_errno(ret);
}

static void zs_unregister_cpu_notifier(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	__unregister_cpu_notifier(&zs_cpu_nb);

	cpu_notifier_register_done();
}

static void init_zs_size_classes(void)
{
	int nr;

@@ -926,31 +926,6 @@ static void init_zs_size_classes(void)
	zs_size_classes = nr;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	zs_unregister_cpu_notifier();
}

static int __init zs_init(void)
{
	int ret = zs_register_cpu_notifier();

	if (ret) {
		zs_unregister_cpu_notifier();
		return ret;
	}

	init_zs_size_classes();

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif
	return 0;
}

static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
{
	return pages_per_zspage * PAGE_SIZE / size;

@@ -968,6 +943,202 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
	return true;
}

unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];

	BUG_ON(!handle);

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another users mappings.
	 */
	BUG_ON(in_interrupt());

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		return area->vm_addr + off;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	return __zs_map_object(area, pages, off, class->size);
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
	unsigned long obj;
	struct link_free *link;
	struct size_class *class;
	void *vaddr;

	struct page *first_page, *m_page;
	unsigned long m_objidx, m_offset;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	class = pool->size_class[get_size_class_index(size)];

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page))
			return 0;

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		atomic_long_add(class->pages_per_zspage,
					&pool->pages_allocated);
		spin_lock(&class->lock);
	}

	obj = (unsigned long)first_page->freelist;
	obj_handle_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	first_page->freelist = link->next;
	memset(link, POISON_INUSE, sizeof(*link));
	kunmap_atomic(vaddr);

	first_page->inuse++;
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	return obj;
}
EXPORT_SYMBOL_GPL(zs_malloc);

void zs_free(struct zs_pool *pool, unsigned long obj)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;
	void *vaddr;

	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!obj))
		return;

	obj_handle_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = pool->size_class[class_idx];
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	spin_lock(&class->lock);

	/* Insert this object in containing zspage's freelist */
	vaddr = kmap_atomic(f_page);
	link = (struct link_free *)(vaddr + f_offset);
	link->next = first_page->freelist;
	kunmap_atomic(vaddr);
	first_page->freelist = (void *)obj;

	first_page->inuse--;
	fullness = fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	if (fullness == ZS_EMPTY) {
		atomic_long_sub(class->pages_per_zspage,
					&pool->pages_allocated);
		free_zspage(first_page);
	}
}
EXPORT_SYMBOL_GPL(zs_free);

/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @flags: allocation flags used to allocate pool metadata

@@ -1076,201 +1247,30 @@ void zs_destroy_pool(struct zs_pool *pool)
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
static int __init zs_init(void)
{
	unsigned long obj;
	struct link_free *link;
	struct size_class *class;
	void *vaddr;
	int ret = zs_register_cpu_notifier();

	struct page *first_page, *m_page;
	unsigned long m_objidx, m_offset;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	class = pool->size_class[get_size_class_index(size)];

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page))
			return 0;

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		atomic_long_add(class->pages_per_zspage,
					&pool->pages_allocated);
		spin_lock(&class->lock);
	if (ret) {
		zs_unregister_cpu_notifier();
		return ret;
	}

	obj = (unsigned long)first_page->freelist;
	obj_handle_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
	init_zs_size_classes();

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	first_page->freelist = link->next;
	memset(link, POISON_INUSE, sizeof(*link));
	kunmap_atomic(vaddr);

	first_page->inuse++;
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	return obj;
#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(zs_malloc);

void zs_free(struct zs_pool *pool, unsigned long obj)
static void __exit zs_exit(void)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;
	void *vaddr;

	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!obj))
		return;

	obj_handle_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = pool->size_class[class_idx];
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	spin_lock(&class->lock);

	/* Insert this object in containing zspage's freelist */
	vaddr = kmap_atomic(f_page);
	link = (struct link_free *)(vaddr + f_offset);
	link->next = first_page->freelist;
	kunmap_atomic(vaddr);
	first_page->freelist = (void *)obj;

	first_page->inuse--;
	fullness = fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	if (fullness == ZS_EMPTY) {
		atomic_long_sub(class->pages_per_zspage,
					&pool->pages_allocated);
		free_zspage(first_page);
	}
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	zs_unregister_cpu_notifier();
}
EXPORT_SYMBOL_GPL(zs_free);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];

	BUG_ON(!handle);

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another users mappings.
	 */
	BUG_ON(in_interrupt());

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		return area->vm_addr + off;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	return __zs_map_object(area, pages, off, class->size);
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &__get_cpu_var(zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);

unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);

module_init(zs_init);
module_exit(zs_exit);
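For reference, here is a minimal usage sketch of the exported functions this patch only moves around; it is not part of the commit. The pairing of zs_malloc()/zs_free() and zs_map_object()/zs_unmap_object() follows the kernel-doc above, while the one-argument zs_create_pool(gfp_t) signature and the demo module name are assumptions that may not match every kernel version.

/*
 * Usage sketch only -- not part of this patch.  The zs_create_pool()
 * signature is assumed from the @flags kernel-doc in this file and may
 * differ between kernel versions.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/zsmalloc.h>

static int __init zsmalloc_usage_demo_init(void)
{
	struct zs_pool *pool;
	unsigned long handle;
	void *obj;

	pool = zs_create_pool(GFP_KERNEL);	/* assumed signature */
	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, 64);		/* returns a handle, 0 on failure */
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	/* Map, touch, unmap: only one mapping per CPU at a time. */
	obj = zs_map_object(pool, handle, ZS_MM_RW);
	memset(obj, 0, 64);
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}

static void __exit zsmalloc_usage_demo_exit(void)
{
}

module_init(zsmalloc_usage_demo_init);
module_exit(zsmalloc_usage_demo_exit);
MODULE_LICENSE("GPL");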