binder: always allocate/map first BINDER_MIN_ALLOC pages

Certain use cases, such as the camera, constantly allocate and free
binder buffers beyond the first 4k, resulting in mmap_sem contention.

If we expand the allocated range from 4k to something higher, we can
reduce the contention. Tests show that 6 pages are enough to cause very
few update_page_range operations and to reduce the contention.

Bug: 36727951

Change-Id: I28bc3fb9b33c764c257e28487712fce2a3c1078b
Reported-by: Tim Murray <timmurray@google.com>
Signed-off-by: Joel Fernandes <joelaf@google.com>

Pre-allocate 1 page instead of the 6 in the original patch, since the
pre-allocated page is used to prevent the first page from getting
unpinned after removing the buffer headers, rather than to pin pages
to speed up larger transactions.
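
For illustration, a minimal userspace sketch (not kernel code) of the clamp
that the new binder_update_page_range() wrapper applies: any update that
starts inside the first BINDER_MIN_ALLOC bytes is moved up, so those pages
stay mapped. The example offsets and the 4k PAGE_SIZE are illustrative
assumptions, not part of the patch.

#include <stdio.h>

#define PAGE_SIZE        4096UL            /* assumed page size for the demo */
#define BINDER_MIN_ALLOC (1 * PAGE_SIZE)   /* mirrors the patch's definition */

/* Mirror of the wrapper's clamp: starts below BINDER_MIN_ALLOC are moved up. */
static unsigned long clamp_start(unsigned long start_off)
{
	return start_off < BINDER_MIN_ALLOC ? BINDER_MIN_ALLOC : start_off;
}

int main(void)
{
	/* A free that begins inside page 0 leaves that page mapped ... */
	printf("free at 0x%lx -> update starts at 0x%lx\n",
	       0x100UL, clamp_start(0x100UL));
	/* ... while one that begins at page 2 is unaffected by the clamp. */
	printf("free at 0x%lx -> update starts at 0x%lx\n",
	       2 * PAGE_SIZE, clamp_start(2 * PAGE_SIZE));
	return 0;
}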

Change-Id: I7c3e4884a9538ecfd86601d31c5bcfd6611d37a4
Signed-off-by: Sherry Yang <sherryy@android.com>
Joel Fernandes authored on 2017-08-01 15:49:20 -07:00; committed by Luca Stefani
parent 51f6a39bcc
commit fac40cdf7a
1 changed file with 22 additions and 4 deletions


@@ -46,6 +46,8 @@
 #include <uapi/linux/android/binder.h>
 #include "binder_trace.h"
 
+#define BINDER_MIN_ALLOC (1 * PAGE_SIZE)
+
 static DEFINE_MUTEX(binder_main_lock);
 static DEFINE_MUTEX(binder_deferred_lock);
 static DEFINE_MUTEX(binder_mmap_lock);
@@ -554,9 +556,9 @@ static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
 	return NULL;
 }
 
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
-				    void *start, void *end,
-				    struct vm_area_struct *vma)
+static int __binder_update_page_range(struct binder_proc *proc, int allocate,
+				      void *start, void *end,
+				      struct vm_area_struct *vma)
 {
 	void *page_addr;
 	unsigned long user_page_addr;
@@ -656,6 +658,20 @@ err_no_vma:
 	return -ENOMEM;
 }
 
+static int binder_update_page_range(struct binder_proc *proc, int allocate,
+				    void *start, void *end,
+				    struct vm_area_struct *vma)
+{
+	/*
+	 * For regular updates, move up start if needed since MIN_ALLOC pages
+	 * are always mapped
+	 */
+	if (start - proc->buffer < BINDER_MIN_ALLOC)
+		start = proc->buffer + BINDER_MIN_ALLOC;
+
+	return __binder_update_page_range(proc, allocate, start, end, vma);
+}
+
 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 					      size_t data_size,
 					      size_t offsets_size, int is_async)
@@ -2939,7 +2955,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 		goto err_alloc_buf_struct_failed;
 	}
 
-	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+	ret = __binder_update_page_range(proc, 1, proc->buffer,
+					 proc->buffer + BINDER_MIN_ALLOC, vma);
+	if (ret) {
 		ret = -ENOMEM;
 		failure_string = "alloc small buf";
 		goto err_alloc_small_buf_failed;