android: binder: Disable preemption while holding the global binder lock.

(cherry picked from commit f681f0264fd2c51aa12190ff9be04622a7c8ca3f)

Signed-off-by: Riley Andrews <riandrews@google.com>
Bug: 30141999
Change-Id: I66ed44990d5c347d197e61dc49a37b5228c748d0
Authored by Riley Andrews on 2015-10-21 16:30:51 -07:00; committed by LuK1337
parent 64d4f8340f
commit 2812c6d9de
1 changed file with 111 additions and 39 deletions
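In outline: binder_lock() now disables preemption immediately after taking binder_main_lock, and binder_unlock() re-enables it once the mutex is dropped. Because sleeping is illegal with preemption disabled, every potentially sleeping operation performed under the lock (GFP_KERNEL allocation, user copies that can fault) is routed through a *_preempt_disabled wrapper that briefly re-enables preemption around the call. Below is a condensed sketch of that pattern, mirroring the helpers the diff adds; the tag argument and trace hooks are omitted, so this is an illustration rather than the driver's exact code:

#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/slab.h>

static DEFINE_MUTEX(binder_main_lock);

static inline void binder_lock(void)
{
	/* mutex_lock() may sleep, so it must come before preempt_disable(). */
	mutex_lock(&binder_main_lock);
	preempt_disable();
}

static inline void binder_unlock(void)
{
	mutex_unlock(&binder_main_lock);
	preempt_enable();
}

/*
 * Allocating under the lock: try a non-sleeping allocation first, and
 * only on failure flip preemption back on for a sleeping GFP_KERNEL
 * attempt.
 */
static inline void *kzalloc_preempt_disabled(size_t size)
{
	void *ptr = kzalloc(size, GFP_NOWAIT);

	if (ptr)
		return ptr;

	preempt_enable_no_resched();
	ptr = kzalloc(size, GFP_KERNEL);
	preempt_disable();
	return ptr;
}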


@@ -458,6 +458,7 @@ static inline void binder_lock(const char *tag)
{
trace_binder_lock(tag);
mutex_lock(&binder_main_lock);
preempt_disable();
trace_binder_locked(tag);
}
@@ -465,8 +466,62 @@ static inline void binder_unlock(const char *tag)
{
trace_binder_unlock(tag);
mutex_unlock(&binder_main_lock);
preempt_enable();
}
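/*
 * The wrappers below bracket operations that may sleep while the binder
 * lock is held: each briefly re-enables preemption, performs the
 * sleeping call, and disables preemption again before returning.
 */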
static inline void *kzalloc_preempt_disabled(size_t size)
{
void *ptr;
ptr = kzalloc(size, GFP_NOWAIT);
if (ptr)
return ptr;
preempt_enable_no_resched();
ptr = kzalloc(size, GFP_KERNEL);
preempt_disable();
return ptr;
}
static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n)
{
long ret;
preempt_enable_no_resched();
ret = copy_to_user(to, from, n);
preempt_disable();
return ret;
}
static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n)
{
long ret;
preempt_enable_no_resched();
ret = copy_from_user(to, from, n);
preempt_disable();
return ret;
}
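/*
 * get_user()/put_user() assign through an lvalue, so these are
 * statement-expression macros rather than inline functions; the
 * preemption bracket is the same.
 */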
#define get_user_preempt_disabled(x, ptr) \
({ \
int __ret; \
preempt_enable_no_resched(); \
__ret = get_user(x, ptr); \
preempt_disable(); \
__ret; \
})
#define put_user_preempt_disabled(x, ptr) \
({ \
int __ret; \
preempt_enable_no_resched(); \
__ret = put_user(x, ptr); \
preempt_disable(); \
__ret; \
})
static void binder_set_nice(long nice)
{
long min_nice;
@@ -608,6 +663,8 @@ static int __binder_update_page_range(struct binder_proc *proc, int allocate,
else
mm = get_task_mm(proc->tsk);
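/* The mmap_sem write lock and page allocations below may sleep. */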
preempt_enable_no_resched();
if (mm) {
down_write(&mm->mmap_sem);
vma = proc->vma;
@@ -662,6 +719,9 @@ static int __binder_update_page_range(struct binder_proc *proc, int allocate,
up_write(&mm->mmap_sem);
mmput(mm);
}
preempt_disable();
return 0;
free_range:
@@ -684,6 +744,9 @@ err_no_vma:
up_write(&mm->mmap_sem);
mmput(mm);
}
preempt_disable();
return -ENOMEM;
}
@@ -977,7 +1040,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
return NULL;
}
node = kzalloc(sizeof(*node), GFP_KERNEL);
node = kzalloc_preempt_disabled(sizeof(*node));
if (node == NULL)
return NULL;
binder_stats_created(BINDER_STAT_NODE);
@@ -1121,7 +1184,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
else
return ref;
}
new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
new_ref = kzalloc_preempt_disabled(sizeof(*ref));
if (new_ref == NULL)
return NULL;
binder_stats_created(BINDER_STAT_REF);
@@ -1994,14 +2057,14 @@ static void binder_transaction(struct binder_proc *proc,
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
t = kzalloc_preempt_disabled(sizeof(*t));
if (t == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete));
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
goto err_alloc_tcomplete_failed;
@@ -2062,14 +2125,14 @@ static void binder_transaction(struct binder_proc *proc,
ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
@@ -2242,9 +2305,7 @@ static void binder_transaction(struct binder_proc *proc,
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait) {
if (reply || !(t->flags & TF_ONE_WAY)) {
preempt_disable();
wake_up_interruptible_sync(target_wait);
sched_preempt_enable_no_resched();
} else {
wake_up_interruptible(target_wait);
}
@@ -2304,7 +2365,7 @@ int binder_thread_write(struct binder_proc *proc,
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
@@ -2322,7 +2383,7 @@ int binder_thread_write(struct binder_proc *proc,
struct binder_ref *ref;
const char *debug_string;
if (get_user(target, (uint32_t __user *)ptr))
if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (target == 0 && context->binder_context_mgr_node &&
@@ -2381,10 +2442,10 @@ int binder_thread_write(struct binder_proc *proc,
binder_uintptr_t cookie;
struct binder_node *node;
if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
node = binder_get_node(proc, node_ptr);
@@ -2442,7 +2503,7 @@ int binder_thread_write(struct binder_proc *proc,
binder_uintptr_t data_ptr;
struct binder_buffer *buffer;
if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
@@ -2498,7 +2559,7 @@ int binder_thread_write(struct binder_proc *proc,
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr,
@@ -2549,10 +2610,10 @@ int binder_thread_write(struct binder_proc *proc,
struct binder_ref *ref;
struct binder_ref_death *death;
if (get_user(target, (uint32_t __user *)ptr))
if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
ref = binder_get_ref(proc, target, false);
@@ -2581,7 +2642,7 @@ int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid);
break;
}
death = kzalloc(sizeof(*death), GFP_KERNEL);
death = kzalloc_preempt_disabled(sizeof(*death));
if (death == NULL) {
thread->return_error = BR_ERROR;
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
@@ -2636,7 +2697,7 @@ int binder_thread_write(struct binder_proc *proc,
binder_uintptr_t cookie;
struct binder_ref_death *death = NULL;
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(cookie);
@@ -2717,7 +2778,7 @@ static int binder_thread_read(struct binder_proc *proc,
int wait_for_proc_work;
if (*consumed == 0) {
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
@@ -2728,7 +2789,7 @@ retry:
if (thread->return_error != BR_OK && ptr < end) {
if (thread->return_error2 != BR_OK) {
if (put_user(thread->return_error2, (uint32_t __user *)ptr))
if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, thread->return_error2);
@@ -2736,7 +2797,7 @@ retry:
goto done;
thread->return_error2 = BR_OK;
}
if (put_user(thread->return_error, (uint32_t __user *)ptr))
if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, thread->return_error);
@@ -2814,7 +2875,7 @@ retry:
} break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
cmd = BR_TRANSACTION_COMPLETE;
if (put_user(cmd, (uint32_t __user *)ptr))
if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
@@ -2856,14 +2917,14 @@ retry:
node->has_weak_ref = 0;
}
if (cmd != BR_NOOP) {
if (put_user(cmd, (uint32_t __user *)ptr))
if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (put_user(node->ptr,
if (put_user_preempt_disabled(node->ptr,
(binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
if (put_user(node->cookie,
if (put_user_preempt_disabled(node->cookie,
(binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
@@ -2907,10 +2968,10 @@ retry:
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
if (put_user(cmd, (uint32_t __user *)ptr))
if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (put_user(death->cookie,
if (put_user_preempt_disabled(death->cookie,
(binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
@@ -2978,10 +3039,10 @@ retry:
ALIGN(t->buffer->data_size,
sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr))
if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
@@ -3023,7 +3084,7 @@ done:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
}
@@ -3098,7 +3159,7 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
break;
}
if (*p == NULL) {
thread = kzalloc(sizeof(*thread), GFP_KERNEL);
thread = kzalloc_preempt_disabled(sizeof(*thread));
if (thread == NULL)
return NULL;
binder_stats_created(BINDER_STAT_THREAD);
@@ -3202,7 +3263,7 @@ static int binder_ioctl_write_read(struct file *filp,
ret = -EINVAL;
goto out;
}
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
@@ -3220,7 +3281,7 @@ static int binder_ioctl_write_read(struct file *filp,
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
@@ -3234,7 +3295,7 @@ static int binder_ioctl_write_read(struct file *filp,
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
@@ -3244,7 +3305,7 @@ static int binder_ioctl_write_read(struct file *filp,
proc->pid, thread->pid,
(u64)bwr.write_consumed, (u64)bwr.write_size,
(u64)bwr.read_consumed, (u64)bwr.read_size);
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
@@ -3324,7 +3385,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto err;
break;
case BINDER_SET_MAX_THREADS:
if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
ret = -EINVAL;
goto err;
}
@@ -3347,7 +3408,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = -EINVAL;
goto err;
}
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION,
&ver->protocol_version)) {
ret = -EINVAL;
goto err;
@@ -3410,6 +3472,7 @@ static struct vm_operations_struct binder_vm_ops = {
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
@@ -3482,8 +3545,11 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
goto err_alloc_buf_struct_failed;
}
/* binder_update_page_range assumes preemption is disabled */
preempt_disable();
ret = __binder_update_page_range(proc, 1, proc->buffer,
proc->buffer + BINDER_MIN_ALLOC, vma);
preempt_enable_no_resched();
if (ret) {
ret = -ENOMEM;
failure_string = "alloc small buf";
@@ -3781,8 +3847,12 @@ static void binder_deferred_func(struct work_struct *work)
int defer;
do {
binder_lock(__func__);
trace_binder_lock(__func__);
mutex_lock(&binder_main_lock);
trace_binder_locked(__func__);
mutex_lock(&binder_deferred_lock);
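/*
 * Open-coded binder_lock()/binder_unlock(): preemption must stay
 * enabled until both mutexes are held, since mutex_lock() may sleep.
 */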
preempt_disable();
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
struct binder_proc, deferred_work_node);
@@ -3808,7 +3878,9 @@ static void binder_deferred_func(struct work_struct *work)
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
binder_unlock(__func__);
trace_binder_unlock(__func__);
mutex_unlock(&binder_main_lock);
preempt_enable_no_resched();
if (files)
put_files_struct(files);
} while (proc);
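
The resulting invariant: all code between binder_lock() and binder_unlock() runs with preemption off, and anything that can sleep inside that window must go through one of the *_preempt_disabled helpers. A hypothetical call site, stitched together from patterns in the diff for illustration only:

	binder_lock(__func__);
	node = kzalloc_preempt_disabled(sizeof(*node));	/* wrapper flips preemption if it must sleep */
	if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
		ret = -EFAULT;
	binder_unlock(__func__);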