Mirror of https://github.com/followmsi/android_kernel_google_msm.git

flo/deb: Update Android binder

parent 8564b84a62
commit 9ab3bf6892

9 changed files with 5495 additions and 1771 deletions
drivers/staging/android/Kconfig

@@ -32,6 +32,16 @@ config ANDROID_BINDER_DEVICES
 	  created. Each binder device has its own context manager, and is
 	  therefore logically separated from the other devices.
 
+config ANDROID_BINDER_IPC_SELFTEST
+	bool "Android Binder IPC Driver Selftest"
+	depends on ANDROID_BINDER_IPC
+	---help---
+	  This feature allows binder selftest to run.
+
+	  Binder selftest checks the allocation and free of binder buffers
+	  exhaustively with combinations of various buffer sizes and
+	  alignments.
+
 config ASHMEM
 	bool "Enable the Anonymous Shared Memory Subsystem"
 	default n
drivers/staging/android/Makefile

@@ -1,6 +1,7 @@
 ccflags-y += -I$(src)			# needed for trace events
 
-obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o
+obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
 obj-$(CONFIG_ASHMEM)			+= ashmem.o
 obj-$(CONFIG_ANDROID_PERSISTENT_RAM)	+= persistent_ram.o
 obj-$(CONFIG_ANDROID_RAM_CONSOLE)	+= ram_console.o
drivers/staging/android/binder.c

File diff suppressed because it is too large.
drivers/staging/android/binder.h (deleted)

@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2008 Google, Inc.
- *
- * Based on, but no longer compatible with, the original
- * OpenBinder.org binder driver interface, which is:
- *
- * Copyright (c) 2005 Palmsource, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_BINDER_H
-#define _LINUX_BINDER_H
-
-#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
-#define BINDER_IPC_32BIT 1
-#endif
-
-#include "uapi/binder.h"
-
-#endif /* _LINUX_BINDER_H */
-
1190  drivers/staging/android/binder_alloc.c  (new file)
      File diff suppressed because it is too large.

 182  drivers/staging/android/binder_alloc.h  (new file)
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_ALLOC_H
+#define _LINUX_BINDER_ALLOC_H
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/list_lru.h>
+#include <uapi/binder.h>
+
+extern struct list_lru binder_alloc_lru;
+struct binder_transaction;
+
+/**
+ * struct binder_buffer - buffer used for binder transactions
+ * @entry:              entry alloc->buffers
+ * @rb_node:            node for allocated_buffers/free_buffers rb trees
+ * @free:               %true if buffer is free
+ * @allow_user_free:    %true if user is allowed to free buffer
+ * @async_transaction:  %true if buffer is in use for an async txn
+ * @debug_id:           unique ID for debugging
+ * @transaction:        pointer to associated struct binder_transaction
+ * @target_node:        struct binder_node associated with this buffer
+ * @data_size:          size of @transaction data
+ * @offsets_size:       size of array of offsets
+ * @extra_buffers_size: size of space for other objects (like sg lists)
+ * @user_data:          user pointer to base of buffer space
+ *
+ * Bookkeeping structure for binder transaction buffers
+ */
+struct binder_buffer {
+	struct list_head entry; /* free and allocated entries by address */
+	struct rb_node rb_node; /* free entry by size or allocated entry */
+				/* by address */
+	unsigned free:1;
+	unsigned allow_user_free:1;
+	unsigned async_transaction:1;
+	unsigned debug_id:29;
+
+	struct binder_transaction *transaction;
+
+	struct binder_node *target_node;
+	size_t data_size;
+	size_t offsets_size;
+	size_t extra_buffers_size;
+	void __user *user_data;
+};
+
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru:      entry in binder_alloc_lru
+ * @alloc:    binder_alloc for a proc
+ */
+struct binder_lru_page {
+	struct list_head lru;
+	struct page *page_ptr;
+	struct binder_alloc *alloc;
+};
+
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @vma:                vm_area_struct passed to mmap_handler
+ *                      (invariant after mmap)
+ * @tsk:                tid for task that called init for this proc
+ *                      (invariant after init)
+ * @vma_vm_mm:          copy of vma->vm_mm (invariant after mmap)
+ * @buffer:             base of per-proc address space mapped via mmap
+ * @buffers:            list of all buffers for this proc
+ * @free_buffers:       rb tree of buffers available for allocation
+ *                      sorted by size
+ * @allocated_buffers:  rb tree of allocated buffers sorted by address
+ * @free_async_space:   VA space available for async buffers. This is
+ *                      initialized at mmap time to 1/2 the full VA space
+ * @pages:              array of binder_lru_page
+ * @buffer_size:        size of address space specified via mmap
+ * @pid:                pid for associated binder_proc (invariant after init)
+ * @pages_high:         high watermark of offset in @pages
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_init() and binder_mmap()
+ * calls. The address space is used for both user-visible buffers and for
+ * struct binder_buffer objects used to track the user buffers
+ */
+struct binder_alloc {
+	struct mutex mutex;
+	struct vm_area_struct *vma;
+	struct mm_struct *vma_vm_mm;
+	void __user *buffer;
+	struct list_head buffers;
+	struct rb_root free_buffers;
+	struct rb_root allocated_buffers;
+	size_t free_async_space;
+	struct binder_lru_page *pages;
+	size_t buffer_size;
+	uint32_t buffer_free;
+	int pid;
+	size_t pages_high;
+};
+
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+				       spinlock_t *lock, void *cb_arg);
+extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+						  size_t data_size,
+						  size_t offsets_size,
+						  size_t extra_buffers_size,
+						  int is_async);
+extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
+extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+extern struct binder_buffer *
+binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+			     uintptr_t user_ptr);
+extern void binder_alloc_free_buf(struct binder_alloc *alloc,
+				  struct binder_buffer *buffer);
+extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+				     struct vm_area_struct *vma);
+extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
+extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
+extern void binder_alloc_print_allocated(struct seq_file *m,
+					 struct binder_alloc *alloc);
+void binder_alloc_print_pages(struct seq_file *m,
+			      struct binder_alloc *alloc);
+
+/**
+ * binder_alloc_get_free_async_space() - get free space available for async
+ * @alloc:	binder_alloc for this proc
+ *
+ * Return:	the bytes remaining in the address-space for async transactions
+ */
+static inline size_t
+binder_alloc_get_free_async_space(struct binder_alloc *alloc)
+{
+	size_t free_async_space;
+
+	mutex_lock(&alloc->mutex);
+	free_async_space = alloc->free_async_space;
+	mutex_unlock(&alloc->mutex);
+	return free_async_space;
+}
+
+unsigned long
+binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
+				 struct binder_buffer *buffer,
+				 binder_size_t buffer_offset,
+				 const void __user *from,
+				 size_t bytes);
+
+void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
+				 struct binder_buffer *buffer,
+				 binder_size_t buffer_offset,
+				 void *src,
+				 size_t bytes);
+
+void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
+				   void *dest,
+				   struct binder_buffer *buffer,
+				   binder_size_t buffer_offset,
+				   size_t bytes);
+
+#endif /* _LINUX_BINDER_ALLOC_H */
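As a rough orientation to the allocator API declared above, here is a minimal
kernel-side sketch (not part of the commit; example_alloc_roundtrip is a
hypothetical name, and error handling is trimmed):

    #include <linux/err.h>
    #include "binder_alloc.h"

    /* Allocate a 128-byte synchronous transaction buffer, then free it. */
    static int example_alloc_roundtrip(struct binder_alloc *alloc)
    {
    	struct binder_buffer *buf;

    	/* data_size=128, no offsets, no extra objects, is_async=0 */
    	buf = binder_alloc_new_buf(alloc, 128, 0, 0, 0);
    	if (IS_ERR(buf))
    		return PTR_ERR(buf);

    	/* ... fill payload via binder_alloc_copy_user_to_buffer() ... */

    	binder_alloc_free_buf(alloc, buf);
    	return 0;
    }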
 311  drivers/staging/android/binder_alloc_selftest.c  (new file)
@@ -0,0 +1,311 @@
+/* binder_alloc_selftest.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm_types.h>
+#include <linux/err.h>
+#include "binder_alloc.h"
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+static bool binder_selftest_run = true;
+static int binder_selftest_failures;
+static DEFINE_MUTEX(binder_selftest_lock);
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to previous buffer by addr.
+ * Symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means page boundaries.
+ */
+enum buf_end_align_type {
+	/**
+	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+	 * the same page as the end of the previous buffer and
+	 * is not page aligned. Examples:
+	 * buf1 ][ buf2 ][ ...
+	 * buf1 ]|[ buf2 ][ ...
+	 */
+	SAME_PAGE_UNALIGNED = 0,
+	/**
+	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+	 * is not page aligned, the end of this buffer is on the
+	 * same page as the end of the previous buffer and is page
+	 * aligned. When the previous buffer is page aligned, the
+	 * end of this buffer is aligned to the next page boundary.
+	 * Examples:
+	 * buf1 ][ buf2 ]| ...
+	 * buf1 ]|[ buf2 ]| ...
+	 */
+	SAME_PAGE_ALIGNED,
+	/**
+	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+	 * the page next to the end of the previous buffer and
+	 * is not page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 ][ ...
+	 * buf1 ]|[ buf2 | buf2 ][ ...
+	 */
+	NEXT_PAGE_UNALIGNED,
+	/**
+	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+	 * the page next to the end of the previous buffer and
+	 * is page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 ]| ...
+	 * buf1 ]|[ buf2 | buf2 ]| ...
+	 */
+	NEXT_PAGE_ALIGNED,
+	/**
+	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+	 * the page that follows the page after the end of the
+	 * previous buffer and is not page aligned. Examples:
+	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
+	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+	 */
+	NEXT_NEXT_UNALIGNED,
+	LOOP_END,
+};
+
+static void pr_err_size_seq(size_t *sizes, int *seq)
+{
+	int i;
+
+	pr_err("alloc sizes: ");
+	for (i = 0; i < BUFFER_NUM; i++)
+		pr_cont("[%zu]", sizes[i]);
+	pr_cont("\n");
+	pr_err("free seq: ");
+	for (i = 0; i < BUFFER_NUM; i++)
+		pr_cont("[%d]", seq[i]);
+	pr_cont("\n");
+}
+
+static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
+					 struct binder_buffer *buffer,
+					 size_t size)
+{
+	void __user *page_addr;
+	void __user *end;
+	int page_index;
+
+	end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
+	page_addr = buffer->user_data;
+	for (; page_addr < end; page_addr += PAGE_SIZE) {
+		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		if (!alloc->pages[page_index].page_ptr ||
+		    !list_empty(&alloc->pages[page_index].lru)) {
+			pr_err("expect alloc but is %s at page index %d\n",
+			       alloc->pages[page_index].page_ptr ?
+			       "lru" : "free", page_index);
+			return false;
+		}
+	}
+	return true;
+}
+
+static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+				      struct binder_buffer *buffers[],
+				      size_t *sizes, int *seq)
+{
+	int i;
+
+	for (i = 0; i < BUFFER_NUM; i++) {
+		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+		if (IS_ERR(buffers[i]) ||
+		    !check_buffer_pages_allocated(alloc, buffers[i],
+						  sizes[i])) {
+			pr_err_size_seq(sizes, seq);
+			binder_selftest_failures++;
+		}
+	}
+}
+
+static void binder_selftest_free_buf(struct binder_alloc *alloc,
+				     struct binder_buffer *buffers[],
+				     size_t *sizes, int *seq, size_t end)
+{
+	int i;
+
+	for (i = 0; i < BUFFER_NUM; i++)
+		binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+	for (i = 0; i < end / PAGE_SIZE; i++) {
+		/**
+		 * Error message on a free page can be false positive
+		 * if binder shrinker ran during binder_alloc_free_buf
+		 * calls above.
+		 */
+		if (list_empty(&alloc->pages[i].lru)) {
+			pr_err_size_seq(sizes, seq);
+			pr_err("expect lru but is %s at page index %d\n",
+			       alloc->pages[i].page_ptr ? "alloc" : "free", i);
+			binder_selftest_failures++;
+		}
+	}
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+	int i;
+	unsigned long count;
+
+	while ((count = list_lru_count(&binder_alloc_lru))) {
+		list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+			      NULL, count);
+	}
+
+	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+		if (alloc->pages[i].page_ptr) {
+			pr_err("expect free but is %s at page index %d\n",
+			       list_empty(&alloc->pages[i].lru) ?
+			       "alloc" : "lru", i);
+			binder_selftest_failures++;
+		}
+	}
+}
+
+static void binder_selftest_alloc_free(struct binder_alloc *alloc,
+				       size_t *sizes, int *seq, size_t end)
+{
+	struct binder_buffer *buffers[BUFFER_NUM];
+
+	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+	/* Allocate from lru. */
+	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+	if (list_lru_count(&binder_alloc_lru))
+		pr_err("lru list should be empty but is not\n");
+
+	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+	binder_selftest_free_page(alloc);
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+	int i;
+
+	for (i = 0; i < index; i++) {
+		if (seq[i] == val)
+			return true;
+	}
+	return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void binder_selftest_free_seq(struct binder_alloc *alloc,
+				     size_t *sizes, int *seq,
+				     int index, size_t end)
+{
+	int i;
+
+	if (index == BUFFER_NUM) {
+		binder_selftest_alloc_free(alloc, sizes, seq, end);
+		return;
+	}
+	for (i = 0; i < BUFFER_NUM; i++) {
+		if (is_dup(seq, index, i))
+			continue;
+		seq[index] = i;
+		binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
+	}
+}
+
+static void binder_selftest_alloc_size(struct binder_alloc *alloc,
+				       size_t *end_offset)
+{
+	int i;
+	int seq[BUFFER_NUM] = {0};
+	size_t front_sizes[BUFFER_NUM];
+	size_t back_sizes[BUFFER_NUM];
+	size_t last_offset, offset = 0;
+
+	for (i = 0; i < BUFFER_NUM; i++) {
+		last_offset = offset;
+		offset = end_offset[i];
+		front_sizes[i] = offset - last_offset;
+		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+	}
+	/*
+	 * Buffers share the first or last few pages.
+	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+	 * we need one giant buffer before getting to the last page.
+	 */
+	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+	binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+				 end_offset[BUFFER_NUM - 1]);
+	binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
+}
+
+static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
+					 size_t *end_offset, int index)
+{
+	int align;
+	size_t end, prev;
+
+	if (index == BUFFER_NUM) {
+		binder_selftest_alloc_size(alloc, end_offset);
+		return;
+	}
+	prev = index == 0 ? 0 : end_offset[index - 1];
+	end = prev;
+
+	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+		if (align % 2)
+			end = ALIGN(end, PAGE_SIZE);
+		else
+			end += BUFFER_MIN_SIZE;
+		end_offset[index] = end;
+		binder_selftest_alloc_offset(alloc, end_offset, index + 1);
+	}
+}
+
+/**
+ * binder_selftest_alloc() - Test alloc and free of buffer pages.
+ * @alloc: Pointer to alloc struct.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
+ */
+void binder_selftest_alloc(struct binder_alloc *alloc)
+{
+	size_t end_offset[BUFFER_NUM];
+
+	if (!binder_selftest_run)
+		return;
+	mutex_lock(&binder_selftest_lock);
+	if (!binder_selftest_run || !alloc->vma)
+		goto done;
+	pr_info("STARTED\n");
+	binder_selftest_alloc_offset(alloc, end_offset, 0);
+	binder_selftest_run = false;
+	if (binder_selftest_failures > 0)
+		pr_info("%d tests FAILED\n", binder_selftest_failures);
+	else
+		pr_info("PASSED\n");
+
+done:
+	mutex_unlock(&binder_selftest_lock);
+}
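To gauge what "all orders possible" costs, here is a quick count derived from
the constants in the file above (a sketch, not part of the commit):

    #include <stdio.h>

    /* BUFFER_NUM == 5 buffers, LOOP_END == 5 alignment choices per buffer
     * end give 5^5 end-offset layouts; each layout is tested with front
     * and back size arrays, and each size array over all 5! free orders. */
    int main(void)
    {
    	unsigned long layouts = 5UL * 5 * 5 * 5 * 5;	/* 3125 */
    	unsigned long orders = 120;			/* 5! */

    	printf("%lu alloc/free passes\n", layouts * 2 * orders); /* 750000 */
    	return 0;
    }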
drivers/staging/android/binder_trace.h

@@ -23,7 +23,8 @@
 struct binder_buffer;
 struct binder_node;
 struct binder_proc;
-struct binder_ref;
+struct binder_alloc;
+struct binder_ref_data;
 struct binder_thread;
 struct binder_transaction;
 

@@ -84,6 +85,30 @@ DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done);
 DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done);
 DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done);
 
+TRACE_EVENT(binder_set_priority,
+	TP_PROTO(int proc, int thread, unsigned int old_prio,
+		 unsigned int desired_prio, unsigned int new_prio),
+	TP_ARGS(proc, thread, old_prio, new_prio, desired_prio),
+
+	TP_STRUCT__entry(
+		__field(int, proc)
+		__field(int, thread)
+		__field(unsigned int, old_prio)
+		__field(unsigned int, new_prio)
+		__field(unsigned int, desired_prio)
+	),
+	TP_fast_assign(
+		__entry->proc = proc;
+		__entry->thread = thread;
+		__entry->old_prio = old_prio;
+		__entry->new_prio = new_prio;
+		__entry->desired_prio = desired_prio;
+	),
+	TP_printk("proc=%d thread=%d old=%d => new=%d desired=%d",
+		  __entry->proc, __entry->thread, __entry->old_prio,
+		  __entry->new_prio, __entry->desired_prio)
+);
+
 TRACE_EVENT(binder_wait_for_work,
 	TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo),
 	TP_ARGS(proc_work, transaction_stack, thread_todo),

@@ -146,8 +171,8 @@ TRACE_EVENT(binder_transaction_received,
 
 TRACE_EVENT(binder_transaction_node_to_ref,
 	TP_PROTO(struct binder_transaction *t, struct binder_node *node,
-		 struct binder_ref *ref),
-	TP_ARGS(t, node, ref),
+		 struct binder_ref_data *rdata),
+	TP_ARGS(t, node, rdata),
 
 	TP_STRUCT__entry(
 		__field(int, debug_id)

@@ -160,8 +185,8 @@ TRACE_EVENT(binder_transaction_node_to_ref,
 		__entry->debug_id = t->debug_id;
 		__entry->node_debug_id = node->debug_id;
 		__entry->node_ptr = node->ptr;
-		__entry->ref_debug_id = ref->debug_id;
-		__entry->ref_desc = ref->desc;
+		__entry->ref_debug_id = rdata->debug_id;
+		__entry->ref_desc = rdata->desc;
 	),
 	TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
 		  __entry->debug_id, __entry->node_debug_id,

@@ -170,8 +195,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,
 );
 
 TRACE_EVENT(binder_transaction_ref_to_node,
-	TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
-	TP_ARGS(t, ref),
+	TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+		 struct binder_ref_data *rdata),
+	TP_ARGS(t, node, rdata),
 
 	TP_STRUCT__entry(
 		__field(int, debug_id)

@@ -182,10 +208,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
 	),
 	TP_fast_assign(
 		__entry->debug_id = t->debug_id;
-		__entry->ref_debug_id = ref->debug_id;
-		__entry->ref_desc = ref->desc;
-		__entry->node_debug_id = ref->node->debug_id;
-		__entry->node_ptr = ref->node->ptr;
+		__entry->ref_debug_id = rdata->debug_id;
+		__entry->ref_desc = rdata->desc;
+		__entry->node_debug_id = node->debug_id;
+		__entry->node_ptr = node->ptr;
 	),
 	TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
 		  __entry->debug_id, __entry->node_debug_id,

@@ -194,9 +220,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
 );
 
 TRACE_EVENT(binder_transaction_ref_to_ref,
-	TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
-		 struct binder_ref *dest_ref),
-	TP_ARGS(t, src_ref, dest_ref),
+	TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+		 struct binder_ref_data *src_ref,
+		 struct binder_ref_data *dest_ref),
+	TP_ARGS(t, node, src_ref, dest_ref),
 
 	TP_STRUCT__entry(
 		__field(int, debug_id)

@@ -208,7 +235,7 @@ TRACE_EVENT(binder_transaction_ref_to_ref,
 	),
 	TP_fast_assign(
 		__entry->debug_id = t->debug_id;
-		__entry->node_debug_id = src_ref->node->debug_id;
+		__entry->node_debug_id = node->debug_id;
 		__entry->src_ref_debug_id = src_ref->debug_id;
 		__entry->src_ref_desc = src_ref->desc;
 		__entry->dest_ref_debug_id = dest_ref->debug_id;

@@ -245,14 +272,17 @@ DECLARE_EVENT_CLASS(binder_buffer_class,
 		__field(int, debug_id)
 		__field(size_t, data_size)
 		__field(size_t, offsets_size)
+		__field(size_t, extra_buffers_size)
 	),
 	TP_fast_assign(
 		__entry->debug_id = buf->debug_id;
 		__entry->data_size = buf->data_size;
 		__entry->offsets_size = buf->offsets_size;
+		__entry->extra_buffers_size = buf->extra_buffers_size;
 	),
-	TP_printk("transaction=%d data_size=%zd offsets_size=%zd",
-		  __entry->debug_id, __entry->data_size, __entry->offsets_size)
+	TP_printk("transaction=%d data_size=%zd offsets_size=%zd extra_buffers_size=%zd",
+		  __entry->debug_id, __entry->data_size, __entry->offsets_size,
+		  __entry->extra_buffers_size)
 );
 
 DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf,

@@ -268,9 +298,9 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
 	TP_ARGS(buffer));
 
 TRACE_EVENT(binder_update_page_range,
-	TP_PROTO(struct binder_proc *proc, bool allocate,
-		 void *start, void *end),
-	TP_ARGS(proc, allocate, start, end),
+	TP_PROTO(struct binder_alloc *alloc, bool allocate,
+		 void __user *start, void __user *end),
+	TP_ARGS(alloc, allocate, start, end),
 	TP_STRUCT__entry(
 		__field(int, proc)
 		__field(bool, allocate)

@@ -278,9 +308,9 @@ TRACE_EVENT(binder_update_page_range,
 		__field(size_t, size)
 	),
 	TP_fast_assign(
-		__entry->proc = proc->pid;
+		__entry->proc = alloc->pid;
 		__entry->allocate = allocate;
-		__entry->offset = start - proc->buffer;
+		__entry->offset = start - alloc->buffer;
 		__entry->size = end - start;
 	),
 	TP_printk("proc=%d allocate=%d offset=%zu size=%zu",

@@ -288,6 +318,61 @@ TRACE_EVENT(binder_update_page_range,
 		  __entry->offset, __entry->size)
 );
 
+DECLARE_EVENT_CLASS(binder_lru_page_class,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index),
+	TP_STRUCT__entry(
+		__field(int, proc)
+		__field(size_t, page_index)
+	),
+	TP_fast_assign(
+		__entry->proc = alloc->pid;
+		__entry->page_index = page_index;
+	),
+	TP_printk("proc=%d page_index=%zu",
+		  __entry->proc, __entry->page_index)
+);
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end,
+	TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+	TP_ARGS(alloc, page_index));
+
 TRACE_EVENT(binder_command,
 	TP_PROTO(uint32_t cmd),
 	TP_ARGS(cmd),
drivers/staging/android/uapi/binder.h

@@ -20,6 +20,7 @@
 #ifndef _UAPI_LINUX_BINDER_H
 #define _UAPI_LINUX_BINDER_H
 
 #include <linux/types.h>
+#include <linux/ioctl.h>
 
 #define B_PACK_CHARS(c1, c2, c3, c4) \
@@ -36,9 +37,64 @@ enum {
 	BINDER_TYPE_PTR		= B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
 };
 
-enum {
+/**
+ * enum flat_binder_object_shifts: shift values for flat_binder_object_flags
+ * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy.
+ *
+ */
+enum flat_binder_object_shifts {
+	FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
+};
+
+/**
+ * enum flat_binder_object_flags - flags for use in flat_binder_object.flags
+ */
+enum flat_binder_object_flags {
+	/**
+	 * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
+	 *
+	 * These bits can be used to set the minimum scheduler priority
+	 * at which transactions into this node should run. Valid values
+	 * in these bits depend on the scheduler policy encoded in
+	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
+	 *
+	 * For SCHED_NORMAL/SCHED_BATCH, the valid range is between [-20..19]
+	 * For SCHED_FIFO/SCHED_RR, the value can run between [1..99]
+	 */
 	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+	/**
+	 * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
+	 */
 	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+	/**
+	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
+	 *
+	 * These two bits can be used to set the min scheduling policy at which
+	 * transactions on this node should run. These match the UAPI
+	 * scheduler policy values, eg:
+	 * 00b: SCHED_NORMAL
+	 * 01b: SCHED_FIFO
+	 * 10b: SCHED_RR
+	 * 11b: SCHED_BATCH
+	 */
+	FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
+		3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,
+
+	/**
+	 * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
+	 *
+	 * Only when set, calls into this node will inherit a real-time
+	 * scheduling policy from the caller (for synchronous transactions).
+	 */
+	FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
+
+	/**
+	 * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
+	 *
+	 * Only when set, causes senders to include their security
+	 * context
+	 */
+	FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
 };
 
 #ifdef BINDER_IPC_32BIT
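For illustration, a userspace sketch of composing flat_binder_object.flags
from these values (example_node_flags is a hypothetical helper, not part of
the commit; assumes this header is included and uses the standard UAPI value
SCHED_FIFO == 1):

    #include <stdint.h>

    static uint32_t example_node_flags(void)
    {
    	uint32_t flags = 0;

    	/* min policy SCHED_FIFO (01b) in the two policy bits */
    	flags |= 1U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
    	/* min priority 50, valid for FIFO/RR per the comment above */
    	flags |= 50 & FLAT_BINDER_FLAG_PRIORITY_MASK;
    	flags |= FLAT_BINDER_FLAG_ACCEPTS_FDS;
    	return flags;
    }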
@@ -185,13 +241,38 @@ struct binder_version {
 #define BINDER_CURRENT_PROTOCOL_VERSION 8
 #endif
 
+/*
+ * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
+ * Set ptr to NULL for the first call to get the info for the first node, and
+ * then repeat the call passing the previously returned value to get the next
+ * nodes. ptr will be 0 when there are no more nodes.
+ */
+struct binder_node_debug_info {
+	binder_uintptr_t ptr;
+	binder_uintptr_t cookie;
+	__u32            has_strong_ref;
+	__u32            has_weak_ref;
+};
+
+struct binder_node_info_for_ref {
+	__u32            handle;
+	__u32            strong_count;
+	__u32            weak_count;
+	__u32            reserved1;
+	__u32            reserved2;
+	__u32            reserved3;
+};
+
 #define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
-#define	BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
-#define	BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32)
-#define	BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, __s32)
-#define	BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32)
-#define	BINDER_THREAD_EXIT		_IOW('b', 8, __s32)
+#define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
+#define BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32)
+#define BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, __s32)
+#define BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32)
+#define BINDER_THREAD_EXIT		_IOW('b', 8, __s32)
 #define BINDER_VERSION			_IOWR('b', 9, struct binder_version)
+#define BINDER_GET_NODE_DEBUG_INFO	_IOWR('b', 11, struct binder_node_debug_info)
+#define BINDER_GET_NODE_INFO_FOR_REF	_IOWR('b', 12, struct binder_node_info_for_ref)
+#define BINDER_SET_CONTEXT_MGR_EXT	_IOW('b', 13, struct flat_binder_object)
 
 /*
  * NOTE: Two special error codes you should check for when calling
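The iteration contract documented above (start with ptr == 0, feed the
returned ptr back in, stop when ptr comes back 0) can be exercised from
userspace roughly like this; dump_nodes is a hypothetical helper, and it
assumes fd is an open binder device and this updated header is included:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static void dump_nodes(int fd)
    {
    	struct binder_node_debug_info info;

    	memset(&info, 0, sizeof(info));	/* ptr == 0: start at first node */
    	for (;;) {
    		if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
    			break;		/* e.g. ioctl not supported */
    		if (!info.ptr)
    			break;		/* no more nodes */
    		printf("node ptr=0x%llx strong=%u weak=%u\n",
    		       (unsigned long long)info.ptr,
    		       info.has_strong_ref, info.has_weak_ref);
    	}
    }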
@@ -220,8 +301,10 @@ struct binder_transaction_data {
 	 * identifying the target and contents of the transaction.
 	 */
 	union {
-		__u32	handle;	/* target descriptor of command transaction */
-		binder_uintptr_t ptr;	/* target descriptor of return transaction */
+		/* target descriptor of command transaction */
+		__u32	handle;
+		/* target descriptor of return transaction */
+		binder_uintptr_t ptr;
 	} target;
 	binder_uintptr_t	cookie;	/* target object cookie */
 	__u32		code;		/* transaction command */
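A short userspace sketch of the union's two roles (example_fill_target is a
hypothetical helper, not part of the commit; assumes this header is included):

    #include <stdint.h>
    #include <string.h>

    /* For an outgoing command, userspace fills target.handle; on the
     * return path the driver reports target.ptr instead. */
    static void example_fill_target(struct binder_transaction_data *tr,
    				uint32_t handle, uint32_t code)
    {
    	memset(tr, 0, sizeof(*tr));
    	tr->target.handle = handle;	/* descriptor of the target node */
    	tr->code = code;		/* transaction command */
    }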
@@ -248,6 +331,11 @@ struct binder_transaction_data {
 	} data;
 };
 
+struct binder_transaction_data_secctx {
+	struct binder_transaction_data transaction_data;
+	binder_uintptr_t secctx;
+};
+
 struct binder_transaction_data_sg {
 	struct binder_transaction_data transaction_data;
 	binder_size_t buffers_size;
@@ -261,7 +349,7 @@ struct binder_ptr_cookie {
 struct binder_handle_cookie {
 	__u32 handle;
 	binder_uintptr_t cookie;
-} __attribute__((packed));
+} __packed;
 
 struct binder_pri_desc {
 	__s32 priority;
@@ -283,6 +371,11 @@ enum binder_driver_return_protocol {
 	BR_OK = _IO('r', 1),
 	/* No parameters! */
 
+	BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
+				      struct binder_transaction_data_secctx),
+	/*
+	 * binder_transaction_data_secctx: the received command.
+	 */
 	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
 	BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
 	/*
@@ -419,13 +512,15 @@ enum binder_driver_command_protocol {
 	 * of looping threads it has available.
 	 */
 
-	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_handle_cookie),
+	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14,
+					     struct binder_handle_cookie),
 	/*
 	 * int: handle
 	 * void *: cookie
 	 */
 
-	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_handle_cookie),
+	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15,
+					   struct binder_handle_cookie),
 	/*
 	 * int: handle
 	 * void *: cookie
@@ -444,4 +539,3 @@ enum binder_driver_command_protocol {
 };
 
 #endif /* _UAPI_LINUX_BINDER_H */
-