author     Todd Kjos <tkjos@android.com>                    2017-06-29 12:01:40 -0700
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-07-17 14:47:28 +0200
commit     19c987241ca1216a51118b2bd0185b8bc5081783 (patch)
tree       873682dda02a96f5eb3ed6a8a084b19f9dc9ac66
parent     7c03f0d6163a500b655467bd8306d435bef18a83 (diff)
download   linux-19c987241ca1216a51118b2bd0185b8bc5081783.tar.bz2
binder: separate out binder_alloc functions
Continuation of splitting the binder allocator from the binder
driver. Separate binder_alloc functions from normal binder
functions. Protect the allocator with a separate mutex.
Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
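[Editor's note] The core pattern this patch applies throughout the diff below is a per-object lock with _locked/unlocked function pairs: internal helpers such as binder_alloc_new_buf_locked() assume alloc->mutex is held, while the public entry points (binder_alloc_new_buf(), binder_alloc_free_buf(), binder_alloc_buffer_lookup()) are thin wrappers that take and release it. A minimal userspace sketch of that convention follows, with pthread_mutex_t standing in for the kernel's struct mutex; all names here are illustrative, not taken from the driver.

/*
 * Sketch of the _locked/unlocked locking convention (userspace, not
 * kernel code). Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct alloc_state {
	pthread_mutex_t mutex;	/* protects free_space below */
	size_t free_space;
};

/* Internal helper: caller must hold state->mutex. */
static int alloc_reserve_locked(struct alloc_state *state, size_t size)
{
	if (size > state->free_space)
		return -1;
	state->free_space -= size;
	return 0;
}

/* Public entry point: takes the per-object lock around the worker. */
static int alloc_reserve(struct alloc_state *state, size_t size)
{
	int ret;

	pthread_mutex_lock(&state->mutex);
	ret = alloc_reserve_locked(state, size);
	pthread_mutex_unlock(&state->mutex);
	return ret;
}

int main(void)
{
	struct alloc_state state = {
		.mutex = PTHREAD_MUTEX_INITIALIZER,
		.free_space = 4096,
	};

	printf("reserve 1024: %d\n", alloc_reserve(&state, 1024));	/* 0 */
	printf("reserve 8192: %d\n", alloc_reserve(&state, 8192));	/* -1 */
	return 0;
}

Keeping the lock acquisition in thin public wrappers keeps the invariant auditable: any function suffixed _locked must only be reachable with the mutex already held.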
-rw-r--r--   drivers/android/binder.c        | 649
-rw-r--r--   drivers/android/binder_trace.h  |   9
2 files changed, 410 insertions, 248 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 2cbd7558fc3f..6c1d0b5b352b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -49,7 +49,7 @@
 static DEFINE_MUTEX(binder_main_lock);
 static DEFINE_MUTEX(binder_deferred_lock);
-static DEFINE_MUTEX(binder_mmap_lock);
+static DEFINE_MUTEX(binder_alloc_mmap_lock);
 
 static HLIST_HEAD(binder_devices);
 static HLIST_HEAD(binder_procs);
@@ -104,9 +104,7 @@ enum {
 	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
 	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
 	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
-	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
-	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
-	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
+	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
 };
 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
 	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
@@ -159,6 +157,27 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
 #define to_binder_fd_array_object(hdr) \
 	container_of(hdr, struct binder_fd_array_object, hdr)
 
+/*
+ * debug declarations for binder_alloc. To be
+ * moved to binder_alloc.c
+ */
+enum {
+	BINDER_ALLOC_DEBUG_OPEN_CLOSE         = 1U << 1,
+	BINDER_ALLOC_DEBUG_BUFFER_ALLOC       = 1U << 2,
+	BINDER_ALLOC_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
+};
+static uint32_t binder_alloc_debug_mask;
+
+module_param_named(alloc_debug_mask, binder_alloc_debug_mask,
+		   uint, 0644);
+
+#define binder_alloc_debug(mask, x...) \
+	do { \
+		if (binder_alloc_debug_mask & mask) \
+			pr_info(x); \
+	} while (0)
+/* end of binder_alloc debug declarations */
+
 enum binder_stat_types {
 	BINDER_STAT_PROC,
 	BINDER_STAT_THREAD,
@@ -342,6 +361,8 @@ enum binder_deferred_state {
  * struct binder_buffer objects used to track the user buffers
  */
 struct binder_alloc {
+	struct mutex mutex;
+	struct task_struct *tsk;
 	struct vm_area_struct *vma;
 	struct mm_struct *vma_vm_mm;
 	void *buffer;
@@ -352,6 +373,7 @@ struct binder_alloc {
 	size_t free_async_space;
 	struct page **pages;
 	size_t buffer_size;
+	int pid;
 };
 
 struct binder_proc {
@@ -423,6 +445,56 @@ struct binder_transaction {
 	kuid_t	sender_euid;
 };
 
+/*
+ * Forward declarations of binder_alloc functions.
+ * These will be moved to binder_alloc.h when
+ * binder_alloc is moved to its own files.
+ */
+extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+						  size_t data_size,
+						  size_t offsets_size,
+						  size_t extra_buffers_size,
+						  int is_async);
+extern void binder_alloc_init(struct binder_alloc *alloc);
+extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+extern struct binder_buffer *
+binder_alloc_buffer_lookup(struct binder_alloc *alloc,
+			   uintptr_t user_ptr);
+extern void binder_alloc_free_buf(struct binder_alloc *alloc,
+				  struct binder_buffer *buffer);
+extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+				     struct vm_area_struct *vma);
+extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
+extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
+extern void binder_alloc_print_allocated(struct seq_file *m,
+					 struct binder_alloc *alloc);
+
+static inline size_t
+binder_alloc_get_free_async_space(struct binder_alloc *alloc)
+{
+	size_t free_async_space;
+
+	mutex_lock(&alloc->mutex);
+	free_async_space = alloc->free_async_space;
+	mutex_unlock(&alloc->mutex);
+	return free_async_space;
+}
+
+static inline ptrdiff_t
+binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
+{
+	/*
+	 * user_buffer_offset is constant if vma is set and
+	 * undefined if vma is not set. It is possible to
+	 * get here with !alloc->vma if the target process
+	 * is dying while a transaction is being initiated.
+	 * Returning the old value is ok in this case and
+	 * the transaction will fail.
+	 */
+	return alloc->user_buffer_offset;
+}
+/* end of binder_alloc declarations */
+
 static void
 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
@@ -506,21 +578,20 @@ static void binder_set_nice(long nice)
 	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
 }
 
-static size_t binder_buffer_size(struct binder_proc *proc,
-				 struct binder_buffer *buffer)
+static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+				       struct binder_buffer *buffer)
 {
-	if (list_is_last(&buffer->entry, &proc->alloc.buffers))
-		return proc->alloc.buffer +
-			proc->alloc.buffer_size -
-			(void *)buffer->data;
+	if (list_is_last(&buffer->entry, &alloc->buffers))
+		return alloc->buffer +
+			alloc->buffer_size - (void *)buffer->data;
 	return (size_t)list_entry(buffer->entry.next,
 			  struct binder_buffer, entry) - (size_t)buffer->data;
 }
 
-static void binder_insert_free_buffer(struct binder_proc *proc,
+static void binder_insert_free_buffer(struct binder_alloc *alloc,
 				      struct binder_buffer *new_buffer)
 {
-	struct rb_node **p = &proc->alloc.free_buffers.rb_node;
+	struct rb_node **p = &alloc->free_buffers.rb_node;
 	struct rb_node *parent = NULL;
 	struct binder_buffer *buffer;
 	size_t buffer_size;
@@ -528,18 +599,18 @@ static void binder_insert_free_buffer(struct binder_proc *proc,
 
 	BUG_ON(!new_buffer->free);
 
-	new_buffer_size = binder_buffer_size(proc, new_buffer);
+	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
 
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: add free buffer, size %zd, at %p\n",
-		      proc->pid, new_buffer_size, new_buffer);
+	binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
+			   "%d: add free buffer, size %zd, at %pK\n",
+			   alloc->pid, new_buffer_size, new_buffer);
 
 	while (*p) {
 		parent = *p;
 		buffer = rb_entry(parent, struct binder_buffer, rb_node);
 		BUG_ON(!buffer->free);
 
-		buffer_size = binder_buffer_size(proc, buffer);
+		buffer_size = binder_alloc_buffer_size(alloc, buffer);
 
 		if (new_buffer_size < buffer_size)
 			p = &parent->rb_left;
@@ -547,13 +618,13 @@ static void binder_insert_free_buffer(struct binder_proc *proc,
 			p = &parent->rb_right;
 	}
 	rb_link_node(&new_buffer->rb_node, parent, p);
-	rb_insert_color(&new_buffer->rb_node, &proc->alloc.free_buffers);
+	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
 }
 
-static void binder_insert_allocated_buffer(struct binder_proc *proc,
+static void binder_insert_allocated_buffer(struct binder_alloc *alloc,
 					   struct binder_buffer *new_buffer)
 {
-	struct rb_node **p = &proc->alloc.allocated_buffers.rb_node;
+	struct rb_node **p = &alloc->allocated_buffers.rb_node;
 	struct rb_node *parent = NULL;
 	struct binder_buffer *buffer;
@@ -572,19 +643,19 @@ static void binder_insert_allocated_buffer(struct binder_proc *proc,
 			BUG();
 	}
 	rb_link_node(&new_buffer->rb_node, parent, p);
-	rb_insert_color(&new_buffer->rb_node, &proc->alloc.allocated_buffers);
+	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
 }
 
-static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
-						  uintptr_t user_ptr)
+static struct binder_buffer *binder_alloc_buffer_lookup_locked(
+		struct binder_alloc *alloc,
+		uintptr_t user_ptr)
 {
-	struct rb_node *n = proc->alloc.allocated_buffers.rb_node;
+	struct rb_node *n = alloc->allocated_buffers.rb_node;
 	struct binder_buffer *buffer;
 	struct binder_buffer *kern_ptr;
 
-	kern_ptr = (struct binder_buffer *)
-		(user_ptr - proc->alloc.user_buffer_offset -
-		offsetof(struct binder_buffer, data));
+	kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
+		- offsetof(struct binder_buffer, data));
 
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -600,7 +671,18 @@ static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
 	return NULL;
 }
 
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
+struct binder_buffer *binder_alloc_buffer_lookup(struct binder_alloc *alloc,
+						 uintptr_t user_ptr)
+{
+	struct binder_buffer *buffer;
+
+	mutex_lock(&alloc->mutex);
+	buffer = binder_alloc_buffer_lookup_locked(alloc, user_ptr);
+	mutex_unlock(&alloc->mutex);
+	return buffer;
+}
+
+static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 				    void *start, void *end,
 				    struct vm_area_struct *vma)
 {
@@ -609,26 +691,26 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 	struct page **page;
 	struct mm_struct *mm;
 
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: %s pages %p-%p\n", proc->pid,
+	binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
+			   "%d: %s pages %pK-%pK\n", alloc->pid,
 		     allocate ? "allocate" : "free", start, end);
 
 	if (end <= start)
 		return 0;
 
-	trace_binder_update_page_range(proc, allocate, start, end);
+	trace_binder_update_page_range(alloc, allocate, start, end);
 
 	if (vma)
 		mm = NULL;
 	else
-		mm = get_task_mm(proc->tsk);
+		mm = get_task_mm(alloc->tsk);
 
 	if (mm) {
 		down_write(&mm->mmap_sem);
-		vma = proc->alloc.vma;
-		if (vma && mm != proc->alloc.vma_vm_mm) {
+		vma = alloc->vma;
+		if (vma && mm != alloc->vma_vm_mm) {
 			pr_err("%d: vma mm and task mm mismatch\n",
-				proc->pid);
+				alloc->pid);
 			vma = NULL;
 		}
 	}
@@ -638,21 +720,20 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 
 	if (vma == NULL) {
 		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
-			proc->pid);
+			alloc->pid);
 		goto err_no_vma;
 	}
 
 	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 		int ret;
 
-		page = &proc->alloc.pages[
-			(page_addr - proc->alloc.buffer) / PAGE_SIZE];
+		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
 
 		BUG_ON(*page);
 		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
 		if (*page == NULL) {
-			pr_err("%d: binder_alloc_buf failed for page at %p\n",
-				proc->pid, page_addr);
+			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
+				alloc->pid, page_addr);
 			goto err_alloc_page_failed;
 		}
 		ret = map_kernel_range_noflush((unsigned long)page_addr,
@@ -660,16 +741,16 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 		flush_cache_vmap((unsigned long)page_addr,
 				(unsigned long)page_addr + PAGE_SIZE);
 		if (ret != 1) {
-			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
-				proc->pid, page_addr);
+			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
+				alloc->pid, page_addr);
 			goto err_map_kernel_failed;
 		}
 		user_page_addr =
-			(uintptr_t)page_addr + proc->alloc.user_buffer_offset;
+			(uintptr_t)page_addr + alloc->user_buffer_offset;
 		ret = vm_insert_page(vma, user_page_addr, page[0]);
 		if (ret) {
 			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
-				proc->pid, user_page_addr);
+				alloc->pid, user_page_addr);
 			goto err_vm_insert_page_failed;
 		}
 		/* vm_insert_page does not seem to increment the refcount */
@@ -683,11 +764,10 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 free_range:
 	for (page_addr = end - PAGE_SIZE; page_addr >= start;
 	     page_addr -= PAGE_SIZE) {
-		page = &proc->alloc.pages[
-			(page_addr - proc->alloc.buffer) / PAGE_SIZE];
+		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
 		if (vma)
 			zap_page_range(vma, (uintptr_t)page_addr +
-				proc->alloc.user_buffer_offset, PAGE_SIZE);
+				alloc->user_buffer_offset, PAGE_SIZE);
 err_vm_insert_page_failed:
 		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 err_map_kernel_failed:
@@ -704,13 +784,11 @@ err_no_vma:
 	return -ENOMEM;
 }
 
-static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
-					      size_t data_size,
-					      size_t offsets_size,
-					      size_t extra_buffers_size,
-					      int is_async)
+static struct binder_buffer *binder_alloc_new_buf_locked(
+		struct binder_alloc *alloc, size_t data_size,
+		size_t offsets_size, size_t extra_buffers_size, int is_async)
 {
-	struct rb_node *n = proc->alloc.free_buffers.rb_node;
+	struct rb_node *n = alloc->free_buffers.rb_node;
 	struct binder_buffer *buffer;
 	size_t buffer_size;
 	struct rb_node *best_fit = NULL;
@@ -718,9 +796,9 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 	void *end_page_addr;
 	size_t size, data_offsets_size;
 
-	if (proc->alloc.vma == NULL) {
+	if (alloc->vma == NULL) {
 		pr_err("%d: binder_alloc_buf, no vma\n",
-		       proc->pid);
+		       alloc->pid);
 		return NULL;
 	}
@@ -728,29 +806,30 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 		ALIGN(offsets_size, sizeof(void *));
 
 	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
-		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
-				proc->pid, data_size, offsets_size);
+		binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
+				   "%d: got transaction with invalid size %zd-%zd\n",
+				   alloc->pid, data_size, offsets_size);
 		return NULL;
 	}
 	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
 	if (size < data_offsets_size || size < extra_buffers_size) {
-		binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
-				  proc->pid, extra_buffers_size);
+		binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
+				   "%d: got transaction with invalid extra_buffers_size %zd\n",
+				   alloc->pid, extra_buffers_size);
 		return NULL;
 	}
 	if (is_async &&
-	    proc->alloc.free_async_space <
-	    size + sizeof(struct binder_buffer)) {
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
+		binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
-			      proc->pid, size);
+			      alloc->pid, size);
 		return NULL;
 	}
 
 	while (n) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(!buffer->free);
-		buffer_size = binder_buffer_size(proc, buffer);
+		buffer_size = binder_alloc_buffer_size(alloc, buffer);
 
 		if (size < buffer_size) {
 			best_fit = n;
@@ -764,17 +843,17 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 	}
 	if (best_fit == NULL) {
 		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
-			proc->pid, size);
+			alloc->pid, size);
 		return NULL;
 	}
 	if (n == NULL) {
 		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
-		buffer_size = binder_buffer_size(proc, buffer);
+		buffer_size = binder_alloc_buffer_size(alloc, buffer);
 	}
 
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
-		      proc->pid, size, buffer, buffer_size);
+	binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
+			   "%d: %s size %zd got buffer %pK size %zd\n",
+		      alloc->pid, __func__, size, buffer, buffer_size);
 
 	has_page_addr =
 		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
@@ -788,38 +867,52 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
 	if (end_page_addr > has_page_addr)
 		end_page_addr = has_page_addr;
-	if (binder_update_page_range(proc, 1,
+	if (binder_update_page_range(alloc, 1,
 	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
 		return NULL;
 
-	rb_erase(best_fit, &proc->alloc.free_buffers);
+	rb_erase(best_fit, &alloc->free_buffers);
 	buffer->free = 0;
-	binder_insert_allocated_buffer(proc, buffer);
+	binder_insert_allocated_buffer(alloc, buffer);
 	if (buffer_size != size) {
 		struct binder_buffer *new_buffer = (void *)buffer->data + size;
 
 		list_add(&new_buffer->entry, &buffer->entry);
 		new_buffer->free = 1;
-		binder_insert_free_buffer(proc, new_buffer);
+		binder_insert_free_buffer(alloc, new_buffer);
 	}
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_alloc_buf size %zd got %p\n",
-		      proc->pid, size, buffer);
+	binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
+			   "%d: %s size %zd got %pK\n",
+		      alloc->pid, __func__, size, buffer);
 	buffer->data_size = data_size;
 	buffer->offsets_size = offsets_size;
-	buffer->extra_buffers_size = extra_buffers_size;
 	buffer->async_transaction = is_async;
+	buffer->extra_buffers_size = extra_buffers_size;
 	if (is_async) {
-		proc->alloc.free_async_space -=
-			size + sizeof(struct binder_buffer);
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+		alloc->free_async_space -= size + sizeof(struct binder_buffer);
+		binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
-			      proc->pid, size, proc->alloc.free_async_space);
+			      alloc->pid, size, alloc->free_async_space);
 	}
 	return buffer;
 }
 
+struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+					   size_t data_size,
+					   size_t offsets_size,
+					   size_t extra_buffers_size,
+					   int is_async)
+{
+	struct binder_buffer *buffer;
+
+	mutex_lock(&alloc->mutex);
+	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
+					     extra_buffers_size, is_async);
+	mutex_unlock(&alloc->mutex);
+	return buffer;
+}
+
 static void *buffer_start_page(struct binder_buffer *buffer)
 {
 	return (void *)((uintptr_t)buffer & PAGE_MASK);
@@ -830,26 +923,26 @@ static void *buffer_end_page(struct binder_buffer *buffer)
 	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
 }
 
-static void binder_delete_free_buffer(struct binder_proc *proc,
+static void binder_delete_free_buffer(struct binder_alloc *alloc,
 				      struct binder_buffer *buffer)
 {
 	struct binder_buffer *prev, *next = NULL;
 	int free_page_end = 1;
 	int free_page_start = 1;
 
-	BUG_ON(proc->alloc.buffers.next == &buffer->entry);
+	BUG_ON(alloc->buffers.next == &buffer->entry);
 	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
 	BUG_ON(!prev->free);
 	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
 		free_page_start = 0;
 		if (buffer_end_page(prev) == buffer_end_page(buffer))
 			free_page_end = 0;
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %p share page with %p\n",
-			      proc->pid, buffer, prev);
+		binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
+				   "%d: merge free, buffer %pK share page with %pK\n",
+				   alloc->pid, buffer, prev);
 	}
 
-	if (!list_is_last(&buffer->entry, &proc->alloc.buffers)) {
+	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
 		next = list_entry(buffer->entry.next,
 				  struct binder_buffer, entry);
 		if (buffer_start_page(next) == buffer_end_page(buffer)) {
@@ -857,80 +950,88 @@ static void binder_delete_free_buffer(struct binder_proc *proc,
 			if (buffer_start_page(next) == buffer_start_page(buffer))
 				free_page_start = 0;
-			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%d: merge free, buffer %p share page with %p\n",
-				      proc->pid, buffer, prev);
+			binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
+					   "%d: merge free, buffer %pK share page with %pK\n",
+					   alloc->pid, buffer, prev);
 		}
 	}
 	list_del(&buffer->entry);
 	if (free_page_start || free_page_end) {
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
-			      proc->pid, buffer, free_page_start ? "" : " end",
+		binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
+				   "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
+				   alloc->pid, buffer, free_page_start ? "" : " end",
 			      free_page_end ? "" : " start", prev, next);
-		binder_update_page_range(proc, 0, free_page_start ?
+		binder_update_page_range(alloc, 0, free_page_start ?
 			buffer_start_page(buffer) : buffer_end_page(buffer),
 			(free_page_end ? buffer_end_page(buffer) :
 			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
 	}
 }
 
-static void binder_free_buf(struct binder_proc *proc,
-			    struct binder_buffer *buffer)
+static void binder_free_buf_locked(struct binder_alloc *alloc,
+				   struct binder_buffer *buffer)
 {
 	size_t size, buffer_size;
 
-	buffer_size = binder_buffer_size(proc, buffer);
+	buffer_size = binder_alloc_buffer_size(alloc, buffer);
 
 	size = ALIGN(buffer->data_size, sizeof(void *)) +
 		ALIGN(buffer->offsets_size, sizeof(void *)) +
 		ALIGN(buffer->extra_buffers_size, sizeof(void *));
 
-	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
-		      proc->pid, buffer, size, buffer_size);
+	binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
+			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
+			   alloc->pid, buffer, size, buffer_size);
 
 	BUG_ON(buffer->free);
 	BUG_ON(size > buffer_size);
 	BUG_ON(buffer->transaction != NULL);
-	BUG_ON((void *)buffer < proc->alloc.buffer);
-	BUG_ON((void *)buffer > proc->alloc.buffer + proc->alloc.buffer_size);
+	BUG_ON((void *)buffer < alloc->buffer);
+	BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
 
 	if (buffer->async_transaction) {
-		proc->alloc.free_async_space +=
-			size + sizeof(struct binder_buffer);
+		alloc->free_async_space += size + sizeof(struct binder_buffer);
 
-		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+		binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
-			      proc->pid, size, proc->alloc.free_async_space);
+			      alloc->pid, size, alloc->free_async_space);
 	}
 
-	binder_update_page_range(proc, 0,
+	binder_update_page_range(alloc, 0,
 		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
 		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
 		NULL);
-	rb_erase(&buffer->rb_node, &proc->alloc.allocated_buffers);
+
+	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 	buffer->free = 1;
-	if (!list_is_last(&buffer->entry, &proc->alloc.buffers)) {
+	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
 		struct binder_buffer *next = list_entry(buffer->entry.next,
 						struct binder_buffer, entry);
 
 		if (next->free) {
-			rb_erase(&next->rb_node, &proc->alloc.free_buffers);
-			binder_delete_free_buffer(proc, next);
+			rb_erase(&next->rb_node, &alloc->free_buffers);
+			binder_delete_free_buffer(alloc, next);
 		}
 	}
-	if (proc->alloc.buffers.next != &buffer->entry) {
+	if (alloc->buffers.next != &buffer->entry) {
 		struct binder_buffer *prev = list_entry(buffer->entry.prev,
 						struct binder_buffer, entry);
 
 		if (prev->free) {
-			binder_delete_free_buffer(proc, buffer);
-			rb_erase(&prev->rb_node, &proc->alloc.free_buffers);
+			binder_delete_free_buffer(alloc, buffer);
+			rb_erase(&prev->rb_node, &alloc->free_buffers);
 			buffer = prev;
 		}
 	}
-	binder_insert_free_buffer(proc, buffer);
+	binder_insert_free_buffer(alloc, buffer);
+}
+
+void binder_alloc_free_buf(struct binder_alloc *alloc,
+			   struct binder_buffer *buffer)
+{
+	mutex_lock(&alloc->mutex);
+	binder_free_buf_locked(alloc, buffer);
+	mutex_unlock(&alloc->mutex);
 }
 
 static struct binder_node *binder_get_node(struct binder_proc *proc,
@@ -1564,7 +1665,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 			 * back to kernel address space to access it
 			 */
 			parent_buffer = parent->buffer -
-				proc->alloc.user_buffer_offset;
+				binder_alloc_get_user_buffer_offset(
+						&proc->alloc);
 
 			fd_buf_size = sizeof(u32) * fda->num_fds;
 			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -1782,7 +1884,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
 	 * Since the parent was already fixed up, convert it
 	 * back to the kernel address space to access it
 	 */
-	parent_buffer = parent->buffer - target_proc->alloc.user_buffer_offset;
+	parent_buffer = parent->buffer -
+		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
 	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
 	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
@@ -1850,7 +1953,8 @@ static int binder_fixup_parent(struct binder_transaction *t,
 		return -EINVAL;
 	}
 	parent_buffer = (u8 *)(parent->buffer -
-			       target_proc->alloc.user_buffer_offset);
+			binder_alloc_get_user_buffer_offset(
+				&target_proc->alloc));
 	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
 
 	return 0;
@@ -2036,7 +2140,7 @@ static void binder_transaction(struct binder_proc *proc,
 
 	trace_binder_transaction(reply, t, target_node);
 
-	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
 		tr->offsets_size, extra_buffers_size,
 		!reply && (t->flags & TF_ONE_WAY));
 	if (t->buffer == NULL) {
@@ -2190,7 +2294,8 @@ static void binder_transaction(struct binder_proc *proc,
 			}
 			/* Fixup buffer pointer to target proc address space */
 			bp->buffer = (uintptr_t)sg_bufp +
-				target_proc->alloc.user_buffer_offset;
+				binder_alloc_get_user_buffer_offset(
+						&target_proc->alloc);
 			sg_bufp += ALIGN(bp->length, sizeof(u64));
 
 			ret = binder_fixup_parent(t, thread, bp, off_start,
@@ -2248,7 +2353,7 @@ err_copy_data_failed:
 	trace_binder_transaction_failed_buffer_release(t->buffer);
 	binder_transaction_buffer_release(target_proc, t->buffer, offp);
 	t->buffer->transaction = NULL;
-	binder_free_buf(target_proc, t->buffer);
+	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
 err_binder_alloc_buf_failed:
 	kfree(tcomplete);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
@@ -2428,7 +2533,8 @@ static int binder_thread_write(struct binder_proc *proc,
 				return -EFAULT;
 			ptr += sizeof(binder_uintptr_t);
 
-			buffer = binder_buffer_lookup(proc, data_ptr);
+			buffer = binder_alloc_buffer_lookup(&proc->alloc,
+							    data_ptr);
 			if (buffer == NULL) {
 				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
 					proc->pid, thread->pid, (u64)data_ptr);
@@ -2458,7 +2564,7 @@ static int binder_thread_write(struct binder_proc *proc,
 			}
 			trace_binder_transaction_buffer_release(buffer);
 			binder_transaction_buffer_release(proc, buffer, NULL);
-			binder_free_buf(proc, buffer);
+			binder_alloc_free_buf(&proc->alloc, buffer);
 			break;
 		}
@@ -2950,9 +3056,9 @@ retry:
 
 		tr.data_size = t->buffer->data_size;
 		tr.offsets_size = t->buffer->offsets_size;
-		tr.data.ptr.buffer = (binder_uintptr_t)(
-					(uintptr_t)t->buffer->data +
-					proc->alloc.user_buffer_offset);
+		tr.data.ptr.buffer = (binder_uintptr_t)
+			((uintptr_t)t->buffer->data +
+			binder_alloc_get_user_buffer_offset(&proc->alloc));
 		tr.data.ptr.offsets = tr.data.ptr.buffer +
 				ALIGN(t->buffer->data_size, sizeof(void *));
@@ -3361,6 +3467,12 @@ static void binder_vma_open(struct vm_area_struct *vma)
 		     (unsigned long)pgprot_val(vma->vm_page_prot));
 }
 
+void binder_alloc_vma_close(struct binder_alloc *alloc)
+{
+	WRITE_ONCE(alloc->vma, NULL);
+	WRITE_ONCE(alloc->vma_vm_mm, NULL);
+}
+
 static void binder_vma_close(struct vm_area_struct *vma)
 {
 	struct binder_proc *proc = vma->vm_private_data;
@@ -3370,8 +3482,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
 		     proc->pid, vma->vm_start, vma->vm_end,
 		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
 		     (unsigned long)pgprot_val(vma->vm_page_prot));
-	proc->alloc.vma = NULL;
-	proc->alloc.vma_vm_mm = NULL;
+	binder_alloc_vma_close(&proc->alloc);
 	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 }
@@ -3386,35 +3497,16 @@ static const struct vm_operations_struct binder_vm_ops = {
 	.fault = binder_vm_fault,
 };
 
-static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+			      struct vm_area_struct *vma)
 {
 	int ret;
 	struct vm_struct *area;
-	struct binder_proc *proc = filp->private_data;
 	const char *failure_string;
 	struct binder_buffer *buffer;
 
-	if (proc->tsk != current)
-		return -EINVAL;
-
-	if ((vma->vm_end - vma->vm_start) > SZ_4M)
-		vma->vm_end = vma->vm_start + SZ_4M;
-
-	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
-		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
-		     proc->pid, vma->vm_start, vma->vm_end,
-		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
-		     (unsigned long)pgprot_val(vma->vm_page_prot));
-
-	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
-		ret = -EPERM;
-		failure_string = "bad vm_flags";
-		goto err_bad_arg;
-	}
-	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
-
-	mutex_lock(&binder_mmap_lock);
-	if (proc->alloc.buffer) {
+	mutex_lock(&binder_alloc_mmap_lock);
+	if (alloc->buffer) {
 		ret = -EBUSY;
 		failure_string = "already mapped";
 		goto err_already_mapped;
 	}
@@ -3426,75 +3518,111 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 		failure_string = "get_vm_area";
 		goto err_get_vm_area_failed;
 	}
-	proc->alloc.buffer = area->addr;
-	proc->alloc.user_buffer_offset =
-		vma->vm_start - (uintptr_t)proc->alloc.buffer;
-	mutex_unlock(&binder_mmap_lock);
+	alloc->buffer = area->addr;
+	alloc->user_buffer_offset =
+		vma->vm_start - (uintptr_t)alloc->buffer;
+	mutex_unlock(&binder_alloc_mmap_lock);
 
 #ifdef CONFIG_CPU_CACHE_VIPT
 	if (cache_is_vipt_aliasing()) {
 		while (CACHE_COLOUR(
-				(vma->vm_start ^ (uint32_t)proc->alloc.buffer))) {
+				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
 			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n", __func__,
-				proc->pid, vma->vm_start,
-				vma->vm_end, proc->alloc.buffer);
+				alloc->pid, vma->vm_start, vma->vm_end,
+				alloc->buffer);
 			vma->vm_start += PAGE_SIZE;
 		}
 	}
 #endif
-	proc->alloc.pages =
-		kzalloc(sizeof(proc->alloc.pages[0]) *
-			((vma->vm_end - vma->vm_start) / PAGE_SIZE),
-			GFP_KERNEL);
-	if (proc->alloc.pages == NULL) {
+	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
+			       ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
+			       GFP_KERNEL);
+	if (alloc->pages == NULL) {
 		ret = -ENOMEM;
 		failure_string = "alloc page array";
 		goto err_alloc_pages_failed;
 	}
-	proc->alloc.buffer_size = vma->vm_end - vma->vm_start;
+	alloc->buffer_size = vma->vm_end - vma->vm_start;
 
-	vma->vm_ops = &binder_vm_ops;
-	vma->vm_private_data = proc;
-
-	if (binder_update_page_range(proc, 1, proc->alloc.buffer,
-				     proc->alloc.buffer + PAGE_SIZE, vma)) {
+	if (binder_update_page_range(alloc, 1, alloc->buffer,
+				     alloc->buffer + PAGE_SIZE, vma)) {
 		ret = -ENOMEM;
 		failure_string = "alloc small buf";
 		goto err_alloc_small_buf_failed;
 	}
-	buffer = proc->alloc.buffer;
-	INIT_LIST_HEAD(&proc->alloc.buffers);
-	list_add(&buffer->entry, &proc->alloc.buffers);
+	buffer = alloc->buffer;
+	INIT_LIST_HEAD(&alloc->buffers);
+	list_add(&buffer->entry, &alloc->buffers);
 	buffer->free = 1;
-	binder_insert_free_buffer(proc, buffer);
-	proc->alloc.free_async_space = proc->alloc.buffer_size / 2;
+	binder_insert_free_buffer(alloc, buffer);
+	alloc->free_async_space = alloc->buffer_size / 2;
 	barrier();
-	proc->files = get_files_struct(current);
-	proc->alloc.vma = vma;
-	proc->alloc.vma_vm_mm = vma->vm_mm;
+	alloc->vma = vma;
+	alloc->vma_vm_mm = vma->vm_mm;
 
-	/*pr_info("binder_mmap: %d %lx-%lx maps %pK\n",
-	 *	proc->pid, vma->vm_start, vma->vm_end, proc->alloc.buffer);
-	 */
 	return 0;
 
 err_alloc_small_buf_failed:
-	kfree(proc->alloc.pages);
-	proc->alloc.pages = NULL;
+	kfree(alloc->pages);
+	alloc->pages = NULL;
err_alloc_pages_failed:
-	mutex_lock(&binder_mmap_lock);
-	vfree(proc->alloc.buffer);
-	proc->alloc.buffer = NULL;
+	mutex_lock(&binder_alloc_mmap_lock);
+	vfree(alloc->buffer);
+	alloc->buffer = NULL;
 err_get_vm_area_failed:
 err_already_mapped:
-	mutex_unlock(&binder_mmap_lock);
+	mutex_unlock(&binder_alloc_mmap_lock);
+	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+	return ret;
+}
+
+static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+	struct binder_proc *proc = filp->private_data;
+	const char *failure_string;
+
+	if (proc->tsk != current->group_leader)
+		return -EINVAL;
+
+	if ((vma->vm_end - vma->vm_start) > SZ_4M)
+		vma->vm_end = vma->vm_start + SZ_4M;
+
+	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+		     __func__, proc->pid, vma->vm_start, vma->vm_end,
+		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+		     (unsigned long)pgprot_val(vma->vm_page_prot));
+
+	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
+		ret = -EPERM;
+		failure_string = "bad vm_flags";
+		goto err_bad_arg;
+	}
+	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
+	vma->vm_ops = &binder_vm_ops;
+	vma->vm_private_data = proc;
+
+	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
+	if (ret)
+		return ret;
+	proc->files = get_files_struct(current);
+	return 0;
+
 err_bad_arg:
 	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
 	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
 	return ret;
 }
 
+void binder_alloc_init(struct binder_alloc *alloc)
+{
+	alloc->tsk = current->group_leader;
+	alloc->pid = current->group_leader->pid;
+	mutex_init(&alloc->mutex);
+}
+
 static int binder_open(struct inode *nodp, struct file *filp)
 {
 	struct binder_proc *proc;
@@ -3514,6 +3642,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	binder_dev = container_of(filp->private_data, struct binder_device,
 				  miscdev);
 	proc->context = &binder_dev->context;
+	binder_alloc_init(&proc->alloc);
 
 	binder_lock(__func__);
@@ -3629,14 +3758,61 @@ static int binder_node_release(struct binder_node *node, int refs)
 	return refs;
 }
 
+void binder_alloc_deferred_release(struct binder_alloc *alloc)
+{
+	struct rb_node *n;
+	int buffers, page_count;
+
+	BUG_ON(alloc->vma);
+
+	buffers = 0;
+	mutex_lock(&alloc->mutex);
+	while ((n = rb_first(&alloc->allocated_buffers))) {
+		struct binder_buffer *buffer;
+
+		buffer = rb_entry(n, struct binder_buffer, rb_node);
+
+		/* Transaction should already have been freed */
+		BUG_ON(buffer->transaction);
+
+		binder_free_buf_locked(alloc, buffer);
+		buffers++;
+	}
+
+	page_count = 0;
+	if (alloc->pages) {
+		int i;
+
+		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+			void *page_addr;
+
+			if (!alloc->pages[i])
+				continue;
+
+			page_addr = alloc->buffer + i * PAGE_SIZE;
+			binder_alloc_debug(BINDER_ALLOC_DEBUG_BUFFER_ALLOC,
+					   "%s: %d: page %d at %pK not freed\n",
+					   __func__, alloc->pid, i, page_addr);
+			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+			__free_page(alloc->pages[i]);
+			page_count++;
+		}
+		kfree(alloc->pages);
+		vfree(alloc->buffer);
+	}
+	mutex_unlock(&alloc->mutex);
+
+	binder_alloc_debug(BINDER_ALLOC_DEBUG_OPEN_CLOSE,
+			   "%s: %d buffers %d, pages %d\n",
+			   __func__, alloc->pid, buffers, page_count);
+}
+
 static void binder_deferred_release(struct binder_proc *proc)
 {
 	struct binder_context *context = proc->context;
 	struct rb_node *n;
-	int threads, nodes, incoming_refs, outgoing_refs, buffers,
-		active_transactions, page_count;
+	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
 
-	BUG_ON(proc->alloc.vma);
 	BUG_ON(proc->files);
 
 	hlist_del(&proc->proc_node);
@@ -3682,49 +3858,15 @@ static void binder_deferred_release(struct binder_proc *proc)
 	binder_release_work(&proc->todo);
 	binder_release_work(&proc->delivered_death);
 
-	buffers = 0;
-	while ((n = rb_first(&proc->alloc.allocated_buffers))) {
-		struct binder_buffer *buffer;
-
-		buffer = rb_entry(n, struct binder_buffer, rb_node);
-
-		/* Transaction should already have been freed */
-		BUG_ON(buffer->transaction);
-
-		binder_free_buf(proc, buffer);
-		buffers++;
-	}
-
+	binder_alloc_deferred_release(&proc->alloc);
 	binder_stats_deleted(BINDER_STAT_PROC);
 
-	page_count = 0;
-	if (proc->alloc.pages) {
-		int i;
-
-		for (i = 0; i < proc->alloc.buffer_size / PAGE_SIZE; i++) {
-			void *page_addr;
-
-			if (!proc->alloc.pages[i])
-				continue;
-
-			page_addr = proc->alloc.buffer + i * PAGE_SIZE;
-			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%s: %d: page %d at %p not freed\n",
-				     __func__, proc->pid, i, page_addr);
-			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-			__free_page(proc->alloc.pages[i]);
-			page_count++;
-		}
-		kfree(proc->alloc.pages);
-		vfree(proc->alloc.buffer);
-	}
-
 	put_task_struct(proc->tsk);
 
 	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
-		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
+		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
 		     __func__, proc->pid, threads, nodes, incoming_refs,
-		     outgoing_refs, active_transactions, buffers, page_count);
+		     outgoing_refs, active_transactions);
 
 	kfree(proc);
 }
@@ -3807,15 +3949,6 @@ static void print_binder_transaction(struct seq_file *m, const char *prefix,
 		   t->buffer->data);
 }
 
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
-				struct binder_buffer *buffer)
-{
-	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
-		   prefix, buffer->debug_id, buffer->data,
-		   buffer->data_size, buffer->offsets_size,
-		   buffer->transaction ? "active" : "delivered");
-}
-
 static void print_binder_work(struct seq_file *m, const char *prefix,
 			      const char *transaction_prefix,
 			      struct binder_work *w)
@@ -3918,6 +4051,27 @@ static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
 		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
 }
 
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+				struct binder_buffer *buffer)
+{
+	seq_printf(m, "%s %d: %pK size %zd:%zd %s\n",
+		   prefix, buffer->debug_id, buffer->data,
+		   buffer->data_size, buffer->offsets_size,
+		   buffer->transaction ? "active" : "delivered");
+}
+
+void binder_alloc_print_allocated(struct seq_file *m,
+				  struct binder_alloc *alloc)
+{
+	struct rb_node *n;
+
+	mutex_lock(&alloc->mutex);
+	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+		print_binder_buffer(m, "  buffer",
+				    rb_entry(n, struct binder_buffer, rb_node));
+	mutex_unlock(&alloc->mutex);
+}
+
 static void print_binder_proc(struct seq_file *m,
 			      struct binder_proc *proc, int print_all)
 {
@@ -3946,10 +4100,7 @@ static void print_binder_proc(struct seq_file *m,
 			print_binder_ref(m, rb_entry(n, struct binder_ref,
 						     rb_node_desc));
 	}
-	for (n = rb_first(&proc->alloc.allocated_buffers);
-	     n != NULL; n = rb_next(n))
-		print_binder_buffer(m, "  buffer",
-				    rb_entry(n, struct binder_buffer, rb_node));
+	binder_alloc_print_allocated(m, &proc->alloc);
 	list_for_each_entry(w, &proc->todo, entry)
 		print_binder_work(m, "  ", "  pending transaction", w);
 	list_for_each_entry(w, &proc->delivered_death, entry) {
@@ -4047,6 +4198,18 @@ static void print_binder_stats(struct seq_file *m, const char *prefix,
 	}
 }
 
+int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
+{
+	struct rb_node *n;
+	int count = 0;
+
+	mutex_lock(&alloc->mutex);
+	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+		count++;
+	mutex_unlock(&alloc->mutex);
+	return count;
+}
+
 static void print_binder_proc_stats(struct seq_file *m,
 				    struct binder_proc *proc)
 {
@@ -4064,7 +4227,8 @@ static void print_binder_proc_stats(struct seq_file *m,
 			"  ready threads %d\n"
 			"  free async space %zd\n", proc->requested_threads,
 			proc->requested_threads_started, proc->max_threads,
-			proc->ready_threads, proc->alloc.free_async_space);
+			proc->ready_threads,
+			binder_alloc_get_free_async_space(&proc->alloc));
 	count = 0;
 	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
 		count++;
@@ -4081,10 +4245,7 @@ static void print_binder_proc_stats(struct seq_file *m,
 	}
 	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
 
-	count = 0;
-	for (n = rb_first(&proc->alloc.allocated_buffers);
-	     n != NULL; n = rb_next(n))
-		count++;
+	count = binder_alloc_get_allocated_count(&proc->alloc);
 	seq_printf(m, "  buffers: %d\n", count);
 
 	count = 0;
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index c835f09656c1..50b0d21f42cf 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -23,6 +23,7 @@
 struct binder_buffer;
 struct binder_node;
 struct binder_proc;
+struct binder_alloc;
 struct binder_ref;
 struct binder_thread;
 struct binder_transaction;
@@ -268,9 +269,9 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
 	TP_ARGS(buffer));
 
 TRACE_EVENT(binder_update_page_range,
-	TP_PROTO(struct binder_proc *proc, bool allocate,
+	TP_PROTO(struct binder_alloc *alloc, bool allocate,
 		 void *start, void *end),
-	TP_ARGS(proc, allocate, start, end),
+	TP_ARGS(alloc, allocate, start, end),
 	TP_STRUCT__entry(
 		__field(int, proc)
 		__field(bool, allocate)
@@ -278,9 +279,9 @@ TRACE_EVENT(binder_update_page_range,
 		__field(size_t, size)
 	),
 	TP_fast_assign(
-		__entry->proc = proc->pid;
+		__entry->proc = alloc->pid;
 		__entry->allocate = allocate;
-		__entry->offset = start - proc->alloc.buffer;
+		__entry->offset = start - alloc->buffer;
 		__entry->size = end - start;
 	),
 	TP_printk("proc=%d allocate=%d offset=%zu size=%zu",