3 * Android IPC Subsystem
5 * Copyright (C) 2007-2008 Google, Inc.
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <asm/cacheflush.h>
21 #include <linux/fdtable.h>
22 #include <linux/file.h>
23 #include <linux/freezer.h>
25 #include <linux/list.h>
26 #include <linux/miscdevice.h>
28 #include <linux/module.h>
29 #include <linux/mutex.h>
30 #include <linux/nsproxy.h>
31 #include <linux/poll.h>
32 #include <linux/debugfs.h>
33 #include <linux/rbtree.h>
34 #include <linux/sched.h>
35 #include <linux/seq_file.h>
36 #include <linux/uaccess.h>
37 #include <linux/vmalloc.h>
38 #include <linux/slab.h>
39 #include <linux/pid_namespace.h>
41 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
42 #define BINDER_IPC_32BIT 1
45 #include <uapi/linux/android/binder.h>
46 #include "binder_trace.h"
48 static DEFINE_MUTEX(binder_main_lock);
49 static DEFINE_MUTEX(binder_deferred_lock);
50 static DEFINE_MUTEX(binder_mmap_lock);
52 static HLIST_HEAD(binder_procs);
53 static HLIST_HEAD(binder_deferred_list);
54 static HLIST_HEAD(binder_dead_nodes);
56 static struct dentry *binder_debugfs_dir_entry_root;
57 static struct dentry *binder_debugfs_dir_entry_proc;
58 static struct binder_node *binder_context_mgr_node;
59 static kuid_t binder_context_mgr_uid = INVALID_UID;
60 static int binder_last_id;
61 static struct workqueue_struct *binder_deferred_workqueue;
63 #define BINDER_DEBUG_ENTRY(name) \
64 static int binder_##name##_open(struct inode *inode, struct file *file) \
66 return single_open(file, binder_##name##_show, inode->i_private); \
69 static const struct file_operations binder_##name##_fops = { \
70 .owner = THIS_MODULE, \
71 .open = binder_##name##_open, \
73 .llseek = seq_lseek, \
74 .release = single_release, \
77 static int binder_proc_show(struct seq_file *m, void *unused);
78 BINDER_DEBUG_ENTRY(proc);
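/*
 * BINDER_DEBUG_ENTRY(name) generates the single_open()/seq_file plumbing
 * (binder_<name>_open and binder_<name>_fops) for a read-only debugfs file
 * backed by binder_<name>_show().  It is used for the per-process "proc"
 * entry above and, in the full driver, for entries such as "state", "stats"
 * and "transactions" created under /sys/kernel/debug/binder at init time.
 */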
80 /* This is only defined in include/asm-arm/sizes.h */
86 #define SZ_4M 0x400000
89 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
91 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
94 BINDER_DEBUG_USER_ERROR = 1U << 0,
95 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
96 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
97 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
98 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
99 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
100 BINDER_DEBUG_READ_WRITE = 1U << 6,
101 BINDER_DEBUG_USER_REFS = 1U << 7,
102 BINDER_DEBUG_THREADS = 1U << 8,
103 BINDER_DEBUG_TRANSACTION = 1U << 9,
104 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
105 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
106 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
107 BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
108 BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
109 BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
111 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
112 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
113 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
115 static bool binder_debug_no_lock;
116 module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
118 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
119 static int binder_stop_on_user_error;
121 static int binder_set_stop_on_user_error(const char *val,
122 struct kernel_param *kp)
126 ret = param_set_int(val, kp);
127 if (binder_stop_on_user_error < 2)
128 wake_up(&binder_user_error_wait);
131 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
132 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
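/*
 * debug_mask, proc_no_lock and stop_on_user_error are writable module
 * parameters (S_IWUSR), so they can be changed at runtime; for example
 * (path assumes the driver is built as/into the "binder" module):
 *
 *	echo 0x3ff > /sys/module/binder/parameters/debug_mask
 *
 * stop_on_user_error uses a custom setter so that lowering the value below
 * 2 wakes up any threads parked on binder_user_error_wait.
 */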
134 #define binder_debug(mask, x...) \
136 if (binder_debug_mask & mask) \
140 #define binder_user_error(x...) \
142 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
144 if (binder_stop_on_user_error) \
145 binder_stop_on_user_error = 2; \
148 enum binder_stat_types {
154 BINDER_STAT_TRANSACTION,
155 BINDER_STAT_TRANSACTION_COMPLETE,
159 struct binder_stats {
160 int br[_IOC_NR(BR_FAILED_REPLY) + 1];
161 int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
162 int obj_created[BINDER_STAT_COUNT];
163 int obj_deleted[BINDER_STAT_COUNT];
166 static struct binder_stats binder_stats;
168 static inline void binder_stats_deleted(enum binder_stat_types type)
170 binder_stats.obj_deleted[type]++;
173 static inline void binder_stats_created(enum binder_stat_types type)
175 binder_stats.obj_created[type]++;
178 struct binder_transaction_log_entry {
190 struct binder_transaction_log {
193 struct binder_transaction_log_entry entry[32];
195 static struct binder_transaction_log binder_transaction_log;
196 static struct binder_transaction_log binder_transaction_log_failed;
198 static struct binder_transaction_log_entry *binder_transaction_log_add(
199 struct binder_transaction_log *log)
201 struct binder_transaction_log_entry *e;
203 e = &log->entry[log->next];
204 memset(e, 0, sizeof(*e));
206 if (log->next == ARRAY_SIZE(log->entry)) {
214 struct list_head entry;
216 BINDER_WORK_TRANSACTION = 1,
217 BINDER_WORK_TRANSACTION_COMPLETE,
219 BINDER_WORK_DEAD_BINDER,
220 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
221 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
227 struct binder_work work;
229 struct rb_node rb_node;
230 struct hlist_node dead_node;
232 struct binder_proc *proc;
233 struct hlist_head refs;
234 int internal_strong_refs;
236 int local_strong_refs;
237 binder_uintptr_t ptr;
238 binder_uintptr_t cookie;
239 unsigned has_strong_ref:1;
240 unsigned pending_strong_ref:1;
241 unsigned has_weak_ref:1;
242 unsigned pending_weak_ref:1;
243 unsigned has_async_transaction:1;
244 unsigned accept_fds:1;
245 unsigned min_priority:8;
246 struct list_head async_todo;
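/*
 * A binder_node represents one binder object living in a local process: it
 * is keyed in proc->nodes by the userspace pointer (ptr), carries the
 * userspace cookie, and keeps both the remote reference counts
 * (internal_strong_refs, backed by binder_ref entries on the refs hlist)
 * and the local ones (local_strong_refs/local_weak_refs) that are reported
 * back to userspace with BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS.
 */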
249 struct binder_ref_death {
250 struct binder_work work;
251 binder_uintptr_t cookie;
255 /* Lookups needed: */
256 /* node + proc => ref (transaction) */
257 /* desc + proc => ref (transaction, inc/dec ref) */
258 /* node => refs + procs (proc exit) */
260 struct rb_node rb_node_desc;
261 struct rb_node rb_node_node;
262 struct hlist_node node_entry;
263 struct binder_proc *proc;
264 struct binder_node *node;
268 struct binder_ref_death *death;
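/*
 * A binder_ref is one process's handle to a (possibly remote) binder_node.
 * Each ref is indexed twice in its owning proc: by descriptor in
 * refs_by_desc (the 32-bit handle userspace sees) and by node pointer in
 * refs_by_node; it is also linked into the node's refs hlist so the node
 * can find every process referencing it when the owner exits.
 */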
271 struct binder_buffer {
272 struct list_head entry; /* free and allocated entries by address */
273 struct rb_node rb_node; /* free entry by size or allocated entry */
276 unsigned allow_user_free:1;
277 unsigned async_transaction:1;
278 unsigned debug_id:29;
280 struct binder_transaction *transaction;
282 struct binder_node *target_node;
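/*
 * A binder_buffer is a header placed directly in front of the transaction
 * payload inside the per-process mmap'ed area.  data[] holds data_size
 * bytes of transaction data followed (after pointer alignment) by
 * offsets_size bytes of offsets to flat_binder_objects; "entry" chains all
 * buffers by address, while "rb_node" places the buffer either in
 * free_buffers (keyed by size) or in allocated_buffers (keyed by address).
 */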
288 enum binder_deferred_state {
289 BINDER_DEFERRED_PUT_FILES = 0x01,
290 BINDER_DEFERRED_FLUSH = 0x02,
291 BINDER_DEFERRED_RELEASE = 0x04,
295 struct hlist_node proc_node;
296 struct rb_root threads;
297 struct rb_root nodes;
298 struct rb_root refs_by_desc;
299 struct rb_root refs_by_node;
301 struct vm_area_struct *vma;
302 struct mm_struct *vma_vm_mm;
303 struct task_struct *tsk;
304 struct files_struct *files;
305 struct hlist_node deferred_work_node;
308 ptrdiff_t user_buffer_offset;
310 struct list_head buffers;
311 struct rb_root free_buffers;
312 struct rb_root allocated_buffers;
313 size_t free_async_space;
317 uint32_t buffer_free;
318 struct list_head todo;
319 wait_queue_head_t wait;
320 struct binder_stats stats;
321 struct list_head delivered_death;
323 int requested_threads;
324 int requested_threads_started;
326 long default_priority;
327 struct dentry *debugfs_entry;
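/*
 * One binder_proc exists per open file descriptor on the binder device.
 * It owns the rbtrees of threads, local nodes and remote refs, the mmap'ed
 * buffer area (vma/buffer/pages plus the free and allocated buffer trees),
 * the proc-wide todo list and wait queue, and the bookkeeping used to ask
 * userspace to spawn more looper threads (requested_threads, max_threads,
 * ready_threads).
 */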
331 BINDER_LOOPER_STATE_REGISTERED = 0x01,
332 BINDER_LOOPER_STATE_ENTERED = 0x02,
333 BINDER_LOOPER_STATE_EXITED = 0x04,
334 BINDER_LOOPER_STATE_INVALID = 0x08,
335 BINDER_LOOPER_STATE_WAITING = 0x10,
336 BINDER_LOOPER_STATE_NEED_RETURN = 0x20
339 struct binder_thread {
340 struct binder_proc *proc;
341 struct rb_node rb_node;
344 struct binder_transaction *transaction_stack;
345 struct list_head todo;
346 uint32_t return_error; /* Write failed, return error code in read buf */
347 uint32_t return_error2; /* Write failed, return error code in read */
348 /* buffer. Used when sending a reply to a dead process that */
349 /* we are also waiting on */
350 wait_queue_head_t wait;
351 struct binder_stats stats;
354 struct binder_transaction {
356 struct binder_work work;
357 struct binder_thread *from;
358 struct binder_transaction *from_parent;
359 struct binder_proc *to_proc;
360 struct binder_thread *to_thread;
361 struct binder_transaction *to_parent;
362 unsigned need_reply:1;
363 /* unsigned is_dead:1; */ /* not used at the moment */
365 struct binder_buffer *buffer;
374 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
376 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
378 struct files_struct *files = proc->files;
379 unsigned long rlim_cur;
385 if (!lock_task_sighand(proc->tsk, &irqs))
388 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
389 unlock_task_sighand(proc->tsk, &irqs);
391 return __alloc_fd(files, 0, rlim_cur, flags);
395 * copied from fd_install
397 static void task_fd_install(
398 struct binder_proc *proc, unsigned int fd, struct file *file)
401 __fd_install(proc->files, fd, file);
405 * copied from sys_close
407 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
411 if (proc->files == NULL)
414 retval = __close_fd(proc->files, fd);
415 /* can't restart close syscall because file table entry was cleared */
416 if (unlikely(retval == -ERESTARTSYS ||
417 retval == -ERESTARTNOINTR ||
418 retval == -ERESTARTNOHAND ||
419 retval == -ERESTART_RESTARTBLOCK))
425 static inline void binder_lock(const char *tag)
427 trace_binder_lock(tag);
428 mutex_lock(&binder_main_lock);
429 trace_binder_locked(tag);
432 static inline void binder_unlock(const char *tag)
434 trace_binder_unlock(tag);
435 mutex_unlock(&binder_main_lock);
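/*
 * Locking in this version of the driver is very coarse: binder_main_lock is
 * a single global mutex taken around almost every operation, and
 * binder_lock()/binder_unlock() only add tracepoints around it.  The lock
 * is dropped explicitly while a thread sleeps in binder_thread_read()
 * waiting for work.
 */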
438 static void binder_set_nice(long nice)
442 if (can_nice(current, nice)) {
443 set_user_nice(current, nice);
446 min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
447 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
448 "%d: nice value %ld not allowed use %ld instead\n",
449 current->pid, nice, min_nice);
450 set_user_nice(current, min_nice);
453 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
456 static size_t binder_buffer_size(struct binder_proc *proc,
457 struct binder_buffer *buffer)
459 if (list_is_last(&buffer->entry, &proc->buffers))
460 return proc->buffer + proc->buffer_size - (void *)buffer->data;
461 return (size_t)list_entry(buffer->entry.next,
462 struct binder_buffer, entry) - (size_t)buffer->data;
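/*
 * binder_buffer_size() derives a buffer's usable size from the address-
 * ordered proc->buffers list instead of storing it: the size is the gap
 * between this buffer's data[] and the start of the next buffer header, or
 * the end of the mapped area when the buffer is the last one.
 */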
465 static void binder_insert_free_buffer(struct binder_proc *proc,
466 struct binder_buffer *new_buffer)
468 struct rb_node **p = &proc->free_buffers.rb_node;
469 struct rb_node *parent = NULL;
470 struct binder_buffer *buffer;
472 size_t new_buffer_size;
474 BUG_ON(!new_buffer->free);
476 new_buffer_size = binder_buffer_size(proc, new_buffer);
478 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
479 "%d: add free buffer, size %zd, at %p\n",
480 proc->pid, new_buffer_size, new_buffer);
484 buffer = rb_entry(parent, struct binder_buffer, rb_node);
485 BUG_ON(!buffer->free);
487 buffer_size = binder_buffer_size(proc, buffer);
489 if (new_buffer_size < buffer_size)
490 p = &parent->rb_left;
492 p = &parent->rb_right;
494 rb_link_node(&new_buffer->rb_node, parent, p);
495 rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
498 static void binder_insert_allocated_buffer(struct binder_proc *proc,
499 struct binder_buffer *new_buffer)
501 struct rb_node **p = &proc->allocated_buffers.rb_node;
502 struct rb_node *parent = NULL;
503 struct binder_buffer *buffer;
505 BUG_ON(new_buffer->free);
509 buffer = rb_entry(parent, struct binder_buffer, rb_node);
510 BUG_ON(buffer->free);
512 if (new_buffer < buffer)
513 p = &parent->rb_left;
514 else if (new_buffer > buffer)
515 p = &parent->rb_right;
519 rb_link_node(&new_buffer->rb_node, parent, p);
520 rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
523 static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
526 struct rb_node *n = proc->allocated_buffers.rb_node;
527 struct binder_buffer *buffer;
528 struct binder_buffer *kern_ptr;
530 kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
531 - offsetof(struct binder_buffer, data));
534 buffer = rb_entry(n, struct binder_buffer, rb_node);
535 BUG_ON(buffer->free);
537 if (kern_ptr < buffer)
539 else if (kern_ptr > buffer)
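/*
 * The lookup above works because the buffer area is mapped twice: at a
 * kernel address in proc->buffer and at a userspace address in the proc's
 * vma, with user_buffer_offset holding the constant difference between the
 * two.  The userspace data pointer from BC_FREE_BUFFER is converted back
 * into a kernel binder_buffer pointer by subtracting user_buffer_offset and
 * the offset of data[] within the header, and the result is then validated
 * against the allocated_buffers tree.
 */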
547 static int binder_update_page_range(struct binder_proc *proc, int allocate,
548 void *start, void *end,
549 struct vm_area_struct *vma)
552 unsigned long user_page_addr;
553 struct vm_struct tmp_area;
555 struct mm_struct *mm;
557 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
558 "%d: %s pages %p-%p\n", proc->pid,
559 allocate ? "allocate" : "free", start, end);
564 trace_binder_update_page_range(proc, allocate, start, end);
569 mm = get_task_mm(proc->tsk);
572 down_write(&mm->mmap_sem);
574 if (vma && mm != proc->vma_vm_mm) {
575 pr_err("%d: vma mm and task mm mismatch\n",
585 pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
590 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
592 struct page **page_array_ptr;
594 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
597 *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
599 pr_err("%d: binder_alloc_buf failed for page at %p\n",
600 proc->pid, page_addr);
601 goto err_alloc_page_failed;
603 tmp_area.addr = page_addr;
604 tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
605 page_array_ptr = page;
606 ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
608 pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
609 proc->pid, page_addr);
610 goto err_map_kernel_failed;
613 (uintptr_t)page_addr + proc->user_buffer_offset;
614 ret = vm_insert_page(vma, user_page_addr, page[0]);
616 pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
617 proc->pid, user_page_addr);
618 goto err_vm_insert_page_failed;
620 /* vm_insert_page does not seem to increment the refcount */
623 up_write(&mm->mmap_sem);
629 for (page_addr = end - PAGE_SIZE; page_addr >= start;
630 page_addr -= PAGE_SIZE) {
631 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
633 zap_page_range(vma, (uintptr_t)page_addr +
634 proc->user_buffer_offset, PAGE_SIZE, NULL);
635 err_vm_insert_page_failed:
636 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
637 err_map_kernel_failed:
640 err_alloc_page_failed:
645 up_write(&mm->mmap_sem);
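/*
 * binder_update_page_range() backs (allocate != 0) or releases a page-
 * aligned range of the buffer area.  Each new page is mapped twice: into
 * the kernel with map_vm_area() and into the target process with
 * vm_insert_page(); on the error and free paths the userspace PTEs are
 * zapped and the kernel mapping removed before the page itself is freed.
 */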
651 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
653 size_t offsets_size, int is_async)
655 struct rb_node *n = proc->free_buffers.rb_node;
656 struct binder_buffer *buffer;
658 struct rb_node *best_fit = NULL;
663 if (proc->vma == NULL) {
664 pr_err("%d: binder_alloc_buf, no vma\n",
669 size = ALIGN(data_size, sizeof(void *)) +
670 ALIGN(offsets_size, sizeof(void *));
672 if (size < data_size || size < offsets_size) {
673 binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
674 proc->pid, data_size, offsets_size);
679 proc->free_async_space < size + sizeof(struct binder_buffer)) {
680 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
681 "%d: binder_alloc_buf size %zd failed, no async space left\n",
687 buffer = rb_entry(n, struct binder_buffer, rb_node);
688 BUG_ON(!buffer->free);
689 buffer_size = binder_buffer_size(proc, buffer);
691 if (size < buffer_size) {
694 } else if (size > buffer_size)
701 if (best_fit == NULL) {
702 pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
707 buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
708 buffer_size = binder_buffer_size(proc, buffer);
711 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
712 "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
713 proc->pid, size, buffer, buffer_size);
716 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
718 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
719 buffer_size = size; /* no room for other buffers */
721 buffer_size = size + sizeof(struct binder_buffer);
724 (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
725 if (end_page_addr > has_page_addr)
726 end_page_addr = has_page_addr;
727 if (binder_update_page_range(proc, 1,
728 (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
731 rb_erase(best_fit, &proc->free_buffers);
733 binder_insert_allocated_buffer(proc, buffer);
734 if (buffer_size != size) {
735 struct binder_buffer *new_buffer = (void *)buffer->data + size;
737 list_add(&new_buffer->entry, &buffer->entry);
738 new_buffer->free = 1;
739 binder_insert_free_buffer(proc, new_buffer);
741 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
742 "%d: binder_alloc_buf size %zd got %p\n",
743 proc->pid, size, buffer);
744 buffer->data_size = data_size;
745 buffer->offsets_size = offsets_size;
746 buffer->async_transaction = is_async;
748 proc->free_async_space -= size + sizeof(struct binder_buffer);
749 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
750 "%d: binder_alloc_buf size %zd async free %zd\n",
751 proc->pid, size, proc->free_async_space);
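/*
 * binder_alloc_buf() is a best-fit allocator over free_buffers (an rbtree
 * keyed by size).  The requested size is the pointer-aligned data size plus
 * the pointer-aligned offsets size; async transactions are additionally
 * limited by free_async_space (half of the mapped area).  Pages are only
 * faulted in for the range the chosen buffer actually needs, and any
 * remainder large enough to hold another header is split off as a new free
 * buffer.
 */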
757 static void *buffer_start_page(struct binder_buffer *buffer)
759 return (void *)((uintptr_t)buffer & PAGE_MASK);
762 static void *buffer_end_page(struct binder_buffer *buffer)
764 return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
767 static void binder_delete_free_buffer(struct binder_proc *proc,
768 struct binder_buffer *buffer)
770 struct binder_buffer *prev, *next = NULL;
771 int free_page_end = 1;
772 int free_page_start = 1;
774 BUG_ON(proc->buffers.next == &buffer->entry);
775 prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
777 if (buffer_end_page(prev) == buffer_start_page(buffer)) {
779 if (buffer_end_page(prev) == buffer_end_page(buffer))
781 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
782 "%d: merge free, buffer %p share page with %p\n",
783 proc->pid, buffer, prev);
786 if (!list_is_last(&buffer->entry, &proc->buffers)) {
787 next = list_entry(buffer->entry.next,
788 struct binder_buffer, entry);
789 if (buffer_start_page(next) == buffer_end_page(buffer)) {
791 if (buffer_start_page(next) ==
792 buffer_start_page(buffer))
794 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
795 "%d: merge free, buffer %p share page with %p\n",
796 proc->pid, buffer, next);
799 list_del(&buffer->entry);
800 if (free_page_start || free_page_end) {
801 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
802 "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
803 proc->pid, buffer, free_page_start ? "" : " end",
804 free_page_end ? "" : " start", prev, next);
805 binder_update_page_range(proc, 0, free_page_start ?
806 buffer_start_page(buffer) : buffer_end_page(buffer),
807 (free_page_end ? buffer_end_page(buffer) :
808 buffer_start_page(buffer)) + PAGE_SIZE, NULL);
812 static void binder_free_buf(struct binder_proc *proc,
813 struct binder_buffer *buffer)
815 size_t size, buffer_size;
817 buffer_size = binder_buffer_size(proc, buffer);
819 size = ALIGN(buffer->data_size, sizeof(void *)) +
820 ALIGN(buffer->offsets_size, sizeof(void *));
822 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
823 "%d: binder_free_buf %p size %zd buffer_size %zd\n",
824 proc->pid, buffer, size, buffer_size);
826 BUG_ON(buffer->free);
827 BUG_ON(size > buffer_size);
828 BUG_ON(buffer->transaction != NULL);
829 BUG_ON((void *)buffer < proc->buffer);
830 BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
832 if (buffer->async_transaction) {
833 proc->free_async_space += size + sizeof(struct binder_buffer);
835 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
836 "%d: binder_free_buf size %zd async free %zd\n",
837 proc->pid, size, proc->free_async_space);
840 binder_update_page_range(proc, 0,
841 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
842 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
844 rb_erase(&buffer->rb_node, &proc->allocated_buffers);
846 if (!list_is_last(&buffer->entry, &proc->buffers)) {
847 struct binder_buffer *next = list_entry(buffer->entry.next,
848 struct binder_buffer, entry);
851 rb_erase(&next->rb_node, &proc->free_buffers);
852 binder_delete_free_buffer(proc, next);
855 if (proc->buffers.next != &buffer->entry) {
856 struct binder_buffer *prev = list_entry(buffer->entry.prev,
857 struct binder_buffer, entry);
860 binder_delete_free_buffer(proc, buffer);
861 rb_erase(&prev->rb_node, &proc->free_buffers);
865 binder_insert_free_buffer(proc, buffer);
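/*
 * binder_free_buf() returns the pages covering the freed payload (whole
 * pages only, since neighbouring buffers may share the boundary pages),
 * then coalesces the buffer with a free successor and/or predecessor in
 * the address-ordered list before reinserting the merged buffer into
 * free_buffers.
 */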
868 static struct binder_node *binder_get_node(struct binder_proc *proc,
869 binder_uintptr_t ptr)
871 struct rb_node *n = proc->nodes.rb_node;
872 struct binder_node *node;
875 node = rb_entry(n, struct binder_node, rb_node);
879 else if (ptr > node->ptr)
887 static struct binder_node *binder_new_node(struct binder_proc *proc,
888 binder_uintptr_t ptr,
889 binder_uintptr_t cookie)
891 struct rb_node **p = &proc->nodes.rb_node;
892 struct rb_node *parent = NULL;
893 struct binder_node *node;
897 node = rb_entry(parent, struct binder_node, rb_node);
901 else if (ptr > node->ptr)
907 node = kzalloc(sizeof(*node), GFP_KERNEL);
910 binder_stats_created(BINDER_STAT_NODE);
911 rb_link_node(&node->rb_node, parent, p);
912 rb_insert_color(&node->rb_node, &proc->nodes);
913 node->debug_id = ++binder_last_id;
916 node->cookie = cookie;
917 node->work.type = BINDER_WORK_NODE;
918 INIT_LIST_HEAD(&node->work.entry);
919 INIT_LIST_HEAD(&node->async_todo);
920 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
921 "%d:%d node %d u%016llx c%016llx created\n",
922 proc->pid, current->pid, node->debug_id,
923 (u64)node->ptr, (u64)node->cookie);
927 static int binder_inc_node(struct binder_node *node, int strong, int internal,
928 struct list_head *target_list)
932 if (target_list == NULL &&
933 node->internal_strong_refs == 0 &&
934 !(node == binder_context_mgr_node &&
935 node->has_strong_ref)) {
936 pr_err("invalid inc strong node for %d\n",
940 node->internal_strong_refs++;
942 node->local_strong_refs++;
943 if (!node->has_strong_ref && target_list) {
944 list_del_init(&node->work.entry);
945 list_add_tail(&node->work.entry, target_list);
949 node->local_weak_refs++;
950 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
951 if (target_list == NULL) {
952 pr_err("invalid inc weak node for %d\n",
956 list_add_tail(&node->work.entry, target_list);
962 static int binder_dec_node(struct binder_node *node, int strong, int internal)
966 node->internal_strong_refs--;
968 node->local_strong_refs--;
969 if (node->local_strong_refs || node->internal_strong_refs)
973 node->local_weak_refs--;
974 if (node->local_weak_refs || !hlist_empty(&node->refs))
977 if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
978 if (list_empty(&node->work.entry)) {
979 list_add_tail(&node->work.entry, &node->proc->todo);
980 wake_up_interruptible(&node->proc->wait);
983 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
984 !node->local_weak_refs) {
985 list_del_init(&node->work.entry);
987 rb_erase(&node->rb_node, &node->proc->nodes);
988 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
989 "refless node %d deleted\n",
992 hlist_del(&node->dead_node);
993 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
994 "dead node %d deleted\n",
998 binder_stats_deleted(BINDER_STAT_NODE);
1006 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
1009 struct rb_node *n = proc->refs_by_desc.rb_node;
1010 struct binder_ref *ref;
1013 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1015 if (desc < ref->desc)
1017 else if (desc > ref->desc)
1025 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
1026 struct binder_node *node)
1029 struct rb_node **p = &proc->refs_by_node.rb_node;
1030 struct rb_node *parent = NULL;
1031 struct binder_ref *ref, *new_ref;
1035 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1037 if (node < ref->node)
1039 else if (node > ref->node)
1040 p = &(*p)->rb_right;
1044 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1045 if (new_ref == NULL)
1047 binder_stats_created(BINDER_STAT_REF);
1048 new_ref->debug_id = ++binder_last_id;
1049 new_ref->proc = proc;
1050 new_ref->node = node;
1051 rb_link_node(&new_ref->rb_node_node, parent, p);
1052 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1054 new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
1055 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1056 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1057 if (ref->desc > new_ref->desc)
1059 new_ref->desc = ref->desc + 1;
1062 p = &proc->refs_by_desc.rb_node;
1065 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1067 if (new_ref->desc < ref->desc)
1069 else if (new_ref->desc > ref->desc)
1070 p = &(*p)->rb_right;
1074 rb_link_node(&new_ref->rb_node_desc, parent, p);
1075 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1077 hlist_add_head(&new_ref->node_entry, &node->refs);
1079 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1080 "%d new ref %d desc %d for node %d\n",
1081 proc->pid, new_ref->debug_id, new_ref->desc,
1084 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1085 "%d new ref %d desc %d for dead node\n",
1086 proc->pid, new_ref->debug_id, new_ref->desc);
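/*
 * Descriptor (handle) assignment above: descriptor 0 is reserved for a ref
 * to the context manager node; every other ref gets the lowest unused
 * value starting from 1, found by walking refs_by_desc in ascending order
 * until a gap appears.
 */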
1091 static void binder_delete_ref(struct binder_ref *ref)
1093 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1094 "%d delete ref %d desc %d for node %d\n",
1095 ref->proc->pid, ref->debug_id, ref->desc,
1096 ref->node->debug_id);
1098 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1099 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1101 binder_dec_node(ref->node, 1, 1);
1102 hlist_del(&ref->node_entry);
1103 binder_dec_node(ref->node, 0, 1);
1105 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1106 "%d delete ref %d desc %d has death notification\n",
1107 ref->proc->pid, ref->debug_id, ref->desc);
1108 list_del(&ref->death->work.entry);
1110 binder_stats_deleted(BINDER_STAT_DEATH);
1113 binder_stats_deleted(BINDER_STAT_REF);
1116 static int binder_inc_ref(struct binder_ref *ref, int strong,
1117 struct list_head *target_list)
1122 if (ref->strong == 0) {
1123 ret = binder_inc_node(ref->node, 1, 1, target_list);
1129 if (ref->weak == 0) {
1130 ret = binder_inc_node(ref->node, 0, 1, target_list);
1140 static int binder_dec_ref(struct binder_ref *ref, int strong)
1143 if (ref->strong == 0) {
1144 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1145 ref->proc->pid, ref->debug_id,
1146 ref->desc, ref->strong, ref->weak);
1150 if (ref->strong == 0) {
1153 ret = binder_dec_node(ref->node, strong, 1);
1158 if (ref->weak == 0) {
1159 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1160 ref->proc->pid, ref->debug_id,
1161 ref->desc, ref->strong, ref->weak);
1166 if (ref->strong == 0 && ref->weak == 0)
1167 binder_delete_ref(ref);
1171 static void binder_pop_transaction(struct binder_thread *target_thread,
1172 struct binder_transaction *t)
1174 if (target_thread) {
1175 BUG_ON(target_thread->transaction_stack != t);
1176 BUG_ON(target_thread->transaction_stack->from != target_thread);
1177 target_thread->transaction_stack =
1178 target_thread->transaction_stack->from_parent;
1183 t->buffer->transaction = NULL;
1185 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1188 static void binder_send_failed_reply(struct binder_transaction *t,
1189 uint32_t error_code)
1191 struct binder_thread *target_thread;
1192 struct binder_transaction *next;
1194 BUG_ON(t->flags & TF_ONE_WAY);
1196 target_thread = t->from;
1197 if (target_thread) {
1198 if (target_thread->return_error != BR_OK &&
1199 target_thread->return_error2 == BR_OK) {
1200 target_thread->return_error2 =
1201 target_thread->return_error;
1202 target_thread->return_error = BR_OK;
1204 if (target_thread->return_error == BR_OK) {
1205 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1206 "send failed reply for transaction %d to %d:%d\n",
1208 target_thread->proc->pid,
1209 target_thread->pid);
1211 binder_pop_transaction(target_thread, t);
1212 target_thread->return_error = error_code;
1213 wake_up_interruptible(&target_thread->wait);
1215 pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
1216 target_thread->proc->pid,
1218 target_thread->return_error);
1222 next = t->from_parent;
1224 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1225 "send failed reply for transaction %d, target dead\n",
1228 binder_pop_transaction(target_thread, t);
1230 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1231 "reply failed, no target thread at root\n");
1235 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1236 "reply failed, no target thread -- retry %d\n",
1241 static void binder_transaction_buffer_release(struct binder_proc *proc,
1242 struct binder_buffer *buffer,
1243 binder_size_t *failed_at)
1245 binder_size_t *offp, *off_end;
1246 int debug_id = buffer->debug_id;
1248 binder_debug(BINDER_DEBUG_TRANSACTION,
1249 "%d buffer release %d, size %zd-%zd, failed at %p\n",
1250 proc->pid, buffer->debug_id,
1251 buffer->data_size, buffer->offsets_size, failed_at);
1253 if (buffer->target_node)
1254 binder_dec_node(buffer->target_node, 1, 0);
1256 offp = (binder_size_t *)(buffer->data +
1257 ALIGN(buffer->data_size, sizeof(void *)));
1259 off_end = failed_at;
1261 off_end = (void *)offp + buffer->offsets_size;
1262 for (; offp < off_end; offp++) {
1263 struct flat_binder_object *fp;
1265 if (*offp > buffer->data_size - sizeof(*fp) ||
1266 buffer->data_size < sizeof(*fp) ||
1267 !IS_ALIGNED(*offp, sizeof(u32))) {
1268 pr_err("transaction release %d bad offset %lld, size %zd\n",
1269 debug_id, (u64)*offp, buffer->data_size);
1272 fp = (struct flat_binder_object *)(buffer->data + *offp);
1274 case BINDER_TYPE_BINDER:
1275 case BINDER_TYPE_WEAK_BINDER: {
1276 struct binder_node *node = binder_get_node(proc, fp->binder);
1279 pr_err("transaction release %d bad node %016llx\n",
1280 debug_id, (u64)fp->binder);
1283 binder_debug(BINDER_DEBUG_TRANSACTION,
1284 " node %d u%016llx\n",
1285 node->debug_id, (u64)node->ptr);
1286 binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
1288 case BINDER_TYPE_HANDLE:
1289 case BINDER_TYPE_WEAK_HANDLE: {
1290 struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1293 pr_err("transaction release %d bad handle %d\n",
1294 debug_id, fp->handle);
1297 binder_debug(BINDER_DEBUG_TRANSACTION,
1298 " ref %d desc %d (node %d)\n",
1299 ref->debug_id, ref->desc, ref->node->debug_id);
1300 binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
1303 case BINDER_TYPE_FD:
1304 binder_debug(BINDER_DEBUG_TRANSACTION,
1305 " fd %d\n", fp->handle);
1307 task_close_fd(proc, fp->handle);
1311 pr_err("transaction release %d bad object type %x\n",
1312 debug_id, fp->type);
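/*
 * binder_transaction() is the heart of the driver.  For a new transaction
 * it resolves the target node/proc/thread from the handle (or the context
 * manager for handle 0); for a reply it pops the caller's transaction
 * stack.  It then allocates a binder_buffer in the *target* process,
 * copies the data and offsets in from userspace, translates every embedded
 * flat_binder_object (binder <-> handle conversion, fd duplication), and
 * finally queues the work on the target's todo list plus a
 * BINDER_WORK_TRANSACTION_COMPLETE entry on the sender's own todo list.
 */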
1318 static void binder_transaction(struct binder_proc *proc,
1319 struct binder_thread *thread,
1320 struct binder_transaction_data *tr, int reply)
1322 struct binder_transaction *t;
1323 struct binder_work *tcomplete;
1324 binder_size_t *offp, *off_end;
1325 binder_size_t off_min;
1326 struct binder_proc *target_proc;
1327 struct binder_thread *target_thread = NULL;
1328 struct binder_node *target_node = NULL;
1329 struct list_head *target_list;
1330 wait_queue_head_t *target_wait;
1331 struct binder_transaction *in_reply_to = NULL;
1332 struct binder_transaction_log_entry *e;
1333 uint32_t return_error;
1335 e = binder_transaction_log_add(&binder_transaction_log);
1336 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1337 e->from_proc = proc->pid;
1338 e->from_thread = thread->pid;
1339 e->target_handle = tr->target.handle;
1340 e->data_size = tr->data_size;
1341 e->offsets_size = tr->offsets_size;
1344 in_reply_to = thread->transaction_stack;
1345 if (in_reply_to == NULL) {
1346 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
1347 proc->pid, thread->pid);
1348 return_error = BR_FAILED_REPLY;
1349 goto err_empty_call_stack;
1351 binder_set_nice(in_reply_to->saved_priority);
1352 if (in_reply_to->to_thread != thread) {
1353 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
1354 proc->pid, thread->pid, in_reply_to->debug_id,
1355 in_reply_to->to_proc ?
1356 in_reply_to->to_proc->pid : 0,
1357 in_reply_to->to_thread ?
1358 in_reply_to->to_thread->pid : 0);
1359 return_error = BR_FAILED_REPLY;
1361 goto err_bad_call_stack;
1363 thread->transaction_stack = in_reply_to->to_parent;
1364 target_thread = in_reply_to->from;
1365 if (target_thread == NULL) {
1366 return_error = BR_DEAD_REPLY;
1367 goto err_dead_binder;
1369 if (target_thread->transaction_stack != in_reply_to) {
1370 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
1371 proc->pid, thread->pid,
1372 target_thread->transaction_stack ?
1373 target_thread->transaction_stack->debug_id : 0,
1374 in_reply_to->debug_id);
1375 return_error = BR_FAILED_REPLY;
1377 target_thread = NULL;
1378 goto err_dead_binder;
1380 target_proc = target_thread->proc;
1382 if (tr->target.handle) {
1383 struct binder_ref *ref;
1385 ref = binder_get_ref(proc, tr->target.handle);
1387 binder_user_error("%d:%d got transaction to invalid handle\n",
1388 proc->pid, thread->pid);
1389 return_error = BR_FAILED_REPLY;
1390 goto err_invalid_target_handle;
1392 target_node = ref->node;
1394 target_node = binder_context_mgr_node;
1395 if (target_node == NULL) {
1396 return_error = BR_DEAD_REPLY;
1397 goto err_no_context_mgr_node;
1400 e->to_node = target_node->debug_id;
1401 target_proc = target_node->proc;
1402 if (target_proc == NULL) {
1403 return_error = BR_DEAD_REPLY;
1404 goto err_dead_binder;
1406 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1407 struct binder_transaction *tmp;
1409 tmp = thread->transaction_stack;
1410 if (tmp->to_thread != thread) {
1411 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
1412 proc->pid, thread->pid, tmp->debug_id,
1413 tmp->to_proc ? tmp->to_proc->pid : 0,
1415 tmp->to_thread->pid : 0);
1416 return_error = BR_FAILED_REPLY;
1417 goto err_bad_call_stack;
1420 if (tmp->from && tmp->from->proc == target_proc)
1421 target_thread = tmp->from;
1422 tmp = tmp->from_parent;
1426 if (target_thread) {
1427 e->to_thread = target_thread->pid;
1428 target_list = &target_thread->todo;
1429 target_wait = &target_thread->wait;
1431 target_list = &target_proc->todo;
1432 target_wait = &target_proc->wait;
1434 e->to_proc = target_proc->pid;
1436 /* TODO: reuse incoming transaction for reply */
1437 t = kzalloc(sizeof(*t), GFP_KERNEL);
1439 return_error = BR_FAILED_REPLY;
1440 goto err_alloc_t_failed;
1442 binder_stats_created(BINDER_STAT_TRANSACTION);
1444 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1445 if (tcomplete == NULL) {
1446 return_error = BR_FAILED_REPLY;
1447 goto err_alloc_tcomplete_failed;
1449 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1451 t->debug_id = ++binder_last_id;
1452 e->debug_id = t->debug_id;
1455 binder_debug(BINDER_DEBUG_TRANSACTION,
1456 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
1457 proc->pid, thread->pid, t->debug_id,
1458 target_proc->pid, target_thread->pid,
1459 (u64)tr->data.ptr.buffer,
1460 (u64)tr->data.ptr.offsets,
1461 (u64)tr->data_size, (u64)tr->offsets_size);
1463 binder_debug(BINDER_DEBUG_TRANSACTION,
1464 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
1465 proc->pid, thread->pid, t->debug_id,
1466 target_proc->pid, target_node->debug_id,
1467 (u64)tr->data.ptr.buffer,
1468 (u64)tr->data.ptr.offsets,
1469 (u64)tr->data_size, (u64)tr->offsets_size);
1471 if (!reply && !(tr->flags & TF_ONE_WAY))
1475 t->sender_euid = task_euid(proc->tsk);
1476 t->to_proc = target_proc;
1477 t->to_thread = target_thread;
1479 t->flags = tr->flags;
1480 t->priority = task_nice(current);
1482 trace_binder_transaction(reply, t, target_node);
1484 t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1485 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
1486 if (t->buffer == NULL) {
1487 return_error = BR_FAILED_REPLY;
1488 goto err_binder_alloc_buf_failed;
1490 t->buffer->allow_user_free = 0;
1491 t->buffer->debug_id = t->debug_id;
1492 t->buffer->transaction = t;
1493 t->buffer->target_node = target_node;
1494 trace_binder_transaction_alloc_buf(t->buffer);
1496 binder_inc_node(target_node, 1, 0, NULL);
1498 offp = (binder_size_t *)(t->buffer->data +
1499 ALIGN(tr->data_size, sizeof(void *)));
1501 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
1502 tr->data.ptr.buffer, tr->data_size)) {
1503 binder_user_error("%d:%d got transaction with invalid data ptr\n",
1504 proc->pid, thread->pid);
1505 return_error = BR_FAILED_REPLY;
1506 goto err_copy_data_failed;
1508 if (copy_from_user(offp, (const void __user *)(uintptr_t)
1509 tr->data.ptr.offsets, tr->offsets_size)) {
1510 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1511 proc->pid, thread->pid);
1512 return_error = BR_FAILED_REPLY;
1513 goto err_copy_data_failed;
1515 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
1516 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
1517 proc->pid, thread->pid, (u64)tr->offsets_size);
1518 return_error = BR_FAILED_REPLY;
1519 goto err_bad_offset;
1521 off_end = (void *)offp + tr->offsets_size;
1523 for (; offp < off_end; offp++) {
1524 struct flat_binder_object *fp;
1526 if (*offp > t->buffer->data_size - sizeof(*fp) ||
1528 t->buffer->data_size < sizeof(*fp) ||
1529 !IS_ALIGNED(*offp, sizeof(u32))) {
1530 binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
1531 proc->pid, thread->pid, (u64)*offp,
1533 (u64)(t->buffer->data_size -
1535 return_error = BR_FAILED_REPLY;
1536 goto err_bad_offset;
1538 fp = (struct flat_binder_object *)(t->buffer->data + *offp);
1539 off_min = *offp + sizeof(struct flat_binder_object);
1541 case BINDER_TYPE_BINDER:
1542 case BINDER_TYPE_WEAK_BINDER: {
1543 struct binder_ref *ref;
1544 struct binder_node *node = binder_get_node(proc, fp->binder);
1547 node = binder_new_node(proc, fp->binder, fp->cookie);
1549 return_error = BR_FAILED_REPLY;
1550 goto err_binder_new_node_failed;
1552 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1553 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1555 if (fp->cookie != node->cookie) {
1556 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1557 proc->pid, thread->pid,
1558 (u64)fp->binder, node->debug_id,
1559 (u64)fp->cookie, (u64)node->cookie);
1560 return_error = BR_FAILED_REPLY;
1561 goto err_binder_get_ref_for_node_failed;
1563 ref = binder_get_ref_for_node(target_proc, node);
1565 return_error = BR_FAILED_REPLY;
1566 goto err_binder_get_ref_for_node_failed;
1568 if (fp->type == BINDER_TYPE_BINDER)
1569 fp->type = BINDER_TYPE_HANDLE;
1571 fp->type = BINDER_TYPE_WEAK_HANDLE;
1572 fp->handle = ref->desc;
1573 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
1576 trace_binder_transaction_node_to_ref(t, node, ref);
1577 binder_debug(BINDER_DEBUG_TRANSACTION,
1578 " node %d u%016llx -> ref %d desc %d\n",
1579 node->debug_id, (u64)node->ptr,
1580 ref->debug_id, ref->desc);
1582 case BINDER_TYPE_HANDLE:
1583 case BINDER_TYPE_WEAK_HANDLE: {
1584 struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1587 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1589 thread->pid, fp->handle);
1590 return_error = BR_FAILED_REPLY;
1591 goto err_binder_get_ref_failed;
1593 if (ref->node->proc == target_proc) {
1594 if (fp->type == BINDER_TYPE_HANDLE)
1595 fp->type = BINDER_TYPE_BINDER;
1597 fp->type = BINDER_TYPE_WEAK_BINDER;
1598 fp->binder = ref->node->ptr;
1599 fp->cookie = ref->node->cookie;
1600 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
1601 trace_binder_transaction_ref_to_node(t, ref);
1602 binder_debug(BINDER_DEBUG_TRANSACTION,
1603 " ref %d desc %d -> node %d u%016llx\n",
1604 ref->debug_id, ref->desc, ref->node->debug_id,
1605 (u64)ref->node->ptr);
1607 struct binder_ref *new_ref;
1609 new_ref = binder_get_ref_for_node(target_proc, ref->node);
1610 if (new_ref == NULL) {
1611 return_error = BR_FAILED_REPLY;
1612 goto err_binder_get_ref_for_node_failed;
1614 fp->handle = new_ref->desc;
1615 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
1616 trace_binder_transaction_ref_to_ref(t, ref,
1618 binder_debug(BINDER_DEBUG_TRANSACTION,
1619 " ref %d desc %d -> ref %d desc %d (node %d)\n",
1620 ref->debug_id, ref->desc, new_ref->debug_id,
1621 new_ref->desc, ref->node->debug_id);
1625 case BINDER_TYPE_FD: {
1630 if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
1631 binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
1632 proc->pid, thread->pid, fp->handle);
1633 return_error = BR_FAILED_REPLY;
1634 goto err_fd_not_allowed;
1636 } else if (!target_node->accept_fds) {
1637 binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
1638 proc->pid, thread->pid, fp->handle);
1639 return_error = BR_FAILED_REPLY;
1640 goto err_fd_not_allowed;
1643 file = fget(fp->handle);
1645 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1646 proc->pid, thread->pid, fp->handle);
1647 return_error = BR_FAILED_REPLY;
1648 goto err_fget_failed;
1650 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1651 if (target_fd < 0) {
1653 return_error = BR_FAILED_REPLY;
1654 goto err_get_unused_fd_failed;
1656 task_fd_install(target_proc, target_fd, file);
1657 trace_binder_transaction_fd(t, fp->handle, target_fd);
1658 binder_debug(BINDER_DEBUG_TRANSACTION,
1659 " fd %d -> %d\n", fp->handle, target_fd);
1661 fp->handle = target_fd;
1665 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
1666 proc->pid, thread->pid, fp->type);
1667 return_error = BR_FAILED_REPLY;
1668 goto err_bad_object_type;
1672 BUG_ON(t->buffer->async_transaction != 0);
1673 binder_pop_transaction(target_thread, in_reply_to);
1674 } else if (!(t->flags & TF_ONE_WAY)) {
1675 BUG_ON(t->buffer->async_transaction != 0);
1677 t->from_parent = thread->transaction_stack;
1678 thread->transaction_stack = t;
1680 BUG_ON(target_node == NULL);
1681 BUG_ON(t->buffer->async_transaction != 1);
1682 if (target_node->has_async_transaction) {
1683 target_list = &target_node->async_todo;
1686 target_node->has_async_transaction = 1;
1688 t->work.type = BINDER_WORK_TRANSACTION;
1689 list_add_tail(&t->work.entry, target_list);
1690 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1691 list_add_tail(&tcomplete->entry, &thread->todo);
1693 wake_up_interruptible(target_wait);
1696 err_get_unused_fd_failed:
1699 err_binder_get_ref_for_node_failed:
1700 err_binder_get_ref_failed:
1701 err_binder_new_node_failed:
1702 err_bad_object_type:
1704 err_copy_data_failed:
1705 trace_binder_transaction_failed_buffer_release(t->buffer);
1706 binder_transaction_buffer_release(target_proc, t->buffer, offp);
1707 t->buffer->transaction = NULL;
1708 binder_free_buf(target_proc, t->buffer);
1709 err_binder_alloc_buf_failed:
1711 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1712 err_alloc_tcomplete_failed:
1714 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1717 err_empty_call_stack:
1719 err_invalid_target_handle:
1720 err_no_context_mgr_node:
1721 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1722 "%d:%d transaction failed %d, size %lld-%lld\n",
1723 proc->pid, thread->pid, return_error,
1724 (u64)tr->data_size, (u64)tr->offsets_size);
1727 struct binder_transaction_log_entry *fe;
1729 fe = binder_transaction_log_add(&binder_transaction_log_failed);
1733 BUG_ON(thread->return_error != BR_OK);
1735 thread->return_error = BR_TRANSACTION_COMPLETE;
1736 binder_send_failed_reply(in_reply_to, return_error);
1738 thread->return_error = return_error;
1741 static int binder_thread_write(struct binder_proc *proc,
1742 struct binder_thread *thread,
1743 binder_uintptr_t binder_buffer, size_t size,
1744 binder_size_t *consumed)
1747 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
1748 void __user *ptr = buffer + *consumed;
1749 void __user *end = buffer + size;
1751 while (ptr < end && thread->return_error == BR_OK) {
1752 if (get_user(cmd, (uint32_t __user *)ptr))
1754 ptr += sizeof(uint32_t);
1755 trace_binder_command(cmd);
1756 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1757 binder_stats.bc[_IOC_NR(cmd)]++;
1758 proc->stats.bc[_IOC_NR(cmd)]++;
1759 thread->stats.bc[_IOC_NR(cmd)]++;
1767 struct binder_ref *ref;
1768 const char *debug_string;
1770 if (get_user(target, (uint32_t __user *)ptr))
1772 ptr += sizeof(uint32_t);
1773 if (target == 0 && binder_context_mgr_node &&
1774 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1775 ref = binder_get_ref_for_node(proc,
1776 binder_context_mgr_node);
1777 if (ref->desc != target) {
1778 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
1779 proc->pid, thread->pid,
1783 ref = binder_get_ref(proc, target);
1785 binder_user_error("%d:%d refcount change on invalid ref %d\n",
1786 proc->pid, thread->pid, target);
1791 debug_string = "IncRefs";
1792 binder_inc_ref(ref, 0, NULL);
1795 debug_string = "Acquire";
1796 binder_inc_ref(ref, 1, NULL);
1799 debug_string = "Release";
1800 binder_dec_ref(ref, 1);
1804 debug_string = "DecRefs";
1805 binder_dec_ref(ref, 0);
1808 binder_debug(BINDER_DEBUG_USER_REFS,
1809 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
1810 proc->pid, thread->pid, debug_string, ref->debug_id,
1811 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1814 case BC_INCREFS_DONE:
1815 case BC_ACQUIRE_DONE: {
1816 binder_uintptr_t node_ptr;
1817 binder_uintptr_t cookie;
1818 struct binder_node *node;
1820 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
1822 ptr += sizeof(binder_uintptr_t);
1823 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1825 ptr += sizeof(binder_uintptr_t);
1826 node = binder_get_node(proc, node_ptr);
1828 binder_user_error("%d:%d %s u%016llx no match\n",
1829 proc->pid, thread->pid,
1830 cmd == BC_INCREFS_DONE ?
1836 if (cookie != node->cookie) {
1837 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
1838 proc->pid, thread->pid,
1839 cmd == BC_INCREFS_DONE ?
1840 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1841 (u64)node_ptr, node->debug_id,
1842 (u64)cookie, (u64)node->cookie);
1845 if (cmd == BC_ACQUIRE_DONE) {
1846 if (node->pending_strong_ref == 0) {
1847 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
1848 proc->pid, thread->pid,
1852 node->pending_strong_ref = 0;
1854 if (node->pending_weak_ref == 0) {
1855 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
1856 proc->pid, thread->pid,
1860 node->pending_weak_ref = 0;
1862 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1863 binder_debug(BINDER_DEBUG_USER_REFS,
1864 "%d:%d %s node %d ls %d lw %d\n",
1865 proc->pid, thread->pid,
1866 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1867 node->debug_id, node->local_strong_refs, node->local_weak_refs);
1870 case BC_ATTEMPT_ACQUIRE:
1871 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
1873 case BC_ACQUIRE_RESULT:
1874 pr_err("BC_ACQUIRE_RESULT not supported\n");
1877 case BC_FREE_BUFFER: {
1878 binder_uintptr_t data_ptr;
1879 struct binder_buffer *buffer;
1881 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
1883 ptr += sizeof(binder_uintptr_t);
1885 buffer = binder_buffer_lookup(proc, data_ptr);
1886 if (buffer == NULL) {
1887 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
1888 proc->pid, thread->pid, (u64)data_ptr);
1891 if (!buffer->allow_user_free) {
1892 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
1893 proc->pid, thread->pid, (u64)data_ptr);
1896 binder_debug(BINDER_DEBUG_FREE_BUFFER,
1897 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
1898 proc->pid, thread->pid, (u64)data_ptr,
1900 buffer->transaction ? "active" : "finished");
1902 if (buffer->transaction) {
1903 buffer->transaction->buffer = NULL;
1904 buffer->transaction = NULL;
1906 if (buffer->async_transaction && buffer->target_node) {
1907 BUG_ON(!buffer->target_node->has_async_transaction);
1908 if (list_empty(&buffer->target_node->async_todo))
1909 buffer->target_node->has_async_transaction = 0;
1911 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
1913 trace_binder_transaction_buffer_release(buffer);
1914 binder_transaction_buffer_release(proc, buffer, NULL);
1915 binder_free_buf(proc, buffer);
1919 case BC_TRANSACTION:
1921 struct binder_transaction_data tr;
1923 if (copy_from_user(&tr, ptr, sizeof(tr)))
1926 binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
1930 case BC_REGISTER_LOOPER:
1931 binder_debug(BINDER_DEBUG_THREADS,
1932 "%d:%d BC_REGISTER_LOOPER\n",
1933 proc->pid, thread->pid);
1934 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
1935 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1936 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
1937 proc->pid, thread->pid);
1938 } else if (proc->requested_threads == 0) {
1939 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1940 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
1941 proc->pid, thread->pid);
1943 proc->requested_threads--;
1944 proc->requested_threads_started++;
1946 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
1948 case BC_ENTER_LOOPER:
1949 binder_debug(BINDER_DEBUG_THREADS,
1950 "%d:%d BC_ENTER_LOOPER\n",
1951 proc->pid, thread->pid);
1952 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
1953 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1954 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
1955 proc->pid, thread->pid);
1957 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
1959 case BC_EXIT_LOOPER:
1960 binder_debug(BINDER_DEBUG_THREADS,
1961 "%d:%d BC_EXIT_LOOPER\n",
1962 proc->pid, thread->pid);
1963 thread->looper |= BINDER_LOOPER_STATE_EXITED;
1966 case BC_REQUEST_DEATH_NOTIFICATION:
1967 case BC_CLEAR_DEATH_NOTIFICATION: {
1969 binder_uintptr_t cookie;
1970 struct binder_ref *ref;
1971 struct binder_ref_death *death;
1973 if (get_user(target, (uint32_t __user *)ptr))
1975 ptr += sizeof(uint32_t);
1976 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1978 ptr += sizeof(binder_uintptr_t);
1979 ref = binder_get_ref(proc, target);
1981 binder_user_error("%d:%d %s invalid ref %d\n",
1982 proc->pid, thread->pid,
1983 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
1984 "BC_REQUEST_DEATH_NOTIFICATION" :
1985 "BC_CLEAR_DEATH_NOTIFICATION",
1990 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
1991 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
1992 proc->pid, thread->pid,
1993 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
1994 "BC_REQUEST_DEATH_NOTIFICATION" :
1995 "BC_CLEAR_DEATH_NOTIFICATION",
1996 (u64)cookie, ref->debug_id, ref->desc,
1997 ref->strong, ref->weak, ref->node->debug_id);
1999 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2001 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2002 proc->pid, thread->pid);
2005 death = kzalloc(sizeof(*death), GFP_KERNEL);
2006 if (death == NULL) {
2007 thread->return_error = BR_ERROR;
2008 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2009 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
2010 proc->pid, thread->pid);
2013 binder_stats_created(BINDER_STAT_DEATH);
2014 INIT_LIST_HEAD(&death->work.entry);
2015 death->cookie = cookie;
2017 if (ref->node->proc == NULL) {
2018 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2019 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2020 list_add_tail(&ref->death->work.entry, &thread->todo);
2022 list_add_tail(&ref->death->work.entry, &proc->todo);
2023 wake_up_interruptible(&proc->wait);
2027 if (ref->death == NULL) {
2028 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2029 proc->pid, thread->pid);
2033 if (death->cookie != cookie) {
2034 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2035 proc->pid, thread->pid,
2041 if (list_empty(&death->work.entry)) {
2042 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2043 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2044 list_add_tail(&death->work.entry, &thread->todo);
2046 list_add_tail(&death->work.entry, &proc->todo);
2047 wake_up_interruptible(&proc->wait);
2050 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2051 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2055 case BC_DEAD_BINDER_DONE: {
2056 struct binder_work *w;
2057 binder_uintptr_t cookie;
2058 struct binder_ref_death *death = NULL;
2060 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2063 ptr += sizeof(cookie);
2064 list_for_each_entry(w, &proc->delivered_death, entry) {
2065 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2067 if (tmp_death->cookie == cookie) {
2072 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2073 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2074 proc->pid, thread->pid, (u64)cookie,
2076 if (death == NULL) {
2077 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2078 proc->pid, thread->pid, (u64)cookie);
2082 list_del_init(&death->work.entry);
2083 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2084 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2085 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2086 list_add_tail(&death->work.entry, &thread->todo);
2088 list_add_tail(&death->work.entry, &proc->todo);
2089 wake_up_interruptible(&proc->wait);
2095 pr_err("%d:%d unknown command %d\n",
2096 proc->pid, thread->pid, cmd);
2099 *consumed = ptr - buffer;
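/*
 * binder_thread_write() consumes a stream of BC_* commands from the write
 * buffer handed in through the BINDER_WRITE_READ ioctl: each command is a
 * 32-bit code followed by a command-specific payload, and *consumed is
 * advanced past whatever was processed.  A minimal userspace sketch of
 * issuing one command (illustrative only, error handling omitted):
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *		.write_size   = sizeof(cmd),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */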
2104 static void binder_stat_br(struct binder_proc *proc,
2105 struct binder_thread *thread, uint32_t cmd)
2107 trace_binder_return(cmd);
2108 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2109 binder_stats.br[_IOC_NR(cmd)]++;
2110 proc->stats.br[_IOC_NR(cmd)]++;
2111 thread->stats.br[_IOC_NR(cmd)]++;
2115 static int binder_has_proc_work(struct binder_proc *proc,
2116 struct binder_thread *thread)
2118 return !list_empty(&proc->todo) ||
2119 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2122 static int binder_has_thread_work(struct binder_thread *thread)
2124 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2125 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
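/*
 * binder_thread_read() is the mirror of the write path: it fills the read
 * buffer with a stream of BR_* return codes (starting with BR_NOOP so the
 * buffer is never returned empty) and their payloads.  A thread with an
 * empty transaction stack and an empty private todo list waits on the
 * proc-wide wait queue for process work; otherwise it waits on its own
 * wait queue for thread work.
 */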
2128 static int binder_thread_read(struct binder_proc *proc,
2129 struct binder_thread *thread,
2130 binder_uintptr_t binder_buffer, size_t size,
2131 binder_size_t *consumed, int non_block)
2133 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2134 void __user *ptr = buffer + *consumed;
2135 void __user *end = buffer + size;
2138 int wait_for_proc_work;
2140 if (*consumed == 0) {
2141 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2143 ptr += sizeof(uint32_t);
2147 wait_for_proc_work = thread->transaction_stack == NULL &&
2148 list_empty(&thread->todo);
2150 if (thread->return_error != BR_OK && ptr < end) {
2151 if (thread->return_error2 != BR_OK) {
2152 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2154 ptr += sizeof(uint32_t);
2155 binder_stat_br(proc, thread, thread->return_error2);
2158 thread->return_error2 = BR_OK;
2160 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2162 ptr += sizeof(uint32_t);
2163 binder_stat_br(proc, thread, thread->return_error);
2164 thread->return_error = BR_OK;
2169 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2170 if (wait_for_proc_work)
2171 proc->ready_threads++;
2173 binder_unlock(__func__);
2175 trace_binder_wait_for_work(wait_for_proc_work,
2176 !!thread->transaction_stack,
2177 !list_empty(&thread->todo));
2178 if (wait_for_proc_work) {
2179 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2180 BINDER_LOOPER_STATE_ENTERED))) {
2181 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2182 proc->pid, thread->pid, thread->looper);
2183 wait_event_interruptible(binder_user_error_wait,
2184 binder_stop_on_user_error < 2);
2186 binder_set_nice(proc->default_priority);
2188 if (!binder_has_proc_work(proc, thread))
2191 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2194 if (!binder_has_thread_work(thread))
2197 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2200 binder_lock(__func__);
2202 if (wait_for_proc_work)
2203 proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;
		if (!list_empty(&thread->todo)) {
			w = list_first_entry(&thread->todo, struct binder_work,
					     entry);
		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
			w = list_first_entry(&proc->todo, struct binder_work,
					     entry);
		} else {
			if (ptr - buffer == 4 &&
			    !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4)
			break;
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
2251 case BINDER_WORK_NODE: {
2252 struct binder_node *node = container_of(w, struct binder_node, work);
2253 uint32_t cmd = BR_NOOP;
2254 const char *cmd_name;
2255 int strong = node->internal_strong_refs || node->local_strong_refs;
2256 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);
				if (put_user(node->cookie,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);

				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,
					     "%d:%d %s %d u%016llx c%016llx\n",
					     proc->pid, thread->pid, cmd_name,
					     node->debug_id,
					     (u64)node->ptr, (u64)node->cookie);
			} else {
				list_del_init(&w->entry);
				if (!weak && !strong) {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx deleted\n",
						     proc->pid, thread->pid,
						     node->debug_id,
						     (u64)node->ptr, (u64)node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats_deleted(BINDER_STAT_NODE);
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx state unchanged\n",
						     proc->pid, thread->pid,
						     node->debug_id,
						     (u64)node->ptr, (u64)node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;
2361 BUG_ON(t->buffer == NULL);
2362 if (t->buffer->target_node) {
2363 struct binder_node *target_node = t->buffer->target_node;
2365 tr.target.ptr = target_node->ptr;
2366 tr.cookie = target_node->cookie;
2367 t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
2381 tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}
2393 tr.data_size = t->buffer->data_size;
2394 tr.offsets_size = t->buffer->offsets_size;
2395 tr.data.ptr.buffer = (binder_uintptr_t)(
2396 (uintptr_t)t->buffer->data +
2397 proc->user_buffer_offset);
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);
2409 trace_binder_transaction_received(t);
2410 binder_stat_br(proc, thread, cmd);
2411 binder_debug(BINDER_DEBUG_TRANSACTION,
2412 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
2413 proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,
2417 t->from ? t->from->pid : 0, cmd,
2418 t->buffer->data_size, t->buffer->offsets_size,
2419 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}

done:
	*consumed = ptr - buffer;
2438 if (proc->requested_threads + proc->ready_threads == 0 &&
2439 proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
	return 0;
}
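/*
 * binder_release_work() - drain a work list that can no longer be delivered:
 * transactions that still expect a reply get BR_DEAD_REPLY, everything else
 * is logged and freed.
 */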
2454 static void binder_release_work(struct list_head *list)
2456 struct binder_work *w;
2458 while (!list_empty(list)) {
2459 w = list_first_entry(list, struct binder_work, entry);
2460 list_del_init(&w->entry);
2462 case BINDER_WORK_TRANSACTION: {
2463 struct binder_transaction *t;
2465 t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				t->buffer->transaction = NULL;
				kfree(t);
				binder_stats_deleted(BINDER_STAT_TRANSACTION);
			}
		} break;
2478 case BINDER_WORK_TRANSACTION_COMPLETE: {
2479 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2480 "undelivered TRANSACTION_COMPLETE\n");
2482 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2484 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2485 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2486 struct binder_ref_death *death;
2488 death = container_of(w, struct binder_ref_death, work);
2489 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2490 "undelivered death notification, %016llx\n",
2491 (u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
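/*
 * binder_get_thread() - look up current's binder_thread in proc's thread
 * rbtree, creating and inserting a new one on first use.
 */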
2504 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2506 struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);
		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			break;
	}
	if (*p == NULL) {
		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (thread == NULL)
			return NULL;
2525 binder_stats_created(BINDER_STAT_THREAD);
2526 thread->proc = proc;
2527 thread->pid = current->pid;
2528 init_waitqueue_head(&thread->wait);
2529 INIT_LIST_HEAD(&thread->todo);
2530 rb_link_node(&thread->rb_node, parent, p);
2531 rb_insert_color(&thread->rb_node, &proc->threads);
2532 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2533 thread->return_error = BR_OK;
2534 thread->return_error2 = BR_OK;
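/*
 * binder_free_thread() - remove a thread from its proc, unwind its
 * transaction stack (sending BR_DEAD_REPLY where a reply is still owed),
 * release its pending work and return the number of transactions that were
 * still active.
 */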
2539 static int binder_free_thread(struct binder_proc *proc,
2540 struct binder_thread *thread)
2542 struct binder_transaction *t;
2543 struct binder_transaction *send_reply = NULL;
2544 int active_transactions = 0;
2546 rb_erase(&thread->rb_node, &proc->threads);
2547 t = thread->transaction_stack;
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
2552 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2553 "release %d:%d transaction %d %s, still active\n",
2554 proc->pid, thread->pid,
2556 (t->to_thread == thread) ? "in" : "out");
2558 if (t->to_thread == thread) {
2560 t->to_thread = NULL;
2562 t->buffer->transaction = NULL;
2566 } else if (t->from == thread) {
2573 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2574 binder_release_work(&thread->todo);
2576 binder_stats_deleted(BINDER_STAT_THREAD);
2577 return active_transactions;
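/*
 * binder_poll() - report POLLIN when the calling thread (or, for an idle
 * thread, its process) has binder work pending.
 */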
2580 static unsigned int binder_poll(struct file *filp,
2581 struct poll_table_struct *wait)
2583 struct binder_proc *proc = filp->private_data;
2584 struct binder_thread *thread = NULL;
2585 int wait_for_proc_work;
2587 binder_lock(__func__);
2589 thread = binder_get_thread(proc);
2591 wait_for_proc_work = thread->transaction_stack == NULL &&
2592 list_empty(&thread->todo) && thread->return_error == BR_OK;
2594 binder_unlock(__func__);
	if (wait_for_proc_work) {
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
		poll_wait(filp, &proc->wait, wait);
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
	} else {
		if (binder_has_thread_work(thread))
			return POLLIN;
		poll_wait(filp, &thread->wait, wait);
		if (binder_has_thread_work(thread))
			return POLLIN;
	}
	return 0;
}
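/*
 * binder_ioctl_write_read() - handle BINDER_WRITE_READ: copy in the
 * binder_write_read descriptor, consume the write buffer, fill the read
 * buffer and copy the updated consumed counts back to user space.
 */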
2612 static int binder_ioctl_write_read(struct file *filp,
2613 unsigned int cmd, unsigned long arg,
2614 struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
2618 unsigned int size = _IOC_SIZE(cmd);
2619 void __user *ubuf = (void __user *)arg;
2620 struct binder_write_read bwr;
	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
2630 binder_debug(BINDER_DEBUG_READ_WRITE,
2631 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
2632 proc->pid, thread->pid,
2633 (u64)bwr.write_size, (u64)bwr.write_buffer,
2634 (u64)bwr.read_size, (u64)bwr.read_buffer);
	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer, bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size, &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
2663 binder_debug(BINDER_DEBUG_READ_WRITE,
2664 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
2665 proc->pid, thread->pid,
2666 (u64)bwr.write_consumed, (u64)bwr.write_size,
2667 (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
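/*
 * binder_ioctl_set_ctx_mgr() - register the caller as the context manager
 * (the node behind handle 0). Only one context manager may exist, and when a
 * context manager uid is already configured it must match the caller's euid.
 */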
2676 static int binder_ioctl_set_ctx_mgr(struct file *filp)
2679 struct binder_proc *proc = filp->private_data;
2680 kuid_t curr_euid = current_euid();
2682 if (binder_context_mgr_node != NULL) {
2683 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
2687 if (uid_valid(binder_context_mgr_uid)) {
2688 if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
2689 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
2690 from_kuid(&init_user_ns, curr_euid),
2691 from_kuid(&init_user_ns,
2692 binder_context_mgr_uid));
2697 binder_context_mgr_uid = curr_euid;
2699 binder_context_mgr_node = binder_new_node(proc, 0, 0);
2700 if (binder_context_mgr_node == NULL) {
2704 binder_context_mgr_node->local_weak_refs++;
2705 binder_context_mgr_node->local_strong_refs++;
2706 binder_context_mgr_node->has_strong_ref = 1;
2707 binder_context_mgr_node->has_weak_ref = 1;
2712 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
2716 struct binder_thread *thread;
2717 unsigned int size = _IOC_SIZE(cmd);
2718 void __user *ubuf = (void __user *)arg;
2720 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
2721 proc->pid, current->pid, cmd, arg);*/
2723 trace_binder_ioctl(cmd, arg);
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
2730 thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS:
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
2753 case BINDER_THREAD_EXIT:
2754 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
2755 proc->pid, thread->pid);
		binder_free_thread(proc, thread);
		thread = NULL;
		break;
2759 case BINDER_VERSION: {
2760 struct binder_version __user *ver = ubuf;
		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
2781 binder_unlock(__func__);
2782 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2783 if (ret && ret != -ERESTARTSYS)
2784 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
2790 static void binder_vma_open(struct vm_area_struct *vma)
2792 struct binder_proc *proc = vma->vm_private_data;
2794 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2795 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2796 proc->pid, vma->vm_start, vma->vm_end,
2797 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2798 (unsigned long)pgprot_val(vma->vm_page_prot));
2801 static void binder_vma_close(struct vm_area_struct *vma)
2803 struct binder_proc *proc = vma->vm_private_data;
2805 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2806 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2807 proc->pid, vma->vm_start, vma->vm_end,
2808 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2809 (unsigned long)pgprot_val(vma->vm_page_prot));
2811 proc->vma_vm_mm = NULL;
2812 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2815 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2817 return VM_FAULT_SIGBUS;
2820 static struct vm_operations_struct binder_vm_ops = {
2821 .open = binder_vma_open,
2822 .close = binder_vma_close,
	.fault = binder_vm_fault,
};
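/*
 * binder_mmap() - set up the per-process buffer area: at most 4MB, never
 * writable from user space, backed by a kernel vm area whose pages are
 * populated on demand through binder_update_page_range().
 */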
2826 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
	int ret;
	struct vm_struct *area;
2830 struct binder_proc *proc = filp->private_data;
2831 const char *failure_string;
2832 struct binder_buffer *buffer;
	if (proc->tsk != current)
		return -EINVAL;
2837 if ((vma->vm_end - vma->vm_start) > SZ_4M)
2838 vma->vm_end = vma->vm_start + SZ_4M;
2840 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2841 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
2842 proc->pid, vma->vm_start, vma->vm_end,
2843 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2844 (unsigned long)pgprot_val(vma->vm_page_prot));
	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
2851 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	mutex_lock(&binder_mmap_lock);
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
2866 proc->buffer = area->addr;
2867 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
2868 mutex_unlock(&binder_mmap_lock);
2870 #ifdef CONFIG_CPU_CACHE_VIPT
2871 if (cache_is_vipt_aliasing()) {
2872 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
2873 pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
2878 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
2884 proc->buffer_size = vma->vm_end - vma->vm_start;
2886 vma->vm_ops = &binder_vm_ops;
2887 vma->vm_private_data = proc;
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
2894 buffer = proc->buffer;
2895 INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
2899 proc->free_async_space = proc->buffer_size / 2;
	proc->files = get_files_struct(current);
	proc->vma = vma;
	proc->vma_vm_mm = vma->vm_mm;

	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;
err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_mmap_lock);
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_mmap_lock);
err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
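/*
 * binder_open() - allocate and initialize a binder_proc for the opening
 * process, link it into binder_procs and expose its state in debugfs.
 */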
2925 static int binder_open(struct inode *nodp, struct file *filp)
2927 struct binder_proc *proc;
2929 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
2930 current->group_leader->pid, current->pid);
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
2935 get_task_struct(current);
2936 proc->tsk = current;
2937 INIT_LIST_HEAD(&proc->todo);
2938 init_waitqueue_head(&proc->wait);
2939 proc->default_priority = task_nice(current);
2941 binder_lock(__func__);
2943 binder_stats_created(BINDER_STAT_PROC);
2944 hlist_add_head(&proc->proc_node, &binder_procs);
2945 proc->pid = current->group_leader->pid;
2946 INIT_LIST_HEAD(&proc->delivered_death);
2947 filp->private_data = proc;
2949 binder_unlock(__func__);
	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
	}

	return 0;
}
2962 static int binder_flush(struct file *filp, fl_owner_t id)
2964 struct binder_proc *proc = filp->private_data;
2966 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
2971 static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
2977 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
2979 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
2985 wake_up_interruptible_all(&proc->wait);
2987 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2988 "binder_flush: %d woke %d threads\n", proc->pid,
2992 static int binder_release(struct inode *nodp, struct file *filp)
2994 struct binder_proc *proc = filp->private_data;
2996 debugfs_remove(proc->debugfs_entry);
2997 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
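/*
 * binder_node_release() - during process teardown, free a node that has no
 * remaining references, or move it to binder_dead_nodes and queue
 * BINDER_WORK_DEAD_BINDER for every ref that requested a death notification.
 */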
3002 static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
3007 list_del_init(&node->work.entry);
3008 binder_release_work(&node->async_todo);
3010 if (hlist_empty(&node->refs)) {
3012 binder_stats_deleted(BINDER_STAT_NODE);
3018 node->local_strong_refs = 0;
3019 node->local_weak_refs = 0;
3020 hlist_add_head(&node->dead_node, &binder_dead_nodes);
	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;

		if (!ref->death)
			continue;

		death++;

		if (list_empty(&ref->death->work.entry)) {
			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
			list_add_tail(&ref->death->work.entry,
				      &ref->proc->todo);
			wake_up_interruptible(&ref->proc->wait);
		} else
			BUG();
	}
3039 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3040 "node %d now dead, refs %d, death %d\n",
3041 node->debug_id, refs, death);
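/*
 * binder_deferred_release() - final teardown of a binder_proc: release its
 * threads, nodes, refs, undelivered work, leftover buffers and pages, then
 * drop the task reference and free the proc itself.
 */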
3046 static void binder_deferred_release(struct binder_proc *proc)
3048 struct binder_transaction *t;
3050 int threads, nodes, incoming_refs, outgoing_refs, buffers,
3051 active_transactions, page_count;
3054 BUG_ON(proc->files);
3056 hlist_del(&proc->proc_node);
3058 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
3059 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3060 "%s: %d context_mgr_node gone\n",
3061 __func__, proc->pid);
3062 binder_context_mgr_node = NULL;
3066 active_transactions = 0;
3067 while ((n = rb_first(&proc->threads))) {
3068 struct binder_thread *thread;
3070 thread = rb_entry(n, struct binder_thread, rb_node);
3072 active_transactions += binder_free_thread(proc, thread);
3077 while ((n = rb_first(&proc->nodes))) {
3078 struct binder_node *node;
3080 node = rb_entry(n, struct binder_node, rb_node);
3082 rb_erase(&node->rb_node, &proc->nodes);
3083 incoming_refs = binder_node_release(node, incoming_refs);
3087 while ((n = rb_first(&proc->refs_by_desc))) {
3088 struct binder_ref *ref;
3090 ref = rb_entry(n, struct binder_ref, rb_node_desc);
3092 binder_delete_ref(ref);
3095 binder_release_work(&proc->todo);
3096 binder_release_work(&proc->delivered_death);
3099 while ((n = rb_first(&proc->allocated_buffers))) {
3100 struct binder_buffer *buffer;
3102 buffer = rb_entry(n, struct binder_buffer, rb_node);
3104 t = buffer->transaction;
3107 buffer->transaction = NULL;
3108 pr_err("release proc %d, transaction %d, not freed\n",
3109 proc->pid, t->debug_id);
3113 binder_free_buf(proc, buffer);
3117 binder_stats_deleted(BINDER_STAT_PROC);
3123 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3126 if (!proc->pages[i])
3129 page_addr = proc->buffer + i * PAGE_SIZE;
3130 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
3131 "%s: %d: page %d at %p not freed\n",
3132 __func__, proc->pid, i, page_addr);
3133 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
3134 __free_page(proc->pages[i]);
3138 vfree(proc->buffer);
3141 put_task_struct(proc->tsk);
3143 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3144 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
3145 __func__, proc->pid, threads, nodes, incoming_refs,
3146 outgoing_refs, active_transactions, buffers, page_count);
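/*
 * binder_deferred_func() - workqueue handler that drains binder_deferred_list,
 * performing the deferred put-files, flush and release work recorded for each
 * queued proc.
 */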
3151 static void binder_deferred_func(struct work_struct *work)
3153 struct binder_proc *proc;
3154 struct files_struct *files;
3159 binder_lock(__func__);
3160 mutex_lock(&binder_deferred_lock);
3161 if (!hlist_empty(&binder_deferred_list)) {
3162 proc = hlist_entry(binder_deferred_list.first,
3163 struct binder_proc, deferred_work_node);
3164 hlist_del_init(&proc->deferred_work_node);
3165 defer = proc->deferred_work;
3166 proc->deferred_work = 0;
3171 mutex_unlock(&binder_deferred_lock);
3174 if (defer & BINDER_DEFERRED_PUT_FILES) {
3175 files = proc->files;
3180 if (defer & BINDER_DEFERRED_FLUSH)
3181 binder_deferred_flush(proc);
3183 if (defer & BINDER_DEFERRED_RELEASE)
3184 binder_deferred_release(proc); /* frees proc */
3186 binder_unlock(__func__);
3188 put_files_struct(files);
3191 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3194 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3196 mutex_lock(&binder_deferred_lock);
3197 proc->deferred_work |= defer;
3198 if (hlist_unhashed(&proc->deferred_work_node)) {
3199 hlist_add_head(&proc->deferred_work_node,
3200 &binder_deferred_list);
3201 queue_work(binder_deferred_workqueue, &binder_deferred_work);
3203 mutex_unlock(&binder_deferred_lock);
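/*
 * debugfs dump helpers: print_binder_transaction/buffer/work/thread/node/ref
 * and the *_show functions below render driver state for the files created
 * in the binder debugfs directory.
 */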
3206 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3207 struct binder_transaction *t)
3210 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3211 prefix, t->debug_id, t,
3212 t->from ? t->from->proc->pid : 0,
3213 t->from ? t->from->pid : 0,
3214 t->to_proc ? t->to_proc->pid : 0,
3215 t->to_thread ? t->to_thread->pid : 0,
3216 t->code, t->flags, t->priority, t->need_reply);
3217 if (t->buffer == NULL) {
3218 seq_puts(m, " buffer free\n");
3221 if (t->buffer->target_node)
3222 seq_printf(m, " node %d",
3223 t->buffer->target_node->debug_id);
3224 seq_printf(m, " size %zd:%zd data %p\n",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data);
}
3229 static void print_binder_buffer(struct seq_file *m, const char *prefix,
3230 struct binder_buffer *buffer)
3232 seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
3233 prefix, buffer->debug_id, buffer->data,
3234 buffer->data_size, buffer->offsets_size,
3235 buffer->transaction ? "active" : "delivered");
3238 static void print_binder_work(struct seq_file *m, const char *prefix,
3239 const char *transaction_prefix,
3240 struct binder_work *w)
3242 struct binder_node *node;
3243 struct binder_transaction *t;
3246 case BINDER_WORK_TRANSACTION:
3247 t = container_of(w, struct binder_transaction, work);
3248 print_binder_transaction(m, transaction_prefix, t);
3250 case BINDER_WORK_TRANSACTION_COMPLETE:
3251 seq_printf(m, "%stransaction complete\n", prefix);
3253 case BINDER_WORK_NODE:
3254 node = container_of(w, struct binder_node, work);
3255 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3256 prefix, node->debug_id,
3257 (u64)node->ptr, (u64)node->cookie);
3259 case BINDER_WORK_DEAD_BINDER:
3260 seq_printf(m, "%shas dead binder\n", prefix);
3262 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3263 seq_printf(m, "%shas cleared dead binder\n", prefix);
3265 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3266 seq_printf(m, "%shas cleared death notification\n", prefix);
3269 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3274 static void print_binder_thread(struct seq_file *m,
3275 struct binder_thread *thread,
3278 struct binder_transaction *t;
3279 struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;
3283 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
3284 header_pos = m->count;
3285 t = thread->transaction_stack;
3287 if (t->from == thread) {
3288 print_binder_transaction(m,
3289 " outgoing transaction", t);
3291 } else if (t->to_thread == thread) {
3292 print_binder_transaction(m,
3293 " incoming transaction", t);
3296 print_binder_transaction(m, " bad transaction", t);
3300 list_for_each_entry(w, &thread->todo, entry) {
3301 print_binder_work(m, " ", " pending transaction", w);
3303 if (!print_always && m->count == header_pos)
3304 m->count = start_pos;
3307 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3309 struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;
3317 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3318 node->debug_id, (u64)node->ptr, (u64)node->cookie,
3319 node->has_strong_ref, node->has_weak_ref,
3320 node->local_strong_refs, node->local_weak_refs,
3321 node->internal_strong_refs, count);
3323 seq_puts(m, " proc");
3324 hlist_for_each_entry(ref, &node->refs, node_entry)
3325 seq_printf(m, " %d", ref->proc->pid);
3328 list_for_each_entry(w, &node->async_todo, entry)
3329 print_binder_work(m, " ",
3330 " pending async transaction", w);
3333 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3335 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
3336 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3337 ref->node->debug_id, ref->strong, ref->weak, ref->death);
3340 static void print_binder_proc(struct seq_file *m,
3341 struct binder_proc *proc, int print_all)
3343 struct binder_work *w;
3345 size_t start_pos = m->count;
3348 seq_printf(m, "proc %d\n", proc->pid);
3349 header_pos = m->count;
3351 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3352 print_binder_thread(m, rb_entry(n, struct binder_thread,
3353 rb_node), print_all);
3354 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3355 struct binder_node *node = rb_entry(n, struct binder_node,
3357 if (print_all || node->has_async_transaction)
3358 print_binder_node(m, node);
3361 for (n = rb_first(&proc->refs_by_desc);
3364 print_binder_ref(m, rb_entry(n, struct binder_ref,
3367 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3368 print_binder_buffer(m, " buffer",
3369 rb_entry(n, struct binder_buffer, rb_node));
3370 list_for_each_entry(w, &proc->todo, entry)
3371 print_binder_work(m, " ", " pending transaction", w);
3372 list_for_each_entry(w, &proc->delivered_death, entry) {
3373 seq_puts(m, " has delivered dead binder\n");
3376 if (!print_all && m->count == header_pos)
3377 m->count = start_pos;
3380 static const char * const binder_return_strings[] = {
3385 "BR_ACQUIRE_RESULT",
3387 "BR_TRANSACTION_COMPLETE",
3392 "BR_ATTEMPT_ACQUIRE",
3397 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3401 static const char * const binder_command_strings[] = {
3404 "BC_ACQUIRE_RESULT",
3412 "BC_ATTEMPT_ACQUIRE",
3413 "BC_REGISTER_LOOPER",
3416 "BC_REQUEST_DEATH_NOTIFICATION",
3417 "BC_CLEAR_DEATH_NOTIFICATION",
3418 "BC_DEAD_BINDER_DONE"
3421 static const char * const binder_objstat_strings[] = {
3428 "transaction_complete"
3431 static void print_binder_stats(struct seq_file *m, const char *prefix,
3432 struct binder_stats *stats)
3436 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3437 ARRAY_SIZE(binder_command_strings));
3438 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3440 seq_printf(m, "%s%s: %d\n", prefix,
3441 binder_command_strings[i], stats->bc[i]);
3444 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3445 ARRAY_SIZE(binder_return_strings));
3446 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3448 seq_printf(m, "%s%s: %d\n", prefix,
3449 binder_return_strings[i], stats->br[i]);
3452 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3453 ARRAY_SIZE(binder_objstat_strings));
3454 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3455 ARRAY_SIZE(stats->obj_deleted));
3456 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3457 if (stats->obj_created[i] || stats->obj_deleted[i])
3458 seq_printf(m, "%s%s: active %d total %d\n", prefix,
3459 binder_objstat_strings[i],
3460 stats->obj_created[i] - stats->obj_deleted[i],
3461 stats->obj_created[i]);
3465 static void print_binder_proc_stats(struct seq_file *m,
3466 struct binder_proc *proc)
3468 struct binder_work *w;
3470 int count, strong, weak;
3472 seq_printf(m, "proc %d\n", proc->pid);
3474 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3476 seq_printf(m, " threads: %d\n", count);
3477 seq_printf(m, " requested threads: %d+%d/%d\n"
3478 " ready threads %d\n"
3479 " free async space %zd\n", proc->requested_threads,
3480 proc->requested_threads_started, proc->max_threads,
3481 proc->ready_threads, proc->free_async_space);
3483 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3485 seq_printf(m, " nodes: %d\n", count);
3489 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3490 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3493 strong += ref->strong;
3496 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
3499 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3501 seq_printf(m, " buffers: %d\n", count);
3504 list_for_each_entry(w, &proc->todo, entry) {
3506 case BINDER_WORK_TRANSACTION:
3513 seq_printf(m, " pending transactions: %d\n", count);
3515 print_binder_stats(m, " ", &proc->stats);
3519 static int binder_state_show(struct seq_file *m, void *unused)
3521 struct binder_proc *proc;
3522 struct binder_node *node;
3523 int do_lock = !binder_debug_no_lock;
	if (do_lock)
		binder_lock(__func__);
3528 seq_puts(m, "binder state:\n");
3530 if (!hlist_empty(&binder_dead_nodes))
3531 seq_puts(m, "dead nodes:\n");
3532 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
3533 print_binder_node(m, node);
3535 hlist_for_each_entry(proc, &binder_procs, proc_node)
3536 print_binder_proc(m, proc, 1);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3542 static int binder_stats_show(struct seq_file *m, void *unused)
3544 struct binder_proc *proc;
3545 int do_lock = !binder_debug_no_lock;
	if (do_lock)
		binder_lock(__func__);
3550 seq_puts(m, "binder stats:\n");
3552 print_binder_stats(m, "", &binder_stats);
3554 hlist_for_each_entry(proc, &binder_procs, proc_node)
3555 print_binder_proc_stats(m, proc);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3561 static int binder_transactions_show(struct seq_file *m, void *unused)
3563 struct binder_proc *proc;
3564 int do_lock = !binder_debug_no_lock;
	if (do_lock)
		binder_lock(__func__);
3569 seq_puts(m, "binder transactions:\n");
3570 hlist_for_each_entry(proc, &binder_procs, proc_node)
3571 print_binder_proc(m, proc, 0);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3577 static int binder_proc_show(struct seq_file *m, void *unused)
3579 struct binder_proc *proc = m->private;
3580 int do_lock = !binder_debug_no_lock;
	if (do_lock)
		binder_lock(__func__);
3584 seq_puts(m, "binder proc state:\n");
3585 print_binder_proc(m, proc, 1);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3591 static void print_binder_transaction_log_entry(struct seq_file *m,
3592 struct binder_transaction_log_entry *e)
3595 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
3596 e->debug_id, (e->call_type == 2) ? "reply" :
3597 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3598 e->from_thread, e->to_proc, e->to_thread, e->to_node,
3599 e->target_handle, e->data_size, e->offsets_size);
3602 static int binder_transaction_log_show(struct seq_file *m, void *unused)
3604 struct binder_transaction_log *log = m->private;
3608 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3609 print_binder_transaction_log_entry(m, &log->entry[i]);
3611 for (i = 0; i < log->next; i++)
3612 print_binder_transaction_log_entry(m, &log->entry[i]);
3616 static const struct file_operations binder_fops = {
3617 .owner = THIS_MODULE,
3618 .poll = binder_poll,
3619 .unlocked_ioctl = binder_ioctl,
3620 .compat_ioctl = binder_ioctl,
3621 .mmap = binder_mmap,
3622 .open = binder_open,
3623 .flush = binder_flush,
3624 .release = binder_release,
static struct miscdevice binder_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "binder",
	.fops = &binder_fops
};
3633 BINDER_DEBUG_ENTRY(state);
3634 BINDER_DEBUG_ENTRY(stats);
3635 BINDER_DEBUG_ENTRY(transactions);
3636 BINDER_DEBUG_ENTRY(transaction_log);
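/*
 * binder_init() - module init: create the deferred-work workqueue, the
 * debugfs hierarchy and register the binder misc device.
 */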
3638 static int __init binder_init(void)
{
	int ret;

	binder_deferred_workqueue = create_singlethread_workqueue("binder");
	if (!binder_deferred_workqueue)
		return -ENOMEM;
3646 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3647 if (binder_debugfs_dir_entry_root)
3648 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3649 binder_debugfs_dir_entry_root);
3650 ret = misc_register(&binder_miscdev);
	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}
	return ret;
}
3681 device_initcall(binder_init);
3683 #define CREATE_TRACE_POINTS
3684 #include "binder_trace.h"
3686 MODULE_LICENSE("GPL v2");