/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <asm/cacheflush.h>
21 #include <linux/fdtable.h>
22 #include <linux/file.h>
23 #include <linux/freezer.h>
25 #include <linux/list.h>
26 #include <linux/miscdevice.h>
28 #include <linux/module.h>
29 #include <linux/rtmutex.h>
30 #include <linux/mutex.h>
31 #include <linux/nsproxy.h>
32 #include <linux/poll.h>
33 #include <linux/debugfs.h>
34 #include <linux/rbtree.h>
35 #include <linux/sched.h>
36 #include <linux/seq_file.h>
37 #include <linux/uaccess.h>
38 #include <linux/vmalloc.h>
39 #include <linux/slab.h>
40 #include <linux/pid_namespace.h>
41 #include <linux/security.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
48 #include "binder_trace.h"
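/*
 * Global driver state.  In this version of the driver nearly all work is
 * serialized by the single binder_main_lock rt_mutex (taken through
 * binder_lock()/binder_unlock() below); binder_deferred_lock protects the
 * deferred-work list and binder_mmap_lock serializes mmap() setup of the
 * shared buffer area.
 */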
50 static DEFINE_RT_MUTEX(binder_main_lock);
51 static DEFINE_MUTEX(binder_deferred_lock);
52 static DEFINE_MUTEX(binder_mmap_lock);
54 static HLIST_HEAD(binder_procs);
55 static HLIST_HEAD(binder_deferred_list);
56 static HLIST_HEAD(binder_dead_nodes);
58 static struct dentry *binder_debugfs_dir_entry_root;
59 static struct dentry *binder_debugfs_dir_entry_proc;
60 static struct binder_node *binder_context_mgr_node;
61 static kuid_t binder_context_mgr_uid = INVALID_UID;
62 static int binder_last_id;
63 static struct workqueue_struct *binder_deferred_workqueue;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}
79 static int binder_proc_show(struct seq_file *m, void *unused);
80 BINDER_DEBUG_ENTRY(proc);
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif
91 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
93 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
113 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
114 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
115 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
117 static bool binder_debug_no_lock;
118 module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
120 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
121 static int binder_stop_on_user_error;
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
150 enum binder_stat_types {
156 BINDER_STAT_TRANSACTION,
157 BINDER_STAT_TRANSACTION_COMPLETE,
161 struct binder_stats {
162 int br[_IOC_NR(BR_FAILED_REPLY) + 1];
163 int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
164 int obj_created[BINDER_STAT_COUNT];
165 int obj_deleted[BINDER_STAT_COUNT];
168 static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}
180 struct binder_transaction_log_entry {
struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};
197 static struct binder_transaction_log binder_transaction_log;
198 static struct binder_transaction_log binder_transaction_log_failed;
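/*
 * binder_transaction_log_add() hands back the next free slot in one of the
 * two fixed-size (32 entry) transaction logs above, wrapping around and
 * flagging the log as full once the end of the array is reached.  The logs
 * are exposed through debugfs.
 */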
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}
216 struct list_head entry;
218 BINDER_WORK_TRANSACTION = 1,
219 BINDER_WORK_TRANSACTION_COMPLETE,
221 BINDER_WORK_DEAD_BINDER,
222 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
	BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

/* One binder object, owned by "proc"; other processes reach it via binder_refs. */
struct binder_node {
	int debug_id;
	struct binder_work work;
231 struct rb_node rb_node;
232 struct hlist_node dead_node;
234 struct binder_proc *proc;
235 struct hlist_head refs;
236 int internal_strong_refs;
238 int local_strong_refs;
239 binder_uintptr_t ptr;
240 binder_uintptr_t cookie;
241 unsigned has_strong_ref:1;
242 unsigned pending_strong_ref:1;
243 unsigned has_weak_ref:1;
244 unsigned pending_weak_ref:1;
245 unsigned has_async_transaction:1;
246 unsigned accept_fds:1;
247 unsigned min_priority:8;
248 struct list_head async_todo;
251 struct binder_ref_death {
252 struct binder_work work;
253 binder_uintptr_t cookie;
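/*
 * A binder_ref is one process's handle ("desc") to a binder_node that lives
 * in another process.  Each ref is indexed twice in its owning binder_proc,
 * by descriptor (refs_by_desc) and by node (refs_by_node), and is also
 * linked into the node's refs hlist.
 */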
257 /* Lookups needed: */
258 /* node + proc => ref (transaction) */
259 /* desc + proc => ref (transaction, inc/dec ref) */
260 /* node => refs + procs (proc exit) */
262 struct rb_node rb_node_desc;
263 struct rb_node rb_node_node;
264 struct hlist_node node_entry;
265 struct binder_proc *proc;
266 struct binder_node *node;
270 struct binder_ref_death *death;
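/*
 * A binder_buffer is one allocation carved out of a process's mmap'ed
 * binder area.  Buffers sit on proc->buffers in address order and in either
 * the free_buffers (keyed by size) or allocated_buffers (keyed by address)
 * rbtree, which is what the best-fit allocator below walks.
 */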
273 struct binder_buffer {
274 struct list_head entry; /* free and allocated entries by address */
275 struct rb_node rb_node; /* free entry by size or allocated entry */
278 unsigned allow_user_free:1;
279 unsigned async_transaction:1;
280 unsigned debug_id:29;
282 struct binder_transaction *transaction;
284 struct binder_node *target_node;
290 enum binder_deferred_state {
291 BINDER_DEFERRED_PUT_FILES = 0x01,
292 BINDER_DEFERRED_FLUSH = 0x02,
293 BINDER_DEFERRED_RELEASE = 0x04,
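/*
 * One binder_proc exists for every open of the binder device.  It owns the
 * per-process rbtrees of threads, local nodes and remote refs, the mmap'ed
 * transaction buffer and its allocator state, and the "todo" work list that
 * reading threads drain.
 */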
297 struct hlist_node proc_node;
298 struct rb_root threads;
299 struct rb_root nodes;
300 struct rb_root refs_by_desc;
301 struct rb_root refs_by_node;
303 struct vm_area_struct *vma;
304 struct mm_struct *vma_vm_mm;
305 struct task_struct *tsk;
306 struct files_struct *files;
307 struct hlist_node deferred_work_node;
310 ptrdiff_t user_buffer_offset;
312 struct list_head buffers;
313 struct rb_root free_buffers;
314 struct rb_root allocated_buffers;
315 size_t free_async_space;
319 uint32_t buffer_free;
320 struct list_head todo;
321 wait_queue_head_t wait;
322 struct binder_stats stats;
323 struct list_head delivered_death;
325 int requested_threads;
326 int requested_threads_started;
328 long default_priority;
329 struct dentry *debugfs_entry;
333 BINDER_LOOPER_STATE_REGISTERED = 0x01,
334 BINDER_LOOPER_STATE_ENTERED = 0x02,
335 BINDER_LOOPER_STATE_EXITED = 0x04,
336 BINDER_LOOPER_STATE_INVALID = 0x08,
337 BINDER_LOOPER_STATE_WAITING = 0x10,
338 BINDER_LOOPER_STATE_NEED_RETURN = 0x20
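/*
 * A binder_thread is created lazily for every thread of a process that
 * calls into the driver.  The looper flags above track how the thread
 * announced itself (BC_ENTER_LOOPER vs. BC_REGISTER_LOOPER) and whether it
 * is currently waiting for work.
 */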
341 struct binder_thread {
342 struct binder_proc *proc;
343 struct rb_node rb_node;
346 struct binder_transaction *transaction_stack;
347 struct list_head todo;
348 uint32_t return_error; /* Write failed, return error code in read buf */
349 uint32_t return_error2; /* Write failed, return error code in read */
350 /* buffer. Used when sending a reply to a dead process that */
351 /* we are also waiting on */
352 wait_queue_head_t wait;
353 struct binder_stats stats;
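/*
 * An in-flight transaction.  For synchronous calls, transactions are
 * chained through from_parent/to_parent so that each thread's
 * transaction_stack records the call chain it is currently part of.
 */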
356 struct binder_transaction {
358 struct binder_work work;
359 struct binder_thread *from;
360 struct binder_transaction *from_parent;
361 struct binder_proc *to_proc;
362 struct binder_thread *to_thread;
363 struct binder_transaction *to_parent;
364 unsigned need_reply:1;
365 /* unsigned is_dead:1; */ /* not used at the moment */
367 struct binder_buffer *buffer;
376 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
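/*
 * The task_*() helpers below operate on the *target* process's file table
 * (proc->files) rather than current's, so that a BINDER_TYPE_FD object can
 * be installed directly into, or closed in, the receiving process.
 */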
378 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
380 struct files_struct *files = proc->files;
381 unsigned long rlim_cur;
387 if (!lock_task_sighand(proc->tsk, &irqs))
390 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
391 unlock_task_sighand(proc->tsk, &irqs);
	return __alloc_fd(files, 0, rlim_cur, flags);
}

/* copied from fd_install */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	__fd_install(proc->files, fd, file);
}
/* copied from sys_close */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	rt_mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	rt_mutex_unlock(&binder_main_lock);
}
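/*
 * When servicing a transaction the receiving thread is run at the sender's
 * priority; binder_set_nice() applies that priority but caps it at the
 * thread's RLIMIT_NICE so a caller cannot raise the target above its limit.
 */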
440 static void binder_set_nice(long nice)
444 if (can_nice(current, nice)) {
445 set_user_nice(current, nice);
448 min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
449 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
450 "%d: nice value %ld not allowed use %ld instead\n",
451 current->pid, nice, min_nice);
452 set_user_nice(current, min_nice);
455 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
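/*
 * A buffer's usable size is the distance to the next buffer in the
 * address-ordered proc->buffers list (or to the end of the mmap'ed area for
 * the last buffer); the size is not stored in the buffer itself.
 */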
458 static size_t binder_buffer_size(struct binder_proc *proc,
459 struct binder_buffer *buffer)
461 if (list_is_last(&buffer->entry, &proc->buffers))
462 return proc->buffer + proc->buffer_size - (void *)buffer->data;
463 return (size_t)list_entry(buffer->entry.next,
464 struct binder_buffer, entry) - (size_t)buffer->data;
467 static void binder_insert_free_buffer(struct binder_proc *proc,
468 struct binder_buffer *new_buffer)
470 struct rb_node **p = &proc->free_buffers.rb_node;
471 struct rb_node *parent = NULL;
472 struct binder_buffer *buffer;
474 size_t new_buffer_size;
476 BUG_ON(!new_buffer->free);
478 new_buffer_size = binder_buffer_size(proc, new_buffer);
480 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
481 "%d: add free buffer, size %zd, at %p\n",
482 proc->pid, new_buffer_size, new_buffer);
486 buffer = rb_entry(parent, struct binder_buffer, rb_node);
487 BUG_ON(!buffer->free);
489 buffer_size = binder_buffer_size(proc, buffer);
491 if (new_buffer_size < buffer_size)
492 p = &parent->rb_left;
494 p = &parent->rb_right;
496 rb_link_node(&new_buffer->rb_node, parent, p);
497 rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
500 static void binder_insert_allocated_buffer(struct binder_proc *proc,
501 struct binder_buffer *new_buffer)
503 struct rb_node **p = &proc->allocated_buffers.rb_node;
504 struct rb_node *parent = NULL;
505 struct binder_buffer *buffer;
507 BUG_ON(new_buffer->free);
511 buffer = rb_entry(parent, struct binder_buffer, rb_node);
512 BUG_ON(buffer->free);
514 if (new_buffer < buffer)
515 p = &parent->rb_left;
516 else if (new_buffer > buffer)
517 p = &parent->rb_right;
521 rb_link_node(&new_buffer->rb_node, parent, p);
522 rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
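/*
 * binder_buffer_lookup() maps a user-space data pointer (what userspace
 * passes back with BC_FREE_BUFFER) to its struct binder_buffer by undoing
 * user_buffer_offset and the data[] offset, then confirming the result is
 * present in the allocated_buffers tree.
 */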
525 static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
528 struct rb_node *n = proc->allocated_buffers.rb_node;
529 struct binder_buffer *buffer;
530 struct binder_buffer *kern_ptr;
532 kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
533 - offsetof(struct binder_buffer, data));
536 buffer = rb_entry(n, struct binder_buffer, rb_node);
537 BUG_ON(buffer->free);
539 if (kern_ptr < buffer)
541 else if (kern_ptr > buffer)
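/*
 * binder_update_page_range() allocates (or frees) the physical pages
 * backing [start, end) of the binder area and maps each page twice: into
 * the kernel with map_vm_area() and into the process's VMA with
 * vm_insert_page(), so both sides see the same memory.
 */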
549 static int binder_update_page_range(struct binder_proc *proc, int allocate,
550 void *start, void *end,
551 struct vm_area_struct *vma)
554 unsigned long user_page_addr;
555 struct vm_struct tmp_area;
557 struct mm_struct *mm;
559 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
560 "%d: %s pages %p-%p\n", proc->pid,
561 allocate ? "allocate" : "free", start, end);
566 trace_binder_update_page_range(proc, allocate, start, end);
571 mm = get_task_mm(proc->tsk);
574 down_write(&mm->mmap_sem);
576 if (vma && mm != proc->vma_vm_mm) {
577 pr_err("%d: vma mm and task mm mismatch\n",
587 pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
592 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
594 struct page **page_array_ptr;
596 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
599 *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
601 pr_err("%d: binder_alloc_buf failed for page at %p\n",
602 proc->pid, page_addr);
603 goto err_alloc_page_failed;
605 tmp_area.addr = page_addr;
606 tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
607 page_array_ptr = page;
608 ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
610 pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
611 proc->pid, page_addr);
612 goto err_map_kernel_failed;
615 (uintptr_t)page_addr + proc->user_buffer_offset;
616 ret = vm_insert_page(vma, user_page_addr, page[0]);
618 pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
619 proc->pid, user_page_addr);
620 goto err_vm_insert_page_failed;
622 /* vm_insert_page does not seem to increment the refcount */
625 up_write(&mm->mmap_sem);
631 for (page_addr = end - PAGE_SIZE; page_addr >= start;
632 page_addr -= PAGE_SIZE) {
633 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
635 zap_page_range(vma, (uintptr_t)page_addr +
636 proc->user_buffer_offset, PAGE_SIZE, NULL);
637 err_vm_insert_page_failed:
638 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
639 err_map_kernel_failed:
642 err_alloc_page_failed:
647 up_write(&mm->mmap_sem);
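/*
 * binder_alloc_buf() is a best-fit allocator over the free_buffers rbtree.
 * The chosen buffer is split when large enough to leave a usable remainder,
 * only the pages actually covered are populated, and async transactions are
 * additionally charged against free_async_space.
 */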
653 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
655 size_t offsets_size, int is_async)
657 struct rb_node *n = proc->free_buffers.rb_node;
658 struct binder_buffer *buffer;
660 struct rb_node *best_fit = NULL;
665 if (proc->vma == NULL) {
666 pr_err("%d: binder_alloc_buf, no vma\n",
671 size = ALIGN(data_size, sizeof(void *)) +
672 ALIGN(offsets_size, sizeof(void *));
674 if (size < data_size || size < offsets_size) {
675 binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
676 proc->pid, data_size, offsets_size);
681 proc->free_async_space < size + sizeof(struct binder_buffer)) {
682 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
683 "%d: binder_alloc_buf size %zd failed, no async space left\n",
689 buffer = rb_entry(n, struct binder_buffer, rb_node);
690 BUG_ON(!buffer->free);
691 buffer_size = binder_buffer_size(proc, buffer);
693 if (size < buffer_size) {
696 } else if (size > buffer_size)
703 if (best_fit == NULL) {
704 pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
709 buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
710 buffer_size = binder_buffer_size(proc, buffer);
713 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
714 "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
715 proc->pid, size, buffer, buffer_size);
718 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
720 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
721 buffer_size = size; /* no room for other buffers */
723 buffer_size = size + sizeof(struct binder_buffer);
726 (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
727 if (end_page_addr > has_page_addr)
728 end_page_addr = has_page_addr;
729 if (binder_update_page_range(proc, 1,
730 (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
733 rb_erase(best_fit, &proc->free_buffers);
735 binder_insert_allocated_buffer(proc, buffer);
736 if (buffer_size != size) {
737 struct binder_buffer *new_buffer = (void *)buffer->data + size;
739 list_add(&new_buffer->entry, &buffer->entry);
740 new_buffer->free = 1;
741 binder_insert_free_buffer(proc, new_buffer);
743 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
744 "%d: binder_alloc_buf size %zd got %p\n",
745 proc->pid, size, buffer);
746 buffer->data_size = data_size;
747 buffer->offsets_size = offsets_size;
748 buffer->async_transaction = is_async;
750 proc->free_async_space -= size + sizeof(struct binder_buffer);
751 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
752 "%d: binder_alloc_buf size %zd async free %zd\n",
753 proc->pid, size, proc->free_async_space);
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}
769 static void binder_delete_free_buffer(struct binder_proc *proc,
770 struct binder_buffer *buffer)
772 struct binder_buffer *prev, *next = NULL;
773 int free_page_end = 1;
774 int free_page_start = 1;
776 BUG_ON(proc->buffers.next == &buffer->entry);
777 prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
779 if (buffer_end_page(prev) == buffer_start_page(buffer)) {
781 if (buffer_end_page(prev) == buffer_end_page(buffer))
783 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
784 "%d: merge free, buffer %p share page with %p\n",
785 proc->pid, buffer, prev);
788 if (!list_is_last(&buffer->entry, &proc->buffers)) {
789 next = list_entry(buffer->entry.next,
790 struct binder_buffer, entry);
791 if (buffer_start_page(next) == buffer_end_page(buffer)) {
793 if (buffer_start_page(next) ==
794 buffer_start_page(buffer))
796 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
797 "%d: merge free, buffer %p share page with %p\n",
798 proc->pid, buffer, prev);
801 list_del(&buffer->entry);
802 if (free_page_start || free_page_end) {
803 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
804 "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
805 proc->pid, buffer, free_page_start ? "" : " end",
806 free_page_end ? "" : " start", prev, next);
807 binder_update_page_range(proc, 0, free_page_start ?
808 buffer_start_page(buffer) : buffer_end_page(buffer),
809 (free_page_end ? buffer_end_page(buffer) :
810 buffer_start_page(buffer)) + PAGE_SIZE, NULL);
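/*
 * binder_free_buf() releases the whole pages a buffer spans, merges the
 * buffer with free neighbours in the address list, and reinserts the result
 * into the free_buffers tree.
 */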
814 static void binder_free_buf(struct binder_proc *proc,
815 struct binder_buffer *buffer)
817 size_t size, buffer_size;
819 buffer_size = binder_buffer_size(proc, buffer);
821 size = ALIGN(buffer->data_size, sizeof(void *)) +
822 ALIGN(buffer->offsets_size, sizeof(void *));
824 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
825 "%d: binder_free_buf %p size %zd buffer_size %zd\n",
826 proc->pid, buffer, size, buffer_size);
828 BUG_ON(buffer->free);
829 BUG_ON(size > buffer_size);
830 BUG_ON(buffer->transaction != NULL);
831 BUG_ON((void *)buffer < proc->buffer);
832 BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
834 if (buffer->async_transaction) {
835 proc->free_async_space += size + sizeof(struct binder_buffer);
837 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
838 "%d: binder_free_buf size %zd async free %zd\n",
839 proc->pid, size, proc->free_async_space);
842 binder_update_page_range(proc, 0,
843 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
844 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
846 rb_erase(&buffer->rb_node, &proc->allocated_buffers);
848 if (!list_is_last(&buffer->entry, &proc->buffers)) {
849 struct binder_buffer *next = list_entry(buffer->entry.next,
850 struct binder_buffer, entry);
853 rb_erase(&next->rb_node, &proc->free_buffers);
854 binder_delete_free_buffer(proc, next);
857 if (proc->buffers.next != &buffer->entry) {
858 struct binder_buffer *prev = list_entry(buffer->entry.prev,
859 struct binder_buffer, entry);
862 binder_delete_free_buffer(proc, buffer);
863 rb_erase(&prev->rb_node, &proc->free_buffers);
867 binder_insert_free_buffer(proc, buffer);
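/*
 * Nodes are looked up and created by the user-space pointer ("ptr") the
 * owning process used when it first sent the object; proc->nodes is an
 * rbtree keyed on that pointer.
 */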
870 static struct binder_node *binder_get_node(struct binder_proc *proc,
871 binder_uintptr_t ptr)
873 struct rb_node *n = proc->nodes.rb_node;
874 struct binder_node *node;
877 node = rb_entry(n, struct binder_node, rb_node);
881 else if (ptr > node->ptr)
889 static struct binder_node *binder_new_node(struct binder_proc *proc,
890 binder_uintptr_t ptr,
891 binder_uintptr_t cookie)
893 struct rb_node **p = &proc->nodes.rb_node;
894 struct rb_node *parent = NULL;
895 struct binder_node *node;
899 node = rb_entry(parent, struct binder_node, rb_node);
903 else if (ptr > node->ptr)
909 node = kzalloc(sizeof(*node), GFP_KERNEL);
912 binder_stats_created(BINDER_STAT_NODE);
913 rb_link_node(&node->rb_node, parent, p);
914 rb_insert_color(&node->rb_node, &proc->nodes);
915 node->debug_id = ++binder_last_id;
918 node->cookie = cookie;
919 node->work.type = BINDER_WORK_NODE;
920 INIT_LIST_HEAD(&node->work.entry);
921 INIT_LIST_HEAD(&node->async_todo);
922 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
923 "%d:%d node %d u%016llx c%016llx created\n",
924 proc->pid, current->pid, node->debug_id,
925 (u64)node->ptr, (u64)node->cookie);
929 static int binder_inc_node(struct binder_node *node, int strong, int internal,
930 struct list_head *target_list)
934 if (target_list == NULL &&
935 node->internal_strong_refs == 0 &&
936 !(node == binder_context_mgr_node &&
937 node->has_strong_ref)) {
938 pr_err("invalid inc strong node for %d\n",
942 node->internal_strong_refs++;
944 node->local_strong_refs++;
945 if (!node->has_strong_ref && target_list) {
946 list_del_init(&node->work.entry);
947 list_add_tail(&node->work.entry, target_list);
951 node->local_weak_refs++;
952 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
953 if (target_list == NULL) {
954 pr_err("invalid inc weak node for %d\n",
958 list_add_tail(&node->work.entry, target_list);
964 static int binder_dec_node(struct binder_node *node, int strong, int internal)
968 node->internal_strong_refs--;
970 node->local_strong_refs--;
971 if (node->local_strong_refs || node->internal_strong_refs)
975 node->local_weak_refs--;
976 if (node->local_weak_refs || !hlist_empty(&node->refs))
979 if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
980 if (list_empty(&node->work.entry)) {
981 list_add_tail(&node->work.entry, &node->proc->todo);
982 wake_up_interruptible(&node->proc->wait);
985 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
986 !node->local_weak_refs) {
987 list_del_init(&node->work.entry);
989 rb_erase(&node->rb_node, &node->proc->nodes);
990 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
991 "refless node %d deleted\n",
994 hlist_del(&node->dead_node);
995 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
996 "dead node %d deleted\n",
1000 binder_stats_deleted(BINDER_STAT_NODE);
1008 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
1011 struct rb_node *n = proc->refs_by_desc.rb_node;
1012 struct binder_ref *ref;
1015 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1017 if (desc < ref->desc)
1019 else if (desc > ref->desc)
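/*
 * binder_get_ref_for_node() finds, or creates, the calling process's ref
 * for a node.  New refs get the smallest unused descriptor; descriptor 0 is
 * reserved for the context manager node.
 */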
1027 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
1028 struct binder_node *node)
1031 struct rb_node **p = &proc->refs_by_node.rb_node;
1032 struct rb_node *parent = NULL;
1033 struct binder_ref *ref, *new_ref;
1037 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1039 if (node < ref->node)
1041 else if (node > ref->node)
1042 p = &(*p)->rb_right;
1046 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1047 if (new_ref == NULL)
1049 binder_stats_created(BINDER_STAT_REF);
1050 new_ref->debug_id = ++binder_last_id;
1051 new_ref->proc = proc;
1052 new_ref->node = node;
1053 rb_link_node(&new_ref->rb_node_node, parent, p);
1054 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1056 new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
1057 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1058 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1059 if (ref->desc > new_ref->desc)
1061 new_ref->desc = ref->desc + 1;
1064 p = &proc->refs_by_desc.rb_node;
1067 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1069 if (new_ref->desc < ref->desc)
1071 else if (new_ref->desc > ref->desc)
1072 p = &(*p)->rb_right;
1076 rb_link_node(&new_ref->rb_node_desc, parent, p);
1077 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1079 hlist_add_head(&new_ref->node_entry, &node->refs);
1081 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1082 "%d new ref %d desc %d for node %d\n",
1083 proc->pid, new_ref->debug_id, new_ref->desc,
1086 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1087 "%d new ref %d desc %d for dead node\n",
1088 proc->pid, new_ref->debug_id, new_ref->desc);
1093 static void binder_delete_ref(struct binder_ref *ref)
1095 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1096 "%d delete ref %d desc %d for node %d\n",
1097 ref->proc->pid, ref->debug_id, ref->desc,
1098 ref->node->debug_id);
1100 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1101 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1103 binder_dec_node(ref->node, 1, 1);
1104 hlist_del(&ref->node_entry);
1105 binder_dec_node(ref->node, 0, 1);
1107 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1108 "%d delete ref %d desc %d has death notification\n",
1109 ref->proc->pid, ref->debug_id, ref->desc);
1110 list_del(&ref->death->work.entry);
1112 binder_stats_deleted(BINDER_STAT_DEATH);
1115 binder_stats_deleted(BINDER_STAT_REF);
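/*
 * Ref counts are kept per ref and forwarded to the node: the first strong
 * or weak increment on a ref takes a corresponding reference on the node,
 * and a ref that drops to zero strong and weak references is deleted.
 */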
1118 static int binder_inc_ref(struct binder_ref *ref, int strong,
1119 struct list_head *target_list)
1124 if (ref->strong == 0) {
1125 ret = binder_inc_node(ref->node, 1, 1, target_list);
1131 if (ref->weak == 0) {
1132 ret = binder_inc_node(ref->node, 0, 1, target_list);
1142 static int binder_dec_ref(struct binder_ref *ref, int strong)
1145 if (ref->strong == 0) {
1146 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1147 ref->proc->pid, ref->debug_id,
1148 ref->desc, ref->strong, ref->weak);
1152 if (ref->strong == 0) {
1155 ret = binder_dec_node(ref->node, strong, 1);
1160 if (ref->weak == 0) {
1161 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1162 ref->proc->pid, ref->debug_id,
1163 ref->desc, ref->strong, ref->weak);
1168 if (ref->strong == 0 && ref->weak == 0)
1169 binder_delete_ref(ref);
1173 static void binder_pop_transaction(struct binder_thread *target_thread,
1174 struct binder_transaction *t)
1176 if (target_thread) {
1177 BUG_ON(target_thread->transaction_stack != t);
1178 BUG_ON(target_thread->transaction_stack->from != target_thread);
1179 target_thread->transaction_stack =
1180 target_thread->transaction_stack->from_parent;
1185 t->buffer->transaction = NULL;
1187 binder_stats_deleted(BINDER_STAT_TRANSACTION);
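/*
 * binder_send_failed_reply() walks back up the transaction stack to hand an
 * error code to the thread that is waiting for this transaction, waking it
 * up, or keeps unwinding if that thread has already died.
 */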
1190 static void binder_send_failed_reply(struct binder_transaction *t,
1191 uint32_t error_code)
1193 struct binder_thread *target_thread;
1194 struct binder_transaction *next;
1196 BUG_ON(t->flags & TF_ONE_WAY);
1198 target_thread = t->from;
1199 if (target_thread) {
1200 if (target_thread->return_error != BR_OK &&
1201 target_thread->return_error2 == BR_OK) {
1202 target_thread->return_error2 =
1203 target_thread->return_error;
1204 target_thread->return_error = BR_OK;
1206 if (target_thread->return_error == BR_OK) {
1207 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1208 "send failed reply for transaction %d to %d:%d\n",
1210 target_thread->proc->pid,
1211 target_thread->pid);
1213 binder_pop_transaction(target_thread, t);
1214 target_thread->return_error = error_code;
1215 wake_up_interruptible(&target_thread->wait);
1217 pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
1218 target_thread->proc->pid,
1220 target_thread->return_error);
1224 next = t->from_parent;
1226 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1227 "send failed reply for transaction %d, target dead\n",
1230 binder_pop_transaction(target_thread, t);
1232 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1233 "reply failed, no target thread at root\n");
1237 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1238 "reply failed, no target thread -- retry %d\n",
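/*
 * binder_transaction_buffer_release() undoes the reference counting done
 * while a transaction buffer was filled in: it walks the offsets array and
 * drops the node/ref/fd taken for each flat_binder_object.  A non-NULL
 * failed_at limits the walk to the objects processed before a failure.
 */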
1243 static void binder_transaction_buffer_release(struct binder_proc *proc,
1244 struct binder_buffer *buffer,
1245 binder_size_t *failed_at)
1247 binder_size_t *offp, *off_end;
1248 int debug_id = buffer->debug_id;
1250 binder_debug(BINDER_DEBUG_TRANSACTION,
1251 "%d buffer release %d, size %zd-%zd, failed at %p\n",
1252 proc->pid, buffer->debug_id,
1253 buffer->data_size, buffer->offsets_size, failed_at);
1255 if (buffer->target_node)
1256 binder_dec_node(buffer->target_node, 1, 0);
1258 offp = (binder_size_t *)(buffer->data +
1259 ALIGN(buffer->data_size, sizeof(void *)));
1261 off_end = failed_at;
1263 off_end = (void *)offp + buffer->offsets_size;
1264 for (; offp < off_end; offp++) {
1265 struct flat_binder_object *fp;
1267 if (*offp > buffer->data_size - sizeof(*fp) ||
1268 buffer->data_size < sizeof(*fp) ||
1269 !IS_ALIGNED(*offp, sizeof(u32))) {
1270 pr_err("transaction release %d bad offset %lld, size %zd\n",
1271 debug_id, (u64)*offp, buffer->data_size);
1274 fp = (struct flat_binder_object *)(buffer->data + *offp);
1276 case BINDER_TYPE_BINDER:
1277 case BINDER_TYPE_WEAK_BINDER: {
1278 struct binder_node *node = binder_get_node(proc, fp->binder);
1281 pr_err("transaction release %d bad node %016llx\n",
1282 debug_id, (u64)fp->binder);
1285 binder_debug(BINDER_DEBUG_TRANSACTION,
1286 " node %d u%016llx\n",
1287 node->debug_id, (u64)node->ptr);
1288 binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
1290 case BINDER_TYPE_HANDLE:
1291 case BINDER_TYPE_WEAK_HANDLE: {
1292 struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1295 pr_err("transaction release %d bad handle %d\n",
1296 debug_id, fp->handle);
1299 binder_debug(BINDER_DEBUG_TRANSACTION,
1300 " ref %d desc %d (node %d)\n",
1301 ref->debug_id, ref->desc, ref->node->debug_id);
1302 binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
1305 case BINDER_TYPE_FD:
1306 binder_debug(BINDER_DEBUG_TRANSACTION,
1307 " fd %d\n", fp->handle);
1309 task_close_fd(proc, fp->handle);
1313 pr_err("transaction release %d bad object type %x\n",
1314 debug_id, fp->type);
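/*
 * binder_transaction() is the core of the driver: it resolves the target
 * node/process (or the transaction being replied to), allocates a buffer in
 * the target's address space, copies the data and offsets in, and
 * translates every flat_binder_object so it is valid in the target
 * (binder -> handle, handle -> binder or new handle, fd -> newly installed
 * fd), before queueing the work and waking the target up.
 */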
1320 static void binder_transaction(struct binder_proc *proc,
1321 struct binder_thread *thread,
1322 struct binder_transaction_data *tr, int reply)
1324 struct binder_transaction *t;
1325 struct binder_work *tcomplete;
1326 binder_size_t *offp, *off_end;
1327 binder_size_t off_min;
1328 struct binder_proc *target_proc;
1329 struct binder_thread *target_thread = NULL;
1330 struct binder_node *target_node = NULL;
1331 struct list_head *target_list;
1332 wait_queue_head_t *target_wait;
1333 struct binder_transaction *in_reply_to = NULL;
1334 struct binder_transaction_log_entry *e;
1335 uint32_t return_error;
1337 e = binder_transaction_log_add(&binder_transaction_log);
1338 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1339 e->from_proc = proc->pid;
1340 e->from_thread = thread->pid;
1341 e->target_handle = tr->target.handle;
1342 e->data_size = tr->data_size;
1343 e->offsets_size = tr->offsets_size;
1346 in_reply_to = thread->transaction_stack;
1347 if (in_reply_to == NULL) {
1348 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
1349 proc->pid, thread->pid);
1350 return_error = BR_FAILED_REPLY;
1351 goto err_empty_call_stack;
1353 binder_set_nice(in_reply_to->saved_priority);
1354 if (in_reply_to->to_thread != thread) {
1355 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
1356 proc->pid, thread->pid, in_reply_to->debug_id,
1357 in_reply_to->to_proc ?
1358 in_reply_to->to_proc->pid : 0,
1359 in_reply_to->to_thread ?
1360 in_reply_to->to_thread->pid : 0);
1361 return_error = BR_FAILED_REPLY;
1363 goto err_bad_call_stack;
1365 thread->transaction_stack = in_reply_to->to_parent;
1366 target_thread = in_reply_to->from;
1367 if (target_thread == NULL) {
1368 return_error = BR_DEAD_REPLY;
1369 goto err_dead_binder;
1371 if (target_thread->transaction_stack != in_reply_to) {
1372 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
1373 proc->pid, thread->pid,
1374 target_thread->transaction_stack ?
1375 target_thread->transaction_stack->debug_id : 0,
1376 in_reply_to->debug_id);
1377 return_error = BR_FAILED_REPLY;
1379 target_thread = NULL;
1380 goto err_dead_binder;
1382 target_proc = target_thread->proc;
1384 if (tr->target.handle) {
1385 struct binder_ref *ref;
1387 ref = binder_get_ref(proc, tr->target.handle);
1389 binder_user_error("%d:%d got transaction to invalid handle\n",
1390 proc->pid, thread->pid);
1391 return_error = BR_FAILED_REPLY;
1392 goto err_invalid_target_handle;
1394 target_node = ref->node;
1396 target_node = binder_context_mgr_node;
1397 if (target_node == NULL) {
1398 return_error = BR_DEAD_REPLY;
1399 goto err_no_context_mgr_node;
1402 e->to_node = target_node->debug_id;
1403 target_proc = target_node->proc;
1404 if (target_proc == NULL) {
1405 return_error = BR_DEAD_REPLY;
1406 goto err_dead_binder;
1408 if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
1409 return_error = BR_FAILED_REPLY;
1410 goto err_invalid_target_handle;
1412 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1413 struct binder_transaction *tmp;
1415 tmp = thread->transaction_stack;
1416 if (tmp->to_thread != thread) {
1417 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
1418 proc->pid, thread->pid, tmp->debug_id,
1419 tmp->to_proc ? tmp->to_proc->pid : 0,
1421 tmp->to_thread->pid : 0);
1422 return_error = BR_FAILED_REPLY;
1423 goto err_bad_call_stack;
1426 if (tmp->from && tmp->from->proc == target_proc)
1427 target_thread = tmp->from;
1428 tmp = tmp->from_parent;
1432 if (target_thread) {
1433 e->to_thread = target_thread->pid;
1434 target_list = &target_thread->todo;
1435 target_wait = &target_thread->wait;
1437 target_list = &target_proc->todo;
1438 target_wait = &target_proc->wait;
1440 e->to_proc = target_proc->pid;
1442 /* TODO: reuse incoming transaction for reply */
1443 t = kzalloc(sizeof(*t), GFP_KERNEL);
1445 return_error = BR_FAILED_REPLY;
1446 goto err_alloc_t_failed;
1448 binder_stats_created(BINDER_STAT_TRANSACTION);
1450 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1451 if (tcomplete == NULL) {
1452 return_error = BR_FAILED_REPLY;
1453 goto err_alloc_tcomplete_failed;
1455 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1457 t->debug_id = ++binder_last_id;
1458 e->debug_id = t->debug_id;
1461 binder_debug(BINDER_DEBUG_TRANSACTION,
1462 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
1463 proc->pid, thread->pid, t->debug_id,
1464 target_proc->pid, target_thread->pid,
1465 (u64)tr->data.ptr.buffer,
1466 (u64)tr->data.ptr.offsets,
1467 (u64)tr->data_size, (u64)tr->offsets_size);
1469 binder_debug(BINDER_DEBUG_TRANSACTION,
1470 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
1471 proc->pid, thread->pid, t->debug_id,
1472 target_proc->pid, target_node->debug_id,
1473 (u64)tr->data.ptr.buffer,
1474 (u64)tr->data.ptr.offsets,
1475 (u64)tr->data_size, (u64)tr->offsets_size);
1477 if (!reply && !(tr->flags & TF_ONE_WAY))
1481 t->sender_euid = task_euid(proc->tsk);
1482 t->to_proc = target_proc;
1483 t->to_thread = target_thread;
1485 t->flags = tr->flags;
1486 t->priority = task_nice(current);
1488 trace_binder_transaction(reply, t, target_node);
1490 t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1491 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
1492 if (t->buffer == NULL) {
1493 return_error = BR_FAILED_REPLY;
1494 goto err_binder_alloc_buf_failed;
1496 t->buffer->allow_user_free = 0;
1497 t->buffer->debug_id = t->debug_id;
1498 t->buffer->transaction = t;
1499 t->buffer->target_node = target_node;
1500 trace_binder_transaction_alloc_buf(t->buffer);
1502 binder_inc_node(target_node, 1, 0, NULL);
1504 offp = (binder_size_t *)(t->buffer->data +
1505 ALIGN(tr->data_size, sizeof(void *)));
1507 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
1508 tr->data.ptr.buffer, tr->data_size)) {
1509 binder_user_error("%d:%d got transaction with invalid data ptr\n",
1510 proc->pid, thread->pid);
1511 return_error = BR_FAILED_REPLY;
1512 goto err_copy_data_failed;
1514 if (copy_from_user(offp, (const void __user *)(uintptr_t)
1515 tr->data.ptr.offsets, tr->offsets_size)) {
1516 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1517 proc->pid, thread->pid);
1518 return_error = BR_FAILED_REPLY;
1519 goto err_copy_data_failed;
1521 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
1522 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
1523 proc->pid, thread->pid, (u64)tr->offsets_size);
1524 return_error = BR_FAILED_REPLY;
1525 goto err_bad_offset;
1527 off_end = (void *)offp + tr->offsets_size;
1529 for (; offp < off_end; offp++) {
1530 struct flat_binder_object *fp;
1532 if (*offp > t->buffer->data_size - sizeof(*fp) ||
1534 t->buffer->data_size < sizeof(*fp) ||
1535 !IS_ALIGNED(*offp, sizeof(u32))) {
1536 binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
1537 proc->pid, thread->pid, (u64)*offp,
1539 (u64)(t->buffer->data_size -
1541 return_error = BR_FAILED_REPLY;
1542 goto err_bad_offset;
1544 fp = (struct flat_binder_object *)(t->buffer->data + *offp);
1545 off_min = *offp + sizeof(struct flat_binder_object);
1547 case BINDER_TYPE_BINDER:
1548 case BINDER_TYPE_WEAK_BINDER: {
1549 struct binder_ref *ref;
1550 struct binder_node *node = binder_get_node(proc, fp->binder);
1553 node = binder_new_node(proc, fp->binder, fp->cookie);
1555 return_error = BR_FAILED_REPLY;
1556 goto err_binder_new_node_failed;
1558 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1559 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1561 if (fp->cookie != node->cookie) {
1562 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1563 proc->pid, thread->pid,
1564 (u64)fp->binder, node->debug_id,
1565 (u64)fp->cookie, (u64)node->cookie);
1566 return_error = BR_FAILED_REPLY;
1567 goto err_binder_get_ref_for_node_failed;
1569 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
1570 return_error = BR_FAILED_REPLY;
1571 goto err_binder_get_ref_for_node_failed;
1573 ref = binder_get_ref_for_node(target_proc, node);
1575 return_error = BR_FAILED_REPLY;
1576 goto err_binder_get_ref_for_node_failed;
1578 if (fp->type == BINDER_TYPE_BINDER)
1579 fp->type = BINDER_TYPE_HANDLE;
1581 fp->type = BINDER_TYPE_WEAK_HANDLE;
1582 fp->handle = ref->desc;
1583 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
1586 trace_binder_transaction_node_to_ref(t, node, ref);
1587 binder_debug(BINDER_DEBUG_TRANSACTION,
1588 " node %d u%016llx -> ref %d desc %d\n",
1589 node->debug_id, (u64)node->ptr,
1590 ref->debug_id, ref->desc);
1592 case BINDER_TYPE_HANDLE:
1593 case BINDER_TYPE_WEAK_HANDLE: {
1594 struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1597 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1599 thread->pid, fp->handle);
1600 return_error = BR_FAILED_REPLY;
1601 goto err_binder_get_ref_failed;
1603 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
1604 return_error = BR_FAILED_REPLY;
1605 goto err_binder_get_ref_failed;
1607 if (ref->node->proc == target_proc) {
1608 if (fp->type == BINDER_TYPE_HANDLE)
1609 fp->type = BINDER_TYPE_BINDER;
1611 fp->type = BINDER_TYPE_WEAK_BINDER;
1612 fp->binder = ref->node->ptr;
1613 fp->cookie = ref->node->cookie;
1614 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
1615 trace_binder_transaction_ref_to_node(t, ref);
1616 binder_debug(BINDER_DEBUG_TRANSACTION,
1617 " ref %d desc %d -> node %d u%016llx\n",
1618 ref->debug_id, ref->desc, ref->node->debug_id,
1619 (u64)ref->node->ptr);
1621 struct binder_ref *new_ref;
1623 new_ref = binder_get_ref_for_node(target_proc, ref->node);
1624 if (new_ref == NULL) {
1625 return_error = BR_FAILED_REPLY;
1626 goto err_binder_get_ref_for_node_failed;
1628 fp->handle = new_ref->desc;
1629 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
1630 trace_binder_transaction_ref_to_ref(t, ref,
1632 binder_debug(BINDER_DEBUG_TRANSACTION,
1633 " ref %d desc %d -> ref %d desc %d (node %d)\n",
1634 ref->debug_id, ref->desc, new_ref->debug_id,
1635 new_ref->desc, ref->node->debug_id);
1639 case BINDER_TYPE_FD: {
1644 if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
1645 binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
1646 proc->pid, thread->pid, fp->handle);
1647 return_error = BR_FAILED_REPLY;
1648 goto err_fd_not_allowed;
1650 } else if (!target_node->accept_fds) {
1651 binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
1652 proc->pid, thread->pid, fp->handle);
1653 return_error = BR_FAILED_REPLY;
1654 goto err_fd_not_allowed;
1657 file = fget(fp->handle);
1659 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1660 proc->pid, thread->pid, fp->handle);
1661 return_error = BR_FAILED_REPLY;
1662 goto err_fget_failed;
1664 if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) {
1666 return_error = BR_FAILED_REPLY;
1667 goto err_get_unused_fd_failed;
1669 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1670 if (target_fd < 0) {
1672 return_error = BR_FAILED_REPLY;
1673 goto err_get_unused_fd_failed;
1675 task_fd_install(target_proc, target_fd, file);
1676 trace_binder_transaction_fd(t, fp->handle, target_fd);
1677 binder_debug(BINDER_DEBUG_TRANSACTION,
1678 " fd %d -> %d\n", fp->handle, target_fd);
1680 fp->handle = target_fd;
1684 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
1685 proc->pid, thread->pid, fp->type);
1686 return_error = BR_FAILED_REPLY;
1687 goto err_bad_object_type;
1691 BUG_ON(t->buffer->async_transaction != 0);
1692 binder_pop_transaction(target_thread, in_reply_to);
1693 } else if (!(t->flags & TF_ONE_WAY)) {
1694 BUG_ON(t->buffer->async_transaction != 0);
1696 t->from_parent = thread->transaction_stack;
1697 thread->transaction_stack = t;
1699 BUG_ON(target_node == NULL);
1700 BUG_ON(t->buffer->async_transaction != 1);
1701 if (target_node->has_async_transaction) {
1702 target_list = &target_node->async_todo;
1705 target_node->has_async_transaction = 1;
1707 t->work.type = BINDER_WORK_TRANSACTION;
1708 list_add_tail(&t->work.entry, target_list);
1709 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1710 list_add_tail(&tcomplete->entry, &thread->todo);
1712 wake_up_interruptible(target_wait);
1715 err_get_unused_fd_failed:
1718 err_binder_get_ref_for_node_failed:
1719 err_binder_get_ref_failed:
1720 err_binder_new_node_failed:
1721 err_bad_object_type:
1723 err_copy_data_failed:
1724 trace_binder_transaction_failed_buffer_release(t->buffer);
1725 binder_transaction_buffer_release(target_proc, t->buffer, offp);
1726 t->buffer->transaction = NULL;
1727 binder_free_buf(target_proc, t->buffer);
1728 err_binder_alloc_buf_failed:
1730 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1731 err_alloc_tcomplete_failed:
1733 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1736 err_empty_call_stack:
1738 err_invalid_target_handle:
1739 err_no_context_mgr_node:
1740 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1741 "%d:%d transaction failed %d, size %lld-%lld\n",
1742 proc->pid, thread->pid, return_error,
1743 (u64)tr->data_size, (u64)tr->offsets_size);
1746 struct binder_transaction_log_entry *fe;
1748 fe = binder_transaction_log_add(&binder_transaction_log_failed);
1752 BUG_ON(thread->return_error != BR_OK);
1754 thread->return_error = BR_TRANSACTION_COMPLETE;
1755 binder_send_failed_reply(in_reply_to, return_error);
1757 thread->return_error = return_error;
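/*
 * binder_thread_write() consumes BC_* commands from the user-supplied write
 * buffer until it is exhausted or an error is pending, updating *consumed
 * so the caller can report how much was processed.
 */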
1760 static int binder_thread_write(struct binder_proc *proc,
1761 struct binder_thread *thread,
1762 binder_uintptr_t binder_buffer, size_t size,
1763 binder_size_t *consumed)
1766 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
1767 void __user *ptr = buffer + *consumed;
1768 void __user *end = buffer + size;
1770 while (ptr < end && thread->return_error == BR_OK) {
1771 if (get_user(cmd, (uint32_t __user *)ptr))
1773 ptr += sizeof(uint32_t);
1774 trace_binder_command(cmd);
1775 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1776 binder_stats.bc[_IOC_NR(cmd)]++;
1777 proc->stats.bc[_IOC_NR(cmd)]++;
1778 thread->stats.bc[_IOC_NR(cmd)]++;
1786 struct binder_ref *ref;
1787 const char *debug_string;
1789 if (get_user(target, (uint32_t __user *)ptr))
1791 ptr += sizeof(uint32_t);
1792 if (target == 0 && binder_context_mgr_node &&
1793 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1794 ref = binder_get_ref_for_node(proc,
1795 binder_context_mgr_node);
1796 if (ref->desc != target) {
1797 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
1798 proc->pid, thread->pid,
1802 ref = binder_get_ref(proc, target);
1804 binder_user_error("%d:%d refcount change on invalid ref %d\n",
1805 proc->pid, thread->pid, target);
1810 debug_string = "IncRefs";
1811 binder_inc_ref(ref, 0, NULL);
1814 debug_string = "Acquire";
1815 binder_inc_ref(ref, 1, NULL);
1818 debug_string = "Release";
1819 binder_dec_ref(ref, 1);
1823 debug_string = "DecRefs";
1824 binder_dec_ref(ref, 0);
1827 binder_debug(BINDER_DEBUG_USER_REFS,
1828 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
1829 proc->pid, thread->pid, debug_string, ref->debug_id,
1830 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1833 case BC_INCREFS_DONE:
1834 case BC_ACQUIRE_DONE: {
1835 binder_uintptr_t node_ptr;
1836 binder_uintptr_t cookie;
1837 struct binder_node *node;
1839 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
1841 ptr += sizeof(binder_uintptr_t);
1842 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1844 ptr += sizeof(binder_uintptr_t);
1845 node = binder_get_node(proc, node_ptr);
1847 binder_user_error("%d:%d %s u%016llx no match\n",
1848 proc->pid, thread->pid,
1849 cmd == BC_INCREFS_DONE ?
1855 if (cookie != node->cookie) {
1856 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
1857 proc->pid, thread->pid,
1858 cmd == BC_INCREFS_DONE ?
1859 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1860 (u64)node_ptr, node->debug_id,
1861 (u64)cookie, (u64)node->cookie);
1864 if (cmd == BC_ACQUIRE_DONE) {
1865 if (node->pending_strong_ref == 0) {
1866 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
1867 proc->pid, thread->pid,
1871 node->pending_strong_ref = 0;
1873 if (node->pending_weak_ref == 0) {
1874 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
1875 proc->pid, thread->pid,
1879 node->pending_weak_ref = 0;
1881 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1882 binder_debug(BINDER_DEBUG_USER_REFS,
1883 "%d:%d %s node %d ls %d lw %d\n",
1884 proc->pid, thread->pid,
1885 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1886 node->debug_id, node->local_strong_refs, node->local_weak_refs);
1889 case BC_ATTEMPT_ACQUIRE:
1890 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
1892 case BC_ACQUIRE_RESULT:
1893 pr_err("BC_ACQUIRE_RESULT not supported\n");
1896 case BC_FREE_BUFFER: {
1897 binder_uintptr_t data_ptr;
1898 struct binder_buffer *buffer;
1900 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
1902 ptr += sizeof(binder_uintptr_t);
1904 buffer = binder_buffer_lookup(proc, data_ptr);
1905 if (buffer == NULL) {
1906 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
1907 proc->pid, thread->pid, (u64)data_ptr);
1910 if (!buffer->allow_user_free) {
1911 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
1912 proc->pid, thread->pid, (u64)data_ptr);
1915 binder_debug(BINDER_DEBUG_FREE_BUFFER,
1916 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
1917 proc->pid, thread->pid, (u64)data_ptr,
1919 buffer->transaction ? "active" : "finished");
1921 if (buffer->transaction) {
1922 buffer->transaction->buffer = NULL;
1923 buffer->transaction = NULL;
1925 if (buffer->async_transaction && buffer->target_node) {
1926 BUG_ON(!buffer->target_node->has_async_transaction);
1927 if (list_empty(&buffer->target_node->async_todo))
1928 buffer->target_node->has_async_transaction = 0;
1930 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
1932 trace_binder_transaction_buffer_release(buffer);
1933 binder_transaction_buffer_release(proc, buffer, NULL);
1934 binder_free_buf(proc, buffer);
1938 case BC_TRANSACTION:
1940 struct binder_transaction_data tr;
1942 if (copy_from_user(&tr, ptr, sizeof(tr)))
1945 binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
1949 case BC_REGISTER_LOOPER:
1950 binder_debug(BINDER_DEBUG_THREADS,
1951 "%d:%d BC_REGISTER_LOOPER\n",
1952 proc->pid, thread->pid);
1953 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
1954 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1955 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
1956 proc->pid, thread->pid);
1957 } else if (proc->requested_threads == 0) {
1958 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1959 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
1960 proc->pid, thread->pid);
1962 proc->requested_threads--;
1963 proc->requested_threads_started++;
1965 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
1967 case BC_ENTER_LOOPER:
1968 binder_debug(BINDER_DEBUG_THREADS,
1969 "%d:%d BC_ENTER_LOOPER\n",
1970 proc->pid, thread->pid);
1971 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
1972 thread->looper |= BINDER_LOOPER_STATE_INVALID;
1973 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
1974 proc->pid, thread->pid);
1976 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
1978 case BC_EXIT_LOOPER:
1979 binder_debug(BINDER_DEBUG_THREADS,
1980 "%d:%d BC_EXIT_LOOPER\n",
1981 proc->pid, thread->pid);
1982 thread->looper |= BINDER_LOOPER_STATE_EXITED;
1985 case BC_REQUEST_DEATH_NOTIFICATION:
1986 case BC_CLEAR_DEATH_NOTIFICATION: {
1988 binder_uintptr_t cookie;
1989 struct binder_ref *ref;
1990 struct binder_ref_death *death;
1992 if (get_user(target, (uint32_t __user *)ptr))
1994 ptr += sizeof(uint32_t);
1995 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1997 ptr += sizeof(binder_uintptr_t);
1998 ref = binder_get_ref(proc, target);
2000 binder_user_error("%d:%d %s invalid ref %d\n",
2001 proc->pid, thread->pid,
2002 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2003 "BC_REQUEST_DEATH_NOTIFICATION" :
2004 "BC_CLEAR_DEATH_NOTIFICATION",
2009 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2010 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
2011 proc->pid, thread->pid,
2012 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2013 "BC_REQUEST_DEATH_NOTIFICATION" :
2014 "BC_CLEAR_DEATH_NOTIFICATION",
2015 (u64)cookie, ref->debug_id, ref->desc,
2016 ref->strong, ref->weak, ref->node->debug_id);
2018 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2020 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2021 proc->pid, thread->pid);
2024 death = kzalloc(sizeof(*death), GFP_KERNEL);
2025 if (death == NULL) {
2026 thread->return_error = BR_ERROR;
2027 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2028 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
2029 proc->pid, thread->pid);
2032 binder_stats_created(BINDER_STAT_DEATH);
2033 INIT_LIST_HEAD(&death->work.entry);
2034 death->cookie = cookie;
2036 if (ref->node->proc == NULL) {
2037 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2038 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2039 list_add_tail(&ref->death->work.entry, &thread->todo);
2041 list_add_tail(&ref->death->work.entry, &proc->todo);
2042 wake_up_interruptible(&proc->wait);
2046 if (ref->death == NULL) {
2047 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2048 proc->pid, thread->pid);
2052 if (death->cookie != cookie) {
2053 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2054 proc->pid, thread->pid,
2060 if (list_empty(&death->work.entry)) {
2061 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2062 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2063 list_add_tail(&death->work.entry, &thread->todo);
2065 list_add_tail(&death->work.entry, &proc->todo);
2066 wake_up_interruptible(&proc->wait);
2069 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2070 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2074 case BC_DEAD_BINDER_DONE: {
2075 struct binder_work *w;
2076 binder_uintptr_t cookie;
2077 struct binder_ref_death *death = NULL;
2079 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2082 ptr += sizeof(void *);
2083 list_for_each_entry(w, &proc->delivered_death, entry) {
2084 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2086 if (tmp_death->cookie == cookie) {
2091 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2092 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2093 proc->pid, thread->pid, (u64)cookie,
2095 if (death == NULL) {
2096 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2097 proc->pid, thread->pid, (u64)cookie);
2101 list_del_init(&death->work.entry);
2102 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2103 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2104 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2105 list_add_tail(&death->work.entry, &thread->todo);
2107 list_add_tail(&death->work.entry, &proc->todo);
2108 wake_up_interruptible(&proc->wait);
2114 pr_err("%d:%d unknown command %d\n",
2115 proc->pid, thread->pid, cmd);
2118 *consumed = ptr - buffer;
2123 static void binder_stat_br(struct binder_proc *proc,
2124 struct binder_thread *thread, uint32_t cmd)
2126 trace_binder_return(cmd);
2127 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2128 binder_stats.br[_IOC_NR(cmd)]++;
2129 proc->stats.br[_IOC_NR(cmd)]++;
2130 thread->stats.br[_IOC_NR(cmd)]++;
2134 static int binder_has_proc_work(struct binder_proc *proc,
2135 struct binder_thread *thread)
2137 return !list_empty(&proc->todo) ||
2138 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2141 static int binder_has_thread_work(struct binder_thread *thread)
2143 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2144 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
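/*
 * binder_thread_read() fills the user-supplied read buffer with BR_*
 * commands.  A thread with no thread-local work waits (unless non_block) on
 * the process-wide queue, and each dequeued binder_work item is turned into
 * the command plus payload that userspace expects.
 */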
2147 static int binder_thread_read(struct binder_proc *proc,
2148 struct binder_thread *thread,
2149 binder_uintptr_t binder_buffer, size_t size,
2150 binder_size_t *consumed, int non_block)
2152 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2153 void __user *ptr = buffer + *consumed;
2154 void __user *end = buffer + size;
2157 int wait_for_proc_work;
2159 if (*consumed == 0) {
2160 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2162 ptr += sizeof(uint32_t);
2166 wait_for_proc_work = thread->transaction_stack == NULL &&
2167 list_empty(&thread->todo);
2169 if (thread->return_error != BR_OK && ptr < end) {
2170 if (thread->return_error2 != BR_OK) {
2171 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2173 ptr += sizeof(uint32_t);
2174 binder_stat_br(proc, thread, thread->return_error2);
2177 thread->return_error2 = BR_OK;
2179 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2181 ptr += sizeof(uint32_t);
2182 binder_stat_br(proc, thread, thread->return_error);
2183 thread->return_error = BR_OK;
2188 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2189 if (wait_for_proc_work)
2190 proc->ready_threads++;
2192 binder_unlock(__func__);
2194 trace_binder_wait_for_work(wait_for_proc_work,
2195 !!thread->transaction_stack,
2196 !list_empty(&thread->todo));
2197 if (wait_for_proc_work) {
2198 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2199 BINDER_LOOPER_STATE_ENTERED))) {
2200 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2201 proc->pid, thread->pid, thread->looper);
2202 wait_event_interruptible(binder_user_error_wait,
2203 binder_stop_on_user_error < 2);
2205 binder_set_nice(proc->default_priority);
2207 if (!binder_has_proc_work(proc, thread))
2210 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2213 if (!binder_has_thread_work(thread))
2216 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2219 binder_lock(__func__);
2221 if (wait_for_proc_work)
2222 proc->ready_threads--;
2223 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2230 struct binder_transaction_data tr;
2231 struct binder_work *w;
2232 struct binder_transaction *t = NULL;
2234 if (!list_empty(&thread->todo)) {
2235 w = list_first_entry(&thread->todo, struct binder_work,
2237 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2238 w = list_first_entry(&proc->todo, struct binder_work,
2242 if (ptr - buffer == 4 &&
2243 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
2248 if (end - ptr < sizeof(tr) + 4)
2252 case BINDER_WORK_TRANSACTION: {
2253 t = container_of(w, struct binder_transaction, work);
2255 case BINDER_WORK_TRANSACTION_COMPLETE: {
2256 cmd = BR_TRANSACTION_COMPLETE;
2257 if (put_user(cmd, (uint32_t __user *)ptr))
2259 ptr += sizeof(uint32_t);
2261 binder_stat_br(proc, thread, cmd);
2262 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2263 "%d:%d BR_TRANSACTION_COMPLETE\n",
2264 proc->pid, thread->pid);
2266 list_del(&w->entry);
2268 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2270 case BINDER_WORK_NODE: {
2271 struct binder_node *node = container_of(w, struct binder_node, work);
2272 uint32_t cmd = BR_NOOP;
2273 const char *cmd_name;
2274 int strong = node->internal_strong_refs || node->local_strong_refs;
2275 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
2277 if (weak && !node->has_weak_ref) {
2279 cmd_name = "BR_INCREFS";
2280 node->has_weak_ref = 1;
2281 node->pending_weak_ref = 1;
2282 node->local_weak_refs++;
2283 } else if (strong && !node->has_strong_ref) {
2285 cmd_name = "BR_ACQUIRE";
2286 node->has_strong_ref = 1;
2287 node->pending_strong_ref = 1;
2288 node->local_strong_refs++;
2289 } else if (!strong && node->has_strong_ref) {
2291 cmd_name = "BR_RELEASE";
2292 node->has_strong_ref = 0;
2293 } else if (!weak && node->has_weak_ref) {
2295 cmd_name = "BR_DECREFS";
2296 node->has_weak_ref = 0;
2298 if (cmd != BR_NOOP) {
2299 if (put_user(cmd, (uint32_t __user *)ptr))
2301 ptr += sizeof(uint32_t);
2302 if (put_user(node->ptr,
2303 (binder_uintptr_t __user *)ptr))
2305 ptr += sizeof(binder_uintptr_t);
2306 if (put_user(node->cookie,
2307 (binder_uintptr_t __user *)ptr))
2309 ptr += sizeof(binder_uintptr_t);
2311 binder_stat_br(proc, thread, cmd);
2312 binder_debug(BINDER_DEBUG_USER_REFS,
2313 "%d:%d %s %d u%016llx c%016llx\n",
2314 proc->pid, thread->pid, cmd_name,
2316 (u64)node->ptr, (u64)node->cookie);
2318 list_del_init(&w->entry);
2319 if (!weak && !strong) {
2320 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2321 "%d:%d node %d u%016llx c%016llx deleted\n",
2322 proc->pid, thread->pid,
2326 rb_erase(&node->rb_node, &proc->nodes);
2328 binder_stats_deleted(BINDER_STAT_NODE);
2330 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2331 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2332 proc->pid, thread->pid,
2339 case BINDER_WORK_DEAD_BINDER:
2340 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2341 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2342 struct binder_ref_death *death;
2345 death = container_of(w, struct binder_ref_death, work);
2346 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2347 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2349 cmd = BR_DEAD_BINDER;
2350 if (put_user(cmd, (uint32_t __user *)ptr))
2352 ptr += sizeof(uint32_t);
2353 if (put_user(death->cookie,
2354 (binder_uintptr_t __user *)ptr))
2356 ptr += sizeof(binder_uintptr_t);
2357 binder_stat_br(proc, thread, cmd);
2358 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2359 "%d:%d %s %016llx\n",
2360 proc->pid, thread->pid,
2361 cmd == BR_DEAD_BINDER ?
2363 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2364 (u64)death->cookie);
2366 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2367 list_del(&w->entry);
2369 binder_stats_deleted(BINDER_STAT_DEATH);
2371 list_move(&w->entry, &proc->delivered_death);
2372 if (cmd == BR_DEAD_BINDER)
2373 goto done; /* DEAD_BINDER notifications can cause transactions */
2380 BUG_ON(t->buffer == NULL);
2381 if (t->buffer->target_node) {
2382 struct binder_node *target_node = t->buffer->target_node;
2384 tr.target.ptr = target_node->ptr;
2385 tr.cookie = target_node->cookie;
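/*
 * Temporarily adjust this thread's nice level for the transaction:
 * synchronous calls may inherit the sender's priority, bounded by the
 * target node's min_priority.  The original nice level is kept in
 * t->saved_priority so it can be restored later.
 */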
2386 t->saved_priority = task_nice(current);
2387 if (t->priority < target_node->min_priority &&
2388 !(t->flags & TF_ONE_WAY))
2389 binder_set_nice(t->priority);
2390 else if (!(t->flags & TF_ONE_WAY) ||
2391 t->saved_priority > target_node->min_priority)
2392 binder_set_nice(target_node->min_priority);
2393 cmd = BR_TRANSACTION;
2400 tr.flags = t->flags;
2401 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
2404 struct task_struct *sender = t->from->proc->tsk;
2406 tr.sender_pid = task_tgid_nr_ns(sender,
2407 task_active_pid_ns(current));
2412 tr.data_size = t->buffer->data_size;
2413 tr.offsets_size = t->buffer->offsets_size;
2414 tr.data.ptr.buffer = (binder_uintptr_t)(
2415 (uintptr_t)t->buffer->data +
2416 proc->user_buffer_offset);
2417 tr.data.ptr.offsets = tr.data.ptr.buffer +
2418 ALIGN(t->buffer->data_size,
2421 if (put_user(cmd, (uint32_t __user *)ptr))
2423 ptr += sizeof(uint32_t);
2424 if (copy_to_user(ptr, &tr, sizeof(tr)))
2428 trace_binder_transaction_received(t);
2429 binder_stat_br(proc, thread, cmd);
2430 binder_debug(BINDER_DEBUG_TRANSACTION,
2431 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
2432 proc->pid, thread->pid,
2433 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2435 t->debug_id, t->from ? t->from->proc->pid : 0,
2436 t->from ? t->from->pid : 0, cmd,
2437 t->buffer->data_size, t->buffer->offsets_size,
2438 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
2440 list_del(&t->work.entry);
2441 t->buffer->allow_user_free = 1;
2442 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2443 t->to_parent = thread->transaction_stack;
2444 t->to_thread = thread;
2445 thread->transaction_stack = t;
2447 t->buffer->transaction = NULL;
2449 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2456 *consumed = ptr - buffer;
2457 if (proc->requested_threads + proc->ready_threads == 0 &&
2458 proc->requested_threads_started < proc->max_threads &&
2459 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2460 BINDER_LOOPER_STATE_ENTERED))
2461 /* the user-space code fails to spawn a new thread if we leave this out */) {
2462 proc->requested_threads++;
2463 binder_debug(BINDER_DEBUG_THREADS,
2464 "%d:%d BR_SPAWN_LOOPER\n",
2465 proc->pid, thread->pid);
2466 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2468 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
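/*
 * Discard work items left on a todo list when a thread or process goes
 * away.  Undelivered synchronous transactions get a BR_DEAD_REPLY back
 * to their sender; transaction-complete and death-notification entries
 * are simply freed.
 */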
2473 static void binder_release_work(struct list_head *list)
2475 struct binder_work *w;
2477 while (!list_empty(list)) {
2478 w = list_first_entry(list, struct binder_work, entry);
2479 list_del_init(&w->entry);
2481 case BINDER_WORK_TRANSACTION: {
2482 struct binder_transaction *t;
2484 t = container_of(w, struct binder_transaction, work);
2485 if (t->buffer->target_node &&
2486 !(t->flags & TF_ONE_WAY)) {
2487 binder_send_failed_reply(t, BR_DEAD_REPLY);
2489 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2490 "undelivered transaction %d\n",
2492 t->buffer->transaction = NULL;
2494 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2497 case BINDER_WORK_TRANSACTION_COMPLETE: {
2498 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2499 "undelivered TRANSACTION_COMPLETE\n");
2501 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2503 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2504 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2505 struct binder_ref_death *death;
2507 death = container_of(w, struct binder_ref_death, work);
2508 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2509 "undelivered death notification, %016llx\n",
2510 (u64)death->cookie);
2512 binder_stats_deleted(BINDER_STAT_DEATH);
2515 pr_err("unexpected work type, %d, not freed\n",
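/*
 * Look up the binder_thread for the calling task in proc->threads (an
 * rbtree keyed by pid), creating and inserting one on first use with
 * BINDER_LOOPER_STATE_NEED_RETURN set.
 */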
2523 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2525 struct binder_thread *thread = NULL;
2526 struct rb_node *parent = NULL;
2527 struct rb_node **p = &proc->threads.rb_node;
2531 thread = rb_entry(parent, struct binder_thread, rb_node);
2533 if (current->pid < thread->pid)
2535 else if (current->pid > thread->pid)
2536 p = &(*p)->rb_right;
2541 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2544 binder_stats_created(BINDER_STAT_THREAD);
2545 thread->proc = proc;
2546 thread->pid = current->pid;
2547 init_waitqueue_head(&thread->wait);
2548 INIT_LIST_HEAD(&thread->todo);
2549 rb_link_node(&thread->rb_node, parent, p);
2550 rb_insert_color(&thread->rb_node, &proc->threads);
2551 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2552 thread->return_error = BR_OK;
2553 thread->return_error2 = BR_OK;
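/*
 * Remove a thread from its process, unwind its transaction stack
 * (sending BR_DEAD_REPLY for the first transaction that still expects a
 * reply from this thread), release any queued work and free the thread.
 * Returns the number of transactions that were still active.
 */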
2558 static int binder_free_thread(struct binder_proc *proc,
2559 struct binder_thread *thread)
2561 struct binder_transaction *t;
2562 struct binder_transaction *send_reply = NULL;
2563 int active_transactions = 0;
2565 rb_erase(&thread->rb_node, &proc->threads);
2566 t = thread->transaction_stack;
2567 if (t && t->to_thread == thread)
2570 active_transactions++;
2571 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2572 "release %d:%d transaction %d %s, still active\n",
2573 proc->pid, thread->pid,
2575 (t->to_thread == thread) ? "in" : "out");
2577 if (t->to_thread == thread) {
2579 t->to_thread = NULL;
2581 t->buffer->transaction = NULL;
2585 } else if (t->from == thread) {
2592 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2593 binder_release_work(&thread->todo);
2595 binder_stats_deleted(BINDER_STAT_THREAD);
2596 return active_transactions;
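/*
 * poll() support: report readable when the calling thread (or, if the
 * thread is idle, its process) has work queued.  Which wait queue we
 * sleep on mirrors the choice binder_thread_read() would make.
 */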
2599 static unsigned int binder_poll(struct file *filp,
2600 struct poll_table_struct *wait)
2602 struct binder_proc *proc = filp->private_data;
2603 struct binder_thread *thread = NULL;
2604 int wait_for_proc_work;
2606 binder_lock(__func__);
2608 thread = binder_get_thread(proc);
2610 wait_for_proc_work = thread->transaction_stack == NULL &&
2611 list_empty(&thread->todo) && thread->return_error == BR_OK;
2613 binder_unlock(__func__);
2615 if (wait_for_proc_work) {
2616 if (binder_has_proc_work(proc, thread))
2618 poll_wait(filp, &proc->wait, wait);
2619 if (binder_has_proc_work(proc, thread))
2622 if (binder_has_thread_work(thread))
2624 poll_wait(filp, &thread->wait, wait);
2625 if (binder_has_thread_work(thread))
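/*
 * BINDER_WRITE_READ: copy a struct binder_write_read from user space,
 * consume the write buffer via binder_thread_write(), then fill the
 * read buffer via binder_thread_read(), and copy the structure back so
 * user space can see how much of each buffer was consumed.
 *
 * Roughly, user space drives this as follows (sketch only; error
 * handling and command encoding omitted, names are placeholders):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)out_cmds,
 *		.write_size   = out_len,
 *		.read_buffer  = (binder_uintptr_t)in_buf,
 *		.read_size    = sizeof(in_buf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */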
2631 static int binder_ioctl_write_read(struct file *filp,
2632 unsigned int cmd, unsigned long arg,
2633 struct binder_thread *thread)
2636 struct binder_proc *proc = filp->private_data;
2637 unsigned int size = _IOC_SIZE(cmd);
2638 void __user *ubuf = (void __user *)arg;
2639 struct binder_write_read bwr;
2641 if (size != sizeof(struct binder_write_read)) {
2645 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
2649 binder_debug(BINDER_DEBUG_READ_WRITE,
2650 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
2651 proc->pid, thread->pid,
2652 (u64)bwr.write_size, (u64)bwr.write_buffer,
2653 (u64)bwr.read_size, (u64)bwr.read_buffer);
2655 if (bwr.write_size > 0) {
2656 ret = binder_thread_write(proc, thread,
2659 &bwr.write_consumed);
2660 trace_binder_write_done(ret);
2662 bwr.read_consumed = 0;
2663 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2668 if (bwr.read_size > 0) {
2669 ret = binder_thread_read(proc, thread, bwr.read_buffer,
2672 filp->f_flags & O_NONBLOCK);
2673 trace_binder_read_done(ret);
2674 if (!list_empty(&proc->todo))
2675 wake_up_interruptible(&proc->wait);
2677 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2682 binder_debug(BINDER_DEBUG_READ_WRITE,
2683 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
2684 proc->pid, thread->pid,
2685 (u64)bwr.write_consumed, (u64)bwr.write_size,
2686 (u64)bwr.read_consumed, (u64)bwr.read_size);
2687 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
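/*
 * BINDER_SET_CONTEXT_MGR: make the calling process the context manager,
 * i.e. the owner of handle 0.  Only one context manager may exist; the
 * request must pass the security_binder_set_context_mgr() hook and, if
 * a manager uid was already recorded, match it.  Typically
 * servicemanager issues this ioctl right after opening /dev/binder.
 */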
2695 static int binder_ioctl_set_ctx_mgr(struct file *filp)
2698 struct binder_proc *proc = filp->private_data;
2699 kuid_t curr_euid = current_euid();
2701 if (binder_context_mgr_node != NULL) {
2702 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
2706 ret = security_binder_set_context_mgr(proc->tsk);
2709 if (uid_valid(binder_context_mgr_uid)) {
2710 if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
2711 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
2712 from_kuid(&init_user_ns, curr_euid),
2713 from_kuid(&init_user_ns,
2714 binder_context_mgr_uid));
2719 binder_context_mgr_uid = curr_euid;
2721 binder_context_mgr_node = binder_new_node(proc, 0, 0);
2722 if (binder_context_mgr_node == NULL) {
2726 binder_context_mgr_node->local_weak_refs++;
2727 binder_context_mgr_node->local_strong_refs++;
2728 binder_context_mgr_node->has_strong_ref = 1;
2729 binder_context_mgr_node->has_weak_ref = 1;
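/*
 * Main ioctl entry point.  All commands run under the global binder
 * lock, after waiting for any user-error stop condition to clear, and
 * operate on the per-thread state returned by binder_get_thread().
 */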
2734 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2737 struct binder_proc *proc = filp->private_data;
2738 struct binder_thread *thread;
2739 unsigned int size = _IOC_SIZE(cmd);
2740 void __user *ubuf = (void __user *)arg;
2742 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
2743 proc->pid, current->pid, cmd, arg);*/
2745 trace_binder_ioctl(cmd, arg);
2747 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2751 binder_lock(__func__);
2752 thread = binder_get_thread(proc);
2753 if (thread == NULL) {
2759 case BINDER_WRITE_READ:
2760 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
2764 case BINDER_SET_MAX_THREADS:
2765 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2770 case BINDER_SET_CONTEXT_MGR:
2771 ret = binder_ioctl_set_ctx_mgr(filp);
2775 case BINDER_THREAD_EXIT:
2776 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
2777 proc->pid, thread->pid);
2778 binder_free_thread(proc, thread);
2781 case BINDER_VERSION: {
2782 struct binder_version __user *ver = ubuf;
2784 if (size != sizeof(struct binder_version)) {
2788 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
2789 &ver->protocol_version)) {
2802 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
2803 binder_unlock(__func__);
2804 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2805 if (ret && ret != -ERESTARTSYS)
2806 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
2808 trace_binder_ioctl_done(ret);
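/*
 * vm_operations for the binder buffer mapping: the driver inserts pages
 * explicitly, so a fault means user space touched an unmapped part of
 * the area and gets SIGBUS.
 */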
2812 static void binder_vma_open(struct vm_area_struct *vma)
2814 struct binder_proc *proc = vma->vm_private_data;
2816 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2817 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2818 proc->pid, vma->vm_start, vma->vm_end,
2819 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2820 (unsigned long)pgprot_val(vma->vm_page_prot));
2823 static void binder_vma_close(struct vm_area_struct *vma)
2825 struct binder_proc *proc = vma->vm_private_data;
2827 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2828 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2829 proc->pid, vma->vm_start, vma->vm_end,
2830 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2831 (unsigned long)pgprot_val(vma->vm_page_prot));
2833 proc->vma_vm_mm = NULL;
2834 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2837 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2839 return VM_FAULT_SIGBUS;
2842 static struct vm_operations_struct binder_vm_ops = {
2843 .open = binder_vma_open,
2844 .close = binder_vma_close,
2845 .fault = binder_vm_fault,
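/*
 * Set up the shared transaction buffer.  The mapping is capped at 4MB,
 * must not be writable from user space, and may only be created once
 * per process.  A matching kernel vm area is reserved and
 * proc->user_buffer_offset records the constant delta between the two,
 * so kernel buffer addresses can be handed to user space by simple
 * addition.  Only the first page is populated here; the rest are
 * allocated on demand by the buffer allocator.
 *
 * A typical client maps the fd read-only and private, roughly
 * (sketch only; sizes and flags vary by user-space implementation):
 *
 *	int fd = open("/dev/binder", O_RDWR);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 */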
2848 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
2851 struct vm_struct *area;
2852 struct binder_proc *proc = filp->private_data;
2853 const char *failure_string;
2854 struct binder_buffer *buffer;
2856 if (proc->tsk != current)
2859 if ((vma->vm_end - vma->vm_start) > SZ_4M)
2860 vma->vm_end = vma->vm_start + SZ_4M;
2862 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2863 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
2864 proc->pid, vma->vm_start, vma->vm_end,
2865 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2866 (unsigned long)pgprot_val(vma->vm_page_prot));
2868 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
2870 failure_string = "bad vm_flags";
2873 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
2875 mutex_lock(&binder_mmap_lock);
2878 failure_string = "already mapped";
2879 goto err_already_mapped;
2882 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
2885 failure_string = "get_vm_area";
2886 goto err_get_vm_area_failed;
2888 proc->buffer = area->addr;
2889 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
2890 mutex_unlock(&binder_mmap_lock);
2892 #ifdef CONFIG_CPU_CACHE_VIPT
2893 if (cache_is_vipt_aliasing()) {
2894 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
2895 pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
2896 vma->vm_start += PAGE_SIZE;
2900 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
2901 if (proc->pages == NULL) {
2903 failure_string = "alloc page array";
2904 goto err_alloc_pages_failed;
2906 proc->buffer_size = vma->vm_end - vma->vm_start;
2908 vma->vm_ops = &binder_vm_ops;
2909 vma->vm_private_data = proc;
2911 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
2913 failure_string = "alloc small buf";
2914 goto err_alloc_small_buf_failed;
2916 buffer = proc->buffer;
2917 INIT_LIST_HEAD(&proc->buffers);
2918 list_add(&buffer->entry, &proc->buffers);
2920 binder_insert_free_buffer(proc, buffer);
2921 proc->free_async_space = proc->buffer_size / 2;
2923 proc->files = get_files_struct(current);
2925 proc->vma_vm_mm = vma->vm_mm;
2927 /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
2928 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
2931 err_alloc_small_buf_failed:
2934 err_alloc_pages_failed:
2935 mutex_lock(&binder_mmap_lock);
2936 vfree(proc->buffer);
2937 proc->buffer = NULL;
2938 err_get_vm_area_failed:
2940 mutex_unlock(&binder_mmap_lock);
2942 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
2943 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
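/*
 * open() on /dev/binder: allocate a binder_proc for the opening
 * process, take a reference on its task, add it to the global
 * binder_procs list and create its debugfs entry.  The binder_proc is
 * stored in filp->private_data for the other file operations.
 */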
2947 static int binder_open(struct inode *nodp, struct file *filp)
2949 struct binder_proc *proc;
2951 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
2952 current->group_leader->pid, current->pid);
2954 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
2957 get_task_struct(current);
2958 proc->tsk = current;
2959 INIT_LIST_HEAD(&proc->todo);
2960 init_waitqueue_head(&proc->wait);
2961 proc->default_priority = task_nice(current);
2963 binder_lock(__func__);
2965 binder_stats_created(BINDER_STAT_PROC);
2966 hlist_add_head(&proc->proc_node, &binder_procs);
2967 proc->pid = current->group_leader->pid;
2968 INIT_LIST_HEAD(&proc->delivered_death);
2969 filp->private_data = proc;
2971 binder_unlock(__func__);
2973 if (binder_debugfs_dir_entry_proc) {
2976 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
2977 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
2978 binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
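/*
 * flush() defers a BINDER_DEFERRED_FLUSH work item which marks every
 * thread with BINDER_LOOPER_STATE_NEED_RETURN and wakes all waiters,
 * forcing blocked reads to return to user space.
 */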
2984 static int binder_flush(struct file *filp, fl_owner_t id)
2986 struct binder_proc *proc = filp->private_data;
2988 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
2993 static void binder_deferred_flush(struct binder_proc *proc)
2998 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
2999 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
3001 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3002 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3003 wake_up_interruptible(&thread->wait);
3007 wake_up_interruptible_all(&proc->wait);
3009 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3010 "binder_flush: %d woke %d threads\n", proc->pid,
3014 static int binder_release(struct inode *nodp, struct file *filp)
3016 struct binder_proc *proc = filp->private_data;
3018 debugfs_remove(proc->debugfs_entry);
3019 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
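/*
 * Release one node while tearing down its owning process.  Nodes with
 * no remaining references are freed immediately; otherwise the node is
 * moved to the global dead-node list and every reference that
 * registered a death notification gets a BINDER_WORK_DEAD_BINDER item
 * queued on its process.  Returns the updated incoming-reference count.
 */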
3024 static int binder_node_release(struct binder_node *node, int refs)
3026 struct binder_ref *ref;
3029 list_del_init(&node->work.entry);
3030 binder_release_work(&node->async_todo);
3032 if (hlist_empty(&node->refs)) {
3034 binder_stats_deleted(BINDER_STAT_NODE);
3040 node->local_strong_refs = 0;
3041 node->local_weak_refs = 0;
3042 hlist_add_head(&node->dead_node, &binder_dead_nodes);
3044 hlist_for_each_entry(ref, &node->refs, node_entry) {
3052 if (list_empty(&ref->death->work.entry)) {
3053 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3054 list_add_tail(&ref->death->work.entry,
3056 wake_up_interruptible(&ref->proc->wait);
3061 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3062 "node %d now dead, refs %d, death %d\n",
3063 node->debug_id, refs, death);
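/*
 * Final teardown of a binder_proc once its file has been released.
 * Remove it from the global list, drop the context manager node if it
 * belonged to this process, free every thread (failing transactions
 * still in flight), release nodes, references and pending work, reclaim
 * outstanding buffers and their pages, and finally free the proc.
 */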
3068 static void binder_deferred_release(struct binder_proc *proc)
3070 struct binder_transaction *t;
3072 int threads, nodes, incoming_refs, outgoing_refs, buffers,
3073 active_transactions, page_count;
3076 BUG_ON(proc->files);
3078 hlist_del(&proc->proc_node);
3080 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
3081 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3082 "%s: %d context_mgr_node gone\n",
3083 __func__, proc->pid);
3084 binder_context_mgr_node = NULL;
3088 active_transactions = 0;
3089 while ((n = rb_first(&proc->threads))) {
3090 struct binder_thread *thread;
3092 thread = rb_entry(n, struct binder_thread, rb_node);
3094 active_transactions += binder_free_thread(proc, thread);
3099 while ((n = rb_first(&proc->nodes))) {
3100 struct binder_node *node;
3102 node = rb_entry(n, struct binder_node, rb_node);
3104 rb_erase(&node->rb_node, &proc->nodes);
3105 incoming_refs = binder_node_release(node, incoming_refs);
3109 while ((n = rb_first(&proc->refs_by_desc))) {
3110 struct binder_ref *ref;
3112 ref = rb_entry(n, struct binder_ref, rb_node_desc);
3114 binder_delete_ref(ref);
3117 binder_release_work(&proc->todo);
3118 binder_release_work(&proc->delivered_death);
3121 while ((n = rb_first(&proc->allocated_buffers))) {
3122 struct binder_buffer *buffer;
3124 buffer = rb_entry(n, struct binder_buffer, rb_node);
3126 t = buffer->transaction;
3129 buffer->transaction = NULL;
3130 pr_err("release proc %d, transaction %d, not freed\n",
3131 proc->pid, t->debug_id);
3135 binder_free_buf(proc, buffer);
3139 binder_stats_deleted(BINDER_STAT_PROC);
3145 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3148 if (!proc->pages[i])
3151 page_addr = proc->buffer + i * PAGE_SIZE;
3152 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
3153 "%s: %d: page %d at %p not freed\n",
3154 __func__, proc->pid, i, page_addr);
3155 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
3156 __free_page(proc->pages[i]);
3160 vfree(proc->buffer);
3163 put_task_struct(proc->tsk);
3165 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3166 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
3167 __func__, proc->pid, threads, nodes, incoming_refs,
3168 outgoing_refs, active_transactions, buffers, page_count);
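/*
 * Deferred work: flush, release and put-files requests are recorded in
 * proc->deferred_work and the proc is queued on binder_deferred_list.
 * A single-threaded workqueue drains the list one process at a time
 * under the global binder lock, dropping the files_struct outside it.
 */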
3173 static void binder_deferred_func(struct work_struct *work)
3175 struct binder_proc *proc;
3176 struct files_struct *files;
3181 binder_lock(__func__);
3182 mutex_lock(&binder_deferred_lock);
3183 if (!hlist_empty(&binder_deferred_list)) {
3184 proc = hlist_entry(binder_deferred_list.first,
3185 struct binder_proc, deferred_work_node);
3186 hlist_del_init(&proc->deferred_work_node);
3187 defer = proc->deferred_work;
3188 proc->deferred_work = 0;
3193 mutex_unlock(&binder_deferred_lock);
3196 if (defer & BINDER_DEFERRED_PUT_FILES) {
3197 files = proc->files;
3202 if (defer & BINDER_DEFERRED_FLUSH)
3203 binder_deferred_flush(proc);
3205 if (defer & BINDER_DEFERRED_RELEASE)
3206 binder_deferred_release(proc); /* frees proc */
3208 binder_unlock(__func__);
3210 put_files_struct(files);
3213 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3216 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3218 mutex_lock(&binder_deferred_lock);
3219 proc->deferred_work |= defer;
3220 if (hlist_unhashed(&proc->deferred_work_node)) {
3221 hlist_add_head(&proc->deferred_work_node,
3222 &binder_deferred_list);
3223 queue_work(binder_deferred_workqueue, &binder_deferred_work);
3225 mutex_unlock(&binder_deferred_lock);
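/*
 * The print_binder_* helpers below format driver state for the debugfs
 * files (state, stats, transactions and the per-proc dumps).
 */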
3228 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3229 struct binder_transaction *t)
3232 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3233 prefix, t->debug_id, t,
3234 t->from ? t->from->proc->pid : 0,
3235 t->from ? t->from->pid : 0,
3236 t->to_proc ? t->to_proc->pid : 0,
3237 t->to_thread ? t->to_thread->pid : 0,
3238 t->code, t->flags, t->priority, t->need_reply);
3239 if (t->buffer == NULL) {
3240 seq_puts(m, " buffer free\n");
3243 if (t->buffer->target_node)
3244 seq_printf(m, " node %d",
3245 t->buffer->target_node->debug_id);
3246 seq_printf(m, " size %zd:%zd data %p\n",
3247 t->buffer->data_size, t->buffer->offsets_size,
3251 static void print_binder_buffer(struct seq_file *m, const char *prefix,
3252 struct binder_buffer *buffer)
3254 seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
3255 prefix, buffer->debug_id, buffer->data,
3256 buffer->data_size, buffer->offsets_size,
3257 buffer->transaction ? "active" : "delivered");
3260 static void print_binder_work(struct seq_file *m, const char *prefix,
3261 const char *transaction_prefix,
3262 struct binder_work *w)
3264 struct binder_node *node;
3265 struct binder_transaction *t;
3268 case BINDER_WORK_TRANSACTION:
3269 t = container_of(w, struct binder_transaction, work);
3270 print_binder_transaction(m, transaction_prefix, t);
3272 case BINDER_WORK_TRANSACTION_COMPLETE:
3273 seq_printf(m, "%stransaction complete\n", prefix);
3275 case BINDER_WORK_NODE:
3276 node = container_of(w, struct binder_node, work);
3277 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3278 prefix, node->debug_id,
3279 (u64)node->ptr, (u64)node->cookie);
3281 case BINDER_WORK_DEAD_BINDER:
3282 seq_printf(m, "%shas dead binder\n", prefix);
3284 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3285 seq_printf(m, "%shas cleared dead binder\n", prefix);
3287 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3288 seq_printf(m, "%shas cleared death notification\n", prefix);
3291 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3296 static void print_binder_thread(struct seq_file *m,
3297 struct binder_thread *thread,
3300 struct binder_transaction *t;
3301 struct binder_work *w;
3302 size_t start_pos = m->count;
3305 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
3306 header_pos = m->count;
3307 t = thread->transaction_stack;
3309 if (t->from == thread) {
3310 print_binder_transaction(m,
3311 " outgoing transaction", t);
3313 } else if (t->to_thread == thread) {
3314 print_binder_transaction(m,
3315 " incoming transaction", t);
3318 print_binder_transaction(m, " bad transaction", t);
3322 list_for_each_entry(w, &thread->todo, entry) {
3323 print_binder_work(m, " ", " pending transaction", w);
3325 if (!print_always && m->count == header_pos)
3326 m->count = start_pos;
3329 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3331 struct binder_ref *ref;
3332 struct binder_work *w;
3336 hlist_for_each_entry(ref, &node->refs, node_entry)
3339 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3340 node->debug_id, (u64)node->ptr, (u64)node->cookie,
3341 node->has_strong_ref, node->has_weak_ref,
3342 node->local_strong_refs, node->local_weak_refs,
3343 node->internal_strong_refs, count);
3345 seq_puts(m, " proc");
3346 hlist_for_each_entry(ref, &node->refs, node_entry)
3347 seq_printf(m, " %d", ref->proc->pid);
3350 list_for_each_entry(w, &node->async_todo, entry)
3351 print_binder_work(m, " ",
3352 " pending async transaction", w);
3355 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3357 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
3358 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3359 ref->node->debug_id, ref->strong, ref->weak, ref->death);
3362 static void print_binder_proc(struct seq_file *m,
3363 struct binder_proc *proc, int print_all)
3365 struct binder_work *w;
3367 size_t start_pos = m->count;
3370 seq_printf(m, "proc %d\n", proc->pid);
3371 header_pos = m->count;
3373 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3374 print_binder_thread(m, rb_entry(n, struct binder_thread,
3375 rb_node), print_all);
3376 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3377 struct binder_node *node = rb_entry(n, struct binder_node,
3379 if (print_all || node->has_async_transaction)
3380 print_binder_node(m, node);
3383 for (n = rb_first(&proc->refs_by_desc);
3386 print_binder_ref(m, rb_entry(n, struct binder_ref,
3389 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3390 print_binder_buffer(m, " buffer",
3391 rb_entry(n, struct binder_buffer, rb_node));
3392 list_for_each_entry(w, &proc->todo, entry)
3393 print_binder_work(m, " ", " pending transaction", w);
3394 list_for_each_entry(w, &proc->delivered_death, entry) {
3395 seq_puts(m, " has delivered dead binder\n");
3398 if (!print_all && m->count == header_pos)
3399 m->count = start_pos;
3402 static const char * const binder_return_strings[] = {
3407 "BR_ACQUIRE_RESULT",
3409 "BR_TRANSACTION_COMPLETE",
3414 "BR_ATTEMPT_ACQUIRE",
3419 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3423 static const char * const binder_command_strings[] = {
3426 "BC_ACQUIRE_RESULT",
3434 "BC_ATTEMPT_ACQUIRE",
3435 "BC_REGISTER_LOOPER",
3438 "BC_REQUEST_DEATH_NOTIFICATION",
3439 "BC_CLEAR_DEATH_NOTIFICATION",
3440 "BC_DEAD_BINDER_DONE"
3443 static const char * const binder_objstat_strings[] = {
3450 "transaction_complete"
3453 static void print_binder_stats(struct seq_file *m, const char *prefix,
3454 struct binder_stats *stats)
3458 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3459 ARRAY_SIZE(binder_command_strings));
3460 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3462 seq_printf(m, "%s%s: %d\n", prefix,
3463 binder_command_strings[i], stats->bc[i]);
3466 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3467 ARRAY_SIZE(binder_return_strings));
3468 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3470 seq_printf(m, "%s%s: %d\n", prefix,
3471 binder_return_strings[i], stats->br[i]);
3474 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3475 ARRAY_SIZE(binder_objstat_strings));
3476 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3477 ARRAY_SIZE(stats->obj_deleted));
3478 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3479 if (stats->obj_created[i] || stats->obj_deleted[i])
3480 seq_printf(m, "%s%s: active %d total %d\n", prefix,
3481 binder_objstat_strings[i],
3482 stats->obj_created[i] - stats->obj_deleted[i],
3483 stats->obj_created[i]);
3487 static void print_binder_proc_stats(struct seq_file *m,
3488 struct binder_proc *proc)
3490 struct binder_work *w;
3492 int count, strong, weak;
3494 seq_printf(m, "proc %d\n", proc->pid);
3496 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3498 seq_printf(m, " threads: %d\n", count);
3499 seq_printf(m, " requested threads: %d+%d/%d\n"
3500 " ready threads %d\n"
3501 " free async space %zd\n", proc->requested_threads,
3502 proc->requested_threads_started, proc->max_threads,
3503 proc->ready_threads, proc->free_async_space);
3505 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3507 seq_printf(m, " nodes: %d\n", count);
3511 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3512 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3515 strong += ref->strong;
3518 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
3521 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3523 seq_printf(m, " buffers: %d\n", count);
3526 list_for_each_entry(w, &proc->todo, entry) {
3528 case BINDER_WORK_TRANSACTION:
3535 seq_printf(m, " pending transactions: %d\n", count);
3537 print_binder_stats(m, " ", &proc->stats);
3541 static int binder_state_show(struct seq_file *m, void *unused)
3543 struct binder_proc *proc;
3544 struct binder_node *node;
3545 int do_lock = !binder_debug_no_lock;
3548 binder_lock(__func__);
3550 seq_puts(m, "binder state:\n");
3552 if (!hlist_empty(&binder_dead_nodes))
3553 seq_puts(m, "dead nodes:\n");
3554 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
3555 print_binder_node(m, node);
3557 hlist_for_each_entry(proc, &binder_procs, proc_node)
3558 print_binder_proc(m, proc, 1);
3560 binder_unlock(__func__);
3564 static int binder_stats_show(struct seq_file *m, void *unused)
3566 struct binder_proc *proc;
3567 int do_lock = !binder_debug_no_lock;
3570 binder_lock(__func__);
3572 seq_puts(m, "binder stats:\n");
3574 print_binder_stats(m, "", &binder_stats);
3576 hlist_for_each_entry(proc, &binder_procs, proc_node)
3577 print_binder_proc_stats(m, proc);
3579 binder_unlock(__func__);
3583 static int binder_transactions_show(struct seq_file *m, void *unused)
3585 struct binder_proc *proc;
3586 int do_lock = !binder_debug_no_lock;
3589 binder_lock(__func__);
3591 seq_puts(m, "binder transactions:\n");
3592 hlist_for_each_entry(proc, &binder_procs, proc_node)
3593 print_binder_proc(m, proc, 0);
3595 binder_unlock(__func__);
3599 static int binder_proc_show(struct seq_file *m, void *unused)
3601 struct binder_proc *proc = m->private;
3602 int do_lock = !binder_debug_no_lock;
3605 binder_lock(__func__);
3606 seq_puts(m, "binder proc state:\n");
3607 print_binder_proc(m, proc, 1);
3609 binder_unlock(__func__);
3613 static void print_binder_transaction_log_entry(struct seq_file *m,
3614 struct binder_transaction_log_entry *e)
3617 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
3618 e->debug_id, (e->call_type == 2) ? "reply" :
3619 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3620 e->from_thread, e->to_proc, e->to_thread, e->to_node,
3621 e->target_handle, e->data_size, e->offsets_size);
3624 static int binder_transaction_log_show(struct seq_file *m, void *unused)
3626 struct binder_transaction_log *log = m->private;
3630 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3631 print_binder_transaction_log_entry(m, &log->entry[i]);
3633 for (i = 0; i < log->next; i++)
3634 print_binder_transaction_log_entry(m, &log->entry[i]);
3638 static const struct file_operations binder_fops = {
3639 .owner = THIS_MODULE,
3640 .poll = binder_poll,
3641 .unlocked_ioctl = binder_ioctl,
3642 .compat_ioctl = binder_ioctl,
3643 .mmap = binder_mmap,
3644 .open = binder_open,
3645 .flush = binder_flush,
3646 .release = binder_release,
3649 static struct miscdevice binder_miscdev = {
3650 .minor = MISC_DYNAMIC_MINOR,
3652 .fops = &binder_fops
3655 BINDER_DEBUG_ENTRY(state);
3656 BINDER_DEBUG_ENTRY(stats);
3657 BINDER_DEBUG_ENTRY(transactions);
3658 BINDER_DEBUG_ENTRY(transaction_log);
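/*
 * Module init: create the "binder" deferred-work workqueue, the debugfs
 * directory tree with the state/stats/transactions/transaction_log
 * files, and register the /dev/binder misc device.
 */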
3660 static int __init binder_init(void)
3664 binder_deferred_workqueue = create_singlethread_workqueue("binder");
3665 if (!binder_deferred_workqueue)
3668 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3669 if (binder_debugfs_dir_entry_root)
3670 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3671 binder_debugfs_dir_entry_root);
3672 ret = misc_register(&binder_miscdev);
3673 if (binder_debugfs_dir_entry_root) {
3674 debugfs_create_file("state",
3676 binder_debugfs_dir_entry_root,
3678 &binder_state_fops);
3679 debugfs_create_file("stats",
3681 binder_debugfs_dir_entry_root,
3683 &binder_stats_fops);
3684 debugfs_create_file("transactions",
3686 binder_debugfs_dir_entry_root,
3688 &binder_transactions_fops);
3689 debugfs_create_file("transaction_log",
3691 binder_debugfs_dir_entry_root,
3692 &binder_transaction_log,
3693 &binder_transaction_log_fops);
3694 debugfs_create_file("failed_transaction_log",
3696 binder_debugfs_dir_entry_root,
3697 &binder_transaction_log_failed,
3698 &binder_transaction_log_fops);
3703 device_initcall(binder_init);
3705 #define CREATE_TRACE_POINTS
3706 #include "binder_trace.h"
3708 MODULE_LICENSE("GPL v2");