/*
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <asm/cacheflush.h>
21 #include <linux/fdtable.h>
22 #include <linux/file.h>
23 #include <linux/freezer.h>
25 #include <linux/list.h>
26 #include <linux/miscdevice.h>
28 #include <linux/module.h>
29 #include <linux/mutex.h>
30 #include <linux/nsproxy.h>
31 #include <linux/poll.h>
32 #include <linux/debugfs.h>
33 #include <linux/rbtree.h>
34 #include <linux/sched.h>
35 #include <linux/seq_file.h>
36 #include <linux/uaccess.h>
37 #include <linux/vmalloc.h>
38 #include <linux/slab.h>
39 #include <linux/pid_namespace.h>
40 #include <linux/security.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_trace.h"
49 static DEFINE_MUTEX(binder_main_lock);
50 static DEFINE_MUTEX(binder_deferred_lock);
51 static DEFINE_MUTEX(binder_mmap_lock);
53 static HLIST_HEAD(binder_devices);
54 static HLIST_HEAD(binder_procs);
55 static HLIST_HEAD(binder_deferred_list);
56 static HLIST_HEAD(binder_dead_nodes);
58 static struct dentry *binder_debugfs_dir_entry_root;
59 static struct dentry *binder_debugfs_dir_entry_proc;
60 static int binder_last_id;
61 static struct workqueue_struct *binder_deferred_workqueue;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}
77 static int binder_proc_show(struct seq_file *m, void *unused);
78 BINDER_DEBUG_ENTRY(proc);
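/*
 * BINDER_DEBUG_ENTRY(proc) generates binder_proc_open() and binder_proc_fops
 * for the per-process debugfs entries, routing reads through
 * binder_proc_show() via the seq_file single_open() helpers.  As an
 * illustrative expansion (a sketch, not literal source), the invocation
 * produces roughly:
 *
 *	static int binder_proc_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, binder_proc_show, inode->i_private);
 *	}
 *	static const struct file_operations binder_proc_fops = { ... };
 */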
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif
89 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
91 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
111 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
112 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
113 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
115 static bool binder_debug_no_lock;
116 module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
118 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
119 module_param_named(devices, binder_devices_param, charp, S_IRUGO);
121 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
122 static int binder_stop_on_user_error;
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
134 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
135 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
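/*
 * All four module parameters above (debug_mask, proc_no_lock, devices,
 * stop_on_user_error) are world-readable, and all but "devices" are also
 * writable by root.  With the usual module_param behaviour (assumed here,
 * not shown in this file) they appear under /sys/module/binder/parameters/
 * and can be adjusted at runtime; stop_on_user_error uses a custom setter so
 * that writing a value below 2 wakes any threads parked on
 * binder_user_error_wait.
 */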
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
151 #define to_flat_binder_object(hdr) \
152 container_of(hdr, struct flat_binder_object, hdr)
154 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
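/*
 * These helpers recover the containing object from an embedded
 * struct binder_object_header.  Illustrative use (a sketch, assuming a
 * header "hdr" that has already been validated inside a buffer):
 *
 *	if (hdr->type == BINDER_TYPE_FD) {
 *		struct binder_fd_object *fp = to_binder_fd_object(hdr);
 *		... fp->fd is the sender's file descriptor ...
 *	}
 *
 * binder_transaction() and binder_transaction_buffer_release() below follow
 * exactly this pattern.
 */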
156 enum binder_stat_types {
162 BINDER_STAT_TRANSACTION,
163 BINDER_STAT_TRANSACTION_COMPLETE,
167 struct binder_stats {
168 int br[_IOC_NR(BR_FAILED_REPLY) + 1];
169 int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
170 int obj_created[BINDER_STAT_COUNT];
171 int obj_deleted[BINDER_STAT_COUNT];
174 static struct binder_stats binder_stats;
176 static inline void binder_stats_deleted(enum binder_stat_types type)
178 binder_stats.obj_deleted[type]++;
181 static inline void binder_stats_created(enum binder_stat_types type)
183 binder_stats.obj_created[type]++;
186 struct binder_transaction_log_entry {
197 const char *context_name;
struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}
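/*
 * The transaction log is a fixed-size ring: binder_transaction_log_add()
 * hands out the next slot, zeroes it, and wraps around (marking ->full) once
 * all 32 entries have been used, so recent transactions overwrite the oldest
 * ones.  binder_transaction_log records every transaction attempt while
 * binder_transaction_log_failed only collects the ones that error out; both
 * are read back through debugfs files set up in the driver's init code.
 */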
222 struct binder_context {
223 struct binder_node *binder_context_mgr_node;
224 kuid_t binder_context_mgr_uid;
228 struct binder_device {
229 struct hlist_node hlist;
230 struct miscdevice miscdev;
231 struct binder_context context;
235 struct list_head entry;
237 BINDER_WORK_TRANSACTION = 1,
238 BINDER_WORK_TRANSACTION_COMPLETE,
240 BINDER_WORK_DEAD_BINDER,
241 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
242 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
248 struct binder_work work;
250 struct rb_node rb_node;
251 struct hlist_node dead_node;
253 struct binder_proc *proc;
254 struct hlist_head refs;
255 int internal_strong_refs;
257 int local_strong_refs;
258 binder_uintptr_t ptr;
259 binder_uintptr_t cookie;
260 unsigned has_strong_ref:1;
261 unsigned pending_strong_ref:1;
262 unsigned has_weak_ref:1;
263 unsigned pending_weak_ref:1;
264 unsigned has_async_transaction:1;
265 unsigned accept_fds:1;
266 unsigned min_priority:8;
267 struct list_head async_todo;
270 struct binder_ref_death {
271 struct binder_work work;
272 binder_uintptr_t cookie;
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
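	/*
	 * To serve those lookups, each ref is linked three ways: rb_node_desc
	 * lives in the owning proc's refs_by_desc tree (keyed by the userspace
	 * handle/desc), rb_node_node lives in the proc's refs_by_node tree
	 * (keyed by node address), and node_entry hangs off the node's ->refs
	 * hlist so a dying node can reach every proc that still references it.
	 */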
281 struct rb_node rb_node_desc;
282 struct rb_node rb_node_node;
283 struct hlist_node node_entry;
284 struct binder_proc *proc;
285 struct binder_node *node;
289 struct binder_ref_death *death;
292 struct binder_buffer {
293 struct list_head entry; /* free and allocated entries by address */
294 struct rb_node rb_node; /* free entry by size or allocated entry */
297 unsigned allow_user_free:1;
298 unsigned async_transaction:1;
299 unsigned debug_id:29;
301 struct binder_transaction *transaction;
303 struct binder_node *target_node;
306 size_t extra_buffers_size;
310 enum binder_deferred_state {
311 BINDER_DEFERRED_PUT_FILES = 0x01,
312 BINDER_DEFERRED_FLUSH = 0x02,
313 BINDER_DEFERRED_RELEASE = 0x04,
317 struct hlist_node proc_node;
318 struct rb_root threads;
319 struct rb_root nodes;
320 struct rb_root refs_by_desc;
321 struct rb_root refs_by_node;
323 struct vm_area_struct *vma;
324 struct mm_struct *vma_vm_mm;
325 struct task_struct *tsk;
326 struct files_struct *files;
327 struct hlist_node deferred_work_node;
330 ptrdiff_t user_buffer_offset;
332 struct list_head buffers;
333 struct rb_root free_buffers;
334 struct rb_root allocated_buffers;
335 size_t free_async_space;
339 uint32_t buffer_free;
340 struct list_head todo;
341 wait_queue_head_t wait;
342 struct binder_stats stats;
343 struct list_head delivered_death;
345 int requested_threads;
346 int requested_threads_started;
348 long default_priority;
349 struct dentry *debugfs_entry;
350 struct binder_context *context;
354 BINDER_LOOPER_STATE_REGISTERED = 0x01,
355 BINDER_LOOPER_STATE_ENTERED = 0x02,
356 BINDER_LOOPER_STATE_EXITED = 0x04,
357 BINDER_LOOPER_STATE_INVALID = 0x08,
358 BINDER_LOOPER_STATE_WAITING = 0x10,
359 BINDER_LOOPER_STATE_NEED_RETURN = 0x20
362 struct binder_thread {
363 struct binder_proc *proc;
364 struct rb_node rb_node;
367 struct binder_transaction *transaction_stack;
368 struct list_head todo;
369 uint32_t return_error; /* Write failed, return error code in read buf */
370 uint32_t return_error2; /* Write failed, return error code in read */
371 /* buffer. Used when sending a reply to a dead process that */
372 /* we are also waiting on */
373 wait_queue_head_t wait;
374 struct binder_stats stats;
377 struct binder_transaction {
379 struct binder_work work;
380 struct binder_thread *from;
381 struct binder_transaction *from_parent;
382 struct binder_proc *to_proc;
383 struct binder_thread *to_thread;
384 struct binder_transaction *to_parent;
385 unsigned need_reply:1;
386 /* unsigned is_dead:1; */ /* not used at the moment */
388 struct binder_buffer *buffer;
397 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
399 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
401 struct files_struct *files = proc->files;
402 unsigned long rlim_cur;
408 if (!lock_task_sighand(proc->tsk, &irqs))
411 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
412 unlock_task_sighand(proc->tsk, &irqs);
414 return __alloc_fd(files, 0, rlim_cur, flags);
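	/*
	 * Note that the descriptor is allocated directly in the *target*
	 * process's files_struct, not the caller's: the receiver's own
	 * RLIMIT_NOFILE (read above under its siglock) bounds the allocation,
	 * and the companion helpers below install the file and close it in
	 * that same table.
	 */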
418 * copied from fd_install
420 static void task_fd_install(
421 struct binder_proc *proc, unsigned int fd, struct file *file)
424 __fd_install(proc->files, fd, file);
428 * copied from sys_close
430 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
434 if (proc->files == NULL)
437 retval = __close_fd(proc->files, fd);
438 /* can't restart close syscall because file table entry was cleared */
439 if (unlikely(retval == -ERESTARTSYS ||
440 retval == -ERESTARTNOINTR ||
441 retval == -ERESTARTNOHAND ||
442 retval == -ERESTART_RESTARTBLOCK))
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}
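/*
 * binder_main_lock is a single driver-wide mutex: code paths enter through
 * binder_lock() and leave through binder_unlock(), and binder_thread_read()
 * below drops it before sleeping in wait_event_interruptible() so a parked
 * reader does not stall every other binder user.  The surrounding trace
 * hooks make lock acquisition visible to the binder tracepoints.
 */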
461 static void binder_set_nice(long nice)
465 if (can_nice(current, nice)) {
466 set_user_nice(current, nice);
469 min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
470 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
471 "%d: nice value %ld not allowed use %ld instead\n",
472 current->pid, nice, min_nice);
473 set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
			  struct binder_buffer, entry) - (size_t)buffer->data;
}
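/*
 * Buffer sizes are implicit rather than stored: binder_buffer headers are
 * laid out back to back in the mmap'ed region and kept on proc->buffers in
 * address order, so a buffer's capacity is simply the distance from its
 * data[] to the next buffer header (or to the end of the mapping for the
 * last buffer).  With hypothetical offsets, a header at 0x100 followed by
 * one at 0x300 gives 0x200 minus the header size of usable payload.
 */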
488 static void binder_insert_free_buffer(struct binder_proc *proc,
489 struct binder_buffer *new_buffer)
491 struct rb_node **p = &proc->free_buffers.rb_node;
492 struct rb_node *parent = NULL;
493 struct binder_buffer *buffer;
495 size_t new_buffer_size;
497 BUG_ON(!new_buffer->free);
499 new_buffer_size = binder_buffer_size(proc, new_buffer);
501 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
502 "%d: add free buffer, size %zd, at %p\n",
503 proc->pid, new_buffer_size, new_buffer);
507 buffer = rb_entry(parent, struct binder_buffer, rb_node);
508 BUG_ON(!buffer->free);
510 buffer_size = binder_buffer_size(proc, buffer);
512 if (new_buffer_size < buffer_size)
513 p = &parent->rb_left;
515 p = &parent->rb_right;
517 rb_link_node(&new_buffer->rb_node, parent, p);
518 rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
521 static void binder_insert_allocated_buffer(struct binder_proc *proc,
522 struct binder_buffer *new_buffer)
524 struct rb_node **p = &proc->allocated_buffers.rb_node;
525 struct rb_node *parent = NULL;
526 struct binder_buffer *buffer;
528 BUG_ON(new_buffer->free);
532 buffer = rb_entry(parent, struct binder_buffer, rb_node);
533 BUG_ON(buffer->free);
535 if (new_buffer < buffer)
536 p = &parent->rb_left;
537 else if (new_buffer > buffer)
538 p = &parent->rb_right;
542 rb_link_node(&new_buffer->rb_node, parent, p);
543 rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
546 static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
549 struct rb_node *n = proc->allocated_buffers.rb_node;
550 struct binder_buffer *buffer;
551 struct binder_buffer *kern_ptr;
553 kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
554 - offsetof(struct binder_buffer, data));
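	/*
	 * proc->user_buffer_offset is the constant delta between the kernel
	 * mapping of the buffer area and the task's vma, so a pointer handed
	 * back from userspace is translated purely arithmetically: subtract
	 * the offset to get the kernel address of the data[] payload, then
	 * step back by offsetof(struct binder_buffer, data) to reach the
	 * header before searching the allocated_buffers rbtree below.
	 */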
557 buffer = rb_entry(n, struct binder_buffer, rb_node);
558 BUG_ON(buffer->free);
560 if (kern_ptr < buffer)
562 else if (kern_ptr > buffer)
570 static int binder_update_page_range(struct binder_proc *proc, int allocate,
571 void *start, void *end,
572 struct vm_area_struct *vma)
575 unsigned long user_page_addr;
577 struct mm_struct *mm;
579 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
580 "%d: %s pages %p-%p\n", proc->pid,
581 allocate ? "allocate" : "free", start, end);
586 trace_binder_update_page_range(proc, allocate, start, end);
591 mm = get_task_mm(proc->tsk);
594 down_write(&mm->mmap_sem);
596 if (vma && mm != proc->vma_vm_mm) {
597 pr_err("%d: vma mm and task mm mismatch\n",
607 pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
612 for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
615 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
618 *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
620 pr_err("%d: binder_alloc_buf failed for page at %p\n",
621 proc->pid, page_addr);
622 goto err_alloc_page_failed;
624 ret = map_kernel_range_noflush((unsigned long)page_addr,
625 PAGE_SIZE, PAGE_KERNEL, page);
626 flush_cache_vmap((unsigned long)page_addr,
627 (unsigned long)page_addr + PAGE_SIZE);
629 pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
630 proc->pid, page_addr);
631 goto err_map_kernel_failed;
634 (uintptr_t)page_addr + proc->user_buffer_offset;
635 ret = vm_insert_page(vma, user_page_addr, page[0]);
637 pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
638 proc->pid, user_page_addr);
639 goto err_vm_insert_page_failed;
641 /* vm_insert_page does not seem to increment the refcount */
644 up_write(&mm->mmap_sem);
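/*
 * On the allocate path each page ends up mapped twice: alloc_page() supplies
 * the backing page, map_kernel_range_noflush() plus flush_cache_vmap() makes
 * it visible at the kernel-side buffer address, and vm_insert_page() exposes
 * the very same page at page_addr + user_buffer_offset inside the task's
 * vma.  This shared mapping is what lets transaction data be copied once
 * into kernel memory and read directly by the receiving process.
 */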
650 for (page_addr = end - PAGE_SIZE; page_addr >= start;
651 page_addr -= PAGE_SIZE) {
652 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
654 zap_page_range(vma, (uintptr_t)page_addr +
655 proc->user_buffer_offset, PAGE_SIZE, NULL);
656 err_vm_insert_page_failed:
657 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
658 err_map_kernel_failed:
661 err_alloc_page_failed:
666 up_write(&mm->mmap_sem);
672 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
675 size_t extra_buffers_size,
678 struct rb_node *n = proc->free_buffers.rb_node;
679 struct binder_buffer *buffer;
681 struct rb_node *best_fit = NULL;
684 size_t size, data_offsets_size;
686 if (proc->vma == NULL) {
687 pr_err("%d: binder_alloc_buf, no vma\n",
692 data_offsets_size = ALIGN(data_size, sizeof(void *)) +
693 ALIGN(offsets_size, sizeof(void *));
695 if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
696 binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
697 proc->pid, data_size, offsets_size);
700 size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
701 if (size < data_offsets_size || size < extra_buffers_size) {
702 binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
703 proc->pid, extra_buffers_size);
707 proc->free_async_space < size + sizeof(struct binder_buffer)) {
708 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
709 "%d: binder_alloc_buf size %zd failed, no async space left\n",
715 buffer = rb_entry(n, struct binder_buffer, rb_node);
716 BUG_ON(!buffer->free);
717 buffer_size = binder_buffer_size(proc, buffer);
719 if (size < buffer_size) {
722 } else if (size > buffer_size)
729 if (best_fit == NULL) {
730 pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
735 buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
736 buffer_size = binder_buffer_size(proc, buffer);
739 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
740 "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
741 proc->pid, size, buffer, buffer_size);
744 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
746 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
747 buffer_size = size; /* no room for other buffers */
749 buffer_size = size + sizeof(struct binder_buffer);
752 (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
753 if (end_page_addr > has_page_addr)
754 end_page_addr = has_page_addr;
755 if (binder_update_page_range(proc, 1,
756 (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
759 rb_erase(best_fit, &proc->free_buffers);
761 binder_insert_allocated_buffer(proc, buffer);
762 if (buffer_size != size) {
763 struct binder_buffer *new_buffer = (void *)buffer->data + size;
765 list_add(&new_buffer->entry, &buffer->entry);
766 new_buffer->free = 1;
767 binder_insert_free_buffer(proc, new_buffer);
769 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
770 "%d: binder_alloc_buf size %zd got %p\n",
771 proc->pid, size, buffer);
772 buffer->data_size = data_size;
773 buffer->offsets_size = offsets_size;
774 buffer->extra_buffers_size = extra_buffers_size;
775 buffer->async_transaction = is_async;
777 proc->free_async_space -= size + sizeof(struct binder_buffer);
778 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
779 "%d: binder_alloc_buf size %zd async free %zd\n",
780 proc->pid, size, proc->free_async_space);
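	/*
	 * The allocation above is a best-fit scheme: free chunks are kept in
	 * proc->free_buffers ordered by size, the smallest chunk that can hold
	 * the aligned data + offsets + extra buffers is chosen, only the pages
	 * actually covered by the request are populated via
	 * binder_update_page_range(), and a sufficiently large tail is split
	 * off and reinserted as a new free buffer.  Async (one-way)
	 * transactions are additionally charged against proc->free_async_space
	 * so they cannot consume the whole mapping.
	 */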
786 static void *buffer_start_page(struct binder_buffer *buffer)
788 return (void *)((uintptr_t)buffer & PAGE_MASK);
791 static void *buffer_end_page(struct binder_buffer *buffer)
793 return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
796 static void binder_delete_free_buffer(struct binder_proc *proc,
797 struct binder_buffer *buffer)
799 struct binder_buffer *prev, *next = NULL;
800 int free_page_end = 1;
801 int free_page_start = 1;
803 BUG_ON(proc->buffers.next == &buffer->entry);
804 prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
806 if (buffer_end_page(prev) == buffer_start_page(buffer)) {
808 if (buffer_end_page(prev) == buffer_end_page(buffer))
810 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
811 "%d: merge free, buffer %p share page with %p\n",
812 proc->pid, buffer, prev);
815 if (!list_is_last(&buffer->entry, &proc->buffers)) {
816 next = list_entry(buffer->entry.next,
817 struct binder_buffer, entry);
818 if (buffer_start_page(next) == buffer_end_page(buffer)) {
820 if (buffer_start_page(next) ==
821 buffer_start_page(buffer))
823 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
824 "%d: merge free, buffer %p share page with %p\n",
825 proc->pid, buffer, prev);
828 list_del(&buffer->entry);
829 if (free_page_start || free_page_end) {
830 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
831 "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
832 proc->pid, buffer, free_page_start ? "" : " end",
833 free_page_end ? "" : " start", prev, next);
834 binder_update_page_range(proc, 0, free_page_start ?
835 buffer_start_page(buffer) : buffer_end_page(buffer),
836 (free_page_end ? buffer_end_page(buffer) :
837 buffer_start_page(buffer)) + PAGE_SIZE, NULL);
841 static void binder_free_buf(struct binder_proc *proc,
842 struct binder_buffer *buffer)
844 size_t size, buffer_size;
846 buffer_size = binder_buffer_size(proc, buffer);
848 size = ALIGN(buffer->data_size, sizeof(void *)) +
849 ALIGN(buffer->offsets_size, sizeof(void *)) +
850 ALIGN(buffer->extra_buffers_size, sizeof(void *));
852 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
853 "%d: binder_free_buf %p size %zd buffer_size %zd\n",
854 proc->pid, buffer, size, buffer_size);
856 BUG_ON(buffer->free);
857 BUG_ON(size > buffer_size);
858 BUG_ON(buffer->transaction != NULL);
859 BUG_ON((void *)buffer < proc->buffer);
860 BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
862 if (buffer->async_transaction) {
863 proc->free_async_space += size + sizeof(struct binder_buffer);
865 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
866 "%d: binder_free_buf size %zd async free %zd\n",
867 proc->pid, size, proc->free_async_space);
870 binder_update_page_range(proc, 0,
871 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
872 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
874 rb_erase(&buffer->rb_node, &proc->allocated_buffers);
876 if (!list_is_last(&buffer->entry, &proc->buffers)) {
877 struct binder_buffer *next = list_entry(buffer->entry.next,
878 struct binder_buffer, entry);
881 rb_erase(&next->rb_node, &proc->free_buffers);
882 binder_delete_free_buffer(proc, next);
885 if (proc->buffers.next != &buffer->entry) {
886 struct binder_buffer *prev = list_entry(buffer->entry.prev,
887 struct binder_buffer, entry);
890 binder_delete_free_buffer(proc, buffer);
891 rb_erase(&prev->rb_node, &proc->free_buffers);
895 binder_insert_free_buffer(proc, buffer);
898 static struct binder_node *binder_get_node(struct binder_proc *proc,
899 binder_uintptr_t ptr)
901 struct rb_node *n = proc->nodes.rb_node;
902 struct binder_node *node;
905 node = rb_entry(n, struct binder_node, rb_node);
909 else if (ptr > node->ptr)
917 static struct binder_node *binder_new_node(struct binder_proc *proc,
918 binder_uintptr_t ptr,
919 binder_uintptr_t cookie)
921 struct rb_node **p = &proc->nodes.rb_node;
922 struct rb_node *parent = NULL;
923 struct binder_node *node;
927 node = rb_entry(parent, struct binder_node, rb_node);
931 else if (ptr > node->ptr)
937 node = kzalloc(sizeof(*node), GFP_KERNEL);
940 binder_stats_created(BINDER_STAT_NODE);
941 rb_link_node(&node->rb_node, parent, p);
942 rb_insert_color(&node->rb_node, &proc->nodes);
943 node->debug_id = ++binder_last_id;
946 node->cookie = cookie;
947 node->work.type = BINDER_WORK_NODE;
948 INIT_LIST_HEAD(&node->work.entry);
949 INIT_LIST_HEAD(&node->async_todo);
950 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
951 "%d:%d node %d u%016llx c%016llx created\n",
952 proc->pid, current->pid, node->debug_id,
953 (u64)node->ptr, (u64)node->cookie);
957 static int binder_inc_node(struct binder_node *node, int strong, int internal,
958 struct list_head *target_list)
962 if (target_list == NULL &&
963 node->internal_strong_refs == 0 &&
965 node == node->proc->context->
966 binder_context_mgr_node &&
967 node->has_strong_ref)) {
968 pr_err("invalid inc strong node for %d\n",
972 node->internal_strong_refs++;
974 node->local_strong_refs++;
975 if (!node->has_strong_ref && target_list) {
976 list_del_init(&node->work.entry);
977 list_add_tail(&node->work.entry, target_list);
981 node->local_weak_refs++;
982 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
983 if (target_list == NULL) {
984 pr_err("invalid inc weak node for %d\n",
988 list_add_tail(&node->work.entry, target_list);
994 static int binder_dec_node(struct binder_node *node, int strong, int internal)
998 node->internal_strong_refs--;
1000 node->local_strong_refs--;
1001 if (node->local_strong_refs || node->internal_strong_refs)
1005 node->local_weak_refs--;
1006 if (node->local_weak_refs || !hlist_empty(&node->refs))
1009 if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
1010 if (list_empty(&node->work.entry)) {
1011 list_add_tail(&node->work.entry, &node->proc->todo);
1012 wake_up_interruptible(&node->proc->wait);
1015 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1016 !node->local_weak_refs) {
1017 list_del_init(&node->work.entry);
1019 rb_erase(&node->rb_node, &node->proc->nodes);
1020 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1021 "refless node %d deleted\n",
1024 hlist_del(&node->dead_node);
1025 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1026 "dead node %d deleted\n",
1030 binder_stats_deleted(BINDER_STAT_NODE);
1038 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
1039 u32 desc, bool need_strong_ref)
1041 struct rb_node *n = proc->refs_by_desc.rb_node;
1042 struct binder_ref *ref;
1045 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1047 if (desc < ref->desc) {
1049 } else if (desc > ref->desc) {
1051 } else if (need_strong_ref && !ref->strong) {
1052 binder_user_error("tried to use weak ref as strong ref\n");
1061 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
1062 struct binder_node *node)
1065 struct rb_node **p = &proc->refs_by_node.rb_node;
1066 struct rb_node *parent = NULL;
1067 struct binder_ref *ref, *new_ref;
1068 struct binder_context *context = proc->context;
1072 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1074 if (node < ref->node)
1076 else if (node > ref->node)
1077 p = &(*p)->rb_right;
1081 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1082 if (new_ref == NULL)
1084 binder_stats_created(BINDER_STAT_REF);
1085 new_ref->debug_id = ++binder_last_id;
1086 new_ref->proc = proc;
1087 new_ref->node = node;
1088 rb_link_node(&new_ref->rb_node_node, parent, p);
1089 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1091 new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1092 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1093 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1094 if (ref->desc > new_ref->desc)
1096 new_ref->desc = ref->desc + 1;
1099 p = &proc->refs_by_desc.rb_node;
1102 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1104 if (new_ref->desc < ref->desc)
1106 else if (new_ref->desc > ref->desc)
1107 p = &(*p)->rb_right;
1111 rb_link_node(&new_ref->rb_node_desc, parent, p);
1112 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1114 hlist_add_head(&new_ref->node_entry, &node->refs);
1116 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1117 "%d new ref %d desc %d for node %d\n",
1118 proc->pid, new_ref->debug_id, new_ref->desc,
1121 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1122 "%d new ref %d desc %d for dead node\n",
1123 proc->pid, new_ref->debug_id, new_ref->desc);
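	/*
	 * Descriptor (handle) assignment: a ref to the context manager's node
	 * is always desc 0; any other new ref walks refs_by_desc from the
	 * lowest handle up and takes the first unused value starting at 1, so
	 * handles are small integers that are only meaningful within this proc.
	 */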
1128 static void binder_delete_ref(struct binder_ref *ref)
1130 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1131 "%d delete ref %d desc %d for node %d\n",
1132 ref->proc->pid, ref->debug_id, ref->desc,
1133 ref->node->debug_id);
1135 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1136 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1138 binder_dec_node(ref->node, 1, 1);
1139 hlist_del(&ref->node_entry);
1140 binder_dec_node(ref->node, 0, 1);
1142 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1143 "%d delete ref %d desc %d has death notification\n",
1144 ref->proc->pid, ref->debug_id, ref->desc);
1145 list_del(&ref->death->work.entry);
1147 binder_stats_deleted(BINDER_STAT_DEATH);
1150 binder_stats_deleted(BINDER_STAT_REF);
1153 static int binder_inc_ref(struct binder_ref *ref, int strong,
1154 struct list_head *target_list)
1159 if (ref->strong == 0) {
1160 ret = binder_inc_node(ref->node, 1, 1, target_list);
1166 if (ref->weak == 0) {
1167 ret = binder_inc_node(ref->node, 0, 1, target_list);
1177 static int binder_dec_ref(struct binder_ref *ref, int strong)
1180 if (ref->strong == 0) {
1181 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1182 ref->proc->pid, ref->debug_id,
1183 ref->desc, ref->strong, ref->weak);
1187 if (ref->strong == 0) {
1190 ret = binder_dec_node(ref->node, strong, 1);
1195 if (ref->weak == 0) {
1196 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1197 ref->proc->pid, ref->debug_id,
1198 ref->desc, ref->strong, ref->weak);
1203 if (ref->strong == 0 && ref->weak == 0)
1204 binder_delete_ref(ref);
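	/*
	 * ref->strong and ref->weak mirror the userspace counts for this
	 * handle; the node is only charged via binder_inc_node() and
	 * binder_dec_node() when a count crosses zero.  Once both counts reach
	 * zero, binder_delete_ref() unlinks the ref from the proc's two
	 * rbtrees and from the node's ->refs list and drops its node
	 * references, which may let an otherwise unused node be freed.
	 */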
1208 static void binder_pop_transaction(struct binder_thread *target_thread,
1209 struct binder_transaction *t)
1211 if (target_thread) {
1212 BUG_ON(target_thread->transaction_stack != t);
1213 BUG_ON(target_thread->transaction_stack->from != target_thread);
1214 target_thread->transaction_stack =
1215 target_thread->transaction_stack->from_parent;
1220 t->buffer->transaction = NULL;
1222 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1225 static void binder_send_failed_reply(struct binder_transaction *t,
1226 uint32_t error_code)
1228 struct binder_thread *target_thread;
1229 struct binder_transaction *next;
1231 BUG_ON(t->flags & TF_ONE_WAY);
1233 target_thread = t->from;
1234 if (target_thread) {
1235 if (target_thread->return_error != BR_OK &&
1236 target_thread->return_error2 == BR_OK) {
1237 target_thread->return_error2 =
1238 target_thread->return_error;
1239 target_thread->return_error = BR_OK;
1241 if (target_thread->return_error == BR_OK) {
1242 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1243 "send failed reply for transaction %d to %d:%d\n",
1245 target_thread->proc->pid,
1246 target_thread->pid);
1248 binder_pop_transaction(target_thread, t);
1249 target_thread->return_error = error_code;
1250 wake_up_interruptible(&target_thread->wait);
1252 pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
1253 target_thread->proc->pid,
1255 target_thread->return_error);
1259 next = t->from_parent;
1261 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1262 "send failed reply for transaction %d, target dead\n",
1265 binder_pop_transaction(target_thread, t);
1267 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1268 "reply failed, no target thread at root\n");
1272 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1273 "reply failed, no target thread -- retry %d\n",
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1288 /* Check if we can read a header first */
1289 struct binder_object_header *hdr;
1290 size_t object_size = 0;
1292 if (offset > buffer->data_size - sizeof(*hdr) ||
1293 buffer->data_size < sizeof(*hdr) ||
1294 !IS_ALIGNED(offset, sizeof(u32)))
1297 /* Ok, now see if we can read a complete object. */
1298 hdr = (struct binder_object_header *)(buffer->data + offset);
1299 switch (hdr->type) {
1300 case BINDER_TYPE_BINDER:
1301 case BINDER_TYPE_WEAK_BINDER:
1302 case BINDER_TYPE_HANDLE:
1303 case BINDER_TYPE_WEAK_HANDLE:
1304 object_size = sizeof(struct flat_binder_object);
1306 case BINDER_TYPE_FD:
1307 object_size = sizeof(struct binder_fd_object);
1312 if (offset <= buffer->data_size - object_size &&
1313 buffer->data_size >= object_size)
1319 static void binder_transaction_buffer_release(struct binder_proc *proc,
1320 struct binder_buffer *buffer,
1321 binder_size_t *failed_at)
1323 binder_size_t *offp, *off_end;
1324 int debug_id = buffer->debug_id;
1326 binder_debug(BINDER_DEBUG_TRANSACTION,
1327 "%d buffer release %d, size %zd-%zd, failed at %p\n",
1328 proc->pid, buffer->debug_id,
1329 buffer->data_size, buffer->offsets_size, failed_at);
1331 if (buffer->target_node)
1332 binder_dec_node(buffer->target_node, 1, 0);
1334 offp = (binder_size_t *)(buffer->data +
1335 ALIGN(buffer->data_size, sizeof(void *)));
1337 off_end = failed_at;
1339 off_end = (void *)offp + buffer->offsets_size;
1340 for (; offp < off_end; offp++) {
1341 struct binder_object_header *hdr;
1342 size_t object_size = binder_validate_object(buffer, *offp);
1344 if (object_size == 0) {
1345 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1346 debug_id, (u64)*offp, buffer->data_size);
1349 hdr = (struct binder_object_header *)(buffer->data + *offp);
1350 switch (hdr->type) {
1351 case BINDER_TYPE_BINDER:
1352 case BINDER_TYPE_WEAK_BINDER: {
1353 struct flat_binder_object *fp;
1354 struct binder_node *node;
1356 fp = to_flat_binder_object(hdr);
1357 node = binder_get_node(proc, fp->binder);
1359 pr_err("transaction release %d bad node %016llx\n",
1360 debug_id, (u64)fp->binder);
1363 binder_debug(BINDER_DEBUG_TRANSACTION,
1364 " node %d u%016llx\n",
1365 node->debug_id, (u64)node->ptr);
1366 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1369 case BINDER_TYPE_HANDLE:
1370 case BINDER_TYPE_WEAK_HANDLE: {
1371 struct flat_binder_object *fp;
1372 struct binder_ref *ref;
1374 fp = to_flat_binder_object(hdr);
1375 ref = binder_get_ref(proc, fp->handle,
1376 hdr->type == BINDER_TYPE_HANDLE);
1379 pr_err("transaction release %d bad handle %d\n",
1380 debug_id, fp->handle);
1383 binder_debug(BINDER_DEBUG_TRANSACTION,
1384 " ref %d desc %d (node %d)\n",
1385 ref->debug_id, ref->desc, ref->node->debug_id);
1386 binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
1389 case BINDER_TYPE_FD: {
1390 struct binder_fd_object *fp = to_binder_fd_object(hdr);
1392 binder_debug(BINDER_DEBUG_TRANSACTION,
1393 " fd %d\n", fp->fd);
1395 task_close_fd(proc, fp->fd);
1399 pr_err("transaction release %d bad object type %x\n",
1400 debug_id, hdr->type);
1406 static int binder_translate_binder(struct flat_binder_object *fp,
1407 struct binder_transaction *t,
1408 struct binder_thread *thread)
1410 struct binder_node *node;
1411 struct binder_ref *ref;
1412 struct binder_proc *proc = thread->proc;
1413 struct binder_proc *target_proc = t->to_proc;
1415 node = binder_get_node(proc, fp->binder);
1417 node = binder_new_node(proc, fp->binder, fp->cookie);
1421 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1422 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1424 if (fp->cookie != node->cookie) {
1425 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1426 proc->pid, thread->pid, (u64)fp->binder,
1427 node->debug_id, (u64)fp->cookie,
1431 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
1434 ref = binder_get_ref_for_node(target_proc, node);
1438 if (fp->hdr.type == BINDER_TYPE_BINDER)
1439 fp->hdr.type = BINDER_TYPE_HANDLE;
1441 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
1443 fp->handle = ref->desc;
1445 binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
1447 trace_binder_transaction_node_to_ref(t, node, ref);
1448 binder_debug(BINDER_DEBUG_TRANSACTION,
1449 " node %d u%016llx -> ref %d desc %d\n",
1450 node->debug_id, (u64)node->ptr,
1451 ref->debug_id, ref->desc);
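	/*
	 * Net effect: an object the sender passed by address
	 * (BINDER_TYPE_BINDER or BINDER_TYPE_WEAK_BINDER) leaves here
	 * described as a handle that is only meaningful in the receiving
	 * process; the node itself stays owned by the sender, and the target
	 * proc gains (or reuses) a binder_ref whose desc is written back into
	 * fp->handle.
	 */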
1456 static int binder_translate_handle(struct flat_binder_object *fp,
1457 struct binder_transaction *t,
1458 struct binder_thread *thread)
1460 struct binder_ref *ref;
1461 struct binder_proc *proc = thread->proc;
1462 struct binder_proc *target_proc = t->to_proc;
1464 ref = binder_get_ref(proc, fp->handle,
1465 fp->hdr.type == BINDER_TYPE_HANDLE);
1467 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1468 proc->pid, thread->pid, fp->handle);
1471 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
1474 if (ref->node->proc == target_proc) {
1475 if (fp->hdr.type == BINDER_TYPE_HANDLE)
1476 fp->hdr.type = BINDER_TYPE_BINDER;
1478 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
1479 fp->binder = ref->node->ptr;
1480 fp->cookie = ref->node->cookie;
1481 binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
1483 trace_binder_transaction_ref_to_node(t, ref);
1484 binder_debug(BINDER_DEBUG_TRANSACTION,
1485 " ref %d desc %d -> node %d u%016llx\n",
1486 ref->debug_id, ref->desc, ref->node->debug_id,
1487 (u64)ref->node->ptr);
1489 struct binder_ref *new_ref;
1491 new_ref = binder_get_ref_for_node(target_proc, ref->node);
1496 fp->handle = new_ref->desc;
1498 binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
1500 trace_binder_transaction_ref_to_ref(t, ref, new_ref);
1501 binder_debug(BINDER_DEBUG_TRANSACTION,
1502 " ref %d desc %d -> ref %d desc %d (node %d)\n",
1503 ref->debug_id, ref->desc, new_ref->debug_id,
1504 new_ref->desc, ref->node->debug_id);
1509 static int binder_translate_fd(int fd,
1510 struct binder_transaction *t,
1511 struct binder_thread *thread,
1512 struct binder_transaction *in_reply_to)
1514 struct binder_proc *proc = thread->proc;
1515 struct binder_proc *target_proc = t->to_proc;
1519 bool target_allows_fd;
1522 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
1524 target_allows_fd = t->buffer->target_node->accept_fds;
1525 if (!target_allows_fd) {
1526 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
1527 proc->pid, thread->pid,
1528 in_reply_to ? "reply" : "transaction",
1531 goto err_fd_not_accepted;
1536 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1537 proc->pid, thread->pid, fd);
1541 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
1547 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1548 if (target_fd < 0) {
1550 goto err_get_unused_fd;
1552 task_fd_install(target_proc, target_fd, file);
1553 trace_binder_transaction_fd(t, fd, target_fd);
1554 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
1563 err_fd_not_accepted:
1567 static void binder_transaction(struct binder_proc *proc,
1568 struct binder_thread *thread,
1569 struct binder_transaction_data *tr, int reply,
1570 binder_size_t extra_buffers_size)
1573 struct binder_transaction *t;
1574 struct binder_work *tcomplete;
1575 binder_size_t *offp, *off_end;
1576 binder_size_t off_min;
1577 struct binder_proc *target_proc;
1578 struct binder_thread *target_thread = NULL;
1579 struct binder_node *target_node = NULL;
1580 struct list_head *target_list;
1581 wait_queue_head_t *target_wait;
1582 struct binder_transaction *in_reply_to = NULL;
1583 struct binder_transaction_log_entry *e;
1584 uint32_t return_error;
1585 struct binder_context *context = proc->context;
1587 e = binder_transaction_log_add(&binder_transaction_log);
1588 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1589 e->from_proc = proc->pid;
1590 e->from_thread = thread->pid;
1591 e->target_handle = tr->target.handle;
1592 e->data_size = tr->data_size;
1593 e->offsets_size = tr->offsets_size;
1594 e->context_name = proc->context->name;
1597 in_reply_to = thread->transaction_stack;
1598 if (in_reply_to == NULL) {
1599 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
1600 proc->pid, thread->pid);
1601 return_error = BR_FAILED_REPLY;
1602 goto err_empty_call_stack;
1604 binder_set_nice(in_reply_to->saved_priority);
1605 if (in_reply_to->to_thread != thread) {
1606 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
1607 proc->pid, thread->pid, in_reply_to->debug_id,
1608 in_reply_to->to_proc ?
1609 in_reply_to->to_proc->pid : 0,
1610 in_reply_to->to_thread ?
1611 in_reply_to->to_thread->pid : 0);
1612 return_error = BR_FAILED_REPLY;
1614 goto err_bad_call_stack;
1616 thread->transaction_stack = in_reply_to->to_parent;
1617 target_thread = in_reply_to->from;
1618 if (target_thread == NULL) {
1619 return_error = BR_DEAD_REPLY;
1620 goto err_dead_binder;
1622 if (target_thread->transaction_stack != in_reply_to) {
1623 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
1624 proc->pid, thread->pid,
1625 target_thread->transaction_stack ?
1626 target_thread->transaction_stack->debug_id : 0,
1627 in_reply_to->debug_id);
1628 return_error = BR_FAILED_REPLY;
1630 target_thread = NULL;
1631 goto err_dead_binder;
1633 target_proc = target_thread->proc;
1635 if (tr->target.handle) {
1636 struct binder_ref *ref;
1638 ref = binder_get_ref(proc, tr->target.handle, true);
1640 binder_user_error("%d:%d got transaction to invalid handle\n",
1641 proc->pid, thread->pid);
1642 return_error = BR_FAILED_REPLY;
1643 goto err_invalid_target_handle;
1645 target_node = ref->node;
1647 target_node = context->binder_context_mgr_node;
1648 if (target_node == NULL) {
1649 return_error = BR_DEAD_REPLY;
1650 goto err_no_context_mgr_node;
1653 e->to_node = target_node->debug_id;
1654 target_proc = target_node->proc;
1655 if (target_proc == NULL) {
1656 return_error = BR_DEAD_REPLY;
1657 goto err_dead_binder;
1659 if (security_binder_transaction(proc->tsk,
1660 target_proc->tsk) < 0) {
1661 return_error = BR_FAILED_REPLY;
1662 goto err_invalid_target_handle;
1664 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1665 struct binder_transaction *tmp;
1667 tmp = thread->transaction_stack;
1668 if (tmp->to_thread != thread) {
1669 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
1670 proc->pid, thread->pid, tmp->debug_id,
1671 tmp->to_proc ? tmp->to_proc->pid : 0,
1673 tmp->to_thread->pid : 0);
1674 return_error = BR_FAILED_REPLY;
1675 goto err_bad_call_stack;
1678 if (tmp->from && tmp->from->proc == target_proc)
1679 target_thread = tmp->from;
1680 tmp = tmp->from_parent;
1684 if (target_thread) {
1685 e->to_thread = target_thread->pid;
1686 target_list = &target_thread->todo;
1687 target_wait = &target_thread->wait;
1689 target_list = &target_proc->todo;
1690 target_wait = &target_proc->wait;
1692 e->to_proc = target_proc->pid;
1694 /* TODO: reuse incoming transaction for reply */
1695 t = kzalloc(sizeof(*t), GFP_KERNEL);
1697 return_error = BR_FAILED_REPLY;
1698 goto err_alloc_t_failed;
1700 binder_stats_created(BINDER_STAT_TRANSACTION);
1702 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1703 if (tcomplete == NULL) {
1704 return_error = BR_FAILED_REPLY;
1705 goto err_alloc_tcomplete_failed;
1707 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1709 t->debug_id = ++binder_last_id;
1710 e->debug_id = t->debug_id;
1713 binder_debug(BINDER_DEBUG_TRANSACTION,
1714 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
1715 proc->pid, thread->pid, t->debug_id,
1716 target_proc->pid, target_thread->pid,
1717 (u64)tr->data.ptr.buffer,
1718 (u64)tr->data.ptr.offsets,
1719 (u64)tr->data_size, (u64)tr->offsets_size,
1720 (u64)extra_buffers_size);
1722 binder_debug(BINDER_DEBUG_TRANSACTION,
1723 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
1724 proc->pid, thread->pid, t->debug_id,
1725 target_proc->pid, target_node->debug_id,
1726 (u64)tr->data.ptr.buffer,
1727 (u64)tr->data.ptr.offsets,
1728 (u64)tr->data_size, (u64)tr->offsets_size,
1729 (u64)extra_buffers_size);
1731 if (!reply && !(tr->flags & TF_ONE_WAY))
1735 t->sender_euid = task_euid(proc->tsk);
1736 t->to_proc = target_proc;
1737 t->to_thread = target_thread;
1739 t->flags = tr->flags;
1740 t->priority = task_nice(current);
1742 trace_binder_transaction(reply, t, target_node);
1744 t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1745 tr->offsets_size, extra_buffers_size,
1746 !reply && (t->flags & TF_ONE_WAY));
1747 if (t->buffer == NULL) {
1748 return_error = BR_FAILED_REPLY;
1749 goto err_binder_alloc_buf_failed;
1751 t->buffer->allow_user_free = 0;
1752 t->buffer->debug_id = t->debug_id;
1753 t->buffer->transaction = t;
1754 t->buffer->target_node = target_node;
1755 trace_binder_transaction_alloc_buf(t->buffer);
1757 binder_inc_node(target_node, 1, 0, NULL);
1759 offp = (binder_size_t *)(t->buffer->data +
1760 ALIGN(tr->data_size, sizeof(void *)));
1762 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
1763 tr->data.ptr.buffer, tr->data_size)) {
1764 binder_user_error("%d:%d got transaction with invalid data ptr\n",
1765 proc->pid, thread->pid);
1766 return_error = BR_FAILED_REPLY;
1767 goto err_copy_data_failed;
1769 if (copy_from_user(offp, (const void __user *)(uintptr_t)
1770 tr->data.ptr.offsets, tr->offsets_size)) {
1771 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1772 proc->pid, thread->pid);
1773 return_error = BR_FAILED_REPLY;
1774 goto err_copy_data_failed;
1776 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
1777 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
1778 proc->pid, thread->pid, (u64)tr->offsets_size);
1779 return_error = BR_FAILED_REPLY;
1780 goto err_bad_offset;
1782 off_end = (void *)offp + tr->offsets_size;
1784 for (; offp < off_end; offp++) {
1785 struct binder_object_header *hdr;
1786 size_t object_size = binder_validate_object(t->buffer, *offp);
1788 if (object_size == 0 || *offp < off_min) {
1789 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
1790 proc->pid, thread->pid, (u64)*offp,
1792 (u64)t->buffer->data_size);
1793 return_error = BR_FAILED_REPLY;
1794 goto err_bad_offset;
1797 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
1798 off_min = *offp + object_size;
1799 switch (hdr->type) {
1800 case BINDER_TYPE_BINDER:
1801 case BINDER_TYPE_WEAK_BINDER: {
1802 struct flat_binder_object *fp;
1804 fp = to_flat_binder_object(hdr);
1805 ret = binder_translate_binder(fp, t, thread);
1807 return_error = BR_FAILED_REPLY;
1808 goto err_translate_failed;
1811 case BINDER_TYPE_HANDLE:
1812 case BINDER_TYPE_WEAK_HANDLE: {
1813 struct flat_binder_object *fp;
1815 fp = to_flat_binder_object(hdr);
1816 ret = binder_translate_handle(fp, t, thread);
1818 return_error = BR_FAILED_REPLY;
1819 goto err_translate_failed;
1823 case BINDER_TYPE_FD: {
1824 struct binder_fd_object *fp = to_binder_fd_object(hdr);
1825 int target_fd = binder_translate_fd(fp->fd, t, thread,
1828 if (target_fd < 0) {
1829 return_error = BR_FAILED_REPLY;
1830 goto err_translate_failed;
1837 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
1838 proc->pid, thread->pid, hdr->type);
1839 return_error = BR_FAILED_REPLY;
1840 goto err_bad_object_type;
1844 BUG_ON(t->buffer->async_transaction != 0);
1845 binder_pop_transaction(target_thread, in_reply_to);
1846 } else if (!(t->flags & TF_ONE_WAY)) {
1847 BUG_ON(t->buffer->async_transaction != 0);
1849 t->from_parent = thread->transaction_stack;
1850 thread->transaction_stack = t;
1852 BUG_ON(target_node == NULL);
1853 BUG_ON(t->buffer->async_transaction != 1);
1854 if (target_node->has_async_transaction) {
1855 target_list = &target_node->async_todo;
1858 target_node->has_async_transaction = 1;
1860 t->work.type = BINDER_WORK_TRANSACTION;
1861 list_add_tail(&t->work.entry, target_list);
1862 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1863 list_add_tail(&tcomplete->entry, &thread->todo);
1865 wake_up_interruptible(target_wait);
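	/*
	 * At this point the transaction is fully translated and queued: t goes
	 * on the chosen target_list (a specific thread's todo for replies and
	 * nested synchronous calls, otherwise the proc-wide todo or the node's
	 * async_todo), a BINDER_WORK_TRANSACTION_COMPLETE entry is queued on
	 * the sender's own todo so it will see BR_TRANSACTION_COMPLETE, and
	 * the target waitqueue is woken if anything is sleeping on it.
	 */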
1868 err_translate_failed:
1869 err_bad_object_type:
1871 err_copy_data_failed:
1872 trace_binder_transaction_failed_buffer_release(t->buffer);
1873 binder_transaction_buffer_release(target_proc, t->buffer, offp);
1874 t->buffer->transaction = NULL;
1875 binder_free_buf(target_proc, t->buffer);
1876 err_binder_alloc_buf_failed:
1878 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1879 err_alloc_tcomplete_failed:
1881 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1884 err_empty_call_stack:
1886 err_invalid_target_handle:
1887 err_no_context_mgr_node:
1888 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1889 "%d:%d transaction failed %d, size %lld-%lld\n",
1890 proc->pid, thread->pid, return_error,
1891 (u64)tr->data_size, (u64)tr->offsets_size);
1894 struct binder_transaction_log_entry *fe;
1896 fe = binder_transaction_log_add(&binder_transaction_log_failed);
1900 BUG_ON(thread->return_error != BR_OK);
1902 thread->return_error = BR_TRANSACTION_COMPLETE;
1903 binder_send_failed_reply(in_reply_to, return_error);
1905 thread->return_error = return_error;
1908 static int binder_thread_write(struct binder_proc *proc,
1909 struct binder_thread *thread,
1910 binder_uintptr_t binder_buffer, size_t size,
1911 binder_size_t *consumed)
1914 struct binder_context *context = proc->context;
1915 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
1916 void __user *ptr = buffer + *consumed;
1917 void __user *end = buffer + size;
1919 while (ptr < end && thread->return_error == BR_OK) {
1920 if (get_user(cmd, (uint32_t __user *)ptr))
1922 ptr += sizeof(uint32_t);
1923 trace_binder_command(cmd);
1924 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1925 binder_stats.bc[_IOC_NR(cmd)]++;
1926 proc->stats.bc[_IOC_NR(cmd)]++;
1927 thread->stats.bc[_IOC_NR(cmd)]++;
1935 struct binder_ref *ref;
1936 const char *debug_string;
1938 if (get_user(target, (uint32_t __user *)ptr))
1940 ptr += sizeof(uint32_t);
1941 if (target == 0 && context->binder_context_mgr_node &&
1942 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1943 ref = binder_get_ref_for_node(proc,
1944 context->binder_context_mgr_node);
1945 if (ref->desc != target) {
1946 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
1947 proc->pid, thread->pid,
1951 ref = binder_get_ref(proc, target,
1952 cmd == BC_ACQUIRE ||
1955 binder_user_error("%d:%d refcount change on invalid ref %d\n",
1956 proc->pid, thread->pid, target);
1961 debug_string = "IncRefs";
1962 binder_inc_ref(ref, 0, NULL);
1965 debug_string = "Acquire";
1966 binder_inc_ref(ref, 1, NULL);
1969 debug_string = "Release";
1970 binder_dec_ref(ref, 1);
1974 debug_string = "DecRefs";
1975 binder_dec_ref(ref, 0);
1978 binder_debug(BINDER_DEBUG_USER_REFS,
1979 "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
1980 proc->pid, thread->pid, debug_string, ref->debug_id,
1981 ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1984 case BC_INCREFS_DONE:
1985 case BC_ACQUIRE_DONE: {
1986 binder_uintptr_t node_ptr;
1987 binder_uintptr_t cookie;
1988 struct binder_node *node;
1990 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
1992 ptr += sizeof(binder_uintptr_t);
1993 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1995 ptr += sizeof(binder_uintptr_t);
1996 node = binder_get_node(proc, node_ptr);
1998 binder_user_error("%d:%d %s u%016llx no match\n",
1999 proc->pid, thread->pid,
2000 cmd == BC_INCREFS_DONE ?
2006 if (cookie != node->cookie) {
2007 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
2008 proc->pid, thread->pid,
2009 cmd == BC_INCREFS_DONE ?
2010 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2011 (u64)node_ptr, node->debug_id,
2012 (u64)cookie, (u64)node->cookie);
2015 if (cmd == BC_ACQUIRE_DONE) {
2016 if (node->pending_strong_ref == 0) {
2017 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
2018 proc->pid, thread->pid,
2022 node->pending_strong_ref = 0;
2024 if (node->pending_weak_ref == 0) {
2025 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
2026 proc->pid, thread->pid,
2030 node->pending_weak_ref = 0;
2032 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
2033 binder_debug(BINDER_DEBUG_USER_REFS,
2034 "%d:%d %s node %d ls %d lw %d\n",
2035 proc->pid, thread->pid,
2036 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2037 node->debug_id, node->local_strong_refs, node->local_weak_refs);
2040 case BC_ATTEMPT_ACQUIRE:
2041 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
2043 case BC_ACQUIRE_RESULT:
2044 pr_err("BC_ACQUIRE_RESULT not supported\n");
2047 case BC_FREE_BUFFER: {
2048 binder_uintptr_t data_ptr;
2049 struct binder_buffer *buffer;
2051 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
2053 ptr += sizeof(binder_uintptr_t);
2055 buffer = binder_buffer_lookup(proc, data_ptr);
2056 if (buffer == NULL) {
2057 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
2058 proc->pid, thread->pid, (u64)data_ptr);
2061 if (!buffer->allow_user_free) {
2062 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
2063 proc->pid, thread->pid, (u64)data_ptr);
2066 binder_debug(BINDER_DEBUG_FREE_BUFFER,
2067 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
2068 proc->pid, thread->pid, (u64)data_ptr,
2070 buffer->transaction ? "active" : "finished");
2072 if (buffer->transaction) {
2073 buffer->transaction->buffer = NULL;
2074 buffer->transaction = NULL;
2076 if (buffer->async_transaction && buffer->target_node) {
2077 BUG_ON(!buffer->target_node->has_async_transaction);
2078 if (list_empty(&buffer->target_node->async_todo))
2079 buffer->target_node->has_async_transaction = 0;
2081 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
2083 trace_binder_transaction_buffer_release(buffer);
2084 binder_transaction_buffer_release(proc, buffer, NULL);
2085 binder_free_buf(proc, buffer);
2089 case BC_TRANSACTION:
2091 struct binder_transaction_data tr;
2093 if (copy_from_user(&tr, ptr, sizeof(tr)))
2096 binder_transaction(proc, thread, &tr,
2097 cmd == BC_REPLY, 0);
2101 case BC_REGISTER_LOOPER:
2102 binder_debug(BINDER_DEBUG_THREADS,
2103 "%d:%d BC_REGISTER_LOOPER\n",
2104 proc->pid, thread->pid);
2105 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2106 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2107 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
2108 proc->pid, thread->pid);
2109 } else if (proc->requested_threads == 0) {
2110 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2111 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
2112 proc->pid, thread->pid);
2114 proc->requested_threads--;
2115 proc->requested_threads_started++;
2117 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2119 case BC_ENTER_LOOPER:
2120 binder_debug(BINDER_DEBUG_THREADS,
2121 "%d:%d BC_ENTER_LOOPER\n",
2122 proc->pid, thread->pid);
2123 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2124 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2125 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
2126 proc->pid, thread->pid);
2128 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2130 case BC_EXIT_LOOPER:
2131 binder_debug(BINDER_DEBUG_THREADS,
2132 "%d:%d BC_EXIT_LOOPER\n",
2133 proc->pid, thread->pid);
2134 thread->looper |= BINDER_LOOPER_STATE_EXITED;
2137 case BC_REQUEST_DEATH_NOTIFICATION:
2138 case BC_CLEAR_DEATH_NOTIFICATION: {
2140 binder_uintptr_t cookie;
2141 struct binder_ref *ref;
2142 struct binder_ref_death *death;
2144 if (get_user(target, (uint32_t __user *)ptr))
2146 ptr += sizeof(uint32_t);
2147 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2149 ptr += sizeof(binder_uintptr_t);
2150 ref = binder_get_ref(proc, target, false);
2152 binder_user_error("%d:%d %s invalid ref %d\n",
2153 proc->pid, thread->pid,
2154 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2155 "BC_REQUEST_DEATH_NOTIFICATION" :
2156 "BC_CLEAR_DEATH_NOTIFICATION",
2161 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2162 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
2163 proc->pid, thread->pid,
2164 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2165 "BC_REQUEST_DEATH_NOTIFICATION" :
2166 "BC_CLEAR_DEATH_NOTIFICATION",
2167 (u64)cookie, ref->debug_id, ref->desc,
2168 ref->strong, ref->weak, ref->node->debug_id);
2170 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2172 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2173 proc->pid, thread->pid);
2176 death = kzalloc(sizeof(*death), GFP_KERNEL);
2177 if (death == NULL) {
2178 thread->return_error = BR_ERROR;
2179 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2180 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
2181 proc->pid, thread->pid);
2184 binder_stats_created(BINDER_STAT_DEATH);
2185 INIT_LIST_HEAD(&death->work.entry);
2186 death->cookie = cookie;
2188 if (ref->node->proc == NULL) {
2189 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2190 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2191 list_add_tail(&ref->death->work.entry, &thread->todo);
2193 list_add_tail(&ref->death->work.entry, &proc->todo);
2194 wake_up_interruptible(&proc->wait);
2198 if (ref->death == NULL) {
2199 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2200 proc->pid, thread->pid);
2204 if (death->cookie != cookie) {
2205 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2206 proc->pid, thread->pid,
2212 if (list_empty(&death->work.entry)) {
2213 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2214 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2215 list_add_tail(&death->work.entry, &thread->todo);
2217 list_add_tail(&death->work.entry, &proc->todo);
2218 wake_up_interruptible(&proc->wait);
2221 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2222 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2226 case BC_DEAD_BINDER_DONE: {
2227 struct binder_work *w;
2228 binder_uintptr_t cookie;
2229 struct binder_ref_death *death = NULL;
2231 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2234 ptr += sizeof(cookie);
2235 list_for_each_entry(w, &proc->delivered_death, entry) {
2236 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2238 if (tmp_death->cookie == cookie) {
2243 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2244 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2245 proc->pid, thread->pid, (u64)cookie,
2247 if (death == NULL) {
2248 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2249 proc->pid, thread->pid, (u64)cookie);
2253 list_del_init(&death->work.entry);
2254 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2255 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2256 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2257 list_add_tail(&death->work.entry, &thread->todo);
2259 list_add_tail(&death->work.entry, &proc->todo);
2260 wake_up_interruptible(&proc->wait);
2266 pr_err("%d:%d unknown command %d\n",
2267 proc->pid, thread->pid, cmd);
2270 *consumed = ptr - buffer;
2275 static void binder_stat_br(struct binder_proc *proc,
2276 struct binder_thread *thread, uint32_t cmd)
2278 trace_binder_return(cmd);
2279 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2280 binder_stats.br[_IOC_NR(cmd)]++;
2281 proc->stats.br[_IOC_NR(cmd)]++;
2282 thread->stats.br[_IOC_NR(cmd)]++;
2286 static int binder_has_proc_work(struct binder_proc *proc,
2287 struct binder_thread *thread)
2289 return !list_empty(&proc->todo) ||
2290 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2293 static int binder_has_thread_work(struct binder_thread *thread)
2295 return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2296 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
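/*
 * binder_thread_read() - fill the caller's read buffer with pending work.
 * Pending return errors are drained first; the thread then waits (unless
 * the caller asked for non-blocking I/O) until it has work of its own or,
 * for an idle looper thread, until the process has work queued.  Each work
 * item is translated into a BR_* command plus payload and copied to user
 * space; BR_SPAWN_LOOPER may be emitted at the end to ask user space to
 * start another looper thread.
 */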
2299 static int binder_thread_read(struct binder_proc *proc,
2300 struct binder_thread *thread,
2301 binder_uintptr_t binder_buffer, size_t size,
2302 binder_size_t *consumed, int non_block)
2304 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
2305 void __user *ptr = buffer + *consumed;
2306 void __user *end = buffer + size;
2309 int wait_for_proc_work;
2311 if (*consumed == 0) {
2312 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2314 ptr += sizeof(uint32_t);
2318 wait_for_proc_work = thread->transaction_stack == NULL &&
2319 list_empty(&thread->todo);
2321 if (thread->return_error != BR_OK && ptr < end) {
2322 if (thread->return_error2 != BR_OK) {
2323 if (put_user(thread->return_error2, (uint32_t __user *)ptr))
2325 ptr += sizeof(uint32_t);
2326 binder_stat_br(proc, thread, thread->return_error2);
2329 thread->return_error2 = BR_OK;
2331 if (put_user(thread->return_error, (uint32_t __user *)ptr))
2333 ptr += sizeof(uint32_t);
2334 binder_stat_br(proc, thread, thread->return_error);
2335 thread->return_error = BR_OK;
2340 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2341 if (wait_for_proc_work)
2342 proc->ready_threads++;
2344 binder_unlock(__func__);
2346 trace_binder_wait_for_work(wait_for_proc_work,
2347 !!thread->transaction_stack,
2348 !list_empty(&thread->todo));
2349 if (wait_for_proc_work) {
2350 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2351 BINDER_LOOPER_STATE_ENTERED))) {
2352 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
2353 proc->pid, thread->pid, thread->looper);
2354 wait_event_interruptible(binder_user_error_wait,
2355 binder_stop_on_user_error < 2);
2357 binder_set_nice(proc->default_priority);
2359 if (!binder_has_proc_work(proc, thread))
2362 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2365 if (!binder_has_thread_work(thread))
2368 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
2371 binder_lock(__func__);
2373 if (wait_for_proc_work)
2374 proc->ready_threads--;
2375 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2382 struct binder_transaction_data tr;
2383 struct binder_work *w;
2384 struct binder_transaction *t = NULL;
2386 if (!list_empty(&thread->todo)) {
2387 w = list_first_entry(&thread->todo, struct binder_work,
2389 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2390 w = list_first_entry(&proc->todo, struct binder_work,
2394 if (ptr - buffer == 4 &&
2395 !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
2400 if (end - ptr < sizeof(tr) + 4)
2404 case BINDER_WORK_TRANSACTION: {
2405 t = container_of(w, struct binder_transaction, work);
2407 case BINDER_WORK_TRANSACTION_COMPLETE: {
2408 cmd = BR_TRANSACTION_COMPLETE;
2409 if (put_user(cmd, (uint32_t __user *)ptr))
2411 ptr += sizeof(uint32_t);
2413 binder_stat_br(proc, thread, cmd);
2414 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
2415 "%d:%d BR_TRANSACTION_COMPLETE\n",
2416 proc->pid, thread->pid);
2418 list_del(&w->entry);
2420 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2422 case BINDER_WORK_NODE: {
2423 struct binder_node *node = container_of(w, struct binder_node, work);
2424 uint32_t cmd = BR_NOOP;
2425 const char *cmd_name;
2426 int strong = node->internal_strong_refs || node->local_strong_refs;
2427 int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
2429 if (weak && !node->has_weak_ref) {
2431 cmd_name = "BR_INCREFS";
2432 node->has_weak_ref = 1;
2433 node->pending_weak_ref = 1;
2434 node->local_weak_refs++;
2435 } else if (strong && !node->has_strong_ref) {
2437 cmd_name = "BR_ACQUIRE";
2438 node->has_strong_ref = 1;
2439 node->pending_strong_ref = 1;
2440 node->local_strong_refs++;
2441 } else if (!strong && node->has_strong_ref) {
2443 cmd_name = "BR_RELEASE";
2444 node->has_strong_ref = 0;
2445 } else if (!weak && node->has_weak_ref) {
2447 cmd_name = "BR_DECREFS";
2448 node->has_weak_ref = 0;
2450 if (cmd != BR_NOOP) {
2451 if (put_user(cmd, (uint32_t __user *)ptr))
2453 ptr += sizeof(uint32_t);
2454 if (put_user(node->ptr,
2455 (binder_uintptr_t __user *)ptr))
2457 ptr += sizeof(binder_uintptr_t);
2458 if (put_user(node->cookie,
2459 (binder_uintptr_t __user *)ptr))
2461 ptr += sizeof(binder_uintptr_t);
2463 binder_stat_br(proc, thread, cmd);
2464 binder_debug(BINDER_DEBUG_USER_REFS,
2465 "%d:%d %s %d u%016llx c%016llx\n",
2466 proc->pid, thread->pid, cmd_name,
2468 (u64)node->ptr, (u64)node->cookie);
2470 list_del_init(&w->entry);
2471 if (!weak && !strong) {
2472 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2473 "%d:%d node %d u%016llx c%016llx deleted\n",
2474 proc->pid, thread->pid,
2478 rb_erase(&node->rb_node, &proc->nodes);
2480 binder_stats_deleted(BINDER_STAT_NODE);
2482 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2483 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2484 proc->pid, thread->pid,
2491 case BINDER_WORK_DEAD_BINDER:
2492 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2493 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2494 struct binder_ref_death *death;
2497 death = container_of(w, struct binder_ref_death, work);
2498 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2499 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2501 cmd = BR_DEAD_BINDER;
2502 if (put_user(cmd, (uint32_t __user *)ptr))
2504 ptr += sizeof(uint32_t);
2505 if (put_user(death->cookie,
2506 (binder_uintptr_t __user *)ptr))
2508 ptr += sizeof(binder_uintptr_t);
2509 binder_stat_br(proc, thread, cmd);
2510 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2511 "%d:%d %s %016llx\n",
2512 proc->pid, thread->pid,
2513 cmd == BR_DEAD_BINDER ?
2515 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
2516 (u64)death->cookie);
2518 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2519 list_del(&w->entry);
2521 binder_stats_deleted(BINDER_STAT_DEATH);
2523 list_move(&w->entry, &proc->delivered_death);
2524 if (cmd == BR_DEAD_BINDER)
2525 goto done; /* DEAD_BINDER notifications can cause transactions */
2532 BUG_ON(t->buffer == NULL);
2533 if (t->buffer->target_node) {
2534 struct binder_node *target_node = t->buffer->target_node;
2536 tr.target.ptr = target_node->ptr;
2537 tr.cookie = target_node->cookie;
2538 t->saved_priority = task_nice(current);
2539 if (t->priority < target_node->min_priority &&
2540 !(t->flags & TF_ONE_WAY))
2541 binder_set_nice(t->priority);
2542 else if (!(t->flags & TF_ONE_WAY) ||
2543 t->saved_priority > target_node->min_priority)
2544 binder_set_nice(target_node->min_priority);
2545 cmd = BR_TRANSACTION;
2552 tr.flags = t->flags;
2553 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
2556 struct task_struct *sender = t->from->proc->tsk;
2558 tr.sender_pid = task_tgid_nr_ns(sender,
2559 task_active_pid_ns(current));
2564 tr.data_size = t->buffer->data_size;
2565 tr.offsets_size = t->buffer->offsets_size;
2566 tr.data.ptr.buffer = (binder_uintptr_t)(
2567 (uintptr_t)t->buffer->data +
2568 proc->user_buffer_offset);
2569 tr.data.ptr.offsets = tr.data.ptr.buffer +
2570 ALIGN(t->buffer->data_size,
2573 if (put_user(cmd, (uint32_t __user *)ptr))
2575 ptr += sizeof(uint32_t);
2576 if (copy_to_user(ptr, &tr, sizeof(tr)))
2580 trace_binder_transaction_received(t);
2581 binder_stat_br(proc, thread, cmd);
2582 binder_debug(BINDER_DEBUG_TRANSACTION,
2583 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
2584 proc->pid, thread->pid,
2585 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
2587 t->debug_id, t->from ? t->from->proc->pid : 0,
2588 t->from ? t->from->pid : 0, cmd,
2589 t->buffer->data_size, t->buffer->offsets_size,
2590 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
2592 list_del(&t->work.entry);
2593 t->buffer->allow_user_free = 1;
2594 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
2595 t->to_parent = thread->transaction_stack;
2596 t->to_thread = thread;
2597 thread->transaction_stack = t;
2599 t->buffer->transaction = NULL;
2601 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2608 *consumed = ptr - buffer;
2609 if (proc->requested_threads + proc->ready_threads == 0 &&
2610 proc->requested_threads_started < proc->max_threads &&
2611 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2612 BINDER_LOOPER_STATE_ENTERED))
2613 /* the user-space code fails to spawn a new thread if we leave this out */) {
2614 proc->requested_threads++;
2615 binder_debug(BINDER_DEBUG_THREADS,
2616 "%d:%d BR_SPAWN_LOOPER\n",
2617 proc->pid, thread->pid);
2618 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
2620 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
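/*
 * Discard a todo list that can no longer be delivered (thread or process
 * teardown).  Transactions that still expect a reply get BR_DEAD_REPLY sent
 * back via binder_send_failed_reply(); other work items are freed and
 * accounted in the stats.
 */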
2625 static void binder_release_work(struct list_head *list)
2627 struct binder_work *w;
2629 while (!list_empty(list)) {
2630 w = list_first_entry(list, struct binder_work, entry);
2631 list_del_init(&w->entry);
2633 case BINDER_WORK_TRANSACTION: {
2634 struct binder_transaction *t;
2636 t = container_of(w, struct binder_transaction, work);
2637 if (t->buffer->target_node &&
2638 !(t->flags & TF_ONE_WAY)) {
2639 binder_send_failed_reply(t, BR_DEAD_REPLY);
2641 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2642 "undelivered transaction %d\n",
2644 t->buffer->transaction = NULL;
2646 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2649 case BINDER_WORK_TRANSACTION_COMPLETE: {
2650 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2651 "undelivered TRANSACTION_COMPLETE\n");
2653 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2655 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2656 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2657 struct binder_ref_death *death;
2659 death = container_of(w, struct binder_ref_death, work);
2660 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2661 "undelivered death notification, %016llx\n",
2662 (u64)death->cookie);
2664 binder_stats_deleted(BINDER_STAT_DEATH);
2667 pr_err("unexpected work type, %d, not freed\n",
2675 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2677 struct binder_thread *thread = NULL;
2678 struct rb_node *parent = NULL;
2679 struct rb_node **p = &proc->threads.rb_node;
2683 thread = rb_entry(parent, struct binder_thread, rb_node);
2685 if (current->pid < thread->pid)
2687 else if (current->pid > thread->pid)
2688 p = &(*p)->rb_right;
2693 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2696 binder_stats_created(BINDER_STAT_THREAD);
2697 thread->proc = proc;
2698 thread->pid = current->pid;
2699 init_waitqueue_head(&thread->wait);
2700 INIT_LIST_HEAD(&thread->todo);
2701 rb_link_node(&thread->rb_node, parent, p);
2702 rb_insert_color(&thread->rb_node, &proc->threads);
2703 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2704 thread->return_error = BR_OK;
2705 thread->return_error2 = BR_OK;
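/*
 * Tear down a thread on BINDER_THREAD_EXIT or process release: unlink it
 * from proc->threads, unwind its transaction stack (replying with
 * BR_DEAD_REPLY where the other side is still waiting), release its pending
 * work and return the number of transactions that were still active.
 */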
2710 static int binder_free_thread(struct binder_proc *proc,
2711 struct binder_thread *thread)
2713 struct binder_transaction *t;
2714 struct binder_transaction *send_reply = NULL;
2715 int active_transactions = 0;
2717 rb_erase(&thread->rb_node, &proc->threads);
2718 t = thread->transaction_stack;
2719 if (t && t->to_thread == thread)
2722 active_transactions++;
2723 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2724 "release %d:%d transaction %d %s, still active\n",
2725 proc->pid, thread->pid,
2727 (t->to_thread == thread) ? "in" : "out");
2729 if (t->to_thread == thread) {
2731 t->to_thread = NULL;
2733 t->buffer->transaction = NULL;
2737 } else if (t->from == thread) {
2744 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
2745 binder_release_work(&thread->todo);
2747 binder_stats_deleted(BINDER_STAT_THREAD);
2748 return active_transactions;
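/*
 * poll() support: report the file readable when the calling thread (or,
 * for an otherwise idle thread, its process) has binder work queued.
 */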
2751 static unsigned int binder_poll(struct file *filp,
2752 struct poll_table_struct *wait)
2754 struct binder_proc *proc = filp->private_data;
2755 struct binder_thread *thread = NULL;
2756 int wait_for_proc_work;
2758 binder_lock(__func__);
2760 thread = binder_get_thread(proc);
2762 wait_for_proc_work = thread->transaction_stack == NULL &&
2763 list_empty(&thread->todo) && thread->return_error == BR_OK;
2765 binder_unlock(__func__);
2767 if (wait_for_proc_work) {
2768 if (binder_has_proc_work(proc, thread))
2770 poll_wait(filp, &proc->wait, wait);
2771 if (binder_has_proc_work(proc, thread))
2774 if (binder_has_thread_work(thread))
2776 poll_wait(filp, &thread->wait, wait);
2777 if (binder_has_thread_work(thread))
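/*
 * Handle BINDER_WRITE_READ: copy in a struct binder_write_read, feed
 * bwr.write_buffer to binder_thread_write(), then (if read_size > 0) let
 * binder_thread_read() fill bwr.read_buffer, and copy the consumed counts
 * back to user space.  User space drives this with, roughly:
 *
 *	struct binder_write_read bwr = {
 *		.write_size  = out_len, .write_buffer = (uintptr_t)out,
 *		.read_size   = in_len,  .read_buffer  = (uintptr_t)in,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * (illustrative sketch only; libbinder's IPCThreadState wraps this call)
 */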
2783 static int binder_ioctl_write_read(struct file *filp,
2784 unsigned int cmd, unsigned long arg,
2785 struct binder_thread *thread)
2788 struct binder_proc *proc = filp->private_data;
2789 unsigned int size = _IOC_SIZE(cmd);
2790 void __user *ubuf = (void __user *)arg;
2791 struct binder_write_read bwr;
2793 if (size != sizeof(struct binder_write_read)) {
2797 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
2801 binder_debug(BINDER_DEBUG_READ_WRITE,
2802 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
2803 proc->pid, thread->pid,
2804 (u64)bwr.write_size, (u64)bwr.write_buffer,
2805 (u64)bwr.read_size, (u64)bwr.read_buffer);
2807 if (bwr.write_size > 0) {
2808 ret = binder_thread_write(proc, thread,
2811 &bwr.write_consumed);
2812 trace_binder_write_done(ret);
2814 bwr.read_consumed = 0;
2815 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2820 if (bwr.read_size > 0) {
2821 ret = binder_thread_read(proc, thread, bwr.read_buffer,
2824 filp->f_flags & O_NONBLOCK);
2825 trace_binder_read_done(ret);
2826 if (!list_empty(&proc->todo))
2827 wake_up_interruptible(&proc->wait);
2829 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
2834 binder_debug(BINDER_DEBUG_READ_WRITE,
2835 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
2836 proc->pid, thread->pid,
2837 (u64)bwr.write_consumed, (u64)bwr.write_size,
2838 (u64)bwr.read_consumed, (u64)bwr.read_size);
2839 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
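/*
 * Handle BINDER_SET_CONTEXT_MGR: after the LSM check, record the caller's
 * euid and create node 0 for this context so the caller can act as the
 * context manager (handle 0, i.e. the servicemanager).  Fails if a manager
 * is already registered or the euid does not match a previously set one.
 */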
2847 static int binder_ioctl_set_ctx_mgr(struct file *filp)
2850 struct binder_proc *proc = filp->private_data;
2851 struct binder_context *context = proc->context;
2853 kuid_t curr_euid = current_euid();
2855 if (context->binder_context_mgr_node) {
2856 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
2860 ret = security_binder_set_context_mgr(proc->tsk);
2863 if (uid_valid(context->binder_context_mgr_uid)) {
2864 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
2865 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
2866 from_kuid(&init_user_ns, curr_euid),
2867 from_kuid(&init_user_ns,
2868 context->binder_context_mgr_uid));
2873 context->binder_context_mgr_uid = curr_euid;
2875 context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
2876 if (!context->binder_context_mgr_node) {
2880 context->binder_context_mgr_node->local_weak_refs++;
2881 context->binder_context_mgr_node->local_strong_refs++;
2882 context->binder_context_mgr_node->has_strong_ref = 1;
2883 context->binder_context_mgr_node->has_weak_ref = 1;
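/*
 * Top-level ioctl dispatcher: takes the global binder lock, looks up (or
 * creates) the calling thread, and handles BINDER_WRITE_READ,
 * BINDER_SET_MAX_THREADS, BINDER_SET_CONTEXT_MGR, BINDER_THREAD_EXIT and
 * BINDER_VERSION.
 */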
2888 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2891 struct binder_proc *proc = filp->private_data;
2892 struct binder_thread *thread;
2893 unsigned int size = _IOC_SIZE(cmd);
2894 void __user *ubuf = (void __user *)arg;
2896 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
2897 proc->pid, current->pid, cmd, arg);*/
2899 trace_binder_ioctl(cmd, arg);
2901 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2905 binder_lock(__func__);
2906 thread = binder_get_thread(proc);
2907 if (thread == NULL) {
2913 case BINDER_WRITE_READ:
2914 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
2918 case BINDER_SET_MAX_THREADS:
2919 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2924 case BINDER_SET_CONTEXT_MGR:
2925 ret = binder_ioctl_set_ctx_mgr(filp);
2929 case BINDER_THREAD_EXIT:
2930 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
2931 proc->pid, thread->pid);
2932 binder_free_thread(proc, thread);
2935 case BINDER_VERSION: {
2936 struct binder_version __user *ver = ubuf;
2938 if (size != sizeof(struct binder_version)) {
2942 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
2943 &ver->protocol_version)) {
2956 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
2957 binder_unlock(__func__);
2958 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
2959 if (ret && ret != -ERESTARTSYS)
2960 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
2962 trace_binder_ioctl_done(ret);
2966 static void binder_vma_open(struct vm_area_struct *vma)
2968 struct binder_proc *proc = vma->vm_private_data;
2970 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2971 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2972 proc->pid, vma->vm_start, vma->vm_end,
2973 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2974 (unsigned long)pgprot_val(vma->vm_page_prot));
2977 static void binder_vma_close(struct vm_area_struct *vma)
2979 struct binder_proc *proc = vma->vm_private_data;
2981 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2982 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2983 proc->pid, vma->vm_start, vma->vm_end,
2984 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2985 (unsigned long)pgprot_val(vma->vm_page_prot));
2987 proc->vma_vm_mm = NULL;
2988 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2991 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2993 return VM_FAULT_SIGBUS;
2996 static const struct vm_operations_struct binder_vm_ops = {
2997 .open = binder_vma_open,
2998 .close = binder_vma_close,
2999 .fault = binder_vm_fault,
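/*
 * Set up the per-process binder buffer: the mapping is capped at 4MB and
 * forced read-only for user space, a matching kernel vm area is reserved
 * with get_vm_area(), the page pointer array is allocated, and the first
 * page is mapped so the initial free buffer can be inserted.
 * user_buffer_offset records the constant difference between the user and
 * kernel addresses of the same buffer.
 */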
3002 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
3005 struct vm_struct *area;
3006 struct binder_proc *proc = filp->private_data;
3007 const char *failure_string;
3008 struct binder_buffer *buffer;
3010 if (proc->tsk != current)
3013 if ((vma->vm_end - vma->vm_start) > SZ_4M)
3014 vma->vm_end = vma->vm_start + SZ_4M;
3016 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3017 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
3018 proc->pid, vma->vm_start, vma->vm_end,
3019 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3020 (unsigned long)pgprot_val(vma->vm_page_prot));
3022 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
3024 failure_string = "bad vm_flags";
3027 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
3029 mutex_lock(&binder_mmap_lock);
3032 failure_string = "already mapped";
3033 goto err_already_mapped;
3036 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
3039 failure_string = "get_vm_area";
3040 goto err_get_vm_area_failed;
3042 proc->buffer = area->addr;
3043 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
3044 mutex_unlock(&binder_mmap_lock);
3046 #ifdef CONFIG_CPU_CACHE_VIPT
3047 if (cache_is_vipt_aliasing()) {
3048 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
3049 pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
3050 vma->vm_start += PAGE_SIZE;
3054 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
3055 if (proc->pages == NULL) {
3057 failure_string = "alloc page array";
3058 goto err_alloc_pages_failed;
3060 proc->buffer_size = vma->vm_end - vma->vm_start;
3062 vma->vm_ops = &binder_vm_ops;
3063 vma->vm_private_data = proc;
3065 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
3067 failure_string = "alloc small buf";
3068 goto err_alloc_small_buf_failed;
3070 buffer = proc->buffer;
3071 INIT_LIST_HEAD(&proc->buffers);
3072 list_add(&buffer->entry, &proc->buffers);
3074 binder_insert_free_buffer(proc, buffer);
3075 proc->free_async_space = proc->buffer_size / 2;
3077 proc->files = get_files_struct(current);
3079 proc->vma_vm_mm = vma->vm_mm;
3081 /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
3082 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
3085 err_alloc_small_buf_failed:
3088 err_alloc_pages_failed:
3089 mutex_lock(&binder_mmap_lock);
3090 vfree(proc->buffer);
3091 proc->buffer = NULL;
3092 err_get_vm_area_failed:
3094 mutex_unlock(&binder_mmap_lock);
3096 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
3097 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
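/*
 * Create the per-process binder_proc state on open(): take a reference on
 * the opening task, initialise the todo list and wait queue, bind the proc
 * to the binder_device context that was opened, add it to binder_procs and
 * create its debugfs entry.
 */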
3101 static int binder_open(struct inode *nodp, struct file *filp)
3103 struct binder_proc *proc;
3104 struct binder_device *binder_dev;
3106 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
3107 current->group_leader->pid, current->pid);
3109 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
3112 get_task_struct(current);
3113 proc->tsk = current;
3114 INIT_LIST_HEAD(&proc->todo);
3115 init_waitqueue_head(&proc->wait);
3116 proc->default_priority = task_nice(current);
3117 binder_dev = container_of(filp->private_data, struct binder_device,
3119 proc->context = &binder_dev->context;
3121 binder_lock(__func__);
3123 binder_stats_created(BINDER_STAT_PROC);
3124 hlist_add_head(&proc->proc_node, &binder_procs);
3125 proc->pid = current->group_leader->pid;
3126 INIT_LIST_HEAD(&proc->delivered_death);
3127 filp->private_data = proc;
3129 binder_unlock(__func__);
3131 if (binder_debugfs_dir_entry_proc) {
3134 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
3136 * proc debug entries are shared between contexts, so
3137 * this will fail if the process tries to open the driver
3138 * again with a different context. The printing code will
3139 * anyway print all contexts that a given PID has, so this
3140 * is not a problem.
3142 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
3143 binder_debugfs_dir_entry_proc,
3144 (void *)(unsigned long)proc->pid,
3151 static int binder_flush(struct file *filp, fl_owner_t id)
3153 struct binder_proc *proc = filp->private_data;
3155 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
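/*
 * Deferred flush work: mark every thread of the process with
 * BINDER_LOOPER_STATE_NEED_RETURN and wake any waiters so blocked
 * binder_thread_read() calls return to user space.
 */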
3160 static void binder_deferred_flush(struct binder_proc *proc)
3165 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3166 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
3168 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3169 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3170 wake_up_interruptible(&thread->wait);
3174 wake_up_interruptible_all(&proc->wait);
3176 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3177 "binder_flush: %d woke %d threads\n", proc->pid,
3181 static int binder_release(struct inode *nodp, struct file *filp)
3183 struct binder_proc *proc = filp->private_data;
3185 debugfs_remove(proc->debugfs_entry);
3186 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
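/*
 * Called during process release for each node the process still owns.
 * Nodes without references are freed immediately; otherwise the node is
 * moved to binder_dead_nodes and a BINDER_WORK_DEAD_BINDER item is queued
 * for every ref that requested a death notification.  Returns the updated
 * ref count used for the release statistics.
 */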
3191 static int binder_node_release(struct binder_node *node, int refs)
3193 struct binder_ref *ref;
3196 list_del_init(&node->work.entry);
3197 binder_release_work(&node->async_todo);
3199 if (hlist_empty(&node->refs)) {
3201 binder_stats_deleted(BINDER_STAT_NODE);
3207 node->local_strong_refs = 0;
3208 node->local_weak_refs = 0;
3209 hlist_add_head(&node->dead_node, &binder_dead_nodes);
3211 hlist_for_each_entry(ref, &node->refs, node_entry) {
3219 if (list_empty(&ref->death->work.entry)) {
3220 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3221 list_add_tail(&ref->death->work.entry,
3223 wake_up_interruptible(&ref->proc->wait);
3228 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3229 "node %d now dead, refs %d, death %d\n",
3230 node->debug_id, refs, death);
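/*
 * Final teardown of a binder_proc once the fd is closed and the deferred
 * work has run: free every thread, node, ref and buffer the process still
 * owns, release any still-mapped pages and the kernel buffer area, drop the
 * task reference and log the totals.
 */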
3235 static void binder_deferred_release(struct binder_proc *proc)
3237 struct binder_transaction *t;
3238 struct binder_context *context = proc->context;
3240 int threads, nodes, incoming_refs, outgoing_refs, buffers,
3241 active_transactions, page_count;
3244 BUG_ON(proc->files);
3246 hlist_del(&proc->proc_node);
3248 if (context->binder_context_mgr_node &&
3249 context->binder_context_mgr_node->proc == proc) {
3250 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3251 "%s: %d context_mgr_node gone\n",
3252 __func__, proc->pid);
3253 context->binder_context_mgr_node = NULL;
3257 active_transactions = 0;
3258 while ((n = rb_first(&proc->threads))) {
3259 struct binder_thread *thread;
3261 thread = rb_entry(n, struct binder_thread, rb_node);
3263 active_transactions += binder_free_thread(proc, thread);
3268 while ((n = rb_first(&proc->nodes))) {
3269 struct binder_node *node;
3271 node = rb_entry(n, struct binder_node, rb_node);
3273 rb_erase(&node->rb_node, &proc->nodes);
3274 incoming_refs = binder_node_release(node, incoming_refs);
3278 while ((n = rb_first(&proc->refs_by_desc))) {
3279 struct binder_ref *ref;
3281 ref = rb_entry(n, struct binder_ref, rb_node_desc);
3283 binder_delete_ref(ref);
3286 binder_release_work(&proc->todo);
3287 binder_release_work(&proc->delivered_death);
3290 while ((n = rb_first(&proc->allocated_buffers))) {
3291 struct binder_buffer *buffer;
3293 buffer = rb_entry(n, struct binder_buffer, rb_node);
3295 t = buffer->transaction;
3298 buffer->transaction = NULL;
3299 pr_err("release proc %d, transaction %d, not freed\n",
3300 proc->pid, t->debug_id);
3304 binder_free_buf(proc, buffer);
3308 binder_stats_deleted(BINDER_STAT_PROC);
3314 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3317 if (!proc->pages[i])
3320 page_addr = proc->buffer + i * PAGE_SIZE;
3321 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
3322 "%s: %d: page %d at %p not freed\n",
3323 __func__, proc->pid, i, page_addr);
3324 unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
3325 __free_page(proc->pages[i]);
3329 vfree(proc->buffer);
3332 put_task_struct(proc->tsk);
3334 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3335 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
3336 __func__, proc->pid, threads, nodes, incoming_refs,
3337 outgoing_refs, active_transactions, buffers, page_count);
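/*
 * Workqueue handler that drains binder_deferred_list, one proc per
 * iteration, performing the PUT_FILES, FLUSH and RELEASE actions recorded
 * by binder_defer_work().
 */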
3342 static void binder_deferred_func(struct work_struct *work)
3344 struct binder_proc *proc;
3345 struct files_struct *files;
3350 binder_lock(__func__);
3351 mutex_lock(&binder_deferred_lock);
3352 if (!hlist_empty(&binder_deferred_list)) {
3353 proc = hlist_entry(binder_deferred_list.first,
3354 struct binder_proc, deferred_work_node);
3355 hlist_del_init(&proc->deferred_work_node);
3356 defer = proc->deferred_work;
3357 proc->deferred_work = 0;
3362 mutex_unlock(&binder_deferred_lock);
3365 if (defer & BINDER_DEFERRED_PUT_FILES) {
3366 files = proc->files;
3371 if (defer & BINDER_DEFERRED_FLUSH)
3372 binder_deferred_flush(proc);
3374 if (defer & BINDER_DEFERRED_RELEASE)
3375 binder_deferred_release(proc); /* frees proc */
3377 binder_unlock(__func__);
3379 put_files_struct(files);
3382 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
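/*
 * Record a deferred action for @proc and, if the proc is not already
 * queued, add it to binder_deferred_list and kick the binder workqueue.
 */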
3385 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3387 mutex_lock(&binder_deferred_lock);
3388 proc->deferred_work |= defer;
3389 if (hlist_unhashed(&proc->deferred_work_node)) {
3390 hlist_add_head(&proc->deferred_work_node,
3391 &binder_deferred_list);
3392 queue_work(binder_deferred_workqueue, &binder_deferred_work);
3394 mutex_unlock(&binder_deferred_lock);
3397 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3398 struct binder_transaction *t)
3401 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3402 prefix, t->debug_id, t,
3403 t->from ? t->from->proc->pid : 0,
3404 t->from ? t->from->pid : 0,
3405 t->to_proc ? t->to_proc->pid : 0,
3406 t->to_thread ? t->to_thread->pid : 0,
3407 t->code, t->flags, t->priority, t->need_reply);
3408 if (t->buffer == NULL) {
3409 seq_puts(m, " buffer free\n");
3412 if (t->buffer->target_node)
3413 seq_printf(m, " node %d",
3414 t->buffer->target_node->debug_id);
3415 seq_printf(m, " size %zd:%zd data %p\n",
3416 t->buffer->data_size, t->buffer->offsets_size,
3420 static void print_binder_buffer(struct seq_file *m, const char *prefix,
3421 struct binder_buffer *buffer)
3423 seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
3424 prefix, buffer->debug_id, buffer->data,
3425 buffer->data_size, buffer->offsets_size,
3426 buffer->transaction ? "active" : "delivered");
3429 static void print_binder_work(struct seq_file *m, const char *prefix,
3430 const char *transaction_prefix,
3431 struct binder_work *w)
3433 struct binder_node *node;
3434 struct binder_transaction *t;
3437 case BINDER_WORK_TRANSACTION:
3438 t = container_of(w, struct binder_transaction, work);
3439 print_binder_transaction(m, transaction_prefix, t);
3441 case BINDER_WORK_TRANSACTION_COMPLETE:
3442 seq_printf(m, "%stransaction complete\n", prefix);
3444 case BINDER_WORK_NODE:
3445 node = container_of(w, struct binder_node, work);
3446 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3447 prefix, node->debug_id,
3448 (u64)node->ptr, (u64)node->cookie);
3450 case BINDER_WORK_DEAD_BINDER:
3451 seq_printf(m, "%shas dead binder\n", prefix);
3453 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3454 seq_printf(m, "%shas cleared dead binder\n", prefix);
3456 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3457 seq_printf(m, "%shas cleared death notification\n", prefix);
3460 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3465 static void print_binder_thread(struct seq_file *m,
3466 struct binder_thread *thread,
3469 struct binder_transaction *t;
3470 struct binder_work *w;
3471 size_t start_pos = m->count;
3474 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
3475 header_pos = m->count;
3476 t = thread->transaction_stack;
3478 if (t->from == thread) {
3479 print_binder_transaction(m,
3480 " outgoing transaction", t);
3482 } else if (t->to_thread == thread) {
3483 print_binder_transaction(m,
3484 " incoming transaction", t);
3487 print_binder_transaction(m, " bad transaction", t);
3491 list_for_each_entry(w, &thread->todo, entry) {
3492 print_binder_work(m, " ", " pending transaction", w);
3494 if (!print_always && m->count == header_pos)
3495 m->count = start_pos;
3498 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3500 struct binder_ref *ref;
3501 struct binder_work *w;
3505 hlist_for_each_entry(ref, &node->refs, node_entry)
3508 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3509 node->debug_id, (u64)node->ptr, (u64)node->cookie,
3510 node->has_strong_ref, node->has_weak_ref,
3511 node->local_strong_refs, node->local_weak_refs,
3512 node->internal_strong_refs, count);
3514 seq_puts(m, " proc");
3515 hlist_for_each_entry(ref, &node->refs, node_entry)
3516 seq_printf(m, " %d", ref->proc->pid);
3519 list_for_each_entry(w, &node->async_todo, entry)
3520 print_binder_work(m, " ",
3521 " pending async transaction", w);
3524 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3526 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
3527 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3528 ref->node->debug_id, ref->strong, ref->weak, ref->death);
3531 static void print_binder_proc(struct seq_file *m,
3532 struct binder_proc *proc, int print_all)
3534 struct binder_work *w;
3536 size_t start_pos = m->count;
3539 seq_printf(m, "proc %d\n", proc->pid);
3540 seq_printf(m, "context %s\n", proc->context->name);
3541 header_pos = m->count;
3543 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3544 print_binder_thread(m, rb_entry(n, struct binder_thread,
3545 rb_node), print_all);
3546 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3547 struct binder_node *node = rb_entry(n, struct binder_node,
3549 if (print_all || node->has_async_transaction)
3550 print_binder_node(m, node);
3553 for (n = rb_first(&proc->refs_by_desc);
3556 print_binder_ref(m, rb_entry(n, struct binder_ref,
3559 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3560 print_binder_buffer(m, " buffer",
3561 rb_entry(n, struct binder_buffer, rb_node));
3562 list_for_each_entry(w, &proc->todo, entry)
3563 print_binder_work(m, " ", " pending transaction", w);
3564 list_for_each_entry(w, &proc->delivered_death, entry) {
3565 seq_puts(m, " has delivered dead binder\n");
3568 if (!print_all && m->count == header_pos)
3569 m->count = start_pos;
3572 static const char * const binder_return_strings[] = {
3577 "BR_ACQUIRE_RESULT",
3579 "BR_TRANSACTION_COMPLETE",
3584 "BR_ATTEMPT_ACQUIRE",
3589 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3593 static const char * const binder_command_strings[] = {
3596 "BC_ACQUIRE_RESULT",
3604 "BC_ATTEMPT_ACQUIRE",
3605 "BC_REGISTER_LOOPER",
3608 "BC_REQUEST_DEATH_NOTIFICATION",
3609 "BC_CLEAR_DEATH_NOTIFICATION",
3610 "BC_DEAD_BINDER_DONE"
3613 static const char * const binder_objstat_strings[] = {
3620 "transaction_complete"
3623 static void print_binder_stats(struct seq_file *m, const char *prefix,
3624 struct binder_stats *stats)
3628 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3629 ARRAY_SIZE(binder_command_strings));
3630 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3632 seq_printf(m, "%s%s: %d\n", prefix,
3633 binder_command_strings[i], stats->bc[i]);
3636 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3637 ARRAY_SIZE(binder_return_strings));
3638 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3640 seq_printf(m, "%s%s: %d\n", prefix,
3641 binder_return_strings[i], stats->br[i]);
3644 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3645 ARRAY_SIZE(binder_objstat_strings));
3646 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3647 ARRAY_SIZE(stats->obj_deleted));
3648 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3649 if (stats->obj_created[i] || stats->obj_deleted[i])
3650 seq_printf(m, "%s%s: active %d total %d\n", prefix,
3651 binder_objstat_strings[i],
3652 stats->obj_created[i] - stats->obj_deleted[i],
3653 stats->obj_created[i]);
3657 static void print_binder_proc_stats(struct seq_file *m,
3658 struct binder_proc *proc)
3660 struct binder_work *w;
3662 int count, strong, weak;
3664 seq_printf(m, "proc %d\n", proc->pid);
3665 seq_printf(m, "context %s\n", proc->context->name);
3667 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3669 seq_printf(m, " threads: %d\n", count);
3670 seq_printf(m, " requested threads: %d+%d/%d\n"
3671 " ready threads %d\n"
3672 " free async space %zd\n", proc->requested_threads,
3673 proc->requested_threads_started, proc->max_threads,
3674 proc->ready_threads, proc->free_async_space);
3676 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3678 seq_printf(m, " nodes: %d\n", count);
3682 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3683 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3686 strong += ref->strong;
3689 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
3692 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3694 seq_printf(m, " buffers: %d\n", count);
3697 list_for_each_entry(w, &proc->todo, entry) {
3699 case BINDER_WORK_TRANSACTION:
3706 seq_printf(m, " pending transactions: %d\n", count);
3708 print_binder_stats(m, " ", &proc->stats);
3712 static int binder_state_show(struct seq_file *m, void *unused)
3714 struct binder_proc *proc;
3715 struct binder_node *node;
3716 int do_lock = !binder_debug_no_lock;
3719 binder_lock(__func__);
3721 seq_puts(m, "binder state:\n");
3723 if (!hlist_empty(&binder_dead_nodes))
3724 seq_puts(m, "dead nodes:\n");
3725 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
3726 print_binder_node(m, node);
3728 hlist_for_each_entry(proc, &binder_procs, proc_node)
3729 print_binder_proc(m, proc, 1);
3731 binder_unlock(__func__);
3735 static int binder_stats_show(struct seq_file *m, void *unused)
3737 struct binder_proc *proc;
3738 int do_lock = !binder_debug_no_lock;
3741 binder_lock(__func__);
3743 seq_puts(m, "binder stats:\n");
3745 print_binder_stats(m, "", &binder_stats);
3747 hlist_for_each_entry(proc, &binder_procs, proc_node)
3748 print_binder_proc_stats(m, proc);
3750 binder_unlock(__func__);
3754 static int binder_transactions_show(struct seq_file *m, void *unused)
3756 struct binder_proc *proc;
3757 int do_lock = !binder_debug_no_lock;
3760 binder_lock(__func__);
3762 seq_puts(m, "binder transactions:\n");
3763 hlist_for_each_entry(proc, &binder_procs, proc_node)
3764 print_binder_proc(m, proc, 0);
3766 binder_unlock(__func__);
3770 static int binder_proc_show(struct seq_file *m, void *unused)
3772 struct binder_proc *itr;
3773 int pid = (unsigned long)m->private;
3774 int do_lock = !binder_debug_no_lock;
3777 binder_lock(__func__);
3779 hlist_for_each_entry(itr, &binder_procs, proc_node) {
3780 if (itr->pid == pid) {
3781 seq_puts(m, "binder proc state:\n");
3782 print_binder_proc(m, itr, 1);
3786 binder_unlock(__func__);
3790 static void print_binder_transaction_log_entry(struct seq_file *m,
3791 struct binder_transaction_log_entry *e)
3794 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
3795 e->debug_id, (e->call_type == 2) ? "reply" :
3796 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3797 e->from_thread, e->to_proc, e->to_thread, e->context_name,
3798 e->to_node, e->target_handle, e->data_size, e->offsets_size);
3801 static int binder_transaction_log_show(struct seq_file *m, void *unused)
3803 struct binder_transaction_log *log = m->private;
3807 for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3808 print_binder_transaction_log_entry(m, &log->entry[i]);
3810 for (i = 0; i < log->next; i++)
3811 print_binder_transaction_log_entry(m, &log->entry[i]);
3815 static const struct file_operations binder_fops = {
3816 .owner = THIS_MODULE,
3817 .poll = binder_poll,
3818 .unlocked_ioctl = binder_ioctl,
3819 .compat_ioctl = binder_ioctl,
3820 .mmap = binder_mmap,
3821 .open = binder_open,
3822 .flush = binder_flush,
3823 .release = binder_release,
3826 BINDER_DEBUG_ENTRY(state);
3827 BINDER_DEBUG_ENTRY(stats);
3828 BINDER_DEBUG_ENTRY(transactions);
3829 BINDER_DEBUG_ENTRY(transaction_log);
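/*
 * Allocate and register one binder misc device (e.g. /dev/binder) together
 * with its binder_context, then add it to the global binder_devices list.
 */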
3831 static int __init init_binder_device(const char *name)
3834 struct binder_device *binder_device;
3836 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
3840 binder_device->miscdev.fops = &binder_fops;
3841 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
3842 binder_device->miscdev.name = name;
3844 binder_device->context.binder_context_mgr_uid = INVALID_UID;
3845 binder_device->context.name = name;
3847 ret = misc_register(&binder_device->miscdev);
3849 kfree(binder_device);
3853 hlist_add_head(&binder_device->hlist, &binder_devices);
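/*
 * Module init: create the deferred workqueue and the debugfs hierarchy,
 * then register one binder device per name listed in the "devices" module
 * parameter.
 */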
3858 static int __init binder_init(void)
3861 char *device_name, *device_names;
3862 struct binder_device *device;
3863 struct hlist_node *tmp;
3865 binder_deferred_workqueue = create_singlethread_workqueue("binder");
3866 if (!binder_deferred_workqueue)
3869 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3870 if (binder_debugfs_dir_entry_root)
3871 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3872 binder_debugfs_dir_entry_root);
3874 if (binder_debugfs_dir_entry_root) {
3875 debugfs_create_file("state",
3877 binder_debugfs_dir_entry_root,
3879 &binder_state_fops);
3880 debugfs_create_file("stats",
3882 binder_debugfs_dir_entry_root,
3884 &binder_stats_fops);
3885 debugfs_create_file("transactions",
3887 binder_debugfs_dir_entry_root,
3889 &binder_transactions_fops);
3890 debugfs_create_file("transaction_log",
3892 binder_debugfs_dir_entry_root,
3893 &binder_transaction_log,
3894 &binder_transaction_log_fops);
3895 debugfs_create_file("failed_transaction_log",
3897 binder_debugfs_dir_entry_root,
3898 &binder_transaction_log_failed,
3899 &binder_transaction_log_fops);
3903 * Copy the module parameter string, because we don't want to
3904 * tokenize it in-place.
3906 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
3907 if (!device_names) {
3909 goto err_alloc_device_names_failed;
3911 strcpy(device_names, binder_devices_param);
3913 while ((device_name = strsep(&device_names, ","))) {
3914 ret = init_binder_device(device_name);
3916 goto err_init_binder_device_failed;
3921 err_init_binder_device_failed:
3922 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
3923 misc_deregister(&device->miscdev);
3924 hlist_del(&device->hlist);
3927 err_alloc_device_names_failed:
3928 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
3930 destroy_workqueue(binder_deferred_workqueue);
3935 device_initcall(binder_init);
3937 #define CREATE_TRACE_POINTS
3938 #include "binder_trace.h"
3940 MODULE_LICENSE("GPL v2");