4e1187df0785e6719228e4f298ef56c84115c8c7
[firefly-linux-kernel-4.4.55.git] / drivers / android / binder.c
1 /* binder.c
2  *
3  * Android IPC Subsystem
4  *
5  * Copyright (C) 2007-2008 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <asm/cacheflush.h>
21 #include <linux/fdtable.h>
22 #include <linux/file.h>
23 #include <linux/freezer.h>
24 #include <linux/fs.h>
25 #include <linux/list.h>
26 #include <linux/miscdevice.h>
27 #include <linux/mm.h>
28 #include <linux/module.h>
29 #include <linux/mutex.h>
30 #include <linux/nsproxy.h>
31 #include <linux/poll.h>
32 #include <linux/debugfs.h>
33 #include <linux/rbtree.h>
34 #include <linux/sched.h>
35 #include <linux/seq_file.h>
36 #include <linux/uaccess.h>
37 #include <linux/vmalloc.h>
38 #include <linux/slab.h>
39 #include <linux/pid_namespace.h>
40 #include <linux/security.h>
41
42 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
43 #define BINDER_IPC_32BIT 1
44 #endif
45
46 #include <uapi/linux/android/binder.h>
47 #include "binder_trace.h"
48
49 static DEFINE_MUTEX(binder_main_lock);
50 static DEFINE_MUTEX(binder_deferred_lock);
51 static DEFINE_MUTEX(binder_mmap_lock);
52
53 static HLIST_HEAD(binder_devices);
54 static HLIST_HEAD(binder_procs);
55 static HLIST_HEAD(binder_deferred_list);
56 static HLIST_HEAD(binder_dead_nodes);
57
58 static struct dentry *binder_debugfs_dir_entry_root;
59 static struct dentry *binder_debugfs_dir_entry_proc;
60 static int binder_last_id;
61 static struct workqueue_struct *binder_deferred_workqueue;
62
/*
 * BINDER_DEBUG_ENTRY(name) - boilerplate for a read-only debugfs file.
 *
 * Generates binder_<name>_open() plus a binder_<name>_fops that routes
 * reads through the seq_file single_open() helpers; the content comes
 * from a binder_<name>_show() function supplied by the user of the macro.
 */
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
79
80 /* This is only defined in include/asm-arm/sizes.h */
81 #ifndef SZ_1K
82 #define SZ_1K                               0x400
83 #endif
84
85 #ifndef SZ_4M
86 #define SZ_4M                               0x400000
87 #endif
88
89 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
90
91 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
92
93 enum {
94         BINDER_DEBUG_USER_ERROR             = 1U << 0,
95         BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
96         BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
97         BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
98         BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
99         BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
100         BINDER_DEBUG_READ_WRITE             = 1U << 6,
101         BINDER_DEBUG_USER_REFS              = 1U << 7,
102         BINDER_DEBUG_THREADS                = 1U << 8,
103         BINDER_DEBUG_TRANSACTION            = 1U << 9,
104         BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
105         BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
106         BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
107         BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
108         BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
109         BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
110 };
111 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
112         BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
113 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
114
115 static bool binder_debug_no_lock;
116 module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
117
118 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
119 module_param_named(devices, binder_devices_param, charp, S_IRUGO);
120
121 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
122 static int binder_stop_on_user_error;
123
/*
 * Module-parameter setter for "stop_on_user_error".
 *
 * Stores the new value via param_set_int() and, when the stored value is
 * now below 2, wakes writers sleeping on binder_user_error_wait (they
 * block while the value is 2, i.e. after a user error was latched by
 * the binder_user_error() macro).
 */
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
134 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
135         param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
136
/* Log only when the corresponding bit is set in binder_debug_mask. */
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

/*
 * Report a userspace-caused error.  When stop_on_user_error is enabled,
 * latch the value 2 so subsequent callers block until an administrator
 * lowers it again (see binder_set_stop_on_user_error()).
 */
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

/* Recover the containing object from its embedded 'hdr' member. */
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
155
/* Object categories tracked in struct binder_stats. */
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT	/* must be last */
};

struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];	  /* per BR_* return code */
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; /* per BC_* command */
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

/* Driver-wide totals; per-process copies live in binder_proc.stats. */
static struct binder_stats binder_stats;
175
/* Bump the driver-wide deletion counter for an object category. */
static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

/* Bump the driver-wide creation counter for an object category. */
static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}
185
/* One record in the fixed-size transaction debug log. */
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	const char *context_name;
};

/* Ring buffer of recent transactions; 'full' is set once it has wrapped. */
struct binder_transaction_log {
	int next;	/* index of the slot to overwrite next */
	int full;
	struct binder_transaction_log_entry entry[32];
};

/* Separate logs for all transactions and for failed ones. */
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
206
207 static struct binder_transaction_log_entry *binder_transaction_log_add(
208         struct binder_transaction_log *log)
209 {
210         struct binder_transaction_log_entry *e;
211
212         e = &log->entry[log->next];
213         memset(e, 0, sizeof(*e));
214         log->next++;
215         if (log->next == ARRAY_SIZE(log->entry)) {
216                 log->next = 0;
217                 log->full = 1;
218         }
219         return e;
220 }
221
/* Per-device global state: the context manager node/uid and device name. */
struct binder_context {
	struct binder_node *binder_context_mgr_node; /* node of the context manager */
	kuid_t binder_context_mgr_uid;	/* uid allowed to be context manager */
	const char *name;		/* device name (from the devices param) */
};

/* One registered binder misc device; all live on the binder_devices list. */
struct binder_device {
	struct hlist_node hlist;	/* entry in binder_devices */
	struct miscdevice miscdev;
	struct binder_context context;
};
233
/*
 * A unit of deferred work queued on a proc, thread, or node todo list.
 * Usually embedded in a larger object; the type tag tells the consumer
 * which containing structure to recover.
 */
struct binder_work {
	struct list_head entry;		/* link into a todo list */
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};
245
/*
 * A binder object hosted by @proc.  While the owning process is alive
 * the node sits in proc->nodes; after the process dies it moves to the
 * global binder_dead_nodes list (hence the union below).
 */
struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;		/* entry in proc->nodes */
		struct hlist_node dead_node;	/* entry in binder_dead_nodes */
	};
	struct binder_proc *proc;	/* owning process */
	struct hlist_head refs;		/* all binder_refs pointing at this node */
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;		/* opaque userspace identifier */
	binder_uintptr_t cookie;	/* opaque userspace cookie */
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;		/* may transactions carry fds? */
	unsigned min_priority:8;
	struct list_head async_todo;	/* queued async transactions */
};
269
/* Pending death notification; cookie identifies it back to userspace. */
struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};

/* A per-process handle ("reference") to a binder_node. */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;	/* entry in proc->refs_by_desc */
	struct rb_node rb_node_node;	/* entry in proc->refs_by_node */
	struct hlist_node node_entry;	/* entry in node->refs */
	struct binder_proc *proc;	/* process owning this ref */
	struct binder_node *node;	/* node this ref points at */
	uint32_t desc;			/* handle value used by userspace */
	int strong;			/* strong reference count */
	int weak;			/* weak reference count */
	struct binder_ref_death *death;	/* death notification, if requested */
};
291
/*
 * Header for one chunk carved out of a process's mmap'ed transaction
 * area.  Headers are embedded at the start of each chunk; the payload
 * follows immediately in data[].
 */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;	/* transaction using this buffer */

	struct binder_node *target_node;
	size_t data_size;		/* transaction data bytes */
	size_t offsets_size;		/* object-offset table bytes */
	size_t extra_buffers_size;
	uint8_t data[0];		/* payload starts here */
};
309
/* Bits for binder_proc->deferred_work, handled by binder_defer_work(). */
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};
315
/*
 * Per-process binder state, created at open time and published on the
 * global binder_procs list.
 */
struct binder_proc {
	struct hlist_node proc_node;	/* entry in binder_procs */
	struct rb_root threads;		/* binder_threads, keyed by pid */
	struct rb_root nodes;		/* nodes this process hosts */
	struct rb_root refs_by_desc;	/* refs, keyed by handle (desc) */
	struct rb_root refs_by_node;	/* refs, keyed by node address */
	int pid;
	struct vm_area_struct *vma;	/* user mapping of the buffer area */
	struct mm_struct *vma_vm_mm;	/* mm that owns @vma (sanity-checked) */
	struct task_struct *tsk;
	struct files_struct *files;	/* target file table for fd passing */
	struct hlist_node deferred_work_node;	/* entry in binder_deferred_list */
	int deferred_work;		/* BINDER_DEFERRED_* bits */
	void *buffer;			/* kernel address of the buffer area */
	ptrdiff_t user_buffer_offset;	/* user address minus kernel address */

	struct list_head buffers;	/* every buffer, in address order */
	struct rb_root free_buffers;	/* free buffers, keyed by size */
	struct rb_root allocated_buffers; /* in-use buffers, keyed by address */
	size_t free_async_space;	/* bytes still usable for async txns */

	struct page **pages;		/* pages backing the buffer area */
	size_t buffer_size;		/* total size of the buffer area */
	uint32_t buffer_free;
	struct list_head todo;		/* work any thread of this proc may take */
	wait_queue_head_t wait;
	struct binder_stats stats;	/* per-process statistics */
	struct list_head delivered_death;
	int max_threads;		/* looper limit set by userspace */
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_context *context;	/* device context this proc opened */
};
352
/* binder_thread->looper state bits. */
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

/* Per-thread binder state, kept in proc->threads keyed by pid. */
struct binder_thread {
	struct binder_proc *proc;	/* owning process */
	struct rb_node rb_node;		/* entry in proc->threads */
	int pid;
	int looper;			/* BINDER_LOOPER_STATE_* bits */
	struct binder_transaction *transaction_stack;
	struct list_head todo;		/* work targeted at this thread */
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};
376
/* One in-flight transaction or reply between two processes. */
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;		/* sending thread */
	struct binder_transaction *from_parent;	/* sender's transaction stack */
	struct binder_proc *to_proc;		/* target process */
	struct binder_thread *to_thread;	/* target thread, once chosen */
	struct binder_transaction *to_parent;	/* target's transaction stack */
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;	/* payload in the target's buffer area */
	unsigned int	code;		/* method code from userspace */
	unsigned int	flags;		/* TF_* flags from userspace */
	long	priority;
	long	saved_priority;
	kuid_t	sender_euid;
};
395
396 static void
397 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
398
/*
 * Allocate an unused fd in the *target* proc's file table (used to pass
 * files across processes in a transaction).
 *
 * Returns the new fd, -ESRCH if the target's files_struct is already
 * gone, or -EMFILE if the target's sighand cannot be locked to read its
 * RLIMIT_NOFILE (which bounds the fd range for __alloc_fd()).
 */
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}
416
417 /*
418  * copied from fd_install
419  */
420 static void task_fd_install(
421         struct binder_proc *proc, unsigned int fd, struct file *file)
422 {
423         if (proc->files)
424                 __fd_install(proc->files, fd, file);
425 }
426
427 /*
428  * copied from sys_close
429  */
430 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
431 {
432         int retval;
433
434         if (proc->files == NULL)
435                 return -ESRCH;
436
437         retval = __close_fd(proc->files, fd);
438         /* can't restart close syscall because file table entry was cleared */
439         if (unlikely(retval == -ERESTARTSYS ||
440                      retval == -ERESTARTNOINTR ||
441                      retval == -ERESTARTNOHAND ||
442                      retval == -ERESTART_RESTARTBLOCK))
443                 retval = -EINTR;
444
445         return retval;
446 }
447
/* Take the single global binder mutex, tracing before and after acquire. */
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

/* Release the global binder mutex. */
static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}
460
/*
 * Set the caller's nice value; when @nice itself is not permitted,
 * clamp to the best value RLIMIT_NICE allows and note the cap.
 */
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	/* rlimit permitted no lowering at all: flag it as a user error */
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
478
/*
 * Size of @buffer's data area: the span from buffer->data up to the
 * next binder_buffer header in the address-ordered proc->buffers list,
 * or up to the end of the whole mmap'ed area for the last buffer.
 */
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
			  struct binder_buffer, entry) - (size_t)buffer->data;
}
487
488 static void binder_insert_free_buffer(struct binder_proc *proc,
489                                       struct binder_buffer *new_buffer)
490 {
491         struct rb_node **p = &proc->free_buffers.rb_node;
492         struct rb_node *parent = NULL;
493         struct binder_buffer *buffer;
494         size_t buffer_size;
495         size_t new_buffer_size;
496
497         BUG_ON(!new_buffer->free);
498
499         new_buffer_size = binder_buffer_size(proc, new_buffer);
500
501         binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
502                      "%d: add free buffer, size %zd, at %p\n",
503                       proc->pid, new_buffer_size, new_buffer);
504
505         while (*p) {
506                 parent = *p;
507                 buffer = rb_entry(parent, struct binder_buffer, rb_node);
508                 BUG_ON(!buffer->free);
509
510                 buffer_size = binder_buffer_size(proc, buffer);
511
512                 if (new_buffer_size < buffer_size)
513                         p = &parent->rb_left;
514                 else
515                         p = &parent->rb_right;
516         }
517         rb_link_node(&new_buffer->rb_node, parent, p);
518         rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
519 }
520
521 static void binder_insert_allocated_buffer(struct binder_proc *proc,
522                                            struct binder_buffer *new_buffer)
523 {
524         struct rb_node **p = &proc->allocated_buffers.rb_node;
525         struct rb_node *parent = NULL;
526         struct binder_buffer *buffer;
527
528         BUG_ON(new_buffer->free);
529
530         while (*p) {
531                 parent = *p;
532                 buffer = rb_entry(parent, struct binder_buffer, rb_node);
533                 BUG_ON(buffer->free);
534
535                 if (new_buffer < buffer)
536                         p = &parent->rb_left;
537                 else if (new_buffer > buffer)
538                         p = &parent->rb_right;
539                 else
540                         BUG();
541         }
542         rb_link_node(&new_buffer->rb_node, parent, p);
543         rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
544 }
545
/*
 * Translate a userspace buffer pointer back to its binder_buffer.
 *
 * @user_ptr points at the data area in the user mapping; subtracting the
 * proc's user/kernel offset and the data[] member offset recovers the
 * kernel-side header address, which is then searched for in the
 * allocated_buffers tree.  Returns NULL if no allocated buffer matches.
 */
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  uintptr_t user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}
569
/*
 * binder_update_page_range() - allocate (@allocate != 0) or free
 * (@allocate == 0) the physical pages backing [start, end) of @proc's
 * buffer area, maintaining both the kernel mapping and, when a vma is
 * available, the user mapping.
 *
 * @vma is passed in only when proc->vma is not yet usable (mmap time);
 * otherwise the proc's own vma/mm are looked up under mmap_sem.
 *
 * Returns 0 on success, -ENOMEM on failure.  On an allocation failure
 * the err_* labels fall into the free loop so partially allocated pages
 * are unwound from the failing page downwards.
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(proc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		/* refuse a stale vma belonging to a different mm */
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %p\n",
				proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

	/* frees pages high-to-low; also the unwind path for failures above */
free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}
671
/*
 * binder_alloc_buf() - allocate a transaction buffer from @proc's
 * mmap'ed area.
 *
 * The required size is data_size + offsets_size + extra_buffers_size,
 * each rounded up to pointer alignment (with overflow checks on the
 * sums).  A best-fit chunk is found in the size-keyed free_buffers
 * tree; the chunk is split when the remainder can hold another
 * binder_buffer header plus at least 4 payload bytes.  Backing pages
 * are populated via binder_update_page_range().
 *
 * Returns the buffer, or NULL on any failure: no vma, size overflow,
 * async quota exhausted (@is_async), no fitting free chunk, or page
 * allocation failure.
 */
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size,
					      size_t extra_buffers_size,
					      int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;

	if (proc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	/* overflow check on the aligned sum */
	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
				proc->pid, data_size, offsets_size);
		return NULL;
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
				  proc->pid, extra_buffers_size);
		return NULL;
	}
	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      proc->pid, size);
		return NULL;
	}

	/* best-fit search: smallest free chunk that still fits */
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			proc->pid, size);
		return NULL;
	}
	/* n != NULL means an exact-size match was found above */
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
		      proc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		/* split only if the tail can hold a header + some payload */
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		/* give the unused tail back as a new free buffer */
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %p\n",
		      proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	return buffer;
}
785
/* Page containing the binder_buffer header itself. */
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

/* Page containing the last byte of the binder_buffer header. */
static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}
795
796 static void binder_delete_free_buffer(struct binder_proc *proc,
797                                       struct binder_buffer *buffer)
798 {
799         struct binder_buffer *prev, *next = NULL;
800         int free_page_end = 1;
801         int free_page_start = 1;
802
803         BUG_ON(proc->buffers.next == &buffer->entry);
804         prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
805         BUG_ON(!prev->free);
806         if (buffer_end_page(prev) == buffer_start_page(buffer)) {
807                 free_page_start = 0;
808                 if (buffer_end_page(prev) == buffer_end_page(buffer))
809                         free_page_end = 0;
810                 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
811                              "%d: merge free, buffer %p share page with %p\n",
812                               proc->pid, buffer, prev);
813         }
814
815         if (!list_is_last(&buffer->entry, &proc->buffers)) {
816                 next = list_entry(buffer->entry.next,
817                                   struct binder_buffer, entry);
818                 if (buffer_start_page(next) == buffer_end_page(buffer)) {
819                         free_page_end = 0;
820                         if (buffer_start_page(next) ==
821                             buffer_start_page(buffer))
822                                 free_page_start = 0;
823                         binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
824                                      "%d: merge free, buffer %p share page with %p\n",
825                                       proc->pid, buffer, prev);
826                 }
827         }
828         list_del(&buffer->entry);
829         if (free_page_start || free_page_end) {
830                 binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
831                              "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
832                              proc->pid, buffer, free_page_start ? "" : " end",
833                              free_page_end ? "" : " start", prev, next);
834                 binder_update_page_range(proc, 0, free_page_start ?
835                         buffer_start_page(buffer) : buffer_end_page(buffer),
836                         (free_page_end ? buffer_end_page(buffer) :
837                         buffer_start_page(buffer)) + PAGE_SIZE, NULL);
838         }
839 }
840
/*
 * binder_free_buf() - release an allocated transaction buffer.
 * @proc:	owning process whose buffer bookkeeping is updated
 * @buffer:	in-use (non-free) buffer being returned to the pool
 *
 * Unmaps the whole pages spanned exclusively by @buffer, moves it from
 * the allocated-buffers rbtree to the free-buffers rbtree, and merges
 * it with free neighbours in proc->buffers so adjacent free space
 * coalesces into one entry.
 * NOTE(review): presumably runs under binder_main_lock like the rest of
 * the buffer bookkeeping -- confirm at call sites.
 */
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	/* effective payload size: each section is pointer-aligned */
	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
		      proc->pid, buffer, size, buffer_size);

	/* sanity: must be a live, transaction-less buffer inside the mapping */
	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		/* async buffers were charged against the async quota; refund it */
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	/*
	 * Release only the pages fully contained in the data area;
	 * partially-shared edge pages are handled during coalescing.
	 */
	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	/* merge with the following buffer if it is free */
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	/* merge with the preceding buffer if it is free; keep the earlier one */
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}
897
898 static struct binder_node *binder_get_node(struct binder_proc *proc,
899                                            binder_uintptr_t ptr)
900 {
901         struct rb_node *n = proc->nodes.rb_node;
902         struct binder_node *node;
903
904         while (n) {
905                 node = rb_entry(n, struct binder_node, rb_node);
906
907                 if (ptr < node->ptr)
908                         n = n->rb_left;
909                 else if (ptr > node->ptr)
910                         n = n->rb_right;
911                 else
912                         return node;
913         }
914         return NULL;
915 }
916
917 static struct binder_node *binder_new_node(struct binder_proc *proc,
918                                            binder_uintptr_t ptr,
919                                            binder_uintptr_t cookie)
920 {
921         struct rb_node **p = &proc->nodes.rb_node;
922         struct rb_node *parent = NULL;
923         struct binder_node *node;
924
925         while (*p) {
926                 parent = *p;
927                 node = rb_entry(parent, struct binder_node, rb_node);
928
929                 if (ptr < node->ptr)
930                         p = &(*p)->rb_left;
931                 else if (ptr > node->ptr)
932                         p = &(*p)->rb_right;
933                 else
934                         return NULL;
935         }
936
937         node = kzalloc(sizeof(*node), GFP_KERNEL);
938         if (node == NULL)
939                 return NULL;
940         binder_stats_created(BINDER_STAT_NODE);
941         rb_link_node(&node->rb_node, parent, p);
942         rb_insert_color(&node->rb_node, &proc->nodes);
943         node->debug_id = ++binder_last_id;
944         node->proc = proc;
945         node->ptr = ptr;
946         node->cookie = cookie;
947         node->work.type = BINDER_WORK_NODE;
948         INIT_LIST_HEAD(&node->work.entry);
949         INIT_LIST_HEAD(&node->async_todo);
950         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
951                      "%d:%d node %d u%016llx c%016llx created\n",
952                      proc->pid, current->pid, node->debug_id,
953                      (u64)node->ptr, (u64)node->cookie);
954         return node;
955 }
956
/*
 * binder_inc_node() - take a reference on @node.
 * @strong:	 nonzero to take a strong ref, zero for a weak ref
 * @internal:	 nonzero when the ref is taken on behalf of a binder_ref
 *		 (internal bookkeeping) rather than a local user
 * @target_list: work list on which node work is queued when userspace
 *		 has to be informed of the new ref; may be NULL only
 *		 when no such notification can be required
 *
 * Returns 0 on success, -EINVAL when a notification would be needed
 * but no @target_list was supplied.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			/*
			 * The first internal strong ref needs a target list
			 * to queue node work on -- except for the context
			 * manager node when userspace already holds a
			 * strong ref (has_strong_ref set).
			 */
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			/* (re)queue node work so userspace learns of the ref */
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/* queue node work so userspace learns of the weak ref */
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}
993
/*
 * binder_dec_node() - drop a reference on @node (inverse of
 * binder_inc_node()).  @strong and @internal select which counter is
 * decremented.  When the last reference of the given kind goes away:
 * queue work on the owning proc so userspace can release its ref, or,
 * once no refs of any kind remain, unlink and free the node.
 * Always returns 0.
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		/* still strongly referenced -> nothing more to do */
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		/* still weakly referenced (locally or via refs) -> done */
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		/* userspace still holds a ref: tell it to drop it */
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		/* no userspace refs: free the node once fully unreferenced */
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				/* node outlived its proc: on the dead list */
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}
1036
1037
1038 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
1039                                          u32 desc, bool need_strong_ref)
1040 {
1041         struct rb_node *n = proc->refs_by_desc.rb_node;
1042         struct binder_ref *ref;
1043
1044         while (n) {
1045                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1046
1047                 if (desc < ref->desc) {
1048                         n = n->rb_left;
1049                 } else if (desc > ref->desc) {
1050                         n = n->rb_right;
1051                 } else if (need_strong_ref && !ref->strong) {
1052                         binder_user_error("tried to use weak ref as strong ref\n");
1053                         return NULL;
1054                 } else {
1055                         return ref;
1056                 }
1057         }
1058         return NULL;
1059 }
1060
1061 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
1062                                                   struct binder_node *node)
1063 {
1064         struct rb_node *n;
1065         struct rb_node **p = &proc->refs_by_node.rb_node;
1066         struct rb_node *parent = NULL;
1067         struct binder_ref *ref, *new_ref;
1068         struct binder_context *context = proc->context;
1069
1070         while (*p) {
1071                 parent = *p;
1072                 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1073
1074                 if (node < ref->node)
1075                         p = &(*p)->rb_left;
1076                 else if (node > ref->node)
1077                         p = &(*p)->rb_right;
1078                 else
1079                         return ref;
1080         }
1081         new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1082         if (new_ref == NULL)
1083                 return NULL;
1084         binder_stats_created(BINDER_STAT_REF);
1085         new_ref->debug_id = ++binder_last_id;
1086         new_ref->proc = proc;
1087         new_ref->node = node;
1088         rb_link_node(&new_ref->rb_node_node, parent, p);
1089         rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1090
1091         new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1092         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1093                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1094                 if (ref->desc > new_ref->desc)
1095                         break;
1096                 new_ref->desc = ref->desc + 1;
1097         }
1098
1099         p = &proc->refs_by_desc.rb_node;
1100         while (*p) {
1101                 parent = *p;
1102                 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1103
1104                 if (new_ref->desc < ref->desc)
1105                         p = &(*p)->rb_left;
1106                 else if (new_ref->desc > ref->desc)
1107                         p = &(*p)->rb_right;
1108                 else
1109                         BUG();
1110         }
1111         rb_link_node(&new_ref->rb_node_desc, parent, p);
1112         rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1113         if (node) {
1114                 hlist_add_head(&new_ref->node_entry, &node->refs);
1115
1116                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1117                              "%d new ref %d desc %d for node %d\n",
1118                               proc->pid, new_ref->debug_id, new_ref->desc,
1119                               node->debug_id);
1120         } else {
1121                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1122                              "%d new ref %d desc %d for dead node\n",
1123                               proc->pid, new_ref->debug_id, new_ref->desc);
1124         }
1125         return new_ref;
1126 }
1127
1128 static void binder_delete_ref(struct binder_ref *ref)
1129 {
1130         binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1131                      "%d delete ref %d desc %d for node %d\n",
1132                       ref->proc->pid, ref->debug_id, ref->desc,
1133                       ref->node->debug_id);
1134
1135         rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1136         rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1137         if (ref->strong)
1138                 binder_dec_node(ref->node, 1, 1);
1139         hlist_del(&ref->node_entry);
1140         binder_dec_node(ref->node, 0, 1);
1141         if (ref->death) {
1142                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1143                              "%d delete ref %d desc %d has death notification\n",
1144                               ref->proc->pid, ref->debug_id, ref->desc);
1145                 list_del(&ref->death->work.entry);
1146                 kfree(ref->death);
1147                 binder_stats_deleted(BINDER_STAT_DEATH);
1148         }
1149         kfree(ref);
1150         binder_stats_deleted(BINDER_STAT_REF);
1151 }
1152
1153 static int binder_inc_ref(struct binder_ref *ref, int strong,
1154                           struct list_head *target_list)
1155 {
1156         int ret;
1157
1158         if (strong) {
1159                 if (ref->strong == 0) {
1160                         ret = binder_inc_node(ref->node, 1, 1, target_list);
1161                         if (ret)
1162                                 return ret;
1163                 }
1164                 ref->strong++;
1165         } else {
1166                 if (ref->weak == 0) {
1167                         ret = binder_inc_node(ref->node, 0, 1, target_list);
1168                         if (ret)
1169                                 return ret;
1170                 }
1171                 ref->weak++;
1172         }
1173         return 0;
1174 }
1175
1176
1177 static int binder_dec_ref(struct binder_ref *ref, int strong)
1178 {
1179         if (strong) {
1180                 if (ref->strong == 0) {
1181                         binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1182                                           ref->proc->pid, ref->debug_id,
1183                                           ref->desc, ref->strong, ref->weak);
1184                         return -EINVAL;
1185                 }
1186                 ref->strong--;
1187                 if (ref->strong == 0) {
1188                         int ret;
1189
1190                         ret = binder_dec_node(ref->node, strong, 1);
1191                         if (ret)
1192                                 return ret;
1193                 }
1194         } else {
1195                 if (ref->weak == 0) {
1196                         binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1197                                           ref->proc->pid, ref->debug_id,
1198                                           ref->desc, ref->strong, ref->weak);
1199                         return -EINVAL;
1200                 }
1201                 ref->weak--;
1202         }
1203         if (ref->strong == 0 && ref->weak == 0)
1204                 binder_delete_ref(ref);
1205         return 0;
1206 }
1207
1208 static void binder_pop_transaction(struct binder_thread *target_thread,
1209                                    struct binder_transaction *t)
1210 {
1211         if (target_thread) {
1212                 BUG_ON(target_thread->transaction_stack != t);
1213                 BUG_ON(target_thread->transaction_stack->from != target_thread);
1214                 target_thread->transaction_stack =
1215                         target_thread->transaction_stack->from_parent;
1216                 t->from = NULL;
1217         }
1218         t->need_reply = 0;
1219         if (t->buffer)
1220                 t->buffer->transaction = NULL;
1221         kfree(t);
1222         binder_stats_deleted(BINDER_STAT_TRANSACTION);
1223 }
1224
/*
 * binder_send_failed_reply() - deliver @error_code in place of a reply
 * for failed transaction @t.
 *
 * Walks up the transaction stack: if the originating thread still
 * exists, its return_error is set to @error_code and it is woken (any
 * pending error is shifted to return_error2 first).  If the originator
 * is gone, the transaction is popped and the walk retries with its
 * parent until a live thread or the stack root is reached.
 * Only valid for synchronous transactions (BUG_ON one-way).
 */
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			/* preserve an already-pending error in the spare slot */
			if (target_thread->return_error != BR_OK &&
			   target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					      t->debug_id,
					      target_thread->proc->pid,
					      target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				/* both error slots busy: drop this one, log it */
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		}
		/* originator gone: pop and retry with the parent transaction */
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_pop_transaction(target_thread, t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}
1277
1278 /**
1279  * binder_validate_object() - checks for a valid metadata object in a buffer.
1280  * @buffer:     binder_buffer that we're parsing.
1281  * @offset:     offset in the buffer at which to validate an object.
1282  *
1283  * Return:      If there's a valid metadata object at @offset in @buffer, the
1284  *              size of that object. Otherwise, it returns zero.
1285  */
1286 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1287 {
1288         /* Check if we can read a header first */
1289         struct binder_object_header *hdr;
1290         size_t object_size = 0;
1291
1292         if (offset > buffer->data_size - sizeof(*hdr) ||
1293             buffer->data_size < sizeof(*hdr) ||
1294             !IS_ALIGNED(offset, sizeof(u32)))
1295                 return 0;
1296
1297         /* Ok, now see if we can read a complete object. */
1298         hdr = (struct binder_object_header *)(buffer->data + offset);
1299         switch (hdr->type) {
1300         case BINDER_TYPE_BINDER:
1301         case BINDER_TYPE_WEAK_BINDER:
1302         case BINDER_TYPE_HANDLE:
1303         case BINDER_TYPE_WEAK_HANDLE:
1304                 object_size = sizeof(struct flat_binder_object);
1305                 break;
1306         case BINDER_TYPE_FD:
1307                 object_size = sizeof(struct binder_fd_object);
1308                 break;
1309         default:
1310                 return 0;
1311         }
1312         if (offset <= buffer->data_size - object_size &&
1313             buffer->data_size >= object_size)
1314                 return object_size;
1315         else
1316                 return 0;
1317 }
1318
/*
 * binder_transaction_buffer_release() - undo the reference/fd effects
 * of the objects embedded in @buffer.
 * @proc:	process that owns @buffer
 * @buffer:	transaction buffer being released
 * @failed_at:	when non-NULL, only objects before this offset-array
 *		position are released (used to unwind a transaction
 *		that failed part-way through translation)
 *
 * Walks the offsets array, decrementing the node/ref counts taken for
 * each translated object.  FDs installed in the target are only closed
 * on the failure path (@failed_at set); on a normal release the target
 * process keeps them.
 */
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	/* offsets array lives just past the (pointer-aligned) data area */
	offp = (binder_size_t *)(buffer->data +
				 ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			/* malformed entry: log and keep releasing the rest */
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			/* drop the strong (BINDER) or weak ref taken at send */
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref *ref;

			fp = to_flat_binder_object(hdr);
			ref = binder_get_ref(proc, fp->handle,
					     hdr->type == BINDER_TYPE_HANDLE);

			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				 debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			/* close the installed fd only when unwinding a failure */
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;

		default:
			pr_err("transaction release %d bad object type %x\n",
				debug_id, hdr->type);
			break;
		}
	}
}
1405
/*
 * binder_translate_binder() - convert a (weak) binder object into a
 * (weak) handle for the target process.
 * @fp:		flat_binder_object being translated in place
 * @t:		transaction carrying the object
 * @thread:	sending thread
 *
 * Looks up (or creates) the sender's node for @fp->binder, verifies
 * the cookie, checks the LSM transfer-binder hook, then rewrites @fp
 * into a HANDLE/WEAK_HANDLE referring to a ref in the target process,
 * taking a ref count on it.  Returns 0 or a negative errno.
 */
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		/* first time this object is sent: create its node */
		node = binder_new_node(proc, fp->binder, fp->cookie);
		if (!node)
			return -ENOMEM;

		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	}
	/* the cookie must match what the node was registered with */
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	ref = binder_get_ref_for_node(target_proc, node);
	if (!ref)
		return -EINVAL;

	/* rewrite the object: the receiver sees a handle, not a pointer */
	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = ref->desc;
	fp->cookie = 0;
	binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);

	trace_binder_transaction_node_to_ref(t, node, ref);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     ref->debug_id, ref->desc);

	return 0;
}
1455
/*
 * binder_translate_handle() - convert a (weak) handle for the target
 * process.
 * @fp:		flat_binder_object being translated in place
 * @t:		transaction carrying the object
 * @thread:	sending thread
 *
 * If the handle's node lives in the target process, the object is
 * rewritten back into a BINDER/WEAK_BINDER (local object); otherwise
 * a ref is found or created in the target and the handle is rewritten
 * to that ref's descriptor.  Returns 0 or a negative errno.
 */
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	ref = binder_get_ref(proc, fp->handle,
			     fp->hdr.type == BINDER_TYPE_HANDLE);
	if (!ref) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	if (ref->node->proc == target_proc) {
		/* sending the object home: hand back the real pointer/cookie */
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = ref->node->ptr;
		fp->cookie = ref->node->cookie;
		binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
				0, NULL);
		trace_binder_transaction_ref_to_node(t, ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     ref->debug_id, ref->desc, ref->node->debug_id,
			     (u64)ref->node->ptr);
	} else {
		/* third-party target: give it its own ref to the node */
		struct binder_ref *new_ref;

		new_ref = binder_get_ref_for_node(target_proc, ref->node);
		if (!new_ref)
			return -EINVAL;

		fp->binder = 0;
		fp->handle = new_ref->desc;
		fp->cookie = 0;
		binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
			       NULL);
		trace_binder_transaction_ref_to_ref(t, ref, new_ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     ref->debug_id, ref->desc, new_ref->debug_id,
			     new_ref->desc, ref->node->debug_id);
	}
	return 0;
}
1508
1509 static int binder_translate_fd(int fd,
1510                                struct binder_transaction *t,
1511                                struct binder_thread *thread,
1512                                struct binder_transaction *in_reply_to)
1513 {
1514         struct binder_proc *proc = thread->proc;
1515         struct binder_proc *target_proc = t->to_proc;
1516         int target_fd;
1517         struct file *file;
1518         int ret;
1519         bool target_allows_fd;
1520
1521         if (in_reply_to)
1522                 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
1523         else
1524                 target_allows_fd = t->buffer->target_node->accept_fds;
1525         if (!target_allows_fd) {
1526                 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
1527                                   proc->pid, thread->pid,
1528                                   in_reply_to ? "reply" : "transaction",
1529                                   fd);
1530                 ret = -EPERM;
1531                 goto err_fd_not_accepted;
1532         }
1533
1534         file = fget(fd);
1535         if (!file) {
1536                 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1537                                   proc->pid, thread->pid, fd);
1538                 ret = -EBADF;
1539                 goto err_fget;
1540         }
1541         ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
1542         if (ret < 0) {
1543                 ret = -EPERM;
1544                 goto err_security;
1545         }
1546
1547         target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1548         if (target_fd < 0) {
1549                 ret = -ENOMEM;
1550                 goto err_get_unused_fd;
1551         }
1552         task_fd_install(target_proc, target_fd, file);
1553         trace_binder_transaction_fd(t, fd, target_fd);
1554         binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
1555                      fd, target_fd);
1556
1557         return target_fd;
1558
1559 err_get_unused_fd:
1560 err_security:
1561         fput(file);
1562 err_fget:
1563 err_fd_not_accepted:
1564         return ret;
1565 }
1566
/*
 * binder_transaction() - core handler for BC_TRANSACTION and BC_REPLY.
 * @proc:	sending process
 * @thread:	sending thread within @proc
 * @tr:		transaction data already copied in from userspace
 * @reply:	nonzero if this is a BC_REPLY, zero for BC_TRANSACTION
 * @extra_buffers_size: extra space to reserve in the target buffer
 *		(0 for plain transactions; see BC_TRANSACTION caller)
 *
 * Validates the target, allocates a binder buffer in the target process,
 * copies the payload and offsets from the sender's userspace, translates
 * every embedded object (binder nodes, handles, fds) into the target's
 * namespace, then queues the work on the target and a
 * BINDER_WORK_TRANSACTION_COMPLETE on the sender.
 *
 * Errors are reported asynchronously: on failure this function sets
 * thread->return_error (or sends a failed reply for the reply path)
 * rather than returning a status.
 */
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end;
	binder_size_t off_min;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;
	struct binder_context *context = proc->context;

	/* Record this attempt in the global transaction log for debugfs. */
	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		/*
		 * Reply path: the transaction being replied to must be on
		 * top of this thread's transaction stack, and the original
		 * caller thread must still be waiting with that same
		 * transaction on top of *its* stack.  Anything else is a
		 * corrupted/forged stack from userspace.
		 */
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		/* Restore the priority we saved when the call arrived. */
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		/* Pop the completed call off our stack. */
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			/* Original caller died while we were handling it. */
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		/*
		 * Outbound transaction: resolve the target node either via
		 * the sender's handle table or, for handle 0, the context
		 * manager node.
		 */
		if (tr->target.handle) {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, tr->target.handle, true);
			if (ref == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			target_node = context->binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		/* LSM hook: may deny IPC between these two tasks. */
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			goto err_invalid_target_handle;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			/*
			 * Synchronous nested call: if the target process is
			 * already somewhere in our call chain, route the new
			 * transaction to the specific thread that called us
			 * (it is blocked waiting) instead of the proc-wide
			 * queue, avoiding deadlock/extra threads.
			 */
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	/* Deliver to a specific thread if we found one, else the process. */
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	/* Work item that tells the sender the transaction was queued. */
	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	/* Only synchronous, non-reply transactions record a sender thread. */
	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	/*
	 * Allocate the payload buffer inside the *target* process's mmap'd
	 * binder area; async transactions draw from a separate budget.
	 */
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	/* Target may not free it until it has actually received it. */
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		/* Keep the node alive while the transaction is in flight. */
		binder_inc_node(target_node, 1, 0, NULL);

	/* Offsets array lives right after the (pointer-aligned) data. */
	offp = (binder_size_t *)(t->buffer->data +
				 ALIGN(tr->data_size, sizeof(void *)));

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	/*
	 * Translate every flattened object the sender embedded in the
	 * payload.  Offsets must be strictly increasing and objects must
	 * not overlap (off_min enforces this), so userspace cannot alias
	 * two objects onto the same bytes.
	 */
	off_end = (void *)offp + tr->offsets_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			/* Local node -> handle valid in the target. */
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			/* Sender's handle -> target's handle (or node). */
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			/* Dup the sender's fd into the target's fd table. */
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;

		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			goto err_bad_object_type;
		}
	}
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		/* Unwind the caller's stack entry; the call is complete. */
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		/* Synchronous call: push onto our stack until replied. */
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		/*
		 * One-way: serialize per node.  If one is already in
		 * flight, park on the node's async_todo (no wakeup);
		 * it is moved to a thread when the buffer is freed.
		 */
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;

	/*
	 * Error unwind: labels are ordered inverse to acquisition so each
	 * entry point releases exactly what was set up before the failure.
	 */
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	/* Release refs/fds already taken for translated objects (up to offp). */
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	t->buffer->transaction = NULL;
	binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d, size %lld-%lld\n",
		     proc->pid, thread->pid, return_error,
		     (u64)tr->data_size, (u64)tr->offsets_size);

	/* Mirror the log entry into the failed-transaction ring. */
	{
		struct binder_transaction_log_entry *fe;

		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
	}

	BUG_ON(thread->return_error != BR_OK);
	if (in_reply_to) {
		/* Failed reply: complete our side, send error to caller. */
		thread->return_error = BR_TRANSACTION_COMPLETE;
		binder_send_failed_reply(in_reply_to, return_error);
	} else
		thread->return_error = return_error;
}
1907
1908 static int binder_thread_write(struct binder_proc *proc,
1909                         struct binder_thread *thread,
1910                         binder_uintptr_t binder_buffer, size_t size,
1911                         binder_size_t *consumed)
1912 {
1913         uint32_t cmd;
1914         struct binder_context *context = proc->context;
1915         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
1916         void __user *ptr = buffer + *consumed;
1917         void __user *end = buffer + size;
1918
1919         while (ptr < end && thread->return_error == BR_OK) {
1920                 if (get_user(cmd, (uint32_t __user *)ptr))
1921                         return -EFAULT;
1922                 ptr += sizeof(uint32_t);
1923                 trace_binder_command(cmd);
1924                 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1925                         binder_stats.bc[_IOC_NR(cmd)]++;
1926                         proc->stats.bc[_IOC_NR(cmd)]++;
1927                         thread->stats.bc[_IOC_NR(cmd)]++;
1928                 }
1929                 switch (cmd) {
1930                 case BC_INCREFS:
1931                 case BC_ACQUIRE:
1932                 case BC_RELEASE:
1933                 case BC_DECREFS: {
1934                         uint32_t target;
1935                         struct binder_ref *ref;
1936                         const char *debug_string;
1937
1938                         if (get_user(target, (uint32_t __user *)ptr))
1939                                 return -EFAULT;
1940                         ptr += sizeof(uint32_t);
1941                         if (target == 0 && context->binder_context_mgr_node &&
1942                             (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1943                                 ref = binder_get_ref_for_node(proc,
1944                                         context->binder_context_mgr_node);
1945                                 if (ref->desc != target) {
1946                                         binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
1947                                                 proc->pid, thread->pid,
1948                                                 ref->desc);
1949                                 }
1950                         } else
1951                                 ref = binder_get_ref(proc, target,
1952                                                      cmd == BC_ACQUIRE ||
1953                                                      cmd == BC_RELEASE);
1954                         if (ref == NULL) {
1955                                 binder_user_error("%d:%d refcount change on invalid ref %d\n",
1956                                         proc->pid, thread->pid, target);
1957                                 break;
1958                         }
1959                         switch (cmd) {
1960                         case BC_INCREFS:
1961                                 debug_string = "IncRefs";
1962                                 binder_inc_ref(ref, 0, NULL);
1963                                 break;
1964                         case BC_ACQUIRE:
1965                                 debug_string = "Acquire";
1966                                 binder_inc_ref(ref, 1, NULL);
1967                                 break;
1968                         case BC_RELEASE:
1969                                 debug_string = "Release";
1970                                 binder_dec_ref(ref, 1);
1971                                 break;
1972                         case BC_DECREFS:
1973                         default:
1974                                 debug_string = "DecRefs";
1975                                 binder_dec_ref(ref, 0);
1976                                 break;
1977                         }
1978                         binder_debug(BINDER_DEBUG_USER_REFS,
1979                                      "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
1980                                      proc->pid, thread->pid, debug_string, ref->debug_id,
1981                                      ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1982                         break;
1983                 }
1984                 case BC_INCREFS_DONE:
1985                 case BC_ACQUIRE_DONE: {
1986                         binder_uintptr_t node_ptr;
1987                         binder_uintptr_t cookie;
1988                         struct binder_node *node;
1989
1990                         if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
1991                                 return -EFAULT;
1992                         ptr += sizeof(binder_uintptr_t);
1993                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1994                                 return -EFAULT;
1995                         ptr += sizeof(binder_uintptr_t);
1996                         node = binder_get_node(proc, node_ptr);
1997                         if (node == NULL) {
1998                                 binder_user_error("%d:%d %s u%016llx no match\n",
1999                                         proc->pid, thread->pid,
2000                                         cmd == BC_INCREFS_DONE ?
2001                                         "BC_INCREFS_DONE" :
2002                                         "BC_ACQUIRE_DONE",
2003                                         (u64)node_ptr);
2004                                 break;
2005                         }
2006                         if (cookie != node->cookie) {
2007                                 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
2008                                         proc->pid, thread->pid,
2009                                         cmd == BC_INCREFS_DONE ?
2010                                         "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2011                                         (u64)node_ptr, node->debug_id,
2012                                         (u64)cookie, (u64)node->cookie);
2013                                 break;
2014                         }
2015                         if (cmd == BC_ACQUIRE_DONE) {
2016                                 if (node->pending_strong_ref == 0) {
2017                                         binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
2018                                                 proc->pid, thread->pid,
2019                                                 node->debug_id);
2020                                         break;
2021                                 }
2022                                 node->pending_strong_ref = 0;
2023                         } else {
2024                                 if (node->pending_weak_ref == 0) {
2025                                         binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
2026                                                 proc->pid, thread->pid,
2027                                                 node->debug_id);
2028                                         break;
2029                                 }
2030                                 node->pending_weak_ref = 0;
2031                         }
2032                         binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
2033                         binder_debug(BINDER_DEBUG_USER_REFS,
2034                                      "%d:%d %s node %d ls %d lw %d\n",
2035                                      proc->pid, thread->pid,
2036                                      cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2037                                      node->debug_id, node->local_strong_refs, node->local_weak_refs);
2038                         break;
2039                 }
2040                 case BC_ATTEMPT_ACQUIRE:
2041                         pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
2042                         return -EINVAL;
2043                 case BC_ACQUIRE_RESULT:
2044                         pr_err("BC_ACQUIRE_RESULT not supported\n");
2045                         return -EINVAL;
2046
2047                 case BC_FREE_BUFFER: {
2048                         binder_uintptr_t data_ptr;
2049                         struct binder_buffer *buffer;
2050
2051                         if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
2052                                 return -EFAULT;
2053                         ptr += sizeof(binder_uintptr_t);
2054
2055                         buffer = binder_buffer_lookup(proc, data_ptr);
2056                         if (buffer == NULL) {
2057                                 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
2058                                         proc->pid, thread->pid, (u64)data_ptr);
2059                                 break;
2060                         }
2061                         if (!buffer->allow_user_free) {
2062                                 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
2063                                         proc->pid, thread->pid, (u64)data_ptr);
2064                                 break;
2065                         }
2066                         binder_debug(BINDER_DEBUG_FREE_BUFFER,
2067                                      "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
2068                                      proc->pid, thread->pid, (u64)data_ptr,
2069                                      buffer->debug_id,
2070                                      buffer->transaction ? "active" : "finished");
2071
2072                         if (buffer->transaction) {
2073                                 buffer->transaction->buffer = NULL;
2074                                 buffer->transaction = NULL;
2075                         }
2076                         if (buffer->async_transaction && buffer->target_node) {
2077                                 BUG_ON(!buffer->target_node->has_async_transaction);
2078                                 if (list_empty(&buffer->target_node->async_todo))
2079                                         buffer->target_node->has_async_transaction = 0;
2080                                 else
2081                                         list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
2082                         }
2083                         trace_binder_transaction_buffer_release(buffer);
2084                         binder_transaction_buffer_release(proc, buffer, NULL);
2085                         binder_free_buf(proc, buffer);
2086                         break;
2087                 }
2088
2089                 case BC_TRANSACTION:
2090                 case BC_REPLY: {
2091                         struct binder_transaction_data tr;
2092
2093                         if (copy_from_user(&tr, ptr, sizeof(tr)))
2094                                 return -EFAULT;
2095                         ptr += sizeof(tr);
2096                         binder_transaction(proc, thread, &tr,
2097                                            cmd == BC_REPLY, 0);
2098                         break;
2099                 }
2100
2101                 case BC_REGISTER_LOOPER:
2102                         binder_debug(BINDER_DEBUG_THREADS,
2103                                      "%d:%d BC_REGISTER_LOOPER\n",
2104                                      proc->pid, thread->pid);
2105                         if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2106                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2107                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
2108                                         proc->pid, thread->pid);
2109                         } else if (proc->requested_threads == 0) {
2110                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2111                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
2112                                         proc->pid, thread->pid);
2113                         } else {
2114                                 proc->requested_threads--;
2115                                 proc->requested_threads_started++;
2116                         }
2117                         thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2118                         break;
2119                 case BC_ENTER_LOOPER:
2120                         binder_debug(BINDER_DEBUG_THREADS,
2121                                      "%d:%d BC_ENTER_LOOPER\n",
2122                                      proc->pid, thread->pid);
2123                         if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2124                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
2125                                 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
2126                                         proc->pid, thread->pid);
2127                         }
2128                         thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2129                         break;
2130                 case BC_EXIT_LOOPER:
2131                         binder_debug(BINDER_DEBUG_THREADS,
2132                                      "%d:%d BC_EXIT_LOOPER\n",
2133                                      proc->pid, thread->pid);
2134                         thread->looper |= BINDER_LOOPER_STATE_EXITED;
2135                         break;
2136
2137                 case BC_REQUEST_DEATH_NOTIFICATION:
2138                 case BC_CLEAR_DEATH_NOTIFICATION: {
2139                         uint32_t target;
2140                         binder_uintptr_t cookie;
2141                         struct binder_ref *ref;
2142                         struct binder_ref_death *death;
2143
2144                         if (get_user(target, (uint32_t __user *)ptr))
2145                                 return -EFAULT;
2146                         ptr += sizeof(uint32_t);
2147                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2148                                 return -EFAULT;
2149                         ptr += sizeof(binder_uintptr_t);
2150                         ref = binder_get_ref(proc, target, false);
2151                         if (ref == NULL) {
2152                                 binder_user_error("%d:%d %s invalid ref %d\n",
2153                                         proc->pid, thread->pid,
2154                                         cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2155                                         "BC_REQUEST_DEATH_NOTIFICATION" :
2156                                         "BC_CLEAR_DEATH_NOTIFICATION",
2157                                         target);
2158                                 break;
2159                         }
2160
2161                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2162                                      "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
2163                                      proc->pid, thread->pid,
2164                                      cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2165                                      "BC_REQUEST_DEATH_NOTIFICATION" :
2166                                      "BC_CLEAR_DEATH_NOTIFICATION",
2167                                      (u64)cookie, ref->debug_id, ref->desc,
2168                                      ref->strong, ref->weak, ref->node->debug_id);
2169
2170                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2171                                 if (ref->death) {
2172                                         binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
2173                                                 proc->pid, thread->pid);
2174                                         break;
2175                                 }
2176                                 death = kzalloc(sizeof(*death), GFP_KERNEL);
2177                                 if (death == NULL) {
2178                                         thread->return_error = BR_ERROR;
2179                                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2180                                                      "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
2181                                                      proc->pid, thread->pid);
2182                                         break;
2183                                 }
2184                                 binder_stats_created(BINDER_STAT_DEATH);
2185                                 INIT_LIST_HEAD(&death->work.entry);
2186                                 death->cookie = cookie;
2187                                 ref->death = death;
2188                                 if (ref->node->proc == NULL) {
2189                                         ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2190                                         if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2191                                                 list_add_tail(&ref->death->work.entry, &thread->todo);
2192                                         } else {
2193                                                 list_add_tail(&ref->death->work.entry, &proc->todo);
2194                                                 wake_up_interruptible(&proc->wait);
2195                                         }
2196                                 }
2197                         } else {
2198                                 if (ref->death == NULL) {
2199                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
2200                                                 proc->pid, thread->pid);
2201                                         break;
2202                                 }
2203                                 death = ref->death;
2204                                 if (death->cookie != cookie) {
2205                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
2206                                                 proc->pid, thread->pid,
2207                                                 (u64)death->cookie,
2208                                                 (u64)cookie);
2209                                         break;
2210                                 }
2211                                 ref->death = NULL;
2212                                 if (list_empty(&death->work.entry)) {
2213                                         death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2214                                         if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2215                                                 list_add_tail(&death->work.entry, &thread->todo);
2216                                         } else {
2217                                                 list_add_tail(&death->work.entry, &proc->todo);
2218                                                 wake_up_interruptible(&proc->wait);
2219                                         }
2220                                 } else {
2221                                         BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2222                                         death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2223                                 }
2224                         }
2225                 } break;
2226                 case BC_DEAD_BINDER_DONE: {
2227                         struct binder_work *w;
2228                         binder_uintptr_t cookie;
2229                         struct binder_ref_death *death = NULL;
2230
2231                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2232                                 return -EFAULT;
2233
2234                         ptr += sizeof(cookie);
2235                         list_for_each_entry(w, &proc->delivered_death, entry) {
2236                                 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2237
2238                                 if (tmp_death->cookie == cookie) {
2239                                         death = tmp_death;
2240                                         break;
2241                                 }
2242                         }
2243                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
2244                                      "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2245                                      proc->pid, thread->pid, (u64)cookie,
2246                                      death);
2247                         if (death == NULL) {
2248                                 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2249                                         proc->pid, thread->pid, (u64)cookie);
2250                                 break;
2251                         }
2252
2253                         list_del_init(&death->work.entry);
2254                         if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2255                                 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2256                                 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2257                                         list_add_tail(&death->work.entry, &thread->todo);
2258                                 } else {
2259                                         list_add_tail(&death->work.entry, &proc->todo);
2260                                         wake_up_interruptible(&proc->wait);
2261                                 }
2262                         }
2263                 } break;
2264
2265                 default:
2266                         pr_err("%d:%d unknown command %d\n",
2267                                proc->pid, thread->pid, cmd);
2268                         return -EINVAL;
2269                 }
2270                 *consumed = ptr - buffer;
2271         }
2272         return 0;
2273 }
2274
2275 static void binder_stat_br(struct binder_proc *proc,
2276                            struct binder_thread *thread, uint32_t cmd)
2277 {
2278         trace_binder_return(cmd);
2279         if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2280                 binder_stats.br[_IOC_NR(cmd)]++;
2281                 proc->stats.br[_IOC_NR(cmd)]++;
2282                 thread->stats.br[_IOC_NR(cmd)]++;
2283         }
2284 }
2285
2286 static int binder_has_proc_work(struct binder_proc *proc,
2287                                 struct binder_thread *thread)
2288 {
2289         return !list_empty(&proc->todo) ||
2290                 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2291 }
2292
2293 static int binder_has_thread_work(struct binder_thread *thread)
2294 {
2295         return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2296                 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2297 }
2298
/*
 * binder_thread_read() - fill the caller's read buffer with BR_* commands.
 * @proc:		calling process
 * @thread:		calling thread
 * @binder_buffer:	userspace address of the read buffer
 * @size:		size of the read buffer
 * @consumed:		in/out: bytes already written to the buffer
 * @non_block:		if set, return -EAGAIN instead of sleeping
 *
 * Delivers pending error returns, node reference commands, death
 * notifications and transactions to userspace.  May drop the global
 * binder lock and sleep waiting for work.  Returns 0 on success or a
 * negative errno (-EFAULT on bad user buffer, or the wait result).
 */
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	/* Every fresh read starts with BR_NOOP so the buffer is always a
	 * well-formed command stream (and see the BR_SPAWN_LOOPER overwrite
	 * at "done:" below). */
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	/* Only service the process-wide todo list when this thread has no
	 * transaction stack and no private work of its own. */
	wait_for_proc_work = thread->transaction_stack == NULL &&
				list_empty(&thread->todo);

	/* Deliver pending error returns (return_error2 first) before any
	 * other work. */
	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			binder_stat_br(proc, thread, thread->return_error2);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		binder_stat_br(proc, thread, thread->return_error);
		thread->return_error = BR_OK;
		goto done;
	}


	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;

	/* The global binder lock is dropped across the (possibly blocking)
	 * wait and re-taken afterwards. */
	binder_unlock(__func__);

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !list_empty(&thread->todo));
	if (wait_for_proc_work) {
		/* Waiting for proc work without having registered/entered a
		 * looper is a userspace protocol violation. */
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
	}

	binder_lock(__func__);

	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	/* Drain work items until the buffer is (nearly) full or a
	 * transaction/reply is delivered. */
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		/* Thread-private work takes priority over process work. */
		if (!list_empty(&thread->todo)) {
			w = list_first_entry(&thread->todo, struct binder_work,
					     entry);
		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
			w = list_first_entry(&proc->todo, struct binder_work,
					     entry);
		} else {
			/* no data added */
			/* Only the initial BR_NOOP was written; sleep again
			 * unless a forced return is pending. */
			if (ptr - buffer == 4 &&
			    !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
				goto retry;
			break;
		}

		/* Need room for a command word plus a transaction_data. */
		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			/* Delivered below, after the switch. */
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;

			/* Report at most one reference-count transition to
			 * userspace per pass over this work item. */
			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);
				if (put_user(node->cookie,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);

				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,
					     "%d:%d %s %d u%016llx c%016llx\n",
					     proc->pid, thread->pid, cmd_name,
					     node->debug_id,
					     (u64)node->ptr, (u64)node->cookie);
			} else {
				/* Nothing to report; node state is settled. */
				list_del_init(&w->entry);
				if (!weak && !strong) {
					/* No references remain: free the node. */
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx deleted\n",
						     proc->pid, thread->pid,
						     node->debug_id,
						     (u64)node->ptr,
						     (u64)node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats_deleted(BINDER_STAT_NODE);
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx state unchanged\n",
						     proc->pid, thread->pid,
						     node->debug_id,
						     (u64)node->ptr,
						     (u64)node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				      proc->pid, thread->pid,
				      cmd == BR_DEAD_BINDER ?
				      "BR_DEAD_BINDER" :
				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				      (u64)death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				/* Keep the work queued until userspace acks
				 * with BC_DEAD_BINDER_DONE. */
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			/* Incoming transaction: fill in target info and
			 * apply the sender's priority inheritance rules. */
			struct binder_node *target_node = t->buffer->target_node;

			tr.target.ptr = target_node->ptr;
			tr.cookie =  target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		/* Translate the kernel buffer address into the receiver's
		 * mmap'ed view of the same pages. */
		tr.data.ptr.buffer = (binder_uintptr_t)(
					(uintptr_t)t->buffer->data +
					proc->user_buffer_offset);
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,
			     t->from ? t->from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			/* Synchronous transaction: keep it on this thread's
			 * stack until the reply comes back. */
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			/* One-way or reply: the transaction is finished. */
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	/* Ask userspace to spawn another looper thread if the pool is
	 * exhausted and the configured maximum has not been reached. */
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		/* Overwrites the BR_NOOP written at the top of the buffer. */
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
	return 0;
}
2624
/*
 * binder_release_work() - discard all pending work items on @list.
 * @list:	a todo list (per-thread or per-process) being torn down
 *
 * Called when a thread or process is released.  Each undelivered work
 * item is removed and freed according to its type; pending synchronous
 * transactions get a BR_DEAD_REPLY sent back so the sender unblocks.
 */
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;

	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				/* Synchronous: a sender is still blocked on
				 * this; wake it with a dead reply. */
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				/* One-way or reply: just free it. */
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				t->buffer->transaction = NULL;
				kfree(t);
				binder_stats_deleted(BINDER_STAT_TRANSACTION);
			}
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			/* A plain binder_work allocation; free directly. */
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			/* Unknown embedding: cannot know how to free it, so
			 * leak rather than corrupt memory. */
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}

}
2674
2675 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2676 {
2677         struct binder_thread *thread = NULL;
2678         struct rb_node *parent = NULL;
2679         struct rb_node **p = &proc->threads.rb_node;
2680
2681         while (*p) {
2682                 parent = *p;
2683                 thread = rb_entry(parent, struct binder_thread, rb_node);
2684
2685                 if (current->pid < thread->pid)
2686                         p = &(*p)->rb_left;
2687                 else if (current->pid > thread->pid)
2688                         p = &(*p)->rb_right;
2689                 else
2690                         break;
2691         }
2692         if (*p == NULL) {
2693                 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2694                 if (thread == NULL)
2695                         return NULL;
2696                 binder_stats_created(BINDER_STAT_THREAD);
2697                 thread->proc = proc;
2698                 thread->pid = current->pid;
2699                 init_waitqueue_head(&thread->wait);
2700                 INIT_LIST_HEAD(&thread->todo);
2701                 rb_link_node(&thread->rb_node, parent, p);
2702                 rb_insert_color(&thread->rb_node, &proc->threads);
2703                 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2704                 thread->return_error = BR_OK;
2705                 thread->return_error2 = BR_OK;
2706         }
2707         return thread;
2708 }
2709
/*
 * binder_free_thread() - tear down a binder_thread on release.
 * @proc:	owning process
 * @thread:	thread being destroyed
 *
 * Unlinks @thread from the proc's thread tree, detaches it from every
 * transaction on its stack (sending BR_DEAD_REPLY to a sender blocked
 * on this thread), releases its pending work, and frees it.
 * Returns the number of transactions that were still active.
 */
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	/* If the top of the stack targets this thread, its sender is still
	 * waiting for a reply; remember it for BR_DEAD_REPLY below. */
	if (t && t->to_thread == thread)
		send_reply = t;
	/* Walk the whole stack, severing this thread from each entry. */
	while (t) {
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			      proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			/* Incoming: drop the target side. */
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			/* Outgoing: drop the sender side. */
			t->from = NULL;
			t = t->from_parent;
		} else
			/* A stack entry must reference this thread on one
			 * side or the other. */
			BUG();
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	kfree(thread);
	binder_stats_deleted(BINDER_STAT_THREAD);
	return active_transactions;
}
2750
2751 static unsigned int binder_poll(struct file *filp,
2752                                 struct poll_table_struct *wait)
2753 {
2754         struct binder_proc *proc = filp->private_data;
2755         struct binder_thread *thread = NULL;
2756         int wait_for_proc_work;
2757
2758         binder_lock(__func__);
2759
2760         thread = binder_get_thread(proc);
2761
2762         wait_for_proc_work = thread->transaction_stack == NULL &&
2763                 list_empty(&thread->todo) && thread->return_error == BR_OK;
2764
2765         binder_unlock(__func__);
2766
2767         if (wait_for_proc_work) {
2768                 if (binder_has_proc_work(proc, thread))
2769                         return POLLIN;
2770                 poll_wait(filp, &proc->wait, wait);
2771                 if (binder_has_proc_work(proc, thread))
2772                         return POLLIN;
2773         } else {
2774                 if (binder_has_thread_work(thread))
2775                         return POLLIN;
2776                 poll_wait(filp, &thread->wait, wait);
2777                 if (binder_has_thread_work(thread))
2778                         return POLLIN;
2779         }
2780         return 0;
2781 }
2782
/*
 * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl
 * @filp:	binder instance file (private_data is the binder_proc)
 * @cmd:	ioctl command; only its _IOC_SIZE is validated here
 * @arg:	userspace pointer to a struct binder_write_read
 * @thread:	calling thread's binder state
 *
 * Copies the binder_write_read header in, first consumes the write
 * buffer (commands from userspace), then fills the read buffer (work
 * delivered to userspace), and finally copies the updated consumed
 * counts back out.  Returns 0 on success or a negative errno.
 */
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			/*
			 * Report how much was consumed before the error;
			 * nothing was read yet, so zero read_consumed.
			 */
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		/* This thread took work; wake others if more is queued. */
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			/* Copy back anyway so userspace sees consumed counts. */
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
2846
/*
 * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR
 * @filp:	binder instance file (private_data is the binder_proc)
 *
 * Registers the calling process as the context manager for this binder
 * context: creates node 0 and pins it with a strong and a weak local
 * reference.  Fails with -EBUSY if a manager node already exists, or
 * -EPERM if a manager uid was recorded earlier and the caller's
 * effective uid differs from it.
 */
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;

	kuid_t curr_euid = current_euid();

	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	/* Give the LSM (e.g. SELinux) a chance to veto the registration. */
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		/* A manager uid was set previously; only that uid may retry. */
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	/* The context manager is always node 0 (ptr == cookie == 0). */
	context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
	if (!context->binder_context_mgr_node) {
		ret = -ENOMEM;
		goto out;
	}
	/* Pin the node so it survives for the lifetime of the context. */
	context->binder_context_mgr_node->local_weak_refs++;
	context->binder_context_mgr_node->local_strong_refs++;
	context->binder_context_mgr_node->has_strong_ref = 1;
	context->binder_context_mgr_node->has_weak_ref = 1;
out:
	return ret;
}
2887
/*
 * binder_ioctl() - top-level ioctl dispatcher for /dev/binder
 *
 * Takes the global binder lock, resolves (or creates) the per-thread
 * state, and dispatches the command.  All locked exits funnel through
 * the "err" label so NEED_RETURN is cleared and the lock dropped.
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	trace_binder_ioctl(cmd, arg);

	/* Park callers while a debug-requested stop-on-user-error is active. */
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS:
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_free_thread(proc, thread);
		/* thread was just freed; the err path must not touch it. */
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	binder_unlock(__func__);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
2965
2966 static void binder_vma_open(struct vm_area_struct *vma)
2967 {
2968         struct binder_proc *proc = vma->vm_private_data;
2969
2970         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2971                      "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2972                      proc->pid, vma->vm_start, vma->vm_end,
2973                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2974                      (unsigned long)pgprot_val(vma->vm_page_prot));
2975 }
2976
2977 static void binder_vma_close(struct vm_area_struct *vma)
2978 {
2979         struct binder_proc *proc = vma->vm_private_data;
2980
2981         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2982                      "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2983                      proc->pid, vma->vm_start, vma->vm_end,
2984                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2985                      (unsigned long)pgprot_val(vma->vm_page_prot));
2986         proc->vma = NULL;
2987         proc->vma_vm_mm = NULL;
2988         binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2989 }
2990
/*
 * The driver installs every page of the mapping itself (see
 * binder_update_page_range() usage in binder_mmap()); a fault from
 * userspace is always an error and is answered with SIGBUS.
 */
static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
2995
/* vm_ops for the binder buffer mapping established in binder_mmap(). */
static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
3001
/*
 * binder_mmap() - set up the shared transaction buffer for a process
 *
 * Reserves a kernel virtual range matching the userspace vma (capped
 * at 4MB), allocates the page-pointer array, maps the first page, and
 * seeds the buffer lists with one free buffer spanning the whole area.
 * The mapping is forced non-writable and non-copied for userspace; the
 * kernel writes transaction data and userspace reads it at
 * user_buffer_offset from the kernel address.
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	/* Only the process that opened the node may map it. */
	if (proc->tsk != current)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	mutex_lock(&binder_mmap_lock);
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	/* Constant delta between user and kernel views of the buffer. */
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
	mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	/*
	 * On aliasing VIPT caches the user and kernel addresses must share
	 * a cache colour; shift the vma start until they do.
	 */
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* Back only the first page now; the rest is mapped on demand. */
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	/* Async transactions may use at most half of the space. */
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;
	proc->vma_vm_mm = vma->vm_mm;

	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_mmap_lock);
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_mmap_lock);
err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
3100
/*
 * binder_open() - open hook for a binder device node
 *
 * Allocates the per-process binder_proc, binds it to the device's
 * context, replaces filp->private_data with it, links it into the
 * global proc list and creates the per-pid debugfs entry.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	/* Hold a task reference for the lifetime of the proc. */
	get_task_struct(current);
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
	/* misc_open() stored the miscdevice in private_data; recover it. */
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}
3150
3151 static int binder_flush(struct file *filp, fl_owner_t id)
3152 {
3153         struct binder_proc *proc = filp->private_data;
3154
3155         binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3156
3157         return 0;
3158 }
3159
3160 static void binder_deferred_flush(struct binder_proc *proc)
3161 {
3162         struct rb_node *n;
3163         int wake_count = 0;
3164
3165         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3166                 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
3167
3168                 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
3169                 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3170                         wake_up_interruptible(&thread->wait);
3171                         wake_count++;
3172                 }
3173         }
3174         wake_up_interruptible_all(&proc->wait);
3175
3176         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3177                      "binder_flush: %d woke %d threads\n", proc->pid,
3178                      wake_count);
3179 }
3180
3181 static int binder_release(struct inode *nodp, struct file *filp)
3182 {
3183         struct binder_proc *proc = filp->private_data;
3184
3185         debugfs_remove(proc->debugfs_entry);
3186         binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3187
3188         return 0;
3189 }
3190
/*
 * binder_node_release() - release one node of a dying process
 * @node:	node being torn down
 * @refs:	running count of incoming references, accumulated by caller
 *
 * If nothing references the node it is freed immediately.  Otherwise it
 * becomes a "dead node": detached from the proc and queued on
 * binder_dead_nodes, and every referencing proc with a registered death
 * notification gets a BINDER_WORK_DEAD_BINDER work item.  Returns the
 * updated reference count.
 */
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;

	list_del_init(&node->work.entry);
	binder_release_work(&node->async_todo);

	if (hlist_empty(&node->refs)) {
		/* No remote references: the node can go away right now. */
		kfree(node);
		binder_stats_deleted(BINDER_STAT_NODE);

		return refs;
	}

	/* Mark the node dead: no owning proc, no local references. */
	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	hlist_add_head(&node->dead_node, &binder_dead_nodes);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;

		if (!ref->death)
			continue;

		death++;

		/* Queue the death notification unless one is already pending. */
		if (list_empty(&ref->death->work.entry)) {
			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
			list_add_tail(&ref->death->work.entry,
				      &ref->proc->todo);
			wake_up_interruptible(&ref->proc->wait);
		} else
			BUG();
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);

	return refs;
}
3234
/*
 * binder_deferred_release() - final teardown of a binder_proc
 *
 * Runs from the deferred workqueue after the last close.  Tears down,
 * in order: the context-manager node (if owned), all threads, all
 * nodes, all outgoing refs, pending work, allocated buffers, and the
 * page array / kernel mapping.  Frees @proc at the end; the caller must
 * not touch it afterwards.
 */
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_transaction *t;
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, buffers,
		active_transactions, page_count;

	/* The vma must have been closed and files already put. */
	BUG_ON(proc->vma);
	BUG_ON(proc->files);

	hlist_del(&proc->proc_node);

	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}

	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		incoming_refs = binder_node_release(node, incoming_refs);
	}

	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}

	binder_release_work(&proc->todo);
	binder_release_work(&proc->delivered_death);

	buffers = 0;
	while ((n = rb_first(&proc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Detach any transaction still pointing at this buffer. */
		t = buffer->transaction;
		if (t) {
			t->buffer = NULL;
			buffer->transaction = NULL;
			pr_err("release proc %d, transaction %d, not freed\n",
			       proc->pid, t->debug_id);
			/*BUG();*/
		}

		binder_free_buf(proc, buffer);
		buffers++;
	}

	binder_stats_deleted(BINDER_STAT_PROC);

	page_count = 0;
	if (proc->pages) {
		int i;

		/* Unmap and free every page still backing the buffer. */
		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!proc->pages[i])
				continue;

			page_addr = proc->buffer + i * PAGE_SIZE;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %p not freed\n",
				     __func__, proc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(proc->pages[i]);
			page_count++;
		}
		kfree(proc->pages);
		vfree(proc->buffer);
	}

	/* Drop the task reference taken in binder_open(). */
	put_task_struct(proc->tsk);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions, buffers, page_count);

	kfree(proc);
}
3341
/*
 * binder_deferred_func() - workqueue handler draining binder_deferred_list
 *
 * Pops one proc at a time (under binder_deferred_lock), snapshots and
 * clears its pending deferred-work bits, then performs them while
 * holding the global binder lock.  put_files_struct() is called after
 * the lock is dropped because it may sleep/recurse.  Loops until the
 * deferred list is empty.
 */
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			/* Detach now; release after dropping the binder lock. */
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		if (files)
			put_files_struct(files);
	} while (proc);
}
3382 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3383
/*
 * binder_defer_work() - queue deferred work bits for a proc
 * @proc:	process the work applies to
 * @defer:	BINDER_DEFERRED_* bit(s) to set
 *
 * Accumulates @defer into proc->deferred_work and, if the proc is not
 * already on binder_deferred_list, links it and kicks the workqueue.
 * Both steps happen under binder_deferred_lock so bits are never lost.
 */
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		queue_work(binder_deferred_workqueue, &binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
3396
3397 static void print_binder_transaction(struct seq_file *m, const char *prefix,
3398                                      struct binder_transaction *t)
3399 {
3400         seq_printf(m,
3401                    "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3402                    prefix, t->debug_id, t,
3403                    t->from ? t->from->proc->pid : 0,
3404                    t->from ? t->from->pid : 0,
3405                    t->to_proc ? t->to_proc->pid : 0,
3406                    t->to_thread ? t->to_thread->pid : 0,
3407                    t->code, t->flags, t->priority, t->need_reply);
3408         if (t->buffer == NULL) {
3409                 seq_puts(m, " buffer free\n");
3410                 return;
3411         }
3412         if (t->buffer->target_node)
3413                 seq_printf(m, " node %d",
3414                            t->buffer->target_node->debug_id);
3415         seq_printf(m, " size %zd:%zd data %p\n",
3416                    t->buffer->data_size, t->buffer->offsets_size,
3417                    t->buffer->data);
3418 }
3419
3420 static void print_binder_buffer(struct seq_file *m, const char *prefix,
3421                                 struct binder_buffer *buffer)
3422 {
3423         seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
3424                    prefix, buffer->debug_id, buffer->data,
3425                    buffer->data_size, buffer->offsets_size,
3426                    buffer->transaction ? "active" : "delivered");
3427 }
3428
3429 static void print_binder_work(struct seq_file *m, const char *prefix,
3430                               const char *transaction_prefix,
3431                               struct binder_work *w)
3432 {
3433         struct binder_node *node;
3434         struct binder_transaction *t;
3435
3436         switch (w->type) {
3437         case BINDER_WORK_TRANSACTION:
3438                 t = container_of(w, struct binder_transaction, work);
3439                 print_binder_transaction(m, transaction_prefix, t);
3440                 break;
3441         case BINDER_WORK_TRANSACTION_COMPLETE:
3442                 seq_printf(m, "%stransaction complete\n", prefix);
3443                 break;
3444         case BINDER_WORK_NODE:
3445                 node = container_of(w, struct binder_node, work);
3446                 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
3447                            prefix, node->debug_id,
3448                            (u64)node->ptr, (u64)node->cookie);
3449                 break;
3450         case BINDER_WORK_DEAD_BINDER:
3451                 seq_printf(m, "%shas dead binder\n", prefix);
3452                 break;
3453         case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3454                 seq_printf(m, "%shas cleared dead binder\n", prefix);
3455                 break;
3456         case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3457                 seq_printf(m, "%shas cleared death notification\n", prefix);
3458                 break;
3459         default:
3460                 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3461                 break;
3462         }
3463 }
3464
/*
 * print_binder_thread() - dump one thread's transactions and work
 *
 * Walks the transaction stack from the thread's perspective (outgoing
 * when t->from == thread, incoming when t->to_thread == thread) and
 * then its todo list.  If @print_always is false and nothing was
 * printed after the header, the seq_file write position is rolled back
 * so idle threads produce no output.
 */
static void print_binder_thread(struct seq_file *m,
				struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 "    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 "    incoming transaction", t);
			t = t->to_parent;
		} else {
			/* Stack entry belongs to neither side: corrupt link. */
			print_binder_transaction(m, "    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, "    ", "    pending transaction", w);
	}
	/* Nothing beyond the header: erase it by rewinding the count. */
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
3497
3498 static void print_binder_node(struct seq_file *m, struct binder_node *node)
3499 {
3500         struct binder_ref *ref;
3501         struct binder_work *w;
3502         int count;
3503
3504         count = 0;
3505         hlist_for_each_entry(ref, &node->refs, node_entry)
3506                 count++;
3507
3508         seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3509                    node->debug_id, (u64)node->ptr, (u64)node->cookie,
3510                    node->has_strong_ref, node->has_weak_ref,
3511                    node->local_strong_refs, node->local_weak_refs,
3512                    node->internal_strong_refs, count);
3513         if (count) {
3514                 seq_puts(m, " proc");
3515                 hlist_for_each_entry(ref, &node->refs, node_entry)
3516                         seq_printf(m, " %d", ref->proc->pid);
3517         }
3518         seq_puts(m, "\n");
3519         list_for_each_entry(w, &node->async_todo, entry)
3520                 print_binder_work(m, "    ",
3521                                   "    pending async transaction", w);
3522 }
3523
3524 static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3525 {
3526         seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
3527                    ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3528                    ref->node->debug_id, ref->strong, ref->weak, ref->death);
3529 }
3530
/*
 * print_binder_proc() - dump one process's binder state to debugfs
 *
 * Prints threads, nodes, refs (full dump only), buffers, pending work
 * and delivered-death status.  If @print_all is false, nodes are shown
 * only when they carry an async transaction, and if nothing beyond the
 * two header lines was emitted the seq_file position is rolled back so
 * idle procs produce no output.
 */
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (print_all || node->has_async_transaction)
			print_binder_node(m, node);
	}
	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref(m, rb_entry(n, struct binder_ref,
						     rb_node_desc));
	}
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work(m, "  ", "  pending transaction", w);
	/* Only note delivered death once, regardless of list length. */
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	/* Nothing beyond the headers: rewind so this proc prints nothing. */
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
3571
/*
 * Names for the BR_* return codes, indexed by command number so that
 * binder_stats.br[i] maps to binder_return_strings[i]; the array size is
 * checked against the stats array by BUILD_BUG_ON in print_binder_stats().
 */
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};
3592
/*
 * Names for the BC_* commands, indexed by command number so that
 * binder_stats.bc[i] maps to binder_command_strings[i]; the array size is
 * checked against the stats array by BUILD_BUG_ON in print_binder_stats().
 */
static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE"
};
3612
/*
 * Names for the object-lifetime counters; index-aligned with
 * binder_stats.obj_created / obj_deleted (sizes checked by BUILD_BUG_ON
 * in print_binder_stats()).
 */
static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
3622
3623 static void print_binder_stats(struct seq_file *m, const char *prefix,
3624                                struct binder_stats *stats)
3625 {
3626         int i;
3627
3628         BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3629                      ARRAY_SIZE(binder_command_strings));
3630         for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3631                 if (stats->bc[i])
3632                         seq_printf(m, "%s%s: %d\n", prefix,
3633                                    binder_command_strings[i], stats->bc[i]);
3634         }
3635
3636         BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3637                      ARRAY_SIZE(binder_return_strings));
3638         for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3639                 if (stats->br[i])
3640                         seq_printf(m, "%s%s: %d\n", prefix,
3641                                    binder_return_strings[i], stats->br[i]);
3642         }
3643
3644         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3645                      ARRAY_SIZE(binder_objstat_strings));
3646         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3647                      ARRAY_SIZE(stats->obj_deleted));
3648         for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3649                 if (stats->obj_created[i] || stats->obj_deleted[i])
3650                         seq_printf(m, "%s%s: active %d total %d\n", prefix,
3651                                 binder_objstat_strings[i],
3652                                 stats->obj_created[i] - stats->obj_deleted[i],
3653                                 stats->obj_created[i]);
3654         }
3655 }
3656
3657 static void print_binder_proc_stats(struct seq_file *m,
3658                                     struct binder_proc *proc)
3659 {
3660         struct binder_work *w;
3661         struct rb_node *n;
3662         int count, strong, weak;
3663
3664         seq_printf(m, "proc %d\n", proc->pid);
3665         seq_printf(m, "context %s\n", proc->context->name);
3666         count = 0;
3667         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3668                 count++;
3669         seq_printf(m, "  threads: %d\n", count);
3670         seq_printf(m, "  requested threads: %d+%d/%d\n"
3671                         "  ready threads %d\n"
3672                         "  free async space %zd\n", proc->requested_threads,
3673                         proc->requested_threads_started, proc->max_threads,
3674                         proc->ready_threads, proc->free_async_space);
3675         count = 0;
3676         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3677                 count++;
3678         seq_printf(m, "  nodes: %d\n", count);
3679         count = 0;
3680         strong = 0;
3681         weak = 0;
3682         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3683                 struct binder_ref *ref = rb_entry(n, struct binder_ref,
3684                                                   rb_node_desc);
3685                 count++;
3686                 strong += ref->strong;
3687                 weak += ref->weak;
3688         }
3689         seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
3690
3691         count = 0;
3692         for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3693                 count++;
3694         seq_printf(m, "  buffers: %d\n", count);
3695
3696         count = 0;
3697         list_for_each_entry(w, &proc->todo, entry) {
3698                 switch (w->type) {
3699                 case BINDER_WORK_TRANSACTION:
3700                         count++;
3701                         break;
3702                 default:
3703                         break;
3704                 }
3705         }
3706         seq_printf(m, "  pending transactions: %d\n", count);
3707
3708         print_binder_stats(m, "  ", &proc->stats);
3709 }
3710
3711
3712 static int binder_state_show(struct seq_file *m, void *unused)
3713 {
3714         struct binder_proc *proc;
3715         struct binder_node *node;
3716         int do_lock = !binder_debug_no_lock;
3717
3718         if (do_lock)
3719                 binder_lock(__func__);
3720
3721         seq_puts(m, "binder state:\n");
3722
3723         if (!hlist_empty(&binder_dead_nodes))
3724                 seq_puts(m, "dead nodes:\n");
3725         hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
3726                 print_binder_node(m, node);
3727
3728         hlist_for_each_entry(proc, &binder_procs, proc_node)
3729                 print_binder_proc(m, proc, 1);
3730         if (do_lock)
3731                 binder_unlock(__func__);
3732         return 0;
3733 }
3734
3735 static int binder_stats_show(struct seq_file *m, void *unused)
3736 {
3737         struct binder_proc *proc;
3738         int do_lock = !binder_debug_no_lock;
3739
3740         if (do_lock)
3741                 binder_lock(__func__);
3742
3743         seq_puts(m, "binder stats:\n");
3744
3745         print_binder_stats(m, "", &binder_stats);
3746
3747         hlist_for_each_entry(proc, &binder_procs, proc_node)
3748                 print_binder_proc_stats(m, proc);
3749         if (do_lock)
3750                 binder_unlock(__func__);
3751         return 0;
3752 }
3753
3754 static int binder_transactions_show(struct seq_file *m, void *unused)
3755 {
3756         struct binder_proc *proc;
3757         int do_lock = !binder_debug_no_lock;
3758
3759         if (do_lock)
3760                 binder_lock(__func__);
3761
3762         seq_puts(m, "binder transactions:\n");
3763         hlist_for_each_entry(proc, &binder_procs, proc_node)
3764                 print_binder_proc(m, proc, 0);
3765         if (do_lock)
3766                 binder_unlock(__func__);
3767         return 0;
3768 }
3769
3770 static int binder_proc_show(struct seq_file *m, void *unused)
3771 {
3772         struct binder_proc *itr;
3773         int pid = (unsigned long)m->private;
3774         int do_lock = !binder_debug_no_lock;
3775
3776         if (do_lock)
3777                 binder_lock(__func__);
3778
3779         hlist_for_each_entry(itr, &binder_procs, proc_node) {
3780                 if (itr->pid == pid) {
3781                         seq_puts(m, "binder proc state:\n");
3782                         print_binder_proc(m, itr, 1);
3783                 }
3784         }
3785         if (do_lock)
3786                 binder_unlock(__func__);
3787         return 0;
3788 }
3789
3790 static void print_binder_transaction_log_entry(struct seq_file *m,
3791                                         struct binder_transaction_log_entry *e)
3792 {
3793         seq_printf(m,
3794                    "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
3795                    e->debug_id, (e->call_type == 2) ? "reply" :
3796                    ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3797                    e->from_thread, e->to_proc, e->to_thread, e->context_name,
3798                    e->to_node, e->target_handle, e->data_size, e->offsets_size);
3799 }
3800
/*
 * Dump the transaction ring buffer in chronological order: once it has
 * wrapped (log->full), entries log->next..end are the oldest, followed by
 * entries 0..log->next-1.
 *
 * NOTE(review): log->full and log->next are read here without any lock, so
 * a concurrent writer could update entries mid-dump — presumably tolerated
 * for a best-effort debugfs diagnostic; confirm before relying on output
 * consistency.
 */
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	int i;

	if (log->full) {
		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
			print_binder_transaction_log_entry(m, &log->entry[i]);
	}
	for (i = 0; i < log->next; i++)
		print_binder_transaction_log_entry(m, &log->entry[i]);
	return 0;
}
3814
/*
 * File operations for each /dev/<binder-device> misc device.  There are no
 * .read/.write handlers; userspace interacts through ioctl (the same
 * binder_ioctl serves native and compat callers) plus the mmap'ed buffer.
 */
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};
3825
/*
 * BINDER_DEBUG_ENTRY(name) presumably expands to the seq_file plumbing
 * (binder_<name>_fops wrapping binder_<name>_show) — macro defined earlier
 * in this file; the resulting fops are handed to debugfs_create_file() in
 * binder_init().
 */
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
3830
3831 static int __init init_binder_device(const char *name)
3832 {
3833         int ret;
3834         struct binder_device *binder_device;
3835
3836         binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
3837         if (!binder_device)
3838                 return -ENOMEM;
3839
3840         binder_device->miscdev.fops = &binder_fops;
3841         binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
3842         binder_device->miscdev.name = name;
3843
3844         binder_device->context.binder_context_mgr_uid = INVALID_UID;
3845         binder_device->context.name = name;
3846
3847         ret = misc_register(&binder_device->miscdev);
3848         if (ret < 0) {
3849                 kfree(binder_device);
3850                 return ret;
3851         }
3852
3853         hlist_add_head(&binder_device->hlist, &binder_devices);
3854
3855         return ret;
3856 }
3857
3858 static int __init binder_init(void)
3859 {
3860         int ret;
3861         char *device_name, *device_names;
3862         struct binder_device *device;
3863         struct hlist_node *tmp;
3864
3865         binder_deferred_workqueue = create_singlethread_workqueue("binder");
3866         if (!binder_deferred_workqueue)
3867                 return -ENOMEM;
3868
3869         binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
3870         if (binder_debugfs_dir_entry_root)
3871                 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
3872                                                  binder_debugfs_dir_entry_root);
3873
3874         if (binder_debugfs_dir_entry_root) {
3875                 debugfs_create_file("state",
3876                                     S_IRUGO,
3877                                     binder_debugfs_dir_entry_root,
3878                                     NULL,
3879                                     &binder_state_fops);
3880                 debugfs_create_file("stats",
3881                                     S_IRUGO,
3882                                     binder_debugfs_dir_entry_root,
3883                                     NULL,
3884                                     &binder_stats_fops);
3885                 debugfs_create_file("transactions",
3886                                     S_IRUGO,
3887                                     binder_debugfs_dir_entry_root,
3888                                     NULL,
3889                                     &binder_transactions_fops);
3890                 debugfs_create_file("transaction_log",
3891                                     S_IRUGO,
3892                                     binder_debugfs_dir_entry_root,
3893                                     &binder_transaction_log,
3894                                     &binder_transaction_log_fops);
3895                 debugfs_create_file("failed_transaction_log",
3896                                     S_IRUGO,
3897                                     binder_debugfs_dir_entry_root,
3898                                     &binder_transaction_log_failed,
3899                                     &binder_transaction_log_fops);
3900         }
3901
3902         /*
3903          * Copy the module_parameter string, because we don't want to
3904          * tokenize it in-place.
3905          */
3906         device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
3907         if (!device_names) {
3908                 ret = -ENOMEM;
3909                 goto err_alloc_device_names_failed;
3910         }
3911         strcpy(device_names, binder_devices_param);
3912
3913         while ((device_name = strsep(&device_names, ","))) {
3914                 ret = init_binder_device(device_name);
3915                 if (ret)
3916                         goto err_init_binder_device_failed;
3917         }
3918
3919         return ret;
3920
3921 err_init_binder_device_failed:
3922         hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
3923                 misc_deregister(&device->miscdev);
3924                 hlist_del(&device->hlist);
3925                 kfree(device);
3926         }
3927 err_alloc_device_names_failed:
3928         debugfs_remove_recursive(binder_debugfs_dir_entry_root);
3929
3930         destroy_workqueue(binder_deferred_workqueue);
3931
3932         return ret;
3933 }
3934
/* Run binder_init() at device-initcall time during boot. */
device_initcall(binder_init);

/* Instantiate the tracepoints declared in binder_trace.h (once per file). */
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");