1 /* arch/arm/mach-rk29/vpu_mem.c
\r
3 * Copyright (C) 2010 ROCKCHIP, Inc.
\r
4 * author: chenhengming chm@rock-chips.com
\r
6 * This software is licensed under the terms of the GNU General Public
\r
7 * License version 2, as published by the Free Software Foundation, and
\r
8 * may be copied, distributed, and modified under those terms.
\r
10 * This program is distributed in the hope that it will be useful,
\r
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
\r
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
\r
13 * GNU General Public License for more details.
\r
17 #include <linux/miscdevice.h>
\r
18 #include <linux/platform_device.h>
\r
19 #include <linux/fs.h>
\r
20 #include <linux/file.h>
\r
21 #include <linux/mm.h>
\r
22 #include <linux/list.h>
\r
23 #include <linux/debugfs.h>
\r
24 #include <linux/mempolicy.h>
\r
25 #include <linux/sched.h>
\r
27 #include <asm/uaccess.h>
\r
28 #include <asm/cacheflush.h>
\r
30 #include <mach/vpu_mem.h>
\r
32 #define VPU_MEM_MIN_ALLOC PAGE_SIZE
\r
33 #define VPU_MEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
\r
35 #define VPU_MEM_DEBUG 0
\r
36 #define VPU_MEM_DEBUG_MSGS 0
\r
38 #if VPU_MEM_DEBUG_MSGS
\r
39 #define DLOG(fmt,args...) \
\r
40 do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
\r
44 #define DLOG(x...) do {} while (0)
\r
48 * struct for process session which connect to vpu_mem
\r
50 * @author ChenHengming (2011-4-11)
\r
52 typedef struct vpu_mem_session {
\r
53 /* a list of memory region used posted by current process */
\r
54 struct list_head list_used;
\r
55 struct list_head list_post;
\r
56 /* a linked list of data so we can access them for debugging */
\r
57 struct list_head list_session;
\r
58 /* process id of the mapping process */
\r
63 * global region info
\r
65 typedef struct vpu_mem_region_info {
\r
66 struct list_head index_list; /* link to index list use for search */
\r
74 * struct for region information
\r
75 * this struct should be modified with bitmap lock
\r
77 typedef struct vpu_mem_link_info {
\r
78 struct list_head session_link; /* link to vpu_mem_session list */
\r
79 struct list_head status_link; /* link to vdm_info.status list use for search */
\r
88 * struct for global vpu memory info
\r
90 typedef struct vpu_mem_info {
\r
91 struct miscdevice dev;
\r
92 /* physical start address of the remapped vpu_mem space */
\r
94 /* virtual start address of the remapped vpu_mem space */
\r
95 unsigned char __iomem *vbase;
\r
96 /* total size of the vpu_mem space */
\r
98 /* number of entries in the vpu_mem space */
\r
99 unsigned long num_entries;
\r
100 /* indicates maps of this region should be cached, if a mix of
\r
101 * cached and uncached is desired, set this and open the device with
\r
102 * O_SYNC to get an uncached region */
\r
106 * at init this session holds only the free regions; a vdm_session is reused here for convenience
\r
108 vdm_session status;
\r
109 struct list_head list_index; /* sort by index */
\r
110 struct list_head list_free; /* free region list */
\r
111 struct list_head list_session; /* session list */
\r
112 struct rw_semaphore rw_sem;
\r
115 static vdm_info vpu_mem;
\r
116 static int vpu_mem_count;
\r
117 static int vpu_mem_over = 0;
\r
119 #define vdm_used (vpu_mem.status.list_used)
\r
120 #define vdm_post (vpu_mem.status.list_post)
\r
121 #define vdm_index (vpu_mem.list_index)
\r
122 #define vdm_free (vpu_mem.list_free)
\r
123 #define vdm_proc (vpu_mem.list_session)
\r
124 #define vdm_rwsem (vpu_mem.rw_sem)
\r
125 #define is_free_region(x) ((0 == (x)->used) && (0 == (x)->post))
\r
128 * vpu memory info dump:
\r
129 * first dump global info, then dump each session info
\r
131 * @author ChenHengming (2011-4-20)
\r
133 static void dump_status(void)
\r
135 vdm_link *link, *tmp_link;
\r
136 vdm_region *region, *tmp_region;
\r
137 vdm_session *session, *tmp_session;
\r
139 printk("vpu mem status dump :\n\n");
\r
141 // dump all regions, ordered by index
\r
142 printk("region:\n");
\r
143 list_for_each_entry_safe(region, tmp_region, &vdm_index, index_list) {
\r
144 printk(" idx %6d pfn %6d used %3d post %3d\n",
\r
145 region->index, region->pfn, region->used, region->post);
\r
147 printk("free :\n");
\r
148 list_for_each_entry_safe(link, tmp_link, &vdm_free, status_link) {
\r
149 printk(" idx %6d pfn %6d used %3d post %3d\n",
\r
150 link->index, link->pfn, link->link_used, link->link_post);
\r
152 printk("used :\n");
\r
153 list_for_each_entry_safe(link, tmp_link, &vdm_used, status_link) {
\r
154 printk(" idx %6d pfn %6d used %3d post %3d\n",
\r
155 link->index, link->pfn, link->link_used, link->link_post);
\r
157 printk("post :\n");
\r
158 list_for_each_entry_safe(link, tmp_link, &vdm_post, status_link) {
\r
159 printk(" idx %6d pfn %6d used %3d post %3d\n",
\r
160 link->index, link->pfn, link->link_used, link->link_post);
\r
163 // dump region usage for every session tracked in vpu_mem_info
\r
164 list_for_each_entry_safe(session, tmp_session, &vdm_proc, list_session) {
\r
165 printk("pid: %d\n", session->pid);
\r
167 list_for_each_entry_safe(link, tmp_link, &session->list_used, session_link) {
\r
168 printk("used: idx %6d pfn %6d used %3d\n",
\r
169 link->index, link->pfn, link->link_used);
\r
171 list_for_each_entry_safe(link, tmp_link, &session->list_post, session_link) {
\r
172 printk("post: idx %6d pfn %6d post %3d\n",
\r
173 link->index, link->pfn, link->link_post);
\r
179 * find used link in a session
\r
181 * @author ChenHengming (2011-4-18)
\r
186 * @return vdm_link*
\r
188 static vdm_link *find_used_link(vdm_session *session, int index)
\r
192 list_for_each_entry_safe(pos, n, &session->list_used, session_link) {
\r
193 if (index == pos->index) {
\r
194 DLOG("found index %d ptr %x\n", index, pos);
\r
203 * find post link from vpu_mem's vdm_post list
\r
205 * @author ChenHengming (2011-4-18)
\r
209 * @return vdm_link*
\r
211 static vdm_link *find_post_link(int index)
\r
215 list_for_each_entry_safe(pos, n, &vdm_post, status_link) {
\r
216 if (index == pos->index) {
\r
225 * find free link from vpu_mem's vdm_free list
\r
227 * @author Administrator (2011-4-19)
\r
231 * @return vdm_link*
\r
233 static vdm_link *find_free_link(int index)
\r
237 list_for_each_entry_safe(pos, n, &vdm_free, status_link) {
\r
238 if (index == pos->index) {
\r
246 * insert a region into the index list for search
\r
248 * @author ChenHengming (2011-4-18)
\r
254 static int _insert_region_index(vdm_region *region)
\r
256 int index = region->index;
\r
259 vdm_region *tmp, *n;
\r
261 if (list_empty(&vdm_index)) {
\r
262 DLOG("index list is empty, insert first region\n");
\r
263 list_add_tail(®ion->index_list, &vdm_index);
\r
267 list_for_each_entry_safe(tmp, n, &vdm_index, index_list) {
\r
269 DLOG("insert index %d pfn %d last %d next %d ptr %x\n", index, region->pfn, last, next, tmp);
\r
270 if ((last < index) && (index < next)) {
\r
272 list_add_tail(®ion->index_list, &tmp->index_list);
\r
278 printk(KERN_ERR "_insert_region_by_index %d fail!\n", index);
\r
284 * insert a link into vdm_free list, indexed by vdm_link->index
\r
286 * @author ChenHengming (2011-4-20)
\r
290 static void _insert_link_status_free(vdm_link *link)
\r
292 int index = link->index;
\r
297 if (list_empty(&vdm_free)) {
\r
298 DLOG("free list is empty, list_add_tail first region\n");
\r
299 list_add_tail(&link->status_link, &vdm_free);
\r
303 list_for_each_entry_safe(tmp, n, &vdm_free, status_link) {
\r
305 if ((last < index) && (index < next)) {
\r
306 DLOG("list_add_tail index %d pfn %d last %d next %d ptr %x\n", index, link->pfn, last, next, tmp);
\r
307 list_add_tail(&link->status_link, &tmp->status_link);
\r
312 list_add_tail(&link->status_link, &tmp->status_link);
\r
313 DLOG("list_add index %d pfn %d last %d ptr %x\n", index, link->pfn, last, tmp);
\r
317 static void _insert_link_status_post(vdm_link *link)
\r
319 int index = link->index;
\r
324 if (list_empty(&vdm_post)) {
\r
325 DLOG("post list is empty, list_add_tail first region\n");
\r
326 list_add_tail(&link->status_link, &vdm_post);
\r
330 list_for_each_entry_safe(tmp, n, &vdm_post, status_link) {
\r
332 if ((last < index) && (index < next)) {
\r
333 DLOG("list_add_tail index %d pfn %d last %d next %d ptr %x\n", index, link->pfn, last, next, tmp);
\r
334 list_add_tail(&link->status_link, &tmp->status_link);
\r
340 list_add_tail(&link->status_link, &tmp->status_link);
\r
341 DLOG("list_add index %d pfn %d last %d ptr %x\n", index, link->pfn, last, tmp);
\r
345 static void _insert_link_status_used(vdm_link *link)
\r
347 int index = link->index;
\r
352 if (list_empty(&vdm_used)) {
\r
353 DLOG("used list is empty, list_add_tail first region\n");
\r
354 list_add_tail(&link->status_link, &vdm_used);
\r
358 list_for_each_entry_safe(tmp, n, &vdm_used, status_link) {
\r
360 if ((last < index) && (index < next)) {
\r
361 DLOG("list_add_tail index %d pfn %d last %d next %d ptr %x\n", index, link->pfn, last, next, tmp);
\r
362 list_add_tail(&link->status_link, &tmp->status_link);
\r
368 list_add_tail(&link->status_link, &tmp->status_link);
\r
369 DLOG("list_add index %d pfn %d last %d ptr %x\n", index, link->pfn, last, tmp);
\r
373 static void _insert_link_session_used(vdm_link *link, vdm_session *session)
\r
375 int index = link->index;
\r
380 if (list_empty(&session->list_used)) {
\r
381 DLOG("session used list is empty, list_add_tail first region\n");
\r
382 list_add_tail(&link->session_link, &session->list_used);
\r
386 list_for_each_entry_safe(tmp, n, &session->list_used, session_link) {
\r
388 if ((last < index) && (index < next)) {
\r
389 list_add_tail(&link->session_link, &tmp->session_link);
\r
390 DLOG("list_add_tail index %d pfn %d last %d next %d ptr %x\n", index, link->pfn, last, next, tmp);
\r
396 list_add_tail(&link->session_link, &tmp->session_link);
\r
397 DLOG("list_add index %d pfn %d last %d ptr %x\n", index, link->pfn, last, tmp);
\r
401 static void _insert_link_session_post(vdm_link *link, vdm_session *session)
\r
403 int index = link->index;
\r
408 if (list_empty(&session->list_post)) {
\r
409 DLOG("session post list is empty, list_add_tail first region\n");
\r
410 list_add_tail(&link->session_link, &session->list_post);
\r
414 list_for_each_entry_safe(tmp, n, &session->list_post, session_link) {
\r
416 if ((last < index) && (index < next)) {
\r
417 list_add_tail(&link->session_link, &tmp->session_link);
\r
418 DLOG("list_add_tail index %d pfn %d last %d next %d ptr %x\n", index, link->pfn, last, next, tmp);
\r
424 list_add_tail(&link->session_link, &tmp->session_link);
\r
425 DLOG("list_add index %d pfn %d last %d ptr %x\n", index, link->pfn, last, tmp);
\r
429 static void _remove_free_region(vdm_region *region)
\r
431 list_del_init(®ion->index_list);
\r
435 static void _remove_free_link(vdm_link *link)
\r
437 list_del_init(&link->session_link);
\r
438 list_del_init(&link->status_link);
\r
442 static void _merge_two_region(vdm_region *dst, vdm_region *src)
\r
444 vdm_link *dst_link = find_free_link(dst->index);
\r
445 vdm_link *src_link = find_free_link(src->index);
\r
446 dst->pfn += src->pfn;
\r
447 dst_link->pfn += src_link->pfn;
\r
448 _remove_free_link(src_link);
\r
449 _remove_free_region(src);
\r
452 static void merge_free_region_and_link(vdm_region *region)
\r
454 if (region->used || region->post) {
\r
455 printk(KERN_ALERT "try to merge unfree region!\n");
\r
458 vdm_region *neighbor;
\r
459 struct list_head *tmp = region->index_list.next;
\r
460 if (tmp != &vdm_index) {
\r
461 neighbor = (vdm_region *)list_entry(tmp, vdm_region, index_list);
\r
462 if (is_free_region(neighbor)) {
\r
463 DLOG("merge next\n");
\r
464 _merge_two_region(region, neighbor);
\r
467 tmp = region->index_list.prev;
\r
468 if (tmp != &vdm_index) {
\r
469 neighbor = (vdm_region *)list_entry(tmp, vdm_region, index_list);
\r
470 if (is_free_region(neighbor)) {
\r
471 DLOG("merge prev\n");
\r
472 _merge_two_region(neighbor, region);
\r
478 static void put_free_link(vdm_link *link)
\r
480 list_del_init(&link->session_link);
\r
481 list_del_init(&link->status_link);
\r
482 _insert_link_status_free(link);
\r
485 static void put_used_link(vdm_link *link, vdm_session *session)
\r
487 list_del_init(&link->session_link);
\r
488 list_del_init(&link->status_link);
\r
489 _insert_link_status_used(link);
\r
490 _insert_link_session_used(link, session);
\r
493 static void put_post_link(vdm_link *link, vdm_session *session)
\r
495 list_del_init(&link->session_link);
\r
496 list_del_init(&link->status_link);
\r
497 _insert_link_status_post(link);
\r
498 _insert_link_session_post(link, session);
\r
502 * Create a link and a region by index and pfn at the same time,
\r
503 * and connect the link with the region
\r
505 * @author ChenHengming (2011-4-20)
\r
510 * @return vdm_link*
\r
512 static vdm_link *new_link_by_index(int index, int pfn)
\r
514 vdm_region *region = (vdm_region *)kmalloc(sizeof(vdm_region), GFP_KERNEL);
\r
515 vdm_link *link = (vdm_link *)kmalloc(sizeof(vdm_link ), GFP_KERNEL);
\r
517 if ((NULL == region) || (NULL == link)) {
\r
518 printk(KERN_ALERT "can not kmalloc vdm_region and vdm_link in %s", __FUNCTION__);
\r
530 region->index = index;
\r
533 INIT_LIST_HEAD(®ion->index_list);
\r
535 link->link_post = 0;
\r
536 link->link_used = 0;
\r
537 link->region = region;
\r
538 link->index = region->index;
\r
539 link->pfn = region->pfn;
\r
540 INIT_LIST_HEAD(&link->session_link);
\r
541 INIT_LIST_HEAD(&link->status_link);
\r
547 * Create a link from an already existing region and connect it to the
\r
550 * @author ChenHengming (2011-4-20)
\r
554 * @return vdm_link*
\r
556 static vdm_link *new_link_by_region(vdm_region *region)
\r
558 vdm_link *link = (vdm_link *)kmalloc(sizeof(vdm_link), GFP_KERNEL);
\r
559 if (NULL == link) {
\r
560 printk(KERN_ALERT "can not kmalloc vdm_region and vdm_link in %s", __FUNCTION__);
\r
564 link->link_post = 0;
\r
565 link->link_used = 0;
\r
566 link->region = region;
\r
567 link->index = region->index;
\r
568 link->pfn = region->pfn;
\r
569 INIT_LIST_HEAD(&link->session_link);
\r
570 INIT_LIST_HEAD(&link->status_link);
\r
576 * Delete a link completely
\r
578 * @author ChenHengming (2011-4-20)
\r
582 static void link_del(vdm_link *link)
\r
584 list_del_init(&link->session_link);
\r
585 list_del_init(&link->status_link);
\r
590 * Called by malloc, check whether a free link can be used for a
\r
591 * len of pfn, if can then put a used link to status link
\r
593 * @author ChenHengming (2011-4-20)
\r
599 * @return vdm_link*
\r
601 static vdm_link *get_used_link_from_free_link(vdm_link *link, vdm_session *session, int pfn)
\r
603 if (pfn > link->pfn) {
\r
606 if (pfn == link->pfn) {
\r
607 DLOG("pfn == link->pfn %d\n", pfn);
\r
608 link->link_used = 1;
\r
609 link->region->used = 1;
\r
610 put_used_link(link, session);
\r
613 vdm_link *used = new_link_by_index(link->index, pfn);
\r
617 link->index += pfn;
\r
619 link->region->index += pfn;
\r
620 link->region->pfn -= pfn;
\r
621 used->link_used = 1;
\r
622 used->region->used = 1;
\r
624 DLOG("used: index %d pfn %d ptr %x\n", used->index, used->pfn, used->region);
\r
625 if (_insert_region_index(used->region)) {
\r
626 printk(KERN_ALERT "fail to insert allocated region index %d pfn %d\n", used->index, used->pfn);
\r
628 link->index -= pfn;
\r
630 link->region->index -= pfn;
\r
631 link->region->pfn += pfn;
\r
632 _remove_free_region(used->region);
\r
633 _remove_free_link(used);
\r
636 put_used_link(used, session);
\r
641 static int vpu_mem_release(struct inode *, struct file *);
\r
642 static int vpu_mem_mmap(struct file *, struct vm_area_struct *);
\r
643 static int vpu_mem_open(struct inode *, struct file *);
\r
644 static long vpu_mem_ioctl(struct file *, unsigned int, unsigned long);
\r
646 struct file_operations vpu_mem_fops = {
\r
647 .open = vpu_mem_open,
\r
648 .mmap = vpu_mem_mmap,
\r
649 .unlocked_ioctl = vpu_mem_ioctl,
\r
650 .release = vpu_mem_release,
\r
653 int is_vpu_mem_file(struct file *file)
\r
655 if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
\r
657 if (unlikely(file->f_dentry->d_inode->i_rdev !=
\r
658 MKDEV(MISC_MAJOR, vpu_mem.dev.minor)))
\r
663 static long vpu_mem_allocate(struct file *file, unsigned int len)
\r
665 vdm_link *free, *n;
\r
666 unsigned int pfn = (len + VPU_MEM_MIN_ALLOC - 1)/VPU_MEM_MIN_ALLOC;
\r
667 vdm_session *session = (vdm_session *)file->private_data;
\r
669 if (!is_vpu_mem_file(file)) {
\r
670 printk(KERN_INFO "allocate vpu_mem session from invalid file\n");
\r
674 list_for_each_entry_safe(free, n, &vdm_free, status_link) {
\r
675 /* find match free buffer use it first */
\r
676 vdm_link *used = get_used_link_from_free_link(free, session, pfn);
\r
677 DLOG("search free buffer at index %d pfn %d for len %d\n", free->index, free->pfn, pfn);
\r
678 if (NULL == used) {
\r
681 DLOG("found buffer at index %d pfn %d for ptr %x\n", used->index, used->pfn, used);
\r
682 return used->index;
\r
686 if (!vpu_mem_over) {
\r
687 printk(KERN_INFO "vpu_mem: no space left to allocate!\n");
\r
694 static int vpu_mem_free(struct file *file, int index)
\r
696 vdm_session *session = (vdm_session *)file->private_data;
\r
698 if (!is_vpu_mem_file(file)) {
\r
699 printk(KERN_INFO "free vpu_mem session from invalid file.\n");
\r
703 DLOG("searching for index %d\n", index);
\r
705 vdm_link *link = find_used_link(session, index);
\r
706 if (NULL == link) {
\r
707 DLOG("no link of index %d searched\n", index);
\r
711 link->region->used--;
\r
712 if (0 == link->link_used) {
\r
713 if (is_free_region(link->region)) {
\r
714 put_free_link(link);
\r
715 merge_free_region_and_link(link->region);
\r
724 static int vpu_mem_duplicate(struct file *file, int index)
\r
726 vdm_session *session = (vdm_session *)file->private_data;
\r
727 /* caller should hold the write lock on vpu_mem_sem! */
\r
728 if (!is_vpu_mem_file(file)) {
\r
729 printk(KERN_INFO "duplicate vpu_mem session from invalid file.\n");
\r
733 DLOG("duplicate index %d\n", index);
\r
735 vdm_link *post = find_post_link(index);
\r
736 if (NULL == post) {
\r
737 vdm_link *used = find_used_link(session, index);
\r
738 if (NULL == used) {
\r
739 printk(KERN_ERR "try to duplicate unknown index %d\n", index);
\r
743 post = new_link_by_region(used->region);
\r
744 post->link_post = 1;
\r
745 post->region->post++;
\r
746 put_post_link(post, session);
\r
748 DLOG("duplicate posted index %d\n", index);
\r
750 post->region->post++;
\r
757 static int vpu_mem_link(struct file *file, int index)
\r
759 vdm_session *session = (vdm_session *)file->private_data;
\r
761 if (!is_vpu_mem_file(file)) {
\r
762 printk(KERN_INFO "link vpu_mem session from invalid file.\n");
\r
766 DLOG("link index %d\n", index);
\r
768 vdm_link *post = find_post_link(index);
\r
769 if (NULL == post) {
\r
770 printk(KERN_ERR "try to link unknown index %d\n", index);
\r
774 vdm_link *used = find_used_link(session, index);
\r
776 post->region->post--;
\r
777 if (0 == post->link_post) {
\r
778 if (NULL == used) {
\r
780 post->region->used++;
\r
781 put_used_link(post, session);
\r
784 used->region->used++;
\r
788 if (NULL == used) {
\r
789 used = new_link_by_region(post->region);
\r
791 used->region->used++;
\r
792 put_used_link(used, session);
\r
795 used->region->used++;
\r
804 void vpu_mem_cache_opt(struct file *file, long index, unsigned int cmd)
\r
806 vdm_session *session = (vdm_session *)file->private_data;
\r
809 if (!is_vpu_mem_file(file)) {
\r
813 if (!vpu_mem.cached || file->f_flags & O_SYNC)
\r
816 down_read(&vdm_rwsem);
\r
818 vdm_link *link = find_used_link(session, index);
\r
819 if (NULL == link) {
\r
820 pr_err("vpu_mem_cache_opt on non-exsist index %ld\n", index);
\r
823 start = vpu_mem.vbase + index * VPU_MEM_MIN_ALLOC;
\r
824 end = start + link->pfn * VPU_MEM_MIN_ALLOC;;
\r
826 case VPU_MEM_CACHE_FLUSH : {
\r
827 dmac_flush_range(start, end);
\r
830 case VPU_MEM_CACHE_CLEAN : {
\r
831 dmac_clean_range(start, end);
\r
834 case VPU_MEM_CACHE_INVALID : {
\r
835 dmac_inv_range(start, end);
\r
842 up_read(&vdm_rwsem);
\r
845 static pgprot_t phys_mem_access_prot(struct file *file, pgprot_t vma_prot)
\r
847 #ifdef pgprot_noncached
\r
848 if (vpu_mem.cached == 0 || file->f_flags & O_SYNC)
\r
849 return pgprot_noncached(vma_prot);
\r
851 #ifdef pgprot_ext_buffered
\r
852 else if (vpu_mem.buffered)
\r
853 return pgprot_ext_buffered(vma_prot);
\r
858 static int vpu_mem_map_pfn_range(struct vm_area_struct *vma, unsigned long len)
\r
860 DLOG("map len %lx\n", len);
\r
861 BUG_ON(!VPU_MEM_IS_PAGE_ALIGNED(vma->vm_start));
\r
862 BUG_ON(!VPU_MEM_IS_PAGE_ALIGNED(vma->vm_end));
\r
863 BUG_ON(!VPU_MEM_IS_PAGE_ALIGNED(len));
\r
864 if (io_remap_pfn_range(vma, vma->vm_start,
\r
865 vpu_mem.base >> PAGE_SHIFT,
\r
866 len, vma->vm_page_prot)) {
\r
872 static int vpu_mem_open(struct inode *inode, struct file *file)
\r
874 vdm_session *session;
\r
877 DLOG("current %u file %p(%d)\n", current->pid, file, (int)file_count(file));
\r
878 /* setup file->private_data to indicate its unmapped */
\r
879 /* you can only open a vpu_mem device one time */
\r
880 if (file->private_data != NULL)
\r
882 session = kmalloc(sizeof(vdm_session), GFP_KERNEL);
\r
884 printk(KERN_ALERT "vpu_mem: unable to allocate memory for vpu_mem metadata.");
\r
887 session->pid = current->pid;
\r
888 INIT_LIST_HEAD(&session->list_post);
\r
889 INIT_LIST_HEAD(&session->list_used);
\r
891 file->private_data = session;
\r
893 down_write(&vdm_rwsem);
\r
894 list_add_tail(&session->list_session, &vdm_proc);
\r
895 up_write(&vdm_rwsem);
\r
899 static int vpu_mem_mmap(struct file *file, struct vm_area_struct *vma)
\r
901 vdm_session *session;
\r
902 unsigned long vma_size = vma->vm_end - vma->vm_start;
\r
905 if (vma->vm_pgoff || !VPU_MEM_IS_PAGE_ALIGNED(vma_size)) {
\r
906 printk(KERN_ALERT "vpu_mem: mmaps must be at offset zero, aligned"
\r
907 " and a multiple of pages_size.\n");
\r
911 session = (vdm_session *)file->private_data;
\r
913 /* assert: vma_size must be the total size of the vpu_mem */
\r
914 if (vpu_mem.size != vma_size) {
\r
915 printk(KERN_WARNING "vpu_mem: mmap size [%lu] does not match"
\r
916 "size of backing region [%lu].\n", vma_size, vpu_mem.size);
\r
921 vma->vm_pgoff = vpu_mem.base >> PAGE_SHIFT;
\r
922 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_page_prot);
\r
924 if (vpu_mem_map_pfn_range(vma, vma_size)) {
\r
925 printk(KERN_INFO "vpu_mem: mmap failed in kernel!\n");
\r
930 session->pid = current->pid;
\r
936 static int vpu_mem_release(struct inode *inode, struct file *file)
\r
938 vdm_session *session = (vdm_session *)file->private_data;
\r
940 down_write(&vdm_rwsem);
\r
942 vdm_link *link, *tmp_link;
\r
943 //unsigned long flags = current->flags;
\r
944 //printk("current->flags: %lx\n", flags);
\r
945 list_del(&session->list_session);
\r
946 file->private_data = NULL;
\r
948 list_for_each_entry_safe(link, tmp_link, &session->list_post, session_link) {
\r
951 link->region->post--;
\r
952 } while (link->link_post);
\r
953 if (find_free_link(link->index)) {
\r
956 put_free_link(link);
\r
958 if (is_free_region(link->region)) {
\r
959 merge_free_region_and_link(link->region);
\r
962 list_for_each_entry_safe(link, tmp_link, &session->list_used, session_link) {
\r
965 link->region->used--;
\r
966 } while (link->link_used);
\r
967 if (find_free_link(link->index)) {
\r
970 put_free_link(link);
\r
972 if (is_free_region(link->region)) {
\r
973 merge_free_region_and_link(link->region);
\r
977 up_write(&vdm_rwsem);
\r
983 static long vpu_mem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
\r
985 long index, ret = 0;
\r
988 case VPU_MEM_GET_PHYS:
\r
989 DLOG("get_phys\n");
\r
990 printk(KERN_INFO "vpu_mem: request for physical address of vpu_mem region "
\r
991 "from process %d.\n", current->pid);
\r
992 if (copy_to_user((void __user *)arg, &vpu_mem.base, sizeof(vpu_mem.base)))
\r
995 case VPU_MEM_GET_TOTAL_SIZE:
\r
996 DLOG("get total size\n");
\r
997 if (copy_to_user((void __user *)arg, &vpu_mem.size, sizeof(vpu_mem.size)))
\r
1000 case VPU_MEM_ALLOCATE:
\r
1001 DLOG("allocate\n");
\r
1003 unsigned int size;
\r
1004 if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
\r
1006 down_write(&vdm_rwsem);
\r
1007 ret = vpu_mem_allocate(file, size);
\r
1008 up_write(&vdm_rwsem);
\r
1009 DLOG("allocate at index %ld\n", ret);
\r
1012 case VPU_MEM_FREE:
\r
1013 DLOG("mem free\n");
\r
1015 if (copy_from_user(&index, (void __user *)arg, sizeof(index)))
\r
1017 if (index >= vpu_mem.size)
\r
1019 down_write(&vdm_rwsem);
\r
1020 ret = vpu_mem_free(file, index);
\r
1021 up_write(&vdm_rwsem);
\r
1024 case VPU_MEM_CACHE_FLUSH:
\r
1025 case VPU_MEM_CACHE_CLEAN:
\r
1026 case VPU_MEM_CACHE_INVALID:
\r
1029 if (copy_from_user(&index, (void __user *)arg, sizeof(index)))
\r
1033 vpu_mem_cache_opt(file, index, cmd);
\r
1036 case VPU_MEM_DUPLICATE:
\r
1037 DLOG("duplicate\n");
\r
1039 if (copy_from_user(&index, (void __user *)arg, sizeof(index)))
\r
1041 down_write(&vdm_rwsem);
\r
1042 ret = vpu_mem_duplicate(file, index);
\r
1043 up_write(&vdm_rwsem);
\r
1046 case VPU_MEM_LINK:
\r
1049 if (copy_from_user(&index, (void __user *)arg, sizeof(index)))
\r
1051 down_write(&vdm_rwsem);
\r
1052 ret = vpu_mem_link(file, index);
\r
1053 up_write(&vdm_rwsem);
\r
1063 static ssize_t debug_open(struct inode *inode, struct file *file)
\r
1065 file->private_data = inode->i_private;
\r
1069 static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
\r
1072 vdm_region *region, *tmp_region;
\r
1073 const int debug_bufmax = 4096;
\r
1074 static char buffer[4096];
\r
1077 DLOG("debug open\n");
\r
1078 n = scnprintf(buffer, debug_bufmax,
\r
1079 "pid #: mapped regions (offset, len, used, post) ...\n");
\r
1080 down_read(&vdm_rwsem);
\r
1081 list_for_each_entry_safe(region, tmp_region, &vdm_index, index_list) {
\r
1082 n += scnprintf(buffer + n, debug_bufmax - n,
\r
1084 region->index, region->pfn, region->used, region->post);
\r
1086 up_read(&vdm_rwsem);
\r
1089 return simple_read_from_buffer(buf, count, ppos, buffer, n);
\r
1092 static struct file_operations debug_fops = {
\r
1093 .read = debug_read,
\r
1094 .open = debug_open,
\r
1098 int vpu_mem_setup(struct vpu_mem_platform_data *pdata)
\r
1100 vdm_link *tmp = NULL;
\r
1103 if (vpu_mem_count) {
\r
1104 printk(KERN_ALERT "Only one vpu_mem driver can be register!\n");
\r
1105 goto err_cant_register_device;
\r
1108 memset(&vpu_mem, 0, sizeof(struct vpu_mem_info));
\r
1110 vpu_mem.cached = pdata->cached;
\r
1111 vpu_mem.buffered = pdata->buffered;
\r
1112 vpu_mem.base = pdata->start;
\r
1113 vpu_mem.size = pdata->size;
\r
1114 init_rwsem(&vdm_rwsem);
\r
1115 INIT_LIST_HEAD(&vdm_proc);
\r
1116 INIT_LIST_HEAD(&vdm_used);
\r
1117 INIT_LIST_HEAD(&vdm_post);
\r
1118 INIT_LIST_HEAD(&vdm_free);
\r
1119 INIT_LIST_HEAD(&vdm_index);
\r
1120 vpu_mem.dev.name = pdata->name;
\r
1121 vpu_mem.dev.minor = MISC_DYNAMIC_MINOR;
\r
1122 vpu_mem.dev.fops = &vpu_mem_fops;
\r
1124 err = misc_register(&vpu_mem.dev);
\r
1126 printk(KERN_ALERT "Unable to register vpu_mem driver!\n");
\r
1127 goto err_cant_register_device;
\r
1130 vpu_mem.num_entries = vpu_mem.size / VPU_MEM_MIN_ALLOC;
\r
1132 tmp = new_link_by_index(0, vpu_mem.num_entries);
\r
1133 if (NULL == tmp) {
\r
1134 printk(KERN_ALERT "init free region failed\n");
\r
1135 goto err_no_mem_for_metadata;
\r
1137 put_free_link(tmp);
\r
1138 _insert_region_index(tmp->region);
\r
1140 if (vpu_mem.cached)
\r
1141 vpu_mem.vbase = ioremap_cached(vpu_mem.base, vpu_mem.size);
\r
1142 #ifdef ioremap_ext_buffered
\r
1143 else if (vpu_mem.buffered)
\r
1144 vpu_mem.vbase = ioremap_ext_buffered(vpu_mem.base, vpu_mem.size);
\r
1147 vpu_mem.vbase = ioremap(vpu_mem.base, vpu_mem.size);
\r
1149 if (vpu_mem.vbase == 0)
\r
1150 goto error_cant_remap;
\r
1153 debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)vpu_mem.dev.minor,
\r
1156 printk("%s: %d initialized\n", pdata->name, vpu_mem.dev.minor);
\r
1163 err_no_mem_for_metadata:
\r
1164 misc_deregister(&vpu_mem.dev);
\r
1165 err_cant_register_device:
\r
1169 static int vpu_mem_probe(struct platform_device *pdev)
\r
1171 struct vpu_mem_platform_data *pdata;
\r
1173 if (!pdev || !pdev->dev.platform_data) {
\r
1174 printk(KERN_ALERT "Unable to probe vpu_mem!\n");
\r
1177 pdata = pdev->dev.platform_data;
\r
1178 return vpu_mem_setup(pdata);
\r
1181 static int vpu_mem_remove(struct platform_device *pdev)
\r
1183 if (!pdev || !pdev->dev.platform_data) {
\r
1184 printk(KERN_ALERT "Unable to remove vpu_mem!\n");
\r
1187 if (vpu_mem_count) {
\r
1188 misc_deregister(&vpu_mem.dev);
\r
1191 printk(KERN_ALERT "no vpu_mem to remove!\n");
\r
1196 static struct platform_driver vpu_mem_driver = {
\r
1197 .probe = vpu_mem_probe,
\r
1198 .remove = vpu_mem_remove,
\r
1199 .driver = { .name = "vpu_mem" }
\r
1203 static int __init vpu_mem_init(void)
\r
1205 return platform_driver_register(&vpu_mem_driver);
\r
1208 static void __exit vpu_mem_exit(void)
\r
1210 platform_driver_unregister(&vpu_mem_driver);
\r
1213 module_init(vpu_mem_init);
\r
1214 module_exit(vpu_mem_exit);
\r
1216 #ifdef CONFIG_PROC_FS
\r
1217 #include <linux/proc_fs.h>
\r
1218 #include <linux/seq_file.h>
\r
1220 static int proc_vpu_mem_show(struct seq_file *s, void *v)
\r
1222 if (vpu_mem_count) {
\r
1223 seq_printf(s, "vpu mem opened\n");
\r
1225 seq_printf(s, "vpu mem closed\n");
\r
1229 down_read(&vdm_rwsem);
\r
1231 vdm_link *link, *tmp_link;
\r
1232 vdm_region *region, *tmp_region;
\r
1233 vdm_session *session, *tmp_session;
\r
1234 // dump all regions, ordered by index
\r
1235 seq_printf(s, "index:\n");
\r
1236 list_for_each_entry_safe(region, tmp_region, &vdm_index, index_list) {
\r
1237 seq_printf(s, " idx %6d pfn %6d used %3d post %3d\n",
\r
1238 region->index, region->pfn, region->used, region->post);
\r
1240 if (list_empty(&vdm_free)) {
\r
1241 seq_printf(s, "free : empty\n");
\r
1243 seq_printf(s, "free :\n");
\r
1244 list_for_each_entry_safe(link, tmp_link, &vdm_free, status_link) {
\r
1245 seq_printf(s, " idx %6d pfn %6d used %3d post %3d\n",
\r
1246 link->index, link->pfn, link->link_used, link->link_post);
\r
1249 if (list_empty(&vdm_used)) {
\r
1250 seq_printf(s, "used : empty\n");
\r
1252 seq_printf(s, "used :\n");
\r
1253 list_for_each_entry_safe(link, tmp_link, &vdm_used, status_link) {
\r
1254 seq_printf(s, " idx %6d pfn %6d used %3d post %3d\n",
\r
1255 link->index, link->pfn, link->link_used, link->link_post);
\r
1258 if (list_empty(&vdm_post)) {
\r
1259 seq_printf(s, "post : empty\n");
\r
1261 seq_printf(s, "post :\n");
\r
1262 list_for_each_entry_safe(link, tmp_link, &vdm_post, status_link) {
\r
1263 seq_printf(s, " idx %6d pfn %6d used %3d post %3d\n",
\r
1264 link->index, link->pfn, link->link_used, link->link_post);
\r
1268 // dump region usage for every session tracked in vpu_mem_info
\r
1269 list_for_each_entry_safe(session, tmp_session, &vdm_proc, list_session) {
\r
1270 seq_printf(s, "\npid: %d\n", session->pid);
\r
1271 if (list_empty(&session->list_used)) {
\r
1272 seq_printf(s, "used : empty\n");
\r
1274 seq_printf(s, "used :\n");
\r
1275 list_for_each_entry_safe(link, tmp_link, &session->list_used, session_link) {
\r
1276 seq_printf(s, " idx %6d pfn %6d used %3d\n",
\r
1277 link->index, link->pfn, link->link_used);
\r
1280 if (list_empty(&session->list_post)) {
\r
1281 seq_printf(s, "post : empty\n");
\r
1283 seq_printf(s, "post :\n");
\r
1284 list_for_each_entry_safe(link, tmp_link, &session->list_post, session_link) {
\r
1285 seq_printf(s, " idx %6d pfn %6d post %3d\n",
\r
1286 link->index, link->pfn, link->link_post);
\r
1292 up_read(&vdm_rwsem);
\r
1296 static int proc_vpu_mem_open(struct inode *inode, struct file *file)
\r
1298 return single_open(file, proc_vpu_mem_show, NULL);
\r
1301 static const struct file_operations proc_vpu_mem_fops = {
\r
1302 .open = proc_vpu_mem_open,
\r
1304 .llseek = seq_lseek,
\r
1305 .release = single_release,
\r
1308 static int __init vpu_mem_proc_init(void)
\r
1310 proc_create("vpu_mem", 0, NULL, &proc_vpu_mem_fops);
\r
1314 late_initcall(vpu_mem_proc_init);
\r
1315 #endif /* CONFIG_PROC_FS */
\r