Merge tag 'lsk-v3.10-android-14.07' into develop-3.10
[firefly-linux-kernel-4.4.55.git] / drivers / iommu / rockchip-iommu.c
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License version 2 as
4  * published by the Free Software Foundation.
5  */
6
7 #ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
8 #define DEBUG
9 #endif
10
11 #include <linux/io.h>
12 #include <linux/interrupt.h>
13 #include <linux/slab.h>
14 #include <linux/clk.h>
15 #include <linux/err.h>
16 #include <linux/mm.h>
17 #include <linux/errno.h>
18 #include <linux/memblock.h>
19 #include <linux/export.h>
20 #include <linux/module.h>
21
22 #include <asm/cacheflush.h>
23 #include <asm/pgtable.h>
24 #include <linux/of.h>
25 #include <linux/rockchip/sysmmu.h>
26
27 #include "rockchip-iommu.h"
28
/* Only 4 KiB small-page mappings are handled; super section mapping
 * (16MB) is not considered. */
#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

/* Flag bits stored in the low bits of lv1/lv2 page-table entries. */
typedef enum sysmmu_entry_flags 
{
        SYSMMU_FLAGS_PRESENT = 0x01,
        SYSMMU_FLAGS_READ_PERMISSION = 0x02,
        SYSMMU_FLAGS_WRITE_PERMISSION = 0x04,
        SYSMMU_FLAGS_OVERRIDE_CACHE  = 0x8,
        SYSMMU_FLAGS_WRITE_CACHEABLE  = 0x10,
        SYSMMU_FLAGS_WRITE_ALLOCATE  = 0x20,
        SYSMMU_FLAGS_WRITE_BUFFERABLE  = 0x40,
        SYSMMU_FLAGS_READ_CACHEABLE  = 0x80,
        SYSMMU_FLAGS_READ_ALLOCATE  = 0x100,
        SYSMMU_FLAGS_MASK = 0x1FF,
} sysmmu_entry_flags;

/* Entry predicates: an entry with the PRESENT bit clear is a fault. */
#define lv1ent_fault(sent) ((*(sent) & SYSMMU_FLAGS_PRESENT) == 0)
#define lv1ent_page(sent) ((*(sent) & SYSMMU_FLAGS_PRESENT) == 1)
#define lv2ent_fault(pent) ((*(pent) & SYSMMU_FLAGS_PRESENT) == 0)
/* Physical page address / page offset extraction. */
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) ((iova) & 0x0FFF)

/* A 32-bit iova splits as: 10-bit lv1 index | 10-bit lv2 index | 12-bit offset. */
#define lv1ent_offset(iova) (((iova)>>22) & 0x03FF)
#define lv2ent_offset(iova) (((iova)>>12) & 0x03FF)

#define NUM_LV1ENTRIES 1024
#define NUM_LV2ENTRIES 1024

/* Byte size of one lv2 page table (one entry per small page). */
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))

/* lv1 entries store the lv2 table address; bit 0 doubles as PRESENT. */
#define lv2table_base(sent) (*(sent) & 0xFFFFFFFE)

#define mk_lv1ent_page(pa) ((pa) | SYSMMU_FLAGS_PRESENT)
/*write and read permission for level2 page default*/
#define mk_lv2ent_spage(pa) ((pa) | SYSMMU_FLAGS_PRESENT |SYSMMU_FLAGS_READ_PERMISSION |SYSMMU_FLAGS_WRITE_PERMISSION)

/* Iteration bound for busy-wait polls on MMU status registers. */
#define SYSMMU_REG_POLL_COUNT_FAST 1000

/**
 * MMU register numbers
 * Used in the register read/write routines.
 * See the hardware documentation for more information about each register
 */
typedef enum sysmmu_register 
{
        SYSMMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
        SYSMMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
        SYSMMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
        SYSMMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
        SYSMMU_REGISTER_ZAP_ONE_LINE = 0x010, /**< Used to invalidate the mapping of a single page from the MMU */
        SYSMMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
        SYSMMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
        SYSMMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
        SYSMMU_REGISTER_INT_STATUS = 0x0020, /**< Interrupt status based on the mask */
        SYSMMU_REGISTER_AUTO_GATING     = 0x0024
} sysmmu_register;

/* Commands written to SYSMMU_REGISTER_COMMAND. */
typedef enum sysmmu_command 
{
        SYSMMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
        SYSMMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
        SYSMMU_COMMAND_ENABLE_STALL = 0x02, /**<  Enable stall on page fault */
        SYSMMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
        SYSMMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
        SYSMMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
        SYSMMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
} sysmmu_command;

/**
 * MMU interrupt register bits
 * Each cause of the interrupt is reported
 * through the (raw) interrupt status registers.
 * Multiple interrupts can be pending, so multiple bits
 * can be set at once.
 */
typedef enum sysmmu_interrupt 
{
        SYSMMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occured */
        SYSMMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occured */
} sysmmu_interrupt;

/* Bits of SYSMMU_REGISTER_STATUS. */
typedef enum sysmmu_status_bits 
{
        SYSMMU_STATUS_BIT_PAGING_ENABLED      = 1 << 0,
        SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE   = 1 << 1,
        SYSMMU_STATUS_BIT_STALL_ACTIVE        = 1 << 2,
        SYSMMU_STATUS_BIT_IDLE                = 1 << 3,
        SYSMMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
        SYSMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
        SYSMMU_STATUS_BIT_STALL_NOT_ACTIVE    = 1 << 31,
} sys_mmu_status_bits;

/**
 * Size of an MMU page in bytes
 */
#define SYSMMU_PAGE_SIZE 0x1000

/*
 * Size of the address space referenced by a page table page
 */
#define SYSMMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */

/**
 * Page directory index from address
 * Calculates the page directory index from the given address
 */
#define SYSMMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)

/**
 * Page table index from address
 * Calculates the page table index from the given address
 */
#define SYSMMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)

/**
 * Extract the memory address from an PDE/PTE entry
 */
#define SYSMMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)

#define INVALID_PAGE ((u32)(~0))

/* Slab cache used to allocate lv2 page tables (see alloc_lv2entry()). */
static struct kmem_cache *lv2table_kmem_cache;
/* Return the lv1 (section) entry within @pgtable that covers @iova. */
static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
        return &pgtable[lv1ent_offset(iova)];
}
157
/* Return the lv2 (small-page) entry for @iova in the table that the
 * lv1 entry @sent points to. */
static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
        unsigned long *lv2table = (unsigned long *)__va(lv2table_base(sent));

        return &lv2table[lv2ent_offset(iova)];
}
162
/* Human-readable fault names, indexed by enum rk_sysmmu_inttype. */
static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
        "PAGE FAULT",
        "BUS ERROR",
        "UNKNOWN FAULT"
};

/* Per-iommu-domain state: one lv1 page table plus bookkeeping. */
struct rk_iommu_domain {
        struct list_head clients; /* list of sysmmu_drvdata.node */
        unsigned long *pgtable; /* lv1 page table, 4KB */
        short *lv2entcnt; /* free lv2 entry counter for each section */
        spinlock_t lock; /* lock for this structure */
        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};
176
177 static bool set_sysmmu_active(struct sysmmu_drvdata *data)
178 {
179         /* return true if the System MMU was not active previously
180            and it needs to be initialized */
181         return ++data->activations == 1;
182 }
183
184 static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
185 {
186         /* return true if the System MMU is needed to be disabled */
187         BUG_ON(data->activations < 1);
188         return --data->activations == 0;
189 }
190
/* True while at least one activation reference is held. */
static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
        return data->activations > 0;
}
195 static void sysmmu_disable_stall(void __iomem *sfrbase)
196 {
197         int i;
198         u32 mmu_status = __raw_readl(sfrbase+SYSMMU_REGISTER_STATUS);
199         if ( 0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED )) 
200         {
201                 //pr_err("MMU disable skipped since it was not enabled.\n");
202                 return;
203         }
204         if (mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) 
205         {
206                 pr_err("Aborting MMU disable stall request since it is in pagefault state.\n");
207                 return;
208         }
209         
210         __raw_writel(SYSMMU_COMMAND_DISABLE_STALL, sfrbase + SYSMMU_REGISTER_COMMAND);
211         
212         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
213         {
214                 u32 status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);
215                 if ( 0 == (status & SYSMMU_STATUS_BIT_STALL_ACTIVE) ) 
216                 {
217                         break;
218                 }
219                 if ( status &  SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) 
220                 {
221                         break;
222                 }
223                 if ( 0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED )) 
224                 {
225                         break;
226                 }
227         }
228         if (SYSMMU_REG_POLL_COUNT_FAST == i) 
229                 pr_err("Disable stall request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
230 }
231 static bool sysmmu_enable_stall(void __iomem *sfrbase)
232 {
233         int i;
234         u32 mmu_status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);
235
236         if ( 0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED) ) 
237         {
238                 //pr_info("MMU stall is implicit when Paging is not enabled.\n");
239                 return true;
240         }
241         if ( mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) 
242         {
243                 pr_err("Aborting MMU stall request since it is in pagefault state.\n");
244                 return false;
245         }
246         
247         __raw_writel(SYSMMU_COMMAND_ENABLE_STALL, sfrbase + SYSMMU_REGISTER_COMMAND);
248
249         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
250         {
251                 mmu_status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);
252                 if (mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) 
253                 {
254                         break;
255                 }
256                 if ((mmu_status & SYSMMU_STATUS_BIT_STALL_ACTIVE)&&(0==(mmu_status & SYSMMU_STATUS_BIT_STALL_NOT_ACTIVE))) 
257                 {
258                         break;
259                 }
260                 if (0 == (mmu_status & ( SYSMMU_STATUS_BIT_PAGING_ENABLED ))) 
261                 {
262                         break;
263                 }
264         }
265         if (SYSMMU_REG_POLL_COUNT_FAST == i) 
266         {
267                 pr_info("Enable stall request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
268                 return false;
269         }
270         if ( mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) 
271         {
272                 pr_info("Aborting MMU stall request since it has a pagefault.\n");
273                 return false;
274         }
275         return true;
276 }
277
278 static bool sysmmu_enable_paging(void __iomem *sfrbase)
279 {
280         int i;
281         __raw_writel(SYSMMU_COMMAND_ENABLE_PAGING, sfrbase + SYSMMU_REGISTER_COMMAND);
282
283         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
284         {
285                 if (__raw_readl(sfrbase + SYSMMU_REGISTER_STATUS) & SYSMMU_STATUS_BIT_PAGING_ENABLED) 
286                 {
287                         //pr_info("Enable paging request success.\n");
288                         break;
289                 }
290         }
291         if (SYSMMU_REG_POLL_COUNT_FAST == i)
292         {
293                 pr_err("Enable paging request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
294                 return false;
295         }
296         return true;
297 }
298 static bool sysmmu_disable_paging(void __iomem *sfrbase)
299 {
300         int i;
301         __raw_writel(SYSMMU_COMMAND_DISABLE_PAGING, sfrbase + SYSMMU_REGISTER_COMMAND);
302
303         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
304         {
305                 if (!(__raw_readl(sfrbase + SYSMMU_REGISTER_STATUS) & SYSMMU_STATUS_BIT_PAGING_ENABLED)) 
306                 {
307                         //pr_info("Disable paging request success.\n");
308                         break;
309                 }
310         }
311         if (SYSMMU_REG_POLL_COUNT_FAST == i)
312         {
313                 pr_err("Disable paging request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
314                 return false;
315         }
316         return true;
317 }
318
319 void sysmmu_page_fault_done(void __iomem *sfrbase,const char *dbgname)
320 {
321         pr_info("MMU: %s: Leaving page fault mode\n", dbgname);
322         __raw_writel(SYSMMU_COMMAND_PAGE_FAULT_DONE, sfrbase + SYSMMU_REGISTER_COMMAND);
323 }
324 bool sysmmu_zap_tlb(void __iomem *sfrbase)
325 {
326         bool stall_success = sysmmu_enable_stall(sfrbase);
327         
328         __raw_writel(SYSMMU_COMMAND_ZAP_CACHE, sfrbase + SYSMMU_REGISTER_COMMAND);
329         if (false == stall_success) 
330         {
331                 /* False means that it is in Pagefault state. Not possible to disable_stall then */
332                 return false;
333         }
334         sysmmu_disable_stall(sfrbase);
335         return true;
336 }
337 static inline bool sysmmu_raw_reset(void __iomem *sfrbase)
338 {
339         int i;
340         __raw_writel(0xCAFEBABE, sfrbase + SYSMMU_REGISTER_DTE_ADDR);
341
342         if(!(0xCAFEB000 == __raw_readl(sfrbase+SYSMMU_REGISTER_DTE_ADDR)))
343         {
344                 pr_err("error when %s.\n",__func__);
345                 return false;
346         }
347         __raw_writel(SYSMMU_COMMAND_HARD_RESET, sfrbase + SYSMMU_REGISTER_COMMAND);
348
349         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
350         {
351                 if(__raw_readl(sfrbase + SYSMMU_REGISTER_DTE_ADDR) == 0)
352                 {
353                         break;
354                 }
355         }
356         if (SYSMMU_REG_POLL_COUNT_FAST == i) {
357                 pr_err("%s,Reset request failed, MMU status is 0x%08X\n", __func__,__raw_readl(sfrbase + SYSMMU_REGISTER_DTE_ADDR));
358                 return false;
359         }
360         return true;
361 }
362
363 static void __sysmmu_set_ptbase(void __iomem *sfrbase,unsigned long pgd)
364 {
365         __raw_writel(pgd, sfrbase + SYSMMU_REGISTER_DTE_ADDR);
366
367 }
368
369 static bool sysmmu_reset(void __iomem *sfrbase,const char *dbgname)
370 {
371         bool err = true;
372         
373         err = sysmmu_enable_stall(sfrbase);
374         if(!err)
375         {
376                 pr_info("%s:stall failed: %s\n",__func__,dbgname);
377                 return err;
378         }
379         err = sysmmu_raw_reset(sfrbase);
380         if(err)
381         {
382                 __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT|SYSMMU_INTERRUPT_READ_BUS_ERROR, sfrbase+SYSMMU_REGISTER_INT_MASK);
383         }
384         sysmmu_disable_stall(sfrbase);
385         if(!err)
386                 pr_info("%s: failed: %s\n", __func__,dbgname);
387         return err;
388 }
389
/*
 * Flush CPU caches for the page-table range [vastart, vaend) so the
 * IOMMU, which reads the tables directly from memory, observes the
 * updates: inner (dmac) flush first, then the outer cache by PA.
 */
static inline void pgtable_flush(void *vastart, void *vaend)
{
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart), virt_to_phys(vaend));
}
395 static void __set_fault_handler(struct sysmmu_drvdata *data,
396                                         sysmmu_fault_handler_t handler)
397 {
398         unsigned long flags;
399
400         write_lock_irqsave(&data->lock, flags);
401         data->fault_handler = handler;
402         write_unlock_irqrestore(&data->lock, flags);
403 }
404
405 void rockchip_sysmmu_set_fault_handler(struct device *dev,sysmmu_fault_handler_t handler)
406 {
407         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
408
409         __set_fault_handler(data, handler);
410 }
411
412 static int default_fault_handler(struct device *dev,
413                                         enum rk_sysmmu_inttype itype,
414                                         unsigned long pgtable_base,
415                                         unsigned long fault_addr,
416                                         unsigned int status
417                                         )
418 {
419         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
420
421         if(!data)
422         {
423                 pr_info("%s,iommu device not assigned yet\n",__func__);
424                 return 0;
425         }
426         if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
427                 itype = SYSMMU_FAULT_UNKNOWN;
428
429         if(itype == SYSMMU_BUSERROR)
430                 pr_err("%s occured at 0x%lx(Page table base: 0x%lx)\n",sysmmu_fault_name[itype], fault_addr, pgtable_base);
431
432         if(itype == SYSMMU_PAGEFAULT)
433                 pr_err("SYSMMU:Page fault detected at 0x%lx from bus id %d of type %s on %s\n",
434                                 fault_addr,
435                                 (status >> 6) & 0x1F,
436                                 (status & 32) ? "write" : "read",
437                                 data->dbgname
438                                 );
439
440         pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
441
442         BUG();
443
444         return 0;
445 }
446 static void dump_pagetbl(u32 fault_address,u32 addr_dte)
447 {
448 #if 0
449         u32  offset1;
450         u32  offset2;
451         u32 *level2_base;
452         u32 *level1_entry;
453         u32 *level2_entry;
454 #endif
455         #if 1
456         u32 lv1_offset;
457         u32 lv2_offset;
458         
459         u32 *lv1_entry_pa;
460         u32 *lv1_entry_va;
461         u32 *lv1_entry_value;
462         
463         u32 *lv2_base;
464         u32 *lv2_entry_pa;
465         u32 *lv2_entry_va;
466         u32 *lv2_entry_value;
467
468         
469         lv1_offset = lv1ent_offset(fault_address);
470         lv2_offset = lv2ent_offset(fault_address);
471         
472         lv1_entry_pa = (u32 *)addr_dte + lv1_offset;
473         lv1_entry_va = (u32 *)(__va(addr_dte)) + lv1_offset;
474         lv1_entry_value = (u32 *)(*lv1_entry_va);
475         
476         lv2_base = (u32 *)((*lv1_entry_va) & 0xfffffffe);
477         lv2_entry_pa = (u32 * )lv2_base + lv2_offset;
478         lv2_entry_va = (u32 * )(__va(lv2_base)) + lv2_offset;
479         lv2_entry_value = (u32 *)(*lv2_entry_va);
480         
481         pr_info("fault address = 0x%08x,dte addr pa = 0x%08x,va = 0x%08x\n",fault_address,addr_dte,(u32)__va(addr_dte));
482         pr_info("lv1_offset = 0x%x,lv1_entry_pa = 0x%08x,lv1_entry_va = 0x%08x\n",lv1_offset,(u32)lv1_entry_pa,(u32)lv1_entry_va);
483         pr_info("lv1_entry_value(*lv1_entry_va) = 0x%08x,lv2_base = 0x%08x\n",(u32)lv1_entry_value,(u32)lv2_base);
484         pr_info("lv2_offset = 0x%x,lv2_entry_pa = 0x%08x,lv2_entry_va = 0x%08x\n",lv2_offset,(u32)lv2_entry_pa,(u32)lv2_entry_va);
485         pr_info("lv2_entry value(*lv2_entry_va) = 0x%08x\n",(u32)lv2_entry_value);
486         
487         #endif
488 #if 0
489         offset1 = lv1ent_offset(fault_address);
490         offset2 = lv2ent_offset(fault_address);
491         level1_entry = (u32 *)__va(addr_dte)+offset1;
492         level2_base = (u32 *)__va((*level1_entry)&0xfffffffe);
493         level2_entry = level2_base+offset2;
494         pr_info("level1 offset=%d,level2 offset=%d,level1_entry=0x%08x\n",offset1,offset2,(u32)level1_entry);
495         pr_info("*level1_entry = 0x%08x\n",*level1_entry);
496         pr_info("*level2_entry = 0x%08x\n",*level2_entry);
497 #endif
498
499 }
500 static irqreturn_t rockchip_sysmmu_irq(int irq, void *dev_id)
501 {
502         /* SYSMMU is in blocked when interrupt occurred. */
503         struct sysmmu_drvdata *data = dev_id;
504         struct resource *irqres;
505         struct platform_device *pdev;
506         enum rk_sysmmu_inttype itype = SYSMMU_FAULT_UNKNOWN;
507         u32 status;
508         u32 rawstat;
509         u32 int_status;
510         u32 fault_address;
511         int i, ret = 0;
512
513         read_lock(&data->lock);
514         
515 #if 0
516         WARN_ON(!is_sysmmu_active(data));
517 #else
518         if(!is_sysmmu_active(data))
519         {
520                 read_unlock(&data->lock);
521                 return IRQ_HANDLED;
522         }
523 #endif  
524         pdev = to_platform_device(data->sysmmu);
525
526         for (i = 0; i < data->num_res_irq; i++) 
527         {
528                 irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
529                 if (irqres && ((int)irqres->start == irq))
530                         break;
531         }
532
533         if (i == data->num_res_irq) 
534         {
535                 itype = SYSMMU_FAULT_UNKNOWN;
536         } 
537         else 
538         {
539                 int_status = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_INT_STATUS);
540                 if(int_status != 0)
541                 {
542                         /*mask status*/
543                         __raw_writel(0x00,data->res_bases[i] + SYSMMU_REGISTER_INT_MASK);
544                         
545                         rawstat = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_INT_RAWSTAT);
546
547                         if(rawstat & SYSMMU_INTERRUPT_PAGE_FAULT)
548                         {
549                                 fault_address = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_PAGE_FAULT_ADDR);
550                                 itype = SYSMMU_PAGEFAULT;
551                         }
552                         else if(rawstat & SYSMMU_INTERRUPT_READ_BUS_ERROR)
553                         {
554                                 itype = SYSMMU_BUSERROR;
555                         }
556                         else
557                         {
558                                 goto out;
559                         }
560                         dump_pagetbl(fault_address,__raw_readl(data->res_bases[i] + SYSMMU_REGISTER_DTE_ADDR));
561                 }
562                 else
563                         goto out;
564         }
565         
566         if (data->fault_handler) 
567         {
568                 unsigned long base = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_DTE_ADDR);
569                 status = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_STATUS);
570                 ret = data->fault_handler(data->dev, itype, base, fault_address,status);
571         }
572
573         if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
574         {
575                 if(SYSMMU_PAGEFAULT == itype)
576                 {
577                         sysmmu_zap_tlb(data->res_bases[i]);
578                         sysmmu_page_fault_done(data->res_bases[i],data->dbgname);
579                         __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT|SYSMMU_INTERRUPT_READ_BUS_ERROR, data->res_bases[i]+SYSMMU_REGISTER_INT_MASK);
580                 }
581         }
582         else
583                 pr_err("(%s) %s is not handled.\n",data->dbgname, sysmmu_fault_name[itype]);
584
585 out :
586         read_unlock(&data->lock);
587
588         return IRQ_HANDLED;
589 }
590
591 static bool __rockchip_sysmmu_disable(struct sysmmu_drvdata *data)
592 {
593         unsigned long flags;
594         bool disabled = false;
595         int i;
596         write_lock_irqsave(&data->lock, flags);
597
598         if (!set_sysmmu_inactive(data))
599                 goto finish;
600
601         for(i=0;i<data->num_res_mem;i++)
602         {
603                 sysmmu_disable_paging(data->res_bases[i]);
604         }
605
606         disabled = true;
607         data->pgtable = 0;
608         data->domain = NULL;
609 finish:
610         write_unlock_irqrestore(&data->lock, flags);
611
612         if (disabled)
613                 pr_info("(%s) Disabled\n", data->dbgname);
614         else
615                 pr_info("(%s) %d times left to be disabled\n",data->dbgname, data->activations);
616
617         return disabled;
618 }
619
620 /* __rk_sysmmu_enable: Enables System MMU
621  *
622  * returns -error if an error occurred and System MMU is not enabled,
623  * 0 if the System MMU has been just enabled and 1 if System MMU was already
624  * enabled before.
625  */
626 static int __rockchip_sysmmu_enable(struct sysmmu_drvdata *data,unsigned long pgtable, struct iommu_domain *domain)
627 {
628         int i, ret = 0;
629         unsigned long flags;
630
631         write_lock_irqsave(&data->lock, flags);
632
633         if (!set_sysmmu_active(data)) 
634         {
635                 if (WARN_ON(pgtable != data->pgtable)) 
636                 {
637                         ret = -EBUSY;
638                         set_sysmmu_inactive(data);
639                 } 
640                 else 
641                         ret = 1;
642
643                 pr_info("(%s) Already enabled\n", data->dbgname);
644                 goto finish;
645         }
646         
647         data->pgtable = pgtable;
648
649         for (i = 0; i < data->num_res_mem; i++) 
650         {
651                 bool status;
652                 status = sysmmu_enable_stall(data->res_bases[i]);
653                 if(status)
654                 {
655                         __sysmmu_set_ptbase(data->res_bases[i], pgtable);
656                         __raw_writel(SYSMMU_COMMAND_ZAP_CACHE, data->res_bases[i] + SYSMMU_REGISTER_COMMAND);
657                 }
658                 __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT|SYSMMU_INTERRUPT_READ_BUS_ERROR, data->res_bases[i]+SYSMMU_REGISTER_INT_MASK);
659                 sysmmu_enable_paging(data->res_bases[i]);
660                 sysmmu_disable_stall(data->res_bases[i]);
661         }
662
663         data->domain = domain;
664
665         pr_info("(%s) Enabled\n", data->dbgname);
666 finish:
667         write_unlock_irqrestore(&data->lock, flags);
668
669         return ret;
670 }
671 bool rockchip_sysmmu_disable(struct device *dev)
672 {
673         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
674         bool disabled;
675
676         disabled = __rockchip_sysmmu_disable(data);
677
678         return disabled;
679 }
680 void rockchip_sysmmu_tlb_invalidate(struct device *dev)
681 {
682         unsigned long flags;
683         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
684
685         read_lock_irqsave(&data->lock, flags);
686
687         if (is_sysmmu_active(data)) 
688         {
689                 int i;
690                 for (i = 0; i < data->num_res_mem; i++) 
691                 {
692                         if(!sysmmu_zap_tlb(data->res_bases[i]))
693                                 pr_err("%s,invalidating TLB failed\n",data->dbgname);
694                 }
695         } 
696         else 
697                 pr_info("(%s) Disabled. Skipping invalidating TLB.\n",data->dbgname);
698
699         read_unlock_irqrestore(&data->lock, flags);
700 }
701 static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain,dma_addr_t iova)
702 {
703         struct rk_iommu_domain *priv = domain->priv;
704         unsigned long *entry;
705         unsigned long flags;
706         phys_addr_t phys = 0;
707
708         spin_lock_irqsave(&priv->pgtablelock, flags);
709
710         entry = section_entry(priv->pgtable, iova);
711         entry = page_entry(entry, iova);
712         phys = spage_phys(entry) + spage_offs(iova);
713         
714         spin_unlock_irqrestore(&priv->pgtablelock, flags);
715
716         return phys;
717 }
718 static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
719                                                                 short *pgcnt)
720 {
721         if (!lv2ent_fault(pent))
722                 return -EADDRINUSE;
723
724         *pent = mk_lv2ent_spage(paddr);
725         pgtable_flush(pent, pent + 1);
726         *pgcnt -= 1;
727         return 0;
728 }
729
730 static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,short *pgcounter)
731 {
732         if (lv1ent_fault(sent)) 
733         {
734                 unsigned long *pent;
735
736                 pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
737                 BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
738                 if (!pent)
739                         return NULL;
740
741                 *sent = mk_lv1ent_page(__pa(pent));
742                 kmemleak_ignore(pent);
743                 *pgcounter = NUM_LV2ENTRIES;
744                 pgtable_flush(pent, pent + NUM_LV2ENTRIES);
745                 pgtable_flush(sent, sent + 1);
746         }
747         return page_entry(sent, iova);
748 }
749
750 static size_t rockchip_iommu_unmap(struct iommu_domain *domain,unsigned long iova, size_t size)
751 {
752         struct rk_iommu_domain *priv = domain->priv;
753         unsigned long flags;
754         unsigned long *ent;
755
756         BUG_ON(priv->pgtable == NULL);
757
758         spin_lock_irqsave(&priv->pgtablelock, flags);
759
760         ent = section_entry(priv->pgtable, iova);
761
762         if (unlikely(lv1ent_fault(ent))) 
763         {
764                 if (size > SPAGE_SIZE)
765                         size = SPAGE_SIZE;
766                 goto done;
767         }
768
769         /* lv1ent_page(sent) == true here */
770
771         ent = page_entry(ent, iova);
772
773         if (unlikely(lv2ent_fault(ent))) 
774         {
775                 size = SPAGE_SIZE;
776                 goto done;
777         }
778         
779         *ent = 0;
780         size = SPAGE_SIZE;
781         priv->lv2entcnt[lv1ent_offset(iova)] += 1;
782         goto done;
783
784 done:
785         //pr_info("%s:unmap iova 0x%lx/0x%x bytes\n",__func__, iova,size);
786         spin_unlock_irqrestore(&priv->pgtablelock, flags);
787
788         return size;
789 }
790 static int rockchip_iommu_map(struct iommu_domain *domain, unsigned long iova,
791                          phys_addr_t paddr, size_t size, int prot)
792 {
793         struct rk_iommu_domain *priv = domain->priv;
794         unsigned long *entry;
795         unsigned long flags;
796         int ret = -ENOMEM;
797         unsigned long *pent;
798
799         BUG_ON(priv->pgtable == NULL);
800
801         spin_lock_irqsave(&priv->pgtablelock, flags);
802
803         entry = section_entry(priv->pgtable, iova);
804         
805         pent = alloc_lv2entry(entry, iova,&priv->lv2entcnt[lv1ent_offset(iova)]);
806         if (!pent)
807                 ret = -ENOMEM;
808         else
809                 ret = lv2set_page(pent, paddr, size,&priv->lv2entcnt[lv1ent_offset(iova)]);
810         
811         if (ret)
812         {
813                 pr_err("%s: Failed to map iova 0x%lx/0x%x bytes\n",__func__, iova, size);
814         }
815         spin_unlock_irqrestore(&priv->pgtablelock, flags);
816
817         return ret;
818 }
819
820 static void rockchip_iommu_detach_device(struct iommu_domain *domain,
821                                     struct device *dev)
822 {
823         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
824         struct rk_iommu_domain *priv = domain->priv;
825         struct list_head *pos;
826         unsigned long flags;
827         bool found = false;
828
829         spin_lock_irqsave(&priv->lock, flags);
830
831         list_for_each(pos, &priv->clients) 
832         {
833                 if (list_entry(pos, struct sysmmu_drvdata, node) == data) 
834                 {
835                         found = true;
836                         break;
837                 }
838         }
839         if (!found)
840                 goto finish;
841
842         if (__rockchip_sysmmu_disable(data)) 
843         {
844                 pr_info("%s: Detached IOMMU with pgtable %#lx\n",__func__, __pa(priv->pgtable));
845                 list_del(&data->node);
846                 INIT_LIST_HEAD(&data->node);
847
848         } 
849         else 
850                 pr_info("%s: Detaching IOMMU with pgtable %#lx delayed",__func__, __pa(priv->pgtable));
851         
852 finish:
853         spin_unlock_irqrestore(&priv->lock, flags);
854 }
855 static int rockchip_iommu_attach_device(struct iommu_domain *domain,struct device *dev)
856 {
857         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
858         struct rk_iommu_domain *priv = domain->priv;
859         unsigned long flags;
860         int ret;
861
862         spin_lock_irqsave(&priv->lock, flags);
863
864         ret = __rockchip_sysmmu_enable(data, __pa(priv->pgtable), domain);
865
866         if (ret == 0) 
867         {
868                 /* 'data->node' must not be appeared in priv->clients */
869                 BUG_ON(!list_empty(&data->node));
870                 data->dev = dev;
871                 list_add_tail(&data->node, &priv->clients);
872         }
873
874         spin_unlock_irqrestore(&priv->lock, flags);
875
876         if (ret < 0) 
877         {
878                 pr_err("%s: Failed to attach IOMMU with pgtable %#lx\n",__func__, __pa(priv->pgtable));
879         } 
880         else if (ret > 0) 
881         {
882                 pr_info("%s: IOMMU with pgtable 0x%lx already attached\n",__func__, __pa(priv->pgtable));
883         } 
884         else 
885         {
886                 pr_info("%s: Attached new IOMMU with pgtable 0x%lx\n",__func__, __pa(priv->pgtable));
887         }
888
889         return ret;
890 }
/*
 * rockchip_iommu_domain_destroy - iommu_ops->domain_destroy callback.
 * Force-disables any sysmmu still attached, frees every level-2 page
 * table, then releases the level-1 table, the per-entry counters and
 * the domain structure itself.
 *
 * NOTE(review): the client list is expected to be empty here (see the
 * WARN_ON); the disable loop is last-ditch cleanup.  It busy-waits on
 * rockchip_sysmmu_disable() while holding priv->lock with interrupts
 * off -- confirm the disable path can never need this lock or sleep,
 * otherwise this can deadlock.  Left byte-identical pending that check.
 */
static void rockchip_iommu_domain_destroy(struct iommu_domain *domain)
{
        struct rk_iommu_domain *priv = domain->priv;
        struct sysmmu_drvdata *data;
        unsigned long flags;
        int i;

        WARN_ON(!list_empty(&priv->clients));

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry(data, &priv->clients, node) 
        {
                while (!rockchip_sysmmu_disable(data->dev))
                        ; /* until System MMU is actually disabled */
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        /* free each allocated second-level table (lv1 entry marked present) */
        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(priv->pgtable + i))
                        kmem_cache_free(lv2table_kmem_cache,__va(lv2table_base(priv->pgtable + i)));

        free_pages((unsigned long)priv->pgtable, 0);
        free_pages((unsigned long)priv->lv2entcnt, 0);
        kfree(domain->priv);
        domain->priv = NULL;
}
918
919 static int rockchip_iommu_domain_init(struct iommu_domain *domain)
920 {
921         struct rk_iommu_domain *priv;
922
923         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
924         if (!priv)
925                 return -ENOMEM;
926         
927 /*rk32xx sysmmu use 2 level pagetable,
928    level1 and leve2 both have 1024 entries,each entry  occupy 4 bytes,
929    so alloc a page size for each page table 
930 */
931         priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
932         if (!priv->pgtable)
933                 goto err_pgtable;
934
935         priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
936         if (!priv->lv2entcnt)
937                 goto err_counter;
938
939         pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
940
941         spin_lock_init(&priv->lock);
942         spin_lock_init(&priv->pgtablelock);
943         INIT_LIST_HEAD(&priv->clients);
944
945         domain->priv = priv;
946         return 0;
947
948 err_counter:
949         free_pages((unsigned long)priv->pgtable, 0);    
950 err_pgtable:
951         kfree(priv);
952         return -ENOMEM;
953 }
954
955 static struct iommu_ops rk_iommu_ops = 
956 {
957         .domain_init = &rockchip_iommu_domain_init,
958         .domain_destroy = &rockchip_iommu_domain_destroy,
959         .attach_dev = &rockchip_iommu_attach_device,
960         .detach_dev = &rockchip_iommu_detach_device,
961         .map = &rockchip_iommu_map,
962         .unmap = &rockchip_iommu_unmap,
963         .iova_to_phys = &rockchip_iommu_iova_to_phys,
964         .pgsize_bitmap = SPAGE_SIZE,
965 };
966
967 static int rockchip_sysmmu_prepare(void)
968 {
969         int ret = 0;
970         static int registed = 0;
971         
972         if(registed)
973                 return 0;
974         
975         lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table",LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
976         if (!lv2table_kmem_cache) 
977         {
978                 pr_err("%s: failed to create kmem cache\n", __func__);
979                 return -ENOMEM;
980         }
981         ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
982         if(!ret)
983                 registed = 1;
984         else
985                 pr_err("%s:failed to set iommu to bus\r\n",__func__);
986         return ret;
987 }
/*
 * Count how many platform resources of @type (IORESOURCE_MEM or
 * IORESOURCE_IRQ) @pdev provides, by probing increasing indices until
 * platform_get_resource() returns NULL.
 */
static int  rockchip_get_sysmmu_resource_num(struct platform_device *pdev,unsigned int type)
{
	int count = 0;

	while (platform_get_resource(pdev, type, count))
		count++;

	return count;
}
1003
/* kobject backing the "rk_iommu" sysfs directory (created in dump_iommu_sysfs_init) */
static struct kobject *dump_mmu_object;
1005
1006 static int dump_mmu_pagetbl(struct device *dev,struct device_attribute *attr, const char *buf,u32 count)
1007 {
1008         u32 fault_address;
1009         u32 iommu_dte ;
1010         u32 mmu_base;
1011         void __iomem *base;
1012         u32 ret;
1013         ret = kstrtouint(buf,0,&mmu_base);
1014         if (ret)
1015                 printk("%s is not in hexdecimal form.\n", buf);
1016         base = ioremap(mmu_base, 0x100);
1017         iommu_dte = __raw_readl(base + SYSMMU_REGISTER_DTE_ADDR);
1018         fault_address = __raw_readl(base + SYSMMU_REGISTER_PAGE_FAULT_ADDR);
1019         dump_pagetbl(fault_address,iommu_dte);
1020         return count;
1021 }
1022 static DEVICE_ATTR(dump_mmu_pgtable, 0644, NULL, dump_mmu_pagetbl);
1023
1024 void dump_iommu_sysfs_init(void )
1025 {
1026         u32 ret;
1027         dump_mmu_object = kobject_create_and_add("rk_iommu", NULL);
1028         if (dump_mmu_object == NULL)
1029                 return;
1030         ret = sysfs_create_file(dump_mmu_object, &dev_attr_dump_mmu_pgtable.attr);
1031         return;
1032 }
1033         
1034
1035
1036 static int rockchip_sysmmu_probe(struct platform_device *pdev)
1037 {
1038         int i, ret;
1039         struct device *dev;
1040         struct sysmmu_drvdata *data;
1041         
1042         dev = &pdev->dev;
1043         
1044         ret = rockchip_sysmmu_prepare();
1045         if(ret)
1046         {
1047                 pr_err("%s,failed\r\n",__func__);
1048                 goto err_alloc;
1049         }
1050
1051         data = devm_kzalloc(dev,sizeof(*data), GFP_KERNEL);
1052         if (!data) 
1053         {
1054                 dev_dbg(dev, "Not enough memory\n");
1055                 ret = -ENOMEM;
1056                 goto err_alloc;
1057         }
1058         
1059         ret = dev_set_drvdata(dev, data);
1060         if (ret) 
1061         {
1062                 dev_dbg(dev, "Unabled to initialize driver data\n");
1063                 goto err_init;
1064         }
1065         
1066         if(pdev->dev.of_node)
1067         {
1068                 of_property_read_string(pdev->dev.of_node,"dbgname",&(data->dbgname));
1069         }
1070         else
1071         {
1072                 pr_info("dbgname not assigned in device tree or device node not exist\r\n");
1073         }
1074
1075         pr_info("(%s) Enter\n", data->dbgname);
1076
1077         /*rk32xx sysmmu need both irq and memory */
1078         data->num_res_mem = rockchip_get_sysmmu_resource_num(pdev,IORESOURCE_MEM);
1079         if(0 == data->num_res_mem)
1080         {
1081                 pr_err("can't find sysmmu memory resource \r\n");
1082                 goto err_init;
1083         }
1084         pr_info("data->num_res_mem=%d\n",data->num_res_mem);
1085         data->num_res_irq = rockchip_get_sysmmu_resource_num(pdev,IORESOURCE_IRQ);
1086         if(0 == data->num_res_irq)
1087         {
1088                 pr_err("can't find sysmmu irq resource \r\n");
1089                 goto err_init;
1090         }
1091         
1092         data->res_bases = kmalloc(sizeof(*data->res_bases) * data->num_res_mem,GFP_KERNEL);
1093         if (data->res_bases == NULL)
1094         {
1095                 dev_dbg(dev, "Not enough memory\n");
1096                 ret = -ENOMEM;
1097                 goto err_init;
1098         }
1099
1100         for (i = 0; i < data->num_res_mem; i++) 
1101         {
1102                 struct resource *res;
1103                 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
1104                 if (!res) 
1105                 {
1106                         pr_err("Unable to find IOMEM region\n");
1107                         ret = -ENOENT;
1108                         goto err_res;
1109                 }
1110                 data->res_bases[i] = ioremap(res->start, resource_size(res));
1111                 pr_info("res->start = 0x%08x  ioremap to  data->res_bases[%d] = 0x%08x\n",res->start,i,(unsigned int)data->res_bases[i]);
1112                 if (!data->res_bases[i]) 
1113                 {
1114                         pr_err("Unable to map IOMEM @ PA:%#x\n",res->start);
1115                         ret = -ENOENT;
1116                         goto err_res;
1117                 }
1118                 if(!strstr(data->dbgname,"isp"))
1119                 {
1120                         /*reset sysmmu*/
1121                         if(!sysmmu_reset(data->res_bases[i],data->dbgname))
1122                         {
1123                                 ret = -ENOENT;
1124                                 goto err_res;
1125                         }
1126                 }
1127         }
1128
1129         for (i = 0; i < data->num_res_irq; i++) 
1130         {
1131                 ret = platform_get_irq(pdev, i);
1132                 if (ret <= 0) 
1133                 {
1134                         pr_err("Unable to find IRQ resource\n");
1135                         goto err_irq;
1136                 }
1137                 ret = request_irq(ret, rockchip_sysmmu_irq, IRQF_SHARED ,dev_name(dev), data);
1138                 if (ret) 
1139                 {
1140                         pr_err("Unabled to register interrupt handler\n");
1141                         goto err_irq;
1142                 }
1143         }
1144         ret = rockchip_init_iovmm(dev, &data->vmm);
1145         if (ret)
1146                 goto err_irq;
1147         
1148         
1149         data->sysmmu = dev;
1150         rwlock_init(&data->lock);
1151         INIT_LIST_HEAD(&data->node);
1152
1153         __set_fault_handler(data, &default_fault_handler);
1154
1155         pr_info("(%s) Initialized\n", data->dbgname);
1156         return 0;
1157
1158 err_irq:
1159         while (i-- > 0) 
1160         {
1161                 int irq;
1162
1163                 irq = platform_get_irq(pdev, i);
1164                 free_irq(irq, data);
1165         }
1166 err_res:
1167         while (data->num_res_mem-- > 0)
1168                 iounmap(data->res_bases[data->num_res_mem]);
1169         kfree(data->res_bases);
1170 err_init:
1171         kfree(data);
1172 err_alloc:
1173         dev_err(dev, "Failed to initialize\n");
1174         return ret;
1175 }
1176
#ifdef CONFIG_OF
/* Devicetree match table: one compatible string per on-chip master
 * (IEP, VIP, VOP big/little, HEVC, VPU, ISP) whose sysmmu instance
 * this driver handles. */
static const struct of_device_id sysmmu_dt_ids[] = 
{
        { .compatible = IEP_SYSMMU_COMPATIBLE_NAME},
        { .compatible = VIP_SYSMMU_COMPATIBLE_NAME},
        { .compatible = VOPB_SYSMMU_COMPATIBLE_NAME},
        { .compatible = VOPL_SYSMMU_COMPATIBLE_NAME},
        { .compatible = HEVC_SYSMMU_COMPATIBLE_NAME},
        { .compatible = VPU_SYSMMU_COMPATIBLE_NAME},
        { .compatible = ISP_SYSMMU_COMPATIBLE_NAME},
        { /* end */ }
};
MODULE_DEVICE_TABLE(of, sysmmu_dt_ids);
#endif
1191
1192 static struct platform_driver rk_sysmmu_driver = 
1193 {
1194         .probe = rockchip_sysmmu_probe,
1195         .remove = NULL,
1196         .driver = 
1197         {
1198                    .name = "rk_sysmmu",
1199                    .owner = THIS_MODULE,
1200                    .of_match_table = of_match_ptr(sysmmu_dt_ids),
1201         },
1202 };
1203
#if 0
/* NOTE(review): the original author could not get module_platform_driver()
 * to work here (reason unrecorded); the driver is registered manually from
 * a core_initcall below instead. */
#ifdef CONFIG_OF
module_platform_driver(rk_sysmmu_driver);
#endif
#endif
/*
 * Module entry: create the sysfs debug entry, then register the
 * platform driver.  Runs at core_initcall time so the IOMMU is ready
 * before its master devices probe.
 */
static int __init rockchip_sysmmu_init_driver(void)
{
        dump_iommu_sysfs_init();

        return platform_driver_register(&rk_sysmmu_driver);
}

core_initcall(rockchip_sysmmu_init_driver);
1218