/* drivers/iommu/rockchip-iommu.c — Rockchip IOMMU (System MMU) driver */
1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License version 2 as
4  * published by the Free Software Foundation.
5  */
6
7 #ifdef CONFIG_ROCKCHIP_IOMMU_DEBUG
8 #define DEBUG
9 #endif
10
11 #include <linux/io.h>
12 #include <linux/interrupt.h>
13 #include <linux/slab.h>
14 #include <linux/clk.h>
15 #include <linux/err.h>
16 #include <linux/mm.h>
17 #include <linux/errno.h>
18 #include <linux/memblock.h>
19 #include <linux/export.h>
20
21 #include <asm/cacheflush.h>
22 #include <asm/pgtable.h>
23 #include <linux/of.h>
24 #include <linux/rockchip/sysmmu.h>
25
26 #include "rockchip-iommu.h"
27
28 /* We does not consider super section mapping (16MB) */
29 #define SPAGE_ORDER 12
30 #define SPAGE_SIZE (1 << SPAGE_ORDER)
31 #define SPAGE_MASK (~(SPAGE_SIZE - 1))
/*
 * Page-table entry flag bits shared by lv1 (DTE) and lv2 (PTE) entries.
 * Bit 0 marks the entry present/valid; the remaining bits select access
 * permissions and cache/buffer attributes of the mapping.
 */
typedef enum sysmmu_entry_flags 
{
        SYSMMU_FLAGS_PRESENT = 0x01,
        SYSMMU_FLAGS_READ_PERMISSION = 0x02,
        SYSMMU_FLAGS_WRITE_PERMISSION = 0x04,
        SYSMMU_FLAGS_OVERRIDE_CACHE  = 0x8,
        SYSMMU_FLAGS_WRITE_CACHEABLE  = 0x10,
        SYSMMU_FLAGS_WRITE_ALLOCATE  = 0x20,
        SYSMMU_FLAGS_WRITE_BUFFERABLE  = 0x40,
        SYSMMU_FLAGS_READ_CACHEABLE  = 0x80,
        SYSMMU_FLAGS_READ_ALLOCATE  = 0x100,
        SYSMMU_FLAGS_MASK = 0x1FF,
} sysmmu_entry_flags;
45
46 #define lv1ent_fault(sent) ((*(sent) & SYSMMU_FLAGS_PRESENT) == 0)
47 #define lv1ent_page(sent) ((*(sent) & SYSMMU_FLAGS_PRESENT) == 1)
48 #define lv2ent_fault(pent) ((*(pent) & SYSMMU_FLAGS_PRESENT) == 0)
49 #define spage_phys(pent) (*(pent) & SPAGE_MASK)
50 #define spage_offs(iova) ((iova) & 0x0FFF)
51
52 #define lv1ent_offset(iova) (((iova)>>22) & 0x03FF)
53 #define lv2ent_offset(iova) (((iova)>>12) & 0x03FF)
54
55 #define NUM_LV1ENTRIES 1024
56 #define NUM_LV2ENTRIES 1024
57
58 #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
59
60 #define lv2table_base(sent) (*(sent) & 0xFFFFFFFE)
61
62 #define mk_lv1ent_page(pa) ((pa) | SYSMMU_FLAGS_PRESENT)
63 /*write and read permission for level2 page default*/
64 #define mk_lv2ent_spage(pa) ((pa) | SYSMMU_FLAGS_PRESENT |SYSMMU_FLAGS_READ_PERMISSION |SYSMMU_FLAGS_WRITE_PERMISSION)
65
66 #define SYSMMU_REG_POLL_COUNT_FAST 1000
67
68 /**
69  * MMU register numbers
70  * Used in the register read/write routines.
71  * See the hardware documentation for more information about each register
72  */
73 typedef enum sysmmu_register 
74 {
75         SYSMMU_REGISTER_DTE_ADDR = 0x0000, /**< Current Page Directory Pointer */
76         SYSMMU_REGISTER_STATUS = 0x0004, /**< Status of the MMU */
77         SYSMMU_REGISTER_COMMAND = 0x0008, /**< Command register, used to control the MMU */
78         SYSMMU_REGISTER_PAGE_FAULT_ADDR = 0x000C, /**< Logical address of the last page fault */
79         SYSMMU_REGISTER_ZAP_ONE_LINE = 0x010, /**< Used to invalidate the mapping of a single page from the MMU */
80         SYSMMU_REGISTER_INT_RAWSTAT = 0x0014, /**< Raw interrupt status, all interrupts visible */
81         SYSMMU_REGISTER_INT_CLEAR = 0x0018, /**< Indicate to the MMU that the interrupt has been received */
82         SYSMMU_REGISTER_INT_MASK = 0x001C, /**< Enable/disable types of interrupts */
83         SYSMMU_REGISTER_INT_STATUS = 0x0020, /**< Interrupt status based on the mask */
84         SYSMMU_REGISTER_AUTO_GATING     = 0x0024
85 } sysmmu_register;
86
87 typedef enum sysmmu_command 
88 {
89         SYSMMU_COMMAND_ENABLE_PAGING = 0x00, /**< Enable paging (memory translation) */
90         SYSMMU_COMMAND_DISABLE_PAGING = 0x01, /**< Disable paging (memory translation) */
91         SYSMMU_COMMAND_ENABLE_STALL = 0x02, /**<  Enable stall on page fault */
92         SYSMMU_COMMAND_DISABLE_STALL = 0x03, /**< Disable stall on page fault */
93         SYSMMU_COMMAND_ZAP_CACHE = 0x04, /**< Zap the entire page table cache */
94         SYSMMU_COMMAND_PAGE_FAULT_DONE = 0x05, /**< Page fault processed */
95         SYSMMU_COMMAND_HARD_RESET = 0x06 /**< Reset the MMU back to power-on settings */
96 } sysmmu_command;
97
98 /**
99  * MMU interrupt register bits
100  * Each cause of the interrupt is reported
101  * through the (raw) interrupt status registers.
102  * Multiple interrupts can be pending, so multiple bits
103  * can be set at once.
104  */
105 typedef enum sysmmu_interrupt 
106 {
107         SYSMMU_INTERRUPT_PAGE_FAULT = 0x01, /**< A page fault occured */
108         SYSMMU_INTERRUPT_READ_BUS_ERROR = 0x02 /**< A bus read error occured */
109 } sysmmu_interrupt;
110
111 typedef enum sysmmu_status_bits 
112 {
113         SYSMMU_STATUS_BIT_PAGING_ENABLED      = 1 << 0,
114         SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE   = 1 << 1,
115         SYSMMU_STATUS_BIT_STALL_ACTIVE        = 1 << 2,
116         SYSMMU_STATUS_BIT_IDLE                = 1 << 3,
117         SYSMMU_STATUS_BIT_REPLAY_BUFFER_EMPTY = 1 << 4,
118         SYSMMU_STATUS_BIT_PAGE_FAULT_IS_WRITE = 1 << 5,
119         SYSMMU_STATUS_BIT_STALL_NOT_ACTIVE    = 1 << 31,
120 } sys_mmu_status_bits;
121
122 /**
123  * Size of an MMU page in bytes
124  */
125 #define SYSMMU_PAGE_SIZE 0x1000
126
127 /*
128  * Size of the address space referenced by a page table page
129  */
130 #define SYSMMU_VIRTUAL_PAGE_SIZE 0x400000 /* 4 MiB */
131
132 /**
133  * Page directory index from address
134  * Calculates the page directory index from the given address
135  */
136 #define SYSMMU_PDE_ENTRY(address) (((address)>>22) & 0x03FF)
137
138 /**
139  * Page table index from address
140  * Calculates the page table index from the given address
141  */
142 #define SYSMMU_PTE_ENTRY(address) (((address)>>12) & 0x03FF)
143
144 /**
145  * Extract the memory address from an PDE/PTE entry
146  */
147 #define SYSMMU_ENTRY_ADDRESS(value) ((value) & 0xFFFFFC00)
148
149 #define INVALID_PAGE ((u32)(~0))
150
151 static struct kmem_cache *lv2table_kmem_cache;
/* Return a pointer to the lv1 (section) entry covering @iova in @pgtable. */
static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
        unsigned long *sent = pgtable + lv1ent_offset(iova);

        return sent;
}
156
/* Return a pointer to the lv2 (page) entry for @iova in the table that
 * the lv1 entry @sent points at. */
static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
        unsigned long *lv2table = (unsigned long *)__va(lv2table_base(sent));

        return lv2table + lv2ent_offset(iova);
}
161
/* Human-readable fault names, indexed by the rk_sysmmu_inttype value. */
static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
        "PAGE FAULT",
        "BUS ERROR",
        "UNKNOWN FAULT"
};
167
/*
 * Per-domain state: one lv1 page table shared by every System MMU
 * instance attached to the domain.
 */
struct rk_iommu_domain {
        struct list_head clients; /* list of sysmmu_drvdata.node */
        unsigned long *pgtable; /* lv1 page table, 4KB */
        short *lv2entcnt; /* free lv2 entry counter for each section */
        spinlock_t lock; /* lock for this structure */
        spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};
175
176 static bool set_sysmmu_active(struct sysmmu_drvdata *data)
177 {
178         /* return true if the System MMU was not active previously
179            and it needs to be initialized */
180         return ++data->activations == 1;
181 }
182
183 static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
184 {
185         /* return true if the System MMU is needed to be disabled */
186         BUG_ON(data->activations < 1);
187         return --data->activations == 0;
188 }
189
190 static bool is_sysmmu_active(struct sysmmu_drvdata *data)
191 {
192         return data->activations > 0;
193 }
194 static void sysmmu_disable_stall(void __iomem *sfrbase)
195 {
196         int i;
197         u32 mmu_status = __raw_readl(sfrbase+SYSMMU_REGISTER_STATUS);
198         if ( 0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED )) 
199         {
200                 //pr_err("MMU disable skipped since it was not enabled.\n");
201                 return;
202         }
203         if (mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) 
204         {
205                 pr_err("Aborting MMU disable stall request since it is in pagefault state.\n");
206                 return;
207         }
208         
209         __raw_writel(SYSMMU_COMMAND_DISABLE_STALL, sfrbase + SYSMMU_REGISTER_COMMAND);
210         
211         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
212         {
213                 u32 status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);
214                 if ( 0 == (status & SYSMMU_STATUS_BIT_STALL_ACTIVE) ) 
215                 {
216                         break;
217                 }
218                 if ( status &  SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) 
219                 {
220                         break;
221                 }
222                 if ( 0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED )) 
223                 {
224                         break;
225                 }
226         }
227         if (SYSMMU_REG_POLL_COUNT_FAST == i) 
228                 pr_err("Disable stall request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
229 }
/*
 * Request the MMU to stall master transactions so its registers can be
 * reprogrammed safely. Returns true when the stall is in effect (or is
 * implicit because paging is disabled); false when the MMU is in
 * page-fault state or the stall did not take effect within the poll
 * budget.
 */
static bool sysmmu_enable_stall(void __iomem *sfrbase)
{
        int i;
        u32 mmu_status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);

        if ( 0 == (mmu_status & SYSMMU_STATUS_BIT_PAGING_ENABLED) ) 
        {
                /* Stall is implicit when paging (translation) is not enabled. */
                return true;
        }
        if ( mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) 
        {
                pr_err("Aborting MMU stall request since it is in pagefault state.\n");
                return false;
        }
        
        __raw_writel(SYSMMU_COMMAND_ENABLE_STALL, sfrbase + SYSMMU_REGISTER_COMMAND);

        /* Poll until stalled, a page fault appears, or paging stops. */
        for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
        {
                mmu_status = __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS);
                if (mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE) 
                {
                        break;
                }
                /* Stalled: STALL_ACTIVE set and STALL_NOT_ACTIVE (bit 31) clear. */
                if ((mmu_status & SYSMMU_STATUS_BIT_STALL_ACTIVE)&&(0==(mmu_status & SYSMMU_STATUS_BIT_STALL_NOT_ACTIVE))) 
                {
                        break;
                }
                if (0 == (mmu_status & ( SYSMMU_STATUS_BIT_PAGING_ENABLED ))) 
                {
                        break;
                }
        }
        if (SYSMMU_REG_POLL_COUNT_FAST == i) 
        {
                pr_info("Enable stall request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
                return false;
        }
        /* A page fault raced in while waiting; caller must not rely on the stall. */
        if ( mmu_status & SYSMMU_STATUS_BIT_PAGE_FAULT_ACTIVE ) 
        {
                pr_info("Aborting MMU stall request since it has a pagefault.\n");
                return false;
        }
        return true;
}
276
277 static bool sysmmu_enable_paging(void __iomem *sfrbase)
278 {
279         int i;
280         __raw_writel(SYSMMU_COMMAND_ENABLE_PAGING, sfrbase + SYSMMU_REGISTER_COMMAND);
281
282         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
283         {
284                 if (__raw_readl(sfrbase + SYSMMU_REGISTER_STATUS) & SYSMMU_STATUS_BIT_PAGING_ENABLED) 
285                 {
286                         //pr_info("Enable paging request success.\n");
287                         break;
288                 }
289         }
290         if (SYSMMU_REG_POLL_COUNT_FAST == i)
291         {
292                 pr_err("Enable paging request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
293                 return false;
294         }
295         return true;
296 }
297 static bool sysmmu_disable_paging(void __iomem *sfrbase)
298 {
299         int i;
300         __raw_writel(SYSMMU_COMMAND_DISABLE_PAGING, sfrbase + SYSMMU_REGISTER_COMMAND);
301
302         for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
303         {
304                 if (!(__raw_readl(sfrbase + SYSMMU_REGISTER_STATUS) & SYSMMU_STATUS_BIT_PAGING_ENABLED)) 
305                 {
306                         //pr_info("Disable paging request success.\n");
307                         break;
308                 }
309         }
310         if (SYSMMU_REG_POLL_COUNT_FAST == i)
311         {
312                 pr_err("Disable paging request failed, MMU status is 0x%08X\n", __raw_readl(sfrbase + SYSMMU_REGISTER_STATUS));
313                 return false;
314         }
315         return true;
316 }
317
318 void sysmmu_page_fault_done(void __iomem *sfrbase,const char *dbgname)
319 {
320         pr_info("MMU: %s: Leaving page fault mode\n", dbgname);
321         __raw_writel(SYSMMU_COMMAND_PAGE_FAULT_DONE, sfrbase + SYSMMU_REGISTER_COMMAND);
322 }
/*
 * Invalidate the whole TLB of one MMU instance.
 * Returns false when the MMU is in page-fault state (the stall could not
 * be taken and therefore must not be released).
 */
bool sysmmu_zap_tlb(void __iomem *sfrbase)
{
        bool stall_success = sysmmu_enable_stall(sfrbase);
        
        /* NOTE(review): the zap is issued even when the stall request failed;
           presumably a deliberate best-effort — confirm against the TRM. */
        __raw_writel(SYSMMU_COMMAND_ZAP_CACHE, sfrbase + SYSMMU_REGISTER_COMMAND);
        if (false == stall_success) 
        {
                /* False means that it is in Pagefault state. Not possible to disable_stall then */
                return false;
        }
        sysmmu_disable_stall(sfrbase);
        return true;
}
/*
 * Hard-reset one MMU instance.
 * First writes a probe pattern (0xCAFEBABE) to DTE_ADDR and checks the
 * readback to verify the register file responds, then issues HARD_RESET
 * and waits for DTE_ADDR to read back as 0. Returns false when either
 * the probe readback mismatches or the reset does not complete in time.
 */
static inline bool sysmmu_raw_reset(void __iomem *sfrbase)
{
        int i;
        __raw_writel(0xCAFEBABE, sfrbase + SYSMMU_REGISTER_DTE_ADDR);

        /* NOTE(review): expected readback 0xCAFEB000 implies the low 12 bits
           of DTE_ADDR are not stored by hardware — confirm against the TRM. */
        if(!(0xCAFEB000 == __raw_readl(sfrbase+SYSMMU_REGISTER_DTE_ADDR)))
        {
                pr_err("error when %s.\n",__func__);
                return false;
        }
        __raw_writel(SYSMMU_COMMAND_HARD_RESET, sfrbase + SYSMMU_REGISTER_COMMAND);

        /* DTE_ADDR reads back 0 once the hard reset has completed. */
        for (i = 0; i < SYSMMU_REG_POLL_COUNT_FAST; ++i) 
        {
                if(__raw_readl(sfrbase + SYSMMU_REGISTER_DTE_ADDR) == 0)
                {
                        break;
                }
        }
        if (SYSMMU_REG_POLL_COUNT_FAST == i) {
                pr_err("%s,Reset request failed, MMU status is 0x%08X\n", __func__,__raw_readl(sfrbase + SYSMMU_REGISTER_DTE_ADDR));
                return false;
        }
        return true;
}
361
362 static void __sysmmu_set_ptbase(void __iomem *sfrbase,unsigned long pgd)
363 {
364         __raw_writel(pgd, sfrbase + SYSMMU_REGISTER_DTE_ADDR);
365
366 }
367
368 static bool sysmmu_reset(void __iomem *sfrbase,const char *dbgname)
369 {
370         bool err = true;
371         
372         err = sysmmu_enable_stall(sfrbase);
373         if(!err)
374         {
375                 pr_info("%s:stall failed: %s\n",__func__,dbgname);
376                 return err;
377         }
378         err = sysmmu_raw_reset(sfrbase);
379         if(err)
380         {
381                 __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT|SYSMMU_INTERRUPT_READ_BUS_ERROR, sfrbase+SYSMMU_REGISTER_INT_MASK);
382         }
383         sysmmu_disable_stall(sfrbase);
384         if(!err)
385                 pr_info("%s: failed: %s\n", __func__,dbgname);
386         return err;
387 }
388
/*
 * Flush the CPU caches for the page-table region [vastart, vaend):
 * inner cache via dmac_flush_range, outer (L2) cache by physical range,
 * so the IOMMU's table walker sees the updated entries in DRAM.
 */
static inline void pgtable_flush(void *vastart, void *vaend)
{
        dmac_flush_range(vastart, vaend);
        outer_flush_range(virt_to_phys(vastart),virt_to_phys(vaend));
}
/* Install @handler as the fault callback under the drvdata write lock. */
static void __set_fault_handler(struct sysmmu_drvdata *data,
                                        sysmmu_fault_handler_t handler)
{
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);
        data->fault_handler = handler;
        write_unlock_irqrestore(&data->lock, flags);
}
403
404 void rockchip_sysmmu_set_fault_handler(struct device *dev,sysmmu_fault_handler_t handler)
405 {
406         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
407
408         __set_fault_handler(data, handler);
409 }
410
411 static int default_fault_handler(struct device *dev,
412                                         enum rk_sysmmu_inttype itype,
413                                         unsigned long pgtable_base,
414                                         unsigned long fault_addr,
415                                         unsigned int status
416                                         )
417 {
418         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
419
420         if(!data)
421         {
422                 pr_info("%s,iommu device not assigned yet\n",__func__);
423                 return 0;
424         }
425         if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
426                 itype = SYSMMU_FAULT_UNKNOWN;
427
428         if(itype == SYSMMU_BUSERROR)
429                 pr_err("%s occured at 0x%lx(Page table base: 0x%lx)\n",sysmmu_fault_name[itype], fault_addr, pgtable_base);
430
431         if(itype == SYSMMU_PAGEFAULT)
432                 pr_err("SYSMMU:Page fault detected at 0x%lx from bus id %d of type %s on %s\n",
433                                 fault_addr,
434                                 (status >> 6) & 0x1F,
435                                 (status & 32) ? "write" : "read",
436                                 data->dbgname
437                                 );
438
439         pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
440
441         BUG();
442
443         return 0;
444 }
445 static void dump_pagetbl(u32 fault_address,u32 addr_dte)
446 {
447 #if 0
448         u32  offset1;
449         u32  offset2;
450         u32 *level2_base;
451         u32 *level1_entry;
452         u32 *level2_entry;
453 #endif
454         #if 1
455         u32 lv1_offset;
456         u32 lv2_offset;
457         
458         u32 *lv1_entry_pa;
459         u32 *lv1_entry_va;
460         u32 *lv1_entry_value;
461         
462         u32 *lv2_base;
463         u32 *lv2_entry_pa;
464         u32 *lv2_entry_va;
465         u32 *lv2_entry_value;
466
467         
468         lv1_offset = lv1ent_offset(fault_address);
469         lv2_offset = lv2ent_offset(fault_address);
470         
471         lv1_entry_pa = (u32 *)addr_dte + lv1_offset;
472         lv1_entry_va = (u32 *)(__va(addr_dte)) + lv1_offset;
473         lv1_entry_value = (u32 *)(*lv1_entry_va);
474         
475         lv2_base = (u32 *)((*lv1_entry_va) & 0xfffffffe);
476         lv2_entry_pa = (u32 * )lv2_base + lv2_offset;
477         lv2_entry_va = (u32 * )(__va(lv2_base)) + lv2_offset;
478         lv2_entry_value = (u32 *)(*lv2_entry_va);
479         
480         pr_info("fault address = 0x%08x,dte addr pa = 0x%08x,va = 0x%08x\n",fault_address,addr_dte,(u32)__va(addr_dte));
481         pr_info("lv1_offset = 0x%x,lv1_entry_pa = 0x%08x,lv1_entry_va = 0x%08x\n",lv1_offset,(u32)lv1_entry_pa,(u32)lv1_entry_va);
482         pr_info("lv1_entry_value(*lv1_entry_va) = 0x%08x,lv2_base = 0x%08x\n",(u32)lv1_entry_value,(u32)lv2_base);
483         pr_info("lv2_offset = 0x%x,lv2_entry_pa = 0x%08x,lv2_entry_va = 0x%08x\n",lv2_offset,(u32)lv2_entry_pa,(u32)lv2_entry_va);
484         pr_info("lv2_entry value(*lv2_entry_va) = 0x%08x\n",(u32)lv2_entry_value);
485         
486         #endif
487 #if 0
488         offset1 = lv1ent_offset(fault_address);
489         offset2 = lv2ent_offset(fault_address);
490         level1_entry = (u32 *)__va(addr_dte)+offset1;
491         level2_base = (u32 *)__va((*level1_entry)&0xfffffffe);
492         level2_entry = level2_base+offset2;
493         pr_info("level1 offset=%d,level2 offset=%d,level1_entry=0x%08x\n",offset1,offset2,(u32)level1_entry);
494         pr_info("*level1_entry = 0x%08x\n",*level1_entry);
495         pr_info("*level2_entry = 0x%08x\n",*level2_entry);
496 #endif
497
498 }
499 static irqreturn_t rockchip_sysmmu_irq(int irq, void *dev_id)
500 {
501         /* SYSMMU is in blocked when interrupt occurred. */
502         struct sysmmu_drvdata *data = dev_id;
503         struct resource *irqres;
504         struct platform_device *pdev;
505         enum rk_sysmmu_inttype itype = SYSMMU_FAULT_UNKNOWN;
506         u32 status;
507         u32 rawstat;
508         u32 int_status;
509         u32 fault_address;
510         int i, ret = 0;
511
512         read_lock(&data->lock);
513         
514 #if 0
515         WARN_ON(!is_sysmmu_active(data));
516 #else
517         if(!is_sysmmu_active(data))
518         {
519                 read_unlock(&data->lock);
520                 return IRQ_HANDLED;
521         }
522 #endif  
523         pdev = to_platform_device(data->sysmmu);
524
525         for (i = 0; i < data->num_res_irq; i++) 
526         {
527                 irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
528                 if (irqres && ((int)irqres->start == irq))
529                         break;
530         }
531
532         if (i == data->num_res_irq) 
533         {
534                 itype = SYSMMU_FAULT_UNKNOWN;
535         } 
536         else 
537         {
538                 int_status = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_INT_STATUS);
539                 if(int_status != 0)
540                 {
541                         /*mask status*/
542                         __raw_writel(0x00,data->res_bases[i] + SYSMMU_REGISTER_INT_MASK);
543                         
544                         rawstat = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_INT_RAWSTAT);
545
546                         if(rawstat & SYSMMU_INTERRUPT_PAGE_FAULT)
547                         {
548                                 fault_address = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_PAGE_FAULT_ADDR);
549                                 itype = SYSMMU_PAGEFAULT;
550                         }
551                         else if(rawstat & SYSMMU_INTERRUPT_READ_BUS_ERROR)
552                         {
553                                 itype = SYSMMU_BUSERROR;
554                         }
555                         else
556                         {
557                                 goto out;
558                         }
559                         dump_pagetbl(fault_address,__raw_readl(data->res_bases[i] + SYSMMU_REGISTER_DTE_ADDR));
560                 }
561                 else
562                         goto out;
563         }
564         
565         if (data->fault_handler) 
566         {
567                 unsigned long base = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_DTE_ADDR);
568                 status = __raw_readl(data->res_bases[i] + SYSMMU_REGISTER_STATUS);
569                 ret = data->fault_handler(data->dev, itype, base, fault_address,status);
570         }
571
572         if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
573         {
574                 if(SYSMMU_PAGEFAULT == itype)
575                 {
576                         sysmmu_zap_tlb(data->res_bases[i]);
577                         sysmmu_page_fault_done(data->res_bases[i],data->dbgname);
578                         __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT|SYSMMU_INTERRUPT_READ_BUS_ERROR, data->res_bases[i]+SYSMMU_REGISTER_INT_MASK);
579                 }
580         }
581         else
582                 pr_err("(%s) %s is not handled.\n",data->dbgname, sysmmu_fault_name[itype]);
583
584 out :
585         read_unlock(&data->lock);
586
587         return IRQ_HANDLED;
588 }
589
590 static bool __rockchip_sysmmu_disable(struct sysmmu_drvdata *data)
591 {
592         unsigned long flags;
593         bool disabled = false;
594         int i;
595         write_lock_irqsave(&data->lock, flags);
596
597         if (!set_sysmmu_inactive(data))
598                 goto finish;
599
600         for(i=0;i<data->num_res_mem;i++)
601         {
602                 sysmmu_disable_paging(data->res_bases[i]);
603         }
604
605         disabled = true;
606         data->pgtable = 0;
607         data->domain = NULL;
608 finish:
609         write_unlock_irqrestore(&data->lock, flags);
610
611         if (disabled)
612                 pr_info("(%s) Disabled\n", data->dbgname);
613         else
614                 pr_info("(%s) %d times left to be disabled\n",data->dbgname, data->activations);
615
616         return disabled;
617 }
618
619 /* __rk_sysmmu_enable: Enables System MMU
620  *
621  * returns -error if an error occurred and System MMU is not enabled,
622  * 0 if the System MMU has been just enabled and 1 if System MMU was already
623  * enabled before.
624  */
/*
 * Enable all instances of this System MMU with @pgtable (physical
 * address of the lv1 table). Holds the drvdata write lock.
 * Returns -EBUSY when already enabled with a different page table,
 * 1 when already enabled with the same one, 0 when freshly enabled.
 */
static int __rockchip_sysmmu_enable(struct sysmmu_drvdata *data,unsigned long pgtable, struct iommu_domain *domain)
{
        int i, ret = 0;
        unsigned long flags;

        write_lock_irqsave(&data->lock, flags);

        if (!set_sysmmu_active(data)) 
        {
                /* Already active: only the same page table is acceptable. */
                if (WARN_ON(pgtable != data->pgtable)) 
                {
                        ret = -EBUSY;
                        set_sysmmu_inactive(data);
                } 
                else 
                        ret = 1;

                pr_info("(%s) Already enabled\n", data->dbgname);
                goto finish;
        }
        
        data->pgtable = pgtable;

        for (i = 0; i < data->num_res_mem; i++) 
        {
                bool status;
                /* Stall the MMU before reprogramming the page-table base. */
                status = sysmmu_enable_stall(data->res_bases[i]);
                if(status)
                {
                        __sysmmu_set_ptbase(data->res_bases[i], pgtable);
                        /* Drop any stale translations cached for the old table. */
                        __raw_writel(SYSMMU_COMMAND_ZAP_CACHE, data->res_bases[i] + SYSMMU_REGISTER_COMMAND);
                }
                /* Unmask page-fault and bus-error interrupts, then start paging. */
                __raw_writel(SYSMMU_INTERRUPT_PAGE_FAULT|SYSMMU_INTERRUPT_READ_BUS_ERROR, data->res_bases[i]+SYSMMU_REGISTER_INT_MASK);
                sysmmu_enable_paging(data->res_bases[i]);
                sysmmu_disable_stall(data->res_bases[i]);
        }

        data->domain = domain;

        pr_info("(%s) Enabled\n", data->dbgname);
finish:
        write_unlock_irqrestore(&data->lock, flags);

        return ret;
}
670 bool rockchip_sysmmu_disable(struct device *dev)
671 {
672         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
673         bool disabled;
674
675         disabled = __rockchip_sysmmu_disable(data);
676
677         return disabled;
678 }
679 void rockchip_sysmmu_tlb_invalidate(struct device *dev)
680 {
681         unsigned long flags;
682         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
683
684         read_lock_irqsave(&data->lock, flags);
685
686         if (is_sysmmu_active(data)) 
687         {
688                 int i;
689                 for (i = 0; i < data->num_res_mem; i++) 
690                 {
691                         if(!sysmmu_zap_tlb(data->res_bases[i]))
692                                 pr_err("%s,invalidating TLB failed\n",data->dbgname);
693                 }
694         } 
695         else 
696                 pr_info("(%s) Disabled. Skipping invalidating TLB.\n",data->dbgname);
697
698         read_unlock_irqrestore(&data->lock, flags);
699 }
700 static phys_addr_t rockchip_iommu_iova_to_phys(struct iommu_domain *domain,dma_addr_t iova)
701 {
702         struct rk_iommu_domain *priv = domain->priv;
703         unsigned long *entry;
704         unsigned long flags;
705         phys_addr_t phys = 0;
706
707         spin_lock_irqsave(&priv->pgtablelock, flags);
708
709         entry = section_entry(priv->pgtable, iova);
710         entry = page_entry(entry, iova);
711         phys = spage_phys(entry) + spage_offs(iova);
712         
713         spin_unlock_irqrestore(&priv->pgtablelock, flags);
714
715         return phys;
716 }
717 static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
718                                                                 short *pgcnt)
719 {
720         if (!lv2ent_fault(pent))
721                 return -EADDRINUSE;
722
723         *pent = mk_lv2ent_spage(paddr);
724         pgtable_flush(pent, pent + 1);
725         *pgcnt -= 1;
726         return 0;
727 }
728
729 static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,short *pgcounter)
730 {
731         if (lv1ent_fault(sent)) 
732         {
733                 unsigned long *pent;
734
735                 pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
736                 BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
737                 if (!pent)
738                         return NULL;
739
740                 *sent = mk_lv1ent_page(__pa(pent));
741                 kmemleak_ignore(pent);
742                 *pgcounter = NUM_LV2ENTRIES;
743                 pgtable_flush(pent, pent + NUM_LV2ENTRIES);
744                 pgtable_flush(sent, sent + 1);
745         }
746         return page_entry(sent, iova);
747 }
748
749 static size_t rockchip_iommu_unmap(struct iommu_domain *domain,unsigned long iova, size_t size)
750 {
751         struct rk_iommu_domain *priv = domain->priv;
752         unsigned long flags;
753         unsigned long *ent;
754
755         BUG_ON(priv->pgtable == NULL);
756
757         spin_lock_irqsave(&priv->pgtablelock, flags);
758
759         ent = section_entry(priv->pgtable, iova);
760
761         if (unlikely(lv1ent_fault(ent))) 
762         {
763                 if (size > SPAGE_SIZE)
764                         size = SPAGE_SIZE;
765                 goto done;
766         }
767
768         /* lv1ent_page(sent) == true here */
769
770         ent = page_entry(ent, iova);
771
772         if (unlikely(lv2ent_fault(ent))) 
773         {
774                 size = SPAGE_SIZE;
775                 goto done;
776         }
777         
778         *ent = 0;
779         size = SPAGE_SIZE;
780         priv->lv2entcnt[lv1ent_offset(iova)] += 1;
781         goto done;
782
783 done:
784         //pr_info("%s:unmap iova 0x%lx/0x%x bytes\n",__func__, iova,size);
785         spin_unlock_irqrestore(&priv->pgtablelock, flags);
786
787         return size;
788 }
789 static int rockchip_iommu_map(struct iommu_domain *domain, unsigned long iova,
790                          phys_addr_t paddr, size_t size, int prot)
791 {
792         struct rk_iommu_domain *priv = domain->priv;
793         unsigned long *entry;
794         unsigned long flags;
795         int ret = -ENOMEM;
796         unsigned long *pent;
797
798         BUG_ON(priv->pgtable == NULL);
799
800         spin_lock_irqsave(&priv->pgtablelock, flags);
801
802         entry = section_entry(priv->pgtable, iova);
803         
804         pent = alloc_lv2entry(entry, iova,&priv->lv2entcnt[lv1ent_offset(iova)]);
805         if (!pent)
806                 ret = -ENOMEM;
807         else
808                 ret = lv2set_page(pent, paddr, size,&priv->lv2entcnt[lv1ent_offset(iova)]);
809         
810         if (ret)
811         {
812                 pr_err("%s: Failed to map iova 0x%lx/0x%x bytes\n",__func__, iova, size);
813         }
814         spin_unlock_irqrestore(&priv->pgtablelock, flags);
815
816         return ret;
817 }
818
819 static void rockchip_iommu_detach_device(struct iommu_domain *domain,
820                                     struct device *dev)
821 {
822         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
823         struct rk_iommu_domain *priv = domain->priv;
824         struct list_head *pos;
825         unsigned long flags;
826         bool found = false;
827
828         spin_lock_irqsave(&priv->lock, flags);
829
830         list_for_each(pos, &priv->clients) 
831         {
832                 if (list_entry(pos, struct sysmmu_drvdata, node) == data) 
833                 {
834                         found = true;
835                         break;
836                 }
837         }
838         if (!found)
839                 goto finish;
840
841         if (__rockchip_sysmmu_disable(data)) 
842         {
843                 pr_info("%s: Detached IOMMU with pgtable %#lx\n",__func__, __pa(priv->pgtable));
844                 list_del(&data->node);
845                 INIT_LIST_HEAD(&data->node);
846
847         } 
848         else 
849                 pr_info("%s: Detaching IOMMU with pgtable %#lx delayed",__func__, __pa(priv->pgtable));
850         
851 finish:
852         spin_unlock_irqrestore(&priv->lock, flags);
853 }
854 static int rockchip_iommu_attach_device(struct iommu_domain *domain,struct device *dev)
855 {
856         struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
857         struct rk_iommu_domain *priv = domain->priv;
858         unsigned long flags;
859         int ret;
860
861         spin_lock_irqsave(&priv->lock, flags);
862
863         ret = __rockchip_sysmmu_enable(data, __pa(priv->pgtable), domain);
864
865         if (ret == 0) 
866         {
867                 /* 'data->node' must not be appeared in priv->clients */
868                 BUG_ON(!list_empty(&data->node));
869                 data->dev = dev;
870                 list_add_tail(&data->node, &priv->clients);
871         }
872
873         spin_unlock_irqrestore(&priv->lock, flags);
874
875         if (ret < 0) 
876         {
877                 pr_err("%s: Failed to attach IOMMU with pgtable %#lx\n",__func__, __pa(priv->pgtable));
878         } 
879         else if (ret > 0) 
880         {
881                 pr_info("%s: IOMMU with pgtable 0x%lx already attached\n",__func__, __pa(priv->pgtable));
882         } 
883         else 
884         {
885                 pr_info("%s: Attached new IOMMU with pgtable 0x%lx\n",__func__, __pa(priv->pgtable));
886         }
887
888         return ret;
889 }
/*
 * rockchip_iommu_domain_destroy - iommu_ops.domain_destroy callback.
 *
 * Forcibly disables any sysmmu still attached, frees every level-2 page
 * table referenced by the level-1 table, then frees the level-1 table,
 * the entry-count page and the domain structure itself.
 */
static void rockchip_iommu_domain_destroy(struct iommu_domain *domain)
{
        struct rk_iommu_domain *priv = domain->priv;
        struct sysmmu_drvdata *data;
        unsigned long flags;
        int i;

        /* Callers should have detached all devices first; warn if not. */
        WARN_ON(!list_empty(&priv->clients));

        spin_lock_irqsave(&priv->lock, flags);

        /*
         * Spin until each remaining sysmmu is actually disabled.
         * NOTE(review): this busy-waits while holding a spinlock with
         * interrupts off — safe only if disable cannot block; confirm.
         */
        list_for_each_entry(data, &priv->clients, node) 
        {
                while (!rockchip_sysmmu_disable(data->dev))
                        ; /* until System MMU is actually disabled */
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        /* Free every allocated level-2 table (lv1 entry marked present). */
        for (i = 0; i < NUM_LV1ENTRIES; i++)
                if (lv1ent_page(priv->pgtable + i))
                        kmem_cache_free(lv2table_kmem_cache,__va(lv2table_base(priv->pgtable + i)));

        /* Both the lv1 table and the counter array are single pages. */
        free_pages((unsigned long)priv->pgtable, 0);
        free_pages((unsigned long)priv->lv2entcnt, 0);
        kfree(domain->priv);
        domain->priv = NULL;
}
917
918 static int rockchip_iommu_domain_init(struct iommu_domain *domain)
919 {
920         struct rk_iommu_domain *priv;
921
922         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
923         if (!priv)
924                 return -ENOMEM;
925         
926 /*rk32xx sysmmu use 2 level pagetable,
927    level1 and leve2 both have 1024 entries,each entry  occupy 4 bytes,
928    so alloc a page size for each page table 
929 */
930         priv->pgtable = (unsigned long *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
931         if (!priv->pgtable)
932                 goto err_pgtable;
933
934         priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
935         if (!priv->lv2entcnt)
936                 goto err_counter;
937
938         pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
939
940         spin_lock_init(&priv->lock);
941         spin_lock_init(&priv->pgtablelock);
942         INIT_LIST_HEAD(&priv->clients);
943
944         domain->priv = priv;
945         return 0;
946
947 err_counter:
948         free_pages((unsigned long)priv->pgtable, 0);    
949 err_pgtable:
950         kfree(priv);
951         return -ENOMEM;
952 }
953
954 static struct iommu_ops rk_iommu_ops = 
955 {
956         .domain_init = &rockchip_iommu_domain_init,
957         .domain_destroy = &rockchip_iommu_domain_destroy,
958         .attach_dev = &rockchip_iommu_attach_device,
959         .detach_dev = &rockchip_iommu_detach_device,
960         .map = &rockchip_iommu_map,
961         .unmap = &rockchip_iommu_unmap,
962         .iova_to_phys = &rockchip_iommu_iova_to_phys,
963         .pgsize_bitmap = SPAGE_SIZE,
964 };
965
966 static int rockchip_sysmmu_prepare(void)
967 {
968         int ret = 0;
969         static int registed = 0;
970         
971         if(registed)
972                 return 0;
973         
974         lv2table_kmem_cache = kmem_cache_create("rk-iommu-lv2table",LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
975         if (!lv2table_kmem_cache) 
976         {
977                 pr_err("%s: failed to create kmem cache\n", __func__);
978                 return -ENOMEM;
979         }
980         ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
981         if(!ret)
982                 registed = 1;
983         else
984                 pr_err("%s:failed to set iommu to bus\r\n",__func__);
985         return ret;
986 }
/*
 * rockchip_get_sysmmu_resource_num - count @pdev's resources of @type
 * (e.g. IORESOURCE_MEM or IORESOURCE_IRQ) by probing ascending indices
 * until platform_get_resource() returns NULL.
 */
static int rockchip_get_sysmmu_resource_num(struct platform_device *pdev, unsigned int type)
{
        int count = 0;

        while (platform_get_resource(pdev, type, count))
                count++;

        return count;
}
1002
/* sysfs kobject ("/sys/rk_iommu") hosting the page-table dump attribute. */
static struct kobject *dump_mmu_object;
1004
1005 static int dump_mmu_pagetbl(struct device *dev,struct device_attribute *attr, const char *buf,u32 count)
1006 {
1007         u32 fault_address;
1008         u32 iommu_dte ;
1009         u32 mmu_base;
1010         void __iomem *base;
1011         u32 ret;
1012         ret = kstrtouint(buf,0,&mmu_base);
1013         if (ret)
1014                 printk("%s is not in hexdecimal form.\n", buf);
1015         base = ioremap(mmu_base, 0x100);
1016         iommu_dte = __raw_readl(base + SYSMMU_REGISTER_DTE_ADDR);
1017         fault_address = __raw_readl(base + SYSMMU_REGISTER_PAGE_FAULT_ADDR);
1018         dump_pagetbl(fault_address,iommu_dte);
1019         return count;
1020 }
/* Write-handled sysfs attribute: echo a sysmmu base address to trigger a dump. */
static DEVICE_ATTR(dump_mmu_pgtable, 0644, NULL, dump_mmu_pagetbl);
1022
1023 void dump_iommu_sysfs_init(void )
1024 {
1025         dump_mmu_object = kobject_create_and_add("rk_iommu", NULL);
1026         if (dump_mmu_object == NULL)
1027                 return;
1028         sysfs_create_file(dump_mmu_object, &dev_attr_dump_mmu_pgtable.attr);
1029         return;
1030 }
1031         
1032
1033
1034 static int rockchip_sysmmu_probe(struct platform_device *pdev)
1035 {
1036         int i, ret;
1037         struct device *dev;
1038         struct sysmmu_drvdata *data;
1039         
1040         dev = &pdev->dev;
1041         
1042         ret = rockchip_sysmmu_prepare();
1043         if(ret)
1044         {
1045                 pr_err("%s,failed\r\n",__func__);
1046                 goto err_alloc;
1047         }
1048
1049         data = devm_kzalloc(dev,sizeof(*data), GFP_KERNEL);
1050         if (!data) 
1051         {
1052                 dev_dbg(dev, "Not enough memory\n");
1053                 ret = -ENOMEM;
1054                 goto err_alloc;
1055         }
1056         
1057         ret = dev_set_drvdata(dev, data);
1058         if (ret) 
1059         {
1060                 dev_dbg(dev, "Unabled to initialize driver data\n");
1061                 goto err_init;
1062         }
1063         
1064         if(pdev->dev.of_node)
1065         {
1066                 of_property_read_string(pdev->dev.of_node,"dbgname",&(data->dbgname));
1067         }
1068         else
1069         {
1070                 pr_info("dbgname not assigned in device tree or device node not exist\r\n");
1071         }
1072
1073         pr_info("(%s) Enter\n", data->dbgname);
1074
1075         /*rk32xx sysmmu need both irq and memory */
1076         data->num_res_mem = rockchip_get_sysmmu_resource_num(pdev,IORESOURCE_MEM);
1077         if(0 == data->num_res_mem)
1078         {
1079                 pr_err("can't find sysmmu memory resource \r\n");
1080                 goto err_init;
1081         }
1082         pr_info("data->num_res_mem=%d\n",data->num_res_mem);
1083         data->num_res_irq = rockchip_get_sysmmu_resource_num(pdev,IORESOURCE_IRQ);
1084         if(0 == data->num_res_irq)
1085         {
1086                 pr_err("can't find sysmmu irq resource \r\n");
1087                 goto err_init;
1088         }
1089         
1090         data->res_bases = kmalloc(sizeof(*data->res_bases) * data->num_res_mem,GFP_KERNEL);
1091         if (data->res_bases == NULL)
1092         {
1093                 dev_dbg(dev, "Not enough memory\n");
1094                 ret = -ENOMEM;
1095                 goto err_init;
1096         }
1097
1098         for (i = 0; i < data->num_res_mem; i++) 
1099         {
1100                 struct resource *res;
1101                 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
1102                 if (!res) 
1103                 {
1104                         pr_err("Unable to find IOMEM region\n");
1105                         ret = -ENOENT;
1106                         goto err_res;
1107                 }
1108                 data->res_bases[i] = ioremap(res->start, resource_size(res));
1109                 pr_info("res->start = 0x%08x  ioremap to  data->res_bases[%d] = 0x%08x\n",res->start,i,(unsigned int)data->res_bases[i]);
1110                 if (!data->res_bases[i]) 
1111                 {
1112                         pr_err("Unable to map IOMEM @ PA:%#x\n",res->start);
1113                         ret = -ENOENT;
1114                         goto err_res;
1115                 }
1116                 if(!strstr(data->dbgname,"isp"))
1117                 {
1118                         /*reset sysmmu*/
1119                         if(!sysmmu_reset(data->res_bases[i],data->dbgname))
1120                         {
1121                                 ret = -ENOENT;
1122                                 goto err_res;
1123                         }
1124                 }
1125         }
1126
1127         for (i = 0; i < data->num_res_irq; i++) 
1128         {
1129                 ret = platform_get_irq(pdev, i);
1130                 if (ret <= 0) 
1131                 {
1132                         pr_err("Unable to find IRQ resource\n");
1133                         goto err_irq;
1134                 }
1135                 ret = request_irq(ret, rockchip_sysmmu_irq, IRQF_SHARED ,dev_name(dev), data);
1136                 if (ret) 
1137                 {
1138                         pr_err("Unabled to register interrupt handler\n");
1139                         goto err_irq;
1140                 }
1141         }
1142         ret = rockchip_init_iovmm(dev, &data->vmm);
1143         if (ret)
1144                 goto err_irq;
1145         
1146         
1147         data->sysmmu = dev;
1148         rwlock_init(&data->lock);
1149         INIT_LIST_HEAD(&data->node);
1150
1151         __set_fault_handler(data, &default_fault_handler);
1152
1153         pr_info("(%s) Initialized\n", data->dbgname);
1154         return 0;
1155
1156 err_irq:
1157         while (i-- > 0) 
1158         {
1159                 int irq;
1160
1161                 irq = platform_get_irq(pdev, i);
1162                 free_irq(irq, data);
1163         }
1164 err_res:
1165         while (data->num_res_mem-- > 0)
1166                 iounmap(data->res_bases[data->num_res_mem]);
1167         kfree(data->res_bases);
1168 err_init:
1169         kfree(data);
1170 err_alloc:
1171         dev_err(dev, "Failed to initialize\n");
1172         return ret;
1173 }
1174
#ifdef CONFIG_OF
/* Device-tree match table: one compatible string per sysmmu instance. */
static const struct of_device_id sysmmu_dt_ids[] = 
{
        { .compatible = IEP_SYSMMU_COMPATIBLE_NAME},
        { .compatible = VIP_SYSMMU_COMPATIBLE_NAME},
        { .compatible = VOPB_SYSMMU_COMPATIBLE_NAME},
        { .compatible = VOPL_SYSMMU_COMPATIBLE_NAME},
        { .compatible = HEVC_SYSMMU_COMPATIBLE_NAME},
        { .compatible = VPU_SYSMMU_COMPATIBLE_NAME},
        { .compatible = ISP_SYSMMU_COMPATIBLE_NAME},
        { /* end */ }
};
MODULE_DEVICE_TABLE(of, sysmmu_dt_ids);
#endif
1189
1190 static struct platform_driver rk_sysmmu_driver = 
1191 {
1192         .probe = rockchip_sysmmu_probe,
1193         .remove = NULL,
1194         .driver = 
1195         {
1196                    .name = "rk_sysmmu",
1197                    .owner = THIS_MODULE,
1198                    .of_match_table = of_match_ptr(sysmmu_dt_ids),
1199         },
1200 };
1201
1202 #if 0
/*
 * module_platform_driver() could not be made to work here (reason unknown);
 * the driver is instead registered explicitly from core_initcall() below.
 */
1204 #ifdef CONFIG_OF
1205 module_platform_driver(rk_sysmmu_driver);
1206 #endif
1207 #endif
1208 static int __init rockchip_sysmmu_init_driver(void)
1209 {
1210         dump_iommu_sysfs_init();
1211
1212         return platform_driver_register(&rk_sysmmu_driver);
1213 }
1214
1215 core_initcall(rockchip_sysmmu_init_driver);
1216