2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
49 #include <linux/mfd/syscon.h>
50 #include <linux/regmap.h>
52 #include "rk_sdmmc_dbg.h"
53 #include <linux/regulator/rockchip_io_vol_domain.h>
54 #include "../../clk/rockchip/clk-ops.h"
56 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
58 /* Common flag combinations */
59 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
60 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
62 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
64 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
65 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
66 #define DW_MCI_SEND_STATUS 1
67 #define DW_MCI_RECV_STATUS 2
68 #define DW_MCI_DMA_THRESHOLD 16
70 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
71 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
73 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
74 #define SDMMC_DATA_TIMEOUT_SD 500
75 #define SDMMC_DATA_TIMEOUT_SDIO 250
76 #define SDMMC_DATA_TIMEOUT_EMMC 2500
78 #define SDMMC_CMD_RTO_MAX_HOLD 200
79 #define SDMMC_WAIT_FOR_UNBUSY 2500
81 #ifdef CONFIG_MMC_DW_IDMAC
82 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
83 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
84 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
88 u32 des0; /* Control Descriptor */
89 #define IDMAC_DES0_DIC BIT(1)
90 #define IDMAC_DES0_LD BIT(2)
91 #define IDMAC_DES0_FD BIT(3)
92 #define IDMAC_DES0_CH BIT(4)
93 #define IDMAC_DES0_ER BIT(5)
94 #define IDMAC_DES0_CES BIT(30)
95 #define IDMAC_DES0_OWN BIT(31)
97 u32 des1; /* Buffer sizes */
98 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
99 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
101 u32 des2; /* buffer 1 physical address */
103 u32 des3; /* buffer 2 physical address */
105 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * Tuning block pattern for a 4-bit data bus — matches the fixed pattern
 * defined by the SD UHS-I / eMMC specifications for CMD19/CMD21 tuning.
 * NOTE(review): the array's closing lines are elided from this extract.
 */
107 static const u8 tuning_blk_pattern_4bit[] = {
108 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
109 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
110 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
111 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
112 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
113 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
114 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
115 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/*
 * Tuning block pattern for an 8-bit data bus (eMMC HS200 tuning, CMD21).
 * NOTE(review): the array's closing lines are elided from this extract.
 */
118 static const u8 tuning_blk_pattern_8bit[] = {
119 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
120 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
121 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
122 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
123 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
124 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
125 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
126 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
127 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
128 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
129 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
130 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
131 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
132 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
133 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
134 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
137 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
138 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
139 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
140 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
142 /*printk the all register of current host*/
/*
 * Debug helper: walk the dw_mci_regs name/address table and print every
 * controller register of @host via printk. NOTE(review): loop body and
 * return statement lines are elided from this extract.
 */
144 static int dw_mci_regs_printk(struct dw_mci *host)
146 struct sdmmc_reg *regs = dw_mci_regs;
/* Table is terminated by an entry whose .name is NULL. */
148 while( regs->name != 0 ){
149 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
152 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
157 #if defined(CONFIG_DEBUG_FS)
/*
 * debugfs "req" file: dump the current mmc_request of a slot (command,
 * data, stop command) under host->lock so the snapshot is consistent.
 * NOTE(review): the mrq/cmd/stop/data assignments and seq_printf headers
 * are on lines elided from this extract.
 */
158 static int dw_mci_req_show(struct seq_file *s, void *v)
160 struct dw_mci_slot *slot = s->private;
161 struct mmc_request *mrq;
162 struct mmc_command *cmd;
163 struct mmc_command *stop;
164 struct mmc_data *data;
166 /* Make sure we get a consistent snapshot */
167 spin_lock_bh(&slot->host->lock);
/* NOTE(review): resp[2] is printed twice below (upstream prints resp[3]);
 * cannot confirm intent from this extract — looks like a copy/paste slip. */
177 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
178 cmd->opcode, cmd->arg, cmd->flags,
179 cmd->resp[0], cmd->resp[1], cmd->resp[2],
180 cmd->resp[2], cmd->error);
182 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
183 data->bytes_xfered, data->blocks,
184 data->blksz, data->flags, data->error);
/* Same resp[2]-twice pattern for the stop command. */
187 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
188 stop->opcode, stop->arg, stop->flags,
189 stop->resp[0], stop->resp[1], stop->resp[2],
190 stop->resp[2], stop->error);
193 spin_unlock_bh(&slot->host->lock);
/* debugfs open hook: bind dw_mci_req_show to the seq_file single-open API. */
198 static int dw_mci_req_open(struct inode *inode, struct file *file)
200 return single_open(file, dw_mci_req_show, inode->i_private);
/* file_operations for the debugfs "req" node (read-only seq_file).
 * NOTE(review): .read/.llseek lines are elided from this extract. */
203 static const struct file_operations dw_mci_req_fops = {
204 .owner = THIS_MODULE,
205 .open = dw_mci_req_open,
208 .release = single_release,
/*
 * debugfs "regs" file: print key register *offsets* labels. NOTE(review):
 * the SDMMC_* macros here are register offsets, not live values — elided
 * lines may read the hardware; cannot confirm from this extract.
 */
211 static int dw_mci_regs_show(struct seq_file *s, void *v)
213 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
214 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
215 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
216 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
217 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
218 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open hook: bind dw_mci_regs_show to the seq_file single-open API. */
223 static int dw_mci_regs_open(struct inode *inode, struct file *file)
225 return single_open(file, dw_mci_regs_show, inode->i_private);
/* file_operations for the debugfs "regs" node (read-only seq_file).
 * NOTE(review): .read/.llseek lines are elided from this extract. */
228 static const struct file_operations dw_mci_regs_fops = {
229 .owner = THIS_MODULE,
230 .open = dw_mci_regs_open,
233 .release = single_release,
/*
 * Create the per-slot debugfs nodes (regs, req, state, pending/completed
 * events) under the mmc host's debugfs root. Failure is non-fatal: only a
 * dev_err is emitted. NOTE(review): the NULL-checks/goto-err lines between
 * the create calls are elided from this extract.
 */
236 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
238 struct mmc_host *mmc = slot->mmc;
239 struct dw_mci *host = slot->host;
243 root = mmc->debugfs_root;
247 node = debugfs_create_file("regs", S_IRUSR, root, host,
252 node = debugfs_create_file("req", S_IRUSR, root, slot,
257 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
261 node = debugfs_create_x32("pending_events", S_IRUSR, root,
262 (u32 *)&host->pending_events);
266 node = debugfs_create_x32("completed_events", S_IRUSR, root,
267 (u32 *)&host->completed_events);
274 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
276 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the data timeout register (TMOUT) to its maximum value. */
278 static void dw_mci_set_timeout(struct dw_mci *host)
280 /* timeout (maximum) */
281 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMD register flags:
 * stop/abort bit, wait-for-previous-data, response expected/long/CRC, and
 * data direction bits; then let the platform drv_data hook amend them.
 * Returns the assembled CMDR value. NOTE(review): the `cmdr = cmd->opcode`
 * initialisation and the `data = cmd->data` lines are elided here.
 */
284 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
286 struct mmc_data *data;
287 struct dw_mci_slot *slot = mmc_priv(mmc);
288 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
290 cmd->error = -EINPROGRESS;
294 if (cmdr == MMC_STOP_TRANSMISSION)
295 cmdr |= SDMMC_CMD_STOP;
297 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
299 if (cmd->flags & MMC_RSP_PRESENT) {
300 /* We expect a response, so set this bit */
301 cmdr |= SDMMC_CMD_RESP_EXP;
302 if (cmd->flags & MMC_RSP_136)
303 cmdr |= SDMMC_CMD_RESP_LONG;
306 if (cmd->flags & MMC_RSP_CRC)
307 cmdr |= SDMMC_CMD_RESP_CRC;
/* Data-bearing command: set transfer-direction/mode bits. */
311 cmdr |= SDMMC_CMD_DAT_EXP;
312 if (data->flags & MMC_DATA_STREAM)
313 cmdr |= SDMMC_CMD_STRM_MODE;
314 if (data->flags & MMC_DATA_WRITE)
315 cmdr |= SDMMC_CMD_DAT_WR;
/* Platform-specific tweak of the command flags (e.g. HOLD_REG). */
318 if (drv_data && drv_data->prepare_command)
319 drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build a stop/abort command in host->stop_abort for a data command:
 * CMD12 (STOP_TRANSMISSION) for block read/write, or CMD52 with the CCCR
 * abort bit for SDIO extended I/O. Returns the CMDR flags for it.
 */
325 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
327 struct mmc_command *stop;
333 stop = &host->stop_abort;
335 memset(stop, 0, sizeof(struct mmc_command));
337 if (cmdr == MMC_READ_SINGLE_BLOCK ||
338 cmdr == MMC_READ_MULTIPLE_BLOCK ||
339 cmdr == MMC_WRITE_BLOCK ||
340 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
341 stop->opcode = MMC_STOP_TRANSMISSION;
343 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
344 } else if (cmdr == SD_IO_RW_EXTENDED) {
/* CMD52 write to CCCR ABORT register, same function number as cmd. */
345 stop->opcode = SD_IO_RW_DIRECT;
346 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
347 ((cmd->arg >> 28) & 0x7);
348 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
353 cmdr = stop->opcode | SDMMC_CMD_STOP |
354 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Write the command argument and kick off a command on the controller.
 * Special-cases CMD11 (voltage switch): disables low-power clock gating
 * and sets the VOLT_SWITCH flag before issuing. Finally sets CMD_START.
 */
359 static void dw_mci_start_command(struct dw_mci *host,
360 struct mmc_command *cmd, u32 cmd_flags)
362 struct dw_mci_slot *slot = host->slot[0];
363 /*temporality fix slot[0] due to host->num_slots equal to 1*/
365 host->pre_cmd = host->cmd;
368 "start command: ARGR=0x%08x CMDR=0x%08x\n",
369 cmd->arg, cmd_flags);
371 if(SD_SWITCH_VOLTAGE == cmd->opcode){
372 /*confirm non-low-power mode*/
373 mci_writel(host, CMDARG, 0);
374 dw_mci_disable_low_power(slot);
376 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
377 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
379 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
382 mci_writel(host, CMDARG, cmd->arg);
/* Hold register needed on some SoCs (e.g. RK3188) for timing closure. */
385 /* fix the value to 1 in some Soc,for example RK3188. */
386 if(host->mmc->hold_reg_flag)
387 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
389 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the request's stop command using the pre-computed stop_cmdr flags. */
393 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
395 dw_mci_start_command(host, data->stop, host->stop_cmdr);
398 /* DMA interface functions */
/*
 * Abort an in-flight DMA transfer and mark XFER_COMPLETE so the state
 * machine can proceed. On RK3036/RK312x (external edma) the terminate is
 * skipped to avoid triggering a flush operation.
 */
399 static void dw_mci_stop_dma(struct dw_mci *host)
401 if (host->using_dma) {
402 /* Fixme: No need to terminate edma, may cause flush op */
403 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
404 host->dma_ops->stop(host);
405 host->dma_ops->cleanup(host);
408 /* Data transfer was stopped by the interrupt handler */
409 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Map mmc_data direction flags onto the DMA-API direction enum. */
412 static int dw_mci_get_dma_dir(struct mmc_data *data)
414 if (data->flags & MMC_DATA_WRITE)
415 return DMA_TO_DEVICE;
417 return DMA_FROM_DEVICE;
420 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the scatterlist after an IDMAC transfer, unless the mapping is
 * still owned by the pre_req/post_req path (host_cookie != 0).
 */
421 static void dw_mci_dma_cleanup(struct dw_mci *host)
423 struct mmc_data *data = host->data;
426 if (!data->host_cookie)
427 dma_unmap_sg(host->dev,
430 dw_mci_get_dma_dir(data));
/* Soft-reset the internal DMA controller via the BMOD.SWR bit. */
433 static void dw_mci_idmac_reset(struct dw_mci *host)
435 u32 bmod = mci_readl(host, BMOD);
436 /* Software reset of DMA */
437 bmod |= SDMMC_IDMAC_SWRESET;
438 mci_writel(host, BMOD, bmod);
/*
 * Stop the internal DMAC: detach it from the data path (CTRL), reset the
 * DMA interface, then disable and soft-reset the IDMAC engine (BMOD).
 */
441 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
445 /* Disable and reset the IDMAC interface */
446 temp = mci_readl(host, CTRL);
447 temp &= ~SDMMC_CTRL_USE_IDMAC;
448 temp |= SDMMC_CTRL_DMA_RESET;
449 mci_writel(host, CTRL, temp);
451 /* Stop the IDMAC running */
452 temp = mci_readl(host, BMOD);
453 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
454 temp |= SDMMC_IDMAC_SWRESET;
455 mci_writel(host, BMOD, temp);
/*
 * IDMAC completion callback: clean up the mapping, mark XFER_COMPLETE and
 * schedule the tasklet that advances the request state machine. If the
 * card was removed, data may be NULL (see comment below).
 */
458 static void dw_mci_idmac_complete_dma(void *arg)
460 struct dw_mci *host = arg;
461 struct mmc_data *data = host->data;
463 dev_vdbg(host->dev, "DMA complete\n");
466 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
467 host->mrq->cmd->opcode,host->mrq->cmd->arg,
468 data->blocks,data->blksz,mmc_hostname(host->mmc));
471 host->dma_ops->cleanup(host);
474 * If the card was removed, data will be NULL. No point in trying to
475 * send the stop command or waiting for NBUSY in this case.
478 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
479 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the DMA-mapped scatterlist: one
 * chained descriptor per sg entry (OWN|DIC|CH), then tag the first with FD
 * and rewrite the last to LD with interrupt-on-completion enabled.
 * NOTE(review): the last-descriptor address arithmetic uses
 * `host->sg_cpu + (i-1) * sizeof(...)` on what appears to be a typed
 * pointer — byte-vs-element scaling cannot be confirmed from this extract.
 */
483 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
487 struct idmac_desc *desc = host->sg_cpu;
489 for (i = 0; i < sg_len; i++, desc++) {
490 unsigned int length = sg_dma_len(&data->sg[i]);
491 u32 mem_addr = sg_dma_address(&data->sg[i]);
493 /* Set the OWN bit and disable interrupts for this descriptor */
494 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
497 IDMAC_SET_BUFFER1_SIZE(desc, length);
499 /* Physical address to DMA to/from */
500 desc->des2 = mem_addr;
503 /* Set first descriptor */
505 desc->des0 |= IDMAC_DES0_FD;
507 /* Set last descriptor */
508 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
509 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
510 desc->des0 |= IDMAC_DES0_LD;
/*
 * Start an IDMAC transfer: build the descriptor list, route the data path
 * through the IDMAC (CTRL), enable the engine (BMOD) and poke the poll
 * demand register so it begins fetching descriptors.
 */
515 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
519 dw_mci_translate_sglist(host, host->data, sg_len);
521 /* Select IDMAC interface */
522 temp = mci_readl(host, CTRL);
523 temp |= SDMMC_CTRL_USE_IDMAC;
524 mci_writel(host, CTRL, temp);
528 /* Enable the IDMAC */
529 temp = mci_readl(host, BMOD);
530 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
531 mci_writel(host, BMOD, temp);
533 /* Start it running */
534 mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: forward-link a page-sized descriptor ring in the
 * coherent buffer, close the ring on the last entry (ER), reset the
 * engine, unmask only NI/RI(/TI — elided) interrupts, and program the
 * descriptor base address.
 */
537 static int dw_mci_idmac_init(struct dw_mci *host)
539 struct idmac_desc *p;
542 /* Number of descriptors in the ring buffer */
543 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
545 /* Forward link the descriptor list */
546 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
547 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
549 /* Set the last descriptor as the end-of-ring descriptor */
550 p->des3 = host->sg_dma;
551 p->des0 = IDMAC_DES0_ER;
553 dw_mci_idmac_reset(host);
555 /* Mask out interrupts - get Tx & Rx complete only */
556 mci_writel(host, IDSTS, IDMAC_INT_CLR);
557 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
560 /* Set the descriptor base address */
561 mci_writel(host, DBADDR, host->sg_dma);
/* dma_ops vtable for the internal DMA controller (IDMAC) backend. */
565 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
566 .init = dw_mci_idmac_init,
567 .start = dw_mci_idmac_start_dma,
568 .stop = dw_mci_idmac_stop_dma,
569 .complete = dw_mci_idmac_complete_dma,
570 .cleanup = dw_mci_dma_cleanup,
/*
 * Unmap the scatterlist after an external-DMA (dmaengine) transfer,
 * unless still owned by the pre_req/post_req path (host_cookie != 0).
 */
574 static void dw_mci_edma_cleanup(struct dw_mci *host)
576 struct mmc_data *data = host->data;
579 if (!data->host_cookie)
580 dma_unmap_sg(host->dev,
581 data->sg, data->sg_len,
582 dw_mci_get_dma_dir(data));
/* Abort all pending dmaengine descriptors on the external DMA channel. */
585 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
587 dmaengine_terminate_all(host->dms->ch);
/*
 * External-DMA completion callback: sync the sg list back to the CPU for
 * reads (cache invalidate), clean up the mapping, then mark
 * XFER_COMPLETE and schedule the state-machine tasklet.
 */
590 static void dw_mci_edmac_complete_dma(void *arg)
592 struct dw_mci *host = arg;
593 struct mmc_data *data = host->data;
595 dev_vdbg(host->dev, "DMA complete\n");
598 if(data->flags & MMC_DATA_READ)
599 /* Invalidate cache after read */
600 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
601 data->sg_len, DMA_FROM_DEVICE);
603 host->dma_ops->cleanup(host);
606 * If the card was removed, data will be NULL. No point in trying to
607 * send the stop command or waiting for NBUSY in this case.
610 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
611 tasklet_schedule(&host->tasklet);
/*
 * Start a transfer on the external dmaengine channel. Configures the
 * slave (FIFO address, 4-byte width, burst size matched to the FIFOTH
 * MSIZE field but capped at the edmac limit — 8 on RK3036), prepares a
 * slave-sg descriptor in the proper direction, installs
 * dw_mci_edmac_complete_dma as the completion callback, syncs caches for
 * writes, and issues the descriptor. NOTE(review): the `burst_limit`
 * assignments and several error-path lines are elided from this extract.
 */
615 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
617 struct dma_slave_config slave_config;
618 struct dma_async_tx_descriptor *desc = NULL;
619 struct scatterlist *sgl = host->data->sg;
620 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
621 u32 sg_elems = host->data->sg_len;
622 u32 fifoth_val, mburst;
626 /* Set external dma config: burst size, burst width*/
627 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
628 slave_config.src_addr = slave_config.dst_addr;
629 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
630 slave_config.src_addr_width = slave_config.dst_addr_width;
632 /* Match FIFO dma burst MSIZE with external dma config*/
633 fifoth_val = mci_readl(host, FIFOTH);
634 mburst = mszs[(fifoth_val >> 28) & 0x7];
636 /* edmac limit burst to 16, but work around for rk3036 to 8 */
637 if (unlikely(cpu_is_rk3036()))
642 slave_config.dst_maxburst = (mburst > burst_limit) ? burst_limit : mburst;
643 slave_config.src_maxburst = slave_config.dst_maxburst;
645 if(host->data->flags & MMC_DATA_WRITE){
646 slave_config.direction = DMA_MEM_TO_DEV;
647 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
649 dev_err(host->dev, "error in dw_mci edma configuration.\n");
653 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
654 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
656 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
659 /* Set dw_mci_edmac_complete_dma as callback */
660 desc->callback = dw_mci_edmac_complete_dma;
661 desc->callback_param = (void *)host;
662 dmaengine_submit(desc);
664 /* Flush cache before write */
665 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
666 sg_elems, DMA_TO_DEVICE);
667 dma_async_issue_pending(host->dms->ch);
/* Read path (DEV_TO_MEM); cache invalidate happens in the completion. */
670 slave_config.direction = DMA_DEV_TO_MEM;
671 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
673 dev_err(host->dev, "error in dw_mci edma configuration.\n");
676 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
677 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
679 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
682 /* set dw_mci_edmac_complete_dma as callback */
683 desc->callback = dw_mci_edmac_complete_dma;
684 desc->callback_param = (void *)host;
685 dmaengine_submit(desc);
686 dma_async_issue_pending(host->dms->ch);
/*
 * Allocate the external-DMA slave bookkeeping struct and request the
 * "dw_mci" dmaengine channel named in the device tree. NOTE(review): the
 * error path at line ~703 dereferences host->dms->ch for chan_id while
 * ch is NULL — looks like a NULL-deref in the failure message; return
 * statements and kfree cleanup lines are elided from this extract.
 */
690 static int dw_mci_edmac_init(struct dw_mci *host)
692 /* Request external dma channel, SHOULD decide chn in dts */
694 host->dms = (struct dw_mci_dma_slave *)kmalloc
695 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
696 if (NULL == host->dms) {
697 dev_err(host->dev, "No enough memory to alloc dms.\n");
701 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
702 if (!host->dms->ch) {
703 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
704 host->dms->ch->chan_id);
711 if (NULL != host->dms) {
/*
 * Release the external DMA channel and (on elided lines) free host->dms.
 * Note: the NULL guards are defensive; dma_release_channel requires a
 * valid channel.
 */
719 static void dw_mci_edmac_exit(struct dw_mci *host)
721 if (NULL != host->dms) {
722 if (NULL != host->dms->ch) {
723 dma_release_channel(host->dms->ch);
724 host->dms->ch = NULL;
/* dma_ops vtable for the external dmaengine (edmac) backend. */
731 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
732 .init = dw_mci_edmac_init,
733 .exit = dw_mci_edmac_exit,
734 .start = dw_mci_edmac_start_dma,
735 .stop = dw_mci_edmac_stop_dma,
736 .complete = dw_mci_edmac_complete_dma,
737 .cleanup = dw_mci_edma_cleanup,
739 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * DMA-map a data transfer's scatterlist, rejecting "complex" transfers:
 * shorter than DW_MCI_DMA_THRESHOLD bytes or with non-word-aligned sg
 * entries (caller falls back to PIO). When @next is set the mapping is
 * cached in data->host_cookie for reuse from pre_req. Returns the sg_len
 * or a negative value on rejection (error-return lines elided here).
 */
741 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
742 struct mmc_data *data,
745 struct scatterlist *sg;
746 unsigned int i, sg_len;
748 if (!next && data->host_cookie)
749 return data->host_cookie;
752 * We don't do DMA on "complex" transfers, i.e. with
753 * non-word-aligned buffers or lengths. Also, we don't bother
754 * with all the DMA setup overhead for short transfers.
756 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
762 for_each_sg(data->sg, sg, data->sg_len, i) {
763 if (sg->offset & 3 || sg->length & 3)
767 sg_len = dma_map_sg(host->dev,
770 dw_mci_get_dma_dir(data));
775 data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: pre-map the next request's data for DMA so the
 * mapping cost overlaps with the current transfer. A non-zero incoming
 * host_cookie is unexpected (stale) and is cleared first.
 */
780 static void dw_mci_pre_req(struct mmc_host *mmc,
781 struct mmc_request *mrq,
784 struct dw_mci_slot *slot = mmc_priv(mmc);
785 struct mmc_data *data = mrq->data;
787 if (!slot->host->use_dma || !data)
790 if (data->host_cookie) {
791 data->host_cookie = 0;
795 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
796 data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: undo the pre_req mapping once the request has
 * finished; host_cookie == 0 means nothing was pre-mapped.
 */
799 static void dw_mci_post_req(struct mmc_host *mmc,
800 struct mmc_request *mrq,
803 struct dw_mci_slot *slot = mmc_priv(mmc);
804 struct mmc_data *data = mrq->data;
806 if (!slot->host->use_dma || !data)
809 if (data->host_cookie)
810 dma_unmap_sg(slot->host->dev,
813 dw_mci_get_dma_dir(data));
814 data->host_cookie = 0;
/*
 * Recompute the FIFOTH register (MSIZE / RX watermark / TX watermark)
 * for the current block size when using the IDMAC: pick the largest
 * burst size that evenly divides both the block depth and the TX
 * watermark complement. NOTE(review): the loop over idx and msize
 * assignment lines are elided from this extract.
 */
817 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
819 #ifdef CONFIG_MMC_DW_IDMAC
820 unsigned int blksz = data->blksz;
821 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
822 u32 fifo_width = 1 << host->data_shift;
823 u32 blksz_depth = blksz / fifo_width, fifoth_val;
824 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
825 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
827 tx_wmark = (host->fifo_depth) / 2;
828 tx_wmark_invers = host->fifo_depth - tx_wmark;
832 * if blksz is not a multiple of the FIFO width
834 if (blksz % fifo_width) {
841 if (!((blksz_depth % mszs[idx]) ||
842 (tx_wmark_invers % mszs[idx]))) {
844 rx_wmark = mszs[idx] - 1;
849 * If idx is '0', it won't be tried
850 * Thus, initial values are uesed
853 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
854 mci_writel(host, FIFOTH, fifoth_val);
/*
 * Configure the card-read-threshold (CDTHRCTL): enabled only for HS200 /
 * SDR104 timings and when a full block fits in the FIFO; otherwise the
 * threshold is disabled. thld_size is set to blksz (see comment below).
 */
859 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
861 unsigned int blksz = data->blksz;
862 u32 blksz_depth, fifo_depth;
865 WARN_ON(!(data->flags & MMC_DATA_READ));
867 if (host->timing != MMC_TIMING_MMC_HS200 &&
868 host->timing != MMC_TIMING_UHS_SDR104)
871 blksz_depth = blksz / (1 << host->data_shift);
872 fifo_depth = host->fifo_depth;
874 if (blksz_depth > fifo_depth)
878 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
879 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
880 * Currently just choose blksz.
883 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
/* Disabled path: threshold off. */
887 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to submit @data via DMA. Maps the sg list (falling back by
 * returning non-zero if mapping is rejected — PIO path), retunes FIFOTH
 * when the block size changed, resets the DMA interface, enables DMA in
 * CTRL, masks the PIO RX/TX IRQs under slock, and starts the backend.
 * NOTE(review): return statements and the host->using_dma bookkeeping
 * are on elided lines.
 */
890 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
898 /* If we don't have a channel, we can't do DMA */
902 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
904 /* Fixme: No need terminate edma, may cause flush op */
905 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
906 host->dma_ops->stop(host);
913 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
914 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
918 * Decide the MSIZE and RX/TX Watermark.
919 * If current block size is same with previous size,
920 * no need to update fifoth.
922 if (host->prev_blksz != data->blksz)
923 dw_mci_adjust_fifoth(host, data);
926 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
928 /* Enable the DMA interface */
929 temp = mci_readl(host, CTRL);
930 temp |= SDMMC_CTRL_DMA_ENABLE;
931 mci_writel(host, CTRL, temp);
933 /* Disable RX/TX IRQs, let DMA handle it */
934 spin_lock_irqsave(&host->slock, flags);
935 temp = mci_readl(host, INTMASK);
936 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
937 mci_writel(host, INTMASK, temp);
938 spin_unlock_irqrestore(&host->slock, flags);
940 host->dma_ops->start(host, sg_len);
/*
 * Prepare a data transfer: set direction status (and read threshold for
 * reads), then try the DMA path; if dw_mci_submit_data_dma() refuses,
 * fall back to PIO — start the sg_miter, clear the partial-word buffer,
 * unmask RX/TX IRQs under slock, disable DMA in CTRL, and restore the
 * initial FIFOTH (prev_blksz = 0 forces a FIFOTH recalc next DMA).
 */
945 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
950 data->error = -EINPROGRESS;
952 //WARN_ON(host->data);
957 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
959 if (data->flags & MMC_DATA_READ) {
960 host->dir_status = DW_MCI_RECV_STATUS;
961 dw_mci_ctrl_rd_thld(host, data);
963 host->dir_status = DW_MCI_SEND_STATUS;
966 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
967 data->blocks, data->blksz, mmc_hostname(host->mmc));
969 if (dw_mci_submit_data_dma(host, data)) {
970 int flags = SG_MITER_ATOMIC;
971 if (host->data->flags & MMC_DATA_READ)
972 flags |= SG_MITER_TO_SG;
974 flags |= SG_MITER_FROM_SG;
976 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
978 host->part_buf_start = 0;
979 host->part_buf_count = 0;
981 spin_lock_irqsave(&host->slock, flag);
982 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
983 temp = mci_readl(host, INTMASK);
984 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
985 mci_writel(host, INTMASK, temp);
986 spin_unlock_irqrestore(&host->slock, flag);
988 temp = mci_readl(host, CTRL);
989 temp &= ~SDMMC_CTRL_DMA_ENABLE;
990 mci_writel(host, CTRL, temp);
993 * Use the initial fifoth_val for PIO mode.
994 * If next issued data may be transfered by DMA mode,
995 * prev_blksz should be invalidated.
997 mci_writel(host, FIFOTH, host->fifoth_val);
998 host->prev_blksz = 0;
1001 * Keep the current block size.
1002 * It will be used to decide whether to update
1003 * fifoth register next time.
1005 host->prev_blksz = data->blksz;
/*
 * Synchronously issue a "side-band" controller command (typically
 * SDMMC_CMD_UPD_CLK): first poll STATUS until data/MC busy clears (up to
 * SDMMC_WAIT_FOR_UNBUSY ms when a card is present), then write
 * CMDARG/CMD with CMD_START and poll until the controller clears the
 * start bit. Logs an error on timeout; there is no error return.
 */
1009 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1011 struct dw_mci *host = slot->host;
1012 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1013 unsigned int cmd_status = 0;
1014 #ifdef SDMMC_WAIT_FOR_UNBUSY
1016 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1018 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1020 ret = time_before(jiffies, timeout);
1021 cmd_status = mci_readl(host, STATUS);
1022 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1026 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1027 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1030 mci_writel(host, CMDARG, arg);
1032 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
/* Clock-update commands complete fast; use a shorter poll window. */
1033 if(cmd & SDMMC_CMD_UPD_CLK)
1034 timeout = jiffies + msecs_to_jiffies(50);
1036 timeout = jiffies + msecs_to_jiffies(500);
1037 while (time_before(jiffies, timeout)) {
1038 cmd_status = mci_readl(host, CMD);
1039 if (!(cmd_status & SDMMC_CMD_START))
1042 dev_err(&slot->mmc->class_dev,
1043 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1044 cmd, arg, cmd_status);
/*
 * Program the card clock and bus width for @slot. Computes the CLKDIV
 * divider from bus_hz/clock, applies several Rockchip-specific clk_mmc
 * workarounds (including a documented div==0 DDR-eMMC hazard — see the
 * long in-code note below), gates/ungates CLKENA with low-power mode
 * when SDIO interrupts are masked, and finally writes CTYPE. Each
 * divider/clock register change is latched with an UPD_CLK command via
 * mci_send_cmd(). NOTE(review): many control-flow lines (if/else arms,
 * braces) are elided from this extract; the exact branch structure
 * cannot be confirmed here.
 */
1047 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1049 struct dw_mci *host = slot->host;
1050 unsigned int tempck,clock = slot->clock;
1055 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1056 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1059 mci_writel(host, CLKENA, 0);
1060 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1061 if(host->svi_flags == 0)
1062 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1064 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1066 } else if (clock != host->current_speed || force_clkinit) {
1067 div = host->bus_hz / clock;
1068 if (host->bus_hz % clock && host->bus_hz > clock)
1070 * move the + 1 after the divide to prevent
1071 * over-clocking the card.
1075 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1077 if ((clock << div) != slot->__clk_old || force_clkinit) {
1078 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1079 dev_info(&slot->mmc->class_dev,
1080 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1081 slot->id, host->bus_hz, clock,
1084 host->set_speed = tempck;
1085 host->set_div = div;
1089 mci_writel(host, CLKENA, 0);
1090 mci_writel(host, CLKSRC, 0);
1094 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1096 if(clock <= 400*1000){
1097 MMC_DBG_BOOT_FUNC(host->mmc,
1098 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1099 clock * 2, mmc_hostname(host->mmc));
1100 /* clk_mmc will change parents to 24MHz xtal*/
1101 clk_set_rate(host->clk_mmc, clock * 2);
1104 host->set_div = div;
1108 MMC_DBG_BOOT_FUNC(host->mmc,
1109 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1110 mmc_hostname(host->mmc));
1113 MMC_DBG_ERR_FUNC(host->mmc,
1114 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1115 mmc_hostname(host->mmc));
1117 host->set_div = div;
1118 host->bus_hz = host->set_speed * 2;
1119 MMC_DBG_BOOT_FUNC(host->mmc,
1120 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1121 div, host->bus_hz, mmc_hostname(host->mmc));
1123 /* BUG may be here, come on, Linux BSP engineer looks!
1124 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1125 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1126 some oops happened like that:
1127 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1128 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1129 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1130 mmc0: new high speed DDR MMC card at address 0001
1131 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1133 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1134 mmcblk0: retrying using single block read
1135 mmcblk0: error -110 sending status command, retrying
1137 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1140 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1141 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1144 host->set_div = div;
1145 host->bus_hz = host->set_speed * 2;
1146 MMC_DBG_BOOT_FUNC(host->mmc,
1147 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1148 div, host->bus_hz, mmc_hostname(host->mmc));
/* Pre-2.40a controllers take the target rate directly; newer ones 2x. */
1151 if (host->verid < DW_MMC_240A)
1152 clk_set_rate(host->clk_mmc,(host->bus_hz));
1154 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1160 /* set clock to desired speed */
1161 mci_writel(host, CLKDIV, div);
1165 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1167 /* enable clock; only low power if no SDIO */
1168 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1170 if (host->verid < DW_MMC_240A)
1171 sdio_int = SDMMC_INT_SDIO(slot->id);
1173 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1175 if (!(mci_readl(host, INTMASK) & sdio_int))
1176 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1177 mci_writel(host, CLKENA, clk_en_a);
1181 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1182 /* keep the clock with reflecting clock dividor */
1183 slot->__clk_old = clock << div;
1186 host->current_speed = clock;
1188 if(slot->ctype != slot->pre_ctype)
1189 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1191 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1192 mmc_hostname(host->mmc));
1193 slot->pre_ctype = slot->ctype;
1195 /* Set the current slot bus width */
1196 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1199 extern struct mmc_card *this_card;
/*
 * Poll STATUS until the controller's data/MC busy bits clear, with a
 * card-type-specific timeout: SD, SDIO, eMMC defaults, and a computed
 * (much longer) timeout for eMMC (secure) erase derived from EXT_CSD
 * erase timing fields via the global `this_card`. NOTE(review): the
 * `(arg & (1<<31)) == 1` secure-erase test compares a bit-masked value
 * against 1 — it can never be true for bit 31; looks like it should be
 * `!= 0`. Cannot confirm intent from this extract.
 */
1200 static void dw_mci_wait_unbusy(struct dw_mci *host)
1203 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1204 unsigned long time_loop;
1205 unsigned int status;
1208 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1210 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1211 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1212 /* Special care for (secure)erase timeout calculation */
1214 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1217 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1218 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1219 300000 * (this_card->ext_csd.sec_erase_mult)) :
1220 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1224 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1225 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1226 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1227 timeout = SDMMC_DATA_TIMEOUT_SD;
1230 time_loop = jiffies + msecs_to_jiffies(timeout);
1232 status = mci_readl(host, STATUS);
1233 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1235 } while (time_before(jiffies, time_loop));
1240 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/*
 * Voltage-switch (CMD11) busy indication for the core.
 * Return value (per the original comment below):
 */
1243 * 0--status is busy.
1244 * 1--status is unbusy.
/* NOTE(review): the actual STATUS-register busy check appears to be on
 * elided lines; only the svi_flags toggling is visible here. */
1246 int dw_mci_card_busy(struct mmc_host *mmc)
1248 struct dw_mci_slot *slot = mmc_priv(mmc);
1249 struct dw_mci *host = slot->host;
1251 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1252 host->svi_flags, mmc_hostname(host->mmc));
1255 if(host->svi_flags == 0){
1257 host->svi_flags = 1;
1258 return host->svi_flags;
1261 host->svi_flags = 0;
1262 return host->svi_flags;
/*
 * __dw_mci_start_request - program the controller and issue one command.
 * @host: controller state
 * @slot: slot the request targets
 * @cmd:  command to send first (mrq->sbc or mrq->cmd)
 *
 * Must be called with host->lock held. Waits for the card to go unbusy,
 * resets per-request event state, programs byte/block counts for data
 * transfers, submits the data (DMA/PIO), starts the command, and
 * pre-computes the stop-command flags if the request carries one.
 */
1268 static void __dw_mci_start_request(struct dw_mci *host,
1269 struct dw_mci_slot *slot,
1270 struct mmc_command *cmd)
1272 struct mmc_request *mrq;
1273 struct mmc_data *data;
/* Legacy board hook for multiplexed slots. */
1277 if (host->pdata->select_slot)
1278 host->pdata->select_slot(slot->id);
1280 host->cur_slot = slot;
/* Do not start a new command while the card/data path is still busy. */
1283 dw_mci_wait_unbusy(host);
/* Fresh event bookkeeping for this request. */
1285 host->pending_events = 0;
1286 host->completed_events = 0;
1287 host->data_status = 0;
/* Data transfer: program timeout and transfer geometry. */
1291 dw_mci_set_timeout(host);
1292 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1293 mci_writel(host, BLKSIZ, data->blksz);
1296 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1298 /* this is the first command, send the initialization clock */
1299 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1300 cmdflags |= SDMMC_CMD_INIT;
1303 dw_mci_submit_data(host, data);
1307 dw_mci_start_command(host, cmd, cmdflags);
/* Pre-build the stop command so the ISR/tasklet can fire it quickly. */
1310 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * dw_mci_start_request - kick off the queued request on @slot.
 *
 * If the request has a set-block-count command (sbc/CMD23), that is sent
 * first; the actual data command follows from the tasklet once the sbc
 * completes. Caller holds host->lock.
 */
1313 static void dw_mci_start_request(struct dw_mci *host,
1314 struct dw_mci_slot *slot)
1316 struct mmc_request *mrq = slot->mrq;
1317 struct mmc_command *cmd;
1319 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1320 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
/* CMD23 (if present) must precede the data command. */
1322 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1323 __dw_mci_start_request(host, slot, cmd);
1326 /* must be called with host->lock held */
/*
 * dw_mci_queue_request - start @mrq immediately if the host is idle,
 * otherwise append the slot to host->queue for later dispatch by
 * dw_mci_request_end().
 */
1327 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1328 struct mmc_request *mrq)
1330 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1335 if (host->state == STATE_IDLE) {
1336 host->state = STATE_SENDING_CMD;
1337 dw_mci_start_request(host, slot);
/* Host busy: defer; request runs when the current one finishes. */
1339 list_add_tail(&slot->queue_node, &host->queue);
/*
 * dw_mci_request - mmc_host_ops.request entry point.
 *
 * Fails the request with -ENOMEDIUM if no card is present, otherwise
 * queues (or immediately starts) it. Presence check and queueing happen
 * under host->lock so a concurrent card removal cannot slip in between.
 */
1343 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1345 struct dw_mci_slot *slot = mmc_priv(mmc);
1346 struct dw_mci *host = slot->host;
1351 * The check for card presence and queueing of the request must be
1352 * atomic, otherwise the card could be removed in between and the
1353 * request wouldn't fail until another card was inserted.
1355 spin_lock_bh(&host->lock);
1357 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1358 spin_unlock_bh(&host->lock);
/* No medium: complete the request immediately with an error. */
1359 mrq->cmd->error = -ENOMEDIUM;
1360 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1361 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
1363 mmc_request_done(mmc, mrq);
1367 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1368 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1370 dw_mci_queue_request(host, slot, mrq);
1372 spin_unlock_bh(&host->lock);
/*
 * dw_mci_set_ios - mmc_host_ops.set_ios: apply bus width, timing, clock
 * and power settings requested by the MMC core.
 *
 * Before touching the controller it optionally (SDMMC_WAIT_FOR_UNBUSY)
 * polls STATUS until the card is idle; during a signal-voltage switch
 * (svi_flags == 1) a longer SD data timeout is used instead.
 */
1375 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1377 struct dw_mci_slot *slot = mmc_priv(mmc);
1378 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1379 struct dw_mci *host = slot->host;
1381 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1384 #ifdef SDMMC_WAIT_FOR_UNBUSY
1385 unsigned long time_loop;
/* Pick the unbusy-wait deadline; voltage switch needs the longer wait. */
1388 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1389 if(host->svi_flags == 1)
1390 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1392 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1394 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1397 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1398 printk("%d..%s: no card. [%s]\n", \
1399 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until both busy flags clear or the deadline passes. */
1404 ret = time_before(jiffies, time_loop);
1405 regs = mci_readl(slot->host, STATUS);
1406 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1412 printk("slot->flags = %lu ", slot->flags);
1413 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1414 if(host->svi_flags != 1)
1417 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1418 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Translate the requested bus width into the controller CTYPE encoding. */
1422 switch (ios->bus_width) {
1423 case MMC_BUS_WIDTH_4:
1424 slot->ctype = SDMMC_CTYPE_4BIT;
1426 case MMC_BUS_WIDTH_8:
1427 slot->ctype = SDMMC_CTYPE_8BIT;
1430 /* set default 1 bit mode */
1431 slot->ctype = SDMMC_CTYPE_1BIT;
1432 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode: per-slot DDR bit lives in UHS_REG[16 + slot->id]. */
1435 regs = mci_readl(slot->host, UHS_REG);
1438 if (ios->timing == MMC_TIMING_UHS_DDR50)
1439 regs |= ((0x1 << slot->id) << 16);
1441 regs &= ~((0x1 << slot->id) << 16);
1443 mci_writel(slot->host, UHS_REG, regs);
1444 slot->host->timing = ios->timing;
1447 * Use mirror of ios->clock to prevent race with mmc
1448 * core ios update when finding the minimum.
1450 slot->clock = ios->clock;
/* Give the SoC-specific glue a chance to adjust (e.g. tuning phases). */
1452 if (drv_data && drv_data->set_ios)
1453 drv_data->set_ios(slot->host, ios);
1455 /* Slot specific timing and width adjustment */
1456 dw_mci_setup_bus(slot, false);
/* Apply slot power via board callback and the PWREN register. */
1460 switch (ios->power_mode) {
1462 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1464 if (slot->host->pdata->setpower)
1465 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1466 regs = mci_readl(slot->host, PWREN);
1467 regs |= (1 << slot->id);
1468 mci_writel(slot->host, PWREN, regs);
1471 /* Power down slot */
1472 if(slot->host->pdata->setpower)
1473 slot->host->pdata->setpower(slot->id, 0);
1474 regs = mci_readl(slot->host, PWREN);
1475 regs &= ~(1 << slot->id);
1476 mci_writel(slot->host, PWREN, regs);
/*
 * dw_mci_get_ro - mmc_host_ops.get_ro: report write-protect state.
 *
 * Priority: slot quirk forcing read-write > board get_ro() callback >
 * write-protect GPIO > the controller's WRTPRT register bit for this slot.
 * Returns 1 for read-only, 0 for read-write.
 */
1483 static int dw_mci_get_ro(struct mmc_host *mmc)
1486 struct dw_mci_slot *slot = mmc_priv(mmc);
1487 struct dw_mci_board *brd = slot->host->pdata;
1489 /* Use platform get_ro function, else try on board write protect */
1490 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1492 else if(brd->get_ro)
1493 read_only = brd->get_ro(slot->id);
1494 else if(gpio_is_valid(slot->wp_gpio))
1495 read_only = gpio_get_value(slot->wp_gpio);
/* Fall back to the controller's own write-protect input. */
1498 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1500 dev_dbg(&mmc->class_dev, "card is %s\n",
1501 read_only ? "read-only" : "read-write");
/*
 * dw_mci_set_sdio_status - software card-detect for SDIO slots (e.g. WiFi
 * modules toggled by rfkill). @val non-zero marks the card present.
 *
 * Updates DW_MMC_CARD_PRESENT under host->lock, then gates the host's
 * AHB and card clocks to match the new state, and finally schedules a
 * (debounced) rescan via mmc_detect_change().
 */
1506 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1508 struct dw_mci_slot *slot = mmc_priv(mmc);
1509 struct dw_mci *host = slot->host;
1510 /*struct dw_mci_board *brd = slot->host->pdata;*/
/* Only meaningful for hosts restricted to SDIO cards. */
1512 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1515 spin_lock_bh(&host->lock);
1518 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1520 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1522 spin_unlock_bh(&host->lock);
/* Enable clocks on insert, disable on removal (idempotent via checks). */
1524 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1525 if(__clk_is_enabled(host->hclk_mmc) == false)
1526 clk_prepare_enable(host->hclk_mmc);
1527 if(__clk_is_enabled(host->clk_mmc) == false)
1528 clk_prepare_enable(host->clk_mmc);
1530 if(__clk_is_enabled(host->clk_mmc) == true)
1531 clk_disable_unprepare(slot->host->clk_mmc);
1532 if(__clk_is_enabled(host->hclk_mmc) == true)
1533 clk_disable_unprepare(slot->host->hclk_mmc);
/* Let the MMC core rescan after a 20ms debounce. */
1536 mmc_detect_change(slot->mmc, 20);
/*
 * dw_mci_get_cd - mmc_host_ops.get_cd: report card presence.
 *
 * On RK3036/RK3126(b) SD slots the card-detect pin is shared with JTAG
 * ("force_jtag" GRF bit): presence is derived from the CD GPIO level, the
 * CD IRQ trigger polarity is flipped for the next edge, and the GRF bit
 * is toggled so JTAG is only forced when no card is inserted.
 * Otherwise presence comes from: SDIO software status > board quirk >
 * board get_cd() > CD GPIO > the controller CDETECT register.
 */
1542 static int dw_mci_get_cd(struct mmc_host *mmc)
1545 struct dw_mci_slot *slot = mmc_priv(mmc);
1546 struct dw_mci_board *brd = slot->host->pdata;
1547 struct dw_mci *host = slot->host;
1548 int gpio_cd = mmc_gpio_get_cd(mmc);
1549 int force_jtag_bit, force_jtag_reg;
1553 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
1554 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1555 gpio_cd = slot->cd_gpio;
1556 irq = gpio_to_irq(gpio_cd);
1557 if (gpio_is_valid(gpio_cd)) {
1558 gpio_val = gpio_get_value(gpio_cd);
/* Per-SoC location of the force_jtag control bit in the GRF. */
1559 if (soc_is_rk3036()) {
1560 force_jtag_bit = 11;
1561 force_jtag_reg = RK312X_GRF_SOC_CON0;
1562 } else if (soc_is_rk3126() || soc_is_rk3126b()) {
/* NOTE(review): only the register is visible being set here for rk3126;
 * confirm force_jtag_bit is assigned on this path before grf_writel(). */
1563 force_jtag_reg = RK312X_GRF_SOC_CON0;
/* Debounce: only act if the pin level is stable across two reads. */
1567 if (gpio_val == gpio_get_value(gpio_cd)) {
/* Active-low CD: pin low means card inserted. */
1568 gpio_cd = gpio_get_value(gpio_cd) == 0 ? 1 : 0;
1570 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1571 /* Enable force_jtag without card in slot, ONLY for NCD-package */
1572 grf_writel((0x1 << (force_jtag_bit + 16)) | (1 << force_jtag_bit),
1575 dw_mci_ctrl_all_reset(host);
1577 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
1578 /* Really card detected: SHOULD disable force_jtag */
1579 grf_writel((0x1 << (force_jtag_bit + 16)) | (0 << force_jtag_bit),
/* Unstable level: re-arm the IRQ for the current polarity and keep
 * reporting the previous detect state. */
1584 gpio_val = gpio_get_value(gpio_cd);
1586 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
1587 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1588 return slot->last_detect_state;
1591 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
/* SDIO presence is driven purely by dw_mci_set_sdio_status(). */
1595 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1596 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1598 /* Use platform get_cd function, else try onboard card detect */
1599 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1601 else if (brd->get_cd)
1602 present = !brd->get_cd(slot->id);
1603 else if (!IS_ERR_VALUE(gpio_cd))
1606 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
/* Publish the result in slot->flags under the host lock. */
1609 spin_lock_bh(&host->lock);
1611 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1612 dev_dbg(&mmc->class_dev, "card is present\n");
1614 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1615 dev_dbg(&mmc->class_dev, "card is not present\n");
1617 spin_unlock_bh(&host->lock);
1624 * DTS should cap the eMMC controller with poll-hw-reset
/*
 * dw_mci_hw_reset - mmc_host_ops.hw_reset: hard-reset the eMMC device.
 *
 * Sequence: (1) issue CMD12 to abort any in-flight transfer, (2) wait for
 * the data-over interrupt (even if the card no longer responds), (3) reset
 * IDMAC, DMA and FIFO in that fixed order, then pulse PWREN/RST_n with
 * timings satisfying the eMMC spec (tRstW/tRSCA/tRSTH).
 *
 * BUG FIX: the FIFO-reset failure branch printed
 * "SDMMC_CTRL_DMA_RESET failed" (copy-pasted from the DMA-reset check)
 * although it tests SDMMC_CTRL_FIFO_RESET; the message now names the
 * correct reset bit so logs are not misleading.
 */
1626 static void dw_mci_hw_reset(struct mmc_host *mmc)
1628 struct dw_mci_slot *slot = mmc_priv(mmc);
1629 struct dw_mci *host = slot->host;
1634 unsigned long timeout;
1637 /* (1) CMD12 to end any transfer in process */
1638 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1639 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1641 if(host->mmc->hold_reg_flag)
1642 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1643 mci_writel(host, CMDARG, 0);
1645 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll up to 500ms for the controller to accept the command. */
1647 timeout = jiffies + msecs_to_jiffies(500);
1649 ret = time_before(jiffies, timeout);
1650 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1655 MMC_DBG_ERR_FUNC(host->mmc,
1656 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1657 __func__, mmc_hostname(host->mmc));
1659 /* (2) wait DTO, even if no response is sent back by card */
1661 timeout = jiffies + msecs_to_jiffies(5);
1663 ret = time_before(jiffies, timeout);
1664 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1665 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1671 /* (3) Reset following: DO NOT CHANGE RESET ORDER! */
1673 /* Software reset - BMOD[0] for IDMA only */
1674 regs = mci_readl(host, BMOD);
1675 regs |= SDMMC_IDMAC_SWRESET;
1676 mci_writel(host, BMOD, regs);
1677 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1678 regs = mci_readl(host, BMOD);
1679 if(regs & SDMMC_IDMAC_SWRESET)
1680 MMC_DBG_WARN_FUNC(host->mmc,
1681 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1682 __func__, mmc_hostname(host->mmc));
1684 /* DMA reset - CTRL[2] */
1685 regs = mci_readl(host, CTRL);
1686 regs |= SDMMC_CTRL_DMA_RESET;
1687 mci_writel(host, CTRL, regs);
1688 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1689 regs = mci_readl(host, CTRL);
1690 if(regs & SDMMC_CTRL_DMA_RESET)
1691 MMC_DBG_WARN_FUNC(host->mmc,
1692 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1693 __func__, mmc_hostname(host->mmc));
1695 /* FIFO reset - CTRL[1] */
1696 regs = mci_readl(host, CTRL);
1697 regs |= SDMMC_CTRL_FIFO_RESET;
1698 mci_writel(host, CTRL, regs);
1699 mdelay(1); /* no timing limited, 1ms is random value */
1700 regs = mci_readl(host, CTRL);
1701 if(regs & SDMMC_CTRL_FIFO_RESET)
1702 MMC_DBG_WARN_FUNC(host->mmc,
1703 "%s dw_mci_hw_reset: SDMMC_CTRL_FIFO_RESET failed!!! [%s]\n",
1704 __func__, mmc_hostname(host->mmc));
1707 According to eMMC spec
1708 tRstW >= 1us ; RST_n pulse width
1709 tRSCA >= 200us ; RST_n to Command time
1710 tRSTH >= 1us ; RST_n high period
/* Drive power and RST_n low, hold, then release and wait tRSCA. */
1712 mci_writel(slot->host, PWREN, 0x0);
1713 mci_writel(slot->host, RST_N, 0x0);
1715 udelay(10); /* 10us for bad quality eMMc. */
1717 mci_writel(slot->host, PWREN, 0x1);
1718 mci_writel(slot->host, RST_N, 0x1);
1720 usleep_range(500, 1000); /* at least 500(> 200us) */
1724 * Disable low power mode.
1726 * Low power mode will stop the card clock when idle. According to the
1727 * description of the CLKENA register we should disable low power mode
1728 * for SDIO cards if we need SDIO interrupts to work.
1730 * This function is fast if low power mode is already disabled.
1732 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1734 struct dw_mci *host = slot->host;
1736 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1738 clk_en_a = mci_readl(host, CLKENA);
/* Only touch the register (and issue the clock-update command) when the
 * per-slot low-power bit is actually set. */
1740 if (clk_en_a & clken_low_pwr) {
1741 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
/* CLKENA changes take effect only after an update-clock command. */
1742 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1743 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * dw_mci_enable_sdio_irq - mmc_host_ops.enable_sdio_irq.
 * @enb: non-zero to unmask, zero to mask this slot's SDIO interrupt.
 *
 * The per-slot SDIO interrupt bit moved in IP version 2.40a: older
 * controllers use INT_SDIO(slot->id), newer ones INT_SDIO(slot->id + 8).
 * Enabling also forces low-power clock gating off, since a gated card
 * clock would prevent SDIO interrupt delivery.
 */
1747 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1749 struct dw_mci_slot *slot = mmc_priv(mmc);
1750 struct dw_mci *host = slot->host;
1751 unsigned long flags;
1755 spin_lock_irqsave(&host->slock, flags);
1757 /* Enable/disable Slot Specific SDIO interrupt */
1758 int_mask = mci_readl(host, INTMASK);
1760 if (host->verid < DW_MMC_240A)
1761 sdio_int = SDMMC_INT_SDIO(slot->id);
1763 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1767 * Turn off low power mode if it was enabled. This is a bit of
1768 * a heavy operation and we disable / enable IRQs a lot, so
1769 * we'll leave low power mode disabled and it will get
1770 * re-enabled again in dw_mci_setup_bus().
1772 dw_mci_disable_low_power(slot);
1774 mci_writel(host, INTMASK,
1775 (int_mask | sdio_int));
1777 mci_writel(host, INTMASK,
1778 (int_mask & ~sdio_int));
1781 spin_unlock_irqrestore(&host->slock, flags);
1784 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* IO-domain voltage selectors, in millivolts. */
1786 IO_DOMAIN_12 = 1200,
1787 IO_DOMAIN_18 = 1800,
1788 IO_DOMAIN_33 = 3300,
/*
 * dw_mci_do_grf_io_domain_switch - program the SoC GRF so the SDMMC IO
 * pads match the new signalling voltage (@voltage: IO_DOMAIN_* value).
 *
 * RK3288 writes RK3288_GRF_IO_VSEL directly; RK3368 goes through the
 * syscon regmap at offset 0x900. Unsupported voltages or unknown chips
 * only log an error. Bits 16..31 of the GRF word are the write-enable
 * mask for the corresponding low bits (Rockchip GRF convention).
 */
1790 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1800 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1801 __FUNCTION__, mmc_hostname(host->mmc));
1804 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1805 __FUNCTION__, mmc_hostname(host->mmc));
1809 if (cpu_is_rk3288()) {
1810 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1811 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1814 } else if (host->cid == DW_MCI_TYPE_RK3368) {
1815 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1816 regmap_write(host->grf, 0x900, (voltage << 6) | (1 << 22));
1820 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1821 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * dw_mci_do_start_signal_voltage_switch - switch pad/regulator voltage.
 *
 * For 3.3V/1.8V: set the vmmc regulator, reprogram the GRF IO domain,
 * update UHS_REG's 1.8V enable bit, then wait ~5ms and verify the bit
 * stuck. 1.2V only sets the regulator. Controllers older than 2.40a do
 * not support switching and return early.
 */
1825 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1826 struct mmc_ios *ios)
1829 unsigned int value,uhs_reg;
1832 * Signal Voltage Switching is only applicable for Host Controllers
1835 if (host->verid < DW_MMC_240A)
1838 uhs_reg = mci_readl(host, UHS_REG);
1839 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1840 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1842 switch (ios->signal_voltage) {
1843 case MMC_SIGNAL_VOLTAGE_330:
1844 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1846 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1847 /* regulator_put(host->vmmc); //to be done in remove function. */
1849 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1850 __func__, regulator_get_voltage(host->vmmc), ret);
1852 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1853 " failed\n", mmc_hostname(host->mmc));
/* Switch the SoC IO pads to the 3.3V domain. */
1856 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1858 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1859 __FUNCTION__, mmc_hostname(host->mmc));
1861 /* set High-power mode */
1862 value = mci_readl(host, CLKENA);
1863 value &= ~SDMMC_CLKEN_LOW_PWR;
1864 mci_writel(host,CLKENA , value);
/* Clear the 1.8V enable bit for 3.3V operation. */
1866 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1867 mci_writel(host,UHS_REG , uhs_reg);
1870 usleep_range(5000, 5500);
1872 /* 3.3V regulator output should be stable within 5 ms */
1873 uhs_reg = mci_readl(host, UHS_REG);
1874 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1877 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1878 mmc_hostname(host->mmc));
1881 case MMC_SIGNAL_VOLTAGE_180:
1883 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1884 /* regulator_put(host->vmmc);//to be done in remove function. */
1886 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1887 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1889 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1890 " failed\n", mmc_hostname(host->mmc));
/* Switch the SoC IO pads to the 1.8V domain. */
1893 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1897 * Enable 1.8V Signal Enable in the Host Control2
1900 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1903 usleep_range(5000, 5500);
1904 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1905 __FUNCTION__,mmc_hostname(host->mmc));
1907 /* 1.8V regulator output should be stable within 5 ms */
1908 uhs_reg = mci_readl(host, UHS_REG);
1909 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1912 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1913 mmc_hostname(host->mmc));
1916 case MMC_SIGNAL_VOLTAGE_120:
1918 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1920 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1921 " failed\n", mmc_hostname(host->mmc));
1927 /* No signal voltage switch required */
/*
 * dw_mci_start_signal_voltage_switch - mmc_host_ops wrapper around
 * dw_mci_do_start_signal_voltage_switch(); no-op (early return) on
 * controller IP older than 2.40a, which cannot switch voltage.
 */
1933 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1934 struct mmc_ios *ios)
1936 struct dw_mci_slot *slot = mmc_priv(mmc);
1937 struct dw_mci *host = slot->host;
1940 if (host->verid < DW_MMC_240A)
1943 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * dw_mci_execute_tuning - mmc_host_ops.execute_tuning.
 * @opcode: MMC_SEND_TUNING_BLOCK (SD/SDIO) or MMC_SEND_TUNING_BLOCK_HS200.
 *
 * Selects the reference tuning-block pattern matching the bus width,
 * picks the clock controller id (con_id) by card type, and delegates the
 * actual phase sweep to the SoC glue's execute_tuning() hook.
 * Skipped entirely on RK3036/RK312x, which lack a 1.8V IO domain.
 */
1949 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1951 struct dw_mci_slot *slot = mmc_priv(mmc);
1952 struct dw_mci *host = slot->host;
1953 const struct dw_mci_drv_data *drv_data = host->drv_data;
1954 struct dw_mci_tuning_data tuning_data;
1957 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1958 if(cpu_is_rk3036() || cpu_is_rk312x())
1961 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1962 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1963 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1964 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1965 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1966 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1967 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1971 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1972 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1973 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1976 "Undefined command(%d) for tuning\n", opcode);
1981 /* Recommend sample phase and delayline
1982 Fixme: Mix-use these three controllers will cause
/* Clock-controller connection id: 3 = eMMC, 1 = SDIO, 0 = SD. */
1985 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1986 tuning_data.con_id = 3;
1987 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1988 tuning_data.con_id = 1;
1990 tuning_data.con_id = 0;
1992 /* 0: driver, from host->devices
1993 1: sample, from devices->host
1995 tuning_data.tuning_type = 1;
1997 if (drv_data && drv_data->execute_tuning)
1998 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/*
 * dw_mci_post_tmo - recovery hook invoked by the core after a request
 * timeout: drop the in-flight request and force the state machine idle
 * so a new request can be dispatched.
 */
2003 static void dw_mci_post_tmo(struct mmc_host *mmc)
2005 struct dw_mci_slot *slot = mmc_priv(mmc);
2006 struct dw_mci *host = slot->host;
2007 host->cur_slot->mrq = NULL;
2009 host->state = STATE_IDLE;
/* Host operations exported to the MMC core for every dw_mci slot. */
2012 static const struct mmc_host_ops dw_mci_ops = {
2013 .request = dw_mci_request,
2014 .pre_req = dw_mci_pre_req,
2015 .post_req = dw_mci_post_req,
2016 .set_ios = dw_mci_set_ios,
2017 .get_ro = dw_mci_get_ro,
2018 .get_cd = dw_mci_get_cd,
2019 .set_sdio_status = dw_mci_set_sdio_status,
2020 .hw_reset = dw_mci_hw_reset,
2021 .enable_sdio_irq = dw_mci_enable_sdio_irq,
2022 .execute_tuning = dw_mci_execute_tuning,
2023 .post_tmo = dw_mci_post_tmo,
/* Voltage-switch callbacks only exist when the Kconfig option is on. */
2024 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
2025 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
2026 .card_busy = dw_mci_card_busy,
/*
 * dw_mci_enable_irq - enable (@irqflag true) or disable the host IRQ line,
 * tracking the current state in host->irq_state so repeated calls with
 * the same flag do not unbalance enable_irq()/disable_irq() nesting.
 * Runs with local interrupts saved/restored.
 */
2031 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
2033 unsigned long flags;
2038 local_irq_save(flags);
/* Only act on an actual state change to keep the nesting depth at 0/1. */
2039 if(host->irq_state != irqflag)
2041 host->irq_state = irqflag;
2044 enable_irq(host->irq);
2048 disable_irq(host->irq);
2051 local_irq_restore(flags);
/*
 * dw_mci_deal_data_end - post-transfer cleanup before completing @mrq.
 *
 * For host->card writes (DW_MCI_SEND_STATUS direction), except the bus
 * test command (CMD19, MMC_BUS_TEST_W), map the raw data_status bits to
 * an error code (DCRC -> -EILSEQ, EBE -> -ETIMEDOUT) and wait for the
 * card to finish programming; reads just wait for unbusy.
 */
2055 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2056 __releases(&host->lock)
2057 __acquires(&host->lock)
2059 if(DW_MCI_SEND_STATUS == host->dir_status){
2061 if( MMC_BUS_TEST_W != host->cmd->opcode){
2062 if(host->data_status & SDMMC_INT_DCRC)
2063 host->data->error = -EILSEQ;
2064 else if(host->data_status & SDMMC_INT_EBE)
2065 host->data->error = -ETIMEDOUT;
2067 dw_mci_wait_unbusy(host);
2070 dw_mci_wait_unbusy(host);
/*
 * dw_mci_request_end - finish @mrq and dispatch the next queued request.
 *
 * Called with host->lock held (temporarily dropped around
 * mmc_request_done(), as the sparse annotations document). If other
 * slots are queued, the next one is started and the state machine stays
 * in STATE_SENDING_CMD; otherwise it returns to STATE_IDLE.
 */
2075 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2076 __releases(&host->lock)
2077 __acquires(&host->lock)
2079 struct dw_mci_slot *slot;
2080 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2082 //WARN_ON(host->cmd || host->data);
2084 dw_mci_deal_data_end(host, mrq);
2087 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2088 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2090 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2091 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2093 host->cur_slot->mrq = NULL;
/* Dispatch the next waiting slot, if any. */
2095 if (!list_empty(&host->queue)) {
2096 slot = list_entry(host->queue.next,
2097 struct dw_mci_slot, queue_node);
2098 list_del(&slot->queue_node);
2099 dev_vdbg(host->dev, "list not empty: %s is next\n",
2100 mmc_hostname(slot->mmc));
2101 host->state = STATE_SENDING_CMD;
2102 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2103 dw_mci_start_request(host, slot);
2105 dev_vdbg(host->dev, "list empty\n");
2106 host->state = STATE_IDLE;
/* mmc_request_done() may re-enter the driver; drop the lock around it. */
2109 spin_unlock(&host->lock);
2110 mmc_request_done(prev_mmc, mrq);
2111 spin_lock(&host->lock);
/*
 * dw_mci_command_complete - read the response registers for @cmd and
 * translate accumulated cmd_status interrupt bits into cmd->error
 * (RTO -> -ETIMEDOUT, RCRC with CRC-checked response -> -EILSEQ,
 * RESP_ERR -> error). Long (136-bit) responses are read RESP0..RESP3
 * into resp[3..0].
 */
2114 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2116 u32 status = host->cmd_status;
2118 host->cmd_status = 0;
2120 /* Read the response from the card (up to 16 bytes) */
2121 if (cmd->flags & MMC_RSP_PRESENT) {
2122 if (cmd->flags & MMC_RSP_136) {
2123 cmd->resp[3] = mci_readl(host, RESP0);
2124 cmd->resp[2] = mci_readl(host, RESP1);
2125 cmd->resp[1] = mci_readl(host, RESP2);
2126 cmd->resp[0] = mci_readl(host, RESP3);
2128 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2129 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2131 cmd->resp[0] = mci_readl(host, RESP0);
2135 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2136 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
/* Response timeout: SDIO hosts get special handling (gap in view). */
2140 if (status & SDMMC_INT_RTO)
2142 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2145 cmd->error = -ETIMEDOUT;
2146 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2147 cmd->error = -EILSEQ;
2148 }else if (status & SDMMC_INT_RESP_ERR){
2153 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2154 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* Suppress repeated-RTO log spam except for CMD13 polling. */
2157 if(MMC_SEND_STATUS != cmd->opcode)
2158 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2159 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2160 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2164 /* newer ip versions need a delay between retries */
2165 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * dw_mci_tasklet_func - bottom-half state machine driving a request.
 *
 * Runs under host->lock and loops while the state keeps advancing:
 * STATE_SENDING_CMD -> STATE_SENDING_DATA -> STATE_DATA_BUSY ->
 * STATE_SENDING_STOP (or STATE_DATA_ERROR on a data fault), consuming
 * EVENT_* bits set by the interrupt handler. Errors abort DMA and issue
 * a stop/abort command; successful requests complete via
 * dw_mci_request_end().
 */
2171 static void dw_mci_tasklet_func(unsigned long priv)
2173 struct dw_mci *host = (struct dw_mci *)priv;
2174 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2175 struct mmc_data *data;
2176 struct mmc_command *cmd;
2177 enum dw_mci_state state;
2178 enum dw_mci_state prev_state;
2179 u32 status, cmd_flags;
2180 unsigned long timeout = 0;
2183 spin_lock(&host->lock);
2185 state = host->state;
2195 case STATE_SENDING_CMD:
2196 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2197 &host->pending_events))
2202 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2203 dw_mci_command_complete(host, cmd);
/* CMD23 done: now launch the real data command. */
2204 if (cmd == host->mrq->sbc && !cmd->error) {
2205 prev_state = state = STATE_SENDING_CMD;
2206 __dw_mci_start_request(host, host->cur_slot,
/* Command failed with data attached: abort DMA and send stop. */
2211 if (cmd->data && cmd->error) {
2212 dw_mci_stop_dma(host);
2215 send_stop_cmd(host, data);
2216 state = STATE_SENDING_STOP;
2219 /* host->data = NULL; */
2222 send_stop_abort(host, data);
2223 state = STATE_SENDING_STOP;
2226 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
/* No data phase (or failed command): request is finished. */
2229 if (!host->mrq->data || cmd->error) {
2230 dw_mci_request_end(host, host->mrq);
2234 prev_state = state = STATE_SENDING_DATA;
2237 case STATE_SENDING_DATA:
2238 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2239 dw_mci_stop_dma(host);
2242 send_stop_cmd(host, data);
2244 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2245 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2246 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
2248 mci_writel(host, CMDARG, 0);
2250 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2251 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2253 if(host->mmc->hold_reg_flag)
2254 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2256 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait up to 500ms for the manual CMD12 to be accepted. */
2258 timeout = jiffies + msecs_to_jiffies(500);
2261 ret = time_before(jiffies, timeout);
2262 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2266 MMC_DBG_ERR_FUNC(host->mmc,
2267 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2268 __func__, mmc_hostname(host->mmc));
2271 send_stop_abort(host, data);
2273 state = STATE_DATA_ERROR;
2277 MMC_DBG_CMD_FUNC(host->mmc,
2278 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2279 prev_state,state, mmc_hostname(host->mmc));
2281 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2282 &host->pending_events))
2284 MMC_DBG_INFO_FUNC(host->mmc,
2285 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2286 prev_state,state,mmc_hostname(host->mmc));
2288 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2289 prev_state = state = STATE_DATA_BUSY;
2292 case STATE_DATA_BUSY:
2293 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2294 &host->pending_events))
2297 dw_mci_deal_data_end(host, host->mrq);
2298 MMC_DBG_INFO_FUNC(host->mmc,
2299 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2300 prev_state,state,mmc_hostname(host->mmc));
2302 /* host->data = NULL; */
2303 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2304 status = host->data_status;
/* Map raw data-error interrupt bits to errno values. */
2306 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2307 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2308 MMC_DBG_ERR_FUNC(host->mmc,
2309 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2310 prev_state,state, status, mmc_hostname(host->mmc));
2312 if (status & SDMMC_INT_DRTO) {
2313 data->error = -ETIMEDOUT;
2314 } else if (status & SDMMC_INT_DCRC) {
2315 data->error = -EILSEQ;
2316 } else if (status & SDMMC_INT_EBE &&
2317 host->dir_status == DW_MCI_SEND_STATUS){
2319 * No data CRC status was returned.
2320 * The number of bytes transferred will
2321 * be exaggerated in PIO mode.
2323 data->bytes_xfered = 0;
2324 data->error = -ETIMEDOUT;
2333 * After an error, there may be data lingering
2334 * in the FIFO, so reset it - doing so
2335 * generates a block interrupt, hence setting
2336 * the scatter-gather pointer to NULL.
2338 dw_mci_fifo_reset(host);
/* Success: report the full transfer length. */
2340 data->bytes_xfered = data->blocks * data->blksz;
2345 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2346 prev_state,state,mmc_hostname(host->mmc));
2347 dw_mci_request_end(host, host->mrq);
2350 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2351 prev_state,state,mmc_hostname(host->mmc));
/* CMD23 requests auto-stop: no explicit CMD12 needed on success. */
2353 if (host->mrq->sbc && !data->error) {
2354 data->stop->error = 0;
2356 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2357 prev_state,state,mmc_hostname(host->mmc));
2359 dw_mci_request_end(host, host->mrq);
2363 prev_state = state = STATE_SENDING_STOP;
2365 send_stop_cmd(host, data);
2367 if (data->stop && !data->error) {
2368 /* stop command for open-ended transfer*/
2370 send_stop_abort(host, data);
2374 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2375 prev_state,state,mmc_hostname(host->mmc));
2377 case STATE_SENDING_STOP:
2378 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2381 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2382 prev_state, state, mmc_hostname(host->mmc));
2384 /* CMD error in data command */
2385 if (host->mrq->cmd->error && host->mrq->data) {
2386 dw_mci_fifo_reset(host);
2390 host->data = NULL; */
2392 dw_mci_command_complete(host, host->mrq->stop);
2394 if (host->mrq->stop)
2395 dw_mci_command_complete(host, host->mrq->stop);
2397 host->cmd_status = 0;
2400 dw_mci_request_end(host, host->mrq);
2403 case STATE_DATA_ERROR:
2404 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2405 &host->pending_events))
2408 state = STATE_DATA_BUSY;
/* Loop until no state transition happened in this pass. */
2411 } while (state != prev_state);
2413 host->state = state;
2415 spin_unlock(&host->lock);
2419 /* push final bytes to part_buf, only use during push */
/* Stash the trailing @cnt bytes (< FIFO word size) of @buf in
 * host->part_buf; they are flushed by a later push or at end of data. */
2420 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2422 memcpy((void *)&host->part_buf, buf, cnt);
2423 host->part_buf_count = cnt;
2426 /* append bytes to part_buf, only use during push */
/* Append up to a full FIFO word of @buf into part_buf; returns the number
 * of bytes actually consumed (data_shift encodes the FIFO word size). */
2427 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2429 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2430 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2431 host->part_buf_count += cnt;
2436 /* pull first bytes from part_buf, only use during pull */
/* Copy up to @cnt buffered bytes out of part_buf into @buf, advancing
 * part_buf_start; returns the number of bytes delivered. */
2437 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2438 cnt = min(cnt, (int)host->part_buf_count);
2440 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2442 host->part_buf_count -= cnt;
2443 host->part_buf_start += cnt;
2448 /* pull final bytes from the part_buf, assuming it's just been filled */
/* Take the first @cnt bytes of a freshly read FIFO word; the remainder
 * of the word stays in part_buf for the next pull. */
2449 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2451 memcpy(buf, &host->part_buf, cnt);
2452 host->part_buf_start = cnt;
2453 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * dw_mci_push_data16 - PIO write path for a 16-bit wide FIFO.
 *
 * Drains any partial word left in part_buf first, handles an odd-aligned
 * source buffer by bouncing through an aligned scratch array (when the
 * arch lacks efficient unaligned access), writes whole 16-bit words, and
 * parks a trailing odd byte in part_buf — flushing it immediately if it
 * is the last byte of the transfer.
 */
2456 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2458 struct mmc_data *data = host->data;
2461 /* try and push anything in the part_buf */
2462 if (unlikely(host->part_buf_count)) {
2463 int len = dw_mci_push_part_bytes(host, buf, cnt);
2466 if (host->part_buf_count == 2) {
2467 mci_writew(host, DATA(host->data_offset),
2469 host->part_buf_count = 0;
2472 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2473 if (unlikely((unsigned long)buf & 0x1)) {
2475 u16 aligned_buf[64];
2476 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2477 int items = len >> 1;
2479 /* memcpy from input buffer into aligned buffer */
2480 memcpy(aligned_buf, buf, len);
2483 /* push data from aligned buffer into fifo */
2484 for (i = 0; i < items; ++i)
2485 mci_writew(host, DATA(host->data_offset),
/* Aligned fast path: write 16-bit words straight from the buffer. */
2492 for (; cnt >= 2; cnt -= 2)
2493 mci_writew(host, DATA(host->data_offset), *pdata++);
2496 /* put anything remaining in the part_buf */
2498 dw_mci_set_part_bytes(host, buf, cnt);
2499 /* Push data if we have reached the expected data length */
2500 if ((data->bytes_xfered + init_cnt) ==
2501 (data->blksz * data->blocks))
2502 mci_writew(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data16 - PIO read path for a 16-bit wide FIFO.
 *
 * Mirrors dw_mci_push_data16: bounces through an aligned scratch array
 * for odd-aligned destinations, reads whole 16-bit words, and leaves a
 * trailing odd byte in part_buf via dw_mci_pull_final_bytes().
 */
2507 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2509 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2510 if (unlikely((unsigned long)buf & 0x1)) {
2512 /* pull data from fifo into aligned buffer */
2513 u16 aligned_buf[64];
2514 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2515 int items = len >> 1;
2517 for (i = 0; i < items; ++i)
2518 aligned_buf[i] = mci_readw(host,
2519 DATA(host->data_offset));
2520 /* memcpy from aligned buffer into output buffer */
2521 memcpy(buf, aligned_buf, len);
/* Aligned fast path: read 16-bit words straight into the buffer. */
2529 for (; cnt >= 2; cnt -= 2)
2530 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Odd trailing byte: read one more word and buffer the leftover. */
2534 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2535 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit wide data FIFO.  Same structure as
 * dw_mci_push_data16() but with 4-byte FIFO words.
 */
2539 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2541 struct mmc_data *data = host->data;
2544 /* try and push anything in the part_buf */
2545 if (unlikely(host->part_buf_count)) {
2546 int len = dw_mci_push_part_bytes(host, buf, cnt);
/* part_buf became a complete 32-bit word: write it out and clear it. */
2549 if (host->part_buf_count == 4) {
2550 mci_writel(host, DATA(host->data_offset),
2552 host->part_buf_count = 0;
/* Misaligned source: bounce through an aligned stack buffer. */
2555 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2556 if (unlikely((unsigned long)buf & 0x3)) {
2558 u32 aligned_buf[32];
2559 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2560 int items = len >> 2;
2562 /* memcpy from input buffer into aligned buffer */
2563 memcpy(aligned_buf, buf, len);
2566 /* push data from aligned buffer into fifo */
2567 for (i = 0; i < items; ++i)
2568 mci_writel(host, DATA(host->data_offset),
/* Aligned fast path: write words straight from the caller's buffer. */
2575 for (; cnt >= 4; cnt -= 4)
2576 mci_writel(host, DATA(host->data_offset), *pdata++);
2579 /* put anything remaining in the part_buf */
2581 dw_mci_set_part_bytes(host, buf, cnt);
2582 /* Push data if we have reached the expected data length */
2583 if ((data->bytes_xfered + init_cnt) ==
2584 (data->blksz * data->blocks))
2585 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit wide data FIFO.  Same structure as
 * dw_mci_pull_data16() but with 4-byte FIFO words.
 */
2590 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2592 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2593 if (unlikely((unsigned long)buf & 0x3)) {
2595 /* pull data from fifo into aligned buffer */
2596 u32 aligned_buf[32];
2597 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2598 int items = len >> 2;
2600 for (i = 0; i < items; ++i)
2601 aligned_buf[i] = mci_readl(host,
2602 DATA(host->data_offset));
2603 /* memcpy from aligned buffer into output buffer */
2604 memcpy(buf, aligned_buf, len);
/* Aligned fast path: read words straight into the caller's buffer. */
2612 for (; cnt >= 4; cnt -= 4)
2613 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Tail of < 4 bytes: read one more FIFO word, hand out cnt bytes and
 * stage the remainder in part_buf. */
2617 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2618 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 64-bit wide data FIFO.  Same structure as
 * dw_mci_push_data16() but with 8-byte FIFO words.
 */
2622 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2624 struct mmc_data *data = host->data;
2627 /* try and push anything in the part_buf */
2628 if (unlikely(host->part_buf_count)) {
2629 int len = dw_mci_push_part_bytes(host, buf, cnt);
/* part_buf became a complete 64-bit word: write it out and clear it. */
2633 if (host->part_buf_count == 8) {
2634 mci_writeq(host, DATA(host->data_offset),
2636 host->part_buf_count = 0;
/* Misaligned source: bounce through an aligned stack buffer. */
2639 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2640 if (unlikely((unsigned long)buf & 0x7)) {
2642 u64 aligned_buf[16];
2643 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2644 int items = len >> 3;
2646 /* memcpy from input buffer into aligned buffer */
2647 memcpy(aligned_buf, buf, len);
2650 /* push data from aligned buffer into fifo */
2651 for (i = 0; i < items; ++i)
2652 mci_writeq(host, DATA(host->data_offset),
/* Aligned fast path: write 64-bit words straight from the caller's buffer. */
2659 for (; cnt >= 8; cnt -= 8)
2660 mci_writeq(host, DATA(host->data_offset), *pdata++);
2663 /* put anything remaining in the part_buf */
2665 dw_mci_set_part_bytes(host, buf, cnt);
2666 /* Push data if we have reached the expected data length */
2667 if ((data->bytes_xfered + init_cnt) ==
2668 (data->blksz * data->blocks))
2669 mci_writeq(host, DATA(host->data_offset),
/*
 * PIO pull for a 64-bit wide data FIFO.  Same structure as
 * dw_mci_pull_data16() but with 8-byte FIFO words.
 */
2674 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2676 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2677 if (unlikely((unsigned long)buf & 0x7)) {
2679 /* pull data from fifo into aligned buffer */
2680 u64 aligned_buf[16];
2681 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2682 int items = len >> 3;
2684 for (i = 0; i < items; ++i)
2685 aligned_buf[i] = mci_readq(host,
2686 DATA(host->data_offset));
2687 /* memcpy from aligned buffer into output buffer */
2688 memcpy(buf, aligned_buf, len);
/* Aligned fast path: read 64-bit words straight into the caller's buffer. */
2696 for (; cnt >= 8; cnt -= 8)
2697 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* Tail of < 8 bytes: read one more FIFO word, hand out cnt bytes and
 * stage the remainder in part_buf. */
2701 host->part_buf = mci_readq(host, DATA(host->data_offset));
2702 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Top-level PIO pull: first drain any bytes staged in part_buf, then
 * let the width-specific handler (host->pull_data, one of the
 * dw_mci_pull_dataXX functions) read the rest from the FIFO.
 */
2706 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2710 /* get remaining partial bytes */
2711 len = dw_mci_pull_part_bytes(host, buf, cnt);
/* Request fully satisfied from the staged bytes: nothing to read. */
2712 if (unlikely(len == cnt))
2717 /* get the rest of the data */
2718 host->pull_data(host, buf, cnt);
/*
 * PIO receive path: drain the RX FIFO into the request's scatterlist
 * via the sg_miter.  @dto is true when called from the DATA_OVER
 * interrupt, in which case the loop also continues while the FIFO
 * still reports residual data.
 */
2721 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2723 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2725 unsigned int offset;
2726 struct mmc_data *data = host->data;
2727 int shift = host->data_shift;
2730 unsigned int remain, fcnt;
/* Rockchip defensive check: bail out noisily if the host has already
 * been released (bus_refs dropped to zero). */
2732 if(!host->mmc->bus_refs){
2733 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2737 if (!sg_miter_next(sg_miter))
2740 host->sg = sg_miter->piter.sg;
2741 buf = sg_miter->addr;
2742 remain = sg_miter->length;
/* Bytes available = FIFO fill level (in words, scaled to bytes by
 * data_shift) plus anything staged in part_buf. */
2746 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2747 << shift) + host->part_buf_count;
2748 len = min(remain, fcnt);
2751 dw_mci_pull_data(host, (void *)(buf + offset), len);
2752 data->bytes_xfered += len;
2757 sg_miter->consumed = offset;
2758 status = mci_readl(host, MINTSTS);
2759 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2760 /* if the RXDR is ready read again */
2761 } while ((status & SDMMC_INT_RXDR) ||
2762 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2765 if (!sg_miter_next(sg_miter))
2767 sg_miter->consumed = 0;
2769 sg_miter_stop(sg_miter);
2771 sg_miter_stop(sg_miter);
2777 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO transmit path: feed the TX FIFO from the request's scatterlist
 * via the sg_miter, looping while the controller keeps raising TXDR.
 */
2780 static void dw_mci_write_data_pio(struct dw_mci *host)
2782 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2784 unsigned int offset;
2785 struct mmc_data *data = host->data;
2786 int shift = host->data_shift;
2789 unsigned int fifo_depth = host->fifo_depth;
2790 unsigned int remain, fcnt;
/* Rockchip defensive check: bail out noisily if the host has already
 * been released (bus_refs dropped to zero). */
2792 if(!host->mmc->bus_refs){
2793 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2798 if (!sg_miter_next(sg_miter))
2801 host->sg = sg_miter->piter.sg;
2802 buf = sg_miter->addr;
2803 remain = sg_miter->length;
/* Free FIFO space in bytes = (depth - fill level) scaled by the word
 * size, minus whatever is already staged in part_buf. */
2807 fcnt = ((fifo_depth -
2808 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2809 << shift) - host->part_buf_count;
2810 len = min(remain, fcnt);
2813 host->push_data(host, (void *)(buf + offset), len);
2814 data->bytes_xfered += len;
2819 sg_miter->consumed = offset;
2820 status = mci_readl(host, MINTSTS);
2821 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2822 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2825 if (!sg_miter_next(sg_miter))
2827 sg_miter->consumed = 0;
2829 sg_miter_stop(sg_miter);
2833 sg_miter_stop(sg_miter);
2837 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Record the first command-phase interrupt status (later statuses do
 * not overwrite it), mark the command complete, and defer the real
 * work to the state-machine tasklet.
 */
2840 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2842 if (!host->cmd_status)
2843 host->cmd_status = status;
2850 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2851 tasklet_schedule(&host->tasklet);
/*
 * Top-half interrupt handler: acknowledge each pending MINTSTS source,
 * record status for the tasklet, and perform only the minimal in-IRQ
 * work (PIO FIFO service, SDIO IRQ signalling, card-detect kick).
 */
2854 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2856 struct dw_mci *host = dev_id;
2857 u32 pending, sdio_int;
2860 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2863 * DTO fix - version 2.10a and below, and only if internal DMA
/* Controller quirk: on affected versions, synthesize DATA_OVER when the
 * FIFO count in STATUS says the transfer has in fact finished. */
2866 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2868 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2869 pending |= SDMMC_INT_DATA_OVER;
/* Command-phase errors: latch status and flag completion for the tasklet. */
2873 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2874 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2875 host->cmd_status = pending;
2877 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2878 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2880 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
/* Data-phase errors: latch status and schedule the tasklet directly. */
2883 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2884 /* if there is an error report DATA_ERROR */
2885 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2886 host->data_status = pending;
2888 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2890 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2891 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2892 tasklet_schedule(&host->tasklet);
/* Data transfer finished: for PIO reads, drain any residue now. */
2895 if (pending & SDMMC_INT_DATA_OVER) {
2896 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2897 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2898 if (!host->data_status)
2899 host->data_status = pending;
2901 if (host->dir_status == DW_MCI_RECV_STATUS) {
2902 if (host->sg != NULL)
2903 dw_mci_read_data_pio(host, true);
2905 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2906 tasklet_schedule(&host->tasklet);
/* RX/TX FIFO watermark interrupts: service the FIFO in PIO mode. */
2909 if (pending & SDMMC_INT_RXDR) {
2910 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2911 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2912 dw_mci_read_data_pio(host, false);
2915 if (pending & SDMMC_INT_TXDR) {
2916 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2917 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2918 dw_mci_write_data_pio(host);
/* Voltage-switch interrupt (UHS signalling): treated like command done. */
2921 if (pending & SDMMC_INT_VSI) {
2922 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2923 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2924 dw_mci_cmd_interrupt(host, pending);
2927 if (pending & SDMMC_INT_CMD_DONE) {
2928 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2929 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2930 dw_mci_cmd_interrupt(host, pending);
/* Card-detect: hold a wake lock briefly and hand off to the card workqueue. */
2933 if (pending & SDMMC_INT_CD) {
2934 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2935 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
2936 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2937 queue_work(host->card_workqueue, &host->card_work);
/* Hardware-locked write error: just ack and log. */
2940 if (pending & SDMMC_INT_HLE) {
2941 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2942 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2946 /* Handle SDIO Interrupts */
2947 for (i = 0; i < host->num_slots; i++) {
2948 struct dw_mci_slot *slot = host->slot[i];
/* The per-slot SDIO interrupt bit moved in IP version 2.40a. */
2950 if (host->verid < DW_MMC_240A)
2951 sdio_int = SDMMC_INT_SDIO(i);
2953 sdio_int = SDMMC_INT_SDIO(i + 8);
2955 if (pending & sdio_int) {
2956 mci_writel(host, RINTSTS, sdio_int);
2957 mmc_signal_sdio_irq(slot->mmc);
2963 #ifdef CONFIG_MMC_DW_IDMAC
2964 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2965 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2966 /* Handle DMA interrupts */
2967 pending = mci_readl(host, IDSTS);
2968 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2969 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2970 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2971 host->dma_ops->complete((void *)host);
/*
 * Card-detect workqueue handler: for each slot, read the current card
 * presence, switch SD data pins between SDMMC and UART-debug pinctrl
 * states where applicable, and on an insert/remove transition reset the
 * controller, fail any in-flight request with -ENOMEDIUM, and ask the
 * MMC core to rescan the bus.
 */
2979 static void dw_mci_work_routine_card(struct work_struct *work)
2981 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2984 for (i = 0; i < host->num_slots; i++) {
2985 struct dw_mci_slot *slot = host->slot[i];
2986 struct mmc_host *mmc = slot->mmc;
2987 struct mmc_request *mrq;
2990 present = dw_mci_get_cd(mmc);
2992 /* On card insert switch the data lines to SDMMC function, and back to
2993 uart on removal.  Only chips with the "udbg" tag in DTS need this
switched by software.
2995 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
2997 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
2998 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
2999 mmc_hostname(host->mmc));
3001 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3002 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3003 mmc_hostname(host->mmc));
/* Loop until the observed state matches what we last recorded;
 * re-read at the bottom in case it changed while we worked. */
3007 while (present != slot->last_detect_state) {
3008 dev_dbg(&slot->mmc->class_dev, "card %s\n",
3009 present ? "inserted" : "removed");
3010 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
3011 present ? "inserted" : "removed.", mmc_hostname(mmc));
3013 dw_mci_ctrl_all_reset(host);
3014 /* Stop edma when rountine card triggered */
3015 if(cpu_is_rk3036() || cpu_is_rk312x())
3016 if(host->dma_ops && host->dma_ops->stop)
3017 host->dma_ops->stop(host);
3018 rk_send_wakeup_key();//wake up system
3019 spin_lock_bh(&host->lock);
3021 /* Card change detected */
3022 slot->last_detect_state = present;
3024 /* Clean up queue if present */
/* NOTE(review): the assignment of mrq is elided from this excerpt;
 * presumably it comes from the slot's request queue. */
3027 if (mrq == host->mrq) {
/* The active request: fail whichever phase it is in with -ENOMEDIUM. */
3031 switch (host->state) {
3034 case STATE_SENDING_CMD:
3035 mrq->cmd->error = -ENOMEDIUM;
3039 case STATE_SENDING_DATA:
3040 mrq->data->error = -ENOMEDIUM;
3041 dw_mci_stop_dma(host);
3043 case STATE_DATA_BUSY:
3044 case STATE_DATA_ERROR:
3045 if (mrq->data->error == -EINPROGRESS)
3046 mrq->data->error = -ENOMEDIUM;
3050 case STATE_SENDING_STOP:
3051 mrq->stop->error = -ENOMEDIUM;
3055 dw_mci_request_end(host, mrq);
/* A queued (not yet active) request: dequeue and fail it outright. */
3057 list_del(&slot->queue_node);
3058 mrq->cmd->error = -ENOMEDIUM;
3060 mrq->data->error = -ENOMEDIUM;
3062 mrq->stop->error = -ENOMEDIUM;
3064 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3065 mrq->cmd->opcode, mmc_hostname(mmc));
/* Drop the lock around mmc_request_done(): it may sleep-free resources
 * and re-enter the host driver. */
3067 spin_unlock(&host->lock);
3068 mmc_request_done(slot->mmc, mrq);
3069 spin_lock(&host->lock);
3073 /* Power down slot */
3075 /* Clear down the FIFO */
3076 dw_mci_fifo_reset(host);
3077 #ifdef CONFIG_MMC_DW_IDMAC
3078 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3079 dw_mci_idmac_reset(host);
3084 spin_unlock_bh(&host->lock);
3086 present = dw_mci_get_cd(mmc);
/* Let the MMC core rescan after the board's configured debounce delay. */
3089 mmc_detect_change(slot->mmc,
3090 msecs_to_jiffies(host->pdata->detect_delay_ms));
3095 /* given a slot id, find out the device node representing that slot */
/*
 * Walk the controller's DT children and return the node whose "reg"
 * property equals @slot; NULL-ish fallthrough when absent (the return
 * statements are elided from this excerpt).
 */
3096 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3098 struct device_node *np;
3102 if (!dev || !dev->of_node)
3105 for_each_child_of_node(dev->of_node, np) {
3106 addr = of_get_property(np, "reg", &len);
3107 if (!addr || (len < sizeof(int)))
3109 if (be32_to_cpup(addr) == slot)
/* Table mapping per-slot DT boolean properties to slot quirk flags. */
3115 static struct dw_mci_of_slot_quirks {
3118 } of_slot_quirks[] = {
3120 .quirk = "disable-wp",
3121 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * OR together the quirk flags whose DT properties are present on the
 * given slot's device node.
 */
3125 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3127 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3132 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3133 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3134 quirks |= of_slot_quirks[idx].id;
3139 /* find out bus-width for a given slot */
/*
 * Read "bus-width" from the controller node itself (the per-slot node
 * lookup is deliberately bypassed here — see commented-out call) and
 * warn with a default when the property is missing.
 */
3140 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3142 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3148 if (of_property_read_u32(np, "bus-width", &bus_wd))
3149 dev_err(dev, "bus-width property not found, assuming width"
3155 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Look up "pwr-gpios" on the controller node, request it and drive it
 * low.  NOTE(review): driving 0 here presumably enables slot power
 * (active-low enable) — confirm against the board schematic.
 */
3156 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3158 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3164 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3166 /* Having a missing entry is valid; return silently */
3167 if (!gpio_is_valid(gpio))
3170 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3171 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3175 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3181 /* find the write protect gpio for a given slot; or -1 if none specified */
/* Look up and request the slot's "wp-gpios" write-protect line. */
3182 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3184 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3190 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3192 /* Having a missing entry is valid; return silently */
3193 if (!gpio_is_valid(gpio))
3196 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3197 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3204 /* find the cd gpio for a given slot */
/*
 * Register the controller node's "cd-gpios" line with the MMC core's
 * slot-gpio card-detect helper; silently absent entries are fine.
 */
3205 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3206 struct mmc_host *mmc)
3208 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3214 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3216 /* Having a missing entry is valid; return silently */
3217 if (!gpio_is_valid(gpio))
3220 if (mmc_gpio_request_cd(mmc, gpio, 0))
3221 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * Threaded IRQ handler for a GPIO card-detect line: flip the level
 * trigger polarity to emulate edge detection on both insert and
 * remove, wake the system, and queue the card-detect work unless bus
 * rescanning is currently disabled (e.g. during suspend).
 */
3224 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3226 struct mmc_host *mmc = dev_id;
3227 struct dw_mci_slot *slot = mmc_priv(mmc);
3228 struct dw_mci *host = slot->host;
3229 int gpio_cd = slot->cd_gpio;
/* Re-arm on the opposite level so the next state change fires again. */
3231 (gpio_get_value(gpio_cd) == 0) ?
3232 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
3233 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
3235 /* wakeup system whether gpio debounce or not */
3236 rk_send_wakeup_key();
3238 /* no need to trigger detect flow when rescan is disabled.
3239 This case happened in dpm: we just wake the system up and
3240 let the suspend_post notifier callback handle it.
3242 if(mmc->rescan_disable == 0)
3243 queue_work(host->card_workqueue, &host->card_work);
3245 printk("%s: rescan been disabled!\n", __FUNCTION__);
/*
 * Convert the card-detect GPIO to an IRQ, install the threaded handler
 * above (level-low, oneshot), and mark the IRQ as a wakeup source so a
 * card change can bring the system out of suspend.
 */
3250 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3251 struct mmc_host *mmc)
3253 struct dw_mci_slot *slot = mmc_priv(mmc);
3254 struct dw_mci *host = slot->host;
3258 /* Having a missing entry is valid; return silently */
3259 if (!gpio_is_valid(gpio))
3262 irq = gpio_to_irq(gpio);
3264 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3265 NULL, dw_mci_gpio_cd_irqt,
3266 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
3270 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3272 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3273 enable_irq_wake(irq);
3276 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/* Undo dw_mci_of_set_cd_gpio_irq(): release the CD IRQ and the GPIO. */
3280 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3281 struct mmc_host *mmc)
3283 if (!gpio_is_valid(gpio))
3286 if (gpio_to_irq(gpio) >= 0) {
3287 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3288 devm_gpio_free(&mmc->class_dev, gpio);
3291 #else /* CONFIG_OF */
/* No-op fallbacks for kernels built without device-tree support. */
3292 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3296 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3300 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3304 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3308 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3309 struct mmc_host *mmc)
3313 #endif /* CONFIG_OF */
3315 /* @host: dw_mci host prvdata
3316 * Init pinctrl for each platform. Usually we assign
3317 * "default" tag for functional usage, "idle" tag for gpio
3318 * state and "udbg" tag for uart_dbg if any.
3320 static void dw_mci_init_pinctrl(struct dw_mci *host)
3322 /* Fixme: DON'T TOUCH EMMC SETTING! */
3323 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3326 /* Get pinctrl for DTS */
3327 host->pinctrl = devm_pinctrl_get(host->dev);
3328 if (IS_ERR(host->pinctrl)) {
3329 dev_err(host->dev, "%s: No pinctrl used!\n",
3330 mmc_hostname(host->mmc));
3334 /* Lookup idle state */
3335 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3336 PINCTRL_STATE_IDLE);
3337 if (IS_ERR(host->pins_idle)) {
3338 dev_err(host->dev, "%s: No idle tag found!\n",
3339 mmc_hostname(host->mmc));
/* Park the pins in the idle (GPIO) state until a card shows up. */
3341 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3342 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3343 mmc_hostname(host->mmc));
3346 /* Lookup default state */
3347 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3348 PINCTRL_STATE_DEFAULT);
3349 if (IS_ERR(host->pins_default)) {
3350 dev_err(host->dev, "%s: No default pinctrl found!\n",
3351 mmc_hostname(host->mmc));
3353 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3354 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3355 mmc_hostname(host->mmc));
3358 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3359 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3360 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3361 if (IS_ERR(host->pins_udbg)) {
3362 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3363 mmc_hostname(host->mmc));
/* No card inserted at boot: hand the shared pins to uart_dbg. */
3365 if (!dw_mci_get_cd(host->mmc))
3366 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3367 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3368 mmc_hostname(host->mmc));
/*
 * PM notifier for SD slots (note: @host here is the struct mmc_host,
 * recovered via container_of, not the struct dw_mci).  Before suspend:
 * disable bus rescans and cancel any pending detect work.  After
 * resume: re-enable rescans and schedule a detect pass.
 */
3373 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3374 unsigned long mode, void *unused)
3376 struct mmc_host *host = container_of(
3377 notify_block, struct mmc_host, pm_notify);
3378 unsigned long flags;
3381 case PM_HIBERNATION_PREPARE:
3382 case PM_SUSPEND_PREPARE:
3383 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3384 spin_lock_irqsave(&host->lock, flags);
3385 host->rescan_disable = 1;
3386 spin_unlock_irqrestore(&host->lock, flags);
/* Pending detect work held a wake lock; release it with the cancel. */
3387 if (cancel_delayed_work(&host->detect))
3388 wake_unlock(&host->detect_wake_lock);
3391 case PM_POST_SUSPEND:
3392 case PM_POST_HIBERNATION:
3393 case PM_POST_RESTORE:
3394 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3395 spin_lock_irqsave(&host->lock, flags);
3396 host->rescan_disable = 0;
3397 spin_unlock_irqrestore(&host->lock, flags);
3398 mmc_detect_change(host, 10);
/*
 * Allocate and register one mmc_host for slot @id: parse DT-driven
 * capabilities and restrictions, set frequency/voltage/block limits,
 * wire up optional regulators, GPIOs and the PM notifier, init
 * pinctrl, and finally mmc_add_host().  Error-path labels unwind the
 * PM notifier and the CD-GPIO IRQ (their gotos are partially elided
 * from this excerpt).
 */
3404 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3406 struct mmc_host *mmc;
3407 struct dw_mci_slot *slot;
3408 const struct dw_mci_drv_data *drv_data = host->drv_data;
3413 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3417 slot = mmc_priv(mmc);
3421 host->slot[id] = slot;
3424 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3426 mmc->ops = &dw_mci_ops;
/* Clock range: DT "clock-freq-min-max" if present, driver defaults otherwise. */
3428 if (of_property_read_u32_array(host->dev->of_node,
3429 "clock-freq-min-max", freq, 2)) {
3430 mmc->f_min = DW_MCI_FREQ_MIN;
3431 mmc->f_max = DW_MCI_FREQ_MAX;
3433 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3434 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3436 mmc->f_min = freq[0];
3437 mmc->f_max = freq[1];
3439 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3440 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3443 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
/* Rockchip DT restriction tags pin this controller to one card type. */
3445 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3446 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3447 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3448 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3449 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3450 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3452 if (of_find_property(host->dev->of_node, "supports-tSD", NULL))
3453 mmc->restrict_caps |= RESTRICT_CARD_TYPE_TSD;
/* Only removable SD slots need the suspend/resume rescan gating. */
3455 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3456 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3457 if (register_pm_notifier(&mmc->pm_notify)) {
3458 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3459 goto err_pm_notifier;
3463 /* We assume only low-level chip use gpio_cd */
3464 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
3465 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3466 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3467 if (gpio_is_valid(slot->cd_gpio)) {
3468 /* Request gpio int for card detection */
3469 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3471 slot->cd_gpio = -ENODEV;
3472 dev_err(host->dev, "failed to get your cd-gpios!\n");
/* OCR mask: platform callback if provided, otherwise advertise the
 * full 1.65-3.6 V range. */
3476 if (host->pdata->get_ocr)
3477 mmc->ocr_avail = host->pdata->get_ocr(id);
3480 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3481 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3482 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3483 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3487 * Start with slot power disabled, it will be enabled when a card
3490 if (host->pdata->setpower)
3491 host->pdata->setpower(id, 0);
3493 if (host->pdata->caps)
3494 mmc->caps = host->pdata->caps;
3496 if (host->pdata->pm_caps)
3497 mmc->pm_caps = host->pdata->pm_caps;
/* Controller instance id ("mshc" alias in DT, platform id otherwise)
 * indexes the per-controller caps tables in drv_data. */
3499 if (host->dev->of_node) {
3500 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3504 ctrl_id = to_platform_device(host->dev)->id;
3506 if (drv_data && drv_data->caps)
3507 mmc->caps |= drv_data->caps[ctrl_id];
3508 if (drv_data && drv_data->hold_reg_flag)
3509 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3511 /* set the compatibility of driver. */
3512 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3513 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3515 if (host->pdata->caps2)
3516 mmc->caps2 = host->pdata->caps2;
/* Bus width: platform callback, then DT, then driver default. */
3518 if (host->pdata->get_bus_wd)
3519 bus_width = host->pdata->get_bus_wd(slot->id);
3520 else if (host->dev->of_node)
3521 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3525 switch (bus_width) {
3527 mmc->caps |= MMC_CAP_8_BIT_DATA;
3529 mmc->caps |= MMC_CAP_4_BIT_DATA;
/* Boolean DT capability properties, translated one-to-one. */
3532 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3533 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3534 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3535 mmc->caps |= MMC_CAP_SDIO_IRQ;
3536 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3537 mmc->caps |= MMC_CAP_HW_RESET;
3538 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3539 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3540 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3541 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3542 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3543 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3544 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3545 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3547 /*Assign pm_caps pass to pm_flags*/
3548 mmc->pm_flags = mmc->pm_caps;
/* Block-layer limits: platform-provided, else sensible per-DMA-mode defaults. */
3550 if (host->pdata->blk_settings) {
3551 mmc->max_segs = host->pdata->blk_settings->max_segs;
3552 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3553 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3554 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3555 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3557 /* Useful defaults if platform data is unset. */
3558 #ifdef CONFIG_MMC_DW_IDMAC
3559 mmc->max_segs = host->ring_size;
3560 mmc->max_blk_size = 65536;
3561 mmc->max_blk_count = host->ring_size;
3562 mmc->max_seg_size = 0x1000;
3563 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3564 if(cpu_is_rk3036() || cpu_is_rk312x()){
3565 /* fixup for external dmac setting */
3567 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3568 mmc->max_blk_count = 65535;
3569 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3570 mmc->max_seg_size = mmc->max_req_size;
/* PIO fallback limits (no IDMAC configured). */
3574 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3575 mmc->max_blk_count = 512;
3576 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3577 mmc->max_seg_size = mmc->max_req_size;
3578 #endif /* CONFIG_MMC_DW_IDMAC */
3582 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3584 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* Only SD slots get a vmmc supply here; absence is tolerated. */
3589 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3590 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3594 if (IS_ERR(host->vmmc)) {
3595 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3598 ret = regulator_enable(host->vmmc);
3601 "failed to enable regulator: %d\n", ret);
3608 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
/* SDIO (Wi-Fi) slots start out with no card marked present. */
3610 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3611 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3613 dw_mci_init_pinctrl(host);
3614 ret = mmc_add_host(mmc);
3618 #if defined(CONFIG_DEBUG_FS)
3619 dw_mci_init_debugfs(slot);
3622 /* Card initially undetected */
3623 slot->last_detect_state = 1;
/* Error unwinding for the registration steps above. */
3627 unregister_pm_notifier(&mmc->pm_notify);
3630 if (gpio_is_valid(slot->cd_gpio))
3631 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * Tear down one slot: run the platform exit hook, unregister the
 * mmc_host from the core, and free it.
 */
3636 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3638 /* Shutdown detect IRQ */
3639 if (slot->host->pdata->exit)
3640 slot->host->pdata->exit(id);
3642 /* Debugfs stuff is cleaned up by mmc core */
3643 mmc_remove_host(slot->mmc);
3644 slot->host->slot[id] = NULL;
3645 mmc_free_host(slot->mmc);
/*
 * Pick and initialize a DMA backend: allocate one page of coherent
 * memory for descriptor/sg translation, then select the external
 * dmaengine backend (rk3036/rk312x) or the controller's internal
 * IDMAC; fall back to PIO if anything is missing or fails.
 */
3648 static void dw_mci_init_dma(struct dw_mci *host)
3650 /* Alloc memory for sg translation */
3651 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3652 &host->sg_dma, GFP_KERNEL);
3653 if (!host->sg_cpu) {
3654 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3659 memset(host->sg_cpu, 0, PAGE_SIZE);
3662 /* Determine which DMA interface to use */
3663 #if defined(CONFIG_MMC_DW_IDMAC)
3664 if(cpu_is_rk3036() || cpu_is_rk312x()){
3665 host->dma_ops = &dw_mci_edmac_ops;
3666 dev_info(host->dev, "Using external DMA controller.\n");
3668 host->dma_ops = &dw_mci_idmac_ops;
3669 dev_info(host->dev, "Using internal DMA controller.\n");
/* Require a complete ops vtable before committing to DMA mode. */
3676 if (host->dma_ops->init && host->dma_ops->start &&
3677 host->dma_ops->stop && host->dma_ops->cleanup) {
3678 if (host->dma_ops->init(host)) {
3679 dev_err(host->dev, "%s: Unable to initialize "
3680 "DMA Controller.\n", __func__);
3684 dev_err(host->dev, "DMA initialization not found.\n");
3692 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Assert the given reset bit(s) in CTRL and poll up to 500 ms for the
 * hardware to self-clear them; logs a timeout otherwise (return
 * statements are elided from this excerpt).
 */
3697 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3699 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3702 ctrl = mci_readl(host, CTRL);
3704 mci_writel(host, CTRL, ctrl);
3706 /* wait till resets clear */
3708 ctrl = mci_readl(host, CTRL);
3709 if (!(ctrl & reset))
3711 } while (time_before(jiffies, timeout));
3714 "Timeout resetting block (ctrl reset %#x)\n",
/* Reset only the data FIFO; the sg_miter is stopped first because the
 * reset raises a block interrupt whose handler expects host->sg NULL. */
3720 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3723 * Reseting generates a block interrupt, hence setting
3724 * the scatter-gather pointer to NULL.
3727 sg_miter_stop(&host->sg_miter);
3731 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full reset: controller, FIFO and DMA blocks in one CTRL write. */
3734 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3736 return dw_mci_ctrl_reset(host,
3737 SDMMC_CTRL_FIFO_RESET |
3739 SDMMC_CTRL_DMA_RESET);
/* Table mapping controller-level DT boolean properties to host quirks. */
3744 static struct dw_mci_of_quirks {
3749 .quirk = "broken-cd",
3750 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board from the controller's device-tree node: slot
 * count, quirks, FIFO depth, detect debounce, bus clock, and the
 * capability flags encoded as boolean properties.  Also gives the
 * variant driver (drv_data->parse_dt) a chance to parse its own
 * properties.  Returns ERR_PTR on allocation/parse failure.
 */
3754 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3756 struct dw_mci_board *pdata;
3757 struct device *dev = host->dev;
3758 struct device_node *np = dev->of_node;
3759 const struct dw_mci_drv_data *drv_data = host->drv_data;
3761 u32 clock_frequency;
3763 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3765 dev_err(dev, "could not allocate memory for pdata\n");
3766 return ERR_PTR(-ENOMEM);
3769 /* find out number of slots supported */
3770 if (of_property_read_u32(dev->of_node, "num-slots",
3771 &pdata->num_slots)) {
3772 dev_info(dev, "num-slots property not found, "
3773 "assuming 1 slot is available\n");
3774 pdata->num_slots = 1;
/* Translate boolean quirk properties via the of_quirks table. */
3778 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3779 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3780 pdata->quirks |= of_quirks[idx].id;
3783 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3784 dev_info(dev, "fifo-depth property not found, using "
3785 "value of FIFOTH register as default\n")
3787 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3789 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3790 pdata->bus_hz = clock_frequency;
/* Variant-specific DT parsing hook (Rockchip drv_data). */
3792 if (drv_data && drv_data->parse_dt) {
3793 ret = drv_data->parse_dt(host);
3795 return ERR_PTR(ret);
/* Remaining boolean properties map one-to-one onto cap flags. */
3798 if (of_find_property(np, "keep-power-in-suspend", NULL))
3799 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3801 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3802 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3804 if (of_find_property(np, "supports-highspeed", NULL))
3805 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3807 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3808 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3810 if (of_find_property(np, "supports-DDR_MODE", NULL))
3811 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3813 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3814 pdata->caps2 |= MMC_CAP2_HS200;
3816 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3817 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3819 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3820 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3822 if (of_get_property(np, "cd-inverted", NULL))
3823 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3824 if (of_get_property(np, "bootpart-no-access", NULL))
3825 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3830 #else /* CONFIG_OF */
/*
 * Non-DT build: this driver has no other source of platform data, so make
 * dw_mci_probe() fail cleanly instead of dereferencing a NULL pdata.
 */
3831 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3833 return ERR_PTR(-EINVAL);
3835 #endif /* CONFIG_OF */
/*
 * dw_mci_probe - one-time initialisation of a DesignWare MMC controller.
 *
 * Sequence (as visible in this excerpt): parse DT platform data; detect
 * the IP version to pick the DATA register offset; acquire and enable the
 * AHB clock ("hclk_mmc") and card clock ("clk_mmc"); program the card
 * clock rate; run the variant clock hook; size up the host data width and
 * FIFO; reset the block; set up DMA, the tasklet, the card workqueue and
 * the IRQ handler; initialise each slot; finally unmask the interrupt set
 * the driver handles.
 *
 * Returns 0 on success or a negative errno; the tail labels unwind the
 * workqueue, DMA, regulator and clocks in reverse order of acquisition.
 */
3837 int dw_mci_probe(struct dw_mci *host)
3839 const struct dw_mci_drv_data *drv_data = host->drv_data;
3840 int width, i, ret = 0;
3846 host->pdata = dw_mci_parse_dt(host);
3847 if (IS_ERR(host->pdata)) {
3848 dev_err(host->dev, "platform data not available\n");
/* Multiple slots are only usable if the board tells us how to mux them. */
3853 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3855 "Platform data must supply select_slot function\n");
3860 * In 2.40a spec, Data offset is changed.
3861 * Need to check the version-id and set data-offset for DATA register.
3863 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3864 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3866 if (host->verid < DW_MMC_240A)
3867 host->data_offset = DATA_OFFSET;
3869 host->data_offset = DATA_240A_OFFSET;
/* AHB (register bus) clock for the controller. */
3872 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3873 if (IS_ERR(host->hclk_mmc)) {
3874 dev_err(host->dev, "failed to get hclk_mmc\n");
3875 ret = PTR_ERR(host->hclk_mmc);
/* NOTE(review): clk_prepare_enable() result is not checked here. */
3879 clk_prepare_enable(host->hclk_mmc);
/* Card interface (CIU) clock. */
3882 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3883 if (IS_ERR(host->clk_mmc)) {
3884 dev_err(host->dev, "failed to get clk mmc_per\n");
3885 ret = PTR_ERR(host->clk_mmc);
3889 host->bus_hz = host->pdata->bus_hz;
3890 if (!host->bus_hz) {
3891 dev_err(host->dev,"Platform data must supply bus speed\n");
3896 if (host->verid < DW_MMC_240A)
3897 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3899 //rockchip: fix divider 2 in clksum before controlller
/* >= 2.40a: SoC has a fixed /2 ahead of the IP, so ask for double. */
3900 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3903 dev_err(host->dev, "failed to set clk mmc\n");
/* NOTE(review): clk_prepare_enable() result is not checked here either. */
3906 clk_prepare_enable(host->clk_mmc);
3908 if (drv_data && drv_data->setup_clock) {
3909 ret = drv_data->setup_clock(host);
3912 "implementation specific clock setup failed\n");
3917 host->quirks = host->pdata->quirks;
3918 host->irq_state = true;
3919 host->set_speed = 0;
3921 host->svi_flags = 0;
3923 spin_lock_init(&host->lock);
3924 spin_lock_init(&host->slock);
3926 INIT_LIST_HEAD(&host->queue);
3928 * Get the host data width - this assumes that HCON has been set with
3929 * the correct values.
/* HCON[9:7] encodes the host data width: 0 = 16-bit, 2 = 64-bit. */
3931 i = (mci_readl(host, HCON) >> 7) & 0x7;
3933 host->push_data = dw_mci_push_data16;
3934 host->pull_data = dw_mci_pull_data16;
3936 host->data_shift = 1;
3937 } else if (i == 2) {
3938 host->push_data = dw_mci_push_data64;
3939 host->pull_data = dw_mci_pull_data64;
3941 host->data_shift = 3;
3943 /* Check for a reserved value, and warn if it is */
3945 "HCON reports a reserved host data width!\n"
3946 "Defaulting to 32-bit access.\n");
3947 host->push_data = dw_mci_push_data32;
3948 host->pull_data = dw_mci_pull_data32;
3950 host->data_shift = 2;
3953 /* Reset all blocks */
3954 if (!dw_mci_ctrl_all_reset(host))
3957 host->dma_ops = host->pdata->dma_ops;
3958 dw_mci_init_dma(host);
3960 /* Clear the interrupts for the host controller */
3961 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3962 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3964 /* Put in max timeout */
3965 mci_writel(host, TMOUT, 0xFFFFFFFF);
3968 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3969 * Tx Mark = fifo_size / 2 DMA Size = 8
3971 if (!host->pdata->fifo_depth) {
3973 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3974 * have been overwritten by the bootloader, just like we're
3975 * about to do, so if you know the value for your hardware, you
3976 * should put it in the platform data.
3978 fifo_size = mci_readl(host, FIFOTH);
3979 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3981 fifo_size = host->pdata->fifo_depth;
3983 host->fifo_depth = fifo_size;
3985 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3986 mci_writel(host, FIFOTH, host->fifoth_val);
3988 /* disable clock to CIU */
3989 mci_writel(host, CLKENA, 0);
3990 mci_writel(host, CLKSRC, 0);
/* Bottom half for command/data state machine; workqueue for card detect. */
3992 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3993 host->card_workqueue = alloc_workqueue("dw-mci-card",
3994 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
3995 if (!host->card_workqueue) {
3999 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
4000 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
4001 host->irq_flags, "dw-mci", host);
4005 if (host->pdata->num_slots)
4006 host->num_slots = host->pdata->num_slots;
/* Fall back to the slot count hardwired in HCON[5:1]. */
4008 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
4010 /* We need at least one slot to succeed */
4011 for (i = 0; i < host->num_slots; i++) {
4012 ret = dw_mci_init_slot(host, i);
4014 dev_dbg(host->dev, "slot %d init failed\n", i);
4020 * Enable interrupts for command done, data over, data empty, card det,
4021 * receive ready and error such as transmit, receive timeout, crc error
4023 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4024 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
4025 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* SDIO/eMMC hosts have no removable card, so skip the CD interrupt. */
4026 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
4027 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
4028 regs |= SDMMC_INT_CD;
4030 mci_writel(host, INTMASK, regs);
4032 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4034 dev_info(host->dev, "DW MMC controller at irq %d, "
4035 "%d bit host data width, "
4037 host->irq, width, fifo_size);
4040 dev_info(host->dev, "%d slots initialized\n", init_slots);
4042 dev_dbg(host->dev, "attempted to initialize %d slots, "
4043 "but failed on all\n", host->num_slots);
4048 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4049 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* Error unwind: release resources in reverse order of acquisition. */
4054 destroy_workqueue(host->card_workqueue);
4057 if (host->use_dma && host->dma_ops->exit)
4058 host->dma_ops->exit(host);
4061 regulator_disable(host->vmmc);
4062 regulator_put(host->vmmc);
4066 if (!IS_ERR(host->clk_mmc))
4067 clk_disable_unprepare(host->clk_mmc);
4069 if (!IS_ERR(host->hclk_mmc))
4070 clk_disable_unprepare(host->hclk_mmc);
4074 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - tear down a controller initialised by dw_mci_probe().
 *
 * Masks and acks all interrupts, cleans up every slot, gates the CIU
 * clock, destroys the card workqueue, unregisters the PM notifier (SD
 * hosts only), shuts down DMA, frees the card-detect GPIO IRQ, and
 * releases the vmmc regulator and both clocks.
 */
4076 void dw_mci_remove(struct dw_mci *host)
4078 struct mmc_host *mmc = host->mmc;
4079 struct dw_mci_slot *slot = mmc_priv(mmc);
/* Silence the hardware before dismantling anything. */
4082 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4083 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4085 for(i = 0; i < host->num_slots; i++){
4086 dev_dbg(host->dev, "remove slot %d\n", i);
4088 dw_mci_cleanup_slot(host->slot[i], i);
4091 /* disable clock to CIU */
4092 mci_writel(host, CLKENA, 0);
4093 mci_writel(host, CLKSRC, 0);
4095 destroy_workqueue(host->card_workqueue);
/* The PM notifier is only registered for removable-SD hosts. */
4096 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4097 unregister_pm_notifier(&host->mmc->pm_notify);
4099 if(host->use_dma && host->dma_ops->exit)
4100 host->dma_ops->exit(host);
4102 if (gpio_is_valid(slot->cd_gpio))
4103 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4106 regulator_disable(host->vmmc);
4107 regulator_put(host->vmmc);
4109 if(!IS_ERR(host->clk_mmc))
4110 clk_disable_unprepare(host->clk_mmc);
4112 if(!IS_ERR(host->hclk_mmc))
4113 clk_disable_unprepare(host->hclk_mmc);
4115 EXPORT_SYMBOL(dw_mci_remove);
4119 #ifdef CONFIG_PM_SLEEP
4121 * TODO: we should probably disable the clock to the card in the suspend path.
/*
 * Provided by the Rockchip WLAN glue; identifies the SDIO WiFi chip so
 * chips that keep power across suspend can bypass this path.
 */
4123 extern int get_wifi_chip_type(void);
/*
 * dw_mci_suspend - system-sleep hook for the controller.
 *
 * For ESP8089/Realtek SDIO WiFi hosts the suspend is skipped entirely.
 * For removable-SD hosts: the IRQ is disabled, pins are moved to their
 * idle pinctrl state, all interrupts are masked/acked and CTRL cleared,
 * and (except on rk3126/rk3126b/rk3036, which are already in gpio-cd
 * mode) the card-detect pin is re-routed to a GPIO whose IRQ is armed as
 * a wakeup source so card insertion/removal can wake the system.
 */
4124 int dw_mci_suspend(struct dw_mci *host)
4126 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4127 (get_wifi_chip_type() == WIFI_ESP8089 || get_wifi_chip_type() == WIFI_RTKWIFI))
4131 regulator_disable(host->vmmc);
4133 /*only for sdmmc controller*/
4134 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4135 disable_irq(host->irq);
4136 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4137 MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
4138 mmc_hostname(host->mmc));
4140 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4141 mci_writel(host, INTMASK, 0x00);
4142 mci_writel(host, CTRL, 0x00);
4144 /* Soc rk3126/3036 already in gpio_cd mode */
4145 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4146 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4147 enable_irq_wake(host->mmc->slot.cd_irq);
4152 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - system-resume hook; mirrors dw_mci_suspend().
 *
 * Skips resume for ESP8089/Realtek SDIO WiFi hosts and for SDIO hosts
 * whose card is absent.  For SD hosts it hands card-detect back to the
 * controller (disarming the GPIO wake IRQ, restoring the default pinctrl
 * state, and writing the per-SoC GRF/regmap mux).  It then re-enables the
 * regulator, resets the controller, re-initialises DMA, restores FIFOTH
 * and the max timeout, re-programs the interrupt mask, re-enables the
 * IRQ, and restores the bus state of every powered slot.
 *
 * Returns 0 on success or a negative errno from the visible failure
 * paths (regulator / reset).
 */
4154 int dw_mci_resume(struct dw_mci *host)
4156 int i, ret, retry_cnt = 0;
4158 struct dw_mci_slot *slot;
4160 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4161 (get_wifi_chip_type() == WIFI_ESP8089 || get_wifi_chip_type() == WIFI_RTKWIFI))
/* Nothing to restore if the SDIO card is not present. */
4166 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
4167 slot = mmc_priv(host->mmc);
4168 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4172 /*only for sdmmc controller*/
4173 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4174 /* Soc rk3126/3036 already in gpio_cd mode */
4175 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4176 disable_irq_wake(host->mmc->slot.cd_irq);
4177 mmc_gpio_free_cd(host->mmc);
4179 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4180 MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
4181 mmc_hostname(host->mmc));
/*
 * Per-SoC GRF writes (high halfword is the write-enable mask).
 * NOTE(review): these appear to switch the card-detect mux back to
 * the controller — confirm bit meanings against each SoC's GRF TRM.
 */
4185 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4186 else if(cpu_is_rk3036())
4187 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4188 else if(cpu_is_rk312x())
4189 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4190 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4191 else if(host->cid == DW_MCI_TYPE_RK3368)
4192 regmap_write(host->grf, 0x43c, ((1 << 13) << 16) | (0 << 13));
4195 ret = regulator_enable(host->vmmc);
4198 "failed to enable regulator: %d\n", ret);
4203 if(!dw_mci_ctrl_all_reset(host)){
/* rk3036/rk312x: DMA re-init is skipped — TODO confirm why upstream. */
4208 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
4209 if(host->use_dma && host->dma_ops->init)
4210 host->dma_ops->init(host);
4213 * Restore the initial value at FIFOTH register
4214 * And Invalidate the prev_blksz with zero
4216 mci_writel(host, FIFOTH, host->fifoth_val);
4217 host->prev_blksz = 0;
4218 /* Put in max timeout */
4219 mci_writel(host, TMOUT, 0xFFFFFFFF);
4221 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4222 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
4224 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4225 regs |= SDMMC_INT_CD;
4226 mci_writel(host, INTMASK, regs);
4227 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4228 /*only for sdmmc controller*/
/* Balance the disable_irq() done in dw_mci_suspend() (first pass only). */
4229 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
4230 enable_irq(host->irq);
/* Re-apply bus settings for slots that kept power across suspend. */
4233 for(i = 0; i < host->num_slots; i++){
4234 struct dw_mci_slot *slot = host->slot[i];
4237 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4238 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4239 dw_mci_setup_bus(slot, true);
4245 EXPORT_SYMBOL(dw_mci_resume);
4246 #endif /* CONFIG_PM_SLEEP */
/* Module entry point: just announces the driver; platform glue registers
 * the actual device drivers elsewhere. */
4248 static int __init dw_mci_init(void)
4250 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
/* Module exit: nothing to undo here. */
4254 static void __exit dw_mci_exit(void)
4258 module_init(dw_mci_init);
4259 module_exit(dw_mci_exit);
4261 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4262 MODULE_AUTHOR("NXP Semiconductor VietNam");
4263 MODULE_AUTHOR("Imagination Technologies Ltd");
4264 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4265 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4266 MODULE_LICENSE("GPL v2");