2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
50 #include "rk_sdmmc_dbg.h"
51 #include <linux/regulator/rockchip_io_vol_domain.h>
52 #include "../../clk/rockchip/clk-ops.h"
54 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
56 /* Common flag combinations */
57 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
58 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
60 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
62 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
63 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
64 #define DW_MCI_SEND_STATUS 1
65 #define DW_MCI_RECV_STATUS 2
66 #define DW_MCI_DMA_THRESHOLD 16
68 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
69 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
71 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
72 #define SDMMC_DATA_TIMEOUT_SD 500
73 #define SDMMC_DATA_TIMEOUT_SDIO 250
74 #define SDMMC_DATA_TIMEOUT_EMMC 2500
76 #define SDMMC_CMD_RTO_MAX_HOLD 200
77 #define SDMMC_WAIT_FOR_UNBUSY 2500
79 #ifdef CONFIG_MMC_DW_IDMAC
80 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
81 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
82 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
86 u32 des0; /* Control Descriptor */
87 #define IDMAC_DES0_DIC BIT(1)
88 #define IDMAC_DES0_LD BIT(2)
89 #define IDMAC_DES0_FD BIT(3)
90 #define IDMAC_DES0_CH BIT(4)
91 #define IDMAC_DES0_ER BIT(5)
92 #define IDMAC_DES0_CES BIT(30)
93 #define IDMAC_DES0_OWN BIT(31)
95 u32 des1; /* Buffer sizes */
96 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
97 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
99 u32 des2; /* buffer 1 physical address */
101 u32 des3; /* buffer 2 physical address */
103 #endif /* CONFIG_MMC_DW_IDMAC */
/* Standard tuning block pattern for a 4-bit data bus (per SD/eMMC specs). */
105 static const u8 tuning_blk_pattern_4bit[] = {
106 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
107 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
108 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
109 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
110 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
111 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
112 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
113 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard tuning block pattern for an 8-bit data bus (per eMMC spec). */
116 static const u8 tuning_blk_pattern_8bit[] = {
117 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
118 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
119 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
120 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
121 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
122 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
123 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
124 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
125 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
126 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
127 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
128 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
129 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
130 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
131 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
132 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
135 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
136 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
137 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
138 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
140 /*printk the all register of current host*/
/*
 * Dump every register listed in the dw_mci_regs table to the kernel log.
 * NOTE(review): fragment — the loop-advance (regs++) and braces are not
 * visible here; presumably iterates until the name sentinel. Confirm.
 */
142 static int dw_mci_regs_printk(struct dw_mci *host)
144 struct sdmmc_reg *regs = dw_mci_regs;
146 while( regs->name != 0 ){
147 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
150 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
155 #if defined(CONFIG_DEBUG_FS)
156 static int dw_mci_req_show(struct seq_file *s, void *v)
158 struct dw_mci_slot *slot = s->private;
159 struct mmc_request *mrq;
160 struct mmc_command *cmd;
161 struct mmc_command *stop;
162 struct mmc_data *data;
164 /* Make sure we get a consistent snapshot */
165 spin_lock_bh(&slot->host->lock);
175 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
176 cmd->opcode, cmd->arg, cmd->flags,
177 cmd->resp[0], cmd->resp[1], cmd->resp[2],
178 cmd->resp[2], cmd->error);
180 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
181 data->bytes_xfered, data->blocks,
182 data->blksz, data->flags, data->error);
185 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
186 stop->opcode, stop->arg, stop->flags,
187 stop->resp[0], stop->resp[1], stop->resp[2],
188 stop->resp[2], stop->error);
191 spin_unlock_bh(&slot->host->lock);
/* debugfs open hook: bind dw_mci_req_show to this inode's slot. */
196 static int dw_mci_req_open(struct inode *inode, struct file *file)
198 return single_open(file, dw_mci_req_show, inode->i_private);
/* File operations for the debugfs "req" node (seq_file single_open). */
201 static const struct file_operations dw_mci_req_fops = {
202 .owner = THIS_MODULE,
203 .open = dw_mci_req_open,
206 .release = single_release,
/*
 * debugfs "regs" file: dump a handful of controller registers.
 * NOTE(review): as visible here the values printed are the SDMMC_* offset
 * macros themselves, not mci_readl() results — the fragment may hide a
 * wrapper, but verify that live register values are actually printed.
 */
209 static int dw_mci_regs_show(struct seq_file *s, void *v)
211 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
212 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
213 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
214 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
215 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
216 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open hook: bind dw_mci_regs_show to this inode's host. */
221 static int dw_mci_regs_open(struct inode *inode, struct file *file)
223 return single_open(file, dw_mci_regs_show, inode->i_private);
/* File operations for the debugfs "regs" node (seq_file single_open). */
226 static const struct file_operations dw_mci_regs_fops = {
227 .owner = THIS_MODULE,
228 .open = dw_mci_regs_open,
231 .release = single_release,
/*
 * Create the per-slot debugfs tree under the mmc host's debugfs root:
 * "regs", "req", plus raw views of state/pending/completed events.
 * NOTE(review): fragment — the node-NULL checks between creations and the
 * final error label are not visible here.
 */
234 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
236 struct mmc_host *mmc = slot->mmc;
237 struct dw_mci *host = slot->host;
241 root = mmc->debugfs_root;
245 node = debugfs_create_file("regs", S_IRUSR, root, host,
250 node = debugfs_create_file("req", S_IRUSR, root, slot,
255 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
259 node = debugfs_create_x32("pending_events", S_IRUSR, root,
260 (u32 *)&host->pending_events);
264 node = debugfs_create_x32("completed_events", S_IRUSR, root,
265 (u32 *)&host->completed_events);
272 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
274 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the controller data-timeout register to its maximum value. */
276 static void dw_mci_set_timeout(struct dw_mci *host)
278 /* timeout (maximum) */
279 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller CMD register bits
 * (response expected/long/CRC, data expected, stream/write direction),
 * then let the SoC driver hook patch the value.
 * NOTE(review): fragment — the initial "cmdr = cmd->opcode" assignment and
 * the "if (cmd->data)" guard around the data flags are not visible here.
 */
282 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
284 struct mmc_data *data;
285 struct dw_mci_slot *slot = mmc_priv(mmc);
286 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
288 cmd->error = -EINPROGRESS;
292 if (cmdr == MMC_STOP_TRANSMISSION)
293 cmdr |= SDMMC_CMD_STOP;
295 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
297 if (cmd->flags & MMC_RSP_PRESENT) {
298 /* We expect a response, so set this bit */
299 cmdr |= SDMMC_CMD_RESP_EXP;
300 if (cmd->flags & MMC_RSP_136)
301 cmdr |= SDMMC_CMD_RESP_LONG;
304 if (cmd->flags & MMC_RSP_CRC)
305 cmdr |= SDMMC_CMD_RESP_CRC;
309 cmdr |= SDMMC_CMD_DAT_EXP;
310 if (data->flags & MMC_DATA_STREAM)
311 cmdr |= SDMMC_CMD_STRM_MODE;
312 if (data->flags & MMC_DATA_WRITE)
313 cmdr |= SDMMC_CMD_DAT_WR;
316 if (drv_data && drv_data->prepare_command)
317 drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build the synthetic stop/abort command used when a data transfer must
 * be terminated: CMD12 for block read/write, CMD52 (I/O abort) for SDIO
 * CMD53. Returns the CMD register value for that stop command.
 */
323 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
325 struct mmc_command *stop;
331 stop = &host->stop_abort;
333 memset(stop, 0, sizeof(struct mmc_command));
335 if (cmdr == MMC_READ_SINGLE_BLOCK ||
336 cmdr == MMC_READ_MULTIPLE_BLOCK ||
337 cmdr == MMC_WRITE_BLOCK ||
338 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
339 stop->opcode = MMC_STOP_TRANSMISSION;
341 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
342 } else if (cmdr == SD_IO_RW_EXTENDED) {
343 stop->opcode = SD_IO_RW_DIRECT;
/* CMD52 write to CCCR ABORT register, preserving the function number. */
344 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
345 ((cmd->arg >> 28) & 0x7);
346 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
351 cmdr = stop->opcode | SDMMC_CMD_STOP |
352 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Write ARG and CMD to the controller and kick off the command.
 * SD voltage switch (CMD11) gets special handling: low-power clock gating
 * is disabled first and the VOLT_SWITCH bit is set.
 */
357 static void dw_mci_start_command(struct dw_mci *host,
358 struct mmc_command *cmd, u32 cmd_flags)
360 struct dw_mci_slot *slot = host->slot[0];
361 /*temporality fix slot[0] due to host->num_slots equal to 1*/
363 host->pre_cmd = host->cmd;
366 "start command: ARGR=0x%08x CMDR=0x%08x\n",
367 cmd->arg, cmd_flags);
369 if(SD_SWITCH_VOLTAGE == cmd->opcode){
370 /*confirm non-low-power mode*/
371 mci_writel(host, CMDARG, 0);
372 dw_mci_disable_low_power(slot);
374 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
375 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
377 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
380 mci_writel(host, CMDARG, cmd->arg);
383 /* fix the value to 1 in some Soc,for example RK3188. */
384 if(host->mmc->hold_reg_flag)
385 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
/* Setting the START bit makes the controller latch ARG/CMD and execute. */
387 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the pre-computed stop command (host->stop_cmdr) for this transfer. */
391 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
393 dw_mci_start_command(host, data->stop, host->stop_cmdr);
396 /* DMA interface functions */
/*
 * Abort an in-flight DMA transfer and mark the transfer complete so the
 * state machine can advance. On RK3036/RK312x the edma stop is skipped
 * (terminating edma there may trigger a flush operation — see Fixme).
 */
397 static void dw_mci_stop_dma(struct dw_mci *host)
399 if (host->using_dma) {
400 /* Fixme: No need to terminate edma, may cause flush op */
401 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
402 host->dma_ops->stop(host);
403 host->dma_ops->cleanup(host);
406 /* Data transfer was stopped by the interrupt handler */
407 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
410 static int dw_mci_get_dma_dir(struct mmc_data *data)
412 if (data->flags & MMC_DATA_WRITE)
413 return DMA_TO_DEVICE;
415 return DMA_FROM_DEVICE;
418 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the scatterlist after an IDMAC transfer, but only if it was not
 * pre-mapped by the pre_req path (host_cookie != 0 means the post_req
 * hook owns the unmap instead).
 */
419 static void dw_mci_dma_cleanup(struct dw_mci *host)
421 struct mmc_data *data = host->data;
424 if (!data->host_cookie)
425 dma_unmap_sg(host->dev,
428 dw_mci_get_dma_dir(data));
431 static void dw_mci_idmac_reset(struct dw_mci *host)
433 u32 bmod = mci_readl(host, BMOD);
434 /* Software reset of DMA */
435 bmod |= SDMMC_IDMAC_SWRESET;
436 mci_writel(host, BMOD, bmod);
/*
 * Stop the internal DMAC: detach it from the data path (CTRL), reset the
 * DMA interface, then disable and software-reset the IDMAC itself (BMOD).
 */
439 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
443 /* Disable and reset the IDMAC interface */
444 temp = mci_readl(host, CTRL);
445 temp &= ~SDMMC_CTRL_USE_IDMAC;
446 temp |= SDMMC_CTRL_DMA_RESET;
447 mci_writel(host, CTRL, temp);
449 /* Stop the IDMAC running */
450 temp = mci_readl(host, BMOD);
451 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
452 temp |= SDMMC_IDMAC_SWRESET;
453 mci_writel(host, BMOD, temp);
/*
 * IDMAC completion callback: clean up the mapping, flag the transfer
 * complete and schedule the tasklet to advance the state machine.
 * If the card was removed, data may be NULL (see comment below).
 */
456 static void dw_mci_idmac_complete_dma(void *arg)
458 struct dw_mci *host = arg;
459 struct mmc_data *data = host->data;
461 dev_vdbg(host->dev, "DMA complete\n");
464 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
465 host->mrq->cmd->opcode,host->mrq->cmd->arg,
466 data->blocks,data->blksz,mmc_hostname(host->mmc));
469 host->dma_ops->cleanup(host);
472 * If the card was removed, data will be NULL. No point in trying to
473 * send the stop command or waiting for NBUSY in this case.
476 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
477 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the DMA-mapped scatterlist:
 * one chained descriptor per sg entry, then mark the first (FD) and
 * last (LD, interrupts enabled) descriptors.
 */
481 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
485 struct idmac_desc *desc = host->sg_cpu;
487 for (i = 0; i < sg_len; i++, desc++) {
488 unsigned int length = sg_dma_len(&data->sg[i]);
489 u32 mem_addr = sg_dma_address(&data->sg[i]);
491 /* Set the OWN bit and disable interrupts for this descriptor */
492 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
495 IDMAC_SET_BUFFER1_SIZE(desc, length);
497 /* Physical address to DMA to/from */
498 desc->des2 = mem_addr;
501 /* Set first descriptor */
503 desc->des0 |= IDMAC_DES0_FD;
505 /* Set last descriptor */
/* NOTE(review): byte arithmetic here relies on sg_cpu being void* —
 * confirm, otherwise the offset would be scaled twice. */
506 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
507 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
508 desc->des0 |= IDMAC_DES0_LD;
/*
 * Start an internal-DMAC transfer: build descriptors from the sg list,
 * route the data path through the IDMAC, enable it, then poke PLDMND so
 * it starts fetching descriptors.
 */
513 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
517 dw_mci_translate_sglist(host, host->data, sg_len);
519 /* Select IDMAC interface */
520 temp = mci_readl(host, CTRL);
521 temp |= SDMMC_CTRL_USE_IDMAC;
522 mci_writel(host, CTRL, temp);
526 /* Enable the IDMAC */
527 temp = mci_readl(host, BMOD);
528 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
529 mci_writel(host, BMOD, temp);
531 /* Start it running */
532 mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: forward-link a page-sized descriptor ring,
 * close the ring with the end-of-ring descriptor, reset the IDMAC,
 * unmask only the interrupts we care about, and program the ring base.
 */
535 static int dw_mci_idmac_init(struct dw_mci *host)
537 struct idmac_desc *p;
540 /* Number of descriptors in the ring buffer */
541 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
543 /* Forward link the descriptor list */
544 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
545 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
547 /* Set the last descriptor as the end-of-ring descriptor */
548 p->des3 = host->sg_dma;
549 p->des0 = IDMAC_DES0_ER;
551 dw_mci_idmac_reset(host);
553 /* Mask out interrupts - get Tx & Rx complete only */
554 mci_writel(host, IDSTS, IDMAC_INT_CLR);
555 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
558 /* Set the descriptor base address */
559 mci_writel(host, DBADDR, host->sg_dma);
/* dw_mci_dma_ops vtable for the internal DMA controller (IDMAC). */
563 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
564 .init = dw_mci_idmac_init,
565 .start = dw_mci_idmac_start_dma,
566 .stop = dw_mci_idmac_stop_dma,
567 .complete = dw_mci_idmac_complete_dma,
568 .cleanup = dw_mci_dma_cleanup,
/*
 * Unmap the scatterlist after an external-DMA transfer, unless it was
 * pre-mapped via pre_req (host_cookie != 0 → post_req unmaps it).
 */
572 static void dw_mci_edma_cleanup(struct dw_mci *host)
574 struct mmc_data *data = host->data;
577 if (!data->host_cookie)
578 dma_unmap_sg(host->dev,
579 data->sg, data->sg_len,
580 dw_mci_get_dma_dir(data));
/* Abort all outstanding work on the external DMA channel. */
583 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
585 dmaengine_terminate_all(host->dms->ch);
/*
 * External-DMA completion callback: invalidate CPU caches for reads,
 * clean up the mapping, flag the transfer complete and schedule the
 * tasklet. data may be NULL if the card was removed (comment below).
 */
588 static void dw_mci_edmac_complete_dma(void *arg)
590 struct dw_mci *host = arg;
591 struct mmc_data *data = host->data;
593 dev_vdbg(host->dev, "DMA complete\n");
596 if(data->flags & MMC_DATA_READ)
597 /* Invalidate cache after read */
598 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
599 data->sg_len, DMA_FROM_DEVICE);
601 host->dma_ops->cleanup(host);
604 * If the card was removed, data will be NULL. No point in trying to
605 * send the stop command or waiting for NBUSY in this case.
608 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
609 tasklet_schedule(&host->tasklet);
/*
 * Start a transfer on the external DMA channel: configure the slave
 * (FIFO address, 32-bit width, burst matched to the FIFOTH MSIZE but
 * capped — 8 on RK3036, 16 elsewhere per the comment), prepare and
 * submit a slave-sg descriptor per direction, and issue it. For writes
 * the caches are flushed before issuing; reads are invalidated in the
 * completion callback.
 */
613 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
615 struct dma_slave_config slave_config;
616 struct dma_async_tx_descriptor *desc = NULL;
617 struct scatterlist *sgl = host->data->sg;
618 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
619 u32 sg_elems = host->data->sg_len;
620 u32 fifoth_val, mburst;
624 /* Set external dma config: burst size, burst width*/
625 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
626 slave_config.src_addr = slave_config.dst_addr;
627 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
628 slave_config.src_addr_width = slave_config.dst_addr_width;
630 /* Match FIFO dma burst MSIZE with external dma config*/
631 fifoth_val = mci_readl(host, FIFOTH);
632 mburst = mszs[(fifoth_val >> 28) & 0x7];
634 /* edmac limit burst to 16, but work around for rk3036 to 8 */
635 if (unlikely(cpu_is_rk3036()))
640 slave_config.dst_maxburst = (mburst > burst_limit) ? burst_limit : mburst;
641 slave_config.src_maxburst = slave_config.dst_maxburst;
643 if(host->data->flags & MMC_DATA_WRITE){
644 slave_config.direction = DMA_MEM_TO_DEV;
645 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
647 dev_err(host->dev, "error in dw_mci edma configuration.\n");
651 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
652 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
654 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
657 /* Set dw_mci_edmac_complete_dma as callback */
658 desc->callback = dw_mci_edmac_complete_dma;
659 desc->callback_param = (void *)host;
660 dmaengine_submit(desc);
662 /* Flush cache before write */
663 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
664 sg_elems, DMA_TO_DEVICE);
665 dma_async_issue_pending(host->dms->ch);
668 slave_config.direction = DMA_DEV_TO_MEM;
669 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
671 dev_err(host->dev, "error in dw_mci edma configuration.\n");
674 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
675 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
677 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
680 /* set dw_mci_edmac_complete_dma as callback */
681 desc->callback = dw_mci_edmac_complete_dma;
682 desc->callback_param = (void *)host;
683 dmaengine_submit(desc);
684 dma_async_issue_pending(host->dms->ch);
688 static int dw_mci_edmac_init(struct dw_mci *host)
690 /* Request external dma channel, SHOULD decide chn in dts */
692 host->dms = (struct dw_mci_dma_slave *)kmalloc
693 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
694 if (NULL == host->dms) {
695 dev_err(host->dev, "No enough memory to alloc dms.\n");
699 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
700 if (!host->dms->ch) {
701 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
702 host->dms->ch->chan_id);
709 if (NULL != host->dms) {
/*
 * Release the external DMA channel and its slave state; pointers are
 * NULLed so a repeated exit is harmless.
 */
717 static void dw_mci_edmac_exit(struct dw_mci *host)
719 if (NULL != host->dms) {
720 if (NULL != host->dms->ch) {
721 dma_release_channel(host->dms->ch);
722 host->dms->ch = NULL;
/* dw_mci_dma_ops vtable for the external DMA engine (edmac) path. */
729 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
730 .init = dw_mci_edmac_init,
731 .exit = dw_mci_edmac_exit,
732 .start = dw_mci_edmac_start_dma,
733 .stop = dw_mci_edmac_stop_dma,
734 .complete = dw_mci_edmac_complete_dma,
735 .cleanup = dw_mci_edma_cleanup,
737 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * DMA-map the request's scatterlist if the transfer is DMA-eligible:
 * long enough (>= DW_MCI_DMA_THRESHOLD bytes) and every sg entry
 * word-aligned in both offset and length. Returns the mapped sg count,
 * and caches it in host_cookie when called from the pre_req path
 * (next != 0) so the request path can reuse the mapping.
 */
739 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
740 struct mmc_data *data,
743 struct scatterlist *sg;
744 unsigned int i, sg_len;
746 if (!next && data->host_cookie)
747 return data->host_cookie;
750 * We don't do DMA on "complex" transfers, i.e. with
751 * non-word-aligned buffers or lengths. Also, we don't bother
752 * with all the DMA setup overhead for short transfers.
754 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
760 for_each_sg(data->sg, sg, data->sg_len, i) {
761 if (sg->offset & 3 || sg->length & 3)
765 sg_len = dma_map_sg(host->dev,
768 dw_mci_get_dma_dir(data));
773 data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: pre-map the next request's sg list so the DMA
 * setup overlaps the current transfer. A stale host_cookie is cleared
 * first; on mapping failure the cookie stays 0 (fall back to mapping at
 * request time).
 */
778 static void dw_mci_pre_req(struct mmc_host *mmc,
779 struct mmc_request *mrq,
782 struct dw_mci_slot *slot = mmc_priv(mmc);
783 struct mmc_data *data = mrq->data;
785 if (!slot->host->use_dma || !data)
788 if (data->host_cookie) {
789 data->host_cookie = 0;
793 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
794 data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: undo the pre_req mapping once the request has
 * finished, then clear the cookie.
 */
797 static void dw_mci_post_req(struct mmc_host *mmc,
798 struct mmc_request *mrq,
801 struct dw_mci_slot *slot = mmc_priv(mmc);
802 struct mmc_data *data = mrq->data;
804 if (!slot->host->use_dma || !data)
807 if (data->host_cookie)
808 dma_unmap_sg(slot->host->dev,
811 dw_mci_get_dma_dir(data));
812 data->host_cookie = 0;
/*
 * Pick FIFO watermarks and the DMA multiple-transaction size (MSIZE) for
 * the given block size: choose the largest MSIZE that evenly divides both
 * the block depth and the TX watermark complement, then program FIFOTH.
 * Only meaningful for the internal DMAC build.
 */
815 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
817 #ifdef CONFIG_MMC_DW_IDMAC
818 unsigned int blksz = data->blksz;
819 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
820 u32 fifo_width = 1 << host->data_shift;
821 u32 blksz_depth = blksz / fifo_width, fifoth_val;
822 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
823 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
825 tx_wmark = (host->fifo_depth) / 2;
826 tx_wmark_invers = host->fifo_depth - tx_wmark;
830 * if blksz is not a multiple of the FIFO width
832 if (blksz % fifo_width) {
839 if (!((blksz_depth % mszs[idx]) ||
840 (tx_wmark_invers % mszs[idx]))) {
842 rx_wmark = mszs[idx] - 1;
847 * If idx is '0', it won't be tried
848 * Thus, initial values are uesed
851 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
852 mci_writel(host, FIFOTH, fifoth_val);
/*
 * Program the card read threshold (CDTHRCTL) for fast timings (HS200 /
 * SDR104) so a read burst only starts when enough FIFO space is free;
 * disabled (threshold 0) for other timings or oversized blocks.
 */
857 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
859 unsigned int blksz = data->blksz;
860 u32 blksz_depth, fifo_depth;
863 WARN_ON(!(data->flags & MMC_DATA_READ));
865 if (host->timing != MMC_TIMING_MMC_HS200 &&
866 host->timing != MMC_TIMING_UHS_SDR104)
869 blksz_depth = blksz / (1 << host->data_shift);
870 fifo_depth = host->fifo_depth;
872 if (blksz_depth > fifo_depth)
876 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
877 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
878 * Currently just choose blksz.
881 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
885 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to submit the data transfer via DMA. Maps the sg list, retunes
 * FIFOTH when the block size changed, resets the DMA interface, enables
 * DMA in CTRL, masks the PIO RX/TX interrupts and starts the engine.
 * NOTE(review): fragment — the failure returns (no channel / mapping
 * failed) are not visible here; non-zero presumably means "fall back to
 * PIO" (see caller dw_mci_submit_data).
 */
888 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
896 /* If we don't have a channel, we can't do DMA */
900 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
902 /* Fixme: No need terminate edma, may cause flush op */
903 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
904 host->dma_ops->stop(host);
911 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
912 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
916 * Decide the MSIZE and RX/TX Watermark.
917 * If current block size is same with previous size,
918 * no need to update fifoth.
920 if (host->prev_blksz != data->blksz)
921 dw_mci_adjust_fifoth(host, data);
924 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
926 /* Enable the DMA interface */
927 temp = mci_readl(host, CTRL);
928 temp |= SDMMC_CTRL_DMA_ENABLE;
929 mci_writel(host, CTRL, temp);
931 /* Disable RX/TX IRQs, let DMA handle it */
932 spin_lock_irqsave(&host->slock, flags);
933 temp = mci_readl(host, INTMASK);
934 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
935 mci_writel(host, INTMASK, temp);
936 spin_unlock_irqrestore(&host->slock, flags);
938 host->dma_ops->start(host, sg_len);
/*
 * Submit a data transfer: set direction status and read threshold, try
 * DMA, and on failure fall back to PIO (sg_miter, RX/TX interrupts
 * unmasked, DMA disabled in CTRL, initial FIFOTH restored). prev_blksz
 * tracks whether FIFOTH must be retuned for the next DMA transfer.
 */
943 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
948 data->error = -EINPROGRESS;
955 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
957 if (data->flags & MMC_DATA_READ) {
958 host->dir_status = DW_MCI_RECV_STATUS;
959 dw_mci_ctrl_rd_thld(host, data);
961 host->dir_status = DW_MCI_SEND_STATUS;
964 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
965 data->blocks, data->blksz, mmc_hostname(host->mmc));
967 if (dw_mci_submit_data_dma(host, data)) {
968 int flags = SG_MITER_ATOMIC;
969 if (host->data->flags & MMC_DATA_READ)
970 flags |= SG_MITER_TO_SG;
972 flags |= SG_MITER_FROM_SG;
974 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
976 host->part_buf_start = 0;
977 host->part_buf_count = 0;
979 spin_lock_irqsave(&host->slock, flag);
980 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
981 temp = mci_readl(host, INTMASK);
982 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
983 mci_writel(host, INTMASK, temp);
984 spin_unlock_irqrestore(&host->slock, flag);
986 temp = mci_readl(host, CTRL);
987 temp &= ~SDMMC_CTRL_DMA_ENABLE;
988 mci_writel(host, CTRL, temp);
991 * Use the initial fifoth_val for PIO mode.
992 * If next issued data may be transfered by DMA mode,
993 * prev_blksz should be invalidated.
995 mci_writel(host, FIFOTH, host->fifoth_val);
996 host->prev_blksz = 0;
999 * Keep the current block size.
1000 * It will be used to decide whether to update
1001 * fifoth register next time.
1003 host->prev_blksz = data->blksz;
/*
 * Send an internal controller command (e.g. clock update): wait for the
 * card/controller to go un-busy (bounded by SDMMC_WAIT_FOR_UNBUSY ms),
 * write ARG + CMD with the START bit, then poll until the controller
 * clears START (shorter deadline for UPD_CLK). Logs on either timeout.
 */
1007 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1009 struct dw_mci *host = slot->host;
1010 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1011 unsigned int cmd_status = 0;
1012 #ifdef SDMMC_WAIT_FOR_UNBUSY
1014 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1016 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1018 ret = time_before(jiffies, timeout);
1019 cmd_status = mci_readl(host, STATUS);
1020 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1024 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1025 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1028 mci_writel(host, CMDARG, arg);
1030 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
1031 if(cmd & SDMMC_CMD_UPD_CLK)
1032 timeout = jiffies + msecs_to_jiffies(50);
1034 timeout = jiffies + msecs_to_jiffies(500);
1035 while (time_before(jiffies, timeout)) {
1036 cmd_status = mci_readl(host, CMD);
1037 if (!(cmd_status & SDMMC_CMD_START))
1040 dev_err(&slot->mmc->class_dev,
1041 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1042 cmd, arg, cmd_status);
/*
 * Program the card clock and bus width for a slot. Disables the clock
 * for clock==0; otherwise computes CLKDIV from bus_hz, applies several
 * Rockchip-specific clk_mmc workarounds (slow init clock, div>1 and
 * HS-DDR cases — see the long in-line BUG note below), issues UPD_CLK
 * commands around each register change, and re-enables the clock with
 * low-power gating unless SDIO interrupts are in use. Finally writes
 * the slot's bus width to CTYPE.
 * NOTE(review): fragment — several closing braces, the div variable
 * declaration and some MMC_DBG/mci_send_cmd lead-ins are not visible.
 */
1045 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1047 struct dw_mci *host = slot->host;
1048 unsigned int tempck,clock = slot->clock;
1053 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1054 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1057 mci_writel(host, CLKENA, 0);
1058 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1059 if(host->svi_flags == 0)
1060 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1062 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1064 } else if (clock != host->current_speed || force_clkinit) {
1065 div = host->bus_hz / clock;
1066 if (host->bus_hz % clock && host->bus_hz > clock)
1068 * move the + 1 after the divide to prevent
1069 * over-clocking the card.
1073 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1075 if ((clock << div) != slot->__clk_old || force_clkinit) {
1076 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1077 dev_info(&slot->mmc->class_dev,
1078 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1079 slot->id, host->bus_hz, clock,
1082 host->set_speed = tempck;
1083 host->set_div = div;
1087 mci_writel(host, CLKENA, 0);
1088 mci_writel(host, CLKSRC, 0);
1092 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1094 if(clock <= 400*1000){
1095 MMC_DBG_BOOT_FUNC(host->mmc,
1096 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1097 clock * 2, mmc_hostname(host->mmc));
1098 /* clk_mmc will change parents to 24MHz xtal*/
1099 clk_set_rate(host->clk_mmc, clock * 2);
1102 host->set_div = div;
1106 MMC_DBG_BOOT_FUNC(host->mmc,
1107 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1108 mmc_hostname(host->mmc));
1111 MMC_DBG_ERR_FUNC(host->mmc,
1112 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1113 mmc_hostname(host->mmc));
1115 host->set_div = div;
1116 host->bus_hz = host->set_speed * 2;
1117 MMC_DBG_BOOT_FUNC(host->mmc,
1118 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1119 div, host->bus_hz, mmc_hostname(host->mmc));
1121 /* BUG may be here, come on, Linux BSP engineer looks!
1122 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1123 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1124 some oops happened like that:
1125 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1126 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1127 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1128 mmc0: new high speed DDR MMC card at address 0001
1129 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1131 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1132 mmcblk0: retrying using single block read
1133 mmcblk0: error -110 sending status command, retrying
1135 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1138 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1139 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1142 host->set_div = div;
1143 host->bus_hz = host->set_speed * 2;
1144 MMC_DBG_BOOT_FUNC(host->mmc,
1145 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1146 div, host->bus_hz, mmc_hostname(host->mmc));
1149 if (host->verid < DW_MMC_240A)
1150 clk_set_rate(host->clk_mmc,(host->bus_hz));
1152 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1158 /* set clock to desired speed */
1159 mci_writel(host, CLKDIV, div);
1163 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1165 /* enable clock; only low power if no SDIO */
1166 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1168 if (host->verid < DW_MMC_240A)
1169 sdio_int = SDMMC_INT_SDIO(slot->id);
1171 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1173 if (!(mci_readl(host, INTMASK) & sdio_int))
1174 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1175 mci_writel(host, CLKENA, clk_en_a);
1179 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1180 /* keep the clock with reflecting clock dividor */
1181 slot->__clk_old = clock << div;
1184 host->current_speed = clock;
1186 if(slot->ctype != slot->pre_ctype)
1187 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1189 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1190 mmc_hostname(host->mmc));
1191 slot->pre_ctype = slot->ctype;
1193 /* Set the current slot bus width */
1194 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1197 extern struct mmc_card *this_card;
1198 static void dw_mci_wait_unbusy(struct dw_mci *host)
1201 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1202 unsigned long time_loop;
1203 unsigned int status;
1206 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1208 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1209 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1210 /* Special care for (secure)erase timeout calculation */
1212 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1215 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1216 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1217 300000 * (this_card->ext_csd.sec_erase_mult)) :
1218 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1222 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1223 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1224 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1225 timeout = SDMMC_DATA_TIMEOUT_SD;
1228 time_loop = jiffies + msecs_to_jiffies(timeout);
1230 status = mci_readl(host, STATUS);
1231 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1233 } while (time_before(jiffies, time_loop));
1238 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1241 * 0--status is busy.
1242 * 1--status is unbusy.
/*
 * mmc_host_ops.card_busy hook for the voltage-switch sequence.
 * Per the header comment above: 0 = busy, 1 = not busy. The svi_flags
 * toggle tracks which half of the CMD11 voltage-switch handshake we are
 * in (fragment hides the surrounding condition — confirm against the
 * full driver).
 */
1244 int dw_mci_card_busy(struct mmc_host *mmc)
1246 struct dw_mci_slot *slot = mmc_priv(mmc);
1247 struct dw_mci *host = slot->host;
1249 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1250 host->svi_flags, mmc_hostname(host->mmc));
1253 if(host->svi_flags == 0){
1255 host->svi_flags = 1;
1256 return host->svi_flags;
1259 host->svi_flags = 0;
1260 return host->svi_flags;
/*
 * __dw_mci_start_request - program the controller for one request and
 * issue its (first) command.  Caller holds host->lock.
 *
 * Waits for the controller to go un-busy, resets per-request event
 * state, programs byte/block counts for data transfers, submits the
 * data for DMA/PIO, then starts @cmd.  If the request has a stop
 * command, its CMD register value is precomputed into host->stop_cmdr.
 */
1266 static void __dw_mci_start_request(struct dw_mci *host,
1267 struct dw_mci_slot *slot,
1268 struct mmc_command *cmd)
1270 struct mmc_request *mrq;
1271 struct mmc_data *data;
1275 if (host->pdata->select_slot)
1276 host->pdata->select_slot(slot->id);
1278 host->cur_slot = slot;
/* Do not start a new command while the previous one is still busy */
1281 dw_mci_wait_unbusy(host);
1283 host->pending_events = 0;
1284 host->completed_events = 0;
1285 host->data_status = 0;
1289 dw_mci_set_timeout(host);
1290 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1291 mci_writel(host, BLKSIZ, data->blksz);
1294 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1296 /* this is the first command, send the initialization clock */
1297 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1298 cmdflags |= SDMMC_CMD_INIT;
1301 dw_mci_submit_data(host, data);
1305 dw_mci_start_command(host, cmd, cmdflags);
/* Precompute the stop command so the ISR can issue it without work */
1308 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * dw_mci_start_request - kick off slot->mrq.  If the request carries a
 * set-block-count command (sbc), that is sent first; otherwise the main
 * command is issued directly.  Caller holds host->lock.
 */
1311 static void dw_mci_start_request(struct dw_mci *host,
1312 struct dw_mci_slot *slot)
1314 struct mmc_request *mrq = slot->mrq;
1315 struct mmc_command *cmd;
1317 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1318 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
/* CMD23 (sbc) precedes the data command when present */
1320 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1321 __dw_mci_start_request(host, slot, cmd);
1324 /* must be called with host->lock held */
/*
 * dw_mci_queue_request - start @mrq immediately if the host is idle,
 * otherwise append the slot to host->queue for later dispatch by
 * dw_mci_request_end().
 */
1325 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1326 struct mmc_request *mrq)
1328 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1333 if (host->state == STATE_IDLE) {
1334 host->state = STATE_SENDING_CMD;
1335 dw_mci_start_request(host, slot);
1337 list_add_tail(&slot->queue_node, &host->queue);
/*
 * dw_mci_request - mmc_host_ops .request entry point.  Fails the
 * request with -ENOMEDIUM if no card is present, otherwise queues it
 * under host->lock.
 */
1341 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1343 struct dw_mci_slot *slot = mmc_priv(mmc);
1344 struct dw_mci *host = slot->host;
1349 * The check for card presence and queueing of the request must be
1350 * atomic, otherwise the card could be removed in between and the
1351 * request wouldn't fail until another card was inserted.
1353 spin_lock_bh(&host->lock);
1355 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1356 spin_unlock_bh(&host->lock);
1357 mrq->cmd->error = -ENOMEDIUM;
1358 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1359 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
/* complete immediately — lock already dropped above */
1361 mmc_request_done(mmc, mrq);
1365 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1366 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1368 dw_mci_queue_request(host, slot, mrq);
1370 spin_unlock_bh(&host->lock);
/*
 * dw_mci_set_ios - mmc_host_ops .set_ios: apply bus width, timing,
 * clock and power-mode changes requested by the mmc core.
 *
 * First (optionally) waits for the controller to go un-busy, then
 * programs CTYPE/UHS_REG, records the requested clock, lets the SoC
 * glue adjust itself via drv_data->set_ios, reconfigures the bus, and
 * finally handles power up/down through pdata->setpower and PWREN.
 */
1373 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1375 struct dw_mci_slot *slot = mmc_priv(mmc);
1376 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1377 struct dw_mci *host = slot->host;
1379 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1382 #ifdef SDMMC_WAIT_FOR_UNBUSY
1383 unsigned long time_loop;
/* during a voltage switch allow the longer SD data timeout */
1386 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1387 if(host->svi_flags == 1)
1388 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1390 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1392 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1395 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1396 printk("%d..%s: no card. [%s]\n", \
1397 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* poll STATUS until both busy bits clear or the deadline passes */
1402 ret = time_before(jiffies, time_loop);
1403 regs = mci_readl(slot->host, STATUS);
1404 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1410 printk("slot->flags = %lu ", slot->flags);
1411 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1412 if(host->svi_flags != 1)
1415 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1416 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* translate mmc core bus width into controller CTYPE encoding */
1420 switch (ios->bus_width) {
1421 case MMC_BUS_WIDTH_4:
1422 slot->ctype = SDMMC_CTYPE_4BIT;
1424 case MMC_BUS_WIDTH_8:
1425 slot->ctype = SDMMC_CTYPE_8BIT;
1428 /* set default 1 bit mode */
1429 slot->ctype = SDMMC_CTYPE_1BIT;
1430 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode is a per-slot bit in the upper half of UHS_REG */
1433 regs = mci_readl(slot->host, UHS_REG);
1436 if (ios->timing == MMC_TIMING_UHS_DDR50)
1437 regs |= ((0x1 << slot->id) << 16);
1439 regs &= ~((0x1 << slot->id) << 16);
1441 mci_writel(slot->host, UHS_REG, regs);
1442 slot->host->timing = ios->timing;
1445 * Use mirror of ios->clock to prevent race with mmc
1446 * core ios update when finding the minimum.
1448 slot->clock = ios->clock;
/* SoC-specific hook (clock/phase tuning etc.) */
1450 if (drv_data && drv_data->set_ios)
1451 drv_data->set_ios(slot->host, ios);
1453 /* Slot specific timing and width adjustment */
1454 dw_mci_setup_bus(slot, false);
1458 switch (ios->power_mode) {
/* power-up: request the init clock on the next command */
1460 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1462 if (slot->host->pdata->setpower)
1463 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1464 regs = mci_readl(slot->host, PWREN);
1465 regs |= (1 << slot->id);
1466 mci_writel(slot->host, PWREN, regs);
1469 /* Power down slot */
1470 if(slot->host->pdata->setpower)
1471 slot->host->pdata->setpower(slot->id, 0);
1472 regs = mci_readl(slot->host, PWREN);
1473 regs &= ~(1 << slot->id);
1474 mci_writel(slot->host, PWREN, regs);
/*
 * dw_mci_get_ro - mmc_host_ops .get_ro: report write-protect state.
 * Priority: slot quirk (never protected) > board callback > WP GPIO >
 * controller WRTPRT register.  Returns 1 for read-only, 0 otherwise.
 */
1481 static int dw_mci_get_ro(struct mmc_host *mmc)
1484 struct dw_mci_slot *slot = mmc_priv(mmc);
1485 struct dw_mci_board *brd = slot->host->pdata;
1487 /* Use platform get_ro function, else try on board write protect */
1488 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1490 else if(brd->get_ro)
1491 read_only = brd->get_ro(slot->id);
1492 else if(gpio_is_valid(slot->wp_gpio))
1493 read_only = gpio_get_value(slot->wp_gpio);
1496 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1498 dev_dbg(&mmc->class_dev, "card is %s\n",
1499 read_only ? "read-only" : "read-write");
/*
 * dw_mci_set_sdio_status - software card-detect for SDIO slots (e.g.
 * WiFi modules toggled by rfkill).  Updates the CARD_PRESENT flag,
 * gates the controller clocks accordingly, and schedules a rescan.
 */
1504 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1506 struct dw_mci_slot *slot = mmc_priv(mmc);
1507 struct dw_mci *host = slot->host;
1508 /*struct dw_mci_board *brd = slot->host->pdata;*/
/* only meaningful for SDIO-restricted hosts */
1510 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1513 spin_lock_bh(&host->lock);
1516 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1518 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1520 spin_unlock_bh(&host->lock);
/* enable clocks on insert, release them on removal */
1522 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1523 if(__clk_is_enabled(host->hclk_mmc) == false)
1524 clk_prepare_enable(host->hclk_mmc);
1525 if(__clk_is_enabled(host->clk_mmc) == false)
1526 clk_prepare_enable(host->clk_mmc);
1528 if(__clk_is_enabled(host->clk_mmc) == true)
1529 clk_disable_unprepare(slot->host->clk_mmc);
1530 if(__clk_is_enabled(host->hclk_mmc) == true)
1531 clk_disable_unprepare(slot->host->hclk_mmc);
/* let the mmc core rescan the bus shortly after the state change */
1534 mmc_detect_change(slot->mmc, 20);
/*
 * dw_mci_get_cd - mmc_host_ops .get_cd: report card presence.
 *
 * On RK3126/RK3036 SD slots the CD GPIO doubles as a JTAG mux control
 * (force_jtag in GRF), so detection also retoggles the IRQ edge and the
 * GRF mux.  Otherwise presence comes from (in priority order) the SDIO
 * software flag, a board quirk/callback, a CD GPIO, or CDETECT.
 */
1540 static int dw_mci_get_cd(struct mmc_host *mmc)
1543 struct dw_mci_slot *slot = mmc_priv(mmc);
1544 struct dw_mci_board *brd = slot->host->pdata;
1545 struct dw_mci *host = slot->host;
1546 int gpio_cd = mmc_gpio_get_cd(mmc);
1550 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
1551 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1552 gpio_cd = slot->cd_gpio;
1553 irq = gpio_to_irq(gpio_cd);
1554 if (gpio_is_valid(gpio_cd)) {
1555 gpio_val = gpio_get_value(gpio_cd);
/* debounce: only trust a level that reads the same twice */
1557 if (gpio_val == gpio_get_value(gpio_cd)) {
1558 gpio_cd = gpio_get_value(gpio_cd) == 0 ? 1 : 0;
1560 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1561 /* Enable force_jtag without card in slot, ONLY for NCD-package */
1562 grf_writel((0x1 << 24) | (1 << 8), RK312X_GRF_SOC_CON0);
1563 dw_mci_ctrl_all_reset(host);
1565 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
1566 /* Really card detected: SHOULD disable force_jtag */
1567 grf_writel((0x1 << 24) | (0 << 8), RK312X_GRF_SOC_CON0);
/* bouncing read: re-arm the opposite edge, keep previous state */
1571 gpio_val = gpio_get_value(gpio_cd);
1573 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
1574 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1575 return slot->last_detect_state;
1578 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
/* SDIO presence is driven purely by dw_mci_set_sdio_status() */
1582 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1583 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1585 /* Use platform get_cd function, else try onboard card detect */
1586 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1588 else if (brd->get_cd)
1589 present = !brd->get_cd(slot->id);
1590 else if (!IS_ERR_VALUE(gpio_cd))
1593 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1596 spin_lock_bh(&host->lock);
1598 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1599 dev_dbg(&mmc->class_dev, "card is present\n");
1601 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1602 dev_dbg(&mmc->class_dev, "card is not present\n");
1604 spin_unlock_bh(&host->lock);
1611 * Dts Should caps emmc controller with poll-hw-reset
/*
 * dw_mci_hw_reset - mmc_host_ops .hw_reset for eMMC.
 *
 * Sequence: (1) issue CMD12 to abort any transfer in flight, (2) wait
 * for DTO so the data path drains, (3) reset IDMAC/DMA/FIFO in that
 * exact order, (4) pulse RST_n with eMMC-spec compliant timing.
 */
1613 static void dw_mci_hw_reset(struct mmc_host *mmc)
1615 struct dw_mci_slot *slot = mmc_priv(mmc);
1616 struct dw_mci *host = slot->host;
1621 unsigned long timeout;
1624 /* (1) CMD12 to end any transfer in process */
1625 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1626 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1628 if(host->mmc->hold_reg_flag)
1629 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1630 mci_writel(host, CMDARG, 0);
1632 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* poll CMD_START auto-clear for up to 500 ms */
1634 timeout = jiffies + msecs_to_jiffies(500);
1636 ret = time_before(jiffies, timeout);
1637 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1642 MMC_DBG_ERR_FUNC(host->mmc,
1643 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1644 __func__, mmc_hostname(host->mmc));
1646 /* (2) wait DTO, even if no response is sent back by card */
1648 timeout = jiffies + msecs_to_jiffies(5);
1650 ret = time_before(jiffies, timeout);
1651 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1652 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1658 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1660 /* Software reset - BMOD[0] for IDMA only */
1661 regs = mci_readl(host, BMOD);
1662 regs |= SDMMC_IDMAC_SWRESET;
1663 mci_writel(host, BMOD, regs);
1664 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1665 regs = mci_readl(host, BMOD);
1666 if(regs & SDMMC_IDMAC_SWRESET)
1667 MMC_DBG_WARN_FUNC(host->mmc,
1668 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1669 __func__, mmc_hostname(host->mmc));
1671 /* DMA reset - CTRL[2] */
1672 regs = mci_readl(host, CTRL);
1673 regs |= SDMMC_CTRL_DMA_RESET;
1674 mci_writel(host, CTRL, regs);
1675 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1676 regs = mci_readl(host, CTRL);
1677 if(regs & SDMMC_CTRL_DMA_RESET)
1678 MMC_DBG_WARN_FUNC(host->mmc,
1679 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1680 __func__, mmc_hostname(host->mmc));
1682 /* FIFO reset - CTRL[1] */
1683 regs = mci_readl(host, CTRL);
1684 regs |= SDMMC_CTRL_FIFO_RESET;
1685 mci_writel(host, CTRL, regs);
1686 mdelay(1); /* no timing limited, 1ms is random value */
1687 regs = mci_readl(host, CTRL);
1688 if(regs & SDMMC_CTRL_FIFO_RESET)
1689 MMC_DBG_WARN_FUNC(host->mmc,
/* BUG FIX: message said DMA_RESET (copy-paste); this is the FIFO reset */
1690 "%s dw_mci_hw_reset: SDMMC_CTRL_FIFO_RESET failed!!! [%s]\n",
1691 __func__, mmc_hostname(host->mmc));
1694 According to eMMC spec
1695 tRstW >= 1us ; RST_n pulse width
1696 tRSCA >= 200us ; RST_n to Command time
1697 tRSTH >= 1us ; RST_n high period
1699 mci_writel(slot->host, PWREN, 0x0);
1700 mci_writel(slot->host, RST_N, 0x0);
1702 udelay(10); /* 10us for bad quality eMMc. */
1704 mci_writel(slot->host, PWREN, 0x1);
1705 mci_writel(slot->host, RST_N, 0x1);
1707 usleep_range(500, 1000); /* at least 500(> 200us) */
1711 * Disable lower power mode.
1713 * Low power mode will stop the card clock when idle. According to the
1714 * description of the CLKENA register we should disable low power mode
1715 * for SDIO cards if we need SDIO interrupts to work.
1717 * This function is fast if low power mode is already disabled.
1719 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1721 struct dw_mci *host = slot->host;
1723 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1725 clk_en_a = mci_readl(host, CLKENA);
/* only touch the register (and issue UPD_CLK) if the bit is set */
1727 if (clk_en_a & clken_low_pwr) {
1728 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
/* clock register changes must be latched via an update-clock command */
1729 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1730 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * dw_mci_enable_sdio_irq - mmc_host_ops .enable_sdio_irq: mask or
 * unmask this slot's SDIO interrupt in INTMASK.  When enabling, low
 * power mode is also turned off so the card clock keeps running and
 * the card can actually raise the interrupt.
 */
1734 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1736 struct dw_mci_slot *slot = mmc_priv(mmc);
1737 struct dw_mci *host = slot->host;
1738 unsigned long flags;
1742 spin_lock_irqsave(&host->slock, flags);
1744 /* Enable/disable Slot Specific SDIO interrupt */
1745 int_mask = mci_readl(host, INTMASK);
/* SDIO interrupt bit position moved in IP version 2.40a */
1747 if (host->verid < DW_MMC_240A)
1748 sdio_int = SDMMC_INT_SDIO(slot->id);
1750 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1754 * Turn off low power mode if it was enabled. This is a bit of
1755 * a heavy operation and we disable / enable IRQs a lot, so
1756 * we'll leave low power mode disabled and it will get
1757 * re-enabled again in dw_mci_setup_bus().
1759 dw_mci_disable_low_power(slot);
1761 mci_writel(host, INTMASK,
1762 (int_mask | sdio_int));
1764 mci_writel(host, INTMASK,
1765 (int_mask & ~sdio_int));
1768 spin_unlock_irqrestore(&host->slock, flags);
1771 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* IO domain voltages in mV used for the GRF vsel programming below */
1773 IO_DOMAIN_12 = 1200,
1774 IO_DOMAIN_18 = 1800,
1775 IO_DOMAIN_33 = 3300,
/*
 * dw_mci_do_grf_io_domain_switch - program the SoC GRF so the SD IO
 * domain matches @voltage.  Only RK3288 SD slots are handled; other
 * chips/voltages log an error and do nothing.
 */
1777 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1787 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1788 __FUNCTION__, mmc_hostname(host->mmc));
1791 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1792 __FUNCTION__, mmc_hostname(host->mmc));
1796 if(cpu_is_rk3288()){
1797 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
/* bit23 is the write-enable mask for the vsel bit (GRF convention) */
1798 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1802 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1803 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * dw_mci_do_start_signal_voltage_switch - switch the signalling level
 * (3.3V / 1.8V / 1.2V) for UHS modes.
 *
 * For each target: set the vmmc regulator, reprogram the GRF IO domain,
 * update SDMMC_UHS_VOLT_REG_18 in UHS_REG, then wait ~5 ms and verify
 * the register reflects the requested level.  Only supported on IP
 * version >= 2.40a.
 */
1807 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1808 struct mmc_ios *ios)
1811 unsigned int value,uhs_reg;
1814 * Signal Voltage Switching is only applicable for Host Controllers
1817 if (host->verid < DW_MMC_240A)
1820 uhs_reg = mci_readl(host, UHS_REG);
1821 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1822 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1824 switch (ios->signal_voltage) {
1825 case MMC_SIGNAL_VOLTAGE_330:
1826 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1828 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1829 /* regulator_put(host->vmmc); //to be done in remove function. */
1831 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1832 __func__, regulator_get_voltage(host->vmmc), ret);
1834 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1835 " failed\n", mmc_hostname(host->mmc));
1838 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1840 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1841 __FUNCTION__, mmc_hostname(host->mmc));
1843 /* set High-power mode */
1844 value = mci_readl(host, CLKENA);
1845 value &= ~SDMMC_CLKEN_LOW_PWR;
1846 mci_writel(host,CLKENA , value);
/* clear the 1.8V enable bit => controller back to 3.3V signalling */
1848 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1849 mci_writel(host,UHS_REG , uhs_reg);
1852 usleep_range(5000, 5500);
1854 /* 3.3V regulator output should be stable within 5 ms */
1855 uhs_reg = mci_readl(host, UHS_REG);
1856 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1859 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1860 mmc_hostname(host->mmc));
1863 case MMC_SIGNAL_VOLTAGE_180:
1865 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1866 /* regulator_put(host->vmmc);//to be done in remove function. */
1868 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1869 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1871 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1872 " failed\n", mmc_hostname(host->mmc));
1875 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1879 * Enable 1.8V Signal Enable in the Host Control2
1882 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1885 usleep_range(5000, 5500);
1886 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1887 __FUNCTION__,mmc_hostname(host->mmc));
1889 /* 1.8V regulator output should be stable within 5 ms */
1890 uhs_reg = mci_readl(host, UHS_REG);
1891 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1894 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1895 mmc_hostname(host->mmc));
1898 case MMC_SIGNAL_VOLTAGE_120:
/* 1.2V: regulator only — no UHS_REG bit exists for this level */
1900 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1902 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1903 " failed\n", mmc_hostname(host->mmc));
1909 /* No signal voltage switch required */
/*
 * dw_mci_start_signal_voltage_switch - mmc_host_ops wrapper that
 * forwards to dw_mci_do_start_signal_voltage_switch() on IP >= 2.40a.
 */
1915 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1916 struct mmc_ios *ios)
1918 struct dw_mci_slot *slot = mmc_priv(mmc);
1919 struct dw_mci *host = slot->host;
1922 if (host->verid < DW_MMC_240A)
1925 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * dw_mci_execute_tuning - mmc_host_ops .execute_tuning: select the
 * tuning block pattern for CMD19/CMD21, fill in the SoC tuning
 * parameters, and delegate to drv_data->execute_tuning.
 */
1931 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1933 struct dw_mci_slot *slot = mmc_priv(mmc);
1934 struct dw_mci *host = slot->host;
1935 const struct dw_mci_drv_data *drv_data = host->drv_data;
1936 struct dw_mci_tuning_data tuning_data;
1939 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1940 if(cpu_is_rk3036() || cpu_is_rk312x())
/* CMD21 (HS200): pattern width follows the configured bus width */
1943 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1944 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1945 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1946 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1947 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1948 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1949 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
/* CMD19 (SD tuning) always uses the 4-bit pattern */
1953 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1954 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1955 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1958 "Undefined command(%d) for tuning\n", opcode);
1963 /* Recommend sample phase and delayline
1964 Fixme: Mix-use these three controllers will cause
/* clock controller connection id per controller type */
1967 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1968 tuning_data.con_id = 3;
1969 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1970 tuning_data.con_id = 1;
1972 tuning_data.con_id = 0;
1974 /* 0: driver, from host->devices
1975 1: sample, from devices->host
1977 tuning_data.tuning_type = 1;
1979 if (drv_data && drv_data->execute_tuning)
1980 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/* mmc_host_ops table: glue between the mmc core and this driver */
1985 static const struct mmc_host_ops dw_mci_ops = {
1986 .request = dw_mci_request,
1987 .pre_req = dw_mci_pre_req,
1988 .post_req = dw_mci_post_req,
1989 .set_ios = dw_mci_set_ios,
1990 .get_ro = dw_mci_get_ro,
1991 .get_cd = dw_mci_get_cd,
1992 .set_sdio_status = dw_mci_set_sdio_status,
1993 .hw_reset = dw_mci_hw_reset,
1994 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1995 .execute_tuning = dw_mci_execute_tuning,
1996 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1997 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
1998 .card_busy = dw_mci_card_busy,
/*
 * dw_mci_enable_irq - enable/disable the host IRQ line, tracking the
 * current state in host->irq_state so enable/disable calls are never
 * unbalanced.  Runs with local interrupts disabled.
 */
2003 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
2005 unsigned long flags;
2010 local_irq_save(flags);
2011 if(host->irq_state != irqflag)
2013 host->irq_state = irqflag;
2016 enable_irq(host->irq);
2020 disable_irq(host->irq);
2023 local_irq_restore(flags);
/*
 * dw_mci_deal_data_end - post-transfer handling for the write path:
 * map lingering DCRC/EBE status into data->error (except for the
 * CMD19 bus-test command) and wait for the controller to go un-busy.
 */
2027 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2028 __releases(&host->lock)
2029 __acquires(&host->lock)
2031 if(DW_MCI_SEND_STATUS == host->dir_status){
2033 if( MMC_BUS_TEST_W != host->cmd->opcode){
2034 if(host->data_status & SDMMC_INT_DCRC)
2035 host->data->error = -EILSEQ;
2036 else if(host->data_status & SDMMC_INT_EBE)
/* write end-bit error: no CRC status came back from the card */
2037 host->data->error = -ETIMEDOUT;
2039 dw_mci_wait_unbusy(host);
2042 dw_mci_wait_unbusy(host);
/*
 * dw_mci_request_end - finish @mrq: stop the DTO timer, finalize data
 * status, then either dispatch the next queued slot or go idle.
 * Temporarily drops host->lock around mmc_request_done().
 */
2047 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2048 __releases(&host->lock)
2049 __acquires(&host->lock)
2051 struct dw_mci_slot *slot;
2052 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2054 WARN_ON(host->cmd || host->data);
2056 del_timer_sync(&host->dto_timer);
2057 dw_mci_deal_data_end(host, mrq);
2060 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2061 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2063 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2064 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2066 host->cur_slot->mrq = NULL;
/* start the next pending slot, if any, before reporting completion */
2068 if (!list_empty(&host->queue)) {
2069 slot = list_entry(host->queue.next,
2070 struct dw_mci_slot, queue_node);
2071 list_del(&slot->queue_node);
2072 dev_vdbg(host->dev, "list not empty: %s is next\n",
2073 mmc_hostname(slot->mmc));
2074 host->state = STATE_SENDING_CMD;
2075 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2076 dw_mci_start_request(host, slot);
2078 dev_vdbg(host->dev, "list empty\n");
2079 host->state = STATE_IDLE;
/* mmc_request_done may re-enter .request, so drop the lock */
2082 spin_unlock(&host->lock);
2083 mmc_request_done(prev_mmc, mrq);
2084 spin_lock(&host->lock);
/*
 * dw_mci_command_complete - read back the command response and map the
 * latched cmd_status interrupt bits into cmd->error (-ETIMEDOUT for
 * RTO, -EILSEQ for response CRC, etc.).  The DTO timer is cancelled on
 * every error path since no data transfer will follow.
 */
2087 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2089 u32 status = host->cmd_status;
2091 host->cmd_status = 0;
2093 /* Read the response from the card (up to 16 bytes) */
2094 if (cmd->flags & MMC_RSP_PRESENT) {
2095 if (cmd->flags & MMC_RSP_136) {
/* long (R2) response: registers hold it in reverse word order */
2096 cmd->resp[3] = mci_readl(host, RESP0);
2097 cmd->resp[2] = mci_readl(host, RESP1);
2098 cmd->resp[1] = mci_readl(host, RESP2);
2099 cmd->resp[0] = mci_readl(host, RESP3);
2101 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2102 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2104 cmd->resp[0] = mci_readl(host, RESP0);
2108 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2109 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
2113 if (status & SDMMC_INT_RTO)
2115 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2118 cmd->error = -ETIMEDOUT;
2119 del_timer_sync(&host->dto_timer);
2120 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2121 del_timer_sync(&host->dto_timer);
2122 cmd->error = -EILSEQ;
2123 }else if (status & SDMMC_INT_RESP_ERR){
2124 del_timer_sync(&host->dto_timer);
2129 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2130 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2133 del_timer_sync(&host->dto_timer);
/* suppress repeated RTO logging for polled CMD13 (send status) */
2134 if(MMC_SEND_STATUS != cmd->opcode)
2135 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2136 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2137 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2141 /* newer ip versions need a delay between retries */
2142 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * dw_mci_tasklet_func - bottom-half request state machine.
 *
 * Driven by event bits set from the interrupt handler
 * (host->pending_events); loops through
 * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP /
 * DATA_ERROR until the state stops changing, completing or aborting
 * the current mrq as it goes.  Runs under host->lock.
 */
2148 static void dw_mci_tasklet_func(unsigned long priv)
2150 struct dw_mci *host = (struct dw_mci *)priv;
2151 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2152 struct mmc_data *data;
2153 struct mmc_command *cmd;
2154 enum dw_mci_state state;
2155 enum dw_mci_state prev_state;
2156 u32 status, cmd_flags;
2157 unsigned long timeout = 0;
2160 spin_lock(&host->lock);
2162 state = host->state;
/* --- command phase: wait for EVENT_CMD_COMPLETE from the ISR --- */
2172 case STATE_SENDING_CMD:
2173 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2174 &host->pending_events))
2179 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2180 dw_mci_command_complete(host, cmd);
/* a successful CMD23 (sbc) is followed by the real data command */
2181 if (cmd == host->mrq->sbc && !cmd->error) {
2182 prev_state = state = STATE_SENDING_CMD;
2183 __dw_mci_start_request(host, host->cur_slot,
2188 if (cmd->data && cmd->error) {
2189 del_timer_sync(&host->dto_timer); /* delete the timer for INT_DTO */
2190 dw_mci_stop_dma(host);
2193 send_stop_cmd(host, data);
2194 state = STATE_SENDING_STOP;
2200 send_stop_abort(host, data);
2201 state = STATE_SENDING_STOP;
2204 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2207 if (!host->mrq->data || cmd->error) {
2208 dw_mci_request_end(host, host->mrq);
2212 prev_state = state = STATE_SENDING_DATA;
/* --- data phase: handle transfer errors, then wait for XFER done --- */
2215 case STATE_SENDING_DATA:
2216 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2217 dw_mci_stop_dma(host);
2220 send_stop_cmd(host, data);
2222 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2223 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2224 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
2226 mci_writel(host, CMDARG, 0);
2228 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2229 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2231 if(host->mmc->hold_reg_flag)
2232 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2234 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* poll CMD_START auto-clear for up to 500 ms */
2236 timeout = jiffies + msecs_to_jiffies(500);
2239 ret = time_before(jiffies, timeout);
2240 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2244 MMC_DBG_ERR_FUNC(host->mmc,
2245 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2246 __func__, mmc_hostname(host->mmc));
2249 send_stop_abort(host, data);
2251 state = STATE_DATA_ERROR;
2255 MMC_DBG_CMD_FUNC(host->mmc,
2256 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2257 prev_state,state, mmc_hostname(host->mmc));
2259 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2260 &host->pending_events))
2262 MMC_DBG_INFO_FUNC(host->mmc,
2263 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2264 prev_state,state,mmc_hostname(host->mmc));
2266 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2267 prev_state = state = STATE_DATA_BUSY;
/* --- busy phase: wait for DATA_COMPLETE, then decode data_status --- */
2270 case STATE_DATA_BUSY:
2271 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2272 &host->pending_events))
2275 dw_mci_deal_data_end(host, host->mrq);
2276 del_timer_sync(&host->dto_timer); //delete the timer for INT_DTO
2277 MMC_DBG_INFO_FUNC(host->mmc,
2278 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2279 prev_state,state,mmc_hostname(host->mmc));
2282 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2283 status = host->data_status;
2285 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2286 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2287 MMC_DBG_ERR_FUNC(host->mmc,
2288 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2289 prev_state,state, status, mmc_hostname(host->mmc));
/* map the latched data interrupt bits into data->error */
2291 if (status & SDMMC_INT_DRTO) {
2292 data->error = -ETIMEDOUT;
2293 } else if (status & SDMMC_INT_DCRC) {
2294 data->error = -EILSEQ;
2295 } else if (status & SDMMC_INT_EBE &&
2296 host->dir_status == DW_MCI_SEND_STATUS){
2298 * No data CRC status was returned.
2299 * The number of bytes transferred will
2300 * be exaggerated in PIO mode.
2302 data->bytes_xfered = 0;
2303 data->error = -ETIMEDOUT;
2312 * After an error, there may be data lingering
2313 * in the FIFO, so reset it - doing so
2314 * generates a block interrupt, hence setting
2315 * the scatter-gather pointer to NULL.
2317 dw_mci_fifo_reset(host);
2319 data->bytes_xfered = data->blocks * data->blksz;
2324 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2325 prev_state,state,mmc_hostname(host->mmc));
2326 dw_mci_request_end(host, host->mrq);
2329 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2330 prev_state,state,mmc_hostname(host->mmc));
/* with CMD23 (sbc) no explicit stop is sent on success */
2332 if (host->mrq->sbc && !data->error) {
2333 data->stop->error = 0;
2335 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2336 prev_state,state,mmc_hostname(host->mmc));
2338 dw_mci_request_end(host, host->mrq);
2342 prev_state = state = STATE_SENDING_STOP;
2344 send_stop_cmd(host, data);
2346 if (data->stop && !data->error) {
2347 /* stop command for open-ended transfer*/
2349 send_stop_abort(host, data);
2353 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2354 prev_state,state,mmc_hostname(host->mmc));
/* --- stop phase: wait for the stop command to complete --- */
2356 case STATE_SENDING_STOP:
2357 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2360 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2361 prev_state, state, mmc_hostname(host->mmc));
2363 /* CMD error in data command */
2364 if (host->mrq->cmd->error && host->mrq->data) {
2365 dw_mci_fifo_reset(host);
2371 dw_mci_command_complete(host, host->mrq->stop);
2373 if (host->mrq->stop)
2374 dw_mci_command_complete(host, host->mrq->stop);
2376 host->cmd_status = 0;
2379 dw_mci_request_end(host, host->mrq);
/* --- error phase: wait for XFER_COMPLETE then rejoin DATA_BUSY --- */
2382 case STATE_DATA_ERROR:
2383 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2384 &host->pending_events))
2387 state = STATE_DATA_BUSY;
2390 } while (state != prev_state);
2392 host->state = state;
2394 spin_unlock(&host->lock);
2398 /* push final bytes to part_buf, only use during push */
/* Stash @cnt leftover bytes (< one FIFO word) for the next push call. */
2399 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2401 memcpy((void *)&host->part_buf, buf, cnt);
2402 host->part_buf_count = cnt;
2405 /* append bytes to part_buf, only use during push */
/*
 * Append up to a full FIFO word's worth of @buf into part_buf; returns
 * the number of bytes actually consumed.
 */
2406 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
/* cap at the free space left in one FIFO word (1 << data_shift bytes) */
2408 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2409 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2410 host->part_buf_count += cnt;
2414 /* pull first bytes from part_buf, only use during pull */
/*
 * Drain up to @cnt previously-buffered bytes from part_buf into @buf;
 * returns how many bytes were copied.
 */
2415 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2417 cnt = min(cnt, (int)host->part_buf_count);
2419 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2421 host->part_buf_count -= cnt;
2422 host->part_buf_start += cnt;
2427 /* pull final bytes from the part_buf, assuming it's just been filled */
/* Copy the first @cnt bytes of a freshly-read FIFO word; the remainder
 * stays buffered for the next pull. */
2428 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2430 memcpy(buf, &host->part_buf, cnt);
2431 host->part_buf_start = cnt;
2432 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * dw_mci_push_data16 - PIO write path for a 16-bit wide FIFO: drain any
 * partial word first, stream aligned 16-bit words, bounce through a
 * stack buffer when @buf is misaligned, and stash trailing odd bytes
 * (flushing them if this is the end of the transfer).
 */
2435 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2437 struct mmc_data *data = host->data;
2440 /* try and push anything in the part_buf */
2441 if (unlikely(host->part_buf_count)) {
2442 int len = dw_mci_push_part_bytes(host, buf, cnt);
2445 if (host->part_buf_count == 2) {
2446 mci_writew(host, DATA(host->data_offset),
2448 host->part_buf_count = 0;
2451 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* misaligned source: copy through an aligned bounce buffer */
2452 if (unlikely((unsigned long)buf & 0x1)) {
2454 u16 aligned_buf[64];
2455 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2456 int items = len >> 1;
2458 /* memcpy from input buffer into aligned buffer */
2459 memcpy(aligned_buf, buf, len);
2462 /* push data from aligned buffer into fifo */
2463 for (i = 0; i < items; ++i)
2464 mci_writew(host, DATA(host->data_offset),
2471 for (; cnt >= 2; cnt -= 2)
2472 mci_writew(host, DATA(host->data_offset), *pdata++);
2475 /* put anything remaining in the part_buf */
2477 dw_mci_set_part_bytes(host, buf, cnt);
2478 /* Push data if we have reached the expected data length */
2479 if ((data->bytes_xfered + init_cnt) ==
2480 (data->blksz * data->blocks))
2481 mci_writew(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data16 - PIO read path for a 16-bit wide FIFO: stream
 * aligned 16-bit words (via a bounce buffer when @buf is misaligned)
 * and buffer a trailing odd byte in part_buf for the next call.
 */
2486 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2488 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2489 if (unlikely((unsigned long)buf & 0x1)) {
2491 /* pull data from fifo into aligned buffer */
2492 u16 aligned_buf[64];
2493 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2494 int items = len >> 1;
2496 for (i = 0; i < items; ++i)
2497 aligned_buf[i] = mci_readw(host,
2498 DATA(host->data_offset));
2499 /* memcpy from aligned buffer into output buffer */
2500 memcpy(buf, aligned_buf, len);
2508 for (; cnt >= 2; cnt -= 2)
2509 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* odd trailing byte: read a full word, hand back cnt bytes of it */
2513 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2514 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit data FIFO; same structure as the 16-bit
 * variant but in u32 units (partial word kept in host->part_buf and
 * flushed when the transfer's expected byte count is reached).
 */
2518 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2520 struct mmc_data *data = host->data;
2523 /* try and push anything in the part_buf */
2524 if (unlikely(host->part_buf_count)) {
2525 int len = dw_mci_push_part_bytes(host, buf, cnt);
2528 if (host->part_buf_count == 4) {
2529 mci_writel(host, DATA(host->data_offset),
2531 host->part_buf_count = 0;
2534 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2535 if (unlikely((unsigned long)buf & 0x3)) {
/* Unaligned source: bounce through an aligned stack buffer in chunks. */
2537 u32 aligned_buf[32];
2538 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2539 int items = len >> 2;
2541 /* memcpy from input buffer into aligned buffer */
2542 memcpy(aligned_buf, buf, len);
2545 /* push data from aligned buffer into fifo */
2546 for (i = 0; i < items; ++i)
2547 mci_writel(host, DATA(host->data_offset),
2554 for (; cnt >= 4; cnt -= 4)
2555 mci_writel(host, DATA(host->data_offset), *pdata++);
2558 /* put anything remaining in the part_buf */
2560 dw_mci_set_part_bytes(host, buf, cnt);
2561 /* Push data if we have reached the expected data length */
2562 if ((data->bytes_xfered + init_cnt) ==
2563 (data->blksz * data->blocks))
2564 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit FIFO; mirrors dw_mci_pull_data16() in u32
 * units, with the final partial word buffered in host->part_buf32.
 */
2569 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2571 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2572 if (unlikely((unsigned long)buf & 0x3)) {
2574 /* pull data from fifo into aligned buffer */
2575 u32 aligned_buf[32];
2576 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2577 int items = len >> 2;
2579 for (i = 0; i < items; ++i)
2580 aligned_buf[i] = mci_readl(host,
2581 DATA(host->data_offset));
2582 /* memcpy from aligned buffer into output buffer */
2583 memcpy(buf, aligned_buf, len);
2591 for (; cnt >= 4; cnt -= 4)
2592 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Buffer the last (partial) FIFO word, then copy out the leftover bytes. */
2596 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2597 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 64-bit data FIFO; same structure as the 16/32-bit
 * variants but in u64 units.
 */
2601 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2603 struct mmc_data *data = host->data;
2606 /* try and push anything in the part_buf */
2607 if (unlikely(host->part_buf_count)) {
2608 int len = dw_mci_push_part_bytes(host, buf, cnt);
2612 if (host->part_buf_count == 8) {
2613 mci_writeq(host, DATA(host->data_offset),
2615 host->part_buf_count = 0;
2618 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2619 if (unlikely((unsigned long)buf & 0x7)) {
/* Unaligned source: bounce through an aligned stack buffer in chunks. */
2621 u64 aligned_buf[16];
2622 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2623 int items = len >> 3;
2625 /* memcpy from input buffer into aligned buffer */
2626 memcpy(aligned_buf, buf, len);
2629 /* push data from aligned buffer into fifo */
2630 for (i = 0; i < items; ++i)
2631 mci_writeq(host, DATA(host->data_offset),
2638 for (; cnt >= 8; cnt -= 8)
2639 mci_writeq(host, DATA(host->data_offset), *pdata++);
2642 /* put anything remaining in the part_buf */
2644 dw_mci_set_part_bytes(host, buf, cnt);
2645 /* Push data if we have reached the expected data length */
2646 if ((data->bytes_xfered + init_cnt) ==
2647 (data->blksz * data->blocks))
2648 mci_writeq(host, DATA(host->data_offset),
/*
 * PIO pull for a 64-bit FIFO; mirrors the 16/32-bit pull paths in u64
 * units, with the final partial word buffered in host->part_buf.
 */
2653 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2655 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2656 if (unlikely((unsigned long)buf & 0x7)) {
2658 /* pull data from fifo into aligned buffer */
2659 u64 aligned_buf[16];
2660 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2661 int items = len >> 3;
2663 for (i = 0; i < items; ++i)
2664 aligned_buf[i] = mci_readq(host,
2665 DATA(host->data_offset));
2666 /* memcpy from aligned buffer into output buffer */
2667 memcpy(buf, aligned_buf, len);
2675 for (; cnt >= 8; cnt -= 8)
2676 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* Buffer the last (partial) FIFO word, then copy out the leftover bytes. */
2680 host->part_buf = mci_readq(host, DATA(host->data_offset));
2681 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Width-independent pull entry point: first consume any bytes buffered
 * in part_buf; if that satisfies the request, return early, otherwise
 * delegate the remainder to the FIFO-width-specific host->pull_data op.
 */
2685 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2689 /* get remaining partial bytes */
2690 len = dw_mci_pull_part_bytes(host, buf, cnt);
2691 if (unlikely(len == cnt))
2696 /* get the rest of the data */
2697 host->pull_data(host, buf, cnt);
/*
 * PIO read path: walk the request's scatterlist with an sg_miter and
 * drain the RX FIFO into each segment, re-reading while RXDR stays set
 * (and, on @dto, while the FIFO still holds data).  Marks
 * EVENT_XFER_COMPLETE when the scatterlist is exhausted.
 * NOTE(review): elided view — loop/brace structure between the numbered
 * lines is not fully visible here.
 */
2700 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2702 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2704 unsigned int offset;
2705 struct mmc_data *data = host->data;
2706 int shift = host->data_shift;
2709 unsigned int remain, fcnt;
/* Guard against servicing PIO after the bus reference is gone. */
2711 if(!host->mmc->bus_refs){
2712 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2716 if (!sg_miter_next(sg_miter))
2719 host->sg = sg_miter->piter.sg;
2720 buf = sg_miter->addr;
2721 remain = sg_miter->length;
/* Bytes currently available in the FIFO plus any buffered partial word. */
2725 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2726 << shift) + host->part_buf_count;
2727 len = min(remain, fcnt);
2730 dw_mci_pull_data(host, (void *)(buf + offset), len);
2731 data->bytes_xfered += len;
2736 sg_miter->consumed = offset;
2737 status = mci_readl(host, MINTSTS);
2738 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2739 /* if the RXDR is ready read again */
2740 } while ((status & SDMMC_INT_RXDR) ||
2741 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2744 if (!sg_miter_next(sg_miter))
2746 sg_miter->consumed = 0;
2748 sg_miter_stop(sg_miter);
2752 sg_miter_stop(sg_miter);
2756 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO write path: walk the scatterlist and push data into the TX FIFO
 * up to the free space available (fifo_depth minus current fill, minus
 * buffered partial bytes), repeating while TXDR stays asserted.  Marks
 * EVENT_XFER_COMPLETE when the scatterlist is exhausted.
 * NOTE(review): elided view — loop/brace structure between the numbered
 * lines is not fully visible here.
 */
2759 static void dw_mci_write_data_pio(struct dw_mci *host)
2761 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2763 unsigned int offset;
2764 struct mmc_data *data = host->data;
2765 int shift = host->data_shift;
2768 unsigned int fifo_depth = host->fifo_depth;
2769 unsigned int remain, fcnt;
/* Guard against servicing PIO after the bus reference is gone. */
2771 if(!host->mmc->bus_refs){
2772 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2777 if (!sg_miter_next(sg_miter))
2780 host->sg = sg_miter->piter.sg;
2781 buf = sg_miter->addr;
2782 remain = sg_miter->length;
/* Free FIFO space in bytes, less any partial word already buffered. */
2786 fcnt = ((fifo_depth -
2787 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2788 << shift) - host->part_buf_count;
2789 len = min(remain, fcnt);
2792 host->push_data(host, (void *)(buf + offset), len);
2793 data->bytes_xfered += len;
2798 sg_miter->consumed = offset;
2799 status = mci_readl(host, MINTSTS);
2800 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2801 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2804 if (!sg_miter_next(sg_miter))
2806 sg_miter->consumed = 0;
2808 sg_miter_stop(sg_miter);
2812 sg_miter_stop(sg_miter);
2816 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Command-done interrupt bottom half: latch the first command status,
 * (re)arm the data-timeout software timer scaled by the transfer size
 * (in 2MB units) plus the retry count, then flag EVENT_CMD_COMPLETE and
 * kick the state-machine tasklet.
 */
2819 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2821 u32 multi, unit = SZ_2M;
2823 if (!host->cmd_status)
2824 host->cmd_status = status;
2829 if ((MMC_STOP_TRANSMISSION != host->cmd->opcode)) {
/* Scale the dto timer with the byte count: one unit per 2MB, rounded up. */
2830 multi = (mci_readl(host, BYTCNT) / unit) +
2831 ((mci_readl(host, BYTCNT) % unit) ? 1 :0 ) +
2832 ((host->cmd->retries > 2) ? 2 : host->cmd->retries);
2833 /* Max limit time: 8s for dto */
/* NOTE(review): timeout below is 4000 ms * multi, not a fixed 8s — the
 * comment above may be stale; confirm against the timer handler. */
2834 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(4000 * multi));
2839 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2840 tasklet_schedule(&host->tasklet);
/*
 * Top-level interrupt handler.  Reads the masked interrupt status and
 * dispatches, in order: command errors, data errors, data-over (DTO),
 * RX/TX FIFO watermarks (PIO), voltage-switch, command-done, card
 * detect, hardware-locked errors, per-slot SDIO interrupts, and —
 * for SoCs using the internal IDMAC — DMA completion.
 */
2843 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2845 struct dw_mci *host = dev_id;
2846 u32 pending, sdio_int;
2849 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2852 * DTO fix - version 2.10a and below, and only if internal DMA
2855 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2857 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2858 pending |= SDMMC_INT_DATA_OVER;
2862 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2863 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2864 host->cmd_status = pending;
2866 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2867 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2869 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2872 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2873 /* if there is an error report DATA_ERROR */
2874 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2875 host->data_status = pending;
2877 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2879 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2880 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2881 tasklet_schedule(&host->tasklet);
2884 if (pending & SDMMC_INT_DATA_OVER) {
2885 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2886 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2887 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2888 if (!host->data_status)
2889 host->data_status = pending;
2891 if (host->dir_status == DW_MCI_RECV_STATUS) {
2892 if (host->sg != NULL)
/* dto=true: also drain whatever is left in the FIFO. */
2893 dw_mci_read_data_pio(host, true);
2895 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2896 tasklet_schedule(&host->tasklet);
2899 if (pending & SDMMC_INT_RXDR) {
2900 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2901 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2902 dw_mci_read_data_pio(host, false);
2905 if (pending & SDMMC_INT_TXDR) {
2906 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2907 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2908 dw_mci_write_data_pio(host);
2911 if (pending & SDMMC_INT_VSI) {
2912 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2913 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2914 dw_mci_cmd_interrupt(host, pending);
2917 if (pending & SDMMC_INT_CMD_DONE) {
2918 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2919 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2920 dw_mci_cmd_interrupt(host, pending);
2923 if (pending & SDMMC_INT_CD) {
2924 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2925 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
/* Hold a wakelock so the detect work can run before suspend. */
2926 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2927 queue_work(host->card_workqueue, &host->card_work);
2930 if (pending & SDMMC_INT_HLE) {
2931 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2932 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2936 /* Handle SDIO Interrupts */
2937 for (i = 0; i < host->num_slots; i++) {
2938 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in controller version 2.40a. */
2940 if (host->verid < DW_MMC_240A)
2941 sdio_int = SDMMC_INT_SDIO(i);
2943 sdio_int = SDMMC_INT_SDIO(i + 8);
2945 if (pending & sdio_int) {
2946 mci_writel(host, RINTSTS, sdio_int);
2947 mmc_signal_sdio_irq(slot->mmc);
2953 #ifdef CONFIG_MMC_DW_IDMAC
2954 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2955 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2956 /* Handle DMA interrupts */
2957 pending = mci_readl(host, IDSTS);
2958 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2959 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2960 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2961 host->dma_ops->complete((void *)host);
/*
 * Card-detect workqueue routine.  For each slot: read the present
 * state, switch the pad function between SD and uart_dbg (SoCs with a
 * "udbg" pinctrl tag only), and on an insert/remove transition reset
 * the controller, abort/complete any in-flight request with -ENOMEDIUM
 * according to the current state machine state, and notify the core.
 *
 * Fix vs. original: the two pinctrl failure messages were swapped —
 * selecting pins_default logged "Udbg pinctrl setting failed!" and
 * selecting pins_udbg logged "Default pinctrl setting failed!".  They
 * now match the state actually being applied (consistent with
 * dw_mci_init_pinctrl()).
 *
 * NOTE(review): elided view — brace/if structure between the numbered
 * lines is not fully visible here.
 */
2969 static void dw_mci_work_routine_card(struct work_struct *work)
2971 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2974 for (i = 0; i < host->num_slots; i++) {
2975 struct dw_mci_slot *slot = host->slot[i];
2976 struct mmc_host *mmc = slot->mmc;
2977 struct mmc_request *mrq;
2980 present = dw_mci_get_cd(mmc);
2982 /* Card insert, switch data line to uart function, and vice verse.
2983 * ONLY audi chip need switched by software, using udbg tag in dts!
2985 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
2987 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
2988 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
2989 mmc_hostname(host->mmc));
2991 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
2992 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
2993 mmc_hostname(host->mmc));
2997 while (present != slot->last_detect_state) {
2998 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2999 present ? "inserted" : "removed");
3000 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
3001 present ? "inserted" : "removed.", mmc_hostname(mmc));
3003 dw_mci_ctrl_all_reset(host);
3004 /* Stop edma when rountine card triggered */
3005 if(cpu_is_rk3036() || cpu_is_rk312x())
3006 if(host->dma_ops && host->dma_ops->stop)
3007 host->dma_ops->stop(host);
3008 rk_send_wakeup_key();//wake up system
3009 spin_lock_bh(&host->lock);
3011 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
3012 /* Card change detected */
3013 slot->last_detect_state = present;
3015 /* Clean up queue if present */
3018 if (mrq == host->mrq) {
/* Abort the active request at whatever stage it reached. */
3022 switch (host->state) {
3025 case STATE_SENDING_CMD:
3026 mrq->cmd->error = -ENOMEDIUM;
3030 case STATE_SENDING_DATA:
3031 mrq->data->error = -ENOMEDIUM;
3032 dw_mci_stop_dma(host);
3034 case STATE_DATA_BUSY:
3035 case STATE_DATA_ERROR:
3036 if (mrq->data->error == -EINPROGRESS)
3037 mrq->data->error = -ENOMEDIUM;
3041 case STATE_SENDING_STOP:
3042 mrq->stop->error = -ENOMEDIUM;
3046 dw_mci_request_end(host, mrq);
/* Request was still queued: fail it without touching the hardware. */
3048 list_del(&slot->queue_node);
3049 mrq->cmd->error = -ENOMEDIUM;
3051 mrq->data->error = -ENOMEDIUM;
3053 mrq->stop->error = -ENOMEDIUM;
3055 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3056 mrq->cmd->opcode, mmc_hostname(mmc));
3058 spin_unlock(&host->lock);
3059 mmc_request_done(slot->mmc, mrq);
3060 spin_lock(&host->lock);
3064 /* Power down slot */
3066 /* Clear down the FIFO */
3067 dw_mci_fifo_reset(host);
3068 #ifdef CONFIG_MMC_DW_IDMAC
3069 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3070 dw_mci_idmac_reset(host);
3075 spin_unlock_bh(&host->lock);
3077 present = dw_mci_get_cd(mmc);
3080 mmc_detect_change(slot->mmc,
3081 msecs_to_jiffies(host->pdata->detect_delay_ms));
3086 /* given a slot id, find out the device node representing that slot */
/*
 * Return the child device_node whose "reg" property equals @slot, or
 * fall through (elided here) when no match / no of_node exists.
 */
3087 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3089 struct device_node *np;
3093 if (!dev || !dev->of_node)
3096 for_each_child_of_node(dev->of_node, np) {
3097 addr = of_get_property(np, "reg", &len);
3098 if (!addr || (len < sizeof(int)))
3100 if (be32_to_cpup(addr) == slot)
/* Table mapping per-slot DT boolean properties to slot quirk flags. */
3106 static struct dw_mci_of_slot_quirks {
3109 } of_slot_quirks[] = {
3111 .quirk = "disable-wp",
3112 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Collect the quirk flags for @slot by testing each of_slot_quirks[]
 * property on the slot's device-tree node.
 */
3118 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3120 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3123 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3124 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3125 quirks |= of_slot_quirks[idx].id;
3130 /* find out bus-width for a given slot */
/*
 * Read the "bus-width" property from the controller node (note: the
 * host node is used directly, not the per-slot child node).
 */
3131 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3133 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3139 if (of_property_read_u32(np, "bus-width", &bus_wd))
3140 dev_err(dev, "bus-width property not found, assuming width"
3146 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Look up and claim the optional "pwr-gpios" power-enable GPIO for a
 * slot, driving it low (power-enable asserted) on success.
 */
3147 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3149 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3155 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3157 /* Having a missing entry is valid; return silently */
3158 if (!gpio_is_valid(gpio))
3161 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3162 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3166 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3172 /* find the write protect gpio for a given slot; or -1 if none specified */
/*
 * Look up and claim the optional "wp-gpios" write-protect GPIO for a
 * slot; a missing DT entry is not an error.
 */
3173 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3175 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3181 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3183 /* Having a missing entry is valid; return silently */
3184 if (!gpio_is_valid(gpio))
3187 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3188 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3195 /* find the cd gpio for a given slot */
/*
 * Look up the optional "cd-gpios" card-detect GPIO on the host node
 * and register it with the mmc slot-gpio helper.
 */
3196 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3197 struct mmc_host *mmc)
3199 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3205 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3207 /* Having a missing entry is valid; return silently */
3208 if (!gpio_is_valid(gpio))
3211 if (mmc_gpio_request_cd(mmc, gpio, 0))
3212 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * Threaded IRQ handler for the dedicated card-detect GPIO: flip the
 * trigger polarity to catch the opposite edge next time, wake the
 * system, and (unless rescan is disabled, e.g. during suspend) queue
 * the card-detect work.
 */
3215 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3217 struct mmc_host *mmc = dev_id;
3218 struct dw_mci_slot *slot = mmc_priv(mmc);
3219 struct dw_mci *host = slot->host;
3220 int gpio_cd = slot->cd_gpio;
/* Re-arm for the opposite level so both insert and remove are seen. */
3222 (gpio_get_value(gpio_cd) == 0) ?
3223 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
3224 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
3226 /* wakeup system whether gpio debounce or not */
3227 rk_send_wakeup_key();
3229 /* no need to trigger detect flow when rescan is disabled.
3230 This case happended in dpm, that we just wakeup system and
3231 let suspend_post notify callback handle it.
3233 if(mmc->rescan_disable == 0)
3234 queue_work(host->card_workqueue, &host->card_work);
3236 printk("%s: rescan been disabled!\n", __FUNCTION__);
/*
 * Convert the card-detect GPIO to an IRQ, request it as a threaded
 * one-shot interrupt (dw_mci_gpio_cd_irqt), and mark it wake-capable
 * so card detect can wake the system from idle/deep suspend.
 */
3241 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3242 struct mmc_host *mmc)
3244 struct dw_mci_slot *slot = mmc_priv(mmc);
3245 struct dw_mci *host = slot->host;
3249 /* Having a missing entry is valid; return silently */
3250 if (!gpio_is_valid(gpio))
3253 irq = gpio_to_irq(gpio);
3255 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3256 NULL, dw_mci_gpio_cd_irqt,
3257 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
3261 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3263 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3264 enable_irq_wake(irq);
3267 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/* Release the card-detect GPIO IRQ and the GPIO itself (teardown
 * counterpart of dw_mci_of_set_cd_gpio_irq()). */
3271 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3272 struct mmc_host *mmc)
3274 if (!gpio_is_valid(gpio))
3277 if (gpio_to_irq(gpio) >= 0) {
3278 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3279 devm_gpio_free(&mmc->class_dev, gpio);
3282 #else /* CONFIG_OF */
/* No-op stand-ins used when the kernel is built without device tree
 * support; return values are elided in this view of the file. */
3283 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3287 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3291 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3295 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3299 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3300 struct mmc_host *mmc)
3304 #endif /* CONFIG_OF */
3306 /* @host: dw_mci host prvdata
3307 * Init pinctrl for each platform. Usually we assign
3308 * "defalut" tag for functional usage, "idle" tag for gpio
3309 * state and "udbg" tag for uart_dbg if any.
/*
 * Acquire the host's pinctrl handle and cache the "idle", "default"
 * and (SD only) "udbg" states, applying idle then default at init.
 * eMMC hosts are skipped entirely.  For SD cards whose data0/1 pads
 * double as uart_dbg, the udbg state is applied when no card is
 * present.
 */
3311 static void dw_mci_init_pinctrl(struct dw_mci *host)
3313 /* Fixme: DON'T TOUCH EMMC SETTING! */
3314 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3317 /* Get pinctrl for DTS */
3318 host->pinctrl = devm_pinctrl_get(host->dev);
3319 if (IS_ERR(host->pinctrl)) {
3320 dev_err(host->dev, "%s: No pinctrl used!\n",
3321 mmc_hostname(host->mmc));
3325 /* Lookup idle state */
3326 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3327 PINCTRL_STATE_IDLE);
3328 if (IS_ERR(host->pins_idle)) {
3329 dev_err(host->dev, "%s: No idle tag found!\n",
3330 mmc_hostname(host->mmc));
3332 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3333 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3334 mmc_hostname(host->mmc));
3337 /* Lookup default state */
3338 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3339 PINCTRL_STATE_DEFAULT);
3340 if (IS_ERR(host->pins_default)) {
3341 dev_err(host->dev, "%s: No default pinctrl found!\n",
3342 mmc_hostname(host->mmc));
3344 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3345 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3346 mmc_hostname(host->mmc));
3349 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3350 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3351 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3352 if (IS_ERR(host->pins_udbg)) {
3353 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3354 mmc_hostname(host->mmc));
3356 if (!dw_mci_get_cd(host->mmc))
3357 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3358 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3359 mmc_hostname(host->mmc));
/*
 * PM notifier for SD hosts: on suspend prepare, disable rescan and
 * cancel any pending detect work (releasing its wakelock); on resume,
 * re-enable rescan and schedule a detect pass.
 */
3364 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3365 unsigned long mode, void *unused)
3367 struct mmc_host *host = container_of(
3368 notify_block, struct mmc_host, pm_notify);
3369 unsigned long flags;
3372 case PM_HIBERNATION_PREPARE:
3373 case PM_SUSPEND_PREPARE:
3374 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3375 spin_lock_irqsave(&host->lock, flags);
3376 host->rescan_disable = 1;
3377 spin_unlock_irqrestore(&host->lock, flags);
3378 if (cancel_delayed_work(&host->detect))
3379 wake_unlock(&host->detect_wake_lock);
3382 case PM_POST_SUSPEND:
3383 case PM_POST_HIBERNATION:
3384 case PM_POST_RESTORE:
3385 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3386 spin_lock_irqsave(&host->lock, flags);
3387 host->rescan_disable = 0;
3388 spin_unlock_irqrestore(&host->lock, flags);
3389 mmc_detect_change(host, 10);
/*
 * Allocate and register the mmc_host for slot @id: read DT caps and
 * frequency limits, register the PM notifier for SD, hook up the
 * gpio-cd interrupt on low-end SoCs, set OCR mask, caps, block/segment
 * limits (IDMAC, external DMA, or PIO), regulator, pinctrl, and
 * finally mmc_add_host().
 *
 * Fix vs. original: the gpio-cd SoC check was missing an `||` between
 * soc_is_rk3126b() and soc_is_rk3036() — a compile error.
 *
 * NOTE(review): elided view — some original lines are missing between
 * the numbered statements below.
 */
3395 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3397 struct mmc_host *mmc;
3398 struct dw_mci_slot *slot;
3399 const struct dw_mci_drv_data *drv_data = host->drv_data;
3404 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3408 slot = mmc_priv(mmc);
3412 host->slot[id] = slot;
3415 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3417 mmc->ops = &dw_mci_ops;
3419 if (of_property_read_u32_array(host->dev->of_node,
3420 "clock-freq-min-max", freq, 2)) {
3421 mmc->f_min = DW_MCI_FREQ_MIN;
3422 mmc->f_max = DW_MCI_FREQ_MAX;
3424 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3425 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3427 mmc->f_min = freq[0];
3428 mmc->f_max = freq[1];
3430 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3431 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3434 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
3436 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3437 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3438 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3439 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3440 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3441 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3443 if (of_find_property(host->dev->of_node, "supports-tSD", NULL))
3444 mmc->restrict_caps |= RESTRICT_CARD_TYPE_TSD;
3446 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3447 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3448 if (register_pm_notifier(&mmc->pm_notify)) {
3449 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3450 goto err_pm_notifier;
3454 /* We assume only low-level chip use gpio_cd */
3455 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
3456 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3457 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3458 if (gpio_is_valid(slot->cd_gpio)) {
3459 /* Request gpio int for card detection */
3460 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3462 slot->cd_gpio = -ENODEV;
3463 dev_err(host->dev, "failed to get your cd-gpios!\n");
3467 if (host->pdata->get_ocr)
3468 mmc->ocr_avail = host->pdata->get_ocr(id);
/* Default OCR: advertise the full 1.65-3.6V range. */
3471 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3472 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3473 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3474 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3478 * Start with slot power disabled, it will be enabled when a card
3481 if (host->pdata->setpower)
3482 host->pdata->setpower(id, 0);
3484 if (host->pdata->caps)
3485 mmc->caps = host->pdata->caps;
3487 if (host->pdata->pm_caps)
3488 mmc->pm_caps = host->pdata->pm_caps;
3490 if (host->dev->of_node) {
3491 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3495 ctrl_id = to_platform_device(host->dev)->id;
3497 if (drv_data && drv_data->caps)
3498 mmc->caps |= drv_data->caps[ctrl_id];
3499 if (drv_data && drv_data->hold_reg_flag)
3500 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3502 /* set the compatibility of driver. */
3503 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3504 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3506 if (host->pdata->caps2)
3507 mmc->caps2 = host->pdata->caps2;
3509 if (host->pdata->get_bus_wd)
3510 bus_width = host->pdata->get_bus_wd(slot->id);
3511 else if (host->dev->of_node)
3512 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3516 switch (bus_width) {
3518 mmc->caps |= MMC_CAP_8_BIT_DATA;
3520 mmc->caps |= MMC_CAP_4_BIT_DATA;
3523 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3524 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3525 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3526 mmc->caps |= MMC_CAP_SDIO_IRQ;
3527 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3528 mmc->caps |= MMC_CAP_HW_RESET;
3529 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3530 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3531 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3532 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3533 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3534 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3535 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3536 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3538 /*Assign pm_caps pass to pm_flags*/
3539 mmc->pm_flags = mmc->pm_caps;
3541 if (host->pdata->blk_settings) {
3542 mmc->max_segs = host->pdata->blk_settings->max_segs;
3543 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3544 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3545 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3546 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3548 /* Useful defaults if platform data is unset. */
3549 #ifdef CONFIG_MMC_DW_IDMAC
3550 mmc->max_segs = host->ring_size;
3551 mmc->max_blk_size = 65536;
3552 mmc->max_blk_count = host->ring_size;
3553 mmc->max_seg_size = 0x1000;
3554 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3555 if(cpu_is_rk3036() || cpu_is_rk312x()){
3556 /* fixup for external dmac setting */
3558 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3559 mmc->max_blk_count = 65535;
3560 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3561 mmc->max_seg_size = mmc->max_req_size;
3565 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3566 mmc->max_blk_count = 512;
3567 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3568 mmc->max_seg_size = mmc->max_req_size;
3569 #endif /* CONFIG_MMC_DW_IDMAC */
3573 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3575 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
3580 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3581 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3585 if (IS_ERR(host->vmmc)) {
3586 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3589 ret = regulator_enable(host->vmmc);
3592 "failed to enable regulator: %d\n", ret);
3599 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3601 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3602 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3604 dw_mci_init_pinctrl(host);
3605 ret = mmc_add_host(mmc);
3609 #if defined(CONFIG_DEBUG_FS)
3610 dw_mci_init_debugfs(slot);
3613 /* Card initially undetected */
3614 slot->last_detect_state = 1;
/* Error unwind: undo PM notifier and cd-gpio IRQ registration. */
3618 unregister_pm_notifier(&mmc->pm_notify);
3621 if (gpio_is_valid(slot->cd_gpio))
3622 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * Tear down one slot: run the platform exit hook, unregister the
 * mmc_host, clear the slot pointer and free the host.
 */
3627 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3629 /* Shutdown detect IRQ */
3630 if (slot->host->pdata->exit)
3631 slot->host->pdata->exit(id);
3633 /* Debugfs stuff is cleaned up by mmc core */
3634 mmc_remove_host(slot->mmc);
3635 slot->host->slot[id] = NULL;
3636 mmc_free_host(slot->mmc);
/*
 * Set up the DMA path: allocate one page of coherent memory for
 * descriptor/sg translation, pick internal IDMAC or the external
 * edmac ops (rk3036/rk312x), and initialize them; falls back to PIO
 * when no usable DMA interface is found.
 */
3639 static void dw_mci_init_dma(struct dw_mci *host)
3641 /* Alloc memory for sg translation */
3642 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3643 &host->sg_dma, GFP_KERNEL);
3644 if (!host->sg_cpu) {
3645 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3650 /* Determine which DMA interface to use */
3651 #if defined(CONFIG_MMC_DW_IDMAC)
3652 if(cpu_is_rk3036() || cpu_is_rk312x()){
3653 host->dma_ops = &dw_mci_edmac_ops;
3654 dev_info(host->dev, "Using external DMA controller.\n");
3656 host->dma_ops = &dw_mci_idmac_ops;
3657 dev_info(host->dev, "Using internal DMA controller.\n");
/* Require the full ops set before committing to DMA. */
3664 if (host->dma_ops->init && host->dma_ops->start &&
3665 host->dma_ops->stop && host->dma_ops->cleanup) {
3666 if (host->dma_ops->init(host)) {
3667 dev_err(host->dev, "%s: Unable to initialize "
3668 "DMA Controller.\n", __func__);
3672 dev_err(host->dev, "DMA initialization not found.\n");
3680 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Assert the given @reset bits in CTRL and poll (up to 500 ms) for the
 * hardware to clear them; logs a timeout otherwise.
 */
3685 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3687 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3690 ctrl = mci_readl(host, CTRL);
3692 mci_writel(host, CTRL, ctrl);
3694 /* wait till resets clear */
3696 ctrl = mci_readl(host, CTRL);
3697 if (!(ctrl & reset))
3699 } while (time_before(jiffies, timeout));
3702 "Timeout resetting block (ctrl reset %#x)\n",
/* Reset the data FIFO, first stopping the sg_miter since the reset
 * raises a block interrupt and the scatter-gather walk must not
 * continue over it. */
3708 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3711 * Reseting generates a block interrupt, hence setting
3712 * the scatter-gather pointer to NULL.
3715 sg_miter_stop(&host->sg_miter);
3719 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full controller reset: FIFO, controller and DMA reset bits together. */
3722 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3724 return dw_mci_ctrl_reset(host,
3725 SDMMC_CTRL_FIFO_RESET |
3727 SDMMC_CTRL_DMA_RESET);
/* Table mapping host-level DT boolean properties to host quirk flags. */
3732 static struct dw_mci_of_quirks {
3737 .quirk = "broken-cd",
3738 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board platform-data structure from the device tree:
 * num-slots, quirk flags, fifo-depth, card-detect delay, bus clock,
 * driver-specific parse_dt hook, and the various caps/caps2/pm_caps
 * boolean properties.  Returns ERR_PTR on allocation or parse failure.
 */
3742 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3744 struct dw_mci_board *pdata;
3745 struct device *dev = host->dev;
3746 struct device_node *np = dev->of_node;
3747 const struct dw_mci_drv_data *drv_data = host->drv_data;
3749 u32 clock_frequency;
3751 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3753 dev_err(dev, "could not allocate memory for pdata\n");
3754 return ERR_PTR(-ENOMEM);
3757 /* find out number of slots supported */
3758 if (of_property_read_u32(dev->of_node, "num-slots",
3759 &pdata->num_slots)) {
3760 dev_info(dev, "num-slots property not found, "
3761 "assuming 1 slot is available\n");
3762 pdata->num_slots = 1;
3766 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3767 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3768 pdata->quirks |= of_quirks[idx].id;
3771 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3772 dev_info(dev, "fifo-depth property not found, using "
3773 "value of FIFOTH register as default\n");
3775 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3777 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3778 pdata->bus_hz = clock_frequency;
3780 if (drv_data && drv_data->parse_dt) {
3781 ret = drv_data->parse_dt(host);
3783 return ERR_PTR(ret);
3786 if (of_find_property(np, "keep-power-in-suspend", NULL))
3787 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3789 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3790 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3792 if (of_find_property(np, "supports-highspeed", NULL))
3793 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3795 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3796 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3798 if (of_find_property(np, "supports-DDR_MODE", NULL))
3799 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3801 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3802 pdata->caps2 |= MMC_CAP2_HS200;
3804 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3805 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3807 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3808 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3810 if (of_get_property(np, "cd-inverted", NULL))
3811 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3812 if (of_get_property(np, "bootpart-no-access", NULL))
3813 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3818 #else /* CONFIG_OF */
3819 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3821 return ERR_PTR(-EINVAL);
3823 #endif /* CONFIG_OF */
/*
 * dw_mci_dealwith_timeout - recover the controller after a data-phase
 * timeout (no DATA_OVER interrupt arrived).
 *
 * For STATE_SENDING_DATA/STATE_DATA_BUSY it marks the transfer as failed
 * (DCRC|EBE), performs a full controller reset, re-initializes IDMAC where
 * applicable, restores FIFOTH/TMOUT, rebuilds INTMASK (card-detect only
 * for non-SDIO hosts, preserving the SDIO interrupt bit when it was
 * enabled), re-enables interrupts and schedules the state-machine tasklet
 * so the stuck request completes with an error.
 *
 * NOTE(review): extraction dropped interior lines (local declarations of
 * regs/sdio_int, other switch cases, closing braces); read alongside the
 * full file before modifying.
 */
3825 static void dw_mci_dealwith_timeout(struct dw_mci *host)
3830 dev_err(host->dev, "host->state = 0x%x\n", host->state);
3831 switch(host->state){
3834 case STATE_SENDING_DATA:
3835 case STATE_DATA_BUSY:
/* Flag the in-flight data transfer as failed so the tasklet errors it out. */
3836 host->data_status |= (SDMMC_INT_DCRC|SDMMC_INT_EBE);
3837 mci_writel(host, RINTSTS, SDMMC_INT_DRTO); // clear interrupt
3838 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3839 host->state = STATE_DATA_BUSY;
3840 if (!dw_mci_ctrl_all_reset(host)) {
3841 dev_err(host->dev, "dto: ctrl_all_reset failed!\n");
3845 /* NO requirement to reclaim slave chn using external dmac */
3846 #ifdef CONFIG_MMC_DW_IDMAC
3847 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3848 if (host->use_dma && host->dma_ops->init)
3849 host->dma_ops->init(host);
3853 * Restore the initial value at FIFOTH register
3854 * And Invalidate the prev_blksz with zero
3856 mci_writel(host, FIFOTH, host->fifoth_val);
3857 host->prev_blksz = 0;
3858 mci_writel(host, TMOUT, 0xFFFFFFFF);
3859 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3860 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
3861 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
/* Card-detect interrupt only makes sense for removable (non-SDIO) hosts. */
3862 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3863 regs |= SDMMC_INT_CD;
/* Preserve the SDIO interrupt bit if it was enabled before the reset;
 * the bit position moved in controller version 2.40a. */
3865 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)) {
3866 if (host->verid < DW_MMC_240A)
3867 sdio_int = SDMMC_INT_SDIO(0);
3869 sdio_int = SDMMC_INT_SDIO(8);
3871 if (mci_readl(host, INTMASK) & sdio_int)
3875 mci_writel(host, INTMASK, regs);
3876 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3877 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3878 tasklet_schedule(&host->tasklet);
3884 static void dw_mci_dto_timeout(unsigned long host_data)
3886 struct dw_mci *host = (struct dw_mci *) host_data;
3888 disable_irq(host->irq);
3890 dev_err(host->dev, "data_over interrupt timeout!\n");
3891 host->data_status = SDMMC_INT_EBE;
3892 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3893 dw_mci_dealwith_timeout(host);
3895 enable_irq(host->irq);
/*
 * dw_mci_probe - common probe for the Rockchip DW-MSHC host controller.
 *
 * Visible ordering: parse DT platform data; read VERID to select the DATA
 * register offset (moved in IP version 2.40a); acquire and enable
 * hclk_mmc and clk_mmc; program the CIU rate (doubled on >= 2.40a parts
 * to compensate the fixed /2 divider ahead of the controller); run the
 * variant setup_clock hook; init locks/request queue; derive FIFO access
 * width from HCON; reset all controller blocks; set up DMA; program
 * FIFOTH and max TMOUT; create the card workqueue, request the IRQ, init
 * the data-over timeout timer and all slots; finally unmask interrupts.
 *
 * Returns 0 on success or a negative errno; on failure the visible
 * cleanup tail unwinds workqueue, DMA, regulator and both clocks.
 *
 * NOTE(review): extraction dropped interior lines (goto error labels,
 * declarations of fifo_size/regs/init_slots, several closing braces);
 * exact error-path ordering cannot be inferred from this view.
 */
3898 int dw_mci_probe(struct dw_mci *host)
3900 const struct dw_mci_drv_data *drv_data = host->drv_data;
3901 int width, i, ret = 0;
3907 host->pdata = dw_mci_parse_dt(host);
3908 if (IS_ERR(host->pdata)) {
3909 dev_err(host->dev, "platform data not available\n");
3914 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3916 "Platform data must supply select_slot function\n");
3921 * In 2.40a spec, Data offset is changed.
3922 * Need to check the version-id and set data-offset for DATA register.
3924 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3925 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3927 if (host->verid < DW_MMC_240A)
3928 host->data_offset = DATA_OFFSET;
3930 host->data_offset = DATA_240A_OFFSET;
3933 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3934 if (IS_ERR(host->hclk_mmc)) {
3935 dev_err(host->dev, "failed to get hclk_mmc\n");
3936 ret = PTR_ERR(host->hclk_mmc);
3940 clk_prepare_enable(host->hclk_mmc);
3943 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3944 if (IS_ERR(host->clk_mmc)) {
3945 dev_err(host->dev, "failed to get clk mmc_per\n");
3946 ret = PTR_ERR(host->clk_mmc);
3950 host->bus_hz = host->pdata->bus_hz;
3951 if (!host->bus_hz) {
3952 dev_err(host->dev,"Platform data must supply bus speed\n");
3957 if (host->verid < DW_MMC_240A)
3958 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3960 //rockchip: fix divider 2 in clksum before controlller
3961 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3964 dev_err(host->dev, "failed to set clk mmc\n");
3967 clk_prepare_enable(host->clk_mmc);
3969 if (drv_data && drv_data->setup_clock) {
3970 ret = drv_data->setup_clock(host);
3973 "implementation specific clock setup failed\n");
3978 host->quirks = host->pdata->quirks;
3979 host->irq_state = true;
3980 host->set_speed = 0;
3982 host->svi_flags = 0;
3984 spin_lock_init(&host->lock);
3985 spin_lock_init(&host->slock);
3987 INIT_LIST_HEAD(&host->queue);
3989 * Get the host data width - this assumes that HCON has been set with
3990 * the correct values.
3992 i = (mci_readl(host, HCON) >> 7) & 0x7;
3994 host->push_data = dw_mci_push_data16;
3995 host->pull_data = dw_mci_pull_data16;
3997 host->data_shift = 1;
3998 } else if (i == 2) {
3999 host->push_data = dw_mci_push_data64;
4000 host->pull_data = dw_mci_pull_data64;
4002 host->data_shift = 3;
4004 /* Check for a reserved value, and warn if it is */
4006 "HCON reports a reserved host data width!\n"
4007 "Defaulting to 32-bit access.\n");
4008 host->push_data = dw_mci_push_data32;
4009 host->pull_data = dw_mci_pull_data32;
4011 host->data_shift = 2;
4014 /* Reset all blocks */
4015 if (!dw_mci_ctrl_all_reset(host))
4018 host->dma_ops = host->pdata->dma_ops;
4019 dw_mci_init_dma(host);
4021 /* Clear the interrupts for the host controller */
4022 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4023 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4025 /* Put in max timeout */
4026 mci_writel(host, TMOUT, 0xFFFFFFFF);
4029 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
4030 * Tx Mark = fifo_size / 2 DMA Size = 8
4032 if (!host->pdata->fifo_depth) {
4034 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
4035 * have been overwritten by the bootloader, just like we're
4036 * about to do, so if you know the value for your hardware, you
4037 * should put it in the platform data.
4039 fifo_size = mci_readl(host, FIFOTH);
4040 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
4042 fifo_size = host->pdata->fifo_depth;
4044 host->fifo_depth = fifo_size;
4046 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
4047 mci_writel(host, FIFOTH, host->fifoth_val);
4049 /* disable clock to CIU */
4050 mci_writel(host, CLKENA, 0);
4051 mci_writel(host, CLKSRC, 0);
4053 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
4054 host->card_workqueue = alloc_workqueue("dw-mci-card",
4055 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
4056 if (!host->card_workqueue) {
4060 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
4061 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
4062 host->irq_flags, "dw-mci", host);
/* Slot count: explicit in pdata, otherwise derived from HCON bits [5:1]. */
4066 if (host->pdata->num_slots)
4067 host->num_slots = host->pdata->num_slots;
4069 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
4071 setup_timer(&host->dto_timer, dw_mci_dto_timeout, (unsigned long)host);
4072 /* We need at least one slot to succeed */
4073 for (i = 0; i < host->num_slots; i++) {
4074 ret = dw_mci_init_slot(host, i);
4076 dev_dbg(host->dev, "slot %d init failed\n", i);
4082 * Enable interrupts for command done, data over, data empty, card det,
4083 * receive ready and error such as transmit, receive timeout, crc error
4085 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4086 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
4087 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
4088 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
4089 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
4090 regs |= SDMMC_INT_CD;
4092 mci_writel(host, INTMASK, regs);
4094 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4096 dev_info(host->dev, "DW MMC controller at irq %d, "
4097 "%d bit host data width, "
4099 host->irq, width, fifo_size);
4102 dev_info(host->dev, "%d slots initialized\n", init_slots);
4104 dev_dbg(host->dev, "attempted to initialize %d slots, "
4105 "but failed on all\n", host->num_slots);
4110 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4111 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* Error-path cleanup tail (labels dropped by extraction). */
4116 destroy_workqueue(host->card_workqueue);
4119 if (host->use_dma && host->dma_ops->exit)
4120 host->dma_ops->exit(host);
4123 regulator_disable(host->vmmc);
4124 regulator_put(host->vmmc);
4128 if (!IS_ERR(host->clk_mmc))
4129 clk_disable_unprepare(host->clk_mmc);
4131 if (!IS_ERR(host->hclk_mmc))
4132 clk_disable_unprepare(host->hclk_mmc);
4136 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - tear down the controller.
 *
 * Visible ordering: stop the data-over timeout timer, quiesce and mask
 * all interrupts, clean up every slot, gate the CIU clock, destroy the
 * card workqueue, unregister the PM notifier (SD hosts only), shut down
 * DMA, free the card-detect GPIO IRQ, release the vmmc regulator and
 * finally gate clk_mmc and hclk_mmc.
 *
 * NOTE(review): extraction dropped interior lines (declaration of i,
 * closing braces); the visible text is not the complete body.
 */
4138 void dw_mci_remove(struct dw_mci *host)
4140 struct mmc_host *mmc = host->mmc;
4141 struct dw_mci_slot *slot = mmc_priv(mmc);
4144 del_timer_sync(&host->dto_timer);
4146 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4147 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4149 for(i = 0; i < host->num_slots; i++){
4150 dev_dbg(host->dev, "remove slot %d\n", i);
4152 dw_mci_cleanup_slot(host->slot[i], i);
4155 /* disable clock to CIU */
4156 mci_writel(host, CLKENA, 0);
4157 mci_writel(host, CLKSRC, 0);
4159 destroy_workqueue(host->card_workqueue);
4160 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4161 unregister_pm_notifier(&host->mmc->pm_notify);
4163 if(host->use_dma && host->dma_ops->exit)
4164 host->dma_ops->exit(host);
4166 if (gpio_is_valid(slot->cd_gpio))
4167 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4170 regulator_disable(host->vmmc);
4171 regulator_put(host->vmmc);
4173 if(!IS_ERR(host->clk_mmc))
4174 clk_disable_unprepare(host->clk_mmc);
4176 if(!IS_ERR(host->hclk_mmc))
4177 clk_disable_unprepare(host->hclk_mmc);
4179 EXPORT_SYMBOL(dw_mci_remove);
4183 #ifdef CONFIG_PM_SLEEP
4185 * TODO: we should probably disable the clock to the card in the suspend path.
4187 extern int get_wifi_chip_type(void);
/*
 * dw_mci_suspend - system-sleep hook.
 *
 * Skips suspend handling entirely for SDIO hosts driving ESP8089/Realtek
 * WiFi chips; otherwise drops the vmmc regulator, and for the removable
 * SD-card controller masks the host IRQ, moves pins to the idle pinctrl
 * state, quiesces the controller registers, and (on SoCs not already in
 * gpio_cd mode) arms the card-detect GPIO as a wakeup source.
 *
 * NOTE(review): extraction dropped interior lines (early return, closing
 * braces, final return value).
 */
4188 int dw_mci_suspend(struct dw_mci *host)
4190 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4191 (get_wifi_chip_type() == WIFI_ESP8089 || get_wifi_chip_type() == WIFI_RTKWIFI))
4195 regulator_disable(host->vmmc);
4197 /*only for sdmmc controller*/
4198 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4199 disable_irq(host->irq);
4200 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4201 MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
4202 mmc_hostname(host->mmc));
4204 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4205 mci_writel(host, INTMASK, 0x00);
4206 mci_writel(host, CTRL, 0x00);
4208 /* Soc rk3126/3036 already in gpio_cd mode */
4209 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4210 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4211 enable_irq_wake(host->mmc->slot.cd_irq);
4216 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - undo dw_mci_suspend.
 *
 * Visible behavior: same WiFi-chip early-out as suspend; skips resume for
 * SDIO slots with no card present; for the SD controller restores the
 * default pinctrl state and disarms the CD-GPIO wakeup (non-gpio_cd
 * SoCs); pokes per-SoC GRF force_jtag bits; re-enables vmmc; resets the
 * controller, re-inits DMA, restores FIFOTH/TMOUT, rebuilds INTMASK and
 * re-enables interrupts; re-enables the host IRQ for the SD controller;
 * finally re-runs set_ios/setup_bus for slots with MMC_PM_KEEP_POWER.
 *
 * NOTE(review): extraction dropped interior lines (retry logic around
 * retry_cnt, several conditions/braces, final return); do not infer the
 * complete control flow from this view.
 */
4218 int dw_mci_resume(struct dw_mci *host)
4220 int i, ret, retry_cnt = 0;
4222 struct dw_mci_slot *slot;
4224 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4225 (get_wifi_chip_type() == WIFI_ESP8089 || get_wifi_chip_type() == WIFI_RTKWIFI))
4230 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
4231 slot = mmc_priv(host->mmc);
4232 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4236 /*only for sdmmc controller*/
4237 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4238 /* Soc rk3126/3036 already in gpio_cd mode */
4239 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4240 disable_irq_wake(host->mmc->slot.cd_irq);
4241 mmc_gpio_free_cd(host->mmc);
4243 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4244 MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
4245 mmc_hostname(host->mmc));
/* Clear the per-SoC GRF force_jtag bit so the pins work as SDMMC. */
4249 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4250 else if(cpu_is_rk3036())
4251 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4252 else if(cpu_is_rk312x())
4253 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4254 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4257 ret = regulator_enable(host->vmmc);
4260 "failed to enable regulator: %d\n", ret);
4265 if(!dw_mci_ctrl_all_reset(host)){
4270 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
4271 if(host->use_dma && host->dma_ops->init)
4272 host->dma_ops->init(host);
4275 * Restore the initial value at FIFOTH register
4276 * And Invalidate the prev_blksz with zero
4278 mci_writel(host, FIFOTH, host->fifoth_val);
4279 host->prev_blksz = 0;
4280 /* Put in max timeout */
4281 mci_writel(host, TMOUT, 0xFFFFFFFF);
4283 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4284 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
4286 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4287 regs |= SDMMC_INT_CD;
4288 mci_writel(host, INTMASK, regs);
4289 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4290 /*only for sdmmc controller*/
4291 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
4292 enable_irq(host->irq);
4295 for(i = 0; i < host->num_slots; i++){
4296 struct dw_mci_slot *slot = host->slot[i];
4299 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4300 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4301 dw_mci_setup_bus(slot, true);
4307 EXPORT_SYMBOL(dw_mci_resume);
4308 #endif /* CONFIG_PM_SLEEP */
4310 static int __init dw_mci_init(void)
4312 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4316 static void __exit dw_mci_exit(void)
4320 module_init(dw_mci_init);
4321 module_exit(dw_mci_exit);
4323 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4324 MODULE_AUTHOR("NXP Semiconductor VietNam");
4325 MODULE_AUTHOR("Imagination Technologies Ltd");
4326 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4327 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4328 MODULE_LICENSE("GPL v2");