2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
49 #include <linux/mfd/syscon.h>
50 #include <linux/regmap.h>
51 #include <linux/log2.h>
53 #include "rk_sdmmc_dbg.h"
54 #include <linux/regulator/rockchip_io_vol_domain.h>
55 #include "../../clk/rockchip/clk-ops.h"
/*
 * Driver version string, common error-interrupt masks, clock limits and
 * per-card-type data timeouts (in ms), followed by the internal-DMA
 * (IDMAC) descriptor field definitions.
 * NOTE(review): this is a line-numbered, partially sampled dump; several
 * continuation lines of the multi-line macros and the "struct idmac_desc {"
 * header are missing from this view.
 */
57 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
59 /* Common flag combinations */
60 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
61 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
63 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
65 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
66 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
67 #define DW_MCI_SEND_STATUS 1
68 #define DW_MCI_RECV_STATUS 2
69 #define DW_MCI_DMA_THRESHOLD 16
71 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
72 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
74 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
75 #define SDMMC_DATA_TIMEOUT_SD 500
76 #define SDMMC_DATA_TIMEOUT_SDIO 250
77 #define SDMMC_DATA_TIMEOUT_EMMC 2500
79 #define SDMMC_CMD_RTO_MAX_HOLD 200
80 #define SDMMC_WAIT_FOR_UNBUSY 2500
82 #ifdef CONFIG_MMC_DW_IDMAC
83 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
84 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
85 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
89 u32 des0; /* Control Descriptor */
90 #define IDMAC_DES0_DIC BIT(1)
91 #define IDMAC_DES0_LD BIT(2)
92 #define IDMAC_DES0_FD BIT(3)
93 #define IDMAC_DES0_CH BIT(4)
94 #define IDMAC_DES0_ER BIT(5)
95 #define IDMAC_DES0_CES BIT(30)
96 #define IDMAC_DES0_OWN BIT(31)
98 u32 des1; /* Buffer sizes */
99 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
100 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
102 u32 des2; /* buffer 1 physical address */
104 u32 des3; /* buffer 2 physical address */
106 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * Standard tuning block pattern for a 4-bit bus (SD CMD19 / eMMC CMD21),
 * as defined by the SD physical layer specification.
 * NOTE(review): the closing "};" line is not visible in this sampled dump.
 */
108 static const u8 tuning_blk_pattern_4bit[] = {
109 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
110 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
111 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
112 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
113 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
114 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
115 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
116 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/*
 * Standard tuning block pattern for an 8-bit bus (eMMC CMD21 / HS200),
 * as defined by the eMMC specification.
 * NOTE(review): the closing "};" line is not visible in this sampled dump.
 */
119 static const u8 tuning_blk_pattern_8bit[] = {
120 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
121 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
122 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
123 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
124 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
125 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
126 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
127 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
128 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
129 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
130 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
131 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
132 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
133 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
134 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
135 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Forward declarations of reset and low-power helpers defined later in
 * the file (used by dw_mci_start_command / dw_mci_submit_data_dma below). */
138 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
139 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
140 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
141 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
143 /*printk the all register of current host*/
/*
 * Walk the dw_mci_regs name/address table and printk each register's
 * current value — a plain-console debug dump (distinct from the debugfs
 * version below).  Interior lines (loop advance, return) are missing
 * from this sampled view.
 */
145 static int dw_mci_regs_printk(struct dw_mci *host)
147 struct sdmmc_reg *regs = dw_mci_regs;
149 while( regs->name != 0 ){
150 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
153 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
158 #if defined(CONFIG_DEBUG_FS)
/*
 * debugfs "req" show handler: dump the slot's in-flight mmc_request
 * (cmd, data, stop) under host->lock so the snapshot is consistent.
 * NOTE(review): both seq_printf calls print cmd->resp[2] twice and never
 * print resp[3], although the format string expects four response words —
 * this matches a known upstream dw_mmc bug and should be resp[3] on the
 * fourth argument.
 */
159 static int dw_mci_req_show(struct seq_file *s, void *v)
161 struct dw_mci_slot *slot = s->private;
162 struct mmc_request *mrq;
163 struct mmc_command *cmd;
164 struct mmc_command *stop;
165 struct mmc_data *data;
167 /* Make sure we get a consistent snapshot */
168 spin_lock_bh(&slot->host->lock);
178 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
179 cmd->opcode, cmd->arg, cmd->flags,
180 cmd->resp[0], cmd->resp[1], cmd->resp[2],
181 cmd->resp[2], cmd->error);
183 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
184 data->bytes_xfered, data->blocks,
185 data->blksz, data->flags, data->error);
188 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
189 stop->opcode, stop->arg, stop->flags,
190 stop->resp[0], stop->resp[1], stop->resp[2],
191 stop->resp[2], stop->error);
194 spin_unlock_bh(&slot->host->lock);
/* single_open() adapter so dw_mci_req_show can back the "req" debugfs file. */
199 static int dw_mci_req_open(struct inode *inode, struct file *file)
201 return single_open(file, dw_mci_req_show, inode->i_private);
/* file_operations for the per-slot "req" debugfs entry (read/llseek lines
 * are missing from this sampled view). */
204 static const struct file_operations dw_mci_req_fops = {
205 .owner = THIS_MODULE,
206 .open = dw_mci_req_open,
209 .release = single_release,
/*
 * debugfs "regs" show handler.
 * NOTE(review): these seq_printf calls print the SDMMC_* register *offset
 * macros* themselves, not the live register contents — there is no
 * mci_readl() here.  Upstream dw_mmc later fixed this to read the
 * registers; confirm whether that was intended before relying on this
 * output.
 */
212 static int dw_mci_regs_show(struct seq_file *s, void *v)
214 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
215 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
216 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
217 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
218 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
219 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* single_open() adapter so dw_mci_regs_show can back the "regs" debugfs file. */
224 static int dw_mci_regs_open(struct inode *inode, struct file *file)
226 return single_open(file, dw_mci_regs_show, inode->i_private);
/* file_operations for the "regs" debugfs entry (read/llseek lines are
 * missing from this sampled view). */
229 static const struct file_operations dw_mci_regs_fops = {
230 .owner = THIS_MODULE,
231 .open = dw_mci_regs_open,
234 .release = single_release,
/*
 * Create the per-slot debugfs nodes under the mmc host's debugfs root:
 * "regs" and "req" files plus state/pending_events/completed_events
 * integers.  Error-check lines between creations are missing from this
 * sampled view; the final dev_err is the common failure path.
 */
237 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
239 struct mmc_host *mmc = slot->mmc;
240 struct dw_mci *host = slot->host;
244 root = mmc->debugfs_root;
248 node = debugfs_create_file("regs", S_IRUSR, root, host,
253 node = debugfs_create_file("req", S_IRUSR, root, slot,
258 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
262 node = debugfs_create_x32("pending_events", S_IRUSR, root,
263 (u32 *)&host->pending_events);
267 node = debugfs_create_x32("completed_events", S_IRUSR, root,
268 (u32 *)&host->completed_events);
275 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
277 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the controller's data/response timeout register to its maximum;
 * actual timeouts are enforced in software (see dw_mci_wait_unbusy). */
279 static void dw_mci_set_timeout(struct dw_mci *host)
281 /* timeout (maximum) */
282 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMDR flag word:
 * stop/wait-prev-data bits, response-expected/long/CRC bits from
 * cmd->flags, and data direction/stream bits from the attached data.
 * Gives the platform drv_data hook a chance to adjust the result.
 * (The "cmdr = cmd->opcode" initialisation and the data-presence check
 * are among the lines missing from this sampled view.)
 */
285 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
287 struct mmc_data *data;
288 struct dw_mci_slot *slot = mmc_priv(mmc);
289 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
291 cmd->error = -EINPROGRESS;
295 if (cmdr == MMC_STOP_TRANSMISSION)
296 cmdr |= SDMMC_CMD_STOP;
298 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
300 if (cmd->flags & MMC_RSP_PRESENT) {
301 /* We expect a response, so set this bit */
302 cmdr |= SDMMC_CMD_RESP_EXP;
303 if (cmd->flags & MMC_RSP_136)
304 cmdr |= SDMMC_CMD_RESP_LONG;
307 if (cmd->flags & MMC_RSP_CRC)
308 cmdr |= SDMMC_CMD_RESP_CRC;
312 cmdr |= SDMMC_CMD_DAT_EXP;
313 if (data->flags & MMC_DATA_STREAM)
314 cmdr |= SDMMC_CMD_STRM_MODE;
315 if (data->flags & MMC_DATA_WRITE)
316 cmdr |= SDMMC_CMD_DAT_WR;
319 if (drv_data && drv_data->prepare_command)
320 drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build the stop/abort command used to terminate a data transfer:
 * CMD12 (STOP_TRANSMISSION) for block read/write, or an SDIO
 * CCCR-abort write via CMD52 for SD_IO_RW_EXTENDED.  Returns the CMDR
 * flags for that stop command (STOP + response-expected + CRC).
 */
326 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
328 struct mmc_command *stop;
334 stop = &host->stop_abort;
336 memset(stop, 0, sizeof(struct mmc_command));
338 if (cmdr == MMC_READ_SINGLE_BLOCK ||
339 cmdr == MMC_READ_MULTIPLE_BLOCK ||
340 cmdr == MMC_WRITE_BLOCK ||
341 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
342 stop->opcode = MMC_STOP_TRANSMISSION;
344 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
345 } else if (cmdr == SD_IO_RW_EXTENDED) {
346 stop->opcode = SD_IO_RW_DIRECT;
/* CMD52 write to CCCR ABORT register, preserving the function number
 * from the original CMD53 argument (bits 30:28). */
347 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
348 ((cmd->arg >> 28) & 0x7);
349 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
354 cmdr = stop->opcode | SDMMC_CMD_STOP |
355 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Issue a command to the controller: write CMDARG then CMD with the
 * START bit.  CMD11 (SD_SWITCH_VOLTAGE) gets special handling — low-power
 * clock gating is disabled first and the VOLT_SWITCH flag is added so the
 * controller sequences the 1.8V switch.  USE_HOLD_REG is forced on SoCs
 * that require it (e.g. RK3188).
 */
360 static void dw_mci_start_command(struct dw_mci *host,
361 struct mmc_command *cmd, u32 cmd_flags)
363 struct dw_mci_slot *slot = host->slot[0];
364 /*temporality fix slot[0] due to host->num_slots equal to 1*/
366 host->pre_cmd = host->cmd;
369 "start command: ARGR=0x%08x CMDR=0x%08x\n",
370 cmd->arg, cmd_flags);
372 if(SD_SWITCH_VOLTAGE == cmd->opcode){
373 /*confirm non-low-power mode*/
374 mci_writel(host, CMDARG, 0);
375 dw_mci_disable_low_power(slot);
377 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
378 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
380 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
383 mci_writel(host, CMDARG, cmd->arg);
386 /* fix the value to 1 in some Soc,for example RK3188. */
387 if(host->mmc->hold_reg_flag)
388 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
390 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the pre-computed stop command (host->stop_cmdr) for this data
 * transfer. */
394 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
396 dw_mci_start_command(host, data->stop, host->stop_cmdr);
399 /* DMA interface functions */
/*
 * Abort an in-flight DMA transfer and mark the transfer phase complete.
 * On RK3036/RK312x the external-DMA stop is skipped (terminating the
 * edma channel there can trigger an unwanted flush — see the Fixme).
 */
400 static void dw_mci_stop_dma(struct dw_mci *host)
402 if (host->using_dma) {
403 /* Fixme: No need to terminate edma, may cause flush op */
404 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
405 host->dma_ops->stop(host);
406 host->dma_ops->cleanup(host);
409 /* Data transfer was stopped by the interrupt handler */
410 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Map mmc_data direction flags to the DMA-API direction constant. */
413 static int dw_mci_get_dma_dir(struct mmc_data *data)
415 if (data->flags & MMC_DATA_WRITE)
416 return DMA_TO_DEVICE;
418 return DMA_FROM_DEVICE;
421 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the scatterlist after an IDMAC transfer — but only if it was not
 * pre-mapped by dw_mci_pre_req (host_cookie != 0 means post_req owns the
 * unmap).
 */
422 static void dw_mci_dma_cleanup(struct dw_mci *host)
424 struct mmc_data *data = host->data;
427 if (!data->host_cookie)
428 dma_unmap_sg(host->dev,
431 dw_mci_get_dma_dir(data));
/* Soft-reset the internal DMA controller by setting SWRESET in BMOD. */
434 static void dw_mci_idmac_reset(struct dw_mci *host)
436 u32 bmod = mci_readl(host, BMOD);
437 /* Software reset of DMA */
438 bmod |= SDMMC_IDMAC_SWRESET;
439 mci_writel(host, BMOD, bmod);
/*
 * Stop the internal DMAC: detach it from the data path (CTRL) with a DMA
 * reset, then disable and soft-reset the IDMAC engine itself (BMOD).
 */
442 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
446 /* Disable and reset the IDMAC interface */
447 temp = mci_readl(host, CTRL);
448 temp &= ~SDMMC_CTRL_USE_IDMAC;
449 temp |= SDMMC_CTRL_DMA_RESET;
450 mci_writel(host, CTRL, temp);
452 /* Stop the IDMAC running */
453 temp = mci_readl(host, BMOD);
454 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
455 temp |= SDMMC_IDMAC_SWRESET;
456 mci_writel(host, BMOD, temp);
/*
 * IDMAC completion callback: unmap buffers, flag EVENT_XFER_COMPLETE and
 * kick the state-machine tasklet.  If the card was removed, data is NULL
 * and the completion steps are skipped (guard lines missing from this
 * sampled view).
 */
459 static void dw_mci_idmac_complete_dma(void *arg)
461 struct dw_mci *host = arg;
462 struct mmc_data *data = host->data;
464 dev_vdbg(host->dev, "DMA complete\n");
467 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
468 host->mrq->cmd->opcode,host->mrq->cmd->arg,
469 data->blocks,data->blksz,mmc_hostname(host->mmc));
472 host->dma_ops->cleanup(host);
475 * If the card was removed, data will be NULL. No point in trying to
476 * send the stop command or waiting for NBUSY in this case.
479 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
480 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the DMA-mapped scatterlist: one
 * chained descriptor per sg entry (OWN+DIC+CH, buffer address and size),
 * then mark the first descriptor FD and rewrite the final one as LD with
 * chaining/interrupt-disable cleared so completion raises an interrupt.
 * NOTE(review): the last-descriptor address is computed with byte
 * arithmetic on host->sg_cpu — valid only if sg_cpu is a byte-sized
 * pointer type (void*/char*); confirm against the struct definition.
 */
484 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
488 struct idmac_desc *desc = host->sg_cpu;
490 for (i = 0; i < sg_len; i++, desc++) {
491 unsigned int length = sg_dma_len(&data->sg[i]);
492 u32 mem_addr = sg_dma_address(&data->sg[i]);
494 /* Set the OWN bit and disable interrupts for this descriptor */
495 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
498 IDMAC_SET_BUFFER1_SIZE(desc, length);
500 /* Physical address to DMA to/from */
501 desc->des2 = mem_addr;
504 /* Set first descriptor */
506 desc->des0 |= IDMAC_DES0_FD;
508 /* Set last descriptor */
509 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
510 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
511 desc->des0 |= IDMAC_DES0_LD;
/*
 * Start an IDMAC transfer: build the descriptor ring for this request,
 * route the data path through the IDMAC (CTRL), enable the engine (BMOD),
 * then write the poll-demand register so it begins fetching descriptors.
 */
516 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
520 dw_mci_translate_sglist(host, host->data, sg_len);
522 /* Select IDMAC interface */
523 temp = mci_readl(host, CTRL);
524 temp |= SDMMC_CTRL_USE_IDMAC;
525 mci_writel(host, CTRL, temp);
529 /* Enable the IDMAC */
530 temp = mci_readl(host, BMOD);
531 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
532 mci_writel(host, BMOD, temp);
534 /* Start it running */
535 mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: forward-link a page-sized ring of descriptors
 * (last one wraps to the head and carries the end-of-ring bit), soft-reset
 * the engine, unmask only the NI/RI(/TI) completion interrupts, and point
 * the controller at the ring's DMA address.
 */
538 static int dw_mci_idmac_init(struct dw_mci *host)
540 struct idmac_desc *p;
543 /* Number of descriptors in the ring buffer */
544 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
546 /* Forward link the descriptor list */
547 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
548 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
550 /* Set the last descriptor as the end-of-ring descriptor */
551 p->des3 = host->sg_dma;
552 p->des0 = IDMAC_DES0_ER;
554 dw_mci_idmac_reset(host);
556 /* Mask out interrupts - get Tx & Rx complete only */
557 mci_writel(host, IDSTS, IDMAC_INT_CLR);
558 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
561 /* Set the descriptor base address */
562 mci_writel(host, DBADDR, host->sg_dma);
/* DMA-ops vtable wiring the internal-DMAC implementation into the core. */
566 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
567 .init = dw_mci_idmac_init,
568 .start = dw_mci_idmac_start_dma,
569 .stop = dw_mci_idmac_stop_dma,
570 .complete = dw_mci_idmac_complete_dma,
571 .cleanup = dw_mci_dma_cleanup,
/* External-DMA counterpart of dw_mci_dma_cleanup: unmap the scatterlist
 * unless it was pre-mapped by dw_mci_pre_req (host_cookie != 0). */
575 static void dw_mci_edma_cleanup(struct dw_mci *host)
577 struct mmc_data *data = host->data;
580 if (!data->host_cookie)
581 dma_unmap_sg(host->dev,
582 data->sg, data->sg_len,
583 dw_mci_get_dma_dir(data));
/* Abort the external dmaengine channel used for this host. */
586 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
588 dmaengine_terminate_all(host->dms->ch);
/*
 * dmaengine completion callback: for reads, sync the sg list back to the
 * CPU (cache invalidate) before handing the buffer up; then unmap, flag
 * EVENT_XFER_COMPLETE and schedule the state-machine tasklet.  As with
 * the IDMAC path, data may be NULL after card removal (guard lines
 * missing from this sampled view).
 */
591 static void dw_mci_edmac_complete_dma(void *arg)
593 struct dw_mci *host = arg;
594 struct mmc_data *data = host->data;
596 dev_vdbg(host->dev, "DMA complete\n");
599 if(data->flags & MMC_DATA_READ)
600 /* Invalidate cache after read */
601 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
602 data->sg_len, DMA_FROM_DEVICE);
604 host->dma_ops->cleanup(host);
607 * If the card was removed, data will be NULL. No point in trying to
608 * send the stop command or waiting for NBUSY in this case.
611 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
612 tasklet_schedule(&host->tasklet);
/*
 * Start a transfer on the external dmaengine channel.
 * - Programs the slave config: the controller's FIFO data register as the
 *   device address, 4-byte bus width, and a max-burst matched to the
 *   FIFO's DMA MSIZE (capped at 16, or 8 on RK3036; FIFOTH is rewritten
 *   to agree, with a special RX watermark on RK3126B).
 * - For writes: prep MEM_TO_DEV, install dw_mci_edmac_complete_dma as the
 *   callback, submit, flush caches for the device, and issue pending.
 * - For reads: same with DEV_TO_MEM (cache sync happens in the callback).
 * Several error-return lines after the dev_err calls are missing from
 * this sampled view.
 */
616 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
618 struct dma_slave_config slave_config;
619 struct dma_async_tx_descriptor *desc = NULL;
620 struct scatterlist *sgl = host->data->sg;
621 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
622 u32 sg_elems = host->data->sg_len;
623 u32 fifoth_val, mburst;
625 u32 idx, rx_wmark, tx_wmark;
628 /* Set external dma config: burst size, burst width*/
629 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
630 slave_config.src_addr = slave_config.dst_addr;
631 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
632 slave_config.src_addr_width = slave_config.dst_addr_width;
634 /* Match FIFO dma burst MSIZE with external dma config*/
635 fifoth_val = mci_readl(host, FIFOTH);
636 mburst = mszs[(fifoth_val >> 28) & 0x7];
638 /* edmac limit burst to 16, but work around for rk3036 to 8 */
639 if (unlikely(cpu_is_rk3036()))
644 if (mburst > burst_limit) {
645 mburst = burst_limit;
646 idx = (ilog2(mburst) > 0) ? (ilog2(mburst) - 1) : 0;
648 if (soc_is_rk3126b()) {
650 rx_wmark = (host->fifo_depth) / 2 - 1;
652 rx_wmark = mszs[idx] - 1;
655 tx_wmark = (host->fifo_depth) / 2;
656 fifoth_val = SDMMC_SET_FIFOTH(idx, rx_wmark, tx_wmark);
658 mci_writel(host, FIFOTH, fifoth_val);
661 slave_config.dst_maxburst = mburst;
662 slave_config.src_maxburst = slave_config.dst_maxburst;
664 if(host->data->flags & MMC_DATA_WRITE){
665 slave_config.direction = DMA_MEM_TO_DEV;
666 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
668 dev_err(host->dev, "error in dw_mci edma configuration.\n");
672 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
673 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
675 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
678 /* Set dw_mci_edmac_complete_dma as callback */
679 desc->callback = dw_mci_edmac_complete_dma;
680 desc->callback_param = (void *)host;
681 dmaengine_submit(desc);
683 /* Flush cache before write */
684 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
685 sg_elems, DMA_TO_DEVICE);
686 dma_async_issue_pending(host->dms->ch);
689 slave_config.direction = DMA_DEV_TO_MEM;
690 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
692 dev_err(host->dev, "error in dw_mci edma configuration.\n");
695 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
696 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
698 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
701 /* set dw_mci_edmac_complete_dma as callback */
702 desc->callback = dw_mci_edmac_complete_dma;
703 desc->callback_param = (void *)host;
704 dmaengine_submit(desc);
705 dma_async_issue_pending(host->dms->ch);
/*
 * Allocate the dw_mci_dma_slave holder and request the "dw_mci" slave
 * channel named in the device tree.
 * NOTE(review): visible bug — when dma_request_slave_channel() fails,
 * host->dms->ch is NULL, yet the dev_err dereferences
 * host->dms->ch->chan_id, a NULL-pointer dereference in the error path.
 * The message should not touch ch at all.  Also kzalloc would be the
 * conventional allocator here.
 */
710 static int dw_mci_edmac_init(struct dw_mci *host)
711 /* Request external dma channel, SHOULD decide chn in dts */
713 host->dms = (struct dw_mci_dma_slave *)kmalloc
714 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
715 if (NULL == host->dms) {
716 dev_err(host->dev, "No enough memory to alloc dms.\n");
720 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
721 if (!host->dms->ch) {
722 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
723 host->dms->ch->chan_id);
730 if (NULL != host->dms) {
/* Release the external DMA channel (and, in lines missing from this view,
 * free the dms holder) on teardown. */
738 static void dw_mci_edmac_exit(struct dw_mci *host)
740 if (NULL != host->dms) {
741 if (NULL != host->dms->ch) {
742 dma_release_channel(host->dms->ch);
743 host->dms->ch = NULL;
/* DMA-ops vtable wiring the external dmaengine implementation into the core. */
750 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
751 .init = dw_mci_edmac_init,
752 .exit = dw_mci_edmac_exit,
753 .start = dw_mci_edmac_start_dma,
754 .stop = dw_mci_edmac_stop_dma,
755 .complete = dw_mci_edmac_complete_dma,
756 .cleanup = dw_mci_edma_cleanup,
758 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * DMA-map the request's scatterlist, or decline DMA for "complex"
 * transfers: short transfers (< DW_MCI_DMA_THRESHOLD bytes) and buffers
 * whose sg offsets/lengths are not word-aligned fall back to PIO.  When
 * called from pre_req (next != 0) the mapping is cached in
 * data->host_cookie for reuse; otherwise a cached cookie is returned
 * directly.  Returns the mapped sg count, or a negative value to force
 * PIO (the failure-return lines are missing from this sampled view).
 */
760 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
761 struct mmc_data *data,
764 struct scatterlist *sg;
765 unsigned int i, sg_len;
767 if (!next && data->host_cookie)
768 return data->host_cookie;
771 * We don't do DMA on "complex" transfers, i.e. with
772 * non-word-aligned buffers or lengths. Also, we don't bother
773 * with all the DMA setup overhead for short transfers.
775 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
781 for_each_sg(data->sg, sg, data->sg_len, i) {
782 if (sg->offset & 3 || sg->length & 3)
786 sg_len = dma_map_sg(host->dev,
789 dw_mci_get_dma_dir(data));
794 data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: pre-map the next request's buffers so the DMA
 * setup overlaps with the current transfer.  A stale non-zero
 * host_cookie is cleared first; on mapping failure the cookie stays 0 so
 * the request falls back to the normal path.
 */
799 static void dw_mci_pre_req(struct mmc_host *mmc,
800 struct mmc_request *mrq,
803 struct dw_mci_slot *slot = mmc_priv(mmc);
804 struct mmc_data *data = mrq->data;
806 if (!slot->host->use_dma || !data)
809 if (data->host_cookie) {
810 data->host_cookie = 0;
814 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
815 data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: undo dw_mci_pre_req — unmap the scatterlist if
 * it was pre-mapped (host_cookie != 0) and clear the cookie.
 */
818 static void dw_mci_post_req(struct mmc_host *mmc,
819 struct mmc_request *mrq,
822 struct dw_mci_slot *slot = mmc_priv(mmc);
823 struct mmc_data *data = mrq->data;
825 if (!slot->host->use_dma || !data)
828 if (data->host_cookie)
829 dma_unmap_sg(slot->host->dev,
832 dw_mci_get_dma_dir(data));
833 data->host_cookie = 0;
/*
 * (IDMAC builds only) Recompute FIFOTH for the current block size:
 * starting from the largest burst (MSIZE), search downward for a burst
 * that divides both the block depth and the TX-watermark complement, and
 * program the matching MSIZE/RX/TX watermarks.  Falls back to the
 * initial values (msize=0, rx_wmark=1) when blksz is not a multiple of
 * the FIFO width or no burst fits.
 */
836 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
838 #ifdef CONFIG_MMC_DW_IDMAC
839 unsigned int blksz = data->blksz;
840 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
841 u32 fifo_width = 1 << host->data_shift;
842 u32 blksz_depth = blksz / fifo_width, fifoth_val;
843 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
844 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
846 tx_wmark = (host->fifo_depth) / 2;
847 tx_wmark_invers = host->fifo_depth - tx_wmark;
851 * if blksz is not a multiple of the FIFO width
853 if (blksz % fifo_width) {
860 if (!((blksz_depth % mszs[idx]) ||
861 (tx_wmark_invers % mszs[idx]))) {
863 rx_wmark = mszs[idx] - 1;
868 * If idx is '0', it won't be tried
869 * Thus, initial values are uesed
872 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
873 mci_writel(host, FIFOTH, fifoth_val);
/*
 * Configure the card-read-threshold (CDTHRCTL): enabled with
 * thld_size = blksz only for HS200 / SDR104 timing and only when a full
 * block fits in the FIFO; otherwise the threshold is disabled.  Must be
 * called for read transfers only (WARN_ON enforces this).
 */
878 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
880 unsigned int blksz = data->blksz;
881 u32 blksz_depth, fifo_depth;
884 WARN_ON(!(data->flags & MMC_DATA_READ));
886 if (host->timing != MMC_TIMING_MMC_HS200 &&
887 host->timing != MMC_TIMING_UHS_SDR104)
890 blksz_depth = blksz / (1 << host->data_shift);
891 fifo_depth = host->fifo_depth;
893 if (blksz_depth > fifo_depth)
897 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
898 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
899 * Currently just choose blksz.
902 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
906 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to hand the data transfer to DMA.  Maps the sg list (falling back
 * to PIO on failure — return lines missing from this sampled view),
 * retunes FIFOTH when the block size changed, resets the DMA datapath,
 * enables the controller's DMA interface, masks the PIO RX/TX-ready
 * interrupts under host->slock, and starts the configured dma_ops.
 * Returns 0 on success, non-zero to make the caller use PIO.
 */
909 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
917 /* If we don't have a channel, we can't do DMA */
921 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
923 /* Fixme: No need terminate edma, may cause flush op */
924 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
925 host->dma_ops->stop(host);
932 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
933 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
937 * Decide the MSIZE and RX/TX Watermark.
938 * If current block size is same with previous size,
939 * no need to update fifoth.
941 if (host->prev_blksz != data->blksz)
942 dw_mci_adjust_fifoth(host, data);
945 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
947 /* Enable the DMA interface */
948 temp = mci_readl(host, CTRL);
949 temp |= SDMMC_CTRL_DMA_ENABLE;
950 mci_writel(host, CTRL, temp);
952 /* Disable RX/TX IRQs, let DMA handle it */
953 spin_lock_irqsave(&host->slock, flags);
954 temp = mci_readl(host, INTMASK);
955 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
956 mci_writel(host, INTMASK, temp);
957 spin_unlock_irqrestore(&host->slock, flags);
959 host->dma_ops->start(host, sg_len);
/*
 * Set up the data phase of a request.  Records the transfer direction,
 * programs the read threshold for reads, then attempts DMA via
 * dw_mci_submit_data_dma.  On DMA refusal it falls back to PIO: starts
 * the sg_miter, re-enables the RX/TX-ready interrupts under host->slock,
 * disables the controller's DMA interface, and restores the initial
 * FIFOTH (prev_blksz = 0 forces a FIFOTH recompute on the next DMA
 * transfer).  On the DMA path prev_blksz caches the block size so an
 * unchanged size skips the FIFOTH update.
 */
964 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
969 data->error = -EINPROGRESS;
971 //WARN_ON(host->data);
976 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
978 if (data->flags & MMC_DATA_READ) {
979 host->dir_status = DW_MCI_RECV_STATUS;
980 dw_mci_ctrl_rd_thld(host, data);
982 host->dir_status = DW_MCI_SEND_STATUS;
985 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
986 data->blocks, data->blksz, mmc_hostname(host->mmc));
988 if (dw_mci_submit_data_dma(host, data)) {
989 int flags = SG_MITER_ATOMIC;
990 if (host->data->flags & MMC_DATA_READ)
991 flags |= SG_MITER_TO_SG;
993 flags |= SG_MITER_FROM_SG;
995 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
997 host->part_buf_start = 0;
998 host->part_buf_count = 0;
1000 spin_lock_irqsave(&host->slock, flag);
1001 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
1002 temp = mci_readl(host, INTMASK);
1003 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
1004 mci_writel(host, INTMASK, temp);
1005 spin_unlock_irqrestore(&host->slock, flag);
1007 temp = mci_readl(host, CTRL);
1008 temp &= ~SDMMC_CTRL_DMA_ENABLE;
1009 mci_writel(host, CTRL, temp);
1012 * Use the initial fifoth_val for PIO mode.
1013 * If next issued data may be transfered by DMA mode,
1014 * prev_blksz should be invalidated.
1016 mci_writel(host, FIFOTH, host->fifoth_val);
1017 host->prev_blksz = 0;
1020 * Keep the current block size.
1021 * It will be used to decide whether to update
1022 * fifoth register next time.
1024 host->prev_blksz = data->blksz;
/*
 * Synchronously issue a "housekeeping" command (e.g. CLKUPD) and
 * busy-wait for the controller to accept it.
 * Two polling phases: first wait for the card/controller to leave the
 * busy state (up to SDMMC_WAIT_FOR_UNBUSY ms, only while a card is
 * present), then write CMDARG/CMD and poll until hardware clears the
 * START bit (50ms for clock-update commands, 500ms otherwise).  A
 * timeout is only reported via dev_err — there is no error return.
 */
1028 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1030 struct dw_mci *host = slot->host;
1031 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1032 unsigned int cmd_status = 0;
1033 #ifdef SDMMC_WAIT_FOR_UNBUSY
1035 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1037 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1039 ret = time_before(jiffies, timeout);
1040 cmd_status = mci_readl(host, STATUS);
1041 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1045 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1046 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1049 mci_writel(host, CMDARG, arg);
1051 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
1052 if(cmd & SDMMC_CMD_UPD_CLK)
1053 timeout = jiffies + msecs_to_jiffies(50);
1055 timeout = jiffies + msecs_to_jiffies(500);
1056 while (time_before(jiffies, timeout)) {
1057 cmd_status = mci_readl(host, CMD);
1058 if (!(cmd_status & SDMMC_CMD_START))
1061 dev_err(&slot->mmc->class_dev,
1062 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1063 cmd, arg, cmd_status);
/*
 * Program bus clock and bus width for a slot.
 * clock == 0 gates the clock; otherwise, when the requested clock differs
 * from current_speed (or force_clkinit), a divider is computed from
 * bus_hz and pushed to the controller via the CLKUPD command sequence
 * (CLKENA off -> CLKDIV -> CLKENA on, each followed by
 * SDMMC_CMD_UPD_CLK).  Low-power clock gating is enabled only when the
 * SDIO interrupt is not in use.  Contains several Rockchip-specific
 * workarounds: re-parenting clk_mmc for <=400kHz init clocks, and
 * forcing div for HS-DDR eMMC where div would otherwise be 0 (see the
 * in-line FIXME).  Finally the slot's bus width is written to CTYPE.
 * NOTE(review): many branch/assignment lines are missing from this
 * sampled view; do not infer full control flow from what remains.
 */
1066 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1068 struct dw_mci *host = slot->host;
1069 unsigned int tempck,clock = slot->clock;
1074 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1075 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1078 mci_writel(host, CLKENA, 0);
1079 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1080 if(host->svi_flags == 0)
1081 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1083 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1085 } else if (clock != host->current_speed || force_clkinit) {
1086 div = host->bus_hz / clock;
1087 if (host->bus_hz % clock && host->bus_hz > clock)
1089 * move the + 1 after the divide to prevent
1090 * over-clocking the card.
1094 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1096 if ((clock << div) != slot->__clk_old || force_clkinit) {
1097 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1098 dev_info(&slot->mmc->class_dev,
1099 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1100 slot->id, host->bus_hz, clock,
1103 host->set_speed = tempck;
1104 host->set_div = div;
1108 mci_writel(host, CLKENA, 0);
1109 mci_writel(host, CLKSRC, 0);
1113 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1115 if(clock <= 400*1000){
1116 MMC_DBG_BOOT_FUNC(host->mmc,
1117 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1118 clock * 2, mmc_hostname(host->mmc));
1119 /* clk_mmc will change parents to 24MHz xtal*/
1120 clk_set_rate(host->clk_mmc, clock * 2);
1123 host->set_div = div;
1127 MMC_DBG_BOOT_FUNC(host->mmc,
1128 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1129 mmc_hostname(host->mmc));
1132 MMC_DBG_ERR_FUNC(host->mmc,
1133 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1134 mmc_hostname(host->mmc));
1136 host->set_div = div;
1137 host->bus_hz = host->set_speed * 2;
1138 MMC_DBG_BOOT_FUNC(host->mmc,
1139 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1140 div, host->bus_hz, mmc_hostname(host->mmc));
1142 /* BUG may be here, come on, Linux BSP engineer looks!
1143 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1144 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1145 some oops happened like that:
1146 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1147 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1148 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1149 mmc0: new high speed DDR MMC card at address 0001
1150 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1152 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1153 mmcblk0: retrying using single block read
1154 mmcblk0: error -110 sending status command, retrying
1156 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1159 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1160 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1163 host->set_div = div;
1164 host->bus_hz = host->set_speed * 2;
1165 MMC_DBG_BOOT_FUNC(host->mmc,
1166 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1167 div, host->bus_hz, mmc_hostname(host->mmc));
1170 if (host->verid < DW_MMC_240A)
1171 clk_set_rate(host->clk_mmc,(host->bus_hz));
1173 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1179 /* set clock to desired speed */
1180 mci_writel(host, CLKDIV, div);
1184 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1186 /* enable clock; only low power if no SDIO */
1187 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1189 if (host->verid < DW_MMC_240A)
1190 sdio_int = SDMMC_INT_SDIO(slot->id);
1192 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1194 if (!(mci_readl(host, INTMASK) & sdio_int))
1195 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1196 mci_writel(host, CLKENA, clk_en_a);
1200 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1201 /* keep the clock with reflecting clock dividor */
1202 slot->__clk_old = clock << div;
1205 host->current_speed = clock;
1207 if(slot->ctype != slot->pre_ctype)
1208 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1210 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1211 mmc_hostname(host->mmc));
1212 slot->pre_ctype = slot->ctype;
1214 /* Set the current slot bus width */
1215 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1218 extern struct mmc_card *this_card;
/*
 * Poll STATUS until DATA_BUSY/MC_BUSY clear, with a card-type-dependent
 * software timeout (SDIO default, SD, eMMC; eMMC erase commands scale
 * the timeout from EXT_CSD erase parameters via the global this_card).
 * NOTE(review): visible bugs in the erase path —
 *   (host->cmd->arg & (0x1 << 31)) == 1 can never be true: the mask
 *   yields 0 or 0x80000000, so se_flag is never set for secure erase;
 *   the test should be != 0.  Also (0x1 << 31) shifts into the sign bit
 *   of a signed int (UB); use BIT(31) / 0x1U << 31.
 */
1219 static void dw_mci_wait_unbusy(struct dw_mci *host)
1222 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1223 unsigned long time_loop;
1224 unsigned int status;
1227 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1229 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1230 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1231 /* Special care for (secure)erase timeout calculation */
1233 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1236 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1237 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1238 300000 * (this_card->ext_csd.sec_erase_mult)) :
1239 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1243 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1244 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1245 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1246 timeout = SDMMC_DATA_TIMEOUT_SD;
1249 time_loop = jiffies + msecs_to_jiffies(timeout);
1251 status = mci_readl(host, STATUS);
1252 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1254 } while (time_before(jiffies, time_loop));
#ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
 * 0--status is busy.
 * 1--status is unbusy.
/*
 * mmc_host_ops.card_busy callback used during signal-voltage switching.
 * Toggles host->svi_flags between 0 and 1 on alternate calls and returns
 * the new value, so the MMC core's busy/unbusy polling sequence during
 * the switch completes.  NOTE(review): this does NOT read real card busy
 * state from hardware — it only sequences the flag; confirm against the
 * voltage-switch flow in the MMC core before relying on it elsewhere.
 */
int dw_mci_card_busy(struct mmc_host *mmc)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
host->svi_flags, mmc_hostname(host->mmc));
/* First call of the switch sequence: report "busy" phase entered. */
if(host->svi_flags == 0){
host->svi_flags = 1;
return host->svi_flags;
/* Subsequent call: clear the flag and report "unbusy". */
host->svi_flags = 0;
return host->svi_flags;
/*
 * Core request starter: programs the controller for one command (and its
 * data phase, if any) and issues it.  Caller must hold host->lock.
 * Waits for the controller to go unbusy before touching registers.
 */
static void __dw_mci_start_request(struct dw_mci *host,
struct dw_mci_slot *slot,
struct mmc_command *cmd)
struct mmc_request *mrq;
struct mmc_data *data;
/* Board hook to route the controller to this slot, if provided. */
if (host->pdata->select_slot)
host->pdata->select_slot(slot->id);
host->cur_slot = slot;
/* Ensure previous transfer fully drained before reprogramming. */
dw_mci_wait_unbusy(host);
/* Reset per-request bookkeeping. */
host->pending_events = 0;
host->completed_events = 0;
host->data_status = 0;
/* Data phase: program timeout, total byte count and block size. */
dw_mci_set_timeout(host);
mci_writel(host, BYTCNT, data->blksz*data->blocks);
mci_writel(host, BLKSIZ, data->blksz);
cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
/* this is the first command, send the initialization clock */
if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
cmdflags |= SDMMC_CMD_INIT;
/* Queue the data descriptors / FIFO before issuing the command. */
dw_mci_submit_data(host, data);
dw_mci_start_command(host, cmd, cmdflags);
/* Pre-compute the stop command register value for later use. */
host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * Start the request queued on @slot.  If the request carries an SBC
 * (CMD23 set-block-count), that is issued first; otherwise the main
 * command goes straight out.  Caller must hold host->lock.
 */
static void dw_mci_start_request(struct dw_mci *host,
struct dw_mci_slot *slot)
struct mmc_request *mrq = slot->mrq;
struct mmc_command *cmd;
MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
/* SBC (if present) must precede the data command. */
cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
__dw_mci_start_request(host, slot, cmd);
/* must be called with host->lock held */
/*
 * Either start @mrq immediately (host idle) or append the slot to the
 * host's pending queue for the tasklet to pick up later.
 */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
struct mmc_request *mrq)
dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
/* Idle host: dispatch now and move the state machine along. */
if (host->state == STATE_IDLE) {
host->state = STATE_SENDING_CMD;
dw_mci_start_request(host, slot);
/* Busy: defer — dw_mci_request_end() drains this queue. */
list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops.request entry point.  Rejects the request with -ENOMEDIUM
 * if no card is present, otherwise queues it under host->lock.
 */
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
 * The check for card presence and queueing of the request must be
 * atomic, otherwise the card could be removed in between and the
 * request wouldn't fail until another card was inserted.
spin_lock_bh(&host->lock);
if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
spin_unlock_bh(&host->lock);
/* No medium: fail the request immediately back to the core. */
mrq->cmd->error = -ENOMEDIUM;
MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
__LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
mmc_request_done(mmc, mrq);
MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
dw_mci_queue_request(host, slot, mrq);
spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.set_ios: apply bus width, timing (DDR bit in UHS_REG),
 * clock and power state requested by the MMC core.  Polls STATUS until
 * the controller is unbusy (bounded by SDMMC_WAIT_FOR_UNBUSY, or the SD
 * data timeout while a voltage switch is in flight) before reprogramming.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct dw_mci_slot *slot = mmc_priv(mmc);
#ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
#ifdef SDMMC_WAIT_FOR_UNBUSY
unsigned long time_loop;
#ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* During a signal-voltage switch allow the longer SD data timeout. */
if(host->svi_flags == 1)
time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
printk("%d..%s: no card. [%s]\n", \
__LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Busy-poll loop: exit early once DATA/MC busy bits clear. */
ret = time_before(jiffies, time_loop);
regs = mci_readl(slot->host, STATUS);
if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
printk("slot->flags = %lu ", slot->flags);
#ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* Timeout is only worth shouting about outside a voltage switch. */
if(host->svi_flags != 1)
printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
__LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Translate core bus width into the controller CTYPE encoding. */
switch (ios->bus_width) {
case MMC_BUS_WIDTH_4:
slot->ctype = SDMMC_CTYPE_4BIT;
case MMC_BUS_WIDTH_8:
slot->ctype = SDMMC_CTYPE_8BIT;
/* set default 1 bit mode */
slot->ctype = SDMMC_CTYPE_1BIT;
slot->pre_ctype = SDMMC_CTYPE_1BIT;
regs = mci_readl(slot->host, UHS_REG);
/* DDR mode: set/clear the per-slot DDR bit (UHS_REG bits 16+id). */
if (ios->timing == MMC_TIMING_UHS_DDR50)
regs |= ((0x1 << slot->id) << 16);
regs &= ~((0x1 << slot->id) << 16);
mci_writel(slot->host, UHS_REG, regs);
slot->host->timing = ios->timing;
 * Use mirror of ios->clock to prevent race with mmc
 * core ios update when finding the minimum.
slot->clock = ios->clock;
/* Platform-variant hook (clock phase tuning etc.), if implemented. */
if (drv_data && drv_data->set_ios)
drv_data->set_ios(slot->host, ios);
/* Slot specific timing and width adjustment */
dw_mci_setup_bus(slot, false);
switch (ios->power_mode) {
/* Power up: request init clock sequence and enable slot power. */
set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
if (slot->host->pdata->setpower)
slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
regs = mci_readl(slot->host, PWREN);
regs |= (1 << slot->id);
mci_writel(slot->host, PWREN, regs);
/* Power down slot */
if(slot->host->pdata->setpower)
slot->host->pdata->setpower(slot->id, 0);
regs = mci_readl(slot->host, PWREN);
regs &= ~(1 << slot->id);
mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops.get_ro: report write-protect state.  Resolution order:
 * slot quirk (never write-protected) > board get_ro() hook > WP GPIO >
 * the controller's WRTPRT register bit for this slot.
 */
static int dw_mci_get_ro(struct mmc_host *mmc)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci_board *brd = slot->host->pdata;
/* Use platform get_ro function, else try on board write protect */
if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
else if(brd->get_ro)
read_only = brd->get_ro(slot->id);
else if(gpio_is_valid(slot->wp_gpio))
read_only = gpio_get_value(slot->wp_gpio);
/* Fall back to the controller's own write-protect input. */
mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
dev_dbg(&mmc->class_dev, "card is %s\n",
read_only ? "read-only" : "read-write");
/*
 * mmc_host_ops.set_sdio_status: software-controlled "card present" toggle
 * for SDIO (e.g. WiFi power on/off).  Updates the PRESENT flag under the
 * host lock, gates the controller clocks to match, then kicks a detect.
 * Only meaningful for hosts restricted to SDIO cards.
 */
static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
/*struct dw_mci_board *brd = slot->host->pdata;*/
if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
spin_lock_bh(&host->lock);
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
spin_unlock_bh(&host->lock);
/* Present: ensure AHB and card clocks are running (hclk first). */
if (test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
if (__clk_is_enabled(host->hclk_mmc) == false)
clk_prepare_enable(host->hclk_mmc);
if (__clk_is_enabled(host->clk_mmc) == false)
clk_prepare_enable(host->clk_mmc);
/* Absent: gate clocks in reverse order to save power. */
if (__clk_is_enabled(host->clk_mmc) == true)
clk_disable_unprepare(slot->host->clk_mmc);
if (__clk_is_enabled(host->hclk_mmc) == true)
clk_disable_unprepare(slot->host->hclk_mmc);
/* Schedule rescan (20 ms debounce) so the core sees the change. */
mmc_detect_change(slot->mmc, 20);
/*
 * mmc_host_ops.get_cd: card-detect.  On RK3036/RK3126 SoCs with a
 * shared SD/JTAG pad ("force_jtag" GRF bit), card-detect is done via the
 * CD GPIO with manual IRQ-polarity flipping and JTAG mux switching; on
 * other SoCs it falls through to quirk / board hook / CD GPIO / CDETECT
 * register.  Updates DW_MMC_CARD_PRESENT under host->lock.
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci_board *brd = slot->host->pdata;
struct dw_mci *host = slot->host;
int gpio_cd = mmc_gpio_get_cd(mmc);
int force_jtag_bit, force_jtag_reg;
/* Special path: SoCs where the SD pins are muxed with JTAG. */
if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
gpio_cd = slot->cd_gpio;
irq = gpio_to_irq(gpio_cd);
if (gpio_is_valid(gpio_cd)) {
gpio_val = gpio_get_value(gpio_cd);
if (soc_is_rk3036()) {
force_jtag_bit = 11;
force_jtag_reg = RK312X_GRF_SOC_CON0;
} else if (soc_is_rk3126() || soc_is_rk3126b()) {
force_jtag_reg = RK312X_GRF_SOC_CON0;
/* Stable reading (same value twice): treat as debounced. */
if (gpio_val == gpio_get_value(gpio_cd)) {
/* CD is active-low: GPIO 0 means card inserted. */
gpio_cd = (gpio_val == 0 ? 1 : 0);
irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
/* Enable force_jtag without card in slot, ONLY for NCD-package */
grf_writel((0x1 << (force_jtag_bit + 16)) | (1 << force_jtag_bit),
dw_mci_ctrl_all_reset(host);
irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
/* Really card detected: SHOULD disable force_jtag */
grf_writel((0x1 << (force_jtag_bit + 16)) | (0 << force_jtag_bit),
/* Unstable reading: re-arm the IRQ edge and keep the last state. */
gpio_val = gpio_get_value(gpio_cd);
irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
return slot->last_detect_state;
dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
/* SDIO presence is driven purely by dw_mci_set_sdio_status(). */
if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
/* Use platform get_cd function, else try onboard card detect */
if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
else if (brd->get_cd)
present = !brd->get_cd(slot->id);
else if (!IS_ERR_VALUE(gpio_cd))
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
spin_lock_bh(&host->lock);
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
dev_dbg(&mmc->class_dev, "card is present\n");
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
dev_dbg(&mmc->class_dev, "card is not present\n");
spin_unlock_bh(&host->lock);
1643 * Dts Should caps emmc controller with poll-hw-reset
1645 static void dw_mci_hw_reset(struct mmc_host *mmc)
1647 struct dw_mci_slot *slot = mmc_priv(mmc);
1648 struct dw_mci *host = slot->host;
1653 unsigned long timeout;
1656 /* (1) CMD12 to end any transfer in process */
1657 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1658 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1660 if(host->mmc->hold_reg_flag)
1661 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1662 mci_writel(host, CMDARG, 0);
1664 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
1666 timeout = jiffies + msecs_to_jiffies(500);
1668 ret = time_before(jiffies, timeout);
1669 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1674 MMC_DBG_ERR_FUNC(host->mmc,
1675 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1676 __func__, mmc_hostname(host->mmc));
1678 /* (2) wait DTO, even if no response is sent back by card */
1680 timeout = jiffies + msecs_to_jiffies(5);
1682 ret = time_before(jiffies, timeout);
1683 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1684 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1690 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1692 /* Software reset - BMOD[0] for IDMA only */
1693 regs = mci_readl(host, BMOD);
1694 regs |= SDMMC_IDMAC_SWRESET;
1695 mci_writel(host, BMOD, regs);
1696 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1697 regs = mci_readl(host, BMOD);
1698 if(regs & SDMMC_IDMAC_SWRESET)
1699 MMC_DBG_WARN_FUNC(host->mmc,
1700 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1701 __func__, mmc_hostname(host->mmc));
1703 /* DMA reset - CTRL[2] */
1704 regs = mci_readl(host, CTRL);
1705 regs |= SDMMC_CTRL_DMA_RESET;
1706 mci_writel(host, CTRL, regs);
1707 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1708 regs = mci_readl(host, CTRL);
1709 if(regs & SDMMC_CTRL_DMA_RESET)
1710 MMC_DBG_WARN_FUNC(host->mmc,
1711 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1712 __func__, mmc_hostname(host->mmc));
1714 /* FIFO reset - CTRL[1] */
1715 regs = mci_readl(host, CTRL);
1716 regs |= SDMMC_CTRL_FIFO_RESET;
1717 mci_writel(host, CTRL, regs);
1718 mdelay(1); /* no timing limited, 1ms is random value */
1719 regs = mci_readl(host, CTRL);
1720 if(regs & SDMMC_CTRL_FIFO_RESET)
1721 MMC_DBG_WARN_FUNC(host->mmc,
1722 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1723 __func__, mmc_hostname(host->mmc));
1726 According to eMMC spec
1727 tRstW >= 1us ; RST_n pulse width
1728 tRSCA >= 200us ; RST_n to Command time
1729 tRSTH >= 1us ; RST_n high period
1731 mci_writel(slot->host, PWREN, 0x0);
1732 mci_writel(slot->host, RST_N, 0x0);
1734 udelay(10); /* 10us for bad quality eMMc. */
1736 mci_writel(slot->host, PWREN, 0x1);
1737 mci_writel(slot->host, RST_N, 0x1);
1739 usleep_range(500, 1000); /* at least 500(> 200us) */
 * Disable lower power mode.
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 * This function is fast if low power mode is already disabled.
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
struct dw_mci *host = slot->host;
const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
clk_en_a = mci_readl(host, CLKENA);
/* Only touch the register (and issue the clock-update command) if the
 * low-power bit is actually set for this slot. */
if (clk_en_a & clken_low_pwr) {
mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
/* CLKENA changes take effect only after an UPD_CLK command. */
mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * mmc_host_ops.enable_sdio_irq: mask/unmask the per-slot SDIO interrupt
 * bit in INTMASK.  The bit position depends on IP version (verid).
 * When enabling, low-power clock gating is turned off first so the card
 * clock keeps running and the card can actually signal the interrupt.
 */
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
unsigned long flags;
spin_lock_irqsave(&host->slock, flags);
/* Enable/disable Slot Specific SDIO interrupt */
int_mask = mci_readl(host, INTMASK);
/* SDIO interrupt bit moved up by 8 in IP versions >= 2.40a. */
if (host->verid < DW_MMC_240A)
sdio_int = SDMMC_INT_SDIO(slot->id);
sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
 * Turn off low power mode if it was enabled. This is a bit of
 * a heavy operation and we disable / enable IRQs a lot, so
 * we'll leave low power mode disabled and it will get
 * re-enabled again in dw_mci_setup_bus().
dw_mci_disable_low_power(slot);
mci_writel(host, INTMASK,
(int_mask | sdio_int));
mci_writel(host, INTMASK,
(int_mask & ~sdio_int));
spin_unlock_irqrestore(&host->slock, flags);
#ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* IO-domain voltages in millivolts, used as arguments to the GRF switch. */
IO_DOMAIN_12 = 1200,
IO_DOMAIN_18 = 1800,
IO_DOMAIN_33 = 3300,
/*
 * Program the SoC GRF (general register file) so the SD IO pads run at
 * the requested voltage domain.  Register/bit layout is per-SoC:
 * RK3288 uses GRF_IO_VSEL via grf_writel(); RK3368 writes offset 0x900
 * through the host's regmap.  Logs an error for unsupported chips.
 * NOTE(review): the field encodings (shift 7 / shift 6, write-enable
 * bits 23 / 22) come from the respective SoC TRMs — confirm there.
 */
static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
__FUNCTION__, mmc_hostname(host->mmc));
MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
__FUNCTION__, mmc_hostname(host->mmc));
if (cpu_is_rk3288()) {
if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
} else if (host->cid == DW_MCI_TYPE_RK3368) {
if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
regmap_write(host->grf, 0x900, (voltage << 6) | (1 << 22));
MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
__FUNCTION__, mmc_hostname(host->mmc));
/*
 * Perform the actual signal-voltage switch: set the VMMC regulator
 * (through the RK3288 io-domain wrapper where applicable), flip the GRF
 * io-domain, update UHS_REG's 1.8V bit, and give the rail 5 ms to
 * settle.  Only valid on IP versions >= 2.40a (UHS_REG exists).
 */
static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
struct mmc_ios *ios)
unsigned int value,uhs_reg;
 * Signal Voltage Switching is only applicable for Host Controllers
if (host->verid < DW_MMC_240A)
uhs_reg = mci_readl(host, UHS_REG);
MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
__LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
/* RK3288 regulators are shared per io-domain; use the wrapper. */
if (cpu_is_rk3288())
ret = io_domain_regulator_set_voltage(
host->vmmc, 3300000, 3300000);
ret = regulator_set_voltage(host->vmmc, 3300000, 3300000);
/* regulator_put(host->vmmc); //to be done in remove function. */
MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
__func__, regulator_get_voltage(host->vmmc), ret);
MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
" failed\n", mmc_hostname(host->mmc));
dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
__FUNCTION__, mmc_hostname(host->mmc));
/* set High-power mode */
value = mci_readl(host, CLKENA);
value &= ~SDMMC_CLKEN_LOW_PWR;
mci_writel(host,CLKENA , value);
/* Clear the controller's 1.8V select bit for 3.3V operation. */
uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
mci_writel(host,UHS_REG , uhs_reg);
usleep_range(5000, 5500);
/* 3.3V regulator output should be stable within 5 ms */
uhs_reg = mci_readl(host, UHS_REG);
if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
mmc_hostname(host->mmc));
case MMC_SIGNAL_VOLTAGE_180:
if (cpu_is_rk3288())
ret = io_domain_regulator_set_voltage(
ret = regulator_set_voltage(
/* regulator_put(host->vmmc);//to be done in remove function. */
MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
__LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
" failed\n", mmc_hostname(host->mmc));
dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
 * Enable 1.8V Signal Enable in the Host Control2
mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
usleep_range(5000, 5500);
MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
__FUNCTION__,mmc_hostname(host->mmc));
/* 1.8V regulator output should be stable within 5 ms */
uhs_reg = mci_readl(host, UHS_REG);
if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
mmc_hostname(host->mmc));
case MMC_SIGNAL_VOLTAGE_120:
if (cpu_is_rk3288())
ret = io_domain_regulator_set_voltage(
ret = regulator_set_voltage(host->vmmc,
MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
" failed\n", mmc_hostname(host->mmc));
/* No signal voltage switch required */
/*
 * mmc_host_ops.start_signal_voltage_switch: thin wrapper that bails out
 * on pre-2.40a IP (no UHS_REG support) and otherwise delegates to
 * dw_mci_do_start_signal_voltage_switch().
 */
static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
if (host->verid < DW_MMC_240A)
err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * mmc_host_ops.execute_tuning: pick the standard tuning block pattern
 * (CMD21 for HS200 — 4/8-bit variants — or CMD19 for SD), fill in a
 * dw_mci_tuning_data descriptor and hand it to the platform variant's
 * execute_tuning() hook.  Skipped entirely on RK3036/RK312x.
 */
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = host->drv_data;
struct dw_mci_tuning_data tuning_data;
/* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
if(cpu_is_rk3036() || cpu_is_rk312x())
if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
tuning_data.blk_pattern = tuning_blk_pattern_8bit;
tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
tuning_data.blk_pattern = tuning_blk_pattern_4bit;
tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
} else if (opcode == MMC_SEND_TUNING_BLOCK) {
tuning_data.blk_pattern = tuning_blk_pattern_4bit;
tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
"Undefined command(%d) for tuning\n", opcode);
/* Recommend sample phase and delayline
Fixme: Mix-use these three controllers will cause
/* con_id selects which clock controller the tuning applies to:
 * 3 = eMMC, 1 = SDIO, 0 = SD. */
if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
tuning_data.con_id = 3;
else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
tuning_data.con_id = 1;
tuning_data.con_id = 0;
/* 0: driver, from host->devices
1: sample, from devices->host
tuning_data.tuning_type = 1;
if (drv_data && drv_data->execute_tuning)
err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/*
 * mmc_host_ops.post_tmo (Rockchip extension): after a request timeout,
 * drop the in-flight request and force the state machine back to idle
 * so the next request can be issued.
 */
static void dw_mci_post_tmo(struct mmc_host *mmc)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
host->cur_slot->mrq = NULL;
host->state = STATE_IDLE;
/* Host-operations table registered with the MMC core. */
static const struct mmc_host_ops dw_mci_ops = {
.request = dw_mci_request,
.pre_req = dw_mci_pre_req,
.post_req = dw_mci_post_req,
.set_ios = dw_mci_set_ios,
.get_ro = dw_mci_get_ro,
.get_cd = dw_mci_get_cd,
.set_sdio_status = dw_mci_set_sdio_status,
.hw_reset = dw_mci_hw_reset,
.enable_sdio_irq = dw_mci_enable_sdio_irq,
.execute_tuning = dw_mci_execute_tuning,
.post_tmo = dw_mci_post_tmo,
/* Voltage-switch callbacks only exist when the Kconfig option is set. */
#ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
.start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
.card_busy = dw_mci_card_busy,
/*
 * Enable or disable the host's interrupt line, tracking the current
 * state in host->irq_state so enable/disable calls are never unbalanced
 * (enable_irq/disable_irq are reference counted).  Runs with local IRQs
 * saved to make the check-and-toggle atomic on this CPU.
 */
static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
unsigned long flags;
local_irq_save(flags);
/* Only act on an actual state change. */
if(host->irq_state != irqflag)
host->irq_state = irqflag;
enable_irq(host->irq);
disable_irq(host->irq);
local_irq_restore(flags);
/*
 * Post-data-phase cleanup: for host-to-card (write) transfers, translate
 * latched data-status bits into an error on host->data (CRC -> -EILSEQ,
 * end-bit error -> -ETIMEDOUT), skipping CMD19 bus-test writes, then
 * wait for the controller to go unbusy before the request completes.
 */
static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
__releases(&host->lock)
__acquires(&host->lock)
if(DW_MCI_SEND_STATUS == host->dir_status){
/* Bus-test writes (CMD19) intentionally produce CRC errors; skip. */
if( MMC_BUS_TEST_W != host->cmd->opcode){
if(host->data_status & SDMMC_INT_DCRC)
host->data->error = -EILSEQ;
else if(host->data_status & SDMMC_INT_EBE)
host->data->error = -ETIMEDOUT;
dw_mci_wait_unbusy(host);
dw_mci_wait_unbusy(host);
/*
 * Finish the current request: run data-end cleanup, detach the mrq from
 * the slot, start the next queued slot's request (or go idle), then
 * notify the MMC core.  host->lock is dropped around mmc_request_done()
 * because the core may immediately re-enter dw_mci_request().
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
__releases(&host->lock)
__acquires(&host->lock)
struct dw_mci_slot *slot;
struct mmc_host *prev_mmc = host->cur_slot->mmc;
//WARN_ON(host->cmd || host->data);
dw_mci_deal_data_end(host, mrq);
MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
host->cur_slot->mrq = NULL;
/* Kick off the next pending slot, if any. */
if (!list_empty(&host->queue)) {
slot = list_entry(host->queue.next,
struct dw_mci_slot, queue_node);
list_del(&slot->queue_node);
dev_vdbg(host->dev, "list not empty: %s is next\n",
mmc_hostname(slot->mmc));
host->state = STATE_SENDING_CMD;
MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
dw_mci_start_request(host, slot);
dev_vdbg(host->dev, "list empty\n");
host->state = STATE_IDLE;
/* Drop the lock while the core processes completion (may re-enter). */
spin_unlock(&host->lock);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
/*
 * Consume the latched command status: copy the response registers into
 * cmd->resp[] (note the reversed word order for 136-bit responses) and
 * map status bits to errno values (RTO -> -ETIMEDOUT, RCRC -> -EILSEQ,
 * RESP_ERR -> response error).
 */
static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
u32 status = host->cmd_status;
host->cmd_status = 0;
/* Read the response from the card (up to 16 bytes) */
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
/* RESP0 holds the least-significant word -> resp[3]. */
cmd->resp[3] = mci_readl(host, RESP0);
cmd->resp[2] = mci_readl(host, RESP1);
cmd->resp[1] = mci_readl(host, RESP2);
cmd->resp[0] = mci_readl(host, RESP3);
MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
__LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
cmd->resp[0] = mci_readl(host, RESP0);
MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
__LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
/* Response timeout — SDIO cards get a branch of their own here
 * (body elided in this extract). */
if (status & SDMMC_INT_RTO)
if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
cmd->error = -ETIMEDOUT;
}else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
cmd->error = -EILSEQ;
}else if (status & SDMMC_INT_RESP_ERR){
MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* Repeated CMD-response timeouts (other than CMD13 polling) are
 * counted in host->cmd_rto and reported once the cap is reached. */
if(MMC_SEND_STATUS != cmd->opcode)
if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* newer ip versions need a delay between retries */
if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine driven by interrupt events.  Runs under
 * host->lock and loops until the state stops changing:
 *   SENDING_CMD  -> command done; handle SBC chaining, CMD errors
 *                   (issuing a stop/abort), or advance to SENDING_DATA.
 *   SENDING_DATA -> on EVENT_DATA_ERROR stop DMA and recover (manual
 *                   CMD12 for single-block, stop/abort otherwise);
 *                   on EVENT_XFER_COMPLETE advance to DATA_BUSY.
 *   DATA_BUSY    -> translate data-status bits into data->error, reset
 *                   the FIFO after errors, then either finish the request
 *                   or move to SENDING_STOP.
 *   SENDING_STOP -> complete the stop command and finish the request.
 *   DATA_ERROR   -> wait for transfer completion, then back to DATA_BUSY.
 * NOTE(review): many structural lines (do/while, case labels, braces)
 * are elided in this extract; comments below annotate the visible flow.
 */
static void dw_mci_tasklet_func(unsigned long priv)
struct dw_mci *host = (struct dw_mci *)priv;
struct dw_mci_slot *slot = mmc_priv(host->mmc);
struct mmc_data *data;
struct mmc_command *cmd;
enum dw_mci_state state;
enum dw_mci_state prev_state;
u32 status, cmd_flags;
unsigned long timeout = 0;
spin_lock(&host->lock);
state = host->state;
case STATE_SENDING_CMD:
if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
&host->pending_events))
set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
dw_mci_command_complete(host, cmd);
/* SBC (CMD23) finished cleanly: chain straight into the data cmd. */
if (cmd == host->mrq->sbc && !cmd->error) {
prev_state = state = STATE_SENDING_CMD;
__dw_mci_start_request(host, host->cur_slot,
/* Command failed with a data phase pending: abort the transfer. */
if (cmd->data && cmd->error) {
dw_mci_stop_dma(host);
send_stop_cmd(host, data);
state = STATE_SENDING_STOP;
/* host->data = NULL; */
send_stop_abort(host, data);
state = STATE_SENDING_STOP;
set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
/* No data phase (or errored command): the request is done. */
if (!host->mrq->data || cmd->error) {
dw_mci_request_end(host, host->mrq);
prev_state = state = STATE_SENDING_DATA;
case STATE_SENDING_DATA:
if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
dw_mci_stop_dma(host);
send_stop_cmd(host, data);
/*single block read/write, send stop cmd manually to prevent host controller halt*/
MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
__func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
/* Hand-rolled CMD12 (stop transmission), bypassing the normal path. */
mci_writel(host, CMDARG, 0);
cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
| SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
if(host->mmc->hold_reg_flag)
cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll up to 500 ms for the controller to latch the command. */
timeout = jiffies + msecs_to_jiffies(500);
ret = time_before(jiffies, timeout);
if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
MMC_DBG_ERR_FUNC(host->mmc,
"%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
__func__, mmc_hostname(host->mmc));
send_stop_abort(host, data);
state = STATE_DATA_ERROR;
MMC_DBG_CMD_FUNC(host->mmc,
"Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
prev_state,state, mmc_hostname(host->mmc));
if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
&host->pending_events))
MMC_DBG_INFO_FUNC(host->mmc,
"Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
prev_state,state,mmc_hostname(host->mmc));
set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
prev_state = state = STATE_DATA_BUSY;
case STATE_DATA_BUSY:
if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
&host->pending_events))
dw_mci_deal_data_end(host, host->mrq);
MMC_DBG_INFO_FUNC(host->mmc,
"Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
prev_state,state,mmc_hostname(host->mmc));
/* host->data = NULL; */
set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
status = host->data_status;
if (status & DW_MCI_DATA_ERROR_FLAGS) {
if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
MMC_DBG_ERR_FUNC(host->mmc,
"Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
prev_state,state, status, mmc_hostname(host->mmc));
/* Map hardware data-status bits to errno. */
if (status & SDMMC_INT_DRTO) {
data->error = -ETIMEDOUT;
} else if (status & SDMMC_INT_DCRC) {
data->error = -EILSEQ;
} else if (status & SDMMC_INT_EBE &&
host->dir_status == DW_MCI_SEND_STATUS){
* No data CRC status was returned.
* The number of bytes transferred will
* be exaggerated in PIO mode.
data->bytes_xfered = 0;
data->error = -ETIMEDOUT;
* After an error, there may be data lingering
* in the FIFO, so reset it - doing so
* generates a block interrupt, hence setting
* the scatter-gather pointer to NULL.
dw_mci_fifo_reset(host);
/* Success: report the full transfer length. */
data->bytes_xfered = data->blocks * data->blksz;
MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
prev_state,state,mmc_hostname(host->mmc));
dw_mci_request_end(host, host->mrq);
MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
prev_state,state,mmc_hostname(host->mmc));
/* With SBC and no error, no explicit CMD12 is needed. */
if (host->mrq->sbc && !data->error) {
data->stop->error = 0;
MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
prev_state,state,mmc_hostname(host->mmc));
dw_mci_request_end(host, host->mrq);
prev_state = state = STATE_SENDING_STOP;
send_stop_cmd(host, data);
if (data->stop && !data->error) {
/* stop command for open-ended transfer*/
send_stop_abort(host, data);
MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
prev_state,state,mmc_hostname(host->mmc));
case STATE_SENDING_STOP:
if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
prev_state, state, mmc_hostname(host->mmc));
/* CMD error in data command */
if (host->mrq->cmd->error && host->mrq->data) {
dw_mci_fifo_reset(host);
host->data = NULL; */
dw_mci_command_complete(host, host->mrq->stop);
if (host->mrq->stop)
dw_mci_command_complete(host, host->mrq->stop);
host->cmd_status = 0;
dw_mci_request_end(host, host->mrq);
case STATE_DATA_ERROR:
if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
&host->pending_events))
state = STATE_DATA_BUSY;
} while (state != prev_state);
host->state = state;
spin_unlock(&host->lock);
/* push final bytes to part_buf, only use during push */
/* Stash the trailing (< FIFO-word-size) bytes of a push in part_buf so a
 * later call can complete the FIFO word. */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
memcpy((void *)&host->part_buf, buf, cnt);
host->part_buf_count = cnt;
/* append bytes to part_buf, only use during push */
/* Top up part_buf toward one full FIFO word (1 << data_shift bytes);
 * clamps cnt to the remaining space and advances part_buf_count. */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
host->part_buf_count += cnt;
/* pull first bytes from part_buf, only use during pull */
/* Drain up to cnt buffered bytes from part_buf into buf, advancing the
 * start offset and shrinking the count accordingly. */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
cnt = min(cnt, (int)host->part_buf_count);
memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
host->part_buf_count -= cnt;
host->part_buf_start += cnt;
/* pull final bytes from the part_buf, assuming it's just been filled */
/* Take the first cnt bytes of a freshly read FIFO word; the remainder of
 * the word stays in part_buf for the next pull. */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
memcpy(buf, &host->part_buf, cnt);
host->part_buf_start = cnt;
host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO write path for a 16-bit data FIFO: flush any partial word first,
 * handle misaligned buffers via a bounce array when the arch lacks
 * efficient unaligned access, stream whole 16-bit words, and stash any
 * trailing odd byte in part_buf (flushing it at end of transfer).
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
struct mmc_data *data = host->data;
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
/* part_buf reached a full 16-bit word: write it out. */
if (host->part_buf_count == 2) {
mci_writew(host, DATA(host->data_offset),
host->part_buf_count = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (unlikely((unsigned long)buf & 0x1)) {
u16 aligned_buf[64];
int len = min(cnt & -2, (int)sizeof(aligned_buf));
int items = len >> 1;
/* memcpy from input buffer into aligned buffer */
memcpy(aligned_buf, buf, len);
/* push data from aligned buffer into fifo */
for (i = 0; i < items; ++i)
mci_writew(host, DATA(host->data_offset),
/* Aligned fast path: stream 16-bit words directly. */
for (; cnt >= 2; cnt -= 2)
mci_writew(host, DATA(host->data_offset), *pdata++);
/* put anything remaining in the part_buf */
dw_mci_set_part_bytes(host, buf, cnt);
/* Push data if we have reached the expected data length */
if ((data->bytes_xfered + init_cnt) ==
(data->blksz * data->blocks))
mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit wide FIFO: read 16-bit words from the DATA
 * register into @buf (via an aligned bounce buffer when @buf is
 * misaligned), then read one extra word and keep its unconsumed tail
 * in part_buf for the next call.
 */
2544 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2546 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2547 if (unlikely((unsigned long)buf & 0x1)) {
2549 /* pull data from fifo into aligned buffer */
2550 u16 aligned_buf[64];
2551 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2552 int items = len >> 1;
2554 for (i = 0; i < items; ++i)
2555 aligned_buf[i] = mci_readw(host,
2556 DATA(host->data_offset));
2557 /* memcpy from aligned buffer into output buffer */
2558 memcpy(buf, aligned_buf, len);
/* Aligned fast path: read whole 16-bit words directly. */
2566 for (; cnt >= 2; cnt -= 2)
2567 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Trailing odd byte: read a full word, deliver cnt bytes of it. */
2571 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2572 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit wide FIFO.  Same structure as the 16-bit
 * variant: flush part_buf, stream aligned 32-bit words, buffer the
 * trailing 1-3 bytes; misaligned sources go through a bounce buffer.
 */
2576 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2578 struct mmc_data *data = host->data;
2581 /* try and push anything in the part_buf */
2582 if (unlikely(host->part_buf_count)) {
2583 int len = dw_mci_push_part_bytes(host, buf, cnt);
/* A complete 32-bit word is buffered: write it out and reset. */
2586 if (host->part_buf_count == 4) {
2587 mci_writel(host, DATA(host->data_offset),
2589 host->part_buf_count = 0;
2592 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* Source not 4-byte aligned: stage through an aligned bounce buffer. */
2593 if (unlikely((unsigned long)buf & 0x3)) {
2595 u32 aligned_buf[32];
2596 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2597 int items = len >> 2;
2599 /* memcpy from input buffer into aligned buffer */
2600 memcpy(aligned_buf, buf, len);
2603 /* push data from aligned buffer into fifo */
2604 for (i = 0; i < items; ++i)
2605 mci_writel(host, DATA(host->data_offset),
/* Aligned fast path: write whole 32-bit words straight to the FIFO. */
2612 for (; cnt >= 4; cnt -= 4)
2613 mci_writel(host, DATA(host->data_offset), *pdata++);
2616 /* put anything remaining in the part_buf */
2618 dw_mci_set_part_bytes(host, buf, cnt);
2619 /* Push data if we have reached the expected data length */
2620 if ((data->bytes_xfered + init_cnt) ==
2621 (data->blksz * data->blocks))
2622 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit wide FIFO: mirror of dw_mci_pull_data16 with
 * 4-byte words; the final partial word is parked in part_buf32.
 */
2627 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2629 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2630 if (unlikely((unsigned long)buf & 0x3)) {
2632 /* pull data from fifo into aligned buffer */
2633 u32 aligned_buf[32];
2634 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2635 int items = len >> 2;
2637 for (i = 0; i < items; ++i)
2638 aligned_buf[i] = mci_readl(host,
2639 DATA(host->data_offset));
2640 /* memcpy from aligned buffer into output buffer */
2641 memcpy(buf, aligned_buf, len);
/* Aligned fast path: read whole 32-bit words directly. */
2649 for (; cnt >= 4; cnt -= 4)
2650 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Trailing 1-3 bytes: read a full word, deliver cnt bytes of it. */
2654 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2655 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 64-bit wide FIFO.  Same structure as the 16/32-bit
 * variants with 8-byte words via mci_writeq().
 */
2659 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2661 struct mmc_data *data = host->data;
2664 /* try and push anything in the part_buf */
2665 if (unlikely(host->part_buf_count)) {
2666 int len = dw_mci_push_part_bytes(host, buf, cnt);
/* A complete 64-bit word is buffered: write it out and reset. */
2670 if (host->part_buf_count == 8) {
2671 mci_writeq(host, DATA(host->data_offset),
2673 host->part_buf_count = 0;
2676 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* Source not 8-byte aligned: stage through an aligned bounce buffer. */
2677 if (unlikely((unsigned long)buf & 0x7)) {
2679 u64 aligned_buf[16];
2680 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2681 int items = len >> 3;
2683 /* memcpy from input buffer into aligned buffer */
2684 memcpy(aligned_buf, buf, len);
2687 /* push data from aligned buffer into fifo */
2688 for (i = 0; i < items; ++i)
2689 mci_writeq(host, DATA(host->data_offset),
/* Aligned fast path: write whole 64-bit words straight to the FIFO. */
2696 for (; cnt >= 8; cnt -= 8)
2697 mci_writeq(host, DATA(host->data_offset), *pdata++);
2700 /* put anything remaining in the part_buf */
2702 dw_mci_set_part_bytes(host, buf, cnt);
2703 /* Push data if we have reached the expected data length */
2704 if ((data->bytes_xfered + init_cnt) ==
2705 (data->blksz * data->blocks))
2706 mci_writeq(host, DATA(host->data_offset),
/*
 * PIO pull for a 64-bit wide FIFO: mirror of the 16/32-bit pulls with
 * 8-byte words; the final partial word is parked in host->part_buf.
 */
2711 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2713 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2714 if (unlikely((unsigned long)buf & 0x7)) {
2716 /* pull data from fifo into aligned buffer */
2717 u64 aligned_buf[16];
2718 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2719 int items = len >> 3;
2721 for (i = 0; i < items; ++i)
2722 aligned_buf[i] = mci_readq(host,
2723 DATA(host->data_offset));
2724 /* memcpy from aligned buffer into output buffer */
2725 memcpy(buf, aligned_buf, len);
/* Aligned fast path: read whole 64-bit words directly. */
2733 for (; cnt >= 8; cnt -= 8)
2734 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* Trailing 1-7 bytes: read a full word, deliver cnt bytes of it. */
2738 host->part_buf = mci_readq(host, DATA(host->data_offset));
2739 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Common pull entry point: first satisfy the request from leftover
 * part_buf bytes, then delegate the remainder to the width-specific
 * handler installed in host->pull_data.
 */
2743 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2747 /* get remaining partial bytes */
2748 len = dw_mci_pull_part_bytes(host, buf, cnt);
/* part_buf alone satisfied the whole request. */
2749 if (unlikely(len == cnt))
2754 /* get the rest of the data */
2755 host->pull_data(host, buf, cnt);
/*
 * Drain the receive FIFO in PIO mode, walking the request's scatterlist
 * via sg_miter.  Loops while RXDR keeps asserting (and, on @dto, while
 * the FIFO still reports data), accounting bytes into data->bytes_xfered.
 * On scatterlist exhaustion it stops the miter and flags XFER_COMPLETE.
 */
2758 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2760 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2762 unsigned int offset;
2763 struct mmc_data *data = host->data;
2764 int shift = host->data_shift;
2767 unsigned int remain, fcnt;
/* Rockchip debug aid: bus ref count should never be 0 here. */
2769 if(!host->mmc->bus_refs){
2770 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2774 if (!sg_miter_next(sg_miter))
2777 host->sg = sg_miter->piter.sg;
2778 buf = sg_miter->addr;
2779 remain = sg_miter->length;
/* Bytes currently available: FIFO words plus buffered partials. */
2783 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2784 << shift) + host->part_buf_count;
2785 len = min(remain, fcnt);
2788 dw_mci_pull_data(host, (void *)(buf + offset), len);
2789 data->bytes_xfered += len;
2794 sg_miter->consumed = offset;
2795 status = mci_readl(host, MINTSTS);
2796 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2797 /* if the RXDR is ready read again */
2798 } while ((status & SDMMC_INT_RXDR) ||
2799 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2802 if (!sg_miter_next(sg_miter))
2804 sg_miter->consumed = 0;
2806 sg_miter_stop(sg_miter);
2810 sg_miter_stop(sg_miter);
/* Scatterlist fully consumed: signal transfer completion. */
2814 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Fill the transmit FIFO in PIO mode, walking the request's scatterlist
 * via sg_miter.  Loops while TXDR keeps asserting; free space is the
 * FIFO depth minus occupancy, adjusted for buffered partial bytes.
 * On scatterlist exhaustion it stops the miter and flags XFER_COMPLETE.
 */
2817 static void dw_mci_write_data_pio(struct dw_mci *host)
2819 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2821 unsigned int offset;
2822 struct mmc_data *data = host->data;
2823 int shift = host->data_shift;
2826 unsigned int fifo_depth = host->fifo_depth;
2827 unsigned int remain, fcnt;
/* Rockchip debug aid: bus ref count should never be 0 here. */
2829 if(!host->mmc->bus_refs){
2830 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2835 if (!sg_miter_next(sg_miter))
2838 host->sg = sg_miter->piter.sg;
2839 buf = sg_miter->addr;
2840 remain = sg_miter->length;
/* Bytes of free FIFO space, minus partial bytes already pending. */
2844 fcnt = ((fifo_depth -
2845 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2846 << shift) - host->part_buf_count;
2847 len = min(remain, fcnt);
2850 host->push_data(host, (void *)(buf + offset), len);
2851 data->bytes_xfered += len;
2856 sg_miter->consumed = offset;
2857 status = mci_readl(host, MINTSTS);
2858 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2859 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2862 if (!sg_miter_next(sg_miter))
2864 sg_miter->consumed = 0;
2866 sg_miter_stop(sg_miter);
2870 sg_miter_stop(sg_miter);
/* Scatterlist fully consumed: signal transfer completion. */
2874 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Record a command-phase interrupt status (first status wins) and kick
 * the tasklet to process CMD_COMPLETE.
 */
2877 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
/* Keep the first reported status; later ones are ignored. */
2879 if (!host->cmd_status)
2880 host->cmd_status = status;
2887 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2888 tasklet_schedule(&host->tasklet);
/*
 * Top-level IRQ handler.  Reads the masked interrupt status (MINTSTS),
 * acks each handled source in RINTSTS, and dispatches: command errors,
 * data errors, data-over, PIO RX/TX drain, voltage-switch, command-done,
 * card-detect, hardware-locked-write, per-slot SDIO IRQs, and (for SoCs
 * using the internal IDMAC) DMA-completion status in IDSTS.
 */
2891 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2893 struct dw_mci *host = dev_id;
2894 u32 pending, sdio_int;
2897 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2900 * DTO fix - version 2.10a and below, and only if internal DMA
/* Quirk: synthesize DATA_OVER when the controller fails to raise it. */
2903 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2905 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2906 pending |= SDMMC_INT_DATA_OVER;
2910 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2911 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2912 host->cmd_status = pending;
2914 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2915 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2917 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2920 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2921 /* if there is an error report DATA_ERROR */
2922 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2923 host->data_status = pending;
2925 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2927 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2928 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2929 tasklet_schedule(&host->tasklet);
2932 if (pending & SDMMC_INT_DATA_OVER) {
2933 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2934 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2935 if (!host->data_status)
2936 host->data_status = pending;
/* Final PIO drain: pass dto=true so leftover FIFO data is read too. */
2938 if (host->dir_status == DW_MCI_RECV_STATUS) {
2939 if (host->sg != NULL)
2940 dw_mci_read_data_pio(host, true);
2942 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2943 tasklet_schedule(&host->tasklet);
2946 if (pending & SDMMC_INT_RXDR) {
2947 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2948 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2949 dw_mci_read_data_pio(host, false);
2952 if (pending & SDMMC_INT_TXDR) {
2953 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2954 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2955 dw_mci_write_data_pio(host);
/* Voltage-switch interrupt is routed through the command path. */
2958 if (pending & SDMMC_INT_VSI) {
2959 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2960 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2961 dw_mci_cmd_interrupt(host, pending);
2964 if (pending & SDMMC_INT_CMD_DONE) {
2965 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2966 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2967 dw_mci_cmd_interrupt(host, pending);
/* Card detect: hold a wake lock and defer rescan to the workqueue. */
2970 if (pending & SDMMC_INT_CD) {
2971 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2972 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
2973 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2974 queue_work(host->card_workqueue, &host->card_work);
2977 if (pending & SDMMC_INT_HLE) {
2978 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2979 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2983 /* Handle SDIO Interrupts */
2984 for (i = 0; i < host->num_slots; i++) {
2985 struct dw_mci_slot *slot = host->slot[i];
/* SDIO IRQ bit position moved in IP version 2.40a and later. */
2987 if (host->verid < DW_MMC_240A)
2988 sdio_int = SDMMC_INT_SDIO(i);
2990 sdio_int = SDMMC_INT_SDIO(i + 8);
2992 if (pending & sdio_int) {
2993 mci_writel(host, RINTSTS, sdio_int);
2994 mmc_signal_sdio_irq(slot->mmc);
3000 #ifdef CONFIG_MMC_DW_IDMAC
3001 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
3002 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
3003 /* Handle DMA interrupts */
3004 pending = mci_readl(host, IDSTS);
3005 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
3006 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
3007 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
3008 host->dma_ops->complete((void *)host);
/*
 * Card-detect workqueue handler.  For each slot: re-read card presence,
 * switch the pinctrl state between SD function and uart-debug ("udbg")
 * when that pair is configured, and on a presence change reset the
 * controller, fail any in-flight request with -ENOMEDIUM, and notify
 * the MMC core via mmc_detect_change().
 */
3016 static void dw_mci_work_routine_card(struct work_struct *work)
3018 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
3021 for (i = 0; i < host->num_slots; i++) {
3022 struct dw_mci_slot *slot = host->slot[i];
3023 struct mmc_host *mmc = slot->mmc;
3024 struct mmc_request *mrq;
3027 present = dw_mci_get_cd(mmc);
3029 /* Card insert, switch data line to uart function, and vice verse.
3030 eONLY audi chip need switched by software, using udbg tag in dts!
3032 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
3034 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3035 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3036 mmc_hostname(host->mmc));
3038 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3039 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3040 mmc_hostname(host->mmc));
/* Loop in case presence flips again while we are handling it. */
3044 while (present != slot->last_detect_state) {
3045 dev_dbg(&slot->mmc->class_dev, "card %s\n",
3046 present ? "inserted" : "removed");
3047 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
3048 present ? "inserted" : "removed.", mmc_hostname(mmc));
3050 dw_mci_ctrl_all_reset(host);
3051 /* Stop edma when rountine card triggered */
3052 if(cpu_is_rk3036() || cpu_is_rk312x())
3053 if(host->dma_ops && host->dma_ops->stop)
3054 host->dma_ops->stop(host);
3055 rk_send_wakeup_key();//wake up system
3056 spin_lock_bh(&host->lock);
3058 /* Card change detected */
3059 slot->last_detect_state = present;
3061 /* Clean up queue if present */
3064 if (mrq == host->mrq) {
/* Request is the one being processed: fail it per state-machine state. */
3068 switch (host->state) {
3071 case STATE_SENDING_CMD:
3072 mrq->cmd->error = -ENOMEDIUM;
3076 case STATE_SENDING_DATA:
3077 mrq->data->error = -ENOMEDIUM;
3078 dw_mci_stop_dma(host);
3080 case STATE_DATA_BUSY:
3081 case STATE_DATA_ERROR:
3082 if (mrq->data->error == -EINPROGRESS)
3083 mrq->data->error = -ENOMEDIUM;
3087 case STATE_SENDING_STOP:
3088 mrq->stop->error = -ENOMEDIUM;
3092 dw_mci_request_end(host, mrq);
/* Request still queued: dequeue and fail every phase directly. */
3094 list_del(&slot->queue_node);
3095 mrq->cmd->error = -ENOMEDIUM;
3097 mrq->data->error = -ENOMEDIUM;
3099 mrq->stop->error = -ENOMEDIUM;
3101 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3102 mrq->cmd->opcode, mmc_hostname(mmc));
/* Drop the lock around the core callback; it may sleep or re-enter. */
3104 spin_unlock(&host->lock);
3105 mmc_request_done(slot->mmc, mrq);
3106 spin_lock(&host->lock);
3110 /* Power down slot */
3112 /* Clear down the FIFO */
3113 dw_mci_fifo_reset(host);
3114 #ifdef CONFIG_MMC_DW_IDMAC
3115 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3116 dw_mci_idmac_reset(host);
3121 spin_unlock_bh(&host->lock);
3123 present = dw_mci_get_cd(mmc);
3126 mmc_detect_change(slot->mmc,
3127 msecs_to_jiffies(host->pdata->detect_delay_ms));
3132 /* given a slot id, find out the device node representing that slot */
/*
 * Walk the controller's DT children and return the child whose "reg"
 * property equals @slot.  Returns NULL-ish on no match (return paths
 * not visible in this excerpt).
 */
3133 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3135 struct device_node *np;
3139 if (!dev || !dev->of_node)
3142 for_each_child_of_node(dev->of_node, np) {
3143 addr = of_get_property(np, "reg", &len);
3144 if (!addr || (len < sizeof(int)))
3146 if (be32_to_cpup(addr) == slot)
/* Table mapping per-slot DT property names to slot quirk flag bits. */
3152 static struct dw_mci_of_slot_quirks {
3155 } of_slot_quirks[] = {
3157 .quirk = "disable-wp",
3158 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Accumulate quirk flag bits for @slot from the DT properties listed in
 * of_slot_quirks[].
 */
3162 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3164 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3169 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3170 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3171 quirks |= of_slot_quirks[idx].id;
3176 /* find out bus-width for a given slot */
/*
 * Read "bus-width" from the controller node (NOT the per-slot child —
 * see the inline note); logs an error and presumably falls back to a
 * default width when absent (fallback line not visible here).
 */
3177 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3179 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3185 if (of_property_read_u32(np, "bus-width", &bus_wd))
3186 dev_err(dev, "bus-width property not found, assuming width"
3192 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Look up "pwr-gpios" on the controller node, claim it, and drive it
 * low (power-enable).  A missing property is not an error.
 */
3193 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3195 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3201 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3203 /* Having a missing entry is valid; return silently */
3204 if (!gpio_is_valid(gpio))
3207 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3208 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3212 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3218 /* find the write protect gpio for a given slot; or -1 if none specified */
/*
 * Look up "wp-gpios" on the per-slot child node and claim it.
 * A missing property is not an error.
 */
3219 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3221 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3227 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3229 /* Having a missing entry is valid; return silently */
3230 if (!gpio_is_valid(gpio))
3233 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3234 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3241 /* find the cd gpio for a given slot */
/*
 * Look up "cd-gpios" on the controller node and register it with the
 * MMC core's slot-gpio card-detect helper.  Missing property is valid.
 */
3242 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3243 struct mmc_host *mmc)
3245 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3251 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3253 /* Having a missing entry is valid; return silently */
3254 if (!gpio_is_valid(gpio))
3257 if (mmc_gpio_request_cd(mmc, gpio, 0))
3258 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * Threaded IRQ handler for the GPIO card-detect line.  Flips the trigger
 * polarity to catch the opposite edge next time, wakes the system, and
 * queues detection work unless rescanning is disabled (suspend path, in
 * which case the suspend_post notifier is expected to handle it).
 */
3261 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3263 struct mmc_host *mmc = dev_id;
3264 struct dw_mci_slot *slot = mmc_priv(mmc);
3265 struct dw_mci *host = slot->host;
3266 int gpio_cd = slot->cd_gpio;
/* Re-arm for the opposite level so both insert and remove are seen. */
3268 (gpio_get_value(gpio_cd) == 0) ?
3269 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
3270 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
3272 /* wakeup system whether gpio debounce or not */
3273 rk_send_wakeup_key();
3275 /* no need to trigger detect flow when rescan is disabled.
3276 This case happended in dpm, that we just wakeup system and
3277 let suspend_post notify callback handle it.
3279 if(mmc->rescan_disable == 0)
3280 queue_work(host->card_workqueue, &host->card_work);
3282 printk("%s: rescan been disabled!\n", __FUNCTION__);
/*
 * Convert the card-detect GPIO to an IRQ and install the threaded
 * handler above, initially level-low triggered; also enables IRQ wake
 * so card insertion can resume the system from idle/deep suspend.
 */
3287 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3288 struct mmc_host *mmc)
3290 struct dw_mci_slot *slot = mmc_priv(mmc);
3291 struct dw_mci *host = slot->host;
3295 /* Having a missing entry is valid; return silently */
3296 if (!gpio_is_valid(gpio))
3299 irq = gpio_to_irq(gpio);
3301 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3302 NULL, dw_mci_gpio_cd_irqt,
3303 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
3307 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3309 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3310 enable_irq_wake(irq);
3313 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/*
 * Release the card-detect GPIO IRQ and the GPIO itself (teardown
 * counterpart of dw_mci_of_set_cd_gpio_irq).
 */
3317 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3318 struct mmc_host *mmc)
3320 if (!gpio_is_valid(gpio))
3323 if (gpio_to_irq(gpio) >= 0) {
3324 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3325 devm_gpio_free(&mmc->class_dev, gpio);
3328 #else /* CONFIG_OF */
/*
 * Non-devicetree builds: stub out the OF helpers (bodies elided in
 * this excerpt; they presumably return neutral defaults).
 */
3329 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3333 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3337 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3341 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3345 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3346 struct mmc_host *mmc)
3350 #endif /* CONFIG_OF */
3352 /* @host: dw_mci host prvdata
3353 * Init pinctrl for each platform. Usually we assign
3354 * "defalut" tag for functional usage, "idle" tag for gpio
3355 * state and "udbg" tag for uart_dbg if any.
/*
 * Looks up and applies "idle" then "default" pinctrl states, and for SD
 * slots additionally looks up the optional "udbg" (uart-debug) state,
 * selecting it when no card is present.  Skipped entirely for eMMC.
 */
3357 static void dw_mci_init_pinctrl(struct dw_mci *host)
3359 /* Fixme: DON'T TOUCH EMMC SETTING! */
3360 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3363 /* Get pinctrl for DTS */
3364 host->pinctrl = devm_pinctrl_get(host->dev);
3365 if (IS_ERR(host->pinctrl)) {
3366 dev_err(host->dev, "%s: No pinctrl used!\n",
3367 mmc_hostname(host->mmc));
3371 /* Lookup idle state */
3372 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3373 PINCTRL_STATE_IDLE);
3374 if (IS_ERR(host->pins_idle)) {
3375 dev_err(host->dev, "%s: No idle tag found!\n",
3376 mmc_hostname(host->mmc));
3378 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3379 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3380 mmc_hostname(host->mmc));
3383 /* Lookup default state */
3384 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3385 PINCTRL_STATE_DEFAULT);
3386 if (IS_ERR(host->pins_default)) {
3387 dev_err(host->dev, "%s: No default pinctrl found!\n",
3388 mmc_hostname(host->mmc));
3390 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3391 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3392 mmc_hostname(host->mmc));
3395 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3396 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3397 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3398 if (IS_ERR(host->pins_udbg)) {
3399 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3400 mmc_hostname(host->mmc));
/* No card inserted: route the pins to uart-debug for now. */
3402 if (!dw_mci_get_cd(host->mmc))
3403 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3404 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3405 mmc_hostname(host->mmc));
/*
 * PM notifier for SD slots: disable card rescanning (and cancel any
 * pending detect work) before suspend/hibernation; re-enable it and
 * trigger a detect after resume/restore.
 */
3410 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3411 unsigned long mode, void *unused)
3413 struct mmc_host *host = container_of(
3414 notify_block, struct mmc_host, pm_notify);
3415 unsigned long flags;
3418 case PM_HIBERNATION_PREPARE:
3419 case PM_SUSPEND_PREPARE:
3420 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3421 spin_lock_irqsave(&host->lock, flags);
3422 host->rescan_disable = 1;
3423 spin_unlock_irqrestore(&host->lock, flags);
/* Drop the detect wake lock if detect work was still queued. */
3424 if (cancel_delayed_work(&host->detect))
3425 wake_unlock(&host->detect_wake_lock);
3428 case PM_POST_SUSPEND:
3429 case PM_POST_HIBERNATION:
3430 case PM_POST_RESTORE:
3431 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3432 spin_lock_irqsave(&host->lock, flags);
3433 host->rescan_disable = 0;
3434 spin_unlock_irqrestore(&host->lock, flags);
/* Kick a detect shortly after resume to pick up card changes. */
3435 mmc_detect_change(host, 10);
/*
 * Allocate and register the mmc_host for slot @id: parse per-slot DT
 * properties (frequencies, card-type restrictions, caps, bus width,
 * gpios), apply Rockchip SoC-specific fixups (force_jtag disable,
 * gpio-based card detect on low-end SoCs), configure OCR mask, block
 * limits and regulators, then mmc_add_host().  Error paths unwind the
 * PM notifier and cd-gpio IRQ.
 */
3441 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3443 struct mmc_host *mmc;
3444 struct dw_mci_slot *slot;
3445 const struct dw_mci_drv_data *drv_data = host->drv_data;
3450 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3454 slot = mmc_priv(mmc);
3458 host->slot[id] = slot;
3461 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3463 mmc->ops = &dw_mci_ops;
/* DT "clock-freq-min-max" overrides the driver's default f_min/f_max. */
3465 if (of_property_read_u32_array(host->dev->of_node,
3466 "clock-freq-min-max", freq, 2)) {
3467 mmc->f_min = DW_MCI_FREQ_MIN;
3468 mmc->f_max = DW_MCI_FREQ_MAX;
3470 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3471 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3473 mmc->f_min = freq[0];
3474 mmc->f_max = freq[1];
3476 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3477 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3480 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
/* Rockchip DT tags restrict each controller to one card type. */
3482 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3483 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3484 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3485 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3486 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3487 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
/* SD slots need the suspend/resume rescan gating notifier. */
3489 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3490 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3491 if (register_pm_notifier(&mmc->pm_notify)) {
3492 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3493 goto err_pm_notifier;
/* SoC fixup: keep the JTAG mux from stealing the SD pins. */
3497 if (host->cid == DW_MCI_TYPE_RK3368) {
3498 if (IS_ERR(host->grf))
3499 pr_err("rk_sdmmc: dts couldn't find grf regmap for 3368\n");
3501 /* Disable force_jtag */
3502 regmap_write(host->grf, 0x43c, (1<<13)<<16 | (0 << 13));
3503 } else if (cpu_is_rk3288()) {
3504 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
3508 /* We assume only low-level chip use gpio_cd */
3509 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
3510 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3511 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3512 if (gpio_is_valid(slot->cd_gpio)) {
3513 /* Request gpio int for card detection */
3514 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3516 slot->cd_gpio = -ENODEV;
3517 dev_err(host->dev, "failed to get your cd-gpios!\n");
3521 if (host->pdata->get_ocr)
3522 mmc->ocr_avail = host->pdata->get_ocr(id);
/* Default OCR: advertise the full 1.65-3.6 V range. */
3525 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3526 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3527 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3528 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3532 * Start with slot power disabled, it will be enabled when a card
3535 if (host->pdata->setpower)
3536 host->pdata->setpower(id, 0);
3538 if (host->pdata->caps)
3539 mmc->caps = host->pdata->caps;
3541 if (host->pdata->pm_caps)
3542 mmc->pm_caps = host->pdata->pm_caps;
/* Controller index: DT "mshc" alias, else platform-device id. */
3544 if (host->dev->of_node) {
3545 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3549 ctrl_id = to_platform_device(host->dev)->id;
3551 if (drv_data && drv_data->caps)
3552 mmc->caps |= drv_data->caps[ctrl_id];
3553 if (drv_data && drv_data->hold_reg_flag)
3554 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3556 /* set the compatibility of driver. */
3557 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3558 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3560 if (host->pdata->caps2)
3561 mmc->caps2 = host->pdata->caps2;
3563 if (host->pdata->get_bus_wd)
3564 bus_width = host->pdata->get_bus_wd(slot->id);
3565 else if (host->dev->of_node)
3566 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3570 switch (bus_width) {
3572 mmc->caps |= MMC_CAP_8_BIT_DATA;
/* NOTE(review): 4-bit case presumably falls through from 8-bit
 * in the elided lines — confirm against the full source. */
3574 mmc->caps |= MMC_CAP_4_BIT_DATA;
3577 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3578 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3579 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3580 mmc->caps |= MMC_CAP_SDIO_IRQ;
3581 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3582 mmc->caps |= MMC_CAP_HW_RESET;
3583 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3584 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3585 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3586 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3587 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3588 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3589 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3590 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3592 /*Assign pm_caps pass to pm_flags*/
3593 mmc->pm_flags = mmc->pm_caps;
3595 if (host->pdata->blk_settings) {
3596 mmc->max_segs = host->pdata->blk_settings->max_segs;
3597 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3598 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3599 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3600 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3602 /* Useful defaults if platform data is unset. */
3603 #ifdef CONFIG_MMC_DW_IDMAC
3604 mmc->max_segs = host->ring_size;
3605 mmc->max_blk_size = 65536;
3606 mmc->max_blk_count = host->ring_size;
3607 mmc->max_seg_size = 0x1000;
3608 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3609 if(cpu_is_rk3036() || cpu_is_rk312x()){
3610 /* fixup for external dmac setting */
3612 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3613 mmc->max_blk_count = 65535;
3614 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3615 mmc->max_seg_size = mmc->max_req_size;
/* PIO fallback limits when no IDMAC is configured. */
3619 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3620 mmc->max_blk_count = 512;
3621 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3622 mmc->max_seg_size = mmc->max_req_size;
3623 #endif /* CONFIG_MMC_DW_IDMAC */
3627 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3629 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* vmmc regulator is only looked up for SD slots. */
3634 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3635 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3639 if (IS_ERR(host->vmmc)) {
3640 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3643 ret = regulator_enable(host->vmmc);
3646 "failed to enable regulator: %d\n", ret);
3653 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3655 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3656 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3658 dw_mci_init_pinctrl(host);
3659 ret = mmc_add_host(mmc);
3663 #if defined(CONFIG_DEBUG_FS)
3664 dw_mci_init_debugfs(slot);
3667 /* Card initially undetected */
3668 slot->last_detect_state = 1;
/* Error unwind: drop the PM notifier, then free the cd-gpio IRQ. */
3672 unregister_pm_notifier(&mmc->pm_notify);
3675 if (gpio_is_valid(slot->cd_gpio))
3676 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * Tear down slot @id: run the platform exit hook, unregister the
 * mmc_host from the core, clear the slot pointer, and free the host.
 */
3681 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3683 /* Shutdown detect IRQ */
3684 if (slot->host->pdata->exit)
3685 slot->host->pdata->exit(id);
3687 /* Debugfs stuff is cleaned up by mmc core */
3688 mmc_remove_host(slot->mmc);
3689 slot->host->slot[id] = NULL;
3690 mmc_free_host(slot->mmc);
/*
 * Choose and initialize the DMA backend: allocate one page of coherent
 * memory for descriptor/sg translation, pick external DMA (edmac) on
 * rk3036/rk312x or the internal IDMAC otherwise, and fall back to PIO
 * if the chosen ops are missing or their init fails.
 */
3693 static void dw_mci_init_dma(struct dw_mci *host)
3695 /* Alloc memory for sg translation */
3696 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3697 &host->sg_dma, GFP_KERNEL);
3698 if (!host->sg_cpu) {
3699 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3704 memset(host->sg_cpu, 0, PAGE_SIZE);
3707 /* Determine which DMA interface to use */
3708 #if defined(CONFIG_MMC_DW_IDMAC)
3709 if(cpu_is_rk3036() || cpu_is_rk312x()){
3710 host->dma_ops = &dw_mci_edmac_ops;
3711 dev_info(host->dev, "Using external DMA controller.\n");
3713 host->dma_ops = &dw_mci_idmac_ops;
3714 dev_info(host->dev, "Using internal DMA controller.\n");
/* All four ops must be present before we commit to DMA mode. */
3721 if (host->dma_ops->init && host->dma_ops->start &&
3722 host->dma_ops->stop && host->dma_ops->cleanup) {
3723 if (host->dma_ops->init(host)) {
3724 dev_err(host->dev, "%s: Unable to initialize "
3725 "DMA Controller.\n", __func__);
3729 dev_err(host->dev, "DMA initialization not found.\n");
3737 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Set the requested self-clearing reset bits in CTRL and poll until the
 * hardware clears them, with a 500 ms timeout.  Logs on timeout; the
 * boolean success/failure returns are in lines elided from this excerpt.
 */
3742 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3744 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3747 ctrl = mci_readl(host, CTRL);
3749 mci_writel(host, CTRL, ctrl);
3751 /* wait till resets clear */
3753 ctrl = mci_readl(host, CTRL);
3754 if (!(ctrl & reset))
3756 } while (time_before(jiffies, timeout));
3759 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * Reset only the data FIFO.  Stops the in-flight sg_miter first, since
 * the reset raises a block interrupt and the PIO path must not keep a
 * stale scatter-gather pointer.
 */
3765 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3768 * Reseting generates a block interrupt, hence setting
3769 * the scatter-gather pointer to NULL.
3772 sg_miter_stop(&host->sg_miter);
3776 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full reset: FIFO and DMA reset bits asserted together. */
3779 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3781 return dw_mci_ctrl_reset(host,
3782 SDMMC_CTRL_FIFO_RESET |
3784 SDMMC_CTRL_DMA_RESET);
/* Table mapping controller-level DT property names to quirk flag bits. */
3789 static struct dw_mci_of_quirks {
3794 .quirk = "broken-cd",
3795 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board platform-data struct from the controller's DT
 * node: slot count, quirks, FIFO depth, card-detect delay, bus clock,
 * driver-specific parse hook, and the various capability properties.
 * Returns ERR_PTR on allocation or parse-hook failure.
 */
3799 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3801 struct dw_mci_board *pdata;
3802 struct device *dev = host->dev;
3803 struct device_node *np = dev->of_node;
3804 const struct dw_mci_drv_data *drv_data = host->drv_data;
3806 u32 clock_frequency;
3808 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3810 dev_err(dev, "could not allocate memory for pdata\n");
3811 return ERR_PTR(-ENOMEM);
3814 /* find out number of slots supported */
3815 if (of_property_read_u32(dev->of_node, "num-slots",
3816 &pdata->num_slots)) {
3817 dev_info(dev, "num-slots property not found, "
3818 "assuming 1 slot is available\n");
3819 pdata->num_slots = 1;
3823 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3824 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3825 pdata->quirks |= of_quirks[idx].id;
3828 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3829 dev_info(dev, "fifo-depth property not found, using "
3830 "value of FIFOTH register as default\n");
3832 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3834 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3835 pdata->bus_hz = clock_frequency;
/* Let the SoC-specific driver parse its own extra properties. */
3837 if (drv_data && drv_data->parse_dt) {
3838 ret = drv_data->parse_dt(host);
3840 return ERR_PTR(ret);
3843 if (of_find_property(np, "keep-power-in-suspend", NULL))
3844 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3846 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3847 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3849 if (of_find_property(np, "supports-highspeed", NULL))
3850 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3852 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3853 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3855 if (of_find_property(np, "supports-DDR_MODE", NULL))
3856 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3858 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3859 pdata->caps2 |= MMC_CAP2_HS200;
3861 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3862 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3864 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3865 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3867 if (of_get_property(np, "cd-inverted", NULL))
3868 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3869 if (of_get_property(np, "bootpart-no-access", NULL))
3870 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3875 #else /* CONFIG_OF */
/* Without device-tree support there is no way to build pdata here. */
3876 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3878 return ERR_PTR(-EINVAL);
3880 #endif /* CONFIG_OF */
/*
 * dw_mci_probe - common probe path for the DesignWare MMC controller.
 *
 * Order of operations visible here: obtain platform data (from DT when
 * none was supplied), read the IP VERID to select the correct DATA
 * register offset (changed in IP version 2.40a), acquire and enable the
 * hpclk/hclk/clk_mmc clocks, program the card-clock rate, reset all
 * controller blocks, set up DMA, clear/mask interrupts, program FIFO
 * thresholds, create the tasklet + card workqueue + IRQ handler,
 * initialize each slot, and finally unmask the interrupts this driver
 * services.
 *
 * Returns 0 on success or a negative errno; the trailing statements are
 * the error-unwind path releasing workqueue, DMA, regulator and clocks
 * (the goto labels themselves are elided in this excerpt).
 */
3882 int dw_mci_probe(struct dw_mci *host)
3884 const struct dw_mci_drv_data *drv_data = host->drv_data;
3885 int width, i, ret = 0;
3891 host->pdata = dw_mci_parse_dt(host);
3892 if (IS_ERR(host->pdata)) {
3893 dev_err(host->dev, "platform data not available\n");
/* multi-slot operation requires a platform-supplied slot selector */
3898 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3900 "Platform data must supply select_slot function\n");
3905 * In 2.40a spec, Data offset is changed.
3906 * Need to check the version-id and set data-offset for DATA register.
3908 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3909 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3911 if (host->verid < DW_MMC_240A)
3912 host->data_offset = DATA_OFFSET;
3914 host->data_offset = DATA_240A_OFFSET;
/* Rockchip-specific gate clocks feeding the host interface */
3917 host->hpclk_mmc= devm_clk_get(host->dev, "hpclk_mmc");
3918 if (IS_ERR(host->hpclk_mmc)) {
3919 dev_err(host->dev, "failed to get hpclk_mmc\n");
3921 clk_prepare_enable(host->hpclk_mmc);
3925 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3926 if (IS_ERR(host->hclk_mmc)) {
3927 dev_err(host->dev, "failed to get hclk_mmc\n");
3928 ret = PTR_ERR(host->hclk_mmc);
3932 clk_prepare_enable(host->hclk_mmc);
3935 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3936 if (IS_ERR(host->clk_mmc)) {
3937 dev_err(host->dev, "failed to get clk mmc_per\n");
3938 ret = PTR_ERR(host->clk_mmc);
3942 host->bus_hz = host->pdata->bus_hz;
3943 if (!host->bus_hz) {
3944 dev_err(host->dev,"Platform data must supply bus speed\n");
/* newer IP (>= 2.40a) gets 2x the bus rate to compensate for a fixed /2 */
3949 if (host->verid < DW_MMC_240A)
3950 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3952 //rockchip: fix divider 2 in clksum before controlller
3953 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3956 dev_err(host->dev, "failed to set clk mmc\n");
3959 clk_prepare_enable(host->clk_mmc);
3961 if (drv_data && drv_data->setup_clock) {
3962 ret = drv_data->setup_clock(host);
3965 "implementation specific clock setup failed\n");
3970 host->quirks = host->pdata->quirks;
3971 host->irq_state = true;
3972 host->set_speed = 0;
3974 host->svi_flags = 0;
3976 spin_lock_init(&host->lock);
3977 spin_lock_init(&host->slock);
3979 INIT_LIST_HEAD(&host->queue);
3981 * Get the host data width - this assumes that HCON has been set with
3982 * the correct values.
3984 i = (mci_readl(host, HCON) >> 7) & 0x7;
3986 host->push_data = dw_mci_push_data16;
3987 host->pull_data = dw_mci_pull_data16;
3989 host->data_shift = 1;
3990 } else if (i == 2) {
3991 host->push_data = dw_mci_push_data64;
3992 host->pull_data = dw_mci_pull_data64;
3994 host->data_shift = 3;
3996 /* Check for a reserved value, and warn if it is */
3998 "HCON reports a reserved host data width!\n"
3999 "Defaulting to 32-bit access.\n");
4000 host->push_data = dw_mci_push_data32;
4001 host->pull_data = dw_mci_pull_data32;
4003 host->data_shift = 2;
4006 /* Reset all blocks */
4007 if (!dw_mci_ctrl_all_reset(host))
4010 host->dma_ops = host->pdata->dma_ops;
4011 dw_mci_init_dma(host);
4013 /* Clear the interrupts for the host controller */
4014 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4015 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4017 /* Put in max timeout */
4018 mci_writel(host, TMOUT, 0xFFFFFFFF);
4021 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
4022 * Tx Mark = fifo_size / 2 DMA Size = 8
4024 if (!host->pdata->fifo_depth) {
4026 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
4027 * have been overwritten by the bootloader, just like we're
4028 * about to do, so if you know the value for your hardware, you
4029 * should put it in the platform data.
4031 fifo_size = mci_readl(host, FIFOTH);
4032 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
4034 fifo_size = host->pdata->fifo_depth;
4036 host->fifo_depth = fifo_size;
4038 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
4039 mci_writel(host, FIFOTH, host->fifoth_val);
4041 /* disable clock to CIU */
4042 mci_writel(host, CLKENA, 0);
4043 mci_writel(host, CLKSRC, 0);
4045 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
4046 host->card_workqueue = alloc_workqueue("dw-mci-card",
4047 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
4048 if (!host->card_workqueue) {
4052 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
4053 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
4054 host->irq_flags, "dw-mci", host);
/* slot count: platform data wins, otherwise read it out of HCON */
4058 if (host->pdata->num_slots)
4059 host->num_slots = host->pdata->num_slots;
4061 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
4063 /* We need at least one slot to succeed */
4064 for (i = 0; i < host->num_slots; i++) {
4065 ret = dw_mci_init_slot(host, i);
4067 dev_dbg(host->dev, "slot %d init failed\n", i);
4073 * Enable interrupts for command done, data over, data empty, card det,
4074 * receive ready and error such as transmit, receive timeout, crc error
4076 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4077 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
4078 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* card-detect interrupt only for removable (non-SDIO, non-eMMC) hosts */
4079 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
4080 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
4081 regs |= SDMMC_INT_CD;
4083 mci_writel(host, INTMASK, regs);
4085 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4087 dev_info(host->dev, "DW MMC controller at irq %d, "
4088 "%d bit host data width, "
4090 host->irq, width, fifo_size);
4093 dev_info(host->dev, "%d slots initialized\n", init_slots);
4095 dev_dbg(host->dev, "attempted to initialize %d slots, "
4096 "but failed on all\n", host->num_slots);
4101 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4102 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* error-unwind path: release in reverse order of acquisition */
4107 destroy_workqueue(host->card_workqueue);
4110 if (host->use_dma && host->dma_ops->exit)
4111 host->dma_ops->exit(host);
4114 regulator_disable(host->vmmc);
4115 regulator_put(host->vmmc);
4119 if (!IS_ERR(host->clk_mmc))
4120 clk_disable_unprepare(host->clk_mmc);
4122 if (!IS_ERR(host->hclk_mmc))
4123 clk_disable_unprepare(host->hclk_mmc);
4126 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - tear down everything dw_mci_probe() set up:
 * mask and clear all interrupts, clean up each slot, gate the card
 * clock, destroy the card workqueue, unregister the PM notifier (SD
 * hosts only), shut down DMA, free the card-detect GPIO IRQ, and
 * release the vmmc regulator and the clocks.
 */
4128 void dw_mci_remove(struct dw_mci *host)
4130 struct mmc_host *mmc = host->mmc;
4131 struct dw_mci_slot *slot = mmc_priv(mmc);
4134 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4135 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4137 for(i = 0; i < host->num_slots; i++){
4138 dev_dbg(host->dev, "remove slot %d\n", i);
4140 dw_mci_cleanup_slot(host->slot[i], i);
4143 /* disable clock to CIU */
4144 mci_writel(host, CLKENA, 0);
4145 mci_writel(host, CLKSRC, 0);
4147 destroy_workqueue(host->card_workqueue);
4148 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4149 unregister_pm_notifier(&host->mmc->pm_notify);
4151 if (host->use_dma && host->dma_ops->exit)
4152 host->dma_ops->exit(host);
4154 if (gpio_is_valid(slot->cd_gpio))
4155 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4158 regulator_disable(host->vmmc);
4159 regulator_put(host->vmmc);
4161 if (!IS_ERR(host->clk_mmc))
4162 clk_disable_unprepare(host->clk_mmc);
4164 if (!IS_ERR(host->hclk_mmc))
4165 clk_disable_unprepare(host->hclk_mmc);
4166 if (!IS_ERR(host->hpclk_mmc))
4167 clk_disable_unprepare(host->hpclk_mmc);
4169 EXPORT_SYMBOL(dw_mci_remove);
4173 #ifdef CONFIG_PM_SLEEP
4175 * TODO: we should probably disable the clock to the card in the suspend path.
4177 extern int get_wifi_chip_type(void);
/*
 * dw_mci_suspend - driver suspend hook.
 *
 * For SDIO hosts driving certain Wi-Fi chips (ESP8089, or chips above
 * the AP6XXX series) the body of the special-case branch is elided in
 * this excerpt — presumably it short-circuits the suspend; confirm.
 * For the SD controller the IRQ is disabled and the pins are switched
 * to their idle pinctrl state, then interrupts are masked and the
 * controller is disabled.  On SoCs that are not already in gpio-cd
 * mode, the card-detect GPIO is re-acquired and armed as a wake source.
 */
4178 int dw_mci_suspend(struct dw_mci *host)
4180 int present = dw_mci_get_cd(host->mmc);
4182 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4183 (get_wifi_chip_type() == WIFI_ESP8089 || get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
4187 regulator_disable(host->vmmc);
4189 /*only for sdmmc controller*/
4190 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4191 disable_irq(host->irq);
4193 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4194 MMC_DBG_ERR_FUNC(host->mmc,
4195 "Idle pinctrl setting failed! [%s]",
4196 mmc_hostname(host->mmc));
/* quiesce the controller: clear status, mask everything, disable core */
4199 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4200 mci_writel(host, INTMASK, 0x00);
4201 mci_writel(host, CTRL, 0x00);
4203 /* Soc rk3126/3036 already in gpio_cd mode */
4204 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4205 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4206 enable_irq_wake(host->mmc->slot.cd_irq);
4211 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - driver resume hook; mirrors dw_mci_suspend().
 *
 * Skips the full resume for the same Wi-Fi chip types as suspend, and
 * for SDIO slots with no card present.  For the SD controller it
 * disarms the card-detect wakeup, restores the pinctrl state (udbg or
 * default), pokes SoC-specific GRF card-detect routing registers,
 * re-enables the vmmc regulator, resets the controller, re-initializes
 * DMA, restores FIFOTH/TMOUT, re-arms the interrupt mask, and finally
 * replays ios/bus setup for slots flagged MMC_PM_KEEP_POWER.
 * NOTE(review): several guarding conditions (e.g. the SoC check before
 * the RK3288 grf_writel) are elided in this excerpt.
 */
4213 int dw_mci_resume(struct dw_mci *host)
4217 struct dw_mci_slot *slot;
4218 int present = dw_mci_get_cd(host->mmc);
4220 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4221 (get_wifi_chip_type() == WIFI_ESP8089 ||
4222 get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
4225 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
4226 slot = mmc_priv(host->mmc);
4227 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4231 /*only for sdmmc controller*/
4232 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4233 /* Soc rk3126/3036 already in gpio_cd mode */
4234 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4235 disable_irq_wake(host->mmc->slot.cd_irq);
4236 mmc_gpio_free_cd(host->mmc);
/* prefer the udbg pinctrl state when available, else plain default */
4240 if (!IS_ERR(host->pins_udbg)) {
4241 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4242 MMC_DBG_ERR_FUNC(host->mmc,
4243 "Idle pinctrl setting failed! [%s]",
4244 mmc_hostname(host->mmc));
4245 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
4246 MMC_DBG_ERR_FUNC(host->mmc,
4247 "%s: Udbg pinctrl setting failed! [%s]",
4248 mmc_hostname(host->mmc));
4250 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4251 MMC_DBG_ERR_FUNC(host->mmc,
4252 "Default pinctrl setting failed! [%s]",
4253 mmc_hostname(host->mmc));
4256 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4257 MMC_DBG_ERR_FUNC(host->mmc,
4258 "Default pinctrl setting failed! [%s]",
4259 mmc_hostname(host->mmc));
/* SoC-specific GRF writes restoring controller card-detect routing */
4264 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4265 else if(cpu_is_rk3036())
4266 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4267 else if(cpu_is_rk312x())
4268 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4269 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4272 ret = regulator_enable(host->vmmc);
4275 "failed to enable regulator: %d\n", ret);
4280 if(!dw_mci_ctrl_all_reset(host)){
4285 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
4286 if(host->use_dma && host->dma_ops->init)
4287 host->dma_ops->init(host);
4290 * Restore the initial value at FIFOTH register
4291 * And Invalidate the prev_blksz with zero
4293 mci_writel(host, FIFOTH, host->fifoth_val);
4294 host->prev_blksz = 0;
4295 /* Put in max timeout */
4296 mci_writel(host, TMOUT, 0xFFFFFFFF);
4298 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4299 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
4301 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4302 regs |= SDMMC_INT_CD;
4303 mci_writel(host, INTMASK, regs);
4304 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4305 /*only for sdmmc controller*/
4306 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)){
4307 enable_irq(host->irq);
/* slots that kept power through suspend need their ios/bus replayed */
4310 for(i = 0; i < host->num_slots; i++){
4311 struct dw_mci_slot *slot = host->slot[i];
4314 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4315 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4316 dw_mci_setup_bus(slot, true);
4322 EXPORT_SYMBOL(dw_mci_resume);
4323 #endif /* CONFIG_PM_SLEEP */
/*
 * Module init/exit: the common layer only announces itself here; the
 * platform-specific glue registers the actual platform driver elsewhere.
 */
4325 static int __init dw_mci_init(void)
4327 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4331 static void __exit dw_mci_exit(void)
4335 module_init(dw_mci_init);
4336 module_exit(dw_mci_exit);
4338 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4339 MODULE_AUTHOR("NXP Semiconductor VietNam");
4340 MODULE_AUTHOR("Imagination Technologies Ltd");
4341 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4342 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4343 MODULE_LICENSE("GPL v2");