2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
49 #include <linux/mfd/syscon.h>
50 #include <linux/regmap.h>
52 #include "rk_sdmmc_dbg.h"
53 #include <linux/regulator/rockchip_io_vol_domain.h>
54 #include "../../clk/rockchip/clk-ops.h"
56 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
58 /* Common flag combinations */
59 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
60 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
62 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
64 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
65 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
66 #define DW_MCI_SEND_STATUS 1
67 #define DW_MCI_RECV_STATUS 2
68 #define DW_MCI_DMA_THRESHOLD 16
70 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
71 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
73 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
74 #define SDMMC_DATA_TIMEOUT_SD 500
75 #define SDMMC_DATA_TIMEOUT_SDIO 250
76 #define SDMMC_DATA_TIMEOUT_EMMC 2500
78 #define SDMMC_CMD_RTO_MAX_HOLD 200
79 #define SDMMC_WAIT_FOR_UNBUSY 2500
81 #ifdef CONFIG_MMC_DW_IDMAC
82 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
83 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
84 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
88 u32 des0; /* Control Descriptor */
89 #define IDMAC_DES0_DIC BIT(1)
90 #define IDMAC_DES0_LD BIT(2)
91 #define IDMAC_DES0_FD BIT(3)
92 #define IDMAC_DES0_CH BIT(4)
93 #define IDMAC_DES0_ER BIT(5)
94 #define IDMAC_DES0_CES BIT(30)
95 #define IDMAC_DES0_OWN BIT(31)
97 u32 des1; /* Buffer sizes */
98 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
99 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
101 u32 des2; /* buffer 1 physical address */
103 u32 des3; /* buffer 2 physical address */
105 #endif /* CONFIG_MMC_DW_IDMAC */
107 static const u8 tuning_blk_pattern_4bit[] = {
108 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
109 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
110 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
111 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
112 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
113 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
114 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
115 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
118 static const u8 tuning_blk_pattern_8bit[] = {
119 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
120 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
121 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
122 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
123 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
124 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
125 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
126 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
127 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
128 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
129 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
130 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
131 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
132 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
133 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
134 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Forward declarations for reset and low-power helpers defined later in this file. */
137 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
138 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
139 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
140 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
142 /*printk the all register of current host*/
/*
 * Walk the dw_mci_regs name/offset table and printk each register's
 * current value for debugging.
 * NOTE(review): gaps in the embedded numbering show the loop increment,
 * braces and return statement are missing from this listing.
 */
144 static int dw_mci_regs_printk(struct dw_mci *host)
146 struct sdmmc_reg *regs = dw_mci_regs;
148 while( regs->name != 0 ){
149 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
152 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
157 #if defined(CONFIG_DEBUG_FS)
/*
 * debugfs "req" file: dump a snapshot of the current mmc_request
 * (command, data and stop phases) under the host lock so the
 * fields are consistent.
 * NOTE(review): each response dump prints resp[2] twice; the fourth
 * field presumably should be resp[3] -- confirm against upstream dw_mmc.
 */
158 static int dw_mci_req_show(struct seq_file *s, void *v)
160 struct dw_mci_slot *slot = s->private;
161 struct mmc_request *mrq;
162 struct mmc_command *cmd;
163 struct mmc_command *stop;
164 struct mmc_data *data;
166 /* Make sure we get a consistent snapshot */
167 spin_lock_bh(&slot->host->lock);
177 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
178 cmd->opcode, cmd->arg, cmd->flags,
179 cmd->resp[0], cmd->resp[1], cmd->resp[2],
180 cmd->resp[2], cmd->error);
182 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
183 data->bytes_xfered, data->blocks,
184 data->blksz, data->flags, data->error);
187 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
188 stop->opcode, stop->arg, stop->flags,
189 stop->resp[0], stop->resp[1], stop->resp[2],
190 stop->resp[2], stop->error);
193 spin_unlock_bh(&slot->host->lock);
/* debugfs open hook for the "req" file (seq_file single_open plumbing). */
198 static int dw_mci_req_open(struct inode *inode, struct file *file)
200 return single_open(file, dw_mci_req_show, inode->i_private);
203 static const struct file_operations dw_mci_req_fops = {
204 .owner = THIS_MODULE,
205 .open = dw_mci_req_open,
208 .release = single_release,
/*
 * debugfs "regs" file.
 * NOTE(review): seq_printf is handed the SDMMC_* macros directly, so this
 * appears to print the register offset constants rather than live register
 * values (no mci_readl here) -- confirm whether that is intended.
 */
211 static int dw_mci_regs_show(struct seq_file *s, void *v)
213 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
214 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
215 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
216 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
217 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
218 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
223 static int dw_mci_regs_open(struct inode *inode, struct file *file)
225 return single_open(file, dw_mci_regs_show, inode->i_private);
228 static const struct file_operations dw_mci_regs_fops = {
229 .owner = THIS_MODULE,
230 .open = dw_mci_regs_open,
233 .release = single_release,
/*
 * Create the per-slot debugfs entries (regs, req, state, pending/completed
 * event masks) under the mmc host's debugfs root.
 * NOTE(review): the NULL-checks on root and on each created node are among
 * the lines missing from this listing.
 */
236 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
238 struct mmc_host *mmc = slot->mmc;
239 struct dw_mci *host = slot->host;
243 root = mmc->debugfs_root;
247 node = debugfs_create_file("regs", S_IRUSR, root, host,
252 node = debugfs_create_file("req", S_IRUSR, root, slot,
257 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
261 node = debugfs_create_x32("pending_events", S_IRUSR, root,
262 (u32 *)&host->pending_events);
266 node = debugfs_create_x32("completed_events", S_IRUSR, root,
267 (u32 *)&host->completed_events);
274 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
276 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the data timeout register to its maximum (all-ones) value. */
278 static void dw_mci_set_timeout(struct dw_mci *host)
280 /* timeout (maximum) */
281 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMD register flag word:
 * stop/abort handling, response-expected/long/CRC bits, and data
 * direction/stream bits, with a final SoC-specific hook.
 * NOTE(review): the declaration and initialization of cmdr (presumably
 * cmdr = cmd->opcode) and the return statement are missing from this
 * listing -- confirm against the full source.
 */
284 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
286 struct mmc_data *data;
287 struct dw_mci_slot *slot = mmc_priv(mmc);
288 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
290 cmd->error = -EINPROGRESS;
294 if (cmdr == MMC_STOP_TRANSMISSION)
295 cmdr |= SDMMC_CMD_STOP;
297 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
299 if (cmd->flags & MMC_RSP_PRESENT) {
300 /* We expect a response, so set this bit */
301 cmdr |= SDMMC_CMD_RESP_EXP;
302 if (cmd->flags & MMC_RSP_136)
303 cmdr |= SDMMC_CMD_RESP_LONG;
306 if (cmd->flags & MMC_RSP_CRC)
307 cmdr |= SDMMC_CMD_RESP_CRC;
311 cmdr |= SDMMC_CMD_DAT_EXP;
312 if (data->flags & MMC_DATA_STREAM)
313 cmdr |= SDMMC_CMD_STRM_MODE;
314 if (data->flags & MMC_DATA_WRITE)
315 cmdr |= SDMMC_CMD_DAT_WR;
318 if (drv_data && drv_data->prepare_command)
319 drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build host->stop_abort: a CMD12 (STOP_TRANSMISSION) for block
 * read/write commands, or a CMD52 CCCR-abort for SDIO extended I/O,
 * then return the corresponding CMD register flag word.
 * NOTE(review): `(1 << 31)` left-shifts into the sign bit of int, which
 * is undefined behavior in C -- should be `1U << 31` (or BIT(31)).
 */
325 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
327 struct mmc_command *stop;
333 stop = &host->stop_abort;
335 memset(stop, 0, sizeof(struct mmc_command));
337 if (cmdr == MMC_READ_SINGLE_BLOCK ||
338 cmdr == MMC_READ_MULTIPLE_BLOCK ||
339 cmdr == MMC_WRITE_BLOCK ||
340 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
341 stop->opcode = MMC_STOP_TRANSMISSION;
343 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
344 } else if (cmdr == SD_IO_RW_EXTENDED) {
345 stop->opcode = SD_IO_RW_DIRECT;
346 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
347 ((cmd->arg >> 28) & 0x7);
348 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
353 cmdr = stop->opcode | SDMMC_CMD_STOP |
354 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Write CMDARG and kick off a command in the CMD register.
 * CMD11 (voltage switch) gets special treatment: low-power clock gating
 * is disabled first and the VOLT_SWITCH bit is set.
 */
359 static void dw_mci_start_command(struct dw_mci *host,
360 struct mmc_command *cmd, u32 cmd_flags)
362 struct dw_mci_slot *slot = host->slot[0];
363 /*temporality fix slot[0] due to host->num_slots equal to 1*/
365 host->pre_cmd = host->cmd;
368 "start command: ARGR=0x%08x CMDR=0x%08x\n",
369 cmd->arg, cmd_flags);
371 if(SD_SWITCH_VOLTAGE == cmd->opcode){
372 /*confirm non-low-power mode*/
373 mci_writel(host, CMDARG, 0);
374 dw_mci_disable_low_power(slot);
376 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
377 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
379 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
382 mci_writel(host, CMDARG, cmd->arg);
/* Some SoCs (e.g. RK3188) require the HOLD register path for the cmd line. */
385 /* fix the value to 1 in some Soc,for example RK3188. */
386 if(host->mmc->hold_reg_flag)
387 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
389 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the stop command that was pre-built for this data transfer. */
393 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
395 dw_mci_start_command(host, data->stop, host->stop_cmdr);
398 /* DMA interface functions */
/*
 * Abort any in-flight DMA and mark the transfer complete so the tasklet
 * can finish the request.  rk3036/rk312x deliberately skip the stop call
 * (see the inline Fixme about edma flush).
 */
399 static void dw_mci_stop_dma(struct dw_mci *host)
401 if (host->using_dma) {
402 /* Fixme: No need to terminate edma, may cause flush op */
403 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
404 host->dma_ops->stop(host);
405 host->dma_ops->cleanup(host);
408 /* Data transfer was stopped by the interrupt handler */
409 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
412 static int dw_mci_get_dma_dir(struct mmc_data *data)
414 if (data->flags & MMC_DATA_WRITE)
415 return DMA_TO_DEVICE;
417 return DMA_FROM_DEVICE;
420 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the data scatterlist after an IDMAC transfer, unless the mapping
 * is owned by the pre_req/post_req pair (host_cookie non-zero).
 * NOTE(review): the sg/sg_len arguments of dma_unmap_sg are on lines
 * missing from this listing.
 */
421 static void dw_mci_dma_cleanup(struct dw_mci *host)
423 struct mmc_data *data = host->data;
426 if (!data->host_cookie)
427 dma_unmap_sg(host->dev,
430 dw_mci_get_dma_dir(data));
/* Software-reset the internal DMA controller by setting SWRESET in BMOD. */
433 static void dw_mci_idmac_reset(struct dw_mci *host)
435 u32 bmod = mci_readl(host, BMOD);
436 /* Software reset of DMA */
437 bmod |= SDMMC_IDMAC_SWRESET;
438 mci_writel(host, BMOD, bmod);
/*
 * Detach and reset the internal DMA controller: clear USE_IDMAC and pulse
 * DMA_RESET in CTRL, then disable the IDMAC in BMOD with a software reset.
 */
441 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
445 /* Disable and reset the IDMAC interface */
446 temp = mci_readl(host, CTRL);
447 temp &= ~SDMMC_CTRL_USE_IDMAC;
448 temp |= SDMMC_CTRL_DMA_RESET;
449 mci_writel(host, CTRL, temp);
451 /* Stop the IDMAC running */
452 temp = mci_readl(host, BMOD);
453 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
454 temp |= SDMMC_IDMAC_SWRESET;
455 mci_writel(host, BMOD, temp);
/*
 * IDMAC completion callback: clean up the mapping, mark the transfer
 * complete and schedule the state-machine tasklet.
 * NOTE(review): the NULL-check on data referred to by the inline comment
 * is on lines missing from this listing.
 */
458 static void dw_mci_idmac_complete_dma(void *arg)
460 struct dw_mci *host = arg;
461 struct mmc_data *data = host->data;
463 dev_vdbg(host->dev, "DMA complete\n");
466 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
467 host->mrq->cmd->opcode,host->mrq->cmd->arg,
468 data->blocks,data->blksz,mmc_hostname(host->mmc));
471 host->dma_ops->cleanup(host);
474 * If the card was removed, data will be NULL. No point in trying to
475 * send the stop command or waiting for NBUSY in this case.
478 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
479 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the DMA-mapped scatterlist:
 * one chained descriptor per sg entry, then tag the first (FD) and
 * last (LD, interrupt-enabled) descriptors.
 * NOTE(review): line 508's arithmetic on host->sg_cpu scaled by
 * sizeof(struct idmac_desc) suggests sg_cpu is a byte/void pointer
 * here -- confirm its declared type before touching this.
 */
483 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
487 struct idmac_desc *desc = host->sg_cpu;
489 for (i = 0; i < sg_len; i++, desc++) {
490 unsigned int length = sg_dma_len(&data->sg[i]);
491 u32 mem_addr = sg_dma_address(&data->sg[i]);
493 /* Set the OWN bit and disable interrupts for this descriptor */
494 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
497 IDMAC_SET_BUFFER1_SIZE(desc, length);
499 /* Physical address to DMA to/from */
500 desc->des2 = mem_addr;
503 /* Set first descriptor */
505 desc->des0 |= IDMAC_DES0_FD;
507 /* Set last descriptor */
508 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
509 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
510 desc->des0 |= IDMAC_DES0_LD;
/*
 * Start an internal-DMA transfer: build the descriptor ring, select the
 * IDMAC interface in CTRL, enable it in BMOD, and poke the poll-demand
 * register to begin fetching descriptors.
 */
515 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
519 dw_mci_translate_sglist(host, host->data, sg_len);
521 /* Select IDMAC interface */
522 temp = mci_readl(host, CTRL);
523 temp |= SDMMC_CTRL_USE_IDMAC;
524 mci_writel(host, CTRL, temp);
528 /* Enable the IDMAC */
529 temp = mci_readl(host, BMOD);
530 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
531 mci_writel(host, BMOD, temp);
533 /* Start it running */
534 mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: forward-link the descriptor ring inside the
 * host->sg_cpu page, close the ring with an end-of-ring descriptor,
 * reset the IDMAC, unmask only the completion interrupts and program
 * the descriptor base address.
 */
537 static int dw_mci_idmac_init(struct dw_mci *host)
539 struct idmac_desc *p;
542 /* Number of descriptors in the ring buffer */
543 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
545 /* Forward link the descriptor list */
546 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
547 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
549 /* Set the last descriptor as the end-of-ring descriptor */
550 p->des3 = host->sg_dma;
551 p->des0 = IDMAC_DES0_ER;
553 dw_mci_idmac_reset(host);
555 /* Mask out interrupts - get Tx & Rx complete only */
556 mci_writel(host, IDSTS, IDMAC_INT_CLR);
557 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
560 /* Set the descriptor base address */
561 mci_writel(host, DBADDR, host->sg_dma);
/* dw_mci_dma_ops vtable for the internal DMA controller (IDMAC) path. */
565 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
566 .init = dw_mci_idmac_init,
567 .start = dw_mci_idmac_start_dma,
568 .stop = dw_mci_idmac_stop_dma,
569 .complete = dw_mci_idmac_complete_dma,
570 .cleanup = dw_mci_dma_cleanup,
/*
 * External-DMA counterpart of dw_mci_dma_cleanup: unmap the scatterlist
 * unless pre_req/post_req own the mapping (host_cookie set).
 */
574 static void dw_mci_edma_cleanup(struct dw_mci *host)
576 struct mmc_data *data = host->data;
579 if (!data->host_cookie)
580 dma_unmap_sg(host->dev,
581 data->sg, data->sg_len,
582 dw_mci_get_dma_dir(data));
/* Abort the external dmaengine channel used by this host. */
585 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
587 dmaengine_terminate_all(host->dms->ch);
/*
 * External-DMA completion callback: sync the sg list back for the CPU on
 * reads, clean up the mapping, then mark the transfer complete and kick
 * the tasklet.
 */
590 static void dw_mci_edmac_complete_dma(void *arg)
592 struct dw_mci *host = arg;
593 struct mmc_data *data = host->data;
595 dev_vdbg(host->dev, "DMA complete\n");
598 if(data->flags & MMC_DATA_READ)
599 /* Invalidate cache after read */
600 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
601 data->sg_len, DMA_FROM_DEVICE);
603 host->dma_ops->cleanup(host);
606 * If the card was removed, data will be NULL. No point in trying to
607 * send the stop command or waiting for NBUSY in this case.
610 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
611 tasklet_schedule(&host->tasklet);
/*
 * Start a transfer over the external dmaengine channel:
 *  - point the slave config at the host FIFO (data register) with 32-bit
 *    accesses, and match the burst size to the FIFO's MSIZE field;
 *  - prepare/submit a slave-sg descriptor (MEM_TO_DEV for writes with a
 *    cache flush before issue, DEV_TO_MEM for reads);
 *  - install dw_mci_edmac_complete_dma as completion callback.
 * NOTE(review): the declarations of ret/burst_limit, the error-return
 * branches and the rk3036 burst_limit assignment are on lines missing
 * from this listing.
 */
615 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
617 struct dma_slave_config slave_config;
618 struct dma_async_tx_descriptor *desc = NULL;
619 struct scatterlist *sgl = host->data->sg;
620 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
621 u32 sg_elems = host->data->sg_len;
622 u32 fifoth_val, mburst;
626 /* Set external dma config: burst size, burst width*/
627 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
628 slave_config.src_addr = slave_config.dst_addr;
629 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
630 slave_config.src_addr_width = slave_config.dst_addr_width;
632 /* Match FIFO dma burst MSIZE with external dma config*/
633 fifoth_val = mci_readl(host, FIFOTH);
634 mburst = mszs[(fifoth_val >> 28) & 0x7];
636 /* edmac limit burst to 16, but work around for rk3036 to 8 */
637 if (unlikely(cpu_is_rk3036()))
642 slave_config.dst_maxburst = (mburst > burst_limit) ? burst_limit : mburst;
643 slave_config.src_maxburst = slave_config.dst_maxburst;
645 if(host->data->flags & MMC_DATA_WRITE){
646 slave_config.direction = DMA_MEM_TO_DEV;
647 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
649 dev_err(host->dev, "error in dw_mci edma configuration.\n");
653 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
654 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
656 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
659 /* Set dw_mci_edmac_complete_dma as callback */
660 desc->callback = dw_mci_edmac_complete_dma;
661 desc->callback_param = (void *)host;
662 dmaengine_submit(desc);
664 /* Flush cache before write */
665 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
666 sg_elems, DMA_TO_DEVICE);
667 dma_async_issue_pending(host->dms->ch);
670 slave_config.direction = DMA_DEV_TO_MEM;
671 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
673 dev_err(host->dev, "error in dw_mci edma configuration.\n");
676 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
677 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
679 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
682 /* set dw_mci_edmac_complete_dma as callback */
683 desc->callback = dw_mci_edmac_complete_dma;
684 desc->callback_param = (void *)host;
685 dmaengine_submit(desc);
686 dma_async_issue_pending(host->dms->ch);
690 static int dw_mci_edmac_init(struct dw_mci *host)
692 /* Request external dma channel, SHOULD decide chn in dts */
694 host->dms = (struct dw_mci_dma_slave *)kmalloc
695 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
696 if (NULL == host->dms) {
697 dev_err(host->dev, "No enough memory to alloc dms.\n");
701 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
702 if (!host->dms->ch) {
703 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
704 host->dms->ch->chan_id);
711 if (NULL != host->dms) {
/*
 * Release the external DMA channel.
 * NOTE(review): the lines freeing host->dms itself are missing from this
 * listing -- confirm kfree(host->dms) happens in the full source.
 */
719 static void dw_mci_edmac_exit(struct dw_mci *host)
721 if (NULL != host->dms) {
722 if (NULL != host->dms->ch) {
723 dma_release_channel(host->dms->ch);
724 host->dms->ch = NULL;
/* dw_mci_dma_ops vtable for the external dmaengine (edmac) path. */
731 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
732 .init = dw_mci_edmac_init,
733 .exit = dw_mci_edmac_exit,
734 .start = dw_mci_edmac_start_dma,
735 .stop = dw_mci_edmac_stop_dma,
736 .complete = dw_mci_edmac_complete_dma,
737 .cleanup = dw_mci_edma_cleanup,
739 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * DMA-map the request's scatterlist (or reuse a mapping cached in
 * host_cookie by pre_req).  Transfers that are too short or not
 * word-aligned are rejected so the caller falls back to PIO.
 * NOTE(review): the reject-path return statements and the dma_map_sg
 * sg arguments are on lines missing from this listing.
 */
741 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
742 struct mmc_data *data,
745 struct scatterlist *sg;
746 unsigned int i, sg_len;
748 if (!next && data->host_cookie)
749 return data->host_cookie;
752 * We don't do DMA on "complex" transfers, i.e. with
753 * non-word-aligned buffers or lengths. Also, we don't bother
754 * with all the DMA setup overhead for short transfers.
756 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
762 for_each_sg(data->sg, sg, data->sg_len, i) {
763 if (sg->offset & 3 || sg->length & 3)
767 sg_len = dma_map_sg(host->dev,
770 dw_mci_get_dma_dir(data));
775 data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: map the next request's data ahead of time and
 * stash the mapping in host_cookie; on failure clear the cookie so the
 * normal submit path maps it instead.
 */
780 static void dw_mci_pre_req(struct mmc_host *mmc,
781 struct mmc_request *mrq,
784 struct dw_mci_slot *slot = mmc_priv(mmc);
785 struct mmc_data *data = mrq->data;
787 if (!slot->host->use_dma || !data)
790 if (data->host_cookie) {
791 data->host_cookie = 0;
795 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
796 data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: unmap a scatterlist previously mapped by
 * pre_req (host_cookie non-zero) and clear the cookie.
 */
799 static void dw_mci_post_req(struct mmc_host *mmc,
800 struct mmc_request *mrq,
803 struct dw_mci_slot *slot = mmc_priv(mmc);
804 struct mmc_data *data = mrq->data;
806 if (!slot->host->use_dma || !data)
809 if (data->host_cookie)
810 dma_unmap_sg(slot->host->dev,
813 dw_mci_get_dma_dir(data));
814 data->host_cookie = 0;
/*
 * Pick the largest DMA multiple-transaction size (MSIZE) that divides
 * both the block depth and the TX watermark remainder, then program
 * FIFOTH with the resulting MSIZE and RX/TX watermarks.  Falls back to
 * the conservative initial values (msize=0, rx_wmark=1) when blksz is
 * not FIFO-width aligned or no candidate fits.
 * NOTE(review): the loop header iterating idx over mszs is on lines
 * missing from this listing.
 */
817 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
819 #ifdef CONFIG_MMC_DW_IDMAC
820 unsigned int blksz = data->blksz;
821 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
822 u32 fifo_width = 1 << host->data_shift;
823 u32 blksz_depth = blksz / fifo_width, fifoth_val;
824 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
825 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
827 tx_wmark = (host->fifo_depth) / 2;
828 tx_wmark_invers = host->fifo_depth - tx_wmark;
832 * if blksz is not a multiple of the FIFO width
834 if (blksz % fifo_width) {
841 if (!((blksz_depth % mszs[idx]) ||
842 (tx_wmark_invers % mszs[idx]))) {
844 rx_wmark = mszs[idx] - 1;
849 * If idx is '0', it won't be tried
850 * Thus, initial values are uesed
853 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
854 mci_writel(host, FIFOTH, fifoth_val);
/*
 * Configure the card-read-threshold (CDTHRCTL): enabled with thld_size
 * for HS200/SDR104 reads that fit the FIFO, disabled otherwise.
 * NOTE(review): the assignment of thld_size and the early-return/disable
 * control flow are on lines missing from this listing.
 */
859 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
861 unsigned int blksz = data->blksz;
862 u32 blksz_depth, fifo_depth;
865 WARN_ON(!(data->flags & MMC_DATA_READ));
867 if (host->timing != MMC_TIMING_MMC_HS200 &&
868 host->timing != MMC_TIMING_UHS_SDR104)
871 blksz_depth = blksz / (1 << host->data_shift);
872 fifo_depth = host->fifo_depth;
874 if (blksz_depth > fifo_depth)
878 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
879 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
880 * Currently just choose blksz.
883 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
887 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to submit the data phase via DMA: map the scatterlist, retune
 * FIFOTH if the block size changed, reset + enable the DMA interface,
 * mask the PIO (RX/TX drain) interrupts and start the dma_ops engine.
 * A non-zero return tells the caller to fall back to PIO.
 * NOTE(review): local declarations (sg_len, temp, flags) and the
 * failure-return paths are on lines missing from this listing.
 */
890 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
898 /* If we don't have a channel, we can't do DMA */
902 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
904 /* Fixme: No need terminate edma, may cause flush op */
905 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
906 host->dma_ops->stop(host);
913 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
914 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
918 * Decide the MSIZE and RX/TX Watermark.
919 * If current block size is same with previous size,
920 * no need to update fifoth.
922 if (host->prev_blksz != data->blksz)
923 dw_mci_adjust_fifoth(host, data);
926 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
928 /* Enable the DMA interface */
929 temp = mci_readl(host, CTRL);
930 temp |= SDMMC_CTRL_DMA_ENABLE;
931 mci_writel(host, CTRL, temp);
933 /* Disable RX/TX IRQs, let DMA handle it */
934 spin_lock_irqsave(&host->slock, flags);
935 temp = mci_readl(host, INTMASK);
936 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
937 mci_writel(host, INTMASK, temp);
938 spin_unlock_irqrestore(&host->slock, flags);
940 host->dma_ops->start(host, sg_len);
/*
 * Submit the data phase of a request.  Prefers DMA; when
 * dw_mci_submit_data_dma() refuses, falls back to PIO: set up the
 * sg_miter, re-enable the RX/TX drain interrupts, clear the DMA enable
 * bit and restore the initial FIFOTH (invalidating prev_blksz so a later
 * DMA transfer re-tunes it).
 * NOTE(review): local declarations (temp, flag) and several brace lines
 * are missing from this listing.
 */
945 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
950 data->error = -EINPROGRESS;
952 //WARN_ON(host->data);
957 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
959 if (data->flags & MMC_DATA_READ) {
960 host->dir_status = DW_MCI_RECV_STATUS;
961 dw_mci_ctrl_rd_thld(host, data);
963 host->dir_status = DW_MCI_SEND_STATUS;
966 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
967 data->blocks, data->blksz, mmc_hostname(host->mmc));
969 if (dw_mci_submit_data_dma(host, data)) {
970 int flags = SG_MITER_ATOMIC;
971 if (host->data->flags & MMC_DATA_READ)
972 flags |= SG_MITER_TO_SG;
974 flags |= SG_MITER_FROM_SG;
976 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
978 host->part_buf_start = 0;
979 host->part_buf_count = 0;
981 spin_lock_irqsave(&host->slock, flag);
982 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
983 temp = mci_readl(host, INTMASK);
984 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
985 mci_writel(host, INTMASK, temp);
986 spin_unlock_irqrestore(&host->slock, flag);
988 temp = mci_readl(host, CTRL);
989 temp &= ~SDMMC_CTRL_DMA_ENABLE;
990 mci_writel(host, CTRL, temp);
993 * Use the initial fifoth_val for PIO mode.
994 * If next issued data may be transfered by DMA mode,
995 * prev_blksz should be invalidated.
997 mci_writel(host, FIFOTH, host->fifoth_val);
998 host->prev_blksz = 0;
1001 * Keep the current block size.
1002 * It will be used to decide whether to update
1003 * fifoth register next time.
1005 host->prev_blksz = data->blksz;
/*
 * Issue a raw controller command (e.g. clock-update) and busy-wait until
 * the controller clears the START bit: first poll STATUS until the data
 * and MC state machines are idle, then write CMDARG/CMD and poll CMD.
 * NOTE(review): SDMMC_WAIT_FOR_UNBUSY is unconditionally #defined earlier
 * in this file, so the #ifdef below is always true -- confirm whether it
 * was meant to be a config option.
 */
1009 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1011 struct dw_mci *host = slot->host;
1012 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1013 unsigned int cmd_status = 0;
1014 #ifdef SDMMC_WAIT_FOR_UNBUSY
1016 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1018 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1020 ret = time_before(jiffies, timeout);
1021 cmd_status = mci_readl(host, STATUS);
1022 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1026 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1027 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1030 mci_writel(host, CMDARG, arg);
1032 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
/* Clock-update commands get a shorter completion window than normal ones. */
1033 if(cmd & SDMMC_CMD_UPD_CLK)
1034 timeout = jiffies + msecs_to_jiffies(50);
1036 timeout = jiffies + msecs_to_jiffies(500);
1037 while (time_before(jiffies, timeout)) {
1038 cmd_status = mci_readl(host, CMD);
1039 if (!(cmd_status & SDMMC_CMD_START))
1042 dev_err(&slot->mmc->class_dev,
1043 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1044 cmd, arg, cmd_status);
/*
 * Program the card clock and bus width for a slot.
 * Flow (per the surviving lines): gate the clock when clock==0; otherwise
 * compute the CLKDIV divider from bus_hz, apply the Rockchip low-clock
 * (<=400 kHz) clk_mmc workaround and the eMMC HS-DDR div!=0 workaround,
 * write CLKDIV, re-enable the clock (with low-power gating only when SDIO
 * interrupts are masked), and finally program CTYPE with the bus width.
 * NOTE(review): many control-flow lines (if/else braces, the sdio_int /
 * clk_en_a / div declarations and several mci_send_cmd call heads) are
 * missing from this listing -- do not modify this function without the
 * complete source.
 */
1047 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1049 struct dw_mci *host = slot->host;
1050 unsigned int tempck,clock = slot->clock;
1055 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1056 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1059 mci_writel(host, CLKENA, 0);
1060 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1061 if(host->svi_flags == 0)
1062 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1064 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1066 } else if (clock != host->current_speed || force_clkinit) {
1067 div = host->bus_hz / clock;
1068 if (host->bus_hz % clock && host->bus_hz > clock)
1070 * move the + 1 after the divide to prevent
1071 * over-clocking the card.
1075 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1077 if ((clock << div) != slot->__clk_old || force_clkinit) {
1078 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1079 dev_info(&slot->mmc->class_dev,
1080 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1081 slot->id, host->bus_hz, clock,
1084 host->set_speed = tempck;
1085 host->set_div = div;
1089 mci_writel(host, CLKENA, 0);
1090 mci_writel(host, CLKSRC, 0);
1094 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1096 if(clock <= 400*1000){
1097 MMC_DBG_BOOT_FUNC(host->mmc,
1098 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1099 clock * 2, mmc_hostname(host->mmc));
1100 /* clk_mmc will change parents to 24MHz xtal*/
1101 clk_set_rate(host->clk_mmc, clock * 2);
1104 host->set_div = div;
1108 MMC_DBG_BOOT_FUNC(host->mmc,
1109 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1110 mmc_hostname(host->mmc));
1113 MMC_DBG_ERR_FUNC(host->mmc,
1114 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1115 mmc_hostname(host->mmc));
1117 host->set_div = div;
1118 host->bus_hz = host->set_speed * 2;
1119 MMC_DBG_BOOT_FUNC(host->mmc,
1120 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1121 div, host->bus_hz, mmc_hostname(host->mmc));
1123 /* BUG may be here, come on, Linux BSP engineer looks!
1124 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1125 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1126 some oops happened like that:
1127 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1128 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1129 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1130 mmc0: new high speed DDR MMC card at address 0001
1131 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1133 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1134 mmcblk0: retrying using single block read
1135 mmcblk0: error -110 sending status command, retrying
1137 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1140 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1141 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1144 host->set_div = div;
1145 host->bus_hz = host->set_speed * 2;
1146 MMC_DBG_BOOT_FUNC(host->mmc,
1147 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1148 div, host->bus_hz, mmc_hostname(host->mmc));
1151 if (host->verid < DW_MMC_240A)
1152 clk_set_rate(host->clk_mmc,(host->bus_hz));
1154 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1160 /* set clock to desired speed */
1161 mci_writel(host, CLKDIV, div);
1165 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1167 /* enable clock; only low power if no SDIO */
1168 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1170 if (host->verid < DW_MMC_240A)
1171 sdio_int = SDMMC_INT_SDIO(slot->id);
1173 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1175 if (!(mci_readl(host, INTMASK) & sdio_int))
1176 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1177 mci_writel(host, CLKENA, clk_en_a);
1181 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1182 /* keep the clock with reflecting clock dividor */
1183 slot->__clk_old = clock << div;
1186 host->current_speed = clock;
1188 if(slot->ctype != slot->pre_ctype)
1189 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1191 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1192 mmc_hostname(host->mmc));
1193 slot->pre_ctype = slot->ctype;
1195 /* Set the current slot bus width */
1196 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1199 extern struct mmc_card *this_card;
/*
 * Poll STATUS until the data path and MC state machines go idle, with a
 * card-type-dependent timeout (extended for eMMC erase/secure-erase based
 * on EXT_CSD timing fields).
 * NOTE(review): line 1214 `(host->cmd->arg & (0x1 << 31)) == 1` can never
 * be true -- the mask result is 0 or 0x80000000, so the secure-erase flag
 * is never set; it should test `!= 0`.  Also `0x1 << 31` shifts into the
 * sign bit of int (UB); prefer `1U << 31`.  The se_flag declaration and
 * the do-loop opening are on lines missing from this listing.
 */
1200 static void dw_mci_wait_unbusy(struct dw_mci *host)
1203 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1204 unsigned long time_loop;
1205 unsigned int status;
1208 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1210 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1211 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1212 /* Special care for (secure)erase timeout calculation */
1214 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1217 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1218 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1219 300000 * (this_card->ext_csd.sec_erase_mult)) :
1220 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1224 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1225 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1226 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1227 timeout = SDMMC_DATA_TIMEOUT_SD;
1230 time_loop = jiffies + msecs_to_jiffies(timeout);
1232 status = mci_readl(host, STATUS);
1233 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1235 } while (time_before(jiffies, time_loop));
1240 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1243 * 0--status is busy.
1244 * 1--status is unbusy.
/*
 * Voltage-switch helper: toggles host->svi_flags between 0 and 1 on
 * successive calls and returns the new value (the surviving lines show
 * no actual STATUS-register read here despite the name -- the missing
 * lines presumably contain the real busy check; confirm in full source).
 */
1246 int dw_mci_card_busy(struct mmc_host *mmc)
1248 struct dw_mci_slot *slot = mmc_priv(mmc);
1249 struct dw_mci *host = slot->host;
1251 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1252 host->svi_flags, mmc_hostname(host->mmc));
1255 if(host->svi_flags == 0){
1257 host->svi_flags = 1;
1258 return host->svi_flags;
1261 host->svi_flags = 0;
1262 return host->svi_flags;
/*
 * Program the controller for one command (and its data, if any) and fire it.
 * Waits for the controller to go idle first, resets per-request event state,
 * and records the prepared stop command for later use.
 */
1268 static void __dw_mci_start_request(struct dw_mci *host,
1269 struct dw_mci_slot *slot,
1270 struct mmc_command *cmd)
1272 struct mmc_request *mrq;
1273 struct mmc_data *data;
1277 if (host->pdata->select_slot)
1278 host->pdata->select_slot(slot->id);
1280 host->cur_slot = slot;
/* Make sure the previous transfer has fully drained before reprogramming */
1283 dw_mci_wait_unbusy(host);
1285 host->pending_events = 0;
1286 host->completed_events = 0;
1287 host->data_status = 0;
/* Data phase: program timeout, total byte count and block size */
1291 dw_mci_set_timeout(host);
1292 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1293 mci_writel(host, BLKSIZ, data->blksz);
1296 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1298 /* this is the first command, send the initialization clock */
1299 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1300 cmdflags |= SDMMC_CMD_INIT;
1303 dw_mci_submit_data(host, data);
1307 dw_mci_start_command(host, cmd, cmdflags);
/* Pre-compute the stop command descriptor for this request */
1310 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * Kick off the slot's queued request.  If the request carries an SBC
 * (CMD23 set-block-count), that is sent first; otherwise the main command.
 */
1313 static void dw_mci_start_request(struct dw_mci *host,
1314 struct dw_mci_slot *slot)
1316 struct mmc_request *mrq = slot->mrq;
1317 struct mmc_command *cmd;
1319 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1320 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1322 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1323 __dw_mci_start_request(host, slot, cmd);
1326 /* must be called with host->lock held */
/*
 * Start the request immediately if the host state machine is idle,
 * otherwise park the slot on host->queue to be serviced later by
 * dw_mci_request_end().
 */
1327 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1328 struct mmc_request *mrq)
1330 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1335 if (host->state == STATE_IDLE) {
1336 host->state = STATE_SENDING_CMD;
1337 dw_mci_start_request(host, slot);
1339 list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops.request callback: entry point for every request from the
 * MMC core.  Fails fast with -ENOMEDIUM if no card is present, otherwise
 * queues (or starts) the request under host->lock.
 */
1343 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1345 struct dw_mci_slot *slot = mmc_priv(mmc);
1346 struct dw_mci *host = slot->host;
1351 * The check for card presence and queueing of the request must be
1352 * atomic, otherwise the card could be removed in between and the
1353 * request wouldn't fail until another card was inserted.
1355 spin_lock_bh(&host->lock);
1357 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1358 spin_unlock_bh(&host->lock);
1359 mrq->cmd->error = -ENOMEDIUM;
1360 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1361 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
1363 mmc_request_done(mmc, mrq);
1367 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1368 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1370 dw_mci_queue_request(host, slot, mrq);
1372 spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.set_ios callback: apply bus width, timing (DDR), clock and
 * power-mode changes requested by the MMC core.  Optionally busy-waits for
 * the controller to go idle first (SDMMC_WAIT_FOR_UNBUSY), with a longer
 * timeout while a signal-voltage switch is in flight (svi_flags == 1).
 */
1375 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1377 struct dw_mci_slot *slot = mmc_priv(mmc);
1378 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1379 struct dw_mci *host = slot->host;
1381 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1384 #ifdef SDMMC_WAIT_FOR_UNBUSY
1385 unsigned long time_loop;
1388 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* During a voltage switch, allow the full SD data timeout */
1389 if(host->svi_flags == 1)
1390 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1392 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1394 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1397 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1398 printk("%d..%s: no card. [%s]\n", \
1399 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until the data path and card state machine are idle */
1404 ret = time_before(jiffies, time_loop);
1405 regs = mci_readl(slot->host, STATUS);
1406 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1412 printk("slot->flags = %lu ", slot->flags);
1413 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1414 if(host->svi_flags != 1)
1417 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1418 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Translate the requested bus width into the controller's CTYPE encoding */
1422 switch (ios->bus_width) {
1423 case MMC_BUS_WIDTH_4:
1424 slot->ctype = SDMMC_CTYPE_4BIT;
1426 case MMC_BUS_WIDTH_8:
1427 slot->ctype = SDMMC_CTYPE_8BIT;
1430 /* set default 1 bit mode */
1431 slot->ctype = SDMMC_CTYPE_1BIT;
1432 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* UHS_REG bit (16 + slot id) selects DDR mode for this slot */
1435 regs = mci_readl(slot->host, UHS_REG);
1438 if (ios->timing == MMC_TIMING_UHS_DDR50)
1439 regs |= ((0x1 << slot->id) << 16);
1441 regs &= ~((0x1 << slot->id) << 16);
1443 mci_writel(slot->host, UHS_REG, regs);
1444 slot->host->timing = ios->timing;
1447 * Use mirror of ios->clock to prevent race with mmc
1448 * core ios update when finding the minimum.
1450 slot->clock = ios->clock;
/* Give the SoC-specific glue a chance to react (clock/phase tweaks) */
1452 if (drv_data && drv_data->set_ios)
1453 drv_data->set_ios(slot->host, ios);
1455 /* Slot specific timing and width adjustment */
1456 dw_mci_setup_bus(slot, false);
/* Apply the requested power mode via platform hook and PWREN register */
1460 switch (ios->power_mode) {
1462 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1464 if (slot->host->pdata->setpower)
1465 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1466 regs = mci_readl(slot->host, PWREN);
1467 regs |= (1 << slot->id);
1468 mci_writel(slot->host, PWREN, regs);
1471 /* Power down slot */
1472 if(slot->host->pdata->setpower)
1473 slot->host->pdata->setpower(slot->id, 0);
1474 regs = mci_readl(slot->host, PWREN);
1475 regs &= ~(1 << slot->id);
1476 mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops.get_ro callback: report the card's write-protect state.
 * Priority: slot quirk (never protected) > platform get_ro hook >
 * write-protect GPIO > controller WRTPRT register bit for this slot.
 * Returns 1 for read-only, 0 for read-write.
 */
1483 static int dw_mci_get_ro(struct mmc_host *mmc)
1486 struct dw_mci_slot *slot = mmc_priv(mmc);
1487 struct dw_mci_board *brd = slot->host->pdata;
1489 /* Use platform get_ro function, else try on board write protect */
1490 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1492 else if(brd->get_ro)
1493 read_only = brd->get_ro(slot->id);
1494 else if(gpio_is_valid(slot->wp_gpio))
1495 read_only = gpio_get_value(slot->wp_gpio);
1498 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1500 dev_dbg(&mmc->class_dev, "card is %s\n",
1501 read_only ? "read-only" : "read-write");
/*
 * Software "card detect" for SDIO (e.g. WiFi modules without a CD line):
 * set or clear the slot's PRESENT flag from the value requested by the
 * caller, gate the controller clocks accordingly, then schedule a rescan
 * via mmc_detect_change().  Only valid for SDIO-restricted hosts.
 */
1506 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1508 struct dw_mci_slot *slot = mmc_priv(mmc);
1509 struct dw_mci *host = slot->host;
1510 /*struct dw_mci_board *brd = slot->host->pdata;*/
1512 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1515 spin_lock_bh(&host->lock);
1518 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1520 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1522 spin_unlock_bh(&host->lock);
/* Card "inserted": enable hpclk/hclk/clk if not already running */
1524 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1525 if (!IS_ERR(host->hpclk_mmc) &&
1526 __clk_is_enabled(host->hpclk_mmc) == false)
1527 clk_prepare_enable(host->hpclk_mmc);
1528 if (__clk_is_enabled(host->hclk_mmc) == false)
1529 clk_prepare_enable(host->hclk_mmc);
1530 if (__clk_is_enabled(host->clk_mmc) == false)
1531 clk_prepare_enable(host->clk_mmc);
/* Card "removed": drop the clocks in reverse order */
1533 if (__clk_is_enabled(host->clk_mmc) == true)
1534 clk_disable_unprepare(slot->host->clk_mmc);
1535 if (__clk_is_enabled(host->hclk_mmc) == true)
1536 clk_disable_unprepare(slot->host->hclk_mmc);
1537 if (!IS_ERR(host->hpclk_mmc) &&
1538 __clk_is_enabled(host->hpclk_mmc) == true)
1539 clk_disable_unprepare(slot->host->hpclk_mmc);
/* Debounced rescan (20 ms) so the core re-probes the bus */
1542 mmc_detect_change(slot->mmc, 20);
/*
 * mmc_host_ops.get_cd callback: report card presence.
 * On RK3036/RK3126(b) SD slots the card-detect GPIO is shared with a
 * force-JTAG GRF mux, so detection also retargets the CD IRQ polarity and
 * flips the force_jtag bit in the GRF (JTAG enabled only when no card).
 * Otherwise: SDIO uses the software PRESENT flag, then platform hooks,
 * CD GPIO, and finally the controller's CDETECT register.
 */
1551 static int dw_mci_get_cd(struct mmc_host *mmc)
1551 struct dw_mci_slot *slot = mmc_priv(mmc);
1552 struct dw_mci_board *brd = slot->host->pdata;
1553 struct dw_mci *host = slot->host;
1554 int gpio_cd = mmc_gpio_get_cd(mmc);
1555 int force_jtag_bit, force_jtag_reg;
1559 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
1560 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1561 gpio_cd = slot->cd_gpio;
1562 irq = gpio_to_irq(gpio_cd);
1563 if (gpio_is_valid(gpio_cd)) {
1564 gpio_val = gpio_get_value(gpio_cd);
1565 if (soc_is_rk3036()) {
1566 force_jtag_bit = 11;
1567 force_jtag_reg = RK312X_GRF_SOC_CON0;
1568 } else if (soc_is_rk3126() || soc_is_rk3126b()) {
/*
 * NOTE(review): only force_jtag_reg is visibly assigned on this
 * path; confirm force_jtag_bit is set for rk3126 as well before
 * it is used in the grf_writel() calls below.
 */
1569 force_jtag_reg = RK312X_GRF_SOC_CON0;
/* Debounce: proceed only if the level is stable across two reads */
1573 if (gpio_val == gpio_get_value(gpio_cd)) {
1574 gpio_cd = gpio_get_value(gpio_cd) == 0 ? 1 : 0;
1576 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1577 /* Enable force_jtag wihtout card in slot, ONLY for NCD-package */
1578 grf_writel((0x1 << (force_jtag_bit + 16)) | (1 << force_jtag_bit),
1581 dw_mci_ctrl_all_reset(host);
1583 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
1584 /* Really card detected: SHOULD disable force_jtag */
1585 grf_writel((0x1 << (force_jtag_bit + 16)) | (0 << force_jtag_bit),
/* Unstable reading: keep the last state, just re-arm the IRQ edge */
1590 gpio_val = gpio_get_value(gpio_cd);
1592 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
1593 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1594 return slot->last_detect_state;
1597 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
/* SDIO presence is driven purely by dw_mci_set_sdio_status() */
1601 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1602 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1604 /* Use platform get_cd function, else try onboard card detect */
1605 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1607 else if (brd->get_cd)
1608 present = !brd->get_cd(slot->id);
1609 else if (!IS_ERR_VALUE(gpio_cd))
1612 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
/* Publish the result in the slot flags under the host lock */
1615 spin_lock_bh(&host->lock);
1617 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1618 dev_dbg(&mmc->class_dev, "card is present\n");
1620 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1621 dev_dbg(&mmc->class_dev, "card is not present\n");
1623 spin_unlock_bh(&host->lock);
1630 * Dts Should caps emmc controller with poll-hw-reset
/*
 * mmc_host_ops.hw_reset callback for eMMC: stop any transfer in flight
 * (CMD12), wait for DTO, reset IDMAC/DMA/FIFO in a fixed order, then
 * pulse RST_n with eMMC-spec-compliant timings.
 */
1632 static void dw_mci_hw_reset(struct mmc_host *mmc)
1634 struct dw_mci_slot *slot = mmc_priv(mmc);
1635 struct dw_mci *host = slot->host;
1640 unsigned long timeout;
1643 /* (1) CMD12 to end any transfer in process */
1644 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1645 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1647 if(host->mmc->hold_reg_flag)
1648 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1649 mci_writel(host, CMDARG, 0);
1651 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait (<= 500 ms) for the controller to accept the command */
1653 timeout = jiffies + msecs_to_jiffies(500);
1655 ret = time_before(jiffies, timeout);
1656 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1661 MMC_DBG_ERR_FUNC(host->mmc,
1662 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1663 __func__, mmc_hostname(host->mmc));
1665 /* (2) wait DTO, even if no response is sent back by card */
1667 timeout = jiffies + msecs_to_jiffies(5);
1669 ret = time_before(jiffies, timeout);
1670 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1671 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1677 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1679 /* Software reset - BMOD[0] for IDMA only */
1680 regs = mci_readl(host, BMOD);
1681 regs |= SDMMC_IDMAC_SWRESET;
1682 mci_writel(host, BMOD, regs);
1683 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1684 regs = mci_readl(host, BMOD);
1685 if(regs & SDMMC_IDMAC_SWRESET)
1686 MMC_DBG_WARN_FUNC(host->mmc,
1687 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1688 __func__, mmc_hostname(host->mmc));
1690 /* DMA reset - CTRL[2] */
1691 regs = mci_readl(host, CTRL);
1692 regs |= SDMMC_CTRL_DMA_RESET;
1693 mci_writel(host, CTRL, regs);
1694 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1695 regs = mci_readl(host, CTRL);
1696 if(regs & SDMMC_CTRL_DMA_RESET)
1697 MMC_DBG_WARN_FUNC(host->mmc,
1698 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1699 __func__, mmc_hostname(host->mmc));
1701 /* FIFO reset - CTRL[1] */
1702 regs = mci_readl(host, CTRL);
1703 regs |= SDMMC_CTRL_FIFO_RESET;
1704 mci_writel(host, CTRL, regs);
1705 mdelay(1); /* no timing limited, 1ms is random value */
1706 regs = mci_readl(host, CTRL);
1707 if(regs & SDMMC_CTRL_FIFO_RESET)
/*
 * NOTE(review): copy-paste — this warning text says "SDMMC_CTRL_DMA_RESET"
 * but it reports a FIFO reset failure; the message should name
 * SDMMC_CTRL_FIFO_RESET.
 */
1708 MMC_DBG_WARN_FUNC(host->mmc,
1709 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1710 __func__, mmc_hostname(host->mmc));
1713 According to eMMC spec
1714 tRstW >= 1us ; RST_n pulse width
1715 tRSCA >= 200us ; RST_n to Command time
1716 tRSTH >= 1us ; RST_n high period
/* Drive RST_n low (with power off), hold, then release and wait tRSCA */
1718 mci_writel(slot->host, PWREN, 0x0);
1719 mci_writel(slot->host, RST_N, 0x0);
1721 udelay(10); /* 10us for bad quality eMMc. */
1723 mci_writel(slot->host, PWREN, 0x1);
1724 mci_writel(slot->host, RST_N, 0x1);
1726 usleep_range(500, 1000); /* at least 500(> 200us) */
1730 * Disable lower power mode.
1732 * Low power mode will stop the card clock when idle. According to the
1733 * description of the CLKENA register we should disable low power mode
1734 * for SDIO cards if we need SDIO interrupts to work.
1736 * This function is fast if low power mode is already disabled.
1738 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1740 struct dw_mci *host = slot->host;
1742 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1744 clk_en_a = mci_readl(host, CLKENA);
/* Clear the per-slot LOW_PWR bit and latch it with a clock-update command */
1746 if (clk_en_a & clken_low_pwr) {
1747 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1748 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1749 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * mmc_host_ops.enable_sdio_irq callback: mask or unmask this slot's SDIO
 * interrupt in INTMASK.  The per-slot SDIO bit position moved in IP
 * version 2.40a, hence the verid check.  Enabling also forces low-power
 * clock gating off so the card clock keeps running and IRQs can arrive.
 */
1753 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1755 struct dw_mci_slot *slot = mmc_priv(mmc);
1756 struct dw_mci *host = slot->host;
1757 unsigned long flags;
1761 spin_lock_irqsave(&host->slock, flags);
1763 /* Enable/disable Slot Specific SDIO interrupt */
1764 int_mask = mci_readl(host, INTMASK);
1766 if (host->verid < DW_MMC_240A)
1767 sdio_int = SDMMC_INT_SDIO(slot->id);
1769 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1773 * Turn off low power mode if it was enabled. This is a bit of
1774 * a heavy operation and we disable / enable IRQs a lot, so
1775 * we'll leave low power mode disabled and it will get
1776 * re-enabled again in dw_mci_setup_bus().
1778 dw_mci_disable_low_power(slot);
1780 mci_writel(host, INTMASK,
1781 (int_mask | sdio_int));
1783 mci_writel(host, INTMASK,
1784 (int_mask & ~sdio_int));
1787 spin_unlock_irqrestore(&host->slock, flags);
1790 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* Supported IO-domain voltages, in millivolts */
1792 IO_DOMAIN_12 = 1200,
1793 IO_DOMAIN_18 = 1800,
1794 IO_DOMAIN_33 = 3300,
/*
 * Switch the SD IO-domain voltage select bit in the SoC's GRF.
 * Register/bit layout is chip specific (RK3288 vs RK3368); other chips
 * are rejected with an error log.
 */
1796 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1806 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1807 __FUNCTION__, mmc_hostname(host->mmc));
1810 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1811 __FUNCTION__, mmc_hostname(host->mmc));
/* Upper half-word of the GRF write is the write-enable mask for the bit */
1815 if (cpu_is_rk3288()) {
1816 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1817 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1820 } else if (host->cid == DW_MCI_TYPE_RK3368) {
1821 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1822 regmap_write(host->grf, 0x900, (voltage << 6) | (1 << 22));
1826 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1827 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * Perform the actual signal-voltage switch (3.3 V / 1.8 V / 1.2 V):
 * reprogram the vmmc regulator and IO domain, update UHS_REG's 1.8 V
 * enable bit, and give the rail time to stabilize.  Only meaningful on
 * controller IP >= 2.40a.
 */
1831 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1832 struct mmc_ios *ios)
1835 unsigned int value,uhs_reg;
1838 * Signal Voltage Switching is only applicable for Host Controllers
1841 if (host->verid < DW_MMC_240A)
1844 uhs_reg = mci_readl(host, UHS_REG);
1845 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1846 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1848 switch (ios->signal_voltage) {
1849 case MMC_SIGNAL_VOLTAGE_330:
1850 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1852 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1853 /* regulator_put(host->vmmc); //to be done in remove function. */
1855 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1856 __func__, regulator_get_voltage(host->vmmc), ret);
1858 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1859 " failed\n", mmc_hostname(host->mmc));
1862 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1864 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1865 __FUNCTION__, mmc_hostname(host->mmc));
1867 /* set High-power mode */
1868 value = mci_readl(host, CLKENA);
1869 value &= ~SDMMC_CLKEN_LOW_PWR;
1870 mci_writel(host,CLKENA , value);
/* Clear the 1.8 V enable bit for 3.3 V signalling */
1872 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1873 mci_writel(host,UHS_REG , uhs_reg);
1876 usleep_range(5000, 5500);
1878 /* 3.3V regulator output should be stable within 5 ms */
1879 uhs_reg = mci_readl(host, UHS_REG);
1880 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1883 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1884 mmc_hostname(host->mmc));
1887 case MMC_SIGNAL_VOLTAGE_180:
1889 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1890 /* regulator_put(host->vmmc);//to be done in remove function. */
1892 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1893 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1895 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1896 " failed\n", mmc_hostname(host->mmc));
1899 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1903 * Enable 1.8V Signal Enable in the Host Control2
1906 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1909 usleep_range(5000, 5500);
1910 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1911 __FUNCTION__,mmc_hostname(host->mmc));
1913 /* 1.8V regulator output should be stable within 5 ms */
1914 uhs_reg = mci_readl(host, UHS_REG);
1915 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1918 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1919 mmc_hostname(host->mmc));
1922 case MMC_SIGNAL_VOLTAGE_120:
1924 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1926 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1927 " failed\n", mmc_hostname(host->mmc));
1933 /* No signal voltage switch required */
/*
 * mmc_host_ops.start_signal_voltage_switch callback: thin wrapper that
 * bails out on pre-2.40a IP and otherwise delegates to
 * dw_mci_do_start_signal_voltage_switch().
 */
1939 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1940 struct mmc_ios *ios)
1942 struct dw_mci_slot *slot = mmc_priv(mmc);
1943 struct dw_mci *host = slot->host;
1946 if (host->verid < DW_MMC_240A)
1949 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * mmc_host_ops.execute_tuning callback: pick the tuning block pattern for
 * the opcode/bus width, select the clock-controller con_id for this card
 * type, and hand off to the SoC glue's execute_tuning hook.
 */
1955 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1957 struct dw_mci_slot *slot = mmc_priv(mmc);
1958 struct dw_mci *host = slot->host;
1959 const struct dw_mci_drv_data *drv_data = host->drv_data;
1960 struct dw_mci_tuning_data tuning_data;
1963 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1964 if(cpu_is_rk3036() || cpu_is_rk312x())
1967 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1968 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1969 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1970 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1971 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1972 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1973 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1977 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1978 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1979 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1982 "Undefined command(%d) for tuning\n", opcode);
1987 /* Recommend sample phase and delayline
1988 Fixme: Mix-use these three controllers will cause
/* con_id selects the clock controller instance: 3=eMMC, 1=SDIO, 0=SD */
1991 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1992 tuning_data.con_id = 3;
1993 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1994 tuning_data.con_id = 1;
1996 tuning_data.con_id = 0;
1998 /* 0: driver, from host->devices
1999 1: sample, from devices->host
2001 tuning_data.tuning_type = 1;
2003 if (drv_data && drv_data->execute_tuning)
2004 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/*
 * mmc_host_ops.post_tmo callback: after a request timeout, drop the
 * current request and force the state machine back to idle so the next
 * request can start cleanly.
 */
2009 static void dw_mci_post_tmo(struct mmc_host *mmc)
2011 struct dw_mci_slot *slot = mmc_priv(mmc);
2012 struct dw_mci *host = slot->host;
2013 host->cur_slot->mrq = NULL;
2015 host->state = STATE_IDLE;
/* mmc_host_ops table wiring the MMC core callbacks to this driver */
2018 static const struct mmc_host_ops dw_mci_ops = {
2019 .request = dw_mci_request,
2020 .pre_req = dw_mci_pre_req,
2021 .post_req = dw_mci_post_req,
2022 .set_ios = dw_mci_set_ios,
2023 .get_ro = dw_mci_get_ro,
2024 .get_cd = dw_mci_get_cd,
2025 .set_sdio_status = dw_mci_set_sdio_status,
2026 .hw_reset = dw_mci_hw_reset,
2027 .enable_sdio_irq = dw_mci_enable_sdio_irq,
2028 .execute_tuning = dw_mci_execute_tuning,
2029 .post_tmo = dw_mci_post_tmo,
2030 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
2031 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
2032 .card_busy = dw_mci_card_busy,
/*
 * Enable or disable the host's IRQ line, tracked via host->irq_state so
 * enable/disable calls are never unbalanced.  Runs with local interrupts
 * disabled to make the check-and-toggle atomic on this CPU.
 */
2037 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
2039 unsigned long flags;
2044 local_irq_save(flags);
2045 if(host->irq_state != irqflag)
2047 host->irq_state = irqflag;
2050 enable_irq(host->irq);
2054 disable_irq(host->irq);
2057 local_irq_restore(flags);
/*
 * Finalize the data phase of a request: for writes (except the CMD19
 * bus-test pattern) translate raw DCRC/EBE status bits into errno values,
 * then wait for the controller/card to become unbusy.
 */
2061 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2062 __releases(&host->lock)
2063 __acquires(&host->lock)
2065 if(DW_MCI_SEND_STATUS == host->dir_status){
2067 if( MMC_BUS_TEST_W != host->cmd->opcode){
2068 if(host->data_status & SDMMC_INT_DCRC)
2069 host->data->error = -EILSEQ;
2070 else if(host->data_status & SDMMC_INT_EBE)
2071 host->data->error = -ETIMEDOUT;
2073 dw_mci_wait_unbusy(host);
2076 dw_mci_wait_unbusy(host);
/*
 * Complete the current request: finish the data phase, clear cur_slot->mrq,
 * then either start the next queued slot or return to STATE_IDLE.  The host
 * lock is dropped around mmc_request_done() since the core may re-enter
 * the driver from that callback.
 */
2081 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2082 __releases(&host->lock)
2083 __acquires(&host->lock)
2085 struct dw_mci_slot *slot;
2086 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2088 //WARN_ON(host->cmd || host->data);
2090 dw_mci_deal_data_end(host, mrq);
2093 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2094 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2096 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2097 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2099 host->cur_slot->mrq = NULL;
/* If another slot is waiting, start its request immediately */
2101 if (!list_empty(&host->queue)) {
2102 slot = list_entry(host->queue.next,
2103 struct dw_mci_slot, queue_node);
2104 list_del(&slot->queue_node);
2105 dev_vdbg(host->dev, "list not empty: %s is next\n",
2106 mmc_hostname(slot->mmc));
2107 host->state = STATE_SENDING_CMD;
2108 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2109 dw_mci_start_request(host, slot);
2111 dev_vdbg(host->dev, "list empty\n");
2112 host->state = STATE_IDLE;
/* Drop the lock while notifying the core; it may call back in */
2115 spin_unlock(&host->lock);
2116 mmc_request_done(prev_mmc, mrq);
2117 spin_lock(&host->lock);
/*
 * Read back the command response registers and translate the latched
 * command status (host->cmd_status) into cmd->error:
 * RTO -> -ETIMEDOUT, response CRC -> -EILSEQ, RESP_ERR handled below.
 */
2120 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2122 u32 status = host->cmd_status;
2124 host->cmd_status = 0;
2126 /* Read the response from the card (up to 16 bytes) */
2127 if (cmd->flags & MMC_RSP_PRESENT) {
2128 if (cmd->flags & MMC_RSP_136) {
/* Long (136-bit) response: RESP0..3 hold it least-significant-first */
2129 cmd->resp[3] = mci_readl(host, RESP0);
2130 cmd->resp[2] = mci_readl(host, RESP1);
2131 cmd->resp[1] = mci_readl(host, RESP2);
2132 cmd->resp[0] = mci_readl(host, RESP3);
2134 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2135 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2137 cmd->resp[0] = mci_readl(host, RESP0);
2141 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2142 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
2146 if (status & SDMMC_INT_RTO)
2148 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2151 cmd->error = -ETIMEDOUT;
2152 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2153 cmd->error = -EILSEQ;
2154 }else if (status & SDMMC_INT_RESP_ERR){
2159 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2160 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* Rate-limit repeated response-timeout logging (except CMD13 polling) */
2163 if(MMC_SEND_STATUS != cmd->opcode)
2164 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2165 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2166 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2170 /* newer ip versions need a delay between retries */
2171 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine driven by IRQ-side pending_events bits.
 * Loops under host->lock until the state stops changing:
 *   STATE_SENDING_CMD  -> command done; chain SBC, handle cmd errors,
 *                         or advance to data / request end
 *   STATE_SENDING_DATA -> on EVENT_DATA_ERROR issue a manual CMD12
 *                         recovery; else wait for transfer completion
 *   STATE_DATA_BUSY    -> classify data status into errno, reset FIFO on
 *                         error, then finish or send the stop command
 *   STATE_SENDING_STOP -> complete the stop command and end the request
 *   STATE_DATA_ERROR   -> wait for transfer drain, back to DATA_BUSY
 */
2177 static void dw_mci_tasklet_func(unsigned long priv)
2179 struct dw_mci *host = (struct dw_mci *)priv;
2180 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2181 struct mmc_data *data;
2182 struct mmc_command *cmd;
2183 enum dw_mci_state state;
2184 enum dw_mci_state prev_state;
2185 u32 status, cmd_flags;
2186 unsigned long timeout = 0;
2189 spin_lock(&host->lock);
2191 state = host->state;
2201 case STATE_SENDING_CMD:
2202 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2203 &host->pending_events))
2208 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2209 dw_mci_command_complete(host, cmd);
/* SBC (CMD23) done: immediately launch the main data command */
2210 if (cmd == host->mrq->sbc && !cmd->error) {
2211 prev_state = state = STATE_SENDING_CMD;
2212 __dw_mci_start_request(host, host->cur_slot,
/* Command failed with data attached: abort DMA and send stop */
2217 if (cmd->data && cmd->error) {
2218 dw_mci_stop_dma(host);
2221 send_stop_cmd(host, data);
2222 state = STATE_SENDING_STOP;
2225 /* host->data = NULL; */
2228 send_stop_abort(host, data);
2229 state = STATE_SENDING_STOP;
2232 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2235 if (!host->mrq->data || cmd->error) {
2236 dw_mci_request_end(host, host->mrq);
2240 prev_state = state = STATE_SENDING_DATA;
2243 case STATE_SENDING_DATA:
2244 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2245 dw_mci_stop_dma(host);
2248 send_stop_cmd(host, data);
2250 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2251 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2252 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
/* Hand-craft a CMD12 (stop) directly in the CMD/CMDARG registers */
2254 mci_writel(host, CMDARG, 0);
2256 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2257 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2259 if(host->mmc->hold_reg_flag)
2260 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2262 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait (<= 500 ms) for the controller to latch the command */
2264 timeout = jiffies + msecs_to_jiffies(500);
2267 ret = time_before(jiffies, timeout);
2268 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2272 MMC_DBG_ERR_FUNC(host->mmc,
2273 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2274 __func__, mmc_hostname(host->mmc));
2277 send_stop_abort(host, data);
2279 state = STATE_DATA_ERROR;
2283 MMC_DBG_CMD_FUNC(host->mmc,
2284 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2285 prev_state,state, mmc_hostname(host->mmc));
2287 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2288 &host->pending_events))
2290 MMC_DBG_INFO_FUNC(host->mmc,
2291 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2292 prev_state,state,mmc_hostname(host->mmc));
2294 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2295 prev_state = state = STATE_DATA_BUSY;
2298 case STATE_DATA_BUSY:
2299 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2300 &host->pending_events))
2303 dw_mci_deal_data_end(host, host->mrq);
2304 MMC_DBG_INFO_FUNC(host->mmc,
2305 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2306 prev_state,state,mmc_hostname(host->mmc));
2308 /* host->data = NULL; */
2309 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2310 status = host->data_status;
/* Map raw data-status bits to errno on the mmc_data */
2312 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2313 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2314 MMC_DBG_ERR_FUNC(host->mmc,
2315 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2316 prev_state,state, status, mmc_hostname(host->mmc));
2318 if (status & SDMMC_INT_DRTO) {
2319 data->error = -ETIMEDOUT;
2320 } else if (status & SDMMC_INT_DCRC) {
2321 data->error = -EILSEQ;
2322 } else if (status & SDMMC_INT_EBE &&
2323 host->dir_status == DW_MCI_SEND_STATUS){
2325 * No data CRC status was returned.
2326 * The number of bytes transferred will
2327 * be exaggerated in PIO mode.
2329 data->bytes_xfered = 0;
2330 data->error = -ETIMEDOUT;
2339 * After an error, there may be data lingering
2340 * in the FIFO, so reset it - doing so
2341 * generates a block interrupt, hence setting
2342 * the scatter-gather pointer to NULL.
2344 dw_mci_fifo_reset(host);
2346 data->bytes_xfered = data->blocks * data->blksz;
2351 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2352 prev_state,state,mmc_hostname(host->mmc));
2353 dw_mci_request_end(host, host->mrq);
2356 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2357 prev_state,state,mmc_hostname(host->mmc));
/* With SBC and no error, CMD12 is unnecessary: finish here */
2359 if (host->mrq->sbc && !data->error) {
2360 data->stop->error = 0;
2362 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2363 prev_state,state,mmc_hostname(host->mmc));
2365 dw_mci_request_end(host, host->mrq);
2369 prev_state = state = STATE_SENDING_STOP;
2371 send_stop_cmd(host, data);
2373 if (data->stop && !data->error) {
2374 /* stop command for open-ended transfer*/
2376 send_stop_abort(host, data);
2380 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2381 prev_state,state,mmc_hostname(host->mmc));
2383 case STATE_SENDING_STOP:
2384 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2387 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2388 prev_state, state, mmc_hostname(host->mmc));
2390 /* CMD error in data command */
2391 if (host->mrq->cmd->error && host->mrq->data) {
2392 dw_mci_fifo_reset(host);
2396 host->data = NULL; */
2398 dw_mci_command_complete(host, host->mrq->stop);
2400 if (host->mrq->stop)
2401 dw_mci_command_complete(host, host->mrq->stop);
2403 host->cmd_status = 0;
2406 dw_mci_request_end(host, host->mrq);
2409 case STATE_DATA_ERROR:
2410 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2411 &host->pending_events))
2414 state = STATE_DATA_BUSY;
/* Re-run the switch until a pass makes no state transition */
2417 } while (state != prev_state);
2419 host->state = state;
2421 spin_unlock(&host->lock);
2425 /* push final bytes to part_buf, only use during push */
/* Stash the trailing cnt bytes (less than one FIFO word) in part_buf */
2426 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2428 memcpy((void *)&host->part_buf, buf, cnt);
2429 host->part_buf_count = cnt;
2432 /* append bytes to part_buf, only use during push */
/*
 * Append up to cnt bytes to part_buf, capped so the buffer never exceeds
 * one FIFO word (1 << data_shift bytes).  Returns the number consumed.
 */
2433 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2435 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2436 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2437 host->part_buf_count += cnt;
2442 /* pull first bytes from part_buf, only use during pull */
/*
 * Copy up to cnt leftover bytes out of part_buf into buf, advancing
 * part_buf_start.  Returns the number of bytes actually copied.
 */
2444 cnt = min(cnt, (int)host->part_buf_count);
2446 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2448 host->part_buf_count -= cnt;
2449 host->part_buf_start += cnt;
2454 /* pull final bytes from the part_buf, assuming it's just been filled */
/* Take the first cnt bytes of a freshly-read FIFO word; keep the rest */
2455 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2457 memcpy(buf, &host->part_buf, cnt);
2458 host->part_buf_start = cnt;
2459 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO write path for a 16-bit FIFO: drain any partial word first, handle
 * unaligned buffers by bouncing through an aligned scratch array, write
 * full 16-bit words, and stash any trailing odd byte in part_buf (flushed
 * immediately if this completes the expected transfer length).
 */
2462 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2464 struct mmc_data *data = host->data;
2467 /* try and push anything in the part_buf */
2468 if (unlikely(host->part_buf_count)) {
2469 int len = dw_mci_push_part_bytes(host, buf, cnt);
2472 if (host->part_buf_count == 2) {
2473 mci_writew(host, DATA(host->data_offset),
2475 host->part_buf_count = 0;
2478 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* Unaligned source: copy through an aligned bounce buffer */
2479 if (unlikely((unsigned long)buf & 0x1)) {
2481 u16 aligned_buf[64];
2482 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2483 int items = len >> 1;
2485 /* memcpy from input buffer into aligned buffer */
2486 memcpy(aligned_buf, buf, len);
2489 /* push data from aligned buffer into fifo */
2490 for (i = 0; i < items; ++i)
2491 mci_writew(host, DATA(host->data_offset),
2498 for (; cnt >= 2; cnt -= 2)
2499 mci_writew(host, DATA(host->data_offset), *pdata++);
2502 /* put anything remaining in the part_buf */
2504 dw_mci_set_part_bytes(host, buf, cnt);
2505 /* Push data if we have reached the expected data length */
2506 if ((data->bytes_xfered + init_cnt) ==
2507 (data->blksz * data->blocks))
2508 mci_writew(host, DATA(host->data_offset),
/*
 * PIO read path for a 16-bit wide data register: read u16 words from the
 * FIFO (via a bounce buffer when the destination is odd-aligned), then read
 * one final word into part_buf16 for any odd trailing byte.
 * NOTE(review): incomplete extraction — braces and declarations (i, pdata)
 * dropped; surviving tokens are byte-identical.
 */
2513 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2515 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2516 if (unlikely((unsigned long)buf & 0x1)) {
2518 /* pull data from fifo into aligned buffer */
2519 u16 aligned_buf[64];
2520 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2521 int items = len >> 1;
2523 for (i = 0; i < items; ++i)
2524 aligned_buf[i] = mci_readw(host,
2525 DATA(host->data_offset));
2526 /* memcpy from aligned buffer into output buffer */
2527 memcpy(buf, aligned_buf, len);
/* Aligned fast path: read straight into the caller's buffer. */
2535 for (; cnt >= 2; cnt -= 2)
2536 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Odd trailing byte: fetch a whole word, hand back cnt bytes of it. */
2540 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2541 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO write path for a 32-bit wide data register; same structure as the
 * 16-bit variant but with 4-byte words and a 0x3 alignment mask.
 * NOTE(review): incomplete extraction — braces, declarations (i, pdata,
 * init_cnt) and some argument lines dropped; tokens byte-identical.
 */
2545 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2547 struct mmc_data *data = host->data;
2550 /* try and push anything in the part_buf */
2551 if (unlikely(host->part_buf_count)) {
2552 int len = dw_mci_push_part_bytes(host, buf, cnt);
/* A full 4-byte word is buffered: flush it. */
2555 if (host->part_buf_count == 4) {
2556 mci_writel(host, DATA(host->data_offset),
2558 host->part_buf_count = 0;
2561 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2562 if (unlikely((unsigned long)buf & 0x3)) {
2564 u32 aligned_buf[32];
2565 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2566 int items = len >> 2;
2568 /* memcpy from input buffer into aligned buffer */
2569 memcpy(aligned_buf, buf, len);
2572 /* push data from aligned buffer into fifo */
2573 for (i = 0; i < items; ++i)
2574 mci_writel(host, DATA(host->data_offset),
/* Aligned fast path. */
2581 for (; cnt >= 4; cnt -= 4)
2582 mci_writel(host, DATA(host->data_offset), *pdata++);
2585 /* put anything remaining in the part_buf */
2587 dw_mci_set_part_bytes(host, buf, cnt);
2588 /* Push data if we have reached the expected data length */
2589 if ((data->bytes_xfered + init_cnt) ==
2590 (data->blksz * data->blocks))
2591 mci_writel(host, DATA(host->data_offset),
/*
 * PIO read path for a 32-bit wide data register; mirrors dw_mci_pull_data16
 * with 4-byte words. NOTE(review): incomplete extraction — braces and
 * declarations dropped; tokens byte-identical.
 */
2596 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2598 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2599 if (unlikely((unsigned long)buf & 0x3)) {
2601 /* pull data from fifo into aligned buffer */
2602 u32 aligned_buf[32];
2603 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2604 int items = len >> 2;
2606 for (i = 0; i < items; ++i)
2607 aligned_buf[i] = mci_readl(host,
2608 DATA(host->data_offset));
2609 /* memcpy from aligned buffer into output buffer */
2610 memcpy(buf, aligned_buf, len);
/* Aligned fast path. */
2618 for (; cnt >= 4; cnt -= 4)
2619 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Trailing 1-3 bytes: read a whole word, return cnt bytes of it. */
2623 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2624 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO write path for a 64-bit wide data register; same structure as the
 * 16/32-bit variants with 8-byte words and a 0x7 alignment mask.
 * NOTE(review): incomplete extraction — braces, declarations (i, pdata,
 * init_cnt) and argument lines dropped; tokens byte-identical.
 */
2628 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2630 struct mmc_data *data = host->data;
2633 /* try and push anything in the part_buf */
2634 if (unlikely(host->part_buf_count)) {
2635 int len = dw_mci_push_part_bytes(host, buf, cnt);
/* A full 8-byte word is buffered: flush it. */
2639 if (host->part_buf_count == 8) {
2640 mci_writeq(host, DATA(host->data_offset),
2642 host->part_buf_count = 0;
2645 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2646 if (unlikely((unsigned long)buf & 0x7)) {
2648 u64 aligned_buf[16];
2649 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2650 int items = len >> 3;
2652 /* memcpy from input buffer into aligned buffer */
2653 memcpy(aligned_buf, buf, len);
2656 /* push data from aligned buffer into fifo */
2657 for (i = 0; i < items; ++i)
2658 mci_writeq(host, DATA(host->data_offset),
/* Aligned fast path. */
2665 for (; cnt >= 8; cnt -= 8)
2666 mci_writeq(host, DATA(host->data_offset), *pdata++);
2669 /* put anything remaining in the part_buf */
2671 dw_mci_set_part_bytes(host, buf, cnt);
2672 /* Push data if we have reached the expected data length */
2673 if ((data->bytes_xfered + init_cnt) ==
2674 (data->blksz * data->blocks))
2675 mci_writeq(host, DATA(host->data_offset),
/*
 * PIO read path for a 64-bit wide data register; mirrors the 16/32-bit pull
 * helpers with 8-byte words. NOTE(review): incomplete extraction — braces
 * and declarations dropped; tokens byte-identical.
 */
2680 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2682 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2683 if (unlikely((unsigned long)buf & 0x7)) {
2685 /* pull data from fifo into aligned buffer */
2686 u64 aligned_buf[16];
2687 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2688 int items = len >> 3;
2690 for (i = 0; i < items; ++i)
2691 aligned_buf[i] = mci_readq(host,
2692 DATA(host->data_offset));
2693 /* memcpy from aligned buffer into output buffer */
2694 memcpy(buf, aligned_buf, len);
/* Aligned fast path. */
2702 for (; cnt >= 8; cnt -= 8)
2703 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* Trailing 1-7 bytes: read a whole word, return cnt bytes of it. */
2707 host->part_buf = mci_readq(host, DATA(host->data_offset));
2708 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Front-end for PIO reads: satisfy the request from part_buf first, then
 * delegate the remainder to the width-specific host->pull_data handler.
 * NOTE(review): incomplete extraction — braces, the early-return body and
 * the buf/cnt adjustment lines were dropped; tokens byte-identical.
 */
2712 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2716 /* get remaining partial bytes */
2717 len = dw_mci_pull_part_bytes(host, buf, cnt);
/* part_buf alone satisfied the request — nothing to read from the FIFO. */
2718 if (unlikely(len == cnt))
2723 /* get the rest of the data */
2724 host->pull_data(host, buf, cnt);
/*
 * PIO receive loop: walk the request's scatterlist with an sg_mapping_iter,
 * draining the FIFO while RXDR keeps asserting (and, on DTO, until the FIFO
 * count reads zero). NOTE(review): incomplete extraction — braces, several
 * declarations, "done" labels and loop framing dropped; tokens byte-identical.
 */
2727 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2729 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2731 unsigned int offset;
2732 struct mmc_data *data = host->data;
2733 int shift = host->data_shift;
2736 unsigned int remain, fcnt;
/* Guard observed in this driver: bail if the host has no bus references. */
2738 if(!host->mmc->bus_refs){
2739 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2743 if (!sg_miter_next(sg_miter))
2746 host->sg = sg_miter->piter.sg;
2747 buf = sg_miter->addr;
2748 remain = sg_miter->length;
/* Bytes available = FIFO occupancy (in words, scaled) + buffered leftovers. */
2752 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2753 << shift) + host->part_buf_count;
2754 len = min(remain, fcnt);
2757 dw_mci_pull_data(host, (void *)(buf + offset), len);
2758 data->bytes_xfered += len;
2763 sg_miter->consumed = offset;
2764 status = mci_readl(host, MINTSTS);
2765 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2766 /* if the RXDR is ready read again */
2767 } while ((status & SDMMC_INT_RXDR) ||
2768 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2771 if (!sg_miter_next(sg_miter))
2773 sg_miter->consumed = 0;
2775 sg_miter_stop(sg_miter);
2779 sg_miter_stop(sg_miter);
/* Transfer fully consumed — let the tasklet observe completion. */
2783 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO transmit loop: mirror of dw_mci_read_data_pio, filling the FIFO while
 * TXDR keeps asserting. NOTE(review): incomplete extraction — braces, labels
 * and loop framing dropped; tokens byte-identical.
 */
2786 static void dw_mci_write_data_pio(struct dw_mci *host)
2788 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2790 unsigned int offset;
2791 struct mmc_data *data = host->data;
2792 int shift = host->data_shift;
2795 unsigned int fifo_depth = host->fifo_depth;
2796 unsigned int remain, fcnt;
/* Same bus_refs guard as the read path. */
2798 if(!host->mmc->bus_refs){
2799 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2804 if (!sg_miter_next(sg_miter))
2807 host->sg = sg_miter->piter.sg;
2808 buf = sg_miter->addr;
2809 remain = sg_miter->length;
/* Free space = (depth - occupancy) in words, scaled, minus buffered bytes. */
2813 fcnt = ((fifo_depth -
2814 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2815 << shift) - host->part_buf_count;
2816 len = min(remain, fcnt);
2819 host->push_data(host, (void *)(buf + offset), len);
2820 data->bytes_xfered += len;
2825 sg_miter->consumed = offset;
2826 status = mci_readl(host, MINTSTS);
2827 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2828 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2831 if (!sg_miter_next(sg_miter))
2833 sg_miter->consumed = 0;
2835 sg_miter_stop(sg_miter);
2839 sg_miter_stop(sg_miter);
/* Transfer fully produced — signal completion to the tasklet. */
2843 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Record the first command-phase interrupt status, flag command completion
 * and kick the state-machine tasklet. NOTE(review): incomplete extraction —
 * braces dropped; tokens byte-identical.
 */
2846 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
/* Only latch the first status; later interrupts must not overwrite it. */
2848 if (!host->cmd_status)
2849 host->cmd_status = status;
2856 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2857 tasklet_schedule(&host->tasklet);
/*
 * Top-half IRQ handler: read MINTSTS once, then ack-and-dispatch each
 * pending source (command errors, data errors, DTO, PIO RXDR/TXDR, voltage
 * switch, command done, card detect, hardware-lock error, per-slot SDIO
 * interrupts, and — for internal-IDMAC SoCs — the DMA status register).
 * NOTE(review): incomplete extraction — braces, the `int i` declaration and
 * the IRQ_HANDLED return were dropped; surviving tokens are byte-identical.
 */
2860 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2862 struct dw_mci *host = dev_id;
2863 u32 pending, sdio_int;
2866 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2869 * DTO fix - version 2.10a and below, and only if internal DMA
/* Quirk: synthesize DATA_OVER when the FIFO count says the transfer ended. */
2872 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2874 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2875 pending |= SDMMC_INT_DATA_OVER;
2879 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2880 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2881 host->cmd_status = pending;
2883 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2884 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2886 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2889 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2890 /* if there is an error report DATA_ERROR */
2891 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2892 host->data_status = pending;
2894 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2896 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2897 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2898 tasklet_schedule(&host->tasklet);
/* Data transfer over: drain any PIO remainder, then complete. */
2901 if (pending & SDMMC_INT_DATA_OVER) {
2902 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2903 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2904 if (!host->data_status)
2905 host->data_status = pending;
2907 if (host->dir_status == DW_MCI_RECV_STATUS) {
2908 if (host->sg != NULL)
2909 dw_mci_read_data_pio(host, true);
2911 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2912 tasklet_schedule(&host->tasklet);
/* PIO threshold interrupts: pump the FIFO in the matching direction. */
2915 if (pending & SDMMC_INT_RXDR) {
2916 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2917 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2918 dw_mci_read_data_pio(host, false);
2921 if (pending & SDMMC_INT_TXDR) {
2922 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2923 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2924 dw_mci_write_data_pio(host);
/* Voltage-switch interrupt is funneled through the command path. */
2927 if (pending & SDMMC_INT_VSI) {
2928 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2929 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2930 dw_mci_cmd_interrupt(host, pending);
2933 if (pending & SDMMC_INT_CMD_DONE) {
2934 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2935 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2936 dw_mci_cmd_interrupt(host, pending);
/* Card-detect: hold a wakelock and defer the heavy work to the workqueue. */
2939 if (pending & SDMMC_INT_CD) {
2940 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2941 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
2942 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2943 queue_work(host->card_workqueue, &host->card_work);
2946 if (pending & SDMMC_INT_HLE) {
2947 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2948 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2952 /* Handle SDIO Interrupts */
2953 for (i = 0; i < host->num_slots; i++) {
2954 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved by 8 starting with IP version 2.40a. */
2956 if (host->verid < DW_MMC_240A)
2957 sdio_int = SDMMC_INT_SDIO(i);
2959 sdio_int = SDMMC_INT_SDIO(i + 8);
2961 if (pending & sdio_int) {
2962 mci_writel(host, RINTSTS, sdio_int);
2963 mmc_signal_sdio_irq(slot->mmc);
2969 #ifdef CONFIG_MMC_DW_IDMAC
2970 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2971 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2972 /* Handle DMA interrupts */
2973 pending = mci_readl(host, IDSTS);
2974 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2975 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2976 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2977 host->dma_ops->complete((void *)host);
/*
 * Workqueue handler for card insert/remove: per slot, re-read card presence,
 * optionally flip SD pins between functional and uart-debug pinctrl states,
 * and on a state change reset the controller, abort the in-flight request
 * with -ENOMEDIUM, and notify the MMC core. NOTE(review): incomplete
 * extraction — braces, several declarations, switch framing and loop bodies
 * were dropped; surviving tokens are byte-identical.
 */
2985 static void dw_mci_work_routine_card(struct work_struct *work)
2987 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2990 for (i = 0; i < host->num_slots; i++) {
2991 struct dw_mci_slot *slot = host->slot[i];
2992 struct mmc_host *mmc = slot->mmc;
2993 struct mmc_request *mrq;
2996 present = dw_mci_get_cd(mmc);
2998 /* Card insert, switch data line to uart function, and vice verse.
2999 eONLY audi chip need switched by software, using udbg tag in dts!
/* Only boards that declare both "udbg" and default pinctrl states do this. */
3001 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
3003 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3004 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3005 mmc_hostname(host->mmc));
3007 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3008 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3009 mmc_hostname(host->mmc));
/* Loop until the cached detect state agrees with the live reading. */
3013 while (present != slot->last_detect_state) {
3014 dev_dbg(&slot->mmc->class_dev, "card %s\n",
3015 present ? "inserted" : "removed");
3016 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
3017 present ? "inserted" : "removed.", mmc_hostname(mmc));
3019 dw_mci_ctrl_all_reset(host);
3020 /* Stop edma when rountine card triggered */
3021 if(cpu_is_rk3036() || cpu_is_rk312x())
3022 if(host->dma_ops && host->dma_ops->stop)
3023 host->dma_ops->stop(host);
3024 rk_send_wakeup_key();//wake up system
3025 spin_lock_bh(&host->lock);
3027 /* Card change detected */
3028 slot->last_detect_state = present;
3030 /* Clean up queue if present */
/* If the dying request is the one the controller is executing, fail it
 * according to the state machine's current phase. */
3033 if (mrq == host->mrq) {
3037 switch (host->state) {
3040 case STATE_SENDING_CMD:
3041 mrq->cmd->error = -ENOMEDIUM;
3045 case STATE_SENDING_DATA:
3046 mrq->data->error = -ENOMEDIUM;
3047 dw_mci_stop_dma(host);
3049 case STATE_DATA_BUSY:
3050 case STATE_DATA_ERROR:
3051 if (mrq->data->error == -EINPROGRESS)
3052 mrq->data->error = -ENOMEDIUM;
3056 case STATE_SENDING_STOP:
3057 mrq->stop->error = -ENOMEDIUM;
3061 dw_mci_request_end(host, mrq);
/* Otherwise it was still queued: fail every stage with -ENOMEDIUM. */
3063 list_del(&slot->queue_node);
3064 mrq->cmd->error = -ENOMEDIUM;
3066 mrq->data->error = -ENOMEDIUM;
3068 mrq->stop->error = -ENOMEDIUM;
3070 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3071 mrq->cmd->opcode, mmc_hostname(mmc));
/* Drop the lock across mmc_request_done: it may re-enter the host. */
3073 spin_unlock(&host->lock);
3074 mmc_request_done(slot->mmc, mrq);
3075 spin_lock(&host->lock);
3079 /* Power down slot */
3081 /* Clear down the FIFO */
3082 dw_mci_fifo_reset(host);
3083 #ifdef CONFIG_MMC_DW_IDMAC
3084 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3085 dw_mci_idmac_reset(host);
3090 spin_unlock_bh(&host->lock);
/* Re-sample in case the card state changed while we worked. */
3092 present = dw_mci_get_cd(mmc);
3095 mmc_detect_change(slot->mmc,
3096 msecs_to_jiffies(host->pdata->detect_delay_ms));
/*
 * Map a slot id to its device-tree child node by matching the "reg"
 * property. NOTE(review): incomplete extraction — braces and the return
 * statements were dropped; tokens byte-identical.
 */
3101 /* given a slot id, find out the device node representing that slot */
3102 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3104 struct device_node *np;
3108 if (!dev || !dev->of_node)
3111 for_each_child_of_node(dev->of_node, np) {
3112 addr = of_get_property(np, "reg", &len);
/* Skip children without a sane "reg" cell. */
3113 if (!addr || (len < sizeof(int)))
3115 if (be32_to_cpup(addr) == slot)
/* Table mapping per-slot DT property names to slot quirk flags. */
3121 static struct dw_mci_of_slot_quirks {
3124 } of_slot_quirks[] = {
3126 .quirk = "disable-wp",
3127 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * OR together the quirk flags whose DT properties appear on this slot's
 * node. NOTE(review): incomplete extraction — braces, the quirks declaration
 * and return were dropped; tokens byte-identical.
 */
3131 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3133 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3138 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3139 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3140 quirks |= of_slot_quirks[idx].id;
/*
 * Read the "bus-width" DT property for a slot, warning (and presumably
 * defaulting — the fallback line was dropped) when it is absent.
 * NOTE(review): incomplete extraction; tokens byte-identical.
 */
3145 /* find out bus-width for a given slot */
3146 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3148 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3154 if (of_property_read_u32(np, "bus-width", &bus_wd))
3155 dev_err(dev, "bus-width property not found, assuming width"
/*
 * Claim the optional "pwr-gpios" pin and drive it low (power-enable).
 * Returns -1 semantics per the header comment when absent.
 * NOTE(review): incomplete extraction — braces and returns were dropped;
 * tokens byte-identical.
 */
3161 /* find the pwr-en gpio for a given slot; or -1 if none specified */
3162 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3164 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3170 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3172 /* Having a missing entry is valid; return silently */
3173 if (!gpio_is_valid(gpio))
3176 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3177 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3181 gpio_direction_output(gpio, 0);//set 0 to pwr-en
/*
 * Claim the optional per-slot "wp-gpios" write-protect pin.
 * NOTE(review): incomplete extraction — braces and returns dropped;
 * tokens byte-identical.
 */
3187 /* find the write protect gpio for a given slot; or -1 if none specified */
3188 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3190 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3196 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3198 /* Having a missing entry is valid; return silently */
3199 if (!gpio_is_valid(gpio))
3202 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3203 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * Register the optional "cd-gpios" card-detect pin with the MMC slot-gpio
 * helper. NOTE(review): incomplete extraction — braces and early-return
 * bodies dropped; tokens byte-identical.
 */
3210 /* find the cd gpio for a given slot */
3211 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3212 struct mmc_host *mmc)
3214 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3220 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3222 /* Having a missing entry is valid; return silently */
3223 if (!gpio_is_valid(gpio))
3226 if (mmc_gpio_request_cd(mmc, gpio, 0))
3227 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * Threaded IRQ handler for the GPIO card-detect line: re-arm the trigger
 * for the opposite level, wake the system, and queue detect work unless
 * rescan is disabled (suspend path handles it then). NOTE(review):
 * incomplete extraction — braces and the return were dropped; tokens
 * byte-identical.
 */
3230 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3232 struct mmc_host *mmc = dev_id;
3233 struct dw_mci_slot *slot = mmc_priv(mmc);
3234 struct dw_mci *host = slot->host;
3235 int gpio_cd = slot->cd_gpio;
/* Level-triggered line: flip the trigger so the next edge fires too. */
3237 (gpio_get_value(gpio_cd) == 0) ?
3238 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
3239 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
3241 /* wakeup system whether gpio debounce or not */
3242 rk_send_wakeup_key();
3244 /* no need to trigger detect flow when rescan is disabled.
3245 This case happended in dpm, that we just wakeup system and
3246 let suspend_post notify callback handle it.
3248 if(mmc->rescan_disable == 0)
3249 queue_work(host->card_workqueue, &host->card_work);
3251 printk("%s: rescan been disabled!\n", __FUNCTION__);
/*
 * Convert the card-detect GPIO to an IRQ, request it as a threaded,
 * level-low one-shot handler, and mark it wakeup-capable. NOTE(review):
 * incomplete extraction — braces, declarations and the devm_request args
 * continuation were dropped; tokens byte-identical.
 */
3256 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3257 struct mmc_host *mmc)
3259 struct dw_mci_slot *slot = mmc_priv(mmc);
3260 struct dw_mci *host = slot->host;
3264 /* Having a missing entry is valid; return silently */
3265 if (!gpio_is_valid(gpio))
3268 irq = gpio_to_irq(gpio);
3270 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3271 NULL, dw_mci_gpio_cd_irqt,
3272 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
3276 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3278 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3279 enable_irq_wake(irq);
3282 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/*
 * Tear-down counterpart of dw_mci_of_set_cd_gpio_irq: free the card-detect
 * IRQ and GPIO claimed against the mmc class device. NOTE(review):
 * incomplete extraction — braces/early-return dropped; tokens byte-identical.
 */
3286 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3287 struct mmc_host *mmc)
3289 if (!gpio_is_valid(gpio))
3292 if (gpio_to_irq(gpio) >= 0) {
3293 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3294 devm_gpio_free(&mmc->class_dev, gpio);
/*
 * !CONFIG_OF fallbacks: no-op stubs so non-DT builds link; bodies (returns)
 * were dropped in extraction. Tokens byte-identical.
 */
3297 #else /* CONFIG_OF */
3298 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3302 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3306 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3310 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3314 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3315 struct mmc_host *mmc)
3319 #endif /* CONFIG_OF */
/*
 * Resolve and apply the host's pinctrl states ("idle", "default", and for
 * SD slots the optional "udbg" uart-debug mapping); eMMC hosts are left
 * untouched. NOTE(review): incomplete extraction — braces/returns dropped;
 * tokens byte-identical.
 */
3321 /* @host: dw_mci host prvdata
3322 * Init pinctrl for each platform. Usually we assign
3323 * "defalut" tag for functional usage, "idle" tag for gpio
3324 * state and "udbg" tag for uart_dbg if any.
3326 static void dw_mci_init_pinctrl(struct dw_mci *host)
3328 /* Fixme: DON'T TOUCH EMMC SETTING! */
3329 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3332 /* Get pinctrl for DTS */
3333 host->pinctrl = devm_pinctrl_get(host->dev);
3334 if (IS_ERR(host->pinctrl)) {
3335 dev_err(host->dev, "%s: No pinctrl used!\n",
3336 mmc_hostname(host->mmc));
3340 /* Lookup idle state */
3341 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3342 PINCTRL_STATE_IDLE);
3343 if (IS_ERR(host->pins_idle)) {
3344 dev_err(host->dev, "%s: No idle tag found!\n",
3345 mmc_hostname(host->mmc));
3347 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3348 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3349 mmc_hostname(host->mmc));
3352 /* Lookup default state */
3353 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3354 PINCTRL_STATE_DEFAULT);
3355 if (IS_ERR(host->pins_default)) {
3356 dev_err(host->dev, "%s: No default pinctrl found!\n",
3357 mmc_hostname(host->mmc));
3359 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3360 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3361 mmc_hostname(host->mmc));
3364 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3365 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3366 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3367 if (IS_ERR(host->pins_udbg)) {
3368 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3369 mmc_hostname(host->mmc));
/* No card present at boot: route the pins to uart-debug immediately. */
3371 if (!dw_mci_get_cd(host->mmc))
3372 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3373 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3374 mmc_hostname(host->mmc));
/*
 * PM notifier: gate card rescans across suspend. On suspend-prepare set
 * rescan_disable and cancel pending detect work (releasing its wakelock);
 * on resume clear the flag and schedule a detect. NOTE(review): incomplete
 * extraction — switch framing and return dropped; tokens byte-identical.
 */
3379 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3380 unsigned long mode, void *unused)
3382 struct mmc_host *host = container_of(
3383 notify_block, struct mmc_host, pm_notify);
3384 unsigned long flags;
3387 case PM_HIBERNATION_PREPARE:
3388 case PM_SUSPEND_PREPARE:
3389 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3390 spin_lock_irqsave(&host->lock, flags);
3391 host->rescan_disable = 1;
3392 spin_unlock_irqrestore(&host->lock, flags);
/* Detect work was queued holding a wakelock — drop it if we cancelled. */
3393 if (cancel_delayed_work(&host->detect))
3394 wake_unlock(&host->detect_wake_lock);
3397 case PM_POST_SUSPEND:
3398 case PM_POST_HIBERNATION:
3399 case PM_POST_RESTORE:
3400 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3401 spin_lock_irqsave(&host->lock, flags);
3402 host->rescan_disable = 0;
3403 spin_unlock_irqrestore(&host->lock, flags);
3404 mmc_detect_change(host, 10);
/*
 * Allocate and configure one mmc_host slot: DT capability parsing, clock
 * range, OCR mask, PM notifier (SD only), gpio card-detect on low-end SoCs,
 * block-size limits (IDMAC / external-DMA / PIO variants), regulator,
 * pinctrl, then mmc_add_host(). Error paths unwind the PM notifier and
 * cd-gpio IRQ. NOTE(review): incomplete extraction — the leading numbers are
 * original line numbers; braces, several declarations (ctrl_id, bus_width,
 * freq[2], ret), case labels and error labels were dropped. Surviving tokens
 * are byte-identical.
 */
3410 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3412 struct mmc_host *mmc;
3413 struct dw_mci_slot *slot;
3414 const struct dw_mci_drv_data *drv_data = host->drv_data;
3419 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3423 slot = mmc_priv(mmc);
3427 host->slot[id] = slot;
3430 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3432 mmc->ops = &dw_mci_ops;
/* Clock range from DT "clock-freq-min-max", else driver defaults. */
3434 if (of_property_read_u32_array(host->dev->of_node,
3435 "clock-freq-min-max", freq, 2)) {
3436 mmc->f_min = DW_MCI_FREQ_MIN;
3437 mmc->f_max = DW_MCI_FREQ_MAX;
3439 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3440 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3442 mmc->f_min = freq[0];
3443 mmc->f_max = freq[1];
3445 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3446 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3449 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
/* Restrict each controller instance to one card type via DT tags. */
3451 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3452 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3453 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3454 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3455 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3456 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3458 if (of_find_property(host->dev->of_node, "supports-tSD", NULL))
3459 mmc->restrict_caps |= RESTRICT_CARD_TYPE_TSD;
/* Only removable SD cares about suspend/resume rescan gating. */
3461 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3462 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3463 if (register_pm_notifier(&mmc->pm_notify)) {
3464 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3465 goto err_pm_notifier;
3469 /* We assume only low-level chip use gpio_cd */
3470 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
3471 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3472 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3473 if (gpio_is_valid(slot->cd_gpio)) {
3474 /* Request gpio int for card detection */
3475 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3477 slot->cd_gpio = -ENODEV;
3478 dev_err(host->dev, "failed to get your cd-gpios!\n");
3482 if (host->pdata->get_ocr)
3483 mmc->ocr_avail = host->pdata->get_ocr(id);
/* Default: advertise the full 1.65-3.6V OCR window. */
3486 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3487 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3488 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3489 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3493 * Start with slot power disabled, it will be enabled when a card
3496 if (host->pdata->setpower)
3497 host->pdata->setpower(id, 0);
3499 if (host->pdata->caps)
3500 mmc->caps = host->pdata->caps;
3502 if (host->pdata->pm_caps)
3503 mmc->pm_caps = host->pdata->pm_caps;
/* Controller index: DT "mshc" alias, else platform-device id. */
3505 if (host->dev->of_node) {
3506 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3510 ctrl_id = to_platform_device(host->dev)->id;
3512 if (drv_data && drv_data->caps)
3513 mmc->caps |= drv_data->caps[ctrl_id];
3514 if (drv_data && drv_data->hold_reg_flag)
3515 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3517 /* set the compatibility of driver. */
3518 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3519 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3521 if (host->pdata->caps2)
3522 mmc->caps2 = host->pdata->caps2;
/* Bus width: platform callback, else DT, else (dropped) default. */
3524 if (host->pdata->get_bus_wd)
3525 bus_width = host->pdata->get_bus_wd(slot->id);
3526 else if (host->dev->of_node)
3527 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3531 switch (bus_width) {
3533 mmc->caps |= MMC_CAP_8_BIT_DATA;
3535 mmc->caps |= MMC_CAP_4_BIT_DATA;
3538 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3539 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3540 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3541 mmc->caps |= MMC_CAP_SDIO_IRQ;
3542 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3543 mmc->caps |= MMC_CAP_HW_RESET;
3544 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3545 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3546 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3547 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3548 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3549 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3550 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3551 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3553 /*Assign pm_caps pass to pm_flags*/
3554 mmc->pm_flags = mmc->pm_caps;
/* Transfer-size limits: platform override, else per-DMA-mode defaults. */
3556 if (host->pdata->blk_settings) {
3557 mmc->max_segs = host->pdata->blk_settings->max_segs;
3558 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3559 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3560 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3561 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3563 /* Useful defaults if platform data is unset. */
3564 #ifdef CONFIG_MMC_DW_IDMAC
3565 mmc->max_segs = host->ring_size;
3566 mmc->max_blk_size = 65536;
3567 mmc->max_blk_count = host->ring_size;
3568 mmc->max_seg_size = 0x1000;
3569 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3570 if(cpu_is_rk3036() || cpu_is_rk312x()){
3571 /* fixup for external dmac setting */
3573 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3574 mmc->max_blk_count = 65535;
3575 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3576 mmc->max_seg_size = mmc->max_req_size;
3580 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3581 mmc->max_blk_count = 512;
3582 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3583 mmc->max_seg_size = mmc->max_req_size;
3584 #endif /* CONFIG_MMC_DW_IDMAC */
3588 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3590 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* SD slots may be supplied by a "vmmc" regulator; enable it if found. */
3595 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3596 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3600 if (IS_ERR(host->vmmc)) {
3601 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3604 ret = regulator_enable(host->vmmc);
3607 "failed to enable regulator: %d\n", ret);
3614 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3616 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3617 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3619 dw_mci_init_pinctrl(host);
3620 ret = mmc_add_host(mmc);
3624 #if defined(CONFIG_DEBUG_FS)
3625 dw_mci_init_debugfs(slot);
3628 /* Card initially undetected */
3629 slot->last_detect_state = 1;
/* Error unwinding (labels dropped in extraction): undo PM notifier and cd IRQ. */
3633 unregister_pm_notifier(&mmc->pm_notify);
3636 if (gpio_is_valid(slot->cd_gpio))
3637 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * Tear down one slot: run the platform exit hook, detach from the MMC core
 * and free the host structure. NOTE(review): incomplete extraction —
 * braces dropped; tokens byte-identical.
 */
3642 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3644 /* Shutdown detect IRQ */
3645 if (slot->host->pdata->exit)
3646 slot->host->pdata->exit(id);
3648 /* Debugfs stuff is cleaned up by mmc core */
3649 mmc_remove_host(slot->mmc);
3650 slot->host->slot[id] = NULL;
3651 mmc_free_host(slot->mmc);
/*
 * Set up DMA: allocate a page of coherent memory for descriptor/sg
 * translation, pick the external-EDMA ops on rk3036/rk312x or internal
 * IDMAC otherwise, validate the ops table and call its init hook; any
 * failure falls back to PIO. NOTE(review): incomplete extraction — braces,
 * gotos and the no_dma label were dropped; tokens byte-identical.
 */
3654 static void dw_mci_init_dma(struct dw_mci *host)
3656 /* Alloc memory for sg translation */
3657 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3658 &host->sg_dma, GFP_KERNEL);
3659 if (!host->sg_cpu) {
3660 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3665 memset(host->sg_cpu, 0, PAGE_SIZE);
3668 /* Determine which DMA interface to use */
3669 #if defined(CONFIG_MMC_DW_IDMAC)
3670 if(cpu_is_rk3036() || cpu_is_rk312x()){
3671 host->dma_ops = &dw_mci_edmac_ops;
3672 dev_info(host->dev, "Using external DMA controller.\n");
3674 host->dma_ops = &dw_mci_idmac_ops;
3675 dev_info(host->dev, "Using internal DMA controller.\n");
/* Require a complete ops table before trusting DMA mode. */
3682 if (host->dma_ops->init && host->dma_ops->start &&
3683 host->dma_ops->stop && host->dma_ops->cleanup) {
3684 if (host->dma_ops->init(host)) {
3685 dev_err(host->dev, "%s: Unable to initialize "
3686 "DMA Controller.\n", __func__);
3690 dev_err(host->dev, "DMA initialization not found.\n");
3698 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Assert the requested reset bits in CTRL and poll (500 ms budget) until
 * the hardware self-clears them; logs a timeout otherwise. NOTE(review):
 * incomplete extraction — braces, the ctrl |= reset line, returns and the
 * dev_err framing were dropped; tokens byte-identical.
 */
3703 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3705 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3708 ctrl = mci_readl(host, CTRL);
3710 mci_writel(host, CTRL, ctrl);
3712 /* wait till resets clear */
3714 ctrl = mci_readl(host, CTRL);
/* Hardware clears the reset bits when the reset has completed. */
3715 if (!(ctrl & reset))
3717 } while (time_before(jiffies, timeout));
3720 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * Stop the scatterlist iterator (a FIFO reset raises a block interrupt)
 * and reset just the FIFO via dw_mci_ctrl_reset. NOTE(review): incomplete
 * extraction — braces and the host->sg = NULL line were dropped; tokens
 * byte-identical.
 */
3726 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3729 * Reseting generates a block interrupt, hence setting
3730 * the scatter-gather pointer to NULL.
3733 sg_miter_stop(&host->sg_miter);
3737 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/*
 * Full controller reset: FIFO + DMA (+ the dropped SDMMC_CTRL_RESET line)
 * in one dw_mci_ctrl_reset call. Tokens byte-identical.
 */
3740 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3742 return dw_mci_ctrl_reset(host,
3743 SDMMC_CTRL_FIFO_RESET |
3745 SDMMC_CTRL_DMA_RESET);
/* Table mapping host-level DT property names to host quirk flags. */
3750 static struct dw_mci_of_quirks {
3755 .quirk = "broken-cd",
3756 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board from the device-tree node: slot count, quirk flags,
 * FIFO depth, detect delay, bus clock, driver-specific parse hook, and a
 * long list of capability properties. Returns ERR_PTR on allocation or
 * parse-hook failure. NOTE(review): incomplete extraction — braces, the
 * idx/ret declarations and the final return pdata were dropped; tokens
 * byte-identical.
 */
3760 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3762 struct dw_mci_board *pdata;
3763 struct device *dev = host->dev;
3764 struct device_node *np = dev->of_node;
3765 const struct dw_mci_drv_data *drv_data = host->drv_data;
3767 u32 clock_frequency;
3769 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3771 dev_err(dev, "could not allocate memory for pdata\n");
3772 return ERR_PTR(-ENOMEM);
3775 /* find out number of slots supported */
3776 if (of_property_read_u32(dev->of_node, "num-slots",
3777 &pdata->num_slots)) {
3778 dev_info(dev, "num-slots property not found, "
3779 "assuming 1 slot is available\n");
3780 pdata->num_slots = 1;
/* Translate DT quirk properties into pdata->quirks flags. */
3784 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3785 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3786 pdata->quirks |= of_quirks[idx].id;
3789 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3790 dev_info(dev, "fifo-depth property not found, using "
3791 "value of FIFOTH register as default\n");
3793 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3795 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3796 pdata->bus_hz = clock_frequency;
/* Give the SoC-specific driver a chance to parse its own properties. */
3798 if (drv_data && drv_data->parse_dt) {
3799 ret = drv_data->parse_dt(host);
3801 return ERR_PTR(ret);
3804 if (of_find_property(np, "keep-power-in-suspend", NULL))
3805 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3807 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3808 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3810 if (of_find_property(np, "supports-highspeed", NULL))
3811 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3813 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3814 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3816 if (of_find_property(np, "supports-DDR_MODE", NULL))
3817 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3819 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3820 pdata->caps2 |= MMC_CAP2_HS200;
3822 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3823 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3825 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3826 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3828 if (of_get_property(np, "cd-inverted", NULL))
3829 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3830 if (of_get_property(np, "bootpart-no-access", NULL))
3831 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3836 #else /* CONFIG_OF */
3837 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3839 return ERR_PTR(-EINVAL);
3841 #endif /* CONFIG_OF */
/*
 * dw_mci_probe() - bring up one DesignWare MCI controller instance.
 *
 * Sequence: parse DT/platform data, detect the IP version (data register
 * offset changed in 2.40a), acquire and enable the hpclk/hclk/clk clocks,
 * program the CIU clock rate, run variant clock setup, size the host data
 * width from HCON, reset all blocks, set up DMA, mask/clear interrupts,
 * program FIFO thresholds, create the card workqueue, request the IRQ,
 * initialize each slot, and finally unmask the working interrupt set.
 *
 * Returns 0 on success, negative errno otherwise; the tail labels unwind
 * the workqueue, DMA, regulator and clocks in reverse order.
 *
 * NOTE(review): this extracted listing has lost blank lines, many braces,
 * goto labels and the original indentation (each code line still carries
 * its old line number). Only comments are added below; code text is left
 * byte-identical. Restore from the project tree before building.
 */
3843 int dw_mci_probe(struct dw_mci *host)
3845 const struct dw_mci_drv_data *drv_data = host->drv_data;
3846 int width, i, ret = 0;
3852 host->pdata = dw_mci_parse_dt(host);
3853 if (IS_ERR(host->pdata)) {
3854 dev_err(host->dev, "platform data not available\n");
/* Multi-slot controllers need a slot mux callback from platform data. */
3859 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3861 "Platform data must supply select_slot function\n");
3866 * In 2.40a spec, Data offset is changed.
3867 * Need to check the version-id and set data-offset for DATA register.
3869 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3870 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3872 if (host->verid < DW_MMC_240A)
3873 host->data_offset = DATA_OFFSET;
3875 host->data_offset = DATA_240A_OFFSET;
/* Clock chain: hpclk (optional-looking here), hclk (AHB), clk (CIU). */
3878 host->hpclk_mmc= devm_clk_get(host->dev, "hpclk_mmc");
3879 if (IS_ERR(host->hpclk_mmc)) {
3880 dev_err(host->dev, "failed to get hpclk_mmc\n");
3882 clk_prepare_enable(host->hpclk_mmc);
3886 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3887 if (IS_ERR(host->hclk_mmc)) {
3888 dev_err(host->dev, "failed to get hclk_mmc\n");
3889 ret = PTR_ERR(host->hclk_mmc);
3893 clk_prepare_enable(host->hclk_mmc);
3896 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3897 if (IS_ERR(host->clk_mmc)) {
3898 dev_err(host->dev, "failed to get clk mmc_per\n");
3899 ret = PTR_ERR(host->clk_mmc);
3903 host->bus_hz = host->pdata->bus_hz;
3904 if (!host->bus_hz) {
3905 dev_err(host->dev,"Platform data must supply bus speed\n");
/* Older IP takes bus_hz directly; newer parts have a fixed /2 divider
 * upstream of the controller, so request double the rate. */
3910 if (host->verid < DW_MMC_240A)
3911 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3913 //rockchip: fix divider 2 in clksum before controlller
3914 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3917 dev_err(host->dev, "failed to set clk mmc\n");
3920 clk_prepare_enable(host->clk_mmc);
3922 if (drv_data && drv_data->setup_clock) {
3923 ret = drv_data->setup_clock(host);
3926 "implementation specific clock setup failed\n");
3931 host->quirks = host->pdata->quirks;
3932 host->irq_state = true;
3933 host->set_speed = 0;
3935 host->svi_flags = 0;
3937 spin_lock_init(&host->lock);
3938 spin_lock_init(&host->slock);
3940 INIT_LIST_HEAD(&host->queue);
3942 * Get the host data width - this assumes that HCON has been set with
3943 * the correct values.
3945 i = (mci_readl(host, HCON) >> 7) & 0x7;
/* HCON data-width code: 0 => 16-bit, 2 => 64-bit, 1 => 32-bit; anything
 * else is reserved and falls back to 32-bit with a warning. */
3947 host->push_data = dw_mci_push_data16;
3948 host->pull_data = dw_mci_pull_data16;
3950 host->data_shift = 1;
3951 } else if (i == 2) {
3952 host->push_data = dw_mci_push_data64;
3953 host->pull_data = dw_mci_pull_data64;
3955 host->data_shift = 3;
3957 /* Check for a reserved value, and warn if it is */
3959 "HCON reports a reserved host data width!\n"
3960 "Defaulting to 32-bit access.\n");
3961 host->push_data = dw_mci_push_data32;
3962 host->pull_data = dw_mci_pull_data32;
3964 host->data_shift = 2;
3967 /* Reset all blocks */
3968 if (!dw_mci_ctrl_all_reset(host))
3971 host->dma_ops = host->pdata->dma_ops;
3972 dw_mci_init_dma(host);
3974 /* Clear the interrupts for the host controller */
3975 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3976 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3978 /* Put in max timeout */
3979 mci_writel(host, TMOUT, 0xFFFFFFFF);
3982 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3983 * Tx Mark = fifo_size / 2 DMA Size = 8
3985 if (!host->pdata->fifo_depth) {
3987 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3988 * have been overwritten by the bootloader, just like we're
3989 * about to do, so if you know the value for your hardware, you
3990 * should put it in the platform data.
3992 fifo_size = mci_readl(host, FIFOTH);
3993 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3995 fifo_size = host->pdata->fifo_depth;
3997 host->fifo_depth = fifo_size;
3999 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
4000 mci_writel(host, FIFOTH, host->fifoth_val);
4002 /* disable clock to CIU */
4003 mci_writel(host, CLKENA, 0);
4004 mci_writel(host, CLKSRC, 0);
/* Bottom-half plumbing: tasklet for request state machine, ordered
 * single-threaded workqueue for card-detect work, then the hard IRQ. */
4006 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
4007 host->card_workqueue = alloc_workqueue("dw-mci-card",
4008 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
4009 if (!host->card_workqueue) {
4013 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
4014 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
4015 host->irq_flags, "dw-mci", host);
/* Slot count: platform data wins, otherwise derive from HCON. */
4019 if (host->pdata->num_slots)
4020 host->num_slots = host->pdata->num_slots;
4022 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
4024 /* We need at least one slot to succeed */
4025 for (i = 0; i < host->num_slots; i++) {
4026 ret = dw_mci_init_slot(host, i);
4028 dev_dbg(host->dev, "slot %d init failed\n", i);
4034 * Enable interrupts for command done, data over, data empty, card det,
4035 * receive ready and error such as transmit, receive timeout, crc error
4037 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4038 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
4039 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect interrupt only for removable media (not SDIO/eMMC). */
4040 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
4041 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
4042 regs |= SDMMC_INT_CD;
4044 mci_writel(host, INTMASK, regs);
4046 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4048 dev_info(host->dev, "DW MMC controller at irq %d, "
4049 "%d bit host data width, "
4051 host->irq, width, fifo_size);
4054 dev_info(host->dev, "%d slots initialized\n", init_slots);
4056 dev_dbg(host->dev, "attempted to initialize %d slots, "
4057 "but failed on all\n", host->num_slots);
4062 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4063 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* Error unwind (labels lost in this listing): reverse order of setup. */
4068 destroy_workqueue(host->card_workqueue);
4071 if (host->use_dma && host->dma_ops->exit)
4072 host->dma_ops->exit(host);
4075 regulator_disable(host->vmmc);
4076 regulator_put(host->vmmc);
4080 if (!IS_ERR(host->clk_mmc))
4081 clk_disable_unprepare(host->clk_mmc);
4083 if (!IS_ERR(host->hclk_mmc))
4084 clk_disable_unprepare(host->hclk_mmc);
4087 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove() - tear down a controller instance (reverse of probe).
 *
 * Masks/clears interrupts, cleans up each slot, gates the CIU clock,
 * destroys the card workqueue, drops the PM notifier and cd-gpio IRQ
 * where applicable, then releases the regulator and the three clocks.
 *
 * NOTE(review): listing has lost blank lines/braces and the original
 * indentation (stale line numbers remain); comments only added here.
 */
4089 void dw_mci_remove(struct dw_mci *host)
4091 struct mmc_host *mmc = host->mmc;
4092 struct dw_mci_slot *slot = mmc_priv(mmc);
/* Quiesce the controller before touching anything else. */
4095 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4096 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4098 for(i = 0; i < host->num_slots; i++){
4099 dev_dbg(host->dev, "remove slot %d\n", i);
4101 dw_mci_cleanup_slot(host->slot[i], i);
4104 /* disable clock to CIU */
4105 mci_writel(host, CLKENA, 0);
4106 mci_writel(host, CLKSRC, 0);
4108 destroy_workqueue(host->card_workqueue);
/* SD (removable) hosts registered a PM notifier; undo it. */
4109 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4110 unregister_pm_notifier(&host->mmc->pm_notify);
4112 if (host->use_dma && host->dma_ops->exit)
4113 host->dma_ops->exit(host);
4115 if (gpio_is_valid(slot->cd_gpio))
4116 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4119 regulator_disable(host->vmmc);
4120 regulator_put(host->vmmc);
/* Release clocks in reverse of the probe enable order. */
4122 if (!IS_ERR(host->clk_mmc))
4123 clk_disable_unprepare(host->clk_mmc);
4125 if (!IS_ERR(host->hclk_mmc))
4126 clk_disable_unprepare(host->hclk_mmc);
4127 if (!IS_ERR(host->hpclk_mmc))
4128 clk_disable_unprepare(host->hpclk_mmc);
4130 EXPORT_SYMBOL(dw_mci_remove);
4134 #ifdef CONFIG_PM_SLEEP
4136 * TODO: we should probably disable the clock to the card in the suspend path.
/* Provided by the Rockchip WLAN glue; used to special-case certain
 * SDIO Wi-Fi modules that must stay powered across suspend. */
4138 extern int get_wifi_chip_type(void);
/*
 * dw_mci_suspend() - system-sleep suspend for one controller.
 *
 * Skips suspend entirely for ESP8089/Realtek SDIO Wi-Fi (those modules
 * are kept alive), disables the vmmc regulator, and for the SD (card
 * slot) controller additionally disables the IRQ, switches pins to the
 * idle pinctrl state, masks the controller, and arms the card-detect
 * GPIO as a wakeup source (except on SoCs already in gpio_cd mode).
 *
 * NOTE(review): listing has lost blank lines/braces and indentation;
 * comments only added here, code left byte-identical.
 */
4139 int dw_mci_suspend(struct dw_mci *host)
4141 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4142 (get_wifi_chip_type() == WIFI_ESP8089 || get_wifi_chip_type() == WIFI_RTKWIFI))
4146 regulator_disable(host->vmmc);
4148 /*only for sdmmc controller*/
4149 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4150 disable_irq(host->irq);
4151 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4152 MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
4153 mmc_hostname(host->mmc));
/* Fully mask and disable the controller while asleep. */
4155 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4156 mci_writel(host, INTMASK, 0x00);
4157 mci_writel(host, CTRL, 0x00);
4159 /* Soc rk3126/3036 already in gpio_cd mode */
4160 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4161 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4162 enable_irq_wake(host->mmc->slot.cd_irq);
4167 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume() - system-sleep resume, mirror of dw_mci_suspend().
 *
 * Skips resume for the same SDIO Wi-Fi modules that skipped suspend and
 * for SDIO slots with no card present. For the SD controller it disarms
 * the cd-gpio wakeup, restores the default pinctrl state and reverts the
 * SoC-specific GRF card-detect mux, then re-enables vmmc, resets the
 * controller, re-inits DMA, restores FIFOTH/TMOUT, rebuilds the
 * interrupt mask, re-enables the IRQ and reprograms each powered slot.
 *
 * NOTE(review): listing has lost blank lines/braces/labels and the
 * original indentation; comments only added here, code byte-identical.
 */
4169 int dw_mci_resume(struct dw_mci *host)
4171 int i, ret, retry_cnt = 0;
4173 struct dw_mci_slot *slot;
4175 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4176 (get_wifi_chip_type() == WIFI_ESP8089 || get_wifi_chip_type() == WIFI_RTKWIFI))
/* Nothing to restore for an SDIO slot with no card bound. */
4181 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
4182 slot = mmc_priv(host->mmc);
4183 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4187 /*only for sdmmc controller*/
4188 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4189 /* Soc rk3126/3036 already in gpio_cd mode */
4190 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4191 disable_irq_wake(host->mmc->slot.cd_irq);
4192 mmc_gpio_free_cd(host->mmc);
4194 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4195 MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
4196 mmc_hostname(host->mmc));
/* Per-SoC GRF writes: route card-detect back to the controller
 * (write-enable mask in the high half-word, value in the low). */
4200 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4201 else if(cpu_is_rk3036())
4202 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4203 else if(cpu_is_rk312x())
4204 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4205 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4206 else if(host->cid == DW_MCI_TYPE_RK3368)
4207 regmap_write(host->grf, 0x43c, ((1 << 13) << 16) | (0 << 13));
4210 ret = regulator_enable(host->vmmc);
4213 "failed to enable regulator: %d\n", ret);
/* Controller must come out of reset before reprogramming it. */
4218 if(!dw_mci_ctrl_all_reset(host)){
4223 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
4224 if(host->use_dma && host->dma_ops->init)
4225 host->dma_ops->init(host);
4228 * Restore the initial value at FIFOTH register
4229 * And Invalidate the prev_blksz with zero
4231 mci_writel(host, FIFOTH, host->fifoth_val);
4232 host->prev_blksz = 0;
4233 /* Put in max timeout */
4234 mci_writel(host, TMOUT, 0xFFFFFFFF);
4236 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4237 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
4239 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4240 regs |= SDMMC_INT_CD;
4241 mci_writel(host, INTMASK, regs);
4242 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4243 /*only for sdmmc controller*/
/* Only re-enable the IRQ on the first (non-retry) pass. */
4244 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
4245 enable_irq(host->irq);
4248 for(i = 0; i < host->num_slots; i++){
4249 struct dw_mci_slot *slot = host->slot[i];
/* Slots that kept power need their ios/bus settings reapplied. */
4252 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4253 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4254 dw_mci_setup_bus(slot, true);
4260 EXPORT_SYMBOL(dw_mci_resume);
4261 #endif /* CONFIG_PM_SLEEP */
4263 static int __init dw_mci_init(void)
4265 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4269 static void __exit dw_mci_exit(void)
4273 module_init(dw_mci_init);
4274 module_exit(dw_mci_exit);
4276 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4277 MODULE_AUTHOR("NXP Semiconductor VietNam");
4278 MODULE_AUTHOR("Imagination Technologies Ltd");
4279 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4280 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4281 MODULE_LICENSE("GPL v2");