2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
50 #include "rk_sdmmc_dbg.h"
51 #include <linux/regulator/rockchip_io_vol_domain.h>
52 #include "../../clk/rockchip/clk-ops.h"
54 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
56 /* Common flag combinations */
57 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
58 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
60 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
62 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
63 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
64 #define DW_MCI_SEND_STATUS 1
65 #define DW_MCI_RECV_STATUS 2
66 #define DW_MCI_DMA_THRESHOLD 16
68 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
69 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
71 /* The spec gives a maximum of 250 ms; the value may need adapting for misbehaving cards. */
72 #define SDMMC_DATA_TIMEOUT_SD 500
73 #define SDMMC_DATA_TIMEOUT_SDIO 250
74 #define SDMMC_DATA_TIMEOUT_EMMC 2500
76 #define SDMMC_CMD_RTO_MAX_HOLD 200
77 #define SDMMC_WAIT_FOR_UNBUSY 2500
79 #ifdef CONFIG_MMC_DW_IDMAC
80 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
81 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
82 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
86 u32 des0; /* Control Descriptor */
87 #define IDMAC_DES0_DIC BIT(1)
88 #define IDMAC_DES0_LD BIT(2)
89 #define IDMAC_DES0_FD BIT(3)
90 #define IDMAC_DES0_CH BIT(4)
91 #define IDMAC_DES0_ER BIT(5)
92 #define IDMAC_DES0_CES BIT(30)
93 #define IDMAC_DES0_OWN BIT(31)
95 u32 des1; /* Buffer sizes */
96 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
97 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
99 u32 des2; /* buffer 1 physical address */
101 u32 des3; /* buffer 2 physical address */
103 #endif /* CONFIG_MMC_DW_IDMAC */
/* Reference tuning-block pattern for a 4-bit bus width; compared against
 * the data returned by the card's tuning command to pick a sample phase.
 * NOTE(review): matches the standard SD/eMMC tuning pattern -- confirm. */
105 static const u8 tuning_blk_pattern_4bit[] = {
106 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
107 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
108 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
109 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
110 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
111 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
112 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
113 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Reference tuning-block pattern for an 8-bit bus width (eMMC HS200-style
 * tuning); twice the length of the 4-bit pattern. */
116 static const u8 tuning_blk_pattern_8bit[] = {
117 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
118 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
119 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
120 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
121 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
122 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
123 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
124 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
125 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
126 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
127 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
128 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
129 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
130 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
131 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
132 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
135 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
136 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
137 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
138 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
140 /*printk the all register of current host*/
/* Debug helper: dump the controller's register set to the kernel log by
 * walking the file-global dw_mci_regs name/address table until its
 * NULL-name sentinel, reading each register via mci_readreg(). */
142 static int dw_mci_regs_printk(struct dw_mci *host)
144 struct sdmmc_reg *regs = dw_mci_regs;
146 while( regs->name != 0 ){
147 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
150 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
155 #if defined(CONFIG_DEBUG_FS)
/* debugfs "req" show callback: print a snapshot of the slot's current
 * mmc_request (command, data and stop phases).  The host lock is held
 * around the whole dump so the snapshot is consistent. */
156 static int dw_mci_req_show(struct seq_file *s, void *v)
158 struct dw_mci_slot *slot = s->private;
159 struct mmc_request *mrq;
160 struct mmc_command *cmd;
161 struct mmc_command *stop;
162 struct mmc_data *data;
164 /* Make sure we get a consistent snapshot */
165 spin_lock_bh(&slot->host->lock);
175 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
176 cmd->opcode, cmd->arg, cmd->flags,
177 cmd->resp[0], cmd->resp[1], cmd->resp[2],
/* NOTE(review): resp[2] is printed twice; the fourth value was
 * presumably meant to be resp[3] -- confirm and fix. */
178 cmd->resp[2], cmd->error);
180 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
181 data->bytes_xfered, data->blocks,
182 data->blksz, data->flags, data->error);
185 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
186 stop->opcode, stop->arg, stop->flags,
187 stop->resp[0], stop->resp[1], stop->resp[2],
/* NOTE(review): same resp[2]-twice issue as above for the stop command. */
188 stop->resp[2], stop->error);
191 spin_unlock_bh(&slot->host->lock);
/* seq_file single_open() adapter for the debugfs "req" file. */
196 static int dw_mci_req_open(struct inode *inode, struct file *file)
198 return single_open(file, dw_mci_req_show, inode->i_private);
/* File operations for the debugfs "req" node (read-only via seq_file). */
201 static const struct file_operations dw_mci_req_fops = {
202 .owner = THIS_MODULE,
203 .open = dw_mci_req_open,
206 .release = single_release,
/* debugfs "regs" show callback: print a handful of controller registers.
 * NOTE(review): the arguments are the SDMMC_* register macros themselves,
 * not mci_readl() results, so this appears to print register offsets
 * rather than live contents -- confirm intent. */
209 static int dw_mci_regs_show(struct seq_file *s, void *v)
211 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
212 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
213 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
214 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
215 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
216 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* seq_file single_open() adapter for the debugfs "regs" file. */
221 static int dw_mci_regs_open(struct inode *inode, struct file *file)
223 return single_open(file, dw_mci_regs_show, inode->i_private);
/* File operations for the debugfs "regs" node (read-only via seq_file). */
226 static const struct file_operations dw_mci_regs_fops = {
227 .owner = THIS_MODULE,
228 .open = dw_mci_regs_open,
231 .release = single_release,
/* Create the per-slot debugfs nodes under the mmc core's debugfs root:
 * "regs" and "req" files plus raw views of the host state machine and
 * pending/completed event bitmasks.  On any failure only an error is
 * logged; debugfs is best-effort. */
234 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
236 struct mmc_host *mmc = slot->mmc;
237 struct dw_mci *host = slot->host;
241 root = mmc->debugfs_root;
245 node = debugfs_create_file("regs", S_IRUSR, root, host,
250 node = debugfs_create_file("req", S_IRUSR, root, slot,
255 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
259 node = debugfs_create_x32("pending_events", S_IRUSR, root,
260 (u32 *)&host->pending_events);
264 node = debugfs_create_x32("completed_events", S_IRUSR, root,
265 (u32 *)&host->completed_events);
272 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
274 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the controller's data/response timeout register (TMOUT) to its
 * maximum value; actual timeouts are enforced in software elsewhere. */
276 static void dw_mci_set_timeout(struct dw_mci *host)
278 /* timeout (maximum) */
279 mci_writel(host, TMOUT, 0xffffffff);
/* Translate an mmc_command into the controller's CMD register flags:
 * stop/prv-data-wait bits, response expectation/length/CRC bits, and the
 * data direction/stream bits when a data phase is attached.  Gives the
 * platform drv_data a final chance to adjust the flags.  Returns the
 * assembled CMD register value. */
282 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
284 struct mmc_data *data;
285 struct dw_mci_slot *slot = mmc_priv(mmc);
286 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
288 cmd->error = -EINPROGRESS;
292 if (cmdr == MMC_STOP_TRANSMISSION)
293 cmdr |= SDMMC_CMD_STOP;
295 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
297 if (cmd->flags & MMC_RSP_PRESENT) {
298 /* We expect a response, so set this bit */
299 cmdr |= SDMMC_CMD_RESP_EXP;
300 if (cmd->flags & MMC_RSP_136)
301 cmdr |= SDMMC_CMD_RESP_LONG;
304 if (cmd->flags & MMC_RSP_CRC)
305 cmdr |= SDMMC_CMD_RESP_CRC;
309 cmdr |= SDMMC_CMD_DAT_EXP;
310 if (data->flags & MMC_DATA_STREAM)
311 cmdr |= SDMMC_CMD_STRM_MODE;
312 if (data->flags & MMC_DATA_WRITE)
313 cmdr |= SDMMC_CMD_DAT_WR;
316 if (drv_data && drv_data->prepare_command)
317 drv_data->prepare_command(slot->host, &cmdr);
/* Build the synthetic stop/abort command used when a transfer must be
 * aborted: CMD12 (STOP_TRANSMISSION) for block read/write opcodes, or a
 * CCCR-abort CMD52 write for SDIO CMD53 transfers.  Fills in
 * host->stop_abort and returns its CMD register encoding. */
323 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
325 struct mmc_command *stop;
331 stop = &host->stop_abort;
333 memset(stop, 0, sizeof(struct mmc_command));
335 if (cmdr == MMC_READ_SINGLE_BLOCK ||
336 cmdr == MMC_READ_MULTIPLE_BLOCK ||
337 cmdr == MMC_WRITE_BLOCK ||
338 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
339 stop->opcode = MMC_STOP_TRANSMISSION;
341 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
342 } else if (cmdr == SD_IO_RW_EXTENDED) {
343 stop->opcode = SD_IO_RW_DIRECT;
/* Write (bit 31) to the CCCR ABORT register, preserving the function
 * number from the original CMD53 argument (bits 28..30). */
344 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
345 ((cmd->arg >> 28) & 0x7);
346 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
351 cmdr = stop->opcode | SDMMC_CMD_STOP |
352 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/* Issue @cmd on the controller: write CMDARG then CMD with the START bit.
 * CMD11 (voltage switch) gets special handling -- low-power clock gating
 * is disabled first and the VOLT_SWITCH flag is added.  USE_HOLD_REG is
 * set when the platform requires it (e.g. RK3188). */
357 static void dw_mci_start_command(struct dw_mci *host,
358 struct mmc_command *cmd, u32 cmd_flags)
360 struct dw_mci_slot *slot = host->slot[0];
361 /*temporality fix slot[0] due to host->num_slots equal to 1*/
363 host->pre_cmd = host->cmd;
366 "start command: ARGR=0x%08x CMDR=0x%08x\n",
367 cmd->arg, cmd_flags);
369 if(SD_SWITCH_VOLTAGE == cmd->opcode){
370 /*confirm non-low-power mode*/
371 mci_writel(host, CMDARG, 0);
372 dw_mci_disable_low_power(slot);
374 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
375 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
377 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
380 mci_writel(host, CMDARG, cmd->arg);
383 /* fix the value to 1 in some Soc,for example RK3188. */
384 if(host->mmc->hold_reg_flag)
385 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
/* Writing the START bit kicks off the command state machine. */
387 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the pre-computed stop command (data->stop) using the CMD flags
 * cached in host->stop_cmdr. */
391 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
393 dw_mci_start_command(host, data->stop, host->stop_cmdr);
396 /* DMA interface functions */
/* Abort an in-flight DMA transfer and mark the transfer phase complete so
 * the tasklet can advance the state machine.  On RK3036/RK312x the
 * external DMA is deliberately not terminated (see Fixme). */
397 static void dw_mci_stop_dma(struct dw_mci *host)
399 if (host->using_dma) {
400 /* Fixme: No need to terminate edma, may cause flush op */
401 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
402 host->dma_ops->stop(host);
403 host->dma_ops->cleanup(host);
406 /* Data transfer was stopped by the interrupt handler */
407 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Map the mmc data direction onto the DMA API direction constant. */
410 static int dw_mci_get_dma_dir(struct mmc_data *data)
412 if (data->flags & MMC_DATA_WRITE)
413 return DMA_TO_DEVICE;
415 return DMA_FROM_DEVICE;
418 #ifdef CONFIG_MMC_DW_IDMAC
/* Unmap the scatterlist of the current data transfer, unless the mapping
 * is owned by the pre_req/post_req path (host_cookie != 0). */
419 static void dw_mci_dma_cleanup(struct dw_mci *host)
421 struct mmc_data *data = host->data;
424 if (!data->host_cookie)
425 dma_unmap_sg(host->dev,
428 dw_mci_get_dma_dir(data));
/* Soft-reset the internal DMA controller by setting the SWRESET bit in
 * the BMOD register. */
431 static void dw_mci_idmac_reset(struct dw_mci *host)
433 u32 bmod = mci_readl(host, BMOD);
434 /* Software reset of DMA */
435 bmod |= SDMMC_IDMAC_SWRESET;
436 mci_writel(host, BMOD, bmod);
/* Stop the internal DMAC: detach it from the host controller (clear
 * USE_IDMAC, pulse DMA_RESET in CTRL), then disable and soft-reset the
 * IDMAC engine itself via BMOD. */
439 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
443 /* Disable and reset the IDMAC interface */
444 temp = mci_readl(host, CTRL);
445 temp &= ~SDMMC_CTRL_USE_IDMAC;
446 temp |= SDMMC_CTRL_DMA_RESET;
447 mci_writel(host, CTRL, temp);
449 /* Stop the IDMAC running */
450 temp = mci_readl(host, BMOD);
451 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
452 temp |= SDMMC_IDMAC_SWRESET;
453 mci_writel(host, BMOD, temp);
/* IDMAC completion callback: unmap buffers, flag XFER_COMPLETE and kick
 * the state-machine tasklet.  @arg is the dw_mci host. */
456 static void dw_mci_idmac_complete_dma(void *arg)
458 struct dw_mci *host = arg;
459 struct mmc_data *data = host->data;
461 dev_vdbg(host->dev, "DMA complete\n");
464 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
465 host->mrq->cmd->opcode,host->mrq->cmd->arg,
466 data->blocks,data->blksz,mmc_hostname(host->mmc));
469 host->dma_ops->cleanup(host);
472 * If the card was removed, data will be NULL. No point in trying to
473 * send the stop command or waiting for NBUSY in this case.
476 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
477 tasklet_schedule(&host->tasklet);
/* Fill the IDMAC descriptor ring from the (already DMA-mapped)
 * scatterlist: one chained descriptor per sg entry, then mark the first
 * descriptor FD and rewrite the last one as LD with interrupts enabled. */
481 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
485 struct idmac_desc *desc = host->sg_cpu;
487 for (i = 0; i < sg_len; i++, desc++) {
488 unsigned int length = sg_dma_len(&data->sg[i]);
489 u32 mem_addr = sg_dma_address(&data->sg[i]);
491 /* Set the OWN bit and disable interrupts for this descriptor */
492 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
495 IDMAC_SET_BUFFER1_SIZE(desc, length);
497 /* Physical address to DMA to/from */
498 desc->des2 = mem_addr;
501 /* Set first descriptor */
503 desc->des0 |= IDMAC_DES0_FD;
505 /* Set last descriptor */
/* NOTE(review): byte-offset arithmetic here assumes host->sg_cpu is a
 * void * base (GNU void-pointer arithmetic) -- confirm against the
 * struct dw_mci definition. */
506 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
507 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
508 desc->des0 |= IDMAC_DES0_LD;
/* Start an IDMAC transfer: build the descriptor ring for the current
 * data, select the IDMAC interface in CTRL, enable the engine in BMOD and
 * write the poll-demand register to begin fetching descriptors. */
513 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
517 dw_mci_translate_sglist(host, host->data, sg_len);
519 /* Select IDMAC interface */
520 temp = mci_readl(host, CTRL);
521 temp |= SDMMC_CTRL_USE_IDMAC;
522 mci_writel(host, CTRL, temp);
526 /* Enable the IDMAC */
527 temp = mci_readl(host, BMOD);
528 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
529 mci_writel(host, BMOD, temp);
531 /* Start it running */
532 mci_writel(host, PLDMND, 1);
/* One-time IDMAC setup: forward-link a page-sized descriptor ring in
 * host->sg_cpu (last entry wraps to the base and carries the end-of-ring
 * bit), soft-reset the engine, unmask only the needed IDMAC interrupts
 * and program the descriptor base address. */
535 static int dw_mci_idmac_init(struct dw_mci *host)
537 struct idmac_desc *p;
540 /* Number of descriptors in the ring buffer */
541 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
543 /* Forward link the descriptor list */
544 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
545 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
547 /* Set the last descriptor as the end-of-ring descriptor */
548 p->des3 = host->sg_dma;
549 p->des0 = IDMAC_DES0_ER;
551 dw_mci_idmac_reset(host);
553 /* Mask out interrupts - get Tx & Rx complete only */
554 mci_writel(host, IDSTS, IDMAC_INT_CLR);
555 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
558 /* Set the descriptor base address */
559 mci_writel(host, DBADDR, host->sg_dma);
/* DMA-ops vtable for the internal DMA controller (IDMAC) backend. */
563 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
564 .init = dw_mci_idmac_init,
565 .start = dw_mci_idmac_start_dma,
566 .stop = dw_mci_idmac_stop_dma,
567 .complete = dw_mci_idmac_complete_dma,
568 .cleanup = dw_mci_dma_cleanup,
/* External-DMA counterpart of dw_mci_dma_cleanup(): unmap the current
 * scatterlist unless the pre_req/post_req path owns the mapping. */
572 static void dw_mci_edma_cleanup(struct dw_mci *host)
574 struct mmc_data *data = host->data;
577 if (!data->host_cookie)
578 dma_unmap_sg(host->dev,
579 data->sg, data->sg_len,
580 dw_mci_get_dma_dir(data));
/* Terminate all outstanding transfers on the external DMA channel. */
583 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
585 dmaengine_terminate_all(host->dms->ch);
/* External DMA completion callback: sync the buffers back to the CPU for
 * reads, unmap, then flag XFER_COMPLETE and schedule the tasklet. */
588 static void dw_mci_edmac_complete_dma(void *arg)
590 struct dw_mci *host = arg;
591 struct mmc_data *data = host->data;
593 dev_vdbg(host->dev, "DMA complete\n");
596 if(data->flags & MMC_DATA_READ)
597 /* Invalidate cache after read */
598 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
599 data->sg_len, DMA_FROM_DEVICE);
601 host->dma_ops->cleanup(host);
604 * If the card was removed, data will be NULL. No point in trying to
605 * send the stop command or waiting for NBUSY in this case.
608 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
609 tasklet_schedule(&host->tasklet);
/* Start a transfer on the external DMA engine: configure the slave
 * channel (FIFO address, 4-byte bus width, burst size derived from the
 * controller's FIFOTH MSIZE field and capped at 16), prepare a slave-sg
 * descriptor in the proper direction, install
 * dw_mci_edmac_complete_dma() as completion callback, and issue it.
 * Writes additionally flush the CPU cache before the transfer. */
613 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
615 struct dma_slave_config slave_config;
616 struct dma_async_tx_descriptor *desc = NULL;
617 struct scatterlist *sgl = host->data->sg;
618 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
619 u32 sg_elems = host->data->sg_len;
620 u32 fifoth_val, mburst;
623 /* Set external dma config: burst size, burst width*/
624 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
625 slave_config.src_addr = slave_config.dst_addr;
626 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
627 slave_config.src_addr_width = slave_config.dst_addr_width;
629 /* Match FIFO dma burst MSIZE with external dma config*/
630 fifoth_val = mci_readl(host, FIFOTH);
631 mburst = mszs[(fifoth_val >> 28) & 0x7];
633 /* edmac limit burst to 16 */
634 slave_config.dst_maxburst = (mburst > 16) ? 16 : mburst;
635 slave_config.src_maxburst = slave_config.dst_maxburst;
637 if(host->data->flags & MMC_DATA_WRITE){
638 slave_config.direction = DMA_MEM_TO_DEV;
639 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
641 dev_err(host->dev, "error in dw_mci edma configuration.\n");
645 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
646 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
648 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
651 /* Set dw_mci_edmac_complete_dma as callback */
652 desc->callback = dw_mci_edmac_complete_dma;
653 desc->callback_param = (void *)host;
654 dmaengine_submit(desc);
656 /* Flush cache before write */
657 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
658 sg_elems, DMA_TO_DEVICE);
659 dma_async_issue_pending(host->dms->ch);
662 slave_config.direction = DMA_DEV_TO_MEM;
663 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
665 dev_err(host->dev, "error in dw_mci edma configuration.\n");
668 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
669 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
671 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
674 /* set dw_mci_edmac_complete_dma as callback */
675 desc->callback = dw_mci_edmac_complete_dma;
676 desc->callback_param = (void *)host;
677 dmaengine_submit(desc);
678 dma_async_issue_pending(host->dms->ch);
/* Allocate the external-DMA bookkeeping struct and request the slave
 * channel named "dw_mci" from the device tree. */
682 static int dw_mci_edmac_init(struct dw_mci *host)
684 /* Request external dma channel, SHOULD decide chn in dts */
686 host->dms = (struct dw_mci_dma_slave *)kmalloc
687 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
688 if (NULL == host->dms) {
689 dev_err(host->dev, "No enough memory to alloc dms.\n");
693 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
694 if (!host->dms->ch) {
/* NOTE(review): ch is NULL on this path, so ch->chan_id in the error
 * message dereferences a NULL pointer -- confirm and fix. */
695 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
696 host->dms->ch->chan_id);
703 if (NULL != host->dms) {
/* Release the external DMA channel (if held) and clear the pointer. */
711 static void dw_mci_edmac_exit(struct dw_mci *host)
713 if (NULL != host->dms) {
714 if (NULL != host->dms->ch) {
715 dma_release_channel(host->dms->ch);
716 host->dms->ch = NULL;
/* DMA-ops vtable for the external dmaengine (edmac) backend. */
723 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
724 .init = dw_mci_edmac_init,
725 .exit = dw_mci_edmac_exit,
726 .start = dw_mci_edmac_start_dma,
727 .stop = dw_mci_edmac_stop_dma,
728 .complete = dw_mci_edmac_complete_dma,
729 .cleanup = dw_mci_edma_cleanup,
731 #endif /* CONFIG_MMC_DW_IDMAC */
/* Decide whether this data transfer may use DMA and, if so, map the
 * scatterlist.  Refuses "complex" transfers: totals under the DMA
 * threshold, or any sg entry with a non-word-aligned offset/length.
 * When called from pre_req (@next true) the mapping is cached in
 * data->host_cookie; otherwise a cached cookie is reused directly.
 * Returns the mapped sg count, or a negative value to force PIO. */
733 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
734 struct mmc_data *data,
737 struct scatterlist *sg;
738 unsigned int i, sg_len;
740 if (!next && data->host_cookie)
741 return data->host_cookie;
744 * We don't do DMA on "complex" transfers, i.e. with
745 * non-word-aligned buffers or lengths. Also, we don't bother
746 * with all the DMA setup overhead for short transfers.
748 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
754 for_each_sg(data->sg, sg, data->sg_len, i) {
755 if (sg->offset & 3 || sg->length & 3)
759 sg_len = dma_map_sg(host->dev,
762 dw_mci_get_dma_dir(data));
767 data->host_cookie = sg_len;
/* mmc_host_ops.pre_req: pre-map the next request's data buffers so the
 * DMA mapping cost overlaps with the current transfer.  A stale cookie is
 * cleared first; on mapping failure the cookie stays 0 (PIO fallback). */
772 static void dw_mci_pre_req(struct mmc_host *mmc,
773 struct mmc_request *mrq,
776 struct dw_mci_slot *slot = mmc_priv(mmc);
777 struct mmc_data *data = mrq->data;
779 if (!slot->host->use_dma || !data)
782 if (data->host_cookie) {
783 data->host_cookie = 0;
787 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
788 data->host_cookie = 0;
/* mmc_host_ops.post_req: unmap buffers that pre_req mapped (cookie set)
 * and clear the cookie so later paths do not double-unmap. */
791 static void dw_mci_post_req(struct mmc_host *mmc,
792 struct mmc_request *mrq,
795 struct dw_mci_slot *slot = mmc_priv(mmc);
796 struct mmc_data *data = mrq->data;
798 if (!slot->host->use_dma || !data)
801 if (data->host_cookie)
802 dma_unmap_sg(slot->host->dev,
805 dw_mci_get_dma_dir(data));
806 data->host_cookie = 0;
/* Recompute the FIFOTH register (burst MSIZE plus RX/TX watermarks) for
 * the given block size: pick the largest MSIZE that evenly divides both
 * the block depth and the TX-watermark complement.  IDMAC builds only. */
809 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
811 #ifdef CONFIG_MMC_DW_IDMAC
812 unsigned int blksz = data->blksz;
813 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
814 u32 fifo_width = 1 << host->data_shift;
815 u32 blksz_depth = blksz / fifo_width, fifoth_val;
816 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
817 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
819 tx_wmark = (host->fifo_depth) / 2;
820 tx_wmark_invers = host->fifo_depth - tx_wmark;
824 * if blksz is not a multiple of the FIFO width
826 if (blksz % fifo_width) {
833 if (!((blksz_depth % mszs[idx]) ||
834 (tx_wmark_invers % mszs[idx]))) {
836 rx_wmark = mszs[idx] - 1;
841 * If idx is '0', it won't be tried
842 * Thus, initial values are used
845 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
846 mci_writel(host, FIFOTH, fifoth_val);
/* Program the card read threshold (CDTHRCTL): enabled with thld = blksz
 * for HS200/SDR104 timings when a whole block fits in the FIFO, disabled
 * otherwise.  Only meaningful for reads (WARN otherwise). */
851 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
853 unsigned int blksz = data->blksz;
854 u32 blksz_depth, fifo_depth;
857 WARN_ON(!(data->flags & MMC_DATA_READ));
859 if (host->timing != MMC_TIMING_MMC_HS200 &&
860 host->timing != MMC_TIMING_UHS_SDR104)
863 blksz_depth = blksz / (1 << host->data_shift);
864 fifo_depth = host->fifo_depth;
866 if (blksz_depth > fifo_depth)
870 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
871 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
872 * Currently just choose blksz.
875 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
879 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/* Try to submit @data via DMA.  Maps the sg list, optionally retunes
 * FIFOTH when the block size changed, resets the DMA block, enables the
 * controller's DMA interface, masks the PIO RX/TX IRQs and starts the
 * backend.  Returns non-zero when the caller must fall back to PIO. */
882 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
890 /* If we don't have a channel, we can't do DMA */
894 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
896 /* Fixme: No need terminate edma, may cause flush op */
897 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
898 host->dma_ops->stop(host);
905 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
906 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
910 * Decide the MSIZE and RX/TX Watermark.
911 * If current block size is same with previous size,
912 * no need to update fifoth.
914 if (host->prev_blksz != data->blksz)
915 dw_mci_adjust_fifoth(host, data)
918 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
920 /* Enable the DMA interface */
921 temp = mci_readl(host, CTRL);
922 temp |= SDMMC_CTRL_DMA_ENABLE;
923 mci_writel(host, CTRL, temp);
925 /* Disable RX/TX IRQs, let DMA handle it */
926 spin_lock_irqsave(&host->slock, flags);
927 temp = mci_readl(host, INTMASK);
928 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
929 mci_writel(host, INTMASK, temp);
930 spin_unlock_irqrestore(&host->slock, flags);
932 host->dma_ops->start(host, sg_len);
/* Hand @data to the controller.  Sets the direction status, programs the
 * read threshold for reads, and attempts DMA; if DMA is refused, sets up
 * PIO instead: an sg_miter over the buffers, RX/TX IRQs unmasked, DMA
 * disabled in CTRL, and FIFOTH restored to the initial PIO value (with
 * prev_blksz invalidated so a later DMA transfer re-tunes FIFOTH). */
937 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
942 data->error = -EINPROGRESS;
949 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
951 if (data->flags & MMC_DATA_READ) {
952 host->dir_status = DW_MCI_RECV_STATUS;
953 dw_mci_ctrl_rd_thld(host, data);
955 host->dir_status = DW_MCI_SEND_STATUS;
958 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
959 data->blocks, data->blksz, mmc_hostname(host->mmc));
961 if (dw_mci_submit_data_dma(host, data)) {
962 int flags = SG_MITER_ATOMIC;
963 if (host->data->flags & MMC_DATA_READ)
964 flags |= SG_MITER_TO_SG;
966 flags |= SG_MITER_FROM_SG;
968 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
970 host->part_buf_start = 0;
971 host->part_buf_count = 0;
973 spin_lock_irqsave(&host->slock, flag);
974 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
975 temp = mci_readl(host, INTMASK);
976 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
977 mci_writel(host, INTMASK, temp);
978 spin_unlock_irqrestore(&host->slock, flag);
980 temp = mci_readl(host, CTRL);
981 temp &= ~SDMMC_CTRL_DMA_ENABLE;
982 mci_writel(host, CTRL, temp);
985 * Use the initial fifoth_val for PIO mode.
986 * If next issued data may be transfered by DMA mode,
987 * prev_blksz should be invalidated.
989 mci_writel(host, FIFOTH, host->fifoth_val);
990 host->prev_blksz = 0;
993 * Keep the current block size.
994 * It will be used to decide whether to update
995 * fifoth register next time.
997 host->prev_blksz = data->blksz;
/* Issue a non-data housekeeping command (e.g. UPD_CLK) directly on the
 * CMD register.  Optionally waits for the card/controller to leave the
 * busy state first, then polls for the START bit to self-clear; logs a
 * timeout error if the controller never accepts the command. */
1001 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1003 struct dw_mci *host = slot->host;
1004 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1005 unsigned int cmd_status = 0;
1006 #ifdef SDMMC_WAIT_FOR_UNBUSY
1008 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1010 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1012 ret = time_before(jiffies, timeout);
1013 cmd_status = mci_readl(host, STATUS);
1014 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1018 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1019 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1022 mci_writel(host, CMDARG, arg);
1024 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
/* Clock-update commands complete fast; use a shorter poll window. */
1025 if(cmd & SDMMC_CMD_UPD_CLK)
1026 timeout = jiffies + msecs_to_jiffies(50);
1028 timeout = jiffies + msecs_to_jiffies(500);
1029 while (time_before(jiffies, timeout)) {
1030 cmd_status = mci_readl(host, CMD);
1031 if (!(cmd_status & SDMMC_CMD_START))
1034 dev_err(&slot->mmc->class_dev,
1035 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1036 cmd, arg, cmd_status);
/* Reconfigure the card clock and bus width for @slot.  When the clock
 * changed (or @force_clkinit): compute the CLKDIV divider from bus_hz,
 * gate the clock, program CLKDIV/CLKSRC via UPD_CLK commands, apply the
 * Rockchip clk_mmc rate workarounds for very low (init) clocks and the
 * div==0 HS-DDR eMMC case, then re-enable the clock (with low-power
 * gating only when no SDIO interrupt is expected).  Finally records the
 * effective clock in slot->__clk_old / host->current_speed and writes
 * the slot's bus width to CTYPE. */
1039 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1041 struct dw_mci *host = slot->host;
1042 unsigned int tempck,clock = slot->clock;
1047 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1048 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1051 mci_writel(host, CLKENA, 0);
1052 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1053 if(host->svi_flags == 0)
1054 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1056 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1058 } else if (clock != host->current_speed || force_clkinit) {
1059 div = host->bus_hz / clock;
1060 if (host->bus_hz % clock && host->bus_hz > clock)
1062 * move the + 1 after the divide to prevent
1063 * over-clocking the card.
1067 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1069 if ((clock << div) != slot->__clk_old || force_clkinit) {
1070 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1071 dev_info(&slot->mmc->class_dev,
1072 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1073 slot->id, host->bus_hz, clock,
1076 host->set_speed = tempck;
1077 host->set_div = div;
1081 mci_writel(host, CLKENA, 0);
1082 mci_writel(host, CLKSRC, 0);
1086 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1088 if(clock <= 400*1000){
1089 MMC_DBG_BOOT_FUNC(host->mmc,
1090 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1091 clock * 2, mmc_hostname(host->mmc));
1092 /* clk_mmc will change parents to 24MHz xtal*/
1093 clk_set_rate(host->clk_mmc, clock * 2);
1096 host->set_div = div;
1100 MMC_DBG_BOOT_FUNC(host->mmc,
1101 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1102 mmc_hostname(host->mmc));
1105 MMC_DBG_ERR_FUNC(host->mmc,
1106 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1107 mmc_hostname(host->mmc));
1109 host->set_div = div;
1110 host->bus_hz = host->set_speed * 2;
1111 MMC_DBG_BOOT_FUNC(host->mmc,
1112 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1113 div, host->bus_hz, mmc_hostname(host->mmc));
1115 /* BUG may be here, come on, Linux BSP engineer looks!
1116 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1117 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1118 some oops happened like that:
1119 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1120 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1121 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1122 mmc0: new high speed DDR MMC card at address 0001
1123 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1125 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1126 mmcblk0: retrying using single block read
1127 mmcblk0: error -110 sending status command, retrying
1129 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1132 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1133 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1136 host->set_div = div;
1137 host->bus_hz = host->set_speed * 2;
1138 MMC_DBG_BOOT_FUNC(host->mmc,
1139 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1140 div, host->bus_hz, mmc_hostname(host->mmc));
1143 if (host->verid < DW_MMC_240A)
1144 clk_set_rate(host->clk_mmc,(host->bus_hz));
1146 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1152 /* set clock to desired speed */
1153 mci_writel(host, CLKDIV, div);
1157 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1159 /* enable clock; only low power if no SDIO */
1160 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1162 if (host->verid < DW_MMC_240A)
1163 sdio_int = SDMMC_INT_SDIO(slot->id);
1165 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1167 if (!(mci_readl(host, INTMASK) & sdio_int))
1168 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1169 mci_writel(host, CLKENA, clk_en_a);
1173 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1174 /* keep the clock with reflecting clock dividor */
1175 slot->__clk_old = clock << div;
1178 host->current_speed = clock;
1180 if(slot->ctype != slot->pre_ctype)
1181 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1183 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1184 mmc_hostname(host->mmc));
1185 slot->pre_ctype = slot->ctype;
1187 /* Set the current slot bus width */
1188 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1191 extern struct mmc_card *this_card;
/* Poll the STATUS register until both DATA_BUSY and MC_BUSY clear, or a
 * per-card-type timeout expires.  For eMMC the timeout is scaled up for
 * (secure) erase using the card's ext_csd erase timing fields. */
1192 static void dw_mci_wait_unbusy(struct dw_mci *host)
1195 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1196 unsigned long time_loop;
1197 unsigned int status;
1200 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1202 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1203 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1204 /* Special care for (secure)erase timeout calculation */
/* NOTE(review): (arg & (0x1 << 31)) == 1 can never be true -- the mask
 * yields 0 or 0x80000000, so the secure-erase flag is never set; the
 * test presumably wants != 0.  Also 0x1 << 31 overflows signed int
 * (use 1U << 31).  Confirm and fix. */
1206 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1209 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1210 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1211 300000 * (this_card->ext_csd.sec_erase_mult)) :
1212 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1216 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1217 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1218 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1219 timeout = SDMMC_DATA_TIMEOUT_SD;
1222 time_loop = jiffies + msecs_to_jiffies(timeout);
1224 status = mci_readl(host, STATUS);
1225 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1227 } while (time_before(jiffies, time_loop));
1232 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1235 * 0--status is busy.
1236 * 1--status is unbusy.
/* mmc_host_ops.card_busy for the Rockchip voltage-switch sequence.
 * Returns 0 when the card is busy, 1 when unbusy; also toggles
 * host->svi_flags to track the two phases of the CMD11 handshake. */
1238 int dw_mci_card_busy(struct mmc_host *mmc)
1240 struct dw_mci_slot *slot = mmc_priv(mmc);
1241 struct dw_mci *host = slot->host;
1243 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1244 host->svi_flags, mmc_hostname(host->mmc));
1247 if(host->svi_flags == 0){
1249 host->svi_flags = 1;
1250 return host->svi_flags;
1253 host->svi_flags = 0;
1254 return host->svi_flags;
/* Kick off @cmd on @slot: select the slot, wait for the controller to be
 * unbusy, clear the per-request event/status state, program BYTCNT and
 * BLKSIZ plus submit the data phase when present, then start the command.
 * Also pre-computes the stop command's CMD flags for later use. */
1260 static void __dw_mci_start_request(struct dw_mci *host,
1261 struct dw_mci_slot *slot,
1262 struct mmc_command *cmd)
1264 struct mmc_request *mrq;
1265 struct mmc_data *data;
1269 if (host->pdata->select_slot)
1270 host->pdata->select_slot(slot->id);
1272 host->cur_slot = slot;
1275 dw_mci_wait_unbusy(host);
1277 host->pending_events = 0;
1278 host->completed_events = 0;
1279 host->data_status = 0;
1283 dw_mci_set_timeout(host);
1284 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1285 mci_writel(host, BLKSIZ, data->blksz);
1288 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1290 /* this is the first command, send the initialization clock */
1291 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1292 cmdflags |= SDMMC_CMD_INIT;
1295 dw_mci_submit_data(host, data);
1299 dw_mci_start_command(host, cmd, cmdflags);
1302 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * dw_mci_start_request - begin the slot's queued request. If the
 * request has an SBC (CMD23 set-block-count), send that first; the
 * actual data command is issued after SBC completes.
 */
1305 static void dw_mci_start_request(struct dw_mci *host,
1306 struct dw_mci_slot *slot)
1308 struct mmc_request *mrq = slot->mrq;
1309 struct mmc_command *cmd;
1311 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1312 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1314 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1315 __dw_mci_start_request(host, slot, cmd);
1318 /* must be called with host->lock held */
/*
 * dw_mci_queue_request - start the request immediately if the host
 * state machine is idle, otherwise append the slot to the host queue
 * to be serviced when the current request finishes.
 */
1319 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1320 struct mmc_request *mrq)
1322 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1327 if (host->state == STATE_IDLE) {
1328 host->state = STATE_SENDING_CMD;
1329 dw_mci_start_request(host, slot);
1331 list_add_tail(&slot->queue_node, &host->queue);
/*
 * dw_mci_request - mmc_host_ops.request entry point. Fails the request
 * with -ENOMEDIUM if no card is present, otherwise queues it under
 * host->lock.
 */
1335 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1337 struct dw_mci_slot *slot = mmc_priv(mmc);
1338 struct dw_mci *host = slot->host;
1343 * The check for card presence and queueing of the request must be
1344 * atomic, otherwise the card could be removed in between and the
1345 * request wouldn't fail until another card was inserted.
1347 spin_lock_bh(&host->lock);
1349 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1350 spin_unlock_bh(&host->lock);
1351 mrq->cmd->error = -ENOMEDIUM;
1352 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1353 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
/* Complete the request immediately; lock already dropped above. */
1355 mmc_request_done(mmc, mrq);
1359 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1360 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1362 dw_mci_queue_request(host, slot, mrq);
1364 spin_unlock_bh(&host->lock);
/*
 * dw_mci_set_ios - mmc_host_ops.set_ios. Applies bus width, timing
 * (DDR bit in UHS_REG), clock (mirrored into slot->clock and applied
 * via dw_mci_setup_bus) and slot power (PWREN / pdata->setpower).
 * Optionally waits for the controller to go unbusy first
 * (SDMMC_WAIT_FOR_UNBUSY), with a longer budget while a signal-voltage
 * switch is in flight (svi_flags == 1).
 */
1367 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1369 struct dw_mci_slot *slot = mmc_priv(mmc);
1370 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1371 struct dw_mci *host = slot->host;
1373 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1376 #ifdef SDMMC_WAIT_FOR_UNBUSY
1377 unsigned long time_loop;
/* Pick the unbusy-wait budget; larger during a voltage switch. */
1380 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1381 if(host->svi_flags == 1)
1382 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1384 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1386 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1389 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1390 printk("%d..%s: no card. [%s]\n", \
1391 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until data/MC busy bits clear or the budget expires. */
1396 ret = time_before(jiffies, time_loop);
1397 regs = mci_readl(slot->host, STATUS);
1398 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1404 printk("slot->flags = %lu ", slot->flags);
1405 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1406 if(host->svi_flags != 1)
1409 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1410 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
1414 switch (ios->bus_width) {
1415 case MMC_BUS_WIDTH_4:
1416 slot->ctype = SDMMC_CTYPE_4BIT;
1418 case MMC_BUS_WIDTH_8:
1419 slot->ctype = SDMMC_CTYPE_8BIT;
1422 /* set default 1 bit mode */
1423 slot->ctype = SDMMC_CTYPE_1BIT;
1424 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode: bit (16 + slot->id) of UHS_REG selects DDR for this slot. */
1427 regs = mci_readl(slot->host, UHS_REG);
1430 if (ios->timing == MMC_TIMING_UHS_DDR50)
1431 regs |= ((0x1 << slot->id) << 16);
1433 regs &= ~((0x1 << slot->id) << 16);
1435 mci_writel(slot->host, UHS_REG, regs);
1436 slot->host->timing = ios->timing;
1439 * Use mirror of ios->clock to prevent race with mmc
1440 * core ios update when finding the minimum.
1442 slot->clock = ios->clock;
1444 if (drv_data && drv_data->set_ios)
1445 drv_data->set_ios(slot->host, ios);
1447 /* Slot specific timing and width adjustment */
1448 dw_mci_setup_bus(slot, false);
1452 switch (ios->power_mode) {
/* Power up: force init clock on next command, enable PWREN bit. */
1454 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1456 if (slot->host->pdata->setpower)
1457 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1458 regs = mci_readl(slot->host, PWREN);
1459 regs |= (1 << slot->id);
1460 mci_writel(slot->host, PWREN, regs);
1463 /* Power down slot */
1464 if(slot->host->pdata->setpower)
1465 slot->host->pdata->setpower(slot->id, 0);
1466 regs = mci_readl(slot->host, PWREN);
1467 regs &= ~(1 << slot->id);
1468 mci_writel(slot->host, PWREN, regs);
/*
 * dw_mci_get_ro - mmc_host_ops.get_ro. Write-protect detection order:
 * slot quirk (never protected) -> platform callback -> WP GPIO ->
 * controller WRTPRT register bit for this slot.
 */
1475 static int dw_mci_get_ro(struct mmc_host *mmc)
1478 struct dw_mci_slot *slot = mmc_priv(mmc);
1479 struct dw_mci_board *brd = slot->host->pdata;
1481 /* Use platform get_ro function, else try on board write protect */
1482 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1484 else if(brd->get_ro)
1485 read_only = brd->get_ro(slot->id);
1486 else if(gpio_is_valid(slot->wp_gpio))
1487 read_only = gpio_get_value(slot->wp_gpio);
1490 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1492 dev_dbg(&mmc->class_dev, "card is %s\n",
1493 read_only ? "read-only" : "read-write");
/*
 * dw_mci_set_sdio_status - software "card present" control for SDIO
 * (typically WiFi power on/off). Sets/clears DW_MMC_CARD_PRESENT,
 * gates the controller clocks to match, and schedules card detection.
 * No-op for non-SDIO-restricted hosts.
 */
1498 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1500 struct dw_mci_slot *slot = mmc_priv(mmc);
1501 struct dw_mci *host = slot->host;
1502 /*struct dw_mci_board *brd = slot->host->pdata;*/
1504 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1507 spin_lock_bh(&host->lock);
1510 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1512 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1514 spin_unlock_bh(&host->lock);
/* Clock gating follows presence: enable both clocks when "inserted". */
1516 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1517 if(__clk_is_enabled(host->hclk_mmc) == false)
1518 clk_prepare_enable(host->hclk_mmc);
1519 if(__clk_is_enabled(host->clk_mmc) == false)
1520 clk_prepare_enable(host->clk_mmc);
1522 if(__clk_is_enabled(host->clk_mmc) == true)
1523 clk_disable_unprepare(slot->host->clk_mmc);
1524 if(__clk_is_enabled(host->hclk_mmc) == true)
1525 clk_disable_unprepare(slot->host->hclk_mmc);
/* Let the MMC core rescan the (virtual) card after a short delay. */
1528 mmc_detect_change(slot->mmc, 20);
/*
 * dw_mci_get_cd - mmc_host_ops.get_cd. Card-detect order: RK312x SD
 * special path (debounced GPIO read plus force_jtag GRF toggling for
 * NCD packages) -> SDIO software presence bit -> broken-CD quirk ->
 * platform callback -> CD GPIO -> controller CDETECT register.
 * Updates DW_MMC_CARD_PRESENT under host->lock.
 */
1534 static int dw_mci_get_cd(struct mmc_host *mmc)
1537 struct dw_mci_slot *slot = mmc_priv(mmc);
1538 struct dw_mci_board *brd = slot->host->pdata;
1539 struct dw_mci *host = slot->host;
1540 int gpio_cd = mmc_gpio_get_cd(mmc);
1543 if (cpu_is_rk312x() &&
1545 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1546 gpio_cd = slot->cd_gpio;
1547 if (gpio_is_valid(gpio_cd)) {
1548 gpio_val = gpio_get_value(gpio_cd);
/* Read twice as a crude debounce before trusting the level. */
1550 if (gpio_val == gpio_get_value(gpio_cd)) {
1551 gpio_cd = gpio_get_value(gpio_cd) == 0 ? 1 : 0;
1553 /* Enable force_jtag wihtout card in slot, ONLY for NCD-package */
1554 grf_writel((0x1 << 24) | (1 << 8), RK312X_GRF_SOC_CON0);
1555 dw_mci_ctrl_all_reset(host);
1557 /* Really card detected: SHOULD disable force_jtag */
1558 grf_writel((0x1 << 24) | (0 << 8), RK312X_GRF_SOC_CON0);
/* Bouncing read: keep the previous detect state. */
1562 return slot->last_detect_state;
1565 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
1569 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1570 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1572 /* Use platform get_cd function, else try onboard card detect */
1573 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1575 else if (brd->get_cd)
1576 present = !brd->get_cd(slot->id);
1577 else if (!IS_ERR_VALUE(gpio_cd))
1580 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1583 spin_lock_bh(&host->lock);
1585 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1586 dev_dbg(&mmc->class_dev, "card is present\n");
1588 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1589 dev_dbg(&mmc->class_dev, "card is not present\n");
1591 spin_unlock_bh(&host->lock);
1598 * Dts Should caps emmc controller with poll-hw-reset
/*
 * dw_mci_hw_reset - mmc_host_ops.hw_reset for eMMC. Sequence:
 * (1) issue CMD12 to abort any in-flight transfer, (2) wait for the
 * data-over interrupt, (3) reset IDMA/DMA/FIFO in that fixed order,
 * then pulse PWREN/RST_N per the eMMC RST_n timing (tRstW/tRSCA/tRSTH).
 */
1600 static void dw_mci_hw_reset(struct mmc_host *mmc)
1602 struct dw_mci_slot *slot = mmc_priv(mmc);
1603 struct dw_mci *host = slot->host;
1608 unsigned long timeout;
1611 /* (1) CMD12 to end any transfer in process */
1612 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1613 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1615 if(host->mmc->hold_reg_flag)
1616 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1617 mci_writel(host, CMDARG, 0);
1619 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait up to 500ms for the controller to accept the command. */
1621 timeout = jiffies + msecs_to_jiffies(500);
1623 ret = time_before(jiffies, timeout);
1624 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1629 MMC_DBG_ERR_FUNC(host->mmc,
1630 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1631 __func__, mmc_hostname(host->mmc));
1633 /* (2) wait DTO, even if no response is sent back by card */
1635 timeout = jiffies + msecs_to_jiffies(5);
1637 ret = time_before(jiffies, timeout);
1638 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1639 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1645 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1647 /* Software reset - BMOD[0] for IDMA only */
1648 regs = mci_readl(host, BMOD);
1649 regs |= SDMMC_IDMAC_SWRESET;
1650 mci_writel(host, BMOD, regs);
1651 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1652 regs = mci_readl(host, BMOD);
1653 if(regs & SDMMC_IDMAC_SWRESET)
1654 MMC_DBG_WARN_FUNC(host->mmc,
1655 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1656 __func__, mmc_hostname(host->mmc));
1658 /* DMA reset - CTRL[2] */
1659 regs = mci_readl(host, CTRL);
1660 regs |= SDMMC_CTRL_DMA_RESET;
1661 mci_writel(host, CTRL, regs);
1662 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1663 regs = mci_readl(host, CTRL);
1664 if(regs & SDMMC_CTRL_DMA_RESET)
1665 MMC_DBG_WARN_FUNC(host->mmc,
1666 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1667 __func__, mmc_hostname(host->mmc));
1669 /* FIFO reset - CTRL[1] */
1670 regs = mci_readl(host, CTRL);
1671 regs |= SDMMC_CTRL_FIFO_RESET;
1672 mci_writel(host, CTRL, regs);
1673 mdelay(1); /* no timing limited, 1ms is random value */
1674 regs = mci_readl(host, CTRL);
1675 if(regs & SDMMC_CTRL_FIFO_RESET)
1676 MMC_DBG_WARN_FUNC(host->mmc,
/* NOTE(review): copy-paste - this message says DMA_RESET but this
 * branch checks the FIFO reset bit; should read SDMMC_CTRL_FIFO_RESET. */
1677 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1678 __func__, mmc_hostname(host->mmc));
1681 According to eMMC spec
1682 tRstW >= 1us ; RST_n pulse width
1683 tRSCA >= 200us ; RST_n to Command time
1684 tRSTH >= 1us ; RST_n high period
1686 mci_writel(slot->host, PWREN, 0x0);
1687 mci_writel(slot->host, RST_N, 0x0);
1689 udelay(10); /* 10us for bad quality eMMc. */
1691 mci_writel(slot->host, PWREN, 0x1);
1692 mci_writel(slot->host, RST_N, 0x1);
1694 usleep_range(500, 1000); /* at least 500(> 200us) */
1698 * Disable lower power mode.
1700 * Low power mode will stop the card clock when idle. According to the
1701 * description of the CLKENA register we should disable low power mode
1702 * for SDIO cards if we need SDIO interrupts to work.
1704 * This function is fast if low power mode is already disabled.
1706 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1708 struct dw_mci *host = slot->host;
1710 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1712 clk_en_a = mci_readl(host, CLKENA);
1714 if (clk_en_a & clken_low_pwr) {
1715 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1716 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1717 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * dw_mci_enable_sdio_irq - mmc_host_ops.enable_sdio_irq. Masks or
 * unmasks this slot's SDIO interrupt in INTMASK under host->slock.
 * The SDIO interrupt bit position moved in IP version 2.40a.
 */
1721 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1723 struct dw_mci_slot *slot = mmc_priv(mmc);
1724 struct dw_mci *host = slot->host;
1725 unsigned long flags;
1729 spin_lock_irqsave(&host->slock, flags);
1731 /* Enable/disable Slot Specific SDIO interrupt */
1732 int_mask = mci_readl(host, INTMASK);
1734 if (host->verid < DW_MMC_240A)
1735 sdio_int = SDMMC_INT_SDIO(slot->id);
1737 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1741 * Turn off low power mode if it was enabled. This is a bit of
1742 * a heavy operation and we disable / enable IRQs a lot, so
1743 * we'll leave low power mode disabled and it will get
1744 * re-enabled again in dw_mci_setup_bus().
1746 dw_mci_disable_low_power(slot);
1748 mci_writel(host, INTMASK,
1749 (int_mask | sdio_int));
1751 mci_writel(host, INTMASK,
1752 (int_mask & ~sdio_int));
1755 spin_unlock_irqrestore(&host->slock, flags);
1758 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* IO domain voltages in millivolts used for the GRF VSEL switch. */
1760 IO_DOMAIN_12 = 1200,
1761 IO_DOMAIN_18 = 1800,
1762 IO_DOMAIN_33 = 3300,
/*
 * dw_mci_do_grf_io_domain_switch - program the SoC GRF IO-voltage
 * select bits for the SD IO domain. Only the RK3288 path is visible
 * here; other SoCs log an error.
 */
1764 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1774 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1775 __FUNCTION__, mmc_hostname(host->mmc));
1778 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1779 __FUNCTION__, mmc_hostname(host->mmc));
1783 if(cpu_is_rk3288()){
1784 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
/* Bit 23 is the write-enable mask for the VSEL field on RK3288. */
1785 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1789 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1790 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * dw_mci_do_start_signal_voltage_switch - switch the IO signalling
 * voltage (3.3V / 1.8V / 1.2V): set the vmmc regulator, flip the GRF
 * IO-domain select, program UHS_REG's 1.8V enable bit, then verify the
 * bit after the regulator settling delay. Only meaningful on IP
 * >= 2.40a.
 */
1794 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1795 struct mmc_ios *ios)
1798 unsigned int value,uhs_reg;
1801 * Signal Voltage Switching is only applicable for Host Controllers
1804 if (host->verid < DW_MMC_240A)
1807 uhs_reg = mci_readl(host, UHS_REG);
1808 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1809 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1811 switch (ios->signal_voltage) {
1812 case MMC_SIGNAL_VOLTAGE_330:
1813 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1815 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1816 /* regulator_put(host->vmmc); //to be done in remove function. */
1818 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1819 __func__, regulator_get_voltage(host->vmmc), ret);
1821 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1822 " failed\n", mmc_hostname(host->mmc));
1825 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1827 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1828 __FUNCTION__, mmc_hostname(host->mmc));
1830 /* set High-power mode */
1831 value = mci_readl(host, CLKENA);
1832 value &= ~SDMMC_CLKEN_LOW_PWR;
1833 mci_writel(host,CLKENA , value);
/* Clear the 1.8V enable bit to select 3.3V signalling. */
1835 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1836 mci_writel(host,UHS_REG , uhs_reg);
1839 usleep_range(5000, 5500);
1841 /* 3.3V regulator output should be stable within 5 ms */
1842 uhs_reg = mci_readl(host, UHS_REG);
1843 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1846 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1847 mmc_hostname(host->mmc));
1850 case MMC_SIGNAL_VOLTAGE_180:
1852 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1853 /* regulator_put(host->vmmc);//to be done in remove function. */
1855 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1856 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1858 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1859 " failed\n", mmc_hostname(host->mmc));
1862 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1866 * Enable 1.8V Signal Enable in the Host Control2
1869 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1872 usleep_range(5000, 5500);
1873 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1874 __FUNCTION__,mmc_hostname(host->mmc));
1876 /* 1.8V regulator output should be stable within 5 ms */
1877 uhs_reg = mci_readl(host, UHS_REG);
1878 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1881 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1882 mmc_hostname(host->mmc));
1885 case MMC_SIGNAL_VOLTAGE_120:
1887 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1889 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1890 " failed\n", mmc_hostname(host->mmc));
1896 /* No signal voltage switch required */
/*
 * dw_mci_start_signal_voltage_switch - mmc_host_ops wrapper; no-op on
 * IP versions before 2.40a, otherwise delegates to the do_ helper.
 */
1902 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1903 struct mmc_ios *ios)
1905 struct dw_mci_slot *slot = mmc_priv(mmc);
1906 struct dw_mci *host = slot->host;
1909 if (host->verid < DW_MMC_240A)
1912 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * dw_mci_execute_tuning - mmc_host_ops.execute_tuning. Selects the
 * tuning block pattern by opcode/bus width, picks the clock con_id for
 * the card type (eMMC/SDIO/SD), and delegates to the drv_data hook.
 * Skipped entirely on RK3036/RK312x which lack a 1.8V IO domain.
 */
1918 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1920 struct dw_mci_slot *slot = mmc_priv(mmc);
1921 struct dw_mci *host = slot->host;
1922 const struct dw_mci_drv_data *drv_data = host->drv_data;
1923 struct dw_mci_tuning_data tuning_data;
1926 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1927 if(cpu_is_rk3036() || cpu_is_rk312x())
1930 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1931 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1932 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1933 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1934 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1935 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1936 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1940 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1941 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1942 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1945 "Undefined command(%d) for tuning\n", opcode);
1950 /* Recommend sample phase and delayline
1951 Fixme: Mix-use these three controllers will cause
1954 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1955 tuning_data.con_id = 3;
1956 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1957 tuning_data.con_id = 1;
1959 tuning_data.con_id = 0;
1961 /* 0: driver, from host->devices
1962 1: sample, from devices->host
1964 tuning_data.tuning_type = 1;
1966 if (drv_data && drv_data->execute_tuning)
1967 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/* Host-operations table registered with the MMC core for this driver. */
1972 static const struct mmc_host_ops dw_mci_ops = {
1973 .request = dw_mci_request,
1974 .pre_req = dw_mci_pre_req,
1975 .post_req = dw_mci_post_req,
1976 .set_ios = dw_mci_set_ios,
1977 .get_ro = dw_mci_get_ro,
1978 .get_cd = dw_mci_get_cd,
1979 .set_sdio_status = dw_mci_set_sdio_status,
1980 .hw_reset = dw_mci_hw_reset,
1981 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1982 .execute_tuning = dw_mci_execute_tuning,
/* Voltage-switch hooks only exist when the Rockchip switch is built in. */
1983 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1984 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
1985 .card_busy = dw_mci_card_busy,
/*
 * dw_mci_enable_irq - enable/disable the host IRQ line, tracking the
 * current state in host->irq_state so repeated calls with the same
 * flag do nothing.
 * NOTE(review): disable_irq() can sleep waiting for in-flight handlers,
 * which looks unsafe under local_irq_save(); confirm whether
 * disable_irq_nosync() was intended here.
 */
1990 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
1992 unsigned long flags;
1997 local_irq_save(flags);
1998 if(host->irq_state != irqflag)
2000 host->irq_state = irqflag;
2003 enable_irq(host->irq);
2007 disable_irq(host->irq);
2010 local_irq_restore(flags);
/*
 * dw_mci_deal_data_end - end-of-data-phase handling. For host->card
 * transfers (except CMD19 bus test), translate latched data_status
 * error bits into data->error, then wait for the card/controller to
 * go unbusy before the request is completed.
 */
2014 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2015 __releases(&host->lock)
2016 __acquires(&host->lock)
2018 if(DW_MCI_SEND_STATUS == host->dir_status){
2020 if( MMC_BUS_TEST_W != host->cmd->opcode){
2021 if(host->data_status & SDMMC_INT_DCRC)
2022 host->data->error = -EILSEQ;
2023 else if(host->data_status & SDMMC_INT_EBE)
2024 host->data->error = -ETIMEDOUT;
2026 dw_mci_wait_unbusy(host);
2029 dw_mci_wait_unbusy(host);
/*
 * dw_mci_request_end - finish the current request: cancel the DTO
 * timer, run end-of-data handling, dequeue and start the next queued
 * slot (or go idle), then complete the request to the MMC core with
 * host->lock dropped around mmc_request_done().
 */
2034 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2035 __releases(&host->lock)
2036 __acquires(&host->lock)
2038 struct dw_mci_slot *slot;
2039 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2041 WARN_ON(host->cmd || host->data);
2043 del_timer_sync(&host->dto_timer);
2044 dw_mci_deal_data_end(host, mrq);
2047 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2048 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2050 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2051 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2053 host->cur_slot->mrq = NULL;
/* Service the next waiting slot, if any; otherwise go idle. */
2055 if (!list_empty(&host->queue)) {
2056 slot = list_entry(host->queue.next,
2057 struct dw_mci_slot, queue_node);
2058 list_del(&slot->queue_node);
2059 dev_vdbg(host->dev, "list not empty: %s is next\n",
2060 mmc_hostname(slot->mmc));
2061 host->state = STATE_SENDING_CMD;
2062 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2063 dw_mci_start_request(host, slot);
2065 dev_vdbg(host->dev, "list empty\n");
2066 host->state = STATE_IDLE;
/* mmc_request_done may re-enter the driver; must not hold the lock. */
2069 spin_unlock(&host->lock);
2070 mmc_request_done(prev_mmc, mrq);
2071 spin_lock(&host->lock);
/*
 * dw_mci_command_complete - read back the command response (long
 * responses are word-reversed by the controller) and translate the
 * latched cmd_status error bits into cmd->error, cancelling the DTO
 * timer on error paths.
 */
2074 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2076 u32 status = host->cmd_status;
2078 host->cmd_status = 0;
2080 /* Read the response from the card (up to 16 bytes) */
2081 if (cmd->flags & MMC_RSP_PRESENT) {
2082 if (cmd->flags & MMC_RSP_136) {
/* 136-bit response: RESP0..3 hold the words in reverse order. */
2083 cmd->resp[3] = mci_readl(host, RESP0);
2084 cmd->resp[2] = mci_readl(host, RESP1);
2085 cmd->resp[1] = mci_readl(host, RESP2);
2086 cmd->resp[0] = mci_readl(host, RESP3);
2088 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2089 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2091 cmd->resp[0] = mci_readl(host, RESP0);
2095 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2096 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
2100 if (status & SDMMC_INT_RTO)
2102 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2105 cmd->error = -ETIMEDOUT;
2106 del_timer_sync(&host->dto_timer);
2107 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2108 del_timer_sync(&host->dto_timer);
2109 cmd->error = -EILSEQ;
2110 }else if (status & SDMMC_INT_RESP_ERR){
2111 del_timer_sync(&host->dto_timer);
2116 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2117 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2120 del_timer_sync(&host->dto_timer);
/* Suppress repeated RTO logging for CMD13 polling beyond the hold max. */
2121 if(MMC_SEND_STATUS != cmd->opcode)
2122 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2123 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2124 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2128 /* newer ip versions need a delay between retries */
2129 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * dw_mci_tasklet_func - bottom-half state machine. Driven by event
 * bits set in host->pending_events by the interrupt handler, it walks
 * the request through SENDING_CMD -> SENDING_DATA -> DATA_BUSY ->
 * (SENDING_STOP / DATA_ERROR) -> completion, looping until the state
 * stops changing. Runs entirely under host->lock.
 */
2135 static void dw_mci_tasklet_func(unsigned long priv)
2137 struct dw_mci *host = (struct dw_mci *)priv;
2138 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2139 struct mmc_data *data;
2140 struct mmc_command *cmd;
2141 enum dw_mci_state state;
2142 enum dw_mci_state prev_state;
2143 u32 status, cmd_flags;
2144 unsigned long timeout = 0;
2147 spin_lock(&host->lock);
2149 state = host->state;
2159 case STATE_SENDING_CMD:
2160 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2161 &host->pending_events))
2166 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2167 dw_mci_command_complete(host, cmd);
/* SBC (CMD23) done: now issue the real data command. */
2168 if (cmd == host->mrq->sbc && !cmd->error) {
2169 prev_state = state = STATE_SENDING_CMD;
2170 __dw_mci_start_request(host, host->cur_slot,
2175 if (cmd->data && cmd->error) {
2176 del_timer_sync(&host->dto_timer); /* delete the timer for INT_DTO */
2177 dw_mci_stop_dma(host);
2180 send_stop_cmd(host, data);
2181 state = STATE_SENDING_STOP;
2187 send_stop_abort(host, data);
2188 state = STATE_SENDING_STOP;
2191 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2194 if (!host->mrq->data || cmd->error) {
2195 dw_mci_request_end(host, host->mrq);
2199 prev_state = state = STATE_SENDING_DATA;
2202 case STATE_SENDING_DATA:
2203 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2204 dw_mci_stop_dma(host);
2207 send_stop_cmd(host, data);
2209 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2210 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2211 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
/* Hand-craft a CMD12 directly in the CMD register for recovery. */
2213 mci_writel(host, CMDARG, 0);
2215 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2216 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2218 if(host->mmc->hold_reg_flag)
2219 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2221 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait up to 500ms for the controller to latch the command. */
2223 timeout = jiffies + msecs_to_jiffies(500);
2226 ret = time_before(jiffies, timeout);
2227 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2231 MMC_DBG_ERR_FUNC(host->mmc,
2232 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2233 __func__, mmc_hostname(host->mmc));
2236 send_stop_abort(host, data);
2238 state = STATE_DATA_ERROR;
2242 MMC_DBG_CMD_FUNC(host->mmc,
2243 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2244 prev_state,state, mmc_hostname(host->mmc));
2246 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2247 &host->pending_events))
2249 MMC_DBG_INFO_FUNC(host->mmc,
2250 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2251 prev_state,state,mmc_hostname(host->mmc));
2253 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2254 prev_state = state = STATE_DATA_BUSY;
2257 case STATE_DATA_BUSY:
2258 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2259 &host->pending_events))
2262 dw_mci_deal_data_end(host, host->mrq);
2263 del_timer_sync(&host->dto_timer); //delete the timer for INT_DTO
2264 MMC_DBG_INFO_FUNC(host->mmc,
2265 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2266 prev_state,state,mmc_hostname(host->mmc));
2269 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2270 status = host->data_status;
/* Translate latched data-phase error bits into data->error. */
2272 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2273 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2274 MMC_DBG_ERR_FUNC(host->mmc,
2275 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2276 prev_state,state, status, mmc_hostname(host->mmc));
2278 if (status & SDMMC_INT_DRTO) {
2279 data->error = -ETIMEDOUT;
2280 } else if (status & SDMMC_INT_DCRC) {
2281 data->error = -EILSEQ;
2282 } else if (status & SDMMC_INT_EBE &&
2283 host->dir_status == DW_MCI_SEND_STATUS){
2285 * No data CRC status was returned.
2286 * The number of bytes transferred will
2287 * be exaggerated in PIO mode.
2289 data->bytes_xfered = 0;
2290 data->error = -ETIMEDOUT;
2299 * After an error, there may be data lingering
2300 * in the FIFO, so reset it - doing so
2301 * generates a block interrupt, hence setting
2302 * the scatter-gather pointer to NULL.
2304 dw_mci_fifo_reset(host);
2306 data->bytes_xfered = data->blocks * data->blksz;
2311 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2312 prev_state,state,mmc_hostname(host->mmc));
2313 dw_mci_request_end(host, host->mrq);
2316 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2317 prev_state,state,mmc_hostname(host->mmc));
/* With SBC (auto CMD23) no explicit stop is needed on success. */
2319 if (host->mrq->sbc && !data->error) {
2320 data->stop->error = 0;
2322 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2323 prev_state,state,mmc_hostname(host->mmc));
2325 dw_mci_request_end(host, host->mrq);
2329 prev_state = state = STATE_SENDING_STOP;
2331 send_stop_cmd(host, data);
2333 if (data->stop && !data->error) {
2334 /* stop command for open-ended transfer*/
2336 send_stop_abort(host, data);
2340 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2341 prev_state,state,mmc_hostname(host->mmc));
2343 case STATE_SENDING_STOP:
2344 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2347 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2348 prev_state, state, mmc_hostname(host->mmc));
2350 /* CMD error in data command */
2351 if (host->mrq->cmd->error && host->mrq->data) {
2352 dw_mci_fifo_reset(host);
2358 dw_mci_command_complete(host, host->mrq->stop);
2360 if (host->mrq->stop)
2361 dw_mci_command_complete(host, host->mrq->stop);
2363 host->cmd_status = 0;
2366 dw_mci_request_end(host, host->mrq);
2369 case STATE_DATA_ERROR:
2370 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2371 &host->pending_events))
2374 state = STATE_DATA_BUSY;
/* Loop until a pass makes no state transition. */
2377 } while (state != prev_state);
2379 host->state = state;
2381 spin_unlock(&host->lock);
2385 /* push final bytes to part_buf, only use during push */
2386 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2388 memcpy((void *)&host->part_buf, buf, cnt);
2389 host->part_buf_count = cnt;
2392 /* append bytes to part_buf, only use during push */
2393 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2395 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2396 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2397 host->part_buf_count += cnt;
2401 /* pull first bytes from part_buf, only use during pull */
2402 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2404 cnt = min(cnt, (int)host->part_buf_count);
2406 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2408 host->part_buf_count -= cnt;
2409 host->part_buf_start += cnt;
2414 /* pull final bytes from the part_buf, assuming it's just been filled */
2415 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2417 memcpy(buf, &host->part_buf, cnt);
2418 host->part_buf_start = cnt;
2419 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * dw_mci_push_data16 - PIO write path for a 16-bit FIFO: flush any
 * partial word left in part_buf, bounce unaligned buffers through an
 * aligned staging array when the arch lacks efficient unaligned
 * access, write whole 16-bit words, and park any trailing byte in
 * part_buf (flushing it if this completes the expected length).
 */
2422 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2424 struct mmc_data *data = host->data;
2427 /* try and push anything in the part_buf */
2428 if (unlikely(host->part_buf_count)) {
2429 int len = dw_mci_push_part_bytes(host, buf, cnt);
2432 if (host->part_buf_count == 2) {
2433 mci_writew(host, DATA(host->data_offset),
2435 host->part_buf_count = 0;
2438 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2439 if (unlikely((unsigned long)buf & 0x1)) {
2441 u16 aligned_buf[64];
2442 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2443 int items = len >> 1;
2445 /* memcpy from input buffer into aligned buffer */
2446 memcpy(aligned_buf, buf, len);
2449 /* push data from aligned buffer into fifo */
2450 for (i = 0; i < items; ++i)
2451 mci_writew(host, DATA(host->data_offset),
2458 for (; cnt >= 2; cnt -= 2)
2459 mci_writew(host, DATA(host->data_offset), *pdata++);
2462 /* put anything remaining in the part_buf */
2464 dw_mci_set_part_bytes(host, buf, cnt);
2465 /* Push data if we have reached the expected data length */
2466 if ((data->bytes_xfered + init_cnt) ==
2467 (data->blksz * data->blocks))
2468 mci_writew(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data16 - PIO read path for a 16-bit FIFO: bounce
 * unaligned destinations through an aligned staging array when needed,
 * read whole 16-bit words, and capture a trailing odd byte via
 * part_buf16 / dw_mci_pull_final_bytes.
 */
2473 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2475 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2476 if (unlikely((unsigned long)buf & 0x1)) {
2478 /* pull data from fifo into aligned buffer */
2479 u16 aligned_buf[64];
2480 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2481 int items = len >> 1;
2483 for (i = 0; i < items; ++i)
2484 aligned_buf[i] = mci_readw(host,
2485 DATA(host->data_offset));
2486 /* memcpy from aligned buffer into output buffer */
2487 memcpy(buf, aligned_buf, len);
2495 for (; cnt >= 2; cnt -= 2)
2496 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Odd trailing byte: read one more word and keep the leftover. */
2500 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2501 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_push_data32 - PIO write path for a 32-bit FIFO; same
 * structure as the 16-bit variant with 4-byte words (part_buf flush,
 * unaligned bounce buffer, bulk word writes, trailing-byte parking).
 */
2505 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2507 struct mmc_data *data = host->data;
2510 /* try and push anything in the part_buf */
2511 if (unlikely(host->part_buf_count)) {
2512 int len = dw_mci_push_part_bytes(host, buf, cnt);
2515 if (host->part_buf_count == 4) {
2516 mci_writel(host, DATA(host->data_offset),
2518 host->part_buf_count = 0;
2521 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2522 if (unlikely((unsigned long)buf & 0x3)) {
2524 u32 aligned_buf[32];
2525 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2526 int items = len >> 2;
2528 /* memcpy from input buffer into aligned buffer */
2529 memcpy(aligned_buf, buf, len);
2532 /* push data from aligned buffer into fifo */
2533 for (i = 0; i < items; ++i)
2534 mci_writel(host, DATA(host->data_offset),
2541 for (; cnt >= 4; cnt -= 4)
2542 mci_writel(host, DATA(host->data_offset), *pdata++);
2545 /* put anything remaining in the part_buf */
2547 dw_mci_set_part_bytes(host, buf, cnt);
2548 /* Push data if we have reached the expected data length */
2549 if ((data->bytes_xfered + init_cnt) ==
2550 (data->blksz * data->blocks))
2551 mci_writel(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data32() - read @cnt bytes from the data FIFO in 32-bit units.
 *
 * Misaligned destinations go through an on-stack aligned bounce buffer.
 * Up to three trailing bytes are latched in host->part_buf32 and delivered
 * by dw_mci_pull_final_bytes().
 */
2556 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2558 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2559 if (unlikely((unsigned long)buf & 0x3)) {
2561 /* pull data from fifo into aligned buffer */
2562 u32 aligned_buf[32];
2563 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2564 int items = len >> 2;
2566 for (i = 0; i < items; ++i)
2567 aligned_buf[i] = mci_readl(host,
2568 DATA(host->data_offset));
2569 /* memcpy from aligned buffer into output buffer */
2570 memcpy(buf, aligned_buf, len);
/* aligned fast path: drain the FIFO four bytes at a time */
2578 for (; cnt >= 4; cnt -= 4)
2579 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* trailing partial word: read one more FIFO word, flush what buf needs */
2583 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2584 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_push_data64() - write @cnt bytes from @buf to the data FIFO in
 * 64-bit units.
 *
 * Mirrors dw_mci_push_data32(): flush any buffered partial word first,
 * stream full 64-bit words (bouncing through an aligned stack buffer when
 * @buf is misaligned), then stash the remainder in part_buf — pushed out
 * immediately only at the very end of the transfer.
 */
2588 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2590 struct mmc_data *data = host->data;
2593 /* try and push anything in the part_buf */
2594 if (unlikely(host->part_buf_count)) {
2595 int len = dw_mci_push_part_bytes(host, buf, cnt);
2599 if (host->part_buf_count == 8) {
2600 mci_writeq(host, DATA(host->data_offset),
2602 host->part_buf_count = 0;
2605 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2606 if (unlikely((unsigned long)buf & 0x7)) {
2608 u64 aligned_buf[16];
2609 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2610 int items = len >> 3;
2612 /* memcpy from input buffer into aligned buffer */
2613 memcpy(aligned_buf, buf, len);
2616 /* push data from aligned buffer into fifo */
2617 for (i = 0; i < items; ++i)
2618 mci_writeq(host, DATA(host->data_offset),
/* aligned fast path: write full 64-bit words straight from buf */
2625 for (; cnt >= 8; cnt -= 8)
2626 mci_writeq(host, DATA(host->data_offset), *pdata++);
2629 /* put anything remaining in the part_buf */
2631 dw_mci_set_part_bytes(host, buf, cnt);
2632 /* Push data if we have reached the expected data length */
2633 if ((data->bytes_xfered + init_cnt) ==
2634 (data->blksz * data->blocks))
2635 mci_writeq(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data64() - read @cnt bytes from the data FIFO in 64-bit units.
 *
 * Misaligned destinations go through an on-stack aligned bounce buffer.
 * Up to seven trailing bytes are latched in host->part_buf and delivered
 * by dw_mci_pull_final_bytes().
 */
2640 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2642 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2643 if (unlikely((unsigned long)buf & 0x7)) {
2645 /* pull data from fifo into aligned buffer */
2646 u64 aligned_buf[16];
2647 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2648 int items = len >> 3;
2650 for (i = 0; i < items; ++i)
2651 aligned_buf[i] = mci_readq(host,
2652 DATA(host->data_offset));
2653 /* memcpy from aligned buffer into output buffer */
2654 memcpy(buf, aligned_buf, len);
/* aligned fast path: drain the FIFO eight bytes at a time */
2662 for (; cnt >= 8; cnt -= 8)
2663 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* trailing partial word: read one more FIFO word, flush what buf needs */
2667 host->part_buf = mci_readq(host, DATA(host->data_offset));
2668 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_pull_data() - common entry point for PIO reads.
 *
 * Drains bytes buffered in host->part_buf first; if that alone satisfies
 * @cnt, returns early, otherwise delegates the remainder to the FIFO-width
 * specific handler installed in host->pull_data (16/32/64-bit variants).
 */
2672 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2676 /* get remaining partial bytes */
2677 len = dw_mci_pull_part_bytes(host, buf, cnt);
2678 if (unlikely(len == cnt))
2683 /* get the rest of the data */
2684 host->pull_data(host, buf, cnt);
/*
 * dw_mci_read_data_pio() - PIO receive path, called from interrupt context.
 *
 * Walks the request's scatterlist with an sg_mapping_iter, pulling as much
 * as the FIFO currently holds into each segment and accounting it in
 * data->bytes_xfered. Loops while the controller keeps raising RXDR (FIFO
 * above watermark), or — when @dto is set (data-over) — until the FIFO is
 * fully drained. On sg exhaustion, EVENT_XFER_COMPLETE is flagged.
 */
2687 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2689 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2691 unsigned int offset;
2692 struct mmc_data *data = host->data;
2693 int shift = host->data_shift;
2696 unsigned int remain, fcnt;
/* vendor sanity check: bail out if the bus reference count already hit 0 */
2698 if(!host->mmc->bus_refs){
2699 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2703 if (!sg_miter_next(sg_miter))
2706 host->sg = sg_miter->piter.sg;
2707 buf = sg_miter->addr;
2708 remain = sg_miter->length;
/* bytes available = FIFO fill count scaled to bytes + buffered partials */
2712 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2713 << shift) + host->part_buf_count;
2714 len = min(remain, fcnt);
2717 dw_mci_pull_data(host, (void *)(buf + offset), len);
2718 data->bytes_xfered += len;
2723 sg_miter->consumed = offset;
2724 status = mci_readl(host, MINTSTS);
2725 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2726 /* if the RXDR is ready read again */
2727 } while ((status & SDMMC_INT_RXDR) ||
2728 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2731 if (!sg_miter_next(sg_miter))
2733 sg_miter->consumed = 0;
2735 sg_miter_stop(sg_miter);
2739 sg_miter_stop(sg_miter);
2743 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_write_data_pio() - PIO transmit path, called from interrupt context.
 *
 * Mirror image of dw_mci_read_data_pio(): walks the scatterlist and pushes
 * as many bytes as there is free FIFO space for, looping while the
 * controller keeps raising TXDR (FIFO below watermark). On sg exhaustion,
 * EVENT_XFER_COMPLETE is flagged.
 */
2746 static void dw_mci_write_data_pio(struct dw_mci *host)
2748 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2750 unsigned int offset;
2751 struct mmc_data *data = host->data;
2752 int shift = host->data_shift;
2755 unsigned int fifo_depth = host->fifo_depth;
2756 unsigned int remain, fcnt;
/* vendor sanity check: bail out if the bus reference count already hit 0 */
2758 if(!host->mmc->bus_refs){
2759 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2764 if (!sg_miter_next(sg_miter))
2767 host->sg = sg_miter->piter.sg;
2768 buf = sg_miter->addr;
2769 remain = sg_miter->length;
/* free space = (depth - fill count) scaled to bytes, minus buffered partials */
2773 fcnt = ((fifo_depth -
2774 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2775 << shift) - host->part_buf_count;
2776 len = min(remain, fcnt);
2779 host->push_data(host, (void *)(buf + offset), len);
2780 data->bytes_xfered += len;
2785 sg_miter->consumed = offset;
2786 status = mci_readl(host, MINTSTS);
2787 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2788 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2791 if (!sg_miter_next(sg_miter))
2793 sg_miter->consumed = 0;
2795 sg_miter_stop(sg_miter);
2799 sg_miter_stop(sg_miter);
2803 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_cmd_interrupt() - handle a command-done (or VSI) interrupt.
 *
 * Records the first raw interrupt status into host->cmd_status, arms the
 * data-over watchdog (dto_timer) scaled by the transfer size — one 4 s
 * slice per 2 MiB plus a retry allowance — and kicks the state-machine
 * tasklet with EVENT_CMD_COMPLETE. The timer is skipped for CMD12
 * (STOP_TRANSMISSION), which carries no data phase of its own.
 */
2806 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2808 u32 multi, unit = SZ_2M;
/* keep only the first status; later ones would overwrite the real cause */
2810 if (!host->cmd_status)
2811 host->cmd_status = status;
2816 if ((MMC_STOP_TRANSMISSION != host->cmd->opcode)) {
2817 multi = (mci_readl(host, BYTCNT) / unit) +
2818 ((mci_readl(host, BYTCNT) % unit) ? 1 :0 ) +
2819 ((host->cmd->retries > 2) ? 2 : host->cmd->retries);
2820 /* Max limit time: 8s for dto */
2821 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(4000 * multi));
2826 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2827 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_interrupt() - top-level IRQ handler for the controller.
 *
 * Reads the masked interrupt status (MINTSTS), then services each pending
 * source in turn: command/data errors, data-over, RX/TX FIFO watermarks
 * (PIO), voltage-switch (VSI), command-done, card-detect, hardware-locked
 * errors, per-slot SDIO interrupts, and finally internal-DMA (IDMAC)
 * completion on SoCs that use it. Each source is acked by writing RINTSTS.
 */
2830 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2832 struct dw_mci *host = dev_id;
2833 u32 pending, sdio_int;
2836 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2839 * DTO fix - version 2.10a and below, and only if internal DMA
2842 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2844 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2845 pending |= SDMMC_INT_DATA_OVER;
2849 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2850 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2851 host->cmd_status = pending;
2853 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2854 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2856 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2859 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2860 /* if there is an error report DATA_ERROR */
2861 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2862 host->data_status = pending;
2864 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2866 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2867 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2868 tasklet_schedule(&host->tasklet);
2871 if (pending & SDMMC_INT_DATA_OVER) {
2872 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2873 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2874 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2875 if (!host->data_status)
2876 host->data_status = pending;
/* for a PIO read still in flight, drain the remaining FIFO contents now */
2878 if (host->dir_status == DW_MCI_RECV_STATUS) {
2879 if (host->sg != NULL)
2880 dw_mci_read_data_pio(host, true);
2882 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2883 tasklet_schedule(&host->tasklet);
2886 if (pending & SDMMC_INT_RXDR) {
2887 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2888 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2889 dw_mci_read_data_pio(host, false);
2892 if (pending & SDMMC_INT_TXDR) {
2893 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2894 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2895 dw_mci_write_data_pio(host);
2898 if (pending & SDMMC_INT_VSI) {
2899 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2900 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2901 dw_mci_cmd_interrupt(host, pending);
2904 if (pending & SDMMC_INT_CMD_DONE) {
2905 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2906 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2907 dw_mci_cmd_interrupt(host, pending);
2910 if (pending & SDMMC_INT_CD) {
2911 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2912 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
/* hold a wakelock so the detect work can run before the system suspends */
2913 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2914 queue_work(host->card_workqueue, &host->card_work);
2917 if (pending & SDMMC_INT_HLE) {
2918 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2919 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2923 /* Handle SDIO Interrupts */
2924 for (i = 0; i < host->num_slots; i++) {
2925 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in IP version 2.40a and later */
2927 if (host->verid < DW_MMC_240A)
2928 sdio_int = SDMMC_INT_SDIO(i);
2930 sdio_int = SDMMC_INT_SDIO(i + 8);
2932 if (pending & sdio_int) {
2933 mci_writel(host, RINTSTS, sdio_int);
2934 mmc_signal_sdio_irq(slot->mmc);
2940 #ifdef CONFIG_MMC_DW_IDMAC
2941 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2942 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2943 /* Handle DMA interrupts */
2944 pending = mci_readl(host, IDSTS);
2945 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2946 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2947 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2948 host->dma_ops->complete((void *)host);
/*
 * dw_mci_work_routine_card() - workqueue handler for card insert/remove.
 *
 * For each slot: re-reads card presence, optionally flips the SD pads
 * between mmc function and uart-debug (only on parts with a "udbg"
 * pinctrl state), and on a detect-state change resets the controller,
 * fails any in-flight request with -ENOMEDIUM, clears the FIFO/IDMAC,
 * and finally notifies the MMC core via mmc_detect_change().
 */
2956 static void dw_mci_work_routine_card(struct work_struct *work)
2958 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2961 for (i = 0; i < host->num_slots; i++) {
2962 struct dw_mci_slot *slot = host->slot[i];
2963 struct mmc_host *mmc = slot->mmc;
2964 struct mmc_request *mrq;
2967 present = dw_mci_get_cd(mmc);
2969 /* Card insert, switch data line to uart function, and vice verse.
2970 * ONLY audi chip need switched by software, using udbg tag in dts!
2972 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
2974 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
2975 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
2976 mmc_hostname(host->mmc));
2978 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
2979 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
2980 mmc_hostname(host->mmc));
/* loop until the recorded detect state matches the live presence reading */
2984 while (present != slot->last_detect_state) {
2985 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2986 present ? "inserted" : "removed");
2987 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
2988 present ? "inserted" : "removed.", mmc_hostname(mmc));
2990 dw_mci_ctrl_all_reset(host);
2991 /* Stop edma when rountine card triggered */
2992 if(cpu_is_rk3036() || cpu_is_rk312x())
2993 if(host->dma_ops && host->dma_ops->stop)
2994 host->dma_ops->stop(host);
2995 rk_send_wakeup_key();//wake up system
2996 spin_lock_bh(&host->lock);
2998 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2999 /* Card change detected */
3000 slot->last_detect_state = present;
3002 /* Clean up queue if present */
3005 if (mrq == host->mrq) {
/* the request is currently being processed: fail it according to state */
3009 switch (host->state) {
3012 case STATE_SENDING_CMD:
3013 mrq->cmd->error = -ENOMEDIUM;
3017 case STATE_SENDING_DATA:
3018 mrq->data->error = -ENOMEDIUM;
3019 dw_mci_stop_dma(host);
3021 case STATE_DATA_BUSY:
3022 case STATE_DATA_ERROR:
3023 if (mrq->data->error == -EINPROGRESS)
3024 mrq->data->error = -ENOMEDIUM;
3028 case STATE_SENDING_STOP:
3029 mrq->stop->error = -ENOMEDIUM;
3033 dw_mci_request_end(host, mrq);
/* request was only queued: dequeue it and fail every stage */
3035 list_del(&slot->queue_node);
3036 mrq->cmd->error = -ENOMEDIUM;
3038 mrq->data->error = -ENOMEDIUM;
3040 mrq->stop->error = -ENOMEDIUM;
3042 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3043 mrq->cmd->opcode, mmc_hostname(mmc));
3045 spin_unlock(&host->lock);
3046 mmc_request_done(slot->mmc, mrq);
3047 spin_lock(&host->lock);
3051 /* Power down slot */
3053 /* Clear down the FIFO */
3054 dw_mci_fifo_reset(host);
3055 #ifdef CONFIG_MMC_DW_IDMAC
3056 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3057 dw_mci_idmac_reset(host);
3062 spin_unlock_bh(&host->lock);
/* re-sample presence; a fast remove/insert re-enters the while loop */
3064 present = dw_mci_get_cd(mmc);
3067 mmc_detect_change(slot->mmc,
3068 msecs_to_jiffies(host->pdata->detect_delay_ms));
3073 /* given a slot id, find out the device node representing that slot */
3074 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3076 struct device_node *np;
3080 if (!dev || !dev->of_node)
/* scan children of the controller node for a "reg" property matching @slot */
3083 for_each_child_of_node(dev->of_node, np) {
3084 addr = of_get_property(np, "reg", &len);
3085 if (!addr || (len < sizeof(int)))
3087 if (be32_to_cpup(addr) == slot)
/*
 * Table mapping per-slot device-tree property names to DW_MCI_SLOT_QUIRK_*
 * flags, consumed by dw_mci_of_get_slot_quirks() below.
 */
3093 static struct dw_mci_of_slot_quirks {
3096 } of_slot_quirks[] = {
3098 .quirk = "disable-wp",
3099 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * dw_mci_of_get_slot_quirks() - collect quirk flags declared in the slot's
 * device-tree node. Returns the OR of all matching quirk ids.
 */
3103 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3105 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3110 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3111 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3112 quirks |= of_slot_quirks[idx].id;
3117 /* find out bus-width for a given slot */
3118 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
/* NOTE: reads "bus-width" from the controller node itself, not the
 * per-slot child node (the slot lookup is intentionally commented out). */
3120 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3126 if (of_property_read_u32(np, "bus-width", &bus_wd))
3127 dev_err(dev, "bus-width property not found, assuming width"
3133 /* find the pwr-en gpio for a given slot; or -1 if none specified */
3134 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3136 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3142 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3144 /* Having a missing entry is valid; return silently */
3145 if (!gpio_is_valid(gpio))
3148 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3149 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/* drive the power-enable line low initially (slot powered off at boot) */
3153 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3159 /* find the write protect gpio for a given slot; or -1 if none specified */
3160 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3162 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3168 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3170 /* Having a missing entry is valid; return silently */
3171 if (!gpio_is_valid(gpio))
3174 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3175 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3182 /* find the cd gpio for a given slot */
3183 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3184 struct mmc_host *mmc)
3186 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3192 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3194 /* Having a missing entry is valid; return silently */
3195 if (!gpio_is_valid(gpio))
/* register the pin with the mmc slot-gpio helper (debounce 0) */
3198 if (mmc_gpio_request_cd(mmc, gpio, 0))
3199 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * dw_mci_gpio_cd_irqt() - threaded IRQ handler for the card-detect GPIO.
 *
 * Always wakes the system; only queues the detect work when rescans are
 * enabled — during suspend, the resume path (pm notifier) handles it.
 */
3202 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3204 struct mmc_host *mmc = dev_id;
3205 struct dw_mci_slot *slot = mmc_priv(mmc);
3206 struct dw_mci *host = slot->host;
3208 /* wakeup system whether gpio debounce or not */
3209 rk_send_wakeup_key();
3211 /* no need to trigger detect flow when rescan is disabled.
3212 This case happended in dpm, that we just wakeup system and
3213 let suspend_post notify callback handle it.
3215 if(mmc->rescan_disable == 0)
3216 queue_work(host->card_workqueue, &host->card_work);
3218 printk("%s: rescan been disabled!\n", __FUNCTION__);
/*
 * dw_mci_of_set_cd_gpio_irq() - wire the card-detect GPIO up as a
 * both-edge threaded interrupt and mark it as a wakeup source so card
 * insertion/removal can wake the system from suspend.
 */
3223 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3224 struct mmc_host *mmc)
3226 struct dw_mci_slot *slot = mmc_priv(mmc);
3227 struct dw_mci *host = slot->host;
3231 /* Having a missing entry is valid; return silently */
3232 if (!gpio_is_valid(gpio))
3235 irq = gpio_to_irq(gpio);
3237 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3238 NULL, dw_mci_gpio_cd_irqt,
3239 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
3243 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3245 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3246 enable_irq_wake(irq);
3249 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/*
 * dw_mci_of_free_cd_gpio_irq() - undo dw_mci_of_set_cd_gpio_irq(): release
 * the card-detect interrupt and the GPIO itself.
 */
3253 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3254 struct mmc_host *mmc)
3256 if (!gpio_is_valid(gpio))
3259 if (gpio_to_irq(gpio) >= 0) {
3260 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3261 devm_gpio_free(&mmc->class_dev, gpio);
3264 #else /* CONFIG_OF */
/* Stubs used when the kernel is built without device-tree support. */
3265 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3269 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3273 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3277 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3281 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3282 struct mmc_host *mmc)
3286 #endif /* CONFIG_OF */
3288 /* @host: dw_mci host prvdata
3289 * Init pinctrl for each platform. Usually we assign
3290 * "default" tag for functional usage, "idle" tag for gpio
3291 * state and "udbg" tag for uart_dbg if any.
3293 static void dw_mci_init_pinctrl(struct dw_mci *host)
3295 /* Fixme: DON'T TOUCH EMMC SETTING! */
3296 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3299 /* Get pinctrl for DTS */
3300 host->pinctrl = devm_pinctrl_get(host->dev);
3301 if (IS_ERR(host->pinctrl)) {
3302 dev_err(host->dev, "%s: No pinctrl used!\n",
3303 mmc_hostname(host->mmc));
3307 /* Lookup idle state */
3308 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3309 PINCTRL_STATE_IDLE);
3310 if (IS_ERR(host->pins_idle)) {
3311 dev_err(host->dev, "%s: No idle tag found!\n",
3312 mmc_hostname(host->mmc));
3314 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3315 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3316 mmc_hostname(host->mmc));
3319 /* Lookup default state */
3320 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3321 PINCTRL_STATE_DEFAULT);
3322 if (IS_ERR(host->pins_default)) {
3323 dev_err(host->dev, "%s: No default pinctrl found!\n",
3324 mmc_hostname(host->mmc));
3326 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3327 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3328 mmc_hostname(host->mmc));
3331 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3332 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3333 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3334 if (IS_ERR(host->pins_udbg)) {
3335 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3336 mmc_hostname(host->mmc));
/* no card present at init: hand the SD pads over to uart debug */
3338 if (!dw_mci_get_cd(host->mmc))
3339 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3340 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3341 mmc_hostname(host->mmc));
/*
 * dw_mci_pm_notify() - system suspend/resume notifier for SD hosts.
 *
 * On suspend-prepare, disables rescans (so the gpio-cd IRQ only wakes the
 * system, see dw_mci_gpio_cd_irqt) and cancels pending detect work. On
 * post-suspend, re-enables rescans and triggers a delayed card detect to
 * catch any insertion/removal that happened while asleep.
 */
3346 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3347 unsigned long mode, void *unused)
3349 struct mmc_host *host = container_of(
3350 notify_block, struct mmc_host, pm_notify);
3351 unsigned long flags;
3354 case PM_HIBERNATION_PREPARE:
3355 case PM_SUSPEND_PREPARE:
3356 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3357 spin_lock_irqsave(&host->lock, flags);
3358 host->rescan_disable = 1;
3359 spin_unlock_irqrestore(&host->lock, flags);
3360 if (cancel_delayed_work(&host->detect))
3361 wake_unlock(&host->detect_wake_lock);
3364 case PM_POST_SUSPEND:
3365 case PM_POST_HIBERNATION:
3366 case PM_POST_RESTORE:
3367 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3368 spin_lock_irqsave(&host->lock, flags);
3369 host->rescan_disable = 0;
3370 spin_unlock_irqrestore(&host->lock, flags);
3371 mmc_detect_change(host, 10);
/*
 * dw_mci_init_slot() - allocate and register the mmc_host for slot @id.
 *
 * Sets up everything the MMC core needs for one slot: the mmc_host
 * structure, min/max clock from DT, card-type restrictions
 * (supports-sd/sdio/emmc/tSD), the PM notifier for SD, the gpio-based
 * card-detect IRQ on rk312x, OCR mask, capability flags from pdata / DT /
 * per-controller drv_data, block-size limits per DMA mode, power-enable
 * and write-protect GPIOs, the vmmc regulator, pinctrl, and finally
 * registers the host with mmc_add_host(). Error paths unwind the PM
 * notifier and the cd-gpio IRQ.
 */
3377 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3379 struct mmc_host *mmc;
3380 struct dw_mci_slot *slot;
3381 const struct dw_mci_drv_data *drv_data = host->drv_data;
3386 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3390 slot = mmc_priv(mmc);
3394 host->slot[id] = slot;
3397 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3399 mmc->ops = &dw_mci_ops;
/* clock range: DT "clock-freq-min-max" wins, else driver defaults */
3401 if (of_property_read_u32_array(host->dev->of_node,
3402 "clock-freq-min-max", freq, 2)) {
3403 mmc->f_min = DW_MCI_FREQ_MIN;
3404 mmc->f_max = DW_MCI_FREQ_MAX;
3406 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3407 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3409 mmc->f_min = freq[0];
3410 mmc->f_max = freq[1];
3412 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3413 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3416 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
/* record which card type this controller instance serves (from DT) */
3418 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3419 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3420 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3421 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3422 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3423 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3425 if (of_find_property(host->dev->of_node, "supports-tSD", NULL))
3426 mmc->restrict_caps |= RESTRICT_CARD_TYPE_TSD;
/* SD hosts register a PM notifier to gate rescans around suspend */
3428 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3429 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3430 if (register_pm_notifier(&mmc->pm_notify)) {
3431 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3432 goto err_pm_notifier;
3436 /* We assume only low-level chip use gpio_cd */
3437 if (cpu_is_rk312x() &&
3439 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3440 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3441 if (gpio_is_valid(slot->cd_gpio)) {
3442 /* Request gpio int for card detection */
3443 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3445 slot->cd_gpio = -ENODEV;
3446 dev_err(host->dev, "failed to get your cd-gpios!\n");
3450 if (host->pdata->get_ocr)
3451 mmc->ocr_avail = host->pdata->get_ocr(id);
/* default OCR: advertise the full 1.65-3.6 V range */
3454 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3455 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3456 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3457 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3461 * Start with slot power disabled, it will be enabled when a card
3464 if (host->pdata->setpower)
3465 host->pdata->setpower(id, 0);
3467 if (host->pdata->caps)
3468 mmc->caps = host->pdata->caps;
3470 if (host->pdata->pm_caps)
3471 mmc->pm_caps = host->pdata->pm_caps;
/* controller index: DT "mshc" alias, else platform-device id */
3473 if (host->dev->of_node) {
3474 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3478 ctrl_id = to_platform_device(host->dev)->id;
3480 if (drv_data && drv_data->caps)
3481 mmc->caps |= drv_data->caps[ctrl_id];
3482 if (drv_data && drv_data->hold_reg_flag)
3483 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3485 /* set the compatibility of driver. */
3486 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3487 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3489 if (host->pdata->caps2)
3490 mmc->caps2 = host->pdata->caps2;
/* bus width: pdata callback has priority, then DT */
3492 if (host->pdata->get_bus_wd)
3493 bus_width = host->pdata->get_bus_wd(slot->id);
3494 else if (host->dev->of_node)
3495 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3499 switch (bus_width) {
3501 mmc->caps |= MMC_CAP_8_BIT_DATA;
3503 mmc->caps |= MMC_CAP_4_BIT_DATA;
/* optional capability flags straight from device-tree properties */
3506 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3507 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3508 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3509 mmc->caps |= MMC_CAP_SDIO_IRQ;
3510 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3511 mmc->caps |= MMC_CAP_HW_RESET;
3512 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3513 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3514 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3515 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3516 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3517 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3518 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3519 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3521 /*Assign pm_caps pass to pm_flags*/
3522 mmc->pm_flags = mmc->pm_caps;
/* block limits: pdata override, else DMA-mode dependent defaults */
3524 if (host->pdata->blk_settings) {
3525 mmc->max_segs = host->pdata->blk_settings->max_segs;
3526 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3527 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3528 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3529 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3531 /* Useful defaults if platform data is unset. */
3532 #ifdef CONFIG_MMC_DW_IDMAC
3533 mmc->max_segs = host->ring_size;
3534 mmc->max_blk_size = 65536;
3535 mmc->max_blk_count = host->ring_size;
3536 mmc->max_seg_size = 0x1000;
3537 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3538 if(cpu_is_rk3036() || cpu_is_rk312x()){
3539 /* fixup for external dmac setting */
3541 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3542 mmc->max_blk_count = 65535;
3543 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3544 mmc->max_seg_size = mmc->max_req_size;
3548 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3549 mmc->max_blk_count = 512;
3550 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3551 mmc->max_seg_size = mmc->max_req_size;
3552 #endif /* CONFIG_MMC_DW_IDMAC */
3556 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3558 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* only SD slots are powered from a dedicated vmmc regulator here */
3563 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3564 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3568 if (IS_ERR(host->vmmc)) {
3569 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3572 ret = regulator_enable(host->vmmc);
3575 "failed to enable regulator: %d\n", ret);
3582 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3584 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3585 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3587 dw_mci_init_pinctrl(host);
3588 ret = mmc_add_host(mmc);
3592 #if defined(CONFIG_DEBUG_FS)
3593 dw_mci_init_debugfs(slot);
3596 /* Card initially undetected */
3597 slot->last_detect_state = 1;
/* error unwind: drop the PM notifier, then the cd-gpio interrupt */
3601 unregister_pm_notifier(&mmc->pm_notify);
3604 if (gpio_is_valid(slot->cd_gpio))
3605 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * dw_mci_cleanup_slot() - tear down one slot: run the platform exit hook,
 * unregister the mmc_host from the core and free it.
 */
3610 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3612 /* Shutdown detect IRQ */
3613 if (slot->host->pdata->exit)
3614 slot->host->pdata->exit(id);
3616 /* Debugfs stuff is cleaned up by mmc core */
3617 mmc_remove_host(slot->mmc);
3618 slot->host->slot[id] = NULL;
3619 mmc_free_host(slot->mmc);
/*
 * dw_mci_init_dma() - select and initialize the DMA backend.
 *
 * Allocates a page of coherent memory for descriptor/sg translation, then
 * picks the DMA ops: the external-dmac backend on rk3036/rk312x, the
 * internal IDMAC elsewhere (when CONFIG_MMC_DW_IDMAC is set). Falls back
 * to PIO if the selected backend is incomplete or fails to initialize.
 */
3622 static void dw_mci_init_dma(struct dw_mci *host)
3624 /* Alloc memory for sg translation */
3625 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3626 &host->sg_dma, GFP_KERNEL);
3627 if (!host->sg_cpu) {
3628 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3633 /* Determine which DMA interface to use */
3634 #if defined(CONFIG_MMC_DW_IDMAC)
3635 if(cpu_is_rk3036() || cpu_is_rk312x()){
3636 host->dma_ops = &dw_mci_edmac_ops;
3637 dev_info(host->dev, "Using external DMA controller.\n");
3639 host->dma_ops = &dw_mci_idmac_ops;
3640 dev_info(host->dev, "Using internal DMA controller.\n");
/* require a complete ops vtable before committing to DMA mode */
3647 if (host->dma_ops->init && host->dma_ops->start &&
3648 host->dma_ops->stop && host->dma_ops->cleanup) {
3649 if (host->dma_ops->init(host)) {
3650 dev_err(host->dev, "%s: Unable to initialize "
3651 "DMA Controller.\n", __func__);
3655 dev_err(host->dev, "DMA initialization not found.\n");
3663 dev_info(host->dev, "Using PIO mode.\n");
/*
 * dw_mci_ctrl_reset() - assert the given CTRL reset bits and poll (up to
 * 500 ms) for the hardware to clear them. Returns false on timeout.
 */
3668 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3670 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3673 ctrl = mci_readl(host, CTRL);
3675 mci_writel(host, CTRL, ctrl);
3677 /* wait till resets clear */
3679 ctrl = mci_readl(host, CTRL);
3680 if (!(ctrl & reset))
3682 } while (time_before(jiffies, timeout));
3685 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * dw_mci_fifo_reset() - reset only the FIFO block; stops the sg iterator
 * first since the reset raises a block interrupt.
 * dw_mci_ctrl_all_reset() - full reset: FIFO, controller and DMA blocks.
 */
3691 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3694 * Reseting generates a block interrupt, hence setting
3695 * the scatter-gather pointer to NULL.
3698 sg_miter_stop(&host->sg_miter);
3702 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
3705 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3707 return dw_mci_ctrl_reset(host,
3708 SDMMC_CTRL_FIFO_RESET |
3710 SDMMC_CTRL_DMA_RESET);
/*
 * Table mapping controller-level device-tree properties to DW_MCI_QUIRK_*
 * flags, consumed by dw_mci_parse_dt().
 */
3715 static struct dw_mci_of_quirks {
3720 .quirk = "broken-cd",
3721 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * dw_mci_parse_dt() - build a dw_mci_board platform-data struct from the
 * controller's device-tree node.
 *
 * Reads slot count, quirk properties, fifo-depth, card-detect delay and
 * bus clock, runs the per-SoC drv_data->parse_dt() hook, then translates
 * the various capability properties into caps/caps2/pm_caps bits.
 * Returns an ERR_PTR on allocation or parse_dt failure.
 */
3725 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3727 struct dw_mci_board *pdata;
3728 struct device *dev = host->dev;
3729 struct device_node *np = dev->of_node;
3730 const struct dw_mci_drv_data *drv_data = host->drv_data;
3732 u32 clock_frequency;
3734 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3736 dev_err(dev, "could not allocate memory for pdata\n");
3737 return ERR_PTR(-ENOMEM);
3740 /* find out number of slots supported */
3741 if (of_property_read_u32(dev->of_node, "num-slots",
3742 &pdata->num_slots)) {
3743 dev_info(dev, "num-slots property not found, "
3744 "assuming 1 slot is available\n");
3745 pdata->num_slots = 1;
/* translate quirk properties via the of_quirks table above */
3749 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3750 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3751 pdata->quirks |= of_quirks[idx].id;
3754 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3755 dev_info(dev, "fifo-depth property not found, using "
3756 "value of FIFOTH register as default\n");
3758 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3760 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3761 pdata->bus_hz = clock_frequency;
/* give the SoC-specific driver a chance to parse its own properties */
3763 if (drv_data && drv_data->parse_dt) {
3764 ret = drv_data->parse_dt(host);
3766 return ERR_PTR(ret);
3769 if (of_find_property(np, "keep-power-in-suspend", NULL))
3770 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3772 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3773 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3775 if (of_find_property(np, "supports-highspeed", NULL))
3776 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3778 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3779 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3781 if (of_find_property(np, "supports-DDR_MODE", NULL))
3782 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3784 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3785 pdata->caps2 |= MMC_CAP2_HS200;
3787 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3788 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3790 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3791 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3793 if (of_get_property(np, "cd-inverted", NULL))
3794 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3795 if (of_get_property(np, "bootpart-no-access", NULL))
3796 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3801 #else /* CONFIG_OF */
/* Without device-tree support there is no pdata source: report -EINVAL. */
3802 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3804 return ERR_PTR(-EINVAL);
3806 #endif /* CONFIG_OF */
/*
 * dw_mci_dealwith_timeout() - recover the controller after a data-over
 * timeout (called from dw_mci_dto_timeout with the IRQ disabled).
 *
 * Marks the transfer failed (CRC/EBE status bits), performs a full
 * controller reset, re-initializes the IDMAC where applicable, restores
 * FIFOTH/TMOUT/INTMASK to their working values (preserving the SDIO
 * interrupt mask if it was enabled), and schedules the tasklet so the
 * state machine can complete the request with an error.
 */
3808 static void dw_mci_dealwith_timeout(struct dw_mci *host)
3813 dev_err(host->dev, "host->state = 0x%x\n", host->state);
3814 switch(host->state){
3817 case STATE_SENDING_DATA:
3818 case STATE_DATA_BUSY:
3819 host->data_status |= (SDMMC_INT_DCRC|SDMMC_INT_EBE);
3820 mci_writel(host, RINTSTS, SDMMC_INT_DRTO); // clear interrupt
3821 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3822 host->state = STATE_DATA_BUSY;
3823 if (!dw_mci_ctrl_all_reset(host)) {
3824 dev_err(host->dev, "dto: ctrl_all_reset failed!\n");
3828 /* NO requirement to reclaim slave chn using external dmac */
3829 #ifdef CONFIG_MMC_DW_IDMAC
3830 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3831 if (host->use_dma && host->dma_ops->init)
3832 host->dma_ops->init(host);
3836 * Restore the initial value at FIFOTH register
3837 * And Invalidate the prev_blksz with zero
3839 mci_writel(host, FIFOTH, host->fifoth_val);
3840 host->prev_blksz = 0;
3841 mci_writel(host, TMOUT, 0xFFFFFFFF);
3842 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3843 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
3844 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
/* card-detect interrupt only makes sense for non-SDIO hosts */
3845 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3846 regs |= SDMMC_INT_CD;
3848 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)) {
3849 if (host->verid < DW_MMC_240A)
3850 sdio_int = SDMMC_INT_SDIO(0);
3852 sdio_int = SDMMC_INT_SDIO(8);
/* keep the SDIO irq enabled if it was enabled before the reset */
3854 if (mci_readl(host, INTMASK) & sdio_int)
3858 mci_writel(host, INTMASK, regs);
3859 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3860 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3861 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_dto_timeout() - timer callback armed per data transfer; fires
 * when the DATA_OVER interrupt never arrived.  Masks the host IRQ while
 * dw_mci_dealwith_timeout() rewrites controller state, then re-enables it.
 */
3867 static void dw_mci_dto_timeout(unsigned long host_data)
3869 struct dw_mci *host = (struct dw_mci *) host_data;
3871 disable_irq(host->irq);
3873 dev_err(host->dev, "data_over interrupt timeout!\n");
/* Report the stall as an end-bit error so the completion path fails the
 * request instead of waiting forever. */
3874 host->data_status = SDMMC_INT_EBE;
3875 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3876 dw_mci_dealwith_timeout(host);
3878 enable_irq(host->irq);
/*
 * dw_mci_probe() - one-time host controller initialisation.
 *
 * Parses platform data / device tree, claims and enables the AHB (hclk)
 * and card-interface (CIU) clocks, sizes the data-port width and FIFO
 * from HCON/FIFOTH, resets the controller, sets up DMA, the interrupt
 * handler, the card workqueue and the data-over watchdog timer, then
 * registers each slot with the MMC core before unmasking interrupts.
 *
 * Returns 0 on success or a negative errno; on failure the visible
 * unwind path below releases resources in reverse acquisition order.
 */
3881 int dw_mci_probe(struct dw_mci *host)
3883 const struct dw_mci_drv_data *drv_data = host->drv_data;
3884 int width, i, ret = 0;
3890 host->pdata = dw_mci_parse_dt(host);
3891 if (IS_ERR(host->pdata)) {
3892 dev_err(host->dev, "platform data not available\n");
/* More than one slot requires a board-supplied slot-select callback. */
3897 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3899 "Platform data must supply select_slot function\n");
3904 * In 2.40a spec, Data offset is changed.
3905 * Need to check the version-id and set data-offset for DATA register.
3907 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3908 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3910 if (host->verid < DW_MMC_240A)
3911 host->data_offset = DATA_OFFSET;
3913 host->data_offset = DATA_240A_OFFSET;
/* AHB bus clock: must be running for register access. */
3916 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3917 if (IS_ERR(host->hclk_mmc)) {
3918 dev_err(host->dev, "failed to get hclk_mmc\n");
3919 ret = PTR_ERR(host->hclk_mmc);
3923 clk_prepare_enable(host->hclk_mmc);
/* Card-interface clock that actually drives the card bus. */
3926 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3927 if (IS_ERR(host->clk_mmc)) {
3928 dev_err(host->dev, "failed to get clk mmc_per\n");
3929 ret = PTR_ERR(host->clk_mmc);
3933 host->bus_hz = host->pdata->bus_hz;
3934 if (!host->bus_hz) {
3935 dev_err(host->dev,"Platform data must supply bus speed\n");
3940 if (host->verid < DW_MMC_240A)
3941 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3943 /* rockchip: a fixed divide-by-2 sits in the clock tree before the controller, so request double the target rate */
3944 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3947 dev_err(host->dev, "failed to set clk mmc\n");
3950 clk_prepare_enable(host->clk_mmc);
/* Give the SoC-specific glue a chance to finish clock setup. */
3952 if (drv_data && drv_data->setup_clock) {
3953 ret = drv_data->setup_clock(host);
3956 "implementation specific clock setup failed\n");
3961 host->quirks = host->pdata->quirks;
3962 host->irq_state = true;
3963 host->set_speed = 0;
3965 host->svi_flags = 0;
3967 spin_lock_init(&host->lock);
3968 spin_lock_init(&host->slock);
3970 INIT_LIST_HEAD(&host->queue);
3972 * Get the host data width - this assumes that HCON has been set with
3973 * the correct values.
3975 i = (mci_readl(host, HCON) >> 7) & 0x7;
3977 host->push_data = dw_mci_push_data16;
3978 host->pull_data = dw_mci_pull_data16;
3980 host->data_shift = 1;
3981 } else if (i == 2) {
3982 host->push_data = dw_mci_push_data64;
3983 host->pull_data = dw_mci_pull_data64;
3985 host->data_shift = 3;
3987 /* Check for a reserved value, and warn if it is */
3989 "HCON reports a reserved host data width!\n"
3990 "Defaulting to 32-bit access.\n");
3991 host->push_data = dw_mci_push_data32;
3992 host->pull_data = dw_mci_pull_data32;
3994 host->data_shift = 2;
3997 /* Reset all blocks */
3998 if (!dw_mci_ctrl_all_reset(host))
4001 host->dma_ops = host->pdata->dma_ops;
4002 dw_mci_init_dma(host);
4004 /* Clear the interrupts for the host controller */
4005 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4006 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4008 /* Put in max timeout */
4009 mci_writel(host, TMOUT, 0xFFFFFFFF);
4012 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
4013 * Tx Mark = fifo_size / 2 DMA Size = 8
4015 if (!host->pdata->fifo_depth) {
4017 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
4018 * have been overwritten by the bootloader, just like we're
4019 * about to do, so if you know the value for your hardware, you
4020 * should put it in the platform data.
4022 fifo_size = mci_readl(host, FIFOTH);
4023 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
4025 fifo_size = host->pdata->fifo_depth;
4027 host->fifo_depth = fifo_size;
4029 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
4030 mci_writel(host, FIFOTH, host->fifoth_val);
4032 /* disable clock to CIU */
4033 mci_writel(host, CLKENA, 0);
4034 mci_writel(host, CLKSRC, 0);
4036 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
4037 host->card_workqueue = alloc_workqueue("dw-mci-card",
4038 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
4039 if (!host->card_workqueue) {
4043 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
4044 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
4045 host->irq_flags, "dw-mci", host);
/* Slot count: platform data wins, otherwise read it back from HCON. */
4049 if (host->pdata->num_slots)
4050 host->num_slots = host->pdata->num_slots;
4052 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
/* Watchdog for transfers whose DATA_OVER interrupt never fires;
 * handled by dw_mci_dto_timeout(). */
4054 setup_timer(&host->dto_timer, dw_mci_dto_timeout, (unsigned long)host);
4055 /* We need at least one slot to succeed */
4056 for (i = 0; i < host->num_slots; i++) {
4057 ret = dw_mci_init_slot(host, i);
4059 dev_dbg(host->dev, "slot %d init failed\n", i);
4065 * Enable interrupts for command done, data over, data empty, card det,
4066 * receive ready and error such as transmit, receive timeout, crc error
4068 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4069 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
4070 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect only for removable media: neither SDIO nor eMMC hosts. */
4071 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
4072 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
4073 regs |= SDMMC_INT_CD;
4075 mci_writel(host, INTMASK, regs);
4077 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4079 dev_info(host->dev, "DW MMC controller at irq %d, "
4080 "%d bit host data width, "
4082 host->irq, width, fifo_size);
4085 dev_info(host->dev, "%d slots initialized\n", init_slots);
4087 dev_dbg(host->dev, "attempted to initialize %d slots, "
4088 "but failed on all\n", host->num_slots);
4093 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4094 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* Error unwind: release resources in reverse acquisition order. */
4099 destroy_workqueue(host->card_workqueue);
4102 if (host->use_dma && host->dma_ops->exit)
4103 host->dma_ops->exit(host);
4106 regulator_disable(host->vmmc);
4107 regulator_put(host->vmmc);
4111 if (!IS_ERR(host->clk_mmc))
4112 clk_disable_unprepare(host->clk_mmc);
4114 if (!IS_ERR(host->hclk_mmc))
4115 clk_disable_unprepare(host->hclk_mmc);
4119 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove() - tear the host down; the reverse of dw_mci_probe().
 *
 * Stops the DTO watchdog, masks and clears all interrupts, unregisters
 * every slot, gates the CIU clock, and releases the card workqueue, PM
 * notifier, DMA, card-detect GPIO IRQ, the vmmc regulator and both clocks.
 */
4121 void dw_mci_remove(struct dw_mci *host)
4123 struct mmc_host *mmc = host->mmc;
4124 struct dw_mci_slot *slot = mmc_priv(mmc);
4127 del_timer_sync(&host->dto_timer);
4129 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4130 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4132 for(i = 0; i < host->num_slots; i++){
4133 dev_dbg(host->dev, "remove slot %d\n", i);
4135 dw_mci_cleanup_slot(host->slot[i], i);
4138 /* disable clock to CIU */
4139 mci_writel(host, CLKENA, 0);
4140 mci_writel(host, CLKSRC, 0);
4142 destroy_workqueue(host->card_workqueue);
/* The PM notifier is only registered for SD-card hosts. */
4143 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4144 unregister_pm_notifier(&host->mmc->pm_notify);
4146 if(host->use_dma && host->dma_ops->exit)
4147 host->dma_ops->exit(host);
4149 if (gpio_is_valid(slot->cd_gpio))
4150 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4153 regulator_disable(host->vmmc);
4154 regulator_put(host->vmmc);
4156 if(!IS_ERR(host->clk_mmc))
4157 clk_disable_unprepare(host->clk_mmc);
4159 if(!IS_ERR(host->hclk_mmc))
4160 clk_disable_unprepare(host->hclk_mmc);
4162 EXPORT_SYMBOL(dw_mci_remove);
4166 #ifdef CONFIG_PM_SLEEP
4168 * TODO: we should probably disable the clock to the card in the suspend path.
4170 extern int get_wifi_chip_type(void);
/*
 * dw_mci_suspend() - system-sleep hook.
 *
 * Drops the vmmc regulator; for SD-card hosts it additionally masks the
 * controller IRQ, switches pins to their idle state, quiesces the
 * controller and reroutes card detect to a wakeup-capable GPIO IRQ.
 * NOTE(review): visible paths do not report failure upward — confirm
 * against the elided return statements.
 */
4171 int dw_mci_suspend(struct dw_mci *host)
/* ESP8089 SDIO Wi-Fi keeps power across suspend — nothing to do here. */
4173 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4174 (get_wifi_chip_type() == WIFI_ESP8089))
4178 regulator_disable(host->vmmc);
4180 /*only for sdmmc controller*/
4181 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4182 disable_irq(host->irq);
4183 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4184 MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
4185 mmc_hostname(host->mmc));
/* Quiesce the controller: clear status, mask everything, disable it. */
4187 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4188 mci_writel(host, INTMASK, 0x00);
4189 mci_writel(host, CTRL, 0x00);
4191 /* Soc rk3126 already in gpio_cd mode */
4192 if (!(cpu_is_rk312x() && soc_is_rk3126())) {
/* Switch card detect to a GPIO IRQ so card insertion can wake the system. */
4193 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4194 enable_irq_wake(host->mmc->slot.cd_irq);
4199 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume() - system resume handler; mirrors dw_mci_suspend().
 *
 * For SD hosts it restores the default pinctrl state and routes card
 * detect back to the controller (per-SoC GRF bit), re-enables the vmmc
 * regulator, resets and re-programs the controller (DMA, FIFOTH,
 * timeout, interrupt mask), re-enables the host IRQ and replays the
 * ios/bus setup for slots that kept power during sleep.
 */
4201 int dw_mci_resume(struct dw_mci *host)
4203 int i, ret, retry_cnt = 0;
4205 struct dw_mci_slot *slot;
/* ESP8089 SDIO Wi-Fi was never suspended — nothing to restore. */
4207 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4208 (get_wifi_chip_type() == WIFI_ESP8089))
/* Skip restore work when no SDIO card is actually present. */
4213 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
4214 slot = mmc_priv(host->mmc);
4215 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4219 /*only for sdmmc controller*/
4220 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4221 /* Soc rk3126 already in gpio_cd mode */
4222 if (!(cpu_is_rk312x() && soc_is_rk3126())) {
4223 disable_irq_wake(host->mmc->slot.cd_irq);
4224 mmc_gpio_free_cd(host->mmc);
4226 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4227 MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
4228 mmc_hostname(host->mmc));
/* Route card detect back to the controller via the SoC-specific GRF bit. */
4232 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4233 else if(cpu_is_rk3036())
4234 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4235 else if(cpu_is_rk312x())
4236 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4237 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4240 ret = regulator_enable(host->vmmc);
4243 "failed to enable regulator: %d\n", ret);
4248 if(!dw_mci_ctrl_all_reset(host)){
/* rk3036/rk312x use an external DMAC; only re-init internal DMA elsewhere. */
4253 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
4254 if(host->use_dma && host->dma_ops->init)
4255 host->dma_ops->init(host);
4258 * Restore the initial value at FIFOTH register
4259 * And Invalidate the prev_blksz with zero
4261 mci_writel(host, FIFOTH, host->fifoth_val);
4262 host->prev_blksz = 0;
4263 /* Put in max timeout */
4264 mci_writel(host, TMOUT, 0xFFFFFFFF);
4266 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4267 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
4269 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4270 regs |= SDMMC_INT_CD;
4271 mci_writel(host, INTMASK, regs);
4272 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4273 /*only for sdmmc controller*/
/* Re-enable the IRQ that suspend disabled (first pass only). */
4274 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
4275 enable_irq(host->irq);
/* Replay ios/bus configuration for slots that kept power in suspend. */
4278 for(i = 0; i < host->num_slots; i++){
4279 struct dw_mci_slot *slot = host->slot[i];
4282 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4283 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4284 dw_mci_setup_bus(slot, true);
4290 EXPORT_SYMBOL(dw_mci_resume);
4291 #endif /* CONFIG_PM_SLEEP */
/* Module entry point: only announces the driver; hosts are brought up
 * via dw_mci_probe() from the platform glue. */
4293 static int __init dw_mci_init(void)
4295 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
/* Module exit: nothing to undo beyond what dw_mci_remove() handles. */
4299 static void __exit dw_mci_exit(void)
4303 module_init(dw_mci_init);
4304 module_exit(dw_mci_exit);
4306 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4307 MODULE_AUTHOR("NXP Semiconductor VietNam");
4308 MODULE_AUTHOR("Imagination Technologies Ltd");
4309 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4310 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4311 MODULE_LICENSE("GPL v2");