2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
50 #include "rk_sdmmc_dbg.h"
51 #include <linux/regulator/rockchip_io_vol_domain.h>
52 #include "../../clk/rockchip/clk-ops.h"
/*
 * NOTE(review): this listing is a corrupted extraction — the embedded
 * original line numbers are non-contiguous (55, 59, 61, 67, ... missing),
 * so macro continuations and the "struct idmac_desc {" opener have been
 * lost. Restore from a pristine copy before compiling.
 */
54 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
56 /* Common flag combinations */
/* Data-path error interrupt bits (DRTO/DCRC/SBE...); continuation line(s) missing. */
57 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
58 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
/* Command-path error interrupt bits; continuation line missing. */
60 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
62 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
63 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
/* Direction markers used for host->dir_status. */
64 #define DW_MCI_SEND_STATUS 1
65 #define DW_MCI_RECV_STATUS 2
/* Transfers below this byte count are done in PIO, not DMA. */
66 #define DW_MCI_DMA_THRESHOLD 16
/* Bus clock bounds; the stock 200 MHz / 400 kHz values are commented out. */
68 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
69 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
71 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
/* Busy-wait timeouts in milliseconds, per card type. */
72 #define SDMMC_DATA_TIMEOUT_SD 500
73 #define SDMMC_DATA_TIMEOUT_SDIO 250
74 #define SDMMC_DATA_TIMEOUT_EMMC 2500
76 #define SDMMC_CMD_RTO_MAX_HOLD 200
77 #define SDMMC_WAIT_FOR_UNBUSY 2500
79 #ifdef CONFIG_MMC_DW_IDMAC
/* All internal-DMAC interrupt status bits, for one-shot clearing. */
80 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
81 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
82 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
/* NOTE(review): "struct idmac_desc {" opener (original lines 83-85) is missing here. */
86 u32 des0; /* Control Descriptor */
87 #define IDMAC_DES0_DIC BIT(1)
88 #define IDMAC_DES0_LD BIT(2)
89 #define IDMAC_DES0_FD BIT(3)
90 #define IDMAC_DES0_CH BIT(4)
91 #define IDMAC_DES0_ER BIT(5)
92 #define IDMAC_DES0_CES BIT(30)
93 #define IDMAC_DES0_OWN BIT(31)
95 u32 des1; /* Buffer sizes */
/* des1 low 13 bits carry buffer-1 size; upper field preserved. */
96 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
97 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
99 u32 des2; /* buffer 1 physical address */
101 u32 des3; /* buffer 2 physical address */
103 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * Standard MMC/SD tuning block patterns (64 bytes for 4-bit bus,
 * 128 bytes for 8-bit bus) used during sample-phase tuning.
 * NOTE(review): closing "};" lines are missing from this extraction.
 */
105 static const u8 tuning_blk_pattern_4bit[] = {
106 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
107 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
108 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
109 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
110 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
111 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
112 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
113 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
116 static const u8 tuning_blk_pattern_8bit[] = {
117 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
118 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
119 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
120 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
121 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
122 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
123 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
124 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
125 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
126 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
127 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
128 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
129 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
130 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
131 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
132 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Forward declarations for reset/low-power helpers defined later in the file. */
135 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
136 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
137 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
138 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
140 /*printk the all register of current host*/
/*
 * Dump every register in the dw_mci_regs table via printk.
 * NOTE(review): loop body is incomplete in this extraction (the
 * regs++ advance and closing braces are missing).
 */
142 static int dw_mci_regs_printk(struct dw_mci *host)
144 struct sdmmc_reg *regs = dw_mci_regs;
146 while( regs->name != 0 ){
147 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
150 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
155 #if defined(CONFIG_DEBUG_FS)
/*
 * debugfs "req" file: print the in-flight mmc_request (cmd, data, stop)
 * under host->lock so the snapshot is consistent.
 * NOTE(review): the seq_printf call sites are truncated in this listing;
 * also note resp[2] is printed twice where resp[3] is presumably intended
 * — verify against pristine source.
 */
156 static int dw_mci_req_show(struct seq_file *s, void *v)
158 struct dw_mci_slot *slot = s->private;
159 struct mmc_request *mrq;
160 struct mmc_command *cmd;
161 struct mmc_command *stop;
162 struct mmc_data *data;
164 /* Make sure we get a consistent snapshot */
165 spin_lock_bh(&slot->host->lock);
175 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
176 cmd->opcode, cmd->arg, cmd->flags,
177 cmd->resp[0], cmd->resp[1], cmd->resp[2],
178 cmd->resp[2], cmd->error);
180 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
181 data->bytes_xfered, data->blocks,
182 data->blksz, data->flags, data->error);
185 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
186 stop->opcode, stop->arg, stop->flags,
187 stop->resp[0], stop->resp[1], stop->resp[2],
188 stop->resp[2], stop->error);
191 spin_unlock_bh(&slot->host->lock);
/* single_open wrapper so "req" shows dw_mci_req_show output. */
196 static int dw_mci_req_open(struct inode *inode, struct file *file)
198 return single_open(file, dw_mci_req_show, inode->i_private);
201 static const struct file_operations dw_mci_req_fops = {
202 .owner = THIS_MODULE,
203 .open = dw_mci_req_open,
206 .release = single_release,
/* debugfs "regs" file: dump key controller registers. */
209 static int dw_mci_regs_show(struct seq_file *s, void *v)
211 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
212 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
213 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
214 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
215 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
216 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
221 static int dw_mci_regs_open(struct inode *inode, struct file *file)
223 return single_open(file, dw_mci_regs_show, inode->i_private);
226 static const struct file_operations dw_mci_regs_fops = {
227 .owner = THIS_MODULE,
228 .open = dw_mci_regs_open,
231 .release = single_release,
/*
 * Create the per-slot debugfs nodes (regs, req, state, pending_events,
 * completed_events) under the mmc host's debugfs root.
 */
234 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
236 struct mmc_host *mmc = slot->mmc;
237 struct dw_mci *host = slot->host;
241 root = mmc->debugfs_root;
245 node = debugfs_create_file("regs", S_IRUSR, root, host,
250 node = debugfs_create_file("req", S_IRUSR, root, slot,
255 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
259 node = debugfs_create_x32("pending_events", S_IRUSR, root,
260 (u32 *)&host->pending_events);
264 node = debugfs_create_x32("completed_events", S_IRUSR, root,
265 (u32 *)&host->completed_events);
272 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
274 #endif /* defined(CONFIG_DEBUG_FS) */
276 static void dw_mci_set_timeout(struct dw_mci *host)
278 /* timeout (maximum) */
279 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMD register bits:
 * stop/wait-prvdata, response-expected/long/CRC, data direction and
 * stream mode. Platform drv_data may further adjust the value.
 * NOTE(review): the "u32 cmdr = cmd->opcode;" initialization and the
 * "data = cmd->data" assignment are missing from this extraction, as is
 * the final "return cmdr;".
 */
282 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
284 struct mmc_data *data;
285 struct dw_mci_slot *slot = mmc_priv(mmc);
286 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
/* Mark command in-flight; cleared on completion by the IRQ path. */
288 cmd->error = -EINPROGRESS;
292 if (cmdr == MMC_STOP_TRANSMISSION)
293 cmdr |= SDMMC_CMD_STOP;
295 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
297 if (cmd->flags & MMC_RSP_PRESENT) {
298 /* We expect a response, so set this bit */
299 cmdr |= SDMMC_CMD_RESP_EXP;
300 if (cmd->flags & MMC_RSP_136)
301 cmdr |= SDMMC_CMD_RESP_LONG;
304 if (cmd->flags & MMC_RSP_CRC)
305 cmdr |= SDMMC_CMD_RESP_CRC;
/* Data phase: set expect-data, plus stream/write bits as flagged. */
309 cmdr |= SDMMC_CMD_DAT_EXP;
310 if (data->flags & MMC_DATA_STREAM)
311 cmdr |= SDMMC_CMD_STRM_MODE;
312 if (data->flags & MMC_DATA_WRITE)
313 cmdr |= SDMMC_CMD_DAT_WR;
/* Let the SoC-specific driver tweak the command word (e.g. hold reg). */
316 if (drv_data && drv_data->prepare_command)
317 drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build host->stop_abort: a STOP_TRANSMISSION (for block R/W) or
 * SD_IO_RW_DIRECT abort (for SDIO CMD53) used to terminate a transfer.
 * Returns the corresponding CMD register value with STOP set.
 * NOTE(review): "u32 cmdr = cmd->opcode;" and the stop->arg setup for
 * the block-transfer case are missing from this extraction.
 */
323 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
325 struct mmc_command *stop;
331 stop = &host->stop_abort;
333 memset(stop, 0, sizeof(struct mmc_command));
335 if (cmdr == MMC_READ_SINGLE_BLOCK ||
336 cmdr == MMC_READ_MULTIPLE_BLOCK ||
337 cmdr == MMC_WRITE_BLOCK ||
338 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
339 stop->opcode = MMC_STOP_TRANSMISSION;
341 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
342 } else if (cmdr == SD_IO_RW_EXTENDED) {
/* CCCR I/O Abort: write 1 to the ASx bits of the function in cmd->arg. */
343 stop->opcode = SD_IO_RW_DIRECT;
344 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
345 ((cmd->arg >> 28) & 0x7);
346 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
351 cmdr = stop->opcode | SDMMC_CMD_STOP |
352 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Write CMDARG and kick off a command by writing CMD with CMD_START.
 * Special-cases CMD11 (SD_SWITCH_VOLTAGE): low-power clock gating must be
 * disabled and the VOLT_SWITCH bit set before issuing.
 */
357 static void dw_mci_start_command(struct dw_mci *host,
358 struct mmc_command *cmd, u32 cmd_flags)
360 struct dw_mci_slot *slot = host->slot[0];
361 /*temporality fix slot[0] due to host->num_slots equal to 1*/
/* Remember the previous command for error reporting/debug. */
363 host->pre_cmd = host->cmd;
366 "start command: ARGR=0x%08x CMDR=0x%08x\n",
367 cmd->arg, cmd_flags);
369 if(SD_SWITCH_VOLTAGE == cmd->opcode){
370 /*confirm non-low-power mode*/
371 mci_writel(host, CMDARG, 0);
372 dw_mci_disable_low_power(slot);
374 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
375 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
377 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
380 mci_writel(host, CMDARG, cmd->arg);
383 /* fix the value to 1 in some Soc,for example RK3188. */
384 if(host->mmc->hold_reg_flag)
385 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
/* Start bit is self-clearing; hardware latches the command. */
387 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
391 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
393 dw_mci_start_command(host, data->stop, host->stop_cmdr);
396 /* DMA interface functions */
/*
 * Abort an in-flight DMA transfer and mark the transfer phase complete
 * so the tasklet can advance the state machine.
 */
397 static void dw_mci_stop_dma(struct dw_mci *host)
399 if (host->using_dma) {
400 /* Fixme: No need to terminate edma, may cause flush op */
/* RK3036/RK312x use external DMA; terminating it may trigger a flush op. */
401 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
402 host->dma_ops->stop(host);
403 host->dma_ops->cleanup(host);
406 /* Data transfer was stopped by the interrupt handler */
407 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
410 static int dw_mci_get_dma_dir(struct mmc_data *data)
412 if (data->flags & MMC_DATA_WRITE)
413 return DMA_TO_DEVICE;
415 return DMA_FROM_DEVICE;
418 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the scatterlist after an IDMAC transfer, unless the mapping is
 * owned by the pre_req/post_req path (host_cookie != 0).
 * NOTE(review): the dma_unmap_sg argument lines (sg, sg_len) are missing
 * from this extraction.
 */
419 static void dw_mci_dma_cleanup(struct dw_mci *host)
421 struct mmc_data *data = host->data;
424 if (!data->host_cookie)
425 dma_unmap_sg(host->dev,
428 dw_mci_get_dma_dir(data));
431 static void dw_mci_idmac_reset(struct dw_mci *host)
433 u32 bmod = mci_readl(host, BMOD);
434 /* Software reset of DMA */
435 bmod |= SDMMC_IDMAC_SWRESET;
436 mci_writel(host, BMOD, bmod);
439 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
443 /* Disable and reset the IDMAC interface */
444 temp = mci_readl(host, CTRL);
445 temp &= ~SDMMC_CTRL_USE_IDMAC;
446 temp |= SDMMC_CTRL_DMA_RESET;
447 mci_writel(host, CTRL, temp);
449 /* Stop the IDMAC running */
450 temp = mci_readl(host, BMOD);
451 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
452 temp |= SDMMC_IDMAC_SWRESET;
453 mci_writel(host, BMOD, temp);
/*
 * IDMAC completion callback: clean up the mapping and, if data is still
 * attached, flag XFER_COMPLETE and schedule the state-machine tasklet.
 */
456 static void dw_mci_idmac_complete_dma(void *arg)
458 struct dw_mci *host = arg;
459 struct mmc_data *data = host->data;
461 dev_vdbg(host->dev, "DMA complete\n");
464 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
465 host->mrq->cmd->opcode,host->mrq->cmd->arg,
466 data->blocks,data->blksz,mmc_hostname(host->mmc));
469 host->dma_ops->cleanup(host);
472 * If the card was removed, data will be NULL. No point in trying to
473 * send the stop command or waiting for NBUSY in this case.
476 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
477 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the mapped scatterlist: one
 * descriptor per sg entry, OWN+chained, first/last flagged FD/LD.
 * NOTE(review): several lines are missing here (loop variable decl,
 * the "Set first descriptor" desc reset); the (i - 1) byte-offset
 * arithmetic on a struct pointer at original line 506 looks suspect —
 * verify against pristine source.
 */
481 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
485 struct idmac_desc *desc = host->sg_cpu;
487 for (i = 0; i < sg_len; i++, desc++) {
488 unsigned int length = sg_dma_len(&data->sg[i]);
489 u32 mem_addr = sg_dma_address(&data->sg[i]);
491 /* Set the OWN bit and disable interrupts for this descriptor */
492 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
495 IDMAC_SET_BUFFER1_SIZE(desc, length);
497 /* Physical address to DMA to/from */
498 desc->des2 = mem_addr;
501 /* Set first descriptor */
503 desc->des0 |= IDMAC_DES0_FD;
505 /* Set last descriptor */
506 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
507 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
508 desc->des0 |= IDMAC_DES0_LD;
/*
 * Start an IDMAC transfer: build descriptors, route the data path
 * through the IDMAC, enable it and poke the poll-demand register.
 */
513 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
517 dw_mci_translate_sglist(host, host->data, sg_len);
519 /* Select IDMAC interface */
520 temp = mci_readl(host, CTRL);
521 temp |= SDMMC_CTRL_USE_IDMAC;
522 mci_writel(host, CTRL, temp);
526 /* Enable the IDMAC */
527 temp = mci_readl(host, BMOD);
528 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
529 mci_writel(host, BMOD, temp);
531 /* Start it running */
532 mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: forward-link a PAGE_SIZE descriptor ring,
 * close it into a ring, reset the engine, unmask only NI/RI(/TI)
 * interrupts and program the descriptor base address.
 */
535 static int dw_mci_idmac_init(struct dw_mci *host)
537 struct idmac_desc *p;
540 /* Number of descriptors in the ring buffer */
541 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
543 /* Forward link the descriptor list */
544 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
545 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
547 /* Set the last descriptor as the end-of-ring descriptor */
548 p->des3 = host->sg_dma;
549 p->des0 = IDMAC_DES0_ER;
551 dw_mci_idmac_reset(host);
553 /* Mask out interrupts - get Tx & Rx complete only */
554 mci_writel(host, IDSTS, IDMAC_INT_CLR);
555 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
558 /* Set the descriptor base address */
559 mci_writel(host, DBADDR, host->sg_dma);
/* Internal-DMAC ops table plugged into host->dma_ops. */
563 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
564 .init = dw_mci_idmac_init,
565 .start = dw_mci_idmac_start_dma,
566 .stop = dw_mci_idmac_stop_dma,
567 .complete = dw_mci_idmac_complete_dma,
568 .cleanup = dw_mci_dma_cleanup,
/*
 * Unmap the scatterlist after an external-DMA transfer, unless the
 * mapping is owned by pre_req/post_req (host_cookie != 0).
 */
572 static void dw_mci_edma_cleanup(struct dw_mci *host)
574 struct mmc_data *data = host->data;
577 if (!data->host_cookie)
578 dma_unmap_sg(host->dev,
579 data->sg, data->sg_len,
580 dw_mci_get_dma_dir(data));
583 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
585 dmaengine_terminate_all(host->dms->ch);
/*
 * External-DMA completion callback: invalidate CPU caches for reads,
 * clean up, then flag XFER_COMPLETE and schedule the tasklet.
 */
588 static void dw_mci_edmac_complete_dma(void *arg)
590 struct dw_mci *host = arg;
591 struct mmc_data *data = host->data;
593 dev_vdbg(host->dev, "DMA complete\n");
596 if(data->flags & MMC_DATA_READ)
597 /* Invalidate cache after read */
598 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
599 data->sg_len, DMA_FROM_DEVICE);
601 host->dma_ops->cleanup(host);
604 * If the card was removed, data will be NULL. No point in trying to
605 * send the stop command or waiting for NBUSY in this case.
608 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
609 tasklet_schedule(&host->tasklet);
/*
 * Configure and launch a transfer on the external dmaengine channel:
 * slave config (FIFO address, 4-byte width, burst derived from FIFOTH
 * MSIZE, capped at 16), prep a slave-sg descriptor for the proper
 * direction, install the completion callback, submit and issue.
 * NOTE(review): error-return lines after the dev_err calls are missing
 * in this extraction.
 */
613 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
615 struct dma_slave_config slave_config;
616 struct dma_async_tx_descriptor *desc = NULL;
617 struct scatterlist *sgl = host->data->sg;
618 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
619 u32 sg_elems = host->data->sg_len;
620 u32 fifoth_val, mburst;
623 /* Set external dma config: burst size, burst width*/
624 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
625 slave_config.src_addr = slave_config.dst_addr;
626 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
627 slave_config.src_addr_width = slave_config.dst_addr_width;
629 /* Match FIFO dma burst MSIZE with external dma config*/
630 fifoth_val = mci_readl(host, FIFOTH);
631 mburst = mszs[(fifoth_val >> 28) & 0x7];
633 /* edmac limit burst to 16 */
634 slave_config.dst_maxburst = (mburst > 16) ? 16 : mburst;
635 slave_config.src_maxburst = slave_config.dst_maxburst;
637 if(host->data->flags & MMC_DATA_WRITE){
638 slave_config.direction = DMA_MEM_TO_DEV;
639 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
641 dev_err(host->dev, "error in dw_mci edma configuration.\n");
645 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
646 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
648 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
651 /* Set dw_mci_edmac_complete_dma as callback */
652 desc->callback = dw_mci_edmac_complete_dma;
653 desc->callback_param = (void *)host;
654 dmaengine_submit(desc);
656 /* Flush cache before write */
657 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
658 sg_elems, DMA_TO_DEVICE);
659 dma_async_issue_pending(host->dms->ch);
/* Read path: device -> memory, same prep/submit/issue sequence. */
662 slave_config.direction = DMA_DEV_TO_MEM;
663 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
665 dev_err(host->dev, "error in dw_mci edma configuration.\n");
668 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
669 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
671 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
674 /* set dw_mci_edmac_complete_dma as callback */
675 desc->callback = dw_mci_edmac_complete_dma;
676 desc->callback_param = (void *)host;
677 dmaengine_submit(desc);
678 dma_async_issue_pending(host->dms->ch);
/*
 * Allocate the dma-slave bookkeeping struct and request the "dw_mci"
 * channel from the dmaengine via DT.
 * NOTE(review): the failure path dereferences host->dms->ch->chan_id
 * after the NULL check on host->dms->ch — that is a NULL dereference in
 * the error message; verify/fix against pristine source.
 */
682 static int dw_mci_edmac_init(struct dw_mci *host)
684 /* Request external dma channel, SHOULD decide chn in dts */
686 host->dms = (struct dw_mci_dma_slave *)kmalloc
687 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
688 if (NULL == host->dms) {
689 dev_err(host->dev, "No enough memory to alloc dms.\n");
693 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
694 if (!host->dms->ch) {
695 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
696 host->dms->ch->chan_id);
703 if (NULL != host->dms) {
/* Release the dmaengine channel and free the bookkeeping struct. */
711 static void dw_mci_edmac_exit(struct dw_mci *host)
713 if (NULL != host->dms) {
714 if (NULL != host->dms->ch) {
715 dma_release_channel(host->dms->ch);
716 host->dms->ch = NULL;
/* External-DMA ops table plugged into host->dma_ops. */
723 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
724 .init = dw_mci_edmac_init,
725 .exit = dw_mci_edmac_exit,
726 .start = dw_mci_edmac_start_dma,
727 .stop = dw_mci_edmac_stop_dma,
728 .complete = dw_mci_edmac_complete_dma,
729 .cleanup = dw_mci_edma_cleanup,
731 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * Map the request's scatterlist for DMA, rejecting "complex" transfers:
 * below DW_MCI_DMA_THRESHOLD bytes, or any sg entry with non-word-aligned
 * offset/length. Caches sg_len in data->host_cookie when called from the
 * pre_req path. Returns sg_len or a negative value for PIO fallback.
 * NOTE(review): several lines (the "next" parameter declaration, early
 * returns, dma_map_sg args) are missing from this extraction.
 */
733 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
734 struct mmc_data *data,
737 struct scatterlist *sg;
738 unsigned int i, sg_len;
740 if (!next && data->host_cookie)
741 return data->host_cookie;
744 * We don't do DMA on "complex" transfers, i.e. with
745 * non-word-aligned buffers or lengths. Also, we don't bother
746 * with all the DMA setup overhead for short transfers.
748 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
754 for_each_sg(data->sg, sg, data->sg_len, i) {
755 if (sg->offset & 3 || sg->length & 3)
759 sg_len = dma_map_sg(host->dev,
762 dw_mci_get_dma_dir(data));
767 data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: eagerly map the next request's sg list so the
 * mapping cost overlaps with the current transfer.
 */
772 static void dw_mci_pre_req(struct mmc_host *mmc,
773 struct mmc_request *mrq,
776 struct dw_mci_slot *slot = mmc_priv(mmc);
777 struct mmc_data *data = mrq->data;
779 if (!slot->host->use_dma || !data)
782 if (data->host_cookie) {
783 data->host_cookie = 0;
787 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
788 data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: unmap an sg list that pre_req mapped
 * (host_cookie != 0) and clear the cookie.
 */
791 static void dw_mci_post_req(struct mmc_host *mmc,
792 struct mmc_request *mrq,
795 struct dw_mci_slot *slot = mmc_priv(mmc);
796 struct mmc_data *data = mrq->data;
798 if (!slot->host->use_dma || !data)
801 if (data->host_cookie)
802 dma_unmap_sg(slot->host->dev,
805 dw_mci_get_dma_dir(data));
806 data->host_cookie = 0;
/*
 * Pick DMA multiple-transaction size (MSIZE) and RX/TX watermarks for the
 * FIFOTH register based on the block size, trying the largest MSIZE that
 * divides both the block depth and the TX watermark inverse.
 */
809 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
811 #ifdef CONFIG_MMC_DW_IDMAC
812 unsigned int blksz = data->blksz;
813 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
814 u32 fifo_width = 1 << host->data_shift;
815 u32 blksz_depth = blksz / fifo_width, fifoth_val;
816 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
817 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
819 tx_wmark = (host->fifo_depth) / 2;
820 tx_wmark_invers = host->fifo_depth - tx_wmark;
824 * if blksz is not a multiple of the FIFO width
826 if (blksz % fifo_width) {
/* Search downward for the largest MSIZE that divides evenly. */
833 if (!((blksz_depth % mszs[idx]) ||
834 (tx_wmark_invers % mszs[idx]))) {
836 rx_wmark = mszs[idx] - 1;
841 * If idx is '0', it won't be tried
842 * Thus, initial values are uesed
845 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
846 mci_writel(host, FIFOTH, fifoth_val);
/*
 * Program the card read threshold (CDTHRCTL) for HS200 / SDR104 reads
 * so the controller only starts a read when the FIFO has room for a
 * whole block; disabled for other timings or oversized blocks.
 */
851 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
853 unsigned int blksz = data->blksz;
854 u32 blksz_depth, fifo_depth;
857 WARN_ON(!(data->flags & MMC_DATA_READ));
859 if (host->timing != MMC_TIMING_MMC_HS200 &&
860 host->timing != MMC_TIMING_UHS_SDR104)
863 blksz_depth = blksz / (1 << host->data_shift);
864 fifo_depth = host->fifo_depth;
866 if (blksz_depth > fifo_depth)
870 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
871 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
872 * Currently just choose blksz.
875 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
/* Fall-through/disable path: threshold off. */
879 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to start the data phase via DMA. Returns non-zero to request PIO
 * fallback (no channel, short/unaligned transfer). On success: adjusts
 * FIFOTH when the block size changed, resets the DMA path, enables the
 * CTRL DMA bit, masks RX/TX FIFO IRQs and starts the dma_ops engine.
 * NOTE(review): local declarations (sg_len, temp, flags) and several
 * early-return lines are missing from this extraction.
 */
882 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
890 /* If we don't have a channel, we can't do DMA */
894 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
896 /* Fixme: No need terminate edma, may cause flush op */
897 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
898 host->dma_ops->stop(host);
905 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
906 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
910 * Decide the MSIZE and RX/TX Watermark.
911 * If current block size is same with previous size,
912 * no need to update fifoth.
914 if (host->prev_blksz != data->blksz)
915 dw_mci_adjust_fifoth(host, data);
918 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
920 /* Enable the DMA interface */
921 temp = mci_readl(host, CTRL);
922 temp |= SDMMC_CTRL_DMA_ENABLE;
923 mci_writel(host, CTRL, temp);
925 /* Disable RX/TX IRQs, let DMA handle it */
926 spin_lock_irqsave(&host->slock, flags);
927 temp = mci_readl(host, INTMASK);
928 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
929 mci_writel(host, INTMASK, temp);
930 spin_unlock_irqrestore(&host->slock, flags);
932 host->dma_ops->start(host, sg_len);
/*
 * Start the data phase: reset the DMA path, set direction status,
 * program the read threshold for reads, then try DMA; on DMA refusal
 * fall back to PIO (sg_miter + RX/TX FIFO interrupts, DMA bit cleared,
 * initial FIFOTH restored and prev_blksz invalidated).
 */
937 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
942 data->error = -EINPROGRESS;
949 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
951 if (data->flags & MMC_DATA_READ) {
952 host->dir_status = DW_MCI_RECV_STATUS;
953 dw_mci_ctrl_rd_thld(host, data);
955 host->dir_status = DW_MCI_SEND_STATUS;
958 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
959 data->blocks, data->blksz, mmc_hostname(host->mmc));
961 if (dw_mci_submit_data_dma(host, data)) {
962 int flags = SG_MITER_ATOMIC;
963 if (host->data->flags & MMC_DATA_READ)
964 flags |= SG_MITER_TO_SG;
966 flags |= SG_MITER_FROM_SG;
968 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
970 host->part_buf_start = 0;
971 host->part_buf_count = 0;
/* PIO path: re-enable RX/TX FIFO interrupts under the IRQ-safe lock. */
973 spin_lock_irqsave(&host->slock, flag);
974 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
975 temp = mci_readl(host, INTMASK);
976 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
977 mci_writel(host, INTMASK, temp);
978 spin_unlock_irqrestore(&host->slock, flag);
980 temp = mci_readl(host, CTRL);
981 temp &= ~SDMMC_CTRL_DMA_ENABLE;
982 mci_writel(host, CTRL, temp);
985 * Use the initial fifoth_val for PIO mode.
986 * If next issued data may be transfered by DMA mode,
987 * prev_blksz should be invalidated.
989 mci_writel(host, FIFOTH, host->fifoth_val);
990 host->prev_blksz = 0;
993 * Keep the current block size.
994 * It will be used to decide whether to update
995 * fifoth register next time.
997 host->prev_blksz = data->blksz;
/*
 * Synchronously issue an internal controller command (e.g. clock update):
 * wait (bounded) for the card/controller busy bits to clear, write
 * CMDARG/CMD with START, then poll until hardware clears the START bit.
 * Logs an error on timeout rather than returning a failure.
 */
1001 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1003 struct dw_mci *host = slot->host;
1004 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1005 unsigned int cmd_status = 0;
1006 #ifdef SDMMC_WAIT_FOR_UNBUSY
1008 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
/* Only wait for unbusy when a card is actually present. */
1010 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1012 ret = time_before(jiffies, timeout);
1013 cmd_status = mci_readl(host, STATUS);
1014 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1018 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1019 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1022 mci_writel(host, CMDARG, arg);
1024 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
/* Clock-update commands complete fast; use a shorter poll window. */
1025 if(cmd & SDMMC_CMD_UPD_CLK)
1026 timeout = jiffies + msecs_to_jiffies(50);
1028 timeout = jiffies + msecs_to_jiffies(500);
1029 while (time_before(jiffies, timeout)) {
1030 cmd_status = mci_readl(host, CMD);
1031 if (!(cmd_status & SDMMC_CMD_START))
1034 dev_err(&slot->mmc->class_dev,
1035 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1036 cmd, arg, cmd_status);
/*
 * (Re)program the card clock divider, clock enable/low-power gating and
 * bus width for @slot. Contains Rockchip-specific workarounds: doubling
 * clk_mmc for slow init clocks, DDR-eMMC divider fixups, and a forced
 * CIU clock update sequence (disable CLKENA -> set CLKDIV/CLKSRC ->
 * SDMMC_CMD_UPD_CLK -> re-enable CLKENA).
 * NOTE(review): this function is heavily gapped in the extraction (local
 * declarations, several if/else headers and closing braces are missing);
 * do not attempt behavioral changes without the pristine source.
 */
1039 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1041 struct dw_mci *host = slot->host;
1042 unsigned int tempck,clock = slot->clock;
1047 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1048 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
/* clock == 0 path: gate the card clock off. */
1051 mci_writel(host, CLKENA, 0);
1052 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* During voltage switch (svi_flags) skip the redundant clock update. */
1053 if(host->svi_flags == 0)
1054 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1056 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1058 } else if (clock != host->current_speed || force_clkinit) {
1059 div = host->bus_hz / clock;
1060 if (host->bus_hz % clock && host->bus_hz > clock)
1062 * move the + 1 after the divide to prevent
1063 * over-clocking the card.
1067 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1069 if ((clock << div) != slot->__clk_old || force_clkinit) {
1070 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1071 dev_info(&slot->mmc->class_dev,
1072 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1073 slot->id, host->bus_hz, clock,
1076 host->set_speed = tempck;
1077 host->set_div = div;
1081 mci_writel(host, CLKENA, 0);
1082 mci_writel(host, CLKSRC, 0);
1086 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
/* Init-speed workaround: run clk_mmc at 2x so div=1 yields the target. */
1088 if(clock <= 400*1000){
1089 MMC_DBG_BOOT_FUNC(host->mmc,
1090 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1091 clock * 2, mmc_hostname(host->mmc));
1092 /* clk_mmc will change parents to 24MHz xtal*/
1093 clk_set_rate(host->clk_mmc, clock * 2);
1096 host->set_div = div;
1100 MMC_DBG_BOOT_FUNC(host->mmc,
1101 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1102 mmc_hostname(host->mmc));
1105 MMC_DBG_ERR_FUNC(host->mmc,
1106 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1107 mmc_hostname(host->mmc));
1109 host->set_div = div;
1110 host->bus_hz = host->set_speed * 2;
1111 MMC_DBG_BOOT_FUNC(host->mmc,
1112 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1113 div, host->bus_hz, mmc_hostname(host->mmc));
1115 /* BUG may be here, come on, Linux BSP engineer looks!
1116 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1117 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1118 some oops happened like that:
1119 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1120 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1121 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1122 mmc0: new high speed DDR MMC card at address 0001
1123 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1125 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1126 mmcblk0: retrying using single block read
1127 mmcblk0: error -110 sending status command, retrying
1129 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1132 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1133 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1136 host->set_div = div;
1137 host->bus_hz = host->set_speed * 2;
1138 MMC_DBG_BOOT_FUNC(host->mmc,
1139 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1140 div, host->bus_hz, mmc_hostname(host->mmc));
/* Pre-240A controller versions take the raw rate; later ones take 2x. */
1143 if (host->verid < DW_MMC_240A)
1144 clk_set_rate(host->clk_mmc,(host->bus_hz));
1146 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1152 /* set clock to desired speed */
1153 mci_writel(host, CLKDIV, div);
1157 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1159 /* enable clock; only low power if no SDIO */
1160 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1162 if (host->verid < DW_MMC_240A)
1163 sdio_int = SDMMC_INT_SDIO(slot->id);
1165 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1167 if (!(mci_readl(host, INTMASK) & sdio_int))
1168 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1169 mci_writel(host, CLKENA, clk_en_a);
1173 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1174 /* keep the clock with reflecting clock dividor */
1175 slot->__clk_old = clock << div;
1178 host->current_speed = clock;
/* Log bus-width changes once, on transitions only. */
1180 if(slot->ctype != slot->pre_ctype)
1181 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1183 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1184 mmc_hostname(host->mmc));
1185 slot->pre_ctype = slot->ctype;
1187 /* Set the current slot bus width */
1188 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1191 extern struct mmc_card *this_card;
/*
 * Poll STATUS until DATA_BUSY/MC_BUSY clear, with a per-card-type
 * timeout (SDIO default, SD, eMMC; erase commands get a timeout scaled
 * from ext_csd erase parameters via the global this_card).
 * NOTE(review): the secure-erase test "(arg & (1<<31)) == 1" can never
 * be true (the masked value is 0 or 0x80000000) — verify intent against
 * pristine source; also se_flag's declaration/use lines are missing.
 */
1192 static void dw_mci_wait_unbusy(struct dw_mci *host)
1195 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1196 unsigned long time_loop;
1197 unsigned int status;
1200 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1202 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1203 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1204 /* Special care for (secure)erase timeout calculation */
1206 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1209 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1210 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1211 300000 * (this_card->ext_csd.sec_erase_mult)) :
1212 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1216 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1217 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1218 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1219 timeout = SDMMC_DATA_TIMEOUT_SD;
1222 time_loop = jiffies + msecs_to_jiffies(timeout);
1224 status = mci_readl(host, STATUS);
1225 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1227 } while (time_before(jiffies, time_loop));
1232 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1235 * 0--status is busy.
1236 * 1--status is unbusy.
/*
 * mmc_host_ops.card_busy used during signal-voltage switch: toggles
 * host->svi_flags between the two phases of the switch and returns it.
 */
1238 int dw_mci_card_busy(struct mmc_host *mmc)
1240 struct dw_mci_slot *slot = mmc_priv(mmc);
1241 struct dw_mci *host = slot->host;
1243 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1244 host->svi_flags, mmc_hostname(host->mmc));
1247 if(host->svi_flags == 0){
1249 host->svi_flags = 1;
1250 return host->svi_flags;
1253 host->svi_flags = 0;
1254 return host->svi_flags;
/*
 * Program the controller for one command (and its data, if any) and kick it
 * off.  Caller holds host->lock.  Waits for the controller/card to go idle
 * before reprogramming, then clears per-request event state.
 */
1260 static void __dw_mci_start_request(struct dw_mci *host,
1261 struct dw_mci_slot *slot,
1262 struct mmc_command *cmd)
1264 struct mmc_request *mrq;
1265 struct mmc_data *data;
1269 if (host->pdata->select_slot)
1270 host->pdata->select_slot(slot->id);
1272 host->cur_slot = slot;
/* Make sure the previous transfer has fully drained before reprogramming */
1275 dw_mci_wait_unbusy(host);
1277 host->pending_events = 0;
1278 host->completed_events = 0;
1279 host->data_status = 0;
/* Data phase: program timeout, total byte count and block size */
1283 dw_mci_set_timeout(host);
1284 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1285 mci_writel(host, BLKSIZ, data->blksz);
1288 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1290 /* this is the first command, send the initialization clock */
1291 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1292 cmdflags |= SDMMC_CMD_INIT;
1295 dw_mci_submit_data(host, data);
1299 dw_mci_start_command(host, cmd, cmdflags);
/* Pre-compute the stop command register value for later use */
1302 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * Start the slot's queued request.  If the request carries an SBC
 * (CMD23 SET_BLOCK_COUNT), send that first; otherwise send the main command.
 */
1305 static void dw_mci_start_request(struct dw_mci *host,
1306 struct dw_mci_slot *slot)
1308 struct mmc_request *mrq = slot->mrq;
1309 struct mmc_command *cmd;
1311 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1312 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1314 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1315 __dw_mci_start_request(host, slot, cmd);
1318 /* must be called with host->lock held */
/*
 * Attach mrq to the slot and either start it immediately (host idle) or
 * append the slot to the host's pending queue for later dispatch.
 */
1319 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1320 struct mmc_request *mrq)
1322 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1327 if (host->state == STATE_IDLE) {
1328 host->state = STATE_SENDING_CMD;
1329 dw_mci_start_request(host, slot);
1331 list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops.request entry point: validate card presence and queue the
 * request.  Presence check and queueing are done under one lock acquisition
 * (see comment below) so removal cannot race with submission.
 */
1335 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1337 struct dw_mci_slot *slot = mmc_priv(mmc);
1338 struct dw_mci *host = slot->host;
1343 * The check for card presence and queueing of the request must be
1344 * atomic, otherwise the card could be removed in between and the
1345 * request wouldn't fail until another card was inserted.
1347 spin_lock_bh(&host->lock);
1349 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1350 spin_unlock_bh(&host->lock);
/* No medium: fail the request immediately without touching the hardware */
1351 mrq->cmd->error = -ENOMEDIUM;
1352 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1353 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
1355 mmc_request_done(mmc, mrq);
1359 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1360 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1362 dw_mci_queue_request(host, slot, mrq);
1364 spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.set_ios: apply bus width, timing (UHS/DDR), clock and power
 * settings requested by the MMC core.  Optionally busy-waits for the
 * controller to go idle first (SDMMC_WAIT_FOR_UNBUSY builds).
 */
1367 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1369 struct dw_mci_slot *slot = mmc_priv(mmc);
1370 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1371 struct dw_mci *host = slot->host;
1373 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1376 #ifdef SDMMC_WAIT_FOR_UNBUSY
1377 unsigned long time_loop;
/* During a signal-voltage switch, allow the longer SD data timeout */
1380 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1381 if(host->svi_flags == 1)
1382 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1384 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1386 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1389 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1390 printk("%d..%s: no card. [%s]\n", \
1391 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until both data path and card are idle, or timeout */
1396 ret = time_before(jiffies, time_loop);
1397 regs = mci_readl(slot->host, STATUS);
1398 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1404 printk("slot->flags = %lu ", slot->flags);
1405 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1406 if(host->svi_flags != 1)
1409 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1410 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Translate core bus-width request into controller CTYPE encoding */
1414 switch (ios->bus_width) {
1415 case MMC_BUS_WIDTH_4:
1416 slot->ctype = SDMMC_CTYPE_4BIT;
1418 case MMC_BUS_WIDTH_8:
1419 slot->ctype = SDMMC_CTYPE_8BIT;
1422 /* set default 1 bit mode */
1423 slot->ctype = SDMMC_CTYPE_1BIT;
1424 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode flag lives in UHS_REG bit (16 + slot id) */
1427 regs = mci_readl(slot->host, UHS_REG);
1430 if (ios->timing == MMC_TIMING_UHS_DDR50)
1431 regs |= ((0x1 << slot->id) << 16);
1433 regs &= ~((0x1 << slot->id) << 16);
1435 mci_writel(slot->host, UHS_REG, regs);
1436 slot->host->timing = ios->timing;
1439 * Use mirror of ios->clock to prevent race with mmc
1440 * core ios update when finding the minimum.
1442 slot->clock = ios->clock;
/* Give the SoC-specific driver a chance to adjust (tuning, clk, etc.) */
1444 if (drv_data && drv_data->set_ios)
1445 drv_data->set_ios(slot->host, ios);
1447 /* Slot specific timing and width adjustment */
1448 dw_mci_setup_bus(slot, false);
1452 switch (ios->power_mode) {
/* Power up: request init clock on next command, enable slot power */
1454 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1456 if (slot->host->pdata->setpower)
1457 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1458 regs = mci_readl(slot->host, PWREN);
1459 regs |= (1 << slot->id);
1460 mci_writel(slot->host, PWREN, regs);
1463 /* Power down slot */
1464 if(slot->host->pdata->setpower)
1465 slot->host->pdata->setpower(slot->id, 0);
1466 regs = mci_readl(slot->host, PWREN);
1467 regs &= ~(1 << slot->id);
1468 mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops.get_ro: report write-protect state.  Resolution order:
 * slot quirk (never protected) -> platform get_ro hook -> WP GPIO ->
 * controller WRTPRT register bit for this slot.
 */
1475 static int dw_mci_get_ro(struct mmc_host *mmc)
1478 struct dw_mci_slot *slot = mmc_priv(mmc);
1479 struct dw_mci_board *brd = slot->host->pdata;
1481 /* Use platform get_ro function, else try on board write protect */
1482 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1484 else if(brd->get_ro)
1485 read_only = brd->get_ro(slot->id);
1486 else if(gpio_is_valid(slot->wp_gpio))
1487 read_only = gpio_get_value(slot->wp_gpio);
1490 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1492 dev_dbg(&mmc->class_dev, "card is %s\n",
1493 read_only ? "read-only" : "read-write");
/*
 * Software-controlled presence toggle for SDIO slots (e.g. WiFi power
 * on/off).  Updates the PRESENT flag, gates the MMC clocks accordingly,
 * and schedules a card-detect rescan.
 */
1498 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1500 struct dw_mci_slot *slot = mmc_priv(mmc);
1501 struct dw_mci *host = slot->host;
1502 /*struct dw_mci_board *brd = slot->host->pdata;*/
/* Only meaningful for hosts restricted to SDIO cards */
1504 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1507 spin_lock_bh(&host->lock);
1510 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1512 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1514 spin_unlock_bh(&host->lock);
/* Enable clocks when the device appears, disable them when it goes away */
1516 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1517 if(__clk_is_enabled(host->hclk_mmc) == false)
1518 clk_prepare_enable(host->hclk_mmc);
1519 if(__clk_is_enabled(host->clk_mmc) == false)
1520 clk_prepare_enable(host->clk_mmc);
1522 if(__clk_is_enabled(host->clk_mmc) == true)
1523 clk_disable_unprepare(slot->host->clk_mmc);
1524 if(__clk_is_enabled(host->hclk_mmc) == true)
1525 clk_disable_unprepare(slot->host->hclk_mmc);
/* Debounced rescan so the MMC core notices the (dis)appearance */
1528 mmc_detect_change(slot->mmc, 20);
/*
 * mmc_host_ops.get_cd: report card presence.  RK3126/RK3126B SD slots use a
 * debounced GPIO read plus a GRF force_jtag toggle (the CD pin is muxed with
 * JTAG on those SoCs); other paths fall back to quirk/platform/GPIO/CDETECT.
 */
1534 static int dw_mci_get_cd(struct mmc_host *mmc)
1537 struct dw_mci_slot *slot = mmc_priv(mmc);
1538 struct dw_mci_board *brd = slot->host->pdata;
1539 struct dw_mci *host = slot->host;
1540 int gpio_cd = mmc_gpio_get_cd(mmc);
1543 if ((soc_is_rk3126() || soc_is_rk3126b()) &&
1544 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1545 gpio_cd = slot->cd_gpio;
1546 if (gpio_is_valid(gpio_cd)) {
1547 gpio_val = gpio_get_value(gpio_cd);
/* Read twice (with an elided delay) to debounce; active-low CD pin */
1549 if (gpio_val == gpio_get_value(gpio_cd)) {
1550 gpio_cd = (gpio_val == 0 ? 1 : 0);
1552 /* Enable force_jtag wihtout card in slot, ONLY for NCD-package */
1553 grf_writel((0x1 << 24) | (1 << 8), RK312X_GRF_SOC_CON0);
1554 dw_mci_ctrl_all_reset(host);
1556 /* Really card detected: SHOULD disable force_jtag */
1557 grf_writel((0x1 << 24) | (0 << 8), RK312X_GRF_SOC_CON0);
/* Unstable reading: keep the previously reported state */
1561 return slot->last_detect_state;
1564 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
/* SDIO presence is software-controlled via dw_mci_set_sdio_status() */
1568 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1569 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1571 /* Use platform get_cd function, else try onboard card detect */
1572 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1574 else if (brd->get_cd)
1575 present = !brd->get_cd(slot->id);
1576 else if (!IS_ERR_VALUE(gpio_cd))
1579 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
/* Cache the result in the slot flags under the host lock */
1582 spin_lock_bh(&host->lock);
1584 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1585 dev_dbg(&mmc->class_dev, "card is present\n");
1587 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1588 dev_dbg(&mmc->class_dev, "card is not present\n");
1590 spin_unlock_bh(&host->lock);
1597 * Dts Should caps emmc controller with poll-hw-reset
/*
 * mmc_host_ops.hw_reset for eMMC: abort any transfer (CMD12), wait for DTO,
 * reset IDMAC/DMA/FIFO in the documented order, then pulse RST_n with
 * JEDEC-compliant timing (see tRstW/tRSCA/tRSTH comment below).
 */
1599 static void dw_mci_hw_reset(struct mmc_host *mmc)
1601 struct dw_mci_slot *slot = mmc_priv(mmc);
1602 struct dw_mci *host = slot->host;
1607 unsigned long timeout;
1610 /* (1) CMD12 to end any transfer in process */
1611 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1612 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1614 if(host->mmc->hold_reg_flag)
1615 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1616 mci_writel(host, CMDARG, 0);
1618 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait up to 500ms for the controller to accept the command */
1620 timeout = jiffies + msecs_to_jiffies(500);
1622 ret = time_before(jiffies, timeout);
1623 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1628 MMC_DBG_ERR_FUNC(host->mmc,
1629 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1630 __func__, mmc_hostname(host->mmc));
1632 /* (2) wait DTO, even if no response is sent back by card */
1634 timeout = jiffies + msecs_to_jiffies(5);
1636 ret = time_before(jiffies, timeout);
1637 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1638 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1644 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1646 /* Software reset - BMOD[0] for IDMA only */
1647 regs = mci_readl(host, BMOD);
1648 regs |= SDMMC_IDMAC_SWRESET;
1649 mci_writel(host, BMOD, regs);
1650 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1651 regs = mci_readl(host, BMOD);
1652 if(regs & SDMMC_IDMAC_SWRESET)
1653 MMC_DBG_WARN_FUNC(host->mmc,
1654 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1655 __func__, mmc_hostname(host->mmc));
1657 /* DMA reset - CTRL[2] */
1658 regs = mci_readl(host, CTRL);
1659 regs |= SDMMC_CTRL_DMA_RESET;
1660 mci_writel(host, CTRL, regs);
1661 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1662 regs = mci_readl(host, CTRL);
1663 if(regs & SDMMC_CTRL_DMA_RESET)
1664 MMC_DBG_WARN_FUNC(host->mmc,
1665 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1666 __func__, mmc_hostname(host->mmc));
1668 /* FIFO reset - CTRL[1] */
1669 regs = mci_readl(host, CTRL);
1670 regs |= SDMMC_CTRL_FIFO_RESET;
1671 mci_writel(host, CTRL, regs);
1672 mdelay(1); /* no timing limited, 1ms is random value */
1673 regs = mci_readl(host, CTRL);
1674 if(regs & SDMMC_CTRL_FIFO_RESET)
/*
 * NOTE(review): message text below says "SDMMC_CTRL_DMA_RESET" but this
 * branch checks SDMMC_CTRL_FIFO_RESET — looks like a copy-paste of the
 * warning above; should read FIFO_RESET.
 */
1675 MMC_DBG_WARN_FUNC(host->mmc,
1676 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1677 __func__, mmc_hostname(host->mmc));
1680 According to eMMC spec
1681 tRstW >= 1us ; RST_n pulse width
1682 tRSCA >= 200us ; RST_n to Command time
1683 tRSTH >= 1us ; RST_n high period
/* Drive RST_n low (with power off), hold, then release and wait tRSCA */
1685 mci_writel(slot->host, PWREN, 0x0);
1686 mci_writel(slot->host, RST_N, 0x0);
1688 udelay(10); /* 10us for bad quality eMMc. */
1690 mci_writel(slot->host, PWREN, 0x1);
1691 mci_writel(slot->host, RST_N, 0x1);
1693 usleep_range(500, 1000); /* at least 500(> 200us) */
1697 * Disable lower power mode.
1699 * Low power mode will stop the card clock when idle. According to the
1700 * description of the CLKENA register we should disable low power mode
1701 * for SDIO cards if we need SDIO interrupts to work.
1703 * This function is fast if low power mode is already disabled.
1705 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1707 struct dw_mci *host = slot->host;
1709 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1711 clk_en_a = mci_readl(host, CLKENA);
/* Only touch the hardware if the low-power bit is actually set */
1713 if (clk_en_a & clken_low_pwr) {
1714 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
/* CLKENA changes take effect via an update-clock command */
1715 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1716 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * mmc_host_ops.enable_sdio_irq: mask/unmask the per-slot SDIO interrupt in
 * INTMASK.  The SDIO interrupt bit position moved by 8 in IP version 2.40a.
 */
1720 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1722 struct dw_mci_slot *slot = mmc_priv(mmc);
1723 struct dw_mci *host = slot->host;
1724 unsigned long flags;
1728 spin_lock_irqsave(&host->slock, flags);
1730 /* Enable/disable Slot Specific SDIO interrupt */
1731 int_mask = mci_readl(host, INTMASK);
1733 if (host->verid < DW_MMC_240A)
1734 sdio_int = SDMMC_INT_SDIO(slot->id);
1736 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1740 * Turn off low power mode if it was enabled. This is a bit of
1741 * a heavy operation and we disable / enable IRQs a lot, so
1742 * we'll leave low power mode disabled and it will get
1743 * re-enabled again in dw_mci_setup_bus().
1745 dw_mci_disable_low_power(slot);
1747 mci_writel(host, INTMASK,
1748 (int_mask | sdio_int));
1750 mci_writel(host, INTMASK,
1751 (int_mask & ~sdio_int));
1754 spin_unlock_irqrestore(&host->slock, flags);
1757 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* IO-domain voltages in millivolts */
1759 IO_DOMAIN_12 = 1200,
1760 IO_DOMAIN_18 = 1800,
1761 IO_DOMAIN_33 = 3300,
/*
 * Program the SoC GRF IO-domain voltage select for the SD IO pins.
 * Only RK3288 is handled here; other chips log an error.
 */
1763 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1773 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1774 __FUNCTION__, mmc_hostname(host->mmc));
1777 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1778 __FUNCTION__, mmc_hostname(host->mmc));
1782 if(cpu_is_rk3288()){
1783 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
/* Bit 7 selects the SDcard IO voltage; bit 23 is the write-enable mask */
1784 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1788 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1789 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * Perform the UHS signal-voltage switch sequence: set the vmmc regulator,
 * update the GRF IO domain, and program/verify UHS_REG's 1.8V enable bit.
 * Supports 3.3V, 1.8V and (regulator-only) 1.2V targets.
 */
1793 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1794 struct mmc_ios *ios)
1797 unsigned int value,uhs_reg;
1800 * Signal Voltage Switching is only applicable for Host Controllers
1803 if (host->verid < DW_MMC_240A)
1806 uhs_reg = mci_readl(host, UHS_REG);
1807 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1808 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1810 switch (ios->signal_voltage) {
1811 case MMC_SIGNAL_VOLTAGE_330:
1812 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1814 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1815 /* regulator_put(host->vmmc); //to be done in remove function. */
1817 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1818 __func__, regulator_get_voltage(host->vmmc), ret);
1820 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1821 " failed\n", mmc_hostname(host->mmc));
1824 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1826 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1827 __FUNCTION__, mmc_hostname(host->mmc));
1829 /* set High-power mode */
1830 value = mci_readl(host, CLKENA);
1831 value &= ~SDMMC_CLKEN_LOW_PWR;
1832 mci_writel(host,CLKENA , value);
/* Clear the 1.8V enable bit for 3.3V signalling */
1834 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1835 mci_writel(host,UHS_REG , uhs_reg);
1838 usleep_range(5000, 5500);
1840 /* 3.3V regulator output should be stable within 5 ms */
1841 uhs_reg = mci_readl(host, UHS_REG);
/*
 * NOTE(review): for the 3.3V path success presumably means the 1.8V bit
 * reads back clear; the elided branch bodies here should be checked —
 * as visible, the "stable" log fires when the bit is still clear.
 */
1842 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1845 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1846 mmc_hostname(host->mmc));
1849 case MMC_SIGNAL_VOLTAGE_180:
1851 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1852 /* regulator_put(host->vmmc);//to be done in remove function. */
1854 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1855 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1857 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1858 " failed\n", mmc_hostname(host->mmc));
1861 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1865 * Enable 1.8V Signal Enable in the Host Control2
1868 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1871 usleep_range(5000, 5500);
1872 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1873 __FUNCTION__,mmc_hostname(host->mmc));
1875 /* 1.8V regulator output should be stable within 5 ms */
1876 uhs_reg = mci_readl(host, UHS_REG);
1877 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1880 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1881 mmc_hostname(host->mmc));
1884 case MMC_SIGNAL_VOLTAGE_120:
1886 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1888 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1889 " failed\n", mmc_hostname(host->mmc));
1895 /* No signal voltage switch required */
/*
 * mmc_host_ops.start_signal_voltage_switch: thin wrapper that delegates to
 * dw_mci_do_start_signal_voltage_switch() for IP >= 2.40a.
 */
1901 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1902 struct mmc_ios *ios)
1904 struct dw_mci_slot *slot = mmc_priv(mmc);
1905 struct dw_mci *host = slot->host;
1908 if (host->verid < DW_MMC_240A)
1911 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * mmc_host_ops.execute_tuning: pick the tuning block pattern matching the
 * opcode/bus width, fill in the clock connection id and tuning direction,
 * then delegate to the SoC driver's execute_tuning hook.
 */
1917 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1919 struct dw_mci_slot *slot = mmc_priv(mmc);
1920 struct dw_mci *host = slot->host;
1921 const struct dw_mci_drv_data *drv_data = host->drv_data;
1922 struct dw_mci_tuning_data tuning_data;
1925 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1926 if(cpu_is_rk3036() || cpu_is_rk312x())
1929 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1930 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1931 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1932 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1933 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1934 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1935 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1939 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1940 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1941 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1944 "Undefined command(%d) for tuning\n", opcode);
1949 /* Recommend sample phase and delayline
1950 Fixme: Mix-use these three controllers will cause
/* con_id selects the clock connection: 3=eMMC, 1=SDIO, 0=SD */
1953 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1954 tuning_data.con_id = 3;
1955 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1956 tuning_data.con_id = 1;
1958 tuning_data.con_id = 0;
1960 /* 0: driver, from host->devices
1961 1: sample, from devices->host
1963 tuning_data.tuning_type = 1;
1965 if (drv_data && drv_data->execute_tuning)
1966 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/* Host operations table registered with the MMC core. */
1971 static const struct mmc_host_ops dw_mci_ops = {
1972 .request = dw_mci_request,
1973 .pre_req = dw_mci_pre_req,
1974 .post_req = dw_mci_post_req,
1975 .set_ios = dw_mci_set_ios,
1976 .get_ro = dw_mci_get_ro,
1977 .get_cd = dw_mci_get_cd,
1978 .set_sdio_status = dw_mci_set_sdio_status,
1979 .hw_reset = dw_mci_hw_reset,
1980 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1981 .execute_tuning = dw_mci_execute_tuning,
/* Voltage-switch hooks only exist when the Rockchip SVI support is built in */
1982 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1983 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
1984 .card_busy = dw_mci_card_busy,
/*
 * Enable or disable the host's interrupt line, tracking the current state
 * in host->irq_state so repeated calls with the same flag are no-ops.
 */
1989 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
1991 unsigned long flags;
1996 local_irq_save(flags);
1997 if(host->irq_state != irqflag)
1999 host->irq_state = irqflag;
2002 enable_irq(host->irq);
2006 disable_irq(host->irq);
2009 local_irq_restore(flags);
/*
 * Post-data-phase handling: for writes (except CMD19 bus test), translate
 * latched data-status error bits into data->error, then wait for the
 * controller/card busy condition to clear before the request completes.
 */
2013 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2014 __releases(&host->lock)
2015 __acquires(&host->lock)
2017 if(DW_MCI_SEND_STATUS == host->dir_status){
2019 if( MMC_BUS_TEST_W != host->cmd->opcode){
2020 if(host->data_status & SDMMC_INT_DCRC)
2021 host->data->error = -EILSEQ;
/* End-bit error on a write: no CRC status came back — treat as timeout */
2022 else if(host->data_status & SDMMC_INT_EBE)
2023 host->data->error = -ETIMEDOUT;
2025 dw_mci_wait_unbusy(host);
2028 dw_mci_wait_unbusy(host);
/*
 * Finish the current request: cancel the DTO timer, run end-of-data
 * handling, complete the mrq to the MMC core (dropping host->lock around
 * mmc_request_done), and dispatch the next queued slot if any.
 */
2033 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2034 __releases(&host->lock)
2035 __acquires(&host->lock)
2037 struct dw_mci_slot *slot;
2038 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2040 WARN_ON(host->cmd || host->data);
2042 del_timer_sync(&host->dto_timer);
2043 dw_mci_deal_data_end(host, mrq);
2046 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2047 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2049 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2050 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2052 host->cur_slot->mrq = NULL;
/* Start the next queued request, or go idle */
2054 if (!list_empty(&host->queue)) {
2055 slot = list_entry(host->queue.next,
2056 struct dw_mci_slot, queue_node);
2057 list_del(&slot->queue_node);
2058 dev_vdbg(host->dev, "list not empty: %s is next\n",
2059 mmc_hostname(slot->mmc));
2060 host->state = STATE_SENDING_CMD;
2061 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2062 dw_mci_start_request(host, slot);
2064 dev_vdbg(host->dev, "list empty\n");
2065 host->state = STATE_IDLE;
/* mmc_request_done may re-enter the driver; call it without the lock */
2068 spin_unlock(&host->lock);
2069 mmc_request_done(prev_mmc, mrq);
2070 spin_lock(&host->lock);
/*
 * Read back the command response registers and translate the latched
 * command status bits (RTO/RCRC/RESP_ERR) into cmd->error.
 */
2073 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2075 u32 status = host->cmd_status;
2077 host->cmd_status = 0;
2079 /* Read the response from the card (up to 16 bytes) */
2080 if (cmd->flags & MMC_RSP_PRESENT) {
2081 if (cmd->flags & MMC_RSP_136) {
/* 136-bit responses arrive in reverse register order */
2082 cmd->resp[3] = mci_readl(host, RESP0);
2083 cmd->resp[2] = mci_readl(host, RESP1);
2084 cmd->resp[1] = mci_readl(host, RESP2);
2085 cmd->resp[0] = mci_readl(host, RESP3);
2087 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2088 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2090 cmd->resp[0] = mci_readl(host, RESP0);
2094 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2095 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
/* Response timeout: SDIO hosts take an elided special path first */
2099 if (status & SDMMC_INT_RTO)
2101 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2104 cmd->error = -ETIMEDOUT;
2105 del_timer_sync(&host->dto_timer);
2106 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2107 del_timer_sync(&host->dto_timer);
2108 cmd->error = -EILSEQ;
2109 }else if (status & SDMMC_INT_RESP_ERR){
2110 del_timer_sync(&host->dto_timer);
2115 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2116 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2119 del_timer_sync(&host->dto_timer);
/* Throttle logging of repeated CMD13 response timeouts */
2120 if(MMC_SEND_STATUS != cmd->opcode)
2121 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2122 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2123 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2127 /* newer ip versions need a delay between retries */
2128 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine driven by ISR-set pending_events.  Walks the
 * request through SENDING_CMD -> SENDING_DATA -> DATA_BUSY ->
 * SENDING_STOP / DATA_ERROR until the state stops changing, then stores the
 * final state back into host->state.  Runs entirely under host->lock.
 */
2134 static void dw_mci_tasklet_func(unsigned long priv)
2136 struct dw_mci *host = (struct dw_mci *)priv;
2137 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2138 struct mmc_data *data;
2139 struct mmc_command *cmd;
2140 enum dw_mci_state state;
2141 enum dw_mci_state prev_state;
2142 u32 status, cmd_flags;
2143 unsigned long timeout = 0;
2146 spin_lock(&host->lock);
2148 state = host->state;
/* --- command phase finished? --- */
2158 case STATE_SENDING_CMD:
2159 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2160 &host->pending_events))
2165 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2166 dw_mci_command_complete(host, cmd);
/* SBC (CMD23) done: immediately issue the real data command */
2167 if (cmd == host->mrq->sbc && !cmd->error) {
2168 prev_state = state = STATE_SENDING_CMD;
2169 __dw_mci_start_request(host, host->cur_slot,
/* Command with data failed: abort DMA and send stop */
2174 if (cmd->data && cmd->error) {
2175 del_timer_sync(&host->dto_timer); /* delete the timer for INT_DTO */
2176 dw_mci_stop_dma(host);
2179 send_stop_cmd(host, data);
2180 state = STATE_SENDING_STOP;
2186 send_stop_abort(host, data);
2187 state = STATE_SENDING_STOP;
2190 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2193 if (!host->mrq->data || cmd->error) {
2194 dw_mci_request_end(host, host->mrq);
2198 prev_state = state = STATE_SENDING_DATA;
/* --- data transfer in flight --- */
2201 case STATE_SENDING_DATA:
2202 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2203 dw_mci_stop_dma(host);
2206 send_stop_cmd(host, data);
2208 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2209 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2210 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
/* Hand-craft a CMD12 directly in the CMD register */
2212 mci_writel(host, CMDARG, 0);
2214 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2215 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2217 if(host->mmc->hold_reg_flag)
2218 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2220 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait up to 500ms for the controller to take the command */
2222 timeout = jiffies + msecs_to_jiffies(500);
2225 ret = time_before(jiffies, timeout);
2226 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2230 MMC_DBG_ERR_FUNC(host->mmc,
2231 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2232 __func__, mmc_hostname(host->mmc));
2235 send_stop_abort(host, data);
2237 state = STATE_DATA_ERROR;
2241 MMC_DBG_CMD_FUNC(host->mmc,
2242 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2243 prev_state,state, mmc_hostname(host->mmc));
2245 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2246 &host->pending_events))
2248 MMC_DBG_INFO_FUNC(host->mmc,
2249 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2250 prev_state,state,mmc_hostname(host->mmc));
2252 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2253 prev_state = state = STATE_DATA_BUSY;
/* --- waiting for the card to release busy after data --- */
2256 case STATE_DATA_BUSY:
2257 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2258 &host->pending_events))
2261 dw_mci_deal_data_end(host, host->mrq);
2262 del_timer_sync(&host->dto_timer); //delete the timer for INT_DTO
2263 MMC_DBG_INFO_FUNC(host->mmc,
2264 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2265 prev_state,state,mmc_hostname(host->mmc));
2268 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2269 status = host->data_status;
/* Map latched data error bits to errno values */
2271 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2272 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2273 MMC_DBG_ERR_FUNC(host->mmc,
2274 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2275 prev_state,state, status, mmc_hostname(host->mmc));
2277 if (status & SDMMC_INT_DRTO) {
2278 data->error = -ETIMEDOUT;
2279 } else if (status & SDMMC_INT_DCRC) {
2280 data->error = -EILSEQ;
2281 } else if (status & SDMMC_INT_EBE &&
2282 host->dir_status == DW_MCI_SEND_STATUS){
2284 * No data CRC status was returned.
2285 * The number of bytes transferred will
2286 * be exaggerated in PIO mode.
2288 data->bytes_xfered = 0;
2289 data->error = -ETIMEDOUT;
2298 * After an error, there may be data lingering
2299 * in the FIFO, so reset it - doing so
2300 * generates a block interrupt, hence setting
2301 * the scatter-gather pointer to NULL.
2303 dw_mci_fifo_reset(host);
2305 data->bytes_xfered = data->blocks * data->blksz;
2310 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2311 prev_state,state,mmc_hostname(host->mmc));
2312 dw_mci_request_end(host, host->mrq);
2315 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2316 prev_state,state,mmc_hostname(host->mmc));
/* With SBC the card auto-stops; no CMD12 needed on success */
2318 if (host->mrq->sbc && !data->error) {
2319 data->stop->error = 0;
2321 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2322 prev_state,state,mmc_hostname(host->mmc));
2324 dw_mci_request_end(host, host->mrq);
2328 prev_state = state = STATE_SENDING_STOP;
2330 send_stop_cmd(host, data);
2332 if (data->stop && !data->error) {
2333 /* stop command for open-ended transfer*/
2335 send_stop_abort(host, data);
2339 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2340 prev_state,state,mmc_hostname(host->mmc));
/* --- waiting for the stop command to complete --- */
2342 case STATE_SENDING_STOP:
2343 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2346 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2347 prev_state, state, mmc_hostname(host->mmc));
2349 /* CMD error in data command */
2350 if (host->mrq->cmd->error && host->mrq->data) {
2351 dw_mci_fifo_reset(host);
2357 dw_mci_command_complete(host, host->mrq->stop);
2359 if (host->mrq->stop)
2360 dw_mci_command_complete(host, host->mrq->stop);
2362 host->cmd_status = 0;
2365 dw_mci_request_end(host, host->mrq);
/* --- data error: wait for transfer drain, then rejoin DATA_BUSY --- */
2368 case STATE_DATA_ERROR:
2369 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2370 &host->pending_events))
2373 state = STATE_DATA_BUSY;
2376 } while (state != prev_state);
2378 host->state = state;
2380 spin_unlock(&host->lock);
2384 /* push final bytes to part_buf, only use during push */
/* Stash the trailing <fifo-width bytes of a push into host->part_buf. */
2385 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2387 memcpy((void *)&host->part_buf, buf, cnt);
2388 host->part_buf_count = cnt;
2391 /* append bytes to part_buf, only use during push */
/* Append up to (FIFO word size - current fill) bytes; return bytes taken. */
2392 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2394 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2395 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2396 host->part_buf_count += cnt;
2401 /* pull first bytes from part_buf, only use during pull */
/* Drain previously buffered bytes into buf; return bytes consumed. */
2403 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2403 cnt = min(cnt, (int)host->part_buf_count);
2405 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2407 host->part_buf_count -= cnt;
2408 host->part_buf_start += cnt;
2413 /* pull final bytes from the part_buf, assuming it's just been filled */
/* Copy cnt leading bytes out; remember the rest for the next pull. */
2414 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2416 memcpy(buf, &host->part_buf, cnt);
2417 host->part_buf_start = cnt;
2418 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO write path for a 16-bit FIFO: flush any partial word, stage unaligned
 * source data through a bounce buffer, write whole u16 words, and hold the
 * trailing odd byte in part_buf (flushed when the block length is reached).
 */
2421 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2423 struct mmc_data *data = host->data;
2426 /* try and push anything in the part_buf */
2427 if (unlikely(host->part_buf_count)) {
2428 int len = dw_mci_push_part_bytes(host, buf, cnt);
2431 if (host->part_buf_count == 2) {
2432 mci_writew(host, DATA(host->data_offset),
2434 host->part_buf_count = 0;
2437 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2438 if (unlikely((unsigned long)buf & 0x1)) {
2440 u16 aligned_buf[64];
2441 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2442 int items = len >> 1;
2444 /* memcpy from input buffer into aligned buffer */
2445 memcpy(aligned_buf, buf, len);
2448 /* push data from aligned buffer into fifo */
2449 for (i = 0; i < items; ++i)
2450 mci_writew(host, DATA(host->data_offset),
2457 for (; cnt >= 2; cnt -= 2)
2458 mci_writew(host, DATA(host->data_offset), *pdata++);
2461 /* put anything remaining in the part_buf */
2463 dw_mci_set_part_bytes(host, buf, cnt);
2464 /* Push data if we have reached the expected data length */
2465 if ((data->bytes_xfered + init_cnt) ==
2466 (data->blksz * data->blocks))
2467 mci_writew(host, DATA(host->data_offset),
/*
 * PIO read path for a 16-bit FIFO: read whole u16 words (bouncing through
 * an aligned buffer when the destination is misaligned), then save a final
 * odd byte in part_buf for the next pull.
 */
2472 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2474 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2475 if (unlikely((unsigned long)buf & 0x1)) {
2477 /* pull data from fifo into aligned buffer */
2478 u16 aligned_buf[64];
2479 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2480 int items = len >> 1;
2482 for (i = 0; i < items; ++i)
2483 aligned_buf[i] = mci_readw(host,
2484 DATA(host->data_offset));
2485 /* memcpy from aligned buffer into output buffer */
2486 memcpy(buf, aligned_buf, len);
2494 for (; cnt >= 2; cnt -= 2)
2495 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Trailing odd byte: read one more FIFO word and keep the remainder */
2499 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2500 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO write path for a 32-bit FIFO — same structure as the 16-bit variant
 * but with u32 word size and 4-byte alignment handling.
 */
2504 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2506 struct mmc_data *data = host->data;
2509 /* try and push anything in the part_buf */
2510 if (unlikely(host->part_buf_count)) {
2511 int len = dw_mci_push_part_bytes(host, buf, cnt);
2514 if (host->part_buf_count == 4) {
2515 mci_writel(host, DATA(host->data_offset),
2517 host->part_buf_count = 0;
2520 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2521 if (unlikely((unsigned long)buf & 0x3)) {
2523 u32 aligned_buf[32];
2524 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2525 int items = len >> 2;
2527 /* memcpy from input buffer into aligned buffer */
2528 memcpy(aligned_buf, buf, len);
2531 /* push data from aligned buffer into fifo */
2532 for (i = 0; i < items; ++i)
2533 mci_writel(host, DATA(host->data_offset),
2540 for (; cnt >= 4; cnt -= 4)
2541 mci_writel(host, DATA(host->data_offset), *pdata++);
2544 /* put anything remaining in the part_buf */
2546 dw_mci_set_part_bytes(host, buf, cnt);
2547 /* Push data if we have reached the expected data length */
2548 if ((data->bytes_xfered + init_cnt) ==
2549 (data->blksz * data->blocks))
2550 mci_writel(host, DATA(host->data_offset),
/*
 * PIO read for a host whose data FIFO is 32 bits wide: drain @cnt bytes
 * from the FIFO into @buf.  Mirrors dw_mci_pull_data16() but with 4-byte
 * words: misaligned destinations go through an aligned staging buffer,
 * and 1-3 trailing bytes are read into host->part_buf32 and emitted via
 * dw_mci_pull_final_bytes().
 */
2555 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2557 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2558 if (unlikely((unsigned long)buf & 0x3)) {
2560 /* pull data from fifo into aligned buffer */
2561 u32 aligned_buf[32];
2562 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2563 int items = len >> 2;
2565 for (i = 0; i < items; ++i)
2566 aligned_buf[i] = mci_readl(host,
2567 DATA(host->data_offset));
2568 /* memcpy from aligned buffer into output buffer */
2569 memcpy(buf, aligned_buf, len);
/* aligned destination: read 32-bit words straight into the buffer */
2577 for (; cnt >= 4; cnt -= 4)
2578 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* leftover 1-3 bytes are parked in part_buf32 for the final flush */
2582 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2583 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO write for a host whose data FIFO is 64 bits wide: push @cnt bytes
 * from @buf into the FIFO.  Same structure as dw_mci_push_data32() with
 * 8-byte words: complete the pending partial word in host->part_buf,
 * stream aligned 64-bit words (via a staging buffer when misaligned),
 * then stash 1-7 remaining bytes in part_buf — flushed at once only if
 * the transfer's total byte count has been reached.
 */
2587 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2589 struct mmc_data *data = host->data;
2592 /* try and push anything in the part_buf */
2593 if (unlikely(host->part_buf_count)) {
2594 int len = dw_mci_push_part_bytes(host, buf, cnt);
2598 if (host->part_buf_count == 8) {
2599 mci_writeq(host, DATA(host->data_offset),
2601 host->part_buf_count = 0;
2604 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2605 if (unlikely((unsigned long)buf & 0x7)) {
2607 u64 aligned_buf[16];
2608 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2609 int items = len >> 3;
2611 /* memcpy from input buffer into aligned buffer */
2612 memcpy(aligned_buf, buf, len);
2615 /* push data from aligned buffer into fifo */
2616 for (i = 0; i < items; ++i)
2617 mci_writeq(host, DATA(host->data_offset),
/* aligned source: write 64-bit words directly from the buffer */
2624 for (; cnt >= 8; cnt -= 8)
2625 mci_writeq(host, DATA(host->data_offset), *pdata++);
2628 /* put anything remaining in the part_buf */
2630 dw_mci_set_part_bytes(host, buf, cnt);
2631 /* Push data if we have reached the expected data length */
2632 if ((data->bytes_xfered + init_cnt) ==
2633 (data->blksz * data->blocks))
2634 mci_writeq(host, DATA(host->data_offset),
/*
 * PIO read for a host whose data FIFO is 64 bits wide: drain @cnt bytes
 * from the FIFO into @buf.  Misaligned destinations stage through an
 * aligned on-stack buffer; 1-7 trailing bytes are read into
 * host->part_buf and emitted via dw_mci_pull_final_bytes().
 */
2639 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2641 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2642 if (unlikely((unsigned long)buf & 0x7)) {
2644 /* pull data from fifo into aligned buffer */
2645 u64 aligned_buf[16];
2646 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2647 int items = len >> 3;
2649 for (i = 0; i < items; ++i)
2650 aligned_buf[i] = mci_readq(host,
2651 DATA(host->data_offset));
2652 /* memcpy from aligned buffer into output buffer */
2653 memcpy(buf, aligned_buf, len);
/* aligned destination: read 64-bit words straight into the buffer */
2661 for (; cnt >= 8; cnt -= 8)
2662 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* leftover 1-7 bytes are parked in part_buf for the final flush */
2666 host->part_buf = mci_readq(host, DATA(host->data_offset));
2667 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Width-independent FIFO read entry point: first hand out any bytes left
 * over in host->part_buf from a previous call, then delegate to the
 * FIFO-width-specific handler installed in host->pull_data
 * (dw_mci_pull_data16/32/64).
 */
2671 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2675 /* get remaining partial bytes */
2676 len = dw_mci_pull_part_bytes(host, buf, cnt);
/* request fully satisfied from part_buf — nothing to read from the FIFO */
2677 if (unlikely(len == cnt))
2682 /* get the rest of the data */
2683 host->pull_data(host, buf, cnt);
/*
 * PIO receive path, called from the RXDR/DTO interrupt handlers.  Walks
 * the request's scatterlist with an sg mapping iterator, pulling as many
 * bytes per pass as the FIFO count (STATUS.FCNT scaled by data_shift)
 * plus any part_buf remainder allows.  Loops while RXDR stays pending —
 * or, when @dto is set, until the FIFO is fully drained — then either
 * advances to the next sg segment or marks EVENT_XFER_COMPLETE.
 * NOTE(review): the "done"/error exit paths sit in the elided lines.
 */
2686 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2688 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2690 unsigned int offset;
2691 struct mmc_data *data = host->data;
2692 int shift = host->data_shift;
2695 unsigned int remain, fcnt;
/* guard: bus reference already dropped — presumably a teardown race; bail out */
2697 if(!host->mmc->bus_refs){
2698 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2702 if (!sg_miter_next(sg_miter))
2705 host->sg = sg_miter->piter.sg;
2706 buf = sg_miter->addr;
2707 remain = sg_miter->length;
/* bytes currently available = FIFO words scaled to bytes + part_buf bytes */
2711 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2712 << shift) + host->part_buf_count;
2713 len = min(remain, fcnt);
2716 dw_mci_pull_data(host, (void *)(buf + offset), len);
2717 data->bytes_xfered += len;
2722 sg_miter->consumed = offset;
2723 status = mci_readl(host, MINTSTS);
2724 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2725 /* if the RXDR is ready read again */
2726 } while ((status & SDMMC_INT_RXDR) ||
2727 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2730 if (!sg_miter_next(sg_miter))
2732 sg_miter->consumed = 0;
2734 sg_miter_stop(sg_miter);
2738 sg_miter_stop(sg_miter);
2742 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO transmit path, called from the TXDR interrupt handler.  Walks the
 * request's scatterlist, pushing per pass as many bytes as there is free
 * FIFO space ((fifo_depth - FCNT) scaled by data_shift, minus bytes
 * already buffered in part_buf).  Loops while TXDR stays pending, then
 * either advances to the next sg segment or marks EVENT_XFER_COMPLETE.
 */
2745 static void dw_mci_write_data_pio(struct dw_mci *host)
2747 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2749 unsigned int offset;
2750 struct mmc_data *data = host->data;
2751 int shift = host->data_shift;
2754 unsigned int fifo_depth = host->fifo_depth;
2755 unsigned int remain, fcnt;
/* guard: bus reference already dropped — presumably a teardown race; bail out */
2757 if(!host->mmc->bus_refs){
2758 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2763 if (!sg_miter_next(sg_miter))
2766 host->sg = sg_miter->piter.sg;
2767 buf = sg_miter->addr;
2768 remain = sg_miter->length;
/* free FIFO space in bytes, net of bytes already staged in part_buf */
2772 fcnt = ((fifo_depth -
2773 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2774 << shift) - host->part_buf_count;
2775 len = min(remain, fcnt);
2778 host->push_data(host, (void *)(buf + offset), len);
2779 data->bytes_xfered += len;
2784 sg_miter->consumed = offset;
2785 status = mci_readl(host, MINTSTS);
2786 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2787 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2790 if (!sg_miter_next(sg_miter))
2792 sg_miter->consumed = 0;
2794 sg_miter_stop(sg_miter);
2798 sg_miter_stop(sg_miter);
2802 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Command-done interrupt bottom half: latch the raw status, (re)arm the
 * data-over watchdog timer for non-STOP commands, then flag
 * EVENT_CMD_COMPLETE and kick the state-machine tasklet.
 * The timeout is scaled by the programmed byte count: one 4 s slot per
 * 2 MiB (SZ_2M) of data, plus extra slots for command retries.
 * NOTE(review): the in-line "8s" comment below does not match the
 * 4000 ms * multi computation — looks stale; confirm intended budget.
 */
2805 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2807 u32 multi, unit = SZ_2M;
/* keep the first reported status; don't overwrite an earlier error */
2809 if (!host->cmd_status)
2810 host->cmd_status = status;
2815 if ((MMC_STOP_TRANSMISSION != host->cmd->opcode)) {
2816 multi = (mci_readl(host, BYTCNT) / unit) +
2817 ((mci_readl(host, BYTCNT) % unit) ? 1 :0 ) +
2818 ((host->cmd->retries > 2) ? 2 : host->cmd->retries);
2819 /* Max limit time: 8s for dto */
2820 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(4000 * multi));
2825 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2826 tasklet_schedule(&host->tasklet);
/*
 * Top-level hard-IRQ handler.  Reads the masked interrupt status
 * (MINTSTS), acks each pending source in RINTSTS, and dispatches:
 * command/data errors to the tasklet, DATA_OVER and RXDR/TXDR to the
 * PIO paths, CMD_DONE/VSI to dw_mci_cmd_interrupt(), card-detect to the
 * card workqueue, per-slot SDIO interrupts to the MMC core, and finally
 * (internal-DMA SoCs only) IDMAC completion via IDSTS.
 */
2829 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2831 struct dw_mci *host = dev_id;
2832 u32 pending, sdio_int;
2835 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2838 * DTO fix - version 2.10a and below, and only if internal DMA
2841 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
/* STATUS bits 17..29 are the FIFO count — synthesize DATA_OVER when empty */
2843 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2844 pending |= SDMMC_INT_DATA_OVER;
2848 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2849 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2850 host->cmd_status = pending;
2852 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2853 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2855 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2858 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2859 /* if there is an error report DATA_ERROR */
2860 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2861 host->data_status = pending;
2863 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2865 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2866 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2867 tasklet_schedule(&host->tasklet);
2870 if (pending & SDMMC_INT_DATA_OVER) {
2871 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2872 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2873 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2874 if (!host->data_status)
2875 host->data_status = pending;
/* on a read, drain whatever is still sitting in the FIFO before completing */
2877 if (host->dir_status == DW_MCI_RECV_STATUS) {
2878 if (host->sg != NULL)
2879 dw_mci_read_data_pio(host, true);
2881 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2882 tasklet_schedule(&host->tasklet);
2885 if (pending & SDMMC_INT_RXDR) {
2886 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2887 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2888 dw_mci_read_data_pio(host, false);
2891 if (pending & SDMMC_INT_TXDR) {
2892 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2893 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2894 dw_mci_write_data_pio(host);
2897 if (pending & SDMMC_INT_VSI) {
2898 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2899 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2900 dw_mci_cmd_interrupt(host, pending);
2903 if (pending & SDMMC_INT_CMD_DONE) {
2904 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2905 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2906 dw_mci_cmd_interrupt(host, pending);
2909 if (pending & SDMMC_INT_CD) {
2910 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2911 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
/* hold a wakelock long enough for the card-detect work to run */
2912 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2913 queue_work(host->card_workqueue, &host->card_work);
2916 if (pending & SDMMC_INT_HLE) {
2917 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2918 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2922 /* Handle SDIO Interrupts */
2923 for (i = 0; i < host->num_slots; i++) {
2924 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in IP version 2.40a */
2926 if (host->verid < DW_MMC_240A)
2927 sdio_int = SDMMC_INT_SDIO(i);
2929 sdio_int = SDMMC_INT_SDIO(i + 8);
2931 if (pending & sdio_int) {
2932 mci_writel(host, RINTSTS, sdio_int);
2933 mmc_signal_sdio_irq(slot->mmc);
2939 #ifdef CONFIG_MMC_DW_IDMAC
2940 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2941 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2942 /* Handle DMA interrupts */
2943 pending = mci_readl(host, IDSTS);
2944 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2945 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2946 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2947 host->dma_ops->complete((void *)host);
/*
 * Card-detect workqueue handler.  For every slot: sample the current
 * card-present state, optionally switch the pins between SD and uart_dbg
 * function (software-switched "audi" chips only), and on a state change
 * reset the controller, fail any in-flight or queued request with
 * -ENOMEDIUM according to the state machine's position, complete it, and
 * finally tell the MMC core to rescan via mmc_detect_change().
 */
2957 static void dw_mci_work_routine_card(struct work_struct *work)
2960 for (i = 0; i < host->num_slots; i++) {
2961 struct dw_mci_slot *slot = host->slot[i];
2962 struct mmc_host *mmc = slot->mmc;
2963 struct mmc_request *mrq;
2966 present = dw_mci_get_cd(mmc);
2968 /* Card insert, switch data line to uart function, and vice verse.
2969 * ONLY audi chip need switched by software, using udbg tag in dts!
2971 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
2973 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
2974 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
2975 mmc_hostname(host->mmc));
2977 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
2978 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
2979 mmc_hostname(host->mmc));
/* loop: re-sample at the bottom in case the card state changed again */
2983 while (present != slot->last_detect_state) {
2984 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2985 present ? "inserted" : "removed");
2986 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
2987 present ? "inserted" : "removed.", mmc_hostname(mmc));
2989 dw_mci_ctrl_all_reset(host);
2990 /* Stop edma when rountine card triggered */
2991 if(cpu_is_rk3036() || cpu_is_rk312x())
2992 if(host->dma_ops && host->dma_ops->stop)
2993 host->dma_ops->stop(host);
2994 rk_send_wakeup_key();//wake up system
2995 spin_lock_bh(&host->lock);
2997 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2998 /* Card change detected */
2999 slot->last_detect_state = present;
3001 /* Clean up queue if present */
3004 if (mrq == host->mrq) {
/* request currently being processed: fail it per current FSM state */
3008 switch (host->state) {
3011 case STATE_SENDING_CMD:
3012 mrq->cmd->error = -ENOMEDIUM;
3016 case STATE_SENDING_DATA:
3017 mrq->data->error = -ENOMEDIUM;
3018 dw_mci_stop_dma(host);
3020 case STATE_DATA_BUSY:
3021 case STATE_DATA_ERROR:
3022 if (mrq->data->error == -EINPROGRESS)
3023 mrq->data->error = -ENOMEDIUM;
3027 case STATE_SENDING_STOP:
3028 mrq->stop->error = -ENOMEDIUM;
3032 dw_mci_request_end(host, mrq);
/* request still queued: just unlink it and fail all parts */
3034 list_del(&slot->queue_node);
3035 mrq->cmd->error = -ENOMEDIUM;
3037 mrq->data->error = -ENOMEDIUM;
3039 mrq->stop->error = -ENOMEDIUM;
3041 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3042 mrq->cmd->opcode, mmc_hostname(mmc));
3044 spin_unlock(&host->lock);
3045 mmc_request_done(slot->mmc, mrq);
3046 spin_lock(&host->lock);
3050 /* Power down slot */
3052 /* Clear down the FIFO */
3053 dw_mci_fifo_reset(host);
3054 #ifdef CONFIG_MMC_DW_IDMAC
3055 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3056 dw_mci_idmac_reset(host);
3061 spin_unlock_bh(&host->lock);
3063 present = dw_mci_get_cd(mmc);
3066 mmc_detect_change(slot->mmc,
3067 msecs_to_jiffies(host->pdata->detect_delay_ms));
/* given a slot id, find out the device node representing that slot */
/*
 * Scans the controller node's children for one whose "reg" property
 * equals @slot; returns that node (NULL-ish fall-through is elided).
 */
3073 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3075 struct device_node *np;
3079 if (!dev || !dev->of_node)
3082 for_each_child_of_node(dev->of_node, np) {
3083 addr = of_get_property(np, "reg", &len);
/* skip children with a missing or too-short "reg" property */
3084 if (!addr || (len < sizeof(int)))
3086 if (be32_to_cpup(addr) == slot)
3092 static struct dw_mci_of_slot_quirks {
3095 } of_slot_quirks[] = {
3097 .quirk = "disable-wp",
3098 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Collect the quirk flags for @slot by testing each of_slot_quirks[]
 * property against the slot's DT node.
 */
3102 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3104 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3109 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3110 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3111 quirks |= of_slot_quirks[idx].id;
/* find out bus-width for a given slot */
/*
 * Reads "bus-width" from the controller node itself (the per-slot node
 * lookup is commented out); falls back with an error message if absent.
 */
3117 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3119 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3125 if (of_property_read_u32(np, "bus-width", &bus_wd))
3126 dev_err(dev, "bus-width property not found, assuming width"
/* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Looks up "pwr-gpios" on the controller node, claims it, and drives it
 * low (active power-enable per board wiring — confirm polarity in dts).
 */
3133 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3135 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3141 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3143 /* Having a missing entry is valid; return silently */
3144 if (!gpio_is_valid(gpio))
3147 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3148 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3152 gpio_direction_output(gpio, 0);//set 0 to pwr-en
/* find the write protect gpio for a given slot; or -1 if none specified */
/* Looks up "wp-gpios" on the slot's DT node and claims it if present. */
3159 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3161 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3167 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3169 /* Having a missing entry is valid; return silently */
3170 if (!gpio_is_valid(gpio))
3173 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3174 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/* find the cd gpio for a given slot */
/*
 * Looks up "cd-gpios" on the controller node and registers it with the
 * MMC core's slot-gpio card-detect helper (mmc_gpio_request_cd).
 */
3182 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3183 struct mmc_host *mmc)
3185 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3191 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3193 /* Having a missing entry is valid; return silently */
3194 if (!gpio_is_valid(gpio))
3197 if (mmc_gpio_request_cd(mmc, gpio, 0))
3198 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * Threaded IRQ handler for the GPIO card-detect line.  Always wakes the
 * system, but only queues the card-detect work when rescan is enabled;
 * during suspend (rescan_disable set) the suspend_post notifier is
 * expected to handle the detect instead.
 */
3201 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3203 struct mmc_host *mmc = dev_id;
3204 struct dw_mci_slot *slot = mmc_priv(mmc);
3205 struct dw_mci *host = slot->host;
3207 /* wakeup system whether gpio debounce or not */
3208 rk_send_wakeup_key();
3210 /* no need to trigger detect flow when rescan is disabled.
3211 This case happended in dpm, that we just wakeup system and
3212 let suspend_post notify callback handle it.
3214 if(mmc->rescan_disable == 0)
3215 queue_work(host->card_workqueue, &host->card_work);
3217 printk("%s: rescan been disabled!\n", __FUNCTION__);
/*
 * Install a both-edge threaded interrupt on the card-detect GPIO and
 * enable it as a wakeup source so insert/remove can wake the system
 * from idle or deep suspend.
 */
3222 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3223 struct mmc_host *mmc)
3225 struct dw_mci_slot *slot = mmc_priv(mmc);
3226 struct dw_mci *host = slot->host;
3230 /* Having a missing entry is valid; return silently */
3231 if (!gpio_is_valid(gpio))
3234 irq = gpio_to_irq(gpio);
3236 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3237 NULL, dw_mci_gpio_cd_irqt,
3238 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
3242 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3244 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3245 enable_irq_wake(irq);
3248 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/* Release the card-detect GPIO interrupt and the GPIO itself (teardown
 * counterpart of dw_mci_of_set_cd_gpio_irq()). */
3252 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3253 struct mmc_host *mmc)
3255 if (!gpio_is_valid(gpio))
3258 if (gpio_to_irq(gpio) >= 0) {
3259 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3260 devm_gpio_free(&mmc->class_dev, gpio);
3263 #else /* CONFIG_OF */
/* No-op fallbacks when the kernel is built without devicetree support. */
3264 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3268 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3272 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3276 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3280 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3281 struct mmc_host *mmc)
3285 #endif /* CONFIG_OF */
/* @host: dw_mci host prvdata
 * Init pinctrl for each platform. Usually we assign
 * "defalut" tag for functional usage, "idle" tag for gpio
 * state and "udbg" tag for uart_dbg if any.
 */
/*
 * Lookup order: "idle" is selected first (pins parked as GPIO), then
 * "default" (functional SD/MMC pins).  For SD slots an optional "udbg"
 * state is selected when no card is present, handing data0/1 to the
 * uart debug function.  eMMC pins are never touched.
 */
3292 static void dw_mci_init_pinctrl(struct dw_mci *host)
3294 /* Fixme: DON'T TOUCH EMMC SETTING! */
3295 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3298 /* Get pinctrl for DTS */
3299 host->pinctrl = devm_pinctrl_get(host->dev);
3300 if (IS_ERR(host->pinctrl)) {
3301 dev_err(host->dev, "%s: No pinctrl used!\n",
3302 mmc_hostname(host->mmc));
3306 /* Lookup idle state */
3307 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3308 PINCTRL_STATE_IDLE);
3309 if (IS_ERR(host->pins_idle)) {
3310 dev_err(host->dev, "%s: No idle tag found!\n",
3311 mmc_hostname(host->mmc));
3313 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3314 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3315 mmc_hostname(host->mmc));
3318 /* Lookup default state */
3319 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3320 PINCTRL_STATE_DEFAULT);
3321 if (IS_ERR(host->pins_default)) {
3322 dev_err(host->dev, "%s: No default pinctrl found!\n",
3323 mmc_hostname(host->mmc));
3325 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3326 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3327 mmc_hostname(host->mmc));
3330 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3331 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3332 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3333 if (IS_ERR(host->pins_udbg)) {
3334 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3335 mmc_hostname(host->mmc));
/* no card inserted at boot: route the pins to uart_dbg for now */
3337 if (!dw_mci_get_cd(host->mmc))
3338 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3339 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3340 mmc_hostname(host->mmc));
/*
 * PM notifier registered for removable-SD hosts.  On suspend prepare it
 * blocks rescans (rescan_disable=1) and cancels any pending detect work,
 * releasing its wakelock; on resume it re-enables rescans and schedules
 * a detect so a card swapped while suspended is noticed.
 * NOTE(review): dev_err is used for what look like informational
 * messages — presumably to guarantee visibility in release logs.
 */
3345 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3346 unsigned long mode, void *unused)
3348 struct mmc_host *host = container_of(
3349 notify_block, struct mmc_host, pm_notify);
3350 unsigned long flags;
3353 case PM_HIBERNATION_PREPARE:
3354 case PM_SUSPEND_PREPARE:
3355 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3356 spin_lock_irqsave(&host->lock, flags);
3357 host->rescan_disable = 1;
3358 spin_unlock_irqrestore(&host->lock, flags);
3359 if (cancel_delayed_work(&host->detect))
3360 wake_unlock(&host->detect_wake_lock);
3363 case PM_POST_SUSPEND:
3364 case PM_POST_HIBERNATION:
3365 case PM_POST_RESTORE:
3366 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3367 spin_lock_irqsave(&host->lock, flags);
3368 host->rescan_disable = 0;
3369 spin_unlock_irqrestore(&host->lock, flags);
3370 mmc_detect_change(host, 10);
/*
 * Allocate and register the mmc_host for slot @id.  Sequence: allocate
 * host, read DT frequency limits and "supports-*" card-type restrictions,
 * register a PM notifier for removable SD, optionally wire a GPIO
 * card-detect IRQ (rk3126-class SoCs), build OCR mask and capability
 * flags (platform data, per-controller drv_data tables, DT properties),
 * size the request limits for IDMAC/EDMA/PIO, claim power-enable and
 * write-protect GPIOs plus the vmmc regulator, init pinctrl, and finally
 * mmc_add_host().  Error paths unwind the PM notifier and cd-gpio IRQ.
 */
3376 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3378 struct mmc_host *mmc;
3379 struct dw_mci_slot *slot;
3380 const struct dw_mci_drv_data *drv_data = host->drv_data;
3385 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3389 slot = mmc_priv(mmc);
3393 host->slot[id] = slot;
3396 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3398 mmc->ops = &dw_mci_ops;
/* DT "clock-freq-min-max" overrides the driver's default f_min/f_max */
3400 if (of_property_read_u32_array(host->dev->of_node,
3401 "clock-freq-min-max", freq, 2)) {
3402 mmc->f_min = DW_MCI_FREQ_MIN;
3403 mmc->f_max = DW_MCI_FREQ_MAX;
3405 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3406 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3408 mmc->f_min = freq[0];
3409 mmc->f_max = freq[1];
3411 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3412 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3415 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
/* each controller instance serves exactly one card type, tagged in DT */
3417 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3418 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3419 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3420 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3421 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3422 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3424 if (of_find_property(host->dev->of_node, "supports-tSD", NULL))
3425 mmc->restrict_caps |= RESTRICT_CARD_TYPE_TSD;
3427 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3428 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3429 if (register_pm_notifier(&mmc->pm_notify)) {
3430 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3431 goto err_pm_notifier;
3435 /* We assume only low-level chip use gpio_cd */
3436 if ((soc_is_rk3126() || soc_is_rk3126b()) &&
3437 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3438 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3439 if (gpio_is_valid(slot->cd_gpio)) {
3440 /* Request gpio int for card detection */
3441 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3443 slot->cd_gpio = -ENODEV;
3444 dev_err(host->dev, "failed to get your cd-gpios!\n");
3448 if (host->pdata->get_ocr)
3449 mmc->ocr_avail = host->pdata->get_ocr(id);
/* default OCR: advertise the full 1.65-3.6 V range */
3452 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3453 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3454 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3455 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3459 * Start with slot power disabled, it will be enabled when a card
3462 if (host->pdata->setpower)
3463 host->pdata->setpower(id, 0);
3465 if (host->pdata->caps)
3466 mmc->caps = host->pdata->caps;
3468 if (host->pdata->pm_caps)
3469 mmc->pm_caps = host->pdata->pm_caps;
/* resolve controller index: "mshc" DT alias, else platform-device id */
3471 if (host->dev->of_node) {
3472 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3476 ctrl_id = to_platform_device(host->dev)->id;
3478 if (drv_data && drv_data->caps)
3479 mmc->caps |= drv_data->caps[ctrl_id];
3480 if (drv_data && drv_data->hold_reg_flag)
3481 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3483 /* set the compatibility of driver. */
3484 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3485 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3487 if (host->pdata->caps2)
3488 mmc->caps2 = host->pdata->caps2;
3490 if (host->pdata->get_bus_wd)
3491 bus_width = host->pdata->get_bus_wd(slot->id);
3492 else if (host->dev->of_node)
3493 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3497 switch (bus_width) {
3499 mmc->caps |= MMC_CAP_8_BIT_DATA;
/* 8-bit implies 4-bit capability; fallthrough is intentional */
3501 mmc->caps |= MMC_CAP_4_BIT_DATA;
3504 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3505 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3506 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3507 mmc->caps |= MMC_CAP_SDIO_IRQ;
3508 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3509 mmc->caps |= MMC_CAP_HW_RESET;
3510 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3511 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3512 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3513 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3514 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3515 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3516 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3517 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3519 /*Assign pm_caps pass to pm_flags*/
3520 mmc->pm_flags = mmc->pm_caps;
3522 if (host->pdata->blk_settings) {
3523 mmc->max_segs = host->pdata->blk_settings->max_segs;
3524 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3525 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3526 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3527 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3529 /* Useful defaults if platform data is unset. */
3530 #ifdef CONFIG_MMC_DW_IDMAC
3531 mmc->max_segs = host->ring_size;
3532 mmc->max_blk_size = 65536;
3533 mmc->max_blk_count = host->ring_size;
3534 mmc->max_seg_size = 0x1000;
3535 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3536 if(cpu_is_rk3036() || cpu_is_rk312x()){
3537 /* fixup for external dmac setting */
3539 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3540 mmc->max_blk_count = 65535;
3541 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3542 mmc->max_seg_size = mmc->max_req_size;
/* PIO limits (no DMA configured) */
3546 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3547 mmc->max_blk_count = 512;
3548 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3549 mmc->max_seg_size = mmc->max_req_size;
3550 #endif /* CONFIG_MMC_DW_IDMAC */
3554 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3556 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* only the SD slot gets a dedicated vmmc supply */
3561 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3562 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3566 if (IS_ERR(host->vmmc)) {
3567 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3570 ret = regulator_enable(host->vmmc);
3573 "failed to enable regulator: %d\n", ret);
3580 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3582 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3583 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3585 dw_mci_init_pinctrl(host);
3586 ret = mmc_add_host(mmc);
3590 #if defined(CONFIG_DEBUG_FS)
3591 dw_mci_init_debugfs(slot);
3594 /* Card initially undetected */
3595 slot->last_detect_state = 1;
/* error unwind: drop the PM notifier and the cd-gpio IRQ if taken */
3599 unregister_pm_notifier(&mmc->pm_notify);
3602 if (gpio_is_valid(slot->cd_gpio))
3603 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * Tear down slot @id: run the platform exit hook, unregister the
 * mmc_host from the core, clear the slot pointer, and free the host.
 */
3608 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3610 /* Shutdown detect IRQ */
3611 if (slot->host->pdata->exit)
3612 slot->host->pdata->exit(id);
3614 /* Debugfs stuff is cleaned up by mmc core */
3615 mmc_remove_host(slot->mmc);
3616 slot->host->slot[id] = NULL;
3617 mmc_free_host(slot->mmc);
/*
 * Set up the DMA backend: allocate one page of coherent memory for the
 * descriptor/sg table, pick the ops (external EDMA on rk3036/rk312x,
 * internal IDMAC otherwise), validate the ops vtable, and run its init
 * hook.  Any failure falls through to PIO mode (elided path).
 */
3620 static void dw_mci_init_dma(struct dw_mci *host)
3622 /* Alloc memory for sg translation */
3623 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3624 &host->sg_dma, GFP_KERNEL);
3625 if (!host->sg_cpu) {
3626 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3631 /* Determine which DMA interface to use */
3632 #if defined(CONFIG_MMC_DW_IDMAC)
3633 if(cpu_is_rk3036() || cpu_is_rk312x()){
3634 host->dma_ops = &dw_mci_edmac_ops;
3635 dev_info(host->dev, "Using external DMA controller.\n");
3637 host->dma_ops = &dw_mci_idmac_ops;
3638 dev_info(host->dev, "Using internal DMA controller.\n");
/* require a complete ops vtable before committing to DMA */
3645 if (host->dma_ops->init && host->dma_ops->start &&
3646 host->dma_ops->stop && host->dma_ops->cleanup) {
3647 if (host->dma_ops->init(host)) {
3648 dev_err(host->dev, "%s: Unable to initialize "
3649 "DMA Controller.\n", __func__);
3653 dev_err(host->dev, "DMA initialization not found.\n");
3661 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Assert the given @reset bit(s) in the CTRL register and poll until the
 * hardware self-clears them, with a 500 ms timeout.  Returns true on
 * success (the early-return is elided), logging on timeout.
 */
3666 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3668 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3671 ctrl = mci_readl(host, CTRL);
3673 mci_writel(host, CTRL, ctrl);
3675 /* wait till resets clear */
3677 ctrl = mci_readl(host, CTRL);
3678 if (!(ctrl & reset))
3680 } while (time_before(jiffies, timeout));
3683 "Timeout resetting block (ctrl reset %#x)\n",
/* Reset only the FIFO, stopping the sg iterator first since the reset
 * raises a block interrupt that expects host->sg to be quiesced. */
3689 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3692 * Reseting generates a block interrupt, hence setting
3693 * the scatter-gather pointer to NULL.
3696 sg_miter_stop(&host->sg_miter);
3700 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full controller reset: FIFO + DMA (+ the controller-reset bit in the
 * elided middle argument) in one dw_mci_ctrl_reset() call. */
3703 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3705 return dw_mci_ctrl_reset(host,
3706 SDMMC_CTRL_FIFO_RESET |
3708 SDMMC_CTRL_DMA_RESET);
/* Controller-level DT property -> quirk-flag translation table. */
3713 static struct dw_mci_of_quirks {
3718 .quirk = "broken-cd",
3719 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board platform-data struct from the device tree:
 * slot count, quirk flags, FIFO depth, card-detect delay, bus clock,
 * any per-controller drv_data->parse_dt hook, and the capability /
 * pm-capability properties.  Returns the pdata or an ERR_PTR.
 */
3723 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3725 struct dw_mci_board *pdata;
3726 struct device *dev = host->dev;
3727 struct device_node *np = dev->of_node;
3728 const struct dw_mci_drv_data *drv_data = host->drv_data;
3730 u32 clock_frequency;
3732 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3734 dev_err(dev, "could not allocate memory for pdata\n");
3735 return ERR_PTR(-ENOMEM);
3738 /* find out number of slots supported */
3739 if (of_property_read_u32(dev->of_node, "num-slots",
3740 &pdata->num_slots)) {
3741 dev_info(dev, "num-slots property not found, "
3742 "assuming 1 slot is available\n");
3743 pdata->num_slots = 1;
3747 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3748 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3749 pdata->quirks |= of_quirks[idx].id;
/* fifo-depth is optional: hardware FIFOTH value is the fallback */
3752 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3753 dev_info(dev, "fifo-depth property not found, using "
3754 "value of FIFOTH register as default\n");
3756 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3758 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3759 pdata->bus_hz = clock_frequency;
/* give the SoC-specific glue a chance to parse its own properties */
3761 if (drv_data && drv_data->parse_dt) {
3762 ret = drv_data->parse_dt(host);
3764 return ERR_PTR(ret);
3767 if (of_find_property(np, "keep-power-in-suspend", NULL))
3768 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3770 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3771 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3773 if (of_find_property(np, "supports-highspeed", NULL))
3774 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3776 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3777 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3779 if (of_find_property(np, "supports-DDR_MODE", NULL))
3780 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3782 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3783 pdata->caps2 |= MMC_CAP2_HS200;
3785 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3786 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3788 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3789 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3791 if (of_get_property(np, "cd-inverted", NULL))
3792 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3793 if (of_get_property(np, "bootpart-no-access", NULL))
3794 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3799 #else /* CONFIG_OF */
/* Without devicetree there is no pdata source: always an error. */
3800 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3802 return ERR_PTR(-EINVAL);
3804 #endif /* CONFIG_OF */
/*
 * Recovery path for a data-over timeout.  If the FSM was mid-transfer,
 * fake a data CRC/end-bit error status so the tasklet fails the request,
 * fully reset the controller (re-initializing IDMAC on internal-DMA
 * SoCs), restore FIFOTH/TMOUT, rebuild the interrupt mask — keeping the
 * SDIO bit only if it was already enabled — re-enable interrupts, and
 * kick the tasklet to unwind the request.
 */
3806 static void dw_mci_dealwith_timeout(struct dw_mci *host)
3811 dev_err(host->dev, "host->state = 0x%x\n", host->state);
3812 switch(host->state){
3815 case STATE_SENDING_DATA:
3816 case STATE_DATA_BUSY:
/* synthesize an error so the tasklet completes the request with -EIO-class status */
3817 host->data_status |= (SDMMC_INT_DCRC|SDMMC_INT_EBE);
3818 mci_writel(host, RINTSTS, SDMMC_INT_DRTO); // clear interrupt
3819 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3820 host->state = STATE_DATA_BUSY;
3821 if (!dw_mci_ctrl_all_reset(host)) {
3822 dev_err(host->dev, "dto: ctrl_all_reset failed!\n");
3826 /* NO requirement to reclaim slave chn using external dmac */
3827 #ifdef CONFIG_MMC_DW_IDMAC
3828 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3829 if (host->use_dma && host->dma_ops->init)
3830 host->dma_ops->init(host);
3834 * Restore the initial value at FIFOTH register
3835 * And Invalidate the prev_blksz with zero
3837 mci_writel(host, FIFOTH, host->fifoth_val);
3838 host->prev_blksz = 0;
3839 mci_writel(host, TMOUT, 0xFFFFFFFF);
3840 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3841 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
3842 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
/* card-detect interrupt only makes sense for non-SDIO (removable) hosts */
3843 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3844 regs |= SDMMC_INT_CD;
3846 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)) {
3847 if (host->verid < DW_MMC_240A)
3848 sdio_int = SDMMC_INT_SDIO(0);
3850 sdio_int = SDMMC_INT_SDIO(8);
/* preserve the SDIO irq enable only if it was on before the reset */
3852 if (mci_readl(host, INTMASK) & sdio_int)
3856 mci_writel(host, INTMASK, regs);
3857 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3858 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3859 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_dto_timeout - timer callback fired when the expected DATA_OVER
 * interrupt never arrived.
 *
 * Runs with the host IRQ disabled so it cannot race the ISR: records an
 * end-bit error as the data status, clears all raw interrupt status and
 * delegates the actual controller recovery to dw_mci_dealwith_timeout().
 */
3865 static void dw_mci_dto_timeout(unsigned long host_data)
3867 struct dw_mci *host = (struct dw_mci *) host_data;
3869 disable_irq(host->irq);
3871 dev_err(host->dev, "data_over interrupt timeout!\n");
3872 host->data_status = SDMMC_INT_EBE;
3873 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3874 dw_mci_dealwith_timeout(host);
3876 enable_irq(host->irq);
/*
 * dw_mci_probe - one-time host controller bring-up.
 *
 * Sequence (as visible here): parse DT platform data, detect the IP
 * version (data register offset moved in v2.40a), acquire and enable
 * hclk_mmc/clk_mmc clocks, set the CIU rate (doubled for >= v2.40a to
 * compensate for a fixed divide-by-2 in the Rockchip clock tree), probe
 * the host FIFO data width from HCON, reset the controller, set up DMA,
 * program FIFO thresholds, register the IRQ handler and the DTO
 * watchdog timer, initialize each slot, and finally unmask interrupts.
 *
 * Returns 0 on success or a negative errno; error paths (mostly not
 * visible in this chunk) unwind clocks, regulator, DMA and workqueue.
 */
3879 int dw_mci_probe(struct dw_mci *host)
3881 const struct dw_mci_drv_data *drv_data = host->drv_data;
3882 int width, i, ret = 0;
3888 host->pdata = dw_mci_parse_dt(host);
3889 if (IS_ERR(host->pdata)) {
3890 dev_err(host->dev, "platform data not available\n");
3895 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3897 "Platform data must supply select_slot function\n");
3902 * In 2.40a spec, Data offset is changed.
3903 * Need to check the version-id and set data-offset for DATA register.
3905 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3906 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3908 if (host->verid < DW_MMC_240A)
3909 host->data_offset = DATA_OFFSET;
3911 host->data_offset = DATA_240A_OFFSET;
/* Bus (AHB) clock: required before any register access can be trusted. */
3914 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3915 if (IS_ERR(host->hclk_mmc)) {
3916 dev_err(host->dev, "failed to get hclk_mmc\n");
3917 ret = PTR_ERR(host->hclk_mmc);
3921 clk_prepare_enable(host->hclk_mmc);
/* Card interface (CIU) clock. */
3924 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3925 if (IS_ERR(host->clk_mmc)) {
3926 dev_err(host->dev, "failed to get clk mmc_per\n");
3927 ret = PTR_ERR(host->clk_mmc);
3931 host->bus_hz = host->pdata->bus_hz;
3932 if (!host->bus_hz) {
3933 dev_err(host->dev,"Platform data must supply bus speed\n");
3938 if (host->verid < DW_MMC_240A)
3939 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3941 //rockchip: fix divider 2 in clksum before controlller
3942 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3945 dev_err(host->dev, "failed to set clk mmc\n");
3948 clk_prepare_enable(host->clk_mmc);
/* SoC-specific clock hook (e.g. tuning clocks), optional per drv_data. */
3950 if (drv_data && drv_data->setup_clock) {
3951 ret = drv_data->setup_clock(host);
3954 "implementation specific clock setup failed\n");
3959 host->quirks = host->pdata->quirks;
3960 host->irq_state = true;
3961 host->set_speed = 0;
3963 host->svi_flags = 0;
3965 spin_lock_init(&host->lock);
3966 spin_lock_init(&host->slock);
3968 INIT_LIST_HEAD(&host->queue);
3970 * Get the host data width - this assumes that HCON has been set with
3971 * the correct values.
3973 i = (mci_readl(host, HCON) >> 7) & 0x7;
/* HCON[9:7]: 0 = 16-bit, 2 = 64-bit, otherwise default to 32-bit FIFO access. */
3975 host->push_data = dw_mci_push_data16;
3976 host->pull_data = dw_mci_pull_data16;
3978 host->data_shift = 1;
3979 } else if (i == 2) {
3980 host->push_data = dw_mci_push_data64;
3981 host->pull_data = dw_mci_pull_data64;
3983 host->data_shift = 3;
3985 /* Check for a reserved value, and warn if it is */
3987 "HCON reports a reserved host data width!\n"
3988 "Defaulting to 32-bit access.\n");
3989 host->push_data = dw_mci_push_data32;
3990 host->pull_data = dw_mci_pull_data32;
3992 host->data_shift = 2;
3995 /* Reset all blocks */
3996 if (!dw_mci_ctrl_all_reset(host))
3999 host->dma_ops = host->pdata->dma_ops;
4000 dw_mci_init_dma(host);
4002 /* Clear the interrupts for the host controller */
4003 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4004 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4006 /* Put in max timeout */
4007 mci_writel(host, TMOUT, 0xFFFFFFFF);
4010 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
4011 * Tx Mark = fifo_size / 2 DMA Size = 8
4013 if (!host->pdata->fifo_depth) {
4015 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
4016 * have been overwritten by the bootloader, just like we're
4017 * about to do, so if you know the value for your hardware, you
4018 * should put it in the platform data.
4020 fifo_size = mci_readl(host, FIFOTH);
4021 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
4023 fifo_size = host->pdata->fifo_depth;
4025 host->fifo_depth = fifo_size;
/* Cache FIFOTH so timeout/resume recovery paths can restore it verbatim. */
4027 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
4028 mci_writel(host, FIFOTH, host->fifoth_val);
4030 /* disable clock to CIU */
4031 mci_writel(host, CLKENA, 0);
4032 mci_writel(host, CLKSRC, 0);
4034 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
4035 host->card_workqueue = alloc_workqueue("dw-mci-card",
4036 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
4037 if (!host->card_workqueue) {
4041 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
4042 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
4043 host->irq_flags, "dw-mci", host);
4047 if (host->pdata->num_slots)
4048 host->num_slots = host->pdata->num_slots;
/* Fall back to the slot count reported by hardware (HCON[5:1] + 1). */
4050 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
/* Watchdog for a DATA_OVER interrupt that never fires; see dw_mci_dto_timeout(). */
4052 setup_timer(&host->dto_timer, dw_mci_dto_timeout, (unsigned long)host);
4053 /* We need at least one slot to succeed */
4054 for (i = 0; i < host->num_slots; i++) {
4055 ret = dw_mci_init_slot(host, i);
4057 dev_dbg(host->dev, "slot %d init failed\n", i);
4063 * Enable interrupts for command done, data over, data empty, card det,
4064 * receive ready and error such as transmit, receive timeout, crc error
4066 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4067 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
4068 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect only for removable cards: SDIO and eMMC are non-removable. */
4069 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
4070 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
4071 regs |= SDMMC_INT_CD;
4073 mci_writel(host, INTMASK, regs);
4075 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4077 dev_info(host->dev, "DW MMC controller at irq %d, "
4078 "%d bit host data width, "
4080 host->irq, width, fifo_size);
4083 dev_info(host->dev, "%d slots initialized\n", init_slots);
4085 dev_dbg(host->dev, "attempted to initialize %d slots, "
4086 "but failed on all\n", host->num_slots);
4091 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4092 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* Error unwinding (labels not all visible): workqueue, DMA, regulator, clocks. */
4097 destroy_workqueue(host->card_workqueue);
4100 if (host->use_dma && host->dma_ops->exit)
4101 host->dma_ops->exit(host);
4104 regulator_disable(host->vmmc);
4105 regulator_put(host->vmmc);
4109 if (!IS_ERR(host->clk_mmc))
4110 clk_disable_unprepare(host->clk_mmc);
4112 if (!IS_ERR(host->hclk_mmc))
4113 clk_disable_unprepare(host->hclk_mmc);
4117 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - tear down everything dw_mci_probe() set up.
 *
 * Stops the DTO watchdog timer, masks and clears all controller
 * interrupts, cleans up every slot, gates the CIU clock, destroys the
 * card workqueue, releases DMA, the card-detect GPIO IRQ, the vmmc
 * regulator, and finally the two clocks.  Order mirrors probe in
 * reverse so no interrupt or worker can run against freed state.
 */
4119 void dw_mci_remove(struct dw_mci *host)
4121 struct mmc_host *mmc = host->mmc;
4122 struct dw_mci_slot *slot = mmc_priv(mmc);
4125 del_timer_sync(&host->dto_timer);
4127 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4128 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4130 for(i = 0; i < host->num_slots; i++){
4131 dev_dbg(host->dev, "remove slot %d\n", i);
4133 dw_mci_cleanup_slot(host->slot[i], i);
4136 /* disable clock to CIU */
4137 mci_writel(host, CLKENA, 0);
4138 mci_writel(host, CLKSRC, 0);
4140 destroy_workqueue(host->card_workqueue);
/* SD slots registered a PM notifier at init time; drop it symmetrically. */
4141 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4142 unregister_pm_notifier(&host->mmc->pm_notify);
4144 if(host->use_dma && host->dma_ops->exit)
4145 host->dma_ops->exit(host);
4147 if (gpio_is_valid(slot->cd_gpio))
4148 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4151 regulator_disable(host->vmmc);
4152 regulator_put(host->vmmc);
4154 if(!IS_ERR(host->clk_mmc))
4155 clk_disable_unprepare(host->clk_mmc);
4157 if(!IS_ERR(host->hclk_mmc))
4158 clk_disable_unprepare(host->hclk_mmc);
4160 EXPORT_SYMBOL(dw_mci_remove);
4164 #ifdef CONFIG_PM_SLEEP
4166 * TODO: we should probably disable the clock to the card in the suspend path.
4168 extern int get_wifi_chip_type(void);
/*
 * dw_mci_suspend - system-sleep entry for the host controller.
 *
 * Skips suspend entirely for SDIO slots carrying an ESP8089 WiFi chip
 * (that device cannot survive a controller suspend).  For SD slots:
 * disables the host IRQ, parks the pins in their idle pinctrl state,
 * masks/clears all controller interrupts, and arms the card-detect GPIO
 * as a wakeup source (except on rk3126/rk3126b, which are already in
 * gpio_cd mode).  The vmmc regulator is also disabled.
 */
4169 int dw_mci_suspend(struct dw_mci *host)
4171 struct dw_mci_slot *slot = mmc_priv(host->mmc);
4173 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4174 (get_wifi_chip_type() == WIFI_ESP8089))
4178 regulator_disable(host->vmmc);
4180 /*only for sdmmc controller*/
4181 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4182 disable_irq(host->irq);
4183 if (test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
4184 if (!IS_ERR(host->pins_idle) &&
4185 pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4186 MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
4187 mmc_hostname(host->mmc));
/*
 * NOTE(review): this branch selects pins_idle only when pins_udbg is an
 * error (i.e. no debug pin state exists).  Intervening source lines are
 * not visible here -- confirm the intended card-absent/udbg pairing
 * against the full file.
 */
4189 if (IS_ERR(host->pins_udbg) && !IS_ERR(host->pins_idle) &&
4190 pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4191 MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
4192 mmc_hostname(host->mmc));
4195 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4196 mci_writel(host, INTMASK, 0x00);
4197 mci_writel(host, CTRL, 0x00);
4199 /* Soc rk3126 already in gpio_cd mode */
4200 if (!soc_is_rk3126() && !soc_is_rk3126b()) {
4201 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4202 enable_irq_wake(host->mmc->slot.cd_irq);
4207 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - system-sleep exit; mirror of dw_mci_suspend().
 *
 * Skips resume for the ESP8089 SDIO case and for SDIO slots with no
 * card present.  For SD slots: disarms the card-detect wakeup and
 * restores the default pinctrl state; then pokes the SoC GRF register
 * to re-route card detect, re-enables vmmc, resets the controller,
 * re-inits DMA (internal IDMAC only), restores FIFOTH/TMOUT, reprograms
 * the interrupt mask, and re-applies ios/bus setup to every slot that
 * kept power across suspend (MMC_PM_KEEP_POWER).
 */
4209 int dw_mci_resume(struct dw_mci *host)
4213 struct dw_mci_slot *slot;
4215 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4216 (get_wifi_chip_type() == WIFI_ESP8089))
4221 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
4222 slot = mmc_priv(host->mmc);
4223 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4227 /*only for sdmmc controller*/
4228 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4229 /* Soc rk3126 already in gpio_cd mode */
4230 if (!soc_is_rk3126() && !soc_is_rk3126b()) {
4231 disable_irq_wake(host->mmc->slot.cd_irq);
4232 mmc_gpio_free_cd(host->mmc);
4235 if (test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
4236 if (!IS_ERR(host->pins_default) &&
4237 pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4238 MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
4239 mmc_hostname(host->mmc));
/*
 * NOTE(review): like the suspend path, this selects pins_default only
 * when pins_udbg is an error; verify against the full source.
 */
4241 if (IS_ERR(host->pins_udbg) && !IS_ERR(host->pins_default) &&
4242 pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4243 MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
4244 mmc_hostname(host->mmc));
/* SoC-specific GRF writes: switch card-detect muxing back to controller mode. */
4249 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4250 else if(cpu_is_rk3036())
4251 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4252 else if(cpu_is_rk312x())
4253 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4254 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4257 ret = regulator_enable(host->vmmc);
4260 "failed to enable regulator: %d\n", ret);
4265 if(!dw_mci_ctrl_all_reset(host)){
/* rk3036/rk312x use an external DMAC; only re-init the internal IDMAC elsewhere. */
4270 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
4271 if(host->use_dma && host->dma_ops->init)
4272 host->dma_ops->init(host);
4275 * Restore the initial value at FIFOTH register
4276 * And Invalidate the prev_blksz with zero
4278 mci_writel(host, FIFOTH, host->fifoth_val);
4279 host->prev_blksz = 0;
4280 /* Put in max timeout */
4281 mci_writel(host, TMOUT, 0xFFFFFFFF);
4283 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4284 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
4286 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4287 regs |= SDMMC_INT_CD;
4288 mci_writel(host, INTMASK, regs);
4289 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4290 /*only for sdmmc controller*/
4291 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)){
4292 enable_irq(host->irq);
/* Re-apply clock/ios configuration for slots that kept power in suspend. */
4295 for(i = 0; i < host->num_slots; i++){
4296 struct dw_mci_slot *slot = host->slot[i];
4299 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4300 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4301 dw_mci_setup_bus(slot, true);
4307 EXPORT_SYMBOL(dw_mci_resume);
4308 #endif /* CONFIG_PM_SLEEP */
/*
 * Module init/exit: this core library only announces itself; actual
 * device binding is done by the platform glue that calls dw_mci_probe().
 */
4310 static int __init dw_mci_init(void)
4312 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4316 static void __exit dw_mci_exit(void)
4320 module_init(dw_mci_init);
4321 module_exit(dw_mci_exit);
4323 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4324 MODULE_AUTHOR("NXP Semiconductor VietNam");
4325 MODULE_AUTHOR("Imagination Technologies Ltd");
4326 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4327 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4328 MODULE_LICENSE("GPL v2");