2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
49 #include <linux/mfd/syscon.h>
50 #include <linux/regmap.h>
51 #include <linux/log2.h>
53 #include "rk_sdmmc_dbg.h"
54 #include <linux/regulator/rockchip_io_vol_domain.h>
55 #include "../../clk/rockchip/clk-ops.h"
57 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
59 /* Common flag combinations */
60 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
61 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
63 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
65 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
66 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
67 #define DW_MCI_SEND_STATUS 1
68 #define DW_MCI_RECV_STATUS 2
69 #define DW_MCI_DMA_THRESHOLD 16
71 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
72 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
74 /* The spec gives 250 ms as the maximum; the value may need to be raised for marginal (slow) cards. */
75 #define SDMMC_DATA_TIMEOUT_SD 500
76 #define SDMMC_DATA_TIMEOUT_SDIO 250
77 #define SDMMC_DATA_TIMEOUT_EMMC 2500
79 #define SDMMC_CMD_RTO_MAX_HOLD 200
80 #define SDMMC_WAIT_FOR_UNBUSY 2500
82 #ifdef CONFIG_MMC_DW_IDMAC
83 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
84 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
85 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
89 u32 des0; /* Control Descriptor */
90 #define IDMAC_DES0_DIC BIT(1)
91 #define IDMAC_DES0_LD BIT(2)
92 #define IDMAC_DES0_FD BIT(3)
93 #define IDMAC_DES0_CH BIT(4)
94 #define IDMAC_DES0_ER BIT(5)
95 #define IDMAC_DES0_CES BIT(30)
96 #define IDMAC_DES0_OWN BIT(31)
98 u32 des1; /* Buffer sizes */
99 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
100 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
102 u32 des2; /* buffer 1 physical address */
104 u32 des3; /* buffer 2 physical address */
106 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * Standard tuning block pattern clocked out during bus tuning on a 4-bit
 * data bus (defined by the SD/eMMC specifications).
 * NOTE(review): the array's closing brace is not visible in this view.
 */
108 static const u8 tuning_blk_pattern_4bit[] = {
109 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
110 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
111 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
112 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
113 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
114 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
115 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
116 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/*
 * Standard tuning block pattern for an 8-bit data bus (eMMC HS200
 * tuning, per the JEDEC specification).
 * NOTE(review): the array's closing brace is not visible in this view.
 */
119 static const u8 tuning_blk_pattern_8bit[] = {
120 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
121 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
122 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
123 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
124 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
125 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
126 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
127 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
128 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
129 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
130 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
131 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
132 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
133 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
134 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
135 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Forward declarations for reset/low-power helpers defined later in this file. */
138 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
139 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
140 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
141 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
143 /* Print all registers of the current host via printk. */
/*
 * Dump every controller register (name, offset, value) with printk for
 * debugging.  Walks the dw_mci_regs[] table until its NULL-name
 * terminator entry.
 */
145 static int dw_mci_regs_printk(struct dw_mci *host)
147 struct sdmmc_reg *regs = dw_mci_regs;
149 while( regs->name != 0 ){
150 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
153 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
158 #if defined(CONFIG_DEBUG_FS)
/*
 * debugfs "req" file: dump the slot's current mmc_request (command, data
 * and stop command) under the host lock so the snapshot is consistent.
 *
 * NOTE(review): both "rsp %x %x %x %x" printouts pass cmd->resp[2] /
 * stop->resp[2] twice; the fourth argument was presumably meant to be
 * resp[3] -- confirm against the upstream dw_mmc debugfs code.
 */
159 static int dw_mci_req_show(struct seq_file *s, void *v)
161 struct dw_mci_slot *slot = s->private;
162 struct mmc_request *mrq;
163 struct mmc_command *cmd;
164 struct mmc_command *stop;
165 struct mmc_data *data;
167 /* Make sure we get a consistent snapshot */
168 spin_lock_bh(&slot->host->lock);
178 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
179 cmd->opcode, cmd->arg, cmd->flags,
180 cmd->resp[0], cmd->resp[1], cmd->resp[2],
181 cmd->resp[2], cmd->error);
183 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
184 data->bytes_xfered, data->blocks,
185 data->blksz, data->flags, data->error);
188 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
189 stop->opcode, stop->arg, stop->flags,
190 stop->resp[0], stop->resp[1], stop->resp[2],
191 stop->resp[2], stop->error);
194 spin_unlock_bh(&slot->host->lock);
/* debugfs open hook: bind dw_mci_req_show to the seq_file interface. */
199 static int dw_mci_req_open(struct inode *inode, struct file *file)
201 return single_open(file, dw_mci_req_show, inode->i_private);
/* File operations for the per-slot debugfs "req" node. */
204 static const struct file_operations dw_mci_req_fops = {
205 .owner = THIS_MODULE,
206 .open = dw_mci_req_open,
209 .release = single_release,
/*
 * debugfs "regs" file.
 * NOTE(review): these seq_printf() calls print the SDMMC_* macros
 * themselves (register *offsets*), not the register contents -- there is
 * no mci_readl() here.  Presumably a bug; confirm against the register
 * map and the upstream dw_mmc fix.
 */
212 static int dw_mci_regs_show(struct seq_file *s, void *v)
214 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
215 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
216 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
217 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
218 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
219 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open hook: bind dw_mci_regs_show to the seq_file interface. */
224 static int dw_mci_regs_open(struct inode *inode, struct file *file)
226 return single_open(file, dw_mci_regs_show, inode->i_private);
/* File operations for the debugfs "regs" node. */
229 static const struct file_operations dw_mci_regs_fops = {
230 .owner = THIS_MODULE,
231 .open = dw_mci_regs_open,
234 .release = single_release,
/*
 * Create the per-slot debugfs nodes ("regs", "req", "state",
 * "pending_events", "completed_events") under the mmc host's debugfs
 * root.  On any creation failure, falls through to the dev_err below.
 */
237 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
239 struct mmc_host *mmc = slot->mmc;
240 struct dw_mci *host = slot->host;
244 root = mmc->debugfs_root;
248 node = debugfs_create_file("regs", S_IRUSR, root, host,
253 node = debugfs_create_file("req", S_IRUSR, root, slot,
258 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
262 node = debugfs_create_x32("pending_events", S_IRUSR, root,
263 (u32 *)&host->pending_events);
267 node = debugfs_create_x32("completed_events", S_IRUSR, root,
268 (u32 *)&host->completed_events);
275 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
277 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the data/response timeout register to its maximum value. */
279 static void dw_mci_set_timeout(struct dw_mci *host)
281 /* timeout (maximum) */
282 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMD register flags:
 * response-expected/long/CRC bits, data-transfer direction bits, and the
 * stop/wait-prvdata bits.  Gives the SoC-specific driver a chance to
 * tweak the flags via drv_data->prepare_command.
 * NOTE(review): cmdr's initial assignment from cmd->opcode is not
 * visible in this view of the file.
 */
285 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
287 struct mmc_data *data;
288 struct dw_mci_slot *slot = mmc_priv(mmc);
289 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
291 cmd->error = -EINPROGRESS;
295 if (cmdr == MMC_STOP_TRANSMISSION)
296 cmdr |= SDMMC_CMD_STOP;
298 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
300 if (cmd->flags & MMC_RSP_PRESENT) {
301 /* We expect a response, so set this bit */
302 cmdr |= SDMMC_CMD_RESP_EXP;
303 if (cmd->flags & MMC_RSP_136)
304 cmdr |= SDMMC_CMD_RESP_LONG;
307 if (cmd->flags & MMC_RSP_CRC)
308 cmdr |= SDMMC_CMD_RESP_CRC;
312 cmdr |= SDMMC_CMD_DAT_EXP;
313 if (data->flags & MMC_DATA_STREAM)
314 cmdr |= SDMMC_CMD_STRM_MODE;
315 if (data->flags & MMC_DATA_WRITE)
316 cmdr |= SDMMC_CMD_DAT_WR;
319 if (drv_data && drv_data->prepare_command)
320 drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build the stop/abort command used to terminate an interrupted data
 * transfer: CMD12 (STOP_TRANSMISSION) for block read/write opcodes, or
 * CMD52 with the CCCR I/O-abort bit for SDIO extended transfers.
 * Returns the corresponding CMD register flags.
 */
326 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
328 struct mmc_command *stop;
334 stop = &host->stop_abort;
336 memset(stop, 0, sizeof(struct mmc_command));
338 if (cmdr == MMC_READ_SINGLE_BLOCK ||
339 cmdr == MMC_READ_MULTIPLE_BLOCK ||
340 cmdr == MMC_WRITE_BLOCK ||
341 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
342 stop->opcode = MMC_STOP_TRANSMISSION;
344 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
345 } else if (cmdr == SD_IO_RW_EXTENDED) {
346 stop->opcode = SD_IO_RW_DIRECT;
/* CMD52 write (bit 31) to the CCCR abort register, preserving the
 * original command's function number (arg bits 30:28). */
347 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
348 ((cmd->arg >> 28) & 0x7);
349 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
354 cmdr = stop->opcode | SDMMC_CMD_STOP |
355 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Write the command argument and flags to the controller and kick off
 * the command (CMD register START bit).  CMD11 (voltage switch) gets
 * special handling: low-power clock gating is disabled first and the
 * VOLT_SWITCH flag is set.
 */
360 static void dw_mci_start_command(struct dw_mci *host,
361 struct mmc_command *cmd, u32 cmd_flags)
363 struct dw_mci_slot *slot = host->slot[0];
364 /*temporality fix slot[0] due to host->num_slots equal to 1*/
366 host->pre_cmd = host->cmd;
369 "start command: ARGR=0x%08x CMDR=0x%08x\n",
370 cmd->arg, cmd_flags);
372 if(SD_SWITCH_VOLTAGE == cmd->opcode){
373 /*confirm non-low-power mode*/
374 mci_writel(host, CMDARG, 0);
375 dw_mci_disable_low_power(slot);
377 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
378 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
380 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
383 mci_writel(host, CMDARG, cmd->arg);
/* Some SoCs (e.g. RK3188) require the USE_HOLD_REG bit to be set. */
386 /* fix the value to 1 in some Soc,for example RK3188. */
387 if(host->mmc->hold_reg_flag)
388 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
390 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the request's stop command using the pre-computed stop_cmdr flags. */
394 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
396 dw_mci_start_command(host, data->stop, host->stop_cmdr);
399 /* DMA interface functions */
/*
 * Abort an in-flight DMA transfer and mark the transfer complete so the
 * state machine can progress.  On RK3036/RK312x the external DMA is
 * deliberately not terminated (per the Fixme: it may trigger a flush op);
 * only cleanup is performed there.
 */
400 static void dw_mci_stop_dma(struct dw_mci *host)
402 if (host->using_dma) {
403 /* Fixme: No need to terminate edma, may cause flush op */
404 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
405 host->dma_ops->stop(host);
406 host->dma_ops->cleanup(host);
409 /* Data transfer was stopped by the interrupt handler */
410 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Map the mmc_data direction flag to the dma-mapping API direction. */
413 static int dw_mci_get_dma_dir(struct mmc_data *data)
415 if (data->flags & MMC_DATA_WRITE)
416 return DMA_TO_DEVICE;
418 return DMA_FROM_DEVICE;
421 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the scatterlist after an IDMAC transfer -- but only if the
 * mapping was not set up by the pre_req hook (host_cookie != 0 means
 * post_req will unmap it instead).
 */
422 static void dw_mci_dma_cleanup(struct dw_mci *host)
424 struct mmc_data *data = host->data;
427 if (!data->host_cookie)
428 dma_unmap_sg(host->dev,
431 dw_mci_get_dma_dir(data));
/* Soft-reset the internal DMA controller via the BMOD SWRESET bit. */
434 static void dw_mci_idmac_reset(struct dw_mci *host)
436 u32 bmod = mci_readl(host, BMOD);
437 /* Software reset of DMA */
438 bmod |= SDMMC_IDMAC_SWRESET;
439 mci_writel(host, BMOD, bmod);
/*
 * Stop the internal DMA controller: detach it from the host (clear
 * USE_IDMAC, request a DMA reset in CTRL), then disable and soft-reset
 * the IDMAC itself through BMOD.
 */
442 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
446 /* Disable and reset the IDMAC interface */
447 temp = mci_readl(host, CTRL);
448 temp &= ~SDMMC_CTRL_USE_IDMAC;
449 temp |= SDMMC_CTRL_DMA_RESET;
450 mci_writel(host, CTRL, temp);
452 /* Stop the IDMAC running */
453 temp = mci_readl(host, BMOD);
454 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
455 temp |= SDMMC_IDMAC_SWRESET;
456 mci_writel(host, BMOD, temp);
/*
 * IDMAC completion callback: clean up the DMA mapping, flag the transfer
 * complete and schedule the driver tasklet to advance the state machine.
 */
459 static void dw_mci_idmac_complete_dma(void *arg)
461 struct dw_mci *host = arg;
462 struct mmc_data *data = host->data;
464 dev_vdbg(host->dev, "DMA complete\n");
467 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
468 host->mrq->cmd->opcode,host->mrq->cmd->arg,
469 data->blocks,data->blksz,mmc_hostname(host->mmc));
472 host->dma_ops->cleanup(host);
475 * If the card was removed, data will be NULL. No point in trying to
476 * send the stop command or waiting for NBUSY in this case.
479 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
480 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the mapped scatterlist: one
 * descriptor per sg entry (OWN + chained, interrupts disabled), then
 * mark the first descriptor FD and the last descriptor LD with its
 * chain/IRQ-disable bits cleared.
 *
 * NOTE(review): the last-descriptor address is computed as
 * host->sg_cpu + (i - 1) * sizeof(struct idmac_desc); this byte-offset
 * arithmetic is only correct if sg_cpu is declared as a void*/byte
 * pointer -- confirm its type in the dw_mci struct definition.
 */
484 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
488 struct idmac_desc *desc = host->sg_cpu;
490 for (i = 0; i < sg_len; i++, desc++) {
491 unsigned int length = sg_dma_len(&data->sg[i]);
492 u32 mem_addr = sg_dma_address(&data->sg[i]);
494 /* Set the OWN bit and disable interrupts for this descriptor */
495 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
498 IDMAC_SET_BUFFER1_SIZE(desc, length);
500 /* Physical address to DMA to/from */
501 desc->des2 = mem_addr;
504 /* Set first descriptor */
506 desc->des0 |= IDMAC_DES0_FD;
508 /* Set last descriptor */
509 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
510 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
511 desc->des0 |= IDMAC_DES0_LD;
/*
 * Start an IDMAC transfer: build the descriptor ring, route the host
 * through the internal DMA (CTRL.USE_IDMAC), enable the IDMAC in BMOD
 * and poke the poll-demand register so it starts fetching descriptors.
 */
516 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
520 dw_mci_translate_sglist(host, host->data, sg_len);
522 /* Select IDMAC interface */
523 temp = mci_readl(host, CTRL);
524 temp |= SDMMC_CTRL_USE_IDMAC;
525 mci_writel(host, CTRL, temp);
529 /* Enable the IDMAC */
530 temp = mci_readl(host, BMOD);
531 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
532 mci_writel(host, BMOD, temp);
534 /* Start it running */
535 mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: size the descriptor ring to one page, forward
 * link all descriptors, close the ring with an end-of-ring descriptor,
 * reset the IDMAC, program the interrupt mask (TX/RX complete only) and
 * the descriptor base address.
 */
538 static int dw_mci_idmac_init(struct dw_mci *host)
540 struct idmac_desc *p;
543 /* Number of descriptors in the ring buffer */
544 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
546 /* Forward link the descriptor list */
547 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
548 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
550 /* Set the last descriptor as the end-of-ring descriptor */
551 p->des3 = host->sg_dma;
552 p->des0 = IDMAC_DES0_ER;
554 dw_mci_idmac_reset(host);
556 /* Mask out interrupts - get Tx & Rx complete only */
557 mci_writel(host, IDSTS, IDMAC_INT_CLR);
558 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
561 /* Set the descriptor base address */
562 mci_writel(host, DBADDR, host->sg_dma);
/* dma_ops vtable for the internal DMA controller (IDMAC) backend. */
566 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
567 .init = dw_mci_idmac_init,
568 .start = dw_mci_idmac_start_dma,
569 .stop = dw_mci_idmac_stop_dma,
570 .complete = dw_mci_idmac_complete_dma,
571 .cleanup = dw_mci_dma_cleanup,
/*
 * Unmap the scatterlist after an external-DMA transfer, unless pre_req
 * owns the mapping (host_cookie != 0 -> post_req will unmap).
 */
575 static void dw_mci_edma_cleanup(struct dw_mci *host)
577 struct mmc_data *data = host->data;
580 if (!data->host_cookie)
581 dma_unmap_sg(host->dev,
582 data->sg, data->sg_len,
583 dw_mci_get_dma_dir(data));
/* Abort all pending work on the external dmaengine channel. */
586 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
588 dmaengine_terminate_all(host->dms->ch);
/*
 * External-DMA completion callback: for reads, sync the scatterlist back
 * to the CPU (cache invalidate), then clean up the mapping, flag the
 * transfer complete and schedule the tasklet.
 */
591 static void dw_mci_edmac_complete_dma(void *arg)
593 struct dw_mci *host = arg;
594 struct mmc_data *data = host->data;
596 dev_vdbg(host->dev, "DMA complete\n");
599 if(data->flags & MMC_DATA_READ)
600 /* Invalidate cache after read */
601 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
602 data->sg_len, DMA_FROM_DEVICE);
604 host->dma_ops->cleanup(host);
607 * If the card was removed, data will be NULL. No point in trying to
608 * send the stop command or waiting for NBUSY in this case.
611 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
612 tasklet_schedule(&host->tasklet);
/*
 * Start a transfer through the external dmaengine channel:
 *  - configure the slave (FIFO address, 4-byte bus width),
 *  - read the host FIFOTH MSIZE and clamp the burst to the edmac limit
 *    (8 on RK3036 per the workaround note, otherwise the value set a few
 *    lines below this view), rewriting FIFOTH/watermarks to match,
 *  - prepare and submit a slave_sg descriptor in the right direction,
 *    with dw_mci_edmac_complete_dma as the completion callback,
 *  - for writes, sync the scatterlist to the device (cache flush) before
 *    issuing.
 */
616 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
618 struct dma_slave_config slave_config;
619 struct dma_async_tx_descriptor *desc = NULL;
620 struct scatterlist *sgl = host->data->sg;
621 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
622 u32 sg_elems = host->data->sg_len;
623 u32 fifoth_val, mburst;
625 u32 idx, rx_wmark, tx_wmark;
628 /* Set external dma config: burst size, burst width*/
629 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
630 slave_config.src_addr = slave_config.dst_addr;
631 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
632 slave_config.src_addr_width = slave_config.dst_addr_width;
634 /* Match FIFO dma burst MSIZE with external dma config*/
635 fifoth_val = mci_readl(host, FIFOTH);
636 mburst = mszs[(fifoth_val >> 28) & 0x7];
638 /* edmac limit burst to 16, but work around for rk3036 to 8 */
639 if (unlikely(cpu_is_rk3036()))
644 if (mburst > burst_limit) {
645 mburst = burst_limit;
646 idx = (ilog2(mburst) > 0) ? (ilog2(mburst) - 1) : 0;
648 if (soc_is_rk3126b()) {
650 rx_wmark = (host->fifo_depth) / 2 - 1;
652 rx_wmark = mszs[idx] - 1;
655 tx_wmark = (host->fifo_depth) / 2;
656 fifoth_val = SDMMC_SET_FIFOTH(idx, rx_wmark, tx_wmark);
658 mci_writel(host, FIFOTH, fifoth_val);
661 slave_config.dst_maxburst = mburst;
662 slave_config.src_maxburst = slave_config.dst_maxburst;
664 if(host->data->flags & MMC_DATA_WRITE){
665 slave_config.direction = DMA_MEM_TO_DEV;
666 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
668 dev_err(host->dev, "error in dw_mci edma configuration.\n");
672 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
673 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
675 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
678 /* Set dw_mci_edmac_complete_dma as callback */
679 desc->callback = dw_mci_edmac_complete_dma;
680 desc->callback_param = (void *)host;
681 dmaengine_submit(desc);
683 /* Flush cache before write */
684 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
685 sg_elems, DMA_TO_DEVICE);
686 dma_async_issue_pending(host->dms->ch);
689 slave_config.direction = DMA_DEV_TO_MEM;
690 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
692 dev_err(host->dev, "error in dw_mci edma configuration.\n");
695 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
696 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
698 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
701 /* set dw_mci_edmac_complete_dma as callback */
702 desc->callback = dw_mci_edmac_complete_dma;
703 desc->callback_param = (void *)host;
704 dmaengine_submit(desc);
705 dma_async_issue_pending(host->dms->ch);
/*
 * Allocate the dw_mci_dma_slave bookkeeping struct and request the
 * external dmaengine channel named "dw_mci" (bound in the device tree).
 *
 * NOTE(review): the failure branch dereferences host->dms->ch->chan_id
 * after just testing that host->dms->ch is NULL -- that is a guaranteed
 * NULL-pointer dereference in the error path; the chan_id should not be
 * printed there.  Also, the cast on kmalloc() is unnecessary in C.
 */
709 static int dw_mci_edmac_init(struct dw_mci *host)
711 /* Request external dma channel, SHOULD decide chn in dts */
713 host->dms = (struct dw_mci_dma_slave *)kmalloc
714 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
715 if (NULL == host->dms) {
716 dev_err(host->dev, "No enough memory to alloc dms.\n");
720 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
721 if (!host->dms->ch) {
722 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
723 host->dms->ch->chan_id);
730 if (NULL != host->dms) {
/* Release the external DMA channel (and, below this view, the dms struct). */
738 static void dw_mci_edmac_exit(struct dw_mci *host)
740 if (NULL != host->dms) {
741 if (NULL != host->dms->ch) {
742 dma_release_channel(host->dms->ch);
743 host->dms->ch = NULL;
/* dma_ops vtable for the external dmaengine (edmac) backend. */
750 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
751 .init = dw_mci_edmac_init,
752 .exit = dw_mci_edmac_exit,
753 .start = dw_mci_edmac_start_dma,
754 .stop = dw_mci_edmac_stop_dma,
755 .complete = dw_mci_edmac_complete_dma,
756 .cleanup = dw_mci_edma_cleanup,
758 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * Map the request's scatterlist for DMA, or reuse a mapping already made
 * by the pre_req hook (host_cookie).  Falls back to PIO (returns an
 * error, below this view) for short transfers (< DW_MCI_DMA_THRESHOLD
 * bytes) and for "complex" transfers with any sg entry whose offset or
 * length is not 4-byte aligned.  When mapping for pre_req (next != 0),
 * the sg_len is stashed in data->host_cookie.
 */
760 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
761 struct mmc_data *data,
764 struct scatterlist *sg;
765 unsigned int i, sg_len;
767 if (!next && data->host_cookie)
768 return data->host_cookie;
771 * We don't do DMA on "complex" transfers, i.e. with
772 * non-word-aligned buffers or lengths. Also, we don't bother
773 * with all the DMA setup overhead for short transfers.
775 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
781 for_each_sg(data->sg, sg, data->sg_len, i) {
782 if (sg->offset & 3 || sg->length & 3)
786 sg_len = dma_map_sg(host->dev,
789 dw_mci_get_dma_dir(data));
794 data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: pre-map the next request's data for DMA so the
 * mapping cost overlaps with the current transfer.  A stale host_cookie
 * is cleared first; on mapping failure the cookie stays 0 so the request
 * path maps (or PIOs) it itself.
 */
799 static void dw_mci_pre_req(struct mmc_host *mmc,
800 struct mmc_request *mrq,
803 struct dw_mci_slot *slot = mmc_priv(mmc);
804 struct mmc_data *data = mrq->data;
806 if (!slot->host->use_dma || !data)
809 if (data->host_cookie) {
810 data->host_cookie = 0;
814 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
815 data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: unmap a scatterlist that pre_req mapped
 * (host_cookie != 0) and clear the cookie.
 */
818 static void dw_mci_post_req(struct mmc_host *mmc,
819 struct mmc_request *mrq,
822 struct dw_mci_slot *slot = mmc_priv(mmc);
823 struct mmc_data *data = mrq->data;
825 if (!slot->host->use_dma || !data)
828 if (data->host_cookie)
829 dma_unmap_sg(slot->host->dev,
832 dw_mci_get_dma_dir(data));
833 data->host_cookie = 0;
/*
 * Choose the largest DMA multiple-transaction size (MSIZE) that evenly
 * divides both the block depth and the TX watermark headroom, then
 * program FIFOTH with the matching RX/TX watermarks.  Only meaningful
 * for the internal DMA (compiled under CONFIG_MMC_DW_IDMAC).  Blocks not
 * a multiple of the FIFO width fall back to the initial MSIZE = 1.
 */
836 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
838 #ifdef CONFIG_MMC_DW_IDMAC
839 unsigned int blksz = data->blksz;
840 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
841 u32 fifo_width = 1 << host->data_shift;
842 u32 blksz_depth = blksz / fifo_width, fifoth_val;
843 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
844 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
846 tx_wmark = (host->fifo_depth) / 2;
847 tx_wmark_invers = host->fifo_depth - tx_wmark;
851 * if blksz is not a multiple of the FIFO width
853 if (blksz % fifo_width) {
860 if (!((blksz_depth % mszs[idx]) ||
861 (tx_wmark_invers % mszs[idx]))) {
863 rx_wmark = mszs[idx] - 1;
868 * If idx is '0', it won't be tried
869 * Thus, initial values are uesed
872 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
873 mci_writel(host, FIFOTH, fifoth_val);
/*
 * Enable the card-read-threshold feature (CDTHRCTL) for fast timings
 * (HS200 / SDR104) so the controller only starts a read when enough FIFO
 * space is free; disabled for other timings or when the block exceeds
 * the FIFO depth.
 */
878 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
880 unsigned int blksz = data->blksz;
881 u32 blksz_depth, fifo_depth;
884 WARN_ON(!(data->flags & MMC_DATA_READ));
886 if (host->timing != MMC_TIMING_MMC_HS200 &&
887 host->timing != MMC_TIMING_UHS_SDR104)
890 blksz_depth = blksz / (1 << host->data_shift);
891 fifo_depth = host->fifo_depth;
893 if (blksz_depth > fifo_depth)
897 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
898 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
899 * Currently just choose blksz.
902 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
906 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to hand the data transfer to DMA.  Maps the scatterlist (or bails
 * out so the caller falls back to PIO), retunes FIFOTH when the block
 * size changed, resets the DMA block, enables the DMA interface in CTRL,
 * masks the RX/TX-ready interrupts (DMA handles the FIFO), and starts
 * the configured dma_ops backend.  The RK3036/RK312x edma-terminate
 * workaround from dw_mci_stop_dma applies here too.
 */
909 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
917 /* If we don't have a channel, we can't do DMA */
921 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
923 /* Fixme: No need terminate edma, may cause flush op */
924 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
925 host->dma_ops->stop(host);
932 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
933 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
937 * Decide the MSIZE and RX/TX Watermark.
938 * If current block size is same with previous size,
939 * no need to update fifoth.
941 if (host->prev_blksz != data->blksz)
942 dw_mci_adjust_fifoth(host, data);
945 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
947 /* Enable the DMA interface */
948 temp = mci_readl(host, CTRL);
949 temp |= SDMMC_CTRL_DMA_ENABLE;
950 mci_writel(host, CTRL, temp);
952 /* Disable RX/TX IRQs, let DMA handle it */
953 spin_lock_irqsave(&host->slock, flags);
954 temp = mci_readl(host, INTMASK);
955 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
956 mci_writel(host, INTMASK, temp);
957 spin_unlock_irqrestore(&host->slock, flags);
959 host->dma_ops->start(host, sg_len);
/*
 * Prepare a data transfer: record the direction, arm the read threshold
 * for reads, and attempt DMA submission.  If DMA is unavailable or
 * refused, fall back to PIO: start an sg_miter over the data, re-enable
 * the RX/TX-ready interrupts, disable the DMA interface and restore the
 * initial FIFOTH (invalidating prev_blksz so a later DMA transfer
 * re-tunes it).  On the DMA path, the block size is cached in
 * prev_blksz so dw_mci_submit_data_dma can skip redundant FIFOTH writes.
 */
964 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
969 data->error = -EINPROGRESS;
971 //WARN_ON(host->data);
976 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
978 if (data->flags & MMC_DATA_READ) {
979 host->dir_status = DW_MCI_RECV_STATUS;
980 dw_mci_ctrl_rd_thld(host, data);
982 host->dir_status = DW_MCI_SEND_STATUS;
985 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
986 data->blocks, data->blksz, mmc_hostname(host->mmc));
988 if (dw_mci_submit_data_dma(host, data)) {
989 int flags = SG_MITER_ATOMIC;
990 if (host->data->flags & MMC_DATA_READ)
991 flags |= SG_MITER_TO_SG;
993 flags |= SG_MITER_FROM_SG;
995 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
997 host->part_buf_start = 0;
998 host->part_buf_count = 0;
1000 spin_lock_irqsave(&host->slock, flag);
1001 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
1002 temp = mci_readl(host, INTMASK);
1003 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
1004 mci_writel(host, INTMASK, temp);
1005 spin_unlock_irqrestore(&host->slock, flag);
1007 temp = mci_readl(host, CTRL);
1008 temp &= ~SDMMC_CTRL_DMA_ENABLE;
1009 mci_writel(host, CTRL, temp);
1012 * Use the initial fifoth_val for PIO mode.
1013 * If next issued data may be transfered by DMA mode,
1014 * prev_blksz should be invalidated.
1016 mci_writel(host, FIFOTH, host->fifoth_val);
1017 host->prev_blksz = 0;
1020 * Keep the current block size.
1021 * It will be used to decide whether to update
1022 * fifoth register next time.
1024 host->prev_blksz = data->blksz;
/*
 * Synchronously issue a controller-internal command (e.g. a clock
 * update): optionally busy-wait for the card/controller to go non-busy
 * (bounded by SDMMC_WAIT_FOR_UNBUSY ms), write CMDARG and CMD with the
 * START bit, then poll until the hardware clears START.  Logs an error
 * on timeout rather than failing the caller.
 */
1028 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1030 struct dw_mci *host = slot->host;
1031 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1032 unsigned int cmd_status = 0;
1033 #ifdef SDMMC_WAIT_FOR_UNBUSY
1035 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1037 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1039 ret = time_before(jiffies, timeout);
1040 cmd_status = mci_readl(host, STATUS);
1041 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1045 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1046 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1049 mci_writel(host, CMDARG, arg);
1051 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
/* Clock-update commands complete quickly; use a shorter poll window. */
1052 if(cmd & SDMMC_CMD_UPD_CLK)
1053 timeout = jiffies + msecs_to_jiffies(50);
1055 timeout = jiffies + msecs_to_jiffies(500);
1056 while (time_before(jiffies, timeout)) {
1057 cmd_status = mci_readl(host, CMD);
1058 if (!(cmd_status & SDMMC_CMD_START))
1061 dev_err(&slot->mmc->class_dev,
1062 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1063 cmd, arg, cmd_status);
/*
 * Program the card clock and bus width for a slot.  When the requested
 * clock changes (or force_clkinit is set): compute the CLKDIV divider
 * from bus_hz, gate the clock, issue an UPD_CLK command, apply the
 * Rockchip clk_mmc workarounds (24MHz xtal reparent below 400kHz;
 * bus_hz fix-ups when div would exceed 1), write CLKDIV, re-enable the
 * clock (with low-power gating unless SDIO interrupts are in use) and
 * cache the programmed value in slot->__clk_old so identical requests
 * are skipped.  Finally the slot's bus width is written to CTYPE.
 * NOTE(review): several closing braces/else lines of the original are
 * not visible in this view; the in-code FIXME about HS-DDR eMMC div
 * handling is the original author's and is preserved as-is.
 */
1066 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1068 struct dw_mci *host = slot->host;
1069 unsigned int tempck,clock = slot->clock;
1074 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1075 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1078 mci_writel(host, CLKENA, 0);
1079 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1080 if(host->svi_flags == 0)
1081 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1083 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1085 } else if (clock != host->current_speed || force_clkinit) {
1086 div = host->bus_hz / clock;
1087 if (host->bus_hz % clock && host->bus_hz > clock)
1089 * move the + 1 after the divide to prevent
1090 * over-clocking the card.
1094 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1096 if ((clock << div) != slot->__clk_old || force_clkinit) {
1097 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1098 dev_info(&slot->mmc->class_dev,
1099 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1100 slot->id, host->bus_hz, clock,
1103 host->set_speed = tempck;
1104 host->set_div = div;
1108 mci_writel(host, CLKENA, 0);
1109 mci_writel(host, CLKSRC, 0);
1113 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1115 if(clock <= 400*1000){
1116 MMC_DBG_BOOT_FUNC(host->mmc,
1117 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1118 clock * 2, mmc_hostname(host->mmc));
1119 /* clk_mmc will change parents to 24MHz xtal*/
1120 clk_set_rate(host->clk_mmc, clock * 2);
1123 host->set_div = div;
1127 MMC_DBG_BOOT_FUNC(host->mmc,
1128 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1129 mmc_hostname(host->mmc));
1132 MMC_DBG_ERR_FUNC(host->mmc,
1133 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1134 mmc_hostname(host->mmc));
1136 host->set_div = div;
1137 host->bus_hz = host->set_speed * 2;
1138 MMC_DBG_BOOT_FUNC(host->mmc,
1139 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1140 div, host->bus_hz, mmc_hostname(host->mmc));
1142 /* BUG may be here, come on, Linux BSP engineer looks!
1143 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1144 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1145 some oops happened like that:
1146 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1147 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1148 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1149 mmc0: new high speed DDR MMC card at address 0001
1150 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1152 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1153 mmcblk0: retrying using single block read
1154 mmcblk0: error -110 sending status command, retrying
1156 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1159 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1160 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1163 host->set_div = div;
1164 host->bus_hz = host->set_speed * 2;
1165 MMC_DBG_BOOT_FUNC(host->mmc,
1166 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1167 div, host->bus_hz, mmc_hostname(host->mmc));
/* Controllers older than 2.40a take the target rate directly; newer
 * ones divide by 2 internally, so request twice the rate. */
1170 if (host->verid < DW_MMC_240A)
1171 clk_set_rate(host->clk_mmc,(host->bus_hz));
1173 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1179 /* set clock to desired speed */
1180 mci_writel(host, CLKDIV, div);
1184 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1186 /* enable clock; only low power if no SDIO */
1187 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1189 if (host->verid < DW_MMC_240A)
1190 sdio_int = SDMMC_INT_SDIO(slot->id);
1192 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1194 if (!(mci_readl(host, INTMASK) & sdio_int))
1195 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1196 mci_writel(host, CLKENA, clk_en_a);
1200 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1201 /* Cache the programmed clock (clock << div) so identical requests can be skipped. */
1202 slot->__clk_old = clock << div;
1205 host->current_speed = clock;
1207 if(slot->ctype != slot->pre_ctype)
1208 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1210 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1211 mmc_hostname(host->mmc));
1212 slot->pre_ctype = slot->ctype;
1214 /* Set the current slot bus width */
1215 mci_writel(host, CTYPE, (slot->ctype << slot->id));
/* Card handle published elsewhere; used below for eMMC erase timeouts. */
1218 extern struct mmc_card *this_card;
/*
 * Poll STATUS until neither DATA_BUSY nor MC_BUSY is set, with a
 * card-type-dependent timeout: SDIO default, SD, eMMC, and an extended
 * value for eMMC (secure) erase derived from EXT_CSD fields.
 *
 * NOTE(review): `(host->cmd->arg & (0x1 << 31)) == 1` can never be true
 * -- the masked result is either 0 or 0x80000000 -- so the secure-erase
 * branch is dead code; it was presumably meant to be `!= 0`.  Also,
 * `0x1 << 31` left-shifts into the sign bit of a signed int (undefined
 * behavior); `1U << 31` would be correct.
 */
1219 static void dw_mci_wait_unbusy(struct dw_mci *host)
1222 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1223 unsigned long time_loop;
1224 unsigned int status;
1227 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1229 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1230 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1231 /* Special care for (secure)erase timeout calculation */
1233 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1236 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1237 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1238 300000 * (this_card->ext_csd.sec_erase_mult)) :
1239 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1243 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1244 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1245 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1246 timeout = SDMMC_DATA_TIMEOUT_SD;
1249 time_loop = jiffies + msecs_to_jiffies(timeout);
1251 status = mci_readl(host, STATUS);
1252 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1254 } while (time_before(jiffies, time_loop));
1259 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1262 * 0--status is busy.
1263 * 1--status is unbusy.
/*
 * .card_busy callback used during the signal-voltage switch sequence.
 * Toggles host->svi_flags between 0 and 1 on successive calls and returns
 * the new value; per the comment above, the MMC core interprets the return
 * as the busy indication.  NOTE(review): this reports a state-machine flag,
 * not the actual DAT-line level -- confirm against the core's expectations.
 */
1265 int dw_mci_card_busy(struct mmc_host *mmc)
1267 struct dw_mci_slot *slot = mmc_priv(mmc);
1268 struct dw_mci *host = slot->host;
1270 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1271 host->svi_flags, mmc_hostname(host->mmc));
/* First call of a switch: mark switch-in-progress and report 1. */
1274 if(host->svi_flags == 0){
1276 host->svi_flags = 1;
1277 return host->svi_flags;
/* Subsequent call: clear the flag and report 0. */
1280 host->svi_flags = 0;
1281 return host->svi_flags;
/*
 * Program the controller for one command (and its data phase, if any) and
 * fire it.  Caller provides the slot and the command to issue (either the
 * real cmd or the SBC pre-command).  Waits for the controller to become
 * un-busy before touching registers.
 */
1287 static void __dw_mci_start_request(struct dw_mci *host,
1288 struct dw_mci_slot *slot,
1289 struct mmc_command *cmd)
1291 struct mmc_request *mrq;
1292 struct mmc_data *data;
1296 if (host->pdata->select_slot)
1297 host->pdata->select_slot(slot->id);
1299 host->cur_slot = slot;
/* Make sure the previous transfer has fully drained before reprogramming. */
1302 dw_mci_wait_unbusy(host);
1304 host->pending_events = 0;
1305 host->completed_events = 0;
1306 host->data_status = 0;
/* Data phase: program timeout, total byte count and block size. */
1310 dw_mci_set_timeout(host);
1311 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1312 mci_writel(host, BLKSIZ, data->blksz);
1315 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1317 /* this is the first command, send the initialization clock */
1318 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1319 cmdflags |= SDMMC_CMD_INIT;
1322 dw_mci_submit_data(host, data);
1326 dw_mci_start_command(host, cmd, cmdflags);
/* Pre-compute the stop command register value for the data-end path. */
1329 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * Kick off the request queued on @slot.  If the request carries an SBC
 * (CMD23 set-block-count) it is issued first; otherwise the main command
 * goes straight out.
 */
1332 static void dw_mci_start_request(struct dw_mci *host,
1333 struct dw_mci_slot *slot)
1335 struct mmc_request *mrq = slot->mrq;
1336 struct mmc_command *cmd;
1338 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1339 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
/* SBC, when present, must precede the data command. */
1341 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1342 __dw_mci_start_request(host, slot, cmd);
1345 /* must be called with host->lock held */
/*
 * Attach @mrq to @slot and either start it immediately (host idle) or
 * append the slot to the host work queue for later dispatch.
 */
1346 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1347 struct mmc_request *mrq)
1349 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1354 if (host->state == STATE_IDLE) {
1355 host->state = STATE_SENDING_CMD;
1356 dw_mci_start_request(host, slot);
/* Host busy: defer; dw_mci_request_end() will pick this up. */
1358 list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops.request: entry point from the MMC core.  Fails fast with
 * -ENOMEDIUM when no card is present, otherwise queues the request under
 * host->lock so card removal cannot race the enqueue.
 */
1362 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1364 struct dw_mci_slot *slot = mmc_priv(mmc);
1365 struct dw_mci *host = slot->host;
1370 * The check for card presence and queueing of the request must be
1371 * atomic, otherwise the card could be removed in between and the
1372 * request wouldn't fail until another card was inserted.
1374 spin_lock_bh(&host->lock);
1376 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1377 spin_unlock_bh(&host->lock);
1378 mrq->cmd->error = -ENOMEDIUM;
1379 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1380 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
/* Complete immediately outside the lock; nothing was queued. */
1382 mmc_request_done(mmc, mrq);
1386 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1387 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1389 dw_mci_queue_request(host, slot, mrq);
1391 spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.set_ios: apply bus width, timing, clock and power-mode
 * changes.  Optionally (SDMMC_WAIT_FOR_UNBUSY) spins first until the
 * controller is idle so register updates do not disturb a live transfer.
 */
1394 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1396 struct dw_mci_slot *slot = mmc_priv(mmc);
1397 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1398 struct dw_mci *host = slot->host;
1400 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1403 #ifdef SDMMC_WAIT_FOR_UNBUSY
1404 unsigned long time_loop;
/* During a voltage switch (svi_flags==1) allow the longer SD data timeout. */
1407 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1408 if(host->svi_flags == 1)
1409 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1411 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1413 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1416 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1417 printk("%d..%s: no card. [%s]\n", \
1418 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until both card and controller report not-busy. */
1423 ret = time_before(jiffies, time_loop);
1424 regs = mci_readl(slot->host, STATUS);
1425 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1431 printk("slot->flags = %lu ", slot->flags);
1432 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1433 if(host->svi_flags != 1)
1436 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1437 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
1441 switch (ios->bus_width) {
1442 case MMC_BUS_WIDTH_4:
1443 slot->ctype = SDMMC_CTYPE_4BIT;
1445 case MMC_BUS_WIDTH_8:
1446 slot->ctype = SDMMC_CTYPE_8BIT;
1449 /* set default 1 bit mode */
1450 slot->ctype = SDMMC_CTYPE_1BIT;
1451 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode: set the per-slot DDR bit in UHS_REG (bits 16+id). */
1454 regs = mci_readl(slot->host, UHS_REG);
1457 if (ios->timing == MMC_TIMING_UHS_DDR50)
1458 regs |= ((0x1 << slot->id) << 16);
1460 regs &= ~((0x1 << slot->id) << 16);
1462 mci_writel(slot->host, UHS_REG, regs);
1463 slot->host->timing = ios->timing;
1466 * Use mirror of ios->clock to prevent race with mmc
1467 * core ios update when finding the minimum.
1469 slot->clock = ios->clock;
/* Give the platform glue (Rockchip variant code) first crack at ios. */
1471 if (drv_data && drv_data->set_ios)
1472 drv_data->set_ios(slot->host, ios);
1474 /* Slot specific timing and width adjustment */
1475 dw_mci_setup_bus(slot, false);
1479 switch (ios->power_mode) {
/* Power up: request init clock on next command, enable PWREN for this slot. */
1481 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1483 if (slot->host->pdata->setpower)
1484 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1485 regs = mci_readl(slot->host, PWREN);
1486 regs |= (1 << slot->id);
1487 mci_writel(slot->host, PWREN, regs);
1490 /* Power down slot */
1491 if(slot->host->pdata->setpower)
1492 slot->host->pdata->setpower(slot->id, 0);
1493 regs = mci_readl(slot->host, PWREN);
1494 regs &= ~(1 << slot->id);
1495 mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops.get_ro: report write-protect state.  Precedence: slot quirk
 * (never write-protected) > platform get_ro hook > WP GPIO > controller
 * WRTPRT register bit for this slot.  Returns 1 = read-only, 0 = writable.
 */
1502 static int dw_mci_get_ro(struct mmc_host *mmc)
1505 struct dw_mci_slot *slot = mmc_priv(mmc);
1506 struct dw_mci_board *brd = slot->host->pdata;
1508 /* Use platform get_ro function, else try on board write protect */
1509 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1511 else if(brd->get_ro)
1512 read_only = brd->get_ro(slot->id);
1513 else if(gpio_is_valid(slot->wp_gpio))
1514 read_only = gpio_get_value(slot->wp_gpio);
/* Fallback: hardware write-protect input latched in WRTPRT. */
1517 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1519 dev_dbg(&mmc->class_dev, "card is %s\n",
1520 read_only ? "read-only" : "read-write");
/*
 * Out-of-band SDIO card presence control (used by the WiFi rfkill path):
 * set/clear DW_MMC_CARD_PRESENT under host->lock, gate the controller
 * clocks accordingly, then schedule card (re)detection.
 * Only valid for hosts restricted to SDIO.
 */
1525 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1527 struct dw_mci_slot *slot = mmc_priv(mmc);
1528 struct dw_mci *host = slot->host;
1529 /*struct dw_mci_board *brd = slot->host->pdata;*/
1531 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1534 spin_lock_bh(&host->lock);
1537 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1539 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1541 spin_unlock_bh(&host->lock);
/* Card "inserted": make sure hpclk/hclk/clk are all running. */
1543 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1544 if (!IS_ERR(host->hpclk_mmc) &&
1545 __clk_is_enabled(host->hpclk_mmc) == false)
1546 clk_prepare_enable(host->hpclk_mmc);
1547 if (__clk_is_enabled(host->hclk_mmc) == false)
1548 clk_prepare_enable(host->hclk_mmc);
1549 if (__clk_is_enabled(host->clk_mmc) == false)
1550 clk_prepare_enable(host->clk_mmc);
/* Card "removed": drop the clocks in reverse order. */
1552 if (__clk_is_enabled(host->clk_mmc) == true)
1553 clk_disable_unprepare(slot->host->clk_mmc);
1554 if (__clk_is_enabled(host->hclk_mmc) == true)
1555 clk_disable_unprepare(slot->host->hclk_mmc);
1556 if (!IS_ERR(host->hpclk_mmc) &&
1557 __clk_is_enabled(host->hpclk_mmc) == true)
1558 clk_disable_unprepare(slot->host->hpclk_mmc);
/* Let the MMC core rescan after a short debounce (20ms). */
1561 mmc_detect_change(slot->mmc, 20);
/*
 * mmc_host_ops.get_cd: report card presence.  On RK3036/RK3126 SD slots a
 * special path debounces the CD GPIO, flips the GRF "force JTAG" mux
 * (JTAG is shared with the SD pins on those NCD packages), and retriggers
 * the CD IRQ polarity.  Otherwise: SDIO uses the software PRESENT flag,
 * then platform hook, then CD GPIO, then the CDETECT register.
 */
1567 static int dw_mci_get_cd(struct mmc_host *mmc)
1570 struct dw_mci_slot *slot = mmc_priv(mmc);
1571 struct dw_mci_board *brd = slot->host->pdata;
1572 struct dw_mci *host = slot->host;
1573 int gpio_cd = mmc_gpio_get_cd(mmc);
1574 int force_jtag_bit, force_jtag_reg;
1578 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
1579 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1580 gpio_cd = slot->cd_gpio;
1581 irq = gpio_to_irq(gpio_cd);
1582 if (gpio_is_valid(gpio_cd)) {
1583 gpio_val = gpio_get_value(gpio_cd);
1584 if (soc_is_rk3036()) {
1585 force_jtag_bit = 11;
1586 force_jtag_reg = RK312X_GRF_SOC_CON0;
/* NOTE(review): the rk3126 branch sets only the reg in the visible lines;
 * force_jtag_bit is presumably assigned on a line not shown -- confirm. */
1587 } else if (soc_is_rk3126() || soc_is_rk3126b()) {
1588 force_jtag_reg = RK312X_GRF_SOC_CON0;
/* Value stable across the (elided) debounce delay? */
1592 if (gpio_val == gpio_get_value(gpio_cd)) {
1593 gpio_cd = (gpio_val == 0 ? 1 : 0);
1595 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1596 /* Enable force_jtag without card in slot, ONLY for NCD-package */
1597 grf_writel((0x1 << (force_jtag_bit + 16)) | (1 << force_jtag_bit),
1600 dw_mci_ctrl_all_reset(host);
1602 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
1603 /* Really card detected: SHOULD disable force_jtag */
1604 grf_writel((0x1 << (force_jtag_bit + 16)) | (0 << force_jtag_bit),
/* Bounce detected: rearm the IRQ edge by current level and keep old state. */
1609 gpio_val = gpio_get_value(gpio_cd);
1611 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
1612 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1613 return slot->last_detect_state;
1616 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
1620 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1621 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1623 /* Use platform get_cd function, else try onboard card detect */
1624 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1626 else if (brd->get_cd)
1627 present = !brd->get_cd(slot->id);
1628 else if (!IS_ERR_VALUE(gpio_cd))
1631 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
/* Publish the result in the PRESENT flag under the host lock. */
1634 spin_lock_bh(&host->lock);
1636 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1637 dev_dbg(&mmc->class_dev, "card is present\n");
1639 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1640 dev_dbg(&mmc->class_dev, "card is not present\n");
1642 spin_unlock_bh(&host->lock);
1649 * Dts Should caps emmc controller with poll-hw-reset
/*
 * mmc_host_ops.hw_reset for eMMC: (1) abort any in-flight transfer with a
 * CMD12, (2) wait/clear DTO, (3) reset IDMAC, DMA and FIFO in this exact
 * order, then (4) pulse PWREN/RST_n per JEDEC eMMC RST_n timing.
 */
1651 static void dw_mci_hw_reset(struct mmc_host *mmc)
1653 struct dw_mci_slot *slot = mmc_priv(mmc);
1654 struct dw_mci *host = slot->host;
1659 unsigned long timeout;
1662 /* (1) CMD12 to end any transfer in process */
1663 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1664 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1666 if(host->mmc->hold_reg_flag)
1667 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1668 mci_writel(host, CMDARG, 0);
1670 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait up to 500ms for the controller to accept the command (START clears). */
1672 timeout = jiffies + msecs_to_jiffies(500);
1674 ret = time_before(jiffies, timeout);
1675 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1680 MMC_DBG_ERR_FUNC(host->mmc,
1681 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1682 __func__, mmc_hostname(host->mmc));
1684 /* (2) wait DTO, even if no response is sent back by card */
1686 timeout = jiffies + msecs_to_jiffies(5);
1688 ret = time_before(jiffies, timeout);
1689 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1690 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1696 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1698 /* Software reset - BMOD[0] for IDMA only */
1699 regs = mci_readl(host, BMOD);
1700 regs |= SDMMC_IDMAC_SWRESET;
1701 mci_writel(host, BMOD, regs);
1702 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1703 regs = mci_readl(host, BMOD);
1704 if(regs & SDMMC_IDMAC_SWRESET)
1705 MMC_DBG_WARN_FUNC(host->mmc,
1706 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1707 __func__, mmc_hostname(host->mmc));
1709 /* DMA reset - CTRL[2] */
1710 regs = mci_readl(host, CTRL);
1711 regs |= SDMMC_CTRL_DMA_RESET;
1712 mci_writel(host, CTRL, regs);
1713 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1714 regs = mci_readl(host, CTRL);
1715 if(regs & SDMMC_CTRL_DMA_RESET)
1716 MMC_DBG_WARN_FUNC(host->mmc,
1717 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1718 __func__, mmc_hostname(host->mmc));
1720 /* FIFO reset - CTRL[1] */
1721 regs = mci_readl(host, CTRL);
1722 regs |= SDMMC_CTRL_FIFO_RESET;
1723 mci_writel(host, CTRL, regs);
1724 mdelay(1); /* no timing limited, 1ms is random value */
1725 regs = mci_readl(host, CTRL);
1726 if(regs & SDMMC_CTRL_FIFO_RESET)
/* NOTE(review): message below is a copy/paste -- it says DMA_RESET but this
 * check is for SDMMC_CTRL_FIFO_RESET (string left untouched in this pass). */
1727 MMC_DBG_WARN_FUNC(host->mmc,
1728 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1729 __func__, mmc_hostname(host->mmc));
1732 According to eMMC spec
1733 tRstW >= 1us ; RST_n pulse width
1734 tRSCA >= 200us ; RST_n to Command time
1735 tRSTH >= 1us ; RST_n high period
1737 mci_writel(slot->host, PWREN, 0x0);
1738 mci_writel(slot->host, RST_N, 0x0);
1740 udelay(10); /* 10us for bad quality eMMc. */
1742 mci_writel(slot->host, PWREN, 0x1);
1743 mci_writel(slot->host, RST_N, 0x1);
1745 usleep_range(500, 1000); /* at least 500(> 200us) */
1749 * Disable lower power mode.
1751 * Low power mode will stop the card clock when idle. According to the
1752 * description of the CLKENA register we should disable low power mode
1753 * for SDIO cards if we need SDIO interrupts to work.
1755 * This function is fast if low power mode is already disabled.
1757 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1759 struct dw_mci *host = slot->host;
1761 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1763 clk_en_a = mci_readl(host, CLKENA);
/* Only touch hardware when the low-power bit is actually set; the clock
 * update command is required for CLKENA changes to take effect. */
1765 if (clk_en_a & clken_low_pwr) {
1766 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1767 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1768 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * mmc_host_ops.enable_sdio_irq: (un)mask the per-slot SDIO interrupt in
 * INTMASK under host->slock.  Enabling also forces low-power clock gating
 * off, since a gated card clock would suppress SDIO interrupts.
 */
1772 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1774 struct dw_mci_slot *slot = mmc_priv(mmc);
1775 struct dw_mci *host = slot->host;
1776 unsigned long flags;
1780 spin_lock_irqsave(&host->slock, flags);
1782 /* Enable/disable Slot Specific SDIO interrupt */
1783 int_mask = mci_readl(host, INTMASK);
/* SDIO interrupt bit position moved in IP version 2.40a. */
1785 if (host->verid < DW_MMC_240A)
1786 sdio_int = SDMMC_INT_SDIO(slot->id);
1788 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1792 * Turn off low power mode if it was enabled. This is a bit of
1793 * a heavy operation and we disable / enable IRQs a lot, so
1794 * we'll leave low power mode disabled and it will get
1795 * re-enabled again in dw_mci_setup_bus().
1797 dw_mci_disable_low_power(slot);
1799 mci_writel(host, INTMASK,
1800 (int_mask | sdio_int));
1802 mci_writel(host, INTMASK,
1803 (int_mask & ~sdio_int));
1806 spin_unlock_irqrestore(&host->slock, flags);
1809 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* IO-domain voltage levels in millivolts. */
1811 IO_DOMAIN_12 = 1200,
1812 IO_DOMAIN_18 = 1800,
1813 IO_DOMAIN_33 = 3300,
/*
 * Program the SoC GRF so the SD IO pad voltage domain matches the new
 * signalling level.  @voltage is one of the IO_DOMAIN_* values; the chip
 * branches write the per-SoC VSEL register (with write-enable mask bits).
 * Error paths only log -- the caller continues regardless.
 */
1815 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1825 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1826 __FUNCTION__, mmc_hostname(host->mmc));
1829 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1830 __FUNCTION__, mmc_hostname(host->mmc));
/* NOTE(review): voltage is presumably remapped to a 0/1 selector on lines
 * not visible here before being shifted into the VSEL field -- confirm. */
1834 if (cpu_is_rk3288()) {
1835 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1836 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1839 } else if (host->cid == DW_MCI_TYPE_RK3368) {
1840 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1841 regmap_write(host->grf, 0x900, (voltage << 6) | (1 << 22));
1845 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1846 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * Perform the actual signalling-voltage switch: set the vmmc regulator,
 * flip the GRF IO domain, and update SDMMC_UHS_REG's 1.8V bit, then verify
 * the register state after the regulator settle time (~5ms).
 * Supports 3.3V, 1.8V and (regulator-only) 1.2V.  Requires IP >= 2.40a.
 */
1850 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1851 struct mmc_ios *ios)
1854 unsigned int value,uhs_reg;
1857 * Signal Voltage Switching is only applicable for Host Controllers
1860 if (host->verid < DW_MMC_240A)
1863 uhs_reg = mci_readl(host, UHS_REG);
1864 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1865 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1867 switch (ios->signal_voltage) {
1868 case MMC_SIGNAL_VOLTAGE_330:
1869 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1871 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1872 /* regulator_put(host->vmmc); //to be done in remove function. */
1874 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1875 __func__, regulator_get_voltage(host->vmmc), ret);
1877 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1878 " failed\n", mmc_hostname(host->mmc));
1881 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1883 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1884 __FUNCTION__, mmc_hostname(host->mmc));
1886 /* set High-power mode */
1887 value = mci_readl(host, CLKENA);
1888 value &= ~SDMMC_CLKEN_LOW_PWR;
1889 mci_writel(host,CLKENA , value);
/* Clear the 1.8V signalling enable bit for 3.3V operation. */
1891 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1892 mci_writel(host,UHS_REG , uhs_reg);
1895 usleep_range(5000, 5500);
1897 /* 3.3V regulator output should be stable within 5 ms */
1898 uhs_reg = mci_readl(host, UHS_REG);
1899 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1902 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1903 mmc_hostname(host->mmc));
1906 case MMC_SIGNAL_VOLTAGE_180:
1908 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1909 /* regulator_put(host->vmmc);//to be done in remove function. */
1911 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1912 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1914 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1915 " failed\n", mmc_hostname(host->mmc));
1918 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1922 * Enable 1.8V Signal Enable in the Host Control2
1925 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1928 usleep_range(5000, 5500);
1929 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1930 __FUNCTION__,mmc_hostname(host->mmc));
1932 /* 1.8V regulator output should be stable within 5 ms */
1933 uhs_reg = mci_readl(host, UHS_REG);
1934 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1937 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1938 mmc_hostname(host->mmc));
/* 1.2V: regulator change only; no UHS_REG bit for this level. */
1941 case MMC_SIGNAL_VOLTAGE_120:
1943 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1945 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1946 " failed\n", mmc_hostname(host->mmc));
1952 /* No signal voltage switch required */
/*
 * mmc_host_ops.start_signal_voltage_switch: thin wrapper that rejects
 * pre-2.40a IP and delegates to dw_mci_do_start_signal_voltage_switch().
 */
1958 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1959 struct mmc_ios *ios)
1961 struct dw_mci_slot *slot = mmc_priv(mmc);
1962 struct dw_mci *host = slot->host;
1965 if (host->verid < DW_MMC_240A)
1968 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * mmc_host_ops.execute_tuning: select the tuning block pattern for the
 * opcode/bus-width, pick the clock-provider con_id by card type, and hand
 * off to the Rockchip variant's execute_tuning implementation.
 * Skipped entirely on RK3036/RK312x (no 1.8V IO domain -> tuning is moot).
 */
1974 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1976 struct dw_mci_slot *slot = mmc_priv(mmc);
1977 struct dw_mci *host = slot->host;
1978 const struct dw_mci_drv_data *drv_data = host->drv_data;
1979 struct dw_mci_tuning_data tuning_data;
1982 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1983 if(cpu_is_rk3036() || cpu_is_rk312x())
1986 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1987 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1988 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1989 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1990 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1991 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1992 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1996 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1997 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1998 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
2001 "Undefined command(%d) for tuning\n", opcode);
2006 /* Recommend sample phase and delayline
2007 Fixme: Mix-use these three controllers will cause
/* con_id: 3 = eMMC, 1 = SDIO, 0 = SD (per-controller clock consumer id). */
2010 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
2011 tuning_data.con_id = 3;
2012 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2013 tuning_data.con_id = 1;
2015 tuning_data.con_id = 0;
2017 /* 0: driver, from host->devices
2018 1: sample, from devices->host
2020 tuning_data.tuning_type = 1;
2022 if (drv_data && drv_data->execute_tuning)
2023 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/*
 * Timeout recovery hook (.post_tmo): drop the stuck request reference and
 * force the host state machine back to idle so a new request can start.
 */
2028 static void dw_mci_post_tmo(struct mmc_host *mmc)
2030 struct dw_mci_slot *slot = mmc_priv(mmc);
2031 struct dw_mci *host = slot->host;
2032 host->cur_slot->mrq = NULL;
2034 host->state = STATE_IDLE;
/* mmc_host_ops vtable binding this driver's callbacks into the MMC core. */
2037 static const struct mmc_host_ops dw_mci_ops = {
2038 .request = dw_mci_request,
2039 .pre_req = dw_mci_pre_req,
2040 .post_req = dw_mci_post_req,
2041 .set_ios = dw_mci_set_ios,
2042 .get_ro = dw_mci_get_ro,
2043 .get_cd = dw_mci_get_cd,
2044 .set_sdio_status = dw_mci_set_sdio_status,
2045 .hw_reset = dw_mci_hw_reset,
2046 .enable_sdio_irq = dw_mci_enable_sdio_irq,
2047 .execute_tuning = dw_mci_execute_tuning,
2048 .post_tmo = dw_mci_post_tmo,
/* Voltage-switch callbacks only exist when the Kconfig option is on. */
2049 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
2050 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
2051 .card_busy = dw_mci_card_busy,
/*
 * Enable or disable the host IRQ line, tracking the current state in
 * host->irq_state so enable/disable calls are never unbalanced.
 */
2056 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
2058 unsigned long flags;
2063 local_irq_save(flags);
/* Only act on an actual state change to keep the IRQ depth balanced. */
2064 if(host->irq_state != irqflag)
2066 host->irq_state = irqflag;
2069 enable_irq(host->irq);
2073 disable_irq(host->irq);
2076 local_irq_restore(flags);
/*
 * End-of-data housekeeping: on writes (except the CMD19 bus test), map the
 * latched data_status bits to an errno on host->data, then wait for the
 * controller/card busy to clear before the next request.
 */
2080 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2081 __releases(&host->lock)
2082 __acquires(&host->lock)
2084 if(DW_MCI_SEND_STATUS == host->dir_status){
2086 if( MMC_BUS_TEST_W != host->cmd->opcode){
2087 if(host->data_status & SDMMC_INT_DCRC)
2088 host->data->error = -EILSEQ;
2089 else if(host->data_status & SDMMC_INT_EBE)
2090 host->data->error = -ETIMEDOUT;
2092 dw_mci_wait_unbusy(host);
2095 dw_mci_wait_unbusy(host);
/*
 * Complete @mrq: finish the data phase, clear the slot's mrq, start the
 * next queued slot (or go idle), then call mmc_request_done() with
 * host->lock dropped (see __releases/__acquires annotations).
 */
2100 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2101 __releases(&host->lock)
2102 __acquires(&host->lock)
2104 struct dw_mci_slot *slot;
2105 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2107 //WARN_ON(host->cmd || host->data);
2109 dw_mci_deal_data_end(host, mrq);
2112 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2113 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2115 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2116 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2118 host->cur_slot->mrq = NULL;
/* Dispatch the next queued slot, if any; otherwise drop to idle. */
2120 if (!list_empty(&host->queue)) {
2121 slot = list_entry(host->queue.next,
2122 struct dw_mci_slot, queue_node);
2123 list_del(&slot->queue_node);
2124 dev_vdbg(host->dev, "list not empty: %s is next\n",
2125 mmc_hostname(slot->mmc));
2126 host->state = STATE_SENDING_CMD;
2127 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2128 dw_mci_start_request(host, slot);
2130 dev_vdbg(host->dev, "list empty\n");
2131 host->state = STATE_IDLE;
/* mmc_request_done may re-enter the driver; call it unlocked. */
2134 spin_unlock(&host->lock);
2135 mmc_request_done(prev_mmc, mrq);
2136 spin_lock(&host->lock);
/*
 * Latch the command response registers into cmd->resp[] and translate the
 * saved cmd_status interrupt bits into cmd->error (-ETIMEDOUT / -EILSEQ).
 * R2 (136-bit) responses are read from RESP0..3 in reverse order.
 */
2139 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2141 u32 status = host->cmd_status;
2143 host->cmd_status = 0;
2145 /* Read the response from the card (up to 16 bytes) */
2146 if (cmd->flags & MMC_RSP_PRESENT) {
2147 if (cmd->flags & MMC_RSP_136) {
2148 cmd->resp[3] = mci_readl(host, RESP0);
2149 cmd->resp[2] = mci_readl(host, RESP1);
2150 cmd->resp[1] = mci_readl(host, RESP2);
2151 cmd->resp[0] = mci_readl(host, RESP3);
2153 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2154 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2156 cmd->resp[0] = mci_readl(host, RESP0);
2160 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2161 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
/* Response timeout; SDIO hosts have extra handling (lines not visible). */
2165 if (status & SDMMC_INT_RTO)
2167 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2170 cmd->error = -ETIMEDOUT;
2171 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2172 cmd->error = -EILSEQ;
2173 }else if (status & SDMMC_INT_RESP_ERR){
2178 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2179 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* Suppress log spam for repeated CMD13 response timeouts (cmd_rto counter). */
2182 if(MMC_SEND_STATUS != cmd->opcode)
2183 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2184 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2185 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2189 /* newer ip versions need a delay between retries */
2190 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine, run as a tasklet after IRQ events.  Loops
 * over host->state (SENDING_CMD -> SENDING_DATA -> DATA_BUSY ->
 * SENDING_STOP / DATA_ERROR) consuming pending_events bits until no state
 * transition happens.  Runs entirely under host->lock (dropped briefly in
 * dw_mci_request_end()).
 */
2196 static void dw_mci_tasklet_func(unsigned long priv)
2198 struct dw_mci *host = (struct dw_mci *)priv;
2199 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2200 struct mmc_data *data;
2201 struct mmc_command *cmd;
2202 enum dw_mci_state state;
2203 enum dw_mci_state prev_state;
2204 u32 status, cmd_flags;
2205 unsigned long timeout = 0;
2208 spin_lock(&host->lock);
2210 state = host->state;
2220 case STATE_SENDING_CMD:
2221 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2222 &host->pending_events))
2227 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2228 dw_mci_command_complete(host, cmd);
/* SBC finished OK: now issue the real data command. */
2229 if (cmd == host->mrq->sbc && !cmd->error) {
2230 prev_state = state = STATE_SENDING_CMD;
2231 __dw_mci_start_request(host, host->cur_slot,
/* Command failed with data attached: abort DMA and send stop/abort. */
2236 if (cmd->data && cmd->error) {
2237 dw_mci_stop_dma(host);
2240 send_stop_cmd(host, data);
2241 state = STATE_SENDING_STOP;
2244 /* host->data = NULL; */
2247 send_stop_abort(host, data);
2248 state = STATE_SENDING_STOP;
2251 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2254 if (!host->mrq->data || cmd->error) {
2255 dw_mci_request_end(host, host->mrq);
2259 prev_state = state = STATE_SENDING_DATA;
2262 case STATE_SENDING_DATA:
2263 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2264 dw_mci_stop_dma(host);
2267 send_stop_cmd(host, data);
2269 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2270 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2271 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
2273 mci_writel(host, CMDARG, 0);
2275 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2276 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2278 if(host->mmc->hold_reg_flag)
2279 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
/* Issue a raw CMD12 and poll up to 500ms for CMD.START to self-clear. */
2281 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
2283 timeout = jiffies + msecs_to_jiffies(500);
2286 ret = time_before(jiffies, timeout);
2287 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2291 MMC_DBG_ERR_FUNC(host->mmc,
2292 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2293 __func__, mmc_hostname(host->mmc));
2296 send_stop_abort(host, data);
2298 state = STATE_DATA_ERROR;
2302 MMC_DBG_CMD_FUNC(host->mmc,
2303 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2304 prev_state,state, mmc_hostname(host->mmc));
2306 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2307 &host->pending_events))
2309 MMC_DBG_INFO_FUNC(host->mmc,
2310 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2311 prev_state,state,mmc_hostname(host->mmc));
2313 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2314 prev_state = state = STATE_DATA_BUSY;
2317 case STATE_DATA_BUSY:
2318 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2319 &host->pending_events))
2322 dw_mci_deal_data_end(host, host->mrq);
2323 MMC_DBG_INFO_FUNC(host->mmc,
2324 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2325 prev_state,state,mmc_hostname(host->mmc));
2327 /* host->data = NULL; */
2328 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2329 status = host->data_status;
/* Map latched data-error interrupt bits to data->error. */
2331 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2332 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2333 MMC_DBG_ERR_FUNC(host->mmc,
2334 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2335 prev_state,state, status, mmc_hostname(host->mmc));
2337 if (status & SDMMC_INT_DRTO) {
2338 data->error = -ETIMEDOUT;
2339 } else if (status & SDMMC_INT_DCRC) {
2340 data->error = -EILSEQ;
2341 } else if (status & SDMMC_INT_EBE &&
2342 host->dir_status == DW_MCI_SEND_STATUS){
2344 * No data CRC status was returned.
2345 * The number of bytes transferred will
2346 * be exaggerated in PIO mode.
2348 data->bytes_xfered = 0;
2349 data->error = -ETIMEDOUT;
2358 * After an error, there may be data lingering
2359 * in the FIFO, so reset it - doing so
2360 * generates a block interrupt, hence setting
2361 * the scatter-gather pointer to NULL.
2363 dw_mci_fifo_reset(host);
2365 data->bytes_xfered = data->blocks * data->blksz;
2370 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2371 prev_state,state,mmc_hostname(host->mmc));
2372 dw_mci_request_end(host, host->mrq);
2375 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2376 prev_state,state,mmc_hostname(host->mmc));
/* SBC transfers auto-stop (CMD23): no explicit CMD12 needed. */
2378 if (host->mrq->sbc && !data->error) {
2379 data->stop->error = 0;
2381 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2382 prev_state,state,mmc_hostname(host->mmc));
2384 dw_mci_request_end(host, host->mrq);
2388 prev_state = state = STATE_SENDING_STOP;
2390 send_stop_cmd(host, data);
2392 if (data->stop && !data->error) {
2393 /* stop command for open-ended transfer*/
2395 send_stop_abort(host, data);
2399 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2400 prev_state,state,mmc_hostname(host->mmc));
2402 case STATE_SENDING_STOP:
2403 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2406 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2407 prev_state, state, mmc_hostname(host->mmc));
2409 /* CMD error in data command */
2410 if (host->mrq->cmd->error && host->mrq->data) {
2411 dw_mci_fifo_reset(host);
2415 host->data = NULL; */
2417 dw_mci_command_complete(host, host->mrq->stop);
2419 if (host->mrq->stop)
2420 dw_mci_command_complete(host, host->mrq->stop);
2422 host->cmd_status = 0;
2425 dw_mci_request_end(host, host->mrq);
2428 case STATE_DATA_ERROR:
2429 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2430 &host->pending_events))
2433 state = STATE_DATA_BUSY;
/* Stop iterating once a full pass makes no transition. */
2436 } while (state != prev_state);
2438 host->state = state;
2440 spin_unlock(&host->lock);
2444 /* push final bytes to part_buf, only use during push */
/* Stash the trailing sub-word bytes of a PIO write into host->part_buf. */
2445 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2447 memcpy((void *)&host->part_buf, buf, cnt);
2448 host->part_buf_count = cnt;
2451 /* append bytes to part_buf, only use during push */
/* Append up to one FIFO word's worth of bytes to part_buf; returns the
 * number actually consumed (visible lines omit the return statement). */
2452 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2454 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2455 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2456 host->part_buf_count += cnt;
2460 /* pull first bytes from part_buf, only use during pull */
/* Drain previously buffered bytes from part_buf into @buf; advances
 * part_buf_start/count and returns the byte count taken. */
2461 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2463 cnt = min(cnt, (int)host->part_buf_count);
2465 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2467 host->part_buf_count -= cnt;
2468 host->part_buf_start += cnt;
2473 /* pull final bytes from the part_buf, assuming it's just been filled */
/* Copy @cnt bytes out of a freshly-read FIFO word; the remainder of the
 * word stays in part_buf for the next pull. */
2474 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2476 memcpy(buf, &host->part_buf, cnt);
2477 host->part_buf_start = cnt;
2478 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO write path for a 16-bit FIFO: flush any partial word first, handle
 * unaligned buffers via a bounce buffer when the arch lacks efficient
 * unaligned access, then stream whole u16 words; a trailing odd byte is
 * parked in part_buf (and flushed if the transfer is complete).
 */
2481 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2483 struct mmc_data *data = host->data;
2486 /* try and push anything in the part_buf */
2487 if (unlikely(host->part_buf_count)) {
2488 int len = dw_mci_push_part_bytes(host, buf, cnt);
2491 if (host->part_buf_count == 2) {
2492 mci_writew(host, DATA(host->data_offset),
2494 host->part_buf_count = 0;
2497 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2498 if (unlikely((unsigned long)buf & 0x1)) {
2500 u16 aligned_buf[64];
/* Work in chunks of at most sizeof(aligned_buf), even byte counts only. */
2501 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2502 int items = len >> 1;
2504 /* memcpy from input buffer into aligned buffer */
2505 memcpy(aligned_buf, buf, len);
2508 /* push data from aligned buffer into fifo */
2509 for (i = 0; i < items; ++i)
2510 mci_writew(host, DATA(host->data_offset),
2517 for (; cnt >= 2; cnt -= 2)
2518 mci_writew(host, DATA(host->data_offset), *pdata++);
2521 /* put anything remaining in the part_buf */
2523 dw_mci_set_part_bytes(host, buf, cnt);
2524 /* Push data if we have reached the expected data length */
2525 if ((data->bytes_xfered + init_cnt) ==
2526 (data->blksz * data->blocks))
2527 mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit-wide data FIFO: read whole 16-bit words (via an
 * aligned bounce buffer when the destination is misaligned), then read one
 * more word and hand its leading byte(s) to the caller via
 * dw_mci_pull_final_bytes, leaving the rest buffered in part_buf16.
 */
2532 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2534 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2535 if (unlikely((unsigned long)buf & 0x1)) {
2537 /* pull data from fifo into aligned buffer */
2538 u16 aligned_buf[64];
2539 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2540 int items = len >> 1;
2542 for (i = 0; i < items; ++i)
2543 aligned_buf[i] = mci_readw(host,
2544 DATA(host->data_offset));
2545 /* memcpy from aligned buffer into output buffer */
2546 memcpy(buf, aligned_buf, len);
2554 for (; cnt >= 2; cnt -= 2)
2555 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* odd trailing byte: read a full word, keep the leftover in part_buf */
2559 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2560 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit-wide data FIFO. Same structure as
 * dw_mci_push_data16 with a 4-byte word: flush part_buf, bulk-write
 * aligned 32-bit words (bounce buffer for misaligned sources), then
 * buffer or force out the trailing 1-3 bytes.
 */
2564 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2566 struct mmc_data *data = host->data;
2569 /* try and push anything in the part_buf */
2570 if (unlikely(host->part_buf_count)) {
2571 int len = dw_mci_push_part_bytes(host, buf, cnt);
2574 if (host->part_buf_count == 4) {
2575 mci_writel(host, DATA(host->data_offset),
2577 host->part_buf_count = 0;
2580 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2581 if (unlikely((unsigned long)buf & 0x3)) {
2583 u32 aligned_buf[32];
2584 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2585 int items = len >> 2;
2587 /* memcpy from input buffer into aligned buffer */
2588 memcpy(aligned_buf, buf, len);
2591 /* push data from aligned buffer into fifo */
2592 for (i = 0; i < items; ++i)
2593 mci_writel(host, DATA(host->data_offset),
2600 for (; cnt >= 4; cnt -= 4)
2601 mci_writel(host, DATA(host->data_offset), *pdata++);
2604 /* put anything remaining in the part_buf */
2606 dw_mci_set_part_bytes(host, buf, cnt);
2607 /* Push data if we have reached the expected data length */
2608 if ((data->bytes_xfered + init_cnt) ==
2609 (data->blksz * data->blocks))
2610 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit-wide data FIFO. Mirror of dw_mci_pull_data16
 * with a 4-byte word; trailing 1-3 bytes come from one extra FIFO read
 * kept in part_buf32.
 */
2615 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2617 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2618 if (unlikely((unsigned long)buf & 0x3)) {
2620 /* pull data from fifo into aligned buffer */
2621 u32 aligned_buf[32];
2622 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2623 int items = len >> 2;
2625 for (i = 0; i < items; ++i)
2626 aligned_buf[i] = mci_readl(host,
2627 DATA(host->data_offset));
2628 /* memcpy from aligned buffer into output buffer */
2629 memcpy(buf, aligned_buf, len);
2637 for (; cnt >= 4; cnt -= 4)
2638 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* partial trailing word: read it and park the leftover in part_buf32 */
2642 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2643 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 64-bit-wide data FIFO. Same structure as the 16/32-bit
 * variants with an 8-byte word: flush part_buf, bulk-write aligned 64-bit
 * words (bounce buffer for misaligned sources), then buffer or force out
 * the trailing 1-7 bytes.
 */
2647 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2649 struct mmc_data *data = host->data;
2652 /* try and push anything in the part_buf */
2653 if (unlikely(host->part_buf_count)) {
2654 int len = dw_mci_push_part_bytes(host, buf, cnt);
2658 if (host->part_buf_count == 8) {
2659 mci_writeq(host, DATA(host->data_offset),
2661 host->part_buf_count = 0;
2664 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2665 if (unlikely((unsigned long)buf & 0x7)) {
2667 u64 aligned_buf[16];
2668 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2669 int items = len >> 3;
2671 /* memcpy from input buffer into aligned buffer */
2672 memcpy(aligned_buf, buf, len);
2675 /* push data from aligned buffer into fifo */
2676 for (i = 0; i < items; ++i)
2677 mci_writeq(host, DATA(host->data_offset),
2684 for (; cnt >= 8; cnt -= 8)
2685 mci_writeq(host, DATA(host->data_offset), *pdata++);
2688 /* put anything remaining in the part_buf */
2690 dw_mci_set_part_bytes(host, buf, cnt);
2691 /* Push data if we have reached the expected data length */
2692 if ((data->bytes_xfered + init_cnt) ==
2693 (data->blksz * data->blocks))
2694 mci_writeq(host, DATA(host->data_offset),
/*
 * PIO pull for a 64-bit-wide data FIFO. Mirror of the 16/32-bit pull
 * variants with an 8-byte word; the trailing partial word is read into
 * host->part_buf and split by dw_mci_pull_final_bytes.
 */
2699 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2701 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2702 if (unlikely((unsigned long)buf & 0x7)) {
2704 /* pull data from fifo into aligned buffer */
2705 u64 aligned_buf[16];
2706 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2707 int items = len >> 3;
2709 for (i = 0; i < items; ++i)
2710 aligned_buf[i] = mci_readq(host,
2711 DATA(host->data_offset));
2712 /* memcpy from aligned buffer into output buffer */
2713 memcpy(buf, aligned_buf, len);
2721 for (; cnt >= 8; cnt -= 8)
2722 *pdata++ = mci_readq(host, DATA(host->data_offset));
2726 host->part_buf = mci_readq(host, DATA(host->data_offset));
2727 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Common pull entry point: satisfy the request from part_buf first, and
 * only hit the FIFO (via the width-specific host->pull_data hook) for
 * whatever remains. Returns early when part_buf alone covered cnt.
 */
2731 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2735 /* get remaining partial bytes */
2736 len = dw_mci_pull_part_bytes(host, buf, cnt);
2737 if (unlikely(len == cnt))
2742 /* get the rest of the data */
2743 host->pull_data(host, buf, cnt);
/*
 * RXDR interrupt worker: drain the receive FIFO into the current request's
 * scatterlist using sg_miter. Loops while the RXDR status stays asserted,
 * and — when @dto signals Data-Transfer-Over — also until the FIFO count
 * drops to zero. Sets EVENT_XFER_COMPLETE once the scatterlist (or FIFO)
 * is exhausted. NOTE(review): gapped listing; error/done paths between the
 * visible lines are not shown.
 */
2746 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2748 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2750 unsigned int offset;
2751 struct mmc_data *data = host->data;
2752 int shift = host->data_shift;
2755 unsigned int remain, fcnt;
/* guard: host may already have been released (bus_refs dropped to 0) */
2757 if(!host->mmc->bus_refs){
2758 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2762 if (!sg_miter_next(sg_miter))
2765 host->sg = sg_miter->piter.sg;
2766 buf = sg_miter->addr;
2767 remain = sg_miter->length;
/* bytes available = FIFO fill level (in words, scaled) + buffered bytes */
2771 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2772 << shift) + host->part_buf_count;
2773 len = min(remain, fcnt);
2776 dw_mci_pull_data(host, (void *)(buf + offset), len);
2777 data->bytes_xfered += len;
2782 sg_miter->consumed = offset;
2783 status = mci_readl(host, MINTSTS);
2784 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2785 /* if the RXDR is ready read again */
2786 } while ((status & SDMMC_INT_RXDR) ||
2787 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2790 if (!sg_miter_next(sg_miter))
2792 sg_miter->consumed = 0;
2794 sg_miter_stop(sg_miter);
2798 sg_miter_stop(sg_miter);
2802 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * TXDR interrupt worker: feed the transmit FIFO from the current request's
 * scatterlist using sg_miter, looping while the TXDR status stays set.
 * Per-iteration write budget is the FIFO free space (depth minus fill
 * level, scaled to bytes) minus bytes already parked in part_buf. Sets
 * EVENT_XFER_COMPLETE when the scatterlist is exhausted.
 */
2805 static void dw_mci_write_data_pio(struct dw_mci *host)
2807 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2809 unsigned int offset;
2810 struct mmc_data *data = host->data;
2811 int shift = host->data_shift;
2814 unsigned int fifo_depth = host->fifo_depth;
2815 unsigned int remain, fcnt;
/* guard: host may already have been released (bus_refs dropped to 0) */
2817 if(!host->mmc->bus_refs){
2818 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2823 if (!sg_miter_next(sg_miter))
2826 host->sg = sg_miter->piter.sg;
2827 buf = sg_miter->addr;
2828 remain = sg_miter->length;
/* free FIFO bytes this pass, net of the buffered partial word */
2832 fcnt = ((fifo_depth -
2833 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2834 << shift) - host->part_buf_count;
2835 len = min(remain, fcnt);
2838 host->push_data(host, (void *)(buf + offset), len);
2839 data->bytes_xfered += len;
2844 sg_miter->consumed = offset;
2845 status = mci_readl(host, MINTSTS);
2846 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2847 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2850 if (!sg_miter_next(sg_miter))
2852 sg_miter->consumed = 0;
2854 sg_miter_stop(sg_miter);
2858 sg_miter_stop(sg_miter);
2862 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Record a command-phase interrupt status (first status wins so an earlier
 * error is not overwritten), flag EVENT_CMD_COMPLETE, and kick the request
 * tasklet to process it.
 */
2865 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2867 if (!host->cmd_status)
2868 host->cmd_status = status;
2875 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2876 tasklet_schedule(&host->tasklet);
/*
 * Top-level interrupt handler. Reads the masked interrupt status
 * (MINTSTS), acknowledges each handled source by writing RINTSTS, and
 * dispatches: command/data errors to the tasklet, RXDR/TXDR to the PIO
 * workers, card-detect to the card workqueue, per-slot SDIO interrupts to
 * the MMC core, and (for SoCs using the internal IDMAC) DMA completion to
 * the dma_ops. NOTE(review): gapped listing — the local declarations and
 * the IRQ_HANDLED return are outside the visible lines.
 */
2879 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2881 struct dw_mci *host = dev_id;
2882 u32 pending, sdio_int;
2885 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2888 * DTO fix - version 2.10a and below, and only if internal DMA
2891 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2893 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2894 pending |= SDMMC_INT_DATA_OVER;
2898 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2899 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2900 host->cmd_status = pending;
2902 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2903 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2905 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2908 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2909 /* if there is an error report DATA_ERROR */
2910 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2911 host->data_status = pending;
2913 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2915 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2916 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2917 tasklet_schedule(&host->tasklet);
2920 if (pending & SDMMC_INT_DATA_OVER) {
2921 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2922 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2923 if (!host->data_status)
2924 host->data_status = pending;
/* on receive, drain whatever is still in the FIFO before completing */
2926 if (host->dir_status == DW_MCI_RECV_STATUS) {
2927 if (host->sg != NULL)
2928 dw_mci_read_data_pio(host, true);
2930 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2931 tasklet_schedule(&host->tasklet);
2934 if (pending & SDMMC_INT_RXDR) {
2935 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2936 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2937 dw_mci_read_data_pio(host, false);
2940 if (pending & SDMMC_INT_TXDR) {
2941 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2942 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2943 dw_mci_write_data_pio(host);
2946 if (pending & SDMMC_INT_VSI) {
2947 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2948 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2949 dw_mci_cmd_interrupt(host, pending);
2952 if (pending & SDMMC_INT_CMD_DONE) {
2953 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2954 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2955 dw_mci_cmd_interrupt(host, pending);
2958 if (pending & SDMMC_INT_CD) {
2959 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2960 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
/* hold a wake lock so the detect work can run before suspend */
2961 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2962 queue_work(host->card_workqueue, &host->card_work);
2965 if (pending & SDMMC_INT_HLE) {
2966 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2967 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2971 /* Handle SDIO Interrupts */
2972 for (i = 0; i < host->num_slots; i++) {
2973 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in IP version 2.40a and later */
2975 if (host->verid < DW_MMC_240A)
2976 sdio_int = SDMMC_INT_SDIO(i);
2978 sdio_int = SDMMC_INT_SDIO(i + 8);
2980 if (pending & sdio_int) {
2981 mci_writel(host, RINTSTS, sdio_int);
2982 mmc_signal_sdio_irq(slot->mmc);
2988 #ifdef CONFIG_MMC_DW_IDMAC
2989 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2990 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2991 /* Handle DMA interrupts */
2992 pending = mci_readl(host, IDSTS);
2993 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2994 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2995 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2996 host->dma_ops->complete((void *)host);
/*
 * Card-detect workqueue handler. For each slot: re-read card presence,
 * optionally switch the pinctrl between SD and uart-debug functions, and
 * on a presence change reset the controller, fail any in-flight or queued
 * request with -ENOMEDIUM, and finally notify the MMC core via
 * mmc_detect_change(). The while loop re-samples presence so a change
 * that races with the handler is not lost.
 */
3004 static void dw_mci_work_routine_card(struct work_struct *work)
3006 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
3009 for (i = 0; i < host->num_slots; i++) {
3010 struct dw_mci_slot *slot = host->slot[i];
3011 struct mmc_host *mmc = slot->mmc;
3012 struct mmc_request *mrq;
3015 present = dw_mci_get_cd(mmc);
3017 /* Card insert, switch data line to uart function, and vice verse.
3018 eONLY audi chip need switched by software, using udbg tag in dts!
3020 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
3022 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3023 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3024 mmc_hostname(host->mmc));
3026 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3027 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3028 mmc_hostname(host->mmc));
3032 while (present != slot->last_detect_state) {
3033 dev_dbg(&slot->mmc->class_dev, "card %s\n",
3034 present ? "inserted" : "removed");
3035 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
3036 present ? "inserted" : "removed.", mmc_hostname(mmc));
3038 dw_mci_ctrl_all_reset(host);
3039 /* Stop edma when rountine card triggered */
3040 if(cpu_is_rk3036() || cpu_is_rk312x())
3041 if(host->dma_ops && host->dma_ops->stop)
3042 host->dma_ops->stop(host);
3043 rk_send_wakeup_key();//wake up system
3044 spin_lock_bh(&host->lock);
3046 /* Card change detected */
3047 slot->last_detect_state = present;
3049 /* Clean up queue if present */
3052 if (mrq == host->mrq) {
/* active request: fail it according to the state machine phase */
3056 switch (host->state) {
3059 case STATE_SENDING_CMD:
3060 mrq->cmd->error = -ENOMEDIUM;
3064 case STATE_SENDING_DATA:
3065 mrq->data->error = -ENOMEDIUM;
3066 dw_mci_stop_dma(host);
3068 case STATE_DATA_BUSY:
3069 case STATE_DATA_ERROR:
3070 if (mrq->data->error == -EINPROGRESS)
3071 mrq->data->error = -ENOMEDIUM;
3075 case STATE_SENDING_STOP:
3076 mrq->stop->error = -ENOMEDIUM;
3080 dw_mci_request_end(host, mrq);
/* queued (not yet started) request: fail every phase outright */
3082 list_del(&slot->queue_node);
3083 mrq->cmd->error = -ENOMEDIUM;
3085 mrq->data->error = -ENOMEDIUM;
3087 mrq->stop->error = -ENOMEDIUM;
3089 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3090 mrq->cmd->opcode, mmc_hostname(mmc));
3092 spin_unlock(&host->lock);
3093 mmc_request_done(slot->mmc, mrq);
3094 spin_lock(&host->lock);
3098 /* Power down slot */
3100 /* Clear down the FIFO */
3101 dw_mci_fifo_reset(host);
3102 #ifdef CONFIG_MMC_DW_IDMAC
3103 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3104 dw_mci_idmac_reset(host);
3109 spin_unlock_bh(&host->lock);
/* re-sample in case presence flipped again while we were working */
3111 present = dw_mci_get_cd(mmc);
3114 mmc_detect_change(slot->mmc,
3115 msecs_to_jiffies(host->pdata->detect_delay_ms));
3120 /* given a slot id, find out the device node representing that slot */
/*
 * Walk the controller's DT children and match the child whose "reg"
 * property equals @slot. Returns NULL (outside the visible lines,
 * presumably) when the device has no of_node or no child matches.
 */
3121 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3123 struct device_node *np;
3127 if (!dev || !dev->of_node)
3130 for_each_child_of_node(dev->of_node, np) {
3131 addr = of_get_property(np, "reg", &len);
3132 if (!addr || (len < sizeof(int)))
3134 if (be32_to_cpup(addr) == slot)
/* Mapping of per-slot DT property names to driver quirk flags. */
3140 static struct dw_mci_of_slot_quirks {
3143 } of_slot_quirks[] = {
3145 .quirk = "disable-wp",
3146 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Accumulate the quirk flags whose DT properties are present on the
 * slot's device node, using the of_slot_quirks table above.
 */
3150 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3152 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3157 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3158 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3159 quirks |= of_slot_quirks[idx].id;
3164 /* find out bus-width for a given slot */
/*
 * Read the "bus-width" DT property from the controller node (note: reads
 * dev->of_node directly, not the per-slot child node). Logs an error and
 * presumably falls back to a default width when the property is absent.
 */
3165 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3167 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3173 if (of_property_read_u32(np, "bus-width", &bus_wd))
3174 dev_err(dev, "bus-width property not found, assuming width"
3180 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Look up "pwr-gpios" on the controller node, request it, and drive it
 * low (power-enable asserted). A missing property is not an error.
 */
3181 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3183 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3189 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3191 /* Having a missing entry is valid; return silently */
3192 if (!gpio_is_valid(gpio))
3195 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3196 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3200 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3206 /* find the write protect gpio for a given slot; or -1 if none specified */
/*
 * Look up and request the "wp-gpios" pin from the slot's DT node.
 * A missing property is not an error.
 */
3207 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3209 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3215 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3217 /* Having a missing entry is valid; return silently */
3218 if (!gpio_is_valid(gpio))
3221 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3222 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3229 /* find the cd gpio for a given slot */
/*
 * Look up "cd-gpios" on the controller node and register it with the MMC
 * core's slot-gpio card-detect helper. A missing property is not an error.
 */
3230 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3231 struct mmc_host *mmc)
3233 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3239 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3241 /* Having a missing entry is valid; return silently */
3242 if (!gpio_is_valid(gpio))
3245 if (mmc_gpio_request_cd(mmc, gpio, 0))
3246 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * Threaded IRQ handler for the dedicated card-detect GPIO. Flips the
 * trigger polarity to catch the opposite edge next time, wakes the
 * system, and queues the card-detect work — unless rescanning was
 * disabled by the PM notifier, in which case the suspend-post path is
 * expected to handle the change.
 */
3249 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3251 struct mmc_host *mmc = dev_id;
3252 struct dw_mci_slot *slot = mmc_priv(mmc);
3253 struct dw_mci *host = slot->host;
3254 int gpio_cd = slot->cd_gpio;
/* re-arm for the opposite level so both insert and remove are seen */
3256 (gpio_get_value(gpio_cd) == 0) ?
3257 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
3258 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
3260 /* wakeup system whether gpio debounce or not */
3261 rk_send_wakeup_key();
3263 /* no need to trigger detect flow when rescan is disabled.
3264 This case happended in dpm, that we just wakeup system and
3265 let suspend_post notify callback handle it.
3267 if(mmc->rescan_disable == 0)
3268 queue_work(host->card_workqueue, &host->card_work);
3270 printk("%s: rescan been disabled!\n", __FUNCTION__);
/*
 * Convert the card-detect GPIO to an IRQ, install the threaded handler
 * above (initially level-low triggered), and mark the IRQ wakeup-capable
 * so card insertion can wake the system from suspend.
 */
3275 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3276 struct mmc_host *mmc)
3278 struct dw_mci_slot *slot = mmc_priv(mmc);
3279 struct dw_mci *host = slot->host;
3283 /* Having a missing entry is valid; return silently */
3284 if (!gpio_is_valid(gpio))
3287 irq = gpio_to_irq(gpio);
3289 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3290 NULL, dw_mci_gpio_cd_irqt,
3291 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
3295 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3297 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3298 enable_irq_wake(irq);
3301 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/*
 * Tear down the card-detect GPIO IRQ installed by
 * dw_mci_of_set_cd_gpio_irq: free the IRQ and release the GPIO.
 */
3305 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3306 struct mmc_host *mmc)
3308 if (!gpio_is_valid(gpio))
3311 if (gpio_to_irq(gpio) >= 0) {
3312 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3313 devm_gpio_free(&mmc->class_dev, gpio);
3316 #else /* CONFIG_OF */
/*
 * !CONFIG_OF stubs: no device tree support, so the OF lookup helpers
 * degrade to no-ops (return values are on lines outside this gapped
 * listing — presumably 0/NULL/defaults).
 */
3317 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3321 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3325 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3329 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3333 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3334 struct mmc_host *mmc)
3338 #endif /* CONFIG_OF */
3340 /* @host: dw_mci host prvdata
3341 * Init pinctrl for each platform. Usually we assign
3342 * "defalut" tag for functional usage, "idle" tag for gpio
3343 * state and "udbg" tag for uart_dbg if any.
/*
 * Looks up the "idle", "default" and (SD only) "udbg" pinctrl states and
 * applies idle then default; if an SD slot has a udbg state and no card
 * is present, the pins are handed to the uart-debug function instead.
 * eMMC pins are deliberately left untouched.
 */
3345 static void dw_mci_init_pinctrl(struct dw_mci *host)
3347 /* Fixme: DON'T TOUCH EMMC SETTING! */
3348 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3351 /* Get pinctrl for DTS */
3352 host->pinctrl = devm_pinctrl_get(host->dev);
3353 if (IS_ERR(host->pinctrl)) {
3354 dev_err(host->dev, "%s: No pinctrl used!\n",
3355 mmc_hostname(host->mmc));
3359 /* Lookup idle state */
3360 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3361 PINCTRL_STATE_IDLE);
3362 if (IS_ERR(host->pins_idle)) {
3363 dev_err(host->dev, "%s: No idle tag found!\n",
3364 mmc_hostname(host->mmc));
3366 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3367 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3368 mmc_hostname(host->mmc));
3371 /* Lookup default state */
3372 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3373 PINCTRL_STATE_DEFAULT);
3374 if (IS_ERR(host->pins_default)) {
3375 dev_err(host->dev, "%s: No default pinctrl found!\n",
3376 mmc_hostname(host->mmc));
3378 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3379 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3380 mmc_hostname(host->mmc));
3383 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3384 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3385 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3386 if (IS_ERR(host->pins_udbg)) {
3387 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3388 mmc_hostname(host->mmc));
/* no card inserted: route the shared pins to uart debug */
3390 if (!dw_mci_get_cd(host->mmc))
3391 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3392 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3393 mmc_hostname(host->mmc));
/*
 * System PM notifier (registered for SD slots in dw_mci_init_slot).
 * On suspend-prepare: disable card rescanning and cancel any pending
 * detect work (dropping its wake lock). On resume/post paths: re-enable
 * rescanning and schedule a detect so a card swapped while asleep is
 * noticed.
 */
3398 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3399 unsigned long mode, void *unused)
3401 struct mmc_host *host = container_of(
3402 notify_block, struct mmc_host, pm_notify);
3403 unsigned long flags;
3406 case PM_HIBERNATION_PREPARE:
3407 case PM_SUSPEND_PREPARE:
3408 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3409 spin_lock_irqsave(&host->lock, flags);
3410 host->rescan_disable = 1;
3411 spin_unlock_irqrestore(&host->lock, flags);
3412 if (cancel_delayed_work(&host->detect))
3413 wake_unlock(&host->detect_wake_lock);
3416 case PM_POST_SUSPEND:
3417 case PM_POST_HIBERNATION:
3418 case PM_POST_RESTORE:
3419 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3420 spin_lock_irqsave(&host->lock, flags);
3421 host->rescan_disable = 0;
3422 spin_unlock_irqrestore(&host->lock, flags);
3423 mmc_detect_change(host, 10);
/*
 * Allocate and register the mmc_host for slot @id: read DT restrictions
 * (supports-sd/sdio/emmc), clock range, capabilities, bus width and block
 * limits; set up the PM notifier (SD), SoC GRF quirks, card-detect GPIO
 * IRQ (low-end SoCs), OCR mask, power/WP GPIOs, vmmc regulator, pinctrl,
 * and finally mmc_add_host(). Error-unwind labels are at the bottom.
 * NOTE(review): gapped listing — several declarations, else-branches and
 * returns fall between the visible lines.
 */
3429 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3431 struct mmc_host *mmc;
3432 struct dw_mci_slot *slot;
3433 const struct dw_mci_drv_data *drv_data = host->drv_data;
3438 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3442 slot = mmc_priv(mmc);
3446 host->slot[id] = slot;
3449 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3451 mmc->ops = &dw_mci_ops;
/* clock range: DT "clock-freq-min-max" or driver defaults */
3453 if (of_property_read_u32_array(host->dev->of_node,
3454 "clock-freq-min-max", freq, 2)) {
3455 mmc->f_min = DW_MCI_FREQ_MIN;
3456 mmc->f_max = DW_MCI_FREQ_MAX;
3458 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3459 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3461 mmc->f_min = freq[0];
3462 mmc->f_max = freq[1];
3464 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3465 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3468 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
3470 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3471 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3472 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3473 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3474 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3475 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
/* removable SD slots track system suspend via the PM notifier */
3477 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3478 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3479 if (register_pm_notifier(&mmc->pm_notify)) {
3480 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3481 goto err_pm_notifier;
/* SoC-specific GRF tweak: stop JTAG from stealing the card pins */
3485 if (host->cid == DW_MCI_TYPE_RK3368) {
3486 if (IS_ERR(host->grf))
3487 pr_err("rk_sdmmc: dts couldn't find grf regmap for 3368\n");
3489 /* Disable force_jtag */
3490 regmap_write(host->grf, 0x43c, (1<<13)<<16 | (0 << 13));
3491 } else if (cpu_is_rk3288()) {
3492 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
3496 /* We assume only low-level chip use gpio_cd */
3497 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
3498 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3499 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3500 if (gpio_is_valid(slot->cd_gpio)) {
3501 /* Request gpio int for card detection */
3502 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3504 slot->cd_gpio = -ENODEV;
3505 dev_err(host->dev, "failed to get your cd-gpios!\n");
3509 if (host->pdata->get_ocr)
3510 mmc->ocr_avail = host->pdata->get_ocr(id);
/* default OCR: advertise the full 1.65-3.6V range */
3513 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3514 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3515 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3516 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3520 * Start with slot power disabled, it will be enabled when a card
3523 if (host->pdata->setpower)
3524 host->pdata->setpower(id, 0);
3526 if (host->pdata->caps)
3527 mmc->caps = host->pdata->caps;
3529 if (host->pdata->pm_caps)
3530 mmc->pm_caps = host->pdata->pm_caps;
3532 if (host->dev->of_node) {
3533 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3537 ctrl_id = to_platform_device(host->dev)->id;
3539 if (drv_data && drv_data->caps)
3540 mmc->caps |= drv_data->caps[ctrl_id];
3541 if (drv_data && drv_data->hold_reg_flag)
3542 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3544 /* set the compatibility of driver. */
3545 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3546 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3548 if (host->pdata->caps2)
3549 mmc->caps2 = host->pdata->caps2;
3551 if (host->pdata->get_bus_wd)
3552 bus_width = host->pdata->get_bus_wd(slot->id);
3553 else if (host->dev->of_node)
3554 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3558 switch (bus_width) {
3560 mmc->caps |= MMC_CAP_8_BIT_DATA;
3562 mmc->caps |= MMC_CAP_4_BIT_DATA;
3565 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3566 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3567 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3568 mmc->caps |= MMC_CAP_SDIO_IRQ;
3569 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3570 mmc->caps |= MMC_CAP_HW_RESET;
3571 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3572 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3573 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3574 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3575 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3576 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3577 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3578 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3580 /*Assign pm_caps pass to pm_flags*/
3581 mmc->pm_flags = mmc->pm_caps;
/* block/segment limits: platform data, else IDMAC/PIO defaults */
3583 if (host->pdata->blk_settings) {
3584 mmc->max_segs = host->pdata->blk_settings->max_segs;
3585 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3586 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3587 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3588 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3590 /* Useful defaults if platform data is unset. */
3591 #ifdef CONFIG_MMC_DW_IDMAC
3592 mmc->max_segs = host->ring_size;
3593 mmc->max_blk_size = 65536;
3594 mmc->max_blk_count = host->ring_size;
3595 mmc->max_seg_size = 0x1000;
3596 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3597 if(cpu_is_rk3036() || cpu_is_rk312x()){
3598 /* fixup for external dmac setting */
3600 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3601 mmc->max_blk_count = 65535;
3602 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3603 mmc->max_seg_size = mmc->max_req_size;
3607 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3608 mmc->max_blk_count = 512;
3609 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3610 mmc->max_seg_size = mmc->max_req_size;
3611 #endif /* CONFIG_MMC_DW_IDMAC */
3615 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3617 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* only the SD slot gets a vmmc supply; missing regulator is tolerated */
3622 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3623 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3627 if (IS_ERR(host->vmmc)) {
3628 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3631 ret = regulator_enable(host->vmmc);
3634 "failed to enable regulator: %d\n", ret);
3641 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3643 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3644 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3646 dw_mci_init_pinctrl(host);
3647 ret = mmc_add_host(mmc);
3651 #if defined(CONFIG_DEBUG_FS)
3652 dw_mci_init_debugfs(slot);
3655 /* Card initially undetected */
3656 slot->last_detect_state = 1;
/* error unwind: undo PM notifier and cd-gpio IRQ registration */
3660 unregister_pm_notifier(&mmc->pm_notify);
3663 if (gpio_is_valid(slot->cd_gpio))
3664 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * Undo dw_mci_init_slot for slot @id: run the platform exit hook,
 * unregister the mmc_host, clear the slot pointer, and free the host.
 */
3669 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3671 /* Shutdown detect IRQ */
3672 if (slot->host->pdata->exit)
3673 slot->host->pdata->exit(id);
3675 /* Debugfs stuff is cleaned up by mmc core */
3676 mmc_remove_host(slot->mmc);
3677 slot->host->slot[id] = NULL;
3678 mmc_free_host(slot->mmc);
/*
 * Set up DMA for the host: allocate one page of coherent memory for the
 * descriptor/sg area, select internal IDMAC ops (or the external edmac
 * ops on rk3036/rk312x), and call the selected ops' init hook. Falls
 * back to PIO mode if anything is missing or init fails.
 */
3681 static void dw_mci_init_dma(struct dw_mci *host)
3683 /* Alloc memory for sg translation */
3684 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3685 &host->sg_dma, GFP_KERNEL);
3686 if (!host->sg_cpu) {
3687 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3692 memset(host->sg_cpu, 0, PAGE_SIZE);
3695 /* Determine which DMA interface to use */
3696 #if defined(CONFIG_MMC_DW_IDMAC)
3697 if(cpu_is_rk3036() || cpu_is_rk312x()){
3698 host->dma_ops = &dw_mci_edmac_ops;
3699 dev_info(host->dev, "Using external DMA controller.\n");
3701 host->dma_ops = &dw_mci_idmac_ops;
3702 dev_info(host->dev, "Using internal DMA controller.\n");
/* require the full ops set before trusting the DMA path */
3709 if (host->dma_ops->init && host->dma_ops->start &&
3710 host->dma_ops->stop && host->dma_ops->cleanup) {
3711 if (host->dma_ops->init(host)) {
3712 dev_err(host->dev, "%s: Unable to initialize "
3713 "DMA Controller.\n", __func__);
3717 dev_err(host->dev, "DMA initialization not found.\n");
3725 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Assert the given reset bit(s) in the CTRL register and poll (up to
 * 500 ms) for the hardware to clear them. Presumably returns true on
 * success, false after logging the timeout — returns are outside the
 * visible lines.
 */
3730 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3732 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3735 ctrl = mci_readl(host, CTRL);
3737 mci_writel(host, CTRL, ctrl);
3739 /* wait till resets clear */
3741 ctrl = mci_readl(host, CTRL);
3742 if (!(ctrl & reset))
3744 } while (time_before(jiffies, timeout));
3747 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * Reset only the data FIFO, stopping the sg_miter first because the
 * reset raises an interrupt while the scatter-gather pointer is stale.
 */
3753 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3756 * Reseting generates a block interrupt, hence setting
3757 * the scatter-gather pointer to NULL.
3760 sg_miter_stop(&host->sg_miter);
3764 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full controller reset: FIFO, controller and DMA reset bits together. */
3767 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3769 return dw_mci_ctrl_reset(host,
3770 SDMMC_CTRL_FIFO_RESET |
3772 SDMMC_CTRL_DMA_RESET);
/* Mapping of controller-level DT property names to host quirk flags. */
3777 static struct dw_mci_of_quirks {
3782 .quirk = "broken-cd",
3783 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board platform-data structure from the device tree:
 * slot count, quirk flags, FIFO depth, detect delay, bus clock, PM caps,
 * speed-mode caps (highspeed/SDR104/DDR/HS200) and CD polarity. Also
 * invokes the SoC-specific drv_data->parse_dt hook. Returns ERR_PTR on
 * allocation or hook failure.
 */
3787 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3789 struct dw_mci_board *pdata;
3790 struct device *dev = host->dev;
3791 struct device_node *np = dev->of_node;
3792 const struct dw_mci_drv_data *drv_data = host->drv_data;
3794 u32 clock_frequency;
3796 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3798 dev_err(dev, "could not allocate memory for pdata\n");
3799 return ERR_PTR(-ENOMEM);
3802 /* find out number of slots supported */
3803 if (of_property_read_u32(dev->of_node, "num-slots",
3804 &pdata->num_slots)) {
3805 dev_info(dev, "num-slots property not found, "
3806 "assuming 1 slot is available\n");
3807 pdata->num_slots = 1;
3811 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3812 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3813 pdata->quirks |= of_quirks[idx].id;
3816 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3817 dev_info(dev, "fifo-depth property not found, using "
3818 "value of FIFOTH register as default\n")
3820 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3822 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3823 pdata->bus_hz = clock_frequency;
3825 if (drv_data && drv_data->parse_dt) {
3826 ret = drv_data->parse_dt(host);
3828 return ERR_PTR(ret);
3831 if (of_find_property(np, "keep-power-in-suspend", NULL))
3832 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3834 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3835 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3837 if (of_find_property(np, "supports-highspeed", NULL))
3838 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3840 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3841 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3843 if (of_find_property(np, "supports-DDR_MODE", NULL))
3844 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3846 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3847 pdata->caps2 |= MMC_CAP2_HS200;
3849 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3850 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3852 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3853 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3855 if (of_get_property(np, "cd-inverted", NULL))
3856 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3857 if (of_get_property(np, "bootpart-no-access", NULL))
3858 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3863 #else /* CONFIG_OF */
3864 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3866 return ERR_PTR(-EINVAL);
3868 #endif /* CONFIG_OF */
/*
 * dw_mci_probe() - one-time controller bring-up.
 *
 * Parses platform data (DT), reads the IP version to pick the DATA register
 * offset, acquires and enables the clocks, programs the clock rate, detects
 * the host data width from HCON, resets all blocks, sets up DMA, interrupts,
 * FIFO thresholds, the tasklet/workqueue, and initializes each slot.
 * Returns 0 on success or a negative errno; later error paths unwind the
 * earlier steps (workqueue, DMA, regulator, clocks).
 *
 * NOTE(review): many original lines (braces, gotos, labels, else branches)
 * appear elided by extraction; visible logic is annotated as-is.
 */
3870 int dw_mci_probe(struct dw_mci *host)
3872 const struct dw_mci_drv_data *drv_data = host->drv_data;
3873 int width, i, ret = 0;
/* platform data comes from DT; ERR_PTR means probe cannot continue */
3879 host->pdata = dw_mci_parse_dt(host);
3880 if (IS_ERR(host->pdata)) {
3881 dev_err(host->dev, "platform data not available\n");
3886 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3888 "Platform data must supply select_slot function\n");
3893 * In 2.40a spec, Data offset is changed.
3894 * Need to check the version-id and set data-offset for DATA register.
3896 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3897 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3899 if (host->verid < DW_MMC_240A)
3900 host->data_offset = DATA_OFFSET;
3902 host->data_offset = DATA_240A_OFFSET;
/* clock tree: hpclk (optional?), hclk (AHB), clk_mmc (CIU source) */
3905 host->hpclk_mmc= devm_clk_get(host->dev, "hpclk_mmc");
3906 if (IS_ERR(host->hpclk_mmc)) {
3907 dev_err(host->dev, "failed to get hpclk_mmc\n");
3909 clk_prepare_enable(host->hpclk_mmc);
3913 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3914 if (IS_ERR(host->hclk_mmc)) {
3915 dev_err(host->dev, "failed to get hclk_mmc\n");
3916 ret = PTR_ERR(host->hclk_mmc);
3920 clk_prepare_enable(host->hclk_mmc);
3923 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3924 if (IS_ERR(host->clk_mmc)) {
3925 dev_err(host->dev, "failed to get clk mmc_per\n");
3926 ret = PTR_ERR(host->clk_mmc);
3930 host->bus_hz = host->pdata->bus_hz;
3931 if (!host->bus_hz) {
3932 dev_err(host->dev,"Platform data must supply bus speed\n");
/* pre-2.40a: rate == bus_hz; newer IP compensates a fixed /2 divider */
3937 if (host->verid < DW_MMC_240A)
3938 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3940 //rockchip: fix divider 2 in clksum before controlller
3941 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3944 dev_err(host->dev, "failed to set clk mmc\n");
3947 clk_prepare_enable(host->clk_mmc);
/* SoC-variant hook for implementation-specific clock setup */
3949 if (drv_data && drv_data->setup_clock) {
3950 ret = drv_data->setup_clock(host);
3953 "implementation specific clock setup failed\n");
3958 host->quirks = host->pdata->quirks;
3959 host->irq_state = true;
3960 host->set_speed = 0;
3962 host->svi_flags = 0;
3964 spin_lock_init(&host->lock);
3965 spin_lock_init(&host->slock);
3967 INIT_LIST_HEAD(&host->queue);
3969 * Get the host data width - this assumes that HCON has been set with
3970 * the correct values.
3972 i = (mci_readl(host, HCON) >> 7) & 0x7;
/* select 16/32/64-bit FIFO accessors; data_shift = log2(bytes per word) */
3974 host->push_data = dw_mci_push_data16;
3975 host->pull_data = dw_mci_pull_data16;
3977 host->data_shift = 1;
3978 } else if (i == 2) {
3979 host->push_data = dw_mci_push_data64;
3980 host->pull_data = dw_mci_pull_data64;
3982 host->data_shift = 3;
3984 /* Check for a reserved value, and warn if it is */
3986 "HCON reports a reserved host data width!\n"
3987 "Defaulting to 32-bit access.\n");
3988 host->push_data = dw_mci_push_data32;
3989 host->pull_data = dw_mci_pull_data32;
3991 host->data_shift = 2;
3994 /* Reset all blocks */
3995 if (!dw_mci_ctrl_all_reset(host))
3998 host->dma_ops = host->pdata->dma_ops;
3999 dw_mci_init_dma(host);
4001 /* Clear the interrupts for the host controller */
4002 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4003 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4005 /* Put in max timeout */
4006 mci_writel(host, TMOUT, 0xFFFFFFFF);
4009 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
4010 * Tx Mark = fifo_size / 2 DMA Size = 8
4012 if (!host->pdata->fifo_depth) {
4014 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
4015 * have been overwritten by the bootloader, just like we're
4016 * about to do, so if you know the value for your hardware, you
4017 * should put it in the platform data.
4019 fifo_size = mci_readl(host, FIFOTH);
4020 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
4022 fifo_size = host->pdata->fifo_depth;
4024 host->fifo_depth = fifo_size;
4026 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
4027 mci_writel(host, FIFOTH, host->fifoth_val);
4029 /* disable clock to CIU */
4030 mci_writel(host, CLKENA, 0);
4031 mci_writel(host, CLKSRC, 0);
/* deferred-work machinery: request/IRQ tasklet + card-detect workqueue */
4033 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
4034 host->card_workqueue = alloc_workqueue("dw-mci-card",
4035 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
4036 if (!host->card_workqueue) {
4040 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
4041 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
4042 host->irq_flags, "dw-mci", host);
/* slot count: platform data wins, otherwise read it from HCON */
4046 if (host->pdata->num_slots)
4047 host->num_slots = host->pdata->num_slots;
4049 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
4051 /* We need at least one slot to succeed */
4052 for (i = 0; i < host->num_slots; i++) {
4053 ret = dw_mci_init_slot(host, i);
4055 dev_dbg(host->dev, "slot %d init failed\n", i);
4061 * Enable interrupts for command done, data over, data empty, card det,
4062 * receive ready and error such as transmit, receive timeout, crc error
4064 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4065 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
4066 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* card-detect interrupt only for removable (non-SDIO, non-eMMC) hosts */
4067 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
4068 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
4069 regs |= SDMMC_INT_CD;
4071 mci_writel(host, INTMASK, regs);
4073 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4075 dev_info(host->dev, "DW MMC controller at irq %d, "
4076 "%d bit host data width, "
4078 host->irq, width, fifo_size);
4081 dev_info(host->dev, "%d slots initialized\n", init_slots);
4083 dev_dbg(host->dev, "attempted to initialize %d slots, "
4084 "but failed on all\n", host->num_slots);
4089 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4090 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* error unwind: workqueue, DMA, regulator, then clocks in reverse order */
4095 destroy_workqueue(host->card_workqueue);
4098 if (host->use_dma && host->dma_ops->exit)
4099 host->dma_ops->exit(host);
4102 regulator_disable(host->vmmc);
4103 regulator_put(host->vmmc);
4107 if (!IS_ERR(host->clk_mmc))
4108 clk_disable_unprepare(host->clk_mmc);
4110 if (!IS_ERR(host->hclk_mmc))
4111 clk_disable_unprepare(host->hclk_mmc);
4114 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove() - tear the controller down, mirroring dw_mci_probe().
 *
 * Masks and clears all interrupts, cleans up each slot, gates the CIU
 * clock, destroys the card workqueue, exits DMA, frees the card-detect
 * GPIO IRQ, and releases the regulator and clocks.
 *
 * NOTE(review): some original lines (braces, blank lines) appear elided
 * by extraction; visible logic is annotated as-is.
 */
4116 void dw_mci_remove(struct dw_mci *host)
4118 struct mmc_host *mmc = host->mmc;
4119 struct dw_mci_slot *slot = mmc_priv(mmc);
/* silence the controller before dismantling anything */
4122 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4123 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4125 for(i = 0; i < host->num_slots; i++){
4126 dev_dbg(host->dev, "remove slot %d\n", i);
4128 dw_mci_cleanup_slot(host->slot[i], i);
4131 /* disable clock to CIU */
4132 mci_writel(host, CLKENA, 0);
4133 mci_writel(host, CLKSRC, 0);
4135 destroy_workqueue(host->card_workqueue);
/* SD hosts registered a PM notifier in probe/init; drop it here */
4136 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4137 unregister_pm_notifier(&host->mmc->pm_notify);
4139 if (host->use_dma && host->dma_ops->exit)
4140 host->dma_ops->exit(host);
4142 if (gpio_is_valid(slot->cd_gpio))
4143 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4146 regulator_disable(host->vmmc);
4147 regulator_put(host->vmmc);
/* release clocks in reverse order of probe's enables */
4149 if (!IS_ERR(host->clk_mmc))
4150 clk_disable_unprepare(host->clk_mmc);
4152 if (!IS_ERR(host->hclk_mmc))
4153 clk_disable_unprepare(host->hclk_mmc);
4154 if (!IS_ERR(host->hpclk_mmc))
4155 clk_disable_unprepare(host->hpclk_mmc);
4157 EXPORT_SYMBOL(dw_mci_remove);
4161 #ifdef CONFIG_PM_SLEEP
4163 * TODO: we should probably disable the clock to the card in the suspend path.
4165 extern int get_wifi_chip_type(void);
/*
 * dw_mci_suspend() - system-suspend hook.
 *
 * Certain SDIO Wi-Fi modules (ESP8089 and parts above the AP6XXX series)
 * keep power across suspend and are skipped here.  For the SD controller
 * the IRQ is disabled, pins go to their idle state, the controller is
 * quiesced, and (except on SoCs already using gpio_cd) the card-detect
 * GPIO IRQ is armed as a wakeup source.
 *
 * NOTE(review): some original lines (early returns, braces) appear elided
 * by extraction; visible logic is annotated as-is.
 */
4166 int dw_mci_suspend(struct dw_mci *host)
4168 int present = dw_mci_get_cd(host->mmc);
/* skip suspend handling for always-powered SDIO Wi-Fi chips */
4170 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4171 (get_wifi_chip_type() == WIFI_ESP8089 || get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
4175 regulator_disable(host->vmmc);
4177 /*only for sdmmc controller*/
4178 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4179 disable_irq(host->irq);
4181 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4182 MMC_DBG_ERR_FUNC(host->mmc,
4183 "Idle pinctrl setting failed! [%s]",
4184 mmc_hostname(host->mmc));
/* quiesce the controller: clear/mask interrupts, disable CTRL */
4187 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4188 mci_writel(host, INTMASK, 0x00);
4189 mci_writel(host, CTRL, 0x00);
4191 /* Soc rk3126/3036 already in gpio_cd mode */
4192 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4193 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4194 enable_irq_wake(host->mmc->slot.cd_irq);
4199 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume() - system-resume hook, undoing dw_mci_suspend().
 *
 * Skips the same always-powered SDIO Wi-Fi chips, restores pinctrl state
 * (with a special udbg path for the SD controller), reprograms SoC GRF
 * bits on RK3288/RK3036/RK312x, re-enables the regulator, resets the
 * controller, re-initializes DMA, restores FIFOTH/TMOUT/INTMASK, and for
 * MMC_PM_KEEP_POWER slots replays ios and bus setup.
 *
 * NOTE(review): some original lines (early returns, else branches, braces)
 * appear elided by extraction; visible logic is annotated as-is.
 */
4201 int dw_mci_resume(struct dw_mci *host)
4205 struct dw_mci_slot *slot;
4206 int present = dw_mci_get_cd(host->mmc);
/* skip resume handling for always-powered SDIO Wi-Fi chips */
4208 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4209 (get_wifi_chip_type() == WIFI_ESP8089 ||
4210 get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
4213 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
4214 slot = mmc_priv(host->mmc);
4215 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4219 /*only for sdmmc controller*/
4220 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4221 /* Soc rk3126/3036 already in gpio_cd mode */
4222 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4223 disable_irq_wake(host->mmc->slot.cd_irq);
4224 mmc_gpio_free_cd(host->mmc);
/* pins_udbg present: pass through idle first, then the udbg state */
4228 if (!IS_ERR(host->pins_udbg)) {
4229 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4230 MMC_DBG_ERR_FUNC(host->mmc,
4231 "Idle pinctrl setting failed! [%s]",
4232 mmc_hostname(host->mmc));
4233 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
4234 MMC_DBG_ERR_FUNC(host->mmc,
4235 "%s: Udbg pinctrl setting failed! [%s]",
4236 mmc_hostname(host->mmc));
4238 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4239 MMC_DBG_ERR_FUNC(host->mmc,
4240 "Default pinctrl setting failed! [%s]",
4241 mmc_hostname(host->mmc));
4244 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4245 MMC_DBG_ERR_FUNC(host->mmc,
4246 "Default pinctrl setting failed! [%s]",
4247 mmc_hostname(host->mmc));
/* per-SoC GRF writes: upper halfword is the write-enable mask */
4252 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4253 else if(cpu_is_rk3036())
4254 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4255 else if(cpu_is_rk312x())
4256 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4257 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4260 ret = regulator_enable(host->vmmc);
4263 "failed to enable regulator: %d\n", ret);
4268 if(!dw_mci_ctrl_all_reset(host)){
/* rk3036/rk312x keep their DMA state; others re-init the DMA engine */
4273 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
4274 if(host->use_dma && host->dma_ops->init)
4275 host->dma_ops->init(host);
4278 * Restore the initial value at FIFOTH register
4279 * And Invalidate the prev_blksz with zero
4281 mci_writel(host, FIFOTH, host->fifoth_val);
4282 host->prev_blksz = 0;
4283 /* Put in max timeout */
4284 mci_writel(host, TMOUT, 0xFFFFFFFF);
4286 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4287 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
/* card-detect interrupt only for non-SDIO hosts */
4289 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4290 regs |= SDMMC_INT_CD;
4291 mci_writel(host, INTMASK, regs);
4292 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4293 /*only for sdmmc controller*/
4294 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)){
4295 enable_irq(host->irq);
/* slots that kept power across suspend need ios/bus replayed */
4298 for(i = 0; i < host->num_slots; i++){
4299 struct dw_mci_slot *slot = host->slot[i];
4302 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4303 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4304 dw_mci_setup_bus(slot, true);
4310 EXPORT_SYMBOL(dw_mci_resume);
4311 #endif /* CONFIG_PM_SLEEP */
4313 static int __init dw_mci_init(void)
4315 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4319 static void __exit dw_mci_exit(void)
4323 module_init(dw_mci_init);
4324 module_exit(dw_mci_exit);
/* Module metadata: description, authorship lineage (NXP -> IMG -> Rockchip)
 * and GPLv2 license tag, matching the header notice. */
4326 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4327 MODULE_AUTHOR("NXP Semiconductor VietNam");
4328 MODULE_AUTHOR("Imagination Technologies Ltd");
4329 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4330 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4331 MODULE_LICENSE("GPL v2");