2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
49 #include <linux/mfd/syscon.h>
50 #include <linux/regmap.h>
51 #include <linux/log2.h>
53 #include "rk_sdmmc_dbg.h"
54 #include <linux/regulator/rockchip_io_vol_domain.h>
55 #include "../../clk/rockchip/clk-ops.h"
57 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
59 /* Common flag combinations */
60 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
61 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
63 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
65 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
66 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
67 #define DW_MCI_SEND_STATUS 1
68 #define DW_MCI_RECV_STATUS 2
69 #define DW_MCI_DMA_THRESHOLD 16
71 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
72 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
74 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
75 #define SDMMC_DATA_TIMEOUT_SD 500
76 #define SDMMC_DATA_TIMEOUT_SDIO 250
77 #define SDMMC_DATA_TIMEOUT_EMMC 2500
79 #define SDMMC_CMD_RTO_MAX_HOLD 200
80 #define SDMMC_WAIT_FOR_UNBUSY 2500
82 #define DW_REGS_SIZE (0x0098 + 4)
83 #define DW_REGS_NUM (0x0098 / 4)
85 #ifdef CONFIG_MMC_DW_IDMAC
86 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
87 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
88 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
92 u32 des0; /* Control Descriptor */
93 #define IDMAC_DES0_DIC BIT(1)
94 #define IDMAC_DES0_LD BIT(2)
95 #define IDMAC_DES0_FD BIT(3)
96 #define IDMAC_DES0_CH BIT(4)
97 #define IDMAC_DES0_ER BIT(5)
98 #define IDMAC_DES0_CES BIT(30)
99 #define IDMAC_DES0_OWN BIT(31)
101 u32 des1; /* Buffer sizes */
102 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
103 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
105 u32 des2; /* buffer 1 physical address */
107 u32 des3; /* buffer 2 physical address */
109 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * Fixed tuning block pattern sent/compared for 4-bit bus-width tuning
 * (CMD19/CMD21 style tuning). Presumably matches the pattern defined in
 * the SD/eMMC specifications — TODO confirm against the spec tables.
 * NOTE(review): the array's tail (closing brace) is outside this excerpt.
 */
111 static const u8 tuning_blk_pattern_4bit[] = {
112 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
113 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
114 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
115 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
116 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
117 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
118 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
119 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/*
 * Fixed tuning block pattern for 8-bit bus-width tuning (eMMC HS200).
 * Presumably the spec-defined 128-byte pattern — TODO confirm.
 * NOTE(review): the array's tail (closing brace) is outside this excerpt.
 */
122 static const u8 tuning_blk_pattern_8bit[] = {
123 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
124 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
125 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
126 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
127 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
128 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
129 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
130 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
131 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
132 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
133 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
134 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
135 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
136 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
137 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
138 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
141 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
142 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
143 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
144 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
146 /*printk the all register of current host*/
/*
 * Debug helper: dump every entry of the dw_mci_regs name/address table
 * to the kernel log by reading the live controller registers.
 * NOTE(review): loop-increment and table lines are elided in this excerpt.
 */
148 static int dw_mci_regs_printk(struct dw_mci *host)
150 struct sdmmc_reg *regs = dw_mci_regs;
152 while( regs->name != 0 ){
/* Print "<name>: (<offset>) = <raw 32-bit value>" for each register. */
153 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
156 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
161 #if defined(CONFIG_DEBUG_FS)
/*
 * debugfs "req" seq_file show: print the slot's in-flight mmc_request
 * (command, data, stop) under host->lock so the snapshot is consistent.
 */
162 static int dw_mci_req_show(struct seq_file *s, void *v)
164 struct dw_mci_slot *slot = s->private;
165 struct mmc_request *mrq;
166 struct mmc_command *cmd;
167 struct mmc_command *stop;
168 struct mmc_data *data;
170 /* Make sure we get a consistent snapshot */
171 spin_lock_bh(&slot->host->lock);
181 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
182 cmd->opcode, cmd->arg, cmd->flags,
183 cmd->resp[0], cmd->resp[1], cmd->resp[2],
/* NOTE(review): resp[2] is printed twice; the 4th field should probably
 * be resp[3] (same long-standing quirk as the upstream dw_mmc debugfs). */
184 cmd->resp[2], cmd->error);
186 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
187 data->bytes_xfered, data->blocks,
188 data->blksz, data->flags, data->error);
191 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
192 stop->opcode, stop->arg, stop->flags,
193 stop->resp[0], stop->resp[1], stop->resp[2],
/* NOTE(review): same resp[2]/resp[3] duplication for the stop command. */
194 stop->resp[2], stop->error);
197 spin_unlock_bh(&slot->host->lock);
/* debugfs open: bind the "req" file to dw_mci_req_show via single_open(). */
202 static int dw_mci_req_open(struct inode *inode, struct file *file)
204 return single_open(file, dw_mci_req_show, inode->i_private);
/* File operations for the per-slot debugfs "req" entry. */
207 static const struct file_operations dw_mci_req_fops = {
208 .owner = THIS_MODULE,
209 .open = dw_mci_req_open,
212 .release = single_release,
/*
 * debugfs "regs" seq_file show: print a fixed subset of controller
 * register offsets.
 * NOTE(review): as visible here these print the SDMMC_* offset constants
 * themselves, not register contents — confirm against the elided lines.
 */
215 static int dw_mci_regs_show(struct seq_file *s, void *v)
217 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
218 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
219 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
220 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
221 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
222 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open: bind the "regs" file to dw_mci_regs_show via single_open(). */
227 static int dw_mci_regs_open(struct inode *inode, struct file *file)
229 return single_open(file, dw_mci_regs_show, inode->i_private);
/* File operations for the debugfs "regs" entry. */
232 static const struct file_operations dw_mci_regs_fops = {
233 .owner = THIS_MODULE,
234 .open = dw_mci_regs_open,
237 .release = single_release,
/*
 * Create the per-slot debugfs entries under the mmc host's debugfs root:
 * "regs", "req", plus raw views of the host state machine and its
 * pending/completed event bitmasks. Failures fall through to one
 * dev_err() at the end (error-path lines are elided in this excerpt).
 */
240 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
242 struct mmc_host *mmc = slot->mmc;
243 struct dw_mci *host = slot->host;
247 root = mmc->debugfs_root;
251 node = debugfs_create_file("regs", S_IRUSR, root, host,
256 node = debugfs_create_file("req", S_IRUSR, root, slot,
261 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
265 node = debugfs_create_x32("pending_events", S_IRUSR, root,
266 (u32 *)&host->pending_events);
270 node = debugfs_create_x32("completed_events", S_IRUSR, root,
271 (u32 *)&host->completed_events);
278 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
280 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the controller's TMOUT register to its maximum (no HW timeout). */
282 static void dw_mci_set_timeout(struct dw_mci *host)
284 /* timeout (maximum) */
285 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMD register flags:
 * stop/abort, response-expected/long/CRC, and data direction bits.
 * Gives the platform drv_data a final hook to adjust the flags.
 * Returns the assembled CMDR value (cmdr init lines elided in excerpt).
 */
288 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
290 struct mmc_data *data;
291 struct dw_mci_slot *slot = mmc_priv(mmc);
292 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
/* Mark in-flight; cleared when the command completes or errors out. */
294 cmd->error = -EINPROGRESS;
298 if (cmdr == MMC_STOP_TRANSMISSION)
299 cmdr |= SDMMC_CMD_STOP;
301 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
303 if (cmd->flags & MMC_RSP_PRESENT) {
304 /* We expect a response, so set this bit */
305 cmdr |= SDMMC_CMD_RESP_EXP;
306 if (cmd->flags & MMC_RSP_136)
307 cmdr |= SDMMC_CMD_RESP_LONG;
310 if (cmd->flags & MMC_RSP_CRC)
311 cmdr |= SDMMC_CMD_RESP_CRC;
315 cmdr |= SDMMC_CMD_DAT_EXP;
316 if (data->flags & MMC_DATA_STREAM)
317 cmdr |= SDMMC_CMD_STRM_MODE;
318 if (data->flags & MMC_DATA_WRITE)
319 cmdr |= SDMMC_CMD_DAT_WR;
/* Platform-specific fixups (e.g. Rockchip variants). */
322 if (drv_data && drv_data->prepare_command)
323 drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build a stop/abort command for the given data command in
 * host->stop_abort: CMD12 (STOP_TRANSMISSION) for block read/write,
 * or a CMD52 I/O-abort write for SDIO CMD53 transfers.
 * Returns the CMDR flags for issuing that stop command.
 */
329 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
331 struct mmc_command *stop;
337 stop = &host->stop_abort;
339 memset(stop, 0, sizeof(struct mmc_command));
341 if (cmdr == MMC_READ_SINGLE_BLOCK ||
342 cmdr == MMC_READ_MULTIPLE_BLOCK ||
343 cmdr == MMC_WRITE_BLOCK ||
344 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
345 stop->opcode = MMC_STOP_TRANSMISSION;
347 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
348 } else if (cmdr == SD_IO_RW_EXTENDED) {
/* CMD52 write to the CCCR ABORT register, function number taken
 * from bits [30:28] of the original CMD53 argument. */
349 stop->opcode = SD_IO_RW_DIRECT;
350 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
351 ((cmd->arg >> 28) & 0x7);
352 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
357 cmdr = stop->opcode | SDMMC_CMD_STOP |
358 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Write the command argument and CMD register (with SDMMC_CMD_START) to
 * kick off a command. CMD11 (voltage switch) gets special handling:
 * low-power clock gating is disabled first and the VOLT_SWITCH flag set.
 */
363 static void dw_mci_start_command(struct dw_mci *host,
364 struct mmc_command *cmd, u32 cmd_flags)
366 struct dw_mci_slot *slot = host->slot[0];
367 /*temporality fix slot[0] due to host->num_slots equal to 1*/
/* Remember the previous command (used elsewhere for error reporting). */
369 host->pre_cmd = host->cmd;
372 "start command: ARGR=0x%08x CMDR=0x%08x\n",
373 cmd->arg, cmd_flags);
375 if(SD_SWITCH_VOLTAGE == cmd->opcode){
376 /*confirm non-low-power mode*/
377 mci_writel(host, CMDARG, 0);
378 dw_mci_disable_low_power(slot);
380 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
381 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
383 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
386 mci_writel(host, CMDARG, cmd->arg);
389 /* fix the value to 1 in some Soc,for example RK3188. */
390 if(host->mmc->hold_reg_flag)
391 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
/* Setting the START bit makes the controller latch ARG/CMD and go. */
393 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the request's explicit stop command with the precomputed flags. */
397 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
399 dw_mci_start_command(host, data->stop, host->stop_cmdr);
402 /* DMA interface functions */
/*
 * Abort an in-flight DMA transfer and mark the transfer complete so the
 * state machine can move on. On RK3036/RK312x the external dmac stop is
 * skipped (see inline Fixme: terminating edma may trigger a flush op).
 */
403 static void dw_mci_stop_dma(struct dw_mci *host)
405 if (host->using_dma) {
406 /* Fixme: No need to terminate edma, may cause flush op */
407 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
408 host->dma_ops->stop(host);
409 host->dma_ops->cleanup(host);
412 /* Data transfer was stopped by the interrupt handler */
413 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Map the MMC data direction flag to the DMA-API direction constant. */
416 static int dw_mci_get_dma_dir(struct mmc_data *data)
418 if (data->flags & MMC_DATA_WRITE)
419 return DMA_TO_DEVICE;
421 return DMA_FROM_DEVICE;
424 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * IDMAC cleanup: unmap the data scatterlist, but only when it was mapped
 * by this path (host_cookie == 0); pre-mapped requests are unmapped in
 * dw_mci_post_req() instead.
 */
425 static void dw_mci_dma_cleanup(struct dw_mci *host)
427 struct mmc_data *data = host->data;
430 if (!data->host_cookie)
431 dma_unmap_sg(host->dev,
434 dw_mci_get_dma_dir(data));
/* Soft-reset the internal DMA controller via the BMOD SWRESET bit. */
437 static void dw_mci_idmac_reset(struct dw_mci *host)
439 u32 bmod = mci_readl(host, BMOD);
440 /* Software reset of DMA */
441 bmod |= SDMMC_IDMAC_SWRESET;
442 mci_writel(host, BMOD, bmod);
/*
 * Stop the internal DMAC: deselect IDMAC in CTRL and reset the DMA
 * interface, then disable the IDMAC in BMOD and soft-reset it.
 */
445 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
449 /* Disable and reset the IDMAC interface */
450 temp = mci_readl(host, CTRL);
451 temp &= ~SDMMC_CTRL_USE_IDMAC;
452 temp |= SDMMC_CTRL_DMA_RESET;
453 mci_writel(host, CTRL, temp);
455 /* Stop the IDMAC running */
456 temp = mci_readl(host, BMOD);
457 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
458 temp |= SDMMC_IDMAC_SWRESET;
459 mci_writel(host, BMOD, temp);
/*
 * IDMAC completion callback: clean up the mapping, flag XFER_COMPLETE
 * and schedule the tasklet to advance the request state machine.
 * data may be NULL if the card was removed (see comment below).
 */
462 static void dw_mci_idmac_complete_dma(void *arg)
464 struct dw_mci *host = arg;
465 struct mmc_data *data = host->data;
467 dev_vdbg(host->dev, "DMA complete\n");
470 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
471 host->mrq->cmd->opcode,host->mrq->cmd->arg,
472 data->blocks,data->blksz,mmc_hostname(host->mmc));
475 host->dma_ops->cleanup(host);
478 * If the card was removed, data will be NULL. No point in trying to
479 * send the stop command or waiting for NBUSY in this case.
482 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
483 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the (already DMA-mapped)
 * scatterlist: one chained descriptor per sg entry, OWN set, interrupts
 * suppressed per-descriptor, then FD on the first and LD (+ IRQ enable)
 * on the last descriptor.
 */
487 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
491 struct idmac_desc *desc = host->sg_cpu;
493 for (i = 0; i < sg_len; i++, desc++) {
494 unsigned int length = sg_dma_len(&data->sg[i]);
495 u32 mem_addr = sg_dma_address(&data->sg[i]);
497 /* Set the OWN bit and disable interrupts for this descriptor */
498 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
501 IDMAC_SET_BUFFER1_SIZE(desc, length);
503 /* Physical address to DMA to/from */
504 desc->des2 = mem_addr;
507 /* Set first descriptor */
509 desc->des0 |= IDMAC_DES0_FD;
511 /* Set last descriptor */
/* NOTE(review): byte-offset arithmetic — correct only if sg_cpu is a
 * void * (GCC void* arithmetic); verify host->sg_cpu's declared type. */
512 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
513 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
514 desc->des0 |= IDMAC_DES0_LD;
/*
 * Start an IDMAC transfer: build the descriptor list, select the IDMAC
 * interface in CTRL, enable it in BMOD, and poke PLDMND so the DMAC
 * re-reads the descriptor list.
 */
519 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
523 dw_mci_translate_sglist(host, host->data, sg_len);
525 /* Select IDMAC interface */
526 temp = mci_readl(host, CTRL);
527 temp |= SDMMC_CTRL_USE_IDMAC;
528 mci_writel(host, CTRL, temp);
532 /* Enable the IDMAC */
533 temp = mci_readl(host, BMOD);
534 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
535 mci_writel(host, BMOD, temp);
537 /* Start it running */
538 mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: chain the descriptor ring (forward links via
 * des3, last entry wraps to sg_dma with the end-of-ring bit), reset the
 * IDMAC, unmask only TX/RX-complete interrupts, and program the
 * descriptor base address.
 */
541 static int dw_mci_idmac_init(struct dw_mci *host)
543 struct idmac_desc *p;
546 /* Number of descriptors in the ring buffer */
547 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
549 /* Forward link the descriptor list */
550 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
551 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
553 /* Set the last descriptor as the end-of-ring descriptor */
554 p->des3 = host->sg_dma;
555 p->des0 = IDMAC_DES0_ER;
557 dw_mci_idmac_reset(host);
559 /* Mask out interrupts - get Tx & Rx complete only */
560 mci_writel(host, IDSTS, IDMAC_INT_CLR);
561 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
564 /* Set the descriptor base address */
565 mci_writel(host, DBADDR, host->sg_dma);
/* DMA ops vtable for the controller's internal DMAC (IDMAC). */
569 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
570 .init = dw_mci_idmac_init,
571 .start = dw_mci_idmac_start_dma,
572 .stop = dw_mci_idmac_stop_dma,
573 .complete = dw_mci_idmac_complete_dma,
574 .cleanup = dw_mci_dma_cleanup,
/*
 * External-DMAC cleanup: unmap the scatterlist unless it was pre-mapped
 * via the pre_req path (host_cookie != 0).
 */
578 static void dw_mci_edma_cleanup(struct dw_mci *host)
580 struct mmc_data *data = host->data;
583 if (!data->host_cookie)
584 dma_unmap_sg(host->dev,
585 data->sg, data->sg_len,
586 dw_mci_get_dma_dir(data));
/* Abort all in-flight transfers on the external dmaengine channel. */
589 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
591 dmaengine_terminate_all(host->dms->ch);
/*
 * External-DMAC completion callback: for reads, sync the sg list back to
 * the CPU (cache invalidate), clean up, then flag XFER_COMPLETE and
 * schedule the tasklet. data may be NULL on card removal (see below).
 */
594 static void dw_mci_edmac_complete_dma(void *arg)
596 struct dw_mci *host = arg;
597 struct mmc_data *data = host->data;
599 dev_vdbg(host->dev, "DMA complete\n");
602 if(data->flags & MMC_DATA_READ)
603 /* Invalidate cache after read */
604 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
605 data->sg_len, DMA_FROM_DEVICE);
607 host->dma_ops->cleanup(host);
610 * If the card was removed, data will be NULL. No point in trying to
611 * send the stop command or waiting for NBUSY in this case.
614 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
615 tasklet_schedule(&host->tasklet);
/*
 * Start a transfer through an external dmaengine channel:
 *  - build a dma_slave_config pointing at the controller FIFO
 *    (phy_regs + data_offset), 32-bit accesses both directions;
 *  - derive the burst size from the current FIFOTH MSIZE field and clamp
 *    it to the edmac limit (8 on RK3036, otherwise the value set in the
 *    elided lines — presumably 16 per the comment; TODO confirm),
 *    rewriting FIFOTH to match;
 *  - prep/submit a slave sg descriptor with this driver's completion
 *    callback, syncing caches for writes before issuing.
 * Error paths only log via dev_err (return/cleanup lines are elided).
 */
619 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
621 struct dma_slave_config slave_config;
622 struct dma_async_tx_descriptor *desc = NULL;
623 struct scatterlist *sgl = host->data->sg;
624 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
625 u32 sg_elems = host->data->sg_len;
626 u32 fifoth_val, mburst;
628 u32 idx, rx_wmark, tx_wmark;
631 /* Set external dma config: burst size, burst width*/
632 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
633 slave_config.src_addr = slave_config.dst_addr;
634 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
635 slave_config.src_addr_width = slave_config.dst_addr_width;
637 /* Match FIFO dma burst MSIZE with external dma config*/
638 fifoth_val = mci_readl(host, FIFOTH);
639 mburst = mszs[(fifoth_val >> 28) & 0x7];
641 /* edmac limit burst to 16, but work around for rk3036 to 8 */
642 if (unlikely(cpu_is_rk3036()))
647 if (mburst > burst_limit) {
648 mburst = burst_limit;
649 idx = (ilog2(mburst) > 0) ? (ilog2(mburst) - 1) : 0;
651 rx_wmark = mszs[idx] - 1;
652 tx_wmark = (host->fifo_depth) / 2;
653 fifoth_val = SDMMC_SET_FIFOTH(idx, rx_wmark, tx_wmark);
655 mci_writel(host, FIFOTH, fifoth_val);
658 slave_config.dst_maxburst = mburst;
659 slave_config.src_maxburst = slave_config.dst_maxburst;
661 if(host->data->flags & MMC_DATA_WRITE){
662 slave_config.direction = DMA_MEM_TO_DEV;
663 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
665 dev_err(host->dev, "error in dw_mci edma configuration.\n");
669 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
670 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
672 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
675 /* Set dw_mci_edmac_complete_dma as callback */
676 desc->callback = dw_mci_edmac_complete_dma;
677 desc->callback_param = (void *)host;
678 dmaengine_submit(desc);
680 /* Flush cache before write */
681 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
682 sg_elems, DMA_TO_DEVICE);
683 dma_async_issue_pending(host->dms->ch);
686 slave_config.direction = DMA_DEV_TO_MEM;
687 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
689 dev_err(host->dev, "error in dw_mci edma configuration.\n");
692 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
693 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
695 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
698 /* set dw_mci_edmac_complete_dma as callback */
699 desc->callback = dw_mci_edmac_complete_dma;
700 desc->callback_param = (void *)host;
701 dmaengine_submit(desc);
702 dma_async_issue_pending(host->dms->ch);
/*
 * Allocate the dw_mci_dma_slave wrapper and request the "dw_mci"
 * dmaengine slave channel declared in the device tree.
 */
706 static int dw_mci_edmac_init(struct dw_mci *host)
708 /* Request external dma channel, SHOULD decide chn in dts */
710 host->dms = (struct dw_mci_dma_slave *)kmalloc
711 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
712 if (NULL == host->dms) {
713 dev_err(host->dev, "No enough memory to alloc dms.\n");
717 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
718 if (!host->dms->ch) {
/* NOTE(review): ch is NULL in this branch, so dereferencing
 * host->dms->ch->chan_id here is a NULL-pointer dereference. */
719 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
720 host->dms->ch->chan_id);
727 if (NULL != host->dms) {
/* Release the external DMA channel (freeing of host->dms is elided here). */
735 static void dw_mci_edmac_exit(struct dw_mci *host)
737 if (NULL != host->dms) {
738 if (NULL != host->dms->ch) {
739 dma_release_channel(host->dms->ch);
740 host->dms->ch = NULL;
/* DMA ops vtable for an external dmaengine-based DMAC. */
747 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
748 .init = dw_mci_edmac_init,
749 .exit = dw_mci_edmac_exit,
750 .start = dw_mci_edmac_start_dma,
751 .stop = dw_mci_edmac_stop_dma,
752 .complete = dw_mci_edmac_complete_dma,
753 .cleanup = dw_mci_edma_cleanup,
755 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * DMA-map a data request's scatterlist (or return the cached mapping
 * made by pre_req when host_cookie is set). Refuses DMA for transfers
 * that are too short or not word-aligned; the caller then falls back to
 * PIO. Returns the mapped sg count; negative-return lines are elided.
 */
757 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
758 struct mmc_data *data,
761 struct scatterlist *sg;
762 unsigned int i, sg_len;
/* Reuse the mapping created in dw_mci_pre_req() when not pre-mapping. */
764 if (!next && data->host_cookie)
765 return data->host_cookie;
768 * We don't do DMA on "complex" transfers, i.e. with
769 * non-word-aligned buffers or lengths. Also, we don't bother
770 * with all the DMA setup overhead for short transfers.
772 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
778 for_each_sg(data->sg, sg, data->sg_len, i) {
779 if (sg->offset & 3 || sg->length & 3)
783 sg_len = dma_map_sg(host->dev,
786 dw_mci_get_dma_dir(data));
791 data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: pre-map the next request's data for DMA so the
 * mapping cost overlaps with the current transfer. On failure the
 * cookie is cleared and submit-time mapping is used instead.
 */
796 static void dw_mci_pre_req(struct mmc_host *mmc,
797 struct mmc_request *mrq,
800 struct dw_mci_slot *slot = mmc_priv(mmc);
801 struct mmc_data *data = mrq->data;
803 if (!slot->host->use_dma || !data)
/* A stale cookie here means pre_req was called twice for this data. */
806 if (data->host_cookie) {
807 data->host_cookie = 0;
811 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
812 data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: unmap a scatterlist that was pre-mapped in
 * dw_mci_pre_req() (host_cookie != 0) and clear the cookie.
 */
815 static void dw_mci_post_req(struct mmc_host *mmc,
816 struct mmc_request *mrq,
819 struct dw_mci_slot *slot = mmc_priv(mmc);
820 struct mmc_data *data = mrq->data;
822 if (!slot->host->use_dma || !data)
825 if (data->host_cookie)
826 dma_unmap_sg(slot->host->dev,
829 dw_mci_get_dma_dir(data));
830 data->host_cookie = 0;
/*
 * Recompute FIFOTH (MSIZE / RX watermark / TX watermark) for the current
 * block size: pick the largest burst size that evenly divides both the
 * block depth and the TX-watermark inverse. IDMAC builds only — on
 * other configs this is a no-op (function body is #ifdef'd).
 */
833 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
835 #ifdef CONFIG_MMC_DW_IDMAC
836 unsigned int blksz = data->blksz;
837 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
838 u32 fifo_width = 1 << host->data_shift;
839 u32 blksz_depth = blksz / fifo_width, fifoth_val;
840 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
/* Start the search at the largest MSIZE and work downwards. */
841 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
843 tx_wmark = (host->fifo_depth) / 2;
844 tx_wmark_invers = host->fifo_depth - tx_wmark;
848 * if blksz is not a multiple of the FIFO width
850 if (blksz % fifo_width) {
857 if (!((blksz_depth % mszs[idx]) ||
858 (tx_wmark_invers % mszs[idx]))) {
860 rx_wmark = mszs[idx] - 1;
865 * If idx is '0', it won't be tried
866 * Thus, initial values are uesed
869 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
870 mci_writel(host, FIFOTH, fifoth_val);
/*
 * Configure the card-read-threshold (CDTHRCTL): enabled with a
 * blksz-based threshold only for HS200 / SDR104 timings where the FIFO
 * can hold a full block; otherwise disabled (threshold 0, enable 0).
 */
875 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
877 unsigned int blksz = data->blksz;
878 u32 blksz_depth, fifo_depth;
881 WARN_ON(!(data->flags & MMC_DATA_READ));
883 if (host->timing != MMC_TIMING_MMC_HS200 &&
884 host->timing != MMC_TIMING_UHS_SDR104)
887 blksz_depth = blksz / (1 << host->data_shift);
888 fifo_depth = host->fifo_depth;
/* A block larger than the FIFO cannot use the read threshold. */
890 if (blksz_depth > fifo_depth)
894 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
895 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
896 * Currently just choose blksz.
899 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
903 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to submit a data transfer via DMA. Returns non-zero (elided
 * return paths) when DMA is unavailable/unsuitable so the caller falls
 * back to PIO. On success: adjusts FIFOTH if the block size changed,
 * resets the DMA interface, enables DMA in CTRL, masks the RX/TX PIO
 * interrupts, and starts the DMA engine.
 */
906 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
914 /* If we don't have a channel, we can't do DMA */
918 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
920 /* Fixme: No need terminate edma, may cause flush op */
921 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
922 host->dma_ops->stop(host);
929 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
930 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
934 * Decide the MSIZE and RX/TX Watermark.
935 * If current block size is same with previous size,
936 * no need to update fifoth.
938 if (host->prev_blksz != data->blksz)
939 dw_mci_adjust_fifoth(host, data);
942 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
944 /* Enable the DMA interface */
945 temp = mci_readl(host, CTRL);
946 temp |= SDMMC_CTRL_DMA_ENABLE;
947 mci_writel(host, CTRL, temp);
949 /* Disable RX/TX IRQs, let DMA handle it */
950 spin_lock_irqsave(&host->slock, flags);
951 temp = mci_readl(host, INTMASK);
952 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
953 mci_writel(host, INTMASK, temp);
954 spin_unlock_irqrestore(&host->slock, flags);
956 host->dma_ops->start(host, sg_len);
/*
 * Submit a data transfer: set the direction status (and read threshold
 * for reads), then attempt DMA via dw_mci_submit_data_dma(). If that
 * fails, fall back to PIO: start the sg_miter, re-enable RX/TX data
 * interrupts, disable DMA in CTRL, and restore the initial FIFOTH
 * (prev_blksz = 0 forces a FIFOTH recalculation on the next DMA).
 */
961 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
966 data->error = -EINPROGRESS;
968 //WARN_ON(host->data);
973 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
975 if (data->flags & MMC_DATA_READ) {
976 host->dir_status = DW_MCI_RECV_STATUS;
977 dw_mci_ctrl_rd_thld(host, data);
979 host->dir_status = DW_MCI_SEND_STATUS;
982 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
983 data->blocks, data->blksz, mmc_hostname(host->mmc));
985 if (dw_mci_submit_data_dma(host, data)) {
/* PIO fallback path from here on. */
986 int flags = SG_MITER_ATOMIC;
987 if (host->data->flags & MMC_DATA_READ)
988 flags |= SG_MITER_TO_SG;
990 flags |= SG_MITER_FROM_SG;
992 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
994 host->part_buf_start = 0;
995 host->part_buf_count = 0;
997 spin_lock_irqsave(&host->slock, flag);
998 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
999 temp = mci_readl(host, INTMASK);
1000 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
1001 mci_writel(host, INTMASK, temp);
1002 spin_unlock_irqrestore(&host->slock, flag);
1004 temp = mci_readl(host, CTRL);
1005 temp &= ~SDMMC_CTRL_DMA_ENABLE;
1006 mci_writel(host, CTRL, temp);
1009 * Use the initial fifoth_val for PIO mode.
1010 * If next issued data may be transfered by DMA mode,
1011 * prev_blksz should be invalidated.
1013 mci_writel(host, FIFOTH, host->fifoth_val);
1014 host->prev_blksz = 0;
1017 * Keep the current block size.
1018 * It will be used to decide whether to update
1019 * fifoth register next time.
1021 host->prev_blksz = data->blksz;
/*
 * Synchronously issue an internal controller command (e.g. clock
 * update): optionally busy-wait for the card/controller to go unbusy,
 * write CMDARG/CMD with the START bit, then poll until the controller
 * clears START (50ms for clock-update commands, 500ms otherwise).
 * Logs an error on timeout instead of returning a status.
 */
1025 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1027 struct dw_mci *host = slot->host;
1028 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1029 unsigned int cmd_status = 0;
1030 #ifdef SDMMC_WAIT_FOR_UNBUSY
1032 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1034 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1036 ret = time_before(jiffies, timeout);
1037 cmd_status = mci_readl(host, STATUS);
1038 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1042 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1043 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1046 mci_writel(host, CMDARG, arg);
1048 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
/* Clock-update commands complete fast; use a shorter poll window. */
1049 if(cmd & SDMMC_CMD_UPD_CLK)
1050 timeout = jiffies + msecs_to_jiffies(50);
1052 timeout = jiffies + msecs_to_jiffies(500);
1053 while (time_before(jiffies, timeout)) {
1054 cmd_status = mci_readl(host, CMD);
1055 if (!(cmd_status & SDMMC_CMD_START))
1058 dev_err(&slot->mmc->class_dev,
1059 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1060 cmd, arg, cmd_status);
/*
 * Program the card clock and bus width for a slot.
 *  - clock == 0 (elided condition): gate the clock via CLKENA and a
 *    clock-update command;
 *  - otherwise, when the requested clock or force_clkinit changed:
 *    compute the divider from bus_hz, apply Rockchip-specific
 *    workarounds (24MHz-xtal reparenting below 400kHz; a div==0
 *    HS-DDR eMMC fixup described in the long comment below), set
 *    clk_mmc and CLKDIV, and re-enable the clock with low-power mode
 *    unless SDIO interrupts are in use.
 * Finally records current_speed and writes the slot's bus width to
 * CTYPE. Several branch headers are elided in this excerpt, so the
 * exact branch structure should be confirmed against the full file.
 */
1063 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1065 struct dw_mci *host = slot->host;
1066 unsigned int tempck,clock = slot->clock;
1071 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1072 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1075 mci_writel(host, CLKENA, 0);
1076 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1077 if(host->svi_flags == 0)
1078 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1080 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1082 } else if (clock != host->current_speed || force_clkinit) {
1083 div = host->bus_hz / clock;
1084 if (host->bus_hz % clock && host->bus_hz > clock)
1086 * move the + 1 after the divide to prevent
1087 * over-clocking the card.
/* CLKDIV divides by 2*div, hence the round-up by 2; div==0 bypasses. */
1091 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1093 if ((clock << div) != slot->__clk_old || force_clkinit) {
1094 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1095 dev_info(&slot->mmc->class_dev,
1096 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1097 slot->id, host->bus_hz, clock,
1100 host->set_speed = tempck;
1101 host->set_div = div;
1105 mci_writel(host, CLKENA, 0);
1106 mci_writel(host, CLKSRC, 0);
1110 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1112 if(clock <= 400*1000){
1113 MMC_DBG_BOOT_FUNC(host->mmc,
1114 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1115 clock * 2, mmc_hostname(host->mmc));
1116 /* clk_mmc will change parents to 24MHz xtal*/
1117 clk_set_rate(host->clk_mmc, clock * 2);
1120 host->set_div = div;
1124 MMC_DBG_BOOT_FUNC(host->mmc,
1125 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1126 mmc_hostname(host->mmc));
1129 MMC_DBG_ERR_FUNC(host->mmc,
1130 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1131 mmc_hostname(host->mmc));
1133 host->set_div = div;
1134 host->bus_hz = host->set_speed * 2;
1135 MMC_DBG_BOOT_FUNC(host->mmc,
1136 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1137 div, host->bus_hz, mmc_hostname(host->mmc));
1139 /* BUG may be here, come on, Linux BSP engineer looks!
1140 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1141 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1142 some oops happened like that:
1143 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1144 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1145 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1146 mmc0: new high speed DDR MMC card at address 0001
1147 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1149 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1150 mmcblk0: retrying using single block read
1151 mmcblk0: error -110 sending status command, retrying
1153 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1156 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1157 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1160 host->set_div = div;
1161 host->bus_hz = host->set_speed * 2;
1162 MMC_DBG_BOOT_FUNC(host->mmc,
1163 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1164 div, host->bus_hz, mmc_hostname(host->mmc));
/* Older controller revisions take the target rate directly; newer
 * ones run the IP at 2x and divide internally. */
1167 if (host->verid < DW_MMC_240A)
1168 clk_set_rate(host->clk_mmc,(host->bus_hz));
1170 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1176 /* set clock to desired speed */
1177 mci_writel(host, CLKDIV, div);
1181 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1183 /* enable clock; only low power if no SDIO */
1184 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1186 if (host->verid < DW_MMC_240A)
1187 sdio_int = SDMMC_INT_SDIO(slot->id);
1189 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1191 if (!(mci_readl(host, INTMASK) & sdio_int))
1192 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1193 mci_writel(host, CLKENA, clk_en_a);
1197 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1198 /* keep the clock with reflecting clock dividor */
1199 slot->__clk_old = clock << div;
1202 host->current_speed = clock;
1204 if(slot->ctype != slot->pre_ctype)
1205 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1207 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1208 mmc_hostname(host->mmc));
1209 slot->pre_ctype = slot->ctype;
1211 /* Set the current slot bus width */
1212 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1215 extern struct mmc_card *this_card;
/*
 * Poll STATUS until the data/MC busy bits clear, with a card-type
 * dependent timeout: SDIO default, SD 500ms, eMMC 2500ms, and a
 * computed (much larger) timeout for eMMC (secure) erase derived from
 * ext_csd erase timing fields of the global `this_card`.
 */
1216 static void dw_mci_wait_unbusy(struct dw_mci *host)
1219 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1220 unsigned long time_loop;
1221 unsigned int status;
1224 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1226 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1227 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1228 /* Special care for (secure)erase timeout calculation */
/* NOTE(review): (arg & (1u<<31)) == 1 can never be true — the masked
 * value is 0 or 0x80000000; should compare != 0 (or shift right 31).
 * As written, the secure-erase flag is presumably never set. */
1230 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1233 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1234 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1235 300000 * (this_card->ext_csd.sec_erase_mult)) :
1236 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1240 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1241 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1242 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1243 timeout = SDMMC_DATA_TIMEOUT_SD;
1246 time_loop = jiffies + msecs_to_jiffies(timeout);
1248 status = mci_readl(host, STATUS);
1249 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1251 } while (time_before(jiffies, time_loop));
1256 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1259 * 0--status is busy.
1260 * 1--status is unbusy.
1262 int dw_mci_card_busy(struct mmc_host *mmc)
1264 struct dw_mci_slot *slot = mmc_priv(mmc);
1265 struct dw_mci *host = slot->host;
1267 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1268 host->svi_flags, mmc_hostname(host->mmc));
1271 if(host->svi_flags == 0){
1273 host->svi_flags = 1;
1274 return host->svi_flags;
1277 host->svi_flags = 0;
1278 return host->svi_flags;
1284 static void __dw_mci_start_request(struct dw_mci *host,
1285 struct dw_mci_slot *slot,
1286 struct mmc_command *cmd)
1288 struct mmc_request *mrq;
1289 struct mmc_data *data;
1293 if (host->pdata->select_slot)
1294 host->pdata->select_slot(slot->id);
1296 host->cur_slot = slot;
1299 dw_mci_wait_unbusy(host);
1301 host->pending_events = 0;
1302 host->completed_events = 0;
1303 host->data_status = 0;
1307 dw_mci_set_timeout(host);
1308 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1309 mci_writel(host, BLKSIZ, data->blksz);
1312 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1314 /* this is the first command, send the initialization clock */
1315 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1316 cmdflags |= SDMMC_CMD_INIT;
1319 dw_mci_submit_data(host, data);
1323 dw_mci_start_command(host, cmd, cmdflags);
1326 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
1329 static void dw_mci_start_request(struct dw_mci *host,
1330 struct dw_mci_slot *slot)
1332 struct mmc_request *mrq = slot->mrq;
1333 struct mmc_command *cmd;
1335 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1336 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1338 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1339 __dw_mci_start_request(host, slot, cmd);
1342 /* must be called with host->lock held */
1343 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1344 struct mmc_request *mrq)
1346 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1351 if (host->state == STATE_IDLE) {
1352 host->state = STATE_SENDING_CMD;
1353 dw_mci_start_request(host, slot);
1355 list_add_tail(&slot->queue_node, &host->queue);
1359 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1361 struct dw_mci_slot *slot = mmc_priv(mmc);
1362 struct dw_mci *host = slot->host;
1367 * The check for card presence and queueing of the request must be
1368 * atomic, otherwise the card could be removed in between and the
1369 * request wouldn't fail until another card was inserted.
1371 spin_lock_bh(&host->lock);
1373 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1374 spin_unlock_bh(&host->lock);
1375 mrq->cmd->error = -ENOMEDIUM;
1376 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1377 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
1379 mmc_request_done(mmc, mrq);
1383 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1384 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1386 dw_mci_queue_request(host, slot, mrq);
1388 spin_unlock_bh(&host->lock);
1391 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1393 struct dw_mci_slot *slot = mmc_priv(mmc);
1394 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1395 struct dw_mci *host = slot->host;
1397 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1400 #ifdef SDMMC_WAIT_FOR_UNBUSY
1401 unsigned long time_loop;
1404 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1405 if(host->svi_flags == 1)
1406 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1408 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1410 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1413 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1414 printk("%d..%s: no card. [%s]\n", \
1415 __LINE__, __FUNCTION__, mmc_hostname(mmc));
1420 ret = time_before(jiffies, time_loop);
1421 regs = mci_readl(slot->host, STATUS);
1422 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1428 printk("slot->flags = %lu ", slot->flags);
1429 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1430 if(host->svi_flags != 1)
1433 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1434 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
1438 switch (ios->bus_width) {
1439 case MMC_BUS_WIDTH_4:
1440 slot->ctype = SDMMC_CTYPE_4BIT;
1442 case MMC_BUS_WIDTH_8:
1443 slot->ctype = SDMMC_CTYPE_8BIT;
1446 /* set default 1 bit mode */
1447 slot->ctype = SDMMC_CTYPE_1BIT;
1448 slot->pre_ctype = SDMMC_CTYPE_1BIT;
1451 regs = mci_readl(slot->host, UHS_REG);
1454 if (ios->timing == MMC_TIMING_UHS_DDR50)
1455 regs |= ((0x1 << slot->id) << 16);
1457 regs &= ~((0x1 << slot->id) << 16);
1459 mci_writel(slot->host, UHS_REG, regs);
1460 slot->host->timing = ios->timing;
1463 * Use mirror of ios->clock to prevent race with mmc
1464 * core ios update when finding the minimum.
1466 slot->clock = ios->clock;
1468 if (drv_data && drv_data->set_ios)
1469 drv_data->set_ios(slot->host, ios);
1471 /* Slot specific timing and width adjustment */
1472 dw_mci_setup_bus(slot, false);
1476 switch (ios->power_mode) {
1478 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1480 if (slot->host->pdata->setpower)
1481 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1482 regs = mci_readl(slot->host, PWREN);
1483 regs |= (1 << slot->id);
1484 mci_writel(slot->host, PWREN, regs);
1487 /* Power down slot */
1488 if(slot->host->pdata->setpower)
1489 slot->host->pdata->setpower(slot->id, 0);
1490 regs = mci_readl(slot->host, PWREN);
1491 regs &= ~(1 << slot->id);
1492 mci_writel(slot->host, PWREN, regs);
1499 static int dw_mci_get_ro(struct mmc_host *mmc)
1502 struct dw_mci_slot *slot = mmc_priv(mmc);
1503 struct dw_mci_board *brd = slot->host->pdata;
1505 /* Use platform get_ro function, else try on board write protect */
1506 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1508 else if(brd->get_ro)
1509 read_only = brd->get_ro(slot->id);
1510 else if(gpio_is_valid(slot->wp_gpio))
1511 read_only = gpio_get_value(slot->wp_gpio);
1514 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1516 dev_dbg(&mmc->class_dev, "card is %s\n",
1517 read_only ? "read-only" : "read-write");
1522 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1524 struct dw_mci_slot *slot = mmc_priv(mmc);
1525 struct dw_mci *host = slot->host;
1526 /*struct dw_mci_board *brd = slot->host->pdata;*/
1528 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1531 spin_lock_bh(&host->lock);
1534 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1536 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1538 spin_unlock_bh(&host->lock);
1540 if (test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1541 if (__clk_is_enabled(host->hclk_mmc) == false)
1542 clk_prepare_enable(host->hclk_mmc);
1543 if (__clk_is_enabled(host->clk_mmc) == false)
1544 clk_prepare_enable(host->clk_mmc);
1546 if (__clk_is_enabled(host->clk_mmc) == true)
1547 clk_disable_unprepare(slot->host->clk_mmc);
1548 if (__clk_is_enabled(host->hclk_mmc) == true)
1549 clk_disable_unprepare(slot->host->hclk_mmc);
1552 mmc_detect_change(slot->mmc, 20);
1558 static int dw_mci_get_cd(struct mmc_host *mmc)
1561 struct dw_mci_slot *slot = mmc_priv(mmc);
1562 struct dw_mci_board *brd = slot->host->pdata;
1563 struct dw_mci *host = slot->host;
1564 int gpio_cd = mmc_gpio_get_cd(mmc);
1565 int force_jtag_bit, force_jtag_reg;
1569 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
1570 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1571 gpio_cd = slot->cd_gpio;
1572 irq = gpio_to_irq(gpio_cd);
1573 if (gpio_is_valid(gpio_cd)) {
1574 gpio_val = gpio_get_value(gpio_cd);
1575 if (soc_is_rk3036()) {
1576 force_jtag_bit = 11;
1577 force_jtag_reg = RK312X_GRF_SOC_CON0;
1578 } else if (soc_is_rk3126() || soc_is_rk3126b()) {
1579 force_jtag_reg = RK312X_GRF_SOC_CON0;
1583 if (gpio_val == gpio_get_value(gpio_cd)) {
1584 gpio_cd = (gpio_val == 0 ? 1 : 0);
1586 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1587 /* Enable force_jtag wihtout card in slot, ONLY for NCD-package */
1588 grf_writel((0x1 << (force_jtag_bit + 16)) | (1 << force_jtag_bit),
1591 dw_mci_ctrl_all_reset(host);
1593 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
1594 /* Really card detected: SHOULD disable force_jtag */
1595 grf_writel((0x1 << (force_jtag_bit + 16)) | (0 << force_jtag_bit),
1600 gpio_val = gpio_get_value(gpio_cd);
1602 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
1603 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1604 return slot->last_detect_state;
1607 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
1611 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1612 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1614 /* Use platform get_cd function, else try onboard card detect */
1615 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1617 else if (brd->get_cd)
1618 present = !brd->get_cd(slot->id);
1619 else if (!IS_ERR_VALUE(gpio_cd))
1622 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1625 spin_lock_bh(&host->lock);
1627 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1628 dev_dbg(&mmc->class_dev, "card is present\n");
1630 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1631 dev_dbg(&mmc->class_dev, "card is not present\n");
1633 spin_unlock_bh(&host->lock);
1640 * Dts Should caps emmc controller with poll-hw-reset
1642 static void dw_mci_hw_reset(struct mmc_host *mmc)
1644 struct dw_mci_slot *slot = mmc_priv(mmc);
1645 struct dw_mci *host = slot->host;
1650 unsigned long timeout;
1653 /* (1) CMD12 to end any transfer in process */
1654 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1655 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1657 if(host->mmc->hold_reg_flag)
1658 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1659 mci_writel(host, CMDARG, 0);
1661 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
1663 timeout = jiffies + msecs_to_jiffies(500);
1665 ret = time_before(jiffies, timeout);
1666 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1671 MMC_DBG_ERR_FUNC(host->mmc,
1672 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1673 __func__, mmc_hostname(host->mmc));
1675 /* (2) wait DTO, even if no response is sent back by card */
1677 timeout = jiffies + msecs_to_jiffies(5);
1679 ret = time_before(jiffies, timeout);
1680 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1681 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1687 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1689 /* Software reset - BMOD[0] for IDMA only */
1690 regs = mci_readl(host, BMOD);
1691 regs |= SDMMC_IDMAC_SWRESET;
1692 mci_writel(host, BMOD, regs);
1693 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1694 regs = mci_readl(host, BMOD);
1695 if(regs & SDMMC_IDMAC_SWRESET)
1696 MMC_DBG_WARN_FUNC(host->mmc,
1697 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1698 __func__, mmc_hostname(host->mmc));
1700 /* DMA reset - CTRL[2] */
1701 regs = mci_readl(host, CTRL);
1702 regs |= SDMMC_CTRL_DMA_RESET;
1703 mci_writel(host, CTRL, regs);
1704 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1705 regs = mci_readl(host, CTRL);
1706 if(regs & SDMMC_CTRL_DMA_RESET)
1707 MMC_DBG_WARN_FUNC(host->mmc,
1708 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1709 __func__, mmc_hostname(host->mmc));
1711 /* FIFO reset - CTRL[1] */
1712 regs = mci_readl(host, CTRL);
1713 regs |= SDMMC_CTRL_FIFO_RESET;
1714 mci_writel(host, CTRL, regs);
1715 mdelay(1); /* no timing limited, 1ms is random value */
1716 regs = mci_readl(host, CTRL);
1717 if(regs & SDMMC_CTRL_FIFO_RESET)
1718 MMC_DBG_WARN_FUNC(host->mmc,
1719 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1720 __func__, mmc_hostname(host->mmc));
1723 According to eMMC spec
1724 tRstW >= 1us ; RST_n pulse width
1725 tRSCA >= 200us ; RST_n to Command time
1726 tRSTH >= 1us ; RST_n high period
1728 mci_writel(slot->host, PWREN, 0x0);
1729 mci_writel(slot->host, RST_N, 0x0);
1731 udelay(10); /* 10us for bad quality eMMc. */
1733 mci_writel(slot->host, PWREN, 0x1);
1734 mci_writel(slot->host, RST_N, 0x1);
1736 usleep_range(500, 1000); /* at least 500(> 200us) */
1740 * Disable lower power mode.
1742 * Low power mode will stop the card clock when idle. According to the
1743 * description of the CLKENA register we should disable low power mode
1744 * for SDIO cards if we need SDIO interrupts to work.
1746 * This function is fast if low power mode is already disabled.
1748 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1750 struct dw_mci *host = slot->host;
1752 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1754 clk_en_a = mci_readl(host, CLKENA);
1756 if (clk_en_a & clken_low_pwr) {
1757 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1758 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1759 SDMMC_CMD_PRV_DAT_WAIT, 0);
1763 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1765 struct dw_mci_slot *slot = mmc_priv(mmc);
1766 struct dw_mci *host = slot->host;
1767 unsigned long flags;
1771 spin_lock_irqsave(&host->slock, flags);
1773 /* Enable/disable Slot Specific SDIO interrupt */
1774 int_mask = mci_readl(host, INTMASK);
1776 if (host->verid < DW_MMC_240A)
1777 sdio_int = SDMMC_INT_SDIO(slot->id);
1779 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1783 * Turn off low power mode if it was enabled. This is a bit of
1784 * a heavy operation and we disable / enable IRQs a lot, so
1785 * we'll leave low power mode disabled and it will get
1786 * re-enabled again in dw_mci_setup_bus().
1788 dw_mci_disable_low_power(slot);
1790 mci_writel(host, INTMASK,
1791 (int_mask | sdio_int));
1793 mci_writel(host, INTMASK,
1794 (int_mask & ~sdio_int));
1797 spin_unlock_irqrestore(&host->slock, flags);
1800 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1802 IO_DOMAIN_12 = 1200,
1803 IO_DOMAIN_18 = 1800,
1804 IO_DOMAIN_33 = 3300,
1806 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1816 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1817 __FUNCTION__, mmc_hostname(host->mmc));
1820 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1821 __FUNCTION__, mmc_hostname(host->mmc));
1825 if (cpu_is_rk3288()) {
1826 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1827 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1830 } else if (host->cid == DW_MCI_TYPE_RK3368) {
1831 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1832 regmap_write(host->grf, 0x900, (voltage << 6) | (1 << 22));
1836 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1837 __FUNCTION__, mmc_hostname(host->mmc));
1841 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1842 struct mmc_ios *ios)
1845 unsigned int value,uhs_reg;
1848 * Signal Voltage Switching is only applicable for Host Controllers
1851 if (host->verid < DW_MMC_240A)
1854 uhs_reg = mci_readl(host, UHS_REG);
1855 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1856 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1858 switch (ios->signal_voltage) {
1859 case MMC_SIGNAL_VOLTAGE_330:
1860 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1862 if (cpu_is_rk3288())
1863 ret = io_domain_regulator_set_voltage(
1864 host->vmmc, 3300000, 3300000);
1866 ret = regulator_set_voltage(host->vmmc, 3300000, 3300000);
1868 /* regulator_put(host->vmmc); //to be done in remove function. */
1870 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1871 __func__, regulator_get_voltage(host->vmmc), ret);
1873 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1874 " failed\n", mmc_hostname(host->mmc));
1877 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1879 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1880 __FUNCTION__, mmc_hostname(host->mmc));
1882 /* set High-power mode */
1883 value = mci_readl(host, CLKENA);
1884 value &= ~SDMMC_CLKEN_LOW_PWR;
1885 mci_writel(host,CLKENA , value);
1887 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1888 mci_writel(host,UHS_REG , uhs_reg);
1891 usleep_range(5000, 5500);
1893 /* 3.3V regulator output should be stable within 5 ms */
1894 uhs_reg = mci_readl(host, UHS_REG);
1895 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1898 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1899 mmc_hostname(host->mmc));
1902 case MMC_SIGNAL_VOLTAGE_180:
1904 if (cpu_is_rk3288())
1905 ret = io_domain_regulator_set_voltage(
1909 ret = regulator_set_voltage(
1912 /* regulator_put(host->vmmc);//to be done in remove function. */
1914 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1915 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1917 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1918 " failed\n", mmc_hostname(host->mmc));
1921 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1925 * Enable 1.8V Signal Enable in the Host Control2
1928 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1931 usleep_range(5000, 5500);
1932 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1933 __FUNCTION__,mmc_hostname(host->mmc));
1935 /* 1.8V regulator output should be stable within 5 ms */
1936 uhs_reg = mci_readl(host, UHS_REG);
1937 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1940 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1941 mmc_hostname(host->mmc));
1944 case MMC_SIGNAL_VOLTAGE_120:
1946 if (cpu_is_rk3288())
1947 ret = io_domain_regulator_set_voltage(
1951 ret = regulator_set_voltage(host->vmmc,
1954 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1955 " failed\n", mmc_hostname(host->mmc));
1961 /* No signal voltage switch required */
1967 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1968 struct mmc_ios *ios)
1970 struct dw_mci_slot *slot = mmc_priv(mmc);
1971 struct dw_mci *host = slot->host;
1974 if (host->verid < DW_MMC_240A)
1977 err = dw_mci_do_start_signal_voltage_switch(host, ios);
1983 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1985 struct dw_mci_slot *slot = mmc_priv(mmc);
1986 struct dw_mci *host = slot->host;
1987 const struct dw_mci_drv_data *drv_data = host->drv_data;
1988 struct dw_mci_tuning_data tuning_data;
1991 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1992 if(cpu_is_rk3036() || cpu_is_rk312x())
1995 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1996 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1997 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1998 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1999 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
2000 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
2001 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
2005 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
2006 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
2007 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
2010 "Undefined command(%d) for tuning\n", opcode);
2015 /* Recommend sample phase and delayline
2016 Fixme: Mix-use these three controllers will cause
2019 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
2020 tuning_data.con_id = 3;
2021 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2022 tuning_data.con_id = 1;
2024 tuning_data.con_id = 0;
2026 /* 0: driver, from host->devices
2027 1: sample, from devices->host
2029 tuning_data.tuning_type = 1;
2031 if (drv_data && drv_data->execute_tuning)
2032 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
2037 static void dw_mci_post_tmo(struct mmc_host *mmc)
2039 struct dw_mci_slot *slot = mmc_priv(mmc);
2040 struct dw_mci *host = slot->host;
2041 host->cur_slot->mrq = NULL;
2043 host->state = STATE_IDLE;
2046 static const struct mmc_host_ops dw_mci_ops = {
2047 .request = dw_mci_request,
2048 .pre_req = dw_mci_pre_req,
2049 .post_req = dw_mci_post_req,
2050 .set_ios = dw_mci_set_ios,
2051 .get_ro = dw_mci_get_ro,
2052 .get_cd = dw_mci_get_cd,
2053 .set_sdio_status = dw_mci_set_sdio_status,
2054 .hw_reset = dw_mci_hw_reset,
2055 .enable_sdio_irq = dw_mci_enable_sdio_irq,
2056 .execute_tuning = dw_mci_execute_tuning,
2057 .post_tmo = dw_mci_post_tmo,
2058 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
2059 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
2060 .card_busy = dw_mci_card_busy,
2065 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
2067 unsigned long flags;
2072 local_irq_save(flags);
2073 if(host->irq_state != irqflag)
2075 host->irq_state = irqflag;
2078 enable_irq(host->irq);
2082 disable_irq(host->irq);
2085 local_irq_restore(flags);
2089 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2090 __releases(&host->lock)
2091 __acquires(&host->lock)
2093 if(DW_MCI_SEND_STATUS == host->dir_status){
2095 if( MMC_BUS_TEST_W != host->cmd->opcode){
2096 if(host->data_status & SDMMC_INT_DCRC)
2097 host->data->error = -EILSEQ;
2098 else if(host->data_status & SDMMC_INT_EBE)
2099 host->data->error = -ETIMEDOUT;
2101 dw_mci_wait_unbusy(host);
2104 dw_mci_wait_unbusy(host);
2109 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2110 __releases(&host->lock)
2111 __acquires(&host->lock)
2113 struct dw_mci_slot *slot;
2114 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2116 //WARN_ON(host->cmd || host->data);
2118 dw_mci_deal_data_end(host, mrq);
2121 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2122 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2124 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2125 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2127 host->cur_slot->mrq = NULL;
2129 if (!list_empty(&host->queue)) {
2130 slot = list_entry(host->queue.next,
2131 struct dw_mci_slot, queue_node);
2132 list_del(&slot->queue_node);
2133 dev_vdbg(host->dev, "list not empty: %s is next\n",
2134 mmc_hostname(slot->mmc));
2135 host->state = STATE_SENDING_CMD;
2136 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2137 dw_mci_start_request(host, slot);
2139 dev_vdbg(host->dev, "list empty\n");
2140 host->state = STATE_IDLE;
2143 spin_unlock(&host->lock);
2144 mmc_request_done(prev_mmc, mrq);
2145 spin_lock(&host->lock);
2148 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2150 u32 status = host->cmd_status;
2152 host->cmd_status = 0;
2154 /* Read the response from the card (up to 16 bytes) */
2155 if (cmd->flags & MMC_RSP_PRESENT) {
2156 if (cmd->flags & MMC_RSP_136) {
2157 cmd->resp[3] = mci_readl(host, RESP0);
2158 cmd->resp[2] = mci_readl(host, RESP1);
2159 cmd->resp[1] = mci_readl(host, RESP2);
2160 cmd->resp[0] = mci_readl(host, RESP3);
2162 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2163 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2165 cmd->resp[0] = mci_readl(host, RESP0);
2169 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2170 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
2174 if (status & SDMMC_INT_RTO)
2176 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2179 cmd->error = -ETIMEDOUT;
2180 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2181 cmd->error = -EILSEQ;
2182 }else if (status & SDMMC_INT_RESP_ERR){
2187 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2188 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2191 if(MMC_SEND_STATUS != cmd->opcode)
2192 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2193 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2194 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2198 /* newer ip versions need a delay between retries */
2199 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
2205 static void dw_mci_tasklet_func(unsigned long priv)
2207 struct dw_mci *host = (struct dw_mci *)priv;
2208 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2209 struct mmc_data *data;
2210 struct mmc_command *cmd;
2211 enum dw_mci_state state;
2212 enum dw_mci_state prev_state;
2213 u32 status, cmd_flags;
2214 unsigned long timeout = 0;
2217 spin_lock(&host->lock);
2219 state = host->state;
2229 case STATE_SENDING_CMD:
2230 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2231 &host->pending_events))
2236 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2237 dw_mci_command_complete(host, cmd);
2238 if (cmd == host->mrq->sbc && !cmd->error) {
2239 prev_state = state = STATE_SENDING_CMD;
2240 __dw_mci_start_request(host, host->cur_slot,
2245 if (cmd->data && cmd->error) {
2246 dw_mci_stop_dma(host);
2249 send_stop_cmd(host, data);
2250 state = STATE_SENDING_STOP;
2253 /* host->data = NULL; */
2256 send_stop_abort(host, data);
2257 state = STATE_SENDING_STOP;
2260 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2263 if (!host->mrq->data || cmd->error) {
2264 dw_mci_request_end(host, host->mrq);
2268 prev_state = state = STATE_SENDING_DATA;
2271 case STATE_SENDING_DATA:
2272 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2273 dw_mci_stop_dma(host);
2276 send_stop_cmd(host, data);
2278 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2279 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2280 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
2282 mci_writel(host, CMDARG, 0);
2284 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2285 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2287 if(host->mmc->hold_reg_flag)
2288 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2290 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
2292 timeout = jiffies + msecs_to_jiffies(500);
2295 ret = time_before(jiffies, timeout);
2296 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2300 MMC_DBG_ERR_FUNC(host->mmc,
2301 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2302 __func__, mmc_hostname(host->mmc));
2305 send_stop_abort(host, data);
2307 state = STATE_DATA_ERROR;
2311 MMC_DBG_CMD_FUNC(host->mmc,
2312 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2313 prev_state,state, mmc_hostname(host->mmc));
2315 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2316 &host->pending_events))
2318 MMC_DBG_INFO_FUNC(host->mmc,
2319 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2320 prev_state,state,mmc_hostname(host->mmc));
2322 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2323 prev_state = state = STATE_DATA_BUSY;
2326 case STATE_DATA_BUSY:
2327 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2328 &host->pending_events))
2331 dw_mci_deal_data_end(host, host->mrq);
2332 MMC_DBG_INFO_FUNC(host->mmc,
2333 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2334 prev_state,state,mmc_hostname(host->mmc));
2336 /* host->data = NULL; */
2337 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2338 status = host->data_status;
2340 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2341 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2342 MMC_DBG_ERR_FUNC(host->mmc,
2343 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2344 prev_state,state, status, mmc_hostname(host->mmc));
2346 if (status & SDMMC_INT_DRTO) {
2347 data->error = -ETIMEDOUT;
2348 } else if (status & SDMMC_INT_DCRC) {
2349 data->error = -EILSEQ;
2350 } else if (status & SDMMC_INT_EBE &&
2351 host->dir_status == DW_MCI_SEND_STATUS){
2353 * No data CRC status was returned.
2354 * The number of bytes transferred will
2355 * be exaggerated in PIO mode.
2357 data->bytes_xfered = 0;
2358 data->error = -ETIMEDOUT;
2367 * After an error, there may be data lingering
2368 * in the FIFO, so reset it - doing so
2369 * generates a block interrupt, hence setting
2370 * the scatter-gather pointer to NULL.
2372 dw_mci_fifo_reset(host);
2374 data->bytes_xfered = data->blocks * data->blksz;
2379 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2380 prev_state,state,mmc_hostname(host->mmc));
2381 dw_mci_request_end(host, host->mrq);
2384 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2385 prev_state,state,mmc_hostname(host->mmc));
2387 if (host->mrq->sbc && !data->error) {
2388 data->stop->error = 0;
2390 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2391 prev_state,state,mmc_hostname(host->mmc));
2393 dw_mci_request_end(host, host->mrq);
2397 prev_state = state = STATE_SENDING_STOP;
2399 send_stop_cmd(host, data);
2401 if (data->stop && !data->error) {
2402 /* stop command for open-ended transfer*/
2404 send_stop_abort(host, data);
2408 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2409 prev_state,state,mmc_hostname(host->mmc));
2411 case STATE_SENDING_STOP:
2412 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2415 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2416 prev_state, state, mmc_hostname(host->mmc));
2418 /* CMD error in data command */
2419 if (host->mrq->cmd->error && host->mrq->data) {
2420 dw_mci_fifo_reset(host);
2424 host->data = NULL; */
2426 dw_mci_command_complete(host, host->mrq->stop);
2428 if (host->mrq->stop)
2429 dw_mci_command_complete(host, host->mrq->stop);
2431 host->cmd_status = 0;
2434 dw_mci_request_end(host, host->mrq);
2437 case STATE_DATA_ERROR:
2438 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2439 &host->pending_events))
2442 state = STATE_DATA_BUSY;
2445 } while (state != prev_state);
2447 host->state = state;
2449 spin_unlock(&host->lock);
2453 /* push final bytes to part_buf, only use during push */
2454 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2456 memcpy((void *)&host->part_buf, buf, cnt);
2457 host->part_buf_count = cnt;
2460 /* append bytes to part_buf, only use during push */
2461 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2463 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2464 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2465 host->part_buf_count += cnt;
2469 /* pull first bytes from part_buf, only use during pull */
2470 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2472 cnt = min(cnt, (int)host->part_buf_count);
2474 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2476 host->part_buf_count -= cnt;
2477 host->part_buf_start += cnt;
2482 /* pull final bytes from the part_buf, assuming it's just been filled */
2483 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2485 memcpy(buf, &host->part_buf, cnt);
2486 host->part_buf_start = cnt;
2487 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO push for a 16-bit wide FIFO: flush any partial word first, handle
 * misaligned buffers via a bounce buffer when the arch lacks efficient
 * unaligned access, then write whole u16 words; any trailing odd byte is
 * parked in part_buf (and force-flushed at the end of the transfer).
 */
2490 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2492 struct mmc_data *data = host->data;
2495 /* try and push anything in the part_buf */
2496 if (unlikely(host->part_buf_count)) {
2497 int len = dw_mci_push_part_bytes(host, buf, cnt);
/* a complete 16-bit word is buffered: write it to the FIFO now */
2500 if (host->part_buf_count == 2) {
2501 mci_writew(host, DATA(host->data_offset),
2503 host->part_buf_count = 0;
2506 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2507 if (unlikely((unsigned long)buf & 0x1)) {
2509 u16 aligned_buf[64];
/* cnt & -2 rounds down to a multiple of 2 bytes */
2510 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2511 int items = len >> 1;
2513 /* memcpy from input buffer into aligned buffer */
2514 memcpy(aligned_buf, buf, len);
2517 /* push data from aligned buffer into fifo */
2518 for (i = 0; i < items; ++i)
2519 mci_writew(host, DATA(host->data_offset),
2526 for (; cnt >= 2; cnt -= 2)
2527 mci_writew(host, DATA(host->data_offset), *pdata++);
2530 /* put anything remaining in the part_buf */
2532 dw_mci_set_part_bytes(host, buf, cnt);
2533 /* Push data if we have reached the expected data length */
2534 if ((data->bytes_xfered + init_cnt) ==
2535 (data->blksz * data->blocks))
2536 mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit wide FIFO: read whole u16 words (via a bounce
 * buffer for misaligned destinations on arches without efficient unaligned
 * access); a final odd byte is satisfied by reading one more word and
 * saving the leftover half in part_buf16.
 */
2541 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2543 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2544 if (unlikely((unsigned long)buf & 0x1)) {
2546 /* pull data from fifo into aligned buffer */
2547 u16 aligned_buf[64];
2548 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2549 int items = len >> 1;
2551 for (i = 0; i < items; ++i)
2552 aligned_buf[i] = mci_readw(host,
2553 DATA(host->data_offset));
2554 /* memcpy from aligned buffer into output buffer */
2555 memcpy(buf, aligned_buf, len);
2563 for (; cnt >= 2; cnt -= 2)
2564 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* odd trailing byte: read a full word, keep the surplus in part_buf */
2568 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2569 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit wide FIFO.  Same structure as dw_mci_push_data16:
 * flush partial word, bounce-buffer misaligned input if needed, stream
 * whole u32 words, park the trailing 1-3 bytes in part_buf and force-flush
 * them when the transfer's expected byte count has been reached.
 */
2573 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2575 struct mmc_data *data = host->data;
2578 /* try and push anything in the part_buf */
2579 if (unlikely(host->part_buf_count)) {
2580 int len = dw_mci_push_part_bytes(host, buf, cnt);
/* a complete 32-bit word is buffered: write it to the FIFO now */
2583 if (host->part_buf_count == 4) {
2584 mci_writel(host, DATA(host->data_offset),
2586 host->part_buf_count = 0;
2589 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2590 if (unlikely((unsigned long)buf & 0x3)) {
2592 u32 aligned_buf[32];
/* cnt & -4 rounds down to a multiple of 4 bytes */
2593 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2594 int items = len >> 2;
2596 /* memcpy from input buffer into aligned buffer */
2597 memcpy(aligned_buf, buf, len);
2600 /* push data from aligned buffer into fifo */
2601 for (i = 0; i < items; ++i)
2602 mci_writel(host, DATA(host->data_offset),
2609 for (; cnt >= 4; cnt -= 4)
2610 mci_writel(host, DATA(host->data_offset), *pdata++);
2613 /* put anything remaining in the part_buf */
2615 dw_mci_set_part_bytes(host, buf, cnt);
2616 /* Push data if we have reached the expected data length */
2617 if ((data->bytes_xfered + init_cnt) ==
2618 (data->blksz * data->blocks))
2619 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit wide FIFO.  Mirrors dw_mci_pull_data16 with u32
 * words; 1-3 trailing bytes come from one extra FIFO read whose surplus is
 * kept in part_buf32 for the next call.
 */
2624 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2626 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2627 if (unlikely((unsigned long)buf & 0x3)) {
2629 /* pull data from fifo into aligned buffer */
2630 u32 aligned_buf[32];
2631 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2632 int items = len >> 2;
2634 for (i = 0; i < items; ++i)
2635 aligned_buf[i] = mci_readl(host,
2636 DATA(host->data_offset));
2637 /* memcpy from aligned buffer into output buffer */
2638 memcpy(buf, aligned_buf, len);
2646 for (; cnt >= 4; cnt -= 4)
2647 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* trailing bytes: read a full word, keep the surplus in part_buf */
2651 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2652 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 64-bit wide FIFO.  Same structure as the 16/32-bit
 * variants, operating on u64 words; trailing 1-7 bytes go to part_buf and
 * are force-flushed once the transfer's expected byte count is reached.
 */
2656 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2658 struct mmc_data *data = host->data;
2661 /* try and push anything in the part_buf */
2662 if (unlikely(host->part_buf_count)) {
2663 int len = dw_mci_push_part_bytes(host, buf, cnt);
/* a complete 64-bit word is buffered: write it to the FIFO now */
2667 if (host->part_buf_count == 8) {
2668 mci_writeq(host, DATA(host->data_offset),
2670 host->part_buf_count = 0;
2673 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2674 if (unlikely((unsigned long)buf & 0x7)) {
2676 u64 aligned_buf[16];
/* cnt & -8 rounds down to a multiple of 8 bytes */
2677 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2678 int items = len >> 3;
2680 /* memcpy from input buffer into aligned buffer */
2681 memcpy(aligned_buf, buf, len);
2684 /* push data from aligned buffer into fifo */
2685 for (i = 0; i < items; ++i)
2686 mci_writeq(host, DATA(host->data_offset),
2693 for (; cnt >= 8; cnt -= 8)
2694 mci_writeq(host, DATA(host->data_offset), *pdata++);
2697 /* put anything remaining in the part_buf */
2699 dw_mci_set_part_bytes(host, buf, cnt);
2700 /* Push data if we have reached the expected data length */
2701 if ((data->bytes_xfered + init_cnt) ==
2702 (data->blksz * data->blocks))
2703 mci_writeq(host, DATA(host->data_offset),
/*
 * PIO pull for a 64-bit wide FIFO.  Mirrors the 16/32-bit pulls with u64
 * words; trailing bytes come from one extra FIFO read whose surplus is
 * kept in part_buf for the next call.
 */
2708 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2710 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2711 if (unlikely((unsigned long)buf & 0x7)) {
2713 /* pull data from fifo into aligned buffer */
2714 u64 aligned_buf[16];
2715 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2716 int items = len >> 3;
2718 for (i = 0; i < items; ++i)
2719 aligned_buf[i] = mci_readq(host,
2720 DATA(host->data_offset));
2721 /* memcpy from aligned buffer into output buffer */
2722 memcpy(buf, aligned_buf, len);
2730 for (; cnt >= 8; cnt -= 8)
2731 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* trailing bytes: read a full word, keep the surplus in part_buf */
2735 host->part_buf = mci_readq(host, DATA(host->data_offset));
2736 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Generic pull entry point: first drain any bytes left over in part_buf,
 * then delegate the remainder to the width-specific host->pull_data
 * handler (16/32/64-bit, selected at probe from the FIFO width).
 */
2740 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2744 /* get remaining partial bytes */
2745 len = dw_mci_pull_part_bytes(host, buf, cnt);
/* the partial buffer alone satisfied the request — nothing more to read */
2746 if (unlikely(len == cnt))
2751 /* get the rest of the data */
2752 host->pull_data(host, buf, cnt);
/*
 * Drain the receive FIFO into the request's scatter-gather buffers (PIO
 * read path).  Called from the RXDR interrupt and, with dto=true, from the
 * DATA_OVER interrupt to empty whatever remains in the FIFO.  On sg
 * exhaustion or bus teardown it marks EVENT_XFER_COMPLETE.
 */
2755 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2757 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2759 unsigned int offset;
2760 struct mmc_data *data = host->data;
2761 int shift = host->data_shift;
2764 unsigned int remain, fcnt;
/* bus already released — NOTE(review): bail-out path appears elided here */
2766 if(!host->mmc->bus_refs){
2767 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2771 if (!sg_miter_next(sg_miter))
2774 host->sg = sg_miter->piter.sg;
2775 buf = sg_miter->addr;
2776 remain = sg_miter->length;
/* bytes available = FIFO word count scaled to bytes plus buffered bytes */
2780 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2781 << shift) + host->part_buf_count;
2782 len = min(remain, fcnt);
2785 dw_mci_pull_data(host, (void *)(buf + offset), len);
2786 data->bytes_xfered += len;
2791 sg_miter->consumed = offset;
2792 status = mci_readl(host, MINTSTS);
2793 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2794 /* if the RXDR is ready read again */
/* on data-over (dto) also loop until the FIFO is completely empty */
2795 } while ((status & SDMMC_INT_RXDR) ||
2796 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2799 if (!sg_miter_next(sg_miter))
2801 sg_miter->consumed = 0;
2803 sg_miter_stop(sg_miter);
2807 sg_miter_stop(sg_miter);
2811 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Fill the transmit FIFO from the request's scatter-gather buffers (PIO
 * write path).  Called from the TXDR interrupt; writes as many bytes as
 * the FIFO has free space for, looping while TXDR stays asserted.  On sg
 * exhaustion it marks EVENT_XFER_COMPLETE.
 */
2814 static void dw_mci_write_data_pio(struct dw_mci *host)
2816 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2818 unsigned int offset;
2819 struct mmc_data *data = host->data;
2820 int shift = host->data_shift;
2823 unsigned int fifo_depth = host->fifo_depth;
2824 unsigned int remain, fcnt;
/* bus already released — NOTE(review): bail-out path appears elided here */
2826 if(!host->mmc->bus_refs){
2827 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2832 if (!sg_miter_next(sg_miter))
2835 host->sg = sg_miter->piter.sg;
2836 buf = sg_miter->addr;
2837 remain = sg_miter->length;
/* free space = (depth - used words) in bytes, minus bytes already parked */
2841 fcnt = ((fifo_depth -
2842 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2843 << shift) - host->part_buf_count;
2844 len = min(remain, fcnt);
2847 host->push_data(host, (void *)(buf + offset), len);
2848 data->bytes_xfered += len;
2853 sg_miter->consumed = offset;
2854 status = mci_readl(host, MINTSTS);
2855 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2856 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2859 if (!sg_miter_next(sg_miter))
2861 sg_miter->consumed = 0;
2863 sg_miter_stop(sg_miter);
2867 sg_miter_stop(sg_miter);
2871 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Record a command-done interrupt status (first status wins if one is
 * already pending) and kick the tasklet to run the request state machine.
 */
2874 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2876 if (!host->cmd_status)
2877 host->cmd_status = status;
2884 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2885 tasklet_schedule(&host->tasklet);
/*
 * Top-level interrupt handler.  Reads the masked interrupt status and
 * dispatches: command/data errors, data-over, RXDR/TXDR PIO service,
 * voltage-switch, command-done, card-detect, hardware-locked-error, SDIO
 * per-slot interrupts, and (internal-DMA SoCs only) IDMAC completion.
 * Each handled source is acknowledged by writing it back to RINTSTS.
 */
2888 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2890 struct dw_mci *host = dev_id;
2891 u32 pending, sdio_int;
2894 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2897 * DTO fix - version 2.10a and below, and only if internal DMA
2900 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
/* bits 17..29 of STATUS are the FIFO count; synthesize DATA_OVER */
2902 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2903 pending |= SDMMC_INT_DATA_OVER;
2907 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2908 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2909 host->cmd_status = pending;
2911 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2912 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2914 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2917 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2918 /* if there is an error report DATA_ERROR */
2919 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2920 host->data_status = pending;
2922 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2924 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2925 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2926 tasklet_schedule(&host->tasklet);
2929 if (pending & SDMMC_INT_DATA_OVER) {
2930 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2931 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2932 if (!host->data_status)
2933 host->data_status = pending;
/* on a PIO read, drain whatever is left in the FIFO before completing */
2935 if (host->dir_status == DW_MCI_RECV_STATUS) {
2936 if (host->sg != NULL)
2937 dw_mci_read_data_pio(host, true);
2939 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2940 tasklet_schedule(&host->tasklet);
2943 if (pending & SDMMC_INT_RXDR) {
2944 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2945 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2946 dw_mci_read_data_pio(host, false);
2949 if (pending & SDMMC_INT_TXDR) {
2950 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2951 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2952 dw_mci_write_data_pio(host);
/* voltage-switch interrupt: treated like a command completion */
2955 if (pending & SDMMC_INT_VSI) {
2956 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2957 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2958 dw_mci_cmd_interrupt(host, pending);
2961 if (pending & SDMMC_INT_CMD_DONE) {
2962 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2963 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2964 dw_mci_cmd_interrupt(host, pending);
2967 if (pending & SDMMC_INT_CD) {
2968 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2969 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
/* hold a wakelock so userspace can see the card change before suspend */
2970 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2971 queue_work(host->card_workqueue, &host->card_work);
2974 if (pending & SDMMC_INT_HLE) {
2975 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2976 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2980 /* Handle SDIO Interrupts */
2981 for (i = 0; i < host->num_slots; i++) {
2982 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in controller version 2.40a */
2984 if (host->verid < DW_MMC_240A)
2985 sdio_int = SDMMC_INT_SDIO(i);
2987 sdio_int = SDMMC_INT_SDIO(i + 8);
2989 if (pending & sdio_int) {
2990 mci_writel(host, RINTSTS, sdio_int);
2991 mmc_signal_sdio_irq(slot->mmc);
2997 #ifdef CONFIG_MMC_DW_IDMAC
2998 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2999 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
3000 /* Handle DMA interrupts */
3001 pending = mci_readl(host, IDSTS);
3002 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
3003 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
3004 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
3005 host->dma_ops->complete((void *)host);
/*
 * Card-detect workqueue handler.  For each slot: re-check card presence,
 * switch pinctrl between sdmmc and uart-debug states where the dts
 * provides a "udbg" tag, and on an insert/remove transition reset the
 * controller, fail any in-flight request with -ENOMEDIUM, and notify the
 * mmc core via mmc_detect_change().
 */
3013 static void dw_mci_work_routine_card(struct work_struct *work)
3015 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
3018 for (i = 0; i < host->num_slots; i++) {
3019 struct dw_mci_slot *slot = host->slot[i];
3020 struct mmc_host *mmc = slot->mmc;
3021 struct mmc_request *mrq;
3024 present = dw_mci_get_cd(mmc);
3026 /* On card insert, switch the data lines to uart function, and vice versa.
3027 Only chips that need software switching use the udbg tag in dts.
3029 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
3031 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3032 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3033 mmc_hostname(host->mmc));
3035 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3036 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3037 mmc_hostname(host->mmc));
/* loop: presence may change again while we are handling the event */
3041 while (present != slot->last_detect_state) {
3042 dev_dbg(&slot->mmc->class_dev, "card %s\n",
3043 present ? "inserted" : "removed");
3044 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
3045 present ? "inserted" : "removed.", mmc_hostname(mmc));
3047 dw_mci_ctrl_all_reset(host);
3048 /* Stop edma when routine card triggered */
3049 if(cpu_is_rk3036() || cpu_is_rk312x())
3050 if(host->dma_ops && host->dma_ops->stop)
3051 host->dma_ops->stop(host);
3052 rk_send_wakeup_key();//wake up system
3053 spin_lock_bh(&host->lock);
3055 /* Card change detected */
3056 slot->last_detect_state = present;
3058 /* Clean up queue if present */
/* request currently being processed: unwind it by state-machine stage */
3061 if (mrq == host->mrq) {
3065 switch (host->state) {
3068 case STATE_SENDING_CMD:
3069 mrq->cmd->error = -ENOMEDIUM;
3073 case STATE_SENDING_DATA:
3074 mrq->data->error = -ENOMEDIUM;
3075 dw_mci_stop_dma(host);
3077 case STATE_DATA_BUSY:
3078 case STATE_DATA_ERROR:
3079 if (mrq->data->error == -EINPROGRESS)
3080 mrq->data->error = -ENOMEDIUM;
3084 case STATE_SENDING_STOP:
3085 mrq->stop->error = -ENOMEDIUM;
3089 dw_mci_request_end(host, mrq);
/* request still queued: fail it directly without touching hardware */
3091 list_del(&slot->queue_node);
3092 mrq->cmd->error = -ENOMEDIUM;
3094 mrq->data->error = -ENOMEDIUM;
3096 mrq->stop->error = -ENOMEDIUM;
3098 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3099 mrq->cmd->opcode, mmc_hostname(mmc));
/* drop the lock around the completion callback to avoid recursion */
3101 spin_unlock(&host->lock);
3102 mmc_request_done(slot->mmc, mrq);
3103 spin_lock(&host->lock);
3107 /* Power down slot */
3109 /* Clear down the FIFO */
3110 dw_mci_fifo_reset(host);
3111 #ifdef CONFIG_MMC_DW_IDMAC
3112 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3113 dw_mci_idmac_reset(host);
3118 spin_unlock_bh(&host->lock);
3120 present = dw_mci_get_cd(mmc);
3123 mmc_detect_change(slot->mmc,
3124 msecs_to_jiffies(host->pdata->detect_delay_ms));
3129 /* given a slot id, find out the device node representing that slot */
/*
 * Walk the controller's child nodes and return the one whose "reg"
 * property equals the requested slot id; NULL-ish paths (no dev/of_node,
 * no match) fall through to the elided default return.
 */
3130 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3132 struct device_node *np;
3136 if (!dev || !dev->of_node)
3139 for_each_child_of_node(dev->of_node, np) {
3140 addr = of_get_property(np, "reg", &len);
/* skip nodes without a usable 32-bit "reg" value */
3141 if (!addr || (len < sizeof(int)))
3143 if (be32_to_cpup(addr) == slot)
/*
 * Mapping of per-slot devicetree quirk property names to driver quirk
 * flags, consumed by dw_mci_of_get_slot_quirks() below.
 */
3149 static struct dw_mci_of_slot_quirks {
3152 } of_slot_quirks[] = {
3154 .quirk = "disable-wp",
3155 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Collect the quirk flags declared in the slot's devicetree node by
 * testing each property listed in of_slot_quirks[].
 */
3159 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3161 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3166 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3167 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3168 quirks |= of_slot_quirks[idx].id;
3173 /* find out bus-width for a given slot */
/*
 * Read "bus-width" from the controller node (note: the slot sub-node
 * lookup is deliberately bypassed here); warns and falls back to a
 * default (elided) when the property is absent.
 */
3174 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3176 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3182 if (of_property_read_u32(np, "bus-width", &bus_wd))
3183 dev_err(dev, "bus-width property not found, assuming width"
3189 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Look up "pwr-gpios" in the controller node, request it, and drive it
 * low (power enabled).  Missing property is silently accepted.
 */
3190 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3192 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3198 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3200 /* Having a missing entry is valid; return silently */
3201 if (!gpio_is_valid(gpio))
3204 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3205 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3209 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3215 /* find the write protect gpio for a given slot; or -1 if none specified */
/*
 * Look up and request the "wp-gpios" entry of the slot's devicetree node.
 * Missing property is silently accepted; a failed request only warns.
 */
3216 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3218 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3224 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3226 /* Having a missing entry is valid; return silently */
3227 if (!gpio_is_valid(gpio))
3230 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3231 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3238 /* find the cd gpio for a given slot */
/*
 * Register the "cd-gpios" entry of the controller node as the mmc core's
 * card-detect gpio (mmc_gpio_request_cd).  Missing entry is accepted
 * silently; a failed request only warns.
 */
3239 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3240 struct mmc_host *mmc)
3242 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3248 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3250 /* Having a missing entry is valid; return silently */
3251 if (!gpio_is_valid(gpio))
3254 if (mmc_gpio_request_cd(mmc, gpio, 0))
3255 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * Threaded IRQ handler for the dedicated card-detect gpio (low-end SoCs
 * without controller CD).  Re-arms the trigger to the opposite level of
 * the current pin state (emulating edge detection on both transitions),
 * wakes the system, and queues the card-detect work unless rescan is
 * disabled (suspend path handles the change later instead).
 */
3258 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3260 struct mmc_host *mmc = dev_id;
3261 struct dw_mci_slot *slot = mmc_priv(mmc);
3262 struct dw_mci *host = slot->host;
3263 int gpio_cd = slot->cd_gpio;
/* flip the level trigger so the next transition also fires */
3265 (gpio_get_value(gpio_cd) == 0) ?
3266 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
3267 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
3269 /* wakeup system whether gpio debounce or not */
3270 rk_send_wakeup_key();
3272 /* no need to trigger detect flow when rescan is disabled.
3273 This case happened in dpm: we just wake the system up and
3274 let the suspend_post notify callback handle it.
3276 if(mmc->rescan_disable == 0)
3277 queue_work(host->card_workqueue, &host->card_work);
3279 printk("%s: rescan been disabled!\n", __FUNCTION__);
/*
 * Convert the card-detect gpio to an interrupt, request it as a threaded
 * IRQ (dw_mci_gpio_cd_irqt) and enable it as a wakeup source so card
 * changes can wake the system from idle/deep suspend.
 */
3284 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3285 struct mmc_host *mmc)
3287 struct dw_mci_slot *slot = mmc_priv(mmc);
3288 struct dw_mci *host = slot->host;
3292 /* Having a missing entry is valid; return silently */
3293 if (!gpio_is_valid(gpio))
3296 irq = gpio_to_irq(gpio);
3298 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3299 NULL, dw_mci_gpio_cd_irqt,
3300 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
3304 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3306 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3307 enable_irq_wake(irq);
3310 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/*
 * Undo dw_mci_of_set_cd_gpio_irq(): release the card-detect gpio's IRQ
 * and the gpio itself (no-op for an invalid gpio).
 */
3314 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3315 struct mmc_host *mmc)
3317 if (!gpio_is_valid(gpio))
3320 if (gpio_to_irq(gpio) >= 0) {
3321 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3322 devm_gpio_free(&mmc->class_dev, gpio);
3325 #else /* CONFIG_OF */
/* Stubs used when the kernel is built without devicetree support. */
3326 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3330 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3334 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3338 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3342 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3343 struct mmc_host *mmc)
3347 #endif /* CONFIG_OF */
3349 /* @host: dw_mci host private data
3350 * Init pinctrl for each platform. Usually we assign
3351 * "default" tag for functional usage, "idle" tag for gpio
3352 * state and "udbg" tag for uart_dbg if any.
/* eMMC pins are never retargeted, so skip pinctrl entirely for eMMC. */
3354 static void dw_mci_init_pinctrl(struct dw_mci *host)
3356 /* Fixme: DON'T TOUCH EMMC SETTING! */
3357 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3360 /* Get pinctrl for DTS */
3361 host->pinctrl = devm_pinctrl_get(host->dev);
3362 if (IS_ERR(host->pinctrl)) {
3363 dev_err(host->dev, "%s: No pinctrl used!\n",
3364 mmc_hostname(host->mmc));
3368 /* Lookup idle state */
3369 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3370 PINCTRL_STATE_IDLE);
3371 if (IS_ERR(host->pins_idle)) {
3372 dev_err(host->dev, "%s: No idle tag found!\n",
3373 mmc_hostname(host->mmc));
3375 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3376 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3377 mmc_hostname(host->mmc));
3380 /* Lookup default state */
3381 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3382 PINCTRL_STATE_DEFAULT);
3383 if (IS_ERR(host->pins_default)) {
3384 dev_err(host->dev, "%s: No default pinctrl found!\n",
3385 mmc_hostname(host->mmc));
3387 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3388 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3389 mmc_hostname(host->mmc));
3392 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3393 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3394 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3395 if (IS_ERR(host->pins_udbg)) {
3396 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3397 mmc_hostname(host->mmc));
/* no card present at boot: hand the pins to uart debug immediately */
3399 if (!dw_mci_get_cd(host->mmc))
3400 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3401 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3402 mmc_hostname(host->mmc));
/*
 * PM notifier for SD slots: disable card rescanning (and cancel any
 * pending detect work, releasing its wakelock) when entering suspend/
 * hibernate; re-enable rescanning and trigger a detect on resume.
 * Note "host" here is the struct mmc_host, not struct dw_mci.
 */
3407 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3408 unsigned long mode, void *unused)
3410 struct mmc_host *host = container_of(
3411 notify_block, struct mmc_host, pm_notify);
3412 unsigned long flags;
3415 case PM_HIBERNATION_PREPARE:
3416 case PM_SUSPEND_PREPARE:
3417 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3418 spin_lock_irqsave(&host->lock, flags);
3419 host->rescan_disable = 1;
3420 spin_unlock_irqrestore(&host->lock, flags);
/* cancelled detect work still holds the wakelock — drop it here */
3421 if (cancel_delayed_work(&host->detect))
3422 wake_unlock(&host->detect_wake_lock);
3425 case PM_POST_SUSPEND:
3426 case PM_POST_HIBERNATION:
3427 case PM_POST_RESTORE:
3428 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3429 spin_lock_irqsave(&host->lock, flags);
3430 host->rescan_disable = 0;
3431 spin_unlock_irqrestore(&host->lock, flags);
3432 mmc_detect_change(host, 10);
/*
 * Allocate and register one mmc_host slot: parse devicetree properties
 * (frequency range, restrict-caps, capability flags), apply Rockchip
 * SoC-specific GRF setup, wire up card-detect gpio/IRQ and optional
 * regulator, size the request limits for the DMA/PIO mode in use, init
 * pinctrl and finally mmc_add_host().  Error paths (mostly elided here)
 * unregister the PM notifier and free the cd-gpio IRQ.
 */
3438 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3440 struct mmc_host *mmc;
3441 struct dw_mci_slot *slot;
3442 const struct dw_mci_drv_data *drv_data = host->drv_data;
3447 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3451 slot = mmc_priv(mmc);
3455 host->slot[id] = slot;
3458 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3460 mmc->ops = &dw_mci_ops;
/* "clock-freq-min-max" overrides the driver's built-in frequency range */
3462 if (of_property_read_u32_array(host->dev->of_node,
3463 "clock-freq-min-max", freq, 2)) {
3464 mmc->f_min = DW_MCI_FREQ_MIN;
3465 mmc->f_max = DW_MCI_FREQ_MAX;
3467 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3468 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3470 mmc->f_min = freq[0];
3471 mmc->f_max = freq[1];
3473 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3474 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3477 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
/* dts declares which card type this controller instance serves */
3479 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3480 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3481 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3482 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3483 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3484 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
/* only removable SD needs the suspend/resume rescan gating notifier */
3486 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3487 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3488 if (register_pm_notifier(&mmc->pm_notify)) {
3489 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3490 goto err_pm_notifier;
3494 if (host->cid == DW_MCI_TYPE_RK3368) {
3495 if (IS_ERR(host->grf))
3496 pr_err("rk_sdmmc: dts couldn't find grf regmap for 3368\n");
3498 /* Disable force_jtag */
/* GRF write: high halfword is the write-enable mask for bit 13 */
3499 regmap_write(host->grf, 0x43c, (1<<13)<<16 | (0 << 13));
3500 } else if (cpu_is_rk3288()) {
3501 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
3505 /* We assume only low-level chip use gpio_cd */
3506 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
3507 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3508 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3509 if (gpio_is_valid(slot->cd_gpio)) {
3510 /* Request gpio int for card detection */
3511 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3513 slot->cd_gpio = -ENODEV;
3514 dev_err(host->dev, "failed to get your cd-gpios!\n");
3518 if (host->pdata->get_ocr)
3519 mmc->ocr_avail = host->pdata->get_ocr(id);
/* default OCR: advertise every voltage window from 1.65V to 3.6V */
3522 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3523 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3524 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3525 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3529 * Start with slot power disabled, it will be enabled when a card
3532 if (host->pdata->setpower)
3533 host->pdata->setpower(id, 0);
3535 if (host->pdata->caps)
3536 mmc->caps = host->pdata->caps;
3538 if (host->pdata->pm_caps)
3539 mmc->pm_caps = host->pdata->pm_caps;
/* controller index for per-instance caps tables in drv_data */
3541 if (host->dev->of_node) {
3542 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3546 ctrl_id = to_platform_device(host->dev)->id;
3548 if (drv_data && drv_data->caps)
3549 mmc->caps |= drv_data->caps[ctrl_id];
3550 if (drv_data && drv_data->hold_reg_flag)
3551 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3553 /* set the compatibility of driver. */
3554 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3555 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3557 if (host->pdata->caps2)
3558 mmc->caps2 = host->pdata->caps2;
3560 if (host->pdata->get_bus_wd)
3561 bus_width = host->pdata->get_bus_wd(slot->id);
3562 else if (host->dev->of_node)
3563 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3567 switch (bus_width) {
3569 mmc->caps |= MMC_CAP_8_BIT_DATA;
/* NOTE(review): 8-bit presumably falls through to also set 4-bit */
3571 mmc->caps |= MMC_CAP_4_BIT_DATA;
3574 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3575 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3576 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3577 mmc->caps |= MMC_CAP_SDIO_IRQ;
3578 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3579 mmc->caps |= MMC_CAP_HW_RESET;
3580 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3581 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3582 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3583 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3584 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3585 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3586 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3587 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3589 /*Assign pm_caps pass to pm_flags*/
3590 mmc->pm_flags = mmc->pm_caps;
3592 if (host->pdata->blk_settings) {
3593 mmc->max_segs = host->pdata->blk_settings->max_segs;
3594 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3595 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3596 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3597 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3599 /* Useful defaults if platform data is unset. */
3600 #ifdef CONFIG_MMC_DW_IDMAC
3601 mmc->max_segs = host->ring_size;
3602 mmc->max_blk_size = 65536;
3603 mmc->max_blk_count = host->ring_size;
3604 mmc->max_seg_size = 0x1000;
3605 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3606 if(cpu_is_rk3036() || cpu_is_rk312x()){
3607 /* fixup for external dmac setting */
3609 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3610 mmc->max_blk_count = 65535;
3611 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3612 mmc->max_seg_size = mmc->max_req_size;
/* PIO fallback limits (no DMA descriptor ring available) */
3616 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3617 mmc->max_blk_count = 512;
3618 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3619 mmc->max_seg_size = mmc->max_req_size;
3620 #endif /* CONFIG_MMC_DW_IDMAC */
3624 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3626 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* vmmc regulator only applies to the removable SD slot */
3631 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3632 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3636 if (IS_ERR(host->vmmc)) {
3637 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3640 ret = regulator_enable(host->vmmc);
3643 "failed to enable regulator: %d\n", ret);
3650 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3652 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3653 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3655 dw_mci_init_pinctrl(host);
3656 ret = mmc_add_host(mmc);
3660 #if defined(CONFIG_DEBUG_FS)
3661 dw_mci_init_debugfs(slot);
3664 /* Card initially undetected */
3665 slot->last_detect_state = 1;
/* error unwind: undo PM notifier and cd-gpio IRQ registration */
3669 unregister_pm_notifier(&mmc->pm_notify);
3672 if (gpio_is_valid(slot->cd_gpio))
3673 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * Tear down one slot: run the platform exit hook, unregister the
 * mmc_host from the core, detach it from the controller and free it.
 */
3678 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3680 /* Shutdown detect IRQ */
3681 if (slot->host->pdata->exit)
3682 slot->host->pdata->exit(id);
3684 /* Debugfs stuff is cleaned up by mmc core */
3685 mmc_remove_host(slot->mmc);
3686 slot->host->slot[id] = NULL;
3687 mmc_free_host(slot->mmc);
/*
 * Set up the DMA path: allocate one page of coherent memory for the
 * descriptor/sg translation area, pick internal IDMAC or the external
 * edmac ops depending on the SoC, and initialize the chosen ops.  Any
 * failure falls back to PIO mode (fallback path partly elided).
 */
3690 static void dw_mci_init_dma(struct dw_mci *host)
3692 /* Alloc memory for sg translation */
3693 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3694 &host->sg_dma, GFP_KERNEL);
3695 if (!host->sg_cpu) {
3696 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3701 memset(host->sg_cpu, 0, PAGE_SIZE);
3704 /* Determine which DMA interface to use */
3705 #if defined(CONFIG_MMC_DW_IDMAC)
/* rk3036/rk312x have no internal IDMAC; they use an external dmac */
3706 if(cpu_is_rk3036() || cpu_is_rk312x()){
3707 host->dma_ops = &dw_mci_edmac_ops;
3708 dev_info(host->dev, "Using external DMA controller.\n");
3710 host->dma_ops = &dw_mci_idmac_ops;
3711 dev_info(host->dev, "Using internal DMA controller.\n");
/* require the full ops vtable before trusting the DMA path */
3718 if (host->dma_ops->init && host->dma_ops->start &&
3719 host->dma_ops->stop && host->dma_ops->cleanup) {
3720 if (host->dma_ops->init(host)) {
3721 dev_err(host->dev, "%s: Unable to initialize "
3722 "DMA Controller.\n", __func__);
3726 dev_err(host->dev, "DMA initialization not found.\n");
3734 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Assert the requested reset bit(s) in CTRL and poll (up to 500 ms) for
 * the hardware to self-clear them.  Returns success/failure (return
 * statements elided from this view); logs on timeout.
 */
3739 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3741 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3744 ctrl = mci_readl(host, CTRL);
3746 mci_writel(host, CTRL, ctrl);
3748 /* wait till resets clear */
3750 ctrl = mci_readl(host, CTRL);
3751 if (!(ctrl & reset))
3753 } while (time_before(jiffies, timeout));
3756 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * Reset only the FIFO.  The sg_miter is stopped first because the reset
 * raises an interrupt and the PIO path must not keep a stale mapping.
 */
3762 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3765 * Reseting generates a block interrupt, hence setting
3766 * the scatter-gather pointer to NULL.
3769 sg_miter_stop(&host->sg_miter);
3773 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full reset: FIFO, controller and DMA reset bits in one CTRL write. */
3776 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3778 return dw_mci_ctrl_reset(host,
3779 SDMMC_CTRL_FIFO_RESET |
3781 SDMMC_CTRL_DMA_RESET);
/*
 * Save the controller register file (DW_REGS_NUM registers plus
 * CDTHRCTL) into host->regs_buffer before the power domain is lost in
 * suspend, so dw_mci_rst_post_resume() can restore it.
 */
3784 static void dw_mci_rst_pre_suspend(struct dw_mci *host)
3789 buffer = host->regs_buffer;
3791 for (index = 0; index < DW_REGS_NUM ; index++){
3792 *buffer = mci_readreg(host, index*4);
3793 MMC_DBG_INFO_FUNC(host->mmc, "[%s] :0x%08x.\n",
3794 dw_mci_regs[index].name, *buffer);
/* CDTHRCTL lives outside the contiguous register block — save it last */
3798 *buffer = mci_readl(host,CDTHRCTL);
3799 MMC_DBG_INFO_FUNC(host->mmc, "[%s] :0x%08x.\n", "CARDTHRCTL", *buffer);
/*
 * Restore the register snapshot taken by dw_mci_rst_pre_suspend() after
 * resume: the contiguous register block first, then CDTHRCTL.
 */
3802 static void dw_mci_rst_post_resume(struct dw_mci *host)
3807 buffer = host->regs_buffer;
3809 for (index = 0; index < DW_REGS_NUM; index++){
3810 mci_writereg(host, index*4, *buffer);
3813 mci_writel(host, CDTHRCTL, *buffer);
/* Suspend/resume register save-restore hooks for power-domain reset. */
3816 static const struct dw_mci_rst_ops dw_mci_pdrst_ops = {
3817 .pre_suspend = dw_mci_rst_pre_suspend,
3818 .post_resume = dw_mci_rst_post_resume,
/*
 * Mapping of controller-level devicetree quirk property names to driver
 * quirk flags, consumed by dw_mci_parse_dt().
 */
3823 static struct dw_mci_of_quirks {
3828 .quirk = "broken-cd",
3829 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * dw_mci_parse_dt - build a struct dw_mci_board from the device-tree
 * node of the controller.
 *
 * Allocates pdata with devm_kzalloc() and fills it from DT
 * properties: slot count (defaults to 1), quirk flags from the
 * of_quirks table, FIFO depth, card-detect delay, bus clock
 * frequency, PM capabilities and MMC capability bits.  Variant
 * drivers get a chance to parse their own properties via
 * drv_data->parse_dt().
 *
 * Returns the populated pdata, or ERR_PTR(-ENOMEM) / ERR_PTR(ret)
 * on allocation or variant-parse failure.
 */
3833 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3835 struct dw_mci_board *pdata;
3836 struct device *dev = host->dev;
3837 struct device_node *np = dev->of_node;
3838 const struct dw_mci_drv_data *drv_data = host->drv_data;
3840 u32 clock_frequency;
3842 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3844 dev_err(dev, "could not allocate memory for pdata\n");
3845 return ERR_PTR(-ENOMEM);
3848 /* find out number of slots supported */
3849 if (of_property_read_u32(dev->of_node, "num-slots",
3850 &pdata->num_slots)) {
3851 dev_info(dev, "num-slots property not found, "
3852 "assuming 1 slot is available\n");
3853 pdata->num_slots = 1;
/* Translate any quirk properties present in DT into flag bits. */
3857 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3858 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3859 pdata->quirks |= of_quirks[idx].id;
3862 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3863 dev_info(dev, "fifo-depth property not found, using "
3864 "value of FIFOTH register as default\n")
3866 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3868 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3869 pdata->bus_hz = clock_frequency;
/* Let the SoC-specific variant driver parse its own DT properties. */
3871 if (drv_data && drv_data->parse_dt) {
3872 ret = drv_data->parse_dt(host);
3874 return ERR_PTR(ret);
3877 if (of_find_property(np, "keep-power-in-suspend", NULL))
3878 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3880 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3881 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3883 if (of_find_property(np, "supports-highspeed", NULL))
3884 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3886 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3887 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3889 if (of_find_property(np, "supports-DDR_MODE", NULL))
3890 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3892 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3893 pdata->caps2 |= MMC_CAP2_HS200;
3895 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3896 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3898 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3899 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3901 if (of_get_property(np, "cd-inverted", NULL))
3902 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3903 if (of_get_property(np, "bootpart-no-access", NULL))
3904 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
/*
 * Register contents are lost when the controller's power domain is
 * cut in suspend: allocate a snapshot buffer and install the
 * save/restore ops (dw_mci_pdrst_ops).
 */
3906 if (of_get_property(np, "controller-power-down", NULL)) {
3907 host->regs_buffer = (u32 *)devm_kzalloc(host->dev,
3908 DW_REGS_SIZE, GFP_KERNEL);
3909 if (!host->regs_buffer) {
3911 "could not allocate memory for regs_buffer\n");
3912 return ERR_PTR(-ENOMEM);
3915 host->rst_ops = &dw_mci_pdrst_ops;
3916 mmc_assume_removable = 0;
3922 #else /* CONFIG_OF */
/* Without device-tree support there is no platform-data source. */
3923 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3925 return ERR_PTR(-EINVAL);
3927 #endif /* CONFIG_OF */
/*
 * dw_mci_probe - bring up the DesignWare MMC controller.
 *
 * Sequence: parse platform data from DT; validate select_slot for
 * multi-slot configs; read VERID to pick the DATA register offset;
 * claim and enable the hpclk/hclk/clk clocks; set the CIU clock rate
 * (doubled for >= 2.40a parts, see inline comment); run the variant
 * setup_clock() hook; size the host data width from HCON; reset all
 * blocks; init DMA, interrupts, timeout and FIFO thresholds; create
 * the card workqueue and request the IRQ; then initialize each slot.
 * Probe succeeds if at least one slot initializes.
 *
 * Error paths (labels not all visible in this view) unwind the
 * workqueue, DMA, vmmc regulator and clocks in reverse order.
 * Returns 0 on success or a negative errno.
 */
3929 int dw_mci_probe(struct dw_mci *host)
3931 const struct dw_mci_drv_data *drv_data = host->drv_data;
3932 int width, i, ret = 0;
3938 host->pdata = dw_mci_parse_dt(host);
3939 if (IS_ERR(host->pdata)) {
3940 dev_err(host->dev, "platform data not available\n");
3945 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3947 "Platform data must supply select_slot function\n");
3952 * In 2.40a spec, Data offset is changed.
3953 * Need to check the version-id and set data-offset for DATA register.
3955 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3956 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3958 if (host->verid < DW_MMC_240A)
3959 host->data_offset = DATA_OFFSET;
3961 host->data_offset = DATA_240A_OFFSET;
/* Optional high-performance clock; failure is only logged here. */
3964 host->hpclk_mmc= devm_clk_get(host->dev, "hpclk_mmc");
3965 if (IS_ERR(host->hpclk_mmc)) {
3966 dev_err(host->dev, "failed to get hpclk_mmc\n");
3968 clk_prepare_enable(host->hpclk_mmc);
/* AHB bus clock -- mandatory, bail out if absent. */
3972 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3973 if (IS_ERR(host->hclk_mmc)) {
3974 dev_err(host->dev, "failed to get hclk_mmc\n");
3975 ret = PTR_ERR(host->hclk_mmc);
3979 clk_prepare_enable(host->hclk_mmc);
/* Card interface (CIU) clock -- mandatory. */
3982 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3983 if (IS_ERR(host->clk_mmc)) {
3984 dev_err(host->dev, "failed to get clk mmc_per\n");
3985 ret = PTR_ERR(host->clk_mmc);
3989 host->bus_hz = host->pdata->bus_hz;
3990 if (!host->bus_hz) {
3991 dev_err(host->dev,"Platform data must supply bus speed\n");
3996 if (host->verid < DW_MMC_240A)
3997 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3999 //rockchip: fix divider 2 in clksum before controller
4000 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
4003 dev_err(host->dev, "failed to set clk mmc\n");
4006 clk_prepare_enable(host->clk_mmc);
4008 if (drv_data && drv_data->setup_clock) {
4009 ret = drv_data->setup_clock(host);
4012 "implementation specific clock setup failed\n");
4017 host->quirks = host->pdata->quirks;
4018 host->irq_state = true;
4019 host->set_speed = 0;
4021 host->svi_flags = 0;
4023 spin_lock_init(&host->lock);
4024 spin_lock_init(&host->slock);
4026 INIT_LIST_HEAD(&host->queue);
4028 * Get the host data width - this assumes that HCON has been set with
4029 * the correct values.
4031 i = (mci_readl(host, HCON) >> 7) & 0x7;
/* Select 16/32/64-bit FIFO accessors from the HCON data-width field. */
4033 host->push_data = dw_mci_push_data16;
4034 host->pull_data = dw_mci_pull_data16;
4036 host->data_shift = 1;
4037 } else if (i == 2) {
4038 host->push_data = dw_mci_push_data64;
4039 host->pull_data = dw_mci_pull_data64;
4041 host->data_shift = 3;
4043 /* Check for a reserved value, and warn if it is */
4045 "HCON reports a reserved host data width!\n"
4046 "Defaulting to 32-bit access.\n");
4047 host->push_data = dw_mci_push_data32;
4048 host->pull_data = dw_mci_pull_data32;
4050 host->data_shift = 2;
4053 /* Reset all blocks */
4054 if (!dw_mci_ctrl_all_reset(host))
4057 host->dma_ops = host->pdata->dma_ops;
4058 dw_mci_init_dma(host);
4060 /* Clear the interrupts for the host controller */
4061 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4062 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4064 /* Put in max timeout */
4065 mci_writel(host, TMOUT, 0xFFFFFFFF);
4068 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
4069 * Tx Mark = fifo_size / 2 DMA Size = 8
4071 if (!host->pdata->fifo_depth) {
4073 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
4074 * have been overwritten by the bootloader, just like we're
4075 * about to do, so if you know the value for your hardware, you
4076 * should put it in the platform data.
4078 fifo_size = mci_readl(host, FIFOTH);
4079 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
4081 fifo_size = host->pdata->fifo_depth;
4083 host->fifo_depth = fifo_size;
4085 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
4086 mci_writel(host, FIFOTH, host->fifoth_val);
4088 /* disable clock to CIU */
4089 mci_writel(host, CLKENA, 0);
4090 mci_writel(host, CLKSRC, 0);
4092 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
4093 host->card_workqueue = alloc_workqueue("dw-mci-card",
4094 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
4095 if (!host->card_workqueue) {
4099 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
4100 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
4101 host->irq_flags, "dw-mci", host);
/* Slot count from pdata if given, otherwise from HCON bits [5:1]. */
4105 if (host->pdata->num_slots)
4106 host->num_slots = host->pdata->num_slots;
4108 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
4110 /* We need at least one slot to succeed */
4111 for (i = 0; i < host->num_slots; i++) {
4112 ret = dw_mci_init_slot(host, i);
4114 dev_dbg(host->dev, "slot %d init failed\n", i);
4120 * Enable interrupts for command done, data over, data empty, card det,
4121 * receive ready and error such as transmit, receive timeout, crc error
4123 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4124 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
4125 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect interrupt only makes sense for removable (SD) media. */
4126 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
4127 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
4128 regs |= SDMMC_INT_CD;
4130 mci_writel(host, INTMASK, regs);
4132 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4134 dev_info(host->dev, "DW MMC controller at irq %d, "
4135 "%d bit host data width, "
4137 host->irq, width, fifo_size);
4140 dev_info(host->dev, "%d slots initialized\n", init_slots);
4142 dev_dbg(host->dev, "attempted to initialize %d slots, "
4143 "but failed on all\n", host->num_slots);
4148 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4149 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* Error unwind: release resources in reverse order of acquisition. */
4154 destroy_workqueue(host->card_workqueue);
4157 if (host->use_dma && host->dma_ops->exit)
4158 host->dma_ops->exit(host);
4161 regulator_disable(host->vmmc);
4162 regulator_put(host->vmmc);
4166 if (!IS_ERR(host->clk_mmc))
4167 clk_disable_unprepare(host->clk_mmc);
4169 if (!IS_ERR(host->hclk_mmc))
4170 clk_disable_unprepare(host->hclk_mmc);
4173 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - tear down the controller on driver removal.
 *
 * Masks and clears all interrupts, cleans up every slot, gates the
 * CIU clock, destroys the card workqueue, unregisters the PM
 * notifier (SD only), exits DMA, frees the card-detect GPIO IRQ if
 * one was claimed, then disables/releases the vmmc regulator and all
 * clocks.
 */
4175 void dw_mci_remove(struct dw_mci *host)
4177 struct mmc_host *mmc = host->mmc;
4178 struct dw_mci_slot *slot = mmc_priv(mmc);
4181 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4182 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4184 for(i = 0; i < host->num_slots; i++){
4185 dev_dbg(host->dev, "remove slot %d\n", i);
4187 dw_mci_cleanup_slot(host->slot[i], i);
4190 /* disable clock to CIU */
4191 mci_writel(host, CLKENA, 0);
4192 mci_writel(host, CLKSRC, 0);
4194 destroy_workqueue(host->card_workqueue);
4195 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4196 unregister_pm_notifier(&host->mmc->pm_notify);
4198 if (host->use_dma && host->dma_ops->exit)
4199 host->dma_ops->exit(host);
4201 if (gpio_is_valid(slot->cd_gpio))
4202 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4205 regulator_disable(host->vmmc);
4206 regulator_put(host->vmmc);
4208 if (!IS_ERR(host->clk_mmc))
4209 clk_disable_unprepare(host->clk_mmc);
4211 if (!IS_ERR(host->hclk_mmc))
4212 clk_disable_unprepare(host->hclk_mmc);
4213 if (!IS_ERR(host->hpclk_mmc))
4214 clk_disable_unprepare(host->hpclk_mmc);
4216 EXPORT_SYMBOL(dw_mci_remove);
4220 #ifdef CONFIG_PM_SLEEP
4222 * TODO: we should probably disable the clock to the card in the suspend path.
4224 extern int get_wifi_chip_type(void);
/*
 * dw_mci_suspend - system-suspend hook for the controller.
 *
 * For SDIO hosts driving certain Wi-Fi chips (ESP8089 or anything
 * above the AP6XXX series) suspend handling is special-cased --
 * NOTE(review): the body of that branch is not visible here,
 * presumably an early return; confirm against the full source.
 * Disables vmmc, and for the SD controller disables the IRQ, moves
 * pins to the idle pinctrl state and arms the card-detect GPIO as a
 * wakeup source (except on rk3126/3126b/3036, which are already in
 * gpio_cd mode).  Finally masks interrupts and, if power-down reset
 * ops are installed, snapshots registers via rst_ops->pre_suspend().
 */
4225 int dw_mci_suspend(struct dw_mci *host)
4227 int present = dw_mci_get_cd(host->mmc);
4229 if((host->mmc->restrict_caps &
4230 RESTRICT_CARD_TYPE_SDIO) &&
4231 (get_wifi_chip_type() == WIFI_ESP8089 ||
4232 get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
4236 regulator_disable(host->vmmc);
4238 /* Only for sdmmc controller */
4239 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4240 disable_irq(host->irq);
4242 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4243 MMC_DBG_ERR_FUNC(host->mmc,
4244 "Idle pinctrl setting failed! [%s]",
4245 mmc_hostname(host->mmc));
4248 /* Soc rk3126/3036 already in gpio_cd mode */
4249 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4250 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4251 enable_irq_wake(host->mmc->slot.cd_irq);
/* Quiesce the controller: clear, mask and disable all interrupts. */
4255 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4256 mci_writel(host, INTMASK, 0x00);
4257 mci_writel(host, CTRL, 0x00);
/* Save registers if the power domain will be cut (pdrst ops installed). */
4259 if (host->rst_ops &&
4260 host->rst_ops->pre_suspend)
4261 host->rst_ops->pre_suspend(host);
4265 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - system-resume hook; undoes dw_mci_suspend().
 *
 * Restores the register snapshot first (if power-down reset ops are
 * installed), skips the special-cased SDIO Wi-Fi chips, re-arms
 * card detect / pinctrl for the SD controller, pokes the GRF JTAG
 * mux on rk3288/rk3036/rk312x, re-enables vmmc, resets all blocks,
 * re-inits DMA (except on rk3036/rk312x), restores FIFOTH/TMOUT and
 * the interrupt mask, re-enables the IRQ and finally restores ios
 * and bus setup for every slot that kept power during suspend.
 */
4267 int dw_mci_resume(struct dw_mci *host)
4271 struct dw_mci_slot *slot;
4272 int present = dw_mci_get_cd(host->mmc);
/* Registers may have been lost with the power domain: restore first. */
4274 if (host->rst_ops &&
4275 host->rst_ops->post_resume)
4276 host->rst_ops->post_resume(host);
4279 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4280 (get_wifi_chip_type() == WIFI_ESP8089 ||
4281 get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
4284 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
4285 slot = mmc_priv(host->mmc);
4286 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4290 /*only for sdmmc controller*/
4291 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4292 /* Soc rk3126/3036 already in gpio_cd mode */
4293 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4294 disable_irq_wake(host->mmc->slot.cd_irq);
4295 mmc_gpio_free_cd(host->mmc);
/* Prefer the UART-debug pin state when available, else the default one. */
4299 if (!IS_ERR(host->pins_udbg)) {
4300 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4301 MMC_DBG_ERR_FUNC(host->mmc,
4302 "Idle pinctrl setting failed! [%s]",
4303 mmc_hostname(host->mmc));
4304 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
4305 MMC_DBG_ERR_FUNC(host->mmc,
4306 "Udbg pinctrl setting failed! [%s]",
4307 mmc_hostname(host->mmc));
4309 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4310 MMC_DBG_ERR_FUNC(host->mmc,
4311 "Default pinctrl setting failed! [%s]",
4312 mmc_hostname(host->mmc));
4315 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4316 MMC_DBG_ERR_FUNC(host->mmc,
4317 "Default pinctrl setting failed! [%s]",
4318 mmc_hostname(host->mmc));
/* Per-SoC GRF writes: route the pin away from JTAG back to sdmmc. */
4323 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4324 else if(cpu_is_rk3036())
4325 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4326 else if(cpu_is_rk312x())
4327 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4328 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4331 ret = regulator_enable(host->vmmc);
4334 "failed to enable regulator: %d\n", ret);
4339 if(!dw_mci_ctrl_all_reset(host)){
4344 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
4345 if(host->use_dma && host->dma_ops->init)
4346 host->dma_ops->init(host);
4349 * Restore the initial value at FIFOTH register
4350 * And Invalidate the prev_blksz with zero
4352 mci_writel(host, FIFOTH, host->fifoth_val);
4353 host->prev_blksz = 0;
4354 /* Put in max timeout */
4355 mci_writel(host, TMOUT, 0xFFFFFFFF);
4357 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4358 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR |
4359 SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
4361 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4362 regs |= SDMMC_INT_CD;
4364 mci_writel(host, INTMASK, regs);
4365 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4367 /*only for sdmmc controller*/
4368 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)){
4369 enable_irq(host->irq);
/* Re-apply ios and bus config for slots that kept power in suspend. */
4372 for(i = 0; i < host->num_slots; i++){
4373 struct dw_mci_slot *slot = host->slot[i];
4376 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4377 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4378 dw_mci_setup_bus(slot, true);
4384 EXPORT_SYMBOL(dw_mci_resume);
4385 #endif /* CONFIG_PM_SLEEP */
/*
 * Module init/exit: the init hook only announces the driver; actual
 * devices bind through the platform/variant drivers built on top.
 */
4387 static int __init dw_mci_init(void)
4389 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4393 static void __exit dw_mci_exit(void)
4397 module_init(dw_mci_init);
4398 module_exit(dw_mci_exit);
4400 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4401 MODULE_AUTHOR("NXP Semiconductor VietNam");
4402 MODULE_AUTHOR("Imagination Technologies Ltd");
4403 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4404 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4405 MODULE_LICENSE("GPL v2");