2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
49 #include <linux/mfd/syscon.h>
50 #include <linux/regmap.h>
51 #include <linux/log2.h>
53 #include "rk_sdmmc_dbg.h"
54 #include <linux/regulator/rockchip_io_vol_domain.h>
55 #include "../../clk/rockchip/clk-ops.h"
/* Version string reported by this Rockchip fork of the DesignWare MMC driver. */
57 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
59 /* Common flag combinations */
60 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
61 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
63 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
65 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
66 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
/* Values stored in host->dir_status to mark transfer direction. */
67 #define DW_MCI_SEND_STATUS 1
68 #define DW_MCI_RECV_STATUS 2
/* Transfers smaller than this many bytes are done by PIO instead of DMA. */
69 #define DW_MCI_DMA_THRESHOLD 16
/* Bus clock limits; upstream 200MHz/400kHz values kept in the trailing comments. */
71 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
72 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
74 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
/* Data timeouts in milliseconds, padded beyond the spec for misbehaving cards. */
75 #define SDMMC_DATA_TIMEOUT_SD 500
76 #define SDMMC_DATA_TIMEOUT_SDIO 250
77 #define SDMMC_DATA_TIMEOUT_EMMC 2500
79 #define SDMMC_CMD_RTO_MAX_HOLD 200
/* Milliseconds to wait for the controller/card busy bits to clear. */
80 #define SDMMC_WAIT_FOR_UNBUSY 2500
/* Size and count of the memory-mapped register window (0x00..0x98). */
82 #define DW_REGS_SIZE (0x0098 + 4)
83 #define DW_REGS_NUM (0x0098 / 4)
85 #ifdef CONFIG_MMC_DW_IDMAC
/* Union of all internal-DMAC interrupt bits, used to clear IDSTS at init. */
86 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
87 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
88 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
/* Hardware descriptor fields for the internal DMAC (32-bit descriptor). */
92 u32 des0; /* Control Descriptor */
93 #define IDMAC_DES0_DIC BIT(1)
94 #define IDMAC_DES0_LD BIT(2)
95 #define IDMAC_DES0_FD BIT(3)
96 #define IDMAC_DES0_CH BIT(4)
97 #define IDMAC_DES0_ER BIT(5)
98 #define IDMAC_DES0_CES BIT(30)
99 #define IDMAC_DES0_OWN BIT(31)
101 u32 des1; /* Buffer sizes */
/* Buffer 1 size lives in des1 bits [12:0]; the mask keeps the other field. */
102 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
103 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
105 u32 des2; /* buffer 1 physical address */
107 u32 des3; /* buffer 2 physical address */
109 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * Tuning block pattern for 4-bit bus width -- presumably the standard
 * CMD19/CMD21 tuning pattern; TODO confirm against the SD/eMMC spec.
 */
111 static const u8 tuning_blk_pattern_4bit[] = {
112 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
113 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
114 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
115 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
116 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
117 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
118 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
119 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/*
 * Tuning block pattern for 8-bit bus width -- presumably the standard
 * eMMC HS200 tuning pattern; TODO confirm against the JEDEC spec.
 */
122 static const u8 tuning_blk_pattern_8bit[] = {
123 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
124 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
125 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
126 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
127 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
128 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
129 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
130 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
131 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
132 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
133 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
134 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
135 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
136 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
137 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
138 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Forward declarations for reset/low-power helpers defined later in the file. */
141 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
142 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
143 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
144 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
146 /*printk the all register of current host*/
/*
 * Debug aid: dump every entry of the dw_mci_regs name/address table for
 * this host via printk. Not used on any hot path.
 */
148 static int dw_mci_regs_printk(struct dw_mci *host)
150 struct sdmmc_reg *regs = dw_mci_regs;
/* The table is terminated by an entry whose name pointer is NULL. */
152 while( regs->name != 0 ){
153 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
156 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
161 #if defined(CONFIG_DEBUG_FS)
/*
 * debugfs "req" file: print a snapshot of the slot's current mmc_request
 * (command, data, stop command) under the host lock.
 */
162 static int dw_mci_req_show(struct seq_file *s, void *v)
164 struct dw_mci_slot *slot = s->private;
165 struct mmc_request *mrq;
166 struct mmc_command *cmd;
167 struct mmc_command *stop;
168 struct mmc_data *data;
170 /* Make sure we get a consistent snapshot */
171 spin_lock_bh(&slot->host->lock);
/* NOTE(review): resp[2] is printed twice below; the fourth field was
 * presumably meant to be resp[3] for 136-bit responses -- confirm. */
181 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
182 cmd->opcode, cmd->arg, cmd->flags,
183 cmd->resp[0], cmd->resp[1], cmd->resp[2],
184 cmd->resp[2], cmd->error);
186 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
187 data->bytes_xfered, data->blocks,
188 data->blksz, data->flags, data->error);
/* NOTE(review): same resp[2] duplication for the stop command. */
191 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
192 stop->opcode, stop->arg, stop->flags,
193 stop->resp[0], stop->resp[1], stop->resp[2],
194 stop->resp[2], stop->error);
197 spin_unlock_bh(&slot->host->lock);
/* single_open() plumbing for the "req" debugfs file. */
202 static int dw_mci_req_open(struct inode *inode, struct file *file)
204 return single_open(file, dw_mci_req_show, inode->i_private);
207 static const struct file_operations dw_mci_req_fops = {
208 .owner = THIS_MODULE,
209 .open = dw_mci_req_open,
212 .release = single_release,
/* debugfs "regs" file: dump a selection of controller registers. */
215 static int dw_mci_regs_show(struct seq_file *s, void *v)
217 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
218 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
219 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
220 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
221 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
222 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
227 static int dw_mci_regs_open(struct inode *inode, struct file *file)
229 return single_open(file, dw_mci_regs_show, inode->i_private);
232 static const struct file_operations dw_mci_regs_fops = {
233 .owner = THIS_MODULE,
234 .open = dw_mci_regs_open,
237 .release = single_release,
/*
 * Create the per-slot debugfs tree (regs, req, state, pending/completed
 * events) under the mmc host's debugfs root.
 */
240 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
242 struct mmc_host *mmc = slot->mmc;
243 struct dw_mci *host = slot->host;
247 root = mmc->debugfs_root;
251 node = debugfs_create_file("regs", S_IRUSR, root, host,
256 node = debugfs_create_file("req", S_IRUSR, root, slot,
261 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
265 node = debugfs_create_x32("pending_events", S_IRUSR, root,
266 (u32 *)&host->pending_events);
270 node = debugfs_create_x32("completed_events", S_IRUSR, root,
271 (u32 *)&host->completed_events);
278 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
280 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the hardware data/response timeout (TMOUT) to its maximum value. */
282 static void dw_mci_set_timeout(struct dw_mci *host)
284 /* timeout (maximum) */
285 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMD register flags:
 * response expectation/length/CRC, data direction/mode, plus any
 * platform-specific tweaks via drv_data->prepare_command().
 */
288 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
290 struct mmc_data *data;
291 struct dw_mci_slot *slot = mmc_priv(mmc);
292 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
/* Marked in-progress until the ISR/tasklet fills in the real result. */
294 cmd->error = -EINPROGRESS;
298 if (cmdr == MMC_STOP_TRANSMISSION)
299 cmdr |= SDMMC_CMD_STOP;
/* All other commands wait for any previous data phase to finish. */
301 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
303 if (cmd->flags & MMC_RSP_PRESENT) {
304 /* We expect a response, so set this bit */
305 cmdr |= SDMMC_CMD_RESP_EXP;
306 if (cmd->flags & MMC_RSP_136)
307 cmdr |= SDMMC_CMD_RESP_LONG;
310 if (cmd->flags & MMC_RSP_CRC)
311 cmdr |= SDMMC_CMD_RESP_CRC;
/* Data phase: direction and stream-vs-block mode. */
315 cmdr |= SDMMC_CMD_DAT_EXP;
316 if (data->flags & MMC_DATA_STREAM)
317 cmdr |= SDMMC_CMD_STRM_MODE;
318 if (data->flags & MMC_DATA_WRITE)
319 cmdr |= SDMMC_CMD_DAT_WR;
/* Give the SoC-specific driver a chance to adjust the command word. */
322 if (drv_data && drv_data->prepare_command)
323 drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build the stop/abort command matching @cmd into host->stop_abort:
 * CMD12 for block read/write, CMD52 (I/O abort) for SDIO CMD53.
 * Returns the raw CMD register value for the stop command.
 */
329 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
331 struct mmc_command *stop;
337 stop = &host->stop_abort;
339 memset(stop, 0, sizeof(struct mmc_command));
341 if (cmdr == MMC_READ_SINGLE_BLOCK ||
342 cmdr == MMC_READ_MULTIPLE_BLOCK ||
343 cmdr == MMC_WRITE_BLOCK ||
344 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
345 stop->opcode = MMC_STOP_TRANSMISSION;
347 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
348 } else if (cmdr == SD_IO_RW_EXTENDED) {
/* Write the ASx bits of CCCR ABORT with the function number of @cmd. */
349 stop->opcode = SD_IO_RW_DIRECT;
350 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
351 ((cmd->arg >> 28) & 0x7);
352 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
357 cmdr = stop->opcode | SDMMC_CMD_STOP |
358 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Write CMDARG and CMD to the controller to issue @cmd. Handles the
 * CMD11 (voltage switch) special case and the HOLD register quirk of
 * some SoCs (e.g. RK3188).
 */
363 static void dw_mci_start_command(struct dw_mci *host,
364 struct mmc_command *cmd, u32 cmd_flags)
366 struct dw_mci_slot *slot = host->slot[0];
367 /*temporality fix slot[0] due to host->num_slots equal to 1*/
/* Remember the previous command for error reporting/diagnostics. */
369 host->pre_cmd = host->cmd;
372 "start command: ARGR=0x%08x CMDR=0x%08x\n",
373 cmd->arg, cmd_flags);
375 if(SD_SWITCH_VOLTAGE == cmd->opcode){
376 /*confirm non-low-power mode*/
377 mci_writel(host, CMDARG, 0);
378 dw_mci_disable_low_power(slot);
380 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
381 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
/* Tell the controller this is the signal-voltage-switch sequence. */
383 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
386 mci_writel(host, CMDARG, cmd->arg);
389 /* fix the value to 1 in some Soc,for example RK3188. */
390 if(host->mmc->hold_reg_flag)
391 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
/* Setting SDMMC_CMD_START kicks the command state machine. */
393 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the data transfer's explicit stop command (mrq->data->stop). */
397 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
399 dw_mci_start_command(host, data->stop, host->stop_cmdr);
402 /* DMA interface functions */
/*
 * Abort a DMA transfer and mark the data phase complete. The edma
 * stop is skipped on RK3036/RK312x (see Fixme) to avoid a flush op.
 */
403 static void dw_mci_stop_dma(struct dw_mci *host)
405 if (host->using_dma) {
406 /* Fixme: No need to terminate edma, may cause flush op */
407 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
408 host->dma_ops->stop(host);
409 host->dma_ops->cleanup(host);
412 /* Data transfer was stopped by the interrupt handler */
413 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Map the mmc_data direction flag to a dma-mapping API direction. */
416 static int dw_mci_get_dma_dir(struct mmc_data *data)
418 if (data->flags & MMC_DATA_WRITE)
419 return DMA_TO_DEVICE;
421 return DMA_FROM_DEVICE;
424 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the scatterlist after an IDMAC transfer, unless the mapping is
 * owned by the pre_req/post_req path (host_cookie set).
 */
425 static void dw_mci_dma_cleanup(struct dw_mci *host)
427 struct mmc_data *data = host->data;
430 if (!data->host_cookie)
431 dma_unmap_sg(host->dev,
434 dw_mci_get_dma_dir(data));
/* Soft-reset the internal DMAC via the BMOD register. */
437 static void dw_mci_idmac_reset(struct dw_mci *host)
439 u32 bmod = mci_readl(host, BMOD);
440 /* Software reset of DMA */
441 bmod |= SDMMC_IDMAC_SWRESET;
442 mci_writel(host, BMOD, bmod);
/* Disable the IDMAC interface in CTRL and halt/reset the engine in BMOD. */
445 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
449 /* Disable and reset the IDMAC interface */
450 temp = mci_readl(host, CTRL);
451 temp &= ~SDMMC_CTRL_USE_IDMAC;
452 temp |= SDMMC_CTRL_DMA_RESET;
453 mci_writel(host, CTRL, temp);
455 /* Stop the IDMAC running */
456 temp = mci_readl(host, BMOD);
457 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
458 temp |= SDMMC_IDMAC_SWRESET;
459 mci_writel(host, BMOD, temp);
/*
 * IDMAC completion callback: unmap buffers, mark EVENT_XFER_COMPLETE and
 * schedule the state-machine tasklet.
 */
462 static void dw_mci_idmac_complete_dma(void *arg)
464 struct dw_mci *host = arg;
465 struct mmc_data *data = host->data;
467 dev_vdbg(host->dev, "DMA complete\n");
470 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
471 host->mrq->cmd->opcode,host->mrq->cmd->arg,
472 data->blocks,data->blksz,mmc_hostname(host->mmc));
475 host->dma_ops->cleanup(host);
478 * If the card was removed, data will be NULL. No point in trying to
479 * send the stop command or waiting for NBUSY in this case.
482 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
483 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the DMA-mapped scatterlist:
 * one chained descriptor per sg entry, then tag first/last descriptors.
 */
487 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
491 struct idmac_desc *desc = host->sg_cpu;
493 for (i = 0; i < sg_len; i++, desc++) {
494 unsigned int length = sg_dma_len(&data->sg[i]);
495 u32 mem_addr = sg_dma_address(&data->sg[i]);
497 /* Set the OWN bit and disable interrupts for this descriptor */
498 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
501 IDMAC_SET_BUFFER1_SIZE(desc, length);
503 /* Physical address to DMA to/from */
504 desc->des2 = mem_addr;
507 /* Set first descriptor */
509 desc->des0 |= IDMAC_DES0_FD;
511 /* Set last descriptor */
/* NOTE(review): byte-offset arithmetic on sg_cpu here vs pointer
 * arithmetic above -- assumes sg_cpu is a byte pointer; confirm. */
512 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
513 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
514 desc->des0 |= IDMAC_DES0_LD;
/*
 * Kick off an IDMAC transfer: build descriptors, select the IDMAC in
 * CTRL, enable it in BMOD, then write the poll-demand register.
 */
519 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
523 dw_mci_translate_sglist(host, host->data, sg_len);
525 /* Select IDMAC interface */
526 temp = mci_readl(host, CTRL);
527 temp |= SDMMC_CTRL_USE_IDMAC;
528 mci_writel(host, CTRL, temp);
532 /* Enable the IDMAC */
533 temp = mci_readl(host, BMOD);
534 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
535 mci_writel(host, BMOD, temp);
537 /* Start it running */
538 mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: link the descriptor ring (forward chained, last
 * entry wraps to the start), reset the engine, unmask only TX/RX
 * completion interrupts and program the descriptor base address.
 */
541 static int dw_mci_idmac_init(struct dw_mci *host)
543 struct idmac_desc *p;
546 /* Number of descriptors in the ring buffer */
547 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
549 /* Forward link the descriptor list */
550 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
551 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
553 /* Set the last descriptor as the end-of-ring descriptor */
554 p->des3 = host->sg_dma;
555 p->des0 = IDMAC_DES0_ER;
557 dw_mci_idmac_reset(host);
559 /* Mask out interrupts - get Tx & Rx complete only */
560 mci_writel(host, IDSTS, IDMAC_INT_CLR);
561 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
564 /* Set the descriptor base address */
565 mci_writel(host, DBADDR, host->sg_dma);
/* DMA ops vtable for the internal DMAC backend. */
569 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
570 .init = dw_mci_idmac_init,
571 .start = dw_mci_idmac_start_dma,
572 .stop = dw_mci_idmac_stop_dma,
573 .complete = dw_mci_idmac_complete_dma,
574 .cleanup = dw_mci_dma_cleanup,
/*
 * Unmap the scatterlist after an external-DMA transfer, unless it is
 * owned by the pre_req/post_req path (host_cookie set).
 */
578 static void dw_mci_edma_cleanup(struct dw_mci *host)
580 struct mmc_data *data = host->data;
583 if (!data->host_cookie)
584 dma_unmap_sg(host->dev,
585 data->sg, data->sg_len,
586 dw_mci_get_dma_dir(data));
/* Abort any in-flight transfer on the external dmaengine channel. */
589 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
591 dmaengine_terminate_all(host->dms->ch);
/*
 * External-DMA completion callback: sync the CPU cache for reads,
 * unmap buffers, mark EVENT_XFER_COMPLETE and schedule the tasklet.
 */
594 static void dw_mci_edmac_complete_dma(void *arg)
596 struct dw_mci *host = arg;
597 struct mmc_data *data = host->data;
599 dev_vdbg(host->dev, "DMA complete\n");
602 if(data->flags & MMC_DATA_READ)
603 /* Invalidate cache after read */
604 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
605 data->sg_len, DMA_FROM_DEVICE);
607 host->dma_ops->cleanup(host);
610 * If the card was removed, data will be NULL. No point in trying to
611 * send the stop command or waiting for NBUSY in this case.
614 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
615 tasklet_schedule(&host->tasklet);
/*
 * Start a transfer on the external dmaengine channel: configure the
 * slave (FIFO address, 4-byte width, burst size matched to FIFOTH MSIZE,
 * capped per-SoC), prepare and submit the sg descriptor, and issue it.
 * Write path flushes the CPU cache before issuing.
 */
619 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
/* NOTE(review): slave_config is stack-allocated and only partially
 * filled below -- unset fields hold garbage; confirm the dmaengine
 * driver ignores them or zero-init with = {0}. */
621 struct dma_slave_config slave_config;
622 struct dma_async_tx_descriptor *desc = NULL;
623 struct scatterlist *sgl = host->data->sg;
624 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
625 u32 sg_elems = host->data->sg_len;
626 u32 fifoth_val, mburst;
628 u32 idx, rx_wmark, tx_wmark;
631 /* Set external dma config: burst size, burst width*/
632 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
633 slave_config.src_addr = slave_config.dst_addr;
634 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
635 slave_config.src_addr_width = slave_config.dst_addr_width;
637 /* Match FIFO dma burst MSIZE with external dma config*/
638 fifoth_val = mci_readl(host, FIFOTH);
639 mburst = mszs[(fifoth_val >> 28) & 0x7];
641 /* edmac limit burst to 16, but work around for rk3036 to 8 */
642 if (unlikely(cpu_is_rk3036()))
/* If the FIFO burst exceeds the edmac limit, shrink it and reprogram
 * FIFOTH with matching MSIZE and watermarks. */
647 if (mburst > burst_limit) {
648 mburst = burst_limit;
649 idx = (ilog2(mburst) > 0) ? (ilog2(mburst) - 1) : 0;
651 rx_wmark = mszs[idx] - 1;
652 tx_wmark = (host->fifo_depth) / 2;
653 fifoth_val = SDMMC_SET_FIFOTH(idx, rx_wmark, tx_wmark);
655 mci_writel(host, FIFOTH, fifoth_val);
658 slave_config.dst_maxburst = mburst;
659 slave_config.src_maxburst = slave_config.dst_maxburst;
661 if(host->data->flags & MMC_DATA_WRITE){
662 slave_config.direction = DMA_MEM_TO_DEV;
663 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
665 dev_err(host->dev, "error in dw_mci edma configuration.\n");
669 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
670 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
672 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
675 /* Set dw_mci_edmac_complete_dma as callback */
676 desc->callback = dw_mci_edmac_complete_dma;
677 desc->callback_param = (void *)host;
678 dmaengine_submit(desc);
680 /* Flush cache before write */
681 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
682 sg_elems, DMA_TO_DEVICE);
683 dma_async_issue_pending(host->dms->ch);
/* Read path: device-to-memory, cache is invalidated on completion. */
686 slave_config.direction = DMA_DEV_TO_MEM;
687 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
689 dev_err(host->dev, "error in dw_mci edma configuration.\n");
692 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
693 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
695 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
698 /* set dw_mci_edmac_complete_dma as callback */
699 desc->callback = dw_mci_edmac_complete_dma;
700 desc->callback_param = (void *)host;
701 dmaengine_submit(desc);
702 dma_async_issue_pending(host->dms->ch);
/*
 * Allocate the dw_mci_dma_slave wrapper and request the "dw_mci"
 * dmaengine channel declared in the device tree.
 */
706 static int dw_mci_edmac_init(struct dw_mci *host)
708 /* Request external dma channel, SHOULD decide chn in dts */
710 host->dms = (struct dw_mci_dma_slave *)kmalloc
711 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
712 if (NULL == host->dms) {
713 dev_err(host->dev, "No enough memory to alloc dms.\n");
717 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
718 if (!host->dms->ch) {
/* NOTE(review): BUG -- this error path dereferences the NULL
 * host->dms->ch to print chan_id; the dev_err must not touch ch. */
719 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
720 host->dms->ch->chan_id);
727 if (NULL != host->dms) {
/* Release the channel and free the slave wrapper on teardown. */
735 static void dw_mci_edmac_exit(struct dw_mci *host)
737 if (NULL != host->dms) {
738 if (NULL != host->dms->ch) {
739 dma_release_channel(host->dms->ch);
740 host->dms->ch = NULL;
/* DMA ops vtable for the external dmaengine backend. */
747 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
748 .init = dw_mci_edmac_init,
749 .exit = dw_mci_edmac_exit,
750 .start = dw_mci_edmac_start_dma,
751 .stop = dw_mci_edmac_stop_dma,
752 .complete = dw_mci_edmac_complete_dma,
753 .cleanup = dw_mci_edma_cleanup,
755 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * DMA-map @data's scatterlist if the transfer is DMA-eligible (large
 * enough and word-aligned). Returns the sg count, reusing an existing
 * mapping via data->host_cookie when called outside the pre_req path.
 */
757 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
758 struct mmc_data *data,
761 struct scatterlist *sg;
762 unsigned int i, sg_len;
764 if (!next && data->host_cookie)
765 return data->host_cookie;
768 * We don't do DMA on "complex" transfers, i.e. with
769 * non-word-aligned buffers or lengths. Also, we don't bother
770 * with all the DMA setup overhead for short transfers.
772 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
/* Reject any sg entry whose offset or length is not 4-byte aligned. */
778 for_each_sg(data->sg, sg, data->sg_len, i) {
779 if (sg->offset & 3 || sg->length & 3)
783 sg_len = dma_map_sg(host->dev,
786 dw_mci_get_dma_dir(data));
/* Remember the mapping so post_req/cleanup can find and release it. */
791 data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: DMA-map the next request's buffers ahead of
 * time so dw_mci_submit_data_dma() can reuse the mapping.
 */
796 static void dw_mci_pre_req(struct mmc_host *mmc,
797 struct mmc_request *mrq,
800 struct dw_mci_slot *slot = mmc_priv(mmc);
801 struct mmc_data *data = mrq->data;
803 if (!slot->host->use_dma || !data)
/* A stale cookie means a previous mapping was never consumed. */
806 if (data->host_cookie) {
807 data->host_cookie = 0;
811 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
812 data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: unmap buffers that pre_req mapped once the
 * request has completed.
 */
815 static void dw_mci_post_req(struct mmc_host *mmc,
816 struct mmc_request *mrq,
819 struct dw_mci_slot *slot = mmc_priv(mmc);
820 struct mmc_data *data = mrq->data;
822 if (!slot->host->use_dma || !data)
825 if (data->host_cookie)
826 dma_unmap_sg(slot->host->dev,
829 dw_mci_get_dma_dir(data));
830 data->host_cookie = 0;
/*
 * Recompute FIFOTH (MSIZE plus RX/TX watermarks) for the given block
 * size so DMA bursts divide the block evenly. IDMAC builds only.
 */
833 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
835 #ifdef CONFIG_MMC_DW_IDMAC
836 unsigned int blksz = data->blksz;
837 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
838 u32 fifo_width = 1 << host->data_shift;
839 u32 blksz_depth = blksz / fifo_width, fifoth_val;
840 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
/* Start from the largest burst size and search downward. */
841 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
843 tx_wmark = (host->fifo_depth) / 2;
844 tx_wmark_invers = host->fifo_depth - tx_wmark;
848 * if blksz is not a multiple of the FIFO width
850 if (blksz % fifo_width) {
/* Pick the biggest burst that divides both the block depth and the
 * TX watermark headroom. */
857 if (!((blksz_depth % mszs[idx]) ||
858 (tx_wmark_invers % mszs[idx]))) {
860 rx_wmark = mszs[idx] - 1;
865 * If idx is '0', it won't be tried
866 * Thus, initial values are uesed
869 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
870 mci_writel(host, FIFOTH, fifoth_val);
/*
 * Program the card read threshold (CDTHRCTL) for fast timings
 * (HS200 / SDR104); disabled otherwise or when the block does not
 * fit in the FIFO.
 */
875 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
877 unsigned int blksz = data->blksz;
878 u32 blksz_depth, fifo_depth;
881 WARN_ON(!(data->flags & MMC_DATA_READ));
883 if (host->timing != MMC_TIMING_MMC_HS200 &&
884 host->timing != MMC_TIMING_UHS_SDR104)
887 blksz_depth = blksz / (1 << host->data_shift);
888 fifo_depth = host->fifo_depth;
890 if (blksz_depth > fifo_depth)
894 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
895 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
896 * Currently just choose blksz.
899 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
/* Fall-through path: threshold disabled. */
903 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to submit @data via DMA. Returns non-zero to make the caller
 * fall back to PIO (no channel, unmappable sg, etc.). On success the
 * DMA interface is enabled and RX/TX PIO interrupts are masked.
 */
906 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
914 /* If we don't have a channel, we can't do DMA */
918 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
920 /* Fixme: No need terminate edma, may cause flush op */
921 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
922 host->dma_ops->stop(host);
929 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
930 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
934 * Decide the MSIZE and RX/TX Watermark.
935 * If current block size is same with previous size,
936 * no need to update fifoth.
938 if (host->prev_blksz != data->blksz)
939 dw_mci_adjust_fifoth(host, data)
942 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
944 /* Enable the DMA interface */
945 temp = mci_readl(host, CTRL);
946 temp |= SDMMC_CTRL_DMA_ENABLE;
947 mci_writel(host, CTRL, temp);
949 /* Disable RX/TX IRQs, let DMA handle it */
950 spin_lock_irqsave(&host->slock, flags);
951 temp = mci_readl(host, INTMASK);
952 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
953 mci_writel(host, INTMASK, temp);
954 spin_unlock_irqrestore(&host->slock, flags);
956 host->dma_ops->start(host, sg_len);
/*
 * Set up the data phase: record direction, program the read threshold
 * for reads, then try DMA; if DMA is refused, fall back to PIO via the
 * sg_miter and re-enable RX/TX interrupts.
 */
961 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
966 data->error = -EINPROGRESS;
968 //WARN_ON(host->data);
973 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
975 if (data->flags & MMC_DATA_READ) {
976 host->dir_status = DW_MCI_RECV_STATUS;
977 dw_mci_ctrl_rd_thld(host, data);
979 host->dir_status = DW_MCI_SEND_STATUS;
982 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
983 data->blocks, data->blksz, mmc_hostname(host->mmc));
/* PIO fallback: DMA submission refused. */
985 if (dw_mci_submit_data_dma(host, data)) {
986 int flags = SG_MITER_ATOMIC;
987 if (host->data->flags & MMC_DATA_READ)
988 flags |= SG_MITER_TO_SG;
990 flags |= SG_MITER_FROM_SG;
992 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
994 host->part_buf_start = 0;
995 host->part_buf_count = 0;
/* Re-enable the RX/TX data-ready interrupts that DMA mode masks. */
997 spin_lock_irqsave(&host->slock, flag);
998 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
999 temp = mci_readl(host, INTMASK);
1000 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
1001 mci_writel(host, INTMASK, temp);
1002 spin_unlock_irqrestore(&host->slock, flag);
1004 temp = mci_readl(host, CTRL);
1005 temp &= ~SDMMC_CTRL_DMA_ENABLE;
1006 mci_writel(host, CTRL, temp);
1009 * Use the initial fifoth_val for PIO mode.
1010 * If next issued data may be transfered by DMA mode,
1011 * prev_blksz should be invalidated.
1013 mci_writel(host, FIFOTH, host->fifoth_val);
1014 host->prev_blksz = 0;
1017 * Keep the current block size.
1018 * It will be used to decide whether to update
1019 * fifoth register next time.
1021 host->prev_blksz = data->blksz;
/*
 * Synchronously issue a controller-internal command (e.g. clock update):
 * wait for the card/controller busy bits to clear, write CMDARG/CMD, then
 * poll until the hardware clears SDMMC_CMD_START or a timeout elapses.
 */
1025 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1027 struct dw_mci *host = slot->host;
1028 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1029 unsigned int cmd_status = 0;
1030 #ifdef SDMMC_WAIT_FOR_UNBUSY
1032 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
/* Only wait for unbusy when a card is actually present. */
1034 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1036 ret = time_before(jiffies, timeout);
1037 cmd_status = mci_readl(host, STATUS);
1038 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1042 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1043 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1046 mci_writel(host, CMDARG, arg);
1048 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
/* Clock-update commands get a shorter completion timeout. */
1049 if(cmd & SDMMC_CMD_UPD_CLK)
1050 timeout = jiffies + msecs_to_jiffies(50);
1052 timeout = jiffies + msecs_to_jiffies(500);
1053 while (time_before(jiffies, timeout)) {
1054 cmd_status = mci_readl(host, CMD);
1055 if (!(cmd_status & SDMMC_CMD_START))
1058 dev_err(&slot->mmc->class_dev,
1059 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1060 cmd, arg, cmd_status);
/*
 * Program the card clock (divider, enable, low-power gating) and bus
 * width for @slot. Contains several Rockchip-specific workarounds for
 * clk_mmc rates and an acknowledged div==0 DDR bug (see comment below).
 * A zero @clock gates the clock entirely.
 */
1063 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1065 struct dw_mci *host = slot->host;
1066 unsigned int tempck,clock = slot->clock;
1071 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1072 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
/* clock == 0: disable the card clock and inform the CIU. */
1075 mci_writel(host, CLKENA, 0);
1076 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1077 if(host->svi_flags == 0)
1078 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1080 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1082 } else if (clock != host->current_speed || force_clkinit) {
1083 div = host->bus_hz / clock;
1084 if (host->bus_hz % clock && host->bus_hz > clock)
1086 * move the + 1 after the divide to prevent
1087 * over-clocking the card.
/* CLKDIV divides by 2*div; div==0 means bypass (bus_hz direct). */
1091 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1093 if ((clock << div) != slot->__clk_old || force_clkinit) {
1094 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1095 dev_info(&slot->mmc->class_dev,
1096 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1097 slot->id, host->bus_hz, clock,
1100 host->set_speed = tempck;
1101 host->set_div = div;
/* Gate the clock before reprogramming divider/source. */
1105 mci_writel(host, CLKENA, 0);
1106 mci_writel(host, CLKSRC, 0);
1110 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
/* Init-speed workaround: run clk_mmc at 2x so the internal /2 yields
 * the requested rate (clk reparents to the 24MHz xtal). */
1112 if(clock <= 400*1000){
1113 MMC_DBG_BOOT_FUNC(host->mmc,
1114 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1115 clock * 2, mmc_hostname(host->mmc));
1116 /* clk_mmc will change parents to 24MHz xtal*/
1117 clk_set_rate(host->clk_mmc, clock * 2);
1120 host->set_div = div;
1124 MMC_DBG_BOOT_FUNC(host->mmc,
1125 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1126 mmc_hostname(host->mmc));
1129 MMC_DBG_ERR_FUNC(host->mmc,
1130 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1131 mmc_hostname(host->mmc));
1133 host->set_div = div;
1134 host->bus_hz = host->set_speed * 2;
1135 MMC_DBG_BOOT_FUNC(host->mmc,
1136 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1137 div, host->bus_hz, mmc_hostname(host->mmc));
1139 /* BUG may be here, come on, Linux BSP engineer looks!
1140 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1141 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1142 some oops happened like that:
1143 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1144 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1145 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1146 mmc0: new high speed DDR MMC card at address 0001
1147 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1149 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1150 mmcblk0: retrying using single block read
1151 mmcblk0: error -110 sending status command, retrying
1153 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1156 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1157 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1160 host->set_div = div;
1161 host->bus_hz = host->set_speed * 2;
1162 MMC_DBG_BOOT_FUNC(host->mmc,
1163 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1164 div, host->bus_hz, mmc_hostname(host->mmc));
/* Pre-2.40a IP takes bus_hz directly; newer IP needs 2x (internal /2). */
1167 if (host->verid < DW_MMC_240A)
1168 clk_set_rate(host->clk_mmc,(host->bus_hz));
1170 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1176 /* set clock to desired speed */
1177 mci_writel(host, CLKDIV, div);
1181 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1183 /* enable clock; only low power if no SDIO */
1184 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1186 if (host->verid < DW_MMC_240A)
1187 sdio_int = SDMMC_INT_SDIO(slot->id);
1189 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1191 if (!(mci_readl(host, INTMASK) & sdio_int))
1192 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1193 mci_writel(host, CLKENA, clk_en_a);
1197 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1198 /* keep the clock with reflecting clock dividor */
1199 slot->__clk_old = clock << div;
1202 host->current_speed = clock;
/* Log bus configuration only when the width actually changes. */
1204 if(slot->ctype != slot->pre_ctype)
1205 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1207 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1208 mmc_hostname(host->mmc));
1209 slot->pre_ctype = slot->ctype;
1211 /* Set the current slot bus width */
1212 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1215 extern struct mmc_card *this_card;
/*
 * Poll STATUS until the data/controller busy bits clear, with a
 * card-type-specific timeout (extended for eMMC erase operations using
 * the card's EXT_CSD erase timing fields).
 */
1216 static void dw_mci_wait_unbusy(struct dw_mci *host)
1219 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1220 unsigned long time_loop;
1221 unsigned int status;
1224 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1226 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1227 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1228 /* Special care for (secure)erase timeout calculation */
/* NOTE(review): BUG -- (arg & (1 << 31)) yields 0 or 0x80000000,
 * never 1, so this secure-erase test is always false; should
 * presumably be '!= 0' or '>> 31'. */
1230 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1233 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1234 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1235 300000 * (this_card->ext_csd.sec_erase_mult)) :
1236 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1240 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1241 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1242 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1243 timeout = SDMMC_DATA_TIMEOUT_SD;
1246 time_loop = jiffies + msecs_to_jiffies(timeout);
1248 status = mci_readl(host, STATUS);
1249 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1251 } while (time_before(jiffies, time_loop));
1256 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1259 * 0--status is busy.
1260 * 1--status is unbusy.
/*
 * mmc_host_ops.card_busy callback used during the signal-voltage switch.
 * It toggles host->svi_flags: the first call reports busy progressing
 * (returns 1 after setting the flag), the next returns 0 and clears it.
 * NOTE(review): the return value mirrors svi_flags rather than actual
 * DAT-line state — presumably intentional for the Rockchip voltage-switch
 * sequence; verify against the mmc core's expectations for card_busy.
 */
1262 int dw_mci_card_busy(struct mmc_host *mmc)
1264 struct dw_mci_slot *slot = mmc_priv(mmc);
1265 struct dw_mci *host = slot->host;
1267 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1268 host->svi_flags, mmc_hostname(host->mmc));
1271 if(host->svi_flags == 0){
1273 host->svi_flags = 1;
1274 return host->svi_flags;
1277 host->svi_flags = 0;
1278 return host->svi_flags;
/*
 * Program the controller for a new request and issue its first command.
 * Waits for the controller/card to go idle, clears per-request event
 * state, programs byte/block counts for data commands, submits the data
 * for transfer, and pre-computes the stop command descriptor if the
 * request carries one.  Caller must hold host->lock (invoked from the
 * request queueing path).
 */
1284 static void __dw_mci_start_request(struct dw_mci *host,
1285 struct dw_mci_slot *slot,
1286 struct mmc_command *cmd)
1288 struct mmc_request *mrq;
1289 struct mmc_data *data;
1293 if (host->pdata->select_slot)
1294 host->pdata->select_slot(slot->id);
1296 host->cur_slot = slot;
/* Do not start a new command while the previous transfer is still busy. */
1299 dw_mci_wait_unbusy(host);
1301 host->pending_events = 0;
1302 host->completed_events = 0;
1303 host->data_status = 0;
/* Data command: program timeout and transfer size registers first. */
1307 dw_mci_set_timeout(host);
1308 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1309 mci_writel(host, BLKSIZ, data->blksz);
1312 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1314 /* this is the first command, send the initialization clock */
1315 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1316 cmdflags |= SDMMC_CMD_INIT;
1319 dw_mci_submit_data(host, data);
1323 dw_mci_start_command(host, cmd, cmdflags);
/* Pre-build CMD12 descriptor so the stop can be issued quickly later. */
1326 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * Kick off the slot's pending mmc_request.  If the request has an SBC
 * (CMD23 SET_BLOCK_COUNT), that is sent first; otherwise the main
 * command starts immediately.
 */
1329 static void dw_mci_start_request(struct dw_mci *host,
1330 struct dw_mci_slot *slot)
1332 struct mmc_request *mrq = slot->mrq;
1333 struct mmc_command *cmd;
1335 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1336 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1338 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1339 __dw_mci_start_request(host, slot, cmd);
1342 /* must be called with host->lock held */
/*
 * Queue a request: start it immediately when the host state machine is
 * idle, otherwise append the slot to host->queue so dw_mci_request_end()
 * picks it up when the current request finishes.
 */
1343 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1344 struct mmc_request *mrq)
1346 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1351 if (host->state == STATE_IDLE) {
1352 host->state = STATE_SENDING_CMD;
1353 dw_mci_start_request(host, slot);
1355 list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops.request callback.  Rejects the request with -ENOMEDIUM
 * when no card is present; otherwise queues it under host->lock so the
 * presence check and the enqueue are atomic with respect to removal.
 */
1359 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1361 struct dw_mci_slot *slot = mmc_priv(mmc);
1362 struct dw_mci *host = slot->host;
1367 * The check for card presence and queueing of the request must be
1368 * atomic, otherwise the card could be removed in between and the
1369 * request wouldn't fail until another card was inserted.
1371 spin_lock_bh(&host->lock);
1373 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1374 spin_unlock_bh(&host->lock);
1375 mrq->cmd->error = -ENOMEDIUM;
1376 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1377 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
1379 mmc_request_done(mmc, mrq);
1383 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1384 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1386 dw_mci_queue_request(host, slot, mrq);
1388 spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.set_ios callback: apply bus width, timing (DDR50 bit in
 * UHS_REG), clock and power-mode changes requested by the mmc core.
 * Optionally waits for the controller to go unbusy first
 * (SDMMC_WAIT_FOR_UNBUSY), with a shorter wait while a signal-voltage
 * switch is in flight (svi_flags == 1).
 */
1391 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1393 struct dw_mci_slot *slot = mmc_priv(mmc);
1394 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1395 struct dw_mci *host = slot->host;
1397 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1400 #ifdef SDMMC_WAIT_FOR_UNBUSY
1401 unsigned long time_loop;
1404 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* During a voltage switch, use the (shorter) SD data timeout. */
1405 if(host->svi_flags == 1)
1406 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1408 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1410 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1413 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1414 printk("%d..%s: no card. [%s]\n", \
1415 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until card/controller busy bits clear or the deadline hits. */
1420 ret = time_before(jiffies, time_loop);
1421 regs = mci_readl(slot->host, STATUS);
1422 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1428 printk("slot->flags = %lu ", slot->flags);
1429 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1430 if(host->svi_flags != 1)
1433 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1434 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Translate the core's bus-width request into the controller CTYPE encoding. */
1438 switch (ios->bus_width) {
1439 case MMC_BUS_WIDTH_4:
1440 slot->ctype = SDMMC_CTYPE_4BIT;
1442 case MMC_BUS_WIDTH_8:
1443 slot->ctype = SDMMC_CTYPE_8BIT;
1446 /* set default 1 bit mode */
1447 slot->ctype = SDMMC_CTYPE_1BIT;
1448 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode is controlled by a per-slot bit in the upper half of UHS_REG. */
1451 regs = mci_readl(slot->host, UHS_REG);
1454 if (ios->timing == MMC_TIMING_UHS_DDR50)
1455 regs |= ((0x1 << slot->id) << 16);
1457 regs &= ~((0x1 << slot->id) << 16);
1459 mci_writel(slot->host, UHS_REG, regs);
1460 slot->host->timing = ios->timing;
1463 * Use mirror of ios->clock to prevent race with mmc
1464 * core ios update when finding the minimum.
1466 slot->clock = ios->clock;
/* Let the SoC-specific glue (drv_data) adjust clocks/phases if it wants. */
1468 if (drv_data && drv_data->set_ios)
1469 drv_data->set_ios(slot->host, ios);
1471 /* Slot specific timing and width adjustment */
1472 dw_mci_setup_bus(slot, false);
1476 switch (ios->power_mode) {
/* Power up: request init sequence on next command and enable slot power. */
1478 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1480 if (slot->host->pdata->setpower)
1481 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1482 regs = mci_readl(slot->host, PWREN);
1483 regs |= (1 << slot->id);
1484 mci_writel(slot->host, PWREN, regs);
1487 /* Power down slot */
1488 if(slot->host->pdata->setpower)
1489 slot->host->pdata->setpower(slot->id, 0);
1490 regs = mci_readl(slot->host, PWREN);
1491 regs &= ~(1 << slot->id);
1492 mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops.get_ro callback: report write-protect state.
 * Precedence: slot quirk forces writable -> platform get_ro hook ->
 * WP GPIO level -> controller WRTPRT register bit for this slot.
 */
1499 static int dw_mci_get_ro(struct mmc_host *mmc)
1502 struct dw_mci_slot *slot = mmc_priv(mmc);
1503 struct dw_mci_board *brd = slot->host->pdata;
1505 /* Use platform get_ro function, else try on board write protect */
1506 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1508 else if(brd->get_ro)
1509 read_only = brd->get_ro(slot->id);
1510 else if(gpio_is_valid(slot->wp_gpio))
1511 read_only = gpio_get_value(slot->wp_gpio);
1514 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1516 dev_dbg(&mmc->class_dev, "card is %s\n",
1517 read_only ? "read-only" : "read-write");
/*
 * Virtual card-detect for SDIO (e.g. WiFi power on/off): set or clear
 * the CARD_PRESENT flag, gate the controller clocks to match, and
 * trigger a rescan via mmc_detect_change().  Only meaningful for hosts
 * restricted to SDIO cards.
 */
1522 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1524 struct dw_mci_slot *slot = mmc_priv(mmc);
1525 struct dw_mci *host = slot->host;
1526 /*struct dw_mci_board *brd = slot->host->pdata;*/
1528 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1531 spin_lock_bh(&host->lock);
1534 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1536 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1538 spin_unlock_bh(&host->lock);
/* Present: make sure hclk and ciu clocks run; absent: gate them off. */
1540 if (test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1541 if (__clk_is_enabled(host->hclk_mmc) == false)
1542 clk_prepare_enable(host->hclk_mmc);
1543 if (__clk_is_enabled(host->clk_mmc) == false)
1544 clk_prepare_enable(host->clk_mmc);
1546 if (__clk_is_enabled(host->clk_mmc) == true)
1547 clk_disable_unprepare(slot->host->clk_mmc);
1548 if (__clk_is_enabled(host->hclk_mmc) == true)
1549 clk_disable_unprepare(slot->host->hclk_mmc);
/* 20 ms debounce before the mmc core rescans the bus. */
1552 mmc_detect_change(slot->mmc, 20);
/*
 * mmc_host_ops.get_cd callback: report card presence.
 * On RK3036/RK3126(b) with an SD slot, the CD GPIO is shared with the
 * force_jtag function: when the card is removed the pad is handed back
 * to JTAG via a GRF write, and the CD IRQ polarity is flipped so the
 * next insertion is detected.  Otherwise falls through the usual chain:
 * broken-CD quirk -> platform get_cd hook -> CD GPIO -> CDETECT register.
 */
1558 static int dw_mci_get_cd(struct mmc_host *mmc)
1561 struct dw_mci_slot *slot = mmc_priv(mmc);
1562 struct dw_mci_board *brd = slot->host->pdata;
1563 struct dw_mci *host = slot->host;
1564 int gpio_cd = mmc_gpio_get_cd(mmc);
1565 int force_jtag_bit, force_jtag_reg;
1569 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
1570 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1571 gpio_cd = slot->cd_gpio;
1572 irq = gpio_to_irq(gpio_cd);
1573 if (gpio_is_valid(gpio_cd)) {
1574 gpio_val = gpio_get_value(gpio_cd);
1575 if (soc_is_rk3036()) {
1576 force_jtag_bit = 11;
1577 force_jtag_reg = RK312X_GRF_SOC_CON0;
1578 } else if (soc_is_rk3126() || soc_is_rk3126b()) {
1579 force_jtag_reg = RK312X_GRF_SOC_CON0;
/* Read twice (debounce): only act when the level is stable. */
1583 if (gpio_val == gpio_get_value(gpio_cd)) {
1584 gpio_cd = (gpio_val == 0 ? 1 : 0);
1586 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1587 /* Enable force_jtag wihtout card in slot, ONLY for NCD-package */
1588 grf_writel((0x1 << (force_jtag_bit + 16)) | (1 << force_jtag_bit),
1591 dw_mci_ctrl_all_reset(host);
1593 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
1594 /* Really card detected: SHOULD disable force_jtag */
1595 grf_writel((0x1 << (force_jtag_bit + 16)) | (0 << force_jtag_bit),
/* Unstable level: re-arm the IRQ for the current level and keep old state. */
1600 gpio_val = gpio_get_value(gpio_cd);
1602 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
1603 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1604 return slot->last_detect_state;
1607 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
/* SDIO presence is driven purely by dw_mci_set_sdio_status(). */
1611 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1612 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1614 /* Use platform get_cd function, else try onboard card detect */
1615 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1617 else if (brd->get_cd)
1618 present = !brd->get_cd(slot->id);
1619 else if (!IS_ERR_VALUE(gpio_cd))
1622 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1625 spin_lock_bh(&host->lock);
1627 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1628 dev_dbg(&mmc->class_dev, "card is present\n");
1630 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1631 dev_dbg(&mmc->class_dev, "card is not present\n");
1633 spin_unlock_bh(&host->lock);
1640 * Dts Should caps emmc controller with poll-hw-reset
/*
 * mmc_host_ops.hw_reset callback for eMMC.  Sequence:
 *  (1) issue CMD12 to abort any in-flight transfer,
 *  (2) wait for (or force-clear) the data-over interrupt,
 *  (3) reset IDMAC (BMOD), DMA (CTRL[2]) and FIFO (CTRL[1]) — order
 *      matters per the comment below,
 *  (4) pulse RST_n/PWREN per the eMMC spec timing (tRstW/tRSCA/tRSTH).
 */
1642 static void dw_mci_hw_reset(struct mmc_host *mmc)
1644 struct dw_mci_slot *slot = mmc_priv(mmc);
1645 struct dw_mci *host = slot->host;
1650 unsigned long timeout;
1653 /* (1) CMD12 to end any transfer in process */
1654 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1655 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1657 if(host->mmc->hold_reg_flag)
1658 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1659 mci_writel(host, CMDARG, 0);
1661 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait up to 500 ms for the controller to accept the command. */
1663 timeout = jiffies + msecs_to_jiffies(500);
1665 ret = time_before(jiffies, timeout);
1666 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1671 MMC_DBG_ERR_FUNC(host->mmc,
1672 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1673 __func__, mmc_hostname(host->mmc));
1675 /* (2) wait DTO, even if no response is sent back by card */
1677 timeout = jiffies + msecs_to_jiffies(5);
1679 ret = time_before(jiffies, timeout);
1680 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1681 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1687 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1689 /* Software reset - BMOD[0] for IDMA only */
1690 regs = mci_readl(host, BMOD);
1691 regs |= SDMMC_IDMAC_SWRESET;
1692 mci_writel(host, BMOD, regs);
1693 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1694 regs = mci_readl(host, BMOD);
1695 if(regs & SDMMC_IDMAC_SWRESET)
1696 MMC_DBG_WARN_FUNC(host->mmc,
1697 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1698 __func__, mmc_hostname(host->mmc));
1700 /* DMA reset - CTRL[2] */
1701 regs = mci_readl(host, CTRL);
1702 regs |= SDMMC_CTRL_DMA_RESET;
1703 mci_writel(host, CTRL, regs);
1704 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1705 regs = mci_readl(host, CTRL);
1706 if(regs & SDMMC_CTRL_DMA_RESET)
1707 MMC_DBG_WARN_FUNC(host->mmc,
1708 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1709 __func__, mmc_hostname(host->mmc));
1711 /* FIFO reset - CTRL[1] */
1712 regs = mci_readl(host, CTRL);
1713 regs |= SDMMC_CTRL_FIFO_RESET;
1714 mci_writel(host, CTRL, regs);
1715 mdelay(1); /* no timing limited, 1ms is random value */
1716 regs = mci_readl(host, CTRL);
1717 if(regs & SDMMC_CTRL_FIFO_RESET)
/* NOTE(review): message below says "DMA_RESET" but this is the FIFO
 * reset check — copy/paste error in the log string; fix in full source. */
1718 MMC_DBG_WARN_FUNC(host->mmc,
1719 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1720 __func__, mmc_hostname(host->mmc));
1723 According to eMMC spec
1724 tRstW >= 1us ; RST_n pulse width
1725 tRSCA >= 200us ; RST_n to Command time
1726 tRSTH >= 1us ; RST_n high period
/* (4) Toggle power + RST_n with margins well above the spec minimums. */
1728 mci_writel(slot->host, PWREN, 0x0);
1729 mci_writel(slot->host, RST_N, 0x0);
1731 udelay(10); /* 10us for bad quality eMMc. */
1733 mci_writel(slot->host, PWREN, 0x1);
1734 mci_writel(slot->host, RST_N, 0x1);
1736 usleep_range(500, 1000); /* at least 500(> 200us) */
1740 * Disable lower power mode.
1742 * Low power mode will stop the card clock when idle. According to the
1743 * description of the CLKENA register we should disable low power mode
1744 * for SDIO cards if we need SDIO interrupts to work.
1746 * This function is fast if low power mode is already disabled.
1748 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1750 struct dw_mci *host = slot->host;
1752 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1754 clk_en_a = mci_readl(host, CLKENA);
/* Only touch the register (and re-latch the clock) when the bit is set. */
1756 if (clk_en_a & clken_low_pwr) {
1757 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1758 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1759 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * mmc_host_ops.enable_sdio_irq callback: mask/unmask this slot's SDIO
 * interrupt in INTMASK.  The per-slot SDIO bit position moved in IP
 * version 2.40a, hence the verid check.  Enabling also forces low-power
 * clock gating off, since a gated card clock would suppress the IRQ.
 */
1763 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1765 struct dw_mci_slot *slot = mmc_priv(mmc);
1766 struct dw_mci *host = slot->host;
1767 unsigned long flags;
1771 spin_lock_irqsave(&host->slock, flags);
1773 /* Enable/disable Slot Specific SDIO interrupt */
1774 int_mask = mci_readl(host, INTMASK);
1776 if (host->verid < DW_MMC_240A)
1777 sdio_int = SDMMC_INT_SDIO(slot->id);
1779 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1783 * Turn off low power mode if it was enabled. This is a bit of
1784 * a heavy operation and we disable / enable IRQs a lot, so
1785 * we'll leave low power mode disabled and it will get
1786 * re-enabled again in dw_mci_setup_bus().
1788 dw_mci_disable_low_power(slot);
1790 mci_writel(host, INTMASK,
1791 (int_mask | sdio_int));
1793 mci_writel(host, INTMASK,
1794 (int_mask & ~sdio_int));
1797 spin_unlock_irqrestore(&host->slock, flags);
1800 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* IO-domain voltage levels in millivolts, used as the GRF select value. */
1802 IO_DOMAIN_12 = 1200,
1803 IO_DOMAIN_18 = 1800,
1804 IO_DOMAIN_33 = 3300,
/*
 * Program the SoC GRF so the SD IO pad voltage domain matches the new
 * signalling level.  RK3288 uses a direct grf_writel into GRF_IO_VSEL;
 * RK3368 goes through the grf regmap at offset 0x900.  Other SoCs are
 * rejected with an error log.
 */
1806 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1816 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1817 __FUNCTION__, mmc_hostname(host->mmc));
1820 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1821 __FUNCTION__, mmc_hostname(host->mmc));
1825 if (cpu_is_rk3288()) {
1826 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
/* Bit 23 of the upper half-word is the write-enable for the VSEL bit. */
1827 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1830 } else if (host->cid == DW_MCI_TYPE_RK3368) {
1831 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1832 regmap_write(host->grf, 0x900, (voltage << 6) | (1 << 22));
1836 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1837 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * Perform the actual signalling-voltage switch (3.3V / 1.8V / 1.2V):
 * set the vmmc regulator (via the IO-domain helper on RK3288), update
 * the GRF IO-domain select, toggle SDMMC_UHS_VOLT_REG_18 in UHS_REG,
 * and wait for the regulator output to stabilise (~5 ms per spec).
 * Only supported on controller IP >= 2.40a.
 */
1841 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1842 struct mmc_ios *ios)
1845 unsigned int value,uhs_reg;
1848 * Signal Voltage Switching is only applicable for Host Controllers
1851 if (host->verid < DW_MMC_240A)
1854 uhs_reg = mci_readl(host, UHS_REG);
1855 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1856 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1858 switch (ios->signal_voltage) {
1859 case MMC_SIGNAL_VOLTAGE_330:
1860 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1862 if (cpu_is_rk3288())
1863 ret = io_domain_regulator_set_voltage(
1864 host->vmmc, 3300000, 3300000);
1866 ret = regulator_set_voltage(host->vmmc, 3300000, 3300000);
1868 /* regulator_put(host->vmmc); //to be done in remove function. */
1870 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1871 __func__, regulator_get_voltage(host->vmmc), ret);
1873 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1874 " failed\n", mmc_hostname(host->mmc));
/* Re-align the pad IO-domain with the new 3.3V rail. */
1877 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1879 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1880 __FUNCTION__, mmc_hostname(host->mmc));
1882 /* set High-power mode */
1883 value = mci_readl(host, CLKENA);
1884 value &= ~SDMMC_CLKEN_LOW_PWR;
1885 mci_writel(host,CLKENA , value);
/* Clear the 1.8V enable bit so the controller drives 3.3V signalling. */
1887 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1888 mci_writel(host,UHS_REG , uhs_reg);
1891 usleep_range(5000, 5500);
1893 /* 3.3V regulator output should be stable within 5 ms */
1894 uhs_reg = mci_readl(host, UHS_REG);
1895 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1898 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1899 mmc_hostname(host->mmc));
1902 case MMC_SIGNAL_VOLTAGE_180:
1904 if (cpu_is_rk3288())
1905 ret = io_domain_regulator_set_voltage(
1909 ret = regulator_set_voltage(
1912 /* regulator_put(host->vmmc);//to be done in remove function. */
1914 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1915 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1917 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1918 " failed\n", mmc_hostname(host->mmc));
1921 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1925 * Enable 1.8V Signal Enable in the Host Control2
1928 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1931 usleep_range(5000, 5500);
1932 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1933 __FUNCTION__,mmc_hostname(host->mmc));
1935 /* 1.8V regulator output should be stable within 5 ms */
1936 uhs_reg = mci_readl(host, UHS_REG);
1937 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1940 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1941 mmc_hostname(host->mmc));
1944 case MMC_SIGNAL_VOLTAGE_120:
1946 if (cpu_is_rk3288())
1947 ret = io_domain_regulator_set_voltage(
1951 ret = regulator_set_voltage(host->vmmc,
1954 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1955 " failed\n", mmc_hostname(host->mmc));
1961 /* No signal voltage switch required */
/*
 * mmc_host_ops.start_signal_voltage_switch callback: thin wrapper that
 * delegates to dw_mci_do_start_signal_voltage_switch() for IP >= 2.40a.
 */
1967 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1968 struct mmc_ios *ios)
1970 struct dw_mci_slot *slot = mmc_priv(mmc);
1971 struct dw_mci *host = slot->host;
1974 if (host->verid < DW_MMC_240A)
1977 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * mmc_host_ops.execute_tuning callback: pick the tuning block pattern
 * for CMD19/CMD21 based on bus width, select the clock controller id
 * (con_id) by card type, and hand the actual phase sweep to the
 * SoC-specific drv_data->execute_tuning implementation.
 */
1983 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1985 struct dw_mci_slot *slot = mmc_priv(mmc);
1986 struct dw_mci *host = slot->host;
1987 const struct dw_mci_drv_data *drv_data = host->drv_data;
1988 struct dw_mci_tuning_data tuning_data;
1991 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1992 if(cpu_is_rk3036() || cpu_is_rk312x())
1995 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1996 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1997 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1998 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1999 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
2000 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
2001 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
2005 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
2006 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
2007 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
2010 "Undefined command(%d) for tuning\n", opcode);
2015 /* Recommend sample phase and delayline
2016 Fixme: Mix-use these three controllers will cause
/* con_id selects the per-controller clock: 3 = eMMC, 1 = SDIO, 0 = SD. */
2019 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
2020 tuning_data.con_id = 3;
2021 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2022 tuning_data.con_id = 1;
2024 tuning_data.con_id = 0;
2026 /* 0: driver, from host->devices
2027 1: sample, from devices->host
2029 tuning_data.tuning_type = 1;
2031 if (drv_data && drv_data->execute_tuning)
2032 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/*
 * Timeout recovery (mmc_host_ops.post_tmo): invoked after a request
 * times out.  Aborts the transfer (CMD12, either via send_stop_cmd or a
 * raw CMD register write), performs a full controller reset, re-inits
 * the DMA engine, restores FIFOTH/TMOUT/INTMASK, and re-applies ios and
 * bus setup for slots that keep power — effectively re-initialising the
 * controller to a clean idle state.
 */
2037 static void dw_mci_post_tmo(struct mmc_host *mmc)
2039 struct dw_mci_slot *slot = mmc_priv(mmc);
2040 struct dw_mci *host = slot->host;
2041 struct mmc_data *data;
2042 u32 ret, i, regs, cmd_flags;
2044 unsigned long timeout = 0;
2045 bool ret_timeout = true;
/* Drop the dead request and force the state machine back to idle. */
2047 host->cur_slot->mrq = NULL;
2049 host->state = STATE_IDLE;
2053 printk("[%s] -- Timeout recovery procedure start --\n",
2054 mmc_hostname(host->mmc));
2056 if (data && (data->stop)) {
2057 send_stop_cmd(host, data);
/* No stop descriptor: issue a raw CMD12 through the CMD register. */
2059 mci_writel(host, CMDARG, 0);
2061 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC |
2062 SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2064 if (host->mmc->hold_reg_flag)
2065 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2067 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
2069 timeout = jiffies + msecs_to_jiffies(500);
2071 while(ret_timeout) {
2072 ret_timeout = time_before(jiffies, timeout);
2073 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2077 if (false == ret_timeout)
2078 MMC_DBG_ERR_FUNC(host->mmc, "stop recovery failed![%s]",
2079 mmc_hostname(host->mmc));
2082 if (!dw_mci_ctrl_all_reset(host)) {
2087 #ifdef CONFIG_MMC_DW_IDMAC
2088 if (host->use_dma && host->dma_ops->init)
2089 host->dma_ops->init(host);
2093 * Restore the initial value at FIFOTH register
2094 * And Invalidate the prev_blksz with zero
2096 mci_writel(host, FIFOTH, host->fifoth_val);
2097 host->prev_blksz = 0;
2098 mci_writel(host, TMOUT, 0xFFFFFFFF);
2099 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2100 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
2101 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
2102 if (!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
2103 regs |= SDMMC_INT_CD;
/* Preserve the SDIO IRQ mask bit if it was enabled before the reset. */
2105 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)) {
2106 if (host->verid < DW_MMC_240A)
2107 sdio_int = SDMMC_INT_SDIO(0);
2109 sdio_int = SDMMC_INT_SDIO(8);
2111 if (mci_readl(host, INTMASK) & sdio_int)
2115 mci_writel(host, INTMASK, regs);
2116 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
/* Re-apply ios and bus settings for every powered slot. */
2117 for (i = 0; i < host->num_slots; i++) {
2118 struct dw_mci_slot *slot = host->slot[i];
2121 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2122 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2123 dw_mci_setup_bus(slot, true);
2126 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2128 printk("[%s] -- Timeout recovery procedure finished --\n",
2129 mmc_hostname(host->mmc));
/* Host operations table handed to the mmc core at slot init. */
2133 static const struct mmc_host_ops dw_mci_ops = {
2134 .request = dw_mci_request,
2135 .pre_req = dw_mci_pre_req,
2136 .post_req = dw_mci_post_req,
2137 .set_ios = dw_mci_set_ios,
2138 .get_ro = dw_mci_get_ro,
2139 .get_cd = dw_mci_get_cd,
2140 .set_sdio_status = dw_mci_set_sdio_status,
2141 .hw_reset = dw_mci_hw_reset,
2142 .enable_sdio_irq = dw_mci_enable_sdio_irq,
2143 .execute_tuning = dw_mci_execute_tuning,
2144 .post_tmo = dw_mci_post_tmo,
2145 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
2146 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
2147 .card_busy = dw_mci_card_busy,
/*
 * Enable/disable the controller's interrupt line, tracking the current
 * state in host->irq_state so repeated calls with the same flag do not
 * unbalance the enable_irq()/disable_irq() depth counters.
 */
2152 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
2154 unsigned long flags;
2159 local_irq_save(flags);
2160 if(host->irq_state != irqflag)
2162 host->irq_state = irqflag;
2165 enable_irq(host->irq);
2169 disable_irq(host->irq);
2172 local_irq_restore(flags);
/*
 * End-of-data handling: for write transfers (except CMD14 bus test),
 * translate latched data-status bits into an error code on host->data,
 * then wait for the controller/card busy condition to clear before the
 * request is completed.
 */
2176 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2177 __releases(&host->lock)
2178 __acquires(&host->lock)
2180 if(DW_MCI_SEND_STATUS == host->dir_status){
2182 if( MMC_BUS_TEST_W != host->cmd->opcode){
2183 if(host->data_status & SDMMC_INT_DCRC)
2184 host->data->error = -EILSEQ;
2185 else if(host->data_status & SDMMC_INT_EBE)
2186 host->data->error = -ETIMEDOUT;
2188 dw_mci_wait_unbusy(host);
2191 dw_mci_wait_unbusy(host);
/*
 * Complete the current request and start the next queued one, if any.
 * Drops host->lock around mmc_request_done() since the mmc core may
 * re-enter the host driver from that callback.  Called with host->lock
 * held (see __releases/__acquires annotations).
 */
2196 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2197 __releases(&host->lock)
2198 __acquires(&host->lock)
2200 struct dw_mci_slot *slot;
2201 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2203 //WARN_ON(host->cmd || host->data);
2205 dw_mci_deal_data_end(host, mrq);
2208 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2209 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2211 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2212 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2214 host->cur_slot->mrq = NULL;
/* Dequeue and start the next waiting slot, or return to idle. */
2216 if (!list_empty(&host->queue)) {
2217 slot = list_entry(host->queue.next,
2218 struct dw_mci_slot, queue_node);
2219 list_del(&slot->queue_node);
2220 dev_vdbg(host->dev, "list not empty: %s is next\n",
2221 mmc_hostname(slot->mmc));
2222 host->state = STATE_SENDING_CMD;
2223 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2224 dw_mci_start_request(host, slot);
2226 dev_vdbg(host->dev, "list empty\n");
2227 host->state = STATE_IDLE;
/* Release the lock: mmc_request_done() may call back into this driver. */
2230 spin_unlock(&host->lock);
2231 mmc_request_done(prev_mmc, mrq);
2232 spin_lock(&host->lock);
/*
 * Finish a command: read the (short or long) response registers and
 * translate the latched command-status interrupt bits into cmd->error
 * (-ETIMEDOUT for response timeout, -EILSEQ for response CRC, etc.).
 * Note the 136-bit response is read RESP0..RESP3 into resp[3]..resp[0],
 * matching the mmc core's big-endian resp ordering.
 */
2235 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2237 u32 status = host->cmd_status;
2239 host->cmd_status = 0;
2241 /* Read the response from the card (up to 16 bytes) */
2242 if (cmd->flags & MMC_RSP_PRESENT) {
2243 if (cmd->flags & MMC_RSP_136) {
2244 cmd->resp[3] = mci_readl(host, RESP0);
2245 cmd->resp[2] = mci_readl(host, RESP1);
2246 cmd->resp[1] = mci_readl(host, RESP2);
2247 cmd->resp[0] = mci_readl(host, RESP3);
2249 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2250 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2252 cmd->resp[0] = mci_readl(host, RESP0);
2256 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2257 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
2261 if (status & SDMMC_INT_RTO)
/* SDIO hosts get special handling for response timeout (lines elided). */
2263 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2266 cmd->error = -ETIMEDOUT;
2267 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2268 cmd->error = -EILSEQ;
2269 }else if (status & SDMMC_INT_RESP_ERR){
2274 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2275 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* Rate-limit repeated CMD-timeout logging via host->cmd_rto counter. */
2278 if(MMC_SEND_STATUS != cmd->opcode)
2279 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2280 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2281 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2285 /* newer ip versions need a delay between retries */
2286 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine, run as a tasklet after IRQ events.
 * Loops over host->state, consuming pending_events bits set by the IRQ
 * handler, until the state stops changing:
 *   STATE_SENDING_CMD  -> command done; start data, stop, or finish
 *   STATE_SENDING_DATA -> transfer done (or error -> abort via CMD12)
 *   STATE_DATA_BUSY    -> data complete; map status bits to data->error
 *   STATE_SENDING_STOP -> CMD12 done; finish the request
 *   STATE_DATA_ERROR   -> wait for transfer drain, then treat as busy
 * Runs entirely under host->lock (dropped only inside request_end).
 */
2292 static void dw_mci_tasklet_func(unsigned long priv)
2294 struct dw_mci *host = (struct dw_mci *)priv;
2295 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2296 struct mmc_data *data;
2297 struct mmc_command *cmd;
2298 enum dw_mci_state state;
2299 enum dw_mci_state prev_state;
2300 u32 status, cmd_flags;
2301 unsigned long timeout = 0;
2304 spin_lock(&host->lock);
2306 state = host->state;
2316 case STATE_SENDING_CMD:
2317 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2318 &host->pending_events))
2323 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2324 dw_mci_command_complete(host, cmd);
/* SBC (CMD23) finished cleanly: now send the actual data command. */
2325 if (cmd == host->mrq->sbc && !cmd->error) {
2326 prev_state = state = STATE_SENDING_CMD;
2327 __dw_mci_start_request(host, host->cur_slot,
/* Command for a data transfer failed: abort DMA and send stop. */
2332 if (cmd->data && cmd->error) {
2333 dw_mci_stop_dma(host);
2336 send_stop_cmd(host, data);
2337 state = STATE_SENDING_STOP;
2340 /* host->data = NULL; */
2343 send_stop_abort(host, data);
2344 state = STATE_SENDING_STOP;
2347 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2350 if (!host->mrq->data || cmd->error) {
2351 dw_mci_request_end(host, host->mrq);
2355 prev_state = state = STATE_SENDING_DATA;
2358 case STATE_SENDING_DATA:
2359 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2360 dw_mci_stop_dma(host);
2363 send_stop_cmd(host, data);
2365 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2366 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2367 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
/* No stop descriptor available: write a raw CMD12 to the CMD register. */
2369 mci_writel(host, CMDARG, 0);
2371 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2372 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2374 if(host->mmc->hold_reg_flag)
2375 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2377 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait (<=500 ms) for the controller to latch the abort command. */
2379 timeout = jiffies + msecs_to_jiffies(500);
2382 ret = time_before(jiffies, timeout);
2383 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2387 MMC_DBG_ERR_FUNC(host->mmc,
2388 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2389 __func__, mmc_hostname(host->mmc));
2392 send_stop_abort(host, data);
2394 state = STATE_DATA_ERROR;
2398 MMC_DBG_CMD_FUNC(host->mmc,
2399 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2400 prev_state,state, mmc_hostname(host->mmc));
2402 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2403 &host->pending_events))
2405 MMC_DBG_INFO_FUNC(host->mmc,
2406 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2407 prev_state,state,mmc_hostname(host->mmc));
2409 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2410 prev_state = state = STATE_DATA_BUSY;
2413 case STATE_DATA_BUSY:
2414 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2415 &host->pending_events))
2418 dw_mci_deal_data_end(host, host->mrq);
2419 MMC_DBG_INFO_FUNC(host->mmc,
2420 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2421 prev_state,state,mmc_hostname(host->mmc));
2423 /* host->data = NULL; */
2424 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2425 status = host->data_status;
/* Map latched data-error interrupt bits onto data->error. */
2427 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2428 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2429 MMC_DBG_ERR_FUNC(host->mmc,
2430 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2431 prev_state,state, status, mmc_hostname(host->mmc));
2433 if (status & SDMMC_INT_DRTO) {
2434 data->error = -ETIMEDOUT;
2435 } else if (status & SDMMC_INT_DCRC) {
2436 data->error = -EILSEQ;
2437 } else if (status & SDMMC_INT_EBE &&
2438 host->dir_status == DW_MCI_SEND_STATUS){
2440 * No data CRC status was returned.
2441 * The number of bytes transferred will
2442 * be exaggerated in PIO mode.
2444 data->bytes_xfered = 0;
2445 data->error = -ETIMEDOUT;
2454 * After an error, there may be data lingering
2455 * in the FIFO, so reset it - doing so
2456 * generates a block interrupt, hence setting
2457 * the scatter-gather pointer to NULL.
2459 dw_mci_fifo_reset(host);
/* Success: report the full transfer length to the mmc core. */
2461 data->bytes_xfered = data->blocks * data->blksz;
2466 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2467 prev_state,state,mmc_hostname(host->mmc));
2468 dw_mci_request_end(host, host->mrq);
2471 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2472 prev_state,state,mmc_hostname(host->mmc));
/* With CMD23 in use, no explicit CMD12 is needed after clean data. */
2474 if (host->mrq->sbc && !data->error) {
2475 data->stop->error = 0;
2477 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2478 prev_state,state,mmc_hostname(host->mmc));
2480 dw_mci_request_end(host, host->mrq);
2484 prev_state = state = STATE_SENDING_STOP;
2486 send_stop_cmd(host, data);
2488 if (data->stop && !data->error) {
2489 /* stop command for open-ended transfer*/
2491 send_stop_abort(host, data);
2495 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2496 prev_state,state,mmc_hostname(host->mmc));
2498 case STATE_SENDING_STOP:
2499 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2502 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2503 prev_state, state, mmc_hostname(host->mmc));
2505 /* CMD error in data command */
2506 if (host->mrq->cmd->error && host->mrq->data) {
2507 dw_mci_fifo_reset(host);
2511 host->data = NULL; */
2513 dw_mci_command_complete(host, host->mrq->stop);
2515 if (host->mrq->stop)
2516 dw_mci_command_complete(host, host->mrq->stop);
2518 host->cmd_status = 0;
2521 dw_mci_request_end(host, host->mrq);
2524 case STATE_DATA_ERROR:
2525 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2526 &host->pending_events))
2529 state = STATE_DATA_BUSY;
2532 } while (state != prev_state);
2534 host->state = state;
2536 spin_unlock(&host->lock);
2540 /* push final bytes to part_buf, only use during push */
2541 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2543 memcpy((void *)&host->part_buf, buf, cnt);
2544 host->part_buf_count = cnt;
2547 /* append bytes to part_buf, only use during push */
2548 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2550 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2551 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2552 host->part_buf_count += cnt;
2556 /* pull first bytes from part_buf, only use during pull */
2557 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2559 cnt = min(cnt, (int)host->part_buf_count);
2561 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2563 host->part_buf_count -= cnt;
2564 host->part_buf_start += cnt;
2569 /* pull final bytes from the part_buf, assuming it's just been filled */
2570 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2572 memcpy(buf, &host->part_buf, cnt);
2573 host->part_buf_start = cnt;
2574 host->part_buf_count = (1 << host->data_shift) - cnt;
2577 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2579 struct mmc_data *data = host->data;
2582 /* try and push anything in the part_buf */
2583 if (unlikely(host->part_buf_count)) {
2584 int len = dw_mci_push_part_bytes(host, buf, cnt);
2587 if (host->part_buf_count == 2) {
2588 mci_writew(host, DATA(host->data_offset),
2590 host->part_buf_count = 0;
2593 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2594 if (unlikely((unsigned long)buf & 0x1)) {
2596 u16 aligned_buf[64];
2597 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2598 int items = len >> 1;
2600 /* memcpy from input buffer into aligned buffer */
2601 memcpy(aligned_buf, buf, len);
2604 /* push data from aligned buffer into fifo */
2605 for (i = 0; i < items; ++i)
2606 mci_writew(host, DATA(host->data_offset),
2613 for (; cnt >= 2; cnt -= 2)
2614 mci_writew(host, DATA(host->data_offset), *pdata++);
2617 /* put anything remaining in the part_buf */
2619 dw_mci_set_part_bytes(host, buf, cnt);
2620 /* Push data if we have reached the expected data length */
2621 if ((data->bytes_xfered + init_cnt) ==
2622 (data->blksz * data->blocks))
2623 mci_writew(host, DATA(host->data_offset),
2628 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2630 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2631 if (unlikely((unsigned long)buf & 0x1)) {
2633 /* pull data from fifo into aligned buffer */
2634 u16 aligned_buf[64];
2635 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2636 int items = len >> 1;
2638 for (i = 0; i < items; ++i)
2639 aligned_buf[i] = mci_readw(host,
2640 DATA(host->data_offset));
2641 /* memcpy from aligned buffer into output buffer */
2642 memcpy(buf, aligned_buf, len);
2650 for (; cnt >= 2; cnt -= 2)
2651 *pdata++ = mci_readw(host, DATA(host->data_offset));
2655 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2656 dw_mci_pull_final_bytes(host, buf, cnt);
2660 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2662 struct mmc_data *data = host->data;
2665 /* try and push anything in the part_buf */
2666 if (unlikely(host->part_buf_count)) {
2667 int len = dw_mci_push_part_bytes(host, buf, cnt);
2670 if (host->part_buf_count == 4) {
2671 mci_writel(host, DATA(host->data_offset),
2673 host->part_buf_count = 0;
2676 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2677 if (unlikely((unsigned long)buf & 0x3)) {
2679 u32 aligned_buf[32];
2680 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2681 int items = len >> 2;
2683 /* memcpy from input buffer into aligned buffer */
2684 memcpy(aligned_buf, buf, len);
2687 /* push data from aligned buffer into fifo */
2688 for (i = 0; i < items; ++i)
2689 mci_writel(host, DATA(host->data_offset),
2696 for (; cnt >= 4; cnt -= 4)
2697 mci_writel(host, DATA(host->data_offset), *pdata++);
2700 /* put anything remaining in the part_buf */
2702 dw_mci_set_part_bytes(host, buf, cnt);
2703 /* Push data if we have reached the expected data length */
2704 if ((data->bytes_xfered + init_cnt) ==
2705 (data->blksz * data->blocks))
2706 mci_writel(host, DATA(host->data_offset),
2711 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2713 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2714 if (unlikely((unsigned long)buf & 0x3)) {
2716 /* pull data from fifo into aligned buffer */
2717 u32 aligned_buf[32];
2718 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2719 int items = len >> 2;
2721 for (i = 0; i < items; ++i)
2722 aligned_buf[i] = mci_readl(host,
2723 DATA(host->data_offset));
2724 /* memcpy from aligned buffer into output buffer */
2725 memcpy(buf, aligned_buf, len);
2733 for (; cnt >= 4; cnt -= 4)
2734 *pdata++ = mci_readl(host, DATA(host->data_offset));
2738 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2739 dw_mci_pull_final_bytes(host, buf, cnt);
2743 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2745 struct mmc_data *data = host->data;
2748 /* try and push anything in the part_buf */
2749 if (unlikely(host->part_buf_count)) {
2750 int len = dw_mci_push_part_bytes(host, buf, cnt);
2754 if (host->part_buf_count == 8) {
2755 mci_writeq(host, DATA(host->data_offset),
2757 host->part_buf_count = 0;
2760 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2761 if (unlikely((unsigned long)buf & 0x7)) {
2763 u64 aligned_buf[16];
2764 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2765 int items = len >> 3;
2767 /* memcpy from input buffer into aligned buffer */
2768 memcpy(aligned_buf, buf, len);
2771 /* push data from aligned buffer into fifo */
2772 for (i = 0; i < items; ++i)
2773 mci_writeq(host, DATA(host->data_offset),
2780 for (; cnt >= 8; cnt -= 8)
2781 mci_writeq(host, DATA(host->data_offset), *pdata++);
2784 /* put anything remaining in the part_buf */
2786 dw_mci_set_part_bytes(host, buf, cnt);
2787 /* Push data if we have reached the expected data length */
2788 if ((data->bytes_xfered + init_cnt) ==
2789 (data->blksz * data->blocks))
2790 mci_writeq(host, DATA(host->data_offset),
2795 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2797 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2798 if (unlikely((unsigned long)buf & 0x7)) {
2800 /* pull data from fifo into aligned buffer */
2801 u64 aligned_buf[16];
2802 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2803 int items = len >> 3;
2805 for (i = 0; i < items; ++i)
2806 aligned_buf[i] = mci_readq(host,
2807 DATA(host->data_offset));
2808 /* memcpy from aligned buffer into output buffer */
2809 memcpy(buf, aligned_buf, len);
2817 for (; cnt >= 8; cnt -= 8)
2818 *pdata++ = mci_readq(host, DATA(host->data_offset));
2822 host->part_buf = mci_readq(host, DATA(host->data_offset));
2823 dw_mci_pull_final_bytes(host, buf, cnt);
2827 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2831 /* get remaining partial bytes */
2832 len = dw_mci_pull_part_bytes(host, buf, cnt);
2833 if (unlikely(len == cnt))
2838 /* get the rest of the data */
2839 host->pull_data(host, buf, cnt);
2842 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2844 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2846 unsigned int offset;
2847 struct mmc_data *data = host->data;
2848 int shift = host->data_shift;
2851 unsigned int remain, fcnt;
2853 if(!host->mmc->bus_refs){
2854 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2858 if (!sg_miter_next(sg_miter))
2861 host->sg = sg_miter->piter.sg;
2862 buf = sg_miter->addr;
2863 remain = sg_miter->length;
2867 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2868 << shift) + host->part_buf_count;
2869 len = min(remain, fcnt);
2872 dw_mci_pull_data(host, (void *)(buf + offset), len);
2873 data->bytes_xfered += len;
2878 sg_miter->consumed = offset;
2879 status = mci_readl(host, MINTSTS);
2880 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2881 /* if the RXDR is ready read again */
2882 } while ((status & SDMMC_INT_RXDR) ||
2883 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2886 if (!sg_miter_next(sg_miter))
2888 sg_miter->consumed = 0;
2890 sg_miter_stop(sg_miter);
2894 sg_miter_stop(sg_miter);
2898 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2901 static void dw_mci_write_data_pio(struct dw_mci *host)
2903 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2905 unsigned int offset;
2906 struct mmc_data *data = host->data;
2907 int shift = host->data_shift;
2910 unsigned int fifo_depth = host->fifo_depth;
2911 unsigned int remain, fcnt;
2913 if(!host->mmc->bus_refs){
2914 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2919 if (!sg_miter_next(sg_miter))
2922 host->sg = sg_miter->piter.sg;
2923 buf = sg_miter->addr;
2924 remain = sg_miter->length;
2928 fcnt = ((fifo_depth -
2929 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2930 << shift) - host->part_buf_count;
2931 len = min(remain, fcnt);
2934 host->push_data(host, (void *)(buf + offset), len);
2935 data->bytes_xfered += len;
2940 sg_miter->consumed = offset;
2941 status = mci_readl(host, MINTSTS);
2942 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2943 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2946 if (!sg_miter_next(sg_miter))
2948 sg_miter->consumed = 0;
2950 sg_miter_stop(sg_miter);
2954 sg_miter_stop(sg_miter);
2958 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2961 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2963 if (!host->cmd_status)
2964 host->cmd_status = status;
2971 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2972 tasklet_schedule(&host->tasklet);
2975 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2977 struct dw_mci *host = dev_id;
2978 u32 pending, sdio_int;
2981 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2984 * DTO fix - version 2.10a and below, and only if internal DMA
2987 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2989 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2990 pending |= SDMMC_INT_DATA_OVER;
2994 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2995 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2996 host->cmd_status = pending;
2998 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2999 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
3001 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3004 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
3005 /* if there is an error report DATA_ERROR */
3006 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
3007 host->data_status = pending;
3009 set_bit(EVENT_DATA_ERROR, &host->pending_events);
3011 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
3012 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
3013 tasklet_schedule(&host->tasklet);
3016 if (pending & SDMMC_INT_DATA_OVER) {
3017 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
3018 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
3019 if (!host->data_status)
3020 host->data_status = pending;
3022 if (host->dir_status == DW_MCI_RECV_STATUS) {
3023 if (host->sg != NULL)
3024 dw_mci_read_data_pio(host, true);
3026 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3027 tasklet_schedule(&host->tasklet);
3030 if (pending & SDMMC_INT_RXDR) {
3031 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
3032 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
3033 dw_mci_read_data_pio(host, false);
3036 if (pending & SDMMC_INT_TXDR) {
3037 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
3038 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
3039 dw_mci_write_data_pio(host);
3042 if (pending & SDMMC_INT_VSI) {
3043 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
3044 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
3045 dw_mci_cmd_interrupt(host, pending);
3048 if (pending & SDMMC_INT_CMD_DONE) {
3049 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
3050 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
3051 dw_mci_cmd_interrupt(host, pending);
3054 if (pending & SDMMC_INT_CD) {
3055 mci_writel(host, RINTSTS, SDMMC_INT_CD);
3056 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
3057 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
3058 queue_work(host->card_workqueue, &host->card_work);
3061 if (pending & SDMMC_INT_HLE) {
3062 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
3063 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
3067 /* Handle SDIO Interrupts */
3068 for (i = 0; i < host->num_slots; i++) {
3069 struct dw_mci_slot *slot = host->slot[i];
3071 if (host->verid < DW_MMC_240A)
3072 sdio_int = SDMMC_INT_SDIO(i);
3074 sdio_int = SDMMC_INT_SDIO(i + 8);
3076 if (pending & sdio_int) {
3077 mci_writel(host, RINTSTS, sdio_int);
3078 mmc_signal_sdio_irq(slot->mmc);
3084 #ifdef CONFIG_MMC_DW_IDMAC
3085 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
3086 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
3087 /* Handle DMA interrupts */
3088 pending = mci_readl(host, IDSTS);
3089 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
3090 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
3091 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
3092 host->dma_ops->complete((void *)host);
3100 static void dw_mci_work_routine_card(struct work_struct *work)
3102 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
3105 for (i = 0; i < host->num_slots; i++) {
3106 struct dw_mci_slot *slot = host->slot[i];
3107 struct mmc_host *mmc = slot->mmc;
3108 struct mmc_request *mrq;
3111 present = dw_mci_get_cd(mmc);
3113 /* Card insert, switch data line to uart function, and vice verse.
3114 eONLY audi chip need switched by software, using udbg tag in dts!
3116 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
3118 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3119 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3120 mmc_hostname(host->mmc));
3122 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3123 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3124 mmc_hostname(host->mmc));
3128 while (present != slot->last_detect_state) {
3129 dev_dbg(&slot->mmc->class_dev, "card %s\n",
3130 present ? "inserted" : "removed");
3131 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
3132 present ? "inserted" : "removed.", mmc_hostname(mmc));
3134 dw_mci_ctrl_all_reset(host);
3135 /* Stop edma when rountine card triggered */
3136 if(cpu_is_rk3036() || cpu_is_rk312x())
3137 if(host->dma_ops && host->dma_ops->stop)
3138 host->dma_ops->stop(host);
3139 rk_send_wakeup_key();//wake up system
3140 spin_lock_bh(&host->lock);
3142 /* Card change detected */
3143 slot->last_detect_state = present;
3145 /* Clean up queue if present */
3148 if (mrq == host->mrq) {
3152 switch (host->state) {
3155 case STATE_SENDING_CMD:
3156 mrq->cmd->error = -ENOMEDIUM;
3160 case STATE_SENDING_DATA:
3161 mrq->data->error = -ENOMEDIUM;
3162 dw_mci_stop_dma(host);
3164 case STATE_DATA_BUSY:
3165 case STATE_DATA_ERROR:
3166 if (mrq->data->error == -EINPROGRESS)
3167 mrq->data->error = -ENOMEDIUM;
3171 case STATE_SENDING_STOP:
3172 mrq->stop->error = -ENOMEDIUM;
3176 dw_mci_request_end(host, mrq);
3178 list_del(&slot->queue_node);
3179 mrq->cmd->error = -ENOMEDIUM;
3181 mrq->data->error = -ENOMEDIUM;
3183 mrq->stop->error = -ENOMEDIUM;
3185 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3186 mrq->cmd->opcode, mmc_hostname(mmc));
3188 spin_unlock(&host->lock);
3189 mmc_request_done(slot->mmc, mrq);
3190 spin_lock(&host->lock);
3194 /* Power down slot */
3196 /* Clear down the FIFO */
3197 dw_mci_fifo_reset(host);
3198 #ifdef CONFIG_MMC_DW_IDMAC
3199 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3200 dw_mci_idmac_reset(host);
3205 spin_unlock_bh(&host->lock);
3207 present = dw_mci_get_cd(mmc);
3210 mmc_detect_change(slot->mmc,
3211 msecs_to_jiffies(host->pdata->detect_delay_ms));
3216 /* given a slot id, find out the device node representing that slot */
3217 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3219 struct device_node *np;
3223 if (!dev || !dev->of_node)
3226 for_each_child_of_node(dev->of_node, np) {
3227 addr = of_get_property(np, "reg", &len);
3228 if (!addr || (len < sizeof(int)))
3230 if (be32_to_cpup(addr) == slot)
/* Per-slot DT property -> quirk flag mapping. */
static struct dw_mci_of_slot_quirks {
	char *quirk;
	int id;
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};
3246 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3248 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3253 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3254 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3255 quirks |= of_slot_quirks[idx].id;
3260 /* find out bus-width for a given slot */
3261 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3263 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3269 if (of_property_read_u32(np, "bus-width", &bus_wd))
3270 dev_err(dev, "bus-width property not found, assuming width"
3276 /* find the pwr-en gpio for a given slot; or -1 if none specified */
3277 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3279 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3285 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3287 /* Having a missing entry is valid; return silently */
3288 if (!gpio_is_valid(gpio))
3291 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3292 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3296 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3302 /* find the write protect gpio for a given slot; or -1 if none specified */
3303 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3305 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3311 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3313 /* Having a missing entry is valid; return silently */
3314 if (!gpio_is_valid(gpio))
3317 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3318 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3325 /* find the cd gpio for a given slot */
3326 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3327 struct mmc_host *mmc)
3329 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3335 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3337 /* Having a missing entry is valid; return silently */
3338 if (!gpio_is_valid(gpio))
3341 if (mmc_gpio_request_cd(mmc, gpio, 0))
3342 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3345 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3347 struct mmc_host *mmc = dev_id;
3348 struct dw_mci_slot *slot = mmc_priv(mmc);
3349 struct dw_mci *host = slot->host;
3350 int gpio_cd = slot->cd_gpio;
3352 (gpio_get_value(gpio_cd) == 0) ?
3353 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
3354 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
3356 /* wakeup system whether gpio debounce or not */
3357 rk_send_wakeup_key();
3359 /* no need to trigger detect flow when rescan is disabled.
3360 This case happended in dpm, that we just wakeup system and
3361 let suspend_post notify callback handle it.
3363 if(mmc->rescan_disable == 0)
3364 queue_work(host->card_workqueue, &host->card_work);
3366 printk("%s: rescan been disabled!\n", __FUNCTION__);
3371 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3372 struct mmc_host *mmc)
3374 struct dw_mci_slot *slot = mmc_priv(mmc);
3375 struct dw_mci *host = slot->host;
3379 /* Having a missing entry is valid; return silently */
3380 if (!gpio_is_valid(gpio))
3383 irq = gpio_to_irq(gpio);
3385 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3386 NULL, dw_mci_gpio_cd_irqt,
3387 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
3391 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3393 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3394 enable_irq_wake(irq);
3397 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
3401 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3402 struct mmc_host *mmc)
3404 if (!gpio_is_valid(gpio))
3407 if (gpio_to_irq(gpio) >= 0) {
3408 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3409 devm_gpio_free(&mmc->class_dev, gpio);
3412 #else /* CONFIG_OF */
3413 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3417 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3421 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3425 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3429 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3430 struct mmc_host *mmc)
3434 #endif /* CONFIG_OF */
3436 /* @host: dw_mci host prvdata
3437 * Init pinctrl for each platform. Usually we assign
3438 * "defalut" tag for functional usage, "idle" tag for gpio
3439 * state and "udbg" tag for uart_dbg if any.
3441 static void dw_mci_init_pinctrl(struct dw_mci *host)
3443 /* Fixme: DON'T TOUCH EMMC SETTING! */
3444 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3447 /* Get pinctrl for DTS */
3448 host->pinctrl = devm_pinctrl_get(host->dev);
3449 if (IS_ERR(host->pinctrl)) {
3450 dev_err(host->dev, "%s: No pinctrl used!\n",
3451 mmc_hostname(host->mmc));
3455 /* Lookup idle state */
3456 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3457 PINCTRL_STATE_IDLE);
3458 if (IS_ERR(host->pins_idle)) {
3459 dev_err(host->dev, "%s: No idle tag found!\n",
3460 mmc_hostname(host->mmc));
3462 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3463 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3464 mmc_hostname(host->mmc));
3467 /* Lookup default state */
3468 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3469 PINCTRL_STATE_DEFAULT);
3470 if (IS_ERR(host->pins_default)) {
3471 dev_err(host->dev, "%s: No default pinctrl found!\n",
3472 mmc_hostname(host->mmc));
3474 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3475 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3476 mmc_hostname(host->mmc));
3479 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3480 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3481 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3482 if (IS_ERR(host->pins_udbg)) {
3483 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3484 mmc_hostname(host->mmc));
3486 if (!dw_mci_get_cd(host->mmc))
3487 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3488 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3489 mmc_hostname(host->mmc));
3494 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3495 unsigned long mode, void *unused)
3497 struct mmc_host *host = container_of(
3498 notify_block, struct mmc_host, pm_notify);
3499 unsigned long flags;
3502 case PM_HIBERNATION_PREPARE:
3503 case PM_SUSPEND_PREPARE:
3504 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3505 spin_lock_irqsave(&host->lock, flags);
3506 host->rescan_disable = 1;
3507 spin_unlock_irqrestore(&host->lock, flags);
3508 if (cancel_delayed_work(&host->detect))
3509 wake_unlock(&host->detect_wake_lock);
3512 case PM_POST_SUSPEND:
3513 case PM_POST_HIBERNATION:
3514 case PM_POST_RESTORE:
3515 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3516 spin_lock_irqsave(&host->lock, flags);
3517 host->rescan_disable = 0;
3518 spin_unlock_irqrestore(&host->lock, flags);
3519 mmc_detect_change(host, 10);
3525 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3527 struct mmc_host *mmc;
3528 struct dw_mci_slot *slot;
3529 const struct dw_mci_drv_data *drv_data = host->drv_data;
3534 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3538 slot = mmc_priv(mmc);
3542 host->slot[id] = slot;
3545 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3547 mmc->ops = &dw_mci_ops;
3549 if (of_property_read_u32_array(host->dev->of_node,
3550 "clock-freq-min-max", freq, 2)) {
3551 mmc->f_min = DW_MCI_FREQ_MIN;
3552 mmc->f_max = DW_MCI_FREQ_MAX;
3554 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3555 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3557 mmc->f_min = freq[0];
3558 mmc->f_max = freq[1];
3560 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3561 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3564 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
3566 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3567 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3568 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3569 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3570 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3571 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3573 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3574 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3575 if (register_pm_notifier(&mmc->pm_notify)) {
3576 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3577 goto err_pm_notifier;
3581 if (host->cid == DW_MCI_TYPE_RK3368) {
3582 if (IS_ERR(host->grf))
3583 pr_err("rk_sdmmc: dts couldn't find grf regmap for 3368\n");
3585 /* Disable force_jtag */
3586 regmap_write(host->grf, 0x43c, (1<<13)<<16 | (0 << 13));
3587 } else if (cpu_is_rk3288()) {
3588 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
3592 /* We assume only low-level chip use gpio_cd */
3593 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
3594 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3595 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3596 if (gpio_is_valid(slot->cd_gpio)) {
3597 /* Request gpio int for card detection */
3598 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3600 slot->cd_gpio = -ENODEV;
3601 dev_err(host->dev, "failed to get your cd-gpios!\n");
3605 if (host->pdata->get_ocr)
3606 mmc->ocr_avail = host->pdata->get_ocr(id);
3609 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3610 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3611 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3612 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3616 * Start with slot power disabled, it will be enabled when a card
3619 if (host->pdata->setpower)
3620 host->pdata->setpower(id, 0);
3622 if (host->pdata->caps)
3623 mmc->caps = host->pdata->caps;
3625 if (host->pdata->pm_caps)
3626 mmc->pm_caps = host->pdata->pm_caps;
3628 if (host->dev->of_node) {
3629 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3633 ctrl_id = to_platform_device(host->dev)->id;
3635 if (drv_data && drv_data->caps)
3636 mmc->caps |= drv_data->caps[ctrl_id];
3637 if (drv_data && drv_data->hold_reg_flag)
3638 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3640 /* set the compatibility of driver. */
3641 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3642 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3644 if (host->pdata->caps2)
3645 mmc->caps2 = host->pdata->caps2;
3647 if (host->pdata->get_bus_wd)
3648 bus_width = host->pdata->get_bus_wd(slot->id);
3649 else if (host->dev->of_node)
3650 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3654 switch (bus_width) {
3656 mmc->caps |= MMC_CAP_8_BIT_DATA;
3658 mmc->caps |= MMC_CAP_4_BIT_DATA;
3661 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3662 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3663 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3664 mmc->caps |= MMC_CAP_SDIO_IRQ;
3665 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3666 mmc->caps |= MMC_CAP_HW_RESET;
3667 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3668 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3669 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3670 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3671 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3672 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3673 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3674 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3676 /*Assign pm_caps pass to pm_flags*/
3677 mmc->pm_flags = mmc->pm_caps;
3679 if (host->pdata->blk_settings) {
3680 mmc->max_segs = host->pdata->blk_settings->max_segs;
3681 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3682 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3683 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3684 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3686 /* Useful defaults if platform data is unset. */
3687 #ifdef CONFIG_MMC_DW_IDMAC
3688 mmc->max_segs = host->ring_size;
3689 mmc->max_blk_size = 65536;
3690 mmc->max_blk_count = host->ring_size;
3691 mmc->max_seg_size = 0x1000;
3692 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3693 if(cpu_is_rk3036() || cpu_is_rk312x()){
3694 /* fixup for external dmac setting */
3696 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3697 mmc->max_blk_count = 65535;
3698 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3699 mmc->max_seg_size = mmc->max_req_size;
3703 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3704 mmc->max_blk_count = 512;
3705 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3706 mmc->max_seg_size = mmc->max_req_size;
3707 #endif /* CONFIG_MMC_DW_IDMAC */
3711 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3713 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
3718 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3719 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3723 if (IS_ERR(host->vmmc)) {
3724 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3727 ret = regulator_enable(host->vmmc);
3730 "failed to enable regulator: %d\n", ret);
3737 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3739 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3740 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3742 dw_mci_init_pinctrl(host);
3743 ret = mmc_add_host(mmc);
3747 #if defined(CONFIG_DEBUG_FS)
3748 dw_mci_init_debugfs(slot);
3751 /* Card initially undetected */
3752 slot->last_detect_state = 1;
3756 unregister_pm_notifier(&mmc->pm_notify);
3759 if (gpio_is_valid(slot->cd_gpio))
3760 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3765 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3767 /* Shutdown detect IRQ */
3768 if (slot->host->pdata->exit)
3769 slot->host->pdata->exit(id);
3771 /* Debugfs stuff is cleaned up by mmc core */
3772 mmc_remove_host(slot->mmc);
3773 slot->host->slot[id] = NULL;
3774 mmc_free_host(slot->mmc);
/*
 * dw_mci_init_dma() - set up the DMA path for the controller.
 *
 * Allocates one page of coherent memory for the scatter-gather
 * descriptor list, then selects a DMA backend: on rk3036/rk312x SoCs an
 * external DMA controller (edmac ops) is used, otherwise the IP's
 * internal IDMAC (when CONFIG_MMC_DW_IDMAC is set).  If the selected
 * ops table is incomplete or its init() fails, the driver falls back to
 * PIO mode.
 *
 * NOTE(review): error/fallback labels are elided in this extract;
 * code is left byte-identical.
 */
3777 static void dw_mci_init_dma(struct dw_mci *host)
3779 /* Alloc memory for sg translation */
3780 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3781 &host->sg_dma, GFP_KERNEL);
3782 if (!host->sg_cpu) {
3783 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3788 memset(host->sg_cpu, 0, PAGE_SIZE);
3791 /* Determine which DMA interface to use */
3792 #if defined(CONFIG_MMC_DW_IDMAC)
/* rk3036/rk312x lack a usable internal IDMAC; route through edmac */
3793 if(cpu_is_rk3036() || cpu_is_rk312x()){
3794 host->dma_ops = &dw_mci_edmac_ops;
3795 dev_info(host->dev, "Using external DMA controller.\n");
3797 host->dma_ops = &dw_mci_idmac_ops;
3798 dev_info(host->dev, "Using internal DMA controller.\n");
/* All four callbacks must exist before we commit to DMA mode */
3805 if (host->dma_ops->init && host->dma_ops->start &&
3806 host->dma_ops->stop && host->dma_ops->cleanup) {
3807 if (host->dma_ops->init(host)) {
3808 dev_err(host->dev, "%s: Unable to initialize "
3809 "DMA Controller.\n", __func__);
3813 dev_err(host->dev, "DMA initialization not found.\n");
3821 dev_info(host->dev, "Using PIO mode.\n");
/*
 * dw_mci_ctrl_reset() - pulse reset bit(s) in the CTRL register.
 * @reset: mask of SDMMC_CTRL_*_RESET bits to assert.
 *
 * Writes the requested reset bits and polls CTRL for up to 500 ms until
 * the hardware self-clears them.  Returns true on success, false (with
 * an error log) on timeout.
 *
 * NOTE(review): the OR-into-ctrl line and return statements are elided
 * in this extract; code is left byte-identical.
 */
3826 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3828 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3831 ctrl = mci_readl(host, CTRL);
3833 mci_writel(host, CTRL, ctrl);
3835 /* wait till resets clear */
3837 ctrl = mci_readl(host, CTRL);
3838 if (!(ctrl & reset))
3840 } while (time_before(jiffies, timeout));
3843 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * dw_mci_fifo_reset() - reset only the data FIFO.
 *
 * Stops the sg_miter first because resetting raises a block interrupt
 * and the in-flight scatter-gather state must not be touched afterwards.
 */
3849 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3852 * Reseting generates a block interrupt, hence setting
3853 * the scatter-gather pointer to NULL.
3856 sg_miter_stop(&host->sg_miter);
3860 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/*
 * dw_mci_ctrl_all_reset() - full reset: controller, FIFO and DMA.
 * Used at probe and on resume to bring the IP to a known state.
 */
3863 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3865 return dw_mci_ctrl_reset(host,
3866 SDMMC_CTRL_FIFO_RESET |
3868 SDMMC_CTRL_DMA_RESET);
/*
 * dw_mci_rst_pre_suspend() - save controller registers before a
 * power-down suspend ("controller-power-down" DT property).
 *
 * Copies the first DW_REGS_NUM 32-bit registers, plus CDTHRCTL, into
 * host->regs_buffer so dw_mci_rst_post_resume() can restore them.
 * NOTE(review): the buffer-pointer increments between iterations are
 * elided in this extract — presumably *buffer++ in the full source;
 * code is left byte-identical.
 */
3871 static void dw_mci_rst_pre_suspend(struct dw_mci *host)
3876 buffer = host->regs_buffer;
3878 for (index = 0; index < DW_REGS_NUM ; index++){
3879 *buffer = mci_readreg(host, index*4);
3880 MMC_DBG_INFO_FUNC(host->mmc, "[%s] :0x%08x.\n",
3881 dw_mci_regs[index].name, *buffer);
/* Card threshold control is saved separately after the linear block */
3885 *buffer = mci_readl(host,CDTHRCTL);
3886 MMC_DBG_INFO_FUNC(host->mmc, "[%s] :0x%08x.\n", "CARDTHRCTL", *buffer);
/*
 * dw_mci_rst_post_resume() - restore the registers captured by
 * dw_mci_rst_pre_suspend() after a power-down resume.
 * NOTE(review): buffer increments are elided in this extract; code is
 * left byte-identical.
 */
3889 static void dw_mci_rst_post_resume(struct dw_mci *host)
3894 buffer = host->regs_buffer;
3896 for (index = 0; index < DW_REGS_NUM; index++){
3897 mci_writereg(host, index*4, *buffer);
3900 mci_writel(host, CDTHRCTL, *buffer);
/* Save/restore ops used when the SoC powers the controller down in suspend */
3903 static const struct dw_mci_rst_ops dw_mci_pdrst_ops = {
3904 .pre_suspend = dw_mci_rst_pre_suspend,
3905 .post_resume = dw_mci_rst_post_resume,
/* Mapping from DT boolean properties to DW_MCI_QUIRK_* flag bits */
3910 static struct dw_mci_of_quirks {
3915 .quirk = "broken-cd",
3916 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * dw_mci_parse_dt() - build a dw_mci_board pdata structure from the
 * device-tree node (CONFIG_OF variant).
 *
 * Returns the devm-allocated pdata on success, or an ERR_PTR on
 * allocation failure / vendor parse_dt failure.  Properties handled:
 * num-slots (default 1), quirk booleans, fifo-depth, card-detect-delay,
 * clock-frequency, PM caps, speed/UHS/HS200 caps, cd-inverted,
 * bootpart-no-access and controller-power-down (which also allocates
 * the register save buffer and installs dw_mci_pdrst_ops).
 */
3920 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3922 struct dw_mci_board *pdata;
3923 struct device *dev = host->dev;
3924 struct device_node *np = dev->of_node;
3925 const struct dw_mci_drv_data *drv_data = host->drv_data;
3927 u32 clock_frequency;
3929 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3931 dev_err(dev, "could not allocate memory for pdata\n");
3932 return ERR_PTR(-ENOMEM);
3935 /* find out number of slots supported */
3936 if (of_property_read_u32(dev->of_node, "num-slots",
3937 &pdata->num_slots)) {
3938 dev_info(dev, "num-slots property not found, "
3939 "assuming 1 slot is available\n");
3940 pdata->num_slots = 1;
/* Translate each present quirk property into its flag bit */
3944 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3945 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3946 pdata->quirks |= of_quirks[idx].id;
3949 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3950 dev_info(dev, "fifo-depth property not found, using "
3951 "value of FIFOTH register as default\n");
3953 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3955 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3956 pdata->bus_hz = clock_frequency;
/* Give the SoC-specific driver a chance to parse its own properties */
3958 if (drv_data && drv_data->parse_dt) {
3959 ret = drv_data->parse_dt(host);
3961 return ERR_PTR(ret);
3964 if (of_find_property(np, "keep-power-in-suspend", NULL))
3965 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3967 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3968 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3970 if (of_find_property(np, "supports-highspeed", NULL))
3971 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3973 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3974 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3976 if (of_find_property(np, "supports-DDR_MODE", NULL))
3977 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3979 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3980 pdata->caps2 |= MMC_CAP2_HS200;
3982 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3983 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3985 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3986 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3988 if (of_get_property(np, "cd-inverted", NULL))
3989 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3990 if (of_get_property(np, "bootpart-no-access", NULL))
3991 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
/* Power-down suspend support: reserve room for the register snapshot */
3993 if (of_get_property(np, "controller-power-down", NULL)) {
3994 host->regs_buffer = (u32 *)devm_kzalloc(host->dev,
3995 DW_REGS_SIZE, GFP_KERNEL);
3996 if (!host->regs_buffer) {
3998 "could not allocate memory for regs_buffer\n");
3999 return ERR_PTR(-ENOMEM);
4002 host->rst_ops = &dw_mci_pdrst_ops;
/* NOTE(review): writes a module-scope global — assumes single instance
 * semantics for removable-card policy; confirm against full source. */
4003 mmc_assume_removable = 0;
4009 #else /* CONFIG_OF */
/* Without device-tree support there is no way to obtain platform data */
4010 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
4012 return ERR_PTR(-EINVAL);
4014 #endif /* CONFIG_OF */
/*
 * dw_mci_probe() - main controller probe.
 *
 * Sequence: parse DT pdata -> read VERID to pick the DATA register
 * offset -> acquire/enable hpclk/hclk/clk clocks and set the CIU rate
 * (doubled for >= 2.40a parts to account for the fixed /2 divider) ->
 * detect host data width from HCON -> full reset -> DMA init -> program
 * FIFO thresholds -> register IRQ and card workqueue -> init each slot
 * -> unmask interrupts.  Error paths unwind clocks/regulator/workqueue.
 *
 * NOTE(review): many error-label and closing-brace lines are elided in
 * this extract; code is left byte-identical.
 */
4016 int dw_mci_probe(struct dw_mci *host)
4018 const struct dw_mci_drv_data *drv_data = host->drv_data;
4019 int width, i, ret = 0;
4025 host->pdata = dw_mci_parse_dt(host);
4026 if (IS_ERR(host->pdata)) {
4027 dev_err(host->dev, "platform data not available\n");
4032 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
4034 "Platform data must supply select_slot function\n");
4039 * In 2.40a spec, Data offset is changed.
4040 * Need to check the version-id and set data-offset for DATA register.
4042 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
4043 dev_info(host->dev, "Version ID is %04x\n", host->verid);
4045 if (host->verid < DW_MMC_240A)
4046 host->data_offset = DATA_OFFSET;
4048 host->data_offset = DATA_240A_OFFSET;
/* Optional bus clocks: hpclk is best-effort, hclk failure is fatal */
4051 host->hpclk_mmc= devm_clk_get(host->dev, "hpclk_mmc");
4052 if (IS_ERR(host->hpclk_mmc)) {
4053 dev_err(host->dev, "failed to get hpclk_mmc\n");
4055 clk_prepare_enable(host->hpclk_mmc);
4059 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
4060 if (IS_ERR(host->hclk_mmc)) {
4061 dev_err(host->dev, "failed to get hclk_mmc\n");
4062 ret = PTR_ERR(host->hclk_mmc);
4066 clk_prepare_enable(host->hclk_mmc);
4069 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
4070 if (IS_ERR(host->clk_mmc)) {
4071 dev_err(host->dev, "failed to get clk mmc_per\n");
4072 ret = PTR_ERR(host->clk_mmc);
4076 host->bus_hz = host->pdata->bus_hz;
4077 if (!host->bus_hz) {
4078 dev_err(host->dev,"Platform data must supply bus speed\n");
4083 if (host->verid < DW_MMC_240A)
4084 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
4086 //rockchip: fix divider 2 in clksum before controlller
4087 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
4090 dev_err(host->dev, "failed to set clk mmc\n");
4093 clk_prepare_enable(host->clk_mmc);
4095 if (drv_data && drv_data->setup_clock) {
4096 ret = drv_data->setup_clock(host);
4099 "implementation specific clock setup failed\n");
4104 host->quirks = host->pdata->quirks;
4105 host->irq_state = true;
4106 host->set_speed = 0;
4108 host->svi_flags = 0;
4110 spin_lock_init(&host->lock);
4111 spin_lock_init(&host->slock);
4113 INIT_LIST_HEAD(&host->queue);
4115 * Get the host data width - this assumes that HCON has been set with
4116 * the correct values.
4118 i = (mci_readl(host, HCON) >> 7) & 0x7;
/* HCON data-width field: 0 = 16-bit, 2 = 64-bit, else assume 32-bit */
4120 host->push_data = dw_mci_push_data16;
4121 host->pull_data = dw_mci_pull_data16;
4123 host->data_shift = 1;
4124 } else if (i == 2) {
4125 host->push_data = dw_mci_push_data64;
4126 host->pull_data = dw_mci_pull_data64;
4128 host->data_shift = 3;
4130 /* Check for a reserved value, and warn if it is */
4132 "HCON reports a reserved host data width!\n"
4133 "Defaulting to 32-bit access.\n");
4134 host->push_data = dw_mci_push_data32;
4135 host->pull_data = dw_mci_pull_data32;
4137 host->data_shift = 2;
4140 /* Reset all blocks */
4141 if (!dw_mci_ctrl_all_reset(host))
4144 host->dma_ops = host->pdata->dma_ops;
4145 dw_mci_init_dma(host);
4147 /* Clear the interrupts for the host controller */
4148 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4149 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4151 /* Put in max timeout */
4152 mci_writel(host, TMOUT, 0xFFFFFFFF);
4155 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
4156 * Tx Mark = fifo_size / 2 DMA Size = 8
4158 if (!host->pdata->fifo_depth) {
4160 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
4161 * have been overwritten by the bootloader, just like we're
4162 * about to do, so if you know the value for your hardware, you
4163 * should put it in the platform data.
4165 fifo_size = mci_readl(host, FIFOTH);
4166 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
4168 fifo_size = host->pdata->fifo_depth;
4170 host->fifo_depth = fifo_size;
4172 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
4173 mci_writel(host, FIFOTH, host->fifoth_val);
4175 /* disable clock to CIU */
4176 mci_writel(host, CLKENA, 0);
4177 mci_writel(host, CLKSRC, 0);
4179 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
4180 host->card_workqueue = alloc_workqueue("dw-mci-card",
4181 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
4182 if (!host->card_workqueue) {
4186 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
4187 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
4188 host->irq_flags, "dw-mci", host);
/* Slot count: explicit from pdata, otherwise decoded from HCON */
4192 if (host->pdata->num_slots)
4193 host->num_slots = host->pdata->num_slots;
4195 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
4197 /* We need at least one slot to succeed */
4198 for (i = 0; i < host->num_slots; i++) {
4199 ret = dw_mci_init_slot(host, i);
4201 dev_dbg(host->dev, "slot %d init failed\n", i);
4207 * Enable interrupts for command done, data over, data empty, card det,
4208 * receive ready and error such as transmit, receive timeout, crc error
4210 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4211 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
4212 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect interrupt only makes sense for removable SD slots */
4213 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
4214 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
4215 regs |= SDMMC_INT_CD;
4217 mci_writel(host, INTMASK, regs);
4219 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4221 dev_info(host->dev, "DW MMC controller at irq %d, "
4222 "%d bit host data width, "
4224 host->irq, width, fifo_size);
4227 dev_info(host->dev, "%d slots initialized\n", init_slots);
4229 dev_dbg(host->dev, "attempted to initialize %d slots, "
4230 "but failed on all\n", host->num_slots);
4235 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4236 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* ---- error unwind (labels elided in this extract) ---- */
4241 destroy_workqueue(host->card_workqueue);
4244 if (host->use_dma && host->dma_ops->exit)
4245 host->dma_ops->exit(host);
4248 regulator_disable(host->vmmc);
4249 regulator_put(host->vmmc);
4253 if (!IS_ERR(host->clk_mmc))
4254 clk_disable_unprepare(host->clk_mmc);
4256 if (!IS_ERR(host->hclk_mmc))
4257 clk_disable_unprepare(host->hclk_mmc);
4260 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove() - driver removal: the inverse of dw_mci_probe().
 *
 * Masks and clears all interrupts, cleans up every slot, gates the CIU
 * clock, destroys the card workqueue, releases the DMA backend, frees
 * the card-detect GPIO IRQ, and disables regulator and clocks.
 */
4262 void dw_mci_remove(struct dw_mci *host)
4264 struct mmc_host *mmc = host->mmc;
4265 struct dw_mci_slot *slot = mmc_priv(mmc);
4268 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4269 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4271 for(i = 0; i < host->num_slots; i++){
4272 dev_dbg(host->dev, "remove slot %d\n", i);
4274 dw_mci_cleanup_slot(host->slot[i], i);
4277 /* disable clock to CIU */
4278 mci_writel(host, CLKENA, 0);
4279 mci_writel(host, CLKSRC, 0);
4281 destroy_workqueue(host->card_workqueue);
/* SD slots registered a PM notifier at init time; drop it here */
4282 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4283 unregister_pm_notifier(&host->mmc->pm_notify);
4285 if (host->use_dma && host->dma_ops->exit)
4286 host->dma_ops->exit(host);
4288 if (gpio_is_valid(slot->cd_gpio))
4289 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4292 regulator_disable(host->vmmc);
4293 regulator_put(host->vmmc);
/* devm_clk_get() may have returned ERR_PTR; only unwind valid clocks */
4295 if (!IS_ERR(host->clk_mmc))
4296 clk_disable_unprepare(host->clk_mmc);
4298 if (!IS_ERR(host->hclk_mmc))
4299 clk_disable_unprepare(host->hclk_mmc);
4300 if (!IS_ERR(host->hpclk_mmc))
4301 clk_disable_unprepare(host->hpclk_mmc);
4303 EXPORT_SYMBOL(dw_mci_remove);
4307 #ifdef CONFIG_PM_SLEEP
4309 * TODO: we should probably disable the clock to the card in the suspend path.
4311 extern int get_wifi_chip_type(void);
/*
 * dw_mci_suspend() - system-sleep suspend hook.
 *
 * Skips suspend entirely for certain SDIO WiFi chips (they stay powered
 * for wake).  For removable SD controllers: disables the IRQ, switches
 * pins to the idle state, and (except on rk3126/rk3036, which are
 * already gpio-cd) rebinds card-detect to a GPIO and arms it as a
 * wakeup source.  Finally masks/clears interrupts and, when power-down
 * suspend is configured, snapshots registers via rst_ops->pre_suspend.
 */
4312 int dw_mci_suspend(struct dw_mci *host)
4314 int present = dw_mci_get_cd(host->mmc);
4316 if((host->mmc->restrict_caps &
4317 RESTRICT_CARD_TYPE_SDIO) &&
4318 (get_wifi_chip_type() == WIFI_ESP8089 ||
4319 get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
4323 regulator_disable(host->vmmc);
4325 /* Only for sdmmc controller */
4326 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4327 disable_irq(host->irq);
4329 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4330 MMC_DBG_ERR_FUNC(host->mmc,
4331 "Idle pinctrl setting failed! [%s]",
4332 mmc_hostname(host->mmc));
4335 /* Soc rk3126/3036 already in gpio_cd mode */
4336 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4337 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4338 enable_irq_wake(host->mmc->slot.cd_irq);
/* Quiesce the controller: clear status, mask everything, disable CTRL */
4342 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4343 mci_writel(host, INTMASK, 0x00);
4344 mci_writel(host, CTRL, 0x00);
4346 if (host->rst_ops &&
4347 host->rst_ops->pre_suspend)
4348 host->rst_ops->pre_suspend(host);
4352 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume() - system-sleep resume hook; mirrors dw_mci_suspend().
 *
 * Restores saved registers (power-down suspend), skips the same SDIO
 * WiFi chips that skipped suspend, re-selects pinctrl states (with a
 * udbg debug-pin variant when available), pokes SoC-specific GRF bits
 * to restore card-detect routing, re-enables vmmc, performs a full
 * controller reset, re-inits DMA, restores FIFOTH/TMOUT/INTMASK, and
 * for MMC_PM_KEEP_POWER slots replays ios and bus setup.
 *
 * NOTE(review): several lines (early returns, `if` headers before the
 * GRF writes) are elided in this extract; code is left byte-identical.
 */
4354 int dw_mci_resume(struct dw_mci *host)
4358 struct dw_mci_slot *slot;
4359 int present = dw_mci_get_cd(host->mmc);
4361 if (host->rst_ops &&
4362 host->rst_ops->post_resume)
4363 host->rst_ops->post_resume(host);
4366 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4367 (get_wifi_chip_type() == WIFI_ESP8089 ||
4368 get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
4371 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
4372 slot = mmc_priv(host->mmc);
4373 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4377 /*only for sdmmc controller*/
4378 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4379 /* Soc rk3126/3036 already in gpio_cd mode */
4380 if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4381 disable_irq_wake(host->mmc->slot.cd_irq);
4382 mmc_gpio_free_cd(host->mmc);
/* Prefer the udbg pin state when present, else plain default */
4386 if (!IS_ERR(host->pins_udbg)) {
4387 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4388 MMC_DBG_ERR_FUNC(host->mmc,
4389 "Idle pinctrl setting failed! [%s]",
4390 mmc_hostname(host->mmc));
4391 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
4392 MMC_DBG_ERR_FUNC(host->mmc,
4393 "Udbg pinctrl setting failed! [%s]",
4394 mmc_hostname(host->mmc));
4396 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4397 MMC_DBG_ERR_FUNC(host->mmc,
4398 "Default pinctrl setting failed! [%s]",
4399 mmc_hostname(host->mmc));
4402 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4403 MMC_DBG_ERR_FUNC(host->mmc,
4404 "Default pinctrl setting failed! [%s]",
4405 mmc_hostname(host->mmc));
/* Per-SoC GRF writes: route card-detect back to the controller */
4410 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4411 else if(cpu_is_rk3036())
4412 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4413 else if(cpu_is_rk312x())
4414 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4415 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4418 ret = regulator_enable(host->vmmc);
4421 "failed to enable regulator: %d\n", ret);
4426 if(!dw_mci_ctrl_all_reset(host)){
/* rk3036/rk312x use the external DMA path; no per-resume init needed */
4431 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
4432 if(host->use_dma && host->dma_ops->init)
4433 host->dma_ops->init(host);
4436 * Restore the initial value at FIFOTH register
4437 * And Invalidate the prev_blksz with zero
4439 mci_writel(host, FIFOTH, host->fifoth_val);
4440 host->prev_blksz = 0;
4441 /* Put in max timeout */
4442 mci_writel(host, TMOUT, 0xFFFFFFFF);
4444 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4445 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR |
4446 SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
4448 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4449 regs |= SDMMC_INT_CD;
4451 mci_writel(host, INTMASK, regs);
4452 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4454 /*only for sdmmc controller*/
4455 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)){
4456 enable_irq(host->irq);
/* Slots that kept power through suspend need their bus re-programmed */
4459 for(i = 0; i < host->num_slots; i++){
4460 struct dw_mci_slot *slot = host->slot[i];
4463 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4464 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4465 dw_mci_setup_bus(slot, true);
4471 EXPORT_SYMBOL(dw_mci_resume);
4472 #endif /* CONFIG_PM_SLEEP */
/* Module init: banner only — device binding happens via dw_mci_probe() */
4474 static int __init dw_mci_init(void)
4476 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
/* Module exit: nothing to undo at module scope */
4480 static void __exit dw_mci_exit(void)
4484 module_init(dw_mci_init);
4485 module_exit(dw_mci_exit);
4487 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4488 MODULE_AUTHOR("NXP Semiconductor VietNam");
4489 MODULE_AUTHOR("Imagination Technologies Ltd");
4490 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4491 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4492 MODULE_LICENSE("GPL v2");