2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/suspend.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/card.h>
38 #include <linux/mmc/sdio.h>
39 #include <linux/mmc/rk_mmc.h>
40 #include <linux/bitops.h>
41 #include <linux/regulator/consumer.h>
42 #include <linux/workqueue.h>
44 #include <linux/of_gpio.h>
45 #include <linux/mmc/slot-gpio.h>
46 #include <linux/clk-private.h>
47 #include <linux/rockchip/cpu.h>
48 #include <linux/rfkill-wlan.h>
49 #include <linux/mfd/syscon.h>
50 #include <linux/regmap.h>
51 #include <linux/log2.h>
53 #include "rk_sdmmc_dbg.h"
54 #include <linux/regulator/rockchip_io_vol_domain.h>
55 #include "../../clk/rockchip/clk-ops.h"
57 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
59 /* Common flag combinations */
60 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
61 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
63 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
65 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
66 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
67 #define DW_MCI_SEND_STATUS 1
68 #define DW_MCI_RECV_STATUS 2
69 #define DW_MCI_DMA_THRESHOLD 16
71 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
72 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
74 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
75 #define SDMMC_DATA_TIMEOUT_SD 500
76 #define SDMMC_DATA_TIMEOUT_SDIO 250
77 #define SDMMC_DATA_TIMEOUT_EMMC 2500
79 #define SDMMC_CMD_RTO_MAX_HOLD 200
80 #define SDMMC_WAIT_FOR_UNBUSY 2500
82 #define DW_REGS_SIZE (0x0098 + 4)
83 #define DW_REGS_NUM (0x0098 / 4)
85 #ifdef CONFIG_MMC_DW_IDMAC
86 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
87 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
88 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
92 u32 des0; /* Control Descriptor */
93 #define IDMAC_DES0_DIC BIT(1)
94 #define IDMAC_DES0_LD BIT(2)
95 #define IDMAC_DES0_FD BIT(3)
96 #define IDMAC_DES0_CH BIT(4)
97 #define IDMAC_DES0_ER BIT(5)
98 #define IDMAC_DES0_CES BIT(30)
99 #define IDMAC_DES0_OWN BIT(31)
101 u32 des1; /* Buffer sizes */
102 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
103 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
105 u32 des2; /* buffer 1 physical address */
107 u32 des3; /* buffer 2 physical address */
109 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * Standard SD/eMMC tuning block patterns (from the SD/JEDEC specs), sent by
 * the card during CMD19/CMD21 sample-point tuning.  4-bit bus variant first.
 * NOTE(review): this extract is incomplete - the closing "};" of each array
 * is not visible here.
 */
111 static const u8 tuning_blk_pattern_4bit[] = {
112 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
113 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
114 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
115 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
116 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
117 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
118 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
119 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* 8-bit bus variant of the tuning pattern (128 bytes). */
122 static const u8 tuning_blk_pattern_8bit[] = {
123 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
124 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
125 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
126 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
127 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
128 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
129 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
130 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
131 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
132 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
133 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
134 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
135 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
136 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
137 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
138 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/*
 * Offset/name table for the controller's memory-mapped registers; consumed
 * by the register-dump debug helpers (dw_mci_regs_printk below).
 * NOTE(review): several entries and the terminating sentinel row are not
 * visible in this extract.
 */
142 static struct sdmmc_reg dw_mci_regs[] =
146 { 0x0008, "CLKDIV" },
147 { 0x000C, "CLKSRC" },
148 { 0x0010, "CLKENA" },
151 { 0x001C, "BLKSIZ" },
152 { 0x0020, "BYTCNT" },
153 { 0x0024, "INTMASK" },
154 { 0x0028, "CMDARG" },
160 { 0x0040, "MINSTS" },
161 { 0x0044, "RINTSTS" },
162 { 0x0048, "STATUS" },
163 { 0x004C, "FIFOTH" },
164 { 0x0050, "CDETECT" },
165 { 0x0054, "WRTPRT" },
167 { 0x005C, "TCBCNT" },
168 { 0x0060, "TBBCNT" },
169 { 0x0064, "DEBNCE" },
173 { 0x0074, "UHS_REG" },
176 { 0x0084, "PLDMND" },
177 { 0x0088, "DBADDR" },
179 { 0x0090, "IDINTEN" },
180 { 0x0094, "DSCADDR" },
181 { 0x0098, "BUFADDR" },
182 { 0x0100, "CARDTHRCTL" },
183 { 0x0104, "BackEndPwr" },
/* Forward declarations for the reset/low-power helpers defined later. */
187 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
188 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
189 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
190 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
192 /*printk the all register of current host*/
/*
 * Walk dw_mci_regs[] and printk every named register's current value for
 * this host.  NOTE(review): the loop's "regs++" advance and the return
 * statement are not visible in this extract.
 */
194 static int dw_mci_regs_printk(struct dw_mci *host)
196 struct sdmmc_reg *regs = dw_mci_regs;
198 while( regs->name != 0 ){
199 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
202 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
207 #if defined(CONFIG_DEBUG_FS)
208 static int dw_mci_req_show(struct seq_file *s, void *v)
210 struct dw_mci_slot *slot = s->private;
211 struct mmc_request *mrq;
212 struct mmc_command *cmd;
213 struct mmc_command *stop;
214 struct mmc_data *data;
216 /* Make sure we get a consistent snapshot */
217 spin_lock_bh(&slot->host->lock);
227 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
228 cmd->opcode, cmd->arg, cmd->flags,
229 cmd->resp[0], cmd->resp[1], cmd->resp[2],
230 cmd->resp[2], cmd->error);
232 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
233 data->bytes_xfered, data->blocks,
234 data->blksz, data->flags, data->error);
237 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
238 stop->opcode, stop->arg, stop->flags,
239 stop->resp[0], stop->resp[1], stop->resp[2],
240 stop->resp[2], stop->error);
243 spin_unlock_bh(&slot->host->lock);
248 static int dw_mci_req_open(struct inode *inode, struct file *file)
250 return single_open(file, dw_mci_req_show, inode->i_private);
/*
 * File operations for the per-slot debugfs "req" node.
 * NOTE(review): the .read/.llseek initializers are not visible in this
 * extract (single_open-based files normally use seq_read/seq_lseek).
 */
253 static const struct file_operations dw_mci_req_fops = {
254 .owner = THIS_MODULE,
255 .open = dw_mci_req_open,
258 .release = single_release,
/*
 * debugfs "regs" file.  NOTE(review): as written this prints the SDMMC_*
 * register *offset* constants, not live register contents (mci_readl is not
 * used) - this matches a known quirk of the original driver; confirm intent
 * before "fixing".
 */
261 static int dw_mci_regs_show(struct seq_file *s, void *v)
263 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
264 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
265 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
266 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
267 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
268 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
273 static int dw_mci_regs_open(struct inode *inode, struct file *file)
275 return single_open(file, dw_mci_regs_show, inode->i_private);
/* File operations for the per-slot debugfs "regs" node. */
278 static const struct file_operations dw_mci_regs_fops = {
279 .owner = THIS_MODULE,
280 .open = dw_mci_regs_open,
283 .release = single_release,
/*
 * Create the per-slot debugfs entries (regs, req, state, pending/completed
 * event masks) under the mmc host's debugfs root.  Failure is non-fatal:
 * it only logs an error.  NOTE(review): the "if (!node) goto err" checks
 * between the create calls are not visible in this extract.
 */
286 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
288 struct mmc_host *mmc = slot->mmc;
289 struct dw_mci *host = slot->host;
293 root = mmc->debugfs_root;
297 node = debugfs_create_file("regs", S_IRUSR, root, host,
302 node = debugfs_create_file("req", S_IRUSR, root, slot,
307 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
311 node = debugfs_create_x32("pending_events", S_IRUSR, root,
312 (u32 *)&host->pending_events);
316 node = debugfs_create_x32("completed_events", S_IRUSR, root,
317 (u32 *)&host->completed_events);
324 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
326 #endif /* defined(CONFIG_DEBUG_FS) */
328 static void dw_mci_set_timeout(struct dw_mci *host)
330 /* timeout (maximum) */
331 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate a generic struct mmc_command into the controller's CMD register
 * flag word: stop/abort, response-expected/long/CRC bits and data-transfer
 * direction bits, then let the SoC driver hook patch the result.
 * NOTE(review): the initial "u32 cmdr = cmd->opcode;", the data assignment
 * and the final "return cmdr;" are not visible in this extract.
 */
334 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
336 struct mmc_data *data;
337 struct dw_mci_slot *slot = mmc_priv(mmc);
338 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
340 cmd->error = -EINPROGRESS;
344 if (cmdr == MMC_STOP_TRANSMISSION)
345 cmdr |= SDMMC_CMD_STOP;
347 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
349 if (cmd->flags & MMC_RSP_PRESENT) {
350 /* We expect a response, so set this bit */
351 cmdr |= SDMMC_CMD_RESP_EXP;
352 if (cmd->flags & MMC_RSP_136)
353 cmdr |= SDMMC_CMD_RESP_LONG;
356 if (cmd->flags & MMC_RSP_CRC)
357 cmdr |= SDMMC_CMD_RESP_CRC;
361 cmdr |= SDMMC_CMD_DAT_EXP;
362 if (data->flags & MMC_DATA_STREAM)
363 cmdr |= SDMMC_CMD_STRM_MODE;
364 if (data->flags & MMC_DATA_WRITE)
365 cmdr |= SDMMC_CMD_DAT_WR;
/* Give the SoC-specific driver a chance to tweak the command flags. */
368 if (drv_data && drv_data->prepare_command)
369 drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build the synthetic stop/abort command issued after a data transfer:
 * CMD12 (STOP_TRANSMISSION) for block reads/writes, or CMD52 with the
 * CCCR_ABORT bit for SDIO extended transfers.  Returns the corresponding
 * CMD register flag word.
 */
375 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
377 struct mmc_command *stop;
383 stop = &host->stop_abort;
385 memset(stop, 0, sizeof(struct mmc_command));
387 if (cmdr == MMC_READ_SINGLE_BLOCK ||
388 cmdr == MMC_READ_MULTIPLE_BLOCK ||
389 cmdr == MMC_WRITE_BLOCK ||
390 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
391 stop->opcode = MMC_STOP_TRANSMISSION;
393 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
394 } else if (cmdr == SD_IO_RW_EXTENDED) {
395 stop->opcode = SD_IO_RW_DIRECT;
/* CMD52 write to CCCR_ABORT, preserving the function number from cmd->arg. */
396 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
397 ((cmd->arg >> 28) & 0x7);
398 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
403 cmdr = stop->opcode | SDMMC_CMD_STOP |
404 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Kick off a command on the controller: latch the argument, then write the
 * CMD register with SDMMC_CMD_START.  CMD11 (voltage switch) gets special
 * handling: low-power clock gating is disabled first and the VOLT_SWITCH
 * flag is added so the controller sequences the 1.8V switch.
 */
409 static void dw_mci_start_command(struct dw_mci *host,
410 struct mmc_command *cmd, u32 cmd_flags)
412 struct dw_mci_slot *slot = host->slot[0];
413 /*temporality fix slot[0] due to host->num_slots equal to 1*/
415 host->pre_cmd = host->cmd;
418 "start command: ARGR=0x%08x CMDR=0x%08x\n",
419 cmd->arg, cmd_flags);
421 if(SD_SWITCH_VOLTAGE == cmd->opcode){
422 /*confirm non-low-power mode*/
423 mci_writel(host, CMDARG, 0);
424 dw_mci_disable_low_power(slot);
426 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
427 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
429 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
432 mci_writel(host, CMDARG, cmd->arg);
435 /* fix the value to 1 in some Soc,for example RK3188. */
436 if(host->mmc->hold_reg_flag)
437 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
/* Writing CMD with the START bit set makes the controller issue the command. */
439 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
443 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
445 dw_mci_start_command(host, data->stop, host->stop_cmdr);
448 /* DMA interface functions */
/*
 * Abort any DMA in flight and mark the transfer phase complete so the
 * state-machine tasklet can move on.  On rk3036/rk312x the edma stop is
 * skipped (terminating there may trigger a flush operation - see comment).
 */
449 static void dw_mci_stop_dma(struct dw_mci *host)
451 if (host->using_dma) {
452 /* Fixme: No need to terminate edma, may cause flush op */
453 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
454 host->dma_ops->stop(host);
455 host->dma_ops->cleanup(host);
458 /* Data transfer was stopped by the interrupt handler */
459 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
462 static int dw_mci_get_dma_dir(struct mmc_data *data)
464 if (data->flags & MMC_DATA_WRITE)
465 return DMA_TO_DEVICE;
467 return DMA_FROM_DEVICE;
470 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the scatterlist after an IDMAC transfer, unless the request was
 * pre-mapped via the pre_req hook (host_cookie set), in which case post_req
 * owns the unmap.
 */
471 static void dw_mci_dma_cleanup(struct dw_mci *host)
473 struct mmc_data *data = host->data;
476 if (!data->host_cookie)
477 dma_unmap_sg(host->dev,
480 dw_mci_get_dma_dir(data));
483 static void dw_mci_idmac_reset(struct dw_mci *host)
485 u32 bmod = mci_readl(host, BMOD);
486 /* Software reset of DMA */
487 bmod |= SDMMC_IDMAC_SWRESET;
488 mci_writel(host, BMOD, bmod);
/*
 * Stop the internal DMA controller: detach it from the data path (CTRL),
 * reset the DMA interface, then disable the IDMAC engine and soft-reset it
 * via BMOD.  NOTE(review): the "u32 temp;" declaration line is not visible
 * in this extract.
 */
491 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
495 /* Disable and reset the IDMAC interface */
496 temp = mci_readl(host, CTRL);
497 temp &= ~SDMMC_CTRL_USE_IDMAC;
498 temp |= SDMMC_CTRL_DMA_RESET;
499 mci_writel(host, CTRL, temp);
501 /* Stop the IDMAC running */
502 temp = mci_readl(host, BMOD);
503 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
504 temp |= SDMMC_IDMAC_SWRESET;
505 mci_writel(host, BMOD, temp);
/*
 * IDMAC completion callback: unmap buffers and signal EVENT_XFER_COMPLETE
 * to the state-machine tasklet.  If the card was removed, host->data is
 * NULL and the completion steps are skipped (see comment below).
 */
508 static void dw_mci_idmac_complete_dma(void *arg)
510 struct dw_mci *host = arg;
511 struct mmc_data *data = host->data;
513 dev_vdbg(host->dev, "DMA complete\n");
516 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
517 host->mrq->cmd->opcode,host->mrq->cmd->arg,
518 data->blocks,data->blksz,mmc_hostname(host->mmc));
521 host->dma_ops->cleanup(host);
524 * If the card was removed, data will be NULL. No point in trying to
525 * send the stop command or waiting for NBUSY in this case.
528 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
529 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the (already DMA-mapped) scatterlist:
 * each entry gets OWN|DIC|CH, its buffer size and physical address; then the
 * first descriptor is tagged FD and the last LD (with chaining/interrupt-
 * disable cleared so completion fires).  NOTE(review): local declarations
 * and the loop-closing lines are not visible in this extract.
 */
533 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
537 struct idmac_desc *desc = host->sg_cpu;
539 for (i = 0; i < sg_len; i++, desc++) {
540 unsigned int length = sg_dma_len(&data->sg[i]);
541 u32 mem_addr = sg_dma_address(&data->sg[i]);
543 /* Set the OWN bit and disable interrupts for this descriptor */
544 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
547 IDMAC_SET_BUFFER1_SIZE(desc, length);
549 /* Physical address to DMA to/from */
550 desc->des2 = mem_addr;
553 /* Set first descriptor */
555 desc->des0 |= IDMAC_DES0_FD;
557 /* Set last descriptor */
/* sg_cpu is byte-addressed here, hence the explicit sizeof() scaling. */
558 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
559 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
560 desc->des0 |= IDMAC_DES0_LD;
/*
 * Start an internal-DMA transfer: build the descriptor ring, route the data
 * path through the IDMAC (CTRL), enable the engine (BMOD) and poke the
 * poll-demand register so it begins fetching descriptors.
 */
565 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
569 dw_mci_translate_sglist(host, host->data, sg_len);
571 /* Select IDMAC interface */
572 temp = mci_readl(host, CTRL);
573 temp |= SDMMC_CTRL_USE_IDMAC;
574 mci_writel(host, CTRL, temp);
578 /* Enable the IDMAC */
579 temp = mci_readl(host, BMOD);
580 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
581 mci_writel(host, BMOD, temp);
583 /* Start it running */
584 mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: forward-link a page-sized ring of descriptors (last
 * one wraps to the first and carries the end-of-ring bit), soft-reset the
 * engine, clear/mask interrupts down to TX/RX complete, and program the
 * descriptor base address.
 */
587 static int dw_mci_idmac_init(struct dw_mci *host)
589 struct idmac_desc *p;
592 /* Number of descriptors in the ring buffer */
593 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
595 /* Forward link the descriptor list */
596 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
597 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
599 /* Set the last descriptor as the end-of-ring descriptor */
600 p->des3 = host->sg_dma;
601 p->des0 = IDMAC_DES0_ER;
603 dw_mci_idmac_reset(host);
605 /* Mask out interrupts - get Tx & Rx complete only */
606 mci_writel(host, IDSTS, IDMAC_INT_CLR);
607 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
610 /* Set the descriptor base address */
611 mci_writel(host, DBADDR, host->sg_dma);
/* DMA-ops vtable wired up when the internal IDMAC is used. */
615 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
616 .init = dw_mci_idmac_init,
617 .start = dw_mci_idmac_start_dma,
618 .stop = dw_mci_idmac_stop_dma,
619 .complete = dw_mci_idmac_complete_dma,
620 .cleanup = dw_mci_dma_cleanup,
/*
 * External-DMA (dmaengine) counterpart of dw_mci_dma_cleanup: unmap the
 * scatterlist unless pre_req already mapped it (host_cookie set).
 */
624 static void dw_mci_edma_cleanup(struct dw_mci *host)
626 struct mmc_data *data = host->data;
629 if (!data->host_cookie)
630 dma_unmap_sg(host->dev,
631 data->sg, data->sg_len,
632 dw_mci_get_dma_dir(data));
635 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
637 dmaengine_terminate_all(host->dms->ch);
/*
 * External-DMA completion callback: for reads, invalidate the CPU cache
 * over the scatterlist before handing the data up; then unmap and signal
 * EVENT_XFER_COMPLETE to the state-machine tasklet.
 */
640 static void dw_mci_edmac_complete_dma(void *arg)
642 struct dw_mci *host = arg;
643 struct mmc_data *data = host->data;
645 dev_vdbg(host->dev, "DMA complete\n");
648 if(data->flags & MMC_DATA_READ)
649 /* Invalidate cache after read */
650 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
651 data->sg_len, DMA_FROM_DEVICE);
653 host->dma_ops->cleanup(host);
656 * If the card was removed, data will be NULL. No point in trying to
657 * send the stop command or waiting for NBUSY in this case.
660 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
661 tasklet_schedule(&host->tasklet);
/*
 * Start a transfer over the external dmaengine channel: derive the burst
 * size from the controller's FIFOTH MSIZE field (clamped to the edmac's
 * limit, 8 on rk3036), rewrite FIFOTH to match, then configure/prepare/
 * submit a slave_sg descriptor in the proper direction.  Writes flush the
 * CPU cache before issuing; dw_mci_edmac_complete_dma is the callback.
 */
665 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
667 struct dma_slave_config slave_config;
668 struct dma_async_tx_descriptor *desc = NULL;
669 struct scatterlist *sgl = host->data->sg;
670 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
671 u32 sg_elems = host->data->sg_len;
672 u32 fifoth_val, mburst;
674 u32 idx, rx_wmark, tx_wmark;
677 /* Set external dma config: burst size, burst width*/
678 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
679 slave_config.src_addr = slave_config.dst_addr;
680 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
681 slave_config.src_addr_width = slave_config.dst_addr_width;
683 /* Match FIFO dma burst MSIZE with external dma config*/
684 fifoth_val = mci_readl(host, FIFOTH);
685 mburst = mszs[(fifoth_val >> 28) & 0x7];
687 /* edmac limit burst to 16, but work around for rk3036 to 8 */
688 if (unlikely(cpu_is_rk3036()))
693 if (mburst > burst_limit) {
694 mburst = burst_limit;
695 idx = (ilog2(mburst) > 0) ? (ilog2(mburst) - 1) : 0;
697 rx_wmark = mszs[idx] - 1;
698 tx_wmark = (host->fifo_depth) / 2;
699 fifoth_val = SDMMC_SET_FIFOTH(idx, rx_wmark, tx_wmark);
701 mci_writel(host, FIFOTH, fifoth_val);
704 slave_config.dst_maxburst = mburst;
705 slave_config.src_maxburst = slave_config.dst_maxburst;
/* Write path: mem -> device. */
707 if(host->data->flags & MMC_DATA_WRITE){
708 slave_config.direction = DMA_MEM_TO_DEV;
709 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
711 dev_err(host->dev, "error in dw_mci edma configuration.\n");
715 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
716 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
718 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
721 /* Set dw_mci_edmac_complete_dma as callback */
722 desc->callback = dw_mci_edmac_complete_dma;
723 desc->callback_param = (void *)host;
724 dmaengine_submit(desc);
726 /* Flush cache before write */
727 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
728 sg_elems, DMA_TO_DEVICE);
729 dma_async_issue_pending(host->dms->ch);
/* Read path: device -> mem. */
732 slave_config.direction = DMA_DEV_TO_MEM;
733 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
735 dev_err(host->dev, "error in dw_mci edma configuration.\n");
738 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
739 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
741 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
744 /* set dw_mci_edmac_complete_dma as callback */
745 desc->callback = dw_mci_edmac_complete_dma;
746 desc->callback_param = (void *)host;
747 dmaengine_submit(desc);
748 dma_async_issue_pending(host->dms->ch);
752 static int dw_mci_edmac_init(struct dw_mci *host)
754 /* Request external dma channel, SHOULD decide chn in dts */
756 host->dms = (struct dw_mci_dma_slave *)kmalloc
757 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
758 if (NULL == host->dms) {
759 dev_err(host->dev, "No enough memory to alloc dms.\n");
763 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
764 if (!host->dms->ch) {
765 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
766 host->dms->ch->chan_id);
773 if (NULL != host->dms) {
/*
 * Release the external-DMA channel (if held).  NOTE(review): the matching
 * kfree(host->dms)/host->dms = NULL lines are not visible in this extract.
 */
781 static void dw_mci_edmac_exit(struct dw_mci *host)
783 if (NULL != host->dms) {
784 if (NULL != host->dms->ch) {
785 dma_release_channel(host->dms->ch);
786 host->dms->ch = NULL;
/* DMA-ops vtable wired up when an external dmaengine channel is used. */
793 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
794 .init = dw_mci_edmac_init,
795 .exit = dw_mci_edmac_exit,
796 .start = dw_mci_edmac_start_dma,
797 .stop = dw_mci_edmac_stop_dma,
798 .complete = dw_mci_edmac_complete_dma,
799 .cleanup = dw_mci_edma_cleanup,
801 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * DMA-map the request's scatterlist (or return the cached mapping when the
 * pre_req hook already did it).  Transfers below the DMA threshold or with
 * non-word-aligned segments fall back to PIO (negative return).  When
 * called from pre_req (next != 0) the sg_len is cached in host_cookie.
 */
803 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
804 struct mmc_data *data,
807 struct scatterlist *sg;
808 unsigned int i, sg_len;
810 if (!next && data->host_cookie)
811 return data->host_cookie;
814 * We don't do DMA on "complex" transfers, i.e. with
815 * non-word-aligned buffers or lengths. Also, we don't bother
816 * with all the DMA setup overhead for short transfers.
818 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
/* Every segment must be 4-byte aligned in offset and length for DMA. */
824 for_each_sg(data->sg, sg, data->sg_len, i) {
825 if (sg->offset & 3 || sg->length & 3)
829 sg_len = dma_map_sg(host->dev,
832 dw_mci_get_dma_dir(data));
837 data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: pre-map the next request's buffers so the mapping
 * cost overlaps the current transfer.  A non-zero host_cookie on entry
 * means the core handed us an already-prepared request (cleared below).
 */
842 static void dw_mci_pre_req(struct mmc_host *mmc,
843 struct mmc_request *mrq,
846 struct dw_mci_slot *slot = mmc_priv(mmc);
847 struct mmc_data *data = mrq->data;
849 if (!slot->host->use_dma || !data)
852 if (data->host_cookie) {
853 data->host_cookie = 0;
857 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
858 data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: undo the pre_req mapping once the core is done
 * with the request.
 */
861 static void dw_mci_post_req(struct mmc_host *mmc,
862 struct mmc_request *mrq,
865 struct dw_mci_slot *slot = mmc_priv(mmc);
866 struct mmc_data *data = mrq->data;
868 if (!slot->host->use_dma || !data)
871 if (data->host_cookie)
872 dma_unmap_sg(slot->host->dev,
875 dw_mci_get_dma_dir(data));
876 data->host_cookie = 0;
/*
 * Pick the largest DMA burst (MSIZE) and RX/TX watermarks compatible with
 * the current block size and FIFO depth, then program FIFOTH.  Only used
 * with the internal IDMAC.  NOTE(review): the idx-decrement loop around the
 * divisibility test is not visible in this extract.
 */
879 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
881 #ifdef CONFIG_MMC_DW_IDMAC
882 unsigned int blksz = data->blksz;
883 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
884 u32 fifo_width = 1 << host->data_shift;
885 u32 blksz_depth = blksz / fifo_width, fifoth_val;
886 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
887 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
889 tx_wmark = (host->fifo_depth) / 2;
890 tx_wmark_invers = host->fifo_depth - tx_wmark;
894 * if blksz is not a multiple of the FIFO width
896 if (blksz % fifo_width) {
/* Largest msize that divides both blksz_depth and the TX headroom wins. */
903 if (!((blksz_depth % mszs[idx]) ||
904 (tx_wmark_invers % mszs[idx]))) {
906 rx_wmark = mszs[idx] - 1;
911 * If idx is '0', it won't be tried
912 * Thus, initial values are uesed
915 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
916 mci_writel(host, FIFOTH, fifoth_val);
/*
 * Configure the card read threshold (CDTHRCTL): enabled with thld = blksz
 * for HS200/SDR104 reads that fit the FIFO, disabled otherwise.  Prevents
 * the controller from starting a read it cannot buffer.
 */
921 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
923 unsigned int blksz = data->blksz;
924 u32 blksz_depth, fifo_depth;
927 WARN_ON(!(data->flags & MMC_DATA_READ));
/* Threshold only matters for the fast UHS/HS200 timings. */
929 if (host->timing != MMC_TIMING_MMC_HS200 &&
930 host->timing != MMC_TIMING_UHS_SDR104)
933 blksz_depth = blksz / (1 << host->data_shift);
934 fifo_depth = host->fifo_depth;
936 if (blksz_depth > fifo_depth)
940 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
941 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
942 * Currently just choose blksz.
945 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
949 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to hand the data phase to DMA: map the scatterlist, tune FIFOTH if
 * the block size changed, reset and enable the DMA interface, mask the
 * PIO RX/TX interrupts, and start the engine.  Returns non-zero to make
 * the caller fall back to PIO.  NOTE(review): local declarations and the
 * early-return branches are not visible in this extract.
 */
952 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
960 /* If we don't have a channel, we can't do DMA */
964 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
966 /* Fixme: No need terminate edma, may cause flush op */
967 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
968 host->dma_ops->stop(host);
975 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
976 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
980 * Decide the MSIZE and RX/TX Watermark.
981 * If current block size is same with previous size,
982 * no need to update fifoth.
984 if (host->prev_blksz != data->blksz)
985 dw_mci_adjust_fifoth(host, data);
988 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
990 /* Enable the DMA interface */
991 temp = mci_readl(host, CTRL);
992 temp |= SDMMC_CTRL_DMA_ENABLE;
993 mci_writel(host, CTRL, temp);
995 /* Disable RX/TX IRQs, let DMA handle it */
996 spin_lock_irqsave(&host->slock, flags);
997 temp = mci_readl(host, INTMASK);
998 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
999 mci_writel(host, INTMASK, temp);
1000 spin_unlock_irqrestore(&host->slock, flags);
1002 host->dma_ops->start(host, sg_len);
/*
 * Set up the data phase of a request.  DMA is attempted first; on fallback
 * the sg_miter is primed for PIO, the RX/TX interrupts are re-enabled, DMA
 * is disabled in CTRL, and FIFOTH is restored to the PIO default (with
 * prev_blksz invalidated so the next DMA transfer re-tunes it).
 */
1007 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
1012 data->error = -EINPROGRESS;
1014 //WARN_ON(host->data);
1019 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
1021 if (data->flags & MMC_DATA_READ) {
1022 host->dir_status = DW_MCI_RECV_STATUS;
1023 dw_mci_ctrl_rd_thld(host, data);
1025 host->dir_status = DW_MCI_SEND_STATUS;
1028 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
1029 data->blocks, data->blksz, mmc_hostname(host->mmc));
/* Non-zero return from the DMA path means: do this transfer by PIO. */
1031 if (dw_mci_submit_data_dma(host, data)) {
1032 int flags = SG_MITER_ATOMIC;
1033 if (host->data->flags & MMC_DATA_READ)
1034 flags |= SG_MITER_TO_SG;
1036 flags |= SG_MITER_FROM_SG;
1038 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1039 host->sg = data->sg;
1040 host->part_buf_start = 0;
1041 host->part_buf_count = 0;
1043 spin_lock_irqsave(&host->slock, flag);
1044 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
1045 temp = mci_readl(host, INTMASK);
1046 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
1047 mci_writel(host, INTMASK, temp);
1048 spin_unlock_irqrestore(&host->slock, flag);
1050 temp = mci_readl(host, CTRL);
1051 temp &= ~SDMMC_CTRL_DMA_ENABLE;
1052 mci_writel(host, CTRL, temp);
1055 * Use the initial fifoth_val for PIO mode.
1056 * If next issued data may be transfered by DMA mode,
1057 * prev_blksz should be invalidated.
1059 mci_writel(host, FIFOTH, host->fifoth_val);
1060 host->prev_blksz = 0;
1063 * Keep the current block size.
1064 * It will be used to decide whether to update
1065 * fifoth register next time.
1067 host->prev_blksz = data->blksz;
/*
 * Synchronously issue a controller command (typically UPD_CLK during clock
 * reprogramming): optionally wait for the card/controller to go un-busy,
 * write CMDARG/CMD with the START bit, then poll CMD until the controller
 * clears START (shorter budget for clock updates), logging on timeout.
 */
1071 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
1073 struct dw_mci *host = slot->host;
1074 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
1075 unsigned int cmd_status = 0;
1076 #ifdef SDMMC_WAIT_FOR_UNBUSY
1078 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1080 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1082 ret = time_before(jiffies, timeout);
1083 cmd_status = mci_readl(host, STATUS);
1084 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1088 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1089 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1092 mci_writel(host, CMDARG, arg);
1094 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
1095 if(cmd & SDMMC_CMD_UPD_CLK)
1096 timeout = jiffies + msecs_to_jiffies(50);
1098 timeout = jiffies + msecs_to_jiffies(500);
/* Controller clears SDMMC_CMD_START once it has accepted the command. */
1099 while (time_before(jiffies, timeout)) {
1100 cmd_status = mci_readl(host, CMD);
1101 if (!(cmd_status & SDMMC_CMD_START))
1104 dev_err(&slot->mmc->class_dev,
1105 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1106 cmd, arg, cmd_status);
/*
 * Program the card clock and bus width for this slot.  clock == 0 gates
 * the clock; otherwise a divider is computed from bus_hz, the clock is
 * re-enabled (with low-power gating unless SDIO interrupts are in use),
 * and several Rockchip-specific clk_mmc workarounds adjust the source
 * clock for init-speed and DDR cases.  Each register change is latched
 * into the controller with an UPD_CLK command.  NOTE(review): many interior
 * lines (braces, some assignments) are not visible in this extract.
 */
1109 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1111 struct dw_mci *host = slot->host;
1112 unsigned int tempck,clock = slot->clock;
1117 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1118 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
/* clock == 0: gate the card clock and latch it with UPD_CLK. */
1121 mci_writel(host, CLKENA, 0);
1122 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1123 if(host->svi_flags == 0)
1124 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1126 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1128 } else if (clock != host->current_speed || force_clkinit) {
1129 div = host->bus_hz / clock;
1130 if (host->bus_hz % clock && host->bus_hz > clock)
1132 * move the + 1 after the divide to prevent
1133 * over-clocking the card.
1137 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1139 if ((clock << div) != slot->__clk_old || force_clkinit) {
1140 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1141 dev_info(&slot->mmc->class_dev,
1142 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1143 slot->id, host->bus_hz, clock,
1146 host->set_speed = tempck;
1147 host->set_div = div;
/* Disable clock before touching divider/source registers. */
1151 mci_writel(host, CLKENA, 0);
1152 mci_writel(host, CLKSRC, 0);
1156 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
/* Init-speed workaround: feed clk_mmc at 2x so div=1 yields the target. */
1158 if(clock <= 400*1000){
1159 MMC_DBG_BOOT_FUNC(host->mmc,
1160 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1161 clock * 2, mmc_hostname(host->mmc));
1162 /* clk_mmc will change parents to 24MHz xtal*/
1163 clk_set_rate(host->clk_mmc, clock * 2);
1166 host->set_div = div;
1170 MMC_DBG_BOOT_FUNC(host->mmc,
1171 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1172 mmc_hostname(host->mmc));
1175 MMC_DBG_ERR_FUNC(host->mmc,
1176 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1177 mmc_hostname(host->mmc));
1179 host->set_div = div;
1180 host->bus_hz = host->set_speed * 2;
1181 MMC_DBG_BOOT_FUNC(host->mmc,
1182 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1183 div, host->bus_hz, mmc_hostname(host->mmc));
1185 /* BUG may be here, come on, Linux BSP engineer looks!
1186 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1187 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1188 some oops happened like that:
1189 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1190 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1191 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1192 mmc0: new high speed DDR MMC card at address 0001
1193 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1195 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1196 mmcblk0: retrying using single block read
1197 mmcblk0: error -110 sending status command, retrying
1199 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1202 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1203 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1206 host->set_div = div;
1207 host->bus_hz = host->set_speed * 2;
1208 MMC_DBG_BOOT_FUNC(host->mmc,
1209 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1210 div, host->bus_hz, mmc_hostname(host->mmc));
/* Pre-2.40a IPs take the raw rate; newer ones internally halve it. */
1213 if (host->verid < DW_MMC_240A)
1214 clk_set_rate(host->clk_mmc,(host->bus_hz));
1216 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1222 /* set clock to desired speed */
1223 mci_writel(host, CLKDIV, div);
1227 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1229 /* enable clock; only low power if no SDIO */
1230 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1232 if (host->verid < DW_MMC_240A)
1233 sdio_int = SDMMC_INT_SDIO(slot->id);
1235 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1237 if (!(mci_readl(host, INTMASK) & sdio_int))
1238 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1239 mci_writel(host, CLKENA, clk_en_a);
1243 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1244 /* keep the clock with reflecting clock dividor */
1245 slot->__clk_old = clock << div;
1248 host->current_speed = clock;
1250 if(slot->ctype != slot->pre_ctype)
1251 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1253 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1254 mmc_hostname(host->mmc));
1255 slot->pre_ctype = slot->ctype;
1257 /* Set the current slot bus width */
1258 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1261 extern struct mmc_card *this_card;
/*
 * Poll the STATUS register until the card/data path is no longer busy, or
 * until a card-type-specific timeout expires.  eMMC erase commands get a
 * much larger budget derived from the card's EXT_CSD erase timeout fields.
 */
1262 static void dw_mci_wait_unbusy(struct dw_mci *host)
1265 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1266 unsigned long time_loop;
1267 unsigned int status;
1270 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1272 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1273 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1274 /* Special care for (secure)erase timeout calculation */
/*
 * NOTE(review): "(arg & (0x1 << 31)) == 1" can never be true - masking
 * bit 31 yields 0 or 0x80000000, so the secure-erase branch is dead.
 * The test presumably should be "!= 0"; confirm against the eMMC spec
 * (CMD38 arg bit 31 = secure request) before changing.
 */
1276 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1279 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1)
1280 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1281 300000 * (this_card->ext_csd.sec_erase_mult)) :
1282 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1286 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1287 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1288 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1289 timeout = SDMMC_DATA_TIMEOUT_SD;
1292 time_loop = jiffies + msecs_to_jiffies(timeout);
1294 status = mci_readl(host, STATUS);
1295 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1297 } while (time_before(jiffies, time_loop));
1302 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/*
 * dw_mci_card_busy - .card_busy callback used during signal-voltage
 * switching. Rather than sampling a busy line, it toggles host->svi_flags
 * (0 -> report 1, else -> report 0) so the two calls the MMC core makes
 * around a voltage switch get the expected busy/unbusy sequence.
 */
1305 * 0--status is busy.
1306 * 1--status is unbusy.
1308 int dw_mci_card_busy(struct mmc_host *mmc)
1310 struct dw_mci_slot *slot = mmc_priv(mmc);
1311 struct dw_mci *host = slot->host;
1313 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1314 host->svi_flags, mmc_hostname(host->mmc));
/* First call of a switch: flag the switch in progress, report "busy done". */
1317 if(host->svi_flags == 0){
1319 host->svi_flags = 1;
1320 return host->svi_flags;
/* Second call: clear the flag and report not busy. */
1323 host->svi_flags = 0;
1324 return host->svi_flags;
/*
 * __dw_mci_start_request - program the controller for one command (and its
 * optional data phase) on the given slot. Waits for the previous transfer
 * to drain, resets per-request event state, sets byte/block counts,
 * submits data for DMA/PIO and kicks off the command. Caller holds
 * host->lock (see dw_mci_queue_request / tasklet paths).
 */
1330 static void __dw_mci_start_request(struct dw_mci *host,
1331 struct dw_mci_slot *slot,
1332 struct mmc_command *cmd)
1334 struct mmc_request *mrq;
1335 struct mmc_data *data;
/* Board-specific slot mux, if the platform provides one. */
1339 if (host->pdata->select_slot)
1340 host->pdata->select_slot(slot->id);
1342 host->cur_slot = slot;
/* Make sure the data path is idle before touching BYTCNT/BLKSIZ. */
1345 dw_mci_wait_unbusy(host);
1347 host->pending_events = 0;
1348 host->completed_events = 0;
1349 host->data_status = 0;
1353 dw_mci_set_timeout(host);
1354 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1355 mci_writel(host, BLKSIZ, data->blksz);
1358 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1360 /* this is the first command, send the initialization clock */
1361 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1362 cmdflags |= SDMMC_CMD_INIT;
1365 dw_mci_submit_data(host, data);
1369 dw_mci_start_command(host, cmd, cmdflags);
/* Pre-compute the stop command so the ISR can issue it without sleeping. */
1372 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * dw_mci_start_request - start the slot's queued mrq, preferring the
 * set-block-count (SBC, CMD23) pre-command when the request carries one.
 */
1375 static void dw_mci_start_request(struct dw_mci *host,
1376 struct dw_mci_slot *slot)
1378 struct mmc_request *mrq = slot->mrq;
1379 struct mmc_command *cmd;
1381 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1382 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
/* Issue CMD23 first if present; the data command follows from the tasklet. */
1384 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1385 __dw_mci_start_request(host, slot, cmd);
1388 /* must be called with host->lock held */
/*
 * dw_mci_queue_request - start the request immediately if the host is
 * idle, otherwise append the slot to the host's pending queue; the
 * request-end path drains the queue.
 */
1389 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1390 struct mmc_request *mrq)
1392 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1397 if (host->state == STATE_IDLE) {
1398 host->state = STATE_SENDING_CMD;
1399 dw_mci_start_request(host, slot);
1401 list_add_tail(&slot->queue_node, &host->queue);
/*
 * dw_mci_request - mmc_host_ops .request entry point. Fails fast with
 * -ENOMEDIUM when no card is present, otherwise queues (or starts) the
 * request under host->lock.
 */
1405 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1407 struct dw_mci_slot *slot = mmc_priv(mmc);
1408 struct dw_mci *host = slot->host;
1413 * The check for card presence and queueing of the request must be
1414 * atomic, otherwise the card could be removed in between and the
1415 * request wouldn't fail until another card was inserted.
1417 spin_lock_bh(&host->lock);
1419 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1420 spin_unlock_bh(&host->lock);
1421 mrq->cmd->error = -ENOMEDIUM;
1422 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1423 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
/* Complete the request outside the lock; no hardware was touched. */
1425 mmc_request_done(mmc, mrq);
1429 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1430 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1432 dw_mci_queue_request(host, slot, mrq);
1434 spin_unlock_bh(&host->lock);
/*
 * dw_mci_set_ios - mmc_host_ops .set_ios callback: apply bus width,
 * timing (DDR bit in UHS_REG), clock and power state to the slot.
 * Waits (bounded) for the controller to go unbusy before reprogramming;
 * the wait is shortened during a signal-voltage switch (svi_flags == 1).
 */
1437 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1439 struct dw_mci_slot *slot = mmc_priv(mmc);
1440 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1441 struct dw_mci *host = slot->host;
1443 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1446 #ifdef SDMMC_WAIT_FOR_UNBUSY
1447 unsigned long time_loop;
/* Pick the unbusy-wait budget: longer during a voltage switch. */
1450 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1451 if(host->svi_flags == 1)
1452 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD)
1454 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1456 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1459 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1460 printk("%d..%s: no card. [%s]\n", \
1461 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until both busy bits clear or the budget expires. */
1466 ret = time_before(jiffies, time_loop);
1467 regs = mci_readl(slot->host, STATUS);
1468 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1474 printk("slot->flags = %lu ", slot->flags);
1475 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1476 if(host->svi_flags != 1)
1479 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1480 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Translate MMC-core bus width to the controller CTYPE encoding. */
1484 switch (ios->bus_width) {
1485 case MMC_BUS_WIDTH_4:
1486 slot->ctype = SDMMC_CTYPE_4BIT;
1488 case MMC_BUS_WIDTH_8:
1489 slot->ctype = SDMMC_CTYPE_8BIT;
1492 /* set default 1 bit mode */
1493 slot->ctype = SDMMC_CTYPE_1BIT;
1494 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode is bit (16 + slot id) of UHS_REG. */
1497 regs = mci_readl(slot->host, UHS_REG);
1500 if (ios->timing == MMC_TIMING_UHS_DDR50)
1501 regs |= ((0x1 << slot->id) << 16);
1503 regs &= ~((0x1 << slot->id) << 16);
1505 mci_writel(slot->host, UHS_REG, regs);
1506 slot->host->timing = ios->timing;
1509 * Use mirror of ios->clock to prevent race with mmc
1510 * core ios update when finding the minimum.
1512 slot->clock = ios->clock;
/* SoC-specific hook (e.g. phase/clock tweaks) before bus setup. */
1514 if (drv_data && drv_data->set_ios)
1515 drv_data->set_ios(slot->host, ios);
1517 /* Slot specific timing and width adjustment */
1518 dw_mci_setup_bus(slot, false);
1522 switch (ios->power_mode) {
1524 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1526 if (slot->host->pdata->setpower)
1527 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1528 regs = mci_readl(slot->host, PWREN);
1529 regs |= (1 << slot->id);
1530 mci_writel(slot->host, PWREN, regs);
1533 /* Power down slot */
1534 if(slot->host->pdata->setpower)
1535 slot->host->pdata->setpower(slot->id, 0);
1536 regs = mci_readl(slot->host, PWREN);
1537 regs &= ~(1 << slot->id);
1538 mci_writel(slot->host, PWREN, regs);
/*
 * dw_mci_get_ro - mmc_host_ops .get_ro: report write-protect state.
 * Priority: slot quirk (never protected) > platform get_ro callback >
 * WP GPIO > controller WRTPRT register bit for this slot.
 */
1545 static int dw_mci_get_ro(struct mmc_host *mmc)
1548 struct dw_mci_slot *slot = mmc_priv(mmc);
1549 struct dw_mci_board *brd = slot->host->pdata;
1551 /* Use platform get_ro function, else try on board write protect */
1552 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1554 else if(brd->get_ro)
1555 read_only = brd->get_ro(slot->id);
1556 else if(gpio_is_valid(slot->wp_gpio))
1557 read_only = gpio_get_value(slot->wp_gpio);
1560 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1562 dev_dbg(&mmc->class_dev, "card is %s\n",
1563 read_only ? "read-only" : "read-write");
/*
 * dw_mci_set_sdio_status - software card-detect for SDIO (e.g. WiFi
 * power on/off). Updates DW_MMC_CARD_PRESENT under host->lock, gates
 * the controller clocks to match, then schedules a rescan.
 */
1568 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1570 struct dw_mci_slot *slot = mmc_priv(mmc);
1571 struct dw_mci *host = slot->host;
1572 /*struct dw_mci_board *brd = slot->host->pdata;*/
/* Only meaningful for hosts restricted to SDIO cards. */
1574 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1577 spin_lock_bh(&host->lock);
1580 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1582 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1584 spin_unlock_bh(&host->lock);
/* Card "inserted": enable hclk then cclk; "removed": reverse order. */
1586 if (test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1587 if (__clk_is_enabled(host->hclk_mmc) == false)
1588 clk_prepare_enable(host->hclk_mmc);
1589 if (__clk_is_enabled(host->clk_mmc) == false)
1590 clk_prepare_enable(host->clk_mmc);
1592 if (__clk_is_enabled(host->clk_mmc) == true)
1593 clk_disable_unprepare(slot->host->clk_mmc);
1594 if (__clk_is_enabled(host->hclk_mmc) == true)
1595 clk_disable_unprepare(slot->host->hclk_mmc);
/* Debounced rescan so the MMC core notices the state change. */
1598 mmc_detect_change(slot->mmc, 20);
/*
 * dw_mci_get_cd - mmc_host_ops .get_cd: report card presence.
 * On RK3036/RK3126(B) SD slots the CD GPIO is shared with force_jtag
 * muxing in GRF: with no card the pad is handed to JTAG, on insertion
 * JTAG is forced off. The CD IRQ trigger polarity is flipped each time
 * so the next edge is caught. Falls back to quirk/platform/GPIO/CDETECT.
 */
1604 static int dw_mci_get_cd(struct mmc_host *mmc)
1607 struct dw_mci_slot *slot = mmc_priv(mmc);
1608 struct dw_mci_board *brd = slot->host->pdata;
1609 struct dw_mci *host = slot->host;
1610 int gpio_cd = mmc_gpio_get_cd(mmc);
1611 int force_jtag_bit, force_jtag_reg;
1615 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
1616 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1617 gpio_cd = slot->cd_gpio;
1618 irq = gpio_to_irq(gpio_cd);
1619 if (gpio_is_valid(gpio_cd)) {
1620 gpio_val = gpio_get_value(gpio_cd);
1621 if (soc_is_rk3036()) {
1622 force_jtag_bit = 11;
1623 force_jtag_reg = RK312X_GRF_SOC_CON0;
/*
 * NOTE(review): the rk3126 branch visibly sets force_jtag_reg but no
 * assignment to force_jtag_bit appears here — if none exists in the
 * elided lines, the grf_writel below uses an uninitialized shift.
 * TODO confirm against the full file.
 */
1624 } else if (soc_is_rk3126() || soc_is_rk3126b()) {
1625 force_jtag_reg = RK312X_GRF_SOC_CON0;
/* Double-sample the GPIO to debounce before trusting the level. */
1629 if (gpio_val == gpio_get_value(gpio_cd)) {
1630 gpio_cd = (gpio_val == 0 ? 1 : 0);
1632 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1633 /* Enable force_jtag wihtout card in slot, ONLY for NCD-package */
1634 grf_writel((0x1 << (force_jtag_bit + 16)) | (1 << force_jtag_bit),
1637 dw_mci_ctrl_all_reset(host);
1639 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT);
1640 /* Really card detected: SHOULD disable force_jtag */
1641 grf_writel((0x1 << (force_jtag_bit + 16)) | (0 << force_jtag_bit),
/* Unstable sample: keep the last state, just re-arm the IRQ polarity. */
1646 gpio_val = gpio_get_value(gpio_cd);
1648 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
1649 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
1650 return slot->last_detect_state;
1653 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
/* SDIO presence is purely software-managed (see set_sdio_status). */
1657 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1658 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1660 /* Use platform get_cd function, else try onboard card detect */
1661 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1663 else if (brd->get_cd)
1664 present = !brd->get_cd(slot->id);
1665 else if (!IS_ERR_VALUE(gpio_cd))
1668 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1671 spin_lock_bh(&host->lock);
1673 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1674 dev_dbg(&mmc->class_dev, "card is present\n");
1676 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1677 dev_dbg(&mmc->class_dev, "card is not present\n");
1679 spin_unlock_bh(&host->lock);
1686 * Dts Should caps emmc controller with poll-hw-reset
/*
 * dw_mci_hw_reset - mmc_host_ops .hw_reset for eMMC:
 *  (1) issue CMD12 to abort any transfer in flight,
 *  (2) wait for DTO so the data path drains,
 *  (3) reset IDMAC (BMOD), DMA and FIFO (CTRL) in that exact order,
 *  (4) pulse PWREN/RST_N per the eMMC spec timing quoted below.
 */
1688 static void dw_mci_hw_reset(struct mmc_host *mmc)
1690 struct dw_mci_slot *slot = mmc_priv(mmc);
1691 struct dw_mci *host = slot->host;
1696 unsigned long timeout;
1699 /* (1) CMD12 to end any transfer in process */
1700 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1701 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1703 if(host->mmc->hold_reg_flag)
1704 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1705 mci_writel(host, CMDARG, 0);
1707 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll up to 500ms for the controller to accept the command. */
1709 timeout = jiffies + msecs_to_jiffies(500);
1711 ret = time_before(jiffies, timeout);
1712 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1717 MMC_DBG_ERR_FUNC(host->mmc,
1718 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1719 __func__, mmc_hostname(host->mmc));
1721 /* (2) wait DTO, even if no response is sent back by card */
1723 timeout = jiffies + msecs_to_jiffies(5);
1725 ret = time_before(jiffies, timeout);
1726 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1727 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1733 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1735 /* Software reset - BMOD[0] for IDMA only */
1736 regs = mci_readl(host, BMOD);
1737 regs |= SDMMC_IDMAC_SWRESET;
1738 mci_writel(host, BMOD, regs);
1739 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1740 regs = mci_readl(host, BMOD);
1741 if(regs & SDMMC_IDMAC_SWRESET)
1742 MMC_DBG_WARN_FUNC(host->mmc,
1743 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1744 __func__, mmc_hostname(host->mmc));
1746 /* DMA reset - CTRL[2] */
1747 regs = mci_readl(host, CTRL);
1748 regs |= SDMMC_CTRL_DMA_RESET;
1749 mci_writel(host, CTRL, regs);
1750 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1751 regs = mci_readl(host, CTRL);
1752 if(regs & SDMMC_CTRL_DMA_RESET)
1753 MMC_DBG_WARN_FUNC(host->mmc,
1754 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1755 __func__, mmc_hostname(host->mmc));
1757 /* FIFO reset - CTRL[1] */
1758 regs = mci_readl(host, CTRL);
1759 regs |= SDMMC_CTRL_FIFO_RESET;
1760 mci_writel(host, CTRL, regs);
1761 mdelay(1); /* no timing limited, 1ms is random value */
1762 regs = mci_readl(host, CTRL);
1763 if(regs & SDMMC_CTRL_FIFO_RESET)
/*
 * NOTE(review): this message says "SDMMC_CTRL_DMA_RESET" but the check
 * above is for SDMMC_CTRL_FIFO_RESET — looks like a copy-paste slip in
 * the diagnostic text; harmless to behavior, confusing in logs.
 */
1764 MMC_DBG_WARN_FUNC(host->mmc,
1765 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1766 __func__, mmc_hostname(host->mmc));
1769 According to eMMC spec
1770 tRstW >= 1us ; RST_n pulse width
1771 tRSCA >= 200us ; RST_n to Command time
1772 tRSTH >= 1us ; RST_n high period
1774 mci_writel(slot->host, PWREN, 0x0);
1775 mci_writel(slot->host, RST_N, 0x0);
1777 udelay(10); /* 10us for bad quality eMMc. */
1779 mci_writel(slot->host, PWREN, 0x1);
1780 mci_writel(slot->host, RST_N, 0x1);
1782 usleep_range(500, 1000); /* at least 500(> 200us) */
1786 * Disable lower power mode.
1788 * Low power mode will stop the card clock when idle. According to the
1789 * description of the CLKENA register we should disable low power mode
1790 * for SDIO cards if we need SDIO interrupts to work.
1792 * This function is fast if low power mode is already disabled.
1794 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1796 struct dw_mci *host = slot->host;
1798 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1800 clk_en_a = mci_readl(host, CLKENA);
/* Only touch the hardware (and resync the clock) when the bit is set. */
1802 if (clk_en_a & clken_low_pwr) {
1803 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
/* CLKENA changes take effect only after an update-clock command. */
1804 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1805 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * dw_mci_enable_sdio_irq - mmc_host_ops .enable_sdio_irq: mask/unmask the
 * per-slot SDIO interrupt in INTMASK. IP versions before 2.40a place the
 * SDIO bits at a different offset than newer ones (slot->id vs id + 8).
 */
1809 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1811 struct dw_mci_slot *slot = mmc_priv(mmc);
1812 struct dw_mci *host = slot->host;
1813 unsigned long flags;
1817 spin_lock_irqsave(&host->slock, flags);
1819 /* Enable/disable Slot Specific SDIO interrupt */
1820 int_mask = mci_readl(host, INTMASK);
1822 if (host->verid < DW_MMC_240A)
1823 sdio_int = SDMMC_INT_SDIO(slot->id);
1825 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1829 * Turn off low power mode if it was enabled. This is a bit of
1830 * a heavy operation and we disable / enable IRQs a lot, so
1831 * we'll leave low power mode disabled and it will get
1832 * re-enabled again in dw_mci_setup_bus().
1834 dw_mci_disable_low_power(slot);
1836 mci_writel(host, INTMASK,
1837 (int_mask | sdio_int));
1839 mci_writel(host, INTMASK,
1840 (int_mask & ~sdio_int));
1843 spin_unlock_irqrestore(&host->slock, flags);
1846 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* IO-domain voltage selectors, in millivolts. */
1848 IO_DOMAIN_12 = 1200,
1849 IO_DOMAIN_18 = 1800,
1850 IO_DOMAIN_33 = 3300,
/*
 * dw_mci_do_grf_io_domain_switch - program the SoC GRF so the SD pad
 * IO-domain tracks the requested signalling voltage. RK3288 writes
 * GRF_IO_VSEL directly; RK3368 goes through the grf regmap; any other
 * chip is reported as unsupported.
 */
1852 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1862 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1863 __FUNCTION__, mmc_hostname(host->mmc));
1866 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1867 __FUNCTION__, mmc_hostname(host->mmc));
1871 if (cpu_is_rk3288()) {
1872 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
/* Upper 16 bits are the write-enable mask for the GRF register. */
1873 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1876 } else if (host->cid == DW_MCI_TYPE_RK3368) {
1877 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1878 regmap_write(host->grf, 0x900, (voltage << 6) | (1 << 22));
1882 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1883 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * dw_mci_do_start_signal_voltage_switch - perform the actual regulator,
 * GRF io-domain and UHS_REG changes for a 3.3V / 1.8V / 1.2V signalling
 * switch. Each branch sets the regulator, switches the io-domain, updates
 * SDMMC_UHS_VOLT_REG_18, waits ~5ms for the rail to settle and verifies
 * UHS_REG reflects the requested level.
 */
1887 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1888 struct mmc_ios *ios)
1891 unsigned int value,uhs_reg;
1894 * Signal Voltage Switching is only applicable for Host Controllers
/* Older IP (< 2.40a) has no UHS_REG voltage support: nothing to do. */
1897 if (host->verid < DW_MMC_240A)
1900 uhs_reg = mci_readl(host, UHS_REG);
1901 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1902 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1904 switch (ios->signal_voltage) {
1905 case MMC_SIGNAL_VOLTAGE_330:
1906 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
/* RK3288 routes the supply through the io-domain helper. */
1908 if (cpu_is_rk3288())
1909 ret = io_domain_regulator_set_voltage(
1910 host->vmmc, 3300000, 3300000);
1912 ret = regulator_set_voltage(host->vmmc, 3300000, 3300000);
1914 /* regulator_put(host->vmmc); //to be done in remove function. */
1916 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1917 __func__, regulator_get_voltage(host->vmmc), ret);
1919 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1920 " failed\n", mmc_hostname(host->mmc));
1923 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1925 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1926 __FUNCTION__, mmc_hostname(host->mmc));
1928 /* set High-power mode */
1929 value = mci_readl(host, CLKENA);
1930 value &= ~SDMMC_CLKEN_LOW_PWR;
1931 mci_writel(host,CLKENA , value);
/* Clear the 1.8V bit so the controller drives 3.3V signalling. */
1933 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1934 mci_writel(host,UHS_REG , uhs_reg);
1937 usleep_range(5000, 5500);
1939 /* 3.3V regulator output should be stable within 5 ms */
1940 uhs_reg = mci_readl(host, UHS_REG);
1941 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1944 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1945 mmc_hostname(host->mmc));
1948 case MMC_SIGNAL_VOLTAGE_180:
1950 if (cpu_is_rk3288())
1951 ret = io_domain_regulator_set_voltage(
1955 ret = regulator_set_voltage(
1958 /* regulator_put(host->vmmc);//to be done in remove function. */
1960 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1961 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1963 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1964 " failed\n", mmc_hostname(host->mmc));
1967 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1971 * Enable 1.8V Signal Enable in the Host Control2
1974 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1977 usleep_range(5000, 5500);
1978 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1979 __FUNCTION__,mmc_hostname(host->mmc));
1981 /* 1.8V regulator output should be stable within 5 ms */
1982 uhs_reg = mci_readl(host, UHS_REG);
1983 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1986 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1987 mmc_hostname(host->mmc));
1990 case MMC_SIGNAL_VOLTAGE_120:
1992 if (cpu_is_rk3288())
1993 ret = io_domain_regulator_set_voltage(
1997 ret = regulator_set_voltage(host->vmmc,
2000 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
2001 " failed\n", mmc_hostname(host->mmc));
2007 /* No signal voltage switch required */
/*
 * dw_mci_start_signal_voltage_switch - thin mmc_host_ops wrapper around
 * dw_mci_do_start_signal_voltage_switch; bails early on pre-2.40a IP,
 * which lacks UHS_REG voltage control.
 */
2013 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
2014 struct mmc_ios *ios)
2016 struct dw_mci_slot *slot = mmc_priv(mmc);
2017 struct dw_mci *host = slot->host;
2020 if (host->verid < DW_MMC_240A)
2023 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * dw_mci_execute_tuning - mmc_host_ops .execute_tuning: select the tuning
 * block pattern for CMD19/CMD21 based on bus width, pick the clock
 * controller id for this card type, and delegate to the SoC-specific
 * tuning implementation in drv_data.
 */
2029 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2031 struct dw_mci_slot *slot = mmc_priv(mmc);
2032 struct dw_mci *host = slot->host;
2033 const struct dw_mci_drv_data *drv_data = host->drv_data;
2034 struct dw_mci_tuning_data tuning_data;
2037 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
2038 if(cpu_is_rk3036() || cpu_is_rk312x())
2041 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
2042 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
2043 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
2044 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
2045 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
2046 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
2047 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
2051 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
2052 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
2053 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
2056 "Undefined command(%d) for tuning\n", opcode);
2061 /* Recommend sample phase and delayline
2062 Fixme: Mix-use these three controllers will cause
/* con_id maps card type to its clock controller: eMMC=3, SDIO=1, SD=0. */
2065 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
2066 tuning_data.con_id = 3;
2067 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2068 tuning_data.con_id = 1;
2070 tuning_data.con_id = 0;
2072 /* 0: driver, from host->devices
2073 1: sample, from devices->host
2075 tuning_data.tuning_type = 1;
2077 if (drv_data && drv_data->execute_tuning)
2078 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/*
 * dw_mci_post_tmo - request-timeout recovery hook (.post_tmo): abandon
 * the current mrq, issue/force a CMD12 stop, fully reset the controller
 * (including IDMAC), restore FIFOTH/TMOUT/INTMASK to their initial
 * programming and re-run set_ios/setup_bus on powered slots.
 */
2083 static void dw_mci_post_tmo(struct mmc_host *mmc)
2085 struct dw_mci_slot *slot = mmc_priv(mmc);
2086 struct dw_mci *host = slot->host;
2087 struct mmc_data *data;
2088 u32 ret, i, regs, cmd_flags;
2090 unsigned long timeout = 0;
2091 bool ret_timeout = true;
/* Drop the in-flight request: it will be retried by the MMC core. */
2093 host->cur_slot->mrq = NULL;
2095 host->state = STATE_IDLE;
2099 printk("[%s] -- Timeout recovery procedure start --\n",
2100 mmc_hostname(host->mmc));
2102 if (data && (data->stop)) {
2103 send_stop_cmd(host, data);
/* No stop command attached: hand-craft a CMD12 via direct register writes. */
2105 mci_writel(host, CMDARG, 0);
2107 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC |
2108 SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2110 if (host->mmc->hold_reg_flag)
2111 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2113 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
2115 timeout = jiffies + msecs_to_jiffies(500);
2117 while(ret_timeout) {
2118 ret_timeout = time_before(jiffies, timeout);
2119 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2123 if (false == ret_timeout)
2124 MMC_DBG_ERR_FUNC(host->mmc, "stop recovery failed![%s]",
2125 mmc_hostname(host->mmc));
2128 if (!dw_mci_ctrl_all_reset(host)) {
2133 #ifdef CONFIG_MMC_DW_IDMAC
2134 if (host->use_dma && host->dma_ops->init)
2135 host->dma_ops->init(host);
2139 * Restore the initial value at FIFOTH register
2140 * And Invalidate the prev_blksz with zero
2142 mci_writel(host, FIFOTH, host->fifoth_val);
2143 host->prev_blksz = 0;
2144 mci_writel(host, TMOUT, 0xFFFFFFFF);
2145 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2146 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
2147 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
2148 if (!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
2149 regs |= SDMMC_INT_CD;
/* Preserve the SDIO interrupt bit if it was unmasked before the reset. */
2151 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)) {
2152 if (host->verid < DW_MMC_240A)
2153 sdio_int = SDMMC_INT_SDIO(0);
2155 sdio_int = SDMMC_INT_SDIO(8);
2157 if (mci_readl(host, INTMASK) & sdio_int)
2161 mci_writel(host, INTMASK, regs);
2162 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
/* Re-apply ios/bus settings on slots that kept power across the reset. */
2163 for (i = 0; i < host->num_slots; i++) {
2164 struct dw_mci_slot *slot = host->slot[i];
2167 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2168 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
2169 dw_mci_setup_bus(slot, true);
2172 mci_writel(host, RINTSTS, 0xFFFFFFFF);
2174 printk("[%s] -- Timeout recovery procedure finished --\n",
2175 mmc_hostname(host->mmc));
/*
 * mmc_host_ops table wiring the MMC core callbacks to this driver;
 * voltage-switch callbacks are compiled in only with
 * CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE.
 */
2179 static const struct mmc_host_ops dw_mci_ops = {
2180 .request = dw_mci_request,
2181 .pre_req = dw_mci_pre_req,
2182 .post_req = dw_mci_post_req,
2183 .set_ios = dw_mci_set_ios,
2184 .get_ro = dw_mci_get_ro,
2185 .get_cd = dw_mci_get_cd,
2186 .set_sdio_status = dw_mci_set_sdio_status,
2187 .hw_reset = dw_mci_hw_reset,
2188 .enable_sdio_irq = dw_mci_enable_sdio_irq,
2189 .execute_tuning = dw_mci_execute_tuning,
2190 .post_tmo = dw_mci_post_tmo,
2191 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
2192 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
2193 .card_busy = dw_mci_card_busy,
/*
 * dw_mci_enable_irq - enable/disable the host IRQ line, tracking the
 * current state in host->irq_state so enable_irq()/disable_irq() are
 * never called twice in a row (which would unbalance the IRQ depth).
 */
2198 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
2200 unsigned long flags;
2205 local_irq_save(flags);
2206 if(host->irq_state != irqflag)
2208 host->irq_state = irqflag;
2211 enable_irq(host->irq);
2215 disable_irq(host->irq);
2218 local_irq_restore(flags);
/*
 * dw_mci_deal_data_end - finalize a write (host->card) data phase:
 * translate latched data_status bits into data->error (skipping the
 * CMD19 bus-test command, whose CRC errors are expected) and wait for
 * the controller/card to go unbusy before the next request.
 */
2222 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2223 __releases(&host->lock)
2224 __acquires(&host->lock)
2226 if(DW_MCI_SEND_STATUS == host->dir_status){
2228 if( MMC_BUS_TEST_W != host->cmd->opcode){
2229 if(host->data_status & SDMMC_INT_DCRC)
2230 host->data->error = -EILSEQ;
/*
 * NOTE(review): end-bit error (EBE) is mapped to -ETIMEDOUT here;
 * upstream dw_mmc reports EBE on writes as -EILSEQ. Possibly a
 * deliberate local choice — confirm before relying on the errno.
 */
2231 else if(host->data_status & SDMMC_INT_EBE)
2232 host->data->error = -ETIMEDOUT;
2234 dw_mci_wait_unbusy(host);
2237 dw_mci_wait_unbusy(host);
/*
 * dw_mci_request_end - finish the current mrq: drain the data phase,
 * clear cur_slot->mrq, start the next queued slot's request (or go
 * idle), then complete the old request to the MMC core. Called with
 * host->lock held; the lock is dropped around mmc_request_done().
 */
2242 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2243 __releases(&host->lock)
2244 __acquires(&host->lock)
2246 struct dw_mci_slot *slot;
2247 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2249 //WARN_ON(host->cmd || host->data);
2251 dw_mci_deal_data_end(host, mrq);
2254 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2255 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2257 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2258 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2260 host->cur_slot->mrq = NULL;
/* Pop the next waiting slot (if any) and start its request. */
2262 if (!list_empty(&host->queue)) {
2263 slot = list_entry(host->queue.next,
2264 struct dw_mci_slot, queue_node);
2265 list_del(&slot->queue_node);
2266 dev_vdbg(host->dev, "list not empty: %s is next\n",
2267 mmc_hostname(slot->mmc));
2268 host->state = STATE_SENDING_CMD;
2269 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2270 dw_mci_start_request(host, slot);
2272 dev_vdbg(host->dev, "list empty\n");
2273 host->state = STATE_IDLE;
/* Completion may re-enter the driver; it must run without the lock. */
2276 spin_unlock(&host->lock);
2277 mmc_request_done(prev_mmc, mrq);
2278 spin_lock(&host->lock);
/*
 * dw_mci_command_complete - consume the latched cmd_status: read the
 * (short or long) response registers and map response-timeout / CRC /
 * response-error interrupt bits onto cmd->error. Note RESP0..3 hold the
 * 136-bit response in reverse word order.
 */
2281 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2283 u32 status = host->cmd_status;
2285 host->cmd_status = 0;
2287 /* Read the response from the card (up to 16 bytes) */
2288 if (cmd->flags & MMC_RSP_PRESENT) {
2289 if (cmd->flags & MMC_RSP_136) {
2290 cmd->resp[3] = mci_readl(host, RESP0);
2291 cmd->resp[2] = mci_readl(host, RESP1);
2292 cmd->resp[1] = mci_readl(host, RESP2);
2293 cmd->resp[0] = mci_readl(host, RESP3);
2295 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2296 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2298 cmd->resp[0] = mci_readl(host, RESP0);
2302 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2303 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
/* Response timeout: SDIO cards get special handling (lines elided here). */
2307 if (status & SDMMC_INT_RTO)
2309 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2312 cmd->error = -ETIMEDOUT;
2313 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2314 cmd->error = -EILSEQ;
2315 }else if (status & SDMMC_INT_RESP_ERR){
2320 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2321 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* Throttle repeated response-timeout logging (except for CMD13 polls). */
2324 if(MMC_SEND_STATUS != cmd->opcode)
2325 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2326 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2327 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2331 /* newer ip versions need a delay between retries */
2332 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * dw_mci_tasklet_func - bottom-half state machine driving a request
 * through SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP
 * (with DATA_ERROR as a detour), reacting to the event bits latched by
 * the interrupt handler in host->pending_events. Loops until the state
 * stops changing, then stores it back. Runs under host->lock.
 * NOTE(review): many lines (braces, gotos, default arms) are elided in
 * this listing; statement ordering below is NOT complete.
 */
2338 static void dw_mci_tasklet_func(unsigned long priv)
2340 struct dw_mci *host = (struct dw_mci *)priv;
2341 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2342 struct mmc_data *data;
2343 struct mmc_command *cmd;
2344 enum dw_mci_state state;
2345 enum dw_mci_state prev_state;
2346 u32 status, cmd_flags;
2347 unsigned long timeout = 0;
2350 spin_lock(&host->lock);
2352 state = host->state;
/* --- command phase finished: inspect result, maybe chain SBC/data --- */
2362 case STATE_SENDING_CMD:
2363 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2364 &host->pending_events))
2369 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2370 dw_mci_command_complete(host, cmd);
/* CMD23 done cleanly: immediately launch the real data command. */
2371 if (cmd == host->mrq->sbc && !cmd->error) {
2372 prev_state = state = STATE_SENDING_CMD;
2373 __dw_mci_start_request(host, host->cur_slot,
2378 if (cmd->data && cmd->error) {
2379 dw_mci_stop_dma(host);
2382 send_stop_cmd(host, data);
2383 state = STATE_SENDING_STOP;
2386 /* host->data = NULL; */
2389 send_stop_abort(host, data);
2390 state = STATE_SENDING_STOP;
2393 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2396 if (!host->mrq->data || cmd->error) {
2397 dw_mci_request_end(host, host->mrq);
2401 prev_state = state = STATE_SENDING_DATA;
/* --- data phase in flight: handle errors, wait for transfer done --- */
2404 case STATE_SENDING_DATA:
2405 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2406 dw_mci_stop_dma(host);
2409 send_stop_cmd(host, data);
2411 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2412 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2413 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
/* Hand-crafted CMD12 via direct register writes (no request struct). */
2415 mci_writel(host, CMDARG, 0);
2417 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2418 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2420 if(host->mmc->hold_reg_flag)
2421 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2423 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
2425 timeout = jiffies + msecs_to_jiffies(500);
2428 ret = time_before(jiffies, timeout);
2429 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2433 MMC_DBG_ERR_FUNC(host->mmc,
2434 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2435 __func__, mmc_hostname(host->mmc));
2438 send_stop_abort(host, data);
2440 state = STATE_DATA_ERROR;
2444 MMC_DBG_CMD_FUNC(host->mmc,
2445 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2446 prev_state,state, mmc_hostname(host->mmc));
2448 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2449 &host->pending_events))
2451 MMC_DBG_INFO_FUNC(host->mmc,
2452 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2453 prev_state,state,mmc_hostname(host->mmc));
2455 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2456 prev_state = state = STATE_DATA_BUSY;
/* --- transfer done: classify data_status into data->error --- */
2459 case STATE_DATA_BUSY:
2460 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2461 &host->pending_events))
2464 dw_mci_deal_data_end(host, host->mrq);
2465 MMC_DBG_INFO_FUNC(host->mmc,
2466 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2467 prev_state,state,mmc_hostname(host->mmc));
2469 /* host->data = NULL; */
2470 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2471 status = host->data_status;
2473 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2474 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2475 MMC_DBG_ERR_FUNC(host->mmc,
2476 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2477 prev_state,state, status, mmc_hostname(host->mmc));
2479 if (status & SDMMC_INT_DRTO) {
2480 data->error = -ETIMEDOUT;
2481 } else if (status & SDMMC_INT_DCRC) {
2482 data->error = -EILSEQ;
2483 } else if (status & SDMMC_INT_EBE &&
2484 host->dir_status == DW_MCI_SEND_STATUS){
2486 * No data CRC status was returned.
2487 * The number of bytes transferred will
2488 * be exaggerated in PIO mode.
2490 data->bytes_xfered = 0;
2491 data->error = -ETIMEDOUT;
2500 * After an error, there may be data lingering
2501 * in the FIFO, so reset it - doing so
2502 * generates a block interrupt, hence setting
2503 * the scatter-gather pointer to NULL.
2505 dw_mci_fifo_reset(host);
2507 data->bytes_xfered = data->blocks * data->blksz;
2512 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2513 prev_state,state,mmc_hostname(host->mmc));
2514 dw_mci_request_end(host, host->mrq);
2517 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2518 prev_state,state,mmc_hostname(host->mmc));
/* With SBC, the controller auto-stops: no explicit CMD12 needed. */
2520 if (host->mrq->sbc && !data->error) {
2521 data->stop->error = 0;
2523 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2524 prev_state,state,mmc_hostname(host->mmc));
2526 dw_mci_request_end(host, host->mrq);
2530 prev_state = state = STATE_SENDING_STOP;
2532 send_stop_cmd(host, data);
2534 if (data->stop && !data->error) {
2535 /* stop command for open-ended transfer*/
2537 send_stop_abort(host, data);
2541 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2542 prev_state,state,mmc_hostname(host->mmc));
/* --- CMD12 completion: finish the request --- */
2544 case STATE_SENDING_STOP:
2545 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2548 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2549 prev_state, state, mmc_hostname(host->mmc));
2551 /* CMD error in data command */
2552 if (host->mrq->cmd->error && host->mrq->data) {
2553 dw_mci_fifo_reset(host);
2557 host->data = NULL; */
2559 dw_mci_command_complete(host, host->mrq->stop);
2561 if (host->mrq->stop)
2562 dw_mci_command_complete(host, host->mrq->stop);
2564 host->cmd_status = 0;
2567 dw_mci_request_end(host, host->mrq);
/* --- data error: wait for the transfer to wind down, then re-enter --- */
2570 case STATE_DATA_ERROR:
2571 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2572 &host->pending_events))
2575 state = STATE_DATA_BUSY;
2578 } while (state != prev_state);
2580 host->state = state;
2582 spin_unlock(&host->lock);
2586 /* push final bytes to part_buf, only use during push */
/*
 * Stash the trailing bytes of a PIO push that do not fill a whole FIFO
 * word; host->part_buf_count records how many are waiting to be flushed.
 */
2587 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2589 memcpy((void *)&host->part_buf, buf, cnt);
2590 host->part_buf_count = cnt;
2593 /* append bytes to part_buf, only use during push */
/*
 * Append up to one FIFO word's worth of bytes ((1 << data_shift) total)
 * to the partial buffer; the clamped count is the number consumed.
 */
2594 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2596 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2597 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2598 host->part_buf_count += cnt;
2602 /* pull first bytes from part_buf, only use during pull */
/*
 * Drain previously-buffered bytes (starting at part_buf_start) into the
 * caller's buffer before reading more words from the FIFO.
 */
2603 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2605 cnt = min(cnt, (int)host->part_buf_count);
2607 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2609 host->part_buf_count -= cnt;
2610 host->part_buf_start += cnt;
2615 /* pull final bytes from the part_buf, assuming it's just been filled */
/*
 * Copy the first cnt bytes of a freshly-read FIFO word to the caller and
 * mark the remainder of that word as still pending in part_buf.
 */
2616 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2618 memcpy(buf, &host->part_buf, cnt);
2619 host->part_buf_start = cnt;
2620 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO push for a 16-bit host data bus: write buf to the data FIFO in
 * 16-bit words, buffering any odd trailing byte in part_buf, and flush
 * that partial word once the expected transfer length has been reached.
 */
2623 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2625 struct mmc_data *data = host->data;
2628 /* try and push anything in the part_buf */
2629 if (unlikely(host->part_buf_count)) {
2630 int len = dw_mci_push_part_bytes(host, buf, cnt);
2633 if (host->part_buf_count == 2) {
2634 mci_writew(host, DATA(host->data_offset),
2636 host->part_buf_count = 0;
/* Without efficient unaligned access, bounce odd-aligned data first. */
2639 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2640 if (unlikely((unsigned long)buf & 0x1)) {
2642 u16 aligned_buf[64];
2643 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2644 int items = len >> 1;
2646 /* memcpy from input buffer into aligned buffer */
2647 memcpy(aligned_buf, buf, len);
2650 /* push data from aligned buffer into fifo */
2651 for (i = 0; i < items; ++i)
2652 mci_writew(host, DATA(host->data_offset),
2659 for (; cnt >= 2; cnt -= 2)
2660 mci_writew(host, DATA(host->data_offset), *pdata++);
2663 /* put anything remaining in the part_buf */
2665 dw_mci_set_part_bytes(host, buf, cnt);
2666 /* Push data if we have reached the expected data length */
2667 if ((data->bytes_xfered + init_cnt) ==
2668 (data->blksz * data->blocks))
2669 mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit host data bus: read 16-bit words from the data
 * FIFO into buf; a final odd byte is kept in part_buf16 and handed over
 * via dw_mci_pull_final_bytes().
 */
2674 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2676 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2677 if (unlikely((unsigned long)buf & 0x1)) {
2679 /* pull data from fifo into aligned buffer */
2680 u16 aligned_buf[64];
2681 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2682 int items = len >> 1;
2684 for (i = 0; i < items; ++i)
2685 aligned_buf[i] = mci_readw(host,
2686 DATA(host->data_offset));
2687 /* memcpy from aligned buffer into output buffer */
2688 memcpy(buf, aligned_buf, len);
2696 for (; cnt >= 2; cnt -= 2)
2697 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* A trailing odd byte: read one more word and buffer the leftover. */
2701 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2702 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit host data bus; same structure as the 16-bit
 * variant but with 32-bit FIFO words and 4-byte alignment handling.
 */
2706 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2708 struct mmc_data *data = host->data;
2711 /* try and push anything in the part_buf */
2712 if (unlikely(host->part_buf_count)) {
2713 int len = dw_mci_push_part_bytes(host, buf, cnt);
2716 if (host->part_buf_count == 4) {
2717 mci_writel(host, DATA(host->data_offset),
2719 host->part_buf_count = 0;
/* Without efficient unaligned access, bounce misaligned data first. */
2722 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2723 if (unlikely((unsigned long)buf & 0x3)) {
2725 u32 aligned_buf[32];
2726 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2727 int items = len >> 2;
2729 /* memcpy from input buffer into aligned buffer */
2730 memcpy(aligned_buf, buf, len);
2733 /* push data from aligned buffer into fifo */
2734 for (i = 0; i < items; ++i)
2735 mci_writel(host, DATA(host->data_offset),
2742 for (; cnt >= 4; cnt -= 4)
2743 mci_writel(host, DATA(host->data_offset), *pdata++);
2746 /* put anything remaining in the part_buf */
2748 dw_mci_set_part_bytes(host, buf, cnt);
2749 /* Push data if we have reached the expected data length */
2750 if ((data->bytes_xfered + init_cnt) ==
2751 (data->blksz * data->blocks))
2752 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit host data bus; trailing 1-3 bytes are kept in
 * part_buf32 and handed over via dw_mci_pull_final_bytes().
 */
2757 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2759 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2760 if (unlikely((unsigned long)buf & 0x3)) {
2762 /* pull data from fifo into aligned buffer */
2763 u32 aligned_buf[32];
2764 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2765 int items = len >> 2;
2767 for (i = 0; i < items; ++i)
2768 aligned_buf[i] = mci_readl(host,
2769 DATA(host->data_offset));
2770 /* memcpy from aligned buffer into output buffer */
2771 memcpy(buf, aligned_buf, len);
2779 for (; cnt >= 4; cnt -= 4)
2780 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Trailing bytes: read one more word and buffer the leftover. */
2784 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2785 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 64-bit host data bus; same structure as the 16/32-bit
 * variants but with 64-bit FIFO words and 8-byte alignment handling.
 */
2789 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2791 struct mmc_data *data = host->data;
2794 /* try and push anything in the part_buf */
2795 if (unlikely(host->part_buf_count)) {
2796 int len = dw_mci_push_part_bytes(host, buf, cnt);
2800 if (host->part_buf_count == 8) {
2801 mci_writeq(host, DATA(host->data_offset),
2803 host->part_buf_count = 0;
/* Without efficient unaligned access, bounce misaligned data first. */
2806 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2807 if (unlikely((unsigned long)buf & 0x7)) {
2809 u64 aligned_buf[16];
2810 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2811 int items = len >> 3;
2813 /* memcpy from input buffer into aligned buffer */
2814 memcpy(aligned_buf, buf, len);
2817 /* push data from aligned buffer into fifo */
2818 for (i = 0; i < items; ++i)
2819 mci_writeq(host, DATA(host->data_offset),
2826 for (; cnt >= 8; cnt -= 8)
2827 mci_writeq(host, DATA(host->data_offset), *pdata++);
2830 /* put anything remaining in the part_buf */
2832 dw_mci_set_part_bytes(host, buf, cnt);
2833 /* Push data if we have reached the expected data length */
2834 if ((data->bytes_xfered + init_cnt) ==
2835 (data->blksz * data->blocks))
2836 mci_writeq(host, DATA(host->data_offset),
/*
 * PIO pull for a 64-bit host data bus; trailing 1-7 bytes are kept in
 * part_buf and handed over via dw_mci_pull_final_bytes().
 */
2841 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2843 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2844 if (unlikely((unsigned long)buf & 0x7)) {
2846 /* pull data from fifo into aligned buffer */
2847 u64 aligned_buf[16];
2848 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2849 int items = len >> 3;
2851 for (i = 0; i < items; ++i)
2852 aligned_buf[i] = mci_readq(host,
2853 DATA(host->data_offset));
2854 /* memcpy from aligned buffer into output buffer */
2855 memcpy(buf, aligned_buf, len);
2863 for (; cnt >= 8; cnt -= 8)
2864 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* Trailing bytes: read one more word and buffer the leftover. */
2868 host->part_buf = mci_readq(host, DATA(host->data_offset));
2869 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Common pull entry point: first satisfy the request from any buffered
 * partial word, then delegate to the bus-width-specific pull_data hook.
 */
2873 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2877 /* get remaining partial bytes */
2878 len = dw_mci_pull_part_bytes(host, buf, cnt);
2879 if (unlikely(len == cnt))
2884 /* get the rest of the data */
2885 host->pull_data(host, buf, cnt);
/*
 * Drain the RX FIFO in PIO mode, walking the request's scatterlist with
 * an sg_miter. Loops while RXDR stays asserted (and, on the final DTO
 * interrupt, until the FIFO count reaches zero); sets XFER_COMPLETE once
 * the scatterlist is exhausted.
 */
2888 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2890 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2892 unsigned int offset;
2893 struct mmc_data *data = host->data;
2894 int shift = host->data_shift;
2897 unsigned int remain, fcnt;
/* Defensive check: warn if the bus reference count has already dropped. */
2899 if(!host->mmc->bus_refs){
2900 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2904 if (!sg_miter_next(sg_miter))
2907 host->sg = sg_miter->piter.sg;
2908 buf = sg_miter->addr;
2909 remain = sg_miter->length;
/* Bytes available = FIFO word count scaled by bus width + buffered bytes. */
2913 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2914 << shift) + host->part_buf_count;
2915 len = min(remain, fcnt);
2918 dw_mci_pull_data(host, (void *)(buf + offset), len);
2919 data->bytes_xfered += len;
2924 sg_miter->consumed = offset;
2925 status = mci_readl(host, MINTSTS);
2926 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2927 /* if the RXDR is ready read again */
2928 } while ((status & SDMMC_INT_RXDR) ||
2929 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2932 if (!sg_miter_next(sg_miter))
2934 sg_miter->consumed = 0;
2936 sg_miter_stop(sg_miter);
2940 sg_miter_stop(sg_miter);
2944 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Fill the TX FIFO in PIO mode, walking the request's scatterlist with
 * an sg_miter. Loops while TXDR stays asserted; sets XFER_COMPLETE once
 * the scatterlist is exhausted.
 */
2947 static void dw_mci_write_data_pio(struct dw_mci *host)
2949 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2951 unsigned int offset;
2952 struct mmc_data *data = host->data;
2953 int shift = host->data_shift;
2956 unsigned int fifo_depth = host->fifo_depth;
2957 unsigned int remain, fcnt;
/* Defensive check: warn if the bus reference count has already dropped. */
2959 if(!host->mmc->bus_refs){
2960 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2965 if (!sg_miter_next(sg_miter))
2968 host->sg = sg_miter->piter.sg;
2969 buf = sg_miter->addr;
2970 remain = sg_miter->length;
/* Free space = (depth - used FIFO words) scaled by bus width, minus
 * bytes already sitting in part_buf. */
2974 fcnt = ((fifo_depth -
2975 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2976 << shift) - host->part_buf_count;
2977 len = min(remain, fcnt);
2980 host->push_data(host, (void *)(buf + offset), len);
2981 data->bytes_xfered += len;
2986 sg_miter->consumed = offset;
2987 status = mci_readl(host, MINTSTS);
2988 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2989 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2992 if (!sg_miter_next(sg_miter))
2994 sg_miter->consumed = 0;
2996 sg_miter_stop(sg_miter);
3000 sg_miter_stop(sg_miter);
3004 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Record a command-done interrupt status (without overwriting an earlier
 * error status) and kick the tasklet to finish command processing.
 */
3007 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
3009 if (!host->cmd_status)
3010 host->cmd_status = status;
3017 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3018 tasklet_schedule(&host->tasklet);
/*
 * Top-level interrupt handler. Reads the masked status (MINTSTS),
 * acknowledges each pending source in RINTSTS, latches command/data
 * status for the tasklet, drains/fills the FIFO for PIO transfers, and
 * dispatches SDIO, card-detect and (internal) DMA events.
 */
3021 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
3023 struct dw_mci *host = dev_id;
3024 u32 pending, sdio_int;
3027 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
/* Quirk: synthesize DATA_OVER when the controller fails to raise it
 * but the FIFO count shows the transfer is done. */
3030 * DTO fix - version 2.10a and below, and only if internal DMA
3033 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
3035 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
3036 pending |= SDMMC_INT_DATA_OVER;
/* Command-phase errors: latch status, let the tasklet complete the cmd. */
3040 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
3041 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
3042 host->cmd_status = pending;
3044 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
3045 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
3047 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
/* Data-phase errors: latch status and schedule the tasklet. */
3050 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
3051 /* if there is an error report DATA_ERROR */
3052 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
3053 host->data_status = pending;
3055 set_bit(EVENT_DATA_ERROR, &host->pending_events);
3057 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
3058 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
3059 tasklet_schedule(&host->tasklet);
/* Data transfer complete: drain any PIO leftovers, then notify. */
3062 if (pending & SDMMC_INT_DATA_OVER) {
3063 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
3064 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
3065 if (!host->data_status)
3066 host->data_status = pending;
3068 if (host->dir_status == DW_MCI_RECV_STATUS) {
3069 if (host->sg != NULL)
3070 dw_mci_read_data_pio(host, true);
3072 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3073 tasklet_schedule(&host->tasklet);
/* PIO FIFO service requests: RX ready / TX needed. */
3076 if (pending & SDMMC_INT_RXDR) {
3077 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
3078 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
3079 dw_mci_read_data_pio(host, false);
3082 if (pending & SDMMC_INT_TXDR) {
3083 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
3084 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
3085 dw_mci_write_data_pio(host);
3088 if (pending & SDMMC_INT_VSI) {
3089 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
3090 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
3091 dw_mci_cmd_interrupt(host, pending);
3094 if (pending & SDMMC_INT_CMD_DONE) {
3095 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
3096 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
3097 dw_mci_cmd_interrupt(host, pending);
/* Card detect: hold a wakelock and defer handling to the workqueue. */
3100 if (pending & SDMMC_INT_CD) {
3101 mci_writel(host, RINTSTS, SDMMC_INT_CD);
3102 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
3103 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
3104 queue_work(host->card_workqueue, &host->card_work);
3107 if (pending & SDMMC_INT_HLE) {
3108 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
3109 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
3113 /* Handle SDIO Interrupts */
3114 for (i = 0; i < host->num_slots; i++) {
3115 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in controller version 2.40a. */
3117 if (host->verid < DW_MMC_240A)
3118 sdio_int = SDMMC_INT_SDIO(i);
3120 sdio_int = SDMMC_INT_SDIO(i + 8);
3122 if (pending & sdio_int) {
3123 mci_writel(host, RINTSTS, sdio_int);
3124 mmc_signal_sdio_irq(slot->mmc);
3130 #ifdef CONFIG_MMC_DW_IDMAC
3131 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
3132 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
3133 /* Handle DMA interrupts */
3134 pending = mci_readl(host, IDSTS);
3135 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
3136 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
3137 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
3138 host->dma_ops->complete((void *)host);
/*
 * Workqueue handler for card-detect changes. For each slot: re-read card
 * presence, optionally re-mux the pins between SDMMC and uart_dbg, abort
 * any in-flight request with -ENOMEDIUM, reset the controller, and tell
 * the MMC core to rescan.
 */
3146 static void dw_mci_work_routine_card(struct work_struct *work)
3148 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
3151 for (i = 0; i < host->num_slots; i++) {
3152 struct dw_mci_slot *slot = host->slot[i];
3153 struct mmc_host *mmc = slot->mmc;
3154 struct mmc_request *mrq;
3157 present = dw_mci_get_cd(mmc);
3159 /* On card insert, switch the shared pins to SDMMC function; on
3160 removal, switch them back to uart_dbg. Only boards that declare
the "udbg" pinctrl tag in DTS need this software switch.
3162 if (!(IS_ERR(host->pins_udbg)) && !(IS_ERR(host->pins_default))) {
3164 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3165 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3166 mmc_hostname(host->mmc));
3168 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3169 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3170 mmc_hostname(host->mmc));
3174 while (present != slot->last_detect_state) {
3175 dev_dbg(&slot->mmc->class_dev, "card %s\n",
3176 present ? "inserted" : "removed");
3177 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
3178 present ? "inserted" : "removed.", mmc_hostname(mmc));
3180 dw_mci_ctrl_all_reset(host);
3181 /* Stop edma when rountine card triggered */
3182 if(cpu_is_rk3036() || cpu_is_rk312x())
3183 if(host->dma_ops && host->dma_ops->stop)
3184 host->dma_ops->stop(host);
3185 rk_send_wakeup_key();//wake up system
3186 spin_lock_bh(&host->lock);
3188 /* Card change detected */
3189 slot->last_detect_state = present;
3191 /* Clean up queue if present */
3194 if (mrq == host->mrq) {
/* Fail the active request according to which phase it is in. */
3198 switch (host->state) {
3201 case STATE_SENDING_CMD:
3202 mrq->cmd->error = -ENOMEDIUM;
3206 case STATE_SENDING_DATA:
3207 mrq->data->error = -ENOMEDIUM;
3208 dw_mci_stop_dma(host);
3210 case STATE_DATA_BUSY:
3211 case STATE_DATA_ERROR:
3212 if (mrq->data->error == -EINPROGRESS)
3213 mrq->data->error = -ENOMEDIUM;
3217 case STATE_SENDING_STOP:
3218 mrq->stop->error = -ENOMEDIUM;
3222 dw_mci_request_end(host, mrq);
/* Request was still queued, not active: fail it directly. */
3224 list_del(&slot->queue_node);
3225 mrq->cmd->error = -ENOMEDIUM;
3227 mrq->data->error = -ENOMEDIUM;
3229 mrq->stop->error = -ENOMEDIUM;
3231 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3232 mrq->cmd->opcode, mmc_hostname(mmc));
3234 spin_unlock(&host->lock);
3235 mmc_request_done(slot->mmc, mrq);
3236 spin_lock(&host->lock);
3240 /* Power down slot */
3242 /* Clear down the FIFO */
3243 dw_mci_fifo_reset(host);
3244 #ifdef CONFIG_MMC_DW_IDMAC
3245 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3246 dw_mci_idmac_reset(host);
3251 spin_unlock_bh(&host->lock);
/* Re-sample presence in case it changed while we were working. */
3253 present = dw_mci_get_cd(mmc);
3256 mmc_detect_change(slot->mmc,
3257 msecs_to_jiffies(host->pdata->detect_delay_ms));
3262 /* given a slot id, find out the device node representing that slot */
/*
 * Walk the controller's DT children and match a child whose "reg"
 * property equals the slot id.
 */
3263 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3265 struct device_node *np;
3269 if (!dev || !dev->of_node)
3272 for_each_child_of_node(dev->of_node, np) {
3273 addr = of_get_property(np, "reg", &len);
3274 if (!addr || (len < sizeof(int)))
3276 if (be32_to_cpup(addr) == slot)
/* Table mapping DT boolean properties to per-slot quirk flags. */
3282 static struct dw_mci_of_slot_quirks {
3285 } of_slot_quirks[] = {
3287 .quirk = "disable-wp",
3288 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/* Collect quirk flags for a slot from its DT node via of_slot_quirks[]. */
3292 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3294 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3299 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3300 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3301 quirks |= of_slot_quirks[idx].id;
3306 /* find out bus-width for a given slot */
/* Read "bus-width" from the controller node (not a per-slot child here). */
3307 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3309 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3315 if (of_property_read_u32(np, "bus-width", &bus_wd))
3316 dev_err(dev, "bus-width property not found, assuming width"
3322 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Look up "pwr-gpios" on the controller node, request it, and drive it
 * low (power enabled). Missing entry is not an error.
 */
3323 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3325 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3331 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3333 /* Having a missing entry is valid; return silently */
3334 if (!gpio_is_valid(gpio))
3337 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3338 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3342 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3348 /* find the write protect gpio for a given slot; or -1 if none specified */
/* Look up and request "wp-gpios" from the slot's DT node. */
3349 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3351 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3357 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3359 /* Having a missing entry is valid; return silently */
3360 if (!gpio_is_valid(gpio))
3363 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3364 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3371 /* find the cd gpio for a given slot */
/* Look up "cd-gpios" and hand it to the MMC slot-gpio helper for CD. */
3372 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3373 struct mmc_host *mmc)
3375 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3381 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3383 /* Having a missing entry is valid; return silently */
3384 if (!gpio_is_valid(gpio))
3387 if (mmc_gpio_request_cd(mmc, gpio, 0))
3388 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * Threaded IRQ handler for a GPIO card-detect line. Re-arms the trigger
 * for the opposite level, wakes the system, and queues the card work
 * unless rescanning is disabled (suspend path handles it instead).
 */
3391 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3393 struct mmc_host *mmc = dev_id;
3394 struct dw_mci_slot *slot = mmc_priv(mmc);
3395 struct dw_mci *host = slot->host;
3396 int gpio_cd = slot->cd_gpio;
/* Flip the trigger level so the next edge (opposite state) interrupts. */
3398 (gpio_get_value(gpio_cd) == 0) ?
3399 irq_set_irq_type(irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT) :
3400 irq_set_irq_type(irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT);
3402 /* wakeup system whether gpio debounce or not */
3403 rk_send_wakeup_key();
3405 /* no need to trigger detect flow when rescan is disabled.
3406 This case happens in dpm, where we just wake the system up and
3407 let suspend_post notify callback handle it.
3409 if(mmc->rescan_disable == 0)
3410 queue_work(host->card_workqueue, &host->card_work);
3412 printk("%s: rescan been disabled!\n", __FUNCTION__);
/*
 * Register a threaded interrupt on the card-detect GPIO and enable it as
 * a wakeup source so card events can wake the system from suspend.
 */
3417 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3418 struct mmc_host *mmc)
3420 struct dw_mci_slot *slot = mmc_priv(mmc);
3421 struct dw_mci *host = slot->host;
3425 /* Having a missing entry is valid; return silently */
3426 if (!gpio_is_valid(gpio))
3429 irq = gpio_to_irq(gpio);
3431 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3432 NULL, dw_mci_gpio_cd_irqt,
3433 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
3437 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3439 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3440 enable_irq_wake(irq);
3443 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/* Release the card-detect GPIO interrupt and the GPIO itself. */
3447 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3448 struct mmc_host *mmc)
3450 if (!gpio_is_valid(gpio))
3453 if (gpio_to_irq(gpio) >= 0) {
3454 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3455 devm_gpio_free(&mmc->class_dev, gpio);
3458 #else /* CONFIG_OF */
/* No-op stubs used when the kernel is built without device-tree support. */
3459 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3463 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3467 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3471 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3475 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3476 struct mmc_host *mmc)
3480 #endif /* CONFIG_OF */
3482 /* @host: dw_mci host prvdata
3483 * Init pinctrl for each platform. Usually we assign
3484 * "default" tag for functional usage, "idle" tag for gpio
3485 * state and "udbg" tag for uart_dbg if any.
/*
 * Skipped entirely for eMMC hosts; for SD hosts a missing "udbg" state
 * is only a warning, and when no card is present the pins start in udbg.
 */
3487 static void dw_mci_init_pinctrl(struct dw_mci *host)
3489 /* Fixme: DON'T TOUCH EMMC SETTING! */
3490 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
3493 /* Get pinctrl for DTS */
3494 host->pinctrl = devm_pinctrl_get(host->dev);
3495 if (IS_ERR(host->pinctrl)) {
3496 dev_err(host->dev, "%s: No pinctrl used!\n",
3497 mmc_hostname(host->mmc));
3501 /* Lookup idle state */
3502 host->pins_idle = pinctrl_lookup_state(host->pinctrl,
3503 PINCTRL_STATE_IDLE);
3504 if (IS_ERR(host->pins_idle)) {
3505 dev_err(host->dev, "%s: No idle tag found!\n",
3506 mmc_hostname(host->mmc));
3508 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3509 dev_err(host->dev, "%s: Idle pinctrl setting failed!\n",
3510 mmc_hostname(host->mmc));
3513 /* Lookup default state */
3514 host->pins_default = pinctrl_lookup_state(host->pinctrl,
3515 PINCTRL_STATE_DEFAULT);
3516 if (IS_ERR(host->pins_default)) {
3517 dev_err(host->dev, "%s: No default pinctrl found!\n",
3518 mmc_hostname(host->mmc));
3520 if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3521 dev_err(host->dev, "%s: Default pinctrl setting failed!\n",
3522 mmc_hostname(host->mmc));
3525 /* Sd card data0/1 may be used for uart_dbg, so were data2/3 for Jtag */
3526 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3527 host->pins_udbg = pinctrl_lookup_state(host->pinctrl, "udbg");
3528 if (IS_ERR(host->pins_udbg)) {
3529 dev_warn(host->dev, "%s: No udbg pinctrl found!\n",
3530 mmc_hostname(host->mmc));
3532 if (!dw_mci_get_cd(host->mmc))
3533 if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
3534 dev_err(host->dev, "%s: Udbg pinctrl setting failed!\n",
3535 mmc_hostname(host->mmc));
/*
 * System PM notifier: gate card rescans across suspend/resume. Before
 * suspend, disable rescans and cancel any pending detect work; after
 * resume, re-enable rescans and trigger a detect.
 */
3540 static int dw_mci_pm_notify(struct notifier_block *notify_block,
3541 unsigned long mode, void *unused)
3543 struct mmc_host *host = container_of(
3544 notify_block, struct mmc_host, pm_notify);
3545 unsigned long flags;
3548 case PM_HIBERNATION_PREPARE:
3549 case PM_SUSPEND_PREPARE:
3550 dev_err(host->parent, "dw_mci_pm_notify: suspend prepare\n");
3551 spin_lock_irqsave(&host->lock, flags);
3552 host->rescan_disable = 1;
3553 spin_unlock_irqrestore(&host->lock, flags);
/* Drop the wakelock held by the cancelled detect work. */
3554 if (cancel_delayed_work(&host->detect))
3555 wake_unlock(&host->detect_wake_lock);
3558 case PM_POST_SUSPEND:
3559 case PM_POST_HIBERNATION:
3560 case PM_POST_RESTORE:
3561 dev_err(host->parent, "dw_mci_pm_notify: post suspend\n");
3562 spin_lock_irqsave(&host->lock, flags);
3563 host->rescan_disable = 0;
3564 spin_unlock_irqrestore(&host->lock, flags);
3565 mmc_detect_change(host, 10);
/*
 * Allocate and register the mmc_host for one slot: read DT properties
 * (frequencies, card type, bus width, caps), apply Rockchip SoC quirks,
 * set transfer-size limits, set up regulators/pinctrl/card-detect, and
 * finally mmc_add_host(). Error paths unwind the PM notifier and the
 * cd-gpio interrupt.
 */
3571 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3573 struct mmc_host *mmc;
3574 struct dw_mci_slot *slot;
3575 const struct dw_mci_drv_data *drv_data = host->drv_data;
3580 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3584 slot = mmc_priv(mmc);
3588 host->slot[id] = slot;
3591 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3593 mmc->ops = &dw_mci_ops;
/* Clock range: DT "clock-freq-min-max" or driver defaults. */
3595 if (of_property_read_u32_array(host->dev->of_node,
3596 "clock-freq-min-max", freq, 2)) {
3597 mmc->f_min = DW_MCI_FREQ_MIN;
3598 mmc->f_max = DW_MCI_FREQ_MAX;
3600 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3601 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3603 mmc->f_min = freq[0];
3604 mmc->f_max = freq[1];
3606 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3607 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3610 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
/* Restrict the slot to the card type declared in DT. */
3612 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3613 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3614 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3615 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3616 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3617 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
/* SD slots register a PM notifier to gate rescans across suspend. */
3619 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3620 mmc->pm_notify.notifier_call = dw_mci_pm_notify;
3621 if (register_pm_notifier(&mmc->pm_notify)) {
3622 printk(KERN_ERR "dw_mci: register_pm_notifier failed\n");
3623 goto err_pm_notifier;
/* SoC-specific GRF pokes: disable force_jtag on the SD pins. */
3627 if (host->cid == DW_MCI_TYPE_RK3368) {
3628 if (IS_ERR(host->grf))
3629 pr_err("rk_sdmmc: dts couldn't find grf regmap for 3368\n");
3631 /* Disable force_jtag */
3632 regmap_write(host->grf, 0x43c, (1<<13)<<16 | (0 << 13));
3633 } else if (cpu_is_rk3288()) {
3634 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
3638 /* We assume only low-level chip use gpio_cd */
3639 if ((soc_is_rk3126() || soc_is_rk3126b() || soc_is_rk3036()) &&
3640 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3641 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3642 if (gpio_is_valid(slot->cd_gpio)) {
3643 /* Request gpio int for card detection */
3644 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3646 slot->cd_gpio = -ENODEV;
3647 dev_err(host->dev, "failed to get your cd-gpios!\n");
/* OCR mask: from platform callback or a wide built-in default. */
3651 if (host->pdata->get_ocr)
3652 mmc->ocr_avail = host->pdata->get_ocr(id);
3655 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3656 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3657 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3658 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3662 * Start with slot power disabled, it will be enabled when a card
3665 if (host->pdata->setpower)
3666 host->pdata->setpower(id, 0);
3668 if (host->pdata->caps)
3669 mmc->caps = host->pdata->caps;
3671 if (host->pdata->pm_caps)
3672 mmc->pm_caps = host->pdata->pm_caps;
/* Pick per-controller caps from drv_data, indexed by the mshc alias. */
3674 if (host->dev->of_node) {
3675 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3679 ctrl_id = to_platform_device(host->dev)->id;
3681 if (drv_data && drv_data->caps)
3682 mmc->caps |= drv_data->caps[ctrl_id];
3683 if (drv_data && drv_data->hold_reg_flag)
3684 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3686 /* set the compatibility of driver. */
3687 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3688 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3690 if (host->pdata->caps2)
3691 mmc->caps2 = host->pdata->caps2;
/* Bus width: platform callback, then DT, then default. */
3693 if (host->pdata->get_bus_wd)
3694 bus_width = host->pdata->get_bus_wd(slot->id);
3695 else if (host->dev->of_node)
3696 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3700 switch (bus_width) {
3702 mmc->caps |= MMC_CAP_8_BIT_DATA;
3704 mmc->caps |= MMC_CAP_4_BIT_DATA;
/* Optional DT capability flags. */
3707 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3708 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3709 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3710 mmc->caps |= MMC_CAP_SDIO_IRQ;
3711 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3712 mmc->caps |= MMC_CAP_HW_RESET;
3713 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3714 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3715 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3716 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3717 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3718 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3719 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3720 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3722 /*Assign pm_caps pass to pm_flags*/
3723 mmc->pm_flags = mmc->pm_caps;
/* Transfer-size limits: platform overrides, else DMA-mode defaults. */
3725 if (host->pdata->blk_settings) {
3726 mmc->max_segs = host->pdata->blk_settings->max_segs;
3727 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3728 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3729 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3730 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3732 /* Useful defaults if platform data is unset. */
3733 #ifdef CONFIG_MMC_DW_IDMAC
3734 mmc->max_segs = host->ring_size;
3735 mmc->max_blk_size = 65536;
3736 mmc->max_blk_count = host->ring_size;
3737 mmc->max_seg_size = 0x1000;
3738 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3739 if(cpu_is_rk3036() || cpu_is_rk312x()){
3740 /* fixup for external dmac setting */
3742 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3743 mmc->max_blk_count = 65535;
3744 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3745 mmc->max_seg_size = mmc->max_req_size;
3749 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3750 mmc->max_blk_count = 512;
3751 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3752 mmc->max_seg_size = mmc->max_req_size;
3753 #endif /* CONFIG_MMC_DW_IDMAC */
3757 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3759 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* Only SD slots use a vmmc regulator here. */
3764 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3765 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3769 if (IS_ERR(host->vmmc)) {
3770 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3773 ret = regulator_enable(host->vmmc);
3776 "failed to enable regulator: %d\n", ret);
3783 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3785 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3786 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3788 dw_mci_init_pinctrl(host);
3789 ret = mmc_add_host(mmc);
3793 #if defined(CONFIG_DEBUG_FS)
3794 dw_mci_init_debugfs(slot);
3797 /* Card initially undetected */
3798 slot->last_detect_state = 1;
/* Error unwinding: drop the PM notifier and the cd-gpio interrupt. */
3802 unregister_pm_notifier(&mmc->pm_notify);
3805 if (gpio_is_valid(slot->cd_gpio))
3806 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * Tear down one slot: run the platform exit hook, unregister the
 * mmc_host, clear the slot pointer, and free the host structure.
 */
3811 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3813 /* Shutdown detect IRQ */
3814 if (slot->host->pdata->exit)
3815 slot->host->pdata->exit(id);
3817 /* Debugfs stuff is cleaned up by mmc core */
3818 mmc_remove_host(slot->mmc);
3819 slot->host->slot[id] = NULL;
3820 mmc_free_host(slot->mmc);
/*
 * Set up DMA for the host: allocate the descriptor page, choose between
 * the external dmac (rk3036/rk312x) and the internal IDMAC, and call the
 * selected backend's init hook. Falls back to PIO on any failure.
 */
3823 static void dw_mci_init_dma(struct dw_mci *host)
3825 /* Alloc memory for sg translation */
3826 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3827 &host->sg_dma, GFP_KERNEL);
3828 if (!host->sg_cpu) {
3829 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3834 memset(host->sg_cpu, 0, PAGE_SIZE);
3837 /* Determine which DMA interface to use */
3838 #if defined(CONFIG_MMC_DW_IDMAC)
3839 if(cpu_is_rk3036() || cpu_is_rk312x()){
3840 host->dma_ops = &dw_mci_edmac_ops;
3841 dev_info(host->dev, "Using external DMA controller.\n");
3843 host->dma_ops = &dw_mci_idmac_ops;
3844 dev_info(host->dev, "Using internal DMA controller.\n");
/* A usable backend must provide init/start/stop/cleanup. */
3851 if (host->dma_ops->init && host->dma_ops->start &&
3852 host->dma_ops->stop && host->dma_ops->cleanup) {
3853 if (host->dma_ops->init(host)) {
3854 dev_err(host->dev, "%s: Unable to initialize "
3855 "DMA Controller.\n", __func__);
3859 dev_err(host->dev, "DMA initialization not found.\n");
3867 dev_info(host->dev, "Using PIO mode.\n");
/*
 * dw_mci_ctrl_reset - assert the given reset bit(s) in CTRL and poll until
 * the hardware clears them, with a 500 ms timeout. Logs on timeout.
 * Returns true on success, false on timeout (return statements are outside
 * this gapped view).
 */
3872 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3874 	unsigned long timeout = jiffies + msecs_to_jiffies(500);
3877 	ctrl = mci_readl(host, CTRL);
3879 	mci_writel(host, CTRL, ctrl);
3881 	/* wait till resets clear */
3883 		ctrl = mci_readl(host, CTRL);
3884 		if (!(ctrl & reset))
3886 	} while (time_before(jiffies, timeout));
3889 		"Timeout resetting block (ctrl reset %#x)\n",
/*
 * dw_mci_fifo_reset - reset only the data FIFO.
 * Stops the sg_miter first because the reset raises a block interrupt
 * whose handler expects the scatter-gather pointer to be quiesced.
 */
3895 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3898 	 * Reseting generates a block interrupt, hence setting
3899 	 * the scatter-gather pointer to NULL.
3902 	sg_miter_stop(&host->sg_miter);
3906 	return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Reset FIFO, controller and DMA blocks in one shot; true on success. */
3909 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3911 	return dw_mci_ctrl_reset(host,
3912 				 SDMMC_CTRL_FIFO_RESET |
3914 				 SDMMC_CTRL_DMA_RESET);
/*
 * dw_mci_rst_pre_suspend - snapshot controller registers before a suspend
 * that powers the controller down. Saves DW_REGS_NUM consecutive registers
 * (4-byte stride from offset 0) into host->regs_buffer, then additionally
 * saves CDTHRCTL. Restored by dw_mci_rst_post_resume().
 */
3917 static void dw_mci_rst_pre_suspend(struct dw_mci *host)
3922 	buffer = host->regs_buffer;
3924 	for (index = 0; index < DW_REGS_NUM ; index++){
3925 		*buffer = mci_readreg(host, index*4);
3926 		MMC_DBG_INFO_FUNC(host->mmc, "[%s] :0x%08x.\n",
3927 				dw_mci_regs[index].name, *buffer);
/* Card-threshold control lives past the contiguous range; save it last. */
3931 	*buffer = mci_readl(host,CDTHRCTL);
3932 	MMC_DBG_INFO_FUNC(host->mmc, "[%s] :0x%08x.\n", "CARDTHRCTL", *buffer);
/*
 * dw_mci_rst_post_resume - write back the register snapshot taken by
 * dw_mci_rst_pre_suspend(), then restore CDTHRCTL.
 */
3935 static void dw_mci_rst_post_resume(struct dw_mci *host)
3940 	buffer = host->regs_buffer;
3942 	for (index = 0; index < DW_REGS_NUM; index++){
3943 		mci_writereg(host, index*4, *buffer);
3946 	mci_writel(host, CDTHRCTL, *buffer);
/* Save/restore hooks used when "controller-power-down" is set in DT. */
3949 static const struct dw_mci_rst_ops dw_mci_pdrst_ops = {
3950 	.pre_suspend = dw_mci_rst_pre_suspend,
3951 	.post_resume = dw_mci_rst_post_resume,
/* DT property name -> quirk flag mapping scanned by dw_mci_parse_dt(). */
3956 static struct dw_mci_of_quirks {
3961 		.quirk	= "broken-cd",
3962 		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * dw_mci_parse_dt - build a dw_mci_board platform-data struct from the
 * device-tree node (CONFIG_OF build).
 * Reads slot count, quirks, fifo depth, detect delay, clock frequency,
 * PM capabilities, speed-mode caps and Rockchip-specific properties, and
 * invokes the variant driver's parse_dt hook if provided.
 * Returns the allocated pdata or an ERR_PTR on failure.
 */
3966 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3968 	struct dw_mci_board *pdata;
3969 	struct device *dev = host->dev;
3970 	struct device_node *np = dev->of_node;
3971 	const struct dw_mci_drv_data *drv_data = host->drv_data;
3973 	u32 clock_frequency;
3975 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3977 		dev_err(dev, "could not allocate memory for pdata\n");
3978 		return ERR_PTR(-ENOMEM);
3981 	/* find out number of slots supported */
3982 	if (of_property_read_u32(dev->of_node, "num-slots",
3983 				&pdata->num_slots)) {
3984 		dev_info(dev, "num-slots property not found, "
3985 				"assuming 1 slot is available\n");
3986 		pdata->num_slots = 1;
/* Translate boolean DT quirk properties into DW_MCI_QUIRK_* flags. */
3990 	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3991 		if (of_get_property(np, of_quirks[idx].quirk, NULL))
3992 			pdata->quirks |= of_quirks[idx].id;
3995 	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3996 		dev_info(dev, "fifo-depth property not found, using "
3997 				"value of FIFOTH register as default\n");
3999 	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
4001 	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
4002 		pdata->bus_hz = clock_frequency;
/* Give the SoC variant driver a chance to parse its own properties. */
4004 	if (drv_data && drv_data->parse_dt) {
4005 		ret = drv_data->parse_dt(host);
4007 			return ERR_PTR(ret);
4010 	if (of_find_property(np, "keep-power-in-suspend", NULL))
4011 		pdata->pm_caps |= MMC_PM_KEEP_POWER;
4013 	if (of_find_property(np, "enable-sdio-wakeup", NULL))
4014 		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
4016 	if (of_find_property(np, "supports-highspeed", NULL))
4017 		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4019 	if (of_find_property(np, "supports-UHS_SDR104", NULL))
4020 		pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4022 	if (of_find_property(np, "supports-DDR_MODE", NULL))
4023 		pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
4025 	if (of_find_property(np, "caps2-mmc-hs200", NULL))
4026 		pdata->caps2 |= MMC_CAP2_HS200;
4028 	if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
4029 		pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
4031 	if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
4032 		pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
4034 	if (of_get_property(np, "cd-inverted", NULL))
4035 		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
4036 	if (of_get_property(np, "bootpart-no-access", NULL))
4037 		pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
/* Power-down-capable controller: allocate the register snapshot buffer
 * and install the save/restore suspend ops declared above. */
4039 	if (of_get_property(np, "controller-power-down", NULL)) {
4040 		host->regs_buffer = (u32 *)devm_kzalloc(host->dev,
4041 				DW_REGS_SIZE, GFP_KERNEL);
4042 		if (!host->regs_buffer) {
4044 				"could not allocate memory for regs_buffer\n");
4045 			return ERR_PTR(-ENOMEM);
4048 		host->rst_ops = &dw_mci_pdrst_ops;
/* Global knob: treat cards as non-removable when controller powers down. */
4049 		mmc_assume_removable = 0;
4055 #else /* CONFIG_OF */
/* Without device-tree support there is no way to build pdata here. */
4056 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
4058 	return ERR_PTR(-EINVAL);
4060 #endif /* CONFIG_OF */
/*
 * dw_mci_probe - bring up the whole controller.
 * Order: parse DT -> read VERID and pick DATA register offset -> acquire
 * and enable clocks (hpclk/hclk/clk_mmc) -> set CIU rate (doubled on
 * >= 2.40a parts to absorb the fixed /2 divider) -> detect host data
 * width from HCON -> full block reset -> DMA init -> mask/clear IRQs ->
 * program FIFO thresholds -> create tasklet/workqueue/IRQ handler ->
 * init each slot -> unmask the working interrupt set.
 * Error-unwind labels are outside this gapped view; cleanup order below
 * (workqueue, dma, regulator, clocks) reflects reverse acquisition.
 */
4062 int dw_mci_probe(struct dw_mci *host)
4064 	const struct dw_mci_drv_data *drv_data = host->drv_data;
4065 	int width, i, ret = 0;
4071 		host->pdata = dw_mci_parse_dt(host);
4072 		if (IS_ERR(host->pdata)) {
4073 			dev_err(host->dev, "platform data not available\n");
4078 	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
4080 			"Platform data must supply select_slot function\n");
4085 	 * In 2.40a spec, Data offset is changed.
4086 	 * Need to check the version-id and set data-offset for DATA register.
4088 	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
4089 	dev_info(host->dev, "Version ID is %04x\n", host->verid);
4091 	if (host->verid < DW_MMC_240A)
4092 		host->data_offset = DATA_OFFSET;
4094 		host->data_offset = DATA_240A_OFFSET;
/* hpclk is optional: failure only logs, enable is conditional. */
4097 	host->hpclk_mmc= devm_clk_get(host->dev, "hpclk_mmc");
4098 	if (IS_ERR(host->hpclk_mmc)) {
4099 		dev_err(host->dev, "failed to get hpclk_mmc\n");
4101 		clk_prepare_enable(host->hpclk_mmc);
/* Bus (AHB) clock is mandatory. */
4105 	host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
4106 	if (IS_ERR(host->hclk_mmc)) {
4107 		dev_err(host->dev, "failed to get hclk_mmc\n");
4108 		ret = PTR_ERR(host->hclk_mmc);
4112 	clk_prepare_enable(host->hclk_mmc);
/* Card interface (CIU) clock is mandatory. */
4115 	host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
4116 	if (IS_ERR(host->clk_mmc)) {
4117 		dev_err(host->dev, "failed to get clk mmc_per\n");
4118 		ret = PTR_ERR(host->clk_mmc);
4122 	host->bus_hz = host->pdata->bus_hz;
4123 	if (!host->bus_hz) {
4124 		dev_err(host->dev,"Platform data must supply bus speed\n");
4129 	if (host->verid < DW_MMC_240A)
4130 		ret = clk_set_rate(host->clk_mmc, host->bus_hz);
4132 		//rockchip: fix divider 2 in clksum before controlller
4133 		ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
4136 		dev_err(host->dev, "failed to set clk mmc\n");
4139 	clk_prepare_enable(host->clk_mmc);
4141 	if (drv_data && drv_data->setup_clock) {
4142 		ret = drv_data->setup_clock(host);
4145 				"implementation specific clock setup failed\n");
4150 	host->quirks = host->pdata->quirks;
4151 	host->irq_state = true;
4152 	host->set_speed = 0;
4154 	host->svi_flags = 0;
4156 	spin_lock_init(&host->lock);
4157 	spin_lock_init(&host->slock);
4159 	INIT_LIST_HEAD(&host->queue);
4161 	 * Get the host data width - this assumes that HCON has been set with
4162 	 * the correct values.
/* HCON[9:7] encodes data width: 0 -> 16-bit, 2 -> 64-bit, else 32-bit. */
4164 	i = (mci_readl(host, HCON) >> 7) & 0x7;
4166 		host->push_data = dw_mci_push_data16;
4167 		host->pull_data = dw_mci_pull_data16;
4169 		host->data_shift = 1;
4170 	} else if (i == 2) {
4171 		host->push_data = dw_mci_push_data64;
4172 		host->pull_data = dw_mci_pull_data64;
4174 		host->data_shift = 3;
4176 		/* Check for a reserved value, and warn if it is */
4178 			 "HCON reports a reserved host data width!\n"
4179 			 "Defaulting to 32-bit access.\n");
4180 		host->push_data = dw_mci_push_data32;
4181 		host->pull_data = dw_mci_pull_data32;
4183 		host->data_shift = 2;
4186 	/* Reset all blocks */
4187 	if (!dw_mci_ctrl_all_reset(host))
4190 	host->dma_ops = host->pdata->dma_ops;
4191 	dw_mci_init_dma(host);
4193 	/* Clear the interrupts for the host controller */
4194 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
4195 	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4197 	/* Put in max timeout */
4198 	mci_writel(host, TMOUT, 0xFFFFFFFF);
4201 	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
4202 	 *                          Tx Mark = fifo_size / 2 DMA Size = 8
4204 	if (!host->pdata->fifo_depth) {
4206 		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
4207 		 * have been overwritten by the bootloader, just like we're
4208 		 * about to do, so if you know the value for your hardware, you
4209 		 * should put it in the platform data.
4211 		fifo_size = mci_readl(host, FIFOTH);
4212 		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
4214 		fifo_size = host->pdata->fifo_depth;
4216 	host->fifo_depth = fifo_size;
4218 		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
4219 	mci_writel(host, FIFOTH, host->fifoth_val);
4221 	/* disable clock to CIU */
4222 	mci_writel(host, CLKENA, 0);
4223 	mci_writel(host, CLKSRC, 0);
4225 	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
4226 	host->card_workqueue = alloc_workqueue("dw-mci-card",
4227 			WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
4228 	if (!host->card_workqueue) {
4232 	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
4233 	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
4234 			       host->irq_flags, "dw-mci", host);
/* Slot count: from pdata, else HCON[5:1] + 1. */
4238 	if (host->pdata->num_slots)
4239 		host->num_slots = host->pdata->num_slots;
4241 		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
4243 	/* We need at least one slot to succeed */
4244 	for (i = 0; i < host->num_slots; i++) {
4245 		ret = dw_mci_init_slot(host, i);
4247 			dev_dbg(host->dev, "slot %d init failed\n", i);
4253 	 * Enable interrupts for command done, data over, data empty, card det,
4254 	 * receive ready and error such as transmit, receive timeout, crc error
4256 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
4257 	regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
4258 	       SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect interrupt only for removable (non-SDIO, non-eMMC) hosts. */
4259 	if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
4260 		&& !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
4261 		regs |= SDMMC_INT_CD;
4263 	mci_writel(host, INTMASK, regs);
4265 	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4267 	dev_info(host->dev, "DW MMC controller at irq %d, "
4268 		 "%d bit host data width, "
4270 		 host->irq, width, fifo_size);
4273 		dev_info(host->dev, "%d slots initialized\n", init_slots);
4275 		dev_dbg(host->dev, "attempted to initialize %d slots, "
4276 					"but failed on all\n", host->num_slots);
4281 	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4282 		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* ---- error unwind (labels stripped from view): reverse order ---- */
4287 	destroy_workqueue(host->card_workqueue);
4290 	if (host->use_dma && host->dma_ops->exit)
4291 		host->dma_ops->exit(host);
4294 		regulator_disable(host->vmmc);
4295 		regulator_put(host->vmmc);
4299 	if (!IS_ERR(host->clk_mmc))
4300 		clk_disable_unprepare(host->clk_mmc);
4302 	if (!IS_ERR(host->hclk_mmc))
4303 		clk_disable_unprepare(host->hclk_mmc);
4306 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - undo dw_mci_probe: mask/clear interrupts, clean up every
 * slot, gate the CIU clock, destroy the card workqueue, drop the PM
 * notifier (SD hosts), tear down DMA, free the card-detect GPIO IRQ,
 * release the vmmc regulator, and disable all clocks that were enabled.
 */
4308 void dw_mci_remove(struct dw_mci *host)
4310 	struct mmc_host *mmc = host->mmc;
4311 	struct dw_mci_slot *slot = mmc_priv(mmc);
4314 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
4315 	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4317 	for(i = 0; i < host->num_slots; i++){
4318 		dev_dbg(host->dev, "remove slot %d\n", i);
4320 			dw_mci_cleanup_slot(host->slot[i], i);
4323 	/* disable clock to CIU */
4324 	mci_writel(host, CLKENA, 0);
4325 	mci_writel(host, CLKSRC, 0);
4327 	destroy_workqueue(host->card_workqueue);
4328 	if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
4329 		unregister_pm_notifier(&host->mmc->pm_notify);
4331 	if (host->use_dma && host->dma_ops->exit)
4332 		host->dma_ops->exit(host);
4334 	if (gpio_is_valid(slot->cd_gpio))
4335 		dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
4338 		regulator_disable(host->vmmc);
4339 		regulator_put(host->vmmc);
4341 	if (!IS_ERR(host->clk_mmc))
4342 		clk_disable_unprepare(host->clk_mmc);
4344 	if (!IS_ERR(host->hclk_mmc))
4345 		clk_disable_unprepare(host->hclk_mmc);
4346 	if (!IS_ERR(host->hpclk_mmc))
4347 		clk_disable_unprepare(host->hpclk_mmc);
4349 EXPORT_SYMBOL(dw_mci_remove);
4353 #ifdef CONFIG_PM_SLEEP
4355  * TODO: we should probably disable the clock to the card in the suspend path.
4357 extern int get_wifi_chip_type(void);
/*
 * dw_mci_suspend - system-suspend handler.
 * Skips (early-returns, body stripped from view) for certain SDIO Wi-Fi
 * chips that must stay powered. For the SD controller: disables the IRQ,
 * switches pinctrl to the idle state, and on SoCs other than
 * rk3126/rk3126b/rk3036 re-routes card detect to a GPIO and arms it as a
 * wake source. Finally masks/clears all controller interrupts and runs the
 * optional register-snapshot pre_suspend hook.
 */
4358 int dw_mci_suspend(struct dw_mci *host)
4360 	int present = dw_mci_get_cd(host->mmc);
4362 	if((host->mmc->restrict_caps &
4363 		RESTRICT_CARD_TYPE_SDIO) &&
4364 		(get_wifi_chip_type() == WIFI_ESP8089 ||
4365 		get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
4369 		regulator_disable(host->vmmc);
4371 	/* Only for sdmmc controller */
4372 	if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4373 		disable_irq(host->irq);
4375 		if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4376 			MMC_DBG_ERR_FUNC(host->mmc,
4377 				"Idle pinctrl setting failed! [%s]",
4378 				mmc_hostname(host->mmc));
4381 		/* Soc rk3126/3036 already in gpio_cd mode */
4382 		if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4383 			dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4384 			enable_irq_wake(host->mmc->slot.cd_irq);
4388 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
4389 	mci_writel(host, INTMASK, 0x00);
4390 	mci_writel(host, CTRL, 0x00);
4392 	if (host->rst_ops &&
4393 		host->rst_ops->pre_suspend)
4394 		host->rst_ops->pre_suspend(host);
4398 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - system-resume handler, mirror of dw_mci_suspend.
 * Restores the register snapshot (post_resume hook), skips for the same
 * always-on SDIO Wi-Fi chips, restores card-detect and pinctrl state for
 * the SD controller, re-enables the vmmc regulator, resets the controller,
 * re-initializes DMA (except on rk3036/rk312x external-dmac parts),
 * reprograms FIFOTH/TMOUT and the interrupt mask, then restores bus
 * settings for slots with MMC_PM_KEEP_POWER.
 */
4400 int dw_mci_resume(struct dw_mci *host)
4404 	struct dw_mci_slot *slot;
4405 	int present = dw_mci_get_cd(host->mmc);
4407 	if (host->rst_ops &&
4408 		host->rst_ops->post_resume)
4409 		host->rst_ops->post_resume(host);
4412 	if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) &&
4413 		(get_wifi_chip_type() == WIFI_ESP8089 ||
4414 		get_wifi_chip_type() > WIFI_AP6XXX_SERIES))
/* SDIO card not marked present -> nothing to resume (body stripped). */
4417 	if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
4418 		slot = mmc_priv(host->mmc);
4419 		if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4423 	/*only for sdmmc controller*/
4424 	if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4425 		/* Soc rk3126/3036 already in gpio_cd mode */
4426 		if (!soc_is_rk3126() && !soc_is_rk3126b() && !soc_is_rk3036()) {
4427 			disable_irq_wake(host->mmc->slot.cd_irq);
4428 			mmc_gpio_free_cd(host->mmc);
/* With a "udbg" pinctrl state, go idle first, then select udbg;
 * otherwise just restore the default pin state. */
4432 		if (!IS_ERR(host->pins_udbg)) {
4433 			if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4434 				MMC_DBG_ERR_FUNC(host->mmc,
4435 					"Idle pinctrl setting failed! [%s]",
4436 					mmc_hostname(host->mmc));
4437 			if (pinctrl_select_state(host->pinctrl, host->pins_udbg) < 0)
4438 				MMC_DBG_ERR_FUNC(host->mmc,
4439 					"Udbg pinctrl setting failed! [%s]",
4440 					mmc_hostname(host->mmc));
4442 			if (pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4443 				MMC_DBG_ERR_FUNC(host->mmc,
4444 					"Default pinctrl setting failed! [%s]",
4445 					mmc_hostname(host->mmc));
4448 		if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4449 			MMC_DBG_ERR_FUNC(host->mmc,
4450 				"Default pinctrl setting failed! [%s]",
4451 				mmc_hostname(host->mmc));
/* Per-SoC GRF tweak; guard condition lives on a line stripped from view. */
4456 		grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4457 	else if(cpu_is_rk3036())
4458 		grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4459 	else if(cpu_is_rk312x())
4460 		/* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4461 		grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4464 		ret = regulator_enable(host->vmmc);
4467 				"failed to enable regulator: %d\n", ret);
4472 	if(!dw_mci_ctrl_all_reset(host)){
/* rk3036/rk312x use the external dmac; its init is not re-run here. */
4477 	if(!(cpu_is_rk3036() || cpu_is_rk312x()))
4478 		if(host->use_dma && host->dma_ops->init)
4479 			host->dma_ops->init(host);
4482 	 * Restore the initial value at FIFOTH register
4483 	 * And Invalidate the prev_blksz with zero
4485 	mci_writel(host, FIFOTH, host->fifoth_val);
4486 	host->prev_blksz = 0;
4487 	/* Put in max timeout */
4488 	mci_writel(host, TMOUT, 0xFFFFFFFF);
4490 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
4491 	regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR |
4492 		SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
4494 	if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4495 		regs |= SDMMC_INT_CD;
4497 	mci_writel(host, INTMASK, regs);
4498 	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4500 	/*only for sdmmc controller*/
4501 	if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)){
4502 		enable_irq(host->irq);
4505 	for(i = 0; i < host->num_slots; i++){
4506 		struct dw_mci_slot *slot = host->slot[i];
4509 		if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4510 			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4511 			dw_mci_setup_bus(slot, true);
4517 EXPORT_SYMBOL(dw_mci_resume);
4518 #endif /* CONFIG_PM_SLEEP */
/* Module entry point: just announces the driver (platform drivers register
 * elsewhere in the tree). */
4520 static int __init dw_mci_init(void)
4522 	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
/* Module exit: empty body (stripped from this view). */
4526 static void __exit dw_mci_exit(void)
4530 module_init(dw_mci_init);
4531 module_exit(dw_mci_exit);
4533 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4534 MODULE_AUTHOR("NXP Semiconductor VietNam");
4535 MODULE_AUTHOR("Imagination Technologies Ltd");
4536 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4537 MODULE_AUTHOR("Bangwang Xie <xbw@rock-chips.com>");
4538 MODULE_LICENSE("GPL v2");