2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/mmc/host.h>
34 #include <linux/mmc/mmc.h>
35 #include <linux/mmc/sd.h>
36 #include <linux/mmc/card.h>
37 #include <linux/mmc/sdio.h>
38 #include <linux/mmc/rk_mmc.h>
39 #include <linux/bitops.h>
40 #include <linux/regulator/consumer.h>
41 #include <linux/workqueue.h>
43 #include <linux/of_gpio.h>
44 #include <linux/mmc/slot-gpio.h>
45 #include <linux/clk-private.h>
46 #include <linux/rockchip/cpu.h>
49 #include "rk_sdmmc_dbg.h"
50 #include <linux/regulator/rockchip_io_vol_domain.h>
51 #include "../../clk/rockchip/clk-ops.h"
53 #define RK_SDMMC_DRIVER_VERSION "Ver 1.12 2014-07-08"
55 /* Common flag combinations */
56 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
57 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
59 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
61 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
62 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
63 #define DW_MCI_SEND_STATUS 1
64 #define DW_MCI_RECV_STATUS 2
65 #define DW_MCI_DMA_THRESHOLD 16
67 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
68 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
70 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
71 #define SDMMC_DATA_TIMEOUT_SD 500
72 #define SDMMC_DATA_TIMEOUT_SDIO 250
73 #define SDMMC_DATA_TIMEOUT_EMMC 2500
75 #define SDMMC_CMD_RTO_MAX_HOLD 200
76 #define SDMMC_WAIT_FOR_UNBUSY 2500
78 #ifdef CONFIG_MMC_DW_IDMAC
79 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
80 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
81 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
85 u32 des0; /* Control Descriptor */
86 #define IDMAC_DES0_DIC BIT(1)
87 #define IDMAC_DES0_LD BIT(2)
88 #define IDMAC_DES0_FD BIT(3)
89 #define IDMAC_DES0_CH BIT(4)
90 #define IDMAC_DES0_ER BIT(5)
91 #define IDMAC_DES0_CES BIT(30)
92 #define IDMAC_DES0_OWN BIT(31)
94 u32 des1; /* Buffer sizes */
95 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
96 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
98 u32 des2; /* buffer 1 physical address */
100 u32 des3; /* buffer 2 physical address */
102 #endif /* CONFIG_MMC_DW_IDMAC */
/* Standard SD/eMMC tuning block pattern for a 4-bit bus (sent by CMD19/CMD21). */
104 static const u8 tuning_blk_pattern_4bit[] = {
105 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
106 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
107 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
108 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
109 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
110 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
111 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
112 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard SD/eMMC tuning block pattern for an 8-bit bus (sent by CMD21). */
115 static const u8 tuning_blk_pattern_8bit[] = {
116 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
117 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
118 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
119 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
120 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
121 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
122 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
123 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
124 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
125 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
126 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
127 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
128 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
129 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
130 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
131 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
134 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
135 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
136 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
137 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
139 /*printk the all register of current host*/
/*
 * Debug aid: walk the dw_mci_regs name/offset table and printk the live
 * value of each controller register via mci_readreg().
 * NOTE(review): this view of the file is missing lines here (the loop's
 * regs++ advance and closing braces are not visible) — incomplete as shown.
 */
141 static int dw_mci_regs_printk(struct dw_mci *host)
143 struct sdmmc_reg *regs = dw_mci_regs;
145 while( regs->name != 0 ){
146 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
149 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
154 #if defined(CONFIG_DEBUG_FS)
/*
 * debugfs "req" file: print the slot's in-flight mmc_request (cmd, data,
 * stop) under host->lock so the snapshot is consistent.
 */
155 static int dw_mci_req_show(struct seq_file *s, void *v)
157 struct dw_mci_slot *slot = s->private;
158 struct mmc_request *mrq;
159 struct mmc_command *cmd;
160 struct mmc_command *stop;
161 struct mmc_data *data;
163 /* Make sure we get a consistent snapshot */
164 spin_lock_bh(&slot->host->lock);
/* NOTE(review): resp[2] is printed twice below; the fourth response word
 * was presumably meant to be resp[3] — confirm against upstream dw_mmc. */
174 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
175 cmd->opcode, cmd->arg, cmd->flags,
176 cmd->resp[0], cmd->resp[1], cmd->resp[2],
177 cmd->resp[2], cmd->error);
179 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
180 data->bytes_xfered, data->blocks,
181 data->blksz, data->flags, data->error);
/* NOTE(review): same resp[2]-twice issue for the stop command. */
184 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
185 stop->opcode, stop->arg, stop->flags,
186 stop->resp[0], stop->resp[1], stop->resp[2],
187 stop->resp[2], stop->error);
190 spin_unlock_bh(&slot->host->lock);
/* debugfs open hook: bind dw_mci_req_show to this inode's private slot. */
195 static int dw_mci_req_open(struct inode *inode, struct file *file)
197 return single_open(file, dw_mci_req_show, inode->i_private);
/* File operations for the debugfs "req" node (single_open/seq_file based). */
200 static const struct file_operations dw_mci_req_fops = {
201 .owner = THIS_MODULE,
202 .open = dw_mci_req_open,
205 .release = single_release,
/*
 * debugfs "regs" file.
 * NOTE(review): these seq_printf calls emit the SDMMC_* register OFFSET
 * macros, not the live register contents — mci_readl(host, ...) was
 * presumably intended. The same quirk exists in old upstream dw_mmc;
 * confirm before trusting this output.
 */
208 static int dw_mci_regs_show(struct seq_file *s, void *v)
210 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
211 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
212 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
213 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
214 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
215 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open hook: bind dw_mci_regs_show to this inode's private host. */
220 static int dw_mci_regs_open(struct inode *inode, struct file *file)
222 return single_open(file, dw_mci_regs_show, inode->i_private);
/* File operations for the debugfs "regs" node (single_open/seq_file based). */
225 static const struct file_operations dw_mci_regs_fops = {
226 .owner = THIS_MODULE,
227 .open = dw_mci_regs_open,
230 .release = single_release,
/*
 * Create the per-slot debugfs tree under the mmc core's debugfs_root:
 * "regs", "req", plus raw views of state/pending_events/completed_events.
 * Failure is non-fatal — only a dev_err is logged.
 */
233 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
235 struct mmc_host *mmc = slot->mmc;
236 struct dw_mci *host = slot->host;
240 root = mmc->debugfs_root;
244 node = debugfs_create_file("regs", S_IRUSR, root, host,
249 node = debugfs_create_file("req", S_IRUSR, root, slot,
254 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
258 node = debugfs_create_x32("pending_events", S_IRUSR, root,
259 (u32 *)&host->pending_events);
263 node = debugfs_create_x32("completed_events", S_IRUSR, root,
264 (u32 *)&host->completed_events);
271 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
273 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the hardware data/response timeout register to its maximum. */
275 static void dw_mci_set_timeout(struct dw_mci *host)
277 /* timeout (maximum) */
278 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMD register flags:
 * response-expected/long/CRC bits, data-expected/stream/write bits, and
 * stop / wait-prev-data handling, then let the SoC drv_data hook adjust.
 * Returns the assembled CMDR value (cmdr is initialized on a line not
 * visible in this chunk — presumably cmdr = cmd->opcode).
 */
281 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
283 struct mmc_data *data;
284 struct dw_mci_slot *slot = mmc_priv(mmc);
285 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
287 cmd->error = -EINPROGRESS;
291 if (cmdr == MMC_STOP_TRANSMISSION)
292 cmdr |= SDMMC_CMD_STOP;
294 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
296 if (cmd->flags & MMC_RSP_PRESENT) {
297 /* We expect a response, so set this bit */
298 cmdr |= SDMMC_CMD_RESP_EXP;
299 if (cmd->flags & MMC_RSP_136)
300 cmdr |= SDMMC_CMD_RESP_LONG;
303 if (cmd->flags & MMC_RSP_CRC)
304 cmdr |= SDMMC_CMD_RESP_CRC;
308 cmdr |= SDMMC_CMD_DAT_EXP;
309 if (data->flags & MMC_DATA_STREAM)
310 cmdr |= SDMMC_CMD_STRM_MODE;
311 if (data->flags & MMC_DATA_WRITE)
312 cmdr |= SDMMC_CMD_DAT_WR;
315 if (drv_data && drv_data->prepare_command)
316 drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build the stop/abort command used to terminate a data transfer:
 * CMD12 (STOP_TRANSMISSION) for block read/write, or CMD52 with the
 * CCCR ABORT bit for SDIO CMD53 transfers. Fills host->stop_abort and
 * returns the corresponding CMDR flags (stop + response + CRC).
 */
322 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
324 struct mmc_command *stop;
330 stop = &host->stop_abort;
332 memset(stop, 0, sizeof(struct mmc_command));
334 if (cmdr == MMC_READ_SINGLE_BLOCK ||
335 cmdr == MMC_READ_MULTIPLE_BLOCK ||
336 cmdr == MMC_WRITE_BLOCK ||
337 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
338 stop->opcode = MMC_STOP_TRANSMISSION;
340 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
341 } else if (cmdr == SD_IO_RW_EXTENDED) {
342 stop->opcode = SD_IO_RW_DIRECT;
/* CMD52 write (bit 31) to CCCR ABORT register, preserving the function
 * number from the original CMD53 argument (bits 30:28). */
343 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
344 ((cmd->arg >> 28) & 0x7);
345 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
350 cmdr = stop->opcode | SDMMC_CMD_STOP |
351 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Write CMDARG and CMD (with the START bit) to actually issue a command.
 * CMD11 (voltage switch) gets special handling: low-power clock gating is
 * disabled first and the VOLT_SWITCH flag is added so the controller runs
 * the voltage-switch state machine.
 */
356 static void dw_mci_start_command(struct dw_mci *host,
357 struct mmc_command *cmd, u32 cmd_flags)
359 struct dw_mci_slot *slot = host->slot[0];
360 /*temporality fix slot[0] due to host->num_slots equal to 1*/
362 host->pre_cmd = host->cmd;
365 "start command: ARGR=0x%08x CMDR=0x%08x\n",
366 cmd->arg, cmd_flags);
368 if(SD_SWITCH_VOLTAGE == cmd->opcode){
369 /*confirm non-low-power mode*/
370 mci_writel(host, CMDARG, 0);
371 dw_mci_disable_low_power(slot);
373 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
374 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
376 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
379 mci_writel(host, CMDARG, cmd->arg);
382 /* fix the value to 1 in some Soc,for example RK3188. */
383 if(host->mmc->hold_reg_flag)
384 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
386 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the data transfer's stop command using the pre-computed stop_cmdr. */
390 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
392 dw_mci_start_command(host, data->stop, host->stop_cmdr);
395 /* DMA interface functions */
/*
 * Abort any in-flight DMA and mark the transfer complete so the tasklet
 * state machine can advance (PIO mode is signalled by the interrupt
 * handler instead).
 */
396 static void dw_mci_stop_dma(struct dw_mci *host)
398 if (host->using_dma) {
399 host->dma_ops->stop(host);
400 host->dma_ops->cleanup(host);
403 /* Data transfer was stopped by the interrupt handler */
404 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Map the mmc_data direction flag onto the DMA API direction constant. */
407 static int dw_mci_get_dma_dir(struct mmc_data *data)
409 if (data->flags & MMC_DATA_WRITE)
410 return DMA_TO_DEVICE;
412 return DMA_FROM_DEVICE;
415 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the current transfer's scatterlist — but only when it was mapped
 * here rather than by the pre_req hook (host_cookie == 0).
 */
416 static void dw_mci_dma_cleanup(struct dw_mci *host)
418 struct mmc_data *data = host->data;
421 if (!data->host_cookie)
422 dma_unmap_sg(host->dev,
425 dw_mci_get_dma_dir(data));
/* Trigger a software reset of the internal DMA controller via BMOD. */
428 static void dw_mci_idmac_reset(struct dw_mci *host)
430 u32 bmod = mci_readl(host, BMOD);
431 /* Software reset of DMA */
432 bmod |= SDMMC_IDMAC_SWRESET;
433 mci_writel(host, BMOD, bmod);
/*
 * Stop the internal DMAC: detach it from the host controller (CTRL),
 * reset the DMA interface, then disable and soft-reset the IDMAC (BMOD).
 */
436 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
440 /* Disable and reset the IDMAC interface */
441 temp = mci_readl(host, CTRL);
442 temp &= ~SDMMC_CTRL_USE_IDMAC;
443 temp |= SDMMC_CTRL_DMA_RESET;
444 mci_writel(host, CTRL, temp);
446 /* Stop the IDMAC running */
447 temp = mci_readl(host, BMOD);
448 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
449 temp |= SDMMC_IDMAC_SWRESET;
450 mci_writel(host, BMOD, temp);
/*
 * IDMAC completion callback: clean up the mapping and, when data is still
 * valid (card not removed), mark EVENT_XFER_COMPLETE and kick the tasklet.
 */
453 static void dw_mci_idmac_complete_dma(void *arg)
455 struct dw_mci *host = arg;
456 struct mmc_data *data = host->data;
458 dev_vdbg(host->dev, "DMA complete\n");
461 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
462 host->mrq->cmd->opcode,host->mrq->cmd->arg,
463 data->blocks,data->blksz,mmc_hostname(host->mmc));
466 host->dma_ops->cleanup(host);
469 * If the card was removed, data will be NULL. No point in trying to
470 * send the stop command or waiting for NBUSY in this case.
473 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
474 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the mapped scatterlist: one chained
 * descriptor per sg entry (OWN set, per-descriptor interrupt disabled),
 * then mark the first descriptor FD and the last one LD with interrupts
 * enabled so only the final descriptor raises completion.
 */
478 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
482 struct idmac_desc *desc = host->sg_cpu;
484 for (i = 0; i < sg_len; i++, desc++) {
485 unsigned int length = sg_dma_len(&data->sg[i]);
486 u32 mem_addr = sg_dma_address(&data->sg[i]);
488 /* Set the OWN bit and disable interrupts for this descriptor */
489 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
492 IDMAC_SET_BUFFER1_SIZE(desc, length);
494 /* Physical address to DMA to/from */
495 desc->des2 = mem_addr;
498 /* Set first descriptor */
500 desc->des0 |= IDMAC_DES0_FD;
502 /* Set last descriptor */
/* NOTE(review): host->sg_cpu is an idmac_desc*, so the byte-offset math
 * below relies on the (i - 1) * sizeof() addition being applied to a
 * char-sized base — confirm the cast on the missing portion of this line. */
503 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
504 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
505 desc->des0 |= IDMAC_DES0_LD;
/*
 * Kick off an IDMAC transfer: build descriptors, route the controller to
 * the internal DMAC, enable it, and write the poll-demand register.
 */
510 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
514 dw_mci_translate_sglist(host, host->data, sg_len);
516 /* Select IDMAC interface */
517 temp = mci_readl(host, CTRL);
518 temp |= SDMMC_CTRL_USE_IDMAC;
519 mci_writel(host, CTRL, temp);
523 /* Enable the IDMAC */
524 temp = mci_readl(host, BMOD);
525 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
526 mci_writel(host, BMOD, temp);
528 /* Start it running */
529 mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: forward-link a page-sized descriptor ring, close
 * it into a ring with the end-of-ring descriptor, reset the IDMAC, unmask
 * only TX/RX-complete interrupts, and program the descriptor base address.
 */
532 static int dw_mci_idmac_init(struct dw_mci *host)
534 struct idmac_desc *p;
537 /* Number of descriptors in the ring buffer */
538 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
540 /* Forward link the descriptor list */
541 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
542 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
544 /* Set the last descriptor as the end-of-ring descriptor */
545 p->des3 = host->sg_dma;
546 p->des0 = IDMAC_DES0_ER;
548 dw_mci_idmac_reset(host);
550 /* Mask out interrupts - get Tx & Rx complete only */
551 mci_writel(host, IDSTS, IDMAC_INT_CLR);
552 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
555 /* Set the descriptor base address */
556 mci_writel(host, DBADDR, host->sg_dma);
/* DMA operations vector for the internal DMAC (IDMAC) backend. */
560 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
561 .init = dw_mci_idmac_init,
562 .start = dw_mci_idmac_start_dma,
563 .stop = dw_mci_idmac_stop_dma,
564 .complete = dw_mci_idmac_complete_dma,
565 .cleanup = dw_mci_dma_cleanup,
/*
 * External-DMA variant of cleanup: unmap the scatterlist unless it was
 * pre-mapped by the pre_req hook (host_cookie != 0).
 */
569 static void dw_mci_edma_cleanup(struct dw_mci *host)
571 struct mmc_data *data = host->data;
574 if (!data->host_cookie)
575 dma_unmap_sg(host->dev,
576 data->sg, data->sg_len,
577 dw_mci_get_dma_dir(data));
/* Abort all outstanding descriptors on the external dmaengine channel. */
580 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
582 dmaengine_terminate_all(host->dms->ch);
/*
 * dmaengine completion callback: sync the sg list back for the CPU on
 * reads, clean up the mapping, and signal EVENT_XFER_COMPLETE to the
 * tasklet state machine.
 */
585 static void dw_mci_edmac_complete_dma(void *arg)
587 struct dw_mci *host = arg;
588 struct mmc_data *data = host->data;
590 dev_vdbg(host->dev, "DMA complete\n");
593 if(data->flags & MMC_DATA_READ)
594 /* Invalidate cache after read */
595 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
596 data->sg_len, DMA_FROM_DEVICE);
598 host->dma_ops->cleanup(host);
601 * If the card was removed, data will be NULL. No point in trying to
602 * send the stop command or waiting for NBUSY in this case.
605 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
606 tasklet_schedule(&host->tasklet);
610 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
612 struct dma_slave_config slave_config;
613 struct dma_async_tx_descriptor *desc = NULL;
614 struct scatterlist *sgl = host->data->sg;
615 u32 sg_elems = host->data->sg_len;
618 /* Set external dma config: burst size, burst width*/
619 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
620 slave_config.src_addr = slave_config.dst_addr;
621 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
622 slave_config.src_addr_width = slave_config.dst_addr_width;
624 /* Match FIFO dma burst MSIZE with external dma config*/
625 slave_config.dst_maxburst = ((host->fifoth_val) >> 28) && 0x7;
626 slave_config.src_maxburst = slave_config.dst_maxburst;
628 if(host->data->flags & MMC_DATA_WRITE){
629 slave_config.direction = DMA_MEM_TO_DEV;
630 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
632 dev_err(host->dev, "error in dw_mci edma configuration.\n");
636 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
637 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
639 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
642 /* Set dw_mci_edmac_complete_dma as callback */
643 desc->callback = dw_mci_edmac_complete_dma;
644 desc->callback_param = (void *)host;
645 dmaengine_submit(desc);
647 /* Flush cache before write */
648 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
649 sg_elems, DMA_TO_DEVICE);
650 dma_async_issue_pending(host->dms->ch);
653 slave_config.direction = DMA_DEV_TO_MEM;
654 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
656 dev_err(host->dev, "error in dw_mci edma configuration.\n");
659 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
660 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
662 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
665 /* set dw_mci_edmac_complete_dma as callback */
666 desc->callback = dw_mci_edmac_complete_dma;
667 desc->callback_param = (void *)host;
668 dmaengine_submit(desc);
669 dma_async_issue_pending(host->dms->ch);
/*
 * Allocate the external-DMA slave state and request the "dw_mci" channel
 * named in the device tree.
 * NOTE(review): the kmalloc() result is dereferenced without a NULL
 * check — an allocation failure here oopses. Should bail out (or use
 * devm_kzalloc) before touching host->dms->ch.
 */
673 static int dw_mci_edmac_init(struct dw_mci *host)
675 /* 1) request external dma channel, SHOULD decide chn in dts */
676 host->dms = (struct dw_mci_dma_slave *)kmalloc(sizeof(struct dw_mci_dma_slave),GFP_KERNEL);
677 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
679 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
680 host->dms->ch->chan_id);
/* Release the external dmaengine channel acquired by dw_mci_edmac_init().
 * NOTE(review): host->dms itself does not appear to be freed in the lines
 * visible here — possible leak; confirm against the full function. */
693 static void dw_mci_edmac_exit(struct dw_mci *host)
695 dma_release_channel(host->dms->ch);
/* DMA operations vector for the external (generic dmaengine) backend. */
698 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
699 .init = dw_mci_edmac_init,
700 .exit = dw_mci_edmac_exit,
701 .start = dw_mci_edmac_start_dma,
702 .stop = dw_mci_edmac_stop_dma,
703 .complete = dw_mci_edmac_complete_dma,
704 .cleanup = dw_mci_edma_cleanup,
706 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * Map the request's scatterlist for DMA, refusing "complex" transfers:
 * anything below DW_MCI_DMA_THRESHOLD bytes or with non-word-aligned
 * sg offsets/lengths falls back to PIO (negative return). When called
 * from the pre_req path (next != 0) the mapping is cached in
 * data->host_cookie; a later non-next call reuses it.
 */
708 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
709 struct mmc_data *data,
712 struct scatterlist *sg;
713 unsigned int i, sg_len;
715 if (!next && data->host_cookie)
716 return data->host_cookie;
719 * We don't do DMA on "complex" transfers, i.e. with
720 * non-word-aligned buffers or lengths. Also, we don't bother
721 * with all the DMA setup overhead for short transfers.
723 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
729 for_each_sg(data->sg, sg, data->sg_len, i) {
730 if (sg->offset & 3 || sg->length & 3)
734 sg_len = dma_map_sg(host->dev,
737 dw_mci_get_dma_dir(data));
742 data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: pre-map the next request's sg list so the DMA
 * mapping cost overlaps with the current transfer. A stale cookie is
 * cleared first; on mapping failure the cookie stays 0 (PIO fallback).
 */
747 static void dw_mci_pre_req(struct mmc_host *mmc,
748 struct mmc_request *mrq,
751 struct dw_mci_slot *slot = mmc_priv(mmc);
752 struct mmc_data *data = mrq->data;
754 if (!slot->host->use_dma || !data)
757 if (data->host_cookie) {
758 data->host_cookie = 0;
762 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
763 data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: undo a pre_req mapping once the request has
 * finished, then clear the cookie.
 */
766 static void dw_mci_post_req(struct mmc_host *mmc,
767 struct mmc_request *mrq,
770 struct dw_mci_slot *slot = mmc_priv(mmc);
771 struct mmc_data *data = mrq->data;
773 if (!slot->host->use_dma || !data)
776 if (data->host_cookie)
777 dma_unmap_sg(slot->host->dev,
780 dw_mci_get_dma_dir(data));
781 data->host_cookie = 0;
/*
 * Pick the largest DMA multiple-transaction size (MSIZE) compatible with
 * the current block size and FIFO depth, then program FIFOTH with the
 * matching RX/TX watermarks. Falls back to MSIZE=1 (idx 0 initial
 * values) when the block size is not a multiple of the FIFO width.
 */
784 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
786 #ifdef CONFIG_MMC_DW_IDMAC
787 unsigned int blksz = data->blksz;
788 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
789 u32 fifo_width = 1 << host->data_shift;
790 u32 blksz_depth = blksz / fifo_width, fifoth_val;
791 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
792 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
794 tx_wmark = (host->fifo_depth) / 2;
795 tx_wmark_invers = host->fifo_depth - tx_wmark;
799 * if blksz is not a multiple of the FIFO width
801 if (blksz % fifo_width) {
808 if (!((blksz_depth % mszs[idx]) ||
809 (tx_wmark_invers % mszs[idx]))) {
811 rx_wmark = mszs[idx] - 1;
816 * If idx is '0', it won't be tried
817 * Thus, initial values are uesed
820 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
821 mci_writel(host, FIFOTH, fifoth_val);
/*
 * Configure the card-read-threshold (CDTHRCTL): enabled with thld_size
 * for HS200 / SDR104 timings when the block fits the FIFO, otherwise
 * disabled. Read-direction only (WARN otherwise).
 */
825 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
827 unsigned int blksz = data->blksz;
828 u32 blksz_depth, fifo_depth;
831 WARN_ON(!(data->flags & MMC_DATA_READ));
833 if (host->timing != MMC_TIMING_MMC_HS200 &&
834 host->timing != MMC_TIMING_UHS_SDR104)
837 blksz_depth = blksz / (1 << host->data_shift);
838 fifo_depth = host->fifo_depth;
840 if (blksz_depth > fifo_depth)
844 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
845 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
846 * Currently just choose blksz.
849 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
853 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to submit a data transfer via DMA. Returns non-zero when DMA is
 * unavailable or the sg mapping was refused, in which case the caller
 * falls back to PIO. On success: adjusts FIFOTH for the new block size,
 * resets the DMA interface, enables it in CTRL, masks RX/TX IRQs (DMA
 * handles the FIFO), and starts the backend.
 */
856 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
863 /* If we don't have a channel, we can't do DMA */
867 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
869 host->dma_ops->stop(host);
876 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
877 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
881 * Decide the MSIZE and RX/TX Watermark.
882 * If current block size is same with previous size,
883 * no need to update fifoth.
885 if (host->prev_blksz != data->blksz)
886 dw_mci_adjust_fifoth(host, data);
/* Deliberately NOT a FIFO reset here — see the author's note: a FIFO
 * reset at this point was found dangerous on this SoC. */
889 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
891 /* Enable the DMA interface */
892 temp = mci_readl(host, CTRL);
893 temp |= SDMMC_CTRL_DMA_ENABLE;
894 mci_writel(host, CTRL, temp);
896 /* Disable RX/TX IRQs, let DMA handle it */
897 temp = mci_readl(host, INTMASK);
898 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
899 mci_writel(host, INTMASK, temp);
901 host->dma_ops->start(host, sg_len);
/*
 * Prepare the controller for a data phase: set direction status, program
 * the read threshold for reads, then attempt DMA. If DMA submission is
 * refused, fall back to PIO: start the sg_miter, re-enable RX/TX FIFO
 * interrupts, disable the DMA engine in CTRL, and restore the initial
 * FIFOTH (invalidating prev_blksz so a later DMA transfer re-tunes it).
 */
906 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
910 data->error = -EINPROGRESS;
/* Deliberately NOT a FIFO reset — see the author's note below. */
917 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
919 if (data->flags & MMC_DATA_READ) {
920 host->dir_status = DW_MCI_RECV_STATUS;
921 dw_mci_ctrl_rd_thld(host, data);
923 host->dir_status = DW_MCI_SEND_STATUS;
926 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
927 data->blocks, data->blksz, mmc_hostname(host->mmc));
929 if (dw_mci_submit_data_dma(host, data)) {
930 int flags = SG_MITER_ATOMIC;
931 if (host->data->flags & MMC_DATA_READ)
932 flags |= SG_MITER_TO_SG;
934 flags |= SG_MITER_FROM_SG;
936 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
938 host->part_buf_start = 0;
939 host->part_buf_count = 0;
941 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
942 temp = mci_readl(host, INTMASK);
943 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
944 mci_writel(host, INTMASK, temp);
946 temp = mci_readl(host, CTRL);
947 temp &= ~SDMMC_CTRL_DMA_ENABLE;
948 mci_writel(host, CTRL, temp);
951 * Use the initial fifoth_val for PIO mode.
952 * If next issued data may be transfered by DMA mode,
953 * prev_blksz should be invalidated.
955 mci_writel(host, FIFOTH, host->fifoth_val);
956 host->prev_blksz = 0;
959 * Keep the current block size.
960 * It will be used to decide whether to update
961 * fifoth register next time.
963 host->prev_blksz = data->blksz;
/*
 * Issue a low-level controller command (e.g. clock update) and busy-wait
 * for the hardware to accept it: first wait for the card/controller to
 * leave busy state (bounded by SDMMC_WAIT_FOR_UNBUSY ms), then poll the
 * CMD register until the START bit self-clears. Timeouts are logged but
 * not propagated to the caller.
 */
967 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
969 struct dw_mci *host = slot->host;
970 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
971 unsigned int cmd_status = 0;
972 #ifdef SDMMC_WAIT_FOR_UNBUSY
974 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
976 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
978 ret = time_before(jiffies, timeout);
979 cmd_status = mci_readl(host, STATUS);
980 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
984 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
985 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
988 mci_writel(host, CMDARG, arg);
990 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
/* Clock-update commands complete quickly; use a short 50ms budget. */
991 if(cmd & SDMMC_CMD_UPD_CLK)
992 timeout = jiffies + msecs_to_jiffies(50);
994 timeout = jiffies + msecs_to_jiffies(500);
995 while (time_before(jiffies, timeout)) {
996 cmd_status = mci_readl(host, CMD);
997 if (!(cmd_status & SDMMC_CMD_START))
1000 dev_err(&slot->mmc->class_dev,
1001 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1002 cmd, arg, cmd_status);
/*
 * Program the card clock and bus width for a slot.
 *
 * Sequence (per the DesignWare databook): gate CLKENA, send a
 * clock-update command, compute/program CLKDIV, re-send the update
 * command, then re-enable the clock (with low-power gating unless SDIO
 * interrupts are unmasked). Rockchip-specific workarounds re-rate
 * clk_mmc for slow init clocks and for div==0 cases where the DTS clock
 * was set too low for the requested card clock. Finally CTYPE is
 * written with the slot's bus width.
 *
 * NOTE(review): this chunk is missing many interior lines (braces,
 * else-branches, the div initialization) — the control flow below is
 * incomplete as displayed.
 */
1005 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1007 struct dw_mci *host = slot->host;
1008 unsigned int tempck,clock = slot->clock;
1013 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1014 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1017 mci_writel(host, CLKENA, 0);
1018 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* During a signal-voltage switch (svi_flags != 0) the clock-update
 * command is still sent, just via the alternate branch below. */
1019 if(host->svi_flags == 0)
1020 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1022 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1024 } else if (clock != host->current_speed || force_clkinit) {
1025 div = host->bus_hz / clock;
1026 if (host->bus_hz % clock && host->bus_hz > clock)
1028 * move the + 1 after the divide to prevent
1029 * over-clocking the card.
1033 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1035 if ((clock << div) != slot->__clk_old || force_clkinit) {
1036 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1037 dev_info(&slot->mmc->class_dev,
1038 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1039 slot->id, host->bus_hz, clock,
1042 host->set_speed = tempck;
1043 host->set_div = div;
1047 mci_writel(host, CLKENA, 0);
1048 mci_writel(host, CLKSRC, 0);
1052 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1054 if(clock <= 400*1000){
1055 MMC_DBG_BOOT_FUNC(host->mmc,
1056 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1057 clock * 2, mmc_hostname(host->mmc));
1058 /* clk_mmc will change parents to 24MHz xtal*/
1059 clk_set_rate(host->clk_mmc, clock * 2);
1062 host->set_div = div;
1066 MMC_DBG_BOOT_FUNC(host->mmc,
1067 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1068 mmc_hostname(host->mmc));
1071 MMC_DBG_ERR_FUNC(host->mmc,
1072 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1073 mmc_hostname(host->mmc));
1075 host->set_div = div;
1076 host->bus_hz = host->set_speed * 2;
1077 MMC_DBG_BOOT_FUNC(host->mmc,
1078 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1079 div, host->bus_hz, mmc_hostname(host->mmc));
1081 /* BUG may be here, come on, Linux BSP engineer looks!
1082 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1083 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1084 some oops happened like that:
1085 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1086 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1087 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1088 mmc0: new high speed DDR MMC card at address 0001
1089 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1091 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1092 mmcblk0: retrying using single block read
1093 mmcblk0: error -110 sending status command, retrying
1095 How to: If eMMC HW version < 4.51, or > 4.51 but no caps2-mmc-hs200 support in dts
1096 Please set dts emmc clk to 100M or 150M, I will workaround it!
1099 if (host->verid < DW_MMC_240A)
1100 clk_set_rate(host->clk_mmc,(host->bus_hz));
1102 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1108 /* set clock to desired speed */
1109 mci_writel(host, CLKDIV, div);
1113 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1115 /* enable clock; only low power if no SDIO */
1116 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1118 if (host->verid < DW_MMC_240A)
1119 sdio_int = SDMMC_INT_SDIO(slot->id);
1121 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1123 if (!(mci_readl(host, INTMASK) & sdio_int))
1124 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1125 mci_writel(host, CLKENA, clk_en_a);
1129 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1130 /* keep the clock with reflecting clock dividor */
1131 slot->__clk_old = clock << div;
1134 host->current_speed = clock;
1136 if(slot->ctype != slot->pre_ctype)
1137 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1139 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1140 mmc_hostname(host->mmc));
1141 slot->pre_ctype = slot->ctype;
1143 /* Set the current slot bus width */
1144 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1147 extern struct mmc_card *this_card;
/*
 * Poll STATUS until the DATA_BUSY/MC_BUSY bits clear, with a timeout
 * scaled by card type (SDIO default, SD, eMMC) and, for MMC_ERASE,
 * by the card's EXT_CSD erase timeout fields.
 *
 * NOTE(review), two visible defects in the erase path:
 *  - "(host->cmd->arg & (0x1 << 31)) == 1" can never be true: the mask
 *    yields 0 or 0x80000000, so the secure-erase branch is dead. It
 *    should test "!= 0" (or set se_flag there).
 *  - the erase_group_def test ends in a stray ';', making it a no-op;
 *    the intended body (presumably the se_flag/timeout selection) is
 *    disconnected from the condition.
 */
1148 static void dw_mci_wait_unbusy(struct dw_mci *host)
1151 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1152 unsigned long time_loop;
1153 unsigned int status;
1156 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1158 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1159 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1160 /* Special care for (secure)erase timeout calculation */
1162 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1165 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1) ;
1166 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1167 300000 * (this_card->ext_csd.sec_erase_mult)) :
1168 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1172 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1173 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1174 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1175 timeout = SDMMC_DATA_TIMEOUT_SD;
1178 time_loop = jiffies + msecs_to_jiffies(timeout);
1180 status = mci_readl(host, STATUS);
1181 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1183 } while (time_before(jiffies, time_loop));
1188 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/*
 * mmc_host_ops.card_busy for the Rockchip voltage-switch sequence.
 * Return convention (from the original comment):
 *   0 -- status is busy.
 *   1 -- status is unbusy.
 * svi_flags toggles between the two phases of the CMD11 voltage-switch
 * handshake; the body between the branches is not fully visible here.
 */
1191 * 0--status is busy.
1192 * 1--status is unbusy.
1194 int dw_mci_card_busy(struct mmc_host *mmc)
1196 struct dw_mci_slot *slot = mmc_priv(mmc);
1197 struct dw_mci *host = slot->host;
1199 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1200 host->svi_flags, mmc_hostname(host->mmc));
1203 if(host->svi_flags == 0){
1205 host->svi_flags = 1;
1206 return host->svi_flags;
1209 host->svi_flags = 0;
1210 return host->svi_flags;
/*
 * Core request starter (caller holds host->lock): select the slot, wait
 * for the card to go unbusy, reset per-request event/state bookkeeping,
 * program BYTCNT/BLKSIZ and submit data if present, then fire the
 * (possibly INIT-flagged first) command. Also pre-computes stop_cmdr
 * for requests that carry an explicit stop command.
 */
1216 static void __dw_mci_start_request(struct dw_mci *host,
1217 struct dw_mci_slot *slot,
1218 struct mmc_command *cmd)
1220 struct mmc_request *mrq;
1221 struct mmc_data *data;
1225 if (host->pdata->select_slot)
1226 host->pdata->select_slot(slot->id);
1228 host->cur_slot = slot;
1231 dw_mci_wait_unbusy(host);
1233 host->pending_events = 0;
1234 host->completed_events = 0;
1235 host->data_status = 0;
1239 dw_mci_set_timeout(host);
1240 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1241 mci_writel(host, BLKSIZ, data->blksz);
1244 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1246 /* this is the first command, send the initialization clock */
1247 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1248 cmdflags |= SDMMC_CMD_INIT;
1251 dw_mci_submit_data(host, data);
1255 dw_mci_start_command(host, cmd, cmdflags);
1258 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * Start the slot's queued request, preferring the set-block-count (sbc)
 * command when present so CMD23-managed transfers begin correctly.
 */
1261 static void dw_mci_start_request(struct dw_mci *host,
1262 struct dw_mci_slot *slot)
1264 struct mmc_request *mrq = slot->mrq;
1265 struct mmc_command *cmd;
1267 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1268 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1270 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1271 __dw_mci_start_request(host, slot, cmd);
1274 /* must be called with host->lock held */
/*
 * Queue @mrq for @slot.  If the state machine is idle the request is
 * started immediately; otherwise the slot is appended to host->queue
 * and will be picked up by dw_mci_request_end().
 */
1275 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1276 struct mmc_request *mrq)
1278 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1283 if (host->state == STATE_IDLE) {
1284 host->state = STATE_SENDING_CMD;
1285 dw_mci_start_request(host, slot);
/* Controller busy with another slot's request: defer to the queue. */
1287 list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops .request entry point.  Rejects the request with
 * -ENOMEDIUM when no card is present, otherwise hands it to
 * dw_mci_queue_request().  The presence check and the queueing are done
 * under one host->lock critical section (see comment below).
 */
1291 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1293 struct dw_mci_slot *slot = mmc_priv(mmc);
1294 struct dw_mci *host = slot->host;
1299  * The check for card presence and queueing of the request must be
1300  * atomic, otherwise the card could be removed in between and the
1301  * request wouldn't fail until another card was inserted.
1303 spin_lock_bh(&host->lock);
1305 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1306 spin_unlock_bh(&host->lock);
1307 mrq->cmd->error = -ENOMEDIUM;
1308 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1309 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
/* Complete the request immediately; lock already dropped above. */
1311 mmc_request_done(mmc, mrq);
1315 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1316 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1318 dw_mci_queue_request(host, slot, mrq);
1320 spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops .set_ios: apply bus width, timing, clock and power
 * changes requested by the core.
 *
 * Visible behavior:
 *  - optionally busy-waits (SDMMC_WAIT_FOR_UNBUSY) for the controller's
 *    DATA_BUSY/MC_BUSY status bits to clear before touching registers,
 *    with a longer timeout while a voltage switch is in flight
 *    (svi_flags == 1);
 *  - maps ios->bus_width onto slot->ctype (1/4/8-bit);
 *  - sets/clears the per-slot DDR bit in UHS_REG for UHS DDR50 timing;
 *  - mirrors ios->clock into slot->clock (see comment below) and lets
 *    the platform drv_data->set_ios hook and dw_mci_setup_bus() do the
 *    actual clock programming;
 *  - on power up/down, calls the board setpower hook and sets/clears
 *    this slot's bit in the PWREN register.
 */
1323 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1325 struct dw_mci_slot *slot = mmc_priv(mmc);
1326 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1327 struct dw_mci *host = slot->host;
1329 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1332 #ifdef SDMMC_WAIT_FOR_UNBUSY
1333 unsigned long time_loop;
/* Longer unbusy timeout while a signal-voltage switch is in progress. */
1336 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1337 if(host->svi_flags == 1)
1338 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1340 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1342 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1345 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1346 printk("%d..%s: no card. [%s]\n", \
1347 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until neither the data path nor the card is busy. */
1352 ret = time_before(jiffies, time_loop);
1353 regs = mci_readl(slot->host, STATUS);
1354 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1360 printk("slot->flags = %lu ", slot->flags);
1361 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1362 if(host->svi_flags != 1)
1365 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1366 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
1370 switch (ios->bus_width) {
1371 case MMC_BUS_WIDTH_4:
1372 slot->ctype = SDMMC_CTYPE_4BIT;
1374 case MMC_BUS_WIDTH_8:
1375 slot->ctype = SDMMC_CTYPE_8BIT;
1378 /* set default 1 bit mode */
1379 slot->ctype = SDMMC_CTYPE_1BIT;
1380 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode uses bit (16 + slot id) in UHS_REG. */
1383 regs = mci_readl(slot->host, UHS_REG);
1386 if (ios->timing == MMC_TIMING_UHS_DDR50)
1387 regs |= ((0x1 << slot->id) << 16);
1389 regs &= ~((0x1 << slot->id) << 16);
1391 mci_writel(slot->host, UHS_REG, regs);
1392 slot->host->timing = ios->timing;
1395  * Use mirror of ios->clock to prevent race with mmc
1396  * core ios update when finding the minimum.
1398 slot->clock = ios->clock;
1400 if (drv_data && drv_data->set_ios)
1401 drv_data->set_ios(slot->host, ios);
1403 /* Slot specific timing and width adjustment */
1404 dw_mci_setup_bus(slot, false);
1408 switch (ios->power_mode) {
/* Power up: remember that the next command needs the INIT clock. */
1410 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1412 if (slot->host->pdata->setpower)
1413 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1414 regs = mci_readl(slot->host, PWREN);
1415 regs |= (1 << slot->id);
1416 mci_writel(slot->host, PWREN, regs);
1419 /* Power down slot */
1420 if(slot->host->pdata->setpower)
1421 slot->host->pdata->setpower(slot->id, 0);
1422 regs = mci_readl(slot->host, PWREN);
1423 regs &= ~(1 << slot->id);
1424 mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops .get_ro: report write-protect state.  Precedence:
 * slot quirk (never write-protected) > board get_ro() callback >
 * write-protect GPIO > controller WRTPRT register bit for this slot.
 * Returns non-zero when the card is read-only.
 */
1431 static int dw_mci_get_ro(struct mmc_host *mmc)
1434 struct dw_mci_slot *slot = mmc_priv(mmc);
1435 struct dw_mci_board *brd = slot->host->pdata;
1437 /* Use platform get_ro function, else try on board write protect */
1438 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1440 else if(brd->get_ro)
1441 read_only = brd->get_ro(slot->id);
1442 else if(gpio_is_valid(slot->wp_gpio))
1443 read_only = gpio_get_value(slot->wp_gpio);
/* Fall back to the controller's write-protect input status. */
1446 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1448 dev_dbg(&mmc->class_dev, "card is %s\n",
1449 read_only ? "read-only" : "read-write");
/*
 * .set_sdio_status host op: software-controlled presence toggle for
 * SDIO cards (e.g. WiFi modules with no card-detect line).  Updates the
 * DW_MMC_CARD_PRESENT flag under host->lock, gates the controller's
 * AHB and card clocks to match, then schedules a (debounced) rescan.
 * Only valid for hosts restricted to SDIO (returns early otherwise).
 */
1454 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1456 struct dw_mci_slot *slot = mmc_priv(mmc);
1457 struct dw_mci *host = slot->host;
1458 /*struct dw_mci_board *brd = slot->host->pdata;*/
1460 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1463 spin_lock_bh(&host->lock);
1466 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1468 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1470 spin_unlock_bh(&host->lock);
/* Enable clocks when the device appears, disable them when it is gone. */
1472 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1473 if(__clk_is_enabled(host->hclk_mmc) == false)
1474 clk_prepare_enable(host->hclk_mmc);
1475 if(__clk_is_enabled(host->clk_mmc) == false)
1476 clk_prepare_enable(host->clk_mmc);
1478 if(__clk_is_enabled(host->clk_mmc) == true)
1479 clk_disable_unprepare(slot->host->clk_mmc);
1480 if(__clk_is_enabled(host->hclk_mmc) == true)
1481 clk_disable_unprepare(slot->host->hclk_mmc);
/* 20ms debounce before the core rescans the bus. */
1484 mmc_detect_change(slot->mmc, 20);
/*
 * mmc_host_ops .get_cd: report card presence.
 *
 * On RK312x SD hosts a dedicated cd-gpio is sampled twice (software
 * debounce); the GPIO is active-low, a stable low level means a card is
 * inserted and triggers a full controller reset before reporting.  If
 * the samples disagree, the last cached detect state is returned.
 *
 * Otherwise precedence is: SDIO software presence flag >
 * BROKEN_CARD_DETECTION quirk > board get_cd() callback > slot-gpio
 * helper > the controller's CDETECT register.  The result is mirrored
 * into DW_MMC_CARD_PRESENT under host->lock.
 */
1493 struct dw_mci_slot *slot = mmc_priv(mmc);
1494 struct dw_mci_board *brd = slot->host->pdata;
1495 struct dw_mci *host = slot->host;
1496 int gpio_cd = mmc_gpio_get_cd(mmc);
1499 if (cpu_is_rk312x() &&
1501 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1502 gpio_cd = slot->cd_gpio;
1503 if (gpio_is_valid(gpio_cd)) {
1504 gpio_val = gpio_get_value_cansleep(gpio_cd);
/* Re-sample after a delay; accept only a stable level (debounce). */
1506 if (gpio_val == gpio_get_value_cansleep(gpio_cd)) {
1507 gpio_cd = gpio_get_value_cansleep(gpio_cd) == 0 ? 1 : 0;
1509 dw_mci_ctrl_all_reset(host);
/* Bouncing input: keep reporting the previous stable state. */
1512 return slot->last_detect_state;
1515 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
1519 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1520 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1522 /* Use platform get_cd function, else try onboard card detect */
1523 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1525 else if (brd->get_cd)
1526 present = !brd->get_cd(slot->id);
1527 else if (!IS_ERR_VALUE(gpio_cd))
1530 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1533 spin_lock_bh(&host->lock);
1535 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1536 dev_dbg(&mmc->class_dev, "card is present\n");
1538 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1539 dev_dbg(&mmc->class_dev, "card is not present\n");
1541 spin_unlock_bh(&host->lock);
1548  * Dts Should caps emmc controller with poll-hw-reset
/*
 * mmc_host_ops .hw_reset: hardware-reset an eMMC device via RST_n.
 *
 * Steps (order is significant, see comment at step 3):
 *  1. issue CMD12 (STOP_TRANSMISSION) to terminate any transfer, polling
 *     up to 500ms for the CMD start bit to self-clear;
 *  2. wait (up to 5ms) for the data-over interrupt and ack it, since the
 *     card may not respond;
 *  3. reset IDMAC (BMOD[0]), then DMA (CTRL[2]), then FIFO (CTRL[1]),
 *     verifying each self-clearing bit;
 *  4. pulse PWREN/RST_n low then high with eMMC-spec timing
 *     (tRstW >= 1us, tRSCA >= 200us — see comment near the writes).
 */
1550 static void dw_mci_hw_reset(struct mmc_host *mmc)
1552 struct dw_mci_slot *slot = mmc_priv(mmc);
1553 struct dw_mci *host = slot->host;
1558 unsigned long timeout;
1561 /* (1) CMD12 to end any transfer in process */
1562 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1563 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1565 if(host->mmc->hold_reg_flag)
1566 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1567 mci_writel(host, CMDARG, 0);
1569 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* CMD_START self-clears when the controller has taken the command. */
1571 timeout = jiffies + msecs_to_jiffies(500);
1573 ret = time_before(jiffies, timeout);
1574 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1579 MMC_DBG_ERR_FUNC(host->mmc,
1580 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1581 __func__, mmc_hostname(host->mmc));
1583 /* (2) wait DTO, even if no response is sent back by card */
1585 timeout = jiffies + msecs_to_jiffies(5);
1587 ret = time_before(jiffies, timeout);
1588 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1589 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1595 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1597 /* Software reset - BMOD[0] for IDMA only */
1598 regs = mci_readl(host, BMOD);
1599 regs |= SDMMC_IDMAC_SWRESET;
1600 mci_writel(host, BMOD, regs);
1601 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1602 regs = mci_readl(host, BMOD);
1603 if(regs & SDMMC_IDMAC_SWRESET)
1604 MMC_DBG_WARN_FUNC(host->mmc,
1605 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1606 __func__, mmc_hostname(host->mmc));
1608 /* DMA reset - CTRL[2] */
1609 regs = mci_readl(host, CTRL);
1610 regs |= SDMMC_CTRL_DMA_RESET;
1611 mci_writel(host, CTRL, regs);
1612 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1613 regs = mci_readl(host, CTRL);
1614 if(regs & SDMMC_CTRL_DMA_RESET)
1615 MMC_DBG_WARN_FUNC(host->mmc,
1616 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1617 __func__, mmc_hostname(host->mmc));
1619 /* FIFO reset - CTRL[1] */
1620 regs = mci_readl(host, CTRL);
1621 regs |= SDMMC_CTRL_FIFO_RESET;
1622 mci_writel(host, CTRL, regs);
1623 mdelay(1); /* no timing limited, 1ms is random value */
1624 regs = mci_readl(host, CTRL);
1625 if(regs & SDMMC_CTRL_FIFO_RESET)
/* NOTE(review): copy/paste defect — this checks FIFO_RESET but the
 * warning text below says "SDMMC_CTRL_DMA_RESET"; the message should
 * name SDMMC_CTRL_FIFO_RESET. */
1626 MMC_DBG_WARN_FUNC(host->mmc,
1627 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1628 __func__, mmc_hostname(host->mmc));
1631 According to eMMC spec
1632 tRstW >= 1us ; RST_n pulse width
1633 tRSCA >= 200us ; RST_n to Command time
1634 tRSTH >= 1us ; RST_n high period
1636 mci_writel(slot->host, PWREN, 0x0);
1637 mci_writel(slot->host, RST_N, 0x0);
1639 udelay(10); /* 10us for bad quality eMMc. */
1641 mci_writel(slot->host, PWREN, 0x1);
1642 mci_writel(slot->host, RST_N, 0x1);
1644 usleep_range(500, 1000); /* at least 500(> 200us) */
1648  * Disable lower power mode.
1650  * Low power mode will stop the card clock when idle. According to the
1651  * description of the CLKENA register we should disable low power mode
1652  * for SDIO cards if we need SDIO interrupts to work.
1654  * This function is fast if low power mode is already disabled.
1656 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1658 struct dw_mci *host = slot->host;
/* Per-slot low-power enable bit inside CLKENA. */
1660 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1662 clk_en_a = mci_readl(host, CLKENA);
1664 if (clk_en_a & clken_low_pwr) {
1665 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
/* CLKENA changes only take effect after an update-clock command. */
1666 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1667 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * mmc_host_ops .enable_sdio_irq: mask/unmask this slot's SDIO
 * interrupt in INTMASK.  Pre-2.40a IP places the per-slot SDIO bits at
 * a different offset than 2.40a+ (slot->id vs slot->id + 8).  Enabling
 * also forces low-power clock gating off, since a gated card clock
 * would suppress SDIO interrupts (see comment below).
 */
1671 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1673 struct dw_mci_slot *slot = mmc_priv(mmc);
1674 struct dw_mci *host = slot->host;
1678 /* Enable/disable Slot Specific SDIO interrupt */
1679 int_mask = mci_readl(host, INTMASK);
1681 if (host->verid < DW_MMC_240A)
1682 sdio_int = SDMMC_INT_SDIO(slot->id);
1684 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1688  * Turn off low power mode if it was enabled. This is a bit of
1689  * a heavy operation and we disable / enable IRQs a lot, so
1690  * we'll leave low power mode disabled and it will get
1691  * re-enabled again in dw_mci_setup_bus().
1693 dw_mci_disable_low_power(slot);
1695 mci_writel(host, INTMASK,
1696 (int_mask | sdio_int));
1698 mci_writel(host, INTMASK,
1699 (int_mask & ~sdio_int));
1703 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* Supported IO-domain voltages in millivolts. */
1705 IO_DOMAIN_12 = 1200,
1706 IO_DOMAIN_18 = 1800,
1707 IO_DOMAIN_33 = 3300,
/*
 * Program the SoC GRF (general register file) so the SD IO domain
 * matches @voltage.  Only RK3288 is handled in the visible code: the
 * SD voltage-select bit is written to RK3288_GRF_IO_VSEL with its
 * write-enable mask bit (bit 23).  Unsupported voltages or unknown
 * chips only log an error.
 */
1709 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1719 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1720 __FUNCTION__, mmc_hostname(host->mmc));
1723 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1724 __FUNCTION__, mmc_hostname(host->mmc));
1728 if(cpu_is_rk3288()){
1729 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
/* Upper 16 bits of GRF registers are per-bit write enables. */
1730 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1734 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1735 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * Perform the actual signal-voltage switch requested by the core.
 *
 * For 3.3V: set the vmmc regulator to 3.3V, switch the GRF IO domain,
 * force high-power clock mode, clear the 1.8V bit in UHS_REG, then
 * verify after ~5ms (regulator must be stable within 5ms).
 * For 1.8V: mirror of the above — regulator to 1.8V, IO domain switch,
 * set the 1.8V bit in UHS_REG, verify after ~5ms.
 * For 1.2V: only the regulator is programmed in the visible code.
 * Controllers older than 2.40a don't support UHS_REG and bail early.
 */
1739 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1740 struct mmc_ios *ios)
1743 unsigned int value,uhs_reg;
1746  * Signal Voltage Switching is only applicable for Host Controllers
1749 if (host->verid < DW_MMC_240A)
1752 uhs_reg = mci_readl(host, UHS_REG);
1753 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1754 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1756 switch (ios->signal_voltage) {
1757 case MMC_SIGNAL_VOLTAGE_330:
1758 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1760 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1761 /* regulator_put(host->vmmc); //to be done in remove function. */
1763 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1764 __func__, regulator_get_voltage(host->vmmc), ret);
1766 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1767 " failed\n", mmc_hostname(host->mmc));
1770 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1772 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1773 __FUNCTION__, mmc_hostname(host->mmc));
1775 /* set High-power mode */
1776 value = mci_readl(host, CLKENA);
1777 value &= ~SDMMC_CLKEN_LOW_PWR;
1778 mci_writel(host,CLKENA , value);
/* Drop the 1.8V signalling bit for 3.3V operation. */
1780 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1781 mci_writel(host,UHS_REG , uhs_reg);
1784 usleep_range(5000, 5500);
1786 /* 3.3V regulator output should be stable within 5 ms */
1787 uhs_reg = mci_readl(host, UHS_REG);
1788 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1791 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1792 mmc_hostname(host->mmc));
1795 case MMC_SIGNAL_VOLTAGE_180:
1797 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1798 /* regulator_put(host->vmmc);//to be done in remove function. */
1800 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1801 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1803 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1804 " failed\n", mmc_hostname(host->mmc));
1807 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1811  * Enable 1.8V Signal Enable in the Host Control2
1814 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1817 usleep_range(5000, 5500);
1818 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1819 __FUNCTION__,mmc_hostname(host->mmc));
1821 /* 1.8V regulator output should be stable within 5 ms */
1822 uhs_reg = mci_readl(host, UHS_REG);
1823 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1826 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1827 mmc_hostname(host->mmc));
1830 case MMC_SIGNAL_VOLTAGE_120:
1832 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1834 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1835 " failed\n", mmc_hostname(host->mmc));
1841 /* No signal voltage switch required */
/*
 * mmc_host_ops .start_signal_voltage_switch: thin wrapper that bails
 * out on pre-2.40a IP and otherwise delegates to
 * dw_mci_do_start_signal_voltage_switch().
 */
1847 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1848 struct mmc_ios *ios)
1850 struct dw_mci_slot *slot = mmc_priv(mmc);
1851 struct dw_mci *host = slot->host;
1854 if (host->verid < DW_MMC_240A)
1857 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * mmc_host_ops .execute_tuning: select the tuning block pattern
 * matching the opcode/bus width, pick the clock controller id for this
 * host type (3 = eMMC, 1 = SDIO, 0 = SD per the visible assignments),
 * and delegate to the platform drv_data->execute_tuning hook.
 * Skipped entirely on RK3036/RK312x (no 1.8V IO domain — see comment).
 */
1863 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1865 struct dw_mci_slot *slot = mmc_priv(mmc);
1866 struct dw_mci *host = slot->host;
1867 const struct dw_mci_drv_data *drv_data = host->drv_data;
1868 struct dw_mci_tuning_data tuning_data;
1871 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1872 if(cpu_is_rk3036() || cpu_is_rk312x())
/* HS200 (CMD21) uses the 8- or 4-bit pattern; CMD19 is 4-bit only. */
1875 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1876 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1877 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1878 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1879 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1880 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1881 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1885 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1886 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1887 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1890 "Undefined command(%d) for tuning\n", opcode);
1895 /* Recommend sample phase and delayline
1896 Fixme: Mix-use these three controllers will cause
1899 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1900 tuning_data.con_id = 3;
1901 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1902 tuning_data.con_id = 1;
1904 tuning_data.con_id = 0;
1906 /* 0: driver, from host->devices
1907 1: sample, from devices->host
1909 tuning_data.tuning_type = 1;
1911 if (drv_data && drv_data->execute_tuning)
1912 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/*
 * Host operations table registered with the mmc core.  The voltage
 * switch and card-busy ops are only wired up when the Rockchip
 * switch-voltage support is compiled in.
 */
1917 static const struct mmc_host_ops dw_mci_ops = {
1918 .request = dw_mci_request,
1919 .pre_req = dw_mci_pre_req,
1920 .post_req = dw_mci_post_req,
1921 .set_ios = dw_mci_set_ios,
1922 .get_ro = dw_mci_get_ro,
1923 .get_cd = dw_mci_get_cd,
1924 .set_sdio_status = dw_mci_set_sdio_status,
1925 .hw_reset = dw_mci_hw_reset,
1926 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1927 .execute_tuning = dw_mci_execute_tuning,
1928 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1929 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
1930 .card_busy = dw_mci_card_busy,
/*
 * Enable or disable the host's interrupt line, tracking the current
 * state in host->irq_state so repeated calls with the same flag do not
 * unbalance the enable_irq()/disable_irq() nesting.  Runs with local
 * interrupts saved/restored around the check-and-toggle.
 */
1935 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
1937 unsigned long flags;
1942 local_irq_save(flags);
1943 if(host->irq_state != irqflag)
1945 host->irq_state = irqflag;
1948 enable_irq(host->irq);
1952 disable_irq(host->irq);
1955 local_irq_restore(flags);
/*
 * Post-data-transfer handling for write (host->card) transfers: map the
 * recorded data_status bits onto data->error and wait for the
 * controller to go un-busy.  CMD14 (MMC_BUS_TEST_W) is exempt from the
 * error mapping since bus-test deliberately provokes CRC errors.
 * NOTE(review): EBE is mapped to -ETIMEDOUT here, where mainline dw_mmc
 * treats write-path EBE differently — assumed intentional; confirm.
 */
1959 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
1960 __releases(&host->lock)
1961 __acquires(&host->lock)
1963 if(DW_MCI_SEND_STATUS == host->dir_status){
1965 if( MMC_BUS_TEST_W != host->cmd->opcode){
1966 if(host->data_status & SDMMC_INT_DCRC)
1967 host->data->error = -EILSEQ;
1968 else if(host->data_status & SDMMC_INT_EBE)
1969 host->data->error = -ETIMEDOUT;
1971 dw_mci_wait_unbusy(host);
1974 dw_mci_wait_unbusy(host);
/*
 * Finish @mrq: stop the data-timeout timer, run end-of-data handling,
 * clear the slot's current request, then either start the next queued
 * slot (state -> SENDING_CMD) or go idle.  Completion is reported to
 * the core with host->lock temporarily dropped (see the sparse
 * __releases/__acquires annotations).
 */
1979 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1980 __releases(&host->lock)
1981 __acquires(&host->lock)
1983 struct dw_mci_slot *slot;
/* Capture before cur_slot can be repointed at the next queued slot. */
1984 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1986 WARN_ON(host->cmd || host->data);
1988 del_timer_sync(&host->dto_timer);
1989 dw_mci_deal_data_end(host, mrq);
1992 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
1993 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
1995 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
1996 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
1998 host->cur_slot->mrq = NULL;
2000 if (!list_empty(&host->queue)) {
2001 slot = list_entry(host->queue.next,
2002 struct dw_mci_slot, queue_node);
2003 list_del(&slot->queue_node);
2004 dev_vdbg(host->dev, "list not empty: %s is next\n",
2005 mmc_hostname(slot->mmc));
2006 host->state = STATE_SENDING_CMD;
2007 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2008 dw_mci_start_request(host, slot);
2010 dev_vdbg(host->dev, "list empty\n");
2011 host->state = STATE_IDLE;
/* mmc_request_done() may requeue; never call it with the lock held. */
2014 spin_unlock(&host->lock);
2015 mmc_request_done(prev_mmc, mrq);
2016 spin_lock(&host->lock);
/*
 * Consume the saved command-phase interrupt status: read the (up to
 * 16-byte) response out of RESP0..RESP3 — note long responses are
 * stored most-significant word in RESP3, hence the reversed indexing —
 * and translate error bits into cmd->error (RTO -> -ETIMEDOUT,
 * RCRC -> -EILSEQ, RESP_ERR -> error).  Error paths also cancel the
 * data-timeout timer.  SDIO hosts get special RTO handling, and
 * repeated CMD timeouts (cmd_rto counter vs SDMMC_CMD_RTO_MAX_HOLD)
 * are reported loudly for non-CMD13 commands.
 */
2019 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2021 u32 status = host->cmd_status;
2023 host->cmd_status = 0;
2025 /* Read the response from the card (up to 16 bytes) */
2026 if (cmd->flags & MMC_RSP_PRESENT) {
2027 if (cmd->flags & MMC_RSP_136) {
2028 cmd->resp[3] = mci_readl(host, RESP0);
2029 cmd->resp[2] = mci_readl(host, RESP1);
2030 cmd->resp[1] = mci_readl(host, RESP2);
2031 cmd->resp[0] = mci_readl(host, RESP3);
2033 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2034 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2036 cmd->resp[0] = mci_readl(host, RESP0);
2040 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2041 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
2045 if (status & SDMMC_INT_RTO)
2047 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2050 cmd->error = -ETIMEDOUT;
2051 del_timer_sync(&host->dto_timer);
2052 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2053 del_timer_sync(&host->dto_timer);
2054 cmd->error = -EILSEQ;
2055 }else if (status & SDMMC_INT_RESP_ERR){
2056 del_timer_sync(&host->dto_timer);
2061 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2062 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2065 del_timer_sync(&host->dto_timer);
/* CMD13 polls are routine; don't spam errors for it. */
2066 if(MMC_SEND_STATUS != cmd->opcode)
2067 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2068 MMC_DBG_ERR_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2069 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2073 /* newer ip versions need a delay between retries */
2074 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine driven by the interrupt handler's
 * pending_events bits.  Runs under host->lock and loops until the state
 * stops changing:
 *
 *   SENDING_CMD  -> command completed; on sbc success restart with the
 *                   real command, on data-command error send/abort a
 *                   stop, otherwise finish or advance to SENDING_DATA.
 *   SENDING_DATA -> on DATA_ERROR recover (including a manual CMD12 for
 *                   single-block transfers, see inline comment), else
 *                   wait for XFER_COMPLETE and advance to DATA_BUSY.
 *   DATA_BUSY    -> on DATA_COMPLETE map error status bits onto
 *                   data->error (DRTO/DCRC/EBE), reset the FIFO after
 *                   errors, then finish, or advance to SENDING_STOP for
 *                   open-ended transfers.
 *   SENDING_STOP -> complete the stop command and finish the request.
 *   DATA_ERROR   -> wait for XFER_COMPLETE, then drain via DATA_BUSY.
 */
2080 static void dw_mci_tasklet_func(unsigned long priv)
2082 struct dw_mci *host = (struct dw_mci *)priv;
2083 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2084 struct mmc_data *data;
2085 struct mmc_command *cmd;
2086 enum dw_mci_state state;
2087 enum dw_mci_state prev_state;
2088 u32 status, cmd_flags;
2089 unsigned long timeout = 0;
2092 spin_lock(&host->lock);
2094 state = host->state;
2104 case STATE_SENDING_CMD:
2105 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2106 &host->pending_events))
2111 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2112 dw_mci_command_complete(host, cmd);
/* SBC (CMD23) done: now launch the actual data command. */
2113 if (cmd == host->mrq->sbc && !cmd->error) {
2114 prev_state = state = STATE_SENDING_CMD;
2115 __dw_mci_start_request(host, host->cur_slot,
2120 if (cmd->data && cmd->error) {
2121 del_timer_sync(&host->dto_timer); /* delete the timer for INT_DTO */
2122 dw_mci_stop_dma(host);
2125 send_stop_cmd(host, data);
2126 state = STATE_SENDING_STOP;
2132 send_stop_abort(host, data);
2133 state = STATE_SENDING_STOP;
2136 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2139 if (!host->mrq->data || cmd->error) {
2140 dw_mci_request_end(host, host->mrq);
2144 prev_state = state = STATE_SENDING_DATA;
2147 case STATE_SENDING_DATA:
2148 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2149 dw_mci_stop_dma(host);
2152 send_stop_cmd(host, data);
2154 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2155 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2156 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
2158 mci_writel(host, CMDARG, 0);
2160 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2161 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2163 if(host->mmc->hold_reg_flag)
2164 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2166 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll (<=500ms) for the CMD start bit to self-clear. */
2168 timeout = jiffies + msecs_to_jiffies(500);
2171 ret = time_before(jiffies, timeout);
2172 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2176 MMC_DBG_ERR_FUNC(host->mmc,
2177 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2178 __func__, mmc_hostname(host->mmc));
2181 send_stop_abort(host, data);
2183 state = STATE_DATA_ERROR;
2187 MMC_DBG_CMD_FUNC(host->mmc,
2188 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2189 prev_state,state, mmc_hostname(host->mmc));
2191 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2192 &host->pending_events))
2194 MMC_DBG_INFO_FUNC(host->mmc,
2195 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2196 prev_state,state,mmc_hostname(host->mmc));
2198 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2199 prev_state = state = STATE_DATA_BUSY;
2202 case STATE_DATA_BUSY:
2203 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2204 &host->pending_events))
2207 dw_mci_deal_data_end(host, host->mrq);
2208 del_timer_sync(&host->dto_timer); //delete the timer for INT_DTO
2209 MMC_DBG_INFO_FUNC(host->mmc,
2210 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2211 prev_state,state,mmc_hostname(host->mmc));
2214 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2215 status = host->data_status;
2217 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2218 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2219 MMC_DBG_ERR_FUNC(host->mmc,
2220 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2221 prev_state,state, status, mmc_hostname(host->mmc));
/* Map raw interrupt status onto the errno the core expects. */
2223 if (status & SDMMC_INT_DRTO) {
2224 data->error = -ETIMEDOUT;
2225 } else if (status & SDMMC_INT_DCRC) {
2226 data->error = -EILSEQ;
2227 } else if (status & SDMMC_INT_EBE &&
2228 host->dir_status == DW_MCI_SEND_STATUS){
2230  * No data CRC status was returned.
2231  * The number of bytes transferred will
2232  * be exaggerated in PIO mode.
2234 data->bytes_xfered = 0;
2235 data->error = -ETIMEDOUT;
2244  * After an error, there may be data lingering
2245  * in the FIFO, so reset it - doing so
2246  * generates a block interrupt, hence setting
2247  * the scatter-gather pointer to NULL.
2249 dw_mci_fifo_reset(host);
2251 data->bytes_xfered = data->blocks * data->blksz;
2256 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2257 prev_state,state,mmc_hostname(host->mmc));
2258 dw_mci_request_end(host, host->mrq);
2261 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2262 prev_state,state,mmc_hostname(host->mmc));
/* With CMD23 the card stops by itself — no CMD12 needed. */
2264 if (host->mrq->sbc && !data->error) {
2265 data->stop->error = 0;
2267 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2268 prev_state,state,mmc_hostname(host->mmc));
2270 dw_mci_request_end(host, host->mrq);
2274 prev_state = state = STATE_SENDING_STOP;
2276 send_stop_cmd(host, data);
2278 if (data->stop && !data->error) {
2279 /* stop command for open-ended transfer*/
2281 send_stop_abort(host, data);
2285 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2286 prev_state,state,mmc_hostname(host->mmc));
2288 case STATE_SENDING_STOP:
2289 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2292 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2293 prev_state, state, mmc_hostname(host->mmc));
2295 /* CMD error in data command */
2296 if (host->mrq->cmd->error && host->mrq->data) {
2297 dw_mci_fifo_reset(host);
2303 dw_mci_command_complete(host, host->mrq->stop);
2305 if (host->mrq->stop)
2306 dw_mci_command_complete(host, host->mrq->stop);
2308 host->cmd_status = 0;
2311 dw_mci_request_end(host, host->mrq);
2314 case STATE_DATA_ERROR:
2315 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2316 &host->pending_events))
2319 state = STATE_DATA_BUSY;
2322 } while (state != prev_state);
2324 host->state = state;
2326 spin_unlock(&host->lock);
2330 /* push final bytes to part_buf, only use during push */
/* Stash the trailing @cnt bytes (less than one FIFO word) of @buf. */
2331 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2333 memcpy((void *)&host->part_buf, buf, cnt);
2334 host->part_buf_count = cnt;
2337 /* append bytes to part_buf, only use during push */
/*
 * Append up to @cnt bytes of @buf to part_buf, capped so the buffer
 * never exceeds one FIFO word (1 << data_shift bytes).  Returns the
 * number of bytes actually consumed.
 */
2338 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2340 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2341 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2342 host->part_buf_count += cnt;
2346 /* pull first bytes from part_buf, only use during pull */
/*
 * Copy up to @cnt buffered bytes from part_buf into @buf, advancing
 * part_buf_start/part_buf_count.  Returns the number of bytes copied.
 */
2347 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2349 cnt = min(cnt, (int)host->part_buf_count);
2351 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2353 host->part_buf_count -= cnt;
2354 host->part_buf_start += cnt;
2359 /* pull final bytes from the part_buf, assuming it's just been filled */
/*
 * Take the first @cnt bytes of a freshly-filled part_buf into @buf and
 * mark the remainder of the FIFO word as still pending.
 */
2360 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2362 memcpy(buf, &host->part_buf, cnt);
2363 host->part_buf_start = cnt;
2364 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO push for a 16-bit FIFO: drain any partial word left in part_buf,
 * write @buf to the FIFO a u16 at a time (via a bounce buffer when
 * @buf is misaligned and the arch lacks efficient unaligned access),
 * and stash a trailing odd byte — flushing it immediately if this is
 * the final chunk of the transfer.
 */
2367 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2369 struct mmc_data *data = host->data;
2372 /* try and push anything in the part_buf */
2373 if (unlikely(host->part_buf_count)) {
2374 int len = dw_mci_push_part_bytes(host, buf, cnt);
2377 if (host->part_buf_count == 2) {
2378 mci_writew(host, DATA(host->data_offset),
2380 host->part_buf_count = 0;
2383 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2384 if (unlikely((unsigned long)buf & 0x1)) {
2386 u16 aligned_buf[64];
2387 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2388 int items = len >> 1;
2390 /* memcpy from input buffer into aligned buffer */
2391 memcpy(aligned_buf, buf, len);
2394 /* push data from aligned buffer into fifo */
2395 for (i = 0; i < items; ++i)
2396 mci_writew(host, DATA(host->data_offset),
2403 for (; cnt >= 2; cnt -= 2)
2404 mci_writew(host, DATA(host->data_offset), *pdata++);
2407 /* put anything remaining in the part_buf */
2409 dw_mci_set_part_bytes(host, buf, cnt);
2410 /* Push data if we have reached the expected data length */
2411 if ((data->bytes_xfered + init_cnt) ==
2412 (data->blksz * data->blocks))
2413 mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit FIFO: read u16 words into @buf (bounce buffer
 * for misaligned destinations), then read one last word for a trailing
 * odd byte and keep the leftover half in part_buf for the next call.
 */
2418 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2420 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2421 if (unlikely((unsigned long)buf & 0x1)) {
2423 /* pull data from fifo into aligned buffer */
2424 u16 aligned_buf[64];
2425 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2426 int items = len >> 1;
2428 for (i = 0; i < items; ++i)
2429 aligned_buf[i] = mci_readw(host,
2430 DATA(host->data_offset));
2431 /* memcpy from aligned buffer into output buffer */
2432 memcpy(buf, aligned_buf, len);
2440 for (; cnt >= 2; cnt -= 2)
2441 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Trailing byte: read a full word, hand out cnt bytes, buffer the rest. */
2445 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2446 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit FIFO.  Same structure as dw_mci_push_data16
 * with a 4-byte word: drain part_buf, bulk-write u32 words (bounce
 * buffer for misaligned input), stash 1-3 trailing bytes and flush
 * them if this completes the transfer.
 */
2450 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2452 struct mmc_data *data = host->data;
2455 /* try and push anything in the part_buf */
2456 if (unlikely(host->part_buf_count)) {
2457 int len = dw_mci_push_part_bytes(host, buf, cnt);
2460 if (host->part_buf_count == 4) {
2461 mci_writel(host, DATA(host->data_offset),
2463 host->part_buf_count = 0;
2466 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2467 if (unlikely((unsigned long)buf & 0x3)) {
2469 u32 aligned_buf[32];
2470 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2471 int items = len >> 2;
2473 /* memcpy from input buffer into aligned buffer */
2474 memcpy(aligned_buf, buf, len);
2477 /* push data from aligned buffer into fifo */
2478 for (i = 0; i < items; ++i)
2479 mci_writel(host, DATA(host->data_offset),
2486 for (; cnt >= 4; cnt -= 4)
2487 mci_writel(host, DATA(host->data_offset), *pdata++);
2490 /* put anything remaining in the part_buf */
2492 dw_mci_set_part_bytes(host, buf, cnt);
2493 /* Push data if we have reached the expected data length */
2494 if ((data->bytes_xfered + init_cnt) ==
2495 (data->blksz * data->blocks))
2496 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit FIFO.  Mirror of dw_mci_pull_data16 with a
 * 4-byte word; leftover 1-3 bytes are buffered in part_buf32.
 */
2501 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2503 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2504 if (unlikely((unsigned long)buf & 0x3)) {
2506 /* pull data from fifo into aligned buffer */
2507 u32 aligned_buf[32];
2508 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2509 int items = len >> 2;
2511 for (i = 0; i < items; ++i)
2512 aligned_buf[i] = mci_readl(host,
2513 DATA(host->data_offset));
2514 /* memcpy from aligned buffer into output buffer */
2515 memcpy(buf, aligned_buf, len);
2523 for (; cnt >= 4; cnt -= 4)
2524 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Trailing bytes: read a full word, keep the surplus in part_buf32. */
2528 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2529 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_push_data64 - 64-bit-wide variant of the PIO FIFO push.
 * Same structure as dw_mci_push_data32 but with 8-byte words, 8-byte
 * alignment checks, and mci_writeq() accesses.
 * NOTE(review): sampled listing — some original lines are absent.
 */
2533 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2535 	struct mmc_data *data = host->data;
2538 	/* try and push anything in the part_buf */
2539 	if (unlikely(host->part_buf_count)) {
2540 		int len = dw_mci_push_part_bytes(host, buf, cnt);
	/* part_buf full (8 bytes): write it out as one FIFO word */
2544 		if (host->part_buf_count == 8) {
2545 			mci_writeq(host, DATA(host->data_offset),
2547 			host->part_buf_count = 0;
2550 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2551 	if (unlikely((unsigned long)buf & 0x7)) {
2553 			u64 aligned_buf[16];
2554 			int len = min(cnt & -8, (int)sizeof(aligned_buf));
2555 			int items = len >> 3;
2557 			/* memcpy from input buffer into aligned buffer */
2558 			memcpy(aligned_buf, buf, len);
2561 			/* push data from aligned buffer into fifo */
2562 			for (i = 0; i < items; ++i)
2563 				mci_writeq(host, DATA(host->data_offset),
	/* aligned fast path: stream 64-bit words straight from buf */
2570 		for (; cnt >= 8; cnt -= 8)
2571 			mci_writeq(host, DATA(host->data_offset), *pdata++);
2574 	/* put anything remaining in the part_buf */
2576 		dw_mci_set_part_bytes(host, buf, cnt);
2577 		/* Push data if we have reached the expected data length */
2578 		if ((data->bytes_xfered + init_cnt) ==
2579 		    (data->blksz * data->blocks))
2580 			mci_writeq(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data64 - 64-bit-wide variant of the PIO FIFO pull.
 * Mirrors dw_mci_pull_data32 with 8-byte words and mci_readq();
 * trailing bytes land in host->part_buf and are copied out by
 * dw_mci_pull_final_bytes().
 * NOTE(review): sampled listing — some original lines are absent.
 */
2585 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2587 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2588 	if (unlikely((unsigned long)buf & 0x7)) {
2590 		/* pull data from fifo into aligned buffer */
2591 			u64 aligned_buf[16];
2592 			int len = min(cnt & -8, (int)sizeof(aligned_buf));
2593 			int items = len >> 3;
2595 			for (i = 0; i < items; ++i)
2596 				aligned_buf[i] = mci_readq(host,
2597 						DATA(host->data_offset));
2598 			/* memcpy from aligned buffer into output buffer */
2599 			memcpy(buf, aligned_buf, len);
	/* aligned fast path: read whole 64-bit words directly into buf */
2607 		for (; cnt >= 8; cnt -= 8)
2608 			*pdata++ = mci_readq(host, DATA(host->data_offset));
2612 	host->part_buf = mci_readq(host, DATA(host->data_offset));
2613 	dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_pull_data - common PIO pull entry point.  Drains leftover
 * partial-word bytes first; if that fully satisfies cnt the indirect
 * width-specific host->pull_data() call is skipped (early return on the
 * elided line after 2623, presumably — sampled listing).
 */
2617 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2621 	/* get remaining partial bytes */
2622 	len = dw_mci_pull_part_bytes(host, buf, cnt);
2623 	if (unlikely(len == cnt))
2628 	/* get the rest of the data */
2629 	host->pull_data(host, buf, cnt);
/*
 * dw_mci_read_data_pio - drain the RX FIFO into the request's
 * scatterlist (PIO mode).  Iterates sg_miter segments, pulling
 * min(segment remainder, FIFO fill level) each pass, and loops while
 * RXDR stays pending (or, on data-over, while the FIFO is non-empty).
 * @dto: true when called from the DATA_OVER interrupt path.
 * Sets EVENT_XFER_COMPLETE when the scatterlist is exhausted.
 * NOTE(review): sampled listing — loop braces and the 'done:' label
 * lines are absent between the numbered lines below.
 */
2632 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2634 	struct sg_mapping_iter *sg_miter = &host->sg_miter;
2636 	unsigned int offset;
2637 	struct mmc_data	*data = host->data;
2638 	int shift = host->data_shift;
2641 	unsigned int remain, fcnt;
	/* defensive check: host may already have been released */
2643 	if(!host->mmc->bus_refs){
2644 		printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2648 		if (!sg_miter_next(sg_miter))
2651 		host->sg = sg_miter->piter.sg;
2652 		buf = sg_miter->addr;
2653 		remain = sg_miter->length;
	/* fcnt = bytes currently available: FIFO fill count scaled to
	 * bytes plus any partial word already buffered */
2657 			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2658 					<< shift) + host->part_buf_count;
2659 			len = min(remain, fcnt);
2662 			dw_mci_pull_data(host, (void *)(buf + offset), len);
2663 			data->bytes_xfered += len;
2668 		sg_miter->consumed = offset;
2669 		status = mci_readl(host, MINTSTS);
2670 		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2671 	/* if the RXDR is ready read again */
2672 	} while ((status & SDMMC_INT_RXDR) ||
2673 		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2676 	if (!sg_miter_next(sg_miter))
2678 	sg_miter->consumed = 0;
2680 	sg_miter_stop(sg_miter);
2684 	sg_miter_stop(sg_miter);
2688 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_write_data_pio - fill the TX FIFO from the request's
 * scatterlist (PIO mode).  Mirror image of dw_mci_read_data_pio:
 * each pass pushes min(segment remainder, FIFO free space) and loops
 * while TXDR stays pending.  Free space is (fifo_depth - fill count),
 * scaled to bytes, minus bytes already parked in part_buf.
 * Sets EVENT_XFER_COMPLETE when the scatterlist is exhausted.
 * NOTE(review): sampled listing — loop braces and the 'done:' label
 * lines are absent between the numbered lines below.
 */
2691 static void dw_mci_write_data_pio(struct dw_mci *host)
2693 	struct sg_mapping_iter *sg_miter = &host->sg_miter;
2695 	unsigned int offset;
2696 	struct mmc_data	*data = host->data;
2697 	int shift = host->data_shift;
2700 	unsigned int fifo_depth = host->fifo_depth;
2701 	unsigned int remain, fcnt;
	/* defensive check: host may already have been released */
2703 	if(!host->mmc->bus_refs){
2704 		printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2709 		if (!sg_miter_next(sg_miter))
2712 		host->sg = sg_miter->piter.sg;
2713 		buf = sg_miter->addr;
2714 		remain = sg_miter->length;
2718 			fcnt = ((fifo_depth -
2719 				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2720 					<< shift) - host->part_buf_count;
2721 			len = min(remain, fcnt);
2724 			host->push_data(host, (void *)(buf + offset), len);
2725 			data->bytes_xfered += len;
2730 		sg_miter->consumed = offset;
2731 		status = mci_readl(host, MINTSTS);
2732 		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2733 	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2736 	if (!sg_miter_next(sg_miter))
2738 	sg_miter->consumed = 0;
2740 	sg_miter_stop(sg_miter);
2744 	sg_miter_stop(sg_miter);
2748 	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_cmd_interrupt - handle a command-done (or voltage-switch)
 * interrupt: latch the first status seen, arm the data-over watchdog
 * timer for non-STOP commands (timeout scaled by byte count and retry
 * count), flag CMD_COMPLETE and kick the state-machine tasklet.
 * NOTE(review): sampled listing — 'unit' is defined on an elided line;
 * the 4500 ms * multi timeout formula is vendor-chosen (comment claims
 * "max wait 8s larger").
 */
2751 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
	/* keep only the first status; later ones would overwrite the cause */
2755 	if (!host->cmd_status)
2756 		host->cmd_status = status;
2761 	if((MMC_STOP_TRANSMISSION != host->cmd->opcode))
	/* round byte count up to 'unit'-sized chunks, minimum 1 */
2764 		multi = mci_readl(host, BYTCNT)/unit;
2765 		multi += ((mci_readl(host, BYTCNT) % unit) ? 1 :0 );
2766 		multi = (multi > 0) ? multi : 1;
2767 		multi += (host->cmd->retries > 2)? 2 : host->cmd->retries;
2768 		mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(4500 * multi));//max wait 8s larger
2773 	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2774 	tasklet_schedule(&host->tasklet);
/*
 * dw_mci_interrupt - top-level IRQ handler.  Reads the masked interrupt
 * status once and dispatches, in order: command errors, data errors,
 * data-over (completing any PIO read and cancelling the DTO watchdog),
 * RXDR/TXDR PIO service, voltage-switch, command-done, card-detect
 * (deferred to card_workqueue), hardware-locked error, per-slot SDIO
 * interrupts, and finally IDMAC DMA-done interrupts (skipped on SoCs
 * using an external DMAC).  Each handled source is acked in RINTSTS.
 * NOTE(review): sampled listing — braces, 'i' declaration and the
 * return statement are absent between the numbered lines below.
 */
2777 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2779 	struct dw_mci *host = dev_id;
2780 	u32 pending, sdio_int;
2783 	pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2786 	 * DTO fix - version 2.10a and below, and only if internal DMA
2789 	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
	/* quirk: synthesize DATA_OVER when FIFO count (STATUS[29:17]) is 0 */
2791 		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2792 			pending |= SDMMC_INT_DATA_OVER;
2796 	if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2797 		mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2798 		host->cmd_status = pending;
2800 		MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2801                           __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2803 		set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2806 	if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2807 		/* if there is an error report DATA_ERROR */
2808 		mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2809 		host->data_status = pending;
2811 		set_bit(EVENT_DATA_ERROR, &host->pending_events);
2813 		MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2814                           __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2815 		tasklet_schedule(&host->tasklet);
2818 	if (pending & SDMMC_INT_DATA_OVER) {
2819 		mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2820 		del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2821 		MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2822 		if (!host->data_status)
2823 			host->data_status = pending;
	/* on a PIO read, drain whatever is left in the FIFO now */
2825 		if (host->dir_status == DW_MCI_RECV_STATUS) {
2826 			if (host->sg != NULL)
2827 				dw_mci_read_data_pio(host, true);
2829 		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2830 		tasklet_schedule(&host->tasklet);
2833 	if (pending & SDMMC_INT_RXDR) {
2834 		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2835 		if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2836 			dw_mci_read_data_pio(host, false);
2839 	if (pending & SDMMC_INT_TXDR) {
2840 		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2841 		if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2842 			dw_mci_write_data_pio(host);
2845 	if (pending & SDMMC_INT_VSI) {
2846 		MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2847 		mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2848 		dw_mci_cmd_interrupt(host, pending);
2851 	if (pending & SDMMC_INT_CMD_DONE) {
2852 		MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2853 		mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2854 		dw_mci_cmd_interrupt(host, pending);
2857 	if (pending & SDMMC_INT_CD) {
2858 		mci_writel(host, RINTSTS, SDMMC_INT_CD);
2859 		MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
	/* hold a wakelock so the detect work can run before suspend */
2860 		wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2861 		queue_work(host->card_workqueue, &host->card_work);
2864 	if (pending & SDMMC_INT_HLE) {
2865 		mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2866 		MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2870 	/* Handle SDIO Interrupts */
2871 	for (i = 0; i < host->num_slots; i++) {
2872 		struct dw_mci_slot *slot = host->slot[i];
	/* SDIO interrupt bit position moved in controller version 2.40a */
2874 		if (host->verid < DW_MMC_240A)
2875 			sdio_int = SDMMC_INT_SDIO(i);
2877 			sdio_int = SDMMC_INT_SDIO(i + 8);
2879 		if (pending & sdio_int) {
2880 			mci_writel(host, RINTSTS, sdio_int);
2881 			mmc_signal_sdio_irq(slot->mmc);
2887 #ifdef CONFIG_MMC_DW_IDMAC
2888 	/* External DMA Soc platform NOT need to ack interrupt IDSTS */
2889 	if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2890 		/* Handle DMA interrupts */
2891 		pending = mci_readl(host, IDSTS);
2892 		if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2893 			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2894 			mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2895 			host->dma_ops->complete((void *)host);
/*
 * dw_mci_work_routine_card - workqueue handler for card insert/remove.
 * For each slot whose present state changed: wakes the system, tears
 * down any in-flight request (marking cmd/data/stop with -ENOMEDIUM
 * according to the current state-machine state), resets the FIFO (and
 * IDMAC on SoCs that have one), then notifies the MMC core via
 * mmc_detect_change() after the debounce delay.
 * NOTE(review): sampled listing — braces, 'mrq' dequeue logic and
 * power-down code are absent between the numbered lines below.
 */
2903 static void dw_mci_work_routine_card(struct work_struct *work)
2905 	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2908 	for (i = 0; i < host->num_slots; i++) {
2909 		struct dw_mci_slot *slot = host->slot[i];
2910 		struct mmc_host *mmc = slot->mmc;
2911 		struct mmc_request *mrq;
2914 		present = dw_mci_get_cd(mmc);
	/* loop until the observed state matches the recorded one — the
	 * card may bounce while we are cleaning up */
2915 		while (present != slot->last_detect_state) {
2916 			dev_dbg(&slot->mmc->class_dev, "card %s\n",
2917 				present ? "inserted" : "removed");
2918 			MMC_DBG_BOOT_FUNC(mmc, "  The card is %s.  ===!!!!!!==[%s]\n",
2919 				present ? "inserted" : "removed.", mmc_hostname(mmc));
2921 			rk_send_wakeup_key();//wake up system
2922 			spin_lock_bh(&host->lock);
2924 			del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2925 			/* Card change detected */
2926 			slot->last_detect_state = present;
2928 			/* Clean up queue if present */
2931 				if (mrq == host->mrq) {
	/* request currently being processed: fail it according to how
	 * far the state machine has progressed */
2935 					switch (host->state) {
2938 					case STATE_SENDING_CMD:
2939 						mrq->cmd->error = -ENOMEDIUM;
2943 					case STATE_SENDING_DATA:
2944 						mrq->data->error = -ENOMEDIUM;
2945 						dw_mci_stop_dma(host);
2947 					case STATE_DATA_BUSY:
2948 					case STATE_DATA_ERROR:
2949 						if (mrq->data->error == -EINPROGRESS)
2950 							mrq->data->error = -ENOMEDIUM;
2954 					case STATE_SENDING_STOP:
2955 						mrq->stop->error = -ENOMEDIUM;
2959 					dw_mci_request_end(host, mrq);
	/* request still queued: fail every stage outright */
2961 					list_del(&slot->queue_node);
2962 					mrq->cmd->error = -ENOMEDIUM;
2964 						mrq->data->error = -ENOMEDIUM;
2966 						mrq->stop->error = -ENOMEDIUM;
2968 					MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
2969 							mrq->cmd->opcode, mmc_hostname(mmc));
2971 					spin_unlock(&host->lock);
2972 					mmc_request_done(slot->mmc, mrq);
2973 					spin_lock(&host->lock);
2977 			/* Power down slot */
2979 				/* Clear down the FIFO */
2980 				dw_mci_fifo_reset(host);
2981 #ifdef CONFIG_MMC_DW_IDMAC
2982 				if(!(cpu_is_rk3036() || cpu_is_rk312x()))
2983 					dw_mci_idmac_reset(host);
2988 			spin_unlock_bh(&host->lock);
	/* re-sample in case the card state changed while we worked */
2990 			present = dw_mci_get_cd(mmc);
2993 		mmc_detect_change(slot->mmc,
2994 			msecs_to_jiffies(host->pdata->detect_delay_ms));
2999 /* given a slot id, find out the device node representing that slot */
/* Scans the controller's DT children for one whose "reg" property
 * equals 'slot'; returns that node (NULL paths are on elided lines). */
3000 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3002 	struct device_node *np;
3006 	if (!dev || !dev->of_node)
3009 	for_each_child_of_node(dev->of_node, np) {
3010 		addr = of_get_property(np, "reg", &len);
3011 		if (!addr || (len < sizeof(int)))
	/* "reg" is big-endian in DT; compare after byte-swap */
3013 		if (be32_to_cpup(addr) == slot)
/* Table mapping per-slot DT property names to driver quirk flags. */
3019 static struct dw_mci_of_slot_quirks {
3022 } of_slot_quirks[] = {
3024 		.quirk	= "disable-wp",
3025 		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/* Accumulate quirk flags for 'slot' from the DT properties listed in
 * of_slot_quirks[]; returns the OR of all matched quirk ids. */
3029 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3031 	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3036 		for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3037 			if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3038 				quirks |= of_slot_quirks[idx].id;
3043 /* find out bus-width for a given slot */
/* Reads "bus-width" from the controller node itself (the per-slot node
 * lookup is commented out); falls back with an error message when the
 * property is absent — default value is on an elided line. */
3044 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3046 	struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3052 	if (of_property_read_u32(np, "bus-width", &bus_wd))
3053 		dev_err(dev, "bus-width property not found, assuming width"
3059 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/* Requests the "pwr-gpios" GPIO from the controller node and drives it
 * low (active power-enable per the vendor comment); error paths return
 * on elided lines. */
3060 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3062 	struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3068 	gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3070 	/* Having a missing entry is valid; return silently */
3071 	if (!gpio_is_valid(gpio))
3074 	if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3075 		dev_warn(dev, "gpio [%d] request failed\n", gpio);
3079 	gpio_direction_output(gpio, 0);//set 0 to pwr-en
3085 /* find the write protect gpio for a given slot; or -1 if none specified */
/* Looks up "wp-gpios" in the per-slot DT node and requests it; the
 * returned GPIO number (or -1) paths are on elided lines. */
3086 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3088 	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3094 	gpio = of_get_named_gpio(np, "wp-gpios", 0);
3096 	/* Having a missing entry is valid; return silently */
3097 	if (!gpio_is_valid(gpio))
3100 	if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3101 		dev_warn(dev, "gpio [%d] request failed\n", gpio);
3108 /* find the cd gpio for a given slot */
/* Registers "cd-gpios" from the controller node as the MMC core's
 * card-detect GPIO (debounce 0) via the slot-gpio helper. */
3109 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3110 					struct mmc_host *mmc)
3112 	struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3118 	gpio = of_get_named_gpio(np, "cd-gpios", 0);
3120 	/* Having a missing entry is valid; return silently */
3121 	if (!gpio_is_valid(gpio))
3124 	if (mmc_gpio_request_cd(mmc, gpio, 0))
3125 		dev_warn(dev, "gpio [%d] request failed\n", gpio);
/* Threaded IRQ handler for the GPIO card-detect line: defers the
 * actual detect handling to the card workqueue (return value is on an
 * elided line). */
3128 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3130 	struct mmc_host *mmc = dev_id;
3131 	struct dw_mci_slot *slot = mmc_priv(mmc);
3132 	struct dw_mci *host = slot->host;
3134 	queue_work(host->card_workqueue, &host->card_work);
/* Install dw_mci_gpio_cd_irqt as a threaded interrupt on the given
 * card-detect GPIO, triggering on both edges so insert and remove are
 * both seen. */
3139 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3140 					struct mmc_host *mmc)
3142 	struct dw_mci_slot *slot = mmc_priv(mmc);
3143 	struct dw_mci *host = slot->host;
3147 	/* Having a missing entry is valid; return silently */
3148 	if (!gpio_is_valid(gpio))
3151 	irq = gpio_to_irq(gpio);
3153 		ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3154 				NULL, dw_mci_gpio_cd_irqt,
3155 				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
3160 		dev_err(host->dev, "Request cd-gpio interrupt error!\n");
3163 #else /* CONFIG_OF */
/* No-op fallbacks used when the kernel is built without device-tree
 * support; return values are on elided lines. */
3164 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3168 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3172 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3176 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3180 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3181 					struct mmc_host *mmc)
3185 #endif /* CONFIG_OF */
/*
 * dw_mci_init_slot - allocate and register one mmc_host for slot 'id'.
 * Parses DT for frequency limits, restricted card types, quirks and
 * capability flags; wires up the optional GPIO card-detect IRQ on
 * rk312x; configures OCR, block-geometry limits (IDMAC vs external
 * DMAC vs PIO), power-enable / write-protect GPIOs and the vmmc
 * regulator; registers the host with the MMC core and applies idle or
 * default pinctrl states for non-eMMC slots.
 * Returns 0 on success (return/cleanup paths are on elided lines).
 * NOTE(review): sampled listing — many original lines (braces, some
 * declarations, error labels) are absent between the numbered lines.
 */
3187 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3189 	struct mmc_host *mmc;
3190 	struct dw_mci_slot *slot;
3191 	const struct dw_mci_drv_data *drv_data = host->drv_data;
3196 	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3200 	slot = mmc_priv(mmc);
3204 	host->slot[id] = slot;
3207 	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3209 	mmc->ops = &dw_mci_ops;
	/* frequency limits: DT "clock-freq-min-max" or driver defaults */
3211 	if (of_property_read_u32_array(host->dev->of_node,
3212 				       "clock-freq-min-max", freq, 2)) {
3213 		mmc->f_min = DW_MCI_FREQ_MIN;
3214 		mmc->f_max = DW_MCI_FREQ_MAX;
3216         printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
3217                 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3219 		mmc->f_min = freq[0];
3220 		mmc->f_max = freq[1];
3222         printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
3223                 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3226 	if(strstr("mmc0",mmc_hostname(mmc)))
3227 		printk("Line%d..%s: The rk_sdmmc %s",__LINE__, __FUNCTION__,RK_SDMMC_DRIVER_VERSION);
	/* DT flags restrict which card class this controller serves */
3229 	if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3230 		mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3231 	if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3232 		mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3233 	if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3234 		mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3236         /* We assume only low-level chip use gpio_cd */
3237         if (cpu_is_rk312x() &&
3239                 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3240                 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3241                 if (gpio_is_valid(slot->cd_gpio)) {
3242                         /* Request gpio int for card detection */
3243                         dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3245                         slot->cd_gpio = -ENODEV;
3246                         dev_err(host->dev, "failed to get your cd-gpios!\n");
3250 	if (host->pdata->get_ocr)
3251 		mmc->ocr_avail = host->pdata->get_ocr(id);
	/* default OCR: advertise the full 1.65-3.6 V range */
3254 		mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3255                          | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3256                          | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3257                          | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3261 	 * Start with slot power disabled, it will be enabled when a card
3264 	if (host->pdata->setpower)
3265 		host->pdata->setpower(id, 0);
3267 	if (host->pdata->caps)
3268 		mmc->caps = host->pdata->caps;
3270 	if (host->pdata->pm_caps)
3271 		mmc->pm_caps = host->pdata->pm_caps;
	/* pick the controller index for per-controller drv_data tables */
3273 	if (host->dev->of_node) {
3274 		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3278 		ctrl_id = to_platform_device(host->dev)->id;
3280 	if (drv_data && drv_data->caps)
3281 		mmc->caps |= drv_data->caps[ctrl_id];
3282 	if (drv_data && drv_data->hold_reg_flag)
3283 		mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3285     /* set the compatibility of driver. */
3286 	mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3287                  | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3289 	if (host->pdata->caps2)
3290 		mmc->caps2 = host->pdata->caps2;
3292 	if (host->pdata->get_bus_wd)
3293 		bus_width = host->pdata->get_bus_wd(slot->id);
3294 	else if (host->dev->of_node)
3295 		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
	/* NOTE(review): 8-bit presumably falls through to also set 4-bit
	 * on an elided line — confirm against the full source */
3299 	switch (bus_width) {
3301 		mmc->caps |= MMC_CAP_8_BIT_DATA;
3303 		mmc->caps |= MMC_CAP_4_BIT_DATA;
3306 	if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3307 		mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3308 	if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3309 		mmc->caps |= MMC_CAP_SDIO_IRQ;
3310 	if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3311 		mmc->caps |= MMC_CAP_HW_RESET;
3312 	if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3313 		mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3314 	if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3315 		mmc->pm_caps |= MMC_PM_KEEP_POWER;
3316 	if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3317 		mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3318 	if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3319 		mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3321     /*Assign pm_caps pass to pm_flags*/
3322 	mmc->pm_flags = mmc->pm_caps;
3324 	if (host->pdata->blk_settings) {
3325 		mmc->max_segs = host->pdata->blk_settings->max_segs;
3326 		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3327 		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3328 		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3329 		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3331 		/* Useful defaults if platform data is unset. */
3332 #ifdef CONFIG_MMC_DW_IDMAC
3333 		mmc->max_segs = host->ring_size;
3334 		mmc->max_blk_size = 65536;
3335 		mmc->max_blk_count = host->ring_size;
3336 		mmc->max_seg_size = 0x1000;
3337 		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3338                 if(cpu_is_rk3036() || cpu_is_rk312x()){
3339                         /* fixup for external dmac setting */
3341                         mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3342                         mmc->max_blk_count = 65535;
3343                         mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3344                         mmc->max_seg_size = mmc->max_req_size;
	/* PIO-mode geometry (no DMA compiled in) */
3348 		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3349 		mmc->max_blk_count = 512;
3350 		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3351 		mmc->max_seg_size = mmc->max_req_size;
3352 #endif /* CONFIG_MMC_DW_IDMAC */
3356 	slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3358 	if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
	/* vmmc regulator is only looked up for SD-capable slots */
3363 	if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3364 	    host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3368 	if (IS_ERR(host->vmmc)) {
3369 		pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3372 		ret = regulator_enable(host->vmmc);
3375 				"failed to enable regulator: %d\n", ret);
3382 	slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3384 	if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3385 		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3387 	ret = mmc_add_host(mmc);
3391         /* Pinctrl set default iomux state to fucntion port.
3392          * Fixme: DON'T TOUCH EMMC SETTING!
3394         if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3396                 host->pinctrl = devm_pinctrl_get(host->dev);
3397                 if(IS_ERR(host->pinctrl)){
3398                         printk("%s: Warning : No pinctrl used!\n",mmc_hostname(host->mmc));
	/* NOTE(review): checks pins_default after looking up pins_idle —
	 * looks like a copy/paste slip in the original; cannot fix safely
	 * from this sampled listing */
3400                         host->pins_idle= pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_IDLE);
3401                         if(IS_ERR(host->pins_default)){
3402                                 printk("%s: Warning : No IDLE pinctrl matched!\n", mmc_hostname(host->mmc));
3406                                 if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3407                                         printk("%s: Warning :  Idle pinctrl setting failed!\n", mmc_hostname(host->mmc));
3410                         host->pins_default = pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_DEFAULT);
3411                         if(IS_ERR(host->pins_default)){
3412                                 printk("%s: Warning : No default pinctrl matched!\n", mmc_hostname(host->mmc));
3416                                 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3417                                        printk("%s: Warning :  Default pinctrl setting failed!\n", mmc_hostname(host->mmc));
3423 #if defined(CONFIG_DEBUG_FS)
3424 	dw_mci_init_debugfs(slot);
3427 	/* Card initially undetected */
3428 	slot->last_detect_state = 1;
/*
 * dw_mci_cleanup_slot - unwind dw_mci_init_slot for one slot: run the
 * platform exit hook, unregister the mmc_host, clear the slot pointer
 * and free the host structure.
 */
3437 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3439 	/* Shutdown detect IRQ */
3440 	if (slot->host->pdata->exit)
3441 		slot->host->pdata->exit(id);
3443 	/* Debugfs stuff is cleaned up by mmc core */
3444 	mmc_remove_host(slot->mmc);
3445 	slot->host->slot[id] = NULL;
3446 	mmc_free_host(slot->mmc);
/*
 * dw_mci_init_dma - set up the DMA path for the host.  Allocates one
 * page of coherent memory for descriptor/sg translation, selects the
 * external-DMAC ops on rk3036/rk312x or the internal IDMAC ops
 * otherwise, validates that the chosen ops table is complete and runs
 * its init hook.  Falls back to PIO mode on any failure (the fallback
 * label lines are elided in this sampled listing).
 */
3449 static void dw_mci_init_dma(struct dw_mci *host)
3451 	/* Alloc memory for sg translation */
3452 	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3453 					  &host->sg_dma, GFP_KERNEL);
3454 	if (!host->sg_cpu) {
3455 		dev_err(host->dev, "%s: could not alloc DMA memory\n",
3460 	/* Determine which DMA interface to use */
3461 #if defined(CONFIG_MMC_DW_IDMAC)
3462         if(cpu_is_rk3036() || cpu_is_rk312x()){
3463                 host->dma_ops = &dw_mci_edmac_ops;
3464                 dev_info(host->dev, "Using external DMA controller.\n");
3466                 host->dma_ops = &dw_mci_idmac_ops;
3467                 dev_info(host->dev, "Using internal DMA controller.\n");
	/* ops table must provide the full lifecycle before we trust it */
3474 	if (host->dma_ops->init && host->dma_ops->start &&
3475 	    host->dma_ops->stop && host->dma_ops->cleanup) {
3476 		if (host->dma_ops->init(host)) {
3477 			dev_err(host->dev, "%s: Unable to initialize "
3478 				"DMA Controller.\n", __func__);
3482 		dev_err(host->dev, "DMA initialization not found.\n");
3490 	dev_info(host->dev, "Using PIO mode.\n");
/*
 * dw_mci_ctrl_reset - assert the given reset bit(s) in CTRL and poll
 * (up to 500 ms) for the hardware to clear them.  Returns true on
 * success, false after logging a timeout (return lines are elided in
 * this sampled listing).
 */
3495 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3497 	unsigned long timeout = jiffies + msecs_to_jiffies(500);
3500 	ctrl = mci_readl(host, CTRL);
3502 	mci_writel(host, CTRL, ctrl);
3504 	/* wait till resets clear */
3506 		ctrl = mci_readl(host, CTRL);
3507 		if (!(ctrl & reset))
3509 	} while (time_before(jiffies, timeout));
3512 		"Timeout resetting block (ctrl reset %#x)\n",
/* Reset only the FIFO.  Stops the sg_miter first because the reset
 * raises an interrupt whose handler expects host->sg to be torn down
 * (elided lines presumably NULL host->sg — confirm in full source). */
3518 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3521 	 * Reseting generates a block interrupt, hence setting
3522 	 * the scatter-gather pointer to NULL.
3525 		sg_miter_stop(&host->sg_miter);
3529 	return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full controller reset: FIFO + controller + DMA reset bits in one
 * dw_mci_ctrl_reset() call. */
3532 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3534 	return dw_mci_ctrl_reset(host,
3535 				 SDMMC_CTRL_FIFO_RESET |
3537 				 SDMMC_CTRL_DMA_RESET);
/* Table mapping controller-level DT property names to host quirks. */
3542 static struct dw_mci_of_quirks {
3547 		.quirk	= "broken-cd",
3548 		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * dw_mci_parse_dt - build a dw_mci_board pdata structure from the
 * controller's device-tree node: slot count (default 1), quirk table,
 * fifo depth, card-detect debounce, bus clock, driver-specific
 * parse_dt hook, and a series of capability/pm-cap property flags.
 * Returns the allocated pdata or an ERR_PTR.
 */
3552 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3554 	struct dw_mci_board *pdata;
3555 	struct device *dev = host->dev;
3556 	struct device_node *np = dev->of_node;
3557 	const struct dw_mci_drv_data *drv_data = host->drv_data;
3559 	u32 clock_frequency;
3561 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3563 		dev_err(dev, "could not allocate memory for pdata\n");
3564 		return ERR_PTR(-ENOMEM);
3567 	/* find out number of slots supported */
3568 	if (of_property_read_u32(dev->of_node, "num-slots",
3569 				 &pdata->num_slots)) {
3570 		dev_info(dev, "num-slots property not found, "
3571 			 "assuming 1 slot is available\n");
3572 		pdata->num_slots = 1;
3576 	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3577 		if (of_get_property(np, of_quirks[idx].quirk, NULL))
3578 			pdata->quirks |= of_quirks[idx].id;
3581 	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3582 		dev_info(dev, "fifo-depth property not found, using "
3583 			 "value of FIFOTH register as default\n");
3585 	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3587 	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3588 		pdata->bus_hz = clock_frequency;
	/* give the SoC-specific driver a chance to parse its own props */
3590 	if (drv_data && drv_data->parse_dt) {
3591 		ret = drv_data->parse_dt(host);
3593 			return ERR_PTR(ret);
3596 	if (of_find_property(np, "keep-power-in-suspend", NULL))
3597 		pdata->pm_caps |= MMC_PM_KEEP_POWER;
3599 	if (of_find_property(np, "enable-sdio-wakeup", NULL))
3600 		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3602 	if (of_find_property(np, "supports-highspeed", NULL))
3603 		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3605     if (of_find_property(np, "supports-UHS_SDR104", NULL))
3606 		pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3608     if (of_find_property(np, "supports-DDR_MODE", NULL))
3609 		pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3611     if (of_find_property(np, "caps2-mmc-hs200", NULL))
3612 		pdata->caps2 |= MMC_CAP2_HS200;
3614 	if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3615 		pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3617 	if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3618 		pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3620 	if (of_get_property(np, "cd-inverted", NULL))
3621 		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3622 	if (of_get_property(np, "bootpart-no-access", NULL))
3623 		pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3628 #else /* CONFIG_OF */
/* Non-DT builds cannot supply pdata from the device tree. */
3629 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3631 	return ERR_PTR(-EINVAL);
3633 #endif /* CONFIG_OF */
/*
 * dw_mci_dealwith_timeout - recover the controller after a data
 * timeout (called from the DTO watchdog).  For an in-flight data
 * transfer it fakes a CRC/end-bit error so the state machine fails the
 * request cleanly, fully resets the controller, re-initializes the
 * (internal) DMA, restores FIFOTH/TMOUT/interrupt masks, re-applies
 * bus settings on slots that keep power in suspend, and reschedules
 * the tasklet.  NOTE(review): sampled listing — 'regs'/'i' declarations
 * and several braces are on elided lines.
 */
3635 static void dw_mci_dealwith_timeout(struct dw_mci *host)
3639         switch(host->state){
3642                 case STATE_SENDING_DATA:
3643                 case STATE_DATA_BUSY:
	/* synthesize a data error so the tasklet completes the request */
3644                         host->data_status |= (SDMMC_INT_DCRC|SDMMC_INT_EBE);
3645                         mci_writel(host, RINTSTS, SDMMC_INT_DRTO);  // clear interrupt
3646                         set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3647                         host->state = STATE_DATA_BUSY;
3648                         if (!dw_mci_ctrl_all_reset(host)) {
3653                         /* NO requirement to reclaim slave chn using external dmac */
3654                         #ifdef CONFIG_MMC_DW_IDMAC
3655                         if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3656                                 if (host->use_dma && host->dma_ops->init)
3657                                         host->dma_ops->init(host);
3661                          * Restore the initial value at FIFOTH register
3662                          * And Invalidate the prev_blksz with zero
3664                         mci_writel(host, FIFOTH, host->fifoth_val);
3665                         host->prev_blksz = 0;
3666                         mci_writel(host, TMOUT, 0xFFFFFFFF);
3667                         mci_writel(host, RINTSTS, 0xFFFFFFFF);
3668                         regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
3669                                         | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
	/* card-detect interrupt only for removable (non-SDIO) slots */
3670                         if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3671                                 regs |= SDMMC_INT_CD;
3672                         mci_writel(host, INTMASK, regs);
3673                         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3674                         for (i = 0; i < host->num_slots; i++) {
3675                                 struct dw_mci_slot *slot = host->slot[i];
3678                                 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3679                                         dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3680                                         dw_mci_setup_bus(slot, true);
3683                         mci_writel(host, RINTSTS, 0xFFFFFFFF);
3684                         tasklet_schedule(&host->tasklet);
/*
 * dw_mci_dto_timeout - dto_timer callback: the data-over interrupt
 * never arrived.  Masks the host IRQ while it fakes an end-bit error,
 * clears all raw interrupt status and runs the full timeout recovery.
 */
3690 static void dw_mci_dto_timeout(unsigned long host_data)
3692 	struct dw_mci *host = (struct dw_mci *) host_data;
3694 	disable_irq(host->irq);
3696 	host->data_status = SDMMC_INT_EBE;
3697 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3698 	dw_mci_dealwith_timeout(host);
3700 	enable_irq(host->irq);
3702 int dw_mci_probe(struct dw_mci *host)
3704 const struct dw_mci_drv_data *drv_data = host->drv_data;
3705 int width, i, ret = 0;
3711 host->pdata = dw_mci_parse_dt(host);
3712 if (IS_ERR(host->pdata)) {
3713 dev_err(host->dev, "platform data not available\n");
3718 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3720 "Platform data must supply select_slot function\n");
3725 * In 2.40a spec, Data offset is changed.
3726 * Need to check the version-id and set data-offset for DATA register.
3728 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3729 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3731 if (host->verid < DW_MMC_240A)
3732 host->data_offset = DATA_OFFSET;
3734 host->data_offset = DATA_240A_OFFSET;
3737 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3738 if (IS_ERR(host->hclk_mmc)) {
3739 dev_err(host->dev, "failed to get hclk_mmc\n");
3740 ret = PTR_ERR(host->hclk_mmc);
3744 clk_prepare_enable(host->hclk_mmc);
3747 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3748 if (IS_ERR(host->clk_mmc)) {
3749 dev_err(host->dev, "failed to get clk mmc_per\n");
3750 ret = PTR_ERR(host->clk_mmc);
3754 host->bus_hz = host->pdata->bus_hz;
3755 if (!host->bus_hz) {
3756 dev_err(host->dev,"Platform data must supply bus speed\n");
3761 if (host->verid < DW_MMC_240A)
3762 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3764 //rockchip: fix divider 2 in clksum before controlller
3765 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3768 dev_err(host->dev, "failed to set clk mmc\n");
/*
 * Tail of dw_mci_probe() — the function opens before this chunk.
 * NOTE(review): this listing elides many original lines (the embedded line
 * numbers jump), so adjacent lines here are NOT necessarily adjacent in the
 * real file; missing braces/else-arms/goto labels live in the gaps.
 */
/* Gate the card clock on, then run any SoC-specific clock setup hook. */
3771 clk_prepare_enable(host->clk_mmc);
3773 if (drv_data && drv_data->setup_clock) {
3774 ret = drv_data->setup_clock(host);
3777 "implementation specific clock setup failed\n");
/* Seed per-host state from platform data before any IRQ can fire. */
3782 host->quirks = host->pdata->quirks;
3783 host->irq_state = true;
3784 host->set_speed = 0;
3786 host->svi_flags = 0;
3788 spin_lock_init(&host->lock);
3789 INIT_LIST_HEAD(&host->queue);
3792 * Get the host data width - this assumes that HCON has been set with
3793 * the correct values.
/* HCON[9:7] encodes the host bus width; pick matching FIFO accessors. */
3795 i = (mci_readl(host, HCON) >> 7) & 0x7;
3797 host->push_data = dw_mci_push_data16;
3798 host->pull_data = dw_mci_pull_data16;
/* data_shift = log2(bytes per FIFO access): 1 -> 16-bit. */
3800 host->data_shift = 1;
3801 } else if (i == 2) {
3802 host->push_data = dw_mci_push_data64;
3803 host->pull_data = dw_mci_pull_data64;
3805 host->data_shift = 3;
3807 /* Check for a reserved value, and warn if it is */
3809 "HCON reports a reserved host data width!\n"
3810 "Defaulting to 32-bit access.\n");
3811 host->push_data = dw_mci_push_data32;
3812 host->pull_data = dw_mci_pull_data32;
3814 host->data_shift = 2;
3817 /* Reset all blocks */
3818 if (!dw_mci_ctrl_all_reset(host))
/* DMA ops may be overridden by platform data before generic DMA init. */
3821 host->dma_ops = host->pdata->dma_ops;
3822 dw_mci_init_dma(host);
3824 /* Clear the interrupts for the host controller */
3825 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3826 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3828 /* Put in max timeout */
3829 mci_writel(host, TMOUT, 0xFFFFFFFF);
3832 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3833 * Tx Mark = fifo_size / 2 DMA Size = 8
3835 if (!host->pdata->fifo_depth) {
3837 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3838 * have been overwritten by the bootloader, just like we're
3839 * about to do, so if you know the value for your hardware, you
3840 * should put it in the platform data.
/* Derive depth from the current RX_WMark field (FIFOTH[27:16]). */
3842 fifo_size = mci_readl(host, FIFOTH);
3843 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3845 fifo_size = host->pdata->fifo_depth;
3847 host->fifo_depth = fifo_size;
/* DMA multiple-transaction size 0x2 (8 transfers) per the comment above. */
3849 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3850 mci_writel(host, FIFOTH, host->fifoth_val);
3852 /* disable clock to CIU */
3853 mci_writel(host, CLKENA, 0);
3854 mci_writel(host, CLKSRC, 0);
/* Bottom-half machinery: tasklet for request completion, ordered (max_active
 * = 1) reclaim-safe workqueue for card-detect work, then the ISR itself. */
3856 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3857 host->card_workqueue = alloc_workqueue("dw-mci-card",
3858 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
3859 if (!host->card_workqueue) {
3863 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
3864 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3865 host->irq_flags, "dw-mci", host);
/* Slot count: platform override, else HCON[5:1] + 1. */
3869 if (host->pdata->num_slots)
3870 host->num_slots = host->pdata->num_slots;
3872 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
/* Data-over timeout watchdog; armed elsewhere, torn down in remove(). */
3874 setup_timer(&host->dto_timer, dw_mci_dto_timeout, (unsigned long)host);
3875 /* We need at least one slot to succeed */
3876 for (i = 0; i < host->num_slots; i++) {
3877 ret = dw_mci_init_slot(host, i);
3879 dev_dbg(host->dev, "slot %d init failed\n", i);
3885 * Enable interrupts for command done, data over, data empty, card det,
3886 * receive ready and error such as transmit, receive timeout, crc error
3888 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3889 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
3890 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect IRQ only for removable slots, i.e. neither SDIO nor eMMC. */
3891 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3892 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3893 regs |= SDMMC_INT_CD;
3895 mci_writel(host, INTMASK, regs);
3897 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
3899 dev_info(host->dev, "DW MMC controller at irq %d, "
3900 "%d bit host data width, "
3902 host->irq, width, fifo_size);
3905 dev_info(host->dev, "%d slots initialized\n", init_slots);
3907 dev_dbg(host->dev, "attempted to initialize %d slots, "
3908 "but failed on all\n", host->num_slots);
3913 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
3914 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/*
 * Error-unwind ladder (labels elided in this view): release resources in
 * reverse order of acquisition — workqueue, DMA, regulator, clocks.
 */
3919 destroy_workqueue(host->card_workqueue);
3922 if (host->use_dma && host->dma_ops->exit)
3923 host->dma_ops->exit(host);
3926 regulator_disable(host->vmmc);
3927 regulator_put(host->vmmc);
3931 if (!IS_ERR(host->clk_mmc))
3932 clk_disable_unprepare(host->clk_mmc);
3934 if (!IS_ERR(host->hclk_mmc))
3935 clk_disable_unprepare(host->hclk_mmc);
3939 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - tear down a probed controller.
 * Mirrors the probe unwind: stop the DTO timer, mask/clear all interrupts,
 * clean up every slot, stop the CIU clock, then release workqueue, DMA,
 * regulator and clocks. NOTE(review): several lines (braces, `int i;`) are
 * elided from this listing.
 */
3941 void dw_mci_remove(struct dw_mci *host)
/* Kill the data-over timeout watchdog before interrupts are disabled. */
3944 del_timer_sync(&host->dto_timer);
3946 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3947 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3949 for(i = 0; i < host->num_slots; i++){
3950 dev_dbg(host->dev, "remove slot %d\n", i);
3952 dw_mci_cleanup_slot(host->slot[i], i);
3955 /* disable clock to CIU */
3956 mci_writel(host, CLKENA, 0);
3957 mci_writel(host, CLKSRC, 0);
3959 destroy_workqueue(host->card_workqueue);
3961 if(host->use_dma && host->dma_ops->exit)
3962 host->dma_ops->exit(host);
3965 regulator_disable(host->vmmc);
3966 regulator_put(host->vmmc);
/* Clocks may legitimately be error pointers on SoCs without them. */
3968 if(!IS_ERR(host->clk_mmc))
3969 clk_disable_unprepare(host->clk_mmc);
3971 if(!IS_ERR(host->hclk_mmc))
3972 clk_disable_unprepare(host->hclk_mmc);
3974 EXPORT_SYMBOL(dw_mci_remove);
3978 #ifdef CONFIG_PM_SLEEP
3980 * TODO: we should probably disable the clock to the card in the suspend path.
/*
 * dw_mci_suspend - system-sleep entry: power down the slot and quiesce the
 * controller. NOTE(review): lines are elided in this listing; the return
 * statement and some braces are not visible here.
 */
3982 int dw_mci_suspend(struct dw_mci *host)
3985 regulator_disable(host->vmmc);
3987 if(host->use_dma && host->dma_ops->exit)
3988 host->dma_ops->exit(host);
3990 /*only for sdmmc controller*/
3991 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
/* Block rescans while asleep and drop the detect wake-lock if a detect
 * work item was still pending. */
3992 host->mmc->rescan_disable = 1;
3993 if (cancel_delayed_work_sync(&host->mmc->detect))
3994 wake_unlock(&host->mmc->detect_wake_lock);
3996 disable_irq(host->irq);
/* Park the pins in their idle (low-leakage) pinctrl state. */
3997 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3998 MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
3999 mmc_hostname(host->mmc));
/* Ack everything, mask everything, disable the controller. */
4001 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4002 mci_writel(host, INTMASK, 0x00);
4003 mci_writel(host, CTRL, 0x00);
4005 /* Soc rk3126 already in gpio_cd mode */
/* Switch card-detect to a GPIO and make it a wake source so insertion/
 * removal can wake the system. */
4006 if (!(cpu_is_rk312x() && soc_is_rk3126())) {
4007 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4008 enable_irq_wake(host->mmc->slot.cd_irq);
4013 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - system-sleep exit: undo dw_mci_suspend() and reprogram the
 * controller from scratch. NOTE(review): many lines (braces, `return`s, the
 * rk3288 cpu check before the grf_writel, retry logic) are elided from this
 * listing — do not assume the visible lines are contiguous.
 */
4015 int dw_mci_resume(struct dw_mci *host)
4017 int i, ret, retry_cnt = 0;
4019 struct dw_mci_slot *slot;
/* SDIO function cards that were never present need no resume work. */
4021 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO){
4022 slot = mmc_priv(host->mmc);
4024 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4027 /*only for sdmmc controller*/
4028 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4029 /* Soc rk3126 already in gpio_cd mode */
/* Give card-detect back to the controller: drop the GPIO wake source set
 * up in suspend, restore default pinmux, re-enable rescans. */
4030 if (!(cpu_is_rk312x() && soc_is_rk3126())) {
4031 disable_irq_wake(host->mmc->slot.cd_irq);
4032 mmc_gpio_free_cd(host->mmc);
4034 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4035 MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
4036 mmc_hostname(host->mmc));
4037 host->mmc->rescan_disable = 0;
/* Per-SoC GRF tweak to route card-detect back to the controller; the
 * cpu_is_rk3288() branch head is elided in this view. */
4040 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4041 else if(cpu_is_rk3036())
4042 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4043 else if(cpu_is_rk312x())
4044 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4045 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
4048 ret = regulator_enable(host->vmmc);
4051 "failed to enable regulator: %d\n", ret);
4056 if(!dw_mci_ctrl_all_reset(host)){
4061 if(host->use_dma && host->dma_ops->init)
4062 host->dma_ops->init(host);
4065 * Restore the initial value at FIFOTH register
4066 * And Invalidate the prev_blksz with zero
4068 mci_writel(host, FIFOTH, host->fifoth_val);
4069 host->prev_blksz = 0;
4070 /* Put in max timeout */
4071 mci_writel(host, TMOUT, 0xFFFFFFFF);
4073 mci_writel(host, RINTSTS, 0xFFFFFFFF);
/* Same interrupt set as probe; card-detect only for non-SDIO hosts. */
4074 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
4076 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4077 regs |= SDMMC_INT_CD;
4078 mci_writel(host, INTMASK, regs);
4079 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4080 /*only for sdmmc controller*/
/* Re-enable the IRQ disabled in suspend — only on the first pass
 * (retry_cnt == 0) so a retry does not unbalance disable/enable. */
4081 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
4082 enable_irq(host->irq);
/* Re-apply ios/bus settings for slots that kept power over suspend. */
4085 for(i = 0; i < host->num_slots; i++){
4086 struct dw_mci_slot *slot = host->slot[i];
4089 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4090 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4091 dw_mci_setup_bus(slot, true);
4097 EXPORT_SYMBOL(dw_mci_resume);
4098 #endif /* CONFIG_PM_SLEEP */
/*
 * Module init/exit: init just announces the driver (platform/DT glue
 * registers elsewhere); exit has no visible body in this listing.
 */
4100 static int __init dw_mci_init(void)
4102 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4106 static void __exit dw_mci_exit(void)
4110 module_init(dw_mci_init);
4111 module_exit(dw_mci_exit);
/* Module metadata. */
4113 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4114 MODULE_AUTHOR("NXP Semiconductor VietNam");
4115 MODULE_AUTHOR("Imagination Technologies Ltd");
4116 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
/* Fixed mojibake: a GBK full-width comma was mis-decoded as "£¬"; also
 * removed the stray spaces inside/after the angle-bracketed e-mail. */
4117 MODULE_AUTHOR("Rockchip Electronics, Bangwang Xie <xbw@rock-chips.com>");
4118 MODULE_LICENSE("GPL v2");