2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/mmc/host.h>
34 #include <linux/mmc/mmc.h>
35 #include <linux/mmc/sd.h>
36 #include <linux/mmc/sdio.h>
37 #include <linux/mmc/rk_mmc.h>
38 #include <linux/bitops.h>
39 #include <linux/regulator/consumer.h>
40 #include <linux/workqueue.h>
42 #include <linux/of_gpio.h>
43 #include <linux/mmc/slot-gpio.h>
44 #include <linux/clk-private.h>
45 #include <linux/rockchip/cpu.h>
48 #include "rk_sdmmc_dbg.h"
49 #include <linux/regulator/rockchip_io_vol_domain.h>
50 #include "../../clk/rockchip/clk-ops.h"
52 #define grf_writel(v, offset) do { writel_relaxed(v, RK_GRF_VIRT + offset); dsb(); } while (0)
54 #define RK_SDMMC_DRIVER_VERSION "Ver 1.11 2014-06-05"
56 /* Common flag combinations */
57 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
58 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
60 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
62 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
63 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
64 #define DW_MCI_SEND_STATUS 1
65 #define DW_MCI_RECV_STATUS 2
66 #define DW_MCI_DMA_THRESHOLD 16
68 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
69 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
71 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
72 #define SDMMC_DATA_TIMEOUT_SD 500
73 #define SDMMC_DATA_TIMEOUT_SDIO 250
74 #define SDMMC_DATA_TIMEOUT_EMMC 2500
76 #define SDMMC_CMD_RTO_MAX_HOLD 200
77 #define SDMMC_WAIT_FOR_UNBUSY 2500
79 #ifdef CONFIG_MMC_DW_IDMAC
80 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
81 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
82 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
86 u32 des0; /* Control Descriptor */
87 #define IDMAC_DES0_DIC BIT(1)
88 #define IDMAC_DES0_LD BIT(2)
89 #define IDMAC_DES0_FD BIT(3)
90 #define IDMAC_DES0_CH BIT(4)
91 #define IDMAC_DES0_ER BIT(5)
92 #define IDMAC_DES0_CES BIT(30)
93 #define IDMAC_DES0_OWN BIT(31)
95 u32 des1; /* Buffer sizes */
96 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
97 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
99 u32 des2; /* buffer 1 physical address */
101 u32 des3; /* buffer 2 physical address */
103 #endif /* CONFIG_MMC_DW_IDMAC */
105 static const u8 tuning_blk_pattern_4bit[] = {
106 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
107 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
108 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
109 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
110 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
111 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
112 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
113 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
116 static const u8 tuning_blk_pattern_8bit[] = {
117 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
118 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
119 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
120 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
121 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
122 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
123 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
124 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
125 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
126 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
127 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
128 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
129 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
130 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
131 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
132 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
135 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
136 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
137 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
138 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
140 /*printk the all register of current host*/
142 static int dw_mci_regs_printk(struct dw_mci *host)
144 struct sdmmc_reg *regs = dw_mci_regs;
146 while( regs->name != 0 ){
147 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
150 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
155 #if defined(CONFIG_DEBUG_FS)
156 static int dw_mci_req_show(struct seq_file *s, void *v)
/*
 * debugfs "req" file: dump the slot's in-flight mmc_request (command,
 * data and stop phases) while holding the host lock so the snapshot is
 * consistent with the IRQ/tasklet paths.
 */
158 struct dw_mci_slot *slot = s->private;
159 struct mmc_request *mrq;
160 struct mmc_command *cmd;
161 struct mmc_command *stop;
162 struct mmc_data *data;
164 /* Make sure we get a consistent snapshot */
165 spin_lock_bh(&slot->host->lock);
175 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
176 cmd->opcode, cmd->arg, cmd->flags,
177 cmd->resp[0], cmd->resp[1], cmd->resp[2],
/*
 * NOTE(review): the format string prints four response words, but
 * resp[2] is passed twice — the fourth argument should almost
 * certainly be cmd->resp[3] (same fix was applied upstream).
 */
178 cmd->resp[2], cmd->error);
180 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
181 data->bytes_xfered, data->blocks,
182 data->blksz, data->flags, data->error);
185 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
186 stop->opcode, stop->arg, stop->flags,
187 stop->resp[0], stop->resp[1], stop->resp[2],
/* NOTE(review): same resp[2]-twice issue — should be stop->resp[3]. */
188 stop->resp[2], stop->error);
191 spin_unlock_bh(&slot->host->lock);
196 static int dw_mci_req_open(struct inode *inode, struct file *file)
198 return single_open(file, dw_mci_req_show, inode->i_private);
201 static const struct file_operations dw_mci_req_fops = {
202 .owner = THIS_MODULE,
203 .open = dw_mci_req_open,
206 .release = single_release,
209 static int dw_mci_regs_show(struct seq_file *s, void *v)
/*
 * debugfs "regs" file.
 *
 * NOTE(review): these seq_printf calls pass the SDMMC_* macros directly.
 * Elsewhere in this file those macros are used as register *offsets*
 * (see mci_readreg(host, regs->addr) in dw_mci_regs_printk), so this
 * appears to print the offsets rather than live register contents.
 * Mainline dw_mmc reads the registers here via mci_readl() — confirm
 * against the macro definitions.
 */
211 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
212 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
213 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
214 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
215 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
216 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
221 static int dw_mci_regs_open(struct inode *inode, struct file *file)
223 return single_open(file, dw_mci_regs_show, inode->i_private);
226 static const struct file_operations dw_mci_regs_fops = {
227 .owner = THIS_MODULE,
228 .open = dw_mci_regs_open,
231 .release = single_release,
234 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
236 struct mmc_host *mmc = slot->mmc;
237 struct dw_mci *host = slot->host;
241 root = mmc->debugfs_root;
245 node = debugfs_create_file("regs", S_IRUSR, root, host,
250 node = debugfs_create_file("req", S_IRUSR, root, slot,
255 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
259 node = debugfs_create_x32("pending_events", S_IRUSR, root,
260 (u32 *)&host->pending_events);
264 node = debugfs_create_x32("completed_events", S_IRUSR, root,
265 (u32 *)&host->completed_events);
272 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
274 #endif /* defined(CONFIG_DEBUG_FS) */
276 static void dw_mci_set_timeout(struct dw_mci *host)
278 /* timeout (maximum) */
279 mci_writel(host, TMOUT, 0xffffffff);
282 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
284 struct mmc_data *data;
285 struct dw_mci_slot *slot = mmc_priv(mmc);
286 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
288 cmd->error = -EINPROGRESS;
292 if (cmdr == MMC_STOP_TRANSMISSION)
293 cmdr |= SDMMC_CMD_STOP;
295 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
297 if (cmd->flags & MMC_RSP_PRESENT) {
298 /* We expect a response, so set this bit */
299 cmdr |= SDMMC_CMD_RESP_EXP;
300 if (cmd->flags & MMC_RSP_136)
301 cmdr |= SDMMC_CMD_RESP_LONG;
304 if (cmd->flags & MMC_RSP_CRC)
305 cmdr |= SDMMC_CMD_RESP_CRC;
309 cmdr |= SDMMC_CMD_DAT_EXP;
310 if (data->flags & MMC_DATA_STREAM)
311 cmdr |= SDMMC_CMD_STRM_MODE;
312 if (data->flags & MMC_DATA_WRITE)
313 cmdr |= SDMMC_CMD_DAT_WR;
316 if (drv_data && drv_data->prepare_command)
317 drv_data->prepare_command(slot->host, &cmdr);
323 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
325 struct mmc_command *stop;
331 stop = &host->stop_abort;
333 memset(stop, 0, sizeof(struct mmc_command));
335 if (cmdr == MMC_READ_SINGLE_BLOCK ||
336 cmdr == MMC_READ_MULTIPLE_BLOCK ||
337 cmdr == MMC_WRITE_BLOCK ||
338 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
339 stop->opcode = MMC_STOP_TRANSMISSION;
341 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
342 } else if (cmdr == SD_IO_RW_EXTENDED) {
343 stop->opcode = SD_IO_RW_DIRECT;
344 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
345 ((cmd->arg >> 28) & 0x7);
346 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
351 cmdr = stop->opcode | SDMMC_CMD_STOP |
352 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
357 static void dw_mci_start_command(struct dw_mci *host,
358 struct mmc_command *cmd, u32 cmd_flags)
360 struct dw_mci_slot *slot = host->slot[0];
361 /*temporality fix slot[0] due to host->num_slots equal to 1*/
363 host->pre_cmd = host->cmd;
366 "start command: ARGR=0x%08x CMDR=0x%08x\n",
367 cmd->arg, cmd_flags);
369 if(SD_SWITCH_VOLTAGE == cmd->opcode){
370 /*confirm non-low-power mode*/
371 mci_writel(host, CMDARG, 0);
372 dw_mci_disable_low_power(slot);
374 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
375 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
377 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
380 mci_writel(host, CMDARG, cmd->arg);
383 /* fix the value to 1 in some Soc,for example RK3188. */
384 if(host->mmc->hold_reg_flag)
385 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
387 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
391 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
393 dw_mci_start_command(host, data->stop, host->stop_cmdr);
396 /* DMA interface functions */
397 static void dw_mci_stop_dma(struct dw_mci *host)
399 if (host->using_dma) {
400 host->dma_ops->stop(host);
401 host->dma_ops->cleanup(host);
404 /* Data transfer was stopped by the interrupt handler */
405 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
408 static int dw_mci_get_dma_dir(struct mmc_data *data)
410 if (data->flags & MMC_DATA_WRITE)
411 return DMA_TO_DEVICE;
413 return DMA_FROM_DEVICE;
416 #ifdef CONFIG_MMC_DW_IDMAC
417 static void dw_mci_dma_cleanup(struct dw_mci *host)
419 struct mmc_data *data = host->data;
422 if (!data->host_cookie)
423 dma_unmap_sg(host->dev,
426 dw_mci_get_dma_dir(data));
429 static void dw_mci_idmac_reset(struct dw_mci *host)
431 u32 bmod = mci_readl(host, BMOD);
432 /* Software reset of DMA */
433 bmod |= SDMMC_IDMAC_SWRESET;
434 mci_writel(host, BMOD, bmod);
437 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
441 /* Disable and reset the IDMAC interface */
442 temp = mci_readl(host, CTRL);
443 temp &= ~SDMMC_CTRL_USE_IDMAC;
444 temp |= SDMMC_CTRL_DMA_RESET;
445 mci_writel(host, CTRL, temp);
447 /* Stop the IDMAC running */
448 temp = mci_readl(host, BMOD);
449 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
450 temp |= SDMMC_IDMAC_SWRESET;
451 mci_writel(host, BMOD, temp);
454 static void dw_mci_idmac_complete_dma(void *arg)
456 struct dw_mci *host = arg;
457 struct mmc_data *data = host->data;
459 dev_vdbg(host->dev, "DMA complete\n");
462 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
463 host->mrq->cmd->opcode,host->mrq->cmd->arg,
464 data->blocks,data->blksz,mmc_hostname(host->mmc));
467 host->dma_ops->cleanup(host);
470 * If the card was removed, data will be NULL. No point in trying to
471 * send the stop command or waiting for NBUSY in this case.
474 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
475 tasklet_schedule(&host->tasklet);
479 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
483 struct idmac_desc *desc = host->sg_cpu;
485 for (i = 0; i < sg_len; i++, desc++) {
486 unsigned int length = sg_dma_len(&data->sg[i]);
487 u32 mem_addr = sg_dma_address(&data->sg[i]);
489 /* Set the OWN bit and disable interrupts for this descriptor */
490 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
493 IDMAC_SET_BUFFER1_SIZE(desc, length);
495 /* Physical address to DMA to/from */
496 desc->des2 = mem_addr;
499 /* Set first descriptor */
501 desc->des0 |= IDMAC_DES0_FD;
503 /* Set last descriptor */
504 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
505 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
506 desc->des0 |= IDMAC_DES0_LD;
511 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
515 dw_mci_translate_sglist(host, host->data, sg_len);
517 /* Select IDMAC interface */
518 temp = mci_readl(host, CTRL);
519 temp |= SDMMC_CTRL_USE_IDMAC;
520 mci_writel(host, CTRL, temp);
524 /* Enable the IDMAC */
525 temp = mci_readl(host, BMOD);
526 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
527 mci_writel(host, BMOD, temp);
529 /* Start it running */
530 mci_writel(host, PLDMND, 1);
533 static int dw_mci_idmac_init(struct dw_mci *host)
535 struct idmac_desc *p;
538 /* Number of descriptors in the ring buffer */
539 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
541 /* Forward link the descriptor list */
542 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
543 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
545 /* Set the last descriptor as the end-of-ring descriptor */
546 p->des3 = host->sg_dma;
547 p->des0 = IDMAC_DES0_ER;
549 dw_mci_idmac_reset(host);
551 /* Mask out interrupts - get Tx & Rx complete only */
552 mci_writel(host, IDSTS, IDMAC_INT_CLR);
553 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
556 /* Set the descriptor base address */
557 mci_writel(host, DBADDR, host->sg_dma);
561 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
562 .init = dw_mci_idmac_init,
563 .start = dw_mci_idmac_start_dma,
564 .stop = dw_mci_idmac_stop_dma,
565 .complete = dw_mci_idmac_complete_dma,
566 .cleanup = dw_mci_dma_cleanup,
568 #endif /* CONFIG_MMC_DW_IDMAC */
570 #ifdef CONFIG_MMC_DW_EDMAC
571 static void dw_mci_edma_cleanup(struct dw_mci *host)
573 struct mmc_data *data = host->data;
576 if (!data->host_cookie)
577 dma_unmap_sg(host->dev,
578 data->sg, data->sg_len,
579 dw_mci_get_dma_dir(data));
582 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
584 dmaengine_terminate_all(host->dms->ch);
587 static void dw_mci_edmac_complete_dma(void *arg)
589 struct dw_mci *host = arg;
590 struct mmc_data *data = host->data;
592 dev_vdbg(host->dev, "DMA complete\n");
595 if(data->flags & MMC_DATA_READ)
596 /* Invalidate cache after read */
597 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
598 data->sg_len, DMA_FROM_DEVICE);
600 host->dma_ops->cleanup(host);
603 * If the card was removed, data will be NULL. No point in trying to
604 * send the stop command or waiting for NBUSY in this case.
607 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
608 tasklet_schedule(&host->tasklet);
612 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
/*
 * Start a data transfer on the external dmaengine channel: configure the
 * slave (FIFO address, 4-byte bus width, burst size), prepare a slave
 * scatter-gather descriptor in the proper direction, hook up the
 * completion callback and kick the channel.
 */
614 struct dma_slave_config slave_config;
615 struct dma_async_tx_descriptor *desc = NULL;
616 struct scatterlist *sgl = host->data->sg;
617 u32 sg_elems = host->data->sg_len;
620 /* Set external dma config: burst size, burst width*/
621 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
622 slave_config.src_addr = slave_config.dst_addr;
623 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
624 slave_config.src_addr_width = slave_config.dst_addr_width;
626 /* Match FIFO dma burst MSIZE with external dma config*/
/*
 * BUG(review): "&& 0x7" is a *logical* AND, so dst_maxburst is always
 * 0 or 1 instead of the 3-bit MSIZE field from FIFOTH[30:28].  This
 * should be a bitwise mask: ((host->fifoth_val) >> 28) & 0x7.
 * The same defect was fixed in the mainline dw_mmc edmac code.
 */
627 slave_config.dst_maxburst = ((host->fifoth_val) >> 28) && 0x7;
628 slave_config.src_maxburst = slave_config.dst_maxburst;
630 if(host->data->flags & MMC_DATA_WRITE){
631 slave_config.direction = DMA_MEM_TO_DEV;
632 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
634 dev_err(host->dev, "error in dw_mci edma configuration.\n");
638 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
639 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
641 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
644 /* Set dw_mci_edmac_complete_dma as callback */
645 desc->callback = dw_mci_edmac_complete_dma;
646 desc->callback_param = (void *)host;
647 dmaengine_submit(desc);
649 /* Flush cache before write */
650 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
651 sg_elems, DMA_TO_DEVICE);
652 dma_async_issue_pending(host->dms->ch);
/* Read path: same sequence with the DEV_TO_MEM direction. */
655 slave_config.direction = DMA_DEV_TO_MEM;
656 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
658 dev_err(host->dev, "error in dw_mci edma configuration.\n");
661 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
662 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
664 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
667 /* set dw_mci_edmac_complete_dma as callback */
668 desc->callback = dw_mci_edmac_complete_dma;
669 desc->callback_param = (void *)host;
670 dmaengine_submit(desc);
671 dma_async_issue_pending(host->dms->ch);
675 static int dw_mci_edmac_init(struct dw_mci *host)
/*
 * Allocate the external-DMA slave bookkeeping and request the dmaengine
 * channel named "dw_mci" from the device tree.
 */
677 MMC_DBG_BOOT_FUNC(host->mmc,"dw_mci_edmac_init: Soc is 0x%x [%s]\n",
678 (unsigned int)(rockchip_soc_id & ROCKCHIP_CPU_MASK), mmc_hostname(host->mmc));
680 /* 1) request external dma channel, SHOULD decide chn in dts */
/*
 * NOTE(review): the kmalloc() result is used unchecked — an OOM here
 * dereferences NULL on the next line.  (The cast is also unnecessary
 * in C.)
 */
681 host->dms = (struct dw_mci_dma_slave *)kmalloc(sizeof(struct dw_mci_dma_slave),GFP_KERNEL);
682 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
/*
 * NOTE(review): this error print dereferences host->dms->ch->chan_id,
 * but dma_request_slave_channel() returns NULL on failure — i.e. the
 * error path itself NULL-dereferences exactly when it runs.
 */
684 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
685 host->dms->ch->chan_id);
698 static void dw_mci_edmac_exit(struct dw_mci *host)
700 dma_release_channel(host->dms->ch);
703 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
704 .init = dw_mci_edmac_init,
705 .exit = dw_mci_edmac_exit,
706 .start = dw_mci_edmac_start_dma,
707 .stop = dw_mci_edmac_stop_dma,
708 .complete = dw_mci_edmac_complete_dma,
709 .cleanup = dw_mci_edma_cleanup,
712 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
713 struct mmc_data *data,
716 struct scatterlist *sg;
717 unsigned int i, sg_len;
719 if (!next && data->host_cookie)
720 return data->host_cookie;
723 * We don't do DMA on "complex" transfers, i.e. with
724 * non-word-aligned buffers or lengths. Also, we don't bother
725 * with all the DMA setup overhead for short transfers.
727 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
733 for_each_sg(data->sg, sg, data->sg_len, i) {
734 if (sg->offset & 3 || sg->length & 3)
738 sg_len = dma_map_sg(host->dev,
741 dw_mci_get_dma_dir(data));
746 data->host_cookie = sg_len;
751 static void dw_mci_pre_req(struct mmc_host *mmc,
752 struct mmc_request *mrq,
755 struct dw_mci_slot *slot = mmc_priv(mmc);
756 struct mmc_data *data = mrq->data;
758 if (!slot->host->use_dma || !data)
761 if (data->host_cookie) {
762 data->host_cookie = 0;
766 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
767 data->host_cookie = 0;
770 static void dw_mci_post_req(struct mmc_host *mmc,
771 struct mmc_request *mrq,
774 struct dw_mci_slot *slot = mmc_priv(mmc);
775 struct mmc_data *data = mrq->data;
777 if (!slot->host->use_dma || !data)
780 if (data->host_cookie)
781 dma_unmap_sg(slot->host->dev,
784 dw_mci_get_dma_dir(data));
785 data->host_cookie = 0;
788 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
/*
 * Pick the largest DMA burst size (MSIZE) that evenly divides both the
 * block depth and the TX-watermark headroom, then program FIFOTH with
 * the matching RX/TX watermarks.  Falls back to msize=0 / rx_wmark=1
 * when no candidate fits (e.g. blksz not a FIFO-width multiple).
 */
790 #if defined(CONFIG_MMC_DW_IDMAC) || defined(CONFIG_MMC_DW_EDMAC)
791 unsigned int blksz = data->blksz;
792 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
793 u32 fifo_width = 1 << host->data_shift;
794 u32 blksz_depth = blksz / fifo_width, fifoth_val;
795 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
796 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
798 tx_wmark = (host->fifo_depth) / 2;
799 tx_wmark_invers = host->fifo_depth - tx_wmark;
803 * if blksz is not a multiple of the FIFO width
805 if (blksz % fifo_width) {
/* Scan candidates from largest (256) down to smallest. */
812 if (!((blksz_depth % mszs[idx]) ||
813 (tx_wmark_invers % mszs[idx]))) {
815 rx_wmark = mszs[idx] - 1;
820 * If idx is '0', it won't be tried
821 * Thus, initial values are used
824 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
825 mci_writel(host, FIFOTH, fifoth_val);
829 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
831 unsigned int blksz = data->blksz;
832 u32 blksz_depth, fifo_depth;
835 WARN_ON(!(data->flags & MMC_DATA_READ));
837 if (host->timing != MMC_TIMING_MMC_HS200 &&
838 host->timing != MMC_TIMING_UHS_SDR104)
841 blksz_depth = blksz / (1 << host->data_shift);
842 fifo_depth = host->fifo_depth;
844 if (blksz_depth > fifo_depth)
848 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
849 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
850 * Currently just choose blksz.
853 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
857 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
860 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
867 /* If we don't have a channel, we can't do DMA */
871 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
873 host->dma_ops->stop(host);
880 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
881 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
885 * Decide the MSIZE and RX/TX Watermark.
886 * If current block size is same with previous size,
887 * no need to update fifoth.
889 if (host->prev_blksz != data->blksz)
890 dw_mci_adjust_fifoth(host, data);
893 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
895 /* Enable the DMA interface */
896 temp = mci_readl(host, CTRL);
897 temp |= SDMMC_CTRL_DMA_ENABLE;
898 mci_writel(host, CTRL, temp);
900 /* Disable RX/TX IRQs, let DMA handle it */
901 temp = mci_readl(host, INTMASK);
902 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
903 mci_writel(host, INTMASK, temp);
905 host->dma_ops->start(host, sg_len);
910 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
914 data->error = -EINPROGRESS;
921 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
923 if (data->flags & MMC_DATA_READ) {
924 host->dir_status = DW_MCI_RECV_STATUS;
925 dw_mci_ctrl_rd_thld(host, data);
927 host->dir_status = DW_MCI_SEND_STATUS;
930 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
931 data->blocks, data->blksz, mmc_hostname(host->mmc));
933 if (dw_mci_submit_data_dma(host, data)) {
934 int flags = SG_MITER_ATOMIC;
935 if (host->data->flags & MMC_DATA_READ)
936 flags |= SG_MITER_TO_SG;
938 flags |= SG_MITER_FROM_SG;
940 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
942 host->part_buf_start = 0;
943 host->part_buf_count = 0;
945 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
946 temp = mci_readl(host, INTMASK);
947 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
948 mci_writel(host, INTMASK, temp);
950 temp = mci_readl(host, CTRL);
951 temp &= ~SDMMC_CTRL_DMA_ENABLE;
952 mci_writel(host, CTRL, temp);
955 * Use the initial fifoth_val for PIO mode.
956 * If next issued data may be transfered by DMA mode,
957 * prev_blksz should be invalidated.
959 mci_writel(host, FIFOTH, host->fifoth_val);
960 host->prev_blksz = 0;
963 * Keep the current block size.
964 * It will be used to decide whether to update
965 * fifoth register next time.
967 host->prev_blksz = data->blksz;
971 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
973 struct dw_mci *host = slot->host;
974 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
975 unsigned int cmd_status = 0;
976 #ifdef SDMMC_WAIT_FOR_UNBUSY
978 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
980 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
982 ret = time_before(jiffies, timeout);
983 cmd_status = mci_readl(host, STATUS);
984 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
988 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
989 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
992 mci_writel(host, CMDARG, arg);
994 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
995 if(cmd & SDMMC_CMD_UPD_CLK)
996 timeout = jiffies + msecs_to_jiffies(50);
998 timeout = jiffies + msecs_to_jiffies(500);
999 while (time_before(jiffies, timeout)) {
1000 cmd_status = mci_readl(host, CMD);
1001 if (!(cmd_status & SDMMC_CMD_START))
1004 dev_err(&slot->mmc->class_dev,
1005 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1006 cmd, arg, cmd_status);
1009 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1011 struct dw_mci *host = slot->host;
1012 unsigned int tempck,clock = slot->clock;
1017 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1018 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1021 mci_writel(host, CLKENA, 0);
1022 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1023 if(host->svi_flags == 0)
1024 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1026 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1028 } else if (clock != host->current_speed || force_clkinit) {
1029 div = host->bus_hz / clock;
1030 if (host->bus_hz % clock && host->bus_hz > clock)
1032 * move the + 1 after the divide to prevent
1033 * over-clocking the card.
1037 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1039 if ((clock << div) != slot->__clk_old || force_clkinit) {
1040 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1041 dev_info(&slot->mmc->class_dev,
1042 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1043 slot->id, host->bus_hz, clock,
1046 host->set_speed = tempck;
1047 host->set_div = div;
1051 mci_writel(host, CLKENA, 0);
1052 mci_writel(host, CLKSRC, 0);
1056 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1058 if(clock <= 400*1000){
1059 MMC_DBG_BOOT_FUNC(host->mmc,
1060 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1061 clock * 2, mmc_hostname(host->mmc));
1062 /* clk_mmc will change parents to 24MHz xtal*/
1063 clk_set_rate(host->clk_mmc, clock * 2);
1066 host->set_div = div;
1070 MMC_DBG_BOOT_FUNC(host->mmc,
1071 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1072 mmc_hostname(host->mmc));
1075 MMC_DBG_ERR_FUNC(host->mmc,
1076 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1077 mmc_hostname(host->mmc));
1079 host->set_div = div;
1080 host->bus_hz = host->set_speed * 2;
1081 MMC_DBG_BOOT_FUNC(host->mmc,
1082 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1083 div, host->bus_hz, mmc_hostname(host->mmc));
1085 /* BUG may be here, come on, Linux BSP engineer looks!
1086 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1087 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1088 some oops happened like that:
1089 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1090 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1091 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1092 mmc0: new high speed DDR MMC card at address 0001
1093 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1095 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1096 mmcblk0: retrying using single block read
1097 mmcblk0: error -110 sending status command, retrying
1099 How to: If eMMC HW version < 4.51, or > 4.51 but no caps2-mmc-hs200 support in dts
1100 Please set dts emmc clk to 100M or 150M, I will workaround it!
1103 if (host->verid < DW_MMC_240A)
1104 clk_set_rate(host->clk_mmc,(host->bus_hz));
1106 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1112 /* set clock to desired speed */
1113 mci_writel(host, CLKDIV, div);
1117 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1119 /* enable clock; only low power if no SDIO */
1120 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1122 if (host->verid < DW_MMC_240A)
1123 sdio_int = SDMMC_INT_SDIO(slot->id);
1125 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1127 if (!(mci_readl(host, INTMASK) & sdio_int))
1128 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1129 mci_writel(host, CLKENA, clk_en_a);
1133 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1134 /* keep the clock with reflecting clock dividor */
1135 slot->__clk_old = clock << div;
1138 host->current_speed = clock;
1140 if(slot->ctype != slot->pre_ctype)
1141 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1143 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1144 mmc_hostname(host->mmc));
1145 slot->pre_ctype = slot->ctype;
1147 /* Set the current slot bus width */
1148 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1151 static void dw_mci_wait_unbusy(struct dw_mci *host)
/*
 * Busy-poll the STATUS register until both DATA_BUSY and MC_BUSY clear,
 * or until a card-type-specific timeout (eMMC/SD/SDIO milliseconds)
 * elapses.  NOTE(review): this is a tight poll with no cpu_relax()/
 * sleep visible in this view — confirm acceptable in the calling
 * context.
 */
1154 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1155 unsigned long time_loop;
1156 unsigned int status;
1158 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
/* Select timeout by restricted card type; SDIO value is the default. */
1160 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1161 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1162 else if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1163 timeout = SDMMC_DATA_TIMEOUT_SD;
1165 time_loop = jiffies + msecs_to_jiffies(timeout);
1167 status = mci_readl(host, STATUS);
1168 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1170 } while (time_before(jiffies, time_loop));
1174 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1177 * 0--status is busy.
1178 * 1--status is unbusy.
1180 int dw_mci_card_busy(struct mmc_host *mmc)
/*
 * Signal-voltage-switch busy callback (see the comment above: returns
 * 0 for "busy", 1 for "unbusy").  Rather than sampling a pin, it
 * toggles host->svi_flags so the two phases of the voltage-switch
 * handshake alternate: first call reports 1 (and arms svi_flags),
 * second call reports 0 (and clears it).  Presumably the IOS path
 * consults svi_flags to shorten its unbusy wait — verify against
 * dw_mci_set_ios.
 */
1182 struct dw_mci_slot *slot = mmc_priv(mmc);
1183 struct dw_mci *host = slot->host;
1185 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1186 host->svi_flags, mmc_hostname(host->mmc));
1189 if(host->svi_flags == 0){
1191 host->svi_flags = 1;
1192 return host->svi_flags;
1195 host->svi_flags = 0;
1196 return host->svi_flags;
1202 static void __dw_mci_start_request(struct dw_mci *host,
1203 struct dw_mci_slot *slot,
1204 struct mmc_command *cmd)
1206 struct mmc_request *mrq;
1207 struct mmc_data *data;
1211 if (host->pdata->select_slot)
1212 host->pdata->select_slot(slot->id);
1214 host->cur_slot = slot;
1217 dw_mci_wait_unbusy(host);
1219 host->pending_events = 0;
1220 host->completed_events = 0;
1221 host->data_status = 0;
1225 dw_mci_set_timeout(host);
1226 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1227 mci_writel(host, BLKSIZ, data->blksz);
1230 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1232 /* this is the first command, send the initialization clock */
1233 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1234 cmdflags |= SDMMC_CMD_INIT;
1237 dw_mci_submit_data(host, data);
1241 dw_mci_start_command(host, cmd, cmdflags);
1244 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
1247 static void dw_mci_start_request(struct dw_mci *host,
1248 struct dw_mci_slot *slot)
1250 struct mmc_request *mrq = slot->mrq;
1251 struct mmc_command *cmd;
1253 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1254 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1256 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1257 __dw_mci_start_request(host, slot, cmd);
1260 /* must be called with host->lock held */
1261 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1262 struct mmc_request *mrq)
1264 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
/* Start immediately when the host is idle; otherwise park the slot on
 * host->queue — dw_mci_request_end() will pick it up later. */
1269 if (host->state == STATE_IDLE) {
1270 host->state = STATE_SENDING_CMD;
1271 dw_mci_start_request(host, slot);
1273 list_add_tail(&slot->queue_node, &host->queue);
/* mmc_host_ops.request: entry point from the MMC core for a request. */
1277 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1279 struct dw_mci_slot *slot = mmc_priv(mmc);
1280 struct dw_mci *host = slot->host;
1285 * The check for card presence and queueing of the request must be
1286 * atomic, otherwise the card could be removed in between and the
1287 * request wouldn't fail until another card was inserted.
1289 spin_lock_bh(&host->lock);
/* No medium present: fail the request right away with -ENOMEDIUM. */
1291 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1292 spin_unlock_bh(&host->lock);
1293 mrq->cmd->error = -ENOMEDIUM;
1294 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1295 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
1297 mmc_request_done(mmc, mrq);
1301 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1302 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1304 dw_mci_queue_request(host, slot, mrq);
1306 spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.set_ios: apply bus-width/timing/clock/power settings.
 * First waits for the controller to go un-busy (with a longer budget
 * while a signal-voltage switch is in flight).
 */
1309 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1311 struct dw_mci_slot *slot = mmc_priv(mmc);
1312 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1313 struct dw_mci *host = slot->host;
1315 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1318 #ifdef SDMMC_WAIT_FOR_UNBUSY
1319 unsigned long time_loop;
/* Pick the busy-wait deadline: the (longer) SD data timeout while a
 * voltage switch is in progress, the normal un-busy budget otherwise. */
1322 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1323 if(host->svi_flags == 1)
1324 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1326 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1328 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1331 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1332 printk("%d..%s: no card. [%s]\n", \
1333 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until neither the data path nor the card is busy, or
 * the deadline above expires. */
1338 ret = time_before(jiffies, time_loop);
1339 regs = mci_readl(slot->host, STATUS);
1340 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1346 printk("slot->flags = %lu ", slot->flags);
1347 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1348 if(host->svi_flags != 1)
1351 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1352 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Translate the requested bus width into the controller CTYPE code. */
1356 switch (ios->bus_width) {
1357 case MMC_BUS_WIDTH_4:
1358 slot->ctype = SDMMC_CTYPE_4BIT;
1360 case MMC_BUS_WIDTH_8:
1361 slot->ctype = SDMMC_CTYPE_8BIT;
1364 /* set default 1 bit mode */
1365 slot->ctype = SDMMC_CTYPE_1BIT;
1366 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR50 uses the per-slot DDR bit in the upper half of UHS_REG. */
1369 regs = mci_readl(slot->host, UHS_REG);
1372 if (ios->timing == MMC_TIMING_UHS_DDR50)
1373 regs |= ((0x1 << slot->id) << 16);
1375 regs &= ~((0x1 << slot->id) << 16);
1377 mci_writel(slot->host, UHS_REG, regs);
1378 slot->host->timing = ios->timing;
1381 * Use mirror of ios->clock to prevent race with mmc
1382 * core ios update when finding the minimum.
1384 slot->clock = ios->clock;
/* Give the SoC-specific driver a chance to adjust settings. */
1386 if (drv_data && drv_data->set_ios)
1387 drv_data->set_ios(slot->host, ios);
1389 /* Slot specific timing and width adjustment */
1390 dw_mci_setup_bus(slot, false);
/* Power the slot up/down via the platform hook and PWREN register. */
1394 switch (ios->power_mode) {
1396 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1398 if (slot->host->pdata->setpower)
1399 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1400 regs = mci_readl(slot->host, PWREN);
1401 regs |= (1 << slot->id);
1402 mci_writel(slot->host, PWREN, regs);
1405 /* Power down slot */
1406 if(slot->host->pdata->setpower)
1407 slot->host->pdata->setpower(slot->id, 0);
1408 regs = mci_readl(slot->host, PWREN);
1409 regs &= ~(1 << slot->id);
1410 mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops.get_ro: report write-protect state.
 * Priority: "no write protect" slot quirk -> platform get_ro() hook ->
 * write-protect GPIO -> controller WRTPRT register.
 */
1417 static int dw_mci_get_ro(struct mmc_host *mmc)
1420 struct dw_mci_slot *slot = mmc_priv(mmc);
1421 struct dw_mci_board *brd = slot->host->pdata;
1423 /* Use platform get_ro function, else try on board write protect */
1424 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1426 else if(brd->get_ro)
1427 read_only = brd->get_ro(slot->id);
1428 else if(gpio_is_valid(slot->wp_gpio))
1429 read_only = gpio_get_value(slot->wp_gpio);
1432 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1434 dev_dbg(&mmc->class_dev, "card is %s\n",
1435 read_only ? "read-only" : "read-write");
/*
 * Software card-detect for SDIO-only hosts: update the PRESENT flag
 * from 'val', gate the controller clocks to match, and kick a rescan.
 */
1440 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1442 struct dw_mci_slot *slot = mmc_priv(mmc);
1443 struct dw_mci *host = slot->host;
1444 /*struct dw_mci_board *brd = slot->host->pdata;*/
/* Only meaningful for hosts restricted to SDIO cards. */
1446 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1449 spin_lock_bh(&host->lock);
1452 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1454 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1456 spin_unlock_bh(&host->lock);
/* Enable clocks when the card appears; disable when it goes away. */
1458 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1459 if(__clk_is_enabled(host->hclk_mmc) == false)
1460 clk_prepare_enable(host->hclk_mmc);
1461 if(__clk_is_enabled(host->clk_mmc) == false)
1462 clk_prepare_enable(host->clk_mmc);
1464 if(__clk_is_enabled(host->clk_mmc) == true)
1465 clk_disable_unprepare(slot->host->clk_mmc);
1466 if(__clk_is_enabled(host->hclk_mmc) == true)
1467 clk_disable_unprepare(slot->host->hclk_mmc);
/* Ask the MMC core to re-detect the (virtual) card after ~20ms. */
1470 mmc_detect_change(slot->mmc, 20);
/*
 * mmc_host_ops.get_cd: report card presence.
 * Priority: SDIO software status -> "broken CD" quirk -> platform
 * get_cd() hook -> CD GPIO -> controller CDETECT register. The result
 * is cached in slot->flags under host->lock.
 */
1476 static int dw_mci_get_cd(struct mmc_host *mmc)
1479 struct dw_mci_slot *slot = mmc_priv(mmc);
1480 struct dw_mci_board *brd = slot->host->pdata;
1481 struct dw_mci *host = slot->host;
1482 int gpio_cd = mmc_gpio_get_cd(mmc);
1484 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1485 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1487 /* Use platform get_cd function, else try onboard card detect */
1488 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1490 else if (brd->get_cd)
1491 present = !brd->get_cd(slot->id);
1492 else if (!IS_ERR_VALUE(gpio_cd))
1495 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1497 spin_lock_bh(&host->lock);
1499 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1500 dev_dbg(&mmc->class_dev, "card is present\n");
1502 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1503 dev_dbg(&mmc->class_dev, "card is not present\n");
1505 spin_unlock_bh(&host->lock);
1512 * Dts Should caps emmc controller with poll-hw-reset
/*
 * mmc_host_ops.hw_reset for eMMC: stop any in-flight transfer, drain
 * the DTO interrupt, reset IDMA/DMA/FIFO in that exact order, then
 * pulse RST_n with the eMMC spec timing quoted near the end.
 */
1514 static void dw_mci_hw_reset(struct mmc_host *mmc)
1516 struct dw_mci_slot *slot = mmc_priv(mmc);
1517 struct dw_mci *host = slot->host;
1522 unsigned long timeout;
1525 /* (1) CMD12 to end any transfer in process */
1526 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1527 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1529 if(host->mmc->hold_reg_flag)
1530 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1531 mci_writel(host, CMDARG, 0);
1533 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll up to 500ms for the controller to accept the command. */
1535 timeout = jiffies + msecs_to_jiffies(500);
1537 ret = time_before(jiffies, timeout);
1538 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1543 MMC_DBG_ERR_FUNC(host->mmc,
1544 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1545 __func__, mmc_hostname(host->mmc));
1547 /* (2) wait DTO, even if no response is sent back by card */
1549 timeout = jiffies + msecs_to_jiffies(5);
1551 ret = time_before(jiffies, timeout);
1552 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1553 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1559 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1561 /* Software reset - BMOD[0] for IDMA only */
1562 regs = mci_readl(host, BMOD);
1563 regs |= SDMMC_IDMAC_SWRESET;
1564 mci_writel(host, BMOD, regs);
1565 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1566 regs = mci_readl(host, BMOD);
1567 if(regs & SDMMC_IDMAC_SWRESET)
1568 MMC_DBG_WARN_FUNC(host->mmc,
1569 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1570 __func__, mmc_hostname(host->mmc));
1572 /* DMA reset - CTRL[2] */
1573 regs = mci_readl(host, CTRL);
1574 regs |= SDMMC_CTRL_DMA_RESET;
1575 mci_writel(host, CTRL, regs);
1576 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1577 regs = mci_readl(host, CTRL);
1578 if(regs & SDMMC_CTRL_DMA_RESET)
1579 MMC_DBG_WARN_FUNC(host->mmc,
1580 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1581 __func__, mmc_hostname(host->mmc));
1583 /* FIFO reset - CTRL[1] */
1584 regs = mci_readl(host, CTRL);
1585 regs |= SDMMC_CTRL_FIFO_RESET;
1586 mci_writel(host, CTRL, regs);
1587 mdelay(1); /* no timing limited, 1ms is random value */
1588 regs = mci_readl(host, CTRL);
1589 if(regs & SDMMC_CTRL_FIFO_RESET)
/* NOTE(review): the message below says "SDMMC_CTRL_DMA_RESET" but this
 * branch checks the FIFO reset bit — looks like a copy-paste slip. */
1590 MMC_DBG_WARN_FUNC(host->mmc,
1591 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1592 __func__, mmc_hostname(host->mmc));
1595 According to eMMC spec
1596 tRstW >= 1us ; RST_n pulse width
1597 tRSCA >= 200us ; RST_n to Command time
1598 tRSTH >= 1us ; RST_n high period
1600 mci_writel(slot->host, PWREN, 0x0);
1601 mci_writel(slot->host, RST_N, 0x0);
1603 udelay(10); /* 10us for bad quality eMMc. */
1605 mci_writel(slot->host, PWREN, 0x1);
1606 mci_writel(slot->host, RST_N, 0x1);
1608 usleep_range(500, 1000); /* at least 500(> 200us) */
1612 * Disable lower power mode.
1614 * Low power mode will stop the card clock when idle. According to the
1615 * description of the CLKENA register we should disable low power mode
1616 * for SDIO cards if we need SDIO interrupts to work.
1618 * This function is fast if low power mode is already disabled.
1620 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1622 struct dw_mci *host = slot->host;
1624 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1626 clk_en_a = mci_readl(host, CLKENA);
/* Only touch the hardware when the low-power bit is actually set; the
 * clock-update command makes the new CLKENA value take effect. */
1628 if (clk_en_a & clken_low_pwr) {
1629 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1630 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1631 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * mmc_host_ops.enable_sdio_irq: (un)mask the per-slot SDIO interrupt.
 * IP versions before 2.40a use a different SDIO interrupt bit layout.
 */
1635 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1637 struct dw_mci_slot *slot = mmc_priv(mmc);
1638 struct dw_mci *host = slot->host;
1642 /* Enable/disable Slot Specific SDIO interrupt */
1643 int_mask = mci_readl(host, INTMASK);
1645 if (host->verid < DW_MMC_240A)
1646 sdio_int = SDMMC_INT_SDIO(slot->id);
1648 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1652 * Turn off low power mode if it was enabled. This is a bit of
1653 * a heavy operation and we disable / enable IRQs a lot, so
1654 * we'll leave low power mode disabled and it will get
1655 * re-enabled again in dw_mci_setup_bus().
1657 dw_mci_disable_low_power(slot);
1659 mci_writel(host, INTMASK,
1660 (int_mask | sdio_int))
1662 mci_writel(host, INTMASK,
1663 (int_mask & ~sdio_int));
1667 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* IO-domain voltages, in millivolts. */
1669 IO_DOMAIN_12 = 1200,
1670 IO_DOMAIN_18 = 1800,
1671 IO_DOMAIN_33 = 3300,
/*
 * Program the SoC GRF IO-domain voltage select for this host.
 * Only the RK3288 SD path is handled here; bit 23 is presumably the
 * GRF write-enable mask for the vsel bit at position 7 (Rockchip GRF
 * convention) — verify against the RK3288 TRM.
 */
1673 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1683 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1684 __FUNCTION__, mmc_hostname(host->mmc));
1687 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1688 __FUNCTION__, mmc_hostname(host->mmc));
1692 if(cpu_is_rk3288()){
1693 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1694 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1698 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1699 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * Perform the actual signal-voltage switch: set the io-domain
 * regulator and GRF vsel, update UHS_REG's 1.8V enable bit, and give
 * the rail ~5ms to settle. Supported only on IP version >= 2.40a.
 */
1703 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1704 struct mmc_ios *ios)
1707 unsigned int value,uhs_reg;
1710 * Signal Voltage Switching is only applicable for Host Controllers
1713 if (host->verid < DW_MMC_240A)
1716 uhs_reg = mci_readl(host, UHS_REG);
1717 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1718 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1720 switch (ios->signal_voltage) {
1721 case MMC_SIGNAL_VOLTAGE_330:
1722 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1724 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1725 /* regulator_put(host->vmmc); //to be done in remove function. */
1727 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1728 __func__, regulator_get_voltage(host->vmmc), ret);
1730 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1731 " failed\n", mmc_hostname(host->mmc));
1734 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1736 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1737 __FUNCTION__, mmc_hostname(host->mmc));
1739 /* set High-power mode */
1740 value = mci_readl(host, CLKENA);
1741 value &= ~SDMMC_CLKEN_LOW_PWR;
1742 mci_writel(host,CLKENA , value);
/* Clear the 1.8V enable bit to select 3.3V signalling. */
1744 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1745 mci_writel(host,UHS_REG , uhs_reg);
1748 usleep_range(5000, 5500);
1750 /* 3.3V regulator output should be stable within 5 ms */
1751 uhs_reg = mci_readl(host, UHS_REG);
1752 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1755 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1756 mmc_hostname(host->mmc));
1759 case MMC_SIGNAL_VOLTAGE_180:
1761 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1762 /* regulator_put(host->vmmc);//to be done in remove function. */
1764 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1765 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1767 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1768 " failed\n", mmc_hostname(host->mmc));
1771 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1775 * Enable 1.8V Signal Enable in the Host Control2
1778 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1781 usleep_range(5000, 5500);
1782 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1783 __FUNCTION__,mmc_hostname(host->mmc));
1785 /* 1.8V regulator output should be stable within 5 ms */
1786 uhs_reg = mci_readl(host, UHS_REG);
1787 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1790 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1791 mmc_hostname(host->mmc));
1794 case MMC_SIGNAL_VOLTAGE_120:
1796 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1798 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1799 " failed\n", mmc_hostname(host->mmc));
1805 /* No signal voltage switch required */
/* mmc_host_ops wrapper: delegate to the voltage-switch worker above
 * (skipped for controller versions older than 2.40a). */
1811 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1812 struct mmc_ios *ios)
1814 struct dw_mci_slot *slot = mmc_priv(mmc);
1815 struct dw_mci *host = slot->host;
1818 if (host->verid < DW_MMC_240A)
1821 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * mmc_host_ops.execute_tuning: select the tuning block pattern for the
 * opcode/bus width, pick the clock controller id (con_id: 3=eMMC,
 * 1=SDIO, 0=SD) and tuning direction, then delegate to the
 * SoC-specific execute_tuning hook.
 */
1827 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1829 struct dw_mci_slot *slot = mmc_priv(mmc);
1830 struct dw_mci *host = slot->host;
1831 const struct dw_mci_drv_data *drv_data = host->drv_data;
1832 struct dw_mci_tuning_data tuning_data;
1835 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning
1836 if(cpu_is_3036() || cpu_is_3126())
1842 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1843 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1844 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1845 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1846 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1847 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1848 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1852 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1853 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1854 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1857 "Undefined command(%d) for tuning\n", opcode);
1862 /* Recommend sample phase and delayline
1863 Fixme: Mix-use these three controllers will cause
1866 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1867 tuning_data.con_id = 3;
1868 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1869 tuning_data.con_id = 1;
1871 tuning_data.con_id = 0;
1873 /* 0: driver, from host->devices
1874 1: sample, from devices->host
1876 tuning_data.tuning_type = 1;
1878 if (drv_data && drv_data->execute_tuning)
1879 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/* Host operations table exposed to the MMC core. */
1884 static const struct mmc_host_ops dw_mci_ops = {
1885 .request = dw_mci_request,
1886 .pre_req = dw_mci_pre_req,
1887 .post_req = dw_mci_post_req,
1888 .set_ios = dw_mci_set_ios,
1889 .get_ro = dw_mci_get_ro,
1890 .get_cd = dw_mci_get_cd,
1891 .set_sdio_status = dw_mci_set_sdio_status,
1892 .hw_reset = dw_mci_hw_reset,
1893 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1894 .execute_tuning = dw_mci_execute_tuning,
1895 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1896 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
1897 .card_busy = dw_mci_card_busy,
/*
 * Enable or disable the host IRQ line, tracking the current state in
 * host->irq_state so repeated calls don't unbalance the
 * enable_irq()/disable_irq() depth; runs with local interrupts saved.
 */
1902 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
1904 unsigned long flags;
1909 local_irq_save(flags);
1910 if(host->irq_state != irqflag)
1912 host->irq_state = irqflag;
1915 enable_irq(host->irq);
1919 disable_irq(host->irq);
1922 local_irq_restore(flags);
/*
 * Post-data-phase handling: for writes (except the CMD19 bus-test
 * pattern), map a data CRC status bit to -EILSEQ and an end-bit error
 * to -ETIMEDOUT, then wait for the controller/card to go un-busy.
 */
1926 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
1927 __releases(&host->lock)
1928 __acquires(&host->lock)
1930 if(DW_MCI_SEND_STATUS == host->dir_status){
1932 if( MMC_BUS_TEST_W != host->cmd->opcode){
1933 if(host->data_status & SDMMC_INT_DCRC)
1934 host->data->error = -EILSEQ;
1935 else if(host->data_status & SDMMC_INT_EBE)
1936 host->data->error = -ETIMEDOUT;
1938 dw_mci_wait_unbusy(host);
1941 dw_mci_wait_unbusy(host);
/*
 * Finish the current request: stop the DTO timer, run end-of-data
 * bookkeeping, start the next queued slot (or go idle), then complete
 * the request to the MMC core with host->lock temporarily dropped.
 */
1946 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1947 __releases(&host->lock)
1948 __acquires(&host->lock)
1950 struct dw_mci_slot *slot;
1951 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1953 WARN_ON(host->cmd || host->data);
1955 del_timer_sync(&host->dto_timer);
1956 dw_mci_deal_data_end(host, mrq);
1959 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
1960 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
1962 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
1963 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
1965 host->cur_slot->mrq = NULL;
/* Pull the next waiting slot off the queue, if any. */
1967 if (!list_empty(&host->queue)) {
1968 slot = list_entry(host->queue.next,
1969 struct dw_mci_slot, queue_node);
1970 list_del(&slot->queue_node);
1971 dev_vdbg(host->dev, "list not empty: %s is next\n",
1972 mmc_hostname(slot->mmc));
1973 host->state = STATE_SENDING_CMD;
1974 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
1975 dw_mci_start_request(host, slot);
1977 dev_vdbg(host->dev, "list empty\n");
1978 host->state = STATE_IDLE;
/* mmc_request_done() may re-enter the driver; call it unlocked. */
1981 spin_unlock(&host->lock);
1982 mmc_request_done(prev_mmc, mrq);
1983 spin_lock(&host->lock);
/*
 * Read back the command response and translate error status bits:
 * RTO -> -ETIMEDOUT, RCRC (when the command expects a CRC) -> -EILSEQ,
 * RESP_ERR -> error. R2 (136-bit) responses are read in reverse
 * register order: RESP0 holds the least-significant word.
 */
1986 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1988 u32 status = host->cmd_status;
1990 host->cmd_status = 0;
1992 /* Read the response from the card (up to 16 bytes) */
1993 if (cmd->flags & MMC_RSP_PRESENT) {
1994 if (cmd->flags & MMC_RSP_136) {
1995 cmd->resp[3] = mci_readl(host, RESP0);
1996 cmd->resp[2] = mci_readl(host, RESP1);
1997 cmd->resp[1] = mci_readl(host, RESP2);
1998 cmd->resp[0] = mci_readl(host, RESP3);
2000 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2001 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2003 cmd->resp[0] = mci_readl(host, RESP0);
2007 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2008 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
/* Response timeout: SDIO hosts are special-cased first. */
2012 if (status & SDMMC_INT_RTO)
2014 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2017 cmd->error = -ETIMEDOUT;
2018 del_timer_sync(&host->dto_timer);
2019 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2020 del_timer_sync(&host->dto_timer);
2021 cmd->error = -EILSEQ;
2022 }else if (status & SDMMC_INT_RESP_ERR){
2023 del_timer_sync(&host->dto_timer);
2028 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2029 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2032 del_timer_sync(&host->dto_timer);
/* cmd_rto escalation does not apply to CMD13 (SEND_STATUS) polling. */
2033 if(MMC_SEND_STATUS != cmd->opcode)
2034 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2035 MMC_DBG_ERR_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2036 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2040 /* newer ip versions need a delay between retries */
2041 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Main request state machine, run as a tasklet. Walks host->state
 * through SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP /
 * DATA_ERROR based on the event bits set by the interrupt handler,
 * looping until the state stops changing. Takes host->lock for the
 * duration.
 */
2047 static void dw_mci_tasklet_func(unsigned long priv)
2049 struct dw_mci *host = (struct dw_mci *)priv;
2050 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2051 struct mmc_data *data;
2052 struct mmc_command *cmd;
2053 enum dw_mci_state state;
2054 enum dw_mci_state prev_state;
2055 u32 status, cmd_flags;
2056 unsigned long timeout = 0;
2059 spin_lock(&host->lock);
2061 state = host->state;
/* Command phase done: handle sbc chaining and command errors. */
2071 case STATE_SENDING_CMD:
2072 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2073 &host->pending_events))
2078 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2079 dw_mci_command_complete(host, cmd);
/* CMD23 (sbc) completed cleanly: now issue the real data command. */
2080 if (cmd == host->mrq->sbc && !cmd->error) {
2081 prev_state = state = STATE_SENDING_CMD;
2082 __dw_mci_start_request(host, host->cur_slot,
2087 if (cmd->data && cmd->error) {
2088 del_timer_sync(&host->dto_timer); /* delete the timer for INT_DTO */
2089 dw_mci_stop_dma(host);
2092 send_stop_cmd(host, data);
2093 state = STATE_SENDING_STOP;
2099 send_stop_abort(host, data);
2100 state = STATE_SENDING_STOP;
2103 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2106 if (!host->mrq->data || cmd->error) {
2107 dw_mci_request_end(host, host->mrq);
2111 prev_state = state = STATE_SENDING_DATA;
/* Data phase in flight: handle transfer errors, else wait for
 * EVENT_XFER_COMPLETE before moving to DATA_BUSY. */
2114 case STATE_SENDING_DATA:
2115 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2116 dw_mci_stop_dma(host);
2119 send_stop_cmd(host, data);
2121 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2122 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2123 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
2125 mci_writel(host, CMDARG, 0);
2127 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2128 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2130 if(host->mmc->hold_reg_flag)
2131 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2133 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll up to 500ms for the controller to accept the manual CMD12. */
2135 timeout = jiffies + msecs_to_jiffies(500);
2138 ret = time_before(jiffies, timeout);
2139 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2143 MMC_DBG_ERR_FUNC(host->mmc,
2144 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2145 __func__, mmc_hostname(host->mmc));
2148 send_stop_abort(host, data);
2150 state = STATE_DATA_ERROR;
2154 MMC_DBG_CMD_FUNC(host->mmc,
2155 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2156 prev_state,state, mmc_hostname(host->mmc));
2158 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2159 &host->pending_events))
2161 MMC_DBG_INFO_FUNC(host->mmc,
2162 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2163 prev_state,state,mmc_hostname(host->mmc));
2165 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2166 prev_state = state = STATE_DATA_BUSY;
/* Transfer done: wait for DATA_COMPLETE, then classify any data
 * error (DRTO/DCRC/EBE) or account all bytes as transferred. */
2169 case STATE_DATA_BUSY:
2170 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2171 &host->pending_events))
2174 dw_mci_deal_data_end(host, host->mrq);
2175 del_timer_sync(&host->dto_timer); //delete the timer for INT_DTO
2176 MMC_DBG_INFO_FUNC(host->mmc,
2177 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2178 prev_state,state,mmc_hostname(host->mmc));
2181 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2182 status = host->data_status;
2184 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2185 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2186 MMC_DBG_ERR_FUNC(host->mmc,
2187 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2188 prev_state,state, status, mmc_hostname(host->mmc));
2190 if (status & SDMMC_INT_DRTO) {
2191 data->error = -ETIMEDOUT;
2192 } else if (status & SDMMC_INT_DCRC) {
2193 data->error = -EILSEQ;
2194 } else if (status & SDMMC_INT_EBE &&
2195 host->dir_status == DW_MCI_SEND_STATUS){
2197 * No data CRC status was returned.
2198 * The number of bytes transferred will
2199 * be exaggerated in PIO mode.
2201 data->bytes_xfered = 0;
2202 data->error = -ETIMEDOUT;
2211 * After an error, there may be data lingering
2212 * in the FIFO, so reset it - doing so
2213 * generates a block interrupt, hence setting
2214 * the scatter-gather pointer to NULL.
2216 dw_mci_fifo_reset(host);
2218 data->bytes_xfered = data->blocks * data->blksz;
2223 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2224 prev_state,state,mmc_hostname(host->mmc));
2225 dw_mci_request_end(host, host->mrq);
2228 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2229 prev_state,state,mmc_hostname(host->mmc));
/* With CMD23 (sbc) no stop command is needed; finish here. */
2231 if (host->mrq->sbc && !data->error) {
2232 data->stop->error = 0;
2234 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2235 prev_state,state,mmc_hostname(host->mmc));
2237 dw_mci_request_end(host, host->mrq);
2241 prev_state = state = STATE_SENDING_STOP;
2243 send_stop_cmd(host, data);
2245 if (data->stop && !data->error) {
2246 /* stop command for open-ended transfer*/
2248 send_stop_abort(host, data);
2252 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2253 prev_state,state,mmc_hostname(host->mmc));
/* Stop/abort issued: wait for its completion, then finish up. */
2255 case STATE_SENDING_STOP:
2256 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2259 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2260 prev_state, state, mmc_hostname(host->mmc));
2262 /* CMD error in data command */
2263 if (host->mrq->cmd->error && host->mrq->data) {
2264 dw_mci_fifo_reset(host);
2270 dw_mci_command_complete(host, host->mrq->stop);
2272 if (host->mrq->stop)
2273 dw_mci_command_complete(host, host->mrq->stop);
2275 host->cmd_status = 0;
2278 dw_mci_request_end(host, host->mrq);
/* Data error path: wait for the transfer to wind down, then re-run
 * the DATA_BUSY handling. */
2281 case STATE_DATA_ERROR:
2282 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2283 &host->pending_events))
2286 state = STATE_DATA_BUSY;
2289 } while (state != prev_state);
2291 host->state = state;
2293 spin_unlock(&host->lock);
2297 /* push final bytes to part_buf, only use during push */
2298 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
/* Stash the trailing (sub-FIFO-word) bytes until more data arrives. */
2300 memcpy((void *)&host->part_buf, buf, cnt);
2301 host->part_buf_count = cnt;
2304 /* append bytes to part_buf, only use during push */
2305 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
/* Top up part_buf to at most one FIFO word (1 << data_shift bytes). */
2307 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2308 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2309 host->part_buf_count += cnt;
2313 /* pull first bytes from part_buf, only use during pull */
2314 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
/* Drain previously-buffered bytes from part_buf into buf first. */
2316 cnt = min(cnt, (int)host->part_buf_count);
2318 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2320 host->part_buf_count -= cnt;
2321 host->part_buf_start += cnt;
2326 /* pull final bytes from the part_buf, assuming it's just been filled */
2327 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
/* Copy cnt bytes out and leave the remainder of the freshly-read FIFO
 * word buffered for the next pull. */
2329 memcpy(buf, &host->part_buf, cnt);
2330 host->part_buf_start = cnt;
2331 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO push for a 16-bit FIFO: flush part_buf first, bounce-buffer
 * unaligned sources, write full 16-bit words, then buffer any trailing
 * odd byte (flushed early if this completes the transfer).
 */
2334 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2336 struct mmc_data *data = host->data;
2339 /* try and push anything in the part_buf */
2340 if (unlikely(host->part_buf_count)) {
2341 int len = dw_mci_push_part_bytes(host, buf, cnt);
2344 if (host->part_buf_count == 2) {
2345 mci_writew(host, DATA(host->data_offset),
2347 host->part_buf_count = 0;
2350 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2351 if (unlikely((unsigned long)buf & 0x1)) {
2353 u16 aligned_buf[64];
2354 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2355 int items = len >> 1;
2357 /* memcpy from input buffer into aligned buffer */
2358 memcpy(aligned_buf, buf, len);
2361 /* push data from aligned buffer into fifo */
2362 for (i = 0; i < items; ++i)
2363 mci_writew(host, DATA(host->data_offset),
2370 for (; cnt >= 2; cnt -= 2)
2371 mci_writew(host, DATA(host->data_offset), *pdata++);
2374 /* put anything remaining in the part_buf */
2376 dw_mci_set_part_bytes(host, buf, cnt);
2377 /* Push data if we have reached the expected data length */
2378 if ((data->bytes_xfered + init_cnt) ==
2379 (data->blksz * data->blocks))
2380 mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit FIFO: bounce-buffer unaligned destinations,
 * read full 16-bit words, then buffer a trailing odd byte via
 * dw_mci_pull_final_bytes().
 */
2385 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2387 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2388 if (unlikely((unsigned long)buf & 0x1)) {
2390 /* pull data from fifo into aligned buffer */
2391 u16 aligned_buf[64];
2392 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2393 int items = len >> 1;
2395 for (i = 0; i < items; ++i)
2396 aligned_buf[i] = mci_readw(host,
2397 DATA(host->data_offset));
2398 /* memcpy from aligned buffer into output buffer */
2399 memcpy(buf, aligned_buf, len);
2407 for (; cnt >= 2; cnt -= 2)
2408 *pdata++ = mci_readw(host, DATA(host->data_offset));
2412 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2413 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit FIFO: flush part_buf first, bounce-buffer
 * unaligned sources, write full 32-bit words, then buffer trailing
 * bytes (flushed early if this completes the transfer).
 */
2417 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2419 struct mmc_data *data = host->data;
2422 /* try and push anything in the part_buf */
2423 if (unlikely(host->part_buf_count)) {
2424 int len = dw_mci_push_part_bytes(host, buf, cnt);
2427 if (host->part_buf_count == 4) {
2428 mci_writel(host, DATA(host->data_offset),
2430 host->part_buf_count = 0;
2433 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2434 if (unlikely((unsigned long)buf & 0x3)) {
2436 u32 aligned_buf[32];
2437 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2438 int items = len >> 2;
2440 /* memcpy from input buffer into aligned buffer */
2441 memcpy(aligned_buf, buf, len);
2444 /* push data from aligned buffer into fifo */
2445 for (i = 0; i < items; ++i)
2446 mci_writel(host, DATA(host->data_offset),
2453 for (; cnt >= 4; cnt -= 4)
2454 mci_writel(host, DATA(host->data_offset), *pdata++);
2457 /* put anything remaining in the part_buf */
2459 dw_mci_set_part_bytes(host, buf, cnt);
2460 /* Push data if we have reached the expected data length */
2461 if ((data->bytes_xfered + init_cnt) ==
2462 (data->blksz * data->blocks))
2463 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit FIFO: bounce-buffer unaligned destinations,
 * read full 32-bit words, then buffer trailing bytes via
 * dw_mci_pull_final_bytes().
 */
2468 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2470 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2471 if (unlikely((unsigned long)buf & 0x3)) {
2473 /* pull data from fifo into aligned buffer */
2474 u32 aligned_buf[32];
2475 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2476 int items = len >> 2;
2478 for (i = 0; i < items; ++i)
2479 aligned_buf[i] = mci_readl(host,
2480 DATA(host->data_offset));
2481 /* memcpy from aligned buffer into output buffer */
2482 memcpy(buf, aligned_buf, len);
2490 for (; cnt >= 4; cnt -= 4)
2491 *pdata++ = mci_readl(host, DATA(host->data_offset));
2495 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2496 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 64-bit FIFO: flush part_buf first, bounce-buffer
 * unaligned sources, write full 64-bit words, then buffer trailing
 * bytes (flushed early if this completes the transfer).
 */
2500 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2502 struct mmc_data *data = host->data;
2505 /* try and push anything in the part_buf */
2506 if (unlikely(host->part_buf_count)) {
2507 int len = dw_mci_push_part_bytes(host, buf, cnt);
2511 if (host->part_buf_count == 8) {
2512 mci_writeq(host, DATA(host->data_offset),
2514 host->part_buf_count = 0;
2517 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2518 if (unlikely((unsigned long)buf & 0x7)) {
2520 u64 aligned_buf[16];
2521 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2522 int items = len >> 3;
2524 /* memcpy from input buffer into aligned buffer */
2525 memcpy(aligned_buf, buf, len);
2528 /* push data from aligned buffer into fifo */
2529 for (i = 0; i < items; ++i)
2530 mci_writeq(host, DATA(host->data_offset),
2537 for (; cnt >= 8; cnt -= 8)
2538 mci_writeq(host, DATA(host->data_offset), *pdata++);
2541 /* put anything remaining in the part_buf */
2543 dw_mci_set_part_bytes(host, buf, cnt);
2544 /* Push data if we have reached the expected data length */
2545 if ((data->bytes_xfered + init_cnt) ==
2546 (data->blksz * data->blocks))
2547 mci_writeq(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data64 - 64-bit counterpart of dw_mci_pull_data32: drain
 * @cnt bytes from the FIFO into @buf, bouncing through an aligned stack
 * buffer when @buf is not 8-byte aligned on arches without efficient
 * unaligned access. A final sub-word remainder is parked in
 * host->part_buf for dw_mci_pull_final_bytes().
 * NOTE(review): interleaved lines (brace, i/pdata declarations, buf/cnt
 * advancement) are missing from this extraction.
 */
2552 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2554 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2555 if (unlikely((unsigned long)buf & 0x7)) {
2557 /* pull data from fifo into aligned buffer */
2558 u64 aligned_buf[16];
2559 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2560 int items = len >> 3;
2562 for (i = 0; i < items; ++i)
2563 aligned_buf[i] = mci_readq(host,
2564 DATA(host->data_offset));
2565 /* memcpy from aligned buffer into output buffer */
2566 memcpy(buf, aligned_buf, len);
/* Aligned fast path: read whole 64-bit words directly. */
2574 for (; cnt >= 8; cnt -= 8)
2575 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* Save the last FIFO word; the tail bytes are doled out below. */
2579 host->part_buf = mci_readq(host, DATA(host->data_offset));
2580 dw_mci_pull_final_bytes(host, buf, cnt);
2584 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2588 /* get remaining partial bytes */
2589 len = dw_mci_pull_part_bytes(host, buf, cnt);
2590 if (unlikely(len == cnt))
2595 /* get the rest of the data */
2596 host->pull_data(host, buf, cnt);
/*
 * dw_mci_read_data_pio - PIO receive path. Walks the request's
 * scatterlist with an sg mapping iterator and drains the FIFO into each
 * segment until the RXDR interrupt stops re-asserting (and, when @dto
 * is set, until the FIFO count reads zero).
 * NOTE(review): extraction gaps — local declarations (buf/len/status),
 * the do{ opener, offset bookkeeping and the 'done:' label lines are
 * missing from this view.
 */
2599 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2601 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2603 unsigned int offset;
2604 struct mmc_data *data = host->data;
2605 int shift = host->data_shift;
2608 unsigned int remain, fcnt;
/* Defensive check: the host may already have been released. */
2610 if(!host->mmc->bus_refs){
2611 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2615 if (!sg_miter_next(sg_miter))
2618 host->sg = sg_miter->piter.sg;
2619 buf = sg_miter->addr;
2620 remain = sg_miter->length;
/* Bytes available = FIFO word count scaled to bytes + leftover partials. */
2624 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2625 << shift) + host->part_buf_count;
2626 len = min(remain, fcnt);
2629 dw_mci_pull_data(host, (void *)(buf + offset), len);
2630 data->bytes_xfered += len;
2635 sg_miter->consumed = offset;
2636 status = mci_readl(host, MINTSTS);
2637 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2638 /* if the RXDR is ready read again */
2639 } while ((status & SDMMC_INT_RXDR) ||
2640 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2643 if (!sg_miter_next(sg_miter))
2645 sg_miter->consumed = 0;
2647 sg_miter_stop(sg_miter);
2651 sg_miter_stop(sg_miter);
/* Scatterlist exhausted: mark the data phase as transferred. */
2655 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_write_data_pio - PIO transmit path, mirror of the read path:
 * walks the scatterlist and stuffs the FIFO while TXDR keeps firing.
 * NOTE(review): extraction gaps — local declarations, the do{ opener,
 * offset bookkeeping and the 'done:' label lines are missing here.
 */
2658 static void dw_mci_write_data_pio(struct dw_mci *host)
2660 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2662 unsigned int offset;
2663 struct mmc_data *data = host->data;
2664 int shift = host->data_shift;
2667 unsigned int fifo_depth = host->fifo_depth;
2668 unsigned int remain, fcnt;
/* Defensive check: the host may already have been released. */
2670 if(!host->mmc->bus_refs){
2671 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2676 if (!sg_miter_next(sg_miter))
2679 host->sg = sg_miter->piter.sg;
2680 buf = sg_miter->addr;
2681 remain = sg_miter->length;
/* Free FIFO room in bytes, minus bytes already parked in part_buf. */
2685 fcnt = ((fifo_depth -
2686 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2687 << shift) - host->part_buf_count;
2688 len = min(remain, fcnt);
2691 host->push_data(host, (void *)(buf + offset), len);
2692 data->bytes_xfered += len;
2697 sg_miter->consumed = offset;
2698 status = mci_readl(host, MINTSTS);
2699 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2700 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2703 if (!sg_miter_next(sg_miter))
2705 sg_miter->consumed = 0;
2707 sg_miter_stop(sg_miter);
2711 sg_miter_stop(sg_miter);
/* Scatterlist exhausted: mark the data phase as transferred. */
2715 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_cmd_interrupt - record command-phase interrupt status and kick
 * the state-machine tasklet. Rockchip addition: for non-CMD12 commands
 * with a data phase it (re)arms the DTO watchdog timer, scaling the
 * timeout by the number of transfer units and the retry count.
 * NOTE(review): 'unit' and 'multi' declarations and the surrounding
 * condition lines are missing from this extraction.
 */
2718 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
/* Keep the first error status only; don't overwrite on later IRQs. */
2722 if (!host->cmd_status)
2723 host->cmd_status = status;
2728 if((MMC_STOP_TRANSMISSION != host->cmd->opcode))
/* Number of 'unit'-sized chunks in this transfer, rounded up, min 1. */
2731 multi = mci_readl(host, BYTCNT)/unit;
2732 multi += ((mci_readl(host, BYTCNT) % unit) ? 1 :0 );
2733 multi = (multi > 0) ? multi : 1;
2734 multi += (host->cmd->retries > 2)? 2 : host->cmd->retries;
2735 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(4500 * multi));//max wait 8s larger
2740 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2741 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_interrupt - top-level IRQ handler. Reads the masked interrupt
 * status (MINTSTS), acknowledges each source in RINTSTS, and dispatches:
 * command/data errors and completions to the tasklet, RXDR/TXDR to the
 * PIO helpers, card-detect to the card workqueue, SDIO IRQs to the MMC
 * core, and (with CONFIG_MMC_DW_IDMAC) internal-DMA events to dma_ops.
 * NOTE(review): loop-variable declarations, the IRQ_HANDLED return and
 * some closing braces are missing from this extraction.
 */
2744 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2746 struct dw_mci *host = dev_id;
2747 u32 pending, sdio_int;
2750 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2753 * DTO fix - version 2.10a and below, and only if internal DMA
2756 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
/* Synthesize DATA_OVER when the FIFO count shows the transfer drained. */
2758 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2759 pending |= SDMMC_INT_DATA_OVER;
2763 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2764 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2765 host->cmd_status = pending;
2767 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2768 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2770 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2773 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2774 /* if there is an error report DATA_ERROR */
2775 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2776 host->data_status = pending;
2778 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2780 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2781 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2782 tasklet_schedule(&host->tasklet);
2785 if (pending & SDMMC_INT_DATA_OVER) {
2786 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2787 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2788 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2789 if (!host->data_status)
2790 host->data_status = pending;
/* Receive transfers may still have bytes in the FIFO: drain them now. */
2792 if (host->dir_status == DW_MCI_RECV_STATUS) {
2793 if (host->sg != NULL)
2794 dw_mci_read_data_pio(host, true);
2796 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2797 tasklet_schedule(&host->tasklet);
2800 if (pending & SDMMC_INT_RXDR) {
2801 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2802 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2803 dw_mci_read_data_pio(host, false);
2806 if (pending & SDMMC_INT_TXDR) {
2807 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2808 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2809 dw_mci_write_data_pio(host);
/* Rockchip: voltage-switch interrupt is treated like a command event. */
2812 if (pending & SDMMC_INT_VSI) {
2813 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2814 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2815 dw_mci_cmd_interrupt(host, pending);
2818 if (pending & SDMMC_INT_CMD_DONE) {
2819 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2820 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2821 dw_mci_cmd_interrupt(host, pending);
2824 if (pending & SDMMC_INT_CD) {
2825 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2826 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
/* Hold a wakelock so the detect work can run before suspend. */
2827 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2828 queue_work(host->card_workqueue, &host->card_work);
2831 if (pending & SDMMC_INT_HLE) {
2832 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2833 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2837 /* Handle SDIO Interrupts */
2838 for (i = 0; i < host->num_slots; i++) {
2839 struct dw_mci_slot *slot = host->slot[i];
/* SDIO IRQ bit position moved in IP version 2.40a. */
2841 if (host->verid < DW_MMC_240A)
2842 sdio_int = SDMMC_INT_SDIO(i);
2844 sdio_int = SDMMC_INT_SDIO(i + 8);
2846 if (pending & sdio_int) {
2847 mci_writel(host, RINTSTS, sdio_int);
2848 mmc_signal_sdio_irq(slot->mmc);
2854 #ifdef CONFIG_MMC_DW_IDMAC
2855 /* Handle DMA interrupts */
2856 pending = mci_readl(host, IDSTS);
2857 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2858 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2859 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2860 host->dma_ops->complete((void *)host);
/*
 * dw_mci_work_routine_card - card-detect workqueue handler. For each
 * slot, compares the current card-present state against the cached one
 * and, on a change, aborts any in-flight or queued request with
 * -ENOMEDIUM, resets the FIFO (and IDMAC), then notifies the MMC core
 * via mmc_detect_change().
 * NOTE(review): extraction gaps — 'present' declaration, queue
 * traversal lines, several case labels/breaks and closing braces are
 * missing from this view.
 */
2867 static void dw_mci_work_routine_card(struct work_struct *work)
2869 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2872 for (i = 0; i < host->num_slots; i++) {
2873 struct dw_mci_slot *slot = host->slot[i];
2874 struct mmc_host *mmc = slot->mmc;
2875 struct mmc_request *mrq;
2878 present = dw_mci_get_cd(mmc);
/* Loop until the detect state is stable (it may flip while we work). */
2879 while (present != slot->last_detect_state) {
2880 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2881 present ? "inserted" : "removed");
2882 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
2883 present ? "inserted" : "removed.", mmc_hostname(mmc));
2885 rk_send_wakeup_key();//wake up system
2886 spin_lock_bh(&host->lock);
2888 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2889 /* Card change detected */
2890 slot->last_detect_state = present;
2892 /* Clean up queue if present */
2895 if (mrq == host->mrq) {
/* Request currently being processed: fail it according to state. */
2899 switch (host->state) {
2902 case STATE_SENDING_CMD:
2903 mrq->cmd->error = -ENOMEDIUM;
2907 case STATE_SENDING_DATA:
2908 mrq->data->error = -ENOMEDIUM;
2909 dw_mci_stop_dma(host);
2911 case STATE_DATA_BUSY:
2912 case STATE_DATA_ERROR:
2913 if (mrq->data->error == -EINPROGRESS)
2914 mrq->data->error = -ENOMEDIUM;
2918 case STATE_SENDING_STOP:
2919 mrq->stop->error = -ENOMEDIUM;
2923 dw_mci_request_end(host, mrq);
/* Request still queued: fail every phase outright. */
2925 list_del(&slot->queue_node);
2926 mrq->cmd->error = -ENOMEDIUM;
2928 mrq->data->error = -ENOMEDIUM;
2930 mrq->stop->error = -ENOMEDIUM;
2932 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
2933 mrq->cmd->opcode, mmc_hostname(mmc));
2935 spin_unlock(&host->lock);
2936 mmc_request_done(slot->mmc, mrq);
2937 spin_lock(&host->lock);
2941 /* Power down slot */
2943 /* Clear down the FIFO */
2944 dw_mci_fifo_reset(host);
2945 #ifdef CONFIG_MMC_DW_IDMAC
2946 dw_mci_idmac_reset(host);
2951 spin_unlock_bh(&host->lock);
/* Re-sample before the next iteration of the stability loop. */
2953 present = dw_mci_get_cd(mmc);
2956 mmc_detect_change(slot->mmc,
2957 msecs_to_jiffies(host->pdata->detect_delay_ms));
2962 /* given a slot id, find out the device node representing that slot */
2963 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2965 struct device_node *np;
2969 if (!dev || !dev->of_node)
2972 for_each_child_of_node(dev->of_node, np) {
2973 addr = of_get_property(np, "reg", &len);
2974 if (!addr || (len < sizeof(int)))
2976 if (be32_to_cpup(addr) == slot)
/*
 * Per-slot DT property -> quirk-flag mapping, consumed by
 * dw_mci_of_get_slot_quirks() below.
 * NOTE(review): the struct member declarations and the closing
 * brace/semicolon lines are missing from this extraction.
 */
2982 static struct dw_mci_of_slot_quirks {
2985 } of_slot_quirks[] = {
2987 .quirk = "disable-wp",
2988 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2992 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2994 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2999 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3000 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3001 quirks |= of_slot_quirks[idx].id;
3006 /* find out bus-width for a given slot */
/*
 * Reads the "bus-width" DT property from the controller node (Rockchip
 * change: host node instead of the per-slot child node). Falls back to
 * a default when the property is absent.
 * NOTE(review): bus_wd declaration, the default assignment and the
 * return are missing from this extraction.
 */
3007 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3009 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3015 if (of_property_read_u32(np, "bus-width", &bus_wd))
3016 dev_err(dev, "bus-width property not found, assuming width"
3022 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Looks up the optional "pwr-gpios" DT property on the host node,
 * requests the GPIO and drives it low (power-enable asserted).
 * NOTE(review): the gpio declaration and return statements are missing
 * from this extraction.
 */
3023 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3025 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3031 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3033 /* Having a missing entry is valid; return silently */
3034 if (!gpio_is_valid(gpio))
3037 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3038 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3042 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3048 /* find the write protect gpio for a given slot; or -1 if none specified */
/*
 * Looks up the optional "wp-gpios" property on the slot's DT node and
 * requests it; absence is not an error.
 * NOTE(review): the gpio declaration and return statements are missing
 * from this extraction.
 */
3049 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3051 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3057 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3059 /* Having a missing entry is valid; return silently */
3060 if (!gpio_is_valid(gpio))
3063 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3064 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3071 /* find the cd gpio for a given slot */
/*
 * Looks up the optional "cd-gpios" property on the host node and
 * registers it as the slot-gpio card-detect for @mmc; absence is valid.
 * NOTE(review): the gpio declaration line is missing from this
 * extraction.
 */
3072 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3073 struct mmc_host *mmc)
3075 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3081 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3083 /* Having a missing entry is valid; return silently */
3084 if (!gpio_is_valid(gpio))
3087 if (mmc_gpio_request_cd(mmc, gpio, 0))
3088 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3090 #else /* CONFIG_OF */
/*
 * No-op fallbacks used when the kernel is built without device-tree
 * support; each returns a neutral value (0 / 1-bit width / NULL / -1).
 * NOTE(review): the stub bodies (braces and return statements) are
 * missing from this extraction.
 */
3091 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3095 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3099 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3103 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3107 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3108 struct mmc_host *mmc)
3112 #endif /* CONFIG_OF */
/*
 * dw_mci_init_slot - allocate and register one mmc_host for slot @id:
 * reads per-slot DT properties (frequencies, card-type restrictions,
 * caps), applies platform/driver caps, sizes the request limits, sets
 * up regulators/GPIOs/pinctrl, and finally calls mmc_add_host().
 * Returns 0 on success.
 * NOTE(review): many lines are missing from this extraction (ctrl_id /
 * bus_width / freq declarations, error paths, closing braces).
 */
3114 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3116 struct mmc_host *mmc;
3117 struct dw_mci_slot *slot;
3118 const struct dw_mci_drv_data *drv_data = host->drv_data;
3123 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3127 slot = mmc_priv(mmc);
3131 host->slot[id] = slot;
3134 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3136 mmc->ops = &dw_mci_ops;
/* "clock-freq-min-max" overrides the driver's default f_min/f_max. */
3138 if (of_property_read_u32_array(host->dev->of_node,
3139 "clock-freq-min-max", freq, 2)) {
3140 mmc->f_min = DW_MCI_FREQ_MIN;
3141 mmc->f_max = DW_MCI_FREQ_MAX;
3143 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
3144 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3146 mmc->f_min = freq[0];
3147 mmc->f_max = freq[1];
3149 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
3150 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
/* NOTE(review): strstr() args look reversed — haystack is the literal
 * "mmc0", needle is the hostname; likely intended the other way. */
3153 if(strstr("mmc0",mmc_hostname(mmc)))
3154 printk("Line%d..%s: The rk_sdmmc %s",__LINE__, __FUNCTION__,RK_SDMMC_DRIVER_VERSION);
3156 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3157 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3158 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3159 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3160 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3161 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3163 if (host->pdata->get_ocr)
3164 mmc->ocr_avail = host->pdata->get_ocr(id);
/* Default OCR: advertise the full 1.65-3.6V range. */
3167 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3168 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3169 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3170 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3174 * Start with slot power disabled, it will be enabled when a card
3177 if (host->pdata->setpower)
3178 host->pdata->setpower(id, 0);
3180 if (host->pdata->caps)
3181 mmc->caps = host->pdata->caps;
3183 if (host->pdata->pm_caps)
3184 mmc->pm_caps = host->pdata->pm_caps;
/* Controller index: "mshc" alias from DT, or the platform device id. */
3186 if (host->dev->of_node) {
3187 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3191 ctrl_id = to_platform_device(host->dev)->id;
3193 if (drv_data && drv_data->caps)
3194 mmc->caps |= drv_data->caps[ctrl_id];
3195 if (drv_data && drv_data->hold_reg_flag)
3196 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3198 /* set the compatibility of driver. */
3199 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3200 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3202 if (host->pdata->caps2)
3203 mmc->caps2 = host->pdata->caps2;
3205 if (host->pdata->get_bus_wd)
3206 bus_width = host->pdata->get_bus_wd(slot->id);
3207 else if (host->dev->of_node)
3208 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3212 switch (bus_width) {
3214 mmc->caps |= MMC_CAP_8_BIT_DATA;
3216 mmc->caps |= MMC_CAP_4_BIT_DATA;
3219 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3220 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3221 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3222 mmc->caps |= MMC_CAP_SDIO_IRQ;
3223 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3224 mmc->caps |= MMC_CAP_HW_RESET;
3225 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3226 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3227 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3228 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3229 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3230 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3231 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3232 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3234 /*Assign pm_caps pass to pm_flags*/
3235 mmc->pm_flags = mmc->pm_caps;
3237 if (host->pdata->blk_settings) {
3238 mmc->max_segs = host->pdata->blk_settings->max_segs;
3239 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3240 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3241 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3242 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3244 /* Useful defaults if platform data is unset. */
3245 #ifdef CONFIG_MMC_DW_IDMAC
3246 mmc->max_segs = host->ring_size;
3247 mmc->max_blk_size = 65536;
3248 mmc->max_blk_count = host->ring_size;
3249 mmc->max_seg_size = 0x1000;
3250 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3253 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3254 mmc->max_blk_count = 512;
3255 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3256 mmc->max_seg_size = mmc->max_req_size;
3257 #endif /* CONFIG_MMC_DW_IDMAC */
3260 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3262 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* vmmc regulator is only looked up for SD-card controllers. */
3267 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3268 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3272 if (IS_ERR(host->vmmc)) {
3273 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3276 ret = regulator_enable(host->vmmc);
3279 "failed to enable regulator: %d\n", ret);
3286 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3288 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3289 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3291 ret = mmc_add_host(mmc);
3295 /* Pinctrl set default iomux state to fucntion port.
3296 * Fixme: DON'T TOUCH EMMC SETTING!
3298 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3300 host->pinctrl = devm_pinctrl_get(host->dev);
3301 if(IS_ERR(host->pinctrl)){
3302 printk("%s: Warning : No pinctrl used!\n",mmc_hostname(host->mmc));
3304 host->pins_idle= pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_IDLE);
/* NOTE(review): pins_idle was just assigned but pins_default is
 * tested — this almost certainly should be IS_ERR(host->pins_idle). */
3305 if(IS_ERR(host->pins_default)){
3306 printk("%s: Warning : No IDLE pinctrl matched!\n", mmc_hostname(host->mmc));
3310 if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3311 printk("%s: Warning : Idle pinctrl setting failed!\n", mmc_hostname(host->mmc));
3314 host->pins_default = pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_DEFAULT);
3315 if(IS_ERR(host->pins_default)){
3316 printk("%s: Warning : No default pinctrl matched!\n", mmc_hostname(host->mmc));
3320 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3321 printk("%s: Warning : Default pinctrl setting failed!\n", mmc_hostname(host->mmc));
3327 #if defined(CONFIG_DEBUG_FS)
3328 dw_mci_init_debugfs(slot);
3331 /* Card initially undetected */
3332 slot->last_detect_state = 1;
3341 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3343 /* Shutdown detect IRQ */
3344 if (slot->host->pdata->exit)
3345 slot->host->pdata->exit(id);
3347 /* Debugfs stuff is cleaned up by mmc core */
3348 mmc_remove_host(slot->mmc);
3349 slot->host->slot[id] = NULL;
3350 mmc_free_host(slot->mmc);
/*
 * dw_mci_init_dma - allocate the descriptor/translation page and select
 * a DMA backend (internal IDMAC, external EDMAC, or none). Falls back
 * to PIO when no backend initializes, leaving host->dma_ops unusable.
 * NOTE(review): the no-dma / fallthrough paths ('no_dma:' label,
 * host->use_dma assignments) are missing from this extraction.
 */
3353 static void dw_mci_init_dma(struct dw_mci *host)
3355 /* Alloc memory for sg translation */
3356 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3357 &host->sg_dma, GFP_KERNEL);
3358 if (!host->sg_cpu) {
3359 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3364 /* Determine which DMA interface to use */
3365 #if defined(CONFIG_MMC_DW_IDMAC)
3366 host->dma_ops = &dw_mci_idmac_ops;
3367 dev_info(host->dev, "Using internal DMA controller.\n");
3368 #elif defined(CONFIG_MMC_DW_EDMAC)
3369 host->dma_ops = &dw_mci_edmac_ops;
3370 dev_info(host->dev, "Using external DMA controller.\n");
/* A usable backend must supply all four ops before we commit to DMA. */
3376 if (host->dma_ops->init && host->dma_ops->start &&
3377 host->dma_ops->stop && host->dma_ops->cleanup) {
3378 if (host->dma_ops->init(host)) {
3379 dev_err(host->dev, "%s: Unable to initialize "
3380 "DMA Controller.\n", __func__);
3384 dev_err(host->dev, "DMA initialization not found.\n");
3392 dev_info(host->dev, "Using PIO mode.\n");
3397 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3399 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3402 ctrl = mci_readl(host, CTRL);
3404 mci_writel(host, CTRL, ctrl);
3406 /* wait till resets clear */
3408 ctrl = mci_readl(host, CTRL);
3409 if (!(ctrl & reset))
3411 } while (time_before(jiffies, timeout));
3414 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * dw_mci_fifo_reset - stop any in-progress scatterlist iteration and
 * reset only the controller FIFO via dw_mci_ctrl_reset().
 * NOTE(review): intermediate lines (e.g. clearing host->sg) are missing
 * from this extraction.
 */
3420 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3423 * Reseting generates a block interrupt, hence setting
3424 * the scatter-gather pointer to NULL.
3427 sg_miter_stop(&host->sg_miter);
3431 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
3434 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3436 return dw_mci_ctrl_reset(host,
3437 SDMMC_CTRL_FIFO_RESET |
3439 SDMMC_CTRL_DMA_RESET);
/*
 * Host-level DT property -> quirk-flag mapping, consumed by
 * dw_mci_parse_dt() below.
 * NOTE(review): struct members and closing braces are missing from this
 * extraction.
 */
3444 static struct dw_mci_of_quirks {
3449 .quirk = "broken-cd",
3450 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * dw_mci_parse_dt - build a dw_mci_board platform-data structure from
 * the controller's device-tree node: slot count, quirks, FIFO depth,
 * detect delay, bus clock, capability flags, then the variant driver's
 * own parse_dt hook. Returns the pdata or an ERR_PTR.
 * NOTE(review): idx/ret declarations and the final 'return pdata;' are
 * missing from this extraction.
 */
3454 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3456 struct dw_mci_board *pdata;
3457 struct device *dev = host->dev;
3458 struct device_node *np = dev->of_node;
3459 const struct dw_mci_drv_data *drv_data = host->drv_data;
3461 u32 clock_frequency;
3463 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3465 dev_err(dev, "could not allocate memory for pdata\n");
3466 return ERR_PTR(-ENOMEM);
3469 /* find out number of slots supported */
3470 if (of_property_read_u32(dev->of_node, "num-slots",
3471 &pdata->num_slots)) {
3472 dev_info(dev, "num-slots property not found, "
3473 "assuming 1 slot is available\n");
3474 pdata->num_slots = 1;
/* Translate host-level quirk properties into pdata->quirks bits. */
3478 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3479 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3480 pdata->quirks |= of_quirks[idx].id;
3483 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3484 dev_info(dev, "fifo-depth property not found, using "
3485 "value of FIFOTH register as default\n");
3487 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3489 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3490 pdata->bus_hz = clock_frequency;
3492 if (drv_data && drv_data->parse_dt) {
3493 ret = drv_data->parse_dt(host);
3495 return ERR_PTR(ret);
3498 if (of_find_property(np, "keep-power-in-suspend", NULL))
3499 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3501 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3502 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3504 if (of_find_property(np, "supports-highspeed", NULL))
3505 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3507 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3508 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3510 if (of_find_property(np, "supports-DDR_MODE", NULL))
3511 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3513 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3514 pdata->caps2 |= MMC_CAP2_HS200;
3516 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3517 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3519 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3520 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3522 if (of_get_property(np, "cd-inverted", NULL))
3523 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3524 if (of_get_property(np, "bootpart-no-access", NULL))
3525 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3530 #else /* CONFIG_OF */
/* Without DT support there is no platform data source: always fail. */
3531 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3533 return ERR_PTR(-EINVAL);
3535 #endif /* CONFIG_OF */
/*
 * dw_mci_dealwith_timeout - recover from a data-transfer timeout:
 * fabricate a CRC/end-bit data error, fully reset the controller,
 * restore FIFOTH/TMOUT/interrupt masks, re-run set_ios/setup_bus for
 * powered slots, and schedule the tasklet to finish the request.
 * NOTE(review): regs/i declarations, other switch cases and closing
 * braces are missing from this extraction.
 */
3537 static void dw_mci_dealwith_timeout(struct dw_mci *host)
3541 switch(host->state){
3544 case STATE_SENDING_DATA:
3545 case STATE_DATA_BUSY:
/* Report the stall as a data error so the core retries/aborts cleanly. */
3546 host->data_status |= (SDMMC_INT_DCRC|SDMMC_INT_EBE);
3547 mci_writel(host, RINTSTS, SDMMC_INT_DRTO); // clear interrupt
3548 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3549 host->state = STATE_DATA_BUSY;
3550 if (!dw_mci_ctrl_all_reset(host)) {
3555 /* NO requirement to reclaim slave chn using external dmac */
3556 #ifdef CONFIG_MMC_DW_IDMAC
3557 if (host->use_dma && host->dma_ops->init)
3558 host->dma_ops->init(host);
3562 * Restore the initial value at FIFOTH register
3563 * And Invalidate the prev_blksz with zero
3565 mci_writel(host, FIFOTH, host->fifoth_val);
3566 host->prev_blksz = 0;
3567 mci_writel(host, TMOUT, 0xFFFFFFFF);
3568 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3569 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
3570 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
/* Card-detect IRQ is only wanted on non-SDIO controllers. */
3571 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3572 regs |= SDMMC_INT_CD;
3573 mci_writel(host, INTMASK, regs);
3574 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3575 for (i = 0; i < host->num_slots; i++) {
3576 struct dw_mci_slot *slot = host->slot[i];
3579 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3580 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3581 dw_mci_setup_bus(slot, true);
3584 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3585 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_dto_timeout - DTO watchdog timer callback (armed by
 * dw_mci_cmd_interrupt). Fires when a data transfer never completed:
 * fakes an end-bit error and runs the full timeout recovery with the
 * controller IRQ masked.
 * NOTE(review): the function's brace lines are missing from this
 * extraction.
 */
3591 static void dw_mci_dto_timeout(unsigned long host_data)
3593 struct dw_mci *host = (struct dw_mci *) host_data;
3595 disable_irq(host->irq);
3597 host->data_status = SDMMC_INT_EBE;
3598 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3599 dw_mci_dealwith_timeout(host);
3601 enable_irq(host->irq);
/*
 * dw_mci_probe - main controller bring-up: parse platform data / DT,
 * detect IP version and data-register offset, acquire and rate the
 * clocks, determine host data width from HCON, reset the block, set up
 * DMA, interrupts, FIFO thresholds, the card workqueue, the DTO timer,
 * and finally initialize each slot. Returns 0 when at least one slot
 * initialized.
 * NOTE(review): local declarations (fifo_size, init_slots, regs), some
 * error labels and closing braces are missing from this extraction.
 */
3603 int dw_mci_probe(struct dw_mci *host)
3605 const struct dw_mci_drv_data *drv_data = host->drv_data;
3606 int width, i, ret = 0;
3612 host->pdata = dw_mci_parse_dt(host);
3613 if (IS_ERR(host->pdata)) {
3614 dev_err(host->dev, "platform data not available\n");
3619 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3621 "Platform data must supply select_slot function\n");
3626 * In 2.40a spec, Data offset is changed.
3627 * Need to check the version-id and set data-offset for DATA register.
3629 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3630 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3632 if (host->verid < DW_MMC_240A)
3633 host->data_offset = DATA_OFFSET;
3635 host->data_offset = DATA_240A_OFFSET;
3638 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3639 if (IS_ERR(host->hclk_mmc)) {
3640 dev_err(host->dev, "failed to get hclk_mmc\n");
3641 ret = PTR_ERR(host->hclk_mmc);
3645 clk_prepare_enable(host->hclk_mmc);
3648 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3649 if (IS_ERR(host->clk_mmc)) {
3650 dev_err(host->dev, "failed to get clk mmc_per\n");
3651 ret = PTR_ERR(host->clk_mmc);
3655 host->bus_hz = host->pdata->bus_hz;
3656 if (!host->bus_hz) {
3657 dev_err(host->dev,"Platform data must supply bus speed\n");
3662 if (host->verid < DW_MMC_240A)
3663 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3665 //rockchip: fix divider 2 in clksum before controlller
3666 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3669 dev_err(host->dev, "failed to set clk mmc\n");
3672 clk_prepare_enable(host->clk_mmc);
3674 if (drv_data && drv_data->setup_clock) {
3675 ret = drv_data->setup_clock(host);
3678 "implementation specific clock setup failed\n");
3683 host->quirks = host->pdata->quirks;
3684 host->irq_state = true;
3685 host->set_speed = 0;
3687 host->svi_flags = 0;
3689 spin_lock_init(&host->lock);
3690 INIT_LIST_HEAD(&host->queue);
3693 * Get the host data width - this assumes that HCON has been set with
3694 * the correct values.
3696 i = (mci_readl(host, HCON) >> 7) & 0x7;
/* HCON[9:7]: 0 = 16-bit, 2 = 64-bit, anything else -> 32-bit default. */
3698 host->push_data = dw_mci_push_data16;
3699 host->pull_data = dw_mci_pull_data16;
3701 host->data_shift = 1;
3702 } else if (i == 2) {
3703 host->push_data = dw_mci_push_data64;
3704 host->pull_data = dw_mci_pull_data64;
3706 host->data_shift = 3;
3708 /* Check for a reserved value, and warn if it is */
3710 "HCON reports a reserved host data width!\n"
3711 "Defaulting to 32-bit access.\n");
3712 host->push_data = dw_mci_push_data32;
3713 host->pull_data = dw_mci_pull_data32;
3715 host->data_shift = 2;
3718 /* Reset all blocks */
3719 if (!dw_mci_ctrl_all_reset(host))
3722 host->dma_ops = host->pdata->dma_ops;
3723 dw_mci_init_dma(host);
3725 /* Clear the interrupts for the host controller */
3726 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3727 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3729 /* Put in max timeout */
3730 mci_writel(host, TMOUT, 0xFFFFFFFF);
3733 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3734 * Tx Mark = fifo_size / 2 DMA Size = 8
3736 if (!host->pdata->fifo_depth) {
3738 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3739 * have been overwritten by the bootloader, just like we're
3740 * about to do, so if you know the value for your hardware, you
3741 * should put it in the platform data.
3743 fifo_size = mci_readl(host, FIFOTH);
3744 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3746 fifo_size = host->pdata->fifo_depth;
3748 host->fifo_depth = fifo_size;
3750 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3751 mci_writel(host, FIFOTH, host->fifoth_val);
3753 /* disable clock to CIU */
3754 mci_writel(host, CLKENA, 0);
3755 mci_writel(host, CLKSRC, 0);
3757 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3758 host->card_workqueue = alloc_workqueue("dw-mci-card",
3759 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
3760 if (!host->card_workqueue) {
3764 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
3765 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3766 host->irq_flags, "dw-mci", host);
3770 if (host->pdata->num_slots)
3771 host->num_slots = host->pdata->num_slots;
/* Fallback: HCON[5:1] encodes (number of card slots - 1). */
3773 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
3775 setup_timer(&host->dto_timer, dw_mci_dto_timeout, (unsigned long)host);
3776 /* We need at least one slot to succeed */
3777 for (i = 0; i < host->num_slots; i++) {
3778 ret = dw_mci_init_slot(host, i);
3780 dev_dbg(host->dev, "slot %d init failed\n", i);
3786 * Enable interrupts for command done, data over, data empty, card det,
3787 * receive ready and error such as transmit, receive timeout, crc error
3789 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3790 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
3791 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
3792 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3793 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3794 regs |= SDMMC_INT_CD;
3796 mci_writel(host, INTMASK, regs);
3798 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
3800 dev_info(host->dev, "DW MMC controller at irq %d, "
3801 "%d bit host data width, "
3803 host->irq, width, fifo_size);
3806 dev_info(host->dev, "%d slots initialized\n", init_slots);
3808 dev_dbg(host->dev, "attempted to initialize %d slots, "
3809 "but failed on all\n", host->num_slots);
3814 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
3815 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* Error unwind: tear down in reverse order of setup. */
3820 destroy_workqueue(host->card_workqueue);
3823 if (host->use_dma && host->dma_ops->exit)
3824 host->dma_ops->exit(host);
3827 regulator_disable(host->vmmc);
3828 regulator_put(host->vmmc);
3832 if (!IS_ERR(host->clk_mmc))
3833 clk_disable_unprepare(host->clk_mmc);
3835 if (!IS_ERR(host->hclk_mmc))
3836 clk_disable_unprepare(host->hclk_mmc);
3840 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - controller teardown, the reverse of dw_mci_probe():
 * kill the DTO timer, mask and clear interrupts, clean up every slot,
 * gate the card clock, destroy the workqueue, exit DMA, and release the
 * regulator and clocks.
 * NOTE(review): loop variable declaration, slot-presence check and the
 * closing brace are missing from this extraction.
 */
3842 void dw_mci_remove(struct dw_mci *host)
3845 del_timer_sync(&host->dto_timer);
3847 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3848 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3850 for(i = 0; i < host->num_slots; i++){
3851 dev_dbg(host->dev, "remove slot %d\n", i);
3853 dw_mci_cleanup_slot(host->slot[i], i);
3856 /* disable clock to CIU */
3857 mci_writel(host, CLKENA, 0);
3858 mci_writel(host, CLKSRC, 0);
3860 destroy_workqueue(host->card_workqueue);
3862 if(host->use_dma && host->dma_ops->exit)
3863 host->dma_ops->exit(host);
3866 regulator_disable(host->vmmc);
3867 regulator_put(host->vmmc);
3869 if(!IS_ERR(host->clk_mmc))
3870 clk_disable_unprepare(host->clk_mmc);
3872 if(!IS_ERR(host->hclk_mmc))
3873 clk_disable_unprepare(host->hclk_mmc);
3875 EXPORT_SYMBOL(dw_mci_remove);
3879 #ifdef CONFIG_PM_SLEEP
3881  * TODO: we should probably disable the clock to the card in the suspend path.
3883 int dw_mci_suspend(struct dw_mci *host)
/*
 * System-sleep suspend hook: power down the card regulator, tear down
 * DMA, and -- for the SD (removable card) controller only -- park the
 * pins, silence the controller and arm the card-detect GPIO as a
 * wakeup source.
 *
 * NOTE(review): sampled excerpt -- the return statement(s), the guard
 * around regulator_disable() and several braces are not visible here.
 */
/* NOTE(review): indentation implies a guard (likely on host->vmmc)
 * above this call that the excerpt omits -- confirm in full source. */
3886 		regulator_disable(host->vmmc);
3888 	if(host->use_dma && host->dma_ops->exit)
3889 		host->dma_ops->exit(host);
3891 	/*only for sdmmc controller*/
3892 	if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD){
/* Stop mmc core rescans while asleep; cancel an in-flight detect work
 * and release its wakelock if one was pending. */
3893 		host->mmc->rescan_disable = 1;
3894 		if(cancel_delayed_work_sync(&host->mmc->detect))
3895 			wake_unlock(&host->mmc->detect_wake_lock);
3897 		disable_irq(host->irq);
/* Switch pinctrl to the idle state to avoid leakage through the
 * SDMMC pads while suspended. */
3898 		if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3899 			MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
3900 				mmc_hostname(host->mmc));
/* Re-acquire the card-detect GPIO so removal/insertion can wake us. */
3901 		dw_mci_of_get_cd_gpio(host->dev,0,host->mmc);
/* Clear pending status, mask all interrupts and disable the
 * controller core entirely for the duration of sleep. */
3902 		mci_writel(host, RINTSTS, 0xFFFFFFFF);
3903 		mci_writel(host, INTMASK, 0x00);
3904 		mci_writel(host, CTRL, 0x00);
/* Card-detect IRQ remains a wake source. */
3905 		enable_irq_wake(host->mmc->slot.cd_irq);
3909 EXPORT_SYMBOL(dw_mci_suspend);
3911 int dw_mci_resume(struct dw_mci *host)
/*
 * System-sleep resume hook: undo dw_mci_suspend(). Restores pinctrl,
 * regulator and controller register state, re-initializes DMA, and
 * re-programs each slot that kept power across sleep.
 *
 * NOTE(review): sampled excerpt -- the retry label implied by
 * 'retry_cnt', several error/return paths, a cpu_is_rk3288()-style
 * branch above line 3933 and the rk3126 branch body are not visible
 * here. Comments describe only the visible lines.
 */
3913 	int i, ret, retry_cnt = 0;
3915 	struct dw_mci_slot *slot;
/* For the SDIO controller, skip the heavyweight resume when no card
 * is marked present in the slot flags. */
3917 	if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO){
3918 		slot = mmc_priv(host->mmc);
3920 		if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
3923 	/*only for sdmmc controller*/
3924 	if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
/* Reverse the suspend-side wake setup: drop the wake source, free the
 * CD gpio claimed in suspend, restore default pinmux, allow rescans. */
3925 		disable_irq_wake(host->mmc->slot.cd_irq);
3926 		mmc_gpio_free_cd(host->mmc);
3927 		if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3928 			MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
3929 				mmc_hostname(host->mmc));
3930 		host->mmc->rescan_disable = 0;
/* SoC-specific GRF bit: clear the card-detect mux override
 * (upper halfword is the write-enable mask for the bit written). */
3933 			grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
3935 		else if(cpu_is_rk3036())
3936 			grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
3937 		else if(cpu_is_rk3126())
/* Re-enable card power; failure is logged (return path not visible). */
3944 	ret = regulator_enable(host->vmmc);
3947 			"failed to enable regulator: %d\n", ret);
/* Full controller reset must succeed before reprogramming registers. */
3952 	if(!dw_mci_ctrl_all_reset(host)){
3957 	if(host->use_dma && host->dma_ops->init)
3958 		host->dma_ops->init(host);
3961 	 * Restore the initial value at FIFOTH register
3962 	 * And Invalidate the prev_blksz with zero
3964 	mci_writel(host, FIFOTH, host->fifoth_val);
3965 	host->prev_blksz = 0;
3966 	/* Put in max timeout */
3967 	mci_writel(host, TMOUT, 0xFFFFFFFF);
/* Re-arm the interrupt mask (mirrors the probe-time setup); CD is
 * only unmasked for non-SDIO controllers. */
3969 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3970 	regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
3972 	if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3973 		regs |= SDMMC_INT_CD;
3974 	mci_writel(host, INTMASK, regs);
3975 	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3976 	/*only for sdmmc controller*/
/* Re-enable the host IRQ disabled in suspend, but only on the first
 * pass (retry_cnt guards against double-enable on retries). */
3977 	if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
3978 		enable_irq(host->irq);
/* Slots that kept power across sleep need their ios/bus re-applied. */
3981 	for(i = 0; i < host->num_slots; i++){
3982 		struct dw_mci_slot *slot = host->slot[i];
3985 		if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
3986 			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3987 			dw_mci_setup_bus(slot, true);
3993 EXPORT_SYMBOL(dw_mci_resume);
3994 #endif /* CONFIG_PM_SLEEP */
/*
 * Module entry/exit. NOTE(review): sampled excerpt -- the function
 * bodies' braces and return statements are not visible here; only the
 * banner print and the module hook registration are shown.
 */
3996 static int __init dw_mci_init(void)
3998 	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4002 static void __exit dw_mci_exit(void)
4006 module_init(dw_mci_init);
4007 module_exit(dw_mci_exit);
4009 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4010 MODULE_AUTHOR("NXP Semiconductor VietNam");
4011 MODULE_AUTHOR("Imagination Technologies Ltd");
4012 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4013 MODULE_AUTHOR("Rockchip Electronics, Bangwang Xie <xbw@rock-chips.com>");
4014 MODULE_LICENSE("GPL v2");