2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/mmc/host.h>
34 #include <linux/mmc/mmc.h>
35 #include <linux/mmc/sd.h>
36 #include <linux/mmc/card.h>
37 #include <linux/mmc/sdio.h>
38 #include <linux/mmc/rk_mmc.h>
39 #include <linux/bitops.h>
40 #include <linux/regulator/consumer.h>
41 #include <linux/workqueue.h>
43 #include <linux/of_gpio.h>
44 #include <linux/mmc/slot-gpio.h>
45 #include <linux/clk-private.h>
46 #include <linux/rockchip/cpu.h>
49 #include "rk_sdmmc_dbg.h"
50 #include <linux/regulator/rockchip_io_vol_domain.h>
51 #include "../../clk/rockchip/clk-ops.h"
53 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
55 /* Common flag combinations */
56 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
57 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
59 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
61 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
62 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
63 #define DW_MCI_SEND_STATUS 1
64 #define DW_MCI_RECV_STATUS 2
65 #define DW_MCI_DMA_THRESHOLD 16
67 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
68 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
70 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
71 #define SDMMC_DATA_TIMEOUT_SD 500
72 #define SDMMC_DATA_TIMEOUT_SDIO 250
73 #define SDMMC_DATA_TIMEOUT_EMMC 2500
75 #define SDMMC_CMD_RTO_MAX_HOLD 200
76 #define SDMMC_WAIT_FOR_UNBUSY 2500
78 #ifdef CONFIG_MMC_DW_IDMAC
79 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
80 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
81 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
85 u32 des0; /* Control Descriptor */
/* des0 control bits: descriptor-chained mode, first/last markers,
 * card-error summary, and the hardware OWN bit. */
86 #define IDMAC_DES0_DIC BIT(1)
87 #define IDMAC_DES0_LD BIT(2)
88 #define IDMAC_DES0_FD BIT(3)
89 #define IDMAC_DES0_CH BIT(4)
90 #define IDMAC_DES0_ER BIT(5)
91 #define IDMAC_DES0_CES BIT(30)
92 #define IDMAC_DES0_OWN BIT(31)
94 u32 des1; /* Buffer sizes */
/* Buffer-1 size lives in bits [12:0] of des1. */
95 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
96 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
98 u32 des2; /* buffer 1 physical address */
100 u32 des3; /* buffer 2 physical address */
102 #endif /* CONFIG_MMC_DW_IDMAC */
/* Standard SD/eMMC tuning block pattern for a 4-bit data bus (CMD19). */
104 static const u8 tuning_blk_pattern_4bit[] = {
105 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
106 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
107 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
108 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
109 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
110 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
111 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
112 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard tuning block pattern for an 8-bit data bus (CMD21, eMMC HS200). */
115 static const u8 tuning_blk_pattern_8bit[] = {
116 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
117 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
118 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
119 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
120 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
121 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
122 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
123 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
124 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
125 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
126 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
127 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
128 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
129 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
130 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
131 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Forward declarations for reset / low-power helpers defined later in the file. */
134 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
135 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
136 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
137 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
139 /* printk all registers of the current host */
/*
 * dw_mci_regs_printk() - dump the dw_mci_regs name/address table for this
 * host via printk.  Debug-only helper: walks the table until the NULL-name
 * sentinel, printing each register's raw value.
 */
141 static int dw_mci_regs_printk(struct dw_mci *host)
143 struct sdmmc_reg *regs = dw_mci_regs;
145 while( regs->name != 0 ){
146 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
149 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
154 #if defined(CONFIG_DEBUG_FS)
/*
 * dw_mci_req_show() - seq_file dump of the in-flight mmc_request for a slot:
 * command, data, and stop-command fields, snapshotted under host->lock.
 */
155 static int dw_mci_req_show(struct seq_file *s, void *v)
157 struct dw_mci_slot *slot = s->private;
158 struct mmc_request *mrq;
159 struct mmc_command *cmd;
160 struct mmc_command *stop;
161 struct mmc_data *data;
163 /* Make sure we get a consistent snapshot */
164 spin_lock_bh(&slot->host->lock);
/* NOTE(review): the 4th response word below prints resp[2] a second time;
 * resp[3] was presumably intended (same defect in the stop dump further down). */
174 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
175 cmd->opcode, cmd->arg, cmd->flags,
176 cmd->resp[0], cmd->resp[1], cmd->resp[2],
177 cmd->resp[2], cmd->error);
179 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
180 data->bytes_xfered, data->blocks,
181 data->blksz, data->flags, data->error);
184 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
185 stop->opcode, stop->arg, stop->flags,
186 stop->resp[0], stop->resp[1], stop->resp[2],
187 stop->resp[2], stop->error);
190 spin_unlock_bh(&slot->host->lock);
/* debugfs open hook binding dw_mci_req_show to the slot stored in i_private. */
195 static int dw_mci_req_open(struct inode *inode, struct file *file)
197 return single_open(file, dw_mci_req_show, inode->i_private);
200 static const struct file_operations dw_mci_req_fops = {
201 .owner = THIS_MODULE,
202 .open = dw_mci_req_open,
205 .release = single_release,
/*
 * dw_mci_regs_show() - seq_file dump of key controller registers.
 * NOTE(review): these seq_printf calls pass the SDMMC_* macros directly --
 * if those macros are plain register offsets this prints the offsets, not
 * values read from hardware; confirm against the macro definitions.
 */
208 static int dw_mci_regs_show(struct seq_file *s, void *v)
210 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
211 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
212 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
213 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
214 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
215 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
220 static int dw_mci_regs_open(struct inode *inode, struct file *file)
222 return single_open(file, dw_mci_regs_show, inode->i_private);
225 static const struct file_operations dw_mci_regs_fops = {
226 .owner = THIS_MODULE,
227 .open = dw_mci_regs_open,
230 .release = single_release,
/*
 * dw_mci_init_debugfs() - create the per-slot debugfs tree under the mmc
 * host's debugfs_root: "regs", "req", plus raw state/event words.  Failure
 * is reported with dev_err but is non-fatal.
 */
233 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
235 struct mmc_host *mmc = slot->mmc;
236 struct dw_mci *host = slot->host;
240 root = mmc->debugfs_root;
244 node = debugfs_create_file("regs", S_IRUSR, root, host,
249 node = debugfs_create_file("req", S_IRUSR, root, slot,
254 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
258 node = debugfs_create_x32("pending_events", S_IRUSR, root,
259 (u32 *)&host->pending_events);
263 node = debugfs_create_x32("completed_events", S_IRUSR, root,
264 (u32 *)&host->completed_events);
271 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
273 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the maximum possible value into the data timeout register. */
275 static void dw_mci_set_timeout(struct dw_mci *host)
277 /* timeout (maximum) */
278 mci_writel(host, TMOUT, 0xffffffff);
/*
 * dw_mci_prepare_command() - translate an mmc_command's flags into the
 * controller's CMD register bit layout (response expected/long/CRC, data
 * expected, stream mode, write direction).  The platform drv_data hook gets
 * the last word so SoC-specific quirks can adjust the bits.
 * Returns the assembled CMD register value (without the START bit).
 */
281 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
283 struct mmc_data *data;
284 struct dw_mci_slot *slot = mmc_priv(mmc);
285 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
/* Mark as in-flight; cleared when the command completes or errors out. */
287 cmd->error = -EINPROGRESS;
291 if (cmdr == MMC_STOP_TRANSMISSION)
292 cmdr |= SDMMC_CMD_STOP;
294 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
296 if (cmd->flags & MMC_RSP_PRESENT) {
297 /* We expect a response, so set this bit */
298 cmdr |= SDMMC_CMD_RESP_EXP;
299 if (cmd->flags & MMC_RSP_136)
300 cmdr |= SDMMC_CMD_RESP_LONG;
303 if (cmd->flags & MMC_RSP_CRC)
304 cmdr |= SDMMC_CMD_RESP_CRC;
308 cmdr |= SDMMC_CMD_DAT_EXP;
309 if (data->flags & MMC_DATA_STREAM)
310 cmdr |= SDMMC_CMD_STRM_MODE;
311 if (data->flags & MMC_DATA_WRITE)
312 cmdr |= SDMMC_CMD_DAT_WR;
/* Give the SoC-specific layer a chance to tweak the command bits. */
315 if (drv_data && drv_data->prepare_command)
316 drv_data->prepare_command(slot->host, &cmdr);
/*
 * dw_mci_prep_stop_abort() - build a stop/abort command in host->stop_abort
 * for a data command that may need aborting: CMD12 (STOP_TRANSMISSION) for
 * block read/write, or an SDIO CCCR-abort write (CMD52) for CMD53 transfers.
 * Returns the CMD register value for the prepared stop command.
 */
322 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
324 struct mmc_command *stop;
330 stop = &host->stop_abort;
332 memset(stop, 0, sizeof(struct mmc_command));
334 if (cmdr == MMC_READ_SINGLE_BLOCK ||
335 cmdr == MMC_READ_MULTIPLE_BLOCK ||
336 cmdr == MMC_WRITE_BLOCK ||
337 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
338 stop->opcode = MMC_STOP_TRANSMISSION;
340 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
341 } else if (cmdr == SD_IO_RW_EXTENDED) {
342 stop->opcode = SD_IO_RW_DIRECT;
/* CMD52 write to the CCCR ABORT register, preserving the function number
 * from the aborted CMD53's argument (bits 30:28). */
343 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
344 ((cmd->arg >> 28) & 0x7);
345 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
350 cmdr = stop->opcode | SDMMC_CMD_STOP |
351 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * dw_mci_start_command() - write CMDARG and CMD (with the START bit) to
 * fire a command at the controller.  CMD11 (voltage switch) first forces
 * the slot out of low-power clock gating and sets the VOLT_SWITCH bit.
 */
356 static void dw_mci_start_command(struct dw_mci *host,
357 struct mmc_command *cmd, u32 cmd_flags)
359 struct dw_mci_slot *slot = host->slot[0];
360 /*temporality fix slot[0] due to host->num_slots equal to 1*/
/* Remember the previous command for debugging/error reporting. */
362 host->pre_cmd = host->cmd;
365 "start command: ARGR=0x%08x CMDR=0x%08x\n",
366 cmd->arg, cmd_flags);
368 if(SD_SWITCH_VOLTAGE == cmd->opcode){
369 /*confirm non-low-power mode*/
370 mci_writel(host, CMDARG, 0);
371 dw_mci_disable_low_power(slot);
373 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
374 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
376 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
379 mci_writel(host, CMDARG, cmd->arg);
382 /* fix the value to 1 in some Soc,for example RK3188. */
383 if(host->mmc->hold_reg_flag)
384 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
386 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the pre-built stop command for a data transfer (see host->stop_cmdr). */
390 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
392 dw_mci_start_command(host, data->stop, host->stop_cmdr);
395 /* DMA interface functions */
/* Abort an in-flight DMA transfer and mark the transfer phase complete. */
396 static void dw_mci_stop_dma(struct dw_mci *host)
398 if (host->using_dma) {
399 host->dma_ops->stop(host);
400 host->dma_ops->cleanup(host);
403 /* Data transfer was stopped by the interrupt handler */
404 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Map an mmc_data direction flag to the DMA API direction constant. */
407 static int dw_mci_get_dma_dir(struct mmc_data *data)
409 if (data->flags & MMC_DATA_WRITE)
410 return DMA_TO_DEVICE;
412 return DMA_FROM_DEVICE;
415 #ifdef CONFIG_MMC_DW_IDMAC
/* Unmap the scatterlist unless the pre_req path owns it (host_cookie set). */
416 static void dw_mci_dma_cleanup(struct dw_mci *host)
418 struct mmc_data *data = host->data;
421 if (!data->host_cookie)
422 dma_unmap_sg(host->dev,
425 dw_mci_get_dma_dir(data));
/* Pulse the internal DMAC software-reset bit in BMOD. */
428 static void dw_mci_idmac_reset(struct dw_mci *host)
430 u32 bmod = mci_readl(host, BMOD);
431 /* Software reset of DMA */
432 bmod |= SDMMC_IDMAC_SWRESET;
433 mci_writel(host, BMOD, bmod);
/* Detach and reset the IDMAC: drop USE_IDMAC in CTRL, then stop/reset BMOD. */
436 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
440 /* Disable and reset the IDMAC interface */
441 temp = mci_readl(host, CTRL);
442 temp &= ~SDMMC_CTRL_USE_IDMAC;
443 temp |= SDMMC_CTRL_DMA_RESET;
444 mci_writel(host, CTRL, temp);
446 /* Stop the IDMAC running */
447 temp = mci_readl(host, BMOD);
448 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
449 temp |= SDMMC_IDMAC_SWRESET;
450 mci_writel(host, BMOD, temp);
/* IDMAC completion callback: clean up the mapping and kick the state tasklet. */
453 static void dw_mci_idmac_complete_dma(void *arg)
455 struct dw_mci *host = arg;
456 struct mmc_data *data = host->data;
458 dev_vdbg(host->dev, "DMA complete\n");
461 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
462 host->mrq->cmd->opcode,host->mrq->cmd->arg,
463 data->blocks,data->blksz,mmc_hostname(host->mmc));
466 host->dma_ops->cleanup(host);
469 * If the card was removed, data will be NULL. No point in trying to
470 * send the stop command or waiting for NBUSY in this case.
473 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
474 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_translate_sglist() - fill the IDMAC descriptor ring from the mapped
 * scatterlist: one chained descriptor per segment, then mark first/last.
 */
478 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
482 struct idmac_desc *desc = host->sg_cpu;
484 for (i = 0; i < sg_len; i++, desc++) {
485 unsigned int length = sg_dma_len(&data->sg[i]);
486 u32 mem_addr = sg_dma_address(&data->sg[i]);
488 /* Set the OWN bit and disable interrupts for this descriptor */
489 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
492 IDMAC_SET_BUFFER1_SIZE(desc, length);
494 /* Physical address to DMA to/from */
495 desc->des2 = mem_addr;
498 /* Set first descriptor */
500 desc->des0 |= IDMAC_DES0_FD;
502 /* Set last descriptor */
/* NOTE(review): byte-offset arithmetic -- correct only if host->sg_cpu is
 * void * (byte-granular); verify the field's declared type. */
503 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
504 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
505 desc->des0 |= IDMAC_DES0_LD;
/* Build the descriptor ring, enable the IDMAC, and poke the poll demand reg. */
510 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
514 dw_mci_translate_sglist(host, host->data, sg_len);
516 /* Select IDMAC interface */
517 temp = mci_readl(host, CTRL);
518 temp |= SDMMC_CTRL_USE_IDMAC;
519 mci_writel(host, CTRL, temp);
523 /* Enable the IDMAC */
524 temp = mci_readl(host, BMOD);
525 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
526 mci_writel(host, BMOD, temp);
528 /* Start it running */
529 mci_writel(host, PLDMND, 1);
/*
 * dw_mci_idmac_init() - link the descriptor page into a forward-chained
 * ring (last descriptor wraps to sg_dma), reset the IDMAC, and enable only
 * the TX/RX-complete interrupts.
 */
532 static int dw_mci_idmac_init(struct dw_mci *host)
534 struct idmac_desc *p;
537 /* Number of descriptors in the ring buffer */
538 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
540 /* Forward link the descriptor list */
541 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
542 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
544 /* Set the last descriptor as the end-of-ring descriptor */
545 p->des3 = host->sg_dma;
546 p->des0 = IDMAC_DES0_ER;
548 dw_mci_idmac_reset(host);
550 /* Mask out interrupts - get Tx & Rx complete only */
551 mci_writel(host, IDSTS, IDMAC_INT_CLR);
552 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
555 /* Set the descriptor base address */
556 mci_writel(host, DBADDR, host->sg_dma);
/* dma_ops vtable for the internal DMA controller. */
560 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
561 .init = dw_mci_idmac_init,
562 .start = dw_mci_idmac_start_dma,
563 .stop = dw_mci_idmac_stop_dma,
564 .complete = dw_mci_idmac_complete_dma,
565 .cleanup = dw_mci_dma_cleanup,
/* Unmap the scatterlist unless the pre_req path owns it (host_cookie set). */
569 static void dw_mci_edma_cleanup(struct dw_mci *host)
571 struct mmc_data *data = host->data;
574 if (!data->host_cookie)
575 dma_unmap_sg(host->dev,
576 data->sg, data->sg_len,
577 dw_mci_get_dma_dir(data));
/* Abort all pending work on the external dmaengine channel. */
580 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
582 dmaengine_terminate_all(host->dms->ch);
/* dmaengine completion callback: sync CPU caches for reads, clean up,
 * and kick the driver state-machine tasklet. */
585 static void dw_mci_edmac_complete_dma(void *arg)
587 struct dw_mci *host = arg;
588 struct mmc_data *data = host->data;
590 dev_vdbg(host->dev, "DMA complete\n");
593 if(data->flags & MMC_DATA_READ)
594 /* Invalidate cache after read */
595 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
596 data->sg_len, DMA_FROM_DEVICE);
598 host->dma_ops->cleanup(host);
601 * If the card was removed, data will be NULL. No point in trying to
602 * send the stop command or waiting for NBUSY in this case.
605 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
606 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_edmac_start_dma() - configure and launch a transfer on the external
 * dmaengine channel, targeting the controller FIFO at phy_regs + data_offset.
 */
610 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
612 struct dma_slave_config slave_config;
613 struct dma_async_tx_descriptor *desc = NULL;
614 struct scatterlist *sgl = host->data->sg;
615 u32 sg_elems = host->data->sg_len;
618 /* Set external dma config: burst size, burst width*/
619 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
620 slave_config.src_addr = slave_config.dst_addr;
621 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
622 slave_config.src_addr_width = slave_config.dst_addr_width;
624 /* Match FIFO dma burst MSIZE with external dma config*/
/* BUG(review): "&& 0x7" is logical AND, so maxburst collapses to 0 or 1
 * instead of extracting FIFOTH MSIZE bits [30:28]; "& 0x7" was intended. */
625 slave_config.dst_maxburst = ((host->fifoth_val) >> 28) && 0x7;
626 slave_config.src_maxburst = slave_config.dst_maxburst;
628 if(host->data->flags & MMC_DATA_WRITE){
629 slave_config.direction = DMA_MEM_TO_DEV;
630 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
632 dev_err(host->dev, "error in dw_mci edma configuration.\n");
636 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
637 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
639 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
642 /* Set dw_mci_edmac_complete_dma as callback */
643 desc->callback = dw_mci_edmac_complete_dma;
644 desc->callback_param = (void *)host;
645 dmaengine_submit(desc);
647 /* Flush cache before write */
648 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
649 sg_elems, DMA_TO_DEVICE);
650 dma_async_issue_pending(host->dms->ch);
/* Read path: device-to-memory with the same channel configuration. */
653 slave_config.direction = DMA_DEV_TO_MEM;
654 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
656 dev_err(host->dev, "error in dw_mci edma configuration.\n");
659 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
660 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
662 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
665 /* set dw_mci_edmac_complete_dma as callback */
666 desc->callback = dw_mci_edmac_complete_dma;
667 desc->callback_param = (void *)host;
668 dmaengine_submit(desc);
669 dma_async_issue_pending(host->dms->ch);
/*
 * dw_mci_edmac_init() - allocate the dma-slave bookkeeping struct and request
 * the "dw_mci" channel named in the device tree.
 */
673 static int dw_mci_edmac_init(struct dw_mci *host)
675 /* Request external dma channel, SHOULD decide chn in dts */
677 host->dms = (struct dw_mci_dma_slave *)kmalloc
678 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
679 if (NULL == host->dms) {
680 dev_err(host->dev, "No enough memory to alloc dms.\n");
684 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
685 if (!host->dms->ch) {
/* BUG(review): this error path dereferences host->dms->ch->chan_id when
 * ch is known to be NULL -- guaranteed NULL-pointer dereference. */
686 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
687 host->dms->ch->chan_id);
694 if (NULL != host->dms) {
/* Release the dmaengine channel and free the bookkeeping struct. */
702 static void dw_mci_edmac_exit(struct dw_mci *host)
704 dma_release_channel(host->dms->ch);
705 host->dms->ch = NULL;
707 if (NULL != host->dms) {
/* dma_ops vtable for the external dmaengine-based path. */
713 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
714 .init = dw_mci_edmac_init,
715 .exit = dw_mci_edmac_exit,
716 .start = dw_mci_edmac_start_dma,
717 .stop = dw_mci_edmac_stop_dma,
718 .complete = dw_mci_edmac_complete_dma,
719 .cleanup = dw_mci_edma_cleanup,
721 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * dw_mci_pre_dma_transfer() - DMA-map the request's scatterlist if it is
 * eligible: rejects short transfers (< DW_MCI_DMA_THRESHOLD bytes) and any
 * segment that is not word-aligned in offset and length.  When called from
 * the pre_req path (next != 0) the mapping is cached in data->host_cookie.
 * Returns the mapped segment count, or the cached cookie.
 */
723 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
724 struct mmc_data *data,
727 struct scatterlist *sg;
728 unsigned int i, sg_len;
730 if (!next && data->host_cookie)
731 return data->host_cookie;
734 * We don't do DMA on "complex" transfers, i.e. with
735 * non-word-aligned buffers or lengths. Also, we don't bother
736 * with all the DMA setup overhead for short transfers.
738 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
744 for_each_sg(data->sg, sg, data->sg_len, i) {
745 if (sg->offset & 3 || sg->length & 3)
749 sg_len = dma_map_sg(host->dev,
752 dw_mci_get_dma_dir(data));
757 data->host_cookie = sg_len;
/*
 * dw_mci_pre_req() - mmc_host_ops.pre_req hook: pre-map the next request's
 * scatterlist so the map cost is off the critical path.  On failure the
 * cookie is zeroed so the request falls back to mapping at submit time.
 */
762 static void dw_mci_pre_req(struct mmc_host *mmc,
763 struct mmc_request *mrq,
766 struct dw_mci_slot *slot = mmc_priv(mmc);
767 struct mmc_data *data = mrq->data;
769 if (!slot->host->use_dma || !data)
/* A stale cookie means this request was already prepared once. */
772 if (data->host_cookie) {
773 data->host_cookie = 0;
777 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
778 data->host_cookie = 0;
/*
 * dw_mci_post_req() - mmc_host_ops.post_req hook: undo a pre_req mapping
 * (unmap the scatterlist and clear the cookie).
 */
781 static void dw_mci_post_req(struct mmc_host *mmc,
782 struct mmc_request *mrq,
785 struct dw_mci_slot *slot = mmc_priv(mmc);
786 struct mmc_data *data = mrq->data;
788 if (!slot->host->use_dma || !data)
791 if (data->host_cookie)
792 dma_unmap_sg(slot->host->dev,
795 dw_mci_get_dma_dir(data));
796 data->host_cookie = 0;
/*
 * dw_mci_adjust_fifoth() - pick the largest DMA multiple-transaction size
 * (MSIZE) and RX/TX watermarks compatible with the block size and FIFO
 * depth, then program FIFOTH.  Only compiled for the internal DMAC.
 */
799 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
801 #ifdef CONFIG_MMC_DW_IDMAC
802 unsigned int blksz = data->blksz;
803 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
804 u32 fifo_width = 1 << host->data_shift;
805 u32 blksz_depth = blksz / fifo_width, fifoth_val;
806 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
/* Start from the largest candidate MSIZE and walk down. */
807 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
809 tx_wmark = (host->fifo_depth) / 2;
810 tx_wmark_invers = host->fifo_depth - tx_wmark;
814 * if blksz is not a multiple of the FIFO width
816 if (blksz % fifo_width) {
823 if (!((blksz_depth % mszs[idx]) ||
824 (tx_wmark_invers % mszs[idx]))) {
826 rx_wmark = mszs[idx] - 1;
831 * If idx is '0', it won't be tried
832 * Thus, initial values are used
835 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
836 mci_writel(host, FIFOTH, fifoth_val);
/*
 * dw_mci_ctrl_rd_thld() - program the card-read-threshold register for
 * high-speed timings (HS200 / SDR104) where the FIFO must hold a threshold
 * of data before a read starts; disabled for other timings or when the
 * block does not fit the FIFO.
 */
840 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
842 unsigned int blksz = data->blksz;
843 u32 blksz_depth, fifo_depth;
846 WARN_ON(!(data->flags & MMC_DATA_READ));
848 if (host->timing != MMC_TIMING_MMC_HS200 &&
849 host->timing != MMC_TIMING_UHS_SDR104)
852 blksz_depth = blksz / (1 << host->data_shift);
853 fifo_depth = host->fifo_depth;
855 if (blksz_depth > fifo_depth)
859 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
860 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
861 * Currently just choose blksz.
864 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
868 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * dw_mci_submit_data_dma() - try to hand the data phase to the DMA engine:
 * map the scatterlist, tune FIFOTH if the block size changed, enable the
 * DMA interface, mask the PIO (RX/TX ready) interrupts, and start the
 * dma_ops backend.  A non-zero return tells the caller to fall back to PIO.
 */
871 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
878 /* If we don't have a channel, we can't do DMA */
882 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
884 host->dma_ops->stop(host);
891 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
892 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
896 * Decide the MSIZE and RX/TX Watermark.
897 * If current block size is same with previous size,
898 * no need to update fifoth.
900 if (host->prev_blksz != data->blksz)
901 dw_mci_adjust_fifoth(host, data);
904 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
906 /* Enable the DMA interface */
907 temp = mci_readl(host, CTRL);
908 temp |= SDMMC_CTRL_DMA_ENABLE;
909 mci_writel(host, CTRL, temp);
911 /* Disable RX/TX IRQs, let DMA handle it */
912 temp = mci_readl(host, INTMASK);
913 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
914 mci_writel(host, INTMASK, temp);
916 host->dma_ops->start(host, sg_len);
/*
 * dw_mci_submit_data() - set up the data phase: record transfer direction,
 * arm the read threshold, and attempt DMA.  If DMA is refused, fall back to
 * PIO: start the sg_miter, unmask RX/TX-ready interrupts, disable the DMA
 * interface, and restore the initial FIFOTH value.
 */
921 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
925 data->error = -EINPROGRESS;
932 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
934 if (data->flags & MMC_DATA_READ) {
935 host->dir_status = DW_MCI_RECV_STATUS;
936 dw_mci_ctrl_rd_thld(host, data);
938 host->dir_status = DW_MCI_SEND_STATUS;
941 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
942 data->blocks, data->blksz, mmc_hostname(host->mmc));
944 if (dw_mci_submit_data_dma(host, data)) {
/* PIO fallback path. */
945 int flags = SG_MITER_ATOMIC;
946 if (host->data->flags & MMC_DATA_READ)
947 flags |= SG_MITER_TO_SG;
949 flags |= SG_MITER_FROM_SG;
951 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
953 host->part_buf_start = 0;
954 host->part_buf_count = 0;
956 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
957 temp = mci_readl(host, INTMASK);
958 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
959 mci_writel(host, INTMASK, temp);
961 temp = mci_readl(host, CTRL);
962 temp &= ~SDMMC_CTRL_DMA_ENABLE;
963 mci_writel(host, CTRL, temp);
966 * Use the initial fifoth_val for PIO mode.
967 * If next issued data may be transfered by DMA mode,
968 * prev_blksz should be invalidated.
970 mci_writel(host, FIFOTH, host->fifoth_val);
971 host->prev_blksz = 0;
974 * Keep the current block size.
975 * It will be used to decide whether to update
976 * fifoth register next time.
978 host->prev_blksz = data->blksz;
/*
 * mci_send_cmd() - issue an internal (non-data) controller command, e.g.
 * SDMMC_CMD_UPD_CLK, and busy-wait for the START bit to self-clear.  First
 * waits (bounded) for the card/controller to leave the busy state.
 */
982 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
984 struct dw_mci *host = slot->host;
985 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
986 unsigned int cmd_status = 0;
987 #ifdef SDMMC_WAIT_FOR_UNBUSY
989 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
991 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
993 ret = time_before(jiffies, timeout);
994 cmd_status = mci_readl(host, STATUS);
995 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
999 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1000 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1003 mci_writel(host, CMDARG, arg);
1005 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
/* Clock-update commands are expected to complete faster. */
1006 if(cmd & SDMMC_CMD_UPD_CLK)
1007 timeout = jiffies + msecs_to_jiffies(50);
1009 timeout = jiffies + msecs_to_jiffies(500);
1010 while (time_before(jiffies, timeout)) {
1011 cmd_status = mci_readl(host, CMD);
1012 if (!(cmd_status & SDMMC_CMD_START))
1015 dev_err(&slot->mmc->class_dev,
1016 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1017 cmd, arg, cmd_status);
/*
 * dw_mci_setup_bus() - program the card clock (CLKDIV/CLKENA/CLKSRC, each
 * change latched via SDMMC_CMD_UPD_CLK) and the bus width (CTYPE) for a
 * slot.  Contains Rockchip-specific workarounds that retune clk_mmc itself
 * when the requested divider cannot be honoured; see the in-line FIXME.
 */
1020 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1022 struct dw_mci *host = slot->host;
1023 unsigned int tempck,clock = slot->clock;
1028 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1029 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1032 mci_writel(host, CLKENA, 0);
1033 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* During a signal-voltage switch (svi_flags != 0) the clock-off update
 * command is skipped/issued differently. */
1034 if(host->svi_flags == 0)
1035 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1037 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1039 } else if (clock != host->current_speed || force_clkinit) {
1040 div = host->bus_hz / clock;
1041 if (host->bus_hz % clock && host->bus_hz > clock)
1043 * move the + 1 after the divide to prevent
1044 * over-clocking the card.
1048 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
/* __clk_old caches the last (clock << div) so unchanged settings skip
 * the register dance below. */
1050 if ((clock << div) != slot->__clk_old || force_clkinit) {
1051 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1052 dev_info(&slot->mmc->class_dev,
1053 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1054 slot->id, host->bus_hz, clock,
1057 host->set_speed = tempck;
1058 host->set_div = div;
1062 mci_writel(host, CLKENA, 0);
1063 mci_writel(host, CLKSRC, 0);
1067 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1069 if(clock <= 400*1000){
1070 MMC_DBG_BOOT_FUNC(host->mmc,
1071 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1072 clock * 2, mmc_hostname(host->mmc));
1073 /* clk_mmc will change parents to 24MHz xtal*/
1074 clk_set_rate(host->clk_mmc, clock * 2);
1077 host->set_div = div;
1081 MMC_DBG_BOOT_FUNC(host->mmc,
1082 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1083 mmc_hostname(host->mmc));
1086 MMC_DBG_ERR_FUNC(host->mmc,
1087 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1088 mmc_hostname(host->mmc));
1090 host->set_div = div;
1091 host->bus_hz = host->set_speed * 2;
1092 MMC_DBG_BOOT_FUNC(host->mmc,
1093 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1094 div, host->bus_hz, mmc_hostname(host->mmc));
1096 /* BUG may be here, come on, Linux BSP engineer looks!
1097 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1098 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1099 some oops happened like that:
1100 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1101 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1102 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1103 mmc0: new high speed DDR MMC card at address 0001
1104 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1106 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1107 mmcblk0: retrying using single block read
1108 mmcblk0: error -110 sending status command, retrying
1110 How to: If eMMC HW version < 4.51, or > 4.51 but no caps2-mmc-hs200 support in dts
1111 Please set dts emmc clk to 100M or 150M, I will workaround it!
1114 if (host->verid < DW_MMC_240A)
1115 clk_set_rate(host->clk_mmc,(host->bus_hz));
1117 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1123 /* set clock to desired speed */
1124 mci_writel(host, CLKDIV, div);
1128 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1130 /* enable clock; only low power if no SDIO */
1131 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1133 if (host->verid < DW_MMC_240A)
1134 sdio_int = SDMMC_INT_SDIO(slot->id);
1136 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1138 if (!(mci_readl(host, INTMASK) & sdio_int))
1139 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1140 mci_writel(host, CLKENA, clk_en_a);
1144 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1145 /* keep the clock with reflecting clock dividor */
1146 slot->__clk_old = clock << div;
1149 host->current_speed = clock;
1151 if(slot->ctype != slot->pre_ctype)
1152 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1154 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1155 mmc_hostname(host->mmc));
1156 slot->pre_ctype = slot->ctype;
1158 /* Set the current slot bus width */
1159 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1162 extern struct mmc_card *this_card;
/*
 * dw_mci_wait_unbusy() - poll STATUS until DATA_BUSY/MC_BUSY clear, with a
 * per-card-type timeout; eMMC erase commands get an extended timeout derived
 * from ext_csd erase fields.
 */
1163 static void dw_mci_wait_unbusy(struct dw_mci *host)
1166 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1167 unsigned long time_loop;
1168 unsigned int status;
1171 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1173 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1174 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1175 /* Special care for (secure)erase timeout calculation */
/* BUG(review): masking bit 31 yields 0 or 0x80000000 -- never 1 -- so this
 * comparison is always false and the secure-erase branch never triggers;
 * "!= 0" (or "(arg >> 31) & 1") was intended. */
1177 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
/* BUG(review): stray ';' makes this 'if' a no-op statement; its condition
 * has no effect on the ternary below. */
1180 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1) ;
1181 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1182 300000 * (this_card->ext_csd.sec_erase_mult)) :
1183 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1187 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1188 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1189 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1190 timeout = SDMMC_DATA_TIMEOUT_SD;
1193 time_loop = jiffies + msecs_to_jiffies(timeout);
1195 status = mci_readl(host, STATUS);
1196 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1198 } while (time_before(jiffies, time_loop));
1203 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1206 * 0--status is busy.
1207 * 1--status is unbusy.
/*
 * dw_mci_card_busy() - .card_busy hook used by the signal-voltage-switch
 * (CMD11) sequence.  Toggles host->svi_flags between the two phases of the
 * switch and returns the new flag value.
 */
1209 int dw_mci_card_busy(struct mmc_host *mmc)
1211 struct dw_mci_slot *slot = mmc_priv(mmc);
1212 struct dw_mci *host = slot->host;
1214 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1215 host->svi_flags, mmc_hostname(host->mmc));
1218 if(host->svi_flags == 0){
1220 host->svi_flags = 1;
1221 return host->svi_flags;
1224 host->svi_flags = 0;
1225 return host->svi_flags;
/*
 * __dw_mci_start_request() - begin executing one command of a request on
 * the hardware: select the slot, wait for the controller to go idle, reset
 * per-request event/status state, program byte/block counts for data, then
 * issue the (possibly INIT-flagged) command.  Also pre-computes the stop
 * command register value when the request carries an explicit stop.
 */
1231 static void __dw_mci_start_request(struct dw_mci *host,
1232 struct dw_mci_slot *slot,
1233 struct mmc_command *cmd)
1235 struct mmc_request *mrq;
1236 struct mmc_data *data;
1240 if (host->pdata->select_slot)
1241 host->pdata->select_slot(slot->id);
1243 host->cur_slot = slot;
1246 dw_mci_wait_unbusy(host);
1248 host->pending_events = 0;
1249 host->completed_events = 0;
1250 host->data_status = 0;
1254 dw_mci_set_timeout(host);
1255 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1256 mci_writel(host, BLKSIZ, data->blksz);
1259 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1261 /* this is the first command, send the initialization clock */
1262 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1263 cmdflags |= SDMMC_CMD_INIT;
1266 dw_mci_submit_data(host, data);
1270 dw_mci_start_command(host, cmd, cmdflags);
1273 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * dw_mci_start_request() - kick off a queued mmc_request, preferring the
 * set-block-count (sbc/CMD23) command when the request has one.
 */
1276 static void dw_mci_start_request(struct dw_mci *host,
1277 struct dw_mci_slot *slot)
1279 struct mmc_request *mrq = slot->mrq;
1280 struct mmc_command *cmd;
1282 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1283 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1285 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1286 __dw_mci_start_request(host, slot, cmd);
/* must be called with host->lock held */
/*
 * dw_mci_queue_request - start the request immediately if the host is idle,
 * otherwise park the slot on host->queue for dw_mci_request_end() to pick up.
 */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
struct mmc_request *mrq)
dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
if (host->state == STATE_IDLE) {
host->state = STATE_SENDING_CMD;
dw_mci_start_request(host, slot);
/* Host busy: defer; the completion path dequeues in FIFO order. */
list_add_tail(&slot->queue_node, &host->queue);
/*
 * dw_mci_request - mmc_host_ops .request entry point.
 * Fails fast with -ENOMEDIUM when no card is present; otherwise queues the
 * request under host->lock.
 */
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
/*
 * The check for card presence and queueing of the request must be
 * atomic, otherwise the card could be removed in between and the
 * request wouldn't fail until another card was inserted.
 */
spin_lock_bh(&host->lock);
if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
spin_unlock_bh(&host->lock);
mrq->cmd->error = -ENOMEDIUM;
MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
__LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
/* Complete the request immediately; nothing was sent to hardware. */
mmc_request_done(mmc, mrq);
MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
dw_mci_queue_request(host, slot, mrq);
spin_unlock_bh(&host->lock);
/*
 * dw_mci_set_ios - mmc_host_ops .set_ios: apply bus width, timing, clock
 * and power changes requested by the MMC core.
 * NOTE(review): several interior lines (declarations, braces, case labels)
 * are missing from this view.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct dw_mci_slot *slot = mmc_priv(mmc);
#ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
#ifdef SDMMC_WAIT_FOR_UNBUSY
unsigned long time_loop;
/* Pick the unbusy-poll budget; a voltage switch in flight gets the
 * longer SD data timeout. */
#ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
if(host->svi_flags == 1)
time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
printk("%d..%s: no card. [%s]\n", \
__LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until neither the card nor the controller is busy,
 * or the deadline passes. */
ret = time_before(jiffies, time_loop);
regs = mci_readl(slot->host, STATUS);
if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
printk("slot->flags = %lu ", slot->flags);
#ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
if(host->svi_flags != 1)
printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
__LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Translate the requested bus width into the controller CTYPE encoding. */
switch (ios->bus_width) {
case MMC_BUS_WIDTH_4:
slot->ctype = SDMMC_CTYPE_4BIT;
case MMC_BUS_WIDTH_8:
slot->ctype = SDMMC_CTYPE_8BIT;
/* set default 1 bit mode */
slot->ctype = SDMMC_CTYPE_1BIT;
slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode is a per-slot bit in the upper half of UHS_REG. */
regs = mci_readl(slot->host, UHS_REG);
if (ios->timing == MMC_TIMING_UHS_DDR50)
regs |= ((0x1 << slot->id) << 16);
regs &= ~((0x1 << slot->id) << 16);
mci_writel(slot->host, UHS_REG, regs);
slot->host->timing = ios->timing;
/*
 * Use mirror of ios->clock to prevent race with mmc
 * core ios update when finding the minimum.
 */
slot->clock = ios->clock;
/* Variant-specific hook (e.g. Rockchip tuning/clock quirks). */
if (drv_data && drv_data->set_ios)
drv_data->set_ios(slot->host, ios);
/* Slot specific timing and width adjustment */
dw_mci_setup_bus(slot, false);
/* Power sequencing: *_UP path re-inits the card, *_OFF path (below the
 * missing case label) drops the PWREN bit. */
switch (ios->power_mode) {
set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
if (slot->host->pdata->setpower)
slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
regs = mci_readl(slot->host, PWREN);
regs |= (1 << slot->id);
mci_writel(slot->host, PWREN, regs);
/* Power down slot */
if(slot->host->pdata->setpower)
slot->host->pdata->setpower(slot->id, 0);
regs = mci_readl(slot->host, PWREN);
regs &= ~(1 << slot->id);
mci_writel(slot->host, PWREN, regs);
/*
 * dw_mci_get_ro - mmc_host_ops .get_ro: report write-protect state.
 * Priority: slot quirk (never protected) > board get_ro() hook > WP GPIO >
 * controller WRTPRT register bit for this slot.
 */
static int dw_mci_get_ro(struct mmc_host *mmc)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci_board *brd = slot->host->pdata;
/* Use platform get_ro function, else try on board write protect */
if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
else if(brd->get_ro)
read_only = brd->get_ro(slot->id);
else if(gpio_is_valid(slot->wp_gpio))
read_only = gpio_get_value(slot->wp_gpio);
mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
dev_dbg(&mmc->class_dev, "card is %s\n",
read_only ? "read-only" : "read-write");
/*
 * dw_mci_set_sdio_status - software card-detect for SDIO-only slots
 * (e.g. WiFi modules toggled by an rfkill driver).  Updates the PRESENT
 * bit, gates the controller clocks accordingly, and triggers a rescan.
 */
static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
/*struct dw_mci_board *brd = slot->host->pdata;*/
/* Only meaningful for slots restricted to SDIO cards. */
if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
spin_lock_bh(&host->lock);
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
spin_unlock_bh(&host->lock);
/* Card "inserted": make sure both AHB and card clocks are running;
 * card "removed": shut them off to save power. */
if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
if(__clk_is_enabled(host->hclk_mmc) == false)
clk_prepare_enable(host->hclk_mmc);
if(__clk_is_enabled(host->clk_mmc) == false)
clk_prepare_enable(host->clk_mmc);
if(__clk_is_enabled(host->clk_mmc) == true)
clk_disable_unprepare(slot->host->clk_mmc);
if(__clk_is_enabled(host->hclk_mmc) == true)
clk_disable_unprepare(slot->host->hclk_mmc);
/* Debounce of 20ms before the core rescans the bus. */
mmc_detect_change(slot->mmc, 20);
/*
 * dw_mci_get_cd - mmc_host_ops .get_cd: report card presence.
 * On RK312x SD slots the card-detect pin is shared with JTAG, so the GRF
 * force_jtag mux must be flipped depending on whether a card is seen.
 * NOTE(review): several interior lines (declarations, debounce re-read
 * logic, else branches) are missing from this view.
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci_board *brd = slot->host->pdata;
struct dw_mci *host = slot->host;
int gpio_cd = mmc_gpio_get_cd(mmc);
/* RK312x SD slot: poll the raw CD GPIO and manage the JTAG mux. */
if (cpu_is_rk312x() &&
(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
gpio_cd = slot->cd_gpio;
if (gpio_is_valid(gpio_cd)) {
gpio_val = gpio_get_value_cansleep(gpio_cd);
/* Read twice as a crude debounce; active-low CD pin. */
if (gpio_val == gpio_get_value_cansleep(gpio_cd)) {
gpio_cd = gpio_get_value_cansleep(gpio_cd) == 0 ? 1 : 0;
/* Enable force_jtag wihtout card in slot, ONLY for NCD-package */
grf_writel((0x1 << 24) | (1 << 8), RK312X_GRF_SOC_CON0);
dw_mci_ctrl_all_reset(host);
/* Really card detected: SHOULD disable force_jtag */
grf_writel((0x1 << 24) | (0 << 8), RK312X_GRF_SOC_CON0);
/* Unstable reading: keep reporting the last settled state. */
return slot->last_detect_state;
dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
/* SDIO slots use the software status set by dw_mci_set_sdio_status(). */
if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
/* Use platform get_cd function, else try onboard card detect */
if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
else if (brd->get_cd)
present = !brd->get_cd(slot->id);
else if (!IS_ERR_VALUE(gpio_cd))
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
/* Publish the result in slot->flags under the host lock. */
spin_lock_bh(&host->lock);
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
dev_dbg(&mmc->class_dev, "card is present\n");
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
dev_dbg(&mmc->class_dev, "card is not present\n");
spin_unlock_bh(&host->lock);
1569 * Dts Should caps emmc controller with poll-hw-reset
1571 static void dw_mci_hw_reset(struct mmc_host *mmc)
1573 struct dw_mci_slot *slot = mmc_priv(mmc);
1574 struct dw_mci *host = slot->host;
1579 unsigned long timeout;
1582 /* (1) CMD12 to end any transfer in process */
1583 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1584 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1586 if(host->mmc->hold_reg_flag)
1587 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1588 mci_writel(host, CMDARG, 0);
1590 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
1592 timeout = jiffies + msecs_to_jiffies(500);
1594 ret = time_before(jiffies, timeout);
1595 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1600 MMC_DBG_ERR_FUNC(host->mmc,
1601 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1602 __func__, mmc_hostname(host->mmc));
1604 /* (2) wait DTO, even if no response is sent back by card */
1606 timeout = jiffies + msecs_to_jiffies(5);
1608 ret = time_before(jiffies, timeout);
1609 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1610 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1616 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1618 /* Software reset - BMOD[0] for IDMA only */
1619 regs = mci_readl(host, BMOD);
1620 regs |= SDMMC_IDMAC_SWRESET;
1621 mci_writel(host, BMOD, regs);
1622 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1623 regs = mci_readl(host, BMOD);
1624 if(regs & SDMMC_IDMAC_SWRESET)
1625 MMC_DBG_WARN_FUNC(host->mmc,
1626 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1627 __func__, mmc_hostname(host->mmc));
1629 /* DMA reset - CTRL[2] */
1630 regs = mci_readl(host, CTRL);
1631 regs |= SDMMC_CTRL_DMA_RESET;
1632 mci_writel(host, CTRL, regs);
1633 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1634 regs = mci_readl(host, CTRL);
1635 if(regs & SDMMC_CTRL_DMA_RESET)
1636 MMC_DBG_WARN_FUNC(host->mmc,
1637 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1638 __func__, mmc_hostname(host->mmc));
1640 /* FIFO reset - CTRL[1] */
1641 regs = mci_readl(host, CTRL);
1642 regs |= SDMMC_CTRL_FIFO_RESET;
1643 mci_writel(host, CTRL, regs);
1644 mdelay(1); /* no timing limited, 1ms is random value */
1645 regs = mci_readl(host, CTRL);
1646 if(regs & SDMMC_CTRL_FIFO_RESET)
1647 MMC_DBG_WARN_FUNC(host->mmc,
1648 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1649 __func__, mmc_hostname(host->mmc));
1652 According to eMMC spec
1653 tRstW >= 1us ; RST_n pulse width
1654 tRSCA >= 200us ; RST_n to Command time
1655 tRSTH >= 1us ; RST_n high period
1657 mci_writel(slot->host, PWREN, 0x0);
1658 mci_writel(slot->host, RST_N, 0x0);
1660 udelay(10); /* 10us for bad quality eMMc. */
1662 mci_writel(slot->host, PWREN, 0x1);
1663 mci_writel(slot->host, RST_N, 0x1);
1665 usleep_range(500, 1000); /* at least 500(> 200us) */
 * Disable lower power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
struct dw_mci *host = slot->host;
/* Per-slot low-power enable bit in CLKENA. */
const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
clk_en_a = mci_readl(host, CLKENA);
/* Only touch the hardware when the bit is actually set; the clock
 * update command makes the new CLKENA value take effect. */
if (clk_en_a & clken_low_pwr) {
mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * dw_mci_enable_sdio_irq - mmc_host_ops .enable_sdio_irq: mask/unmask the
 * per-slot SDIO interrupt in INTMASK.  Enabling also forces low-power
 * clock gating off so the card can actually raise the interrupt.
 */
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
/* Enable/disable Slot Specific SDIO interrupt */
int_mask = mci_readl(host, INTMASK);
/* SDIO interrupt bit position moved by 8 in IP versions >= 2.40a. */
if (host->verid < DW_MMC_240A)
sdio_int = SDMMC_INT_SDIO(slot->id);
sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
/*
 * Turn off low power mode if it was enabled. This is a bit of
 * a heavy operation and we disable / enable IRQs a lot, so
 * we'll leave low power mode disabled and it will get
 * re-enabled again in dw_mci_setup_bus().
 */
dw_mci_disable_low_power(slot);
mci_writel(host, INTMASK,
(int_mask | sdio_int));
mci_writel(host, INTMASK,
(int_mask & ~sdio_int));
#ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* IO-domain voltages in millivolts used by the GRF switch below. */
IO_DOMAIN_12 = 1200,
IO_DOMAIN_18 = 1800,
IO_DOMAIN_33 = 3300,
/*
 * dw_mci_do_grf_io_domain_switch - program the SoC GRF so the SD IO pads
 * run at the requested voltage.  Only RK3288 is handled in the visible
 * code; other SoCs fall through to the "unknown chip" error.
 * NOTE(review): the voltage-validation branches above the error messages
 * are partially missing from this view.
 */
static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
__FUNCTION__, mmc_hostname(host->mmc));
MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
__FUNCTION__, mmc_hostname(host->mmc));
/* RK3288: IO_VSEL bit 7 with write-enable bit 23. */
if(cpu_is_rk3288()){
if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
__FUNCTION__, mmc_hostname(host->mmc));
/*
 * dw_mci_do_start_signal_voltage_switch - switch the signalling voltage
 * (3.3V / 1.8V / 1.2V): adjust the vmmc regulator, the SoC IO domain and
 * the controller UHS_REG 1.8V enable bit, then verify the result.
 * NOTE(review): braces and some error-return lines are missing from this
 * view; the UHS_REG read-back checks appear to validate that the 1.8V bit
 * landed in the expected state -- confirm against the full source.
 */
static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
struct mmc_ios *ios)
unsigned int value,uhs_reg;
/*
 * Signal Voltage Switching is only applicable for Host Controllers
 */
if (host->verid < DW_MMC_240A)
uhs_reg = mci_readl(host, UHS_REG);
MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
__LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
/* regulator_put(host->vmmc); //to be done in remove function. */
MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
__func__, regulator_get_voltage(host->vmmc), ret);
MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
" failed\n", mmc_hostname(host->mmc));
dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
__FUNCTION__, mmc_hostname(host->mmc));
/* set High-power mode */
value = mci_readl(host, CLKENA);
value &= ~SDMMC_CLKEN_LOW_PWR;
mci_writel(host,CLKENA , value);
/* Clear the 1.8V enable bit for 3.3V signalling. */
uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
mci_writel(host,UHS_REG , uhs_reg);
usleep_range(5000, 5500);
/* 3.3V regulator output should be stable within 5 ms */
uhs_reg = mci_readl(host, UHS_REG);
if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
mmc_hostname(host->mmc));
case MMC_SIGNAL_VOLTAGE_180:
ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
/* regulator_put(host->vmmc);//to be done in remove function. */
MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
__LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
" failed\n", mmc_hostname(host->mmc));
dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
/*
 * Enable 1.8V Signal Enable in the Host Control2
 */
mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
usleep_range(5000, 5500);
MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
__FUNCTION__,mmc_hostname(host->mmc));
/* 1.8V regulator output should be stable within 5 ms */
uhs_reg = mci_readl(host, UHS_REG);
if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
mmc_hostname(host->mmc));
case MMC_SIGNAL_VOLTAGE_120:
ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
" failed\n", mmc_hostname(host->mmc));
/* No signal voltage switch required */
/*
 * dw_mci_start_signal_voltage_switch - mmc_host_ops wrapper: bail out on
 * IP versions older than 2.40a (no UHS support), otherwise delegate to
 * dw_mci_do_start_signal_voltage_switch().
 */
static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
if (host->verid < DW_MMC_240A)
err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * dw_mci_execute_tuning - mmc_host_ops .execute_tuning: pick the tuning
 * block pattern for the opcode/bus width, choose the clock-controller
 * connection id for this card type, then delegate to the variant hook.
 */
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = host->drv_data;
struct dw_mci_tuning_data tuning_data;
/* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
if(cpu_is_rk3036() || cpu_is_rk312x())
/* HS200 tuning pattern depends on bus width; CMD19 is always 4-bit. */
if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
tuning_data.blk_pattern = tuning_blk_pattern_8bit;
tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
tuning_data.blk_pattern = tuning_blk_pattern_4bit;
tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
} else if (opcode == MMC_SEND_TUNING_BLOCK) {
tuning_data.blk_pattern = tuning_blk_pattern_4bit;
tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
"Undefined command(%d) for tuning\n", opcode);
/* Recommend sample phase and delayline
 Fixme: Mix-use these three controllers will cause
*/
/* con_id selects the phase-clock: 3=eMMC, 1=SDIO, 0=SD. */
if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
tuning_data.con_id = 3;
else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
tuning_data.con_id = 1;
tuning_data.con_id = 0;
/* 0: driver, from host->devices
 1: sample, from devices->host
*/
tuning_data.tuning_type = 1;
if (drv_data && drv_data->execute_tuning)
err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/* mmc_host_ops table registered with the MMC core for every slot. */
static const struct mmc_host_ops dw_mci_ops = {
.request = dw_mci_request,
.pre_req = dw_mci_pre_req,
.post_req = dw_mci_post_req,
.set_ios = dw_mci_set_ios,
.get_ro = dw_mci_get_ro,
.get_cd = dw_mci_get_cd,
.set_sdio_status = dw_mci_set_sdio_status,
.hw_reset = dw_mci_hw_reset,
.enable_sdio_irq = dw_mci_enable_sdio_irq,
.execute_tuning = dw_mci_execute_tuning,
/* UHS voltage-switch hooks only exist when the Kconfig option is set. */
#ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
.start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
.card_busy = dw_mci_card_busy,
/*
 * dw_mci_enable_irq - enable/disable the host IRQ line, tracking the
 * current state in host->irq_state so repeated calls with the same flag
 * do not unbalance the enable/disable nesting.
 */
static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
unsigned long flags;
local_irq_save(flags);
/* Only act on an actual state change. */
if(host->irq_state != irqflag)
host->irq_state = irqflag;
enable_irq(host->irq);
disable_irq(host->irq);
local_irq_restore(flags);
/*
 * dw_mci_deal_data_end - post-transfer fixup: for write transfers
 * (except CMD19 bus-test) translate raw data-status bits into data->error
 * and wait for the card to leave the busy state.
 */
static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
__releases(&host->lock)
__acquires(&host->lock)
if(DW_MCI_SEND_STATUS == host->dir_status){
if( MMC_BUS_TEST_W != host->cmd->opcode){
if(host->data_status & SDMMC_INT_DCRC)
host->data->error = -EILSEQ;
else if(host->data_status & SDMMC_INT_EBE)
host->data->error = -ETIMEDOUT;
dw_mci_wait_unbusy(host);
dw_mci_wait_unbusy(host);
/*
 * dw_mci_request_end - finish the current request: stop the DTO timer,
 * settle the data phase, start the next queued slot (or go idle), and
 * complete the request to the MMC core with host->lock dropped.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
__releases(&host->lock)
__acquires(&host->lock)
struct dw_mci_slot *slot;
struct mmc_host *prev_mmc = host->cur_slot->mmc;
WARN_ON(host->cmd || host->data);
del_timer_sync(&host->dto_timer);
dw_mci_deal_data_end(host, mrq);
MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
host->cur_slot->mrq = NULL;
/* Kick off the next queued slot, if any; otherwise go idle. */
if (!list_empty(&host->queue)) {
slot = list_entry(host->queue.next,
struct dw_mci_slot, queue_node);
list_del(&slot->queue_node);
dev_vdbg(host->dev, "list not empty: %s is next\n",
mmc_hostname(slot->mmc));
host->state = STATE_SENDING_CMD;
MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
dw_mci_start_request(host, slot);
dev_vdbg(host->dev, "list empty\n");
host->state = STATE_IDLE;
/* mmc_request_done() may re-enter the driver; call it unlocked. */
spin_unlock(&host->lock);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
/*
 * dw_mci_command_complete - read the card response registers and convert
 * raw status bits (RTO/RCRC/RESP_ERR) into cmd->error.
 * NOTE(review): several branches (SDIO retry handling, cmd_rto retry
 * counter logic, retry delay body) are partially missing from this view.
 */
static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
u32 status = host->cmd_status;
host->cmd_status = 0;
/* Read the response from the card (up to 16 bytes) */
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
/* Long (R2) response: RESP0..3 hold bits 127..0 reversed. */
cmd->resp[3] = mci_readl(host, RESP0);
cmd->resp[2] = mci_readl(host, RESP1);
cmd->resp[1] = mci_readl(host, RESP2);
cmd->resp[0] = mci_readl(host, RESP3);
MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
__LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
cmd->resp[0] = mci_readl(host, RESP0);
MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
__LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
/* Map controller error bits onto errno codes. */
if (status & SDMMC_INT_RTO)
if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
cmd->error = -ETIMEDOUT;
del_timer_sync(&host->dto_timer);
}else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
del_timer_sync(&host->dto_timer);
cmd->error = -EILSEQ;
}else if (status & SDMMC_INT_RESP_ERR){
del_timer_sync(&host->dto_timer);
MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
cmd->opcode, cmd->error,mmc_hostname(host->mmc));
del_timer_sync(&host->dto_timer);
/* CMD13 response timeouts are tolerated up to a retry budget. */
if(MMC_SEND_STATUS != cmd->opcode)
if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* newer ip versions need a delay between retries */
if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * dw_mci_tasklet_func - bottom-half state machine.  Driven by event bits
 * set in the IRQ handler (host->pending_events), it walks a request
 * through SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP /
 * DATA_ERROR until the state stops changing, then stores the final state.
 * NOTE(review): many interior lines (the do/while frame, some case labels,
 * break statements and braces) are missing from this view; the comments
 * below annotate only the visible control flow.
 */
static void dw_mci_tasklet_func(unsigned long priv)
struct dw_mci *host = (struct dw_mci *)priv;
struct dw_mci_slot *slot = mmc_priv(host->mmc);
struct mmc_data *data;
struct mmc_command *cmd;
enum dw_mci_state state;
enum dw_mci_state prev_state;
u32 status, cmd_flags;
unsigned long timeout = 0;
spin_lock(&host->lock);
state = host->state;
case STATE_SENDING_CMD:
/* Wait for the IRQ handler to flag command completion. */
if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
&host->pending_events))
set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
dw_mci_command_complete(host, cmd);
/* A successful SBC (CMD23) is chained straight into the main cmd. */
if (cmd == host->mrq->sbc && !cmd->error) {
prev_state = state = STATE_SENDING_CMD;
__dw_mci_start_request(host, host->cur_slot,
/* Command error with data pending: abort DMA and send a stop. */
if (cmd->data && cmd->error) {
del_timer_sync(&host->dto_timer); /* delete the timer for INT_DTO */
dw_mci_stop_dma(host);
send_stop_cmd(host, data);
state = STATE_SENDING_STOP;
send_stop_abort(host, data);
state = STATE_SENDING_STOP;
set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
/* No data phase (or failed command): finish the request now. */
if (!host->mrq->data || cmd->error) {
dw_mci_request_end(host, host->mrq);
prev_state = state = STATE_SENDING_DATA;
case STATE_SENDING_DATA:
if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
dw_mci_stop_dma(host);
send_stop_cmd(host, data);
/*single block read/write, send stop cmd manually to prevent host controller halt*/
MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
__func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
/* Hand-craft a CMD12 in the CMD register (bypasses the normal
 * request path) to recover the controller. */
mci_writel(host, CMDARG, 0);
cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
| SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
if(host->mmc->hold_reg_flag)
cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll up to 500ms for the START bit to self-clear. */
timeout = jiffies + msecs_to_jiffies(500);
ret = time_before(jiffies, timeout);
if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
MMC_DBG_ERR_FUNC(host->mmc,
"%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
__func__, mmc_hostname(host->mmc));
send_stop_abort(host, data);
state = STATE_DATA_ERROR;
MMC_DBG_CMD_FUNC(host->mmc,
"Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
prev_state,state, mmc_hostname(host->mmc));
if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
&host->pending_events))
MMC_DBG_INFO_FUNC(host->mmc,
"Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
prev_state,state,mmc_hostname(host->mmc));
set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
prev_state = state = STATE_DATA_BUSY;
case STATE_DATA_BUSY:
if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
&host->pending_events))
dw_mci_deal_data_end(host, host->mrq);
del_timer_sync(&host->dto_timer); //delete the timer for INT_DTO
MMC_DBG_INFO_FUNC(host->mmc,
"Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
prev_state,state,mmc_hostname(host->mmc));
set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
status = host->data_status;
/* Translate data-phase error bits into data->error. */
if (status & DW_MCI_DATA_ERROR_FLAGS) {
if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
MMC_DBG_ERR_FUNC(host->mmc,
"Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
prev_state,state, status, mmc_hostname(host->mmc));
if (status & SDMMC_INT_DRTO) {
data->error = -ETIMEDOUT;
} else if (status & SDMMC_INT_DCRC) {
data->error = -EILSEQ;
} else if (status & SDMMC_INT_EBE &&
host->dir_status == DW_MCI_SEND_STATUS){
 * No data CRC status was returned.
 * The number of bytes transferred will
 * be exaggerated in PIO mode.
data->bytes_xfered = 0;
data->error = -ETIMEDOUT;
 * After an error, there may be data lingering
 * in the FIFO, so reset it - doing so
 * generates a block interrupt, hence setting
 * the scatter-gather pointer to NULL.
dw_mci_fifo_reset(host);
/* Success: report the full transfer length. */
data->bytes_xfered = data->blocks * data->blksz;
MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
prev_state,state,mmc_hostname(host->mmc));
dw_mci_request_end(host, host->mrq);
MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
prev_state,state,mmc_hostname(host->mmc));
/* With SBC in use, no explicit stop goes on the wire. */
if (host->mrq->sbc && !data->error) {
data->stop->error = 0;
MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
prev_state,state,mmc_hostname(host->mmc));
dw_mci_request_end(host, host->mrq);
prev_state = state = STATE_SENDING_STOP;
send_stop_cmd(host, data);
if (data->stop && !data->error) {
/* stop command for open-ended transfer*/
send_stop_abort(host, data);
MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
prev_state,state,mmc_hostname(host->mmc));
case STATE_SENDING_STOP:
if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
prev_state, state, mmc_hostname(host->mmc));
/* CMD error in data command */
if (host->mrq->cmd->error && host->mrq->data) {
dw_mci_fifo_reset(host);
dw_mci_command_complete(host, host->mrq->stop);
if (host->mrq->stop)
dw_mci_command_complete(host, host->mrq->stop);
host->cmd_status = 0;
dw_mci_request_end(host, host->mrq);
case STATE_DATA_ERROR:
/* Wait for the aborted transfer to drain before re-entering
 * the DATA_BUSY handling above. */
if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
&host->pending_events))
state = STATE_DATA_BUSY;
} while (state != prev_state);
host->state = state;
spin_unlock(&host->lock);
/* push final bytes to part_buf, only use during push */
/* Stores the trailing (< FIFO word) bytes of a push in host->part_buf. */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
memcpy((void *)&host->part_buf, buf, cnt);
host->part_buf_count = cnt;
/* append bytes to part_buf, only use during push */
/* Appends up to a full FIFO word into part_buf; returns bytes consumed
 * (the missing return line is not visible in this view). */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
host->part_buf_count += cnt;
/* pull first bytes from part_buf, only use during pull */
/* Drains previously buffered bytes from part_buf into buf; returns the
 * number of bytes copied (return line not visible in this view). */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
cnt = min(cnt, (int)host->part_buf_count);
memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
host->part_buf_count -= cnt;
host->part_buf_start += cnt;
/* pull final bytes from the part_buf, assuming it's just been filled */
/* Takes cnt leading bytes of a freshly read FIFO word; the remainder of
 * the word stays buffered for the next pull. */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
memcpy(buf, &host->part_buf, cnt);
host->part_buf_start = cnt;
host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * dw_mci_push_data16 - PIO write path for a 16-bit FIFO: flush any bytes
 * left in part_buf, handle misaligned buffers via a bounce array, write
 * whole u16 words, and stash any trailing odd byte back in part_buf.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
struct mmc_data *data = host->data;
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
if (host->part_buf_count == 2) {
mci_writew(host, DATA(host->data_offset),
host->part_buf_count = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (unlikely((unsigned long)buf & 0x1)) {
u16 aligned_buf[64];
int len = min(cnt & -2, (int)sizeof(aligned_buf));
int items = len >> 1;
/* memcpy from input buffer into aligned buffer */
memcpy(aligned_buf, buf, len);
/* push data from aligned buffer into fifo */
for (i = 0; i < items; ++i)
mci_writew(host, DATA(host->data_offset),
/* Aligned fast path: stream u16 words straight to the FIFO. */
for (; cnt >= 2; cnt -= 2)
mci_writew(host, DATA(host->data_offset), *pdata++);
/* put anything remaining in the part_buf */
dw_mci_set_part_bytes(host, buf, cnt);
/* Push data if we have reached the expected data length */
if ((data->bytes_xfered + init_cnt) ==
(data->blksz * data->blocks))
mci_writew(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data16 - PIO read path for a 16-bit FIFO: bounce-buffer
 * misaligned destinations, read whole u16 words, then buffer a trailing
 * odd byte via part_buf16/dw_mci_pull_final_bytes().
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (unlikely((unsigned long)buf & 0x1)) {
/* pull data from fifo into aligned buffer */
u16 aligned_buf[64];
int len = min(cnt & -2, (int)sizeof(aligned_buf));
int items = len >> 1;
for (i = 0; i < items; ++i)
aligned_buf[i] = mci_readw(host,
DATA(host->data_offset));
/* memcpy from aligned buffer into output buffer */
memcpy(buf, aligned_buf, len);
/* Aligned fast path: stream u16 words straight from the FIFO. */
for (; cnt >= 2; cnt -= 2)
*pdata++ = mci_readw(host, DATA(host->data_offset));
/* Odd trailing byte: read one more word and keep the remainder. */
host->part_buf16 = mci_readw(host, DATA(host->data_offset));
dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_push_data32 - PIO write path for a 32-bit FIFO; same structure
 * as dw_mci_push_data16 with a 4-byte word size.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
struct mmc_data *data = host->data;
/* try and push anything in the part_buf */
if (unlikely(host->part_buf_count)) {
int len = dw_mci_push_part_bytes(host, buf, cnt);
if (host->part_buf_count == 4) {
mci_writel(host, DATA(host->data_offset),
host->part_buf_count = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (unlikely((unsigned long)buf & 0x3)) {
u32 aligned_buf[32];
int len = min(cnt & -4, (int)sizeof(aligned_buf));
int items = len >> 2;
/* memcpy from input buffer into aligned buffer */
memcpy(aligned_buf, buf, len);
/* push data from aligned buffer into fifo */
for (i = 0; i < items; ++i)
mci_writel(host, DATA(host->data_offset),
/* Aligned fast path: stream u32 words straight to the FIFO. */
for (; cnt >= 4; cnt -= 4)
mci_writel(host, DATA(host->data_offset), *pdata++);
/* put anything remaining in the part_buf */
dw_mci_set_part_bytes(host, buf, cnt);
/* Push data if we have reached the expected data length */
if ((data->bytes_xfered + init_cnt) ==
(data->blksz * data->blocks))
mci_writel(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data32 - PIO read path for a 32-bit FIFO; same structure
 * as dw_mci_pull_data16 with a 4-byte word size.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (unlikely((unsigned long)buf & 0x3)) {
/* pull data from fifo into aligned buffer */
u32 aligned_buf[32];
int len = min(cnt & -4, (int)sizeof(aligned_buf));
int items = len >> 2;
for (i = 0; i < items; ++i)
aligned_buf[i] = mci_readl(host,
DATA(host->data_offset));
/* memcpy from aligned buffer into output buffer */
memcpy(buf, aligned_buf, len);
/* Aligned fast path: stream u32 words straight from the FIFO. */
for (; cnt >= 4; cnt -= 4)
*pdata++ = mci_readl(host, DATA(host->data_offset));
/* Trailing bytes: read one more word and keep the remainder. */
host->part_buf32 = mci_readl(host, DATA(host->data_offset));
dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_push_data64 - PIO TX path for 64-bit wide FIFO access.  Same
 * structure as dw_mci_push_data32, but with 8-byte units and mci_writeq().
 * NOTE(review): line-sampled view; some statements are not visible here.
 */
2554 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2556 struct mmc_data *data = host->data;
2559 /* try and push anything in the part_buf */
2560 if (unlikely(host->part_buf_count)) {
2561 int len = dw_mci_push_part_bytes(host, buf, cnt);
2565 if (host->part_buf_count == 8) {
2566 mci_writeq(host, DATA(host->data_offset),
2568 host->part_buf_count = 0;
2571 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* Unaligned source: stage through an aligned stack buffer. */
2572 if (unlikely((unsigned long)buf & 0x7)) {
2574 u64 aligned_buf[16];
2575 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2576 int items = len >> 3;
2578 /* memcpy from input buffer into aligned buffer */
2579 memcpy(aligned_buf, buf, len);
2582 /* push data from aligned buffer into fifo */
2583 for (i = 0; i < items; ++i)
2584 mci_writeq(host, DATA(host->data_offset),
2591 for (; cnt >= 8; cnt -= 8)
2592 mci_writeq(host, DATA(host->data_offset), *pdata++);
2595 /* put anything remaining in the part_buf */
2597 dw_mci_set_part_bytes(host, buf, cnt);
2598 /* Push data if we have reached the expected data length */
2599 if ((data->bytes_xfered + init_cnt) ==
2600 (data->blksz * data->blocks))
2601 mci_writeq(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data64 - PIO RX path for 64-bit wide FIFO access; mirror of
 * dw_mci_pull_data32 with 8-byte units.  The trailing partial word goes
 * into host->part_buf for dw_mci_pull_final_bytes().
 * NOTE(review): line-sampled view; some statements are not visible here.
 */
2606 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2608 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2609 if (unlikely((unsigned long)buf & 0x7)) {
2611 /* pull data from fifo into aligned buffer */
2612 u64 aligned_buf[16];
2613 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2614 int items = len >> 3;
2616 for (i = 0; i < items; ++i)
2617 aligned_buf[i] = mci_readq(host,
2618 DATA(host->data_offset));
2619 /* memcpy from aligned buffer into output buffer */
2620 memcpy(buf, aligned_buf, len);
2628 for (; cnt >= 8; cnt -= 8)
2629 *pdata++ = mci_readq(host, DATA(host->data_offset));
2633 host->part_buf = mci_readq(host, DATA(host->data_offset));
2634 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_pull_data - common RX entry: first drain any bytes left over in
 * host->part_buf from a previous call, then delegate to the width-specific
 * host->pull_data handler (16/32/64-bit, selected at probe time).
 */
2638 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2642 /* get remaining partial bytes */
2643 len = dw_mci_pull_part_bytes(host, buf, cnt);
/* If the part_buf alone satisfied the request, nothing to read from FIFO. */
2644 if (unlikely(len == cnt))
2649 /* get the rest of the data */
2650 host->pull_data(host, buf, cnt);
/*
 * dw_mci_read_data_pio - drain the RX FIFO into the request's scatterlist
 * using the sg_miter iterator.  @dto: true when called from the DATA_OVER
 * interrupt path, in which case we keep draining while the FIFO still
 * reports data.  Sets EVENT_XFER_COMPLETE when the scatterlist is exhausted.
 * NOTE(review): line-sampled view; loop/brace structure between the
 * numbered lines is not fully visible here.
 */
2653 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2655 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2657 unsigned int offset;
2658 struct mmc_data *data = host->data;
2659 int shift = host->data_shift;
2662 unsigned int remain, fcnt;
/* Defensive check added by the vendor; bus_refs == 0 here is unexpected. */
2664 if(!host->mmc->bus_refs){
2665 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2669 if (!sg_miter_next(sg_miter))
2672 host->sg = sg_miter->piter.sg;
2673 buf = sg_miter->addr;
2674 remain = sg_miter->length;
/* Bytes available = FIFO count scaled to bytes + buffered partial bytes. */
2678 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2679 << shift) + host->part_buf_count;
2680 len = min(remain, fcnt);
2683 dw_mci_pull_data(host, (void *)(buf + offset), len);
2684 data->bytes_xfered += len;
2689 sg_miter->consumed = offset;
2690 status = mci_readl(host, MINTSTS);
2691 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2692 /* if the RXDR is ready read again */
2693 } while ((status & SDMMC_INT_RXDR) ||
2694 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2697 if (!sg_miter_next(sg_miter))
2699 sg_miter->consumed = 0;
2701 sg_miter_stop(sg_miter);
2705 sg_miter_stop(sg_miter);
2709 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_write_data_pio - fill the TX FIFO from the request's scatterlist
 * (mirror of dw_mci_read_data_pio).  Loops while the controller keeps
 * raising TXDR; sets EVENT_XFER_COMPLETE when the scatterlist is exhausted.
 * NOTE(review): line-sampled view; loop/brace structure between the
 * numbered lines is not fully visible here.
 */
2712 static void dw_mci_write_data_pio(struct dw_mci *host)
2714 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2716 unsigned int offset;
2717 struct mmc_data *data = host->data;
2718 int shift = host->data_shift;
2721 unsigned int fifo_depth = host->fifo_depth;
2722 unsigned int remain, fcnt;
/* Defensive check added by the vendor; bus_refs == 0 here is unexpected. */
2724 if(!host->mmc->bus_refs){
2725 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2730 if (!sg_miter_next(sg_miter))
2733 host->sg = sg_miter->piter.sg;
2734 buf = sg_miter->addr;
2735 remain = sg_miter->length;
/* Free space = (depth - fill level) in bytes, minus pending partial bytes. */
2739 fcnt = ((fifo_depth -
2740 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2741 << shift) - host->part_buf_count;
2742 len = min(remain, fcnt);
2745 host->push_data(host, (void *)(buf + offset), len);
2746 data->bytes_xfered += len;
2751 sg_miter->consumed = offset;
2752 status = mci_readl(host, MINTSTS);
2753 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2754 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2757 if (!sg_miter_next(sg_miter))
2759 sg_miter->consumed = 0;
2761 sg_miter_stop(sg_miter);
2765 sg_miter_stop(sg_miter);
2769 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_cmd_interrupt - record command-done status and kick the tasklet.
 * Also (re)arms the data-timeout watchdog: the timeout scales with the
 * transfer size in 2 MiB units plus a retry allowance (4 s per unit per
 * the vendor comment below).
 */
2772 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2774 u32 multi, unit = SZ_2M;
/* Keep the first recorded status; don't overwrite an earlier error. */
2776 if (!host->cmd_status)
2777 host->cmd_status = status;
/* STOP_TRANSMISSION carries no data phase, so no dto timer for it. */
2782 if ((MMC_STOP_TRANSMISSION != host->cmd->opcode)) {
2783 multi = (mci_readl(host, BYTCNT) / unit) +
2784 ((mci_readl(host, BYTCNT) % unit) ? 1 :0 ) +
2785 ((host->cmd->retries > 2) ? 2 : host->cmd->retries);
2786 /* Max limit time: 8s for dto */
2787 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(4000 * multi));
2792 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2793 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_interrupt - top-level IRQ handler.  Reads MINTSTS once, then
 * handles in order: command errors, data errors, data-over, RXDR/TXDR PIO
 * service, voltage-switch interrupt, command-done, card-detect, HLE, SDIO
 * per-slot interrupts, and finally (internal-DMA platforms only) IDMAC
 * completion.  Each source is acked by writing its bit back to RINTSTS.
 * NOTE(review): line-sampled view; some statements/braces between the
 * numbered lines are not visible here.
 */
2796 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2798 struct dw_mci *host = dev_id;
2799 u32 pending, sdio_int;
2802 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2805 * DTO fix - version 2.10a and below, and only if internal DMA
2808 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2810 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2811 pending |= SDMMC_INT_DATA_OVER;
2815 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2816 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2817 host->cmd_status = pending;
2819 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2820 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2822 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2825 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2826 /* if there is an error report DATA_ERROR */
2827 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2828 host->data_status = pending;
2830 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2832 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2833 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2834 tasklet_schedule(&host->tasklet);
2837 if (pending & SDMMC_INT_DATA_OVER) {
2838 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2839 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2840 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2841 if (!host->data_status)
2842 host->data_status = pending;
/* On the receive path, drain whatever is still in the FIFO before
 * signalling completion to the tasklet. */
2844 if (host->dir_status == DW_MCI_RECV_STATUS) {
2845 if (host->sg != NULL)
2846 dw_mci_read_data_pio(host, true);
2848 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2849 tasklet_schedule(&host->tasklet);
2852 if (pending & SDMMC_INT_RXDR) {
2853 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2854 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2855 dw_mci_read_data_pio(host, false);
2858 if (pending & SDMMC_INT_TXDR) {
2859 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2860 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2861 dw_mci_write_data_pio(host);
/* Voltage-switch interrupt is treated like a command completion. */
2864 if (pending & SDMMC_INT_VSI) {
2865 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2866 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2867 dw_mci_cmd_interrupt(host, pending);
2870 if (pending & SDMMC_INT_CMD_DONE) {
2871 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2872 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2873 dw_mci_cmd_interrupt(host, pending);
2876 if (pending & SDMMC_INT_CD) {
2877 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2878 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
/* Hold a wakelock so the card-detect workqueue can run before suspend. */
2879 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2880 queue_work(host->card_workqueue, &host->card_work);
2883 if (pending & SDMMC_INT_HLE) {
2884 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2885 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2889 /* Handle SDIO Interrupts */
2890 for (i = 0; i < host->num_slots; i++) {
2891 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in IP version 2.40a and later. */
2893 if (host->verid < DW_MMC_240A)
2894 sdio_int = SDMMC_INT_SDIO(i);
2896 sdio_int = SDMMC_INT_SDIO(i + 8);
2898 if (pending & sdio_int) {
2899 mci_writel(host, RINTSTS, sdio_int);
2900 mmc_signal_sdio_irq(slot->mmc);
2906 #ifdef CONFIG_MMC_DW_IDMAC
2907 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2908 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2909 /* Handle DMA interrupts */
2910 pending = mci_readl(host, IDSTS);
2911 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2912 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2913 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2914 host->dma_ops->complete((void *)host);
/*
 * dw_mci_work_routine_card - card-detect workqueue handler.  For each slot
 * whose presence changed: wake the system, fail any in-flight request with
 * -ENOMEDIUM according to the current state-machine state, reset the FIFO
 * (and IDMAC on internal-DMA platforms), and finally notify the MMC core
 * via mmc_detect_change().
 * NOTE(review): line-sampled view; switch/loop bodies between the numbered
 * lines are not fully visible here.
 */
2922 static void dw_mci_work_routine_card(struct work_struct *work)
2924 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2927 for (i = 0; i < host->num_slots; i++) {
2928 struct dw_mci_slot *slot = host->slot[i];
2929 struct mmc_host *mmc = slot->mmc;
2930 struct mmc_request *mrq;
2933 present = dw_mci_get_cd(mmc);
/* Re-read presence each iteration in case the card bounced while we
 * were cleaning up (loop exits when state matches last_detect_state). */
2934 while (present != slot->last_detect_state) {
2935 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2936 present ? "inserted" : "removed");
2937 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
2938 present ? "inserted" : "removed.", mmc_hostname(mmc));
2940 rk_send_wakeup_key();//wake up system
2941 spin_lock_bh(&host->lock);
2943 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2944 /* Card change detected */
2945 slot->last_detect_state = present;
2947 /* Clean up queue if present */
2950 if (mrq == host->mrq) {
/* Request is currently being processed: fail it at whatever stage
 * the state machine reached. */
2954 switch (host->state) {
2957 case STATE_SENDING_CMD:
2958 mrq->cmd->error = -ENOMEDIUM;
2962 case STATE_SENDING_DATA:
2963 mrq->data->error = -ENOMEDIUM;
2964 dw_mci_stop_dma(host);
2966 case STATE_DATA_BUSY:
2967 case STATE_DATA_ERROR:
2968 if (mrq->data->error == -EINPROGRESS)
2969 mrq->data->error = -ENOMEDIUM;
2973 case STATE_SENDING_STOP:
2974 mrq->stop->error = -ENOMEDIUM;
2978 dw_mci_request_end(host, mrq);
/* Request was still queued: fail it directly and complete it. */
2980 list_del(&slot->queue_node);
2981 mrq->cmd->error = -ENOMEDIUM;
2983 mrq->data->error = -ENOMEDIUM;
2985 mrq->stop->error = -ENOMEDIUM;
2987 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
2988 mrq->cmd->opcode, mmc_hostname(mmc));
2990 spin_unlock(&host->lock);
2991 mmc_request_done(slot->mmc, mrq);
2992 spin_lock(&host->lock);
2996 /* Power down slot */
2998 /* Clear down the FIFO */
2999 dw_mci_fifo_reset(host);
3000 #ifdef CONFIG_MMC_DW_IDMAC
3001 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3002 dw_mci_idmac_reset(host);
3007 spin_unlock_bh(&host->lock);
3009 present = dw_mci_get_cd(mmc);
3012 mmc_detect_change(slot->mmc,
3013 msecs_to_jiffies(host->pdata->detect_delay_ms));
3018 /* given a slot id, find out the device node representing that slot */
/*
 * Walks the controller's DT children and matches the child whose "reg"
 * property equals @slot.  Returns NULL if @dev has no OF node (return
 * paths are partly outside this sampled view).
 */
3019 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3021 struct device_node *np;
3025 if (!dev || !dev->of_node)
3028 for_each_child_of_node(dev->of_node, np) {
3029 addr = of_get_property(np, "reg", &len);
3030 if (!addr || (len < sizeof(int)))
3032 if (be32_to_cpup(addr) == slot)
/* Table mapping per-slot DT property names to DW_MCI slot quirk flags. */
3038 static struct dw_mci_of_slot_quirks {
3041 } of_slot_quirks[] = {
3043 .quirk = "disable-wp",
3044 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Collect the quirk flags for @slot by checking each entry of
 * of_slot_quirks[] against the slot's DT node properties.
 */
3048 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3050 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3055 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3056 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3057 quirks |= of_slot_quirks[idx].id;
3062 /* find out bus-width for a given slot */
/* Reads "bus-width" from the controller node (not the per-slot child —
 * see the vendor comment on the line below); falls back to a default
 * when the property is absent (default value outside this sampled view). */
3063 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3065 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3071 if (of_property_read_u32(np, "bus-width", &bus_wd))
3072 dev_err(dev, "bus-width property not found, assuming width"
3078 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/* Looks up "pwr-gpios" on the controller node, requests it via devm and
 * drives it low (power enable per the vendor comment). */
3079 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3081 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3087 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3089 /* Having a missing entry is valid; return silently */
3090 if (!gpio_is_valid(gpio))
3093 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3094 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3098 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3104 /* find the write protect gpio for a given slot; or -1 if none specified */
/* Looks up "wp-gpios" on the slot's DT child node and requests it. */
3105 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3107 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3113 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3115 /* Having a missing entry is valid; return silently */
3116 if (!gpio_is_valid(gpio))
3119 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3120 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3127 /* find the cd gpio for a given slot */
/* Looks up "cd-gpios" on the controller node and registers it with the
 * MMC core's slot-gpio card-detect helper (mmc_gpio_request_cd). */
3128 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3129 struct mmc_host *mmc)
3131 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3137 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3139 /* Having a missing entry is valid; return silently */
3140 if (!gpio_is_valid(gpio))
3143 if (mmc_gpio_request_cd(mmc, gpio, 0))
3144 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * dw_mci_gpio_cd_irqt - threaded IRQ handler for the dedicated card-detect
 * GPIO: forwards the event to the MMC core (debounced 200 ms) and queues
 * the driver's own card_work for slot cleanup.
 */
3147 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3149 struct mmc_host *mmc = dev_id;
3150 struct dw_mci_slot *slot = mmc_priv(mmc);
3151 struct dw_mci *host = slot->host;
3154 if (mmc->ops->card_event)
3155 mmc->ops->card_event(mmc);
3157 mmc_detect_change(mmc, msecs_to_jiffies(200));
3160 queue_work(host->card_workqueue, &host->card_work);
/*
 * dw_mci_of_set_cd_gpio_irq - install a threaded, both-edge interrupt on
 * the card-detect GPIO so insert/remove events trigger dw_mci_gpio_cd_irqt.
 * Silently skips invalid GPIOs; logs on request/convert failure.
 */
3164 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3165 struct mmc_host *mmc)
3167 struct dw_mci_slot *slot = mmc_priv(mmc);
3168 struct dw_mci *host = slot->host;
3172 /* Having a missing entry is valid; return silently */
3173 if (!gpio_is_valid(gpio))
3176 irq = gpio_to_irq(gpio);
3178 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3179 NULL, dw_mci_gpio_cd_irqt,
3180 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
3184 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3187 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/*
 * dw_mci_of_free_cd_gpio_irq - undo dw_mci_of_set_cd_gpio_irq: release the
 * card-detect IRQ and its GPIO (both were devm-allocated against the
 * mmc class device).
 */
3191 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3192 struct mmc_host *mmc)
3194 if (!gpio_is_valid(gpio))
3197 if (gpio_to_irq(gpio) >= 0) {
3198 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3199 devm_gpio_free(&mmc->class_dev, gpio);
3202 #else /* CONFIG_OF */
/* Non-OF builds: stub out the DT helpers (return values are on lines not
 * visible in this sampled view). */
3203 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3207 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3211 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3215 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3219 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3220 struct mmc_host *mmc)
3224 #endif /* CONFIG_OF */
/*
 * dw_mci_init_slot - allocate and register one mmc_host for slot @id.
 * Sequence: alloc host, read DT config (clock range, card-type restrictions,
 * caps, bus width), set OCR mask, configure block-size limits per DMA mode,
 * acquire pwr-en/wp/cd GPIOs and the vmmc regulator, register with the MMC
 * core, then apply pinctrl states.  Returns 0 on success (error unwinding
 * paths are partly outside this sampled view).
 * NOTE(review): line-sampled view; many statements between the numbered
 * lines are not visible here.
 */
3226 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3228 struct mmc_host *mmc;
3229 struct dw_mci_slot *slot;
3230 const struct dw_mci_drv_data *drv_data = host->drv_data;
3235 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3239 slot = mmc_priv(mmc);
3243 host->slot[id] = slot;
3246 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3248 mmc->ops = &dw_mci_ops;
/* f_min/f_max come from DT "clock-freq-min-max", else driver defaults. */
3250 if (of_property_read_u32_array(host->dev->of_node,
3251 "clock-freq-min-max", freq, 2)) {
3252 mmc->f_min = DW_MCI_FREQ_MIN;
3253 mmc->f_max = DW_MCI_FREQ_MAX;
3255 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3256 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3258 mmc->f_min = freq[0];
3259 mmc->f_max = freq[1];
3261 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3262 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3265 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
/* Rockchip DT extension: restrict each controller to one card type. */
3267 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3268 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3269 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3270 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3271 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3272 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3274 if (of_find_property(host->dev->of_node, "supports-tSD", NULL))
3275 mmc->restrict_caps |= RESTRICT_CARD_TYPE_TSD;
3278 /* We assume only low-level chip use gpio_cd */
3279 if (cpu_is_rk312x() &&
3281 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3282 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3283 if (gpio_is_valid(slot->cd_gpio)) {
3284 /* Request gpio int for card detection */
3285 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3287 slot->cd_gpio = -ENODEV;
3288 dev_err(host->dev, "failed to get your cd-gpios!\n");
3292 if (host->pdata->get_ocr)
3293 mmc->ocr_avail = host->pdata->get_ocr(id);
/* Default OCR: advertise every voltage window from 1.65 V to 3.6 V. */
3296 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3297 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3298 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3299 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3303 * Start with slot power disabled, it will be enabled when a card
3306 if (host->pdata->setpower)
3307 host->pdata->setpower(id, 0);
3309 if (host->pdata->caps)
3310 mmc->caps = host->pdata->caps;
3312 if (host->pdata->pm_caps)
3313 mmc->pm_caps = host->pdata->pm_caps;
/* Controller index: from the "mshc" DT alias, else platform-device id. */
3315 if (host->dev->of_node) {
3316 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3320 ctrl_id = to_platform_device(host->dev)->id;
3322 if (drv_data && drv_data->caps)
3323 mmc->caps |= drv_data->caps[ctrl_id];
3324 if (drv_data && drv_data->hold_reg_flag)
3325 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3327 /* set the compatibility of driver. */
3328 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3329 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3331 if (host->pdata->caps2)
3332 mmc->caps2 = host->pdata->caps2;
3334 if (host->pdata->get_bus_wd)
3335 bus_width = host->pdata->get_bus_wd(slot->id);
3336 else if (host->dev->of_node)
3337 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3341 switch (bus_width) {
3343 mmc->caps |= MMC_CAP_8_BIT_DATA;
3345 mmc->caps |= MMC_CAP_4_BIT_DATA;
3348 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3349 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3350 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3351 mmc->caps |= MMC_CAP_SDIO_IRQ;
3352 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3353 mmc->caps |= MMC_CAP_HW_RESET;
3354 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3355 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3356 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3357 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3358 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3359 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3360 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3361 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3363 /*Assign pm_caps pass to pm_flags*/
3364 mmc->pm_flags = mmc->pm_caps;
/* Block-size limits: platform data wins; otherwise pick defaults by DMA
 * mode (internal IDMAC, external DMAC on rk3036/rk312x, or PIO). */
3366 if (host->pdata->blk_settings) {
3367 mmc->max_segs = host->pdata->blk_settings->max_segs;
3368 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3369 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3370 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3371 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3373 /* Useful defaults if platform data is unset. */
3374 #ifdef CONFIG_MMC_DW_IDMAC
3375 mmc->max_segs = host->ring_size;
3376 mmc->max_blk_size = 65536;
3377 mmc->max_blk_count = host->ring_size;
3378 mmc->max_seg_size = 0x1000;
3379 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3380 if(cpu_is_rk3036() || cpu_is_rk312x()){
3381 /* fixup for external dmac setting */
3383 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3384 mmc->max_blk_count = 65535;
3385 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3386 mmc->max_seg_size = mmc->max_req_size;
3390 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3391 mmc->max_blk_count = 512;
3392 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3393 mmc->max_seg_size = mmc->max_req_size;
3394 #endif /* CONFIG_MMC_DW_IDMAC */
3398 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3400 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* vmmc regulator is only looked up for SD-restricted controllers. */
3405 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3406 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3410 if (IS_ERR(host->vmmc)) {
3411 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3414 ret = regulator_enable(host->vmmc);
3417 "failed to enable regulator: %d\n", ret);
3424 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3426 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3427 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3429 ret = mmc_add_host(mmc);
3433 /* Pinctrl set default iomux state to fucntion port.
3434 * Fixme: DON'T TOUCH EMMC SETTING!
3436 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3438 host->pinctrl = devm_pinctrl_get(host->dev);
3439 if(IS_ERR(host->pinctrl)){
3440 printk("%s: Warning : No pinctrl used!\n",mmc_hostname(host->mmc));
3442 host->pins_idle= pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_IDLE);
/* NOTE(review): checks pins_default although pins_idle was just looked
 * up — looks like a copy/paste slip in the vendor code; verify upstream. */
3443 if(IS_ERR(host->pins_default)){
3444 printk("%s: Warning : No IDLE pinctrl matched!\n", mmc_hostname(host->mmc));
3448 if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3449 printk("%s: Warning : Idle pinctrl setting failed!\n", mmc_hostname(host->mmc));
3452 host->pins_default = pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_DEFAULT);
3453 if(IS_ERR(host->pins_default)){
3454 printk("%s: Warning : No default pinctrl matched!\n", mmc_hostname(host->mmc));
3458 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3459 printk("%s: Warning : Default pinctrl setting failed!\n", mmc_hostname(host->mmc));
3465 #if defined(CONFIG_DEBUG_FS)
3466 dw_mci_init_debugfs(slot);
3469 /* Card initially undetected */
3470 slot->last_detect_state = 1;
/* Error path: release the card-detect GPIO IRQ if it was installed. */
3475 if (gpio_is_valid(slot->cd_gpio))
3476 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * dw_mci_cleanup_slot - tear down one slot: run the platform exit hook,
 * unregister from the MMC core, clear the host's slot pointer, and free
 * the mmc_host allocation.
 */
3481 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3483 /* Shutdown detect IRQ */
3484 if (slot->host->pdata->exit)
3485 slot->host->pdata->exit(id);
3487 /* Debugfs stuff is cleaned up by mmc core */
3488 mmc_remove_host(slot->mmc);
3489 slot->host->slot[id] = NULL;
3490 mmc_free_host(slot->mmc);
/*
 * dw_mci_init_dma - choose and initialise the DMA backend.  Allocates one
 * page of coherent memory for descriptor/sg translation, then selects the
 * external-DMAC ops on rk3036/rk312x or the internal IDMAC ops otherwise.
 * Falls back to PIO if the ops are incomplete or init fails.
 * NOTE(review): line-sampled view; fallback/goto structure between the
 * numbered lines is not fully visible here.
 */
3493 static void dw_mci_init_dma(struct dw_mci *host)
3495 /* Alloc memory for sg translation */
3496 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3497 &host->sg_dma, GFP_KERNEL);
3498 if (!host->sg_cpu) {
3499 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3504 /* Determine which DMA interface to use */
3505 #if defined(CONFIG_MMC_DW_IDMAC)
3506 if(cpu_is_rk3036() || cpu_is_rk312x()){
3507 host->dma_ops = &dw_mci_edmac_ops;
3508 dev_info(host->dev, "Using external DMA controller.\n");
3510 host->dma_ops = &dw_mci_idmac_ops;
3511 dev_info(host->dev, "Using internal DMA controller.\n");
/* All four ops must be present before we trust the backend. */
3518 if (host->dma_ops->init && host->dma_ops->start &&
3519 host->dma_ops->stop && host->dma_ops->cleanup) {
3520 if (host->dma_ops->init(host)) {
3521 dev_err(host->dev, "%s: Unable to initialize "
3522 "DMA Controller.\n", __func__);
3526 dev_err(host->dev, "DMA initialization not found.\n");
3534 dev_info(host->dev, "Using PIO mode.\n");
/*
 * dw_mci_ctrl_reset - set the given @reset bit(s) in CTRL and poll until
 * the controller clears them, with a 500 ms timeout.  Returns success/
 * failure (return statements fall outside this sampled view).
 */
3539 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3541 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3544 ctrl = mci_readl(host, CTRL);
3546 mci_writel(host, CTRL, ctrl);
3548 /* wait till resets clear */
3550 ctrl = mci_readl(host, CTRL);
3551 if (!(ctrl & reset))
3553 } while (time_before(jiffies, timeout));
3556 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * dw_mci_fifo_reset - stop the sg iterator (a reset raises a block
 * interrupt, per the comment below) and reset only the data FIFO.
 */
3562 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3565 * Reseting generates a block interrupt, hence setting
3566 * the scatter-gather pointer to NULL.
3569 sg_miter_stop(&host->sg_miter);
3573 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full controller reset: FIFO, (controller) and DMA reset bits together. */
3576 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3578 return dw_mci_ctrl_reset(host,
3579 SDMMC_CTRL_FIFO_RESET |
3581 SDMMC_CTRL_DMA_RESET);
/* Table mapping controller-level DT property names to host quirk flags. */
3586 static struct dw_mci_of_quirks {
3591 .quirk = "broken-cd",
3592 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * dw_mci_parse_dt - build a dw_mci_board from the controller's device-tree
 * node: slot count, quirks, fifo depth, detect delay, bus clock, then the
 * variant's parse_dt hook, then capability/pm properties.  Returns the
 * devm-allocated pdata or an ERR_PTR.
 */
3596 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3598 struct dw_mci_board *pdata;
3599 struct device *dev = host->dev;
3600 struct device_node *np = dev->of_node;
3601 const struct dw_mci_drv_data *drv_data = host->drv_data;
3603 u32 clock_frequency;
3605 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3607 dev_err(dev, "could not allocate memory for pdata\n");
3608 return ERR_PTR(-ENOMEM);
3611 /* find out number of slots supported */
3612 if (of_property_read_u32(dev->of_node, "num-slots",
3613 &pdata->num_slots)) {
3614 dev_info(dev, "num-slots property not found, "
3615 "assuming 1 slot is available\n");
3616 pdata->num_slots = 1;
3620 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3621 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3622 pdata->quirks |= of_quirks[idx].id;
3625 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3626 dev_info(dev, "fifo-depth property not found, using "
3627 "value of FIFOTH register as default\n")
3629 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3631 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3632 pdata->bus_hz = clock_frequency;
/* Give the SoC-variant driver a chance to parse its own properties. */
3634 if (drv_data && drv_data->parse_dt) {
3635 ret = drv_data->parse_dt(host);
3637 return ERR_PTR(ret);
3640 if (of_find_property(np, "keep-power-in-suspend", NULL))
3641 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3643 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3644 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3646 if (of_find_property(np, "supports-highspeed", NULL))
3647 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3649 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3650 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3652 if (of_find_property(np, "supports-DDR_MODE", NULL))
3653 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3655 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3656 pdata->caps2 |= MMC_CAP2_HS200;
3658 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3659 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3661 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3662 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3664 if (of_get_property(np, "cd-inverted", NULL))
3665 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3666 if (of_get_property(np, "bootpart-no-access", NULL))
3667 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3672 #else /* CONFIG_OF */
/* Non-OF builds have no device tree to parse; always fail. */
3673 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3675 return ERR_PTR(-EINVAL);
3677 #endif /* CONFIG_OF */
/*
 * dw_mci_dealwith_timeout - recover the controller after a data-timeout:
 * flag a data CRC/end-bit error so the tasklet fails the request, do a
 * full controller reset, re-init DMA (internal IDMAC only), restore
 * FIFOTH/TMOUT/interrupt masks, re-apply bus setup for keep-power slots,
 * and schedule the tasklet to finish the request.
 * NOTE(review): line-sampled view; switch/loop bodies between the numbered
 * lines are not fully visible here.
 */
3679 static void dw_mci_dealwith_timeout(struct dw_mci *host)
3683 switch(host->state){
3686 case STATE_SENDING_DATA:
3687 case STATE_DATA_BUSY:
/* Mark the data phase as failed (CRC + end-bit) for the tasklet. */
3688 host->data_status |= (SDMMC_INT_DCRC|SDMMC_INT_EBE);
3689 mci_writel(host, RINTSTS, SDMMC_INT_DRTO); // clear interrupt
3690 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3691 host->state = STATE_DATA_BUSY;
3692 if (!dw_mci_ctrl_all_reset(host)) {
3697 /* NO requirement to reclaim slave chn using external dmac */
3698 #ifdef CONFIG_MMC_DW_IDMAC
3699 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3700 if (host->use_dma && host->dma_ops->init)
3701 host->dma_ops->init(host);
3705 * Restore the initial value at FIFOTH register
3706 * And Invalidate the prev_blksz with zero
3708 mci_writel(host, FIFOTH, host->fifoth_val);
3709 host->prev_blksz = 0;
3710 mci_writel(host, TMOUT, 0xFFFFFFFF);
3711 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3712 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
3713 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
/* Card-detect interrupt only matters for non-SDIO controllers. */
3714 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3715 regs |= SDMMC_INT_CD;
3716 mci_writel(host, INTMASK, regs);
3717 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3718 for (i = 0; i < host->num_slots; i++) {
3719 struct dw_mci_slot *slot = host->slot[i];
3722 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3723 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3724 dw_mci_setup_bus(slot, true);
3727 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3728 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_dto_timeout - dto_timer expiry callback (armed in
 * dw_mci_cmd_interrupt): the expected DATA_OVER never arrived.  With the
 * host IRQ masked, force an end-bit-error status, clear all raw interrupt
 * bits, and run the recovery path in dw_mci_dealwith_timeout().
 */
3734 static void dw_mci_dto_timeout(unsigned long host_data)
3736 struct dw_mci *host = (struct dw_mci *) host_data;
3738 disable_irq(host->irq);
3740 host->data_status = SDMMC_INT_EBE;
3741 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3742 dw_mci_dealwith_timeout(host);
3744 enable_irq(host->irq);
3746 int dw_mci_probe(struct dw_mci *host)
3748 const struct dw_mci_drv_data *drv_data = host->drv_data;
3749 int width, i, ret = 0;
3755 host->pdata = dw_mci_parse_dt(host);
3756 if (IS_ERR(host->pdata)) {
3757 dev_err(host->dev, "platform data not available\n");
3762 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3764 "Platform data must supply select_slot function\n");
3769 * In 2.40a spec, Data offset is changed.
3770 * Need to check the version-id and set data-offset for DATA register.
3772 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3773 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3775 if (host->verid < DW_MMC_240A)
3776 host->data_offset = DATA_OFFSET;
3778 host->data_offset = DATA_240A_OFFSET;
3781 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3782 if (IS_ERR(host->hclk_mmc)) {
3783 dev_err(host->dev, "failed to get hclk_mmc\n");
3784 ret = PTR_ERR(host->hclk_mmc);
3788 clk_prepare_enable(host->hclk_mmc);
3791 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3792 if (IS_ERR(host->clk_mmc)) {
3793 dev_err(host->dev, "failed to get clk mmc_per\n");
3794 ret = PTR_ERR(host->clk_mmc);
3798 host->bus_hz = host->pdata->bus_hz;
3799 if (!host->bus_hz) {
3800 dev_err(host->dev,"Platform data must supply bus speed\n");
3805 if (host->verid < DW_MMC_240A)
3806 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3808 //rockchip: fix divider 2 in clksum before controlller
3809 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3812 dev_err(host->dev, "failed to set clk mmc\n");
3815 clk_prepare_enable(host->clk_mmc);
3817 if (drv_data && drv_data->setup_clock) {
3818 ret = drv_data->setup_clock(host);
3821 "implementation specific clock setup failed\n");
3826 host->quirks = host->pdata->quirks;
3827 host->irq_state = true;
3828 host->set_speed = 0;
3830 host->svi_flags = 0;
3832 spin_lock_init(&host->lock);
3833 INIT_LIST_HEAD(&host->queue);
3836 * Get the host data width - this assumes that HCON has been set with
3837 * the correct values.
3839 i = (mci_readl(host, HCON) >> 7) & 0x7;
3841 host->push_data = dw_mci_push_data16;
3842 host->pull_data = dw_mci_pull_data16;
3844 host->data_shift = 1;
3845 } else if (i == 2) {
3846 host->push_data = dw_mci_push_data64;
3847 host->pull_data = dw_mci_pull_data64;
3849 host->data_shift = 3;
3851 /* Check for a reserved value, and warn if it is */
3853 "HCON reports a reserved host data width!\n"
3854 "Defaulting to 32-bit access.\n");
3855 host->push_data = dw_mci_push_data32;
3856 host->pull_data = dw_mci_pull_data32;
3858 host->data_shift = 2;
3861 /* Reset all blocks */
3862 if (!dw_mci_ctrl_all_reset(host))
3865 host->dma_ops = host->pdata->dma_ops;
3866 dw_mci_init_dma(host);
3868 /* Clear the interrupts for the host controller */
3869 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3870 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3872 /* Put in max timeout */
3873 mci_writel(host, TMOUT, 0xFFFFFFFF);
3876 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3877 * Tx Mark = fifo_size / 2 DMA Size = 8
3879 if (!host->pdata->fifo_depth) {
3881 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3882 * have been overwritten by the bootloader, just like we're
3883 * about to do, so if you know the value for your hardware, you
3884 * should put it in the platform data.
3886 fifo_size = mci_readl(host, FIFOTH);
3887 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3889 fifo_size = host->pdata->fifo_depth;
3891 host->fifo_depth = fifo_size;
3893 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3894 mci_writel(host, FIFOTH, host->fifoth_val);
3896 /* disable clock to CIU */
3897 mci_writel(host, CLKENA, 0);
3898 mci_writel(host, CLKSRC, 0);
3900 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3901 host->card_workqueue = alloc_workqueue("dw-mci-card",
3902 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
3903 if (!host->card_workqueue) {
3907 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
3908 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3909 host->irq_flags, "dw-mci", host);
3913 if (host->pdata->num_slots)
3914 host->num_slots = host->pdata->num_slots;
3916 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
3918 setup_timer(&host->dto_timer, dw_mci_dto_timeout, (unsigned long)host);
3919 /* We need at least one slot to succeed */
3920 for (i = 0; i < host->num_slots; i++) {
3921 ret = dw_mci_init_slot(host, i);
3923 dev_dbg(host->dev, "slot %d init failed\n", i);
3929 * Enable interrupts for command done, data over, data empty, card det,
3930 * receive ready and error such as transmit, receive timeout, crc error
3932 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3933 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
3934 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
3935 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3936 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3937 regs |= SDMMC_INT_CD;
3939 mci_writel(host, INTMASK, regs);
3941 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
3943 dev_info(host->dev, "DW MMC controller at irq %d, "
3944 "%d bit host data width, "
3946 host->irq, width, fifo_size);
3949 dev_info(host->dev, "%d slots initialized\n", init_slots);
3951 dev_dbg(host->dev, "attempted to initialize %d slots, "
3952 "but failed on all\n", host->num_slots);
3957 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
3958 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
3963 destroy_workqueue(host->card_workqueue);
3966 if (host->use_dma && host->dma_ops->exit)
3967 host->dma_ops->exit(host);
3970 regulator_disable(host->vmmc);
3971 regulator_put(host->vmmc);
3975 if (!IS_ERR(host->clk_mmc))
3976 clk_disable_unprepare(host->clk_mmc);
3978 if (!IS_ERR(host->hclk_mmc))
3979 clk_disable_unprepare(host->hclk_mmc);
/*
 * Export the core probe so SoC-specific front-end (glue) drivers can call
 * it — presumably the Rockchip platform driver; confirm against the
 * corresponding *-pltfm file.
 */
3983 EXPORT_SYMBOL(dw_mci_probe);
3985 void dw_mci_remove(struct dw_mci *host)
/*
 * Tear down one controller instance in reverse order of probe: stop the
 * data-over timeout timer, quiesce interrupts, clean up each slot, gate
 * the card clock, then release workqueue, DMA, CD-gpio IRQ, regulator
 * and clocks.
 *
 * NOTE(review): this listing is a partial extract — the numbered original
 * lines show gaps (opening/closing braces, the `int i;` declaration, and
 * guard conditionals are elided). Comments below describe only what the
 * visible lines establish.
 */
3987 struct mmc_host *mmc = host->mmc;
3988 struct dw_mci_slot *slot = mmc_priv(mmc);
/* Make sure the DTO (data timeout) timer cannot fire during teardown. */
3991 del_timer_sync(&host->dto_timer);
/* Ack all pending raw interrupt status, then mask every source. */
3993 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3994 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
/* Unregister each slot's mmc_host (per-slot guard likely elided here). */
3996 for(i = 0; i < host->num_slots; i++){
3997 dev_dbg(host->dev, "remove slot %d\n", i);
3999 dw_mci_cleanup_slot(host->slot[i], i);
4002 /* disable clock to CIU */
4003 mci_writel(host, CLKENA, 0);
4004 mci_writel(host, CLKSRC, 0);
/* Flush and destroy the card-detect workqueue allocated at probe. */
4006 destroy_workqueue(host->card_workqueue);
4008 if(host->use_dma && host->dma_ops->exit)
4009 host->dma_ops->exit(host);
/* Release the card-detect gpio IRQ if one was claimed via OF. */
4011 if (gpio_is_valid(slot->cd_gpio))
4012 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
/*
 * Power down and drop the vmmc regulator — presumably guarded by an
 * `if (host->vmmc)` on an elided line; confirm before relying on it.
 */
4015 regulator_disable(host->vmmc);
4016 regulator_put(host->vmmc);
/* Undo the clk_prepare_enable() calls done during probe. */
4018 if(!IS_ERR(host->clk_mmc))
4019 clk_disable_unprepare(host->clk_mmc);
4021 if(!IS_ERR(host->hclk_mmc))
4022 clk_disable_unprepare(host->hclk_mmc);
4024 EXPORT_SYMBOL(dw_mci_remove);
4028 #ifdef CONFIG_PM_SLEEP
4030 * TODO: we should probably disable the clock to the card in the suspend path.
4032 int dw_mci_suspend(struct dw_mci *host)
/*
 * System-suspend hook: drop card power, tear down DMA, and (for the
 * SD-card controller only) block rescans, park the pins in their idle
 * pinctrl state, silence the controller, and arm the card-detect gpio
 * as a wakeup source.
 *
 * NOTE(review): partial extract — gaps in the numbered lines hide guard
 * conditionals, closing braces and the return statement.
 */
/* Cut vmmc — likely guarded by an elided `if (host->vmmc)`; confirm. */
4035 regulator_disable(host->vmmc);
4037 if(host->use_dma && host->dma_ops->exit)
4038 host->dma_ops->exit(host);
4040 /*only for sdmmc controller*/
4041 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
/* Stop mmc_rescan from running while suspended. */
4042 host->mmc->rescan_disable = 1;
/* If a detect work was pending, release the wake lock it held. */
4043 if (cancel_delayed_work_sync(&host->mmc->detect))
4044 wake_unlock(&host->mmc->detect_wake_lock);
4046 disable_irq(host->irq);
/* Switch pads to the low-power "idle" pinctrl state. */
4047 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4048 MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
4049 mmc_hostname(host->mmc));
/* Ack and mask all interrupts, then disable the controller. */
4051 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4052 mci_writel(host, INTMASK, 0x00);
4053 mci_writel(host, CTRL, 0x00);
4055 /* Soc rk3126 already in gpio_cd mode */
/* Re-acquire CD as a gpio and let it wake the system on insert/remove. */
4056 if (!(cpu_is_rk312x() && soc_is_rk3126())) {
4057 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4058 enable_irq_wake(host->mmc->slot.cd_irq);
4063 EXPORT_SYMBOL(dw_mci_suspend);
4065 int dw_mci_resume(struct dw_mci *host)
/*
 * System-resume hook: undo dw_mci_suspend() — restore pinctrl/CD wiring,
 * re-enable power, reset and reprogram the controller (FIFO thresholds,
 * timeout, interrupt mask), re-init DMA, and restore bus settings for
 * slots that kept power.
 *
 * NOTE(review): partial extract — elided lines include early returns,
 * braces, error handling after the reset, and the final return.
 */
4067 int i, ret, retry_cnt = 0;
4069 struct dw_mci_slot *slot;
/* SDIO: if no card was marked present, skip the restore (early-out
 * presumably on the elided line after the test). */
4071 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO){
4072 slot = mmc_priv(host->mmc);
4074 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4077 /*only for sdmmc controller*/
4078 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4079 /* Soc rk3126 already in gpio_cd mode */
/* Give CD back to the controller: drop the gpio wakeup used in suspend. */
4080 if (!(cpu_is_rk312x() && soc_is_rk3126())) {
4081 disable_irq_wake(host->mmc->slot.cd_irq);
4082 mmc_gpio_free_cd(host->mmc);
4084 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4085 MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
4086 mmc_hostname(host->mmc));
/* Allow mmc_rescan to run again. */
4087 host->mmc->rescan_disable = 0;
/*
 * GRF writes use the Rockchip write-enable-mask idiom: the high 16 bits
 * select which bit to touch, the low half writes it — here clearing one
 * SOC_CON0 bit per SoC (guarding `if (cpu_is_rk3288())` appears elided).
 * Exact mux semantics of the bit: TODO confirm against the SoC TRM.
 */
4090 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4091 else if(cpu_is_rk3036())
4092 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4093 else if(cpu_is_rk312x())
4094 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4095 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
/* Restore card power (vmmc guard presumably on an elided line). */
4098 ret = regulator_enable(host->vmmc);
4101 "failed to enable regulator: %d\n", ret);
/* Full controller/FIFO/DMA reset; failure path elided here. */
4106 if(!dw_mci_ctrl_all_reset(host)){
4111 if(host->use_dma && host->dma_ops->init)
4112 host->dma_ops->init(host);
4115 * Restore the initial value at FIFOTH register
4116 * And Invalidate the prev_blksz with zero
4118 mci_writel(host, FIFOTH, host->fifoth_val);
4119 host->prev_blksz = 0;
4120 /* Put in max timeout */
4121 mci_writel(host, TMOUT, 0xFFFFFFFF);
/* Rebuild the interrupt mask exactly as probe did (CD only for non-SDIO). */
4123 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4124 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
4126 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4127 regs |= SDMMC_INT_CD;
4128 mci_writel(host, INTMASK, regs);
4129 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4130 /*only for sdmmc controller*/
/* Re-enable the host IRQ disabled in suspend (first pass only). */
4131 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
4132 enable_irq(host->irq);
/* Slots that kept power across suspend get their ios/bus reprogrammed. */
4135 for(i = 0; i < host->num_slots; i++){
4136 struct dw_mci_slot *slot = host->slot[i];
4139 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4140 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4141 dw_mci_setup_bus(slot, true);
4147 EXPORT_SYMBOL(dw_mci_resume);
4148 #endif /* CONFIG_PM_SLEEP */
4150 static int __init dw_mci_init(void)
/*
 * Module init: only announces the driver here; the return statement
 * (and any registration) falls in lines elided from this extract.
 */
4152 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4156 static void __exit dw_mci_exit(void)
/* Module exit hook — body elided in this extract; no teardown visible. */
4160 module_init(dw_mci_init);
4161 module_exit(dw_mci_exit);
/* Module metadata. */
4163 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4164 MODULE_AUTHOR("NXP Semiconductor VietNam");
4165 MODULE_AUTHOR("Imagination Technologies Ltd");
4166 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
/*
 * Fixed mojibake: a GBK full-width comma was mis-decoded as "£¬";
 * also normalized the stray spaces around the email address.
 */
4167 MODULE_AUTHOR("Rockchip Electronics, Bangwang Xie <xbw@rock-chips.com>");
4168 MODULE_LICENSE("GPL v2");