2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/mmc/host.h>
34 #include <linux/mmc/mmc.h>
35 #include <linux/mmc/sd.h>
36 #include <linux/mmc/card.h>
37 #include <linux/mmc/sdio.h>
38 #include <linux/mmc/rk_mmc.h>
39 #include <linux/bitops.h>
40 #include <linux/regulator/consumer.h>
41 #include <linux/workqueue.h>
43 #include <linux/of_gpio.h>
44 #include <linux/mmc/slot-gpio.h>
45 #include <linux/clk-private.h>
46 #include <linux/rockchip/cpu.h>
49 #include "rk_sdmmc_dbg.h"
50 #include <linux/regulator/rockchip_io_vol_domain.h>
51 #include "../../clk/rockchip/clk-ops.h"
53 #define RK_SDMMC_DRIVER_VERSION "Ver 1.12 2014-07-08"
55 /* Common flag combinations */
56 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
57 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
59 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
61 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
62 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
63 #define DW_MCI_SEND_STATUS 1
64 #define DW_MCI_RECV_STATUS 2
65 #define DW_MCI_DMA_THRESHOLD 16
67 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
68 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
70 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
71 #define SDMMC_DATA_TIMEOUT_SD 500
72 #define SDMMC_DATA_TIMEOUT_SDIO 250
73 #define SDMMC_DATA_TIMEOUT_EMMC 2500
75 #define SDMMC_CMD_RTO_MAX_HOLD 200
76 #define SDMMC_WAIT_FOR_UNBUSY 2500
78 #ifdef CONFIG_MMC_DW_IDMAC
79 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
80 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
81 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
85 u32 des0; /* Control Descriptor */
86 #define IDMAC_DES0_DIC BIT(1)
87 #define IDMAC_DES0_LD BIT(2)
88 #define IDMAC_DES0_FD BIT(3)
89 #define IDMAC_DES0_CH BIT(4)
90 #define IDMAC_DES0_ER BIT(5)
91 #define IDMAC_DES0_CES BIT(30)
92 #define IDMAC_DES0_OWN BIT(31)
94 u32 des1; /* Buffer sizes */
95 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
96 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
98 u32 des2; /* buffer 1 physical address */
100 u32 des3; /* buffer 2 physical address */
102 #endif /* CONFIG_MMC_DW_IDMAC */
104 static const u8 tuning_blk_pattern_4bit[] = {
105 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
106 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
107 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
108 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
109 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
110 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
111 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
112 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
115 static const u8 tuning_blk_pattern_8bit[] = {
116 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
117 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
118 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
119 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
120 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
121 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
122 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
123 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
124 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
125 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
126 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
127 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
128 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
129 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
130 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
131 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
134 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
135 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
136 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
137 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
139 /*printk the all register of current host*/
141 static int dw_mci_regs_printk(struct dw_mci *host)
143 struct sdmmc_reg *regs = dw_mci_regs;
145 while( regs->name != 0 ){
146 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
149 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
154 #if defined(CONFIG_DEBUG_FS)
155 static int dw_mci_req_show(struct seq_file *s, void *v)
157 struct dw_mci_slot *slot = s->private;
158 struct mmc_request *mrq;
159 struct mmc_command *cmd;
160 struct mmc_command *stop;
161 struct mmc_data *data;
163 /* Make sure we get a consistent snapshot */
164 spin_lock_bh(&slot->host->lock);
174 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
175 cmd->opcode, cmd->arg, cmd->flags,
176 cmd->resp[0], cmd->resp[1], cmd->resp[2],
177 cmd->resp[2], cmd->error);
179 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
180 data->bytes_xfered, data->blocks,
181 data->blksz, data->flags, data->error);
184 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
185 stop->opcode, stop->arg, stop->flags,
186 stop->resp[0], stop->resp[1], stop->resp[2],
187 stop->resp[2], stop->error);
190 spin_unlock_bh(&slot->host->lock);
195 static int dw_mci_req_open(struct inode *inode, struct file *file)
197 return single_open(file, dw_mci_req_show, inode->i_private);
200 static const struct file_operations dw_mci_req_fops = {
201 .owner = THIS_MODULE,
202 .open = dw_mci_req_open,
205 .release = single_release,
208 static int dw_mci_regs_show(struct seq_file *s, void *v)
210 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
211 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
212 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
213 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
214 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
215 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
220 static int dw_mci_regs_open(struct inode *inode, struct file *file)
222 return single_open(file, dw_mci_regs_show, inode->i_private);
225 static const struct file_operations dw_mci_regs_fops = {
226 .owner = THIS_MODULE,
227 .open = dw_mci_regs_open,
230 .release = single_release,
233 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
235 struct mmc_host *mmc = slot->mmc;
236 struct dw_mci *host = slot->host;
240 root = mmc->debugfs_root;
244 node = debugfs_create_file("regs", S_IRUSR, root, host,
249 node = debugfs_create_file("req", S_IRUSR, root, slot,
254 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
258 node = debugfs_create_x32("pending_events", S_IRUSR, root,
259 (u32 *)&host->pending_events);
263 node = debugfs_create_x32("completed_events", S_IRUSR, root,
264 (u32 *)&host->completed_events);
271 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
273 #endif /* defined(CONFIG_DEBUG_FS) */
275 static void dw_mci_set_timeout(struct dw_mci *host)
277 /* timeout (maximum) */
278 mci_writel(host, TMOUT, 0xffffffff);
281 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
283 struct mmc_data *data;
284 struct dw_mci_slot *slot = mmc_priv(mmc);
285 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
287 cmd->error = -EINPROGRESS;
291 if (cmdr == MMC_STOP_TRANSMISSION)
292 cmdr |= SDMMC_CMD_STOP;
294 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
296 if (cmd->flags & MMC_RSP_PRESENT) {
297 /* We expect a response, so set this bit */
298 cmdr |= SDMMC_CMD_RESP_EXP;
299 if (cmd->flags & MMC_RSP_136)
300 cmdr |= SDMMC_CMD_RESP_LONG;
303 if (cmd->flags & MMC_RSP_CRC)
304 cmdr |= SDMMC_CMD_RESP_CRC;
308 cmdr |= SDMMC_CMD_DAT_EXP;
309 if (data->flags & MMC_DATA_STREAM)
310 cmdr |= SDMMC_CMD_STRM_MODE;
311 if (data->flags & MMC_DATA_WRITE)
312 cmdr |= SDMMC_CMD_DAT_WR;
315 if (drv_data && drv_data->prepare_command)
316 drv_data->prepare_command(slot->host, &cmdr);
322 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
324 struct mmc_command *stop;
330 stop = &host->stop_abort;
332 memset(stop, 0, sizeof(struct mmc_command));
334 if (cmdr == MMC_READ_SINGLE_BLOCK ||
335 cmdr == MMC_READ_MULTIPLE_BLOCK ||
336 cmdr == MMC_WRITE_BLOCK ||
337 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
338 stop->opcode = MMC_STOP_TRANSMISSION;
340 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
341 } else if (cmdr == SD_IO_RW_EXTENDED) {
342 stop->opcode = SD_IO_RW_DIRECT;
343 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
344 ((cmd->arg >> 28) & 0x7);
345 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
350 cmdr = stop->opcode | SDMMC_CMD_STOP |
351 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Latch the command argument/flags into the controller and fire it.
 * Special-cases CMD11 (SD_SWITCH_VOLTAGE), which must run with low
 * power mode disabled and the VOLT_SWITCH flag set.
 * NOTE(review): the embedded line numbering jumps here — several
 * original source lines are missing from this listing.
 */
356 static void dw_mci_start_command(struct dw_mci *host,
357 struct mmc_command *cmd, u32 cmd_flags)
359 struct dw_mci_slot *slot = host->slot[0];
360 /*temporality fix slot[0] due to host->num_slots equal to 1*/
362 host->pre_cmd = host->cmd;
365 "start command: ARGR=0x%08x CMDR=0x%08x\n",
366 cmd->arg, cmd_flags);
368 if(SD_SWITCH_VOLTAGE == cmd->opcode){
369 /*confirm non-low-power mode*/
370 mci_writel(host, CMDARG, 0);
371 dw_mci_disable_low_power(slot);
373 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
374 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
376 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
379 mci_writel(host, CMDARG, cmd->arg);
382 /* fix the value to 1 in some Soc,for example RK3188. */
383 if(host->mmc->hold_reg_flag)
384 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
386 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
390 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
392 dw_mci_start_command(host, data->stop, host->stop_cmdr);
395 /* DMA interface functions */
396 static void dw_mci_stop_dma(struct dw_mci *host)
398 if (host->using_dma) {
399 host->dma_ops->stop(host);
400 host->dma_ops->cleanup(host);
403 /* Data transfer was stopped by the interrupt handler */
404 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
407 static int dw_mci_get_dma_dir(struct mmc_data *data)
409 if (data->flags & MMC_DATA_WRITE)
410 return DMA_TO_DEVICE;
412 return DMA_FROM_DEVICE;
415 #ifdef CONFIG_MMC_DW_IDMAC
416 static void dw_mci_dma_cleanup(struct dw_mci *host)
418 struct mmc_data *data = host->data;
421 if (!data->host_cookie)
422 dma_unmap_sg(host->dev,
425 dw_mci_get_dma_dir(data));
428 static void dw_mci_idmac_reset(struct dw_mci *host)
430 u32 bmod = mci_readl(host, BMOD);
431 /* Software reset of DMA */
432 bmod |= SDMMC_IDMAC_SWRESET;
433 mci_writel(host, BMOD, bmod);
436 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
440 /* Disable and reset the IDMAC interface */
441 temp = mci_readl(host, CTRL);
442 temp &= ~SDMMC_CTRL_USE_IDMAC;
443 temp |= SDMMC_CTRL_DMA_RESET;
444 mci_writel(host, CTRL, temp);
446 /* Stop the IDMAC running */
447 temp = mci_readl(host, BMOD);
448 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
449 temp |= SDMMC_IDMAC_SWRESET;
450 mci_writel(host, BMOD, temp);
453 static void dw_mci_idmac_complete_dma(void *arg)
455 struct dw_mci *host = arg;
456 struct mmc_data *data = host->data;
458 dev_vdbg(host->dev, "DMA complete\n");
461 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
462 host->mrq->cmd->opcode,host->mrq->cmd->arg,
463 data->blocks,data->blksz,mmc_hostname(host->mmc));
466 host->dma_ops->cleanup(host);
469 * If the card was removed, data will be NULL. No point in trying to
470 * send the stop command or waiting for NBUSY in this case.
473 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
474 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the request's mapped scatterlist:
 * each descriptor gets OWN|DIC|CH, its buffer size and bus address; the
 * first/last descriptors are then tagged FD/LD.
 * NOTE(review): original lines are missing from this listing (the
 * embedded numbering jumps), including declarations and the reset of
 * 'desc' before the first-descriptor tagging.
 */
478 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
482 struct idmac_desc *desc = host->sg_cpu;
484 for (i = 0; i < sg_len; i++, desc++) {
485 unsigned int length = sg_dma_len(&data->sg[i]);
486 u32 mem_addr = sg_dma_address(&data->sg[i]);
488 /* Set the OWN bit and disable interrupts for this descriptor */
489 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
492 IDMAC_SET_BUFFER1_SIZE(desc, length);
494 /* Physical address to DMA to/from */
495 desc->des2 = mem_addr;
498 /* Set first descriptor */
500 desc->des0 |= IDMAC_DES0_FD;
502 /* Set last descriptor */
503 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
504 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
505 desc->des0 |= IDMAC_DES0_LD;
/*
 * Kick off an IDMAC transfer: build the descriptor ring, route the
 * controller onto the IDMAC, enable it, then poke the poll-demand
 * register to start fetching descriptors.
 * NOTE(review): lines are missing from this listing (e.g. the write
 * barrier between descriptor setup and enabling the IDMAC).
 */
510 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
514 dw_mci_translate_sglist(host, host->data, sg_len);
516 /* Select IDMAC interface */
517 temp = mci_readl(host, CTRL);
518 temp |= SDMMC_CTRL_USE_IDMAC;
519 mci_writel(host, CTRL, temp);
523 /* Enable the IDMAC */
524 temp = mci_readl(host, BMOD);
525 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
526 mci_writel(host, BMOD, temp);
528 /* Start it running */
529 mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: forward-link a page-sized ring of descriptors,
 * mark the tail as end-of-ring, reset the IDMAC, unmask only the
 * completion interrupts, and program the descriptor base address.
 * NOTE(review): lines are missing from this listing (loop index
 * declaration, second IDINTEN flag line, return statement).
 */
532 static int dw_mci_idmac_init(struct dw_mci *host)
534 struct idmac_desc *p;
537 /* Number of descriptors in the ring buffer */
538 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
540 /* Forward link the descriptor list */
541 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
542 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
544 /* Set the last descriptor as the end-of-ring descriptor */
545 p->des3 = host->sg_dma;
546 p->des0 = IDMAC_DES0_ER;
548 dw_mci_idmac_reset(host);
550 /* Mask out interrupts - get Tx & Rx complete only */
551 mci_writel(host, IDSTS, IDMAC_INT_CLR);
552 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
555 /* Set the descriptor base address */
556 mci_writel(host, DBADDR, host->sg_dma);
/* DMA operations vector for the internal (IDMAC) DMA engine. */
560 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
561 .init = dw_mci_idmac_init,
562 .start = dw_mci_idmac_start_dma,
563 .stop = dw_mci_idmac_stop_dma,
564 .complete = dw_mci_idmac_complete_dma,
565 .cleanup = dw_mci_dma_cleanup,
569 static void dw_mci_edma_cleanup(struct dw_mci *host)
571 struct mmc_data *data = host->data;
574 if (!data->host_cookie)
575 dma_unmap_sg(host->dev,
576 data->sg, data->sg_len,
577 dw_mci_get_dma_dir(data));
580 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
582 dmaengine_terminate_all(host->dms->ch);
585 static void dw_mci_edmac_complete_dma(void *arg)
587 struct dw_mci *host = arg;
588 struct mmc_data *data = host->data;
590 dev_vdbg(host->dev, "DMA complete\n");
593 if(data->flags & MMC_DATA_READ)
594 /* Invalidate cache after read */
595 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
596 data->sg_len, DMA_FROM_DEVICE);
598 host->dma_ops->cleanup(host);
601 * If the card was removed, data will be NULL. No point in trying to
602 * send the stop command or waiting for NBUSY in this case.
605 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
606 tasklet_schedule(&host->tasklet);
610 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
612 struct dma_slave_config slave_config;
613 struct dma_async_tx_descriptor *desc = NULL;
614 struct scatterlist *sgl = host->data->sg;
615 u32 sg_elems = host->data->sg_len;
618 /* Set external dma config: burst size, burst width*/
619 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
620 slave_config.src_addr = slave_config.dst_addr;
621 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
622 slave_config.src_addr_width = slave_config.dst_addr_width;
624 /* Match FIFO dma burst MSIZE with external dma config*/
625 slave_config.dst_maxburst = ((host->fifoth_val) >> 28) && 0x7;
626 slave_config.src_maxburst = slave_config.dst_maxburst;
628 if(host->data->flags & MMC_DATA_WRITE){
629 slave_config.direction = DMA_MEM_TO_DEV;
630 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
632 dev_err(host->dev, "error in dw_mci edma configuration.\n");
636 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
637 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
639 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
642 /* Set dw_mci_edmac_complete_dma as callback */
643 desc->callback = dw_mci_edmac_complete_dma;
644 desc->callback_param = (void *)host;
645 dmaengine_submit(desc);
647 /* Flush cache before write */
648 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
649 sg_elems, DMA_TO_DEVICE);
650 dma_async_issue_pending(host->dms->ch);
653 slave_config.direction = DMA_DEV_TO_MEM;
654 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
656 dev_err(host->dev, "error in dw_mci edma configuration.\n");
659 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
660 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
662 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
665 /* set dw_mci_edmac_complete_dma as callback */
666 desc->callback = dw_mci_edmac_complete_dma;
667 desc->callback_param = (void *)host;
668 dmaengine_submit(desc);
669 dma_async_issue_pending(host->dms->ch);
673 static int dw_mci_edmac_init(struct dw_mci *host)
675 /* 1) request external dma channel, SHOULD decide chn in dts */
676 host->dms = (struct dw_mci_dma_slave *)kmalloc(sizeof(struct dw_mci_dma_slave),GFP_KERNEL);
677 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
679 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
680 host->dms->ch->chan_id);
693 static void dw_mci_edmac_exit(struct dw_mci *host)
695 dma_release_channel(host->dms->ch);
/* DMA operations vector for the external dmaengine-based DMA path. */
698 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
699 .init = dw_mci_edmac_init,
700 .exit = dw_mci_edmac_exit,
701 .start = dw_mci_edmac_start_dma,
702 .stop = dw_mci_edmac_stop_dma,
703 .complete = dw_mci_edmac_complete_dma,
704 .cleanup = dw_mci_edma_cleanup,
/*
 * Map the request's scatterlist for DMA (or reuse a mapping cached in
 * host_cookie).  Rejects "complex" transfers: short ones and any with
 * non-word-aligned segment offsets/lengths, which must fall back to PIO.
 * NOTE(review): original lines are missing from this listing (the
 * 'next' parameter declaration, early-return bodies, final return).
 */
708 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
709 struct mmc_data *data,
712 struct scatterlist *sg;
713 unsigned int i, sg_len;
715 if (!next && data->host_cookie)
716 return data->host_cookie;
719 * We don't do DMA on "complex" transfers, i.e. with
720 * non-word-aligned buffers or lengths. Also, we don't bother
721 * with all the DMA setup overhead for short transfers.
723 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
729 for_each_sg(data->sg, sg, data->sg_len, i) {
730 if (sg->offset & 3 || sg->length & 3)
734 sg_len = dma_map_sg(host->dev,
737 dw_mci_get_dma_dir(data));
742 data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: eagerly DMA-map the next request's data so the
 * mapping cost overlaps with the current transfer.  On failure the
 * cookie is cleared so the normal submit path maps it instead.
 * NOTE(review): original lines are missing from this listing.
 */
747 static void dw_mci_pre_req(struct mmc_host *mmc,
748 struct mmc_request *mrq,
751 struct dw_mci_slot *slot = mmc_priv(mmc);
752 struct mmc_data *data = mrq->data;
754 if (!slot->host->use_dma || !data)
757 if (data->host_cookie) {
758 data->host_cookie = 0;
762 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
763 data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: unmap data that pre_req() mapped (indicated
 * by a non-zero host_cookie) and clear the cookie.
 * NOTE(review): original lines are missing from this listing
 * (the sg/sg_len arguments of dma_unmap_sg).
 */
766 static void dw_mci_post_req(struct mmc_host *mmc,
767 struct mmc_request *mrq,
770 struct dw_mci_slot *slot = mmc_priv(mmc);
771 struct mmc_data *data = mrq->data;
773 if (!slot->host->use_dma || !data)
776 if (data->host_cookie)
777 dma_unmap_sg(slot->host->dev,
780 dw_mci_get_dma_dir(data));
781 data->host_cookie = 0;
/*
 * Pick the largest DMA burst size (MSIZE) compatible with the block
 * size and FIFO depth, then program FIFOTH watermarks accordingly.
 * Only meaningful for the IDMAC build.
 * NOTE(review): original lines are missing from this listing (the
 * search loop around the mszs[] table is incomplete).
 */
784 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
786 #ifdef CONFIG_MMC_DW_IDMAC
787 unsigned int blksz = data->blksz;
788 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
789 u32 fifo_width = 1 << host->data_shift;
790 u32 blksz_depth = blksz / fifo_width, fifoth_val;
791 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
792 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
794 tx_wmark = (host->fifo_depth) / 2;
795 tx_wmark_invers = host->fifo_depth - tx_wmark;
799 * if blksz is not a multiple of the FIFO width
801 if (blksz % fifo_width) {
808 if (!((blksz_depth % mszs[idx]) ||
809 (tx_wmark_invers % mszs[idx]))) {
811 rx_wmark = mszs[idx] - 1;
816 * If idx is '0', it won't be tried
817 * Thus, initial values are uesed
820 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
821 mci_writel(host, FIFOTH, fifoth_val);
/*
 * Enable the card-read-threshold feature for HS200/SDR104 timings when
 * a whole block fits in the FIFO; otherwise disable it.
 * NOTE(review): original lines are missing from this listing
 * (thld_size declaration/assignment, goto-disable path).
 */
825 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
827 unsigned int blksz = data->blksz;
828 u32 blksz_depth, fifo_depth;
831 WARN_ON(!(data->flags & MMC_DATA_READ));
833 if (host->timing != MMC_TIMING_MMC_HS200 &&
834 host->timing != MMC_TIMING_UHS_SDR104)
837 blksz_depth = blksz / (1 << host->data_shift);
838 fifo_depth = host->fifo_depth;
840 if (blksz_depth > fifo_depth)
844 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
845 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
846 * Currently just choose blksz.
849 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
853 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to hand the data phase to the DMA engine: map the sg list, tune
 * FIFOTH if the block size changed, reset the DMA block, enable the
 * DMA interface, mask the PIO IRQs and start the engine.  Returns
 * non-zero to tell the caller to fall back to PIO.
 * NOTE(review): original lines are missing from this listing
 * (use_dma checks, error-path returns, final return value).
 */
856 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
863 /* If we don't have a channel, we can't do DMA */
867 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
869 host->dma_ops->stop(host);
876 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
877 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
881 * Decide the MSIZE and RX/TX Watermark.
882 * If current block size is same with previous size,
883 * no need to update fifoth.
885 if (host->prev_blksz != data->blksz)
886 dw_mci_adjust_fifoth(host, data);
889 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
891 /* Enable the DMA interface */
892 temp = mci_readl(host, CTRL);
893 temp |= SDMMC_CTRL_DMA_ENABLE;
894 mci_writel(host, CTRL, temp);
896 /* Disable RX/TX IRQs, let DMA handle it */
897 temp = mci_readl(host, INTMASK);
898 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
899 mci_writel(host, INTMASK, temp);
901 host->dma_ops->start(host, sg_len);
/*
 * Set up the data phase of a request: choose direction, try the DMA
 * path, and fall back to PIO (sg_miter + RX/TX IRQs, initial FIFOTH)
 * if DMA submission is refused.  prev_blksz tracks whether the next
 * DMA setup may skip the FIFOTH recalculation.
 * NOTE(review): original lines are missing from this listing
 * (braces/else of the DMA-vs-PIO split, temp declaration).
 */
906 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
910 data->error = -EINPROGRESS;
917 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
919 if (data->flags & MMC_DATA_READ) {
920 host->dir_status = DW_MCI_RECV_STATUS;
921 dw_mci_ctrl_rd_thld(host, data);
923 host->dir_status = DW_MCI_SEND_STATUS;
926 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
927 data->blocks, data->blksz, mmc_hostname(host->mmc));
929 if (dw_mci_submit_data_dma(host, data)) {
930 int flags = SG_MITER_ATOMIC;
931 if (host->data->flags & MMC_DATA_READ)
932 flags |= SG_MITER_TO_SG;
934 flags |= SG_MITER_FROM_SG;
936 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
938 host->part_buf_start = 0;
939 host->part_buf_count = 0;
941 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
942 temp = mci_readl(host, INTMASK);
943 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
944 mci_writel(host, INTMASK, temp);
946 temp = mci_readl(host, CTRL);
947 temp &= ~SDMMC_CTRL_DMA_ENABLE;
948 mci_writel(host, CTRL, temp);
951 * Use the initial fifoth_val for PIO mode.
952 * If next issued data may be transfered by DMA mode,
953 * prev_blksz should be invalidated.
955 mci_writel(host, FIFOTH, host->fifoth_val);
956 host->prev_blksz = 0;
959 * Keep the current block size.
960 * It will be used to decide whether to update
961 * fifoth register next time.
963 host->prev_blksz = data->blksz;
/*
 * Send a raw controller command (e.g. clock-update) outside the normal
 * request path: optionally wait for the card/controller to go unbusy,
 * write CMDARG/CMD with SDMMC_CMD_START, then poll until the START bit
 * clears or the timeout expires.
 * NOTE(review): original lines are missing from this listing (the
 * do/while busy loop, 'ret' declaration, timeout-branch braces).
 */
967 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
969 struct dw_mci *host = slot->host;
970 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
971 unsigned int cmd_status = 0;
972 #ifdef SDMMC_WAIT_FOR_UNBUSY
974 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
976 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
978 ret = time_before(jiffies, timeout);
979 cmd_status = mci_readl(host, STATUS);
980 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
984 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
985 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
988 mci_writel(host, CMDARG, arg);
990 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
991 if(cmd & SDMMC_CMD_UPD_CLK)
992 timeout = jiffies + msecs_to_jiffies(50);
994 timeout = jiffies + msecs_to_jiffies(500);
995 while (time_before(jiffies, timeout)) {
996 cmd_status = mci_readl(host, CMD);
997 if (!(cmd_status & SDMMC_CMD_START))
1000 dev_err(&slot->mmc->class_dev,
1001 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1002 cmd, arg, cmd_status);
/*
 * Program the card clock (divider, CLKENA/CLKSRC, clk_mmc rate
 * workarounds for Rockchip SoCs), low-power clock gating (unless SDIO
 * interrupts are in use), and the slot bus width.
 * NOTE(review): this listing is heavily gapped (the embedded numbering
 * jumps throughout) — divider computation branches, braces and several
 * statements are missing; do not treat this text as compilable.
 */
1005 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1007 struct dw_mci *host = slot->host;
1008 unsigned int tempck,clock = slot->clock;
1013 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1014 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1017 mci_writel(host, CLKENA, 0);
1018 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1019 if(host->svi_flags == 0)
1020 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1022 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1024 } else if (clock != host->current_speed || force_clkinit) {
1025 div = host->bus_hz / clock;
1026 if (host->bus_hz % clock && host->bus_hz > clock)
1028 * move the + 1 after the divide to prevent
1029 * over-clocking the card.
1033 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1035 if ((clock << div) != slot->__clk_old || force_clkinit) {
1036 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1037 dev_info(&slot->mmc->class_dev,
1038 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1039 slot->id, host->bus_hz, clock,
1042 host->set_speed = tempck;
1043 host->set_div = div;
1047 mci_writel(host, CLKENA, 0);
1048 mci_writel(host, CLKSRC, 0);
1052 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1054 if(clock <= 400*1000){
1055 MMC_DBG_BOOT_FUNC(host->mmc,
1056 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1057 clock * 2, mmc_hostname(host->mmc));
1058 /* clk_mmc will change parents to 24MHz xtal*/
1059 clk_set_rate(host->clk_mmc, clock * 2);
1062 host->set_div = div;
1066 MMC_DBG_BOOT_FUNC(host->mmc,
1067 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1068 mmc_hostname(host->mmc));
1071 MMC_DBG_ERR_FUNC(host->mmc,
1072 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1073 mmc_hostname(host->mmc));
1075 host->set_div = div;
1076 host->bus_hz = host->set_speed * 2;
1077 MMC_DBG_BOOT_FUNC(host->mmc,
1078 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1079 div, host->bus_hz, mmc_hostname(host->mmc));
1081 /* BUG may be here, come on, Linux BSP engineer looks!
1082 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1083 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1084 some oops happened like that:
1085 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1086 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1087 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1088 mmc0: new high speed DDR MMC card at address 0001
1089 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1091 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1092 mmcblk0: retrying using single block read
1093 mmcblk0: error -110 sending status command, retrying
1095 How to: If eMMC HW version < 4.51, or > 4.51 but no caps2-mmc-hs200 support in dts
1096 Please set dts emmc clk to 100M or 150M, I will workaround it!
1099 if (host->verid < DW_MMC_240A)
1100 clk_set_rate(host->clk_mmc,(host->bus_hz));
1102 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1108 /* set clock to desired speed */
1109 mci_writel(host, CLKDIV, div);
1113 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1115 /* enable clock; only low power if no SDIO */
1116 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1118 if (host->verid < DW_MMC_240A)
1119 sdio_int = SDMMC_INT_SDIO(slot->id);
1121 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1123 if (!(mci_readl(host, INTMASK) & sdio_int))
1124 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1125 mci_writel(host, CLKENA, clk_en_a);
1129 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1130 /* keep the clock with reflecting clock dividor */
1131 slot->__clk_old = clock << div;
1134 host->current_speed = clock;
1136 if(slot->ctype != slot->pre_ctype)
1137 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1139 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1140 mmc_hostname(host->mmc));
1141 slot->pre_ctype = slot->ctype;
1143 /* Set the current slot bus width */
1144 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1147 extern struct mmc_card *this_card;
1148 static void dw_mci_wait_unbusy(struct dw_mci *host)
1151 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1152 unsigned long time_loop;
1153 unsigned int status;
1156 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1158 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1159 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1160 /* Special care for (secure)erase timeout calculation */
1162 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1165 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1) ;
1166 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1167 300000 * (this_card->ext_csd.sec_erase_mult)) :
1168 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1172 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1173 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1174 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1175 timeout = SDMMC_DATA_TIMEOUT_SD;
1178 time_loop = jiffies + msecs_to_jiffies(timeout);
1180 status = mci_readl(host, STATUS);
1181 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1183 } while (time_before(jiffies, time_loop));
1188 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1191 * 0--status is busy.
1192 * 1--status is unbusy.
/*
 * Voltage-switch helper: toggles host->svi_flags between the two
 * CMD11 phases and reports the new value (0 = busy, 1 = unbusy, per
 * the comment above this function in the original source).
 * NOTE(review): original lines are missing from this listing
 * (braces/else and return paths are incomplete).
 */
1194 int dw_mci_card_busy(struct mmc_host *mmc)
1196 struct dw_mci_slot *slot = mmc_priv(mmc);
1197 struct dw_mci *host = slot->host;
1199 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1200 host->svi_flags, mmc_hostname(host->mmc));
1203 if(host->svi_flags == 0){
1205 host->svi_flags = 1;
1206 return host->svi_flags;
1209 host->svi_flags = 0;
1210 return host->svi_flags;
/*
 * Core request starter: selects the slot, waits for the controller to
 * go unbusy, clears per-request event state, programs BYTCNT/BLKSIZ
 * and the timeout for data commands, submits the data phase, and fires
 * the (possibly INIT-flagged) command.  Also precomputes stop_cmdr for
 * requests with a stop command.
 * NOTE(review): original lines are missing from this listing
 * (mrq/data/cmdflags assignments, several braces).
 */
1216 static void __dw_mci_start_request(struct dw_mci *host,
1217 struct dw_mci_slot *slot,
1218 struct mmc_command *cmd)
1220 struct mmc_request *mrq;
1221 struct mmc_data *data;
1225 if (host->pdata->select_slot)
1226 host->pdata->select_slot(slot->id);
1228 host->cur_slot = slot;
1231 dw_mci_wait_unbusy(host);
1233 host->pending_events = 0;
1234 host->completed_events = 0;
1235 host->data_status = 0;
1239 dw_mci_set_timeout(host);
1240 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1241 mci_writel(host, BLKSIZ, data->blksz);
1244 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1246 /* this is the first command, send the initialization clock */
1247 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1248 cmdflags |= SDMMC_CMD_INIT;
1251 dw_mci_submit_data(host, data);
1255 dw_mci_start_command(host, cmd, cmdflags);
1258 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * Start the slot's queued request, preferring the SET_BLOCK_COUNT
 * (sbc) pre-command when the request carries one.
 */
1261 static void dw_mci_start_request(struct dw_mci *host,
1262 struct dw_mci_slot *slot)
1264 struct mmc_request *mrq = slot->mrq;
1265 struct mmc_command *cmd;
1267 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1268 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1270 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1271 __dw_mci_start_request(host, slot, cmd);
1274 /* must be called with host->lock held */
/*
 * Queue a request for the slot; caller holds host->lock (see comment
 * above).  Starts it immediately when the host state machine is idle,
 * otherwise appends the slot to the host's pending queue.
 */
1275 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1276 struct mmc_request *mrq)
1278 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1283 if (host->state == STATE_IDLE) {
1284 host->state = STATE_SENDING_CMD;
1285 dw_mci_start_request(host, slot);
/* Host busy: defer; dw_mci_request_end() will pick this up later. */
1287 list_add_tail(&slot->queue_node, &host->queue);
/*
 * .request host op: entry point for all MMC core requests.
 * Card-presence check and queueing happen under one lock so the card
 * cannot vanish in between; an absent card fails the request with
 * -ENOMEDIUM immediately.
 */
1291 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1293 struct dw_mci_slot *slot = mmc_priv(mmc);
1294 struct dw_mci *host = slot->host;
1299 * The check for card presence and queueing of the request must be
1300 * atomic, otherwise the card could be removed in between and the
1301 * request wouldn't fail until another card was inserted.
1303 spin_lock_bh(&host->lock);
1305 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1306 spin_unlock_bh(&host->lock);
1307 mrq->cmd->error = -ENOMEDIUM;
1308 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1309 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
1311 mmc_request_done(mmc, mrq);
1315 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1316 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1318 dw_mci_queue_request(host, slot, mrq);
1320 spin_unlock_bh(&host->lock);
/*
 * .set_ios host op: apply bus width, timing, clock and power settings.
 * Optionally (SDMMC_WAIT_FOR_UNBUSY) polls the STATUS register until
 * the controller and card are no longer busy before reprogramming;
 * during a voltage switch (svi_flags == 1) a longer timeout is used.
 */
1323 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1325 struct dw_mci_slot *slot = mmc_priv(mmc);
1326 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1327 struct dw_mci *host = slot->host;
1329 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1332 #ifdef SDMMC_WAIT_FOR_UNBUSY
1333 unsigned long time_loop;
/* Voltage switch in progress needs the longer SD data timeout. */
1336 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1337 if(host->svi_flags == 1)
1338 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1340 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1342 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1345 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1346 printk("%d..%s: no card. [%s]\n", \
1347 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll until neither the data path nor the card signals busy. */
1352 ret = time_before(jiffies, time_loop);
1353 regs = mci_readl(slot->host, STATUS);
1354 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1360 printk("slot->flags = %lu ", slot->flags);
1361 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1362 if(host->svi_flags != 1)
1365 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1366 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
1370 switch (ios->bus_width) {
1371 case MMC_BUS_WIDTH_4:
1372 slot->ctype = SDMMC_CTYPE_4BIT;
1374 case MMC_BUS_WIDTH_8:
1375 slot->ctype = SDMMC_CTYPE_8BIT;
1378 /* set default 1 bit mode */
1379 slot->ctype = SDMMC_CTYPE_1BIT;
1380 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode is selected per slot via UHS_REG bit (16 + slot id). */
1383 regs = mci_readl(slot->host, UHS_REG);
1386 if (ios->timing == MMC_TIMING_UHS_DDR50)
1387 regs |= ((0x1 << slot->id) << 16);
1389 regs &= ~((0x1 << slot->id) << 16);
1391 mci_writel(slot->host, UHS_REG, regs);
1392 slot->host->timing = ios->timing;
1395 * Use mirror of ios->clock to prevent race with mmc
1396 * core ios update when finding the minimum.
1398 slot->clock = ios->clock;
/* Give the SoC-specific driver a chance to adjust (e.g. phases). */
1400 if (drv_data && drv_data->set_ios)
1401 drv_data->set_ios(slot->host, ios);
1403 /* Slot specific timing and width adjustment */
1404 dw_mci_setup_bus(slot, false);
1408 switch (ios->power_mode) {
1410 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1412 if (slot->host->pdata->setpower)
1413 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1414 regs = mci_readl(slot->host, PWREN);
1415 regs |= (1 << slot->id);
1416 mci_writel(slot->host, PWREN, regs);
1419 /* Power down slot */
1420 if(slot->host->pdata->setpower)
1421 slot->host->pdata->setpower(slot->id, 0);
1422 regs = mci_readl(slot->host, PWREN);
1423 regs &= ~(1 << slot->id);
1424 mci_writel(slot->host, PWREN, regs);
/*
 * .get_ro host op: report write-protect state.
 * Precedence: slot quirk (never write-protected) -> platform get_ro
 * callback -> WP GPIO -> controller WRTPRT register bit for this slot.
 * Returns nonzero when the card is read-only.
 */
1431 static int dw_mci_get_ro(struct mmc_host *mmc)
1434 struct dw_mci_slot *slot = mmc_priv(mmc);
1435 struct dw_mci_board *brd = slot->host->pdata;
1437 /* Use platform get_ro function, else try on board write protect */
1438 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1440 else if(brd->get_ro)
1441 read_only = brd->get_ro(slot->id);
1442 else if(gpio_is_valid(slot->wp_gpio))
1443 read_only = gpio_get_value(slot->wp_gpio);
1446 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1448 dev_dbg(&mmc->class_dev, "card is %s\n",
1449 read_only ? "read-only" : "read-write");
/*
 * .set_sdio_status host op: software-controlled presence for SDIO
 * function cards (e.g. WiFi modules toggled by an rfkill driver).
 * Updates DW_MMC_CARD_PRESENT, gates the controller clocks on/off to
 * match, then schedules card detection.  Only valid for hosts
 * restricted to SDIO.
 */
1454 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1456 struct dw_mci_slot *slot = mmc_priv(mmc);
1457 struct dw_mci *host = slot->host;
1458 /*struct dw_mci_board *brd = slot->host->pdata;*/
1460 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1463 spin_lock_bh(&host->lock);
1466 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1468 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1470 spin_unlock_bh(&host->lock);
/* Enable clocks when present, disable when absent (idempotent). */
1472 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1473 if(__clk_is_enabled(host->hclk_mmc) == false)
1474 clk_prepare_enable(host->hclk_mmc);
1475 if(__clk_is_enabled(host->clk_mmc) == false)
1476 clk_prepare_enable(host->clk_mmc);
1478 if(__clk_is_enabled(host->clk_mmc) == true)
1479 clk_disable_unprepare(slot->host->clk_mmc);
1480 if(__clk_is_enabled(host->hclk_mmc) == true)
1481 clk_disable_unprepare(slot->host->hclk_mmc);
/* 20 ms debounce before rescanning the bus. */
1484 mmc_detect_change(slot->mmc, 20);
/*
 * .get_cd host op: report card presence.
 * On RK312x SD slots the card-detect pin is shared with the JTAG mux:
 * when no card is detected, force_jtag is re-enabled in GRF_SOC_CON0
 * and the controller is reset; when a card is found, force_jtag is
 * disabled so the pins serve the SD card.  The GPIO is sampled twice
 * (debounce) before being trusted.
 * For other paths: SDIO presence comes from the software flag, then
 * platform quirk/callback, then CD GPIO, then the CDETECT register.
 */
1490 static int dw_mci_get_cd(struct mmc_host *mmc)
1493 struct dw_mci_slot *slot = mmc_priv(mmc);
1494 struct dw_mci_board *brd = slot->host->pdata;
1495 struct dw_mci *host = slot->host;
1496 int gpio_cd = mmc_gpio_get_cd(mmc);
1499 if (cpu_is_rk312x() &&
1501 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1502 gpio_cd = slot->cd_gpio;
1503 if (gpio_is_valid(gpio_cd)) {
1504 gpio_val = gpio_get_value_cansleep(gpio_cd);
/* Sample twice; only accept a stable reading (active-low CD). */
1506 if (gpio_val == gpio_get_value_cansleep(gpio_cd)) {
1507 gpio_cd = gpio_get_value_cansleep(gpio_cd) == 0 ? 1 : 0;
1509 /* Enable force_jtag wihtout card in slot, ONLY for NCD-package */
1510 grf_writel((0x1 << 24) | (1 << 8), RK312X_GRF_SOC_CON0);
1511 dw_mci_ctrl_all_reset(host);
1513 /* Really card detected: SHOULD disable force_jtag */
1514 grf_writel((0x1 << 24) | (0 << 8), RK312X_GRF_SOC_CON0);
/* Unstable reading: keep the last reported state. */
1518 return slot->last_detect_state;
1521 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
1525 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1526 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1528 /* Use platform get_cd function, else try onboard card detect */
1529 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1531 else if (brd->get_cd)
1532 present = !brd->get_cd(slot->id);
1533 else if (!IS_ERR_VALUE(gpio_cd))
1536 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1539 spin_lock_bh(&host->lock);
1541 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1542 dev_dbg(&mmc->class_dev, "card is present\n");
1544 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1545 dev_dbg(&mmc->class_dev, "card is not present\n");
1547 spin_unlock_bh(&host->lock);
1554 * The DTS should declare the eMMC controller with the "poll-hw-reset" capability.
/*
 * .hw_reset host op: full eMMC hardware reset sequence.
 * Steps: (1) issue CMD12 to abort any transfer in flight, (2) wait for
 * the data-over interrupt (card may send no response), (3) reset IDMA,
 * DMA and FIFO in that fixed order, (4) toggle PWREN/RST_N with eMMC
 * spec timing (tRstW/tRSCA/tRSTH, see comment below).
 */
1556 static void dw_mci_hw_reset(struct mmc_host *mmc)
1558 struct dw_mci_slot *slot = mmc_priv(mmc);
1559 struct dw_mci *host = slot->host;
1564 unsigned long timeout;
1567 /* (1) CMD12 to end any transfer in process */
1568 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1569 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1571 if(host->mmc->hold_reg_flag)
1572 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1573 mci_writel(host, CMDARG, 0);
1575 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* CMD_START self-clears once the command is accepted; 500 ms budget. */
1577 timeout = jiffies + msecs_to_jiffies(500);
1579 ret = time_before(jiffies, timeout);
1580 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1585 MMC_DBG_ERR_FUNC(host->mmc,
1586 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1587 __func__, mmc_hostname(host->mmc));
1589 /* (2) wait DTO, even if no response is sent back by card */
1591 timeout = jiffies + msecs_to_jiffies(5);
1593 ret = time_before(jiffies, timeout);
1594 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1595 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1601 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1603 /* Software reset - BMOD[0] for IDMA only */
1604 regs = mci_readl(host, BMOD);
1605 regs |= SDMMC_IDMAC_SWRESET;
1606 mci_writel(host, BMOD, regs);
1607 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1608 regs = mci_readl(host, BMOD);
1609 if(regs & SDMMC_IDMAC_SWRESET)
1610 MMC_DBG_WARN_FUNC(host->mmc,
1611 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1612 __func__, mmc_hostname(host->mmc));
1614 /* DMA reset - CTRL[2] */
1615 regs = mci_readl(host, CTRL);
1616 regs |= SDMMC_CTRL_DMA_RESET;
1617 mci_writel(host, CTRL, regs);
1618 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1619 regs = mci_readl(host, CTRL);
1620 if(regs & SDMMC_CTRL_DMA_RESET)
1621 MMC_DBG_WARN_FUNC(host->mmc,
1622 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1623 __func__, mmc_hostname(host->mmc));
1625 /* FIFO reset - CTRL[1] */
1626 regs = mci_readl(host, CTRL);
1627 regs |= SDMMC_CTRL_FIFO_RESET;
1628 mci_writel(host, CTRL, regs);
1629 mdelay(1); /* no timing limited, 1ms is random value */
1630 regs = mci_readl(host, CTRL);
1631 if(regs & SDMMC_CTRL_FIFO_RESET)
/* NOTE(review): copy-paste in this message -- it reports
 * "SDMMC_CTRL_DMA_RESET" but this branch checks the FIFO reset bit. */
1632 MMC_DBG_WARN_FUNC(host->mmc,
1633 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1634 __func__, mmc_hostname(host->mmc));
1637 According to eMMC spec
1638 tRstW >= 1us ; RST_n pulse width
1639 tRSCA >= 200us ; RST_n to Command time
1640 tRSTH >= 1us ; RST_n high period
1642 mci_writel(slot->host, PWREN, 0x0);
1643 mci_writel(slot->host, RST_N, 0x0);
1645 udelay(10); /* 10us for bad quality eMMc. */
1647 mci_writel(slot->host, PWREN, 0x1);
1648 mci_writel(slot->host, RST_N, 0x1);
1650 usleep_range(500, 1000); /* at least 500(> 200us) */
1654 * Disable lower power mode.
1656 * Low power mode will stop the card clock when idle. According to the
1657 * description of the CLKENA register we should disable low power mode
1658 * for SDIO cards if we need SDIO interrupts to work.
1660 * This function is fast if low power mode is already disabled.
/*
 * Clear this slot's CLKEN_LOW_PWR bit in CLKENA (if set) and latch the
 * change with an update-clock command.  No-op (and therefore fast)
 * when low power mode is already off -- see block comment above.
 */
1662 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1664 struct dw_mci *host = slot->host;
1666 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1668 clk_en_a = mci_readl(host, CLKENA);
1670 if (clk_en_a & clken_low_pwr) {
1671 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
/* CLKENA changes take effect only after an update-clock command. */
1672 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1673 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * .enable_sdio_irq host op: (un)mask this slot's SDIO interrupt in
 * INTMASK.  IP versions before 2.40a use a different SDIO interrupt
 * bit layout (no +8 offset).  Enabling also forces low power mode off,
 * since a stopped card clock would suppress SDIO interrupts.
 */
1677 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1679 struct dw_mci_slot *slot = mmc_priv(mmc);
1680 struct dw_mci *host = slot->host;
1684 /* Enable/disable Slot Specific SDIO interrupt */
1685 int_mask = mci_readl(host, INTMASK);
1687 if (host->verid < DW_MMC_240A)
1688 sdio_int = SDMMC_INT_SDIO(slot->id);
1690 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1694 * Turn off low power mode if it was enabled. This is a bit of
1695 * a heavy operation and we disable / enable IRQs a lot, so
1696 * we'll leave low power mode disabled and it will get
1697 * re-enabled again in dw_mci_setup_bus().
1699 dw_mci_disable_low_power(slot);
1701 mci_writel(host, INTMASK,
1702 (int_mask | sdio_int));
1704 mci_writel(host, INTMASK,
1705 (int_mask & ~sdio_int));
1709 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1711 IO_DOMAIN_12 = 1200,
1712 IO_DOMAIN_18 = 1800,
1713 IO_DOMAIN_33 = 3300,
/*
 * Program the SoC GRF so the SD IO domain matches the requested
 * voltage (one of the IO_DOMAIN_* enum values, in mV).  Only RK3288 is
 * handled here; unsupported voltages or chips are logged and ignored.
 * NOTE(review): bit 23 in RK3288_GRF_IO_VSEL appears to be the
 * write-enable mask for the vsel bit -- confirm against the TRM.
 */
1715 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1725 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1726 __FUNCTION__, mmc_hostname(host->mmc));
1729 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1730 __FUNCTION__, mmc_hostname(host->mmc));
1734 if(cpu_is_rk3288()){
1735 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1736 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1740 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1741 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * Perform the actual signal voltage switch for 3.3 V / 1.8 V / 1.2 V.
 * For each target: set the vmmc regulator, switch the GRF IO domain,
 * update UHS_REG's 1.8 V enable bit, then wait ~5 ms and read the bit
 * back to confirm the switch took effect.  Requires IP >= 2.40a.
 */
1745 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1746 struct mmc_ios *ios)
1749 unsigned int value,uhs_reg;
1752 * Signal Voltage Switching is only applicable for Host Controllers
1755 if (host->verid < DW_MMC_240A)
1758 uhs_reg = mci_readl(host, UHS_REG);
1759 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1760 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1762 switch (ios->signal_voltage) {
1763 case MMC_SIGNAL_VOLTAGE_330:
1764 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1766 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1767 /* regulator_put(host->vmmc); //to be done in remove function. */
1769 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1770 __func__, regulator_get_voltage(host->vmmc), ret);
1772 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1773 " failed\n", mmc_hostname(host->mmc));
1776 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1778 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1779 __FUNCTION__, mmc_hostname(host->mmc));
1781 /* set High-power mode */
1782 value = mci_readl(host, CLKENA);
1783 value &= ~SDMMC_CLKEN_LOW_PWR;
1784 mci_writel(host,CLKENA , value);
/* Clear the 1.8 V enable bit to select 3.3 V signalling. */
1786 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1787 mci_writel(host,UHS_REG , uhs_reg);
1790 usleep_range(5000, 5500);
1792 /* 3.3V regulator output should be stable within 5 ms */
1793 uhs_reg = mci_readl(host, UHS_REG);
1794 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1797 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1798 mmc_hostname(host->mmc));
1801 case MMC_SIGNAL_VOLTAGE_180:
1803 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1804 /* regulator_put(host->vmmc);//to be done in remove function. */
1806 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1807 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1809 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1810 " failed\n", mmc_hostname(host->mmc));
1813 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1817 * Enable 1.8V Signal Enable in the Host Control2
1820 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1823 usleep_range(5000, 5500);
1824 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1825 __FUNCTION__,mmc_hostname(host->mmc));
1827 /* 1.8V regulator output should be stable within 5 ms */
1828 uhs_reg = mci_readl(host, UHS_REG);
1829 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1832 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1833 mmc_hostname(host->mmc));
1836 case MMC_SIGNAL_VOLTAGE_120:
1838 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1840 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1841 " failed\n", mmc_hostname(host->mmc));
1847 /* No signal voltage switch required */
/*
 * .start_signal_voltage_switch host op: thin wrapper that bails out on
 * controllers older than 2.40a and otherwise delegates to
 * dw_mci_do_start_signal_voltage_switch().
 */
1853 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1854 struct mmc_ios *ios)
1856 struct dw_mci_slot *slot = mmc_priv(mmc);
1857 struct dw_mci *host = slot->host;
1860 if (host->verid < DW_MMC_240A)
1863 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * .execute_tuning host op: select the tuning block pattern matching
 * the opcode and bus width, pick the clock controller id by card type
 * (eMMC=3, SDIO=1, SD=0), and hand off to the SoC-specific tuning
 * routine.  Skipped on RK3036/RK312x, which lack a 1.8 V IO domain.
 */
1869 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1871 struct dw_mci_slot *slot = mmc_priv(mmc);
1872 struct dw_mci *host = slot->host;
1873 const struct dw_mci_drv_data *drv_data = host->drv_data;
1874 struct dw_mci_tuning_data tuning_data;
1877 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1878 if(cpu_is_rk3036() || cpu_is_rk312x())
1881 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1882 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1883 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1884 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1885 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1886 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1887 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1891 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1892 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1893 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1896 "Undefined command(%d) for tuning\n", opcode);
1901 /* Recommend sample phase and delayline
1902 Fixme: Mix-use these three controllers will cause
1905 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1906 tuning_data.con_id = 3;
1907 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1908 tuning_data.con_id = 1;
1910 tuning_data.con_id = 0;
1912 /* 0: driver, from host->devices
1913 1: sample, from devices->host
1915 tuning_data.tuning_type = 1;
1917 if (drv_data && drv_data->execute_tuning)
1918 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/*
 * Host operations table registered with the MMC core; the voltage
 * switch ops are only wired up when the Rockchip switch-voltage
 * config option is enabled.
 */
1923 static const struct mmc_host_ops dw_mci_ops = {
1924 .request = dw_mci_request,
1925 .pre_req = dw_mci_pre_req,
1926 .post_req = dw_mci_post_req,
1927 .set_ios = dw_mci_set_ios,
1928 .get_ro = dw_mci_get_ro,
1929 .get_cd = dw_mci_get_cd,
1930 .set_sdio_status = dw_mci_set_sdio_status,
1931 .hw_reset = dw_mci_hw_reset,
1932 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1933 .execute_tuning = dw_mci_execute_tuning,
1934 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1935 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
1936 .card_busy = dw_mci_card_busy,
/*
 * Enable or disable the controller's IRQ line, tracking the current
 * state in host->irq_state so repeated calls with the same flag do not
 * unbalance the enable_irq()/disable_irq() nesting count.
 */
1941 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
1943 unsigned long flags;
1948 local_irq_save(flags);
1949 if(host->irq_state != irqflag)
1951 host->irq_state = irqflag;
1954 enable_irq(host->irq);
1958 disable_irq(host->irq);
1961 local_irq_restore(flags);
/*
 * Post-process the end of a data transfer and wait for the controller
 * to go idle.  For write transfers (dir_status == DW_MCI_SEND_STATUS),
 * excluding the CMD19 bus test, map latched data-status bits to errors:
 * DCRC -> -EILSEQ, EBE -> -ETIMEDOUT.
 * NOTE(review): mapping end-bit error to -ETIMEDOUT (not -EILSEQ)
 * looks deliberate here -- confirm against the error-handling policy.
 */
1965 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
1966 __releases(&host->lock)
1967 __acquires(&host->lock)
1969 if(DW_MCI_SEND_STATUS == host->dir_status){
1971 if( MMC_BUS_TEST_W != host->cmd->opcode){
1972 if(host->data_status & SDMMC_INT_DCRC)
1973 host->data->error = -EILSEQ;
1974 else if(host->data_status & SDMMC_INT_EBE)
1975 host->data->error = -ETIMEDOUT;
1977 dw_mci_wait_unbusy(host);
1980 dw_mci_wait_unbusy(host);
/*
 * Finish the current request: stop the DTO timer, settle the data
 * path, clear the slot's mrq, then either start the next queued slot's
 * request or return the state machine to idle.  Finally completes the
 * request to the MMC core with host->lock dropped, since
 * mmc_request_done() may re-enter the driver.
 */
1985 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1986 __releases(&host->lock)
1987 __acquires(&host->lock)
1989 struct dw_mci_slot *slot;
1990 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1992 WARN_ON(host->cmd || host->data);
1994 del_timer_sync(&host->dto_timer);
1995 dw_mci_deal_data_end(host, mrq);
1998 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
1999 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2001 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2002 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2004 host->cur_slot->mrq = NULL;
/* Start the next queued slot, if any; otherwise go idle. */
2006 if (!list_empty(&host->queue)) {
2007 slot = list_entry(host->queue.next,
2008 struct dw_mci_slot, queue_node);
2009 list_del(&slot->queue_node);
2010 dev_vdbg(host->dev, "list not empty: %s is next\n",
2011 mmc_hostname(slot->mmc));
2012 host->state = STATE_SENDING_CMD;
2013 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2014 dw_mci_start_request(host, slot);
2016 dev_vdbg(host->dev, "list empty\n");
2017 host->state = STATE_IDLE;
/* Drop the lock around the core callback to avoid deadlock. */
2020 spin_unlock(&host->lock);
2021 mmc_request_done(prev_mmc, mrq);
2022 spin_lock(&host->lock);
/*
 * Read the command response registers and translate latched status
 * bits into cmd->error: response timeout -> -ETIMEDOUT, response CRC
 * error -> -EILSEQ; the DTO timer is cancelled on each error path.
 * For a 136-bit (long) response, RESP0..RESP3 are read in reverse
 * order into resp[3..0].
 */
2025 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2027 u32 status = host->cmd_status;
2029 host->cmd_status = 0;
2031 /* Read the response from the card (up to 16 bytes) */
2032 if (cmd->flags & MMC_RSP_PRESENT) {
2033 if (cmd->flags & MMC_RSP_136) {
2034 cmd->resp[3] = mci_readl(host, RESP0);
2035 cmd->resp[2] = mci_readl(host, RESP1);
2036 cmd->resp[1] = mci_readl(host, RESP2);
2037 cmd->resp[0] = mci_readl(host, RESP3);
2039 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2040 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2042 cmd->resp[0] = mci_readl(host, RESP0);
2046 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2047 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
/* Response timeout: SDIO gets special handling (lines elided). */
2051 if (status & SDMMC_INT_RTO)
2053 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2056 cmd->error = -ETIMEDOUT;
2057 del_timer_sync(&host->dto_timer);
2058 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2059 del_timer_sync(&host->dto_timer);
2060 cmd->error = -EILSEQ;
2061 }else if (status & SDMMC_INT_RESP_ERR){
2062 del_timer_sync(&host->dto_timer);
2067 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2068 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* CMD13 response timeouts are not logged until the hold threshold. */
2071 del_timer_sync(&host->dto_timer);
2072 if(MMC_SEND_STATUS != cmd->opcode)
2073 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2074 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2075 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2079 /* newer ip versions need a delay between retries */
2080 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine, run as a tasklet with host->lock held.
 * Consumes the event bits set by the interrupt handler and walks the
 * request through: SENDING_CMD -> SENDING_DATA -> DATA_BUSY ->
 * SENDING_STOP / DATA_ERROR, looping while the state keeps advancing.
 * On a data error during transfer it issues a manual CMD12 (stop) to
 * keep the controller from halting, then waits for the command start
 * bit to self-clear.
 */
2086 static void dw_mci_tasklet_func(unsigned long priv)
2088 struct dw_mci *host = (struct dw_mci *)priv;
2089 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2090 struct mmc_data *data;
2091 struct mmc_command *cmd;
2092 enum dw_mci_state state;
2093 enum dw_mci_state prev_state;
2094 u32 status, cmd_flags;
2095 unsigned long timeout = 0;
2098 spin_lock(&host->lock);
2100 state = host->state;
2110 case STATE_SENDING_CMD:
2111 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2112 &host->pending_events))
2117 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2118 dw_mci_command_complete(host, cmd);
/* sbc (CMD23) done: now start the real data command. */
2119 if (cmd == host->mrq->sbc && !cmd->error) {
2120 prev_state = state = STATE_SENDING_CMD;
2121 __dw_mci_start_request(host, host->cur_slot,
2126 if (cmd->data && cmd->error) {
2127 del_timer_sync(&host->dto_timer); /* delete the timer for INT_DTO */
2128 dw_mci_stop_dma(host);
2131 send_stop_cmd(host, data);
2132 state = STATE_SENDING_STOP;
2138 send_stop_abort(host, data);
2139 state = STATE_SENDING_STOP;
2142 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2145 if (!host->mrq->data || cmd->error) {
2146 dw_mci_request_end(host, host->mrq);
2150 prev_state = state = STATE_SENDING_DATA;
2153 case STATE_SENDING_DATA:
2154 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2155 dw_mci_stop_dma(host);
2158 send_stop_cmd(host, data);
2160 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2161 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2162 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
2164 mci_writel(host, CMDARG, 0);
2166 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2167 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2169 if(host->mmc->hold_reg_flag)
2170 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2172 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait up to 500 ms for CMD_START to self-clear. */
2174 timeout = jiffies + msecs_to_jiffies(500);
2177 ret = time_before(jiffies, timeout);
2178 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2182 MMC_DBG_ERR_FUNC(host->mmc,
2183 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2184 __func__, mmc_hostname(host->mmc));
2187 send_stop_abort(host, data);
2189 state = STATE_DATA_ERROR;
2193 MMC_DBG_CMD_FUNC(host->mmc,
2194 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2195 prev_state,state, mmc_hostname(host->mmc));
2197 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2198 &host->pending_events))
2200 MMC_DBG_INFO_FUNC(host->mmc,
2201 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2202 prev_state,state,mmc_hostname(host->mmc));
2204 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2205 prev_state = state = STATE_DATA_BUSY;
2208 case STATE_DATA_BUSY:
2209 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2210 &host->pending_events))
2213 dw_mci_deal_data_end(host, host->mrq);
2214 del_timer_sync(&host->dto_timer); //delete the timer for INT_DTO
2215 MMC_DBG_INFO_FUNC(host->mmc,
2216 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2217 prev_state,state,mmc_hostname(host->mmc));
2220 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2221 status = host->data_status;
/* Map latched data-error interrupt bits to data->error. */
2223 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2224 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2225 MMC_DBG_ERR_FUNC(host->mmc,
2226 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2227 prev_state,state, status, mmc_hostname(host->mmc));
2229 if (status & SDMMC_INT_DRTO) {
2230 data->error = -ETIMEDOUT;
2231 } else if (status & SDMMC_INT_DCRC) {
2232 data->error = -EILSEQ;
2233 } else if (status & SDMMC_INT_EBE &&
2234 host->dir_status == DW_MCI_SEND_STATUS){
2236 * No data CRC status was returned.
2237 * The number of bytes transferred will
2238 * be exaggerated in PIO mode.
2240 data->bytes_xfered = 0;
2241 data->error = -ETIMEDOUT;
2250 * After an error, there may be data lingering
2251 * in the FIFO, so reset it - doing so
2252 * generates a block interrupt, hence setting
2253 * the scatter-gather pointer to NULL.
2255 dw_mci_fifo_reset(host);
2257 data->bytes_xfered = data->blocks * data->blksz;
2262 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2263 prev_state,state,mmc_hostname(host->mmc));
2264 dw_mci_request_end(host, host->mrq);
2267 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2268 prev_state,state,mmc_hostname(host->mmc));
/* With sbc the card stops by itself; no CMD12 needed. */
2270 if (host->mrq->sbc && !data->error) {
2271 data->stop->error = 0;
2273 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2274 prev_state,state,mmc_hostname(host->mmc));
2276 dw_mci_request_end(host, host->mrq);
2280 prev_state = state = STATE_SENDING_STOP;
2282 send_stop_cmd(host, data);
2284 if (data->stop && !data->error) {
2285 /* stop command for open-ended transfer*/
2287 send_stop_abort(host, data);
2291 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2292 prev_state,state,mmc_hostname(host->mmc));
2294 case STATE_SENDING_STOP:
2295 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2298 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2299 prev_state, state, mmc_hostname(host->mmc));
2301 /* CMD error in data command */
2302 if (host->mrq->cmd->error && host->mrq->data) {
2303 dw_mci_fifo_reset(host);
2309 dw_mci_command_complete(host, host->mrq->stop);
2311 if (host->mrq->stop)
2312 dw_mci_command_complete(host, host->mrq->stop);
2314 host->cmd_status = 0;
2317 dw_mci_request_end(host, host->mrq);
2320 case STATE_DATA_ERROR:
2321 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2322 &host->pending_events))
2325 state = STATE_DATA_BUSY;
2328 } while (state != prev_state);
2330 host->state = state;
2332 spin_unlock(&host->lock);
2336 /* push final bytes to part_buf, only use during push */
/*
 * Stash cnt trailing bytes (less than one FIFO word) in host->part_buf
 * for the next push -- see comment above; push path only.
 */
2337 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2339 memcpy((void *)&host->part_buf, buf, cnt);
2340 host->part_buf_count = cnt;
2343 /* append bytes to part_buf, only use during push */
/*
 * Append up to cnt bytes to host->part_buf, capped at the remaining
 * space in one FIFO word (1 << data_shift).  Returns the number of
 * bytes actually consumed; push path only.
 */
2344 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2346 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2347 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2348 host->part_buf_count += cnt;
2352 /* pull first bytes from part_buf, only use during pull */
/*
 * Copy up to cnt buffered bytes out of host->part_buf into buf,
 * advancing part_buf_start.  Returns the number of bytes delivered;
 * pull path only.
 */
2353 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2355 cnt = min(cnt, (int)host->part_buf_count);
2357 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2359 host->part_buf_count -= cnt;
2360 host->part_buf_start += cnt;
2365 /* pull final bytes from the part_buf, assuming it's just been filled */
/*
 * Take cnt bytes from a freshly filled part_buf and leave the
 * remainder of the FIFO word buffered for the next pull.
 */
2366 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2368 memcpy(buf, &host->part_buf, cnt);
2369 host->part_buf_start = cnt;
2370 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO push for a 16-bit FIFO: drain any partial word first, handle
 * unaligned buffers by bouncing through an aligned scratch array
 * (when the arch lacks efficient unaligned access), write whole u16
 * words, then buffer any leftover byte -- flushing it immediately if
 * this completes the expected transfer length.
 */
2373 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2375 struct mmc_data *data = host->data;
2378 /* try and push anything in the part_buf */
2379 if (unlikely(host->part_buf_count)) {
2380 int len = dw_mci_push_part_bytes(host, buf, cnt);
2383 if (host->part_buf_count == 2) {
2384 mci_writew(host, DATA(host->data_offset),
2386 host->part_buf_count = 0;
2389 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2390 if (unlikely((unsigned long)buf & 0x1)) {
2392 u16 aligned_buf[64];
2393 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2394 int items = len >> 1;
2396 /* memcpy from input buffer into aligned buffer */
2397 memcpy(aligned_buf, buf, len);
2400 /* push data from aligned buffer into fifo */
2401 for (i = 0; i < items; ++i)
2402 mci_writew(host, DATA(host->data_offset),
2409 for (; cnt >= 2; cnt -= 2)
2410 mci_writew(host, DATA(host->data_offset), *pdata++);
2413 /* put anything remaining in the part_buf */
2415 dw_mci_set_part_bytes(host, buf, cnt);
2416 /* Push data if we have reached the expected data length */
2417 if ((data->bytes_xfered + init_cnt) ==
2418 (data->blksz * data->blocks))
2419 mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit FIFO: mirror of dw_mci_push_data16 -- bounce
 * through an aligned scratch array on unaligned buffers, read whole
 * u16 words, then buffer the trailing partial word in part_buf16.
 */
2424 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2426 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2427 if (unlikely((unsigned long)buf & 0x1)) {
2429 /* pull data from fifo into aligned buffer */
2430 u16 aligned_buf[64];
2431 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2432 int items = len >> 1;
2434 for (i = 0; i < items; ++i)
2435 aligned_buf[i] = mci_readw(host,
2436 DATA(host->data_offset));
2437 /* memcpy from aligned buffer into output buffer */
2438 memcpy(buf, aligned_buf, len);
2446 for (; cnt >= 2; cnt -= 2)
2447 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Odd trailing byte: read one more word, keep the surplus buffered. */
2451 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2452 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit FIFO: same structure as dw_mci_push_data16
 * but with u32 words (4-byte alignment, 32-entry scratch array).
 */
2456 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2458 struct mmc_data *data = host->data;
2461 /* try and push anything in the part_buf */
2462 if (unlikely(host->part_buf_count)) {
2463 int len = dw_mci_push_part_bytes(host, buf, cnt);
2466 if (host->part_buf_count == 4) {
2467 mci_writel(host, DATA(host->data_offset),
2469 host->part_buf_count = 0;
2472 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2473 if (unlikely((unsigned long)buf & 0x3)) {
2475 u32 aligned_buf[32];
2476 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2477 int items = len >> 2;
2479 /* memcpy from input buffer into aligned buffer */
2480 memcpy(aligned_buf, buf, len);
2483 /* push data from aligned buffer into fifo */
2484 for (i = 0; i < items; ++i)
2485 mci_writel(host, DATA(host->data_offset),
2492 for (; cnt >= 4; cnt -= 4)
2493 mci_writel(host, DATA(host->data_offset), *pdata++);
2496 /* put anything remaining in the part_buf */
2498 dw_mci_set_part_bytes(host, buf, cnt);
2499 /* Push data if we have reached the expected data length */
2500 if ((data->bytes_xfered + init_cnt) ==
2501 (data->blksz * data->blocks))
2502 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit FIFO: mirror of dw_mci_push_data32; trailing
 * partial word is buffered in part_buf32.
 */
2507 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2509 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2510 if (unlikely((unsigned long)buf & 0x3)) {
2512 /* pull data from fifo into aligned buffer */
2513 u32 aligned_buf[32];
2514 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2515 int items = len >> 2;
2517 for (i = 0; i < items; ++i)
2518 aligned_buf[i] = mci_readl(host,
2519 DATA(host->data_offset));
2520 /* memcpy from aligned buffer into output buffer */
2521 memcpy(buf, aligned_buf, len);
2529 for (; cnt >= 4; cnt -= 4)
2530 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Trailing bytes: read one more word, keep the surplus buffered. */
2534 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2535 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * 64-bit FIFO variant of dw_mci_push_data32(): pushes @cnt bytes from
 * @buf with mci_writeq(). Same structure — drain part_buf, bounce
 * buffer for unaligned input, stream aligned quadwords, stash the
 * tail (<8 bytes), flushing early if the transfer is complete.
 */
2539 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2541 struct mmc_data *data = host->data;
2544 /* try and push anything in the part_buf */
2545 if (unlikely(host->part_buf_count)) {
2546 int len = dw_mci_push_part_bytes(host, buf, cnt);
/* a full 8-byte word accumulated: write it and reset */
2550 if (host->part_buf_count == 8) {
2551 mci_writeq(host, DATA(host->data_offset),
2553 host->part_buf_count = 0;
2556 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2557 if (unlikely((unsigned long)buf & 0x7)) {
/* bounce buffer for 8-byte alignment */
2559 u64 aligned_buf[16];
2560 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2561 int items = len >> 3;
2563 /* memcpy from input buffer into aligned buffer */
2564 memcpy(aligned_buf, buf, len);
2567 /* push data from aligned buffer into fifo */
2568 for (i = 0; i < items; ++i)
2569 mci_writeq(host, DATA(host->data_offset),
/* aligned fast path: write 64-bit words straight from the buffer */
2576 for (; cnt >= 8; cnt -= 8)
2577 mci_writeq(host, DATA(host->data_offset), *pdata++);
2580 /* put anything remaining in the part_buf */
2582 dw_mci_set_part_bytes(host, buf, cnt);
2583 /* Push data if we have reached the expected data length */
2584 if ((data->bytes_xfered + init_cnt) ==
2585 (data->blksz * data->blocks))
2586 mci_writeq(host, DATA(host->data_offset),
/*
 * 64-bit FIFO variant of dw_mci_pull_data32(): reads @cnt bytes into
 * @buf with mci_readq(). Unaligned destinations use a bounce buffer;
 * the final partial quadword lands in host->part_buf and is handed
 * out by dw_mci_pull_final_bytes().
 */
2591 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2593 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2594 if (unlikely((unsigned long)buf & 0x7)) {
2596 /* pull data from fifo into aligned buffer */
2597 u64 aligned_buf[16];
2598 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2599 int items = len >> 3;
2601 for (i = 0; i < items; ++i)
2602 aligned_buf[i] = mci_readq(host,
2603 DATA(host->data_offset));
2604 /* memcpy from aligned buffer into output buffer */
2605 memcpy(buf, aligned_buf, len);
/* aligned fast path: read whole 64-bit words directly */
2613 for (; cnt >= 8; cnt -= 8)
2614 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* fewer than 8 bytes left: buffer one quadword for the remainder */
2618 host->part_buf = mci_readq(host, DATA(host->data_offset));
2619 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Generic pull entry point: first hand out bytes left over in
 * part_buf from a previous FIFO read; if that satisfies the request,
 * stop, otherwise delegate to the width-specific host->pull_data()
 * (16/32/64-bit, selected in dw_mci_probe from HCON).
 */
2623 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2627 /* get remaining partial bytes */
2628 len = dw_mci_pull_part_bytes(host, buf, cnt);
2629 if (unlikely(len == cnt))
2634 /* get the rest of the data */
2635 host->pull_data(host, buf, cnt);
/*
 * PIO read path, called from the interrupt handler on RXDR (FIFO
 * watermark) and on data-over (@dto = true). Walks the request's
 * scatterlist with an sg_mapping_iter, draining min(segment remain,
 * FIFO byte count) per pass, and loops while RXDR stays asserted —
 * or, for the final DTO pass, while the FIFO still reports data.
 * On scatterlist exhaustion it stops the iterator and marks the
 * transfer complete.
 */
2638 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2640 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2642 unsigned int offset;
2643 struct mmc_data *data = host->data;
2644 int shift = host->data_shift;
2647 unsigned int remain, fcnt;
/* guard: host may already have been released (bus ref count zero) */
2649 if(!host->mmc->bus_refs){
2650 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2654 if (!sg_miter_next(sg_miter))
2657 host->sg = sg_miter->piter.sg;
2658 buf = sg_miter->addr;
2659 remain = sg_miter->length;
/* FIFO occupancy in bytes (FCNT is in FIFO words, hence << shift),
 * plus whatever is already buffered in part_buf */
2663 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2664 << shift) + host->part_buf_count;
2665 len = min(remain, fcnt);
2668 dw_mci_pull_data(host, (void *)(buf + offset), len);
2669 data->bytes_xfered += len;
2674 sg_miter->consumed = offset;
2675 status = mci_readl(host, MINTSTS);
2676 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2677 /* if the RXDR is ready read again */
2678 } while ((status & SDMMC_INT_RXDR) ||
2679 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2682 if (!sg_miter_next(sg_miter))
2684 sg_miter->consumed = 0;
2686 sg_miter_stop(sg_miter);
2690 sg_miter_stop(sg_miter);
/* whole scatterlist drained: signal transfer completion to tasklet */
2694 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO write path, called from the interrupt handler on TXDR (FIFO
 * watermark). Mirrors dw_mci_read_data_pio(): walks the scatterlist,
 * pushing min(segment remain, free FIFO space) each pass, and loops
 * while TXDR stays asserted. Free space is computed from fifo_depth
 * minus current FCNT, converted to bytes, minus bytes already staged
 * in part_buf.
 */
2697 static void dw_mci_write_data_pio(struct dw_mci *host)
2699 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2701 unsigned int offset;
2702 struct mmc_data *data = host->data;
2703 int shift = host->data_shift;
2706 unsigned int fifo_depth = host->fifo_depth;
2707 unsigned int remain, fcnt;
/* guard: host may already have been released (bus ref count zero) */
2709 if(!host->mmc->bus_refs){
2710 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2715 if (!sg_miter_next(sg_miter))
2718 host->sg = sg_miter->piter.sg;
2719 buf = sg_miter->addr;
2720 remain = sg_miter->length;
/* free FIFO space in bytes, less the part_buf backlog */
2724 fcnt = ((fifo_depth -
2725 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2726 << shift) - host->part_buf_count;
2727 len = min(remain, fcnt);
2730 host->push_data(host, (void *)(buf + offset), len);
2731 data->bytes_xfered += len;
2736 sg_miter->consumed = offset;
2737 status = mci_readl(host, MINTSTS);
2738 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2739 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2742 if (!sg_miter_next(sg_miter))
2744 sg_miter->consumed = 0;
2746 sg_miter_stop(sg_miter);
2750 sg_miter_stop(sg_miter);
/* whole scatterlist consumed: signal transfer completion to tasklet */
2754 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Handle a command-done (or voltage-switch) interrupt: record the raw
 * status, arm the data-timeout watchdog for non-STOP commands, then
 * flag CMD_COMPLETE and kick the state-machine tasklet.
 *
 * The dto_timer budget scales with the transfer size: one 4-second
 * tick per 2 MiB of BYTCNT (rounded up), plus up to 2 extra ticks
 * for command retries. The "8s" comment reflects the typical
 * two-unit case.
 */
2757 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2759 u32 multi, unit = SZ_2M;
/* keep the first status seen; don't overwrite an earlier error */
2761 if (!host->cmd_status)
2762 host->cmd_status = status;
2767 if ((MMC_STOP_TRANSMISSION != host->cmd->opcode)) {
2768 multi = (mci_readl(host, BYTCNT) / unit) +
2769 ((mci_readl(host, BYTCNT) % unit) ? 1 :0 ) +
2770 ((host->cmd->retries > 2) ? 2 : host->cmd->retries);
2771 /* Max limit time: 8s for dto */
2772 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(4000 * multi));
2777 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2778 tasklet_schedule(&host->tasklet);
/*
 * Top-level interrupt handler. Reads MINTSTS (masked status) and
 * dispatches each pending source in turn, acking via RINTSTS:
 *   - command errors -> cmd_status + CMD_COMPLETE event
 *   - data errors    -> data_status + DATA_ERROR event + tasklet
 *   - data-over      -> cancel dto watchdog, final PIO drain if
 *                       receiving, DATA_COMPLETE event + tasklet
 *   - RXDR/TXDR      -> PIO read/write continuation
 *   - VSI / CMD_DONE -> dw_mci_cmd_interrupt()
 *   - card detect    -> wake lock + card_workqueue
 *   - HLE            -> log only
 *   - per-slot SDIO interrupts -> mmc_signal_sdio_irq()
 * Finally, for SoCs using the internal IDMAC (not rk3036/rk312x,
 * which use an external DMAC), acks and completes IDSTS DMA events.
 */
2781 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2783 struct dw_mci *host = dev_id;
2784 u32 pending, sdio_int;
2787 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2790 * DTO fix - version 2.10a and below, and only if internal DMA
/* quirk: synthesize DATA_OVER when the FIFO count shows data left */
2793 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2795 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2796 pending |= SDMMC_INT_DATA_OVER;
2800 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2801 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2802 host->cmd_status = pending;
2804 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2805 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2807 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2810 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2811 /* if there is an error report DATA_ERROR */
2812 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2813 host->data_status = pending;
2815 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2817 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2818 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2819 tasklet_schedule(&host->tasklet);
2822 if (pending & SDMMC_INT_DATA_OVER) {
2823 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2824 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2825 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2826 if (!host->data_status)
2827 host->data_status = pending;
/* receive direction: drain whatever is still in the FIFO (dto=true) */
2829 if (host->dir_status == DW_MCI_RECV_STATUS) {
2830 if (host->sg != NULL)
2831 dw_mci_read_data_pio(host, true);
2833 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2834 tasklet_schedule(&host->tasklet);
2837 if (pending & SDMMC_INT_RXDR) {
2838 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2839 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2840 dw_mci_read_data_pio(host, false);
2843 if (pending & SDMMC_INT_TXDR) {
2844 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2845 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2846 dw_mci_write_data_pio(host);
2849 if (pending & SDMMC_INT_VSI) {
2850 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2851 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2852 dw_mci_cmd_interrupt(host, pending);
2855 if (pending & SDMMC_INT_CMD_DONE) {
2856 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2857 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2858 dw_mci_cmd_interrupt(host, pending);
2861 if (pending & SDMMC_INT_CD) {
2862 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2863 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
/* hold a 5s wake lock so the detect work can run before suspend */
2864 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2865 queue_work(host->card_workqueue, &host->card_work);
2868 if (pending & SDMMC_INT_HLE) {
2869 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2870 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2874 /* Handle SDIO Interrupts */
2875 for (i = 0; i < host->num_slots; i++) {
2876 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in IP version 2.40a */
2878 if (host->verid < DW_MMC_240A)
2879 sdio_int = SDMMC_INT_SDIO(i);
2881 sdio_int = SDMMC_INT_SDIO(i + 8);
2883 if (pending & sdio_int) {
2884 mci_writel(host, RINTSTS, sdio_int);
2885 mmc_signal_sdio_irq(slot->mmc);
2891 #ifdef CONFIG_MMC_DW_IDMAC
2892 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2893 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2894 /* Handle DMA interrupts */
2895 pending = mci_readl(host, IDSTS);
2896 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2897 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2898 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2899 host->dma_ops->complete((void *)host);
/*
 * Workqueue handler for card insert/remove (queued from the CD
 * interrupt). For every slot it compares the current card-detect
 * state with the last recorded one and, while they differ:
 *   - wakes the system, takes host->lock, cancels the dto watchdog,
 *   - fails any in-flight or queued request with -ENOMEDIUM,
 *     unwinding according to the current state machine state,
 *   - resets the FIFO (and IDMAC on internal-DMA SoCs),
 *   - re-reads the detect state in case it changed again.
 * Finally notifies the MMC core via mmc_detect_change() with the
 * platform's debounce delay.
 */
2907 static void dw_mci_work_routine_card(struct work_struct *work)
2909 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2912 for (i = 0; i < host->num_slots; i++) {
2913 struct dw_mci_slot *slot = host->slot[i];
2914 struct mmc_host *mmc = slot->mmc;
2915 struct mmc_request *mrq;
2918 present = dw_mci_get_cd(mmc);
/* loop: state may flip again while we are cleaning up */
2919 while (present != slot->last_detect_state) {
2920 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2921 present ? "inserted" : "removed");
2922 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
2923 present ? "inserted" : "removed.", mmc_hostname(mmc));
2925 rk_send_wakeup_key();//wake up system
2926 spin_lock_bh(&host->lock);
2928 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2929 /* Card change detected */
2930 slot->last_detect_state = present;
2932 /* Clean up queue if present */
2935 if (mrq == host->mrq) {
/* the active request: unwind per state-machine position */
2939 switch (host->state) {
2942 case STATE_SENDING_CMD:
2943 mrq->cmd->error = -ENOMEDIUM;
2947 case STATE_SENDING_DATA:
2948 mrq->data->error = -ENOMEDIUM;
2949 dw_mci_stop_dma(host);
2951 case STATE_DATA_BUSY:
2952 case STATE_DATA_ERROR:
2953 if (mrq->data->error == -EINPROGRESS)
2954 mrq->data->error = -ENOMEDIUM;
2958 case STATE_SENDING_STOP:
2959 mrq->stop->error = -ENOMEDIUM;
2963 dw_mci_request_end(host, mrq);
/* a queued (not yet started) request: fail it outright */
2965 list_del(&slot->queue_node);
2966 mrq->cmd->error = -ENOMEDIUM;
2968 mrq->data->error = -ENOMEDIUM;
2970 mrq->stop->error = -ENOMEDIUM;
2972 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
2973 mrq->cmd->opcode, mmc_hostname(mmc));
/* drop the lock around the core callback to avoid deadlock */
2975 spin_unlock(&host->lock);
2976 mmc_request_done(slot->mmc, mrq);
2977 spin_lock(&host->lock);
2981 /* Power down slot */
2983 /* Clear down the FIFO */
2984 dw_mci_fifo_reset(host);
2985 #ifdef CONFIG_MMC_DW_IDMAC
2986 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
2987 dw_mci_idmac_reset(host);
2992 spin_unlock_bh(&host->lock);
2994 present = dw_mci_get_cd(mmc);
2997 mmc_detect_change(slot->mmc,
2998 msecs_to_jiffies(host->pdata->detect_delay_ms));
3003 /* given a slot id, find out the device node representing that slot */
3004 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3006 struct device_node *np;
/* no device tree attached: nothing to look up */
3010 if (!dev || !dev->of_node)
/* a slot child node is matched by its "reg" property == slot id */
3013 for_each_child_of_node(dev->of_node, np) {
3014 addr = of_get_property(np, "reg", &len);
3015 if (!addr || (len < sizeof(int)))
3017 if (be32_to_cpup(addr) == slot)
/* Table mapping per-slot DT property names to slot quirk flags. */
3023 static struct dw_mci_of_slot_quirks {
3026 } of_slot_quirks[] = {
3028 .quirk = "disable-wp",
3029 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Collect the quirk flags declared in the slot's DT node by scanning
 * the of_slot_quirks table; returns the OR of all matching ids.
 */
3033 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3035 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3040 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3041 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3042 quirks |= of_slot_quirks[idx].id;
3047 /* find out bus-width for a given slot */
3048 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
/* NOTE(review): reads "bus-width" from the controller node, not the
 * per-slot child node (the slot lookup is commented out) */
3050 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3056 if (of_property_read_u32(np, "bus-width", &bus_wd))
3057 dev_err(dev, "bus-width property not found, assuming width"
3063 /* find the pwr-en gpio for a given slot; or -1 if none specified */
3064 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3066 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3072 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3074 /* Having a missing entry is valid; return silently */
3075 if (!gpio_is_valid(gpio))
3078 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3079 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/* drive low initially = power enabled per board wiring comment */
3083 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3089 /* find the write protect gpio for a given slot; or -1 if none specified */
3090 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3092 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3098 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3100 /* Having a missing entry is valid; return silently */
3101 if (!gpio_is_valid(gpio))
3104 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3105 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3112 /* find the cd gpio for a given slot */
3113 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3114 struct mmc_host *mmc)
3116 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3122 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3124 /* Having a missing entry is valid; return silently */
3125 if (!gpio_is_valid(gpio))
/* register with the MMC core's slot-gpio helper (debounce 0) */
3128 if (mmc_gpio_request_cd(mmc, gpio, 0))
3129 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * Threaded IRQ handler for a GPIO-based card-detect line: forwards
 * the event to the host's card_event hook, tells the MMC core to
 * rescan (200 ms debounce), and queues the driver's own detect work.
 */
3132 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3134 struct mmc_host *mmc = dev_id;
3135 struct dw_mci_slot *slot = mmc_priv(mmc);
3136 struct dw_mci *host = slot->host;
3139 if (mmc->ops->card_event)
3140 mmc->ops->card_event(mmc);
3142 mmc_detect_change(mmc, msecs_to_jiffies(200));
3145 queue_work(host->card_workqueue, &host->card_work);
/*
 * Convert the card-detect GPIO to an IRQ and install the threaded
 * handler above, triggering on both edges so insert and remove are
 * both seen. Errors are logged, not fatal.
 */
3149 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3150 struct mmc_host *mmc)
3152 struct dw_mci_slot *slot = mmc_priv(mmc);
3153 struct dw_mci *host = slot->host;
3157 /* Having a missing entry is valid; return silently */
3158 if (!gpio_is_valid(gpio))
3161 irq = gpio_to_irq(gpio);
3163 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3164 NULL, dw_mci_gpio_cd_irqt,
3165 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
3169 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3172 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/*
 * Undo dw_mci_of_set_cd_gpio_irq(): release the card-detect IRQ and
 * the GPIO itself (devm-managed, freed against the mmc class dev).
 */
3176 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3177 struct mmc_host *mmc)
3179 if (!gpio_is_valid(gpio))
3182 if (gpio_to_irq(gpio) >= 0) {
3183 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3184 devm_gpio_free(&mmc->class_dev, gpio);
3187 #else /* CONFIG_OF */
/* No-op stubs used when the kernel is built without device tree
 * support; they return defaults so the probe path still works. */
3188 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3192 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3196 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3200 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3204 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3205 struct mmc_host *mmc)
3209 #endif /* CONFIG_OF */
/*
 * Allocate and register the mmc_host for slot @id: reads per-slot DT
 * properties (frequencies, card-type restrictions, bus width, caps),
 * wires up card-detect (GPIO IRQ on rk312x SD slots), power/WP GPIOs
 * and the vmmc regulator, sets block-layer limits depending on the
 * DMA backend, applies the pinctrl default state (except for eMMC),
 * and finally calls mmc_add_host(). Returns 0 on success.
 */
3211 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3213 struct mmc_host *mmc;
3214 struct dw_mci_slot *slot;
3215 const struct dw_mci_drv_data *drv_data = host->drv_data;
3220 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3224 slot = mmc_priv(mmc);
3228 host->slot[id] = slot;
3231 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3233 mmc->ops = &dw_mci_ops;
/* f_min/f_max from "clock-freq-min-max"; fall back to driver defaults */
3235 if (of_property_read_u32_array(host->dev->of_node,
3236 "clock-freq-min-max", freq, 2)) {
3237 mmc->f_min = DW_MCI_FREQ_MIN;
3238 mmc->f_max = DW_MCI_FREQ_MAX;
3240 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
3241 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3243 mmc->f_min = freq[0];
3244 mmc->f_max = freq[1];
3246 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
3247 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3250 if(strstr("mmc0",mmc_hostname(mmc)))
3251 printk("Line%d..%s: The rk_sdmmc %s",__LINE__, __FUNCTION__,RK_SDMMC_DRIVER_VERSION);
/* Rockchip DT flags restricting which card types this slot serves */
3253 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3254 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3255 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3256 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3257 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3258 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3260 /* We assume only low-level chip use gpio_cd */
3261 if (cpu_is_rk312x() &&
3263 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3264 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3265 if (gpio_is_valid(slot->cd_gpio)) {
3266 /* Request gpio int for card detection */
3267 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3269 slot->cd_gpio = -ENODEV;
3270 dev_err(host->dev, "failed to get your cd-gpios!\n");
3274 if (host->pdata->get_ocr)
3275 mmc->ocr_avail = host->pdata->get_ocr(id);
/* default OCR: advertise the full 1.65-3.6 V range */
3278 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3279 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3280 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3281 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3285 * Start with slot power disabled, it will be enabled when a card
3288 if (host->pdata->setpower)
3289 host->pdata->setpower(id, 0);
3291 if (host->pdata->caps)
3292 mmc->caps = host->pdata->caps;
3294 if (host->pdata->pm_caps)
3295 mmc->pm_caps = host->pdata->pm_caps;
/* controller index: "mshc" DT alias, else platform device id */
3297 if (host->dev->of_node) {
3298 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3302 ctrl_id = to_platform_device(host->dev)->id;
3304 if (drv_data && drv_data->caps)
3305 mmc->caps |= drv_data->caps[ctrl_id];
3306 if (drv_data && drv_data->hold_reg_flag)
3307 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3309 /* set the compatibility of driver. */
3310 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3311 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3313 if (host->pdata->caps2)
3314 mmc->caps2 = host->pdata->caps2;
/* bus width: platform callback wins, then DT, then default */
3316 if (host->pdata->get_bus_wd)
3317 bus_width = host->pdata->get_bus_wd(slot->id);
3318 else if (host->dev->of_node)
3319 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3323 switch (bus_width) {
3325 mmc->caps |= MMC_CAP_8_BIT_DATA;
3327 mmc->caps |= MMC_CAP_4_BIT_DATA;
3330 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3331 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3332 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3333 mmc->caps |= MMC_CAP_SDIO_IRQ;
3334 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3335 mmc->caps |= MMC_CAP_HW_RESET;
3336 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3337 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3338 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3339 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3340 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3341 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3342 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3343 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3345 /*Assign pm_caps pass to pm_flags*/
3346 mmc->pm_flags = mmc->pm_caps;
/* block-layer limits: platform data, else DMA-backend defaults */
3348 if (host->pdata->blk_settings) {
3349 mmc->max_segs = host->pdata->blk_settings->max_segs;
3350 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3351 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3352 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3353 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3355 /* Useful defaults if platform data is unset. */
3356 #ifdef CONFIG_MMC_DW_IDMAC
3357 mmc->max_segs = host->ring_size;
3358 mmc->max_blk_size = 65536;
3359 mmc->max_blk_count = host->ring_size;
3360 mmc->max_seg_size = 0x1000;
3361 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3362 if(cpu_is_rk3036() || cpu_is_rk312x()){
3363 /* fixup for external dmac setting */
3365 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3366 mmc->max_blk_count = 65535;
3367 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3368 mmc->max_seg_size = mmc->max_req_size;
/* PIO fallback limits when IDMAC is not compiled in */
3372 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3373 mmc->max_blk_count = 512;
3374 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3375 mmc->max_seg_size = mmc->max_req_size;
3376 #endif /* CONFIG_MMC_DW_IDMAC */
3380 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3382 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* only SD slots get a vmmc regulator here */
3387 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3388 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3392 if (IS_ERR(host->vmmc)) {
3393 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3396 ret = regulator_enable(host->vmmc);
3399 "failed to enable regulator: %d\n", ret);
3406 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3408 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3409 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3411 ret = mmc_add_host(mmc);
3415 /* Pinctrl set default iomux state to fucntion port.
3416 * Fixme: DON'T TOUCH EMMC SETTING!
3418 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3420 host->pinctrl = devm_pinctrl_get(host->dev);
3421 if(IS_ERR(host->pinctrl)){
3422 printk("%s: Warning : No pinctrl used!\n",mmc_hostname(host->mmc));
3424 host->pins_idle= pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_IDLE);
/* NOTE(review): pins_idle was just assigned, but pins_default is
 * tested here — this looks like a copy/paste bug; presumably the
 * check should be IS_ERR(host->pins_idle). Confirm before fixing. */
3425 if(IS_ERR(host->pins_default)){
3426 printk("%s: Warning : No IDLE pinctrl matched!\n", mmc_hostname(host->mmc));
3430 if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3431 printk("%s: Warning : Idle pinctrl setting failed!\n", mmc_hostname(host->mmc));
3434 host->pins_default = pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_DEFAULT);
3435 if(IS_ERR(host->pins_default)){
3436 printk("%s: Warning : No default pinctrl matched!\n", mmc_hostname(host->mmc));
3440 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3441 printk("%s: Warning : Default pinctrl setting failed!\n", mmc_hostname(host->mmc));
3447 #if defined(CONFIG_DEBUG_FS)
3448 dw_mci_init_debugfs(slot);
3451 /* Card initially undetected */
3452 slot->last_detect_state = 1;
/* error path: release the CD GPIO IRQ if one was requested */
3457 if (gpio_is_valid(slot->cd_gpio))
3458 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * Tear down a slot created by dw_mci_init_slot(): run the platform
 * exit hook, unregister from the MMC core, clear the host's slot
 * pointer, then free the mmc_host.
 */
3463 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3465 /* Shutdown detect IRQ */
3466 if (slot->host->pdata->exit)
3467 slot->host->pdata->exit(id);
3469 /* Debugfs stuff is cleaned up by mmc core */
3470 mmc_remove_host(slot->mmc);
3471 slot->host->slot[id] = NULL;
3472 mmc_free_host(slot->mmc);
/*
 * Set up the DMA backend: allocate one page of coherent memory for
 * descriptor/sg translation, pick the dma_ops (external eDMAC on
 * rk3036/rk312x, internal IDMAC otherwise, when CONFIG_MMC_DW_IDMAC
 * is set), then initialize them. Any failure falls back to PIO mode.
 */
3475 static void dw_mci_init_dma(struct dw_mci *host)
3477 /* Alloc memory for sg translation */
3478 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3479 &host->sg_dma, GFP_KERNEL);
3480 if (!host->sg_cpu) {
3481 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3486 /* Determine which DMA interface to use */
3487 #if defined(CONFIG_MMC_DW_IDMAC)
3488 if(cpu_is_rk3036() || cpu_is_rk312x()){
3489 host->dma_ops = &dw_mci_edmac_ops;
3490 dev_info(host->dev, "Using external DMA controller.\n");
3492 host->dma_ops = &dw_mci_idmac_ops;
3493 dev_info(host->dev, "Using internal DMA controller.\n");
/* ops table must be complete before we trust it */
3500 if (host->dma_ops->init && host->dma_ops->start &&
3501 host->dma_ops->stop && host->dma_ops->cleanup) {
3502 if (host->dma_ops->init(host)) {
3503 dev_err(host->dev, "%s: Unable to initialize "
3504 "DMA Controller.\n", __func__);
3508 dev_err(host->dev, "DMA initialization not found.\n");
3516 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Set the requested reset bit(s) in CTRL and poll until the hardware
 * clears them, with a 500 ms timeout. Returns whether the reset
 * completed (logs a warning on timeout).
 */
3521 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3523 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3526 ctrl = mci_readl(host, CTRL);
3528 mci_writel(host, CTRL, ctrl);
3530 /* wait till resets clear */
3532 ctrl = mci_readl(host, CTRL);
3533 if (!(ctrl & reset))
3535 } while (time_before(jiffies, timeout));
3538 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * Reset only the data FIFO. The sg_miter is stopped first because a
 * FIFO reset raises an interrupt and the PIO path must not keep a
 * stale scatter-gather mapping across it.
 */
3544 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3547 * Reseting generates a block interrupt, hence setting
3548 * the scatter-gather pointer to NULL.
3551 sg_miter_stop(&host->sg_miter);
3555 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full controller reset: FIFO + controller + DMA in one CTRL write. */
3558 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3560 return dw_mci_ctrl_reset(host,
3561 SDMMC_CTRL_FIFO_RESET |
3563 SDMMC_CTRL_DMA_RESET);
/* Table mapping host-level DT property names to host quirk flags. */
3568 static struct dw_mci_of_quirks {
3573 .quirk = "broken-cd",
3574 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board from the controller's device-tree node:
 * num-slots (default 1), quirks from the of_quirks table, fifo-depth,
 * card-detect-delay, clock-frequency, then the various capability
 * properties (highspeed, UHS, DDR, HS200 variants, cd-inverted,
 * bootpart-no-access). Also gives the SoC-specific drv_data a chance
 * to parse its own properties. Returns the pdata or an ERR_PTR.
 */
3578 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3580 struct dw_mci_board *pdata;
3581 struct device *dev = host->dev;
3582 struct device_node *np = dev->of_node;
3583 const struct dw_mci_drv_data *drv_data = host->drv_data;
3585 u32 clock_frequency;
3587 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3589 dev_err(dev, "could not allocate memory for pdata\n");
3590 return ERR_PTR(-ENOMEM);
3593 /* find out number of slots supported */
3594 if (of_property_read_u32(dev->of_node, "num-slots",
3595 &pdata->num_slots)) {
3596 dev_info(dev, "num-slots property not found, "
3597 "assuming 1 slot is available\n");
3598 pdata->num_slots = 1;
/* accumulate quirk flags declared as boolean DT properties */
3602 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3603 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3604 pdata->quirks |= of_quirks[idx].id;
3607 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3608 dev_info(dev, "fifo-depth property not found, using "
3609 "value of FIFOTH register as default\n");
3611 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3613 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3614 pdata->bus_hz = clock_frequency;
/* SoC-specific DT parsing hook (may fail the whole parse) */
3616 if (drv_data && drv_data->parse_dt) {
3617 ret = drv_data->parse_dt(host);
3619 return ERR_PTR(ret);
3622 if (of_find_property(np, "keep-power-in-suspend", NULL))
3623 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3625 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3626 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3628 if (of_find_property(np, "supports-highspeed", NULL))
3629 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3631 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3632 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3634 if (of_find_property(np, "supports-DDR_MODE", NULL))
3635 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3637 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3638 pdata->caps2 |= MMC_CAP2_HS200;
3640 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3641 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3643 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3644 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3646 if (of_get_property(np, "cd-inverted", NULL))
3647 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3648 if (of_get_property(np, "bootpart-no-access", NULL))
3649 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3654 #else /* CONFIG_OF */
/* Without device tree there is no pdata source: always fail. */
3655 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3657 return ERR_PTR(-EINVAL);
3659 #endif /* CONFIG_OF */
/*
 * Recovery path after the dto watchdog fires while a data transfer is
 * stuck: fake a data CRC/end-bit error so the state machine unwinds,
 * do a full controller reset, re-init the internal DMAC (except on
 * external-DMAC SoCs), restore FIFOTH/TMOUT, re-program the interrupt
 * mask, re-apply ios/bus setup for slots that keep power in suspend,
 * and finally kick the tasklet to finish the request.
 */
3661 static void dw_mci_dealwith_timeout(struct dw_mci *host)
3665 switch(host->state){
3668 case STATE_SENDING_DATA:
3669 case STATE_DATA_BUSY:
/* mark the transfer as failed so the tasklet reports an error */
3670 host->data_status |= (SDMMC_INT_DCRC|SDMMC_INT_EBE);
3671 mci_writel(host, RINTSTS, SDMMC_INT_DRTO); // clear interrupt
3672 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3673 host->state = STATE_DATA_BUSY;
3674 if (!dw_mci_ctrl_all_reset(host)) {
3679 /* NO requirement to reclaim slave chn using external dmac */
3680 #ifdef CONFIG_MMC_DW_IDMAC
3681 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3682 if (host->use_dma && host->dma_ops->init)
3683 host->dma_ops->init(host);
3687 * Restore the initial value at FIFOTH register
3688 * And Invalidate the prev_blksz with zero
3690 mci_writel(host, FIFOTH, host->fifoth_val);
3691 host->prev_blksz = 0;
3692 mci_writel(host, TMOUT, 0xFFFFFFFF);
3693 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3694 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
3695 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
/* card-detect interrupt only for removable (non-SDIO) slots */
3696 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3697 regs |= SDMMC_INT_CD;
3698 mci_writel(host, INTMASK, regs);
3699 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3700 for (i = 0; i < host->num_slots; i++) {
3701 struct dw_mci_slot *slot = host->slot[i];
3704 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3705 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3706 dw_mci_setup_bus(slot, true);
3709 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3710 tasklet_schedule(&host->tasklet);
/*
 * dto_timer callback: the expected data-over interrupt never arrived.
 * With the controller IRQ masked, fake an end-bit error status, clear
 * all raw interrupts, and run the timeout recovery sequence.
 */
3716 static void dw_mci_dto_timeout(unsigned long host_data)
3718 struct dw_mci *host = (struct dw_mci *) host_data;
3720 disable_irq(host->irq);
3722 host->data_status = SDMMC_INT_EBE;
3723 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3724 dw_mci_dealwith_timeout(host);
3726 enable_irq(host->irq);
3728 int dw_mci_probe(struct dw_mci *host)
3730 const struct dw_mci_drv_data *drv_data = host->drv_data;
3731 int width, i, ret = 0;
3737 host->pdata = dw_mci_parse_dt(host);
3738 if (IS_ERR(host->pdata)) {
3739 dev_err(host->dev, "platform data not available\n");
3744 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3746 "Platform data must supply select_slot function\n");
3751 * In 2.40a spec, Data offset is changed.
3752 * Need to check the version-id and set data-offset for DATA register.
3754 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3755 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3757 if (host->verid < DW_MMC_240A)
3758 host->data_offset = DATA_OFFSET;
3760 host->data_offset = DATA_240A_OFFSET;
3763 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3764 if (IS_ERR(host->hclk_mmc)) {
3765 dev_err(host->dev, "failed to get hclk_mmc\n");
3766 ret = PTR_ERR(host->hclk_mmc);
3770 clk_prepare_enable(host->hclk_mmc);
3773 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3774 if (IS_ERR(host->clk_mmc)) {
3775 dev_err(host->dev, "failed to get clk mmc_per\n");
3776 ret = PTR_ERR(host->clk_mmc);
3780 host->bus_hz = host->pdata->bus_hz;
3781 if (!host->bus_hz) {
3782 dev_err(host->dev,"Platform data must supply bus speed\n");
3787 if (host->verid < DW_MMC_240A)
3788 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3790 //rockchip: fix divider 2 in clksum before controlller
3791 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3794 dev_err(host->dev, "failed to set clk mmc\n");
3797 clk_prepare_enable(host->clk_mmc);
3799 if (drv_data && drv_data->setup_clock) {
3800 ret = drv_data->setup_clock(host);
3803 "implementation specific clock setup failed\n");
3808 host->quirks = host->pdata->quirks;
3809 host->irq_state = true;
3810 host->set_speed = 0;
3812 host->svi_flags = 0;
3814 spin_lock_init(&host->lock);
3815 INIT_LIST_HEAD(&host->queue);
3818 * Get the host data width - this assumes that HCON has been set with
3819 * the correct values.
3821 i = (mci_readl(host, HCON) >> 7) & 0x7;
3823 host->push_data = dw_mci_push_data16;
3824 host->pull_data = dw_mci_pull_data16;
3826 host->data_shift = 1;
3827 } else if (i == 2) {
3828 host->push_data = dw_mci_push_data64;
3829 host->pull_data = dw_mci_pull_data64;
3831 host->data_shift = 3;
3833 /* Check for a reserved value, and warn if it is */
3835 "HCON reports a reserved host data width!\n"
3836 "Defaulting to 32-bit access.\n");
3837 host->push_data = dw_mci_push_data32;
3838 host->pull_data = dw_mci_pull_data32;
3840 host->data_shift = 2;
3843 /* Reset all blocks */
3844 if (!dw_mci_ctrl_all_reset(host))
3847 host->dma_ops = host->pdata->dma_ops;
3848 dw_mci_init_dma(host);
3850 /* Clear the interrupts for the host controller */
3851 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3852 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3854 /* Put in max timeout */
3855 mci_writel(host, TMOUT, 0xFFFFFFFF);
3858 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3859 * Tx Mark = fifo_size / 2 DMA Size = 8
3861 if (!host->pdata->fifo_depth) {
3863 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3864 * have been overwritten by the bootloader, just like we're
3865 * about to do, so if you know the value for your hardware, you
3866 * should put it in the platform data.
3868 fifo_size = mci_readl(host, FIFOTH);
3869 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3871 fifo_size = host->pdata->fifo_depth;
3873 host->fifo_depth = fifo_size;
3875 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3876 mci_writel(host, FIFOTH, host->fifoth_val);
3878 /* disable clock to CIU */
3879 mci_writel(host, CLKENA, 0);
3880 mci_writel(host, CLKSRC, 0);
3882 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3883 host->card_workqueue = alloc_workqueue("dw-mci-card",
3884 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
3885 if (!host->card_workqueue) {
3889 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
3890 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3891 host->irq_flags, "dw-mci", host);
3895 if (host->pdata->num_slots)
3896 host->num_slots = host->pdata->num_slots;
3898 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
3900 setup_timer(&host->dto_timer, dw_mci_dto_timeout, (unsigned long)host);
3901 /* We need at least one slot to succeed */
3902 for (i = 0; i < host->num_slots; i++) {
3903 ret = dw_mci_init_slot(host, i);
3905 dev_dbg(host->dev, "slot %d init failed\n", i);
3911 * Enable interrupts for command done, data over, data empty, card det,
3912 * receive ready and error such as transmit, receive timeout, crc error
3914 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3915 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
3916 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
3917 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3918 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3919 regs |= SDMMC_INT_CD;
3921 mci_writel(host, INTMASK, regs);
3923 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
3925 dev_info(host->dev, "DW MMC controller at irq %d, "
3926 "%d bit host data width, "
3928 host->irq, width, fifo_size);
3931 dev_info(host->dev, "%d slots initialized\n", init_slots);
3933 dev_dbg(host->dev, "attempted to initialize %d slots, "
3934 "but failed on all\n", host->num_slots);
3939 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
3940 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
3945 destroy_workqueue(host->card_workqueue);
3948 if (host->use_dma && host->dma_ops->exit)
3949 host->dma_ops->exit(host);
3952 regulator_disable(host->vmmc);
3953 regulator_put(host->vmmc);
3957 if (!IS_ERR(host->clk_mmc))
3958 clk_disable_unprepare(host->clk_mmc);
3960 if (!IS_ERR(host->hclk_mmc))
3961 clk_disable_unprepare(host->hclk_mmc);
3965 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - tear down a dw_mmc host: stop the DTO timer, mask and
 * clear interrupts, release every slot, gate the card clock, and free
 * workqueue / DMA / CD-GPIO / regulator / clock resources acquired in probe.
 *
 * NOTE(review): this excerpt has elided lines (braces, local declarations,
 * and some conditional guards are not visible here).
 */
3967 void dw_mci_remove(struct dw_mci *host)
3969 struct mmc_host *mmc = host->mmc;
3970 struct dw_mci_slot *slot = mmc_priv(mmc);
/* Stop the software data-timeout timer before interrupts go away. */
3973 del_timer_sync(&host->dto_timer);
/* Ack all pending interrupt status bits, then mask everything. */
3975 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3976 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
/* Unregister each initialized slot from the MMC core. */
3978 for(i = 0; i < host->num_slots; i++){
3979 dev_dbg(host->dev, "remove slot %d\n", i);
3981 dw_mci_cleanup_slot(host->slot[i], i);
/* Gate the clock to the card interface unit (CIU). */
3984 /* disable clock to CIU */
3985 mci_writel(host, CLKENA, 0);
3986 mci_writel(host, CLKSRC, 0);
/* Workqueue was allocated in probe (dw-mci-card). */
3988 destroy_workqueue(host->card_workqueue);
/* Let the DMA backend release its descriptors, if one was set up. */
3990 if(host->use_dma && host->dma_ops->exit)
3991 host->dma_ops->exit(host);
/* Release the card-detect GPIO IRQ if a valid CD gpio was requested. */
3993 if (gpio_is_valid(slot->cd_gpio))
3994 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
/* Drop the card power regulator reference taken in probe. */
3997 regulator_disable(host->vmmc);
3998 regulator_put(host->vmmc);
/* Clocks were clk_prepare_enable()d in probe; undo both if valid. */
4000 if(!IS_ERR(host->clk_mmc))
4001 clk_disable_unprepare(host->clk_mmc);
4003 if(!IS_ERR(host->hclk_mmc))
4004 clk_disable_unprepare(host->hclk_mmc);
4006 EXPORT_SYMBOL(dw_mci_remove);
4010 #ifdef CONFIG_PM_SLEEP
4012 * TODO: we should probably disable the clock to the card in the suspend path.
/*
 * dw_mci_suspend - system-sleep suspend hook for a dw_mmc host.
 *
 * Cuts card power, tears down DMA, and for the SD-card controller
 * additionally freezes rescans, quiesces the IRQ/pinctrl/registers, and
 * arms the card-detect GPIO as a wakeup source.
 *
 * NOTE(review): this excerpt has elided lines; the guard around
 * regulator_disable() and the closing braces are not visible here.
 */
4014 int dw_mci_suspend(struct dw_mci *host)
/* Remove card power for the duration of suspend. */
4017 regulator_disable(host->vmmc);
4019 if(host->use_dma && host->dma_ops->exit)
4020 host->dma_ops->exit(host);
/* The remainder applies only to the SD-card (removable) controller. */
4022 /*only for sdmmc controller*/
4023 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
/* Block rescans while asleep; drop the detect wake lock if a rescan
 * was pending so suspend is not held off. */
4024 host->mmc->rescan_disable = 1;
4025 if (cancel_delayed_work_sync(&host->mmc->detect))
4026 wake_unlock(&host->mmc->detect_wake_lock);
4028 disable_irq(host->irq);
/* Park the pins in their idle state to avoid leakage while suspended. */
4029 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4030 MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
4031 mmc_hostname(host->mmc));
/* Ack and mask all controller interrupts, then disable the block. */
4033 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4034 mci_writel(host, INTMASK, 0x00);
4035 mci_writel(host, CTRL, 0x00);
/* Re-route card-detect to a GPIO so it can wake the system; rk3126 is
 * already in gpio_cd mode and must be skipped. */
4037 /* Soc rk3126 already in gpio_cd mode */
4038 if (!(cpu_is_rk312x() && soc_is_rk3126())) {
4039 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4040 enable_irq_wake(host->mmc->slot.cd_irq);
4045 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - system-sleep resume hook for a dw_mmc host.
 *
 * Reverses dw_mci_suspend: restores pinctrl/CD wiring, re-enables card
 * power, resets and re-programs the controller (FIFO thresholds, timeout,
 * interrupt mask), and re-applies ios to slots that kept power.
 *
 * NOTE(review): this excerpt has elided lines (error-path handling, GRF
 * write guards, and several closing braces are not visible here).
 */
4047 int dw_mci_resume(struct dw_mci *host)
4049 int i, ret, retry_cnt = 0;
4051 struct dw_mci_slot *slot;
/* SDIO: nothing to do when no card was present at suspend time. */
4053 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO){
4054 slot = mmc_priv(host->mmc);
4056 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
/* SD-card controller: undo the suspend-time CD-GPIO/pinctrl changes. */
4059 /*only for sdmmc controller*/
4060 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4061 /* Soc rk3126 already in gpio_cd mode */
4062 if (!(cpu_is_rk312x() && soc_is_rk3126())) {
4063 disable_irq_wake(host->mmc->slot.cd_irq);
4064 mmc_gpio_free_cd(host->mmc);
4066 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4067 MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
4068 mmc_hostname(host->mmc));
4069 host->mmc->rescan_disable = 0;
/* Per-SoC GRF writes: route card-detect back to the controller
 * (upper 16 bits are the write-enable mask on Rockchip GRF). */
4072 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4073 else if(cpu_is_rk3036())
4074 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4075 else if(cpu_is_rk312x())
4076 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4077 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
/* Restore card power that suspend removed. */
4080 ret = regulator_enable(host->vmmc);
4083 "failed to enable regulator: %d\n", ret);
/* Full controller reset before re-programming registers. */
4088 if(!dw_mci_ctrl_all_reset(host)){
4093 if(host->use_dma && host->dma_ops->init)
4094 host->dma_ops->init(host);
4097 * Restore the initial value at FIFOTH register
4098 * And Invalidate the prev_blksz with zero
4100 mci_writel(host, FIFOTH, host->fifoth_val);
4101 host->prev_blksz = 0;
4102 /* Put in max timeout */
4103 mci_writel(host, TMOUT, 0xFFFFFFFF);
/* Ack stale status, then unmask the same interrupt set probe used;
 * card-detect only for non-SDIO hosts. */
4105 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4106 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
4108 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4109 regs |= SDMMC_INT_CD;
4110 mci_writel(host, INTMASK, regs);
4111 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
/* Re-enable the IRQ disabled in suspend (first pass only). */
4112 /*only for sdmmc controller*/
4113 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
4114 enable_irq(host->irq);
/* Slots that kept power across suspend need ios/bus re-setup. */
4117 for(i = 0; i < host->num_slots; i++){
4118 struct dw_mci_slot *slot = host->slot[i];
4121 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4122 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4123 dw_mci_setup_bus(slot, true);
4129 EXPORT_SYMBOL(dw_mci_resume);
4130 #endif /* CONFIG_PM_SLEEP */
/*
 * dw_mci_init - module entry point; only announces the driver.
 * Actual device binding happens via the platform driver registered
 * elsewhere in this file. (Body partially elided in this excerpt.)
 */
4132 static int __init dw_mci_init(void)
4134 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
/* dw_mci_exit - module exit point (body not visible in this excerpt). */
4138 static void __exit dw_mci_exit(void)
4142 module_init(dw_mci_init);
4143 module_exit(dw_mci_exit);
/* Module metadata exposed via modinfo. */
4145 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4146 MODULE_AUTHOR("NXP Semiconductor VietNam");
4147 MODULE_AUTHOR("Imagination Technologies Ltd");
4148 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
/* Fixed mojibake: "£¬" was a CP1252 mis-decoding of a full-width comma;
 * also normalized the <email> bracket spacing. */
4149 MODULE_AUTHOR("Rockchip Electronics, Bangwang Xie <xbw@rock-chips.com>");
4150 MODULE_LICENSE("GPL v2");