/*
 * Synopsys DesignWare Multimedia Card Interface driver
 * (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/mmc/host.h>
34 #include <linux/mmc/mmc.h>
35 #include <linux/mmc/sd.h>
36 #include <linux/mmc/card.h>
37 #include <linux/mmc/sdio.h>
38 #include <linux/mmc/rk_mmc.h>
39 #include <linux/bitops.h>
40 #include <linux/regulator/consumer.h>
41 #include <linux/workqueue.h>
43 #include <linux/of_gpio.h>
44 #include <linux/mmc/slot-gpio.h>
45 #include <linux/clk-private.h>
46 #include <linux/rockchip/cpu.h>
49 #include "rk_sdmmc_dbg.h"
50 #include <linux/regulator/rockchip_io_vol_domain.h>
51 #include "../../clk/rockchip/clk-ops.h"
53 #define RK_SDMMC_DRIVER_VERSION "Ver 1.12 2014-07-08"
55 /* Common flag combinations */
56 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
57 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
59 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
61 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
62 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
63 #define DW_MCI_SEND_STATUS 1
64 #define DW_MCI_RECV_STATUS 2
65 #define DW_MCI_DMA_THRESHOLD 16
67 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
68 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
70 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
71 #define SDMMC_DATA_TIMEOUT_SD 500
72 #define SDMMC_DATA_TIMEOUT_SDIO 250
73 #define SDMMC_DATA_TIMEOUT_EMMC 2500
75 #define SDMMC_CMD_RTO_MAX_HOLD 200
76 #define SDMMC_WAIT_FOR_UNBUSY 2500
78 #ifdef CONFIG_MMC_DW_IDMAC
79 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
80 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
81 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
85 u32 des0; /* Control Descriptor */
86 #define IDMAC_DES0_DIC BIT(1)
87 #define IDMAC_DES0_LD BIT(2)
88 #define IDMAC_DES0_FD BIT(3)
89 #define IDMAC_DES0_CH BIT(4)
90 #define IDMAC_DES0_ER BIT(5)
91 #define IDMAC_DES0_CES BIT(30)
92 #define IDMAC_DES0_OWN BIT(31)
94 u32 des1; /* Buffer sizes */
95 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
96 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
98 u32 des2; /* buffer 1 physical address */
100 u32 des3; /* buffer 2 physical address */
102 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * CMD19 tuning block pattern for a 4-bit data bus, as defined by the
 * SD physical layer specification (UHS-I tuning).  Sent by the card and
 * compared by the host to find a working sample phase.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/*
 * Tuning block pattern for an 8-bit data bus (eMMC HS200 tuning,
 * CMD21).  128-byte variant of the 4-bit pattern above.
 */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
134 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
135 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
136 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
137 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
139 /*printk the all register of current host*/
/*
 * Debug aid: dump every register listed in the dw_mci_regs[] table to
 * the kernel log via printk.
 */
static int dw_mci_regs_printk(struct dw_mci *host)
	struct sdmmc_reg *regs = dw_mci_regs;

	/* Walk the name-terminated register table, printing offset and value */
	while( regs->name != 0 ){
		printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
	printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
154 #if defined(CONFIG_DEBUG_FS)
155 static int dw_mci_req_show(struct seq_file *s, void *v)
157 struct dw_mci_slot *slot = s->private;
158 struct mmc_request *mrq;
159 struct mmc_command *cmd;
160 struct mmc_command *stop;
161 struct mmc_data *data;
163 /* Make sure we get a consistent snapshot */
164 spin_lock_bh(&slot->host->lock);
174 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
175 cmd->opcode, cmd->arg, cmd->flags,
176 cmd->resp[0], cmd->resp[1], cmd->resp[2],
177 cmd->resp[2], cmd->error);
179 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
180 data->bytes_xfered, data->blocks,
181 data->blksz, data->flags, data->error);
184 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
185 stop->opcode, stop->arg, stop->flags,
186 stop->resp[0], stop->resp[1], stop->resp[2],
187 stop->resp[2], stop->error);
190 spin_unlock_bh(&slot->host->lock);
/* debugfs open hook: bind dw_mci_req_show to the slot stored in i_private. */
static int dw_mci_req_open(struct inode *inode, struct file *file)
	return single_open(file, dw_mci_req_show, inode->i_private);
/* file_operations for the per-slot debugfs "req" entry (seq_file based). */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.release	= single_release,
208 static int dw_mci_regs_show(struct seq_file *s, void *v)
210 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
211 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
212 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
213 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
214 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
215 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open hook: bind dw_mci_regs_show to the host stored in i_private. */
static int dw_mci_regs_open(struct inode *inode, struct file *file)
	return single_open(file, dw_mci_regs_show, inode->i_private);
/* file_operations for the debugfs "regs" entry (seq_file based). */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.release	= single_release,
/*
 * Create the per-slot debugfs entries under the mmc host's debugfs root:
 * "regs" and "req" files plus raw views of the host state machine and
 * event bitmasks.  Best-effort: on failure only a warning is logged.
 */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;

	root = mmc->debugfs_root;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
	node = debugfs_create_file("req", S_IRUSR, root, slot,
	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);

	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
/* Program the controller data/response timeout register to its maximum. */
static void dw_mci_set_timeout(struct dw_mci *host)
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMD register flags:
 * response expectation/length/CRC, data direction and mode, and any
 * SoC-specific tweaks supplied by drv_data->prepare_command.
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;

	/* Mark in-flight until the command-done path records a real result */
	cmd->error = -EINPROGRESS;

	if (cmdr == MMC_STOP_TRANSMISSION)
		cmdr |= SDMMC_CMD_STOP;
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;

	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build a synthesized stop/abort command for the given data command:
 * CMD12 (STOP_TRANSMISSION) for block read/write, or CMD52 with the
 * CCCR ABORT flag for SDIO extended transfers.  Returns the CMD
 * register flags for issuing it.
 */
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
	struct mmc_command *stop;

	stop = &host->stop_abort;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		/* Write to CCCR ABORT with the function number of the aborted cmd */
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Latch the command argument and fire the command by writing CMD with
 * the START bit.  CMD11 (voltage switch) gets special handling: low
 * power clock gating is disabled first and the VOLT_SWITCH flag added.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
	struct dw_mci_slot *slot = host->slot[0];
	/*temporality fix slot[0] due to host->num_slots equal to 1*/

	host->pre_cmd = host->cmd;

		"start command: ARGR=0x%08x CMDR=0x%08x\n",
		cmd->arg, cmd_flags);

	if(SD_SWITCH_VOLTAGE == cmd->opcode){
		/*confirm non-low-power mode*/
		mci_writel(host, CMDARG, 0);
		dw_mci_disable_low_power(slot);

		MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
			__LINE__, __FUNCTION__,mmc_hostname(host->mmc));

		cmd_flags |= SDMMC_CMD_VOLT_SWITCH;

	mci_writel(host, CMDARG, cmd->arg);

	/* fix the value to 1 in some Soc,for example RK3188. */
	if(host->mmc->hold_reg_flag)
		cmd_flags |= SDMMC_CMD_USE_HOLD_REG;

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the caller-provided stop command using the precomputed flags. */
static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
	dw_mci_start_command(host, data->stop, host->stop_cmdr);
395 /* DMA interface functions */
/*
 * Abort an in-progress DMA transfer (stop + cleanup via dma_ops) and
 * mark the transfer complete so the state machine can advance.
 */
static void dw_mci_stop_dma(struct dw_mci *host)
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
407 static int dw_mci_get_dma_dir(struct mmc_data *data)
409 if (data->flags & MMC_DATA_WRITE)
410 return DMA_TO_DEVICE;
412 return DMA_FROM_DEVICE;
415 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the scatterlist of the current data transfer, unless the upper
 * layer pre-mapped it (host_cookie set) and will unmap it in post_req.
 */
static void dw_mci_dma_cleanup(struct dw_mci *host)
	struct mmc_data *data = host->data;

	if (!data->host_cookie)
		dma_unmap_sg(host->dev,
			     dw_mci_get_dma_dir(data));
428 static void dw_mci_idmac_reset(struct dw_mci *host)
430 u32 bmod = mci_readl(host, BMOD);
431 /* Software reset of DMA */
432 bmod |= SDMMC_IDMAC_SWRESET;
433 mci_writel(host, BMOD, bmod);
436 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
440 /* Disable and reset the IDMAC interface */
441 temp = mci_readl(host, CTRL);
442 temp &= ~SDMMC_CTRL_USE_IDMAC;
443 temp |= SDMMC_CTRL_DMA_RESET;
444 mci_writel(host, CTRL, temp);
446 /* Stop the IDMAC running */
447 temp = mci_readl(host, BMOD);
448 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
449 temp |= SDMMC_IDMAC_SWRESET;
450 mci_writel(host, BMOD, temp);
/*
 * IDMAC completion callback: clean up the mapping and signal
 * EVENT_XFER_COMPLETE so the tasklet advances the request state machine.
 */
static void dw_mci_idmac_complete_dma(void *arg)
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
		host->mrq->cmd->opcode,host->mrq->cmd->arg,
		data->blocks,data->blksz,mmc_hostname(host->mmc));

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the mapped scatterlist: one
 * chained descriptor per sg entry, then mark the first (FD) and last
 * (LD, interrupts enabled) descriptors.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;

	/* Set first descriptor */
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor: re-enable its interrupt, clear chaining */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;
/*
 * Kick off an IDMAC transfer: build descriptors for the sg list,
 * route the data path through the IDMAC, enable it, and poke the
 * poll-demand register so it starts fetching descriptors.
 */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: forward-link the descriptor ring in sg_cpu,
 * close the ring with an end-of-ring descriptor, reset the IDMAC,
 * unmask TX/RX-complete interrupts and program the ring base address.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
	struct idmac_desc *p;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
/* DMA operations vector for the internal DMA controller (IDMAC). */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
/*
 * External-DMA variant of dma_cleanup: unmap the sg list unless the
 * upper layer pre-mapped it (host_cookie set).
 */
static void dw_mci_edma_cleanup(struct dw_mci *host)
	struct mmc_data *data = host->data;

	if (!data->host_cookie)
		dma_unmap_sg(host->dev,
			     data->sg, data->sg_len,
			     dw_mci_get_dma_dir(data));
/* Abort all outstanding descriptors on the external DMA channel. */
static void dw_mci_edmac_stop_dma(struct dw_mci *host)
	dmaengine_terminate_all(host->dms->ch);
/*
 * External-DMA completion callback: sync the cache for reads, clean up
 * the mapping, and signal EVENT_XFER_COMPLETE to the tasklet.
 */
static void dw_mci_edmac_complete_dma(void *arg)
	struct dw_mci *host = arg;
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	if(data->flags & MMC_DATA_READ)
		/* Invalidate cache after read */
		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
				    data->sg_len, DMA_FROM_DEVICE);

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
610 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
612 struct dma_slave_config slave_config;
613 struct dma_async_tx_descriptor *desc = NULL;
614 struct scatterlist *sgl = host->data->sg;
615 u32 sg_elems = host->data->sg_len;
618 /* Set external dma config: burst size, burst width*/
619 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
620 slave_config.src_addr = slave_config.dst_addr;
621 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
622 slave_config.src_addr_width = slave_config.dst_addr_width;
624 /* Match FIFO dma burst MSIZE with external dma config*/
625 slave_config.dst_maxburst = ((host->fifoth_val) >> 28) && 0x7;
626 slave_config.src_maxburst = slave_config.dst_maxburst;
628 if(host->data->flags & MMC_DATA_WRITE){
629 slave_config.direction = DMA_MEM_TO_DEV;
630 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
632 dev_err(host->dev, "error in dw_mci edma configuration.\n");
636 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
637 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
639 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
642 /* Set dw_mci_edmac_complete_dma as callback */
643 desc->callback = dw_mci_edmac_complete_dma;
644 desc->callback_param = (void *)host;
645 dmaengine_submit(desc);
647 /* Flush cache before write */
648 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
649 sg_elems, DMA_TO_DEVICE);
650 dma_async_issue_pending(host->dms->ch);
653 slave_config.direction = DMA_DEV_TO_MEM;
654 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
656 dev_err(host->dev, "error in dw_mci edma configuration.\n");
659 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
660 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
662 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
665 /* set dw_mci_edmac_complete_dma as callback */
666 desc->callback = dw_mci_edmac_complete_dma;
667 desc->callback_param = (void *)host;
668 dmaengine_submit(desc);
669 dma_async_issue_pending(host->dms->ch);
673 static int dw_mci_edmac_init(struct dw_mci *host)
675 /* 1) request external dma channel, SHOULD decide chn in dts */
676 host->dms = (struct dw_mci_dma_slave *)kmalloc(sizeof(struct dw_mci_dma_slave),GFP_KERNEL);
677 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
679 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
680 host->dms->ch->chan_id);
693 static void dw_mci_edmac_exit(struct dw_mci *host)
695 dma_release_channel(host->dms->ch);
/* DMA operations vector for the external dmaengine-based DMA path. */
static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
	.init = dw_mci_edmac_init,
	.exit = dw_mci_edmac_exit,
	.start = dw_mci_edmac_start_dma,
	.stop = dw_mci_edmac_stop_dma,
	.complete = dw_mci_edmac_complete_dma,
	.cleanup = dw_mci_edma_cleanup,
706 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * Map the data's scatterlist for DMA if the transfer is DMA-able:
 * rejects short transfers (below DW_MCI_DMA_THRESHOLD) and any sg
 * entry that is not word-aligned in offset and length.  When called
 * from pre_req (next != 0) the mapping is cached in host_cookie.
 * Returns the number of mapped entries, or a negative value when the
 * transfer must fall back to PIO.
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)

	sg_len = dma_map_sg(host->dev,
			    dw_mci_get_dma_dir(data));

		data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: pre-map the next request's data for DMA so the
 * mapping cost overlaps with the current transfer.  host_cookie records
 * the mapping; it is cleared again if the mapping fails.
 */
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)

	/* A stale cookie here would indicate an unbalanced pre/post pairing */
	if (data->host_cookie) {
		data->host_cookie = 0;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: unmap a request's data that was pre-mapped in
 * dw_mci_pre_req (host_cookie set), then clear the cookie.
 */
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
/*
 * Pick the largest DMA burst size (MSIZE) compatible with the block
 * size and FIFO depth, and program FIFOTH (MSIZE + RX/TX watermarks)
 * accordingly.  Only meaningful for the IDMAC configuration.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	/* Candidate burst sizes, largest tried first (idx counts down) */
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * Keep the default msize/rx_wmark
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {

	/* Burst must evenly divide both the block depth and TX headroom */
	if (!((blksz_depth % mszs[idx]) ||
	      (tx_wmark_invers % mszs[idx]))) {
		rx_wmark = mszs[idx] - 1;

	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
/*
 * Configure the card-read-threshold (CDTHRCTL) for high-speed read
 * timings (HS200 / SDR104); disabled for all other timings or when the
 * block does not fit the FIFO.
 */
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));

	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to submit the data transfer via DMA.  Maps the sg list, tunes
 * FIFOTH when the block size changed, enables the DMA interface,
 * masks the PIO RX/TX interrupts and starts the DMA engine.
 * Returns non-zero when DMA cannot be used and the caller must fall
 * back to PIO.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
	/* If we don't have a channel, we can't do DMA */

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
		host->dma_ops->stop(host);

		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);
/*
 * Set up a data transfer: record the direction, program the read
 * threshold for reads, then try DMA; on DMA failure fall back to PIO
 * (sg_miter based), re-enable the RX/TX interrupts and restore the
 * initial FIFOTH.  prev_blksz tracks whether FIFOTH needs retuning on
 * the next DMA transfer.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
	data->error = -EINPROGRESS;

	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
		host->dir_status = DW_MCI_SEND_STATUS;

	MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
		data->blocks, data->blksz, mmc_hostname(host->mmc));

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback path */
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);

		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transfered by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;

		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
/*
 * Issue a raw controller command (e.g. SDMMC_CMD_UPD_CLK) outside the
 * normal request path: wait for the card/controller to go non-busy,
 * write CMDARG/CMD with the START bit, then poll until the controller
 * clears START or the timeout expires.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
	unsigned int cmd_status = 0;
#ifdef SDMMC_WAIT_FOR_UNBUSY
	timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);

	/* Only wait for unbusy while a card is actually present */
	if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		ret = time_before(jiffies, timeout);
		cmd_status = mci_readl(host, STATUS);
		if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))

		printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
			__LINE__, __FUNCTION__, mmc_hostname(host->mmc));

	mci_writel(host, CMDARG, arg);
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	/* Clock-update commands complete quickly; use a shorter poll window */
	if(cmd & SDMMC_CMD_UPD_CLK)
		timeout = jiffies + msecs_to_jiffies(50);
		timeout = jiffies + msecs_to_jiffies(500);
	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))

	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
/*
 * Program the card clock (divider, enable, optional low-power gating)
 * and bus width for the slot.  Contains Rockchip-specific workarounds
 * that re-rate the source clock (clk_mmc) when the computed divider
 * would be 0 or too large; see the inline BUG note below.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
	struct dw_mci *host = slot->host;
	unsigned int tempck,clock = slot->clock;

	MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
		__LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));

		/* clock == 0: gate the card clock off */
		mci_writel(host, CLKENA, 0);
#ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
		/* During a voltage switch the clock must stay under host control */
		if(host->svi_flags == 0)
			mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit) {
			tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,

			host->set_speed = tempck;
			host->set_div = div;

			/* disable clock before changing divider/source */
			mci_writel(host, CLKENA, 0);
			mci_writel(host, CLKSRC, 0);

				     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

			if(clock <= 400*1000){
				MMC_DBG_BOOT_FUNC(host->mmc,
					"dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
					clock * 2, mmc_hostname(host->mmc));
				/* clk_mmc will change parents to 24MHz xtal*/
				clk_set_rate(host->clk_mmc, clock * 2);

				host->set_div = div;

				MMC_DBG_BOOT_FUNC(host->mmc,
					"dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
					mmc_hostname(host->mmc));

				MMC_DBG_ERR_FUNC(host->mmc,
					"dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
					mmc_hostname(host->mmc));

				host->set_div = div;
				host->bus_hz = host->set_speed * 2;
				MMC_DBG_BOOT_FUNC(host->mmc,
					"dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
					div, host->bus_hz, mmc_hostname(host->mmc));

			/* BUG may be here, come on, Linux BSP engineer looks!
			   FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
			   WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
			   some oops happened like that:
			   mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
			   rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
			   rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
			   mmc0: new high speed DDR MMC card at address 0001
			   mmcblk0: mmc0:0001 M8G1GC 7.28 GiB

			   mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
			   mmcblk0: retrying using single block read
			   mmcblk0: error -110 sending status command, retrying

			   How to: If eMMC HW version < 4.51, or > 4.51 but no caps2-mmc-hs200 support in dts
			   Please set dts emmc clk to 100M or 150M, I will workaround it!
			 */

			if (host->verid < DW_MMC_240A)
				clk_set_rate(host->clk_mmc,(host->bus_hz));
				clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);

			/* set clock to desired speed */
			mci_writel(host, CLKDIV, div);

				     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

			/* enable clock; only low power if no SDIO */
			clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;

			/* SDIO interrupt bit position moved in controller >= 2.40a */
			if (host->verid < DW_MMC_240A)
				sdio_int = SDMMC_INT_SDIO(slot->id);
				sdio_int = SDMMC_INT_SDIO((slot->id) + 8);

			if (!(mci_readl(host, INTMASK) & sdio_int))
				clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
			mci_writel(host, CLKENA, clk_en_a);

				     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
			/* keep the clock with reflecting clock dividor */
			slot->__clk_old = clock << div;

		host->current_speed = clock;

	if(slot->ctype != slot->pre_ctype)
		MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
			(slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
			mmc_hostname(host->mmc));
	slot->pre_ctype = slot->ctype;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
1147 extern struct mmc_card *this_card;
1148 static void dw_mci_wait_unbusy(struct dw_mci *host)
1151 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1152 unsigned long time_loop;
1153 unsigned int status;
1156 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1158 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1159 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1160 /* Special care for (secure)erase timeout calculation */
1162 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1165 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1) ;
1166 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1167 300000 * (this_card->ext_csd.sec_erase_mult)) :
1168 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1172 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1173 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1174 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1175 timeout = SDMMC_DATA_TIMEOUT_SD;
1178 time_loop = jiffies + msecs_to_jiffies(timeout);
1180 status = mci_readl(host, STATUS);
1181 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1183 } while (time_before(jiffies, time_loop));
1188 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/*
 * Return value:
 *   0 -- status is busy.
 *   1 -- status is unbusy.
 *
 * NOTE(review): the visible code toggles host->svi_flags rather than
 * reading a busy bit — it looks like the two calls of the voltage
 * switch handshake are being tracked here; confirm against the caller.
 */
int dw_mci_card_busy(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
		host->svi_flags, mmc_hostname(host->mmc));

	if(host->svi_flags == 0){
		host->svi_flags = 1;
		return host->svi_flags;
	host->svi_flags = 0;
	return host->svi_flags;
/*
 * Core request start: select the slot, wait for the card to be
 * unbusy, clear per-request event state, program byte/block counts
 * for data, submit the data phase and then issue the (s)command.
 * Must be called with the host claimed by the state machine.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
	struct mmc_request *mrq;
	struct mmc_data	*data;

	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	host->cur_slot = slot;

	dw_mci_wait_unbusy(host);

	host->pending_events = 0;
	host->completed_events = 0;
	host->data_status = 0;

		dw_mci_set_timeout(host);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

		dw_mci_submit_data(host, data);

	dw_mci_start_command(host, cmd, cmdflags);

		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * Start the slot's queued request; if the request has a set-block-count
 * command (sbc, CMD23) that is issued first.
 */
static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
		mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
/*
 * Queue @mrq on @slot: start it immediately if the host is idle,
 * otherwise append the slot to host->queue for later dispatch by
 * dw_mci_request_end().
 */
1274 /* must be called with host->lock held */
1275 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1276 struct mmc_request *mrq)
1278 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1283 if (host->state == STATE_IDLE) {
1284 host->state = STATE_SENDING_CMD;
1285 dw_mci_start_request(host, slot);
/* Host busy: park this slot until the current request finishes. */
1287 list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops.request: entry point from the MMC core.  Fails fast with
 * -ENOMEDIUM when no card is present, otherwise queues the request under
 * host->lock.
 */
1291 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1293 struct dw_mci_slot *slot = mmc_priv(mmc);
1294 struct dw_mci *host = slot->host;
1299 * The check for card presence and queueing of the request must be
1300 * atomic, otherwise the card could be removed in between and the
1301 * request wouldn't fail until another card was inserted.
1303 spin_lock_bh(&host->lock);
1305 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1306 spin_unlock_bh(&host->lock);
1307 mrq->cmd->error = -ENOMEDIUM;
1308 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1309 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
1311 mmc_request_done(mmc, mrq);
1315 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1316 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1318 dw_mci_queue_request(host, slot, mrq);
1320 spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.set_ios: apply bus width, timing, clock and power changes
 * requested by the MMC core.  Optionally waits for the controller/card to
 * go unbusy first (SDMMC_WAIT_FOR_UNBUSY), with a longer timeout while a
 * signal-voltage switch is in flight (svi_flags == 1).
 */
1323 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1325 struct dw_mci_slot *slot = mmc_priv(mmc);
1326 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1327 struct dw_mci *host = slot->host;
1329 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1332 #ifdef SDMMC_WAIT_FOR_UNBUSY
1333 unsigned long time_loop;
1336 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* During a voltage switch allow the (longer) SD data timeout. */
1337 if(host->svi_flags == 1)
1338 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1340 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1342 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1345 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1346 printk("%d..%s: no card. [%s]\n", \
1347 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until both data-busy and MC-busy clear, or timeout. */
1352 ret = time_before(jiffies, time_loop);
1353 regs = mci_readl(slot->host, STATUS);
1354 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1360 printk("slot->flags = %lu ", slot->flags);
1361 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1362 if(host->svi_flags != 1)
1365 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1366 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
1370 switch (ios->bus_width) {
1371 case MMC_BUS_WIDTH_4:
1372 slot->ctype = SDMMC_CTYPE_4BIT;
1374 case MMC_BUS_WIDTH_8:
1375 slot->ctype = SDMMC_CTYPE_8BIT;
1378 /* set default 1 bit mode */
1379 slot->ctype = SDMMC_CTYPE_1BIT;
1380 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode is a per-slot bit in the upper half of UHS_REG. */
1383 regs = mci_readl(slot->host, UHS_REG);
1386 if (ios->timing == MMC_TIMING_UHS_DDR50)
1387 regs |= ((0x1 << slot->id) << 16);
1389 regs &= ~((0x1 << slot->id) << 16);
1391 mci_writel(slot->host, UHS_REG, regs);
1392 slot->host->timing = ios->timing;
1395 * Use mirror of ios->clock to prevent race with mmc
1396 * core ios update when finding the minimum.
1398 slot->clock = ios->clock;
/* Give the SoC-specific variant a chance to adjust (e.g. clocking). */
1400 if (drv_data && drv_data->set_ios)
1401 drv_data->set_ios(slot->host, ios);
1403 /* Slot specific timing and width adjustment */
1404 dw_mci_setup_bus(slot, false);
1408 switch (ios->power_mode) {
1410 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1412 if (slot->host->pdata->setpower)
1413 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1414 regs = mci_readl(slot->host, PWREN);
1415 regs |= (1 << slot->id);
1416 mci_writel(slot->host, PWREN, regs);
1419 /* Power down slot */
1420 if(slot->host->pdata->setpower)
1421 slot->host->pdata->setpower(slot->id, 0);
1422 regs = mci_readl(slot->host, PWREN);
1423 regs &= ~(1 << slot->id);
1424 mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops.get_ro: report write-protect state.  Precedence: slot
 * quirk (never protected) > platform get_ro() hook > WP GPIO > the
 * controller's WRTPRT register bit for this slot.
 */
1431 static int dw_mci_get_ro(struct mmc_host *mmc)
1434 struct dw_mci_slot *slot = mmc_priv(mmc);
1435 struct dw_mci_board *brd = slot->host->pdata;
1437 /* Use platform get_ro function, else try on board write protect */
1438 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1440 else if(brd->get_ro)
1441 read_only = brd->get_ro(slot->id);
1442 else if(gpio_is_valid(slot->wp_gpio))
1443 read_only = gpio_get_value(slot->wp_gpio);
1446 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1448 dev_dbg(&mmc->class_dev, "card is %s\n",
1449 read_only ? "read-only" : "read-write");
/*
 * mmc_host_ops.set_sdio_status: software card-insert/remove for SDIO
 * (e.g. WiFi power toggling).  Updates the PRESENT flag under host->lock,
 * gates the controller clocks to match, and triggers a rescan.
 */
1454 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1456 struct dw_mci_slot *slot = mmc_priv(mmc);
1457 struct dw_mci *host = slot->host;
1458 /*struct dw_mci_board *brd = slot->host->pdata;*/
1460 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1463 spin_lock_bh(&host->lock);
1466 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1468 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1470 spin_unlock_bh(&host->lock);
/* Enable clocks when "inserted", disable them when "removed". */
1472 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1473 if(__clk_is_enabled(host->hclk_mmc) == false)
1474 clk_prepare_enable(host->hclk_mmc);
1475 if(__clk_is_enabled(host->clk_mmc) == false)
1476 clk_prepare_enable(host->clk_mmc);
1478 if(__clk_is_enabled(host->clk_mmc) == true)
1479 clk_disable_unprepare(slot->host->clk_mmc);
1480 if(__clk_is_enabled(host->hclk_mmc) == true)
1481 clk_disable_unprepare(slot->host->hclk_mmc);
/* Debounced rescan so the MMC core notices the status change. */
1484 mmc_detect_change(slot->mmc, 20);
/*
 * mmc_host_ops.get_cd: report card presence.  SDIO uses the software
 * PRESENT flag only; otherwise precedence is: broken-CD quirk > platform
 * get_cd() hook > CD GPIO > the controller's CDETECT register.  The
 * cached PRESENT bit is updated under host->lock.
 */
1490 static int dw_mci_get_cd(struct mmc_host *mmc)
1493 struct dw_mci_slot *slot = mmc_priv(mmc);
1494 struct dw_mci_board *brd = slot->host->pdata;
1495 struct dw_mci *host = slot->host;
1496 int gpio_cd = mmc_gpio_get_cd(mmc);
1498 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1499 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1501 /* Use platform get_cd function, else try onboard card detect */
1502 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1504 else if (brd->get_cd)
1505 present = !brd->get_cd(slot->id);
1506 else if (!IS_ERR_VALUE(gpio_cd))
1509 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1511 spin_lock_bh(&host->lock);
1513 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1514 dev_dbg(&mmc->class_dev, "card is present\n");
1516 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1517 dev_dbg(&mmc->class_dev, "card is not present\n");
1519 spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.hw_reset: full recovery sequence for the eMMC slot.
 * DTS should cap the eMMC controller with poll-hw-reset.
 *
 * Sequence: (1) CMD12 to abort any in-flight transfer, (2) wait for the
 * data-over interrupt, (3) IDMAC/DMA/FIFO soft resets in that exact
 * order, then (4) pulse PWREN/RST_n per the eMMC spec timing.
 */
1528 static void dw_mci_hw_reset(struct mmc_host *mmc)
1530 struct dw_mci_slot *slot = mmc_priv(mmc);
1531 struct dw_mci *host = slot->host;
1536 unsigned long timeout;
1539 /* (1) CMD12 to end any transfer in process */
1540 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1541 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1543 if(host->mmc->hold_reg_flag)
1544 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1545 mci_writel(host, CMDARG, 0);
1547 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll for the controller to accept the command (START bit self-clears). */
1549 timeout = jiffies + msecs_to_jiffies(500);
1551 ret = time_before(jiffies, timeout);
1552 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1557 MMC_DBG_ERR_FUNC(host->mmc,
1558 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1559 __func__, mmc_hostname(host->mmc));
1561 /* (2) wait DTO, even if no response is sent back by card */
1563 timeout = jiffies + msecs_to_jiffies(5);
1565 ret = time_before(jiffies, timeout);
1566 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1567 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1573 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1575 /* Software reset - BMOD[0] for IDMA only */
1576 regs = mci_readl(host, BMOD);
1577 regs |= SDMMC_IDMAC_SWRESET;
1578 mci_writel(host, BMOD, regs);
1579 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1580 regs = mci_readl(host, BMOD);
1581 if(regs & SDMMC_IDMAC_SWRESET)
1582 MMC_DBG_WARN_FUNC(host->mmc,
1583 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1584 __func__, mmc_hostname(host->mmc));
1586 /* DMA reset - CTRL[2] */
1587 regs = mci_readl(host, CTRL);
1588 regs |= SDMMC_CTRL_DMA_RESET;
1589 mci_writel(host, CTRL, regs);
1590 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1591 regs = mci_readl(host, CTRL);
1592 if(regs & SDMMC_CTRL_DMA_RESET)
1593 MMC_DBG_WARN_FUNC(host->mmc,
1594 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1595 __func__, mmc_hostname(host->mmc));
1597 /* FIFO reset - CTRL[1] */
1598 regs = mci_readl(host, CTRL);
1599 regs |= SDMMC_CTRL_FIFO_RESET;
1600 mci_writel(host, CTRL, regs);
1601 mdelay(1); /* no timing limited, 1ms is random value */
1602 regs = mci_readl(host, CTRL);
1603 if(regs & SDMMC_CTRL_FIFO_RESET)
1604 MMC_DBG_WARN_FUNC(host->mmc,
/* Fixed copy-paste bug: this path checks the FIFO reset, not DMA reset. */
1605 "%s dw_mci_hw_reset: SDMMC_CTRL_FIFO_RESET failed!!! [%s]\n",
1606 __func__, mmc_hostname(host->mmc));
1609 According to eMMC spec
1610 tRstW >= 1us ; RST_n pulse width
1611 tRSCA >= 200us ; RST_n to Command time
1612 tRSTH >= 1us ; RST_n high period
1614 mci_writel(slot->host, PWREN, 0x0);
1615 mci_writel(slot->host, RST_N, 0x0);
1617 udelay(10); /* 10us for bad quality eMMc. */
1619 mci_writel(slot->host, PWREN, 0x1);
1620 mci_writel(slot->host, RST_N, 0x1);
1622 usleep_range(500, 1000); /* at least 500(> 200us) */
1626 * Disable lower power mode.
1628 * Low power mode will stop the card clock when idle. According to the
1629 * description of the CLKENA register we should disable low power mode
1630 * for SDIO cards if we need SDIO interrupts to work.
1632 * This function is fast if low power mode is already disabled.
1634 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1636 struct dw_mci *host = slot->host;
1638 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1640 clk_en_a = mci_readl(host, CLKENA);
/* Only touch the hardware (and re-latch the clock) if the bit is set. */
1642 if (clk_en_a & clken_low_pwr) {
1643 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
/* UPD_CLK command makes the controller latch the new CLKENA value. */
1644 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1645 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * mmc_host_ops.enable_sdio_irq: mask/unmask the per-slot SDIO interrupt
 * in INTMASK.  IP versions before 2.40a use a different SDIO bit layout.
 */
1649 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1651 struct dw_mci_slot *slot = mmc_priv(mmc);
1652 struct dw_mci *host = slot->host;
1656 /* Enable/disable Slot Specific SDIO interrupt */
1657 int_mask = mci_readl(host, INTMASK);
1659 if (host->verid < DW_MMC_240A)
1660 sdio_int = SDMMC_INT_SDIO(slot->id);
1662 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1666 * Turn off low power mode if it was enabled. This is a bit of
1667 * a heavy operation and we disable / enable IRQs a lot, so
1668 * we'll leave low power mode disabled and it will get
1669 * re-enabled again in dw_mci_setup_bus().
1671 dw_mci_disable_low_power(slot);
1673 mci_writel(host, INTMASK,
1674 (int_mask | sdio_int));
1676 mci_writel(host, INTMASK,
1677 (int_mask & ~sdio_int));
1681 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* IO-domain voltages in millivolts. */
1683 IO_DOMAIN_12 = 1200,
1684 IO_DOMAIN_18 = 1800,
1685 IO_DOMAIN_33 = 3300,
/*
 * Program the SoC GRF IO-domain voltage select for the SD pins to match
 * the new signalling voltage.  Only RK3288 is handled here; other chips
 * log an error.  NOTE(review): the value written assumes the RK3288
 * GRF_IO_VSEL bit layout (bit 7 select, bit 23 write-enable) — confirm
 * against the RK3288 TRM.
 */
1687 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1697 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1698 __FUNCTION__, mmc_hostname(host->mmc));
1701 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1702 __FUNCTION__, mmc_hostname(host->mmc));
1706 if(cpu_is_rk3288()){
1707 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1708 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1712 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1713 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * Perform the actual signalling-voltage switch: adjust the external
 * regulator (vmmc), switch the SoC IO domain, and program UHS_REG's
 * 1.8V enable bit, then verify the register state after the regulator
 * settling time.  Only meaningful on IP >= 2.40a.
 */
1717 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1718 struct mmc_ios *ios)
1721 unsigned int value,uhs_reg;
1724 * Signal Voltage Switching is only applicable for Host Controllers
1727 if (host->verid < DW_MMC_240A)
1730 uhs_reg = mci_readl(host, UHS_REG);
1731 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1732 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1734 switch (ios->signal_voltage) {
1735 case MMC_SIGNAL_VOLTAGE_330:
1736 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1738 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1739 /* regulator_put(host->vmmc); //to be done in remove function. */
1741 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1742 __func__, regulator_get_voltage(host->vmmc), ret);
1744 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1745 " failed\n", mmc_hostname(host->mmc));
1748 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1750 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1751 __FUNCTION__, mmc_hostname(host->mmc));
1753 /* set High-power mode */
1754 value = mci_readl(host, CLKENA);
1755 value &= ~SDMMC_CLKEN_LOW_PWR;
1756 mci_writel(host,CLKENA , value);
/* Clear the 1.8V enable bit for 3.3V signalling. */
1758 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1759 mci_writel(host,UHS_REG , uhs_reg);
1762 usleep_range(5000, 5500);
1764 /* 3.3V regulator output should be stable within 5 ms */
1765 uhs_reg = mci_readl(host, UHS_REG);
1766 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1769 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1770 mmc_hostname(host->mmc));
1773 case MMC_SIGNAL_VOLTAGE_180:
1775 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1776 /* regulator_put(host->vmmc);//to be done in remove function. */
1778 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1779 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1781 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1782 " failed\n", mmc_hostname(host->mmc));
1785 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1789 * Enable 1.8V Signal Enable in the Host Control2
1792 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1795 usleep_range(5000, 5500);
1796 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1797 __FUNCTION__,mmc_hostname(host->mmc));
1799 /* 1.8V regulator output should be stable within 5 ms */
1800 uhs_reg = mci_readl(host, UHS_REG);
1801 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1804 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1805 mmc_hostname(host->mmc));
1808 case MMC_SIGNAL_VOLTAGE_120:
1810 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1812 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1813 " failed\n", mmc_hostname(host->mmc));
1819 /* No signal voltage switch required */
/*
 * mmc_host_ops.start_signal_voltage_switch: thin wrapper that bails out
 * on pre-2.40a IP (no UHS support) and otherwise delegates to
 * dw_mci_do_start_signal_voltage_switch().
 */
1825 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1826 struct mmc_ios *ios)
1828 struct dw_mci_slot *slot = mmc_priv(mmc);
1829 struct dw_mci *host = slot->host;
1832 if (host->verid < DW_MMC_240A)
1835 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * mmc_host_ops.execute_tuning: select the tuning block pattern for the
 * requested opcode/bus width, pick the clock controller id for this card
 * type, and delegate the actual phase sweep to the SoC-specific
 * drv_data->execute_tuning() implementation.
 */
1841 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1843 struct dw_mci_slot *slot = mmc_priv(mmc);
1844 struct dw_mci *host = slot->host;
1845 const struct dw_mci_drv_data *drv_data = host->drv_data;
1846 struct dw_mci_tuning_data tuning_data;
1849 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1850 if(cpu_is_rk3036() || cpu_is_rk312x())
1853 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1854 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1855 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1856 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1857 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1858 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1859 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1863 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1864 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1865 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1868 "Undefined command(%d) for tuning\n", opcode);
1873 /* Recommend sample phase and delayline
1874 Fixme: Mix-use these three controllers will cause
1877 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1878 tuning_data.con_id = 3;
1879 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1880 tuning_data.con_id = 1;
1882 tuning_data.con_id = 0;
1884 /* 0: driver, from host->devices
1885 1: sample, from devices->host
1887 tuning_data.tuning_type = 1;
1889 if (drv_data && drv_data->execute_tuning)
1890 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/* mmc_host_ops table: glue between the MMC core and this host driver. */
1895 static const struct mmc_host_ops dw_mci_ops = {
1896 .request = dw_mci_request,
1897 .pre_req = dw_mci_pre_req,
1898 .post_req = dw_mci_post_req,
1899 .set_ios = dw_mci_set_ios,
1900 .get_ro = dw_mci_get_ro,
1901 .get_cd = dw_mci_get_cd,
1902 .set_sdio_status = dw_mci_set_sdio_status,
1903 .hw_reset = dw_mci_hw_reset,
1904 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1905 .execute_tuning = dw_mci_execute_tuning,
1906 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1907 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
1908 .card_busy = dw_mci_card_busy,
/*
 * Enable or disable the controller IRQ line.  host->irq_state tracks the
 * current state so repeated calls with the same flag are no-ops (avoids
 * unbalanced enable_irq()/disable_irq() nesting).
 */
1913 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
1915 unsigned long flags;
1920 local_irq_save(flags);
1921 if(host->irq_state != irqflag)
1923 host->irq_state = irqflag;
1926 enable_irq(host->irq);
1930 disable_irq(host->irq);
1933 local_irq_restore(flags);
/*
 * End-of-data-transfer housekeeping: for writes (except the BUS_TEST_W
 * probe command), translate latched data-status error bits into
 * data->error, then wait for the card to go unbusy before the next
 * command.
 */
1937 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
1938 __releases(&host->lock)
1939 __acquires(&host->lock)
1941 if(DW_MCI_SEND_STATUS == host->dir_status){
1943 if( MMC_BUS_TEST_W != host->cmd->opcode){
1944 if(host->data_status & SDMMC_INT_DCRC)
1945 host->data->error = -EILSEQ;
1946 else if(host->data_status & SDMMC_INT_EBE)
1947 host->data->error = -ETIMEDOUT;
1949 dw_mci_wait_unbusy(host);
1952 dw_mci_wait_unbusy(host);
/*
 * Finish the current request: cancel the DTO timer, run end-of-data
 * housekeeping, start the next queued slot (or go idle), and complete
 * the request back to the MMC core.  host->lock is dropped around
 * mmc_request_done() as the sparse annotations indicate.
 */
1957 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1958 __releases(&host->lock)
1959 __acquires(&host->lock)
1961 struct dw_mci_slot *slot;
1962 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1964 WARN_ON(host->cmd || host->data);
1966 del_timer_sync(&host->dto_timer);
1967 dw_mci_deal_data_end(host, mrq);
1970 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
1971 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
1973 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
1974 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
1976 host->cur_slot->mrq = NULL;
/* Dispatch the next waiting slot, if any; otherwise go idle. */
1978 if (!list_empty(&host->queue)) {
1979 slot = list_entry(host->queue.next,
1980 struct dw_mci_slot, queue_node);
1981 list_del(&slot->queue_node);
1982 dev_vdbg(host->dev, "list not empty: %s is next\n",
1983 mmc_hostname(slot->mmc));
1984 host->state = STATE_SENDING_CMD;
1985 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
1986 dw_mci_start_request(host, slot);
1988 dev_vdbg(host->dev, "list empty\n");
1989 host->state = STATE_IDLE;
/* Drop the lock while notifying the core — it may re-enter us. */
1992 spin_unlock(&host->lock);
1993 mmc_request_done(prev_mmc, mrq);
1994 spin_lock(&host->lock);
/*
 * Harvest a completed command: read the (up to 136-bit) response from
 * the RESP registers and translate latched status error bits (RTO, RCRC,
 * response error) into cmd->error.  The DTO timer is cancelled on every
 * error path so a failed command cannot later fire a spurious timeout.
 */
1997 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1999 u32 status = host->cmd_status;
2001 host->cmd_status = 0;
2003 /* Read the response from the card (up to 16 bytes) */
2004 if (cmd->flags & MMC_RSP_PRESENT) {
2005 if (cmd->flags & MMC_RSP_136) {
/* Long response: RESP0..RESP3 hold the 128 bits in reverse order. */
2006 cmd->resp[3] = mci_readl(host, RESP0);
2007 cmd->resp[2] = mci_readl(host, RESP1);
2008 cmd->resp[1] = mci_readl(host, RESP2);
2009 cmd->resp[0] = mci_readl(host, RESP3);
2011 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2012 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2014 cmd->resp[0] = mci_readl(host, RESP0);
2018 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2019 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
2023 if (status & SDMMC_INT_RTO)
2025 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2028 cmd->error = -ETIMEDOUT;
2029 del_timer_sync(&host->dto_timer);
2030 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2031 del_timer_sync(&host->dto_timer);
2032 cmd->error = -EILSEQ;
2033 }else if (status & SDMMC_INT_RESP_ERR){
2034 del_timer_sync(&host->dto_timer);
2039 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2040 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2043 del_timer_sync(&host->dto_timer);
/* Repeated CMD response timeouts (except SEND_STATUS) are escalated. */
2044 if(MMC_SEND_STATUS != cmd->opcode)
2045 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2046 MMC_DBG_ERR_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2047 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2051 /* newer ip versions need a delay between retries */
2052 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine driven by interrupt-set event bits in
 * host->pending_events.  Loops under host->lock, advancing host->state
 * through SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP
 * (with DATA_ERROR as a detour) until no further progress can be made.
 */
2058 static void dw_mci_tasklet_func(unsigned long priv)
2060 struct dw_mci *host = (struct dw_mci *)priv;
2061 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2062 struct mmc_data *data;
2063 struct mmc_command *cmd;
2064 enum dw_mci_state state;
2065 enum dw_mci_state prev_state;
2066 u32 status, cmd_flags;
2067 unsigned long timeout = 0;
2070 spin_lock(&host->lock);
2072 state = host->state;
2082 case STATE_SENDING_CMD:
2083 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2084 &host->pending_events))
2089 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2090 dw_mci_command_complete(host, cmd);
/* SBC (CMD23) finished OK: now launch the actual data command. */
2091 if (cmd == host->mrq->sbc && !cmd->error) {
2092 prev_state = state = STATE_SENDING_CMD;
2093 __dw_mci_start_request(host, host->cur_slot,
/* Data command failed before transfer: abort DMA and send stop. */
2098 if (cmd->data && cmd->error) {
2099 del_timer_sync(&host->dto_timer); /* delete the timer for INT_DTO */
2100 dw_mci_stop_dma(host);
2103 send_stop_cmd(host, data);
2104 state = STATE_SENDING_STOP;
2110 send_stop_abort(host, data);
2111 state = STATE_SENDING_STOP;
2114 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2117 if (!host->mrq->data || cmd->error) {
2118 dw_mci_request_end(host, host->mrq);
2122 prev_state = state = STATE_SENDING_DATA;
2125 case STATE_SENDING_DATA:
2126 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2127 dw_mci_stop_dma(host);
2130 send_stop_cmd(host, data);
2132 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2133 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2134 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
2136 mci_writel(host, CMDARG, 0);
2138 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2139 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2141 if(host->mmc->hold_reg_flag)
2142 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2144 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait for the controller to accept the manual CMD12. */
2146 timeout = jiffies + msecs_to_jiffies(500);
2149 ret = time_before(jiffies, timeout);
2150 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2154 MMC_DBG_ERR_FUNC(host->mmc,
2155 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2156 __func__, mmc_hostname(host->mmc));
2159 send_stop_abort(host, data);
2161 state = STATE_DATA_ERROR;
2165 MMC_DBG_CMD_FUNC(host->mmc,
2166 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2167 prev_state,state, mmc_hostname(host->mmc));
2169 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2170 &host->pending_events))
2172 MMC_DBG_INFO_FUNC(host->mmc,
2173 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2174 prev_state,state,mmc_hostname(host->mmc));
2176 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2177 prev_state = state = STATE_DATA_BUSY;
2180 case STATE_DATA_BUSY:
2181 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2182 &host->pending_events))
2185 dw_mci_deal_data_end(host, host->mrq);
2186 del_timer_sync(&host->dto_timer); //delete the timer for INT_DTO
2187 MMC_DBG_INFO_FUNC(host->mmc,
2188 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2189 prev_state,state,mmc_hostname(host->mmc));
2192 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2193 status = host->data_status;
/* Translate latched data-error status bits into data->error. */
2195 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2196 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2197 MMC_DBG_ERR_FUNC(host->mmc,
2198 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2199 prev_state,state, status, mmc_hostname(host->mmc));
2201 if (status & SDMMC_INT_DRTO) {
2202 data->error = -ETIMEDOUT;
2203 } else if (status & SDMMC_INT_DCRC) {
2204 data->error = -EILSEQ;
2205 } else if (status & SDMMC_INT_EBE &&
2206 host->dir_status == DW_MCI_SEND_STATUS){
2208 * No data CRC status was returned.
2209 * The number of bytes transferred will
2210 * be exaggerated in PIO mode.
2212 data->bytes_xfered = 0;
2213 data->error = -ETIMEDOUT;
2222 * After an error, there may be data lingering
2223 * in the FIFO, so reset it - doing so
2224 * generates a block interrupt, hence setting
2225 * the scatter-gather pointer to NULL.
2227 dw_mci_fifo_reset(host);
2229 data->bytes_xfered = data->blocks * data->blksz;
2234 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2235 prev_state,state,mmc_hostname(host->mmc));
2236 dw_mci_request_end(host, host->mrq);
2239 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2240 prev_state,state,mmc_hostname(host->mmc));
/* With SBC the card stops itself; no CMD12 needed on success. */
2242 if (host->mrq->sbc && !data->error) {
2243 data->stop->error = 0;
2245 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2246 prev_state,state,mmc_hostname(host->mmc));
2248 dw_mci_request_end(host, host->mrq);
2252 prev_state = state = STATE_SENDING_STOP;
2254 send_stop_cmd(host, data);
2256 if (data->stop && !data->error) {
2257 /* stop command for open-ended transfer*/
2259 send_stop_abort(host, data);
2263 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2264 prev_state,state,mmc_hostname(host->mmc));
2266 case STATE_SENDING_STOP:
2267 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2270 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2271 prev_state, state, mmc_hostname(host->mmc));
2273 /* CMD error in data command */
2274 if (host->mrq->cmd->error && host->mrq->data) {
2275 dw_mci_fifo_reset(host);
2281 dw_mci_command_complete(host, host->mrq->stop);
2283 if (host->mrq->stop)
2284 dw_mci_command_complete(host, host->mrq->stop);
2286 host->cmd_status = 0;
2289 dw_mci_request_end(host, host->mrq);
2292 case STATE_DATA_ERROR:
2293 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2294 &host->pending_events))
2297 state = STATE_DATA_BUSY;
/* Loop until a full pass makes no state change. */
2300 } while (state != prev_state);
2302 host->state = state;
2304 spin_unlock(&host->lock);
/* Stash the trailing (< FIFO-word-size) bytes of a push into part_buf. */
2308 /* push final bytes to part_buf, only use during push */
2309 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2311 memcpy((void *)&host->part_buf, buf, cnt);
2312 host->part_buf_count = cnt;
/*
 * Append up to a full FIFO word's worth of bytes to part_buf; returns
 * the number of bytes actually consumed from @buf.
 */
2315 /* append bytes to part_buf, only use during push */
2316 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2318 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2319 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2320 host->part_buf_count += cnt;
2324 /* pull first bytes from part_buf, only use during pull */
2325 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2327 cnt = min(cnt, (int)host->part_buf_count);
2329 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2331 host->part_buf_count -= cnt;
2332 host->part_buf_start += cnt;
2337 /* pull final bytes from the part_buf, assuming it's just been filled */
2338 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2340 memcpy(buf, &host->part_buf, cnt);
2341 host->part_buf_start = cnt;
2342 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO push for a 16-bit FIFO: flush any partial word, handle unaligned
 * buffers via a bounce buffer, stream full halfwords, and stash any
 * trailing byte in part_buf (flushed once the whole transfer is queued).
 */
2345 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2347 struct mmc_data *data = host->data;
2350 /* try and push anything in the part_buf */
2351 if (unlikely(host->part_buf_count)) {
2352 int len = dw_mci_push_part_bytes(host, buf, cnt);
2355 if (host->part_buf_count == 2) {
2356 mci_writew(host, DATA(host->data_offset),
2358 host->part_buf_count = 0;
2361 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2362 if (unlikely((unsigned long)buf & 0x1)) {
2364 u16 aligned_buf[64];
2365 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2366 int items = len >> 1;
2368 /* memcpy from input buffer into aligned buffer */
2369 memcpy(aligned_buf, buf, len);
2372 /* push data from aligned buffer into fifo */
2373 for (i = 0; i < items; ++i)
2374 mci_writew(host, DATA(host->data_offset),
2381 for (; cnt >= 2; cnt -= 2)
2382 mci_writew(host, DATA(host->data_offset), *pdata++);
2385 /* put anything remaining in the part_buf */
2387 dw_mci_set_part_bytes(host, buf, cnt);
2388 /* Push data if we have reached the expected data length */
2389 if ((data->bytes_xfered + init_cnt) ==
2390 (data->blksz * data->blocks))
2391 mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit FIFO: bounce-buffer unaligned destinations,
 * stream full halfwords, and route a trailing odd byte through part_buf.
 */
2396 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2398 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2399 if (unlikely((unsigned long)buf & 0x1)) {
2401 /* pull data from fifo into aligned buffer */
2402 u16 aligned_buf[64];
2403 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2404 int items = len >> 1;
2406 for (i = 0; i < items; ++i)
2407 aligned_buf[i] = mci_readw(host,
2408 DATA(host->data_offset));
2409 /* memcpy from aligned buffer into output buffer */
2410 memcpy(buf, aligned_buf, len);
2418 for (; cnt >= 2; cnt -= 2)
2419 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Odd trailing byte: read one more halfword and split it via part_buf. */
2423 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2424 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit FIFO: same scheme as the 16-bit variant with a
 * 4-byte word size (flush part_buf, bounce-buffer unaligned input,
 * stream words, stash the 1-3 trailing bytes).
 */
2428 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2430 struct mmc_data *data = host->data;
2433 /* try and push anything in the part_buf */
2434 if (unlikely(host->part_buf_count)) {
2435 int len = dw_mci_push_part_bytes(host, buf, cnt);
2438 if (host->part_buf_count == 4) {
2439 mci_writel(host, DATA(host->data_offset),
2441 host->part_buf_count = 0;
2444 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2445 if (unlikely((unsigned long)buf & 0x3)) {
2447 u32 aligned_buf[32];
2448 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2449 int items = len >> 2;
2451 /* memcpy from input buffer into aligned buffer */
2452 memcpy(aligned_buf, buf, len);
2455 /* push data from aligned buffer into fifo */
2456 for (i = 0; i < items; ++i)
2457 mci_writel(host, DATA(host->data_offset),
2464 for (; cnt >= 4; cnt -= 4)
2465 mci_writel(host, DATA(host->data_offset), *pdata++);
2468 /* put anything remaining in the part_buf */
2470 dw_mci_set_part_bytes(host, buf, cnt);
2471 /* Push data if we have reached the expected data length */
2472 if ((data->bytes_xfered + init_cnt) ==
2473 (data->blksz * data->blocks))
2474 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit FIFO: bounce-buffer unaligned destinations,
 * stream full words, route 1-3 trailing bytes through part_buf.
 */
2479 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2481 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2482 if (unlikely((unsigned long)buf & 0x3)) {
2484 /* pull data from fifo into aligned buffer */
2485 u32 aligned_buf[32];
2486 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2487 int items = len >> 2;
2489 for (i = 0; i < items; ++i)
2490 aligned_buf[i] = mci_readl(host,
2491 DATA(host->data_offset));
2492 /* memcpy from aligned buffer into output buffer */
2493 memcpy(buf, aligned_buf, len);
2501 for (; cnt >= 4; cnt -= 4)
2502 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Trailing bytes: read one more word and split it via part_buf. */
2506 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2507 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 64-bit FIFO: same scheme as the narrower variants with
 * an 8-byte word size.
 */
2511 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2513 struct mmc_data *data = host->data;
2516 /* try and push anything in the part_buf */
2517 if (unlikely(host->part_buf_count)) {
2518 int len = dw_mci_push_part_bytes(host, buf, cnt);
2522 if (host->part_buf_count == 8) {
2523 mci_writeq(host, DATA(host->data_offset),
2525 host->part_buf_count = 0;
2528 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2529 if (unlikely((unsigned long)buf & 0x7)) {
2531 u64 aligned_buf[16];
2532 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2533 int items = len >> 3;
2535 /* memcpy from input buffer into aligned buffer */
2536 memcpy(aligned_buf, buf, len);
2539 /* push data from aligned buffer into fifo */
2540 for (i = 0; i < items; ++i)
2541 mci_writeq(host, DATA(host->data_offset),
2548 for (; cnt >= 8; cnt -= 8)
2549 mci_writeq(host, DATA(host->data_offset), *pdata++);
2552 /* put anything remaining in the part_buf */
2554 dw_mci_set_part_bytes(host, buf, cnt);
2555 /* Push data if we have reached the expected data length */
2556 if ((data->bytes_xfered + init_cnt) ==
2557 (data->blksz * data->blocks))
2558 mci_writeq(host, DATA(host->data_offset),
/*
 * Pull @cnt bytes out of the data FIFO into @buf using 64-bit FIFO reads.
 * Mirrors dw_mci_pull_data32() but with an 8-byte alignment requirement;
 * a trailing partial word is parked in host->part_buf and copied out by
 * dw_mci_pull_final_bytes().
 */
2563 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2565 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2566 if (unlikely((unsigned long)buf & 0x7)) {
2568 /* pull data from fifo into aligned buffer */
2569 u64 aligned_buf[16];
2570 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2571 int items = len >> 3;
2573 for (i = 0; i < items; ++i)
2574 aligned_buf[i] = mci_readq(host,
2575 DATA(host->data_offset));
2576 /* memcpy from aligned buffer into output buffer */
2577 memcpy(buf, aligned_buf, len);
/* aligned fast path: read whole 64-bit words directly */
2585 for (; cnt >= 8; cnt -= 8)
2586 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* save the final (partial) word and copy out the remaining bytes */
2590 host->part_buf = mci_readq(host, DATA(host->data_offset));
2591 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Top-level FIFO read: first drain any bytes held over in the part_buf,
 * then dispatch to the width-specific pull handler (16/32/64-bit) that
 * was selected at probe time via host->pull_data.
 */
2595 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2599 /* get remaining partial bytes */
2600 len = dw_mci_pull_part_bytes(host, buf, cnt);
/* request fully satisfied from the part_buf — nothing to read from FIFO */
2601 if (unlikely(len == cnt))
2606 /* get the rest of the data */
2607 host->pull_data(host, buf, cnt);
/*
 * PIO receive path: iterate over the request's scatterlist with the
 * sg_mapping_iter and pull data out of the FIFO while the RXDR (receive
 * data ready) interrupt keeps re-asserting.  @dto indicates this call
 * came from the data-over interrupt, in which case we also drain any
 * residue left in the FIFO.  Marks EVENT_XFER_COMPLETE when the
 * scatterlist is exhausted.
 */
2610 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2612 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2614 unsigned int offset;
2615 struct mmc_data *data = host->data;
2616 int shift = host->data_shift;
2619 unsigned int remain, fcnt;
/* Rockchip diagnostic: flag a host whose bus reference count dropped to 0 */
2621 if(!host->mmc->bus_refs){
2622 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2626 if (!sg_miter_next(sg_miter))
2629 host->sg = sg_miter->piter.sg;
2630 buf = sg_miter->addr;
2631 remain = sg_miter->length;
/* bytes currently available = FIFO fill level (in bytes) + partial bytes */
2635 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2636 << shift) + host->part_buf_count;
2637 len = min(remain, fcnt);
2640 dw_mci_pull_data(host, (void *)(buf + offset), len);
2641 data->bytes_xfered += len;
2646 sg_miter->consumed = offset;
2647 status = mci_readl(host, MINTSTS);
2648 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2649 /* if the RXDR is ready read again */
2650 } while ((status & SDMMC_INT_RXDR) ||
2651 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2654 if (!sg_miter_next(sg_miter))
2656 sg_miter->consumed = 0;
2658 sg_miter_stop(sg_miter);
2662 sg_miter_stop(sg_miter);
2666 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO transmit path: walk the scatterlist and push data into the FIFO
 * while the TXDR (transmit data request) interrupt keeps re-asserting.
 * The amount pushed per pass is limited by the free FIFO space
 * (fifo_depth minus current fill level).  Marks EVENT_XFER_COMPLETE
 * when the scatterlist is exhausted.
 */
2669 static void dw_mci_write_data_pio(struct dw_mci *host)
2671 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2673 unsigned int offset;
2674 struct mmc_data *data = host->data;
2675 int shift = host->data_shift;
2678 unsigned int fifo_depth = host->fifo_depth;
2679 unsigned int remain, fcnt;
/* Rockchip diagnostic: flag a host whose bus reference count dropped to 0 */
2681 if(!host->mmc->bus_refs){
2682 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2687 if (!sg_miter_next(sg_miter))
2690 host->sg = sg_miter->piter.sg;
2691 buf = sg_miter->addr;
2692 remain = sg_miter->length;
/* free space in the FIFO, in bytes, minus bytes already parked in part_buf */
2696 fcnt = ((fifo_depth -
2697 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2698 << shift) - host->part_buf_count;
2699 len = min(remain, fcnt);
2702 host->push_data(host, (void *)(buf + offset), len);
2703 data->bytes_xfered += len;
2708 sg_miter->consumed = offset;
2709 status = mci_readl(host, MINTSTS);
2710 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2711 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2714 if (!sg_miter_next(sg_miter))
2716 sg_miter->consumed = 0;
2718 sg_miter_stop(sg_miter);
2722 sg_miter_stop(sg_miter);
2726 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Command-done interrupt bottom half trigger: record the raw interrupt
 * status (first writer wins), (re)arm the data-over watchdog timer with
 * a timeout scaled by the transfer size and retry count, then schedule
 * the state-machine tasklet.
 */
2729 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2733 if (!host->cmd_status)
2734 host->cmd_status = status;
/* STOP_TRANSMISSION carries no data phase — no DTO watchdog needed for it */
2739 if((MMC_STOP_TRANSMISSION != host->cmd->opcode))
/* scale the watchdog by number of 'unit'-sized chunks in this transfer;
 * 'unit' is defined on a line not visible here — TODO confirm its value */
2742 multi = mci_readl(host, BYTCNT)/unit;
2743 multi += ((mci_readl(host, BYTCNT) % unit) ? 1 :0 );
2744 multi = (multi > 0) ? multi : 1;
2745 multi += (host->cmd->retries > 2)? 2 : host->cmd->retries;
2746 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(4500 * multi));//max wait 8s larger
2751 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2752 tasklet_schedule(&host->tasklet);
/*
 * Top-half interrupt handler.  Reads the masked interrupt status and
 * dispatches each pending source: command errors, data errors, data-over,
 * RXDR/TXDR PIO servicing, voltage-switch, command-done, card-detect,
 * hardware-locked errors, per-slot SDIO interrupts and (for SoCs with the
 * internal IDMAC) DMA completion.  Heavy lifting is deferred to the
 * tasklet / card workqueue.
 */
2755 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2757 struct dw_mci *host = dev_id;
2758 u32 pending, sdio_int;
2761 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2764 * DTO fix - version 2.10a and below, and only if internal DMA
2767 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
/* synthesize DATA_OVER when the FIFO count (STATUS[29:17]) shows residue */
2769 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2770 pending |= SDMMC_INT_DATA_OVER;
2774 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2775 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2776 host->cmd_status = pending;
2778 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2779 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2781 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2784 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2785 /* if there is an error report DATA_ERROR */
2786 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2787 host->data_status = pending;
2789 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2791 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2792 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2793 tasklet_schedule(&host->tasklet);
2796 if (pending & SDMMC_INT_DATA_OVER) {
2797 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2798 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2799 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2800 if (!host->data_status)
2801 host->data_status = pending;
/* on a read, drain whatever is left in the FIFO before completing */
2803 if (host->dir_status == DW_MCI_RECV_STATUS) {
2804 if (host->sg != NULL)
2805 dw_mci_read_data_pio(host, true);
2807 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2808 tasklet_schedule(&host->tasklet);
2811 if (pending & SDMMC_INT_RXDR) {
2812 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2813 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2814 dw_mci_read_data_pio(host, false);
2817 if (pending & SDMMC_INT_TXDR) {
2818 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2819 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2820 dw_mci_write_data_pio(host);
/* voltage-switch interrupt (SD 1.8V signalling switch) */
2823 if (pending & SDMMC_INT_VSI) {
2824 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2825 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2826 dw_mci_cmd_interrupt(host, pending);
2829 if (pending & SDMMC_INT_CMD_DONE) {
2830 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2831 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2832 dw_mci_cmd_interrupt(host, pending);
/* card insert/remove: hold a wakelock and defer to the card workqueue */
2835 if (pending & SDMMC_INT_CD) {
2836 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2837 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
2838 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2839 queue_work(host->card_workqueue, &host->card_work);
2842 if (pending & SDMMC_INT_HLE) {
2843 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2844 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2848 /* Handle SDIO Interrupts */
2849 for (i = 0; i < host->num_slots; i++) {
2850 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in IP version 2.40a */
2852 if (host->verid < DW_MMC_240A)
2853 sdio_int = SDMMC_INT_SDIO(i);
2855 sdio_int = SDMMC_INT_SDIO(i + 8);
2857 if (pending & sdio_int) {
2858 mci_writel(host, RINTSTS, sdio_int);
2859 mmc_signal_sdio_irq(slot->mmc);
2865 #ifdef CONFIG_MMC_DW_IDMAC
2866 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2867 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2868 /* Handle DMA interrupts */
2869 pending = mci_readl(host, IDSTS);
2870 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2871 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2872 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2873 host->dma_ops->complete((void *)host);
/*
 * Card-detect workqueue handler.  For every slot whose present/absent
 * state changed: wake the system, abort any in-flight request with
 * -ENOMEDIUM according to the current state-machine state, drain the
 * slot's queued requests, reset the FIFO (and IDMAC where applicable),
 * and finally kick the MMC core's rescan via mmc_detect_change().
 */
2881 static void dw_mci_work_routine_card(struct work_struct *work)
2883 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2886 for (i = 0; i < host->num_slots; i++) {
2887 struct dw_mci_slot *slot = host->slot[i];
2888 struct mmc_host *mmc = slot->mmc;
2889 struct mmc_request *mrq;
2892 present = dw_mci_get_cd(mmc);
/* loop until the recorded state matches the live card-detect reading */
2893 while (present != slot->last_detect_state) {
2894 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2895 present ? "inserted" : "removed");
2896 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
2897 present ? "inserted" : "removed.", mmc_hostname(mmc));
2899 rk_send_wakeup_key();//wake up system
2900 spin_lock_bh(&host->lock);
2902 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2903 /* Card change detected */
2904 slot->last_detect_state = present;
2906 /* Clean up queue if present */
2909 if (mrq == host->mrq) {
/* the active request: fail it at whatever phase it reached */
2913 switch (host->state) {
2916 case STATE_SENDING_CMD:
2917 mrq->cmd->error = -ENOMEDIUM;
2921 case STATE_SENDING_DATA:
2922 mrq->data->error = -ENOMEDIUM;
2923 dw_mci_stop_dma(host);
2925 case STATE_DATA_BUSY:
2926 case STATE_DATA_ERROR:
2927 if (mrq->data->error == -EINPROGRESS)
2928 mrq->data->error = -ENOMEDIUM;
2932 case STATE_SENDING_STOP:
2933 mrq->stop->error = -ENOMEDIUM;
2937 dw_mci_request_end(host, mrq);
/* a merely-queued request: unlink it and fail every stage */
2939 list_del(&slot->queue_node);
2940 mrq->cmd->error = -ENOMEDIUM;
2942 mrq->data->error = -ENOMEDIUM;
2944 mrq->stop->error = -ENOMEDIUM;
2946 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
2947 mrq->cmd->opcode, mmc_hostname(mmc));
/* drop the lock around the completion callback to avoid recursion */
2949 spin_unlock(&host->lock);
2950 mmc_request_done(slot->mmc, mrq);
2951 spin_lock(&host->lock);
2955 /* Power down slot */
2957 /* Clear down the FIFO */
2958 dw_mci_fifo_reset(host);
2959 #ifdef CONFIG_MMC_DW_IDMAC
/* rk3036/rk312x use an external DMAC, so skip the IDMAC reset there */
2960 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
2961 dw_mci_idmac_reset(host);
2966 spin_unlock_bh(&host->lock);
/* re-sample in case the card state flapped while we held the lock */
2968 present = dw_mci_get_cd(mmc);
2971 mmc_detect_change(slot->mmc,
2972 msecs_to_jiffies(host->pdata->detect_delay_ms));
2977 /* given a slot id, find out the device node representing that slot */
2978 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2980 struct device_node *np;
2984 if (!dev || !dev->of_node)
2987 for_each_child_of_node(dev->of_node, np) {
2988 addr = of_get_property(np, "reg", &len);
2989 if (!addr || (len < sizeof(int)))
2991 if (be32_to_cpup(addr) == slot)
/* Table mapping per-slot device-tree property names to slot quirk flags. */
2997 static struct dw_mci_of_slot_quirks {
3000 } of_slot_quirks[] = {
3002 .quirk = "disable-wp",
3003 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Collect quirk flags for a slot by testing each property listed in
 * of_slot_quirks[] against the slot's device-tree node.
 */
3007 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3009 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3014 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3015 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3016 quirks |= of_slot_quirks[idx].id;
3021 /* find out bus-width for a given slot */
3022 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3024 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3030 if (of_property_read_u32(np, "bus-width", &bus_wd))
3031 dev_err(dev, "bus-width property not found, assuming width"
3037 /* find the pwr-en gpio for a given slot; or -1 if none specified */
3038 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3040 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3046 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3048 /* Having a missing entry is valid; return silently */
3049 if (!gpio_is_valid(gpio))
3052 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3053 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3057 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3063 /* find the write protect gpio for a given slot; or -1 if none specified */
3064 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3066 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3072 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3074 /* Having a missing entry is valid; return silently */
3075 if (!gpio_is_valid(gpio))
3078 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3079 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3086 /* find the cd gpio for a given slot */
3087 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3088 struct mmc_host *mmc)
3090 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3096 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3098 /* Having a missing entry is valid; return silently */
3099 if (!gpio_is_valid(gpio))
3102 if (mmc_gpio_request_cd(mmc, gpio, 0))
3103 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/* Stub implementations used when the kernel is built without device tree
 * (CONFIG_OF unset): every OF lookup degrades to a benign default. */
3105 #else /* CONFIG_OF */
3106 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3110 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3114 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3118 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3122 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3123 struct mmc_host *mmc)
3127 #endif /* CONFIG_OF */
/*
 * Allocate and register one mmc_host for slot @id: parse DT-derived
 * clock limits, card-type restrictions, capabilities and block-size
 * limits; claim the power-enable / write-protect GPIOs and the vmmc
 * regulator; register the host with the MMC core; and apply the default
 * pinctrl state (EMMC is deliberately left untouched).
 */
3129 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3131 struct mmc_host *mmc;
3132 struct dw_mci_slot *slot;
3133 const struct dw_mci_drv_data *drv_data = host->drv_data;
3138 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3142 slot = mmc_priv(mmc);
3146 host->slot[id] = slot;
3149 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3151 mmc->ops = &dw_mci_ops;
/* fall back to driver defaults when no "clock-freq-min-max" DT property */
3153 if (of_property_read_u32_array(host->dev->of_node,
3154 "clock-freq-min-max", freq, 2)) {
3155 mmc->f_min = DW_MCI_FREQ_MIN;
3156 mmc->f_max = DW_MCI_FREQ_MAX;
3158 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
3159 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3161 mmc->f_min = freq[0];
3162 mmc->f_max = freq[1];
3164 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
3165 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
/* print the driver version once, on the first (mmc0) host */
3168 if(strstr("mmc0",mmc_hostname(mmc)))
3169 printk("Line%d..%s: The rk_sdmmc %s",__LINE__, __FUNCTION__,RK_SDMMC_DRIVER_VERSION);
/* DT properties restrict which card type this controller instance serves */
3171 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3172 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3173 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3174 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3175 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3176 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3178 if (host->pdata->get_ocr)
3179 mmc->ocr_avail = host->pdata->get_ocr(id);
/* no platform OCR hook: advertise the full 1.65V-3.6V range */
3182 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3183 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3184 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3185 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3189 * Start with slot power disabled, it will be enabled when a card
3192 if (host->pdata->setpower)
3193 host->pdata->setpower(id, 0);
3195 if (host->pdata->caps)
3196 mmc->caps = host->pdata->caps;
3198 if (host->pdata->pm_caps)
3199 mmc->pm_caps = host->pdata->pm_caps;
/* controller index comes from the "mshc" DT alias, else the platform id */
3201 if (host->dev->of_node) {
3202 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3206 ctrl_id = to_platform_device(host->dev)->id;
3208 if (drv_data && drv_data->caps)
3209 mmc->caps |= drv_data->caps[ctrl_id];
3210 if (drv_data && drv_data->hold_reg_flag)
3211 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3213 /* set the compatibility of driver. */
3214 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3215 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3217 if (host->pdata->caps2)
3218 mmc->caps2 = host->pdata->caps2;
/* bus width: platform callback first, then DT, then default */
3220 if (host->pdata->get_bus_wd)
3221 bus_width = host->pdata->get_bus_wd(slot->id);
3222 else if (host->dev->of_node)
3223 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3227 switch (bus_width) {
3229 mmc->caps |= MMC_CAP_8_BIT_DATA;
3231 mmc->caps |= MMC_CAP_4_BIT_DATA;
/* map generic MMC DT properties onto caps / pm_caps flags */
3234 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3235 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3236 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3237 mmc->caps |= MMC_CAP_SDIO_IRQ;
3238 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3239 mmc->caps |= MMC_CAP_HW_RESET;
3240 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3241 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3242 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3243 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3244 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3245 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3246 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3247 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3249 /*Assign pm_caps pass to pm_flags*/
3250 mmc->pm_flags = mmc->pm_caps;
3252 if (host->pdata->blk_settings) {
3253 mmc->max_segs = host->pdata->blk_settings->max_segs;
3254 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3255 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3256 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3257 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3259 /* Useful defaults if platform data is unset. */
3260 #ifdef CONFIG_MMC_DW_IDMAC
3261 mmc->max_segs = host->ring_size;
3262 mmc->max_blk_size = 65536;
3263 mmc->max_blk_count = host->ring_size;
3264 mmc->max_seg_size = 0x1000;
3265 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3266 if(cpu_is_rk3036() || cpu_is_rk312x()){
3267 /* fixup for external dmac setting */
3269 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3270 mmc->max_blk_count = 65535;
3271 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3272 mmc->max_seg_size = mmc->max_req_size;
/* PIO limits (no IDMAC build) */
3276 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3277 mmc->max_blk_count = 512;
3278 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3279 mmc->max_seg_size = mmc->max_req_size;
3280 #endif /* CONFIG_MMC_DW_IDMAC */
3284 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3286 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* vmmc regulator is only looked up for SD-capable hosts */
3291 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3292 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3296 if (IS_ERR(host->vmmc)) {
3297 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3300 ret = regulator_enable(host->vmmc);
3303 "failed to enable regulator: %d\n", ret);
3310 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
/* SDIO function cards are assumed absent until explicitly detected */
3312 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3313 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3315 ret = mmc_add_host(mmc);
3319 /* Pinctrl set default iomux state to fucntion port.
3320 * Fixme: DON'T TOUCH EMMC SETTING!
3322 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3324 host->pinctrl = devm_pinctrl_get(host->dev);
3325 if(IS_ERR(host->pinctrl)){
3326 printk("%s: Warning : No pinctrl used!\n",mmc_hostname(host->mmc));
3328 host->pins_idle= pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_IDLE);
/* NOTE(review): checks pins_default after looking up pins_idle —
 * looks like it should test host->pins_idle; confirm against upstream */
3329 if(IS_ERR(host->pins_default)){
3330 printk("%s: Warning : No IDLE pinctrl matched!\n", mmc_hostname(host->mmc));
3334 if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3335 printk("%s: Warning : Idle pinctrl setting failed!\n", mmc_hostname(host->mmc));
3338 host->pins_default = pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_DEFAULT);
3339 if(IS_ERR(host->pins_default)){
3340 printk("%s: Warning : No default pinctrl matched!\n", mmc_hostname(host->mmc));
3344 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3345 printk("%s: Warning : Default pinctrl setting failed!\n", mmc_hostname(host->mmc));
3351 #if defined(CONFIG_DEBUG_FS)
3352 dw_mci_init_debugfs(slot);
3355 /* Card initially undetected */
3356 slot->last_detect_state = 1;
/*
 * Tear down one slot: run the optional platform exit hook, unregister
 * the mmc host from the core and release the slot's memory.
 */
3365 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3367 /* Shutdown detect IRQ */
3368 if (slot->host->pdata->exit)
3369 slot->host->pdata->exit(id);
3371 /* Debugfs stuff is cleaned up by mmc core */
3372 mmc_remove_host(slot->mmc);
3373 slot->host->slot[id] = NULL;
3374 mmc_free_host(slot->mmc);
/*
 * Select and initialise the DMA backend: one coherent page for the
 * descriptor/sg area, then either the external dmaengine-based backend
 * (rk3036/rk312x) or the controller's internal IDMAC.  Falls back to PIO
 * if the chosen backend lacks the required ops or fails to init.
 */
3377 static void dw_mci_init_dma(struct dw_mci *host)
3379 /* Alloc memory for sg translation */
3380 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3381 &host->sg_dma, GFP_KERNEL);
3382 if (!host->sg_cpu) {
3383 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3388 /* Determine which DMA interface to use */
3389 #if defined(CONFIG_MMC_DW_IDMAC)
3390 if(cpu_is_rk3036() || cpu_is_rk312x()){
3391 host->dma_ops = &dw_mci_edmac_ops;
3392 dev_info(host->dev, "Using external DMA controller.\n");
3394 host->dma_ops = &dw_mci_idmac_ops;
3395 dev_info(host->dev, "Using internal DMA controller.\n");
/* a usable backend must provide the full init/start/stop/cleanup set */
3402 if (host->dma_ops->init && host->dma_ops->start &&
3403 host->dma_ops->stop && host->dma_ops->cleanup) {
3404 if (host->dma_ops->init(host)) {
3405 dev_err(host->dev, "%s: Unable to initialize "
3406 "DMA Controller.\n", __func__);
3410 dev_err(host->dev, "DMA initialization not found.\n");
3418 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Set the requested reset bit(s) in CTRL and poll (up to 500 ms) until
 * the hardware clears them.  Returns false (with a warning) on timeout.
 */
3423 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3425 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3428 ctrl = mci_readl(host, CTRL);
3430 mci_writel(host, CTRL, ctrl);
3432 /* wait till resets clear */
3434 ctrl = mci_readl(host, CTRL);
3435 if (!(ctrl & reset))
3437 } while (time_before(jiffies, timeout));
3440 "Timeout resetting block (ctrl reset %#x)\n",
/* Reset only the data FIFO, stopping the sg iterator first since the
 * reset raises an interrupt that would otherwise race with PIO. */
3446 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3449 * Reseting generates a block interrupt, hence setting
3450 * the scatter-gather pointer to NULL.
3453 sg_miter_stop(&host->sg_miter);
3457 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full controller reset: FIFO, controller core and DMA interface. */
3460 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3462 return dw_mci_ctrl_reset(host,
3463 SDMMC_CTRL_FIFO_RESET |
3465 SDMMC_CTRL_DMA_RESET);
/* Table mapping host-level device-tree property names to quirk flags. */
3470 static struct dw_mci_of_quirks {
3475 .quirk = "broken-cd",
3476 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board (platform data) from the host's device-tree node:
 * slot count, quirk flags, FIFO depth, card-detect delay, bus clock,
 * driver-specific parsing hook, and the various capability properties.
 * Returns the allocated pdata or an ERR_PTR on failure.
 */
3480 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3482 struct dw_mci_board *pdata;
3483 struct device *dev = host->dev;
3484 struct device_node *np = dev->of_node;
3485 const struct dw_mci_drv_data *drv_data = host->drv_data;
3487 u32 clock_frequency;
3489 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3491 dev_err(dev, "could not allocate memory for pdata\n");
3492 return ERR_PTR(-ENOMEM);
3495 /* find out number of slots supported */
3496 if (of_property_read_u32(dev->of_node, "num-slots",
3497 &pdata->num_slots)) {
3498 dev_info(dev, "num-slots property not found, "
3499 "assuming 1 slot is available\n");
3500 pdata->num_slots = 1;
/* translate quirk property names into quirk flag bits */
3504 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3505 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3506 pdata->quirks |= of_quirks[idx].id;
3509 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3510 dev_info(dev, "fifo-depth property not found, using "
3511 "value of FIFOTH register as default\n");
3513 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3515 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3516 pdata->bus_hz = clock_frequency;
/* give the SoC-specific driver a chance to parse its own properties */
3518 if (drv_data && drv_data->parse_dt) {
3519 ret = drv_data->parse_dt(host);
3521 return ERR_PTR(ret);
3524 if (of_find_property(np, "keep-power-in-suspend", NULL))
3525 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3527 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3528 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3530 if (of_find_property(np, "supports-highspeed", NULL))
3531 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3533 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3534 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3536 if (of_find_property(np, "supports-DDR_MODE", NULL))
3537 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3539 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3540 pdata->caps2 |= MMC_CAP2_HS200;
3542 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3543 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3545 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3546 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3548 if (of_get_property(np, "cd-inverted", NULL))
3549 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3550 if (of_get_property(np, "bootpart-no-access", NULL))
3551 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
/* Without device tree there is no way to build pdata here. */
3556 #else /* CONFIG_OF */
3557 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3559 return ERR_PTR(-EINVAL);
3561 #endif /* CONFIG_OF */
/*
 * Recovery path for a data-transfer timeout: fake a CRC/end-bit data
 * error so the state machine aborts the transfer, fully reset the
 * controller (and re-init internal DMA where used), reprogram FIFOTH /
 * TMOUT / the interrupt mask back to their initial values, restore bus
 * settings on slots that keep power, and kick the tasklet.
 */
3563 static void dw_mci_dealwith_timeout(struct dw_mci *host)
3567 switch(host->state){
3570 case STATE_SENDING_DATA:
3571 case STATE_DATA_BUSY:
/* mark the data phase as failed with CRC + end-bit errors */
3572 host->data_status |= (SDMMC_INT_DCRC|SDMMC_INT_EBE);
3573 mci_writel(host, RINTSTS, SDMMC_INT_DRTO); // clear interrupt
3574 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3575 host->state = STATE_DATA_BUSY;
3576 if (!dw_mci_ctrl_all_reset(host)) {
3581 /* NO requirement to reclaim slave chn using external dmac */
3582 #ifdef CONFIG_MMC_DW_IDMAC
3583 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3584 if (host->use_dma && host->dma_ops->init)
3585 host->dma_ops->init(host);
3589 * Restore the initial value at FIFOTH register
3590 * And Invalidate the prev_blksz with zero
3592 mci_writel(host, FIFOTH, host->fifoth_val);
3593 host->prev_blksz = 0;
3594 mci_writel(host, TMOUT, 0xFFFFFFFF);
3595 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3596 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
3597 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
/* card-detect interrupt only for removable (non-SDIO) hosts */
3598 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3599 regs |= SDMMC_INT_CD;
3600 mci_writel(host, INTMASK, regs);
3601 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3602 for (i = 0; i < host->num_slots; i++) {
3603 struct dw_mci_slot *slot = host->slot[i];
/* re-apply ios and bus setup on slots that stay powered in suspend */
3606 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3607 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3608 dw_mci_setup_bus(slot, true);
3611 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3612 tasklet_schedule(&host->tasklet);
/*
 * dto_timer expiry handler: the expected data-over interrupt never
 * arrived.  With the controller IRQ masked off, fabricate an end-bit
 * error status, clear all raw interrupts and run the timeout recovery.
 */
3618 static void dw_mci_dto_timeout(unsigned long host_data)
3620 struct dw_mci *host = (struct dw_mci *) host_data;
3622 disable_irq(host->irq);
3624 host->data_status = SDMMC_INT_EBE;
3625 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3626 dw_mci_dealwith_timeout(host);
3628 enable_irq(host->irq);
/*
 * Probe entry point shared by the platform glue.  In order: parse DT
 * into pdata, read the IP version (which fixes the DATA register
 * offset), acquire and enable the AHB and card clocks, set the card
 * clock rate (doubled on >= 2.40a to compensate for a fixed divide-by-2
 * upstream of the controller), detect the host data width from HCON,
 * reset the controller, set up DMA, FIFO thresholds, the tasklet, the
 * card workqueue, the IRQ and the DTO watchdog timer, then initialise
 * every slot and finally unmask the interrupts.  Error paths unwind in
 * reverse order.
 */
3630 int dw_mci_probe(struct dw_mci *host)
3632 const struct dw_mci_drv_data *drv_data = host->drv_data;
3633 int width, i, ret = 0;
3639 host->pdata = dw_mci_parse_dt(host);
3640 if (IS_ERR(host->pdata)) {
3641 dev_err(host->dev, "platform data not available\n");
3646 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3648 "Platform data must supply select_slot function\n");
3653 * In 2.40a spec, Data offset is changed.
3654 * Need to check the version-id and set data-offset for DATA register.
3656 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3657 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3659 if (host->verid < DW_MMC_240A)
3660 host->data_offset = DATA_OFFSET;
3662 host->data_offset = DATA_240A_OFFSET;
/* AHB (register interface) clock */
3665 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3666 if (IS_ERR(host->hclk_mmc)) {
3667 dev_err(host->dev, "failed to get hclk_mmc\n");
3668 ret = PTR_ERR(host->hclk_mmc);
3672 clk_prepare_enable(host->hclk_mmc);
/* card (CIU) clock */
3675 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3676 if (IS_ERR(host->clk_mmc)) {
3677 dev_err(host->dev, "failed to get clk mmc_per\n");
3678 ret = PTR_ERR(host->clk_mmc);
3682 host->bus_hz = host->pdata->bus_hz;
3683 if (!host->bus_hz) {
3684 dev_err(host->dev,"Platform data must supply bus speed\n");
3689 if (host->verid < DW_MMC_240A)
3690 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3692 //rockchip: fix divider 2 in clksum before controlller
3693 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3696 dev_err(host->dev, "failed to set clk mmc\n");
3699 clk_prepare_enable(host->clk_mmc);
3701 if (drv_data && drv_data->setup_clock) {
3702 ret = drv_data->setup_clock(host);
3705 "implementation specific clock setup failed\n");
3710 host->quirks = host->pdata->quirks;
3711 host->irq_state = true;
3712 host->set_speed = 0;
3714 host->svi_flags = 0;
3716 spin_lock_init(&host->lock);
3717 INIT_LIST_HEAD(&host->queue);
3720 * Get the host data width - this assumes that HCON has been set with
3721 * the correct values.
3723 i = (mci_readl(host, HCON) >> 7) & 0x7;
/* HCON data-width field: 0 => 16-bit, 2 => 64-bit, otherwise 32-bit */
3725 host->push_data = dw_mci_push_data16;
3726 host->pull_data = dw_mci_pull_data16;
3728 host->data_shift = 1;
3729 } else if (i == 2) {
3730 host->push_data = dw_mci_push_data64;
3731 host->pull_data = dw_mci_pull_data64;
3733 host->data_shift = 3;
3735 /* Check for a reserved value, and warn if it is */
3737 "HCON reports a reserved host data width!\n"
3738 "Defaulting to 32-bit access.\n");
3739 host->push_data = dw_mci_push_data32;
3740 host->pull_data = dw_mci_pull_data32;
3742 host->data_shift = 2;
3745 /* Reset all blocks */
3746 if (!dw_mci_ctrl_all_reset(host))
3749 host->dma_ops = host->pdata->dma_ops;
3750 dw_mci_init_dma(host);
3752 /* Clear the interrupts for the host controller */
3753 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3754 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3756 /* Put in max timeout */
3757 mci_writel(host, TMOUT, 0xFFFFFFFF);
3760 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3761 * Tx Mark = fifo_size / 2 DMA Size = 8
3763 if (!host->pdata->fifo_depth) {
3765 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3766 * have been overwritten by the bootloader, just like we're
3767 * about to do, so if you know the value for your hardware, you
3768 * should put it in the platform data.
3770 fifo_size = mci_readl(host, FIFOTH);
3771 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3773 fifo_size = host->pdata->fifo_depth;
3775 host->fifo_depth = fifo_size;
3777 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3778 mci_writel(host, FIFOTH, host->fifoth_val);
3780 /* disable clock to CIU */
3781 mci_writel(host, CLKENA, 0);
3782 mci_writel(host, CLKSRC, 0);
3784 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3785 host->card_workqueue = alloc_workqueue("dw-mci-card",
3786 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
3787 if (!host->card_workqueue) {
3791 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
3792 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3793 host->irq_flags, "dw-mci", host);
/* slot count: pdata override, else HCON NUM_CARDS field + 1 */
3797 if (host->pdata->num_slots)
3798 host->num_slots = host->pdata->num_slots;
3800 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
3802 setup_timer(&host->dto_timer, dw_mci_dto_timeout, (unsigned long)host);
3803 /* We need at least one slot to succeed */
3804 for (i = 0; i < host->num_slots; i++) {
3805 ret = dw_mci_init_slot(host, i);
3807 dev_dbg(host->dev, "slot %d init failed\n", i);
3813 * Enable interrupts for command done, data over, data empty, card det,
3814 * receive ready and error such as transmit, receive timeout, crc error
3816 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3817 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
3818 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* card-detect interrupt only on removable (non-SDIO, non-eMMC) hosts */
3819 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3820 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3821 regs |= SDMMC_INT_CD;
3823 mci_writel(host, INTMASK, regs);
3825 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
3827 dev_info(host->dev, "DW MMC controller at irq %d, "
3828 "%d bit host data width, "
3830 host->irq, width, fifo_size);
3833 dev_info(host->dev, "%d slots initialized\n", init_slots);
3835 dev_dbg(host->dev, "attempted to initialize %d slots, "
3836 "but failed on all\n", host->num_slots);
3841 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
3842 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* error unwind: workqueue, DMA, regulator, clocks — in reverse order */
3847 destroy_workqueue(host->card_workqueue);
3850 if (host->use_dma && host->dma_ops->exit)
3851 host->dma_ops->exit(host);
3854 regulator_disable(host->vmmc);
3855 regulator_put(host->vmmc);
3859 if (!IS_ERR(host->clk_mmc))
3860 clk_disable_unprepare(host->clk_mmc);
3862 if (!IS_ERR(host->hclk_mmc))
3863 clk_disable_unprepare(host->hclk_mmc);
3867 EXPORT_SYMBOL(dw_mci_probe);
3869 void dw_mci_remove(struct dw_mci *host)
3872 del_timer_sync(&host->dto_timer);
3874 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3875 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3877 for(i = 0; i < host->num_slots; i++){
3878 dev_dbg(host->dev, "remove slot %d\n", i);
3880 dw_mci_cleanup_slot(host->slot[i], i);
3883 /* disable clock to CIU */
3884 mci_writel(host, CLKENA, 0);
3885 mci_writel(host, CLKSRC, 0);
3887 destroy_workqueue(host->card_workqueue);
3889 if(host->use_dma && host->dma_ops->exit)
3890 host->dma_ops->exit(host);
3893 regulator_disable(host->vmmc);
3894 regulator_put(host->vmmc);
3896 if(!IS_ERR(host->clk_mmc))
3897 clk_disable_unprepare(host->clk_mmc);
3899 if(!IS_ERR(host->hclk_mmc))
3900 clk_disable_unprepare(host->hclk_mmc);
3902 EXPORT_SYMBOL(dw_mci_remove);
3906 #ifdef CONFIG_PM_SLEEP
3908 * TODO: we should probably disable the clock to the card in the suspend path.
3910 int dw_mci_suspend(struct dw_mci *host)
3913 regulator_disable(host->vmmc);
3915 if(host->use_dma && host->dma_ops->exit)
3916 host->dma_ops->exit(host);
3918 /*only for sdmmc controller*/
3919 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD){
3920 host->mmc->rescan_disable = 1;
3921 if(cancel_delayed_work_sync(&host->mmc->detect))
3922 wake_unlock(&host->mmc->detect_wake_lock);
3924 disable_irq(host->irq);
3925 if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3926 MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
3927 mmc_hostname(host->mmc));
3928 dw_mci_of_get_cd_gpio(host->dev,0,host->mmc);
3929 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3930 mci_writel(host, INTMASK, 0x00);
3931 mci_writel(host, CTRL, 0x00);
3932 enable_irq_wake(host->mmc->slot.cd_irq);
3936 EXPORT_SYMBOL(dw_mci_suspend);
3938 int dw_mci_resume(struct dw_mci *host)
3940 int i, ret, retry_cnt = 0;
3942 struct dw_mci_slot *slot;
3944 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO){
3945 slot = mmc_priv(host->mmc);
3947 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
3950 /*only for sdmmc controller*/
3951 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3952 disable_irq_wake(host->mmc->slot.cd_irq);
3953 mmc_gpio_free_cd(host->mmc);
3954 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3955 MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
3956 mmc_hostname(host->mmc));
3957 host->mmc->rescan_disable = 0;
3960 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
3961 else if(cpu_is_rk3036())
3962 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
3963 else if(cpu_is_rk312x())
3964 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
3965 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
3968 ret = regulator_enable(host->vmmc);
3971 "failed to enable regulator: %d\n", ret);
3976 if(!dw_mci_ctrl_all_reset(host)){
3981 if(host->use_dma && host->dma_ops->init)
3982 host->dma_ops->init(host);
3985 * Restore the initial value at FIFOTH register
3986 * And Invalidate the prev_blksz with zero
3988 mci_writel(host, FIFOTH, host->fifoth_val);
3989 host->prev_blksz = 0;
3990 /* Put in max timeout */
3991 mci_writel(host, TMOUT, 0xFFFFFFFF);
3993 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3994 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
3996 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3997 regs |= SDMMC_INT_CD;
3998 mci_writel(host, INTMASK, regs);
3999 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4000 /*only for sdmmc controller*/
4001 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
4002 enable_irq(host->irq);
4005 for(i = 0; i < host->num_slots; i++){
4006 struct dw_mci_slot *slot = host->slot[i];
4009 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4010 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4011 dw_mci_setup_bus(slot, true);
4017 EXPORT_SYMBOL(dw_mci_resume);
4018 #endif /* CONFIG_PM_SLEEP */
4020 static int __init dw_mci_init(void)
4022 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4026 static void __exit dw_mci_exit(void)
4030 module_init(dw_mci_init);
4031 module_exit(dw_mci_exit);
4033 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4034 MODULE_AUTHOR("NXP Semiconductor VietNam");
4035 MODULE_AUTHOR("Imagination Technologies Ltd");
4036 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4037 MODULE_AUTHOR("Rockchip Electronics£¬Bangwang Xie < xbw@rock-chips.com> ");
4038 MODULE_LICENSE("GPL v2");