2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/mmc/host.h>
34 #include <linux/mmc/mmc.h>
35 #include <linux/mmc/sd.h>
36 #include <linux/mmc/card.h>
37 #include <linux/mmc/sdio.h>
38 #include <linux/mmc/rk_mmc.h>
39 #include <linux/bitops.h>
40 #include <linux/regulator/consumer.h>
41 #include <linux/workqueue.h>
43 #include <linux/of_gpio.h>
44 #include <linux/mmc/slot-gpio.h>
45 #include <linux/clk-private.h>
46 #include <linux/rockchip/cpu.h>
49 #include "rk_sdmmc_dbg.h"
50 #include <linux/regulator/rockchip_io_vol_domain.h>
51 #include "../../clk/rockchip/clk-ops.h"
/* Version string reported by this Rockchip fork of the DesignWare MSHC driver. */
53 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
55 /* Common flag combinations */
56 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
57 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
59 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
61 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
62 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
/* Markers stored in host->dir_status to record the data transfer direction. */
63 #define DW_MCI_SEND_STATUS 1
64 #define DW_MCI_RECV_STATUS 2
/* Transfers shorter than this many bytes are done by PIO rather than DMA. */
65 #define DW_MCI_DMA_THRESHOLD 16
67 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
68 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
/* Busy-wait budgets in milliseconds, consumed via msecs_to_jiffies() in
 * dw_mci_wait_unbusy() and mci_send_cmd() below. */
70 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
71 #define SDMMC_DATA_TIMEOUT_SD 500
72 #define SDMMC_DATA_TIMEOUT_SDIO 250
73 #define SDMMC_DATA_TIMEOUT_EMMC 2500
/* NOTE(review): used outside this extract; presumably a ms bound for command
 * response-timeout handling — confirm against the full source. */
75 #define SDMMC_CMD_RTO_MAX_HOLD 200
/* Poll budget (ms) when waiting for the controller/card to go un-busy. */
76 #define SDMMC_WAIT_FOR_UNBUSY 2500
78 #ifdef CONFIG_MMC_DW_IDMAC
/* All IDMAC interrupt-status bits, for clearing IDSTS in a single write. */
79 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
80 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
81 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
85 u32 des0; /* Control Descriptor - NOTE(review): the "struct idmac_desc {" opener is not visible in this extract */
86 #define IDMAC_DES0_DIC BIT(1)
87 #define IDMAC_DES0_LD BIT(2)
88 #define IDMAC_DES0_FD BIT(3)
89 #define IDMAC_DES0_CH BIT(4)
90 #define IDMAC_DES0_ER BIT(5)
91 #define IDMAC_DES0_CES BIT(30)
92 #define IDMAC_DES0_OWN BIT(31)
94 u32 des1; /* Buffer sizes - buffer 1 size lives in bits [12:0] */
95 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
96 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
98 u32 des2; /* buffer 1 physical address */
100 u32 des3; /* buffer 2 physical address (doubles as next-descriptor pointer in chained mode) */
102 #endif /* CONFIG_MMC_DW_IDMAC */
/* Standard tuning block pattern sent by the card in 4-bit bus mode (used to
 * evaluate sample-phase candidates during tuning).
 * NOTE(review): the closing "};" of this array is not visible in this extract. */
104 static const u8 tuning_blk_pattern_4bit[] = {
105 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
106 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
107 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
108 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
109 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
110 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
111 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
112 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard tuning block pattern for 8-bit bus mode (the 4-bit pattern with
 * each byte pair interleaved).
 * NOTE(review): the closing "};" of this array is not visible in this extract. */
115 static const u8 tuning_blk_pattern_8bit[] = {
116 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
117 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
118 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
119 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
120 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
121 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
122 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
123 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
124 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
125 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
126 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
127 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
128 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
129 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
130 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
131 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Forward declarations for reset/low-power helpers defined later in the file. */
134 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
135 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
136 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
137 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
139 /*printk the all register of current host*/
/* Debug aid: walk the dw_mci_regs name/addr table and printk every register. */
141 static int dw_mci_regs_printk(struct dw_mci *host)
143 struct sdmmc_reg *regs = dw_mci_regs;
145 while( regs->name != 0 ){
/* NOTE(review): the statement advancing 'regs' is not visible in this extract;
 * as shown the loop would never terminate — confirm 'regs++' exists in the
 * full source. */
146 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
149 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
154 #if defined(CONFIG_DEBUG_FS)
/* debugfs "req" node: dump the slot's current mmc_request (cmd, data, stop)
 * under host->lock so the snapshot is self-consistent. */
155 static int dw_mci_req_show(struct seq_file *s, void *v)
157 struct dw_mci_slot *slot = s->private;
158 struct mmc_request *mrq;
159 struct mmc_command *cmd;
160 struct mmc_command *stop;
161 struct mmc_data *data;
163 /* Make sure we get a consistent snapshot */
164 spin_lock_bh(&slot->host->lock);
/* BUG(review): the fourth response word printed below repeats cmd->resp[2];
 * it should be cmd->resp[3]. The same long-standing bug existed in mainline
 * dw_mmc's dw_mci_req_show and was later fixed there. */
174 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
175 cmd->opcode, cmd->arg, cmd->flags,
176 cmd->resp[0], cmd->resp[1], cmd->resp[2],
177 cmd->resp[2], cmd->error);
179 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
180 data->bytes_xfered, data->blocks,
181 data->blksz, data->flags, data->error);
/* BUG(review): same resp[3]-vs-resp[2] issue for the stop command below. */
184 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
185 stop->opcode, stop->arg, stop->flags,
186 stop->resp[0], stop->resp[1], stop->resp[2],
187 stop->resp[2], stop->error);
190 spin_unlock_bh(&slot->host->lock);
/* debugfs open: bind dw_mci_req_show to this slot via seq_file single_open. */
195 static int dw_mci_req_open(struct inode *inode, struct file *file)
197 return single_open(file, dw_mci_req_show, inode->i_private)
200 static const struct file_operations dw_mci_req_fops = {
201 .owner = THIS_MODULE,
202 .open = dw_mci_req_open,
/* NOTE(review): the .read/.llseek initializers (seq_read/seq_lseek) are not
 * visible in this extract. */
205 .release = single_release,
/* debugfs "regs" node.
 * NOTE(review): as in the old mainline dw_mmc, these seq_printf calls emit
 * the SDMMC_* register *offset* constants, not the live register contents —
 * confirm whether reading via mci_readl() was intended. */
208 static int dw_mci_regs_show(struct seq_file *s, void *v)
210 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
211 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
212 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
213 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
214 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
215 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open: bind dw_mci_regs_show via seq_file single_open. */
220 static int dw_mci_regs_open(struct inode *inode, struct file *file)
222 return single_open(file, dw_mci_regs_show, inode->i_private);
225 static const struct file_operations dw_mci_regs_fops = {
226 .owner = THIS_MODULE,
227 .open = dw_mci_regs_open,
/* NOTE(review): the .read/.llseek initializers are not visible in this extract. */
230 .release = single_release,
/* Create the per-slot debugfs tree under the mmc host's debugfs root:
 * "regs", "req", plus raw views of the state machine and event bitmasks. */
233 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
235 struct mmc_host *mmc = slot->mmc;
236 struct dw_mci *host = slot->host;
240 root = mmc->debugfs_root;
244 node = debugfs_create_file("regs", S_IRUSR, root, host,
249 node = debugfs_create_file("req", S_IRUSR, root, slot,
254 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
258 node = debugfs_create_x32("pending_events", S_IRUSR, root,
259 (u32 *)&host->pending_events);
263 node = debugfs_create_x32("completed_events", S_IRUSR, root,
264 (u32 *)&host->completed_events);
/* Shared failure label for all of the creations above (labels/gotos are not
 * visible in this extract). */
271 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
273 #endif /* defined(CONFIG_DEBUG_FS) */
275 static void dw_mci_set_timeout(struct dw_mci *host)
277 /* timeout (maximum) */
278 mci_writel(host, TMOUT, 0xffffffff);
/* Translate an mmc_command into SDMMC_CMD register flags ("cmdr").
 * NOTE(review): the declaration/initialisation of 'cmdr' (u32 cmdr =
 * cmd->opcode;) is not visible in this extract. */
281 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
283 struct mmc_data *data;
284 struct dw_mci_slot *slot = mmc_priv(mmc);
285 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
287 cmd->error = -EINPROGRESS;
/* CMD12 must carry the STOP attribute so the controller treats it as an
 * abort rather than queueing behind the in-flight data transfer. */
291 if (cmdr == MMC_STOP_TRANSMISSION)
292 cmdr |= SDMMC_CMD_STOP;
294 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
296 if (cmd->flags & MMC_RSP_PRESENT) {
297 /* We expect a response, so set this bit */
298 cmdr |= SDMMC_CMD_RESP_EXP;
299 if (cmd->flags & MMC_RSP_136)
300 cmdr |= SDMMC_CMD_RESP_LONG;
303 if (cmd->flags & MMC_RSP_CRC)
304 cmdr |= SDMMC_CMD_RESP_CRC;
/* Data phase present: mark data-expected plus stream/write attributes. */
308 cmdr |= SDMMC_CMD_DAT_EXP;
309 if (data->flags & MMC_DATA_STREAM)
310 cmdr |= SDMMC_CMD_STRM_MODE;
311 if (data->flags & MMC_DATA_WRITE)
312 cmdr |= SDMMC_CMD_DAT_WR;
/* Give the SoC-specific layer a chance to adjust the flags. */
315 if (drv_data && drv_data->prepare_command)
316 drv_data->prepare_command(slot->host, &cmdr);
/* Build a synthetic stop/abort command in host->stop_abort for a data
 * command: CMD12 for block reads/writes, or an SDIO CCCR I/O-abort write
 * (CMD52) for CMD53 transfers. Returns the SDMMC_CMD flags for it. */
322 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
324 struct mmc_command *stop;
330 stop = &host->stop_abort;
332 memset(stop, 0, sizeof(struct mmc_command));
334 if (cmdr == MMC_READ_SINGLE_BLOCK ||
335 cmdr == MMC_READ_MULTIPLE_BLOCK ||
336 cmdr == MMC_WRITE_BLOCK ||
337 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
338 stop->opcode = MMC_STOP_TRANSMISSION;
340 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
341 } else if (cmdr == SD_IO_RW_EXTENDED) {
342 stop->opcode = SD_IO_RW_DIRECT;
/* CMD52 write (bit 31) to the CCCR ABORT register, targeting the same
 * function number as the aborted CMD53 (bits 30:28 of its argument).
 * NOTE(review): '1 << 31' shifts into the sign bit of int (UB in C);
 * prefer 1U << 31. */
343 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
344 ((cmd->arg >> 28) & 0x7);
345 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
350 cmdr = stop->opcode | SDMMC_CMD_STOP |
351 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/* Write the command argument and kick off a command in the controller.
 * CMD11 (voltage switch) gets special handling: low-power clock gating is
 * disabled first and the VOLT_SWITCH attribute is set. */
356 static void dw_mci_start_command(struct dw_mci *host,
357 struct mmc_command *cmd, u32 cmd_flags)
359 struct dw_mci_slot *slot = host->slot[0];
360 /*temporality fix slot[0] due to host->num_slots equal to 1*/
362 host->pre_cmd = host->cmd;
365 "start command: ARGR=0x%08x CMDR=0x%08x\n",
366 cmd->arg, cmd_flags);
368 if(SD_SWITCH_VOLTAGE == cmd->opcode){
369 /*confirm non-low-power mode*/
370 mci_writel(host, CMDARG, 0);
371 dw_mci_disable_low_power(slot);
373 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
374 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
376 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
379 mci_writel(host, CMDARG, cmd->arg);
382 /* fix the value to 1 in some Soc,for example RK3188. */
383 if(host->mmc->hold_reg_flag)
384 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
/* Setting START makes the controller latch CMDARG/CMD and issue the command. */
386 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
390 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
392 dw_mci_start_command(host, data->stop, host->stop_cmdr);
395 /* DMA interface functions */
396 static void dw_mci_stop_dma(struct dw_mci *host)
398 if (host->using_dma) {
399 host->dma_ops->stop(host);
400 host->dma_ops->cleanup(host);
403 /* Data transfer was stopped by the interrupt handler */
404 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
407 static int dw_mci_get_dma_dir(struct mmc_data *data)
409 if (data->flags & MMC_DATA_WRITE)
410 return DMA_TO_DEVICE;
412 return DMA_FROM_DEVICE;
415 #ifdef CONFIG_MMC_DW_IDMAC
/* Unmap the request's scatterlist after an IDMAC transfer, unless the mapping
 * is owned by the pre_req/post_req fast path (host_cookie != 0). */
416 static void dw_mci_dma_cleanup(struct dw_mci *host)
418 struct mmc_data *data = host->data;
421 if (!data->host_cookie)
422 dma_unmap_sg(host->dev,
/* NOTE(review): the data->sg / data->sg_len argument lines are not visible
 * in this extract. */
425 dw_mci_get_dma_dir(data));
428 static void dw_mci_idmac_reset(struct dw_mci *host)
430 u32 bmod = mci_readl(host, BMOD);
431 /* Software reset of DMA */
432 bmod |= SDMMC_IDMAC_SWRESET;
433 mci_writel(host, BMOD, bmod);
436 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
440 /* Disable and reset the IDMAC interface */
441 temp = mci_readl(host, CTRL);
442 temp &= ~SDMMC_CTRL_USE_IDMAC;
443 temp |= SDMMC_CTRL_DMA_RESET;
444 mci_writel(host, CTRL, temp);
446 /* Stop the IDMAC running */
447 temp = mci_readl(host, BMOD);
448 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
449 temp |= SDMMC_IDMAC_SWRESET;
450 mci_writel(host, BMOD, temp);
/* IDMAC completion callback: clean up the mapping and signal the transfer-
 * complete event to the state-machine tasklet. */
453 static void dw_mci_idmac_complete_dma(void *arg)
455 struct dw_mci *host = arg;
456 struct mmc_data *data = host->data;
458 dev_vdbg(host->dev, "DMA complete\n");
461 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
462 host->mrq->cmd->opcode,host->mrq->cmd->arg,
463 data->blocks,data->blksz,mmc_hostname(host->mmc));
466 host->dma_ops->cleanup(host);
469 * If the card was removed, data will be NULL. No point in trying to
470 * send the stop command or waiting for NBUSY in this case.
473 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
474 tasklet_schedule(&host->tasklet);
/* Fill the IDMAC descriptor ring from the mapped scatterlist: one descriptor
 * per sg entry, then mark the first (FD) and last (LD) descriptors. */
478 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
482 struct idmac_desc *desc = host->sg_cpu;
484 for (i = 0; i < sg_len; i++, desc++) {
485 unsigned int length = sg_dma_len(&data->sg[i]);
486 u32 mem_addr = sg_dma_address(&data->sg[i]);
488 /* Set the OWN bit and disable interrupts for this descriptor */
489 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
492 IDMAC_SET_BUFFER1_SIZE(desc, length);
494 /* Physical address to DMA to/from */
495 desc->des2 = mem_addr;
498 /* Set first descriptor */
500 desc->des0 |= IDMAC_DES0_FD;
502 /* Set last descriptor */
/* NOTE(review): byte-offset arithmetic below relies on host->sg_cpu being a
 * void * (GNU extension); if sg_cpu were typed idmac_desc *, this would index
 * far past the intended descriptor — confirm the field's declared type. */
503 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
504 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
505 desc->des0 |= IDMAC_DES0_LD;
/* Program the descriptor ring for @sg_len entries, route the datapath through
 * the IDMAC, enable it, and poke the poll-demand register to start fetching. */
510 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
514 dw_mci_translate_sglist(host, host->data, sg_len);
516 /* Select IDMAC interface */
517 temp = mci_readl(host, CTRL);
518 temp |= SDMMC_CTRL_USE_IDMAC;
519 mci_writel(host, CTRL, temp);
523 /* Enable the IDMAC */
524 temp = mci_readl(host, BMOD);
525 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
526 mci_writel(host, BMOD, temp);
528 /* Start it running */
529 mci_writel(host, PLDMND, 1);
/* One-time IDMAC setup: build a forward-linked descriptor ring in the
 * coherent sg buffer, reset the engine, unmask only the completion
 * interrupts, and point DBADDR at the ring. */
532 static int dw_mci_idmac_init(struct dw_mci *host)
534 struct idmac_desc *p;
537 /* Number of descriptors in the ring buffer */
538 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
540 /* Forward link the descriptor list */
541 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
542 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
544 /* Set the last descriptor as the end-of-ring descriptor */
545 p->des3 = host->sg_dma;
546 p->des0 = IDMAC_DES0_ER;
548 dw_mci_idmac_reset(host);
550 /* Mask out interrupts - get Tx & Rx complete only */
551 mci_writel(host, IDSTS, IDMAC_INT_CLR);
/* NOTE(review): the continuation of this IDINTEN write (SDMMC_IDMAC_INT_TI)
 * and the function's 'return 0;' are not visible in this extract. */
552 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
555 /* Set the descriptor base address */
556 mci_writel(host, DBADDR, host->sg_dma);
/* DMA operations for the Synopsys internal DMA controller (IDMAC). */
560 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
561 .init = dw_mci_idmac_init,
562 .start = dw_mci_idmac_start_dma,
563 .stop = dw_mci_idmac_stop_dma,
564 .complete = dw_mci_idmac_complete_dma,
565 .cleanup = dw_mci_dma_cleanup,
/* External-DMA counterpart of dw_mci_dma_cleanup(): unmap the scatterlist
 * unless the pre_req/post_req fast path owns the mapping.
 * NOTE(review): a 'if (data)' NULL guard is not visible in this extract —
 * confirm it exists in the full source before the dereference below. */
569 static void dw_mci_edma_cleanup(struct dw_mci *host)
571 struct mmc_data *data = host->data;
574 if (!data->host_cookie)
575 dma_unmap_sg(host->dev,
576 data->sg, data->sg_len,
577 dw_mci_get_dma_dir(data));
580 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
582 dmaengine_terminate_all(host->dms->ch);
/* dmaengine completion callback for external DMA: sync the buffers back to
 * the CPU after a read, clean up, and signal transfer completion. */
585 static void dw_mci_edmac_complete_dma(void *arg)
587 struct dw_mci *host = arg;
588 struct mmc_data *data = host->data;
590 dev_vdbg(host->dev, "DMA complete\n");
593 if(data->flags & MMC_DATA_READ)
594 /* Invalidate cache after read */
595 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
596 data->sg_len, DMA_FROM_DEVICE);
598 host->dma_ops->cleanup(host);
601 * If the card was removed, data will be NULL. No point in trying to
602 * send the stop command or waiting for NBUSY in this case.
605 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
606 tasklet_schedule(&host->tasklet);
/* Start a transfer over an external dmaengine channel: configure the slave
 * (FIFO address, 32-bit width, burst size), prepare and submit a slave-sg
 * descriptor for the proper direction, then issue it. */
610 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
612 struct dma_slave_config slave_config;
613 struct dma_async_tx_descriptor *desc = NULL;
614 struct scatterlist *sgl = host->data->sg;
615 u32 sg_elems = host->data->sg_len;
618 /* Set external dma config: burst size, burst width*/
619 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
620 slave_config.src_addr = slave_config.dst_addr;
621 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
622 slave_config.src_addr_width = slave_config.dst_addr_width;
624 /* Match FIFO dma burst MSIZE with external dma config*/
/* BUG(review): '&&' is a logical AND, so dst_maxburst evaluates to 0 or 1
 * instead of the 3-bit FIFOTH MSIZE field. This should be the bitwise
 * '((host->fifoth_val) >> 28) & 0x7'. */
625 slave_config.dst_maxburst = ((host->fifoth_val) >> 28) && 0x7;
626 slave_config.src_maxburst = slave_config.dst_maxburst;
628 if(host->data->flags & MMC_DATA_WRITE){
629 slave_config.direction = DMA_MEM_TO_DEV;
630 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
632 dev_err(host->dev, "error in dw_mci edma configuration.\n");
636 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
637 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
639 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
642 /* Set dw_mci_edmac_complete_dma as callback */
643 desc->callback = dw_mci_edmac_complete_dma;
644 desc->callback_param = (void *)host;
645 dmaengine_submit(desc);
647 /* Flush cache before write */
648 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
649 sg_elems, DMA_TO_DEVICE);
650 dma_async_issue_pending(host->dms->ch);
/* Read direction: device-to-memory with the same channel and callback. */
653 slave_config.direction = DMA_DEV_TO_MEM;
654 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
656 dev_err(host->dev, "error in dw_mci edma configuration.\n");
659 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
660 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
662 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
665 /* set dw_mci_edmac_complete_dma as callback */
666 desc->callback = dw_mci_edmac_complete_dma;
667 desc->callback_param = (void *)host;
668 dmaengine_submit(desc);
669 dma_async_issue_pending(host->dms->ch);
/* Allocate the dw_mci_dma_slave bookkeeping struct and request the "dw_mci"
 * dmaengine channel named in the device tree. */
673 static int dw_mci_edmac_init(struct dw_mci *host)
675 /* Request external dma channel, SHOULD decide chn in dts */
677 host->dms = (struct dw_mci_dma_slave *)kmalloc
678 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
679 if (NULL == host->dms) {
680 dev_err(host->dev, "No enough memory to alloc dms.\n");
684 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
685 if (!host->dms->ch) {
/* BUG(review): this branch is entered only when host->dms->ch is NULL, yet
 * the message below dereferences host->dms->ch->chan_id — guaranteed NULL
 * pointer dereference on the failure path. Drop the chan_id from the log. */
686 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
687 host->dms->ch->chan_id);
/* Error-path teardown (freeing dms) begins here; its body is not visible in
 * this extract. */
694 if (NULL != host->dms) {
/* Release the external DMA channel on teardown.
 * NOTE(review): the lines freeing host->dms itself (kfree + NULL assignment)
 * are not visible in this extract — confirm they exist, else dms leaks. */
702 static void dw_mci_edmac_exit(struct dw_mci *host)
704 if (NULL != host->dms) {
705 if (NULL != host->dms->ch) {
706 dma_release_channel(host->dms->ch);
707 host->dms->ch = NULL;
/* DMA operations for an external dmaengine-provided channel. */
714 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
715 .init = dw_mci_edmac_init,
716 .exit = dw_mci_edmac_exit,
717 .start = dw_mci_edmac_start_dma,
718 .stop = dw_mci_edmac_stop_dma,
719 .complete = dw_mci_edmac_complete_dma,
720 .cleanup = dw_mci_edma_cleanup,
722 #endif /* CONFIG_MMC_DW_IDMAC */
/* Map the request's scatterlist for DMA (or reuse a mapping cached by
 * pre_req when @next == 0 and host_cookie is set). Returns the sg_len to
 * program, the cached cookie, or a negative value for transfers that must
 * fall back to PIO (short or misaligned). */
724 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
725 struct mmc_data *data,
728 struct scatterlist *sg;
729 unsigned int i, sg_len;
731 if (!next && data->host_cookie)
732 return data->host_cookie;
735 * We don't do DMA on "complex" transfers, i.e. with
736 * non-word-aligned buffers or lengths. Also, we don't bother
737 * with all the DMA setup overhead for short transfers.
739 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
/* Reject any sg entry whose offset or length is not 32-bit aligned. */
745 for_each_sg(data->sg, sg, data->sg_len, i) {
746 if (sg->offset & 3 || sg->length & 3)
750 sg_len = dma_map_sg(host->dev,
753 dw_mci_get_dma_dir(data));
/* Cache the mapping for post_req / repeat submissions (pre_req path). */
758 data->host_cookie = sg_len;
/* mmc_host_ops.pre_req: map the next request's buffers ahead of time so the
 * mapping cost overlaps with the current transfer. On failure the cookie is
 * cleared so the request falls back to the normal mapping path.
 * NOTE(review): the third parameter line of the signature is not visible in
 * this extract. */
763 static void dw_mci_pre_req(struct mmc_host *mmc,
764 struct mmc_request *mrq,
767 struct dw_mci_slot *slot = mmc_priv(mmc);
768 struct mmc_data *data = mrq->data;
770 if (!slot->host->use_dma || !data)
/* A stale cookie means pre_req was called twice for this request. */
773 if (data->host_cookie) {
774 data->host_cookie = 0;
778 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
779 data->host_cookie = 0;
/* mmc_host_ops.post_req: release the DMA mapping created by pre_req (the
 * cookie marks ownership) and clear the cookie.
 * NOTE(review): the trailing parameter line of the signature is not visible
 * in this extract. */
782 static void dw_mci_post_req(struct mmc_host *mmc,
783 struct mmc_request *mrq,
786 struct dw_mci_slot *slot = mmc_priv(mmc);
787 struct mmc_data *data = mrq->data;
789 if (!slot->host->use_dma || !data)
792 if (data->host_cookie)
793 dma_unmap_sg(slot->host->dev,
796 dw_mci_get_dma_dir(data));
797 data->host_cookie = 0;
/* Pick the largest burst size (MSIZE) and matching RX/TX watermarks for this
 * block size, and program FIFOTH accordingly (IDMAC builds only). */
800 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
802 #ifdef CONFIG_MMC_DW_IDMAC
803 unsigned int blksz = data->blksz;
804 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
805 u32 fifo_width = 1 << host->data_shift;
806 u32 blksz_depth = blksz / fifo_width, fifoth_val;
807 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
808 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
810 tx_wmark = (host->fifo_depth) / 2;
811 tx_wmark_invers = host->fifo_depth - tx_wmark;
815 * if blksz is not a multiple of the FIFO width
817 if (blksz % fifo_width) {
/* Walk mszs[] from the largest burst down, looking for one that divides
 * both the block depth and the TX headroom. */
824 if (!((blksz_depth % mszs[idx]) ||
825 (tx_wmark_invers % mszs[idx]))) {
827 rx_wmark = mszs[idx] - 1;
832 * If idx is '0', it won't be tried
833 * Thus, initial values are used
836 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
837 mci_writel(host, FIFOTH, fifoth_val);
/* Enable the card-read-threshold feature for high-speed read timings
 * (HS200 / SDR104) where the block fits in the FIFO; disable it otherwise. */
841 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
843 unsigned int blksz = data->blksz;
844 u32 blksz_depth, fifo_depth;
847 WARN_ON(!(data->flags & MMC_DATA_READ));
849 if (host->timing != MMC_TIMING_MMC_HS200 &&
850 host->timing != MMC_TIMING_UHS_SDR104)
853 blksz_depth = blksz / (1 << host->data_shift);
854 fifo_depth = host->fifo_depth;
856 if (blksz_depth > fifo_depth)
860 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
861 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
862 * Currently just choose blksz.
865 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
/* Fall-through label: threshold disabled for all other timings/sizes. */
869 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/* Try to hand @data to the DMA engine. Returns non-zero when the transfer
 * must fall back to PIO (no channel, or the mapping was rejected). */
872 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
880 /* If we don't have a channel, we can't do DMA */
884 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
886 host->dma_ops->stop(host);
893 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
894 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
898 * Decide the MSIZE and RX/TX Watermark.
899 * If current block size is same with previous size,
900 * no need to update fifoth.
902 if (host->prev_blksz != data->blksz)
903 dw_mci_adjust_fifoth(host, data);
906 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
908 /* Enable the DMA interface */
909 temp = mci_readl(host, CTRL);
910 temp |= SDMMC_CTRL_DMA_ENABLE;
911 mci_writel(host, CTRL, temp);
913 /* Disable RX/TX IRQs, let DMA handle it */
914 spin_lock_irqsave(&host->slock, flags);
915 temp = mci_readl(host, INTMASK);
916 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
917 mci_writel(host, INTMASK, temp);
918 spin_unlock_irqrestore(&host->slock, flags);
920 host->dma_ops->start(host, sg_len);
/* Prepare a data transfer: record direction, try DMA, and fall back to PIO
 * (sg_miter + RX/TX interrupts, DMA interface disabled) when DMA is refused. */
925 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
930 data->error = -EINPROGRESS;
937 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
939 if (data->flags & MMC_DATA_READ) {
940 host->dir_status = DW_MCI_RECV_STATUS;
941 dw_mci_ctrl_rd_thld(host, data);
943 host->dir_status = DW_MCI_SEND_STATUS;
946 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
947 data->blocks, data->blksz, mmc_hostname(host->mmc));
/* Non-zero return: DMA refused the transfer, set up PIO instead. */
949 if (dw_mci_submit_data_dma(host, data)) {
950 int flags = SG_MITER_ATOMIC;
951 if (host->data->flags & MMC_DATA_READ)
952 flags |= SG_MITER_TO_SG;
954 flags |= SG_MITER_FROM_SG;
956 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
958 host->part_buf_start = 0;
959 host->part_buf_count = 0;
/* NOTE(review): this path uses 'flag' while dw_mci_submit_data_dma() uses
 * 'flags' for the same spinlock — the declaration is not visible here;
 * confirm the naming in the full source. */
961 spin_lock_irqsave(&host->slock, flag);
962 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
963 temp = mci_readl(host, INTMASK);
964 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
965 mci_writel(host, INTMASK, temp);
966 spin_unlock_irqrestore(&host->slock, flag);
968 temp = mci_readl(host, CTRL);
969 temp &= ~SDMMC_CTRL_DMA_ENABLE;
970 mci_writel(host, CTRL, temp);
973 * Use the initial fifoth_val for PIO mode.
974 * If next issued data may be transfered by DMA mode,
975 * prev_blksz should be invalidated.
977 mci_writel(host, FIFOTH, host->fifoth_val);
978 host->prev_blksz = 0;
981 * Keep the current block size.
982 * It will be used to decide whether to update
983 * fifoth register next time.
985 host->prev_blksz = data->blksz;
/* Issue an internal controller command (e.g. clock update) and busy-wait for
 * the START bit to clear, after first polling the card/controller un-busy. */
989 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
991 struct dw_mci *host = slot->host;
992 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
993 unsigned int cmd_status = 0;
/* NOTE(review): SDMMC_WAIT_FOR_UNBUSY is #defined unconditionally earlier in
 * this file, so this #ifdef is always true. */
994 #ifdef SDMMC_WAIT_FOR_UNBUSY
996 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
998 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1000 ret = time_before(jiffies, timeout);
1001 cmd_status = mci_readl(host, STATUS);
1002 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1006 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1007 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1010 mci_writel(host, CMDARG, arg);
1012 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
/* Clock-update commands complete quickly; give them a shorter budget. */
1013 if(cmd & SDMMC_CMD_UPD_CLK)
1014 timeout = jiffies + msecs_to_jiffies(50);
1016 timeout = jiffies + msecs_to_jiffies(500);
1017 while (time_before(jiffies, timeout)) {
1018 cmd_status = mci_readl(host, CMD);
1019 if (!(cmd_status & SDMMC_CMD_START))
1022 dev_err(&slot->mmc->class_dev,
1023 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1024 cmd, arg, cmd_status);
/* Program the card clock (CLKDIV/CLKENA/CLKSRC plus clk_mmc rate workarounds
 * for Rockchip SoCs), low-power clock gating, and the slot's bus width.
 * Each register change is latched into the controller via mci_send_cmd()
 * with SDMMC_CMD_UPD_CLK. */
1027 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1029 struct dw_mci *host = slot->host;
1030 unsigned int tempck,clock = slot->clock;
1035 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1036 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
/* clock == 0: gate the card clock off. */
1039 mci_writel(host, CLKENA, 0);
1040 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1041 if(host->svi_flags == 0)
1042 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1044 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1046 } else if (clock != host->current_speed || force_clkinit) {
1047 div = host->bus_hz / clock;
1048 if (host->bus_hz % clock && host->bus_hz > clock)
1050 * move the + 1 after the divide to prevent
1051 * over-clocking the card.
1055 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1057 if ((clock << div) != slot->__clk_old || force_clkinit) {
1058 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1059 dev_info(&slot->mmc->class_dev,
1060 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1061 slot->id, host->bus_hz, clock,
1064 host->set_speed = tempck;
1065 host->set_div = div;
/* disable clock before reprogramming divider/source */
1069 mci_writel(host, CLKENA, 0);
1070 mci_writel(host, CLKSRC, 0);
1074 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
/* Rockchip workaround: for init-speed clocks, re-parent clk_mmc and run it
 * at 2x the requested rate (the controller divides by 2). */
1076 if(clock <= 400*1000){
1077 MMC_DBG_BOOT_FUNC(host->mmc,
1078 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1079 clock * 2, mmc_hostname(host->mmc));
1080 /* clk_mmc will change parents to 24MHz xtal*/
1081 clk_set_rate(host->clk_mmc, clock * 2);
1084 host->set_div = div;
1088 MMC_DBG_BOOT_FUNC(host->mmc,
1089 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1090 mmc_hostname(host->mmc));
1093 MMC_DBG_ERR_FUNC(host->mmc,
1094 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1095 mmc_hostname(host->mmc));
1097 host->set_div = div;
1098 host->bus_hz = host->set_speed * 2;
1099 MMC_DBG_BOOT_FUNC(host->mmc,
1100 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1101 div, host->bus_hz, mmc_hostname(host->mmc));
1103 /* BUG may be here, come on, Linux BSP engineer looks!
1104 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1105 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1106 some oops happened like that:
1107 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1108 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1109 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1110 mmc0: new high speed DDR MMC card at address 0001
1111 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1113 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1114 mmcblk0: retrying using single block read
1115 mmcblk0: error -110 sending status command, retrying
1117 We assume all eMMC in RK platform with 3.10 kernel, at least version 4.5
1120 (host->mmc->caps & (MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR)) &&
1121 !(host->mmc->caps2 & MMC_CAP2_HS200)) {
1124 host->set_div = div;
1125 host->bus_hz = host->set_speed * 2;
1126 MMC_DBG_BOOT_FUNC(host->mmc,
1127 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1128 div, host->bus_hz, mmc_hostname(host->mmc));
/* Older controller revisions take the target rate directly; newer ones run
 * the source clock at 2x. */
1131 if (host->verid < DW_MMC_240A)
1132 clk_set_rate(host->clk_mmc,(host->bus_hz));
1134 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1140 /* set clock to desired speed */
1141 mci_writel(host, CLKDIV, div);
1145 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1147 /* enable clock; only low power if no SDIO */
1148 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1150 if (host->verid < DW_MMC_240A)
1151 sdio_int = SDMMC_INT_SDIO(slot->id);
1153 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1155 if (!(mci_readl(host, INTMASK) & sdio_int))
1156 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1157 mci_writel(host, CLKENA, clk_en_a);
1161 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1162 /* remember (clock << div) so an unchanged setting is skipped next time */
1163 slot->__clk_old = clock << div;
1166 host->current_speed = clock;
1168 if(slot->ctype != slot->pre_ctype)
1169 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1171 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1172 mmc_hostname(host->mmc));
1173 slot->pre_ctype = slot->ctype;
1175 /* Set the current slot bus width */
1176 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1179 extern struct mmc_card *this_card;
/* Poll STATUS until the data path and card are no longer busy, with a
 * per-card-type millisecond budget (extended for eMMC erase from EXT_CSD). */
1180 static void dw_mci_wait_unbusy(struct dw_mci *host)
1183 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1184 unsigned long time_loop;
1185 unsigned int status;
1188 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1190 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1191 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1192 /* Special care for (secure)erase timeout calculation */
/* BUG(review): (arg & (0x1 << 31)) evaluates to 0 or 0x80000000 — never 1 —
 * so this secure-erase test is always false; it should be '!= 0' (or test
 * '(arg >> 31) & 0x1'). Also '0x1 << 31' shifts into the sign bit of int
 * (undefined behavior); use 1U << 31. */
1194 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
/* BUG(review): the stray ';' makes this 'if' body empty, so the
 * erase_group_def test has no effect and the ternary below always runs. */
1197 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1) ;
1198 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1199 300000 * (this_card->ext_csd.sec_erase_mult)) :
1200 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1204 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1205 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1206 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1207 timeout = SDMMC_DATA_TIMEOUT_SD;
1210 time_loop = jiffies + msecs_to_jiffies(timeout);
1212 status = mci_readl(host, STATUS);
1213 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1215 } while (time_before(jiffies, time_loop));
1220 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* Busy indication used by the voltage-switch sequence:
1223 * 0--status is busy.
1224 * 1--status is unbusy.
 * Toggles host->svi_flags so the first call during a switch reports 1. */
1226 int dw_mci_card_busy(struct mmc_host *mmc)
1228 struct dw_mci_slot *slot = mmc_priv(mmc);
1229 struct dw_mci *host = slot->host;
1231 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1232 host->svi_flags, mmc_hostname(host->mmc));
1235 if(host->svi_flags == 0){
1237 host->svi_flags = 1;
1238 return host->svi_flags;
1241 host->svi_flags = 0;
1242 return host->svi_flags;
/* Core request start: select the slot, wait for the controller to go idle,
 * reset per-request state, program byte/block counts for data, and issue the
 * (possibly INIT-flagged) first command. Caches the stop command's flags. */
1248 static void __dw_mci_start_request(struct dw_mci *host,
1249 struct dw_mci_slot *slot,
1250 struct mmc_command *cmd)
1252 struct mmc_request *mrq;
1253 struct mmc_data *data;
1257 if (host->pdata->select_slot)
1258 host->pdata->select_slot(slot->id);
1260 host->cur_slot = slot;
1263 dw_mci_wait_unbusy(host);
1265 host->pending_events = 0;
1266 host->completed_events = 0;
1267 host->data_status = 0;
1271 dw_mci_set_timeout(host);
1272 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1273 mci_writel(host, BLKSIZ, data->blksz);
1276 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1278 /* this is the first command, send the initialization clock */
1279 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1280 cmdflags |= SDMMC_CMD_INIT;
1283 dw_mci_submit_data(host, data);
1287 dw_mci_start_command(host, cmd, cmdflags);
1290 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * dw_mci_start_request - kick off the slot's queued mmc_request.
 * If the request carries an SBC (CMD23, set-block-count), that command is
 * sent first; otherwise the main command is issued directly.
 */
1293 static void dw_mci_start_request(struct dw_mci *host,
1294 struct dw_mci_slot *slot)
1296 struct mmc_request *mrq = slot->mrq;
1297 struct mmc_command *cmd;
1299 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1300 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1302 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1303 __dw_mci_start_request(host, slot, cmd);
1306 /* must be called with host->lock held */
/*
 * dw_mci_queue_request - start the request immediately if the host state
 * machine is idle, otherwise append the slot to host->queue so
 * dw_mci_request_end() picks it up when the current request finishes.
 */
1307 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1308 struct mmc_request *mrq)
1310 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1315 if (host->state == STATE_IDLE) {
1316 host->state = STATE_SENDING_CMD;
1317 dw_mci_start_request(host, slot);
1319 list_add_tail(&slot->queue_node, &host->queue);
/*
 * dw_mci_request - mmc_host_ops .request entry point.
 * Fails the request with -ENOMEDIUM when no card is present; otherwise
 * hands it to dw_mci_queue_request().  The presence check and the queueing
 * happen under host->lock so a removal cannot race in between (see the
 * original comment below).
 */
1323 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1325 struct dw_mci_slot *slot = mmc_priv(mmc);
1326 struct dw_mci *host = slot->host;
1331 * The check for card presence and queueing of the request must be
1332 * atomic, otherwise the card could be removed in between and the
1333 * request wouldn't fail until another card was inserted.
1335 spin_lock_bh(&host->lock);
1337 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
/* Drop the lock before completing: mmc_request_done may re-enter. */
1338 spin_unlock_bh(&host->lock);
1339 mrq->cmd->error = -ENOMEDIUM;
1340 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1341 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
1343 mmc_request_done(mmc, mrq);
1347 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1348 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1350 dw_mci_queue_request(host, slot, mrq);
1352 spin_unlock_bh(&host->lock);
/*
 * dw_mci_set_ios - mmc_host_ops .set_ios: apply bus width, timing, clock
 * and power mode to the controller.
 *
 * Flow (several lines elided in this listing):
 *  1. optionally busy-wait for the controller to go unbusy (SDMMC_WAIT_FOR_UNBUSY),
 *     with a longer budget while a signal-voltage switch is in flight;
 *  2. translate ios->bus_width into slot->ctype;
 *  3. set/clear the per-slot DDR bit (bit 16+id) in UHS_REG for DDR50;
 *  4. mirror ios->clock into slot->clock and call the SoC set_ios hook;
 *  5. reprogram the bus via dw_mci_setup_bus();
 *  6. power the slot up/down through PWREN and the optional setpower hook.
 */
1355 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1357 struct dw_mci_slot *slot = mmc_priv(mmc);
1358 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1359 struct dw_mci *host = slot->host;
1361 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1364 #ifdef SDMMC_WAIT_FOR_UNBUSY
1365 unsigned long time_loop;
1368 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* During a voltage switch allow the (longer) SD data timeout. */
1369 if(host->svi_flags == 1)
1370 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1372 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1374 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1377 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1378 printk("%d..%s: no card. [%s]\n", \
1379 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until both data-busy and mc-busy clear, or timeout. */
1384 ret = time_before(jiffies, time_loop);
1385 regs = mci_readl(slot->host, STATUS);
1386 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1392 printk("slot->flags = %lu ", slot->flags);
1393 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1394 if(host->svi_flags != 1)
1397 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1398 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
1402 switch (ios->bus_width) {
1403 case MMC_BUS_WIDTH_4:
1404 slot->ctype = SDMMC_CTYPE_4BIT;
1406 case MMC_BUS_WIDTH_8:
1407 slot->ctype = SDMMC_CTYPE_8BIT;
1410 /* set default 1 bit mode */
1411 slot->ctype = SDMMC_CTYPE_1BIT;
1412 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode: per-slot enable bit lives in UHS_REG[16 + slot->id]. */
1415 regs = mci_readl(slot->host, UHS_REG);
1418 if (ios->timing == MMC_TIMING_UHS_DDR50)
1419 regs |= ((0x1 << slot->id) << 16);
1421 regs &= ~((0x1 << slot->id) << 16);
1423 mci_writel(slot->host, UHS_REG, regs);
1424 slot->host->timing = ios->timing;
1427 * Use mirror of ios->clock to prevent race with mmc
1428 * core ios update when finding the minimum.
1430 slot->clock = ios->clock;
1432 if (drv_data && drv_data->set_ios)
1433 drv_data->set_ios(slot->host, ios);
1435 /* Slot specific timing and width adjustment */
1436 dw_mci_setup_bus(slot, false);
1440 switch (ios->power_mode) {
/* Power up: request init clock on next command, then enable PWREN bit. */
1442 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1444 if (slot->host->pdata->setpower)
1445 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1446 regs = mci_readl(slot->host, PWREN);
1447 regs |= (1 << slot->id);
1448 mci_writel(slot->host, PWREN, regs);
1451 /* Power down slot */
1452 if(slot->host->pdata->setpower)
1453 slot->host->pdata->setpower(slot->id, 0);
1454 regs = mci_readl(slot->host, PWREN);
1455 regs &= ~(1 << slot->id);
1456 mci_writel(slot->host, PWREN, regs);
/*
 * dw_mci_get_ro - mmc_host_ops .get_ro: report write-protect state.
 * Precedence: slot quirk (never write-protected) > platform get_ro hook >
 * write-protect GPIO > controller WRTPRT register bit for this slot.
 * Returns nonzero when the card is read-only.
 */
1463 static int dw_mci_get_ro(struct mmc_host *mmc)
1466 struct dw_mci_slot *slot = mmc_priv(mmc);
1467 struct dw_mci_board *brd = slot->host->pdata;
1469 /* Use platform get_ro function, else try on board write protect */
1470 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1472 else if(brd->get_ro)
1473 read_only = brd->get_ro(slot->id);
1474 else if(gpio_is_valid(slot->wp_gpio))
1475 read_only = gpio_get_value(slot->wp_gpio);
1478 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1480 dev_dbg(&mmc->class_dev, "card is %s\n",
1481 read_only ? "read-only" : "read-write");
/*
 * dw_mci_set_sdio_status - virtual card-detect for SDIO slots.
 * Sets/clears DW_MMC_CARD_PRESENT according to val, gates hclk_mmc/clk_mmc
 * to match the new state, and schedules card detection.  Rejects hosts that
 * are not restricted to SDIO.
 *
 * NOTE(review): the present/absent clock handling keys off the flag just
 * written under host->lock; the elided lines presumably include the val
 * test and return statements.
 */
1486 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1488 struct dw_mci_slot *slot = mmc_priv(mmc);
1489 struct dw_mci *host = slot->host;
1490 /*struct dw_mci_board *brd = slot->host->pdata;*/
1492 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1495 spin_lock_bh(&host->lock);
1498 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1500 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1502 spin_unlock_bh(&host->lock);
/* Card "inserted": make sure both bus and card clocks are running. */
1504 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1505 if(__clk_is_enabled(host->hclk_mmc) == false)
1506 clk_prepare_enable(host->hclk_mmc);
1507 if(__clk_is_enabled(host->clk_mmc) == false)
1508 clk_prepare_enable(host->clk_mmc);
/* Card "removed": gate clocks, card clock before bus clock. */
1510 if(__clk_is_enabled(host->clk_mmc) == true)
1511 clk_disable_unprepare(slot->host->clk_mmc);
1512 if(__clk_is_enabled(host->hclk_mmc) == true)
1513 clk_disable_unprepare(slot->host->hclk_mmc);
/* Debounced rescan (20 ms) so the core re-probes the slot. */
1516 mmc_detect_change(slot->mmc, 20);
/*
 * dw_mci_get_cd - mmc_host_ops .get_cd: report card presence.
 *
 * RK312x SD special case: the card-detect pin doubles as force_jtag mux
 * control, so the GPIO is sampled (twice, as a crude debounce) and the GRF
 * SOC_CON0 register is flipped to enable JTAG when no card is present and
 * disable it when one is.  Otherwise presence comes from, in order: the
 * SDIO virtual-detect flag, the broken-CD quirk, the platform get_cd hook,
 * the CD gpio, or the controller CDETECT register.  The result is cached
 * in DW_MMC_CARD_PRESENT under host->lock.
 */
1522 static int dw_mci_get_cd(struct mmc_host *mmc)
1525 struct dw_mci_slot *slot = mmc_priv(mmc);
1526 struct dw_mci_board *brd = slot->host->pdata;
1527 struct dw_mci *host = slot->host;
1528 int gpio_cd = mmc_gpio_get_cd(mmc);
1531 if (cpu_is_rk312x() &&
1533 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1534 gpio_cd = slot->cd_gpio;
1535 if (gpio_is_valid(gpio_cd)) {
/* Read twice; only trust a stable level (elided delay in between). */
1536 gpio_val = gpio_get_value_cansleep(gpio_cd);
1538 if (gpio_val == gpio_get_value_cansleep(gpio_cd)) {
1539 gpio_cd = gpio_get_value_cansleep(gpio_cd) == 0 ? 1 : 0;
1541 /* Enable force_jtag wihtout card in slot, ONLY for NCD-package */
1542 grf_writel((0x1 << 24) | (1 << 8), RK312X_GRF_SOC_CON0);
1543 dw_mci_ctrl_all_reset(host);
1545 /* Really card detected: SHOULD disable force_jtag */
1546 grf_writel((0x1 << 24) | (0 << 8), RK312X_GRF_SOC_CON0);
/* Unstable reading: fall back to the last debounced state. */
1550 return slot->last_detect_state;
1553 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
1557 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1558 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1560 /* Use platform get_cd function, else try onboard card detect */
1561 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1563 else if (brd->get_cd)
1564 present = !brd->get_cd(slot->id);
1565 else if (!IS_ERR_VALUE(gpio_cd))
1568 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1571 spin_lock_bh(&host->lock);
1573 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1574 dev_dbg(&mmc->class_dev, "card is present\n");
1576 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1577 dev_dbg(&mmc->class_dev, "card is not present\n");
1579 spin_unlock_bh(&host->lock);
1586 * Dts Should caps emmc controller with poll-hw-reset
/*
 * dw_mci_hw_reset - mmc_host_ops .hw_reset for eMMC.
 * Sequence: (1) issue CMD12 to abort any in-flight transfer and poll for
 * the command to be accepted; (2) wait for/clear the data-over interrupt;
 * (3) reset IDMAC (BMOD), DMA (CTRL) and FIFO (CTRL) in that fixed order,
 * verifying each self-clearing bit; (4) pulse PWREN/RST_N with eMMC-spec
 * timing (tRstW >= 1us, tRSCA >= 200us, tRSTH >= 1us — see comment below).
 */
1588 static void dw_mci_hw_reset(struct mmc_host *mmc)
1590 struct dw_mci_slot *slot = mmc_priv(mmc);
1591 struct dw_mci *host = slot->host;
1596 unsigned long timeout;
1599 /* (1) CMD12 to end any transfer in process */
1600 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1601 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1603 if(host->mmc->hold_reg_flag)
1604 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1605 mci_writel(host, CMDARG, 0);
1607 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll up to 500 ms for the controller to take the command. */
1609 timeout = jiffies + msecs_to_jiffies(500);
1611 ret = time_before(jiffies, timeout);
1612 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1617 MMC_DBG_ERR_FUNC(host->mmc,
1618 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1619 __func__, mmc_hostname(host->mmc));
1621 /* (2) wait DTO, even if no response is sent back by card */
1623 timeout = jiffies + msecs_to_jiffies(5);
1625 ret = time_before(jiffies, timeout);
1626 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1627 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1633 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1635 /* Software reset - BMOD[0] for IDMA only */
1636 regs = mci_readl(host, BMOD);
1637 regs |= SDMMC_IDMAC_SWRESET;
1638 mci_writel(host, BMOD, regs);
1639 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1640 regs = mci_readl(host, BMOD);
1641 if(regs & SDMMC_IDMAC_SWRESET)
1642 MMC_DBG_WARN_FUNC(host->mmc,
1643 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1644 __func__, mmc_hostname(host->mmc));
1646 /* DMA reset - CTRL[2] */
1647 regs = mci_readl(host, CTRL);
1648 regs |= SDMMC_CTRL_DMA_RESET;
1649 mci_writel(host, CTRL, regs);
1650 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1651 regs = mci_readl(host, CTRL);
1652 if(regs & SDMMC_CTRL_DMA_RESET)
1653 MMC_DBG_WARN_FUNC(host->mmc,
1654 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1655 __func__, mmc_hostname(host->mmc));
1657 /* FIFO reset - CTRL[1] */
1658 regs = mci_readl(host, CTRL);
1659 regs |= SDMMC_CTRL_FIFO_RESET;
1660 mci_writel(host, CTRL, regs);
1661 mdelay(1); /* no timing limited, 1ms is random value */
1662 regs = mci_readl(host, CTRL);
1663 if(regs & SDMMC_CTRL_FIFO_RESET)
/* NOTE(review): copy-paste defect — this message names SDMMC_CTRL_DMA_RESET
 * but the failing bit checked above is SDMMC_CTRL_FIFO_RESET. */
1664 MMC_DBG_WARN_FUNC(host->mmc,
1665 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1666 __func__, mmc_hostname(host->mmc));
1669 According to eMMC spec
1670 tRstW >= 1us ; RST_n pulse width
1671 tRSCA >= 200us ; RST_n to Command time
1672 tRSTH >= 1us ; RST_n high period
1674 mci_writel(slot->host, PWREN, 0x0);
1675 mci_writel(slot->host, RST_N, 0x0);
1677 udelay(10); /* 10us for bad quality eMMc. */
1679 mci_writel(slot->host, PWREN, 0x1);
1680 mci_writel(slot->host, RST_N, 0x1);
1682 usleep_range(500, 1000); /* at least 500(> 200us) */
1686 * Disable lower power mode.
1688 * Low power mode will stop the card clock when idle. According to the
1689 * description of the CLKENA register we should disable low power mode
1690 * for SDIO cards if we need SDIO interrupts to work.
1692 * This function is fast if low power mode is already disabled.
/*
 * Clears the per-slot CLKEN_LOW_PWR bit in CLKENA and pushes the change to
 * the card clock domain with an update-clock command; no-op when the bit
 * is already clear.
 */
1694 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1696 struct dw_mci *host = slot->host;
1698 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1700 clk_en_a = mci_readl(host, CLKENA);
1702 if (clk_en_a & clken_low_pwr) {
1703 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
/* CLKENA only takes effect after an update-clock command. */
1704 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1705 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * dw_mci_enable_sdio_irq - mmc_host_ops .enable_sdio_irq.
 * Masks/unmasks this slot's SDIO interrupt in INTMASK under host->slock.
 * The per-slot SDIO interrupt bit moved from bit-offset id to id+8 in
 * controller version 2.40a, hence the verid test.  Enabling also forces
 * low-power clock gating off so the card clock keeps running and the card
 * can actually signal the interrupt.
 */
1709 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1711 struct dw_mci_slot *slot = mmc_priv(mmc);
1712 struct dw_mci *host = slot->host;
1713 unsigned long flags;
1717 spin_lock_irqsave(&host->slock, flags);
1719 /* Enable/disable Slot Specific SDIO interrupt */
1720 int_mask = mci_readl(host, INTMASK);
1722 if (host->verid < DW_MMC_240A)
1723 sdio_int = SDMMC_INT_SDIO(slot->id);
1725 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1729 * Turn off low power mode if it was enabled. This is a bit of
1730 * a heavy operation and we disable / enable IRQs a lot, so
1731 * we'll leave low power mode disabled and it will get
1732 * re-enabled again in dw_mci_setup_bus().
1734 dw_mci_disable_low_power(slot);
1736 mci_writel(host, INTMASK,
1737 (int_mask | sdio_int));
1739 mci_writel(host, INTMASK,
1740 (int_mask & ~sdio_int));
1743 spin_unlock_irqrestore(&host->slock, flags);
1746 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* IO-domain target voltages in millivolts. */
1748 IO_DOMAIN_12 = 1200,
1749 IO_DOMAIN_18 = 1800,
1750 IO_DOMAIN_33 = 3300,
/*
 * dw_mci_do_grf_io_domain_switch - switch the SD IO-domain voltage select
 * bit in the GRF for the given target voltage.  Only RK3288 with an
 * SD-restricted host is handled here; unsupported voltages and unknown
 * chips just log an error.  NOTE(review): `voltage` is written shifted
 * into bit 7 with write-enable bit 23 — the elided lines presumably map
 * the IO_DOMAIN_* value onto a 0/1 selector first; confirm in full source.
 */
1752 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1762 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1763 __FUNCTION__, mmc_hostname(host->mmc));
1766 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1767 __FUNCTION__, mmc_hostname(host->mmc));
1771 if(cpu_is_rk3288()){
1772 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1773 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1777 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1778 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * dw_mci_do_start_signal_voltage_switch - perform the actual signalling
 * voltage change for .start_signal_voltage_switch.
 *
 * Per target voltage: reprogram the vmmc regulator (via the Rockchip
 * io-domain helper), flip the GRF io-domain select, update the UHS_REG
 * 1.8V bit, wait for the regulator to settle (5 ms per spec), and verify
 * the UHS_REG bit stuck.  3.3V additionally clears low-power clock mode.
 * Controllers older than 2.40a have no UHS_REG support and bail out early.
 */
1782 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1783 struct mmc_ios *ios)
1786 unsigned int value,uhs_reg;
1789 * Signal Voltage Switching is only applicable for Host Controllers
1792 if (host->verid < DW_MMC_240A)
1795 uhs_reg = mci_readl(host, UHS_REG);
1796 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1797 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1799 switch (ios->signal_voltage) {
1800 case MMC_SIGNAL_VOLTAGE_330:
1801 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1803 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1804 /* regulator_put(host->vmmc); //to be done in remove function. */
1806 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1807 __func__, regulator_get_voltage(host->vmmc), ret);
1809 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1810 " failed\n", mmc_hostname(host->mmc));
1813 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1815 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1816 __FUNCTION__, mmc_hostname(host->mmc));
1818 /* set High-power mode */
1819 value = mci_readl(host, CLKENA);
1820 value &= ~SDMMC_CLKEN_LOW_PWR;
1821 mci_writel(host,CLKENA , value);
/* Clear the 1.8V signalling enable for 3.3V operation. */
1823 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1824 mci_writel(host,UHS_REG , uhs_reg);
1827 usleep_range(5000, 5500);
1829 /* 3.3V regulator output should be stable within 5 ms */
1830 uhs_reg = mci_readl(host, UHS_REG);
1831 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1834 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1835 mmc_hostname(host->mmc));
1838 case MMC_SIGNAL_VOLTAGE_180:
1840 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1841 /* regulator_put(host->vmmc);//to be done in remove function. */
1843 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1844 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1846 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1847 " failed\n", mmc_hostname(host->mmc));
1850 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1854 * Enable 1.8V Signal Enable in the Host Control2
1857 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1860 usleep_range(5000, 5500);
1861 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1862 __FUNCTION__,mmc_hostname(host->mmc));
1864 /* 1.8V regulator output should be stable within 5 ms */
1865 uhs_reg = mci_readl(host, UHS_REG);
1866 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1869 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1870 mmc_hostname(host->mmc));
1873 case MMC_SIGNAL_VOLTAGE_120:
1875 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1877 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1878 " failed\n", mmc_hostname(host->mmc));
1884 /* No signal voltage switch required */
/*
 * dw_mci_start_signal_voltage_switch - thin mmc_host_ops wrapper that
 * rejects pre-2.40a controllers and delegates to
 * dw_mci_do_start_signal_voltage_switch().
 */
1890 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1891 struct mmc_ios *ios)
1893 struct dw_mci_slot *slot = mmc_priv(mmc);
1894 struct dw_mci *host = slot->host;
1897 if (host->verid < DW_MMC_240A)
1900 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * dw_mci_execute_tuning - mmc_host_ops .execute_tuning.
 * Picks the standard tuning block pattern for the opcode/bus width
 * (CMD21 HS200: 4- or 8-bit pattern; CMD19: 4-bit pattern), selects the
 * clock con_id per restricted card type (3 = eMMC, 1 = SDIO, 0 = SD) and
 * delegates to the SoC drv_data->execute_tuning hook.  Skipped entirely on
 * RK3036/RK312x, which lack a 1.8V io domain.
 */
1906 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1908 struct dw_mci_slot *slot = mmc_priv(mmc);
1909 struct dw_mci *host = slot->host;
1910 const struct dw_mci_drv_data *drv_data = host->drv_data;
1911 struct dw_mci_tuning_data tuning_data;
1914 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1915 if(cpu_is_rk3036() || cpu_is_rk312x())
1918 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1919 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1920 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1921 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1922 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1923 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1924 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1928 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1929 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1930 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1933 "Undefined command(%d) for tuning\n", opcode);
1938 /* Recommend sample phase and delayline
1939 Fixme: Mix-use these three controllers will cause
1942 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1943 tuning_data.con_id = 3;
1944 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1945 tuning_data.con_id = 1;
1947 tuning_data.con_id = 0;
1949 /* 0: driver, from host->devices
1950 1: sample, from devices->host
1952 tuning_data.tuning_type = 1;
1954 if (drv_data && drv_data->execute_tuning)
1955 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/*
 * mmc_host_ops table for this driver; the voltage-switch callbacks are
 * only wired up when the Rockchip switch-voltage support is configured.
 */
1960 static const struct mmc_host_ops dw_mci_ops = {
1961 .request = dw_mci_request,
1962 .pre_req = dw_mci_pre_req,
1963 .post_req = dw_mci_post_req,
1964 .set_ios = dw_mci_set_ios,
1965 .get_ro = dw_mci_get_ro,
1966 .get_cd = dw_mci_get_cd,
1967 .set_sdio_status = dw_mci_set_sdio_status,
1968 .hw_reset = dw_mci_hw_reset,
1969 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1970 .execute_tuning = dw_mci_execute_tuning,
1971 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1972 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
1973 .card_busy = dw_mci_card_busy,
/*
 * dw_mci_enable_irq - enable/disable the host IRQ line, tracking the
 * current state in host->irq_state so repeated calls with the same flag
 * do not unbalance the enable_irq/disable_irq nesting count.
 */
1978 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
1980 unsigned long flags;
1985 local_irq_save(flags);
1986 if(host->irq_state != irqflag)
1988 host->irq_state = irqflag;
1991 enable_irq(host->irq);
1995 disable_irq(host->irq);
1998 local_irq_restore(flags);
/*
 * dw_mci_deal_data_end - post-transfer busy handling and late error
 * classification for write transfers.  For host->card transfers (other
 * than CMD14 bus test) a DCRC status maps to -EILSEQ and an end-bit error
 * to -ETIMEDOUT, then the controller is polled until unbusy.
 * NOTE(review): annotated __releases/__acquires(&host->lock), but no
 * unlock/lock is visible in this elided listing — confirm in full source.
 */
2002 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
2003 __releases(&host->lock)
2004 __acquires(&host->lock)
2006 if(DW_MCI_SEND_STATUS == host->dir_status){
2008 if( MMC_BUS_TEST_W != host->cmd->opcode){
2009 if(host->data_status & SDMMC_INT_DCRC)
2010 host->data->error = -EILSEQ;
2011 else if(host->data_status & SDMMC_INT_EBE)
2012 host->data->error = -ETIMEDOUT;
2014 dw_mci_wait_unbusy(host);
2017 dw_mci_wait_unbusy(host);
/*
 * dw_mci_request_end - finish the current request and start the next.
 * Stops the DTO watchdog timer, settles any write-busy state, clears the
 * slot's mrq, dequeues and starts the next queued slot (or goes IDLE),
 * and finally completes the request to the core.  host->lock is dropped
 * around mmc_request_done() — hence the __releases/__acquires annotations
 * — because the core may immediately submit a new request.
 */
2022 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2023 __releases(&host->lock)
2024 __acquires(&host->lock)
2026 struct dw_mci_slot *slot;
2027 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2029 WARN_ON(host->cmd || host->data);
2031 del_timer_sync(&host->dto_timer);
2032 dw_mci_deal_data_end(host, mrq);
2035 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2036 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2038 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2039 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2041 host->cur_slot->mrq = NULL;
/* Pull the next waiting slot, if any, and restart the state machine. */
2043 if (!list_empty(&host->queue)) {
2044 slot = list_entry(host->queue.next,
2045 struct dw_mci_slot, queue_node);
2046 list_del(&slot->queue_node);
2047 dev_vdbg(host->dev, "list not empty: %s is next\n",
2048 mmc_hostname(slot->mmc));
2049 host->state = STATE_SENDING_CMD;
2050 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2051 dw_mci_start_request(host, slot);
2053 dev_vdbg(host->dev, "list empty\n");
2054 host->state = STATE_IDLE;
/* Completion callback runs without the host lock held. */
2057 spin_unlock(&host->lock);
2058 mmc_request_done(prev_mmc, mrq);
2059 spin_lock(&host->lock);
/*
 * dw_mci_command_complete - read back the response registers and translate
 * the latched command status into cmd->error.
 * Response words for R2 (136-bit) come back in reverse register order.
 * RTO -> -ETIMEDOUT, RCRC (when the command expects CRC) -> -EILSEQ;
 * the DTO watchdog timer is cancelled on every error path.
 */
2062 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2064 u32 status = host->cmd_status;
2066 host->cmd_status = 0;
2068 /* Read the response from the card (up to 16 bytes) */
2069 if (cmd->flags & MMC_RSP_PRESENT) {
2070 if (cmd->flags & MMC_RSP_136) {
/* 136-bit response: RESP3 holds the most-significant word. */
2071 cmd->resp[3] = mci_readl(host, RESP0);
2072 cmd->resp[2] = mci_readl(host, RESP1);
2073 cmd->resp[1] = mci_readl(host, RESP2);
2074 cmd->resp[0] = mci_readl(host, RESP3);
2076 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2077 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2079 cmd->resp[0] = mci_readl(host, RESP0);
2083 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2084 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
/* Response timeout: SDIO hosts get special handling in the elided branch. */
2088 if (status & SDMMC_INT_RTO)
2090 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2093 cmd->error = -ETIMEDOUT;
2094 del_timer_sync(&host->dto_timer);
2095 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2096 del_timer_sync(&host->dto_timer);
2097 cmd->error = -EILSEQ;
2098 }else if (status & SDMMC_INT_RESP_ERR){
2099 del_timer_sync(&host->dto_timer);
2104 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2105 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2108 del_timer_sync(&host->dto_timer);
/* Suppress repeated-RTO logging for CMD13 polling (cmd_rto holdoff). */
2109 if(MMC_SEND_STATUS != cmd->opcode)
2110 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2111 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2112 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2116 /* newer ip versions need a delay between retries */
2117 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * dw_mci_tasklet_func - bottom-half state machine driving each request.
 *
 * Runs under host->lock and loops over host->state until it stabilises:
 *   STATE_SENDING_CMD  -> command done; chain SBC->main cmd, or fail into
 *                         STATE_SENDING_STOP, or finish, or go to
 *                         STATE_SENDING_DATA;
 *   STATE_SENDING_DATA -> on EVENT_DATA_ERROR abort the transfer (manual
 *                         CMD12 when no stop cmd is attached), else wait
 *                         for EVENT_XFER_COMPLETE and go DATA_BUSY;
 *   STATE_DATA_BUSY    -> classify data_status into data->error
 *                         (DRTO -> -ETIMEDOUT, DCRC -> -EILSEQ, write EBE
 *                         -> -ETIMEDOUT with bytes_xfered forced to 0),
 *                         reset the FIFO after errors, then either finish
 *                         the request or issue the stop command;
 *   STATE_SENDING_STOP -> complete the stop command and finish;
 *   STATE_DATA_ERROR   -> wait for transfer completion, re-enter DATA_BUSY.
 *
 * NOTE(review): many structural lines (switch header, break/goto labels,
 * unlock) are elided in this listing; treat the flow description above as
 * reconstructed and confirm against the full source.
 */
2123 static void dw_mci_tasklet_func(unsigned long priv)
2125 struct dw_mci *host = (struct dw_mci *)priv;
2126 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2127 struct mmc_data *data;
2128 struct mmc_command *cmd;
2129 enum dw_mci_state state;
2130 enum dw_mci_state prev_state;
2131 u32 status, cmd_flags;
2132 unsigned long timeout = 0;
2135 spin_lock(&host->lock);
2137 state = host->state;
2147 case STATE_SENDING_CMD:
2148 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2149 &host->pending_events))
2154 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2155 dw_mci_command_complete(host, cmd);
/* SBC (CMD23) done cleanly: immediately launch the real data command. */
2156 if (cmd == host->mrq->sbc && !cmd->error) {
2157 prev_state = state = STATE_SENDING_CMD;
2158 __dw_mci_start_request(host, host->cur_slot,
2163 if (cmd->data && cmd->error) {
2164 del_timer_sync(&host->dto_timer); /* delete the timer for INT_DTO */
2165 dw_mci_stop_dma(host);
2168 send_stop_cmd(host, data);
2169 state = STATE_SENDING_STOP;
2175 send_stop_abort(host, data);
2176 state = STATE_SENDING_STOP;
2179 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2182 if (!host->mrq->data || cmd->error) {
2183 dw_mci_request_end(host, host->mrq);
2187 prev_state = state = STATE_SENDING_DATA;
2190 case STATE_SENDING_DATA:
2191 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2192 dw_mci_stop_dma(host);
2195 send_stop_cmd(host, data);
2197 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2198 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2199 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
/* Hand-rolled CMD12 abort when the request carries no stop command. */
2201 mci_writel(host, CMDARG, 0);
2203 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2204 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2206 if(host->mmc->hold_reg_flag)
2207 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2209 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
2211 timeout = jiffies + msecs_to_jiffies(500);
2214 ret = time_before(jiffies, timeout);
2215 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2219 MMC_DBG_ERR_FUNC(host->mmc,
2220 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2221 __func__, mmc_hostname(host->mmc));
2224 send_stop_abort(host, data);
2226 state = STATE_DATA_ERROR;
2230 MMC_DBG_CMD_FUNC(host->mmc,
2231 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2232 prev_state,state, mmc_hostname(host->mmc));
2234 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2235 &host->pending_events))
2237 MMC_DBG_INFO_FUNC(host->mmc,
2238 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2239 prev_state,state,mmc_hostname(host->mmc));
2241 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2242 prev_state = state = STATE_DATA_BUSY;
2245 case STATE_DATA_BUSY:
2246 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2247 &host->pending_events))
2250 dw_mci_deal_data_end(host, host->mrq);
2251 del_timer_sync(&host->dto_timer); //delete the timer for INT_DTO
2252 MMC_DBG_INFO_FUNC(host->mmc,
2253 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2254 prev_state,state,mmc_hostname(host->mmc));
2257 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2258 status = host->data_status;
2260 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2261 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2262 MMC_DBG_ERR_FUNC(host->mmc,
2263 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2264 prev_state,state, status, mmc_hostname(host->mmc));
/* Map the raw interrupt status onto an errno for the data phase. */
2266 if (status & SDMMC_INT_DRTO) {
2267 data->error = -ETIMEDOUT;
2268 } else if (status & SDMMC_INT_DCRC) {
2269 data->error = -EILSEQ;
2270 } else if (status & SDMMC_INT_EBE &&
2271 host->dir_status == DW_MCI_SEND_STATUS){
2273 * No data CRC status was returned.
2274 * The number of bytes transferred will
2275 * be exaggerated in PIO mode.
2277 data->bytes_xfered = 0;
2278 data->error = -ETIMEDOUT;
2287 * After an error, there may be data lingering
2288 * in the FIFO, so reset it - doing so
2289 * generates a block interrupt, hence setting
2290 * the scatter-gather pointer to NULL.
2292 dw_mci_fifo_reset(host);
2294 data->bytes_xfered = data->blocks * data->blksz;
2299 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2300 prev_state,state,mmc_hostname(host->mmc));
2301 dw_mci_request_end(host, host->mrq);
2304 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2305 prev_state,state,mmc_hostname(host->mmc));
/* With SBC the card stops by itself: no CMD12 needed on success. */
2307 if (host->mrq->sbc && !data->error) {
2308 data->stop->error = 0;
2310 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2311 prev_state,state,mmc_hostname(host->mmc));
2313 dw_mci_request_end(host, host->mrq);
2317 prev_state = state = STATE_SENDING_STOP;
2319 send_stop_cmd(host, data);
2321 if (data->stop && !data->error) {
2322 /* stop command for open-ended transfer*/
2324 send_stop_abort(host, data);
2328 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2329 prev_state,state,mmc_hostname(host->mmc));
2331 case STATE_SENDING_STOP:
2332 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2335 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2336 prev_state, state, mmc_hostname(host->mmc));
2338 /* CMD error in data command */
2339 if (host->mrq->cmd->error && host->mrq->data) {
2340 dw_mci_fifo_reset(host);
2346 dw_mci_command_complete(host, host->mrq->stop);
2348 if (host->mrq->stop)
2349 dw_mci_command_complete(host, host->mrq->stop);
2351 host->cmd_status = 0;
2354 dw_mci_request_end(host, host->mrq);
2357 case STATE_DATA_ERROR:
2358 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2359 &host->pending_events))
2362 state = STATE_DATA_BUSY;
/* Loop until a pass makes no state transition. */
2365 } while (state != prev_state);
2367 host->state = state;
2369 spin_unlock(&host->lock);
2373 /* push final bytes to part_buf, only use during push */
/* Stash the last cnt (< FIFO word size) bytes of a push into part_buf. */
2374 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2376 memcpy((void *)&host->part_buf, buf, cnt);
2377 host->part_buf_count = cnt;
2380 /* append bytes to part_buf, only use during push */
/*
 * Append up to cnt bytes to part_buf, capped at the space left in one FIFO
 * word ((1 << data_shift) bytes).  Returns the number of bytes consumed
 * (return statement elided in this listing).
 */
2381 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2383 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2384 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2385 host->part_buf_count += cnt;
2389 /* pull first bytes from part_buf, only use during pull */
/*
 * Copy up to cnt bytes out of part_buf (starting at part_buf_start) into
 * buf, advancing start and shrinking count.  Returns the number of bytes
 * delivered (return statement elided in this listing).
 */
2390 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2392 cnt = min(cnt, (int)host->part_buf_count);
2394 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2396 host->part_buf_count -= cnt;
2397 host->part_buf_start += cnt;
2402 /* pull final bytes from the part_buf, assuming it's just been filled */
/*
 * Deliver the first cnt bytes of a freshly read FIFO word and leave the
 * remainder ((1 << data_shift) - cnt bytes) queued in part_buf.
 */
2403 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2405 memcpy(buf, &host->part_buf, cnt);
2406 host->part_buf_start = cnt;
2407 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * dw_mci_push_data16 - PIO write path for a 16-bit FIFO.
 * Order: (1) top up any partial word left in part_buf and flush it once
 * full; (2) bulk-copy whole 16-bit words — via a stack bounce buffer when
 * buf is misaligned and the arch lacks efficient unaligned access;
 * (3) stash the trailing odd byte in part_buf, flushing it immediately if
 * this completes the expected transfer length.
 */
2410 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2412 struct mmc_data *data = host->data;
2415 /* try and push anything in the part_buf */
2416 if (unlikely(host->part_buf_count)) {
2417 int len = dw_mci_push_part_bytes(host, buf, cnt);
2420 if (host->part_buf_count == 2) {
2421 mci_writew(host, DATA(host->data_offset),
2423 host->part_buf_count = 0;
2426 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2427 if (unlikely((unsigned long)buf & 0x1)) {
2429 u16 aligned_buf[64];
2430 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2431 int items = len >> 1;
2433 /* memcpy from input buffer into aligned buffer */
2434 memcpy(aligned_buf, buf, len);
2437 /* push data from aligned buffer into fifo */
2438 for (i = 0; i < items; ++i)
2439 mci_writew(host, DATA(host->data_offset),
2446 for (; cnt >= 2; cnt -= 2)
2447 mci_writew(host, DATA(host->data_offset), *pdata++);
2450 /* put anything remaining in the part_buf */
2452 dw_mci_set_part_bytes(host, buf, cnt);
2453 /* Push data if we have reached the expected data length */
2454 if ((data->bytes_xfered + init_cnt) ==
2455 (data->blksz * data->blocks))
2456 mci_writew(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data16 - PIO read path for a 16-bit FIFO.
 * Reads whole 16-bit words (through a stack bounce buffer when buf is
 * misaligned on arches without efficient unaligned access), then reads
 * one final word into part_buf16 and hands out its leading bytes via
 * dw_mci_pull_final_bytes() for a trailing odd count.
 */
2461 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2463 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2464 if (unlikely((unsigned long)buf & 0x1)) {
2466 /* pull data from fifo into aligned buffer */
2467 u16 aligned_buf[64];
2468 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2469 int items = len >> 1;
2471 for (i = 0; i < items; ++i)
2472 aligned_buf[i] = mci_readw(host,
2473 DATA(host->data_offset));
2474 /* memcpy from aligned buffer into output buffer */
2475 memcpy(buf, aligned_buf, len);
2483 for (; cnt >= 2; cnt -= 2)
2484 *pdata++ = mci_readw(host, DATA(host->data_offset));
2488 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2489 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_push_data32 - PIO write path for a 32-bit FIFO.
 * Same three-phase structure as dw_mci_push_data16 (flush part_buf, bulk
 * copy whole words with an alignment bounce buffer if needed, stash the
 * remainder), with 4-byte words.
 */
2493 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2495 struct mmc_data *data = host->data;
2498 /* try and push anything in the part_buf */
2499 if (unlikely(host->part_buf_count)) {
2500 int len = dw_mci_push_part_bytes(host, buf, cnt);
2503 if (host->part_buf_count == 4) {
2504 mci_writel(host, DATA(host->data_offset),
2506 host->part_buf_count = 0;
2509 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2510 if (unlikely((unsigned long)buf & 0x3)) {
2512 u32 aligned_buf[32];
2513 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2514 int items = len >> 2;
2516 /* memcpy from input buffer into aligned buffer */
2517 memcpy(aligned_buf, buf, len);
2520 /* push data from aligned buffer into fifo */
2521 for (i = 0; i < items; ++i)
2522 mci_writel(host, DATA(host->data_offset),
2529 for (; cnt >= 4; cnt -= 4)
2530 mci_writel(host, DATA(host->data_offset), *pdata++);
2533 /* put anything remaining in the part_buf */
2535 dw_mci_set_part_bytes(host, buf, cnt);
2536 /* Push data if we have reached the expected data length */
2537 if ((data->bytes_xfered + init_cnt) ==
2538 (data->blksz * data->blocks))
2539 mci_writel(host, DATA(host->data_offset),
/*
 * Drain 'cnt' bytes from the FIFO into 'buf' with 32-bit reads; bounce
 * through an aligned stack buffer when 'buf' is not 4-byte aligned and the
 * arch lacks efficient unaligned access.  Mirrors dw_mci_pull_data16().
 * NOTE(review): intermediate source lines are omitted from this listing.
 */
2544 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2546 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2547 if (unlikely((unsigned long)buf & 0x3)) {
2549 /* pull data from fifo into aligned buffer */
2550 u32 aligned_buf[32];
2551 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2552 int items = len >> 2;
2554 for (i = 0; i < items; ++i)
2555 aligned_buf[i] = mci_readl(host,
2556 DATA(host->data_offset));
2557 /* memcpy from aligned buffer into output buffer */
2558 memcpy(buf, aligned_buf, len);
/* aligned fast path */
2566 for (; cnt >= 4; cnt -= 4)
2567 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* stage any trailing partial word for dw_mci_pull_final_bytes() */
2571 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2572 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * 64-bit variant of dw_mci_push_data32(): fills the FIFO with mci_writeq(),
 * completing any staged partial word first and flushing a trailing remainder
 * when the transfer's expected byte count is reached.
 * NOTE(review): intermediate source lines are omitted from this listing.
 */
2576 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2578 struct mmc_data *data = host->data;
2581 /* try and push anything in the part_buf */
2582 if (unlikely(host->part_buf_count)) {
2583 int len = dw_mci_push_part_bytes(host, buf, cnt);
/* part_buf full (8 bytes): flush and reset */
2587 if (host->part_buf_count == 8) {
2588 mci_writeq(host, DATA(host->data_offset),
2590 host->part_buf_count = 0;
2593 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2594 if (unlikely((unsigned long)buf & 0x7)) {
2596 u64 aligned_buf[16];
/* cnt & -8 rounds down to whole 64-bit words */
2597 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2598 int items = len >> 3;
2600 /* memcpy from input buffer into aligned buffer */
2601 memcpy(aligned_buf, buf, len);
2604 /* push data from aligned buffer into fifo */
2605 for (i = 0; i < items; ++i)
2606 mci_writeq(host, DATA(host->data_offset),
/* aligned fast path */
2613 for (; cnt >= 8; cnt -= 8)
2614 mci_writeq(host, DATA(host->data_offset), *pdata++);
2617 /* put anything remaining in the part_buf */
2619 dw_mci_set_part_bytes(host, buf, cnt);
2620 /* Push data if we have reached the expected data length */
2621 if ((data->bytes_xfered + init_cnt) ==
2622 (data->blksz * data->blocks))
2623 mci_writeq(host, DATA(host->data_offset),
/*
 * 64-bit variant of dw_mci_pull_data32(): drains the FIFO with mci_readq(),
 * bouncing through an aligned buffer for 8-byte-misaligned destinations.
 * NOTE(review): intermediate source lines are omitted from this listing.
 */
2628 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2630 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2631 if (unlikely((unsigned long)buf & 0x7)) {
2633 /* pull data from fifo into aligned buffer */
2634 u64 aligned_buf[16];
2635 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2636 int items = len >> 3;
2638 for (i = 0; i < items; ++i)
2639 aligned_buf[i] = mci_readq(host,
2640 DATA(host->data_offset));
2641 /* memcpy from aligned buffer into output buffer */
2642 memcpy(buf, aligned_buf, len);
/* aligned fast path */
2650 for (; cnt >= 8; cnt -= 8)
2651 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* stage trailing partial word for dw_mci_pull_final_bytes() */
2655 host->part_buf = mci_readq(host, DATA(host->data_offset));
2656 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Top-level FIFO drain: first consume any bytes staged in the partial-word
 * buffer; if that already satisfies 'cnt', return early (early-return line
 * omitted in this listing), otherwise delegate to the width-specific
 * host->pull_data handler (16/32/64-bit, chosen at probe from HCON).
 */
2660 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2664 /* get remaining partial bytes */
2665 len = dw_mci_pull_part_bytes(host, buf, cnt);
2666 if (unlikely(len == cnt))
2671 /* get the rest of the data */
2672 host->pull_data(host, buf, cnt);
/*
 * PIO read path: walk the request's scatterlist with an sg_mapping_iter and
 * pull FIFO contents into each segment.  Loops while RXDR keeps asserting,
 * and — when called for data-over ('dto' true) — also while the FIFO still
 * reports a byte count, so the tail of the transfer is fully drained.
 * On scatterlist exhaustion, marks EVENT_XFER_COMPLETE.
 * NOTE(review): listing omits loop/brace/declaration lines; bail-out paths
 * after the bus_refs check and sg_miter_next() failures are not visible.
 */
2675 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2677 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2679 unsigned int offset;
2680 struct mmc_data *data = host->data;
2681 int shift = host->data_shift;
2684 unsigned int remain, fcnt;
/* Rockchip addition: guard against running with a dropped bus reference */
2686 if(!host->mmc->bus_refs){
2687 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2691 if (!sg_miter_next(sg_miter))
2694 host->sg = sg_miter->piter.sg;
2695 buf = sg_miter->addr;
2696 remain = sg_miter->length;
/* bytes available = FIFO word count scaled by data width + staged bytes */
2700 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2701 << shift) + host->part_buf_count;
2702 len = min(remain, fcnt);
2705 dw_mci_pull_data(host, (void *)(buf + offset), len);
2706 data->bytes_xfered += len;
2711 sg_miter->consumed = offset;
2712 status = mci_readl(host, MINTSTS);
2713 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2714 /* if the RXDR is ready read again */
2715 } while ((status & SDMMC_INT_RXDR) ||
2716 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2719 if (!sg_miter_next(sg_miter))
2721 sg_miter->consumed = 0;
2723 sg_miter_stop(sg_miter);
2727 sg_miter_stop(sg_miter);
2731 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO write path: mirror of dw_mci_read_data_pio().  Pushes scatterlist
 * segments into the FIFO, writing at most the free FIFO space per pass
 * (fifo_depth minus current fill, scaled by data width, minus staged
 * bytes), looping while TXDR stays asserted.
 * NOTE(review): listing omits loop/brace/declaration lines.
 */
2734 static void dw_mci_write_data_pio(struct dw_mci *host)
2736 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2738 unsigned int offset;
2739 struct mmc_data *data = host->data;
2740 int shift = host->data_shift;
2743 unsigned int fifo_depth = host->fifo_depth;
2744 unsigned int remain, fcnt;
/* Rockchip addition: guard against running with a dropped bus reference */
2746 if(!host->mmc->bus_refs){
2747 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2752 if (!sg_miter_next(sg_miter))
2755 host->sg = sg_miter->piter.sg;
2756 buf = sg_miter->addr;
2757 remain = sg_miter->length;
/* free FIFO bytes this pass, net of bytes staged in part_buf */
2761 fcnt = ((fifo_depth -
2762 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2763 << shift) - host->part_buf_count;
2764 len = min(remain, fcnt);
2767 host->push_data(host, (void *)(buf + offset), len);
2768 data->bytes_xfered += len;
2773 sg_miter->consumed = offset;
2774 status = mci_readl(host, MINTSTS);
2775 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2776 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2779 if (!sg_miter_next(sg_miter))
2781 sg_miter->consumed = 0;
2783 sg_miter_stop(sg_miter);
2787 sg_miter_stop(sg_miter);
2791 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Command-done IRQ bottom half: record command status, mark
 * EVENT_CMD_COMPLETE, and kick the state-machine tasklet.  The Rockchip
 * fork also (re)arms the data-over watchdog: the timeout scales with the
 * programmed byte count in 2 MiB units plus a retry allowance.
 */
2794 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2796 u32 multi, unit = SZ_2M;
/* keep the first reported status; don't overwrite an earlier error */
2798 if (!host->cmd_status)
2799 host->cmd_status = status;
/* skip the watchdog for CMD12 — it carries no data phase of its own */
2804 if ((MMC_STOP_TRANSMISSION != host->cmd->opcode)) {
2805 multi = (mci_readl(host, BYTCNT) / unit) +
2806 ((mci_readl(host, BYTCNT) % unit) ? 1 :0 ) +
2807 ((host->cmd->retries > 2) ? 2 : host->cmd->retries);
2808 /* Max limit time: 8s for dto */
/* NOTE(review): comment says 8s but the arming uses 4000 ms * multi —
 * confirm intended bound against full source */
2809 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(4000 * multi));
2814 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2815 tasklet_schedule(&host->tasklet);
/*
 * Main interrupt handler.  Reads the masked status (MINTSTS), then services
 * in order: command errors, data errors, data-over (draining any remaining
 * PIO read data), RXDR/TXDR PIO threshold events, voltage-switch (VSI),
 * command-done, card-detect (deferred to the card workqueue with a wake
 * lock), HLE, per-slot SDIO interrupts, and finally internal-DMA (IDMAC)
 * completion — skipped on rk3036/rk312x which use an external DMAC.
 * Each handled source is acknowledged by writing it back to RINTSTS.
 * NOTE(review): declaration lines (e.g. 'i'), the DTO-quirk condition and
 * the return statement are omitted from this listing.
 */
2818 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2820 struct dw_mci *host = dev_id;
2821 u32 pending, sdio_int;
2824 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2827 * DTO fix - version 2.10a and below, and only if internal DMA
2830 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2832 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2833 pending |= SDMMC_INT_DATA_OVER;
2837 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2838 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2839 host->cmd_status = pending;
2841 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2842 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2844 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2847 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2848 /* if there is an error report DATA_ERROR */
2849 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2850 host->data_status = pending;
2852 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2854 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2855 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2856 tasklet_schedule(&host->tasklet);
2859 if (pending & SDMMC_INT_DATA_OVER) {
2860 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
/* transfer finished: the DTO watchdog armed in dw_mci_cmd_interrupt()
 * is no longer needed */
2861 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2862 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2863 if (!host->data_status)
2864 host->data_status = pending;
/* on reads, drain whatever is still in the FIFO before completing */
2866 if (host->dir_status == DW_MCI_RECV_STATUS) {
2867 if (host->sg != NULL)
2868 dw_mci_read_data_pio(host, true);
2870 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2871 tasklet_schedule(&host->tasklet);
2874 if (pending & SDMMC_INT_RXDR) {
2875 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2876 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2877 dw_mci_read_data_pio(host, false);
2880 if (pending & SDMMC_INT_TXDR) {
2881 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2882 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2883 dw_mci_write_data_pio(host);
2886 if (pending & SDMMC_INT_VSI) {
2887 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2888 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2889 dw_mci_cmd_interrupt(host, pending);
2892 if (pending & SDMMC_INT_CMD_DONE) {
2893 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2894 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2895 dw_mci_cmd_interrupt(host, pending);
2898 if (pending & SDMMC_INT_CD) {
2899 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2900 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
/* hold a wake lock so rescan isn't cut short by suspend */
2901 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2902 queue_work(host->card_workqueue, &host->card_work);
2905 if (pending & SDMMC_INT_HLE) {
2906 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2907 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2911 /* Handle SDIO Interrupts */
2912 for (i = 0; i < host->num_slots; i++) {
2913 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in IP version 2.40a */
2915 if (host->verid < DW_MMC_240A)
2916 sdio_int = SDMMC_INT_SDIO(i);
2918 sdio_int = SDMMC_INT_SDIO(i + 8);
2920 if (pending & sdio_int) {
2921 mci_writel(host, RINTSTS, sdio_int);
2922 mmc_signal_sdio_irq(slot->mmc);
2928 #ifdef CONFIG_MMC_DW_IDMAC
2929 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2930 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2931 /* Handle DMA interrupts */
2932 pending = mci_readl(host, IDSTS);
2933 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2934 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2935 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2936 host->dma_ops->complete((void *)host);
/*
 * Card-detect workqueue handler.  For each slot, compares the current
 * card-present state against the last recorded one; on a change it wakes
 * the system, aborts/fails any in-flight request with -ENOMEDIUM (driving
 * the state machine according to where the request was), resets the FIFO
 * (and IDMAC where applicable), and finally triggers an mmc rescan.
 * The inner while loop re-samples presence so bounces are not missed.
 * NOTE(review): listing omits braces, 'mrq' dequeue lines and several
 * fallthrough/default arms of the switch — consult full source.
 */
2944 static void dw_mci_work_routine_card(struct work_struct *work)
2946 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2949 for (i = 0; i < host->num_slots; i++) {
2950 struct dw_mci_slot *slot = host->slot[i];
2951 struct mmc_host *mmc = slot->mmc;
2952 struct mmc_request *mrq;
2955 present = dw_mci_get_cd(mmc);
2956 while (present != slot->last_detect_state) {
2957 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2958 present ? "inserted" : "removed");
2959 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
2960 present ? "inserted" : "removed.", mmc_hostname(mmc));
2962 rk_send_wakeup_key();//wake up system
2963 spin_lock_bh(&host->lock);
2965 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2966 /* Card change detected */
2967 slot->last_detect_state = present;
2969 /* Clean up queue if present */
2972 if (mrq == host->mrq) {
/* request currently being processed: fail it according to state */
2976 switch (host->state) {
2979 case STATE_SENDING_CMD:
2980 mrq->cmd->error = -ENOMEDIUM;
2984 case STATE_SENDING_DATA:
2985 mrq->data->error = -ENOMEDIUM;
2986 dw_mci_stop_dma(host);
2988 case STATE_DATA_BUSY:
2989 case STATE_DATA_ERROR:
2990 if (mrq->data->error == -EINPROGRESS)
2991 mrq->data->error = -ENOMEDIUM;
2995 case STATE_SENDING_STOP:
2996 mrq->stop->error = -ENOMEDIUM;
3000 dw_mci_request_end(host, mrq);
/* request still queued: dequeue and fail all its phases directly */
3002 list_del(&slot->queue_node);
3003 mrq->cmd->error = -ENOMEDIUM;
3005 mrq->data->error = -ENOMEDIUM;
3007 mrq->stop->error = -ENOMEDIUM;
3009 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
3010 mrq->cmd->opcode, mmc_hostname(mmc));
/* drop the lock while notifying the core to avoid recursion/deadlock */
3012 spin_unlock(&host->lock);
3013 mmc_request_done(slot->mmc, mrq);
3014 spin_lock(&host->lock);
3018 /* Power down slot */
3020 /* Clear down the FIFO */
3021 dw_mci_fifo_reset(host);
3022 #ifdef CONFIG_MMC_DW_IDMAC
3023 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3024 dw_mci_idmac_reset(host);
3029 spin_unlock_bh(&host->lock);
/* re-sample in case the card bounced while we held the lock */
3031 present = dw_mci_get_cd(mmc);
3034 mmc_detect_change(slot->mmc,
3035 msecs_to_jiffies(host->pdata->detect_delay_ms));
3040 /* given a slot id, find out the device node representing that slot */
/*
 * Walks the controller's DT children and matches the child whose "reg"
 * property equals 'slot'.  Returns NULL when there is no device node
 * (return statements omitted in this listing).
 */
3041 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3043 struct device_node *np;
3047 if (!dev || !dev->of_node)
3050 for_each_child_of_node(dev->of_node, np) {
3051 addr = of_get_property(np, "reg", &len);
3052 if (!addr || (len < sizeof(int)))
/* "reg" is big-endian in DT; convert before comparing */
3054 if (be32_to_cpup(addr) == slot)
3060 static struct dw_mci_of_slot_quirks {
3063 } of_slot_quirks[] = {
3065 .quirk = "disable-wp",
3066 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Accumulate quirk flags for a slot by testing each entry of
 * of_slot_quirks[] against the slot's DT node.
 */
3070 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3072 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3077 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3078 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3079 quirks |= of_slot_quirks[idx].id;
3084 /* find out bus-width for a given slot */
/*
 * Reads "bus-width" from the controller node (Rockchip change: the
 * per-slot child node lookup is commented out) and falls back with a
 * warning when absent; the default value line is omitted in this listing.
 */
3085 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3087 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3093 if (of_property_read_u32(np, "bus-width", &bus_wd))
3094 dev_err(dev, "bus-width property not found, assuming width"
3100 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Looks up "pwr-gpios" on the controller node, requests it via devm and
 * drives it low (active power-enable per the comment on line 3120).
 */
3101 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3103 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3109 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3111 /* Having a missing entry is valid; return silently */
3112 if (!gpio_is_valid(gpio))
3115 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3116 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3120 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3126 /* find the write protect gpio for a given slot; or -1 if none specified */
/*
 * Looks up "wp-gpios" on the slot's DT child node and requests it via devm;
 * missing entry is not an error.
 */
3127 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3129 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3135 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3137 /* Having a missing entry is valid; return silently */
3138 if (!gpio_is_valid(gpio))
3141 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3142 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3149 /* find the cd gpio for a given slot */
/*
 * Looks up "cd-gpios" on the controller node and registers it with the
 * mmc core's slot-gpio card-detect helper (mmc_gpio_request_cd).
 */
3150 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3151 struct mmc_host *mmc)
3153 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3159 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3161 /* Having a missing entry is valid; return silently */
3162 if (!gpio_is_valid(gpio))
3165 if (mmc_gpio_request_cd(mmc, gpio, 0))
3166 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * Threaded IRQ handler for the GPIO card-detect pin (Rockchip addition):
 * notifies the host's card_event hook, schedules an mmc rescan with a
 * 200 ms debounce, wakes the system, and queues the card-detect work.
 */
3169 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3171 struct mmc_host *mmc = dev_id;
3172 struct dw_mci_slot *slot = mmc_priv(mmc);
3173 struct dw_mci *host = slot->host;
3176 if (mmc->ops->card_event)
3177 mmc->ops->card_event(mmc);
3179 mmc_detect_change(mmc, msecs_to_jiffies(200));
3182 /* wakeup system whether gpio debounce or not */
3183 rk_send_wakeup_key();
3184 queue_work(host->card_workqueue, &host->card_work);
/*
 * Wire a GPIO card-detect pin to dw_mci_gpio_cd_irqt() as a threaded IRQ
 * on both edges, and mark it as a wakeup source so insert/remove can wake
 * the system from idle/deep suspend.
 */
3188 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3189 struct mmc_host *mmc)
3191 struct dw_mci_slot *slot = mmc_priv(mmc);
3192 struct dw_mci *host = slot->host;
3196 /* Having a missing entry is valid; return silently */
3197 if (!gpio_is_valid(gpio))
3200 irq = gpio_to_irq(gpio);
3202 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3203 NULL, dw_mci_gpio_cd_irqt,
3204 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
3208 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3210 /* enable wakeup event for gpio-cd in idle or deep suspend*/
3211 enable_irq_wake(irq);
3214 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/*
 * Undo dw_mci_of_set_cd_gpio_irq(): release the threaded IRQ and the GPIO
 * (both were devm-managed against the mmc class device).
 */
3218 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3219 struct mmc_host *mmc)
3221 if (!gpio_is_valid(gpio))
3224 if (gpio_to_irq(gpio) >= 0) {
3225 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3226 devm_gpio_free(&mmc->class_dev, gpio);
3229 #else /* CONFIG_OF */
/* No-op fallbacks when the kernel is built without device-tree support;
 * return values are on lines omitted from this listing. */
3230 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3234 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3238 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3242 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3246 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3247 struct mmc_host *mmc)
3251 #endif /* CONFIG_OF */
/*
 * Allocate and register one mmc_host for slot 'id': read DT restrictions
 * (supports-sd/sdio/emmc/tSD, clock-freq-min-max, cap-* flags), set OCR,
 * caps, pm flags and block-geometry limits, obtain pwr-en/wp/cd GPIOs and
 * the vmmc regulator, register with the mmc core, and apply pinctrl
 * default state (eMMC excluded).  Returns 0 on success; error-path lines
 * are omitted from this listing.
 * NOTE(review): many declarations, braces and gotos are not visible here.
 */
3253 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3255 struct mmc_host *mmc;
3256 struct dw_mci_slot *slot;
3257 const struct dw_mci_drv_data *drv_data = host->drv_data;
3262 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3266 slot = mmc_priv(mmc);
3270 host->slot[id] = slot;
3273 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3275 mmc->ops = &dw_mci_ops;
/* f_min/f_max come from DT "clock-freq-min-max", else driver defaults */
3277 if (of_property_read_u32_array(host->dev->of_node,
3278 "clock-freq-min-max", freq, 2)) {
3279 mmc->f_min = DW_MCI_FREQ_MIN;
3280 mmc->f_max = DW_MCI_FREQ_MAX;
3282 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3283 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3285 mmc->f_min = freq[0];
3286 mmc->f_max = freq[1];
3288 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3289 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3292 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
/* Rockchip DT flags restrict which card type this controller serves */
3294 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3295 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3296 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3297 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3298 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3299 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3301 if (of_find_property(host->dev->of_node, "supports-tSD", NULL))
3302 mmc->restrict_caps |= RESTRICT_CARD_TYPE_TSD;
3305 /* We assume only low-level chip use gpio_cd */
3306 if (cpu_is_rk312x() &&
3308 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3309 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3310 if (gpio_is_valid(slot->cd_gpio)) {
3311 /* Request gpio int for card detection */
3312 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3314 slot->cd_gpio = -ENODEV;
3315 dev_err(host->dev, "failed to get your cd-gpios!\n");
3319 if (host->pdata->get_ocr)
3320 mmc->ocr_avail = host->pdata->get_ocr(id);
/* default OCR: advertise the full 1.65-3.6 V range */
3323 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3324 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3325 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3326 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3330 * Start with slot power disabled, it will be enabled when a card
3333 if (host->pdata->setpower)
3334 host->pdata->setpower(id, 0);
3336 if (host->pdata->caps)
3337 mmc->caps = host->pdata->caps;
3339 if (host->pdata->pm_caps)
3340 mmc->pm_caps = host->pdata->pm_caps;
/* controller index: DT "mshc" alias, or platform-device id otherwise */
3342 if (host->dev->of_node) {
3343 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3347 ctrl_id = to_platform_device(host->dev)->id;
3349 if (drv_data && drv_data->caps)
3350 mmc->caps |= drv_data->caps[ctrl_id];
3351 if (drv_data && drv_data->hold_reg_flag)
3352 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3354 /* set the compatibility of driver. */
3355 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3356 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3358 if (host->pdata->caps2)
3359 mmc->caps2 = host->pdata->caps2;
3361 if (host->pdata->get_bus_wd)
3362 bus_width = host->pdata->get_bus_wd(slot->id);
3363 else if (host->dev->of_node)
3364 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
/* 8-bit falls through to also set 4-bit (fallthrough lines omitted) */
3368 switch (bus_width) {
3370 mmc->caps |= MMC_CAP_8_BIT_DATA;
3372 mmc->caps |= MMC_CAP_4_BIT_DATA;
3375 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3376 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3377 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3378 mmc->caps |= MMC_CAP_SDIO_IRQ;
3379 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3380 mmc->caps |= MMC_CAP_HW_RESET;
3381 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3382 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3383 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3384 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3385 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3386 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3387 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3388 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3390 /*Assign pm_caps pass to pm_flags*/
3391 mmc->pm_flags = mmc->pm_caps;
3393 if (host->pdata->blk_settings) {
3394 mmc->max_segs = host->pdata->blk_settings->max_segs;
3395 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3396 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3397 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3398 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3400 /* Useful defaults if platform data is unset. */
3401 #ifdef CONFIG_MMC_DW_IDMAC
3402 mmc->max_segs = host->ring_size;
3403 mmc->max_blk_size = 65536;
3404 mmc->max_blk_count = host->ring_size;
3405 mmc->max_seg_size = 0x1000;
3406 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3407 if(cpu_is_rk3036() || cpu_is_rk312x()){
3408 /* fixup for external dmac setting */
3410 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3411 mmc->max_blk_count = 65535;
3412 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3413 mmc->max_seg_size = mmc->max_req_size;
/* PIO limits (no IDMAC): 512 blocks per request */
3417 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3418 mmc->max_blk_count = 512;
3419 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3420 mmc->max_seg_size = mmc->max_req_size;
3421 #endif /* CONFIG_MMC_DW_IDMAC */
3425 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3427 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* vmmc regulator only applies to removable SD */
3432 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3433 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3437 if (IS_ERR(host->vmmc)) {
3438 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3441 ret = regulator_enable(host->vmmc);
3444 "failed to enable regulator: %d\n", ret);
3451 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3453 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3454 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3456 ret = mmc_add_host(mmc);
3460 /* Pinctrl set default iomux state to fucntion port.
3461 * Fixme: DON'T TOUCH EMMC SETTING!
3463 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3465 host->pinctrl = devm_pinctrl_get(host->dev);
3466 if(IS_ERR(host->pinctrl)){
3467 printk("%s: Warning : No pinctrl used!\n",mmc_hostname(host->mmc));
3469 host->pins_idle= pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_IDLE);
/* NOTE(review): checks pins_default after looking up pins_idle —
 * looks like a copy/paste slip in the original; confirm upstream */
3470 if(IS_ERR(host->pins_default)){
3471 printk("%s: Warning : No IDLE pinctrl matched!\n", mmc_hostname(host->mmc));
3475 if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3476 printk("%s: Warning : Idle pinctrl setting failed!\n", mmc_hostname(host->mmc));
3479 host->pins_default = pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_DEFAULT);
3480 if(IS_ERR(host->pins_default)){
3481 printk("%s: Warning : No default pinctrl matched!\n", mmc_hostname(host->mmc));
3485 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3486 printk("%s: Warning : Default pinctrl setting failed!\n", mmc_hostname(host->mmc));
3492 #if defined(CONFIG_DEBUG_FS)
3493 dw_mci_init_debugfs(slot);
3496 /* Card initially undetected */
3497 slot->last_detect_state = 1;
/* error unwind: release the GPIO CD interrupt if it was requested */
3502 if (gpio_is_valid(slot->cd_gpio))
3503 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * Tear down one slot: run the platform exit hook, unregister from the mmc
 * core, clear the host's slot pointer and free the mmc_host allocation.
 */
3508 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3510 /* Shutdown detect IRQ */
3511 if (slot->host->pdata->exit)
3512 slot->host->pdata->exit(id);
3514 /* Debugfs stuff is cleaned up by mmc core */
3515 mmc_remove_host(slot->mmc);
3516 slot->host->slot[id] = NULL;
3517 mmc_free_host(slot->mmc);
/*
 * Set up the DMA path: allocate one page of coherent memory for descriptor
 * translation, pick external DMAC ops (rk3036/rk312x) or internal IDMAC
 * ops, and initialize them.  Falls back to PIO mode when no usable DMA
 * ops are available or init fails (fallback lines omitted here).
 */
3520 static void dw_mci_init_dma(struct dw_mci *host)
3522 /* Alloc memory for sg translation */
3523 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3524 &host->sg_dma, GFP_KERNEL);
3525 if (!host->sg_cpu) {
3526 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3531 /* Determine which DMA interface to use */
3532 #if defined(CONFIG_MMC_DW_IDMAC)
3533 if(cpu_is_rk3036() || cpu_is_rk312x()){
3534 host->dma_ops = &dw_mci_edmac_ops;
3535 dev_info(host->dev, "Using external DMA controller.\n");
3537 host->dma_ops = &dw_mci_idmac_ops;
3538 dev_info(host->dev, "Using internal DMA controller.\n");
/* all four callbacks must exist before DMA mode is trusted */
3545 if (host->dma_ops->init && host->dma_ops->start &&
3546 host->dma_ops->stop && host->dma_ops->cleanup) {
3547 if (host->dma_ops->init(host)) {
3548 dev_err(host->dev, "%s: Unable to initialize "
3549 "DMA Controller.\n", __func__);
3553 dev_err(host->dev, "DMA initialization not found.\n");
3561 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Assert the given reset bit(s) in CTRL and poll (up to 500 ms) for the
 * hardware to self-clear them.  Returns true on success, false (with an
 * error message) on timeout; return lines are omitted in this listing.
 */
3566 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3568 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3571 ctrl = mci_readl(host, CTRL);
3573 mci_writel(host, CTRL, ctrl);
3575 /* wait till resets clear */
3577 ctrl = mci_readl(host, CTRL);
3578 if (!(ctrl & reset))
3580 } while (time_before(jiffies, timeout));
3583 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * Reset only the data FIFO.  Stops the sg_mapping_iter first because the
 * reset raises an interrupt while host->sg is being cleared.
 */
3589 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3592 * Reseting generates a block interrupt, hence setting
3593 * the scatter-gather pointer to NULL.
3596 sg_miter_stop(&host->sg_miter);
3600 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full controller reset: FIFO, controller core and DMA interface at once. */
3603 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3605 return dw_mci_ctrl_reset(host,
3606 SDMMC_CTRL_FIFO_RESET |
3608 SDMMC_CTRL_DMA_RESET);
/* Controller-level DT property -> quirk mapping used by dw_mci_parse_dt(). */
3613 static struct dw_mci_of_quirks {
3618 .quirk = "broken-cd",
3619 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board pdata structure from the controller's device-tree
 * node: slot count, quirks, fifo depth, detect delay, bus clock, PM caps,
 * speed-mode caps (incl. Rockchip-specific supports-UHS_SDR104 /
 * supports-DDR_MODE properties) and HS200 caps2 flags.  Also invokes the
 * variant's parse_dt hook.  Returns the pdata or an ERR_PTR.
 */
3623 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3625 struct dw_mci_board *pdata;
3626 struct device *dev = host->dev;
3627 struct device_node *np = dev->of_node;
3628 const struct dw_mci_drv_data *drv_data = host->drv_data;
3630 u32 clock_frequency;
3632 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3634 dev_err(dev, "could not allocate memory for pdata\n");
3635 return ERR_PTR(-ENOMEM);
3638 /* find out number of slots supported */
3639 if (of_property_read_u32(dev->of_node, "num-slots",
3640 &pdata->num_slots)) {
3641 dev_info(dev, "num-slots property not found, "
3642 "assuming 1 slot is available\n");
3643 pdata->num_slots = 1;
3647 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3648 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3649 pdata->quirks |= of_quirks[idx].id;
3652 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3653 dev_info(dev, "fifo-depth property not found, using "
3654 "value of FIFOTH register as default\n")
3656 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3658 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3659 pdata->bus_hz = clock_frequency;
/* give the SoC variant a chance to parse its own properties */
3661 if (drv_data && drv_data->parse_dt) {
3662 ret = drv_data->parse_dt(host);
3664 return ERR_PTR(ret);
3667 if (of_find_property(np, "keep-power-in-suspend", NULL))
3668 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3670 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3671 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3673 if (of_find_property(np, "supports-highspeed", NULL))
3674 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3676 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3677 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3679 if (of_find_property(np, "supports-DDR_MODE", NULL))
3680 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3682 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3683 pdata->caps2 |= MMC_CAP2_HS200;
3685 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3686 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3688 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3689 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3691 if (of_get_property(np, "cd-inverted", NULL))
3692 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3693 if (of_get_property(np, "bootpart-no-access", NULL))
3694 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3699 #else /* CONFIG_OF */
/* Without DT there is no way to build pdata here: always fail. */
3700 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3702 return ERR_PTR(-EINVAL);
3704 #endif /* CONFIG_OF */
/*
 * Recover the controller after a data-over watchdog timeout (Rockchip
 * addition, called from dw_mci_dto_timeout()).  For a stuck data phase it
 * injects a CRC/end-bit error status, performs a full controller reset,
 * re-inits IDMAC where used, restores FIFOTH/TMOUT, rebuilds the interrupt
 * mask (preserving a pending SDIO interrupt enable), re-applies bus/ios
 * settings for keep-power slots, and reschedules the state-machine tasklet.
 * NOTE(review): switch braces, other case arms and the SDIO-mask update
 * line are omitted from this listing.
 */
3706 static void dw_mci_dealwith_timeout(struct dw_mci *host)
3711 switch(host->state){
3714 case STATE_SENDING_DATA:
3715 case STATE_DATA_BUSY:
/* fabricate a data error so the tasklet completes the request */
3716 host->data_status |= (SDMMC_INT_DCRC|SDMMC_INT_EBE);
3717 mci_writel(host, RINTSTS, SDMMC_INT_DRTO); // clear interrupt
3718 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3719 host->state = STATE_DATA_BUSY;
3720 if (!dw_mci_ctrl_all_reset(host)) {
3725 /* NO requirement to reclaim slave chn using external dmac */
3726 #ifdef CONFIG_MMC_DW_IDMAC
3727 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3728 if (host->use_dma && host->dma_ops->init)
3729 host->dma_ops->init(host);
3733 * Restore the initial value at FIFOTH register
3734 * And Invalidate the prev_blksz with zero
3736 mci_writel(host, FIFOTH, host->fifoth_val);
3737 host->prev_blksz = 0;
3738 mci_writel(host, TMOUT, 0xFFFFFFFF);
3739 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3740 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
3741 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
/* card-detect interrupt only for non-SDIO controllers */
3742 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3743 regs |= SDMMC_INT_CD;
3745 if ((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)) {
3746 if (host->verid < DW_MMC_240A)
3747 sdio_int = SDMMC_INT_SDIO(0);
3749 sdio_int = SDMMC_INT_SDIO(8);
/* keep SDIO irq enabled if it was enabled before the reset */
3751 if (mci_readl(host, INTMASK) & sdio_int)
3755 mci_writel(host, INTMASK, regs);
3756 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3757 for (i = 0; i < host->num_slots; i++) {
3758 struct dw_mci_slot *slot = host->slot[i];
3761 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3762 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3763 dw_mci_setup_bus(slot, true);
3766 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3767 tasklet_schedule(&host->tasklet);
/*
 * dto_timer callback: fires when DATA_OVER never arrived within the window
 * armed in dw_mci_cmd_interrupt().  With the controller IRQ disabled, it
 * flags an end-bit error and runs the full recovery sequence.
 */
3773 static void dw_mci_dto_timeout(unsigned long host_data)
3775 struct dw_mci *host = (struct dw_mci *) host_data;
3777 disable_irq(host->irq);
3779 dev_err(host->dev, "data_over interrupt timeout!\n");
3780 host->data_status = SDMMC_INT_EBE;
3781 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3782 dw_mci_dealwith_timeout(host);
3784 enable_irq(host->irq);
/*
 * Delayed work scheduled around system resume: re-enables card rescans
 * (rescan_disable was presumably set on suspend — confirm in full source)
 * and triggers a detect pass shortly after.
 */
3788 void resume_rescan_enable(struct work_struct *work)
3790 struct dw_mci *host =
3791 container_of(work, struct dw_mci, resume_rescan.work);
3792 host->mmc->rescan_disable = 0;
3793 mmc_detect_change(host->mmc, 10);
/*
 * dw_mci_probe - one-time bring-up of a DesignWare MCI controller.
 * @host: controller state, partially filled in by the platform glue
 *        (regs, dev, irq, drv_data at minimum).
 *
 * Parses platform data from DT, enables the AHB and CIU clocks, probes
 * the hardware version and data-port width from HCON/VERID, configures
 * the FIFO thresholds, sets up DMA, requests the IRQ and registers each
 * slot with the mmc core.  Returns 0 on success or a negative errno;
 * error paths (partially visible here) unwind the workqueue, DMA,
 * regulator and clocks.
 */
int dw_mci_probe(struct dw_mci *host)
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
		/* No platform data supplied: build it from the device tree. */
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
	/* Multi-slot configurations need a board-specific slot mux hook. */
	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
			"Platform data must supply select_slot function\n");
	/*
	 * In 2.40a spec, Data offset is changed.
	 * Need to check the version-id and set data-offset for DATA register.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);
	if (host->verid < DW_MMC_240A)
		host->data_offset = DATA_OFFSET;
		host->data_offset = DATA_240A_OFFSET;
	/* AHB bus clock - needed before any register access works. */
	host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
	if (IS_ERR(host->hclk_mmc)) {
		dev_err(host->dev, "failed to get hclk_mmc\n");
		ret = PTR_ERR(host->hclk_mmc);
	clk_prepare_enable(host->hclk_mmc);
	/* Card interface unit (CIU) clock. */
	host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
	if (IS_ERR(host->clk_mmc)) {
		dev_err(host->dev, "failed to get clk mmc_per\n");
		ret = PTR_ERR(host->clk_mmc);
	host->bus_hz = host->pdata->bus_hz;
	if (!host->bus_hz) {
		dev_err(host->dev,"Platform data must supply bus speed\n");
	if (host->verid < DW_MMC_240A)
		ret = clk_set_rate(host->clk_mmc, host->bus_hz);
		//rockchip: fix divider 2 in clksum before controlller
		ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
		dev_err(host->dev, "failed to set clk mmc\n");
	clk_prepare_enable(host->clk_mmc);
	/* Optional SoC-specific clock fixups supplied by drv_data. */
	if (drv_data && drv_data->setup_clock) {
		ret = drv_data->setup_clock(host);
			"implementation specific clock setup failed\n");
	host->quirks = host->pdata->quirks;
	host->irq_state = true;
	host->set_speed = 0;
	host->svi_flags = 0;
	spin_lock_init(&host->lock);
	spin_lock_init(&host->slock);
	INIT_LIST_HEAD(&host->queue);
	INIT_DELAYED_WORK(&host->resume_rescan, resume_rescan_enable);
	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
		/* 16-bit data port. */
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		host->data_shift = 1;
	} else if (i == 2) {
		/* 64-bit data port. */
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		host->data_shift = 3;
	/* Check for a reserved value, and warn if it is */
		"HCON reports a reserved host data width!\n"
		"Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		host->data_shift = 2;
	/* Reset all blocks */
	if (!dw_mci_ctrl_all_reset(host))
	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);
	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);
	/*
	 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
	 * Tx Mark = fifo_size / 2 DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
		fifo_size = host->pdata->fifo_depth;
	host->fifo_depth = fifo_size;
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);
	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);
	/* Bottom half for request state machine + card hotplug worker. */
	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	host->card_workqueue = alloc_workqueue("dw-mci-card",
			WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
	if (!host->card_workqueue) {
	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	/* Slot count: platform override, else read it from HCON. */
	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
	/* Watchdog for a data-over interrupt that never fires. */
	setup_timer(&host->dto_timer, dw_mci_dto_timeout, (unsigned long)host);
	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
			dev_dbg(host->dev, "slot %d init failed\n", i);
	/*
	 * Enable interrupts for command done, data over, data empty, card det,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
	       SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
	/* Card-detect irq only when the slot is a removable SD card. */
	if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
		&& !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
		regs |= SDMMC_INT_CD;
	mci_writel(host, INTMASK, regs);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
	dev_info(host->dev, "DW MMC controller at irq %d, "
		 "%d bit host data width, "
		 host->irq, width, fifo_size);
		dev_info(host->dev, "%d slots initialized\n", init_slots);
		dev_dbg(host->dev, "attempted to initialize %d slots, "
			"but failed on all\n", host->num_slots);
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
	/* Error unwind: reverse order of the setup above. */
	destroy_workqueue(host->card_workqueue);
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);
		regulator_disable(host->vmmc);
		regulator_put(host->vmmc);
	if (!IS_ERR(host->clk_mmc))
		clk_disable_unprepare(host->clk_mmc);
	if (!IS_ERR(host->hclk_mmc))
		clk_disable_unprepare(host->hclk_mmc);
/*
 * dw_mci_remove - tear down a controller registered by dw_mci_probe().
 * @host: the controller to remove.
 *
 * Order matters: timers and pending work are stopped first, interrupts
 * masked, then slots are unregistered before clocks/DMA/regulator are
 * released, so no handler can run against half-torn-down state.
 */
void dw_mci_remove(struct dw_mci *host)
	struct mmc_host *mmc = host->mmc;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	/* Stop the DTO watchdog and the resume rescan work. */
	del_timer_sync(&host->dto_timer);
	cancel_delayed_work(&host->resume_rescan);
	/* Quiesce the hardware before unregistering slots. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
	for(i = 0; i < host->num_slots; i++){
		dev_dbg(host->dev, "remove slot %d\n", i);
		dw_mci_cleanup_slot(host->slot[i], i);
	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);
	destroy_workqueue(host->card_workqueue);
	if(host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);
	/* Release the card-detect GPIO irq if one was claimed. */
	if (gpio_is_valid(slot->cd_gpio))
		dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
		regulator_disable(host->vmmc);
		regulator_put(host->vmmc);
	if(!IS_ERR(host->clk_mmc))
		clk_disable_unprepare(host->clk_mmc);
	if(!IS_ERR(host->hclk_mmc))
		clk_disable_unprepare(host->hclk_mmc);
4082 #ifdef CONFIG_PM_SLEEP
4084 * TODO: we should probably disable the clock to the card in the suspend path.
/*
 * dw_mci_suspend - system-sleep entry for this controller.
 * @host: the controller being suspended.
 *
 * Cuts card power, and for the SD-card controller specifically: stops
 * rescanning, masks the controller irq, parks the pins in their idle
 * pinctrl state and arms the card-detect GPIO as a wakeup source.
 */
int dw_mci_suspend(struct dw_mci *host)
		regulator_disable(host->vmmc);
	/*only for sdmmc controller*/
	if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
		/* Block rescans and flush any pending detect work. */
		host->mmc->rescan_disable = 1;
		if (cancel_delayed_work_sync(&host->mmc->detect))
			wake_unlock(&host->mmc->detect_wake_lock);
		disable_irq(host->irq);
		if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
			MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
						mmc_hostname(host->mmc));
		/* Mask and clear everything; controller sleeps after this. */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);
		mci_writel(host, INTMASK, 0x00);
		mci_writel(host, CTRL, 0x00);
		/* Soc rk3126 already in gpio_cd mode */
		if (!(cpu_is_rk312x() && soc_is_rk3126())) {
			/* Switch CD to a GPIO irq so insertion can wake us. */
			dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
			enable_irq_wake(host->mmc->slot.cd_irq);
/*
 * dw_mci_resume - system-wakeup counterpart of dw_mci_suspend().
 * @host: the controller being resumed.
 *
 * Undoes the suspend-time card-detect/pinctrl changes, restores card
 * power, resets the controller, reprograms FIFOTH/TMOUT/INTMASK, and
 * re-applies ios for slots that kept power.  A delayed work re-enables
 * rescanning ~2s later so detection happens after the system settles.
 */
int dw_mci_resume(struct dw_mci *host)
	int i, ret, retry_cnt = 0;
	struct dw_mci_slot *slot;
	/* Powered SDIO slot with no card present: nothing to restore. */
	if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO) {
		slot = mmc_priv(host->mmc);
		if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
	/*only for sdmmc controller*/
	if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
		/* Soc rk3126 already in gpio_cd mode */
		if (!(cpu_is_rk312x() && soc_is_rk3126())) {
			/* Hand card-detect back to the controller. */
			disable_irq_wake(host->mmc->slot.cd_irq);
			mmc_gpio_free_cd(host->mmc);
		if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
			MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
						mmc_hostname(host->mmc));
		/* Keep rescans off until resume_rescan work fires below. */
		host->mmc->rescan_disable = 1;
			/* GRF mux: route CD back to the controller (rk3288). */
			grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
		else if(cpu_is_rk3036())
			grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
		else if(cpu_is_rk312x())
			/* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
			grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
		ret = regulator_enable(host->vmmc);
			"failed to enable regulator: %d\n", ret);
	if(!dw_mci_ctrl_all_reset(host)){
	/* NOTE(review): DMA re-init is skipped on rk3036/rk312x here. */
	if(!(cpu_is_rk3036() || cpu_is_rk312x()))
		if(host->use_dma && host->dma_ops->init)
			host->dma_ops->init(host);
	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;
	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);
	/* Re-arm the same interrupt set dw_mci_probe() programmed. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
	if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
		regs |= SDMMC_INT_CD;
	mci_writel(host, INTMASK, regs);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
	/*only for sdmmc controller*/
	if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
		/* Pairs with the disable_irq() done in dw_mci_suspend(). */
		enable_irq(host->irq);
	for(i = 0; i < host->num_slots; i++){
		struct dw_mci_slot *slot = host->slot[i];
		/* Slots that kept power need their bus/ios reprogrammed. */
		if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
	/* Re-enable rescanning after ~2s; see resume_rescan_enable(). */
	schedule_delayed_work(&host->resume_rescan, msecs_to_jiffies(2000));
4200 #endif /* CONFIG_PM_SLEEP */
4202 static int __init dw_mci_init(void)
4204 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4208 static void __exit dw_mci_exit(void)
4212 module_init(dw_mci_init);
4213 module_exit(dw_mci_exit);
4215 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4216 MODULE_AUTHOR("NXP Semiconductor VietNam");
4217 MODULE_AUTHOR("Imagination Technologies Ltd");
4218 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4219 MODULE_AUTHOR("Rockchip Electronics£¬Bangwang Xie < xbw@rock-chips.com> ");
4220 MODULE_LICENSE("GPL v2");