2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/mmc/host.h>
34 #include <linux/mmc/mmc.h>
35 #include <linux/mmc/sd.h>
36 #include <linux/mmc/card.h>
37 #include <linux/mmc/sdio.h>
38 #include <linux/mmc/rk_mmc.h>
39 #include <linux/bitops.h>
40 #include <linux/regulator/consumer.h>
41 #include <linux/workqueue.h>
43 #include <linux/of_gpio.h>
44 #include <linux/mmc/slot-gpio.h>
45 #include <linux/clk-private.h>
46 #include <linux/rockchip/cpu.h>
49 #include "rk_sdmmc_dbg.h"
50 #include <linux/regulator/rockchip_io_vol_domain.h>
51 #include "../../clk/rockchip/clk-ops.h"
53 #define RK_SDMMC_DRIVER_VERSION "Ver 1.13 2014-09-05"
55 /* Common flag combinations */
56 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
57 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
59 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
61 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
62 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
63 #define DW_MCI_SEND_STATUS 1
64 #define DW_MCI_RECV_STATUS 2
65 #define DW_MCI_DMA_THRESHOLD 16
67 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
68 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
70 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
71 #define SDMMC_DATA_TIMEOUT_SD 500
72 #define SDMMC_DATA_TIMEOUT_SDIO 250
73 #define SDMMC_DATA_TIMEOUT_EMMC 2500
75 #define SDMMC_CMD_RTO_MAX_HOLD 200
76 #define SDMMC_WAIT_FOR_UNBUSY 2500
78 #ifdef CONFIG_MMC_DW_IDMAC
79 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
80 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
81 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
85 u32 des0; /* Control Descriptor */
86 #define IDMAC_DES0_DIC BIT(1)
87 #define IDMAC_DES0_LD BIT(2)
88 #define IDMAC_DES0_FD BIT(3)
89 #define IDMAC_DES0_CH BIT(4)
90 #define IDMAC_DES0_ER BIT(5)
91 #define IDMAC_DES0_CES BIT(30)
92 #define IDMAC_DES0_OWN BIT(31)
94 u32 des1; /* Buffer sizes */
95 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
96 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
98 u32 des2; /* buffer 1 physical address */
100 u32 des3; /* buffer 2 physical address */
102 #endif /* CONFIG_MMC_DW_IDMAC */
/* Standard tuning block pattern sent/compared during CMD19/CMD21 tuning
 * on a 4-bit bus (defined by the SD/eMMC specifications). */
104 static const u8 tuning_blk_pattern_4bit[] = {
105 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
106 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
107 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
108 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
109 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
110 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
111 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
112 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard tuning block pattern for an 8-bit bus (eMMC HS200 tuning);
 * twice the length of the 4-bit pattern. */
115 static const u8 tuning_blk_pattern_8bit[] = {
116 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
117 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
118 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
119 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
120 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
121 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
122 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
123 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
124 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
125 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
126 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
127 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
128 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
129 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
130 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
131 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Forward declarations for reset and low-power helpers defined later in the file. */
134 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
135 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
136 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
137 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
/* Debug aid: printk every controller register of @host.
 * Walks the dw_mci_regs name/offset table until its NULL-name sentinel,
 * reading each register through mci_readreg(). */
139 /*printk the all register of current host*/
141 static int dw_mci_regs_printk(struct dw_mci *host)
143 struct sdmmc_reg *regs = dw_mci_regs;
145 while( regs->name != 0 ){
146 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
/* NOTE(review): loop-advance (regs++) and return are in lines elided from this view. */
149 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
154 #if defined(CONFIG_DEBUG_FS)
155 static int dw_mci_req_show(struct seq_file *s, void *v)
157 struct dw_mci_slot *slot = s->private;
158 struct mmc_request *mrq;
159 struct mmc_command *cmd;
160 struct mmc_command *stop;
161 struct mmc_data *data;
163 /* Make sure we get a consistent snapshot */
164 spin_lock_bh(&slot->host->lock);
174 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
175 cmd->opcode, cmd->arg, cmd->flags,
176 cmd->resp[0], cmd->resp[1], cmd->resp[2],
177 cmd->resp[2], cmd->error);
179 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
180 data->bytes_xfered, data->blocks,
181 data->blksz, data->flags, data->error);
184 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
185 stop->opcode, stop->arg, stop->flags,
186 stop->resp[0], stop->resp[1], stop->resp[2],
187 stop->resp[2], stop->error);
190 spin_unlock_bh(&slot->host->lock);
/* debugfs open hook for the "req" file: bind dw_mci_req_show to this slot. */
195 static int dw_mci_req_open(struct inode *inode, struct file *file)
197 return single_open(file, dw_mci_req_show, inode->i_private);
/* File operations for the debugfs "req" entry (single_open/seq_file based).
 * NOTE(review): .read/.llseek members exist in the full file but are not
 * visible in this view. */
200 static const struct file_operations dw_mci_req_fops = {
201 .owner = THIS_MODULE,
202 .open = dw_mci_req_open,
205 .release = single_release,
/* debugfs "regs" file.
 * NOTE(review): this prints the SDMMC_* register *offset* constants, not the
 * live register contents (no mci_readl here) — same quirk as the upstream
 * dw_mmc driver of this era; confirm whether live values were intended. */
208 static int dw_mci_regs_show(struct seq_file *s, void *v)
210 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
211 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
212 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
213 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
214 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
215 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open hook for the "regs" file. */
220 static int dw_mci_regs_open(struct inode *inode, struct file *file)
222 return single_open(file, dw_mci_regs_show, inode->i_private);
/* File operations for the debugfs "regs" entry. */
225 static const struct file_operations dw_mci_regs_fops = {
226 .owner = THIS_MODULE,
227 .open = dw_mci_regs_open,
230 .release = single_release,
/* Create per-slot debugfs entries under the mmc host's debugfs root:
 * "regs", "req", "state", "pending_events" and "completed_events".
 * On any creation failure, falls through to the dev_err below (the
 * intermediate NULL checks / goto err lines are elided from this view). */
233 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
235 struct mmc_host *mmc = slot->mmc;
236 struct dw_mci *host = slot->host;
240 root = mmc->debugfs_root;
244 node = debugfs_create_file("regs", S_IRUSR, root, host,
249 node = debugfs_create_file("req", S_IRUSR, root, slot,
254 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
258 node = debugfs_create_x32("pending_events", S_IRUSR, root,
259 (u32 *)&host->pending_events);
263 node = debugfs_create_x32("completed_events", S_IRUSR, root,
264 (u32 *)&host->completed_events);
271 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
273 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the data/response timeout register to its maximum value. */
275 static void dw_mci_set_timeout(struct dw_mci *host)
277 /* timeout (maximum) */
278 mci_writel(host, TMOUT, 0xffffffff);
/* Translate an mmc_command into the controller's CMD register flags.
 * Sets STOP for CMD12, otherwise waits for previous data; adds response
 * expectation/length/CRC bits from cmd->flags and data-transfer bits from
 * the attached mmc_data, then lets the SoC-specific drv_data hook adjust
 * the result. Marks the command in-flight via cmd->error = -EINPROGRESS.
 * (cmdr is initialized from cmd->opcode in a line elided from this view.) */
281 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
283 struct mmc_data *data;
284 struct dw_mci_slot *slot = mmc_priv(mmc);
285 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
287 cmd->error = -EINPROGRESS;
291 if (cmdr == MMC_STOP_TRANSMISSION)
292 cmdr |= SDMMC_CMD_STOP;
294 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
296 if (cmd->flags & MMC_RSP_PRESENT) {
297 /* We expect a response, so set this bit */
298 cmdr |= SDMMC_CMD_RESP_EXP;
299 if (cmd->flags & MMC_RSP_136)
300 cmdr |= SDMMC_CMD_RESP_LONG;
303 if (cmd->flags & MMC_RSP_CRC)
304 cmdr |= SDMMC_CMD_RESP_CRC;
308 cmdr |= SDMMC_CMD_DAT_EXP;
309 if (data->flags & MMC_DATA_STREAM)
310 cmdr |= SDMMC_CMD_STRM_MODE;
311 if (data->flags & MMC_DATA_WRITE)
312 cmdr |= SDMMC_CMD_DAT_WR;
315 if (drv_data && drv_data->prepare_command)
316 drv_data->prepare_command(slot->host, &cmdr);
/* Build the stop/abort command matching @cmd into host->stop_abort:
 * CMD12 (STOP_TRANSMISSION) for block read/write opcodes, or a CMD52
 * write to the CCCR ABORT register for SDIO CMD53 (function number taken
 * from bits 30:28 of the original argument). Returns the corresponding
 * CMD register value with STOP + response-expected + CRC bits set. */
322 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
324 struct mmc_command *stop;
330 stop = &host->stop_abort;
332 memset(stop, 0, sizeof(struct mmc_command));
334 if (cmdr == MMC_READ_SINGLE_BLOCK ||
335 cmdr == MMC_READ_MULTIPLE_BLOCK ||
336 cmdr == MMC_WRITE_BLOCK ||
337 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
338 stop->opcode = MMC_STOP_TRANSMISSION;
340 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
341 } else if (cmdr == SD_IO_RW_EXTENDED) {
342 stop->opcode = SD_IO_RW_DIRECT;
343 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
344 ((cmd->arg >> 28) & 0x7);
345 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
350 cmdr = stop->opcode | SDMMC_CMD_STOP |
351 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/* Latch the argument and fire the command on the controller.
 * For CMD11 (voltage switch) low-power clock gating is disabled first and
 * the VOLT_SWITCH flag is added. USE_HOLD_REG is set when the platform
 * requires it (e.g. RK3188). Writing SDMMC_CMD_START starts execution. */
356 static void dw_mci_start_command(struct dw_mci *host,
357 struct mmc_command *cmd, u32 cmd_flags)
359 struct dw_mci_slot *slot = host->slot[0];
360 /*temporality fix slot[0] due to host->num_slots equal to 1*/
362 host->pre_cmd = host->cmd;
365 "start command: ARGR=0x%08x CMDR=0x%08x\n",
366 cmd->arg, cmd_flags);
368 if(SD_SWITCH_VOLTAGE == cmd->opcode){
369 /*confirm non-low-power mode*/
370 mci_writel(host, CMDARG, 0);
371 dw_mci_disable_low_power(slot);
373 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
374 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
376 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
379 mci_writel(host, CMDARG, cmd->arg);
382 /* fix the value to 1 in some Soc,for example RK3188. */
383 if(host->mmc->hold_reg_flag)
384 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
386 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the transfer's explicit stop command using the CMD flags
 * precomputed in host->stop_cmdr. */
390 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
392 dw_mci_start_command(host, data->stop, host->stop_cmdr);
395 /* DMA interface functions */
/* Abort an in-flight transfer: stop and clean up DMA if it was in use,
 * then mark the transfer complete so the state machine can advance. */
396 static void dw_mci_stop_dma(struct dw_mci *host)
398 if (host->using_dma) {
399 host->dma_ops->stop(host);
400 host->dma_ops->cleanup(host);
403 /* Data transfer was stopped by the interrupt handler */
404 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Map the mmc_data direction flag onto the DMA-API direction. */
407 static int dw_mci_get_dma_dir(struct mmc_data *data)
409 if (data->flags & MMC_DATA_WRITE)
410 return DMA_TO_DEVICE;
412 return DMA_FROM_DEVICE;
415 #ifdef CONFIG_MMC_DW_IDMAC
/* Unmap the scatterlist of the current transfer, unless the request was
 * pre-mapped via the pre_req path (host_cookie != 0 means the post_req
 * path owns the unmap). */
416 static void dw_mci_dma_cleanup(struct dw_mci *host)
418 struct mmc_data *data = host->data;
421 if (!data->host_cookie)
422 dma_unmap_sg(host->dev,
425 dw_mci_get_dma_dir(data));
/* Soft-reset the internal DMA controller via the BMOD SWRESET bit
 * (self-clearing in hardware). */
428 static void dw_mci_idmac_reset(struct dw_mci *host)
430 u32 bmod = mci_readl(host, BMOD);
431 /* Software reset of DMA */
432 bmod |= SDMMC_IDMAC_SWRESET;
433 mci_writel(host, BMOD, bmod);
/* Quiesce the IDMAC: detach it from the host controller (CTRL), reset the
 * DMA interface, then disable and soft-reset the IDMAC engine (BMOD). */
436 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
440 /* Disable and reset the IDMAC interface */
441 temp = mci_readl(host, CTRL);
442 temp &= ~SDMMC_CTRL_USE_IDMAC;
443 temp |= SDMMC_CTRL_DMA_RESET;
444 mci_writel(host, CTRL, temp);
446 /* Stop the IDMAC running */
447 temp = mci_readl(host, BMOD);
448 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
449 temp |= SDMMC_IDMAC_SWRESET;
450 mci_writel(host, BMOD, temp);
/* IDMAC completion callback: clean up the mapping and, if the card was not
 * removed mid-transfer (data != NULL, checked in an elided line), mark the
 * transfer complete and kick the state-machine tasklet. */
453 static void dw_mci_idmac_complete_dma(void *arg)
455 struct dw_mci *host = arg;
456 struct mmc_data *data = host->data;
458 dev_vdbg(host->dev, "DMA complete\n");
461 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
462 host->mrq->cmd->opcode,host->mrq->cmd->arg,
463 data->blocks,data->blksz,mmc_hostname(host->mmc));
466 host->dma_ops->cleanup(host);
469 * If the card was removed, data will be NULL. No point in trying to
470 * send the stop command or waiting for NBUSY in this case.
473 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
474 tasklet_schedule(&host->tasklet);
/* Fill the IDMAC descriptor ring from the DMA-mapped scatterlist:
 * one chained descriptor per sg entry (OWN set, per-descriptor IRQ
 * suppressed), then mark the first descriptor FD and the last LD with
 * chaining cleared and its completion interrupt enabled.
 * NOTE(review): the last-descriptor address is computed with byte
 * arithmetic on host->sg_cpu — this relies on sg_cpu being void* (GNU
 * void* arithmetic), matching the upstream dw_mmc code; confirm the
 * field's type before touching this. */
478 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
482 struct idmac_desc *desc = host->sg_cpu;
484 for (i = 0; i < sg_len; i++, desc++) {
485 unsigned int length = sg_dma_len(&data->sg[i]);
486 u32 mem_addr = sg_dma_address(&data->sg[i]);
488 /* Set the OWN bit and disable interrupts for this descriptor */
489 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
492 IDMAC_SET_BUFFER1_SIZE(desc, length);
494 /* Physical address to DMA to/from */
495 desc->des2 = mem_addr;
498 /* Set first descriptor */
500 desc->des0 |= IDMAC_DES0_FD;
502 /* Set last descriptor */
503 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
504 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
505 desc->des0 |= IDMAC_DES0_LD;
/* Kick off an IDMAC transfer: build descriptors for the mapped sg list,
 * route the controller's data path through the IDMAC, enable the engine,
 * and write the poll-demand register to start fetching descriptors. */
510 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
514 dw_mci_translate_sglist(host, host->data, sg_len);
516 /* Select IDMAC interface */
517 temp = mci_readl(host, CTRL);
518 temp |= SDMMC_CTRL_USE_IDMAC;
519 mci_writel(host, CTRL, temp);
523 /* Enable the IDMAC */
524 temp = mci_readl(host, BMOD);
525 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
526 mci_writel(host, BMOD, temp);
528 /* Start it running */
529 mci_writel(host, PLDMND, 1);
/* One-time IDMAC setup: forward-link a page-sized descriptor ring in
 * host->sg_cpu (last entry wraps to sg_dma and carries the end-of-ring
 * bit), soft-reset the engine, unmask only the interrupts of interest,
 * and program the descriptor base address. */
532 static int dw_mci_idmac_init(struct dw_mci *host)
534 struct idmac_desc *p;
537 /* Number of descriptors in the ring buffer */
538 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
540 /* Forward link the descriptor list */
541 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
542 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
544 /* Set the last descriptor as the end-of-ring descriptor */
545 p->des3 = host->sg_dma;
546 p->des0 = IDMAC_DES0_ER;
548 dw_mci_idmac_reset(host);
550 /* Mask out interrupts - get Tx & Rx complete only */
551 mci_writel(host, IDSTS, IDMAC_INT_CLR);
552 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
555 /* Set the descriptor base address */
556 mci_writel(host, DBADDR, host->sg_dma);
/* DMA-ops vtable for the internal DMA controller (IDMAC) backend. */
560 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
561 .init = dw_mci_idmac_init,
562 .start = dw_mci_idmac_start_dma,
563 .stop = dw_mci_idmac_stop_dma,
564 .complete = dw_mci_idmac_complete_dma,
565 .cleanup = dw_mci_dma_cleanup,
/* External-DMA counterpart of dw_mci_dma_cleanup: unmap the sg list
 * unless it was pre-mapped (host_cookie set).
 * NOTE(review): named "edma" while every sibling uses "edmac". */
569 static void dw_mci_edma_cleanup(struct dw_mci *host)
571 struct mmc_data *data = host->data;
574 if (!data->host_cookie)
575 dma_unmap_sg(host->dev,
576 data->sg, data->sg_len,
577 dw_mci_get_dma_dir(data));
/* Abort any in-flight external-DMA transaction on our channel. */
580 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
582 dmaengine_terminate_all(host->dms->ch);
/* External-DMA completion callback: for reads, sync the sg list back to
 * the CPU before the data is consumed; then clean up and advance the
 * state machine (card-removed / data==NULL guard is in elided lines). */
585 static void dw_mci_edmac_complete_dma(void *arg)
587 struct dw_mci *host = arg;
588 struct mmc_data *data = host->data;
590 dev_vdbg(host->dev, "DMA complete\n");
593 if(data->flags & MMC_DATA_READ)
594 /* Invalidate cache after read */
595 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
596 data->sg_len, DMA_FROM_DEVICE);
598 host->dma_ops->cleanup(host);
601 * If the card was removed, data will be NULL. No point in trying to
602 * send the stop command or waiting for NBUSY in this case.
605 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
606 tasklet_schedule(&host->tasklet);
/* Start a transfer over an external dmaengine channel: configure the
 * slave (FIFO address, 4-byte width, burst size matching FIFOTH MSIZE),
 * prepare a slave-sg descriptor in the proper direction, install
 * dw_mci_edmac_complete_dma as completion callback, sync caches for
 * writes, and issue the transaction. */
610 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
612 struct dma_slave_config slave_config;
613 struct dma_async_tx_descriptor *desc = NULL;
614 struct scatterlist *sgl = host->data->sg;
615 u32 sg_elems = host->data->sg_len;
618 /* Set external dma config: burst size, burst width*/
619 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
620 slave_config.src_addr = slave_config.dst_addr;
621 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
622 slave_config.src_addr_width = slave_config.dst_addr_width;
624 /* Match FIFO dma burst MSIZE with external dma config*/
/* BUG(review): "&& 0x7" is a logical AND, so maxburst collapses to 0 or 1
 * instead of extracting the 3-bit MSIZE field from FIFOTH[30:28].
 * This should almost certainly be a bitwise "& 0x7" — fix and retest. */
625 slave_config.dst_maxburst = ((host->fifoth_val) >> 28) && 0x7;
626 slave_config.src_maxburst = slave_config.dst_maxburst;
628 if(host->data->flags & MMC_DATA_WRITE){
629 slave_config.direction = DMA_MEM_TO_DEV;
630 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
632 dev_err(host->dev, "error in dw_mci edma configuration.\n");
636 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
637 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
639 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
642 /* Set dw_mci_edmac_complete_dma as callback */
643 desc->callback = dw_mci_edmac_complete_dma;
644 desc->callback_param = (void *)host;
645 dmaengine_submit(desc);
647 /* Flush cache before write */
648 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
649 sg_elems, DMA_TO_DEVICE);
650 dma_async_issue_pending(host->dms->ch);
/* Read path: same sequence with DMA_DEV_TO_MEM (cache sync for reads is
 * done in the completion callback instead). */
653 slave_config.direction = DMA_DEV_TO_MEM;
654 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
656 dev_err(host->dev, "error in dw_mci edma configuration.\n");
659 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
660 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
662 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
665 /* set dw_mci_edmac_complete_dma as callback */
666 desc->callback = dw_mci_edmac_complete_dma;
667 desc->callback_param = (void *)host;
668 dmaengine_submit(desc);
669 dma_async_issue_pending(host->dms->ch);
/* Allocate the dw_mci_dma_slave bookkeeping struct and request the
 * "dw_mci" slave channel described in the device tree. */
673 static int dw_mci_edmac_init(struct dw_mci *host)
675 /* Request external dma channel, SHOULD decide chn in dts */
677 host->dms = (struct dw_mci_dma_slave *)kmalloc
678 (sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
679 if (NULL == host->dms) {
680 dev_err(host->dev, "No enough memory to alloc dms.\n");
684 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
685 if (!host->dms->ch) {
/* BUG(review): we just established ch == NULL, yet the message below
 * dereferences host->dms->ch->chan_id — guaranteed NULL dereference on
 * this error path. Drop the chan_id from the message. */
686 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
687 host->dms->ch->chan_id);
694 if (NULL != host->dms) {
/* Release the external DMA channel and NULL the pointer; the dms struct
 * itself is freed in lines elided from this view (presumably kfree —
 * verify in the full file). */
702 static void dw_mci_edmac_exit(struct dw_mci *host)
704 if (NULL != host->dms) {
705 if (NULL != host->dms->ch) {
706 dma_release_channel(host->dms->ch);
707 host->dms->ch = NULL;
/* DMA-ops vtable for the external dmaengine backend (note: this one also
 * provides .exit, unlike the IDMAC vtable). */
714 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
715 .init = dw_mci_edmac_init,
716 .exit = dw_mci_edmac_exit,
717 .start = dw_mci_edmac_start_dma,
718 .stop = dw_mci_edmac_stop_dma,
719 .complete = dw_mci_edmac_complete_dma,
720 .cleanup = dw_mci_edma_cleanup,
/* DMA-map @data's scatterlist and cache the mapped length in host_cookie.
 * Returns the existing cookie when called from the fast path (!next) on a
 * pre-mapped request. Falls back to PIO (negative return in elided lines)
 * for transfers below DW_MCI_DMA_THRESHOLD or with any sg entry whose
 * offset or length is not 4-byte aligned. */
724 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
725 struct mmc_data *data,
728 struct scatterlist *sg;
729 unsigned int i, sg_len;
731 if (!next && data->host_cookie)
732 return data->host_cookie;
735 * We don't do DMA on "complex" transfers, i.e. with
736 * non-word-aligned buffers or lengths. Also, we don't bother
737 * with all the DMA setup overhead for short transfers.
739 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
745 for_each_sg(data->sg, sg, data->sg_len, i) {
746 if (sg->offset & 3 || sg->length & 3)
750 sg_len = dma_map_sg(host->dev,
753 dw_mci_get_dma_dir(data));
758 data->host_cookie = sg_len;
/* mmc_host_ops.pre_req: pre-map the next request's sg list so the issue
 * path can skip dma_map_sg. A non-zero host_cookie on entry means the
 * core misused the API, so it is cleared; a failed pre-map leaves the
 * cookie 0 and the request maps lazily later. */
763 static void dw_mci_pre_req(struct mmc_host *mmc,
764 struct mmc_request *mrq,
767 struct dw_mci_slot *slot = mmc_priv(mmc);
768 struct mmc_data *data = mrq->data;
770 if (!slot->host->use_dma || !data)
773 if (data->host_cookie) {
774 data->host_cookie = 0;
778 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
779 data->host_cookie = 0;
/* mmc_host_ops.post_req: unmap a request that was pre-mapped by
 * dw_mci_pre_req (host_cookie != 0) and clear the cookie. */
782 static void dw_mci_post_req(struct mmc_host *mmc,
783 struct mmc_request *mrq,
786 struct dw_mci_slot *slot = mmc_priv(mmc);
787 struct mmc_data *data = mrq->data;
789 if (!slot->host->use_dma || !data)
792 if (data->host_cookie)
793 dma_unmap_sg(slot->host->dev,
796 dw_mci_get_dma_dir(data));
797 data->host_cookie = 0;
/* Pick the largest DMA burst size (MSIZE) and RX/TX watermarks that
 * divide both the block depth and the TX watermark complement evenly,
 * then program FIFOTH. Only meaningful for the IDMAC build; the
 * candidate-search loop over mszs[] is partially elided from this view. */
800 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
802 #ifdef CONFIG_MMC_DW_IDMAC
803 unsigned int blksz = data->blksz;
804 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
805 u32 fifo_width = 1 << host->data_shift;
806 u32 blksz_depth = blksz / fifo_width, fifoth_val;
807 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
808 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
810 tx_wmark = (host->fifo_depth) / 2;
811 tx_wmark_invers = host->fifo_depth - tx_wmark;
815 * if blksz is not a multiple of the FIFO width
817 if (blksz % fifo_width) {
824 if (!((blksz_depth % mszs[idx]) ||
825 (tx_wmark_invers % mszs[idx]))) {
827 rx_wmark = mszs[idx] - 1;
832 * If idx is '0', it won't be tried
833 * Thus, initial values are used
836 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
837 mci_writel(host, FIFOTH, fifoth_val);
/* Configure the card-read-threshold (CDTHRCTL): enabled with
 * thld_size = blksz only for HS200 / SDR104 timings when the block fits
 * in the FIFO; otherwise disabled. Read-direction only (WARN otherwise). */
841 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
843 unsigned int blksz = data->blksz;
844 u32 blksz_depth, fifo_depth;
847 WARN_ON(!(data->flags & MMC_DATA_READ));
849 if (host->timing != MMC_TIMING_MMC_HS200 &&
850 host->timing != MMC_TIMING_UHS_SDR104)
853 blksz_depth = blksz / (1 << host->data_shift);
854 fifo_depth = host->fifo_depth;
856 if (blksz_depth > fifo_depth)
860 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
861 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
862 * Currently just choose blksz.
865 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
869 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/* Try to submit @data via DMA. Returns non-zero (in elided lines) when
 * DMA is unavailable or mapping fails, signalling the caller to use PIO.
 * On success: tune FIFOTH for the new block size if it changed, reset the
 * DMA interface, enable DMA in CTRL, mask RX/TX PIO interrupts, and start
 * the backend with the mapped sg length. */
872 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
879 /* If we don't have a channel, we can't do DMA */
883 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
885 host->dma_ops->stop(host);
892 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
893 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
897 * Decide the MSIZE and RX/TX Watermark.
898 * If current block size is same with previous size,
899 * no need to update fifoth.
901 if (host->prev_blksz != data->blksz)
902 dw_mci_adjust_fifoth(host, data);
905 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
907 /* Enable the DMA interface */
908 temp = mci_readl(host, CTRL);
909 temp |= SDMMC_CTRL_DMA_ENABLE;
910 mci_writel(host, CTRL, temp);
912 /* Disable RX/TX IRQs, let DMA handle it */
913 temp = mci_readl(host, INTMASK);
914 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
915 mci_writel(host, INTMASK, temp);
917 host->dma_ops->start(host, sg_len);
/* Dispatch a data transfer: set direction status (and the read threshold
 * for reads), then attempt DMA; if dw_mci_submit_data_dma() rejects it,
 * fall back to PIO — set up the sg_miter, clear the partial-word buffer,
 * unmask RX/TX interrupts, disable DMA in CTRL, and restore the initial
 * FIFOTH (invalidating prev_blksz so a later DMA transfer re-tunes it).
 * On the DMA path, prev_blksz remembers the block size instead. */
922 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
926 data->error = -EINPROGRESS;
933 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
935 if (data->flags & MMC_DATA_READ) {
936 host->dir_status = DW_MCI_RECV_STATUS;
937 dw_mci_ctrl_rd_thld(host, data);
939 host->dir_status = DW_MCI_SEND_STATUS;
942 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
943 data->blocks, data->blksz, mmc_hostname(host->mmc));
945 if (dw_mci_submit_data_dma(host, data)) {
946 int flags = SG_MITER_ATOMIC;
947 if (host->data->flags & MMC_DATA_READ)
948 flags |= SG_MITER_TO_SG;
950 flags |= SG_MITER_FROM_SG;
952 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
954 host->part_buf_start = 0;
955 host->part_buf_count = 0;
957 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
958 temp = mci_readl(host, INTMASK);
959 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
960 mci_writel(host, INTMASK, temp);
962 temp = mci_readl(host, CTRL);
963 temp &= ~SDMMC_CTRL_DMA_ENABLE;
964 mci_writel(host, CTRL, temp);
967 * Use the initial fifoth_val for PIO mode.
968 * If next issued data may be transfered by DMA mode,
969 * prev_blksz should be invalidated.
971 mci_writel(host, FIFOTH, host->fifoth_val);
972 host->prev_blksz = 0;
975 * Keep the current block size.
976 * It will be used to decide whether to update
977 * fifoth register next time.
979 host->prev_blksz = data->blksz;
/* Synchronously issue a controller-internal command (e.g. CLK_UPDATE):
 * optionally busy-wait for DATA/MC busy to clear first (bounded by
 * SDMMC_WAIT_FOR_UNBUSY ms when a card is present), then write CMDARG and
 * CMD|START and poll until hardware clears the START bit. Clock-update
 * commands get a shorter 50ms poll budget; timeout is only logged. */
983 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
985 struct dw_mci *host = slot->host;
986 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
987 unsigned int cmd_status = 0;
988 #ifdef SDMMC_WAIT_FOR_UNBUSY
990 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
992 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
994 ret = time_before(jiffies, timeout);
995 cmd_status = mci_readl(host, STATUS);
996 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1000 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1001 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1004 mci_writel(host, CMDARG, arg);
1006 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
1007 if(cmd & SDMMC_CMD_UPD_CLK)
1008 timeout = jiffies + msecs_to_jiffies(50);
1010 timeout = jiffies + msecs_to_jiffies(500);
1011 while (time_before(jiffies, timeout)) {
1012 cmd_status = mci_readl(host, CMD);
1013 if (!(cmd_status & SDMMC_CMD_START))
1016 dev_err(&slot->mmc->class_dev,
1017 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1018 cmd, arg, cmd_status);
/* (Re)program the card clock and bus width for @slot.
 * clock == 0 gates the clock; otherwise, when the requested clock or
 * force_clkinit differs from the cached state, a new divider is computed
 * from bus_hz, CLKENA/CLKSRC/CLKDIV are reprogrammed through internal
 * CLK_UPDATE commands, low-power clock gating is enabled unless the SDIO
 * interrupt is in use, and several Rockchip-specific clk_mmc workarounds
 * are applied (24MHz xtal reparent below 400kHz; div==0 DDR workaround —
 * see the in-code FIXME). Finally the CTYPE register gets the bus width. */
1021 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1023 struct dw_mci *host = slot->host;
1024 unsigned int tempck,clock = slot->clock;
1029 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1030 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1033 mci_writel(host, CLKENA, 0);
1034 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1035 if(host->svi_flags == 0)
1036 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1038 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1040 } else if (clock != host->current_speed || force_clkinit) {
1041 div = host->bus_hz / clock;
1042 if (host->bus_hz % clock && host->bus_hz > clock)
1044 * move the + 1 after the divide to prevent
1045 * over-clocking the card.
1049 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1051 if ((clock << div) != slot->__clk_old || force_clkinit) {
1052 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1053 dev_info(&slot->mmc->class_dev,
1054 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1055 slot->id, host->bus_hz, clock,
1058 host->set_speed = tempck;
1059 host->set_div = div;
1063 mci_writel(host, CLKENA, 0);
1064 mci_writel(host, CLKSRC, 0);
1068 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1070 if(clock <= 400*1000){
1071 MMC_DBG_BOOT_FUNC(host->mmc,
1072 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1073 clock * 2, mmc_hostname(host->mmc));
1074 /* clk_mmc will change parents to 24MHz xtal*/
1075 clk_set_rate(host->clk_mmc, clock * 2);
1078 host->set_div = div;
1082 MMC_DBG_BOOT_FUNC(host->mmc,
1083 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1084 mmc_hostname(host->mmc));
1087 MMC_DBG_ERR_FUNC(host->mmc,
1088 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1089 mmc_hostname(host->mmc));
1091 host->set_div = div;
1092 host->bus_hz = host->set_speed * 2;
1093 MMC_DBG_BOOT_FUNC(host->mmc,
1094 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1095 div, host->bus_hz, mmc_hostname(host->mmc));
1097 /* BUG may be here, come on, Linux BSP engineer looks!
1098 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1099 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1100 some oops happened like that:
1101 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1102 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1103 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1104 mmc0: new high speed DDR MMC card at address 0001
1105 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1107 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1108 mmcblk0: retrying using single block read
1109 mmcblk0: error -110 sending status command, retrying
1111 How to: If eMMC HW version < 4.51, or > 4.51 but no caps2-mmc-hs200 support in dts
1112 Please set dts emmc clk to 100M or 150M, I will workaround it!
1115 if (host->verid < DW_MMC_240A)
1116 clk_set_rate(host->clk_mmc,(host->bus_hz));
1118 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1124 /* set clock to desired speed */
1125 mci_writel(host, CLKDIV, div);
1129 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1131 /* enable clock; only low power if no SDIO */
1132 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1134 if (host->verid < DW_MMC_240A)
1135 sdio_int = SDMMC_INT_SDIO(slot->id);
1137 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1139 if (!(mci_readl(host, INTMASK) & sdio_int))
1140 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1141 mci_writel(host, CLKENA, clk_en_a);
1145 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1146 /* keep the clock with reflecting clock dividor */
1147 slot->__clk_old = clock << div;
1150 host->current_speed = clock;
1152 if(slot->ctype != slot->pre_ctype)
1153 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1155 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1156 mmc_hostname(host->mmc));
1157 slot->pre_ctype = slot->ctype;
1159 /* Set the current slot bus width */
1160 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1163 extern struct mmc_card *this_card;
/* Poll STATUS until DATA_BUSY and MC_BUSY clear, with a card-type-specific
 * timeout (ms): SDIO default, SD 500, eMMC at least 2500, and a value
 * scaled from ext_csd erase timeouts for MMC_ERASE. */
1164 static void dw_mci_wait_unbusy(struct dw_mci *host)
1167 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1168 unsigned long time_loop;
1169 unsigned int status;
1172 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1174 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC) {
1175 if (host->cmd && (host->cmd->opcode == MMC_ERASE)) {
1176 /* Special care for (secure)erase timeout calculation */
/* BUG(review): (arg & (1 << 31)) evaluates to 0 or 0x80000000, never 1,
 * so this secure-erase test is always false. Should be "!= 0" (or "arg &
 * BIT(31)"). Also the erase_group_def check two lines down ends in a
 * stray ';', making its body empty — confirm intent before fixing. */
1178 if((host->cmd->arg & (0x1 << 31)) == 1) /* secure erase */
1181 if (((this_card->ext_csd.erase_group_def) & 0x1) == 1) ;
1182 se_flag ? (timeout = (this_card->ext_csd.hc_erase_timeout) *
1183 300000 * (this_card->ext_csd.sec_erase_mult)) :
1184 (timeout = (this_card->ext_csd.hc_erase_timeout) * 300000);
1188 if(timeout < SDMMC_DATA_TIMEOUT_EMMC)
1189 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1190 } else if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
1191 timeout = SDMMC_DATA_TIMEOUT_SD;
1194 time_loop = jiffies + msecs_to_jiffies(timeout);
1196 status = mci_readl(host, STATUS);
1197 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1199 } while (time_before(jiffies, time_loop));
1204 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* Voltage-switch helper: reports card busy state to the MMC core.
 * Return convention (per the comment below): 0 = busy, 1 = not busy.
 * Toggles host->svi_flags to track the two phases of the CMD11 voltage
 * switch sequence; the actual busy check is in lines elided from view. */
1207 * 0--status is busy.
1208 * 1--status is unbusy.
1210 int dw_mci_card_busy(struct mmc_host *mmc)
1212 struct dw_mci_slot *slot = mmc_priv(mmc);
1213 struct dw_mci *host = slot->host;
1215 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1216 host->svi_flags, mmc_hostname(host->mmc));
1219 if(host->svi_flags == 0){
1221 host->svi_flags = 1;
1222 return host->svi_flags;
1225 host->svi_flags = 0;
1226 return host->svi_flags;
/* Internal request dispatcher (caller holds the queueing context):
 * selects the slot, waits for the card to go unbusy, clears per-request
 * event/status tracking, programs byte/block counts for data, prepares
 * the CMD flags (adding the INIT sequence for the first command after
 * insertion), submits the data phase, starts the command, and precomputes
 * stop_cmdr for an explicit mrq->stop. */
1232 static void __dw_mci_start_request(struct dw_mci *host,
1233 struct dw_mci_slot *slot,
1234 struct mmc_command *cmd)
1236 struct mmc_request *mrq;
1237 struct mmc_data *data;
1241 if (host->pdata->select_slot)
1242 host->pdata->select_slot(slot->id);
1244 host->cur_slot = slot;
1247 dw_mci_wait_unbusy(host);
1249 host->pending_events = 0;
1250 host->completed_events = 0;
1251 host->data_status = 0;
1255 dw_mci_set_timeout(host);
1256 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1257 mci_writel(host, BLKSIZ, data->blksz);
1260 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1262 /* this is the first command, send the initialization clock */
1263 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1264 cmdflags |= SDMMC_CMD_INIT;
1267 dw_mci_submit_data(host, data);
1271 dw_mci_start_command(host, cmd, cmdflags);
1274 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/* Start the slot's queued request; if the request carries SET_BLOCK_COUNT
 * (mrq->sbc) that command is issued first, otherwise the main command. */
1277 static void dw_mci_start_request(struct dw_mci *host,
1278 struct dw_mci_slot *slot)
1280 struct mmc_request *mrq = slot->mrq;
1281 struct mmc_command *cmd;
1283 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1284 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1286 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1287 __dw_mci_start_request(host, slot, cmd);
1290 /* must be called with host->lock held */
/*
 * Queue @mrq on @slot: start it immediately if the host state machine is
 * idle, otherwise append the slot to host->queue for dw_mci_request_end()
 * to pick up later.
 */
1291 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1292 struct mmc_request *mrq)
1294 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1299 if (host->state == STATE_IDLE) {
1300 host->state = STATE_SENDING_CMD;
1301 dw_mci_start_request(host, slot);
/* Busy: defer; the tasklet starts the next queued slot on completion. */
1303 list_add_tail(&slot->queue_node, &host->queue);
/*
 * .request host op: entry point for all MMC core requests. Fails fast
 * with -ENOMEDIUM when no card is present, else queues the request under
 * host->lock.
 */
1307 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1309 struct dw_mci_slot *slot = mmc_priv(mmc);
1310 struct dw_mci *host = slot->host;
1315 * The check for card presence and queueing of the request must be
1316 * atomic, otherwise the card could be removed in between and the
1317 * request wouldn't fail until another card was inserted.
1319 spin_lock_bh(&host->lock);
1321 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1322 spin_unlock_bh(&host->lock);
1323 mrq->cmd->error = -ENOMEDIUM;
1324 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1325 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
/* Complete immediately; lock already dropped above. */
1327 mmc_request_done(mmc, mrq);
1331 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1332 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1334 dw_mci_queue_request(host, slot, mrq);
1336 spin_unlock_bh(&host->lock);
/*
 * .set_ios host op: apply bus width, timing, clock and power mode.
 * Waits (bounded) for the controller to go unbusy before reprogramming.
 * NOTE(review): interleaved lines (braces/else) are elided in this dump.
 */
1339 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1341 struct dw_mci_slot *slot = mmc_priv(mmc);
1342 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1343 struct dw_mci *host = slot->host;
1345 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1348 #ifdef SDMMC_WAIT_FOR_UNBUSY
1349 unsigned long time_loop;
/* Longer unbusy timeout while a signal-voltage switch is in flight. */
1352 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1353 if(host->svi_flags == 1)
1354 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1356 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1358 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1361 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1362 printk("%d..%s: no card. [%s]\n", \
1363 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until both data-busy bits clear or the deadline passes. */
1368 ret = time_before(jiffies, time_loop);
1369 regs = mci_readl(slot->host, STATUS);
1370 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1376 printk("slot->flags = %lu ", slot->flags);
1377 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1378 if(host->svi_flags != 1)
1381 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1382 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Translate the requested bus width into a controller CTYPE value. */
1386 switch (ios->bus_width) {
1387 case MMC_BUS_WIDTH_4:
1388 slot->ctype = SDMMC_CTYPE_4BIT;
1390 case MMC_BUS_WIDTH_8:
1391 slot->ctype = SDMMC_CTYPE_8BIT;
1394 /* set default 1 bit mode */
1395 slot->ctype = SDMMC_CTYPE_1BIT;
1396 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode bit for this slot lives in UHS_REG[16 + slot id]. */
1399 regs = mci_readl(slot->host, UHS_REG);
1402 if (ios->timing == MMC_TIMING_UHS_DDR50)
1403 regs |= ((0x1 << slot->id) << 16);
1405 regs &= ~((0x1 << slot->id) << 16);
1407 mci_writel(slot->host, UHS_REG, regs);
1408 slot->host->timing = ios->timing;
1411 * Use mirror of ios->clock to prevent race with mmc
1412 * core ios update when finding the minimum.
1414 slot->clock = ios->clock;
/* Give the SoC-specific driver a chance to adjust (e.g. clock phases). */
1416 if (drv_data && drv_data->set_ios)
1417 drv_data->set_ios(slot->host, ios);
1419 /* Slot specific timing and width adjustment */
1420 dw_mci_setup_bus(slot, false);
1424 switch (ios->power_mode) {
/* Power up: request the INIT sequence before the first command. */
1426 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1428 if (slot->host->pdata->setpower)
1429 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1430 regs = mci_readl(slot->host, PWREN);
1431 regs |= (1 << slot->id);
1432 mci_writel(slot->host, PWREN, regs);
1435 /* Power down slot */
1436 if(slot->host->pdata->setpower)
1437 slot->host->pdata->setpower(slot->id, 0);
1438 regs = mci_readl(slot->host, PWREN);
1439 regs &= ~(1 << slot->id);
1440 mci_writel(slot->host, PWREN, regs);
/*
 * .get_ro host op: report write-protect state. Resolution order: slot
 * quirk (no WP) -> platform callback -> WP GPIO -> controller WRTPRT
 * register bit for this slot. Returns nonzero when read-only.
 */
1447 static int dw_mci_get_ro(struct mmc_host *mmc)
1450 struct dw_mci_slot *slot = mmc_priv(mmc);
1451 struct dw_mci_board *brd = slot->host->pdata;
1453 /* Use platform get_ro function, else try on board write protect */
1454 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1456 else if(brd->get_ro)
1457 read_only = brd->get_ro(slot->id);
1458 else if(gpio_is_valid(slot->wp_gpio))
1459 read_only = gpio_get_value(slot->wp_gpio);
/* Fallback: hardware write-protect status from the WRTPRT register. */
1462 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1464 dev_dbg(&mmc->class_dev, "card is %s\n",
1465 read_only ? "read-only" : "read-write");
/*
 * .set_sdio_status host op (Rockchip extension): software-controlled
 * "card present" toggle for SDIO (e.g. WiFi power on/off). Updates the
 * PRESENT flag, gates the controller clocks accordingly, and schedules
 * a card-detect rescan.
 */
1470 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1472 struct dw_mci_slot *slot = mmc_priv(mmc);
1473 struct dw_mci *host = slot->host;
1474 /*struct dw_mci_board *brd = slot->host->pdata;*/
/* Only meaningful for hosts restricted to SDIO cards. */
1476 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1479 spin_lock_bh(&host->lock);
1482 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1484 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1486 spin_unlock_bh(&host->lock);
/* Present: ensure bus (hclk) and card (clk) clocks are running. */
1488 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1489 if(__clk_is_enabled(host->hclk_mmc) == false)
1490 clk_prepare_enable(host->hclk_mmc);
1491 if(__clk_is_enabled(host->clk_mmc) == false)
1492 clk_prepare_enable(host->clk_mmc);
/* Absent: drop clocks in the reverse order they were enabled. */
1494 if(__clk_is_enabled(host->clk_mmc) == true)
1495 clk_disable_unprepare(slot->host->clk_mmc);
1496 if(__clk_is_enabled(host->hclk_mmc) == true)
1497 clk_disable_unprepare(slot->host->hclk_mmc);
/* Debounced rescan (20 ms) so the core notices the status change. */
1500 mmc_detect_change(slot->mmc, 20);
/*
 * .get_cd host op: card-detect. On RK312x SD slots the CD pin is shared
 * with force_jtag muxing, handled via GRF writes; otherwise falls back to
 * quirk / platform callback / CD GPIO / CDETECT register. Updates the
 * slot PRESENT flag under host->lock.
 * NOTE(review): interleaved lines (braces/else) are elided in this dump.
 */
1506 static int dw_mci_get_cd(struct mmc_host *mmc)
1509 struct dw_mci_slot *slot = mmc_priv(mmc);
1510 struct dw_mci_board *brd = slot->host->pdata;
1511 struct dw_mci *host = slot->host;
1512 int gpio_cd = mmc_gpio_get_cd(mmc);
/* RK312x SD path: read the raw CD GPIO twice to debounce. */
1515 if (cpu_is_rk312x() &&
1517 (mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
1518 gpio_cd = slot->cd_gpio;
1519 if (gpio_is_valid(gpio_cd)) {
1520 gpio_val = gpio_get_value_cansleep(gpio_cd);
1522 if (gpio_val == gpio_get_value_cansleep(gpio_cd)) {
/* Active-low CD pin: 0 on the wire means card inserted. */
1523 gpio_cd = gpio_get_value_cansleep(gpio_cd) == 0 ? 1 : 0;
1525 /* Enable force_jtag wihtout card in slot, ONLY for NCD-package */
1526 grf_writel((0x1 << 24) | (1 << 8), RK312X_GRF_SOC_CON0);
1527 dw_mci_ctrl_all_reset(host);
1529 /* Really card detected: SHOULD disable force_jtag */
1530 grf_writel((0x1 << 24) | (0 << 8), RK312X_GRF_SOC_CON0);
/* Bouncing reads: keep reporting the last stable state. */
1534 return slot->last_detect_state;
1537 dev_err(host->dev, "dw_mci_get_cd: invalid gpio_cd!\n");
/* SDIO presence is purely software-controlled (set_sdio_status). */
1541 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1542 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1544 /* Use platform get_cd function, else try onboard card detect */
1545 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1547 else if (brd->get_cd)
1548 present = !brd->get_cd(slot->id);
1549 else if (!IS_ERR_VALUE(gpio_cd))
1552 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1555 spin_lock_bh(&host->lock);
1557 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1558 dev_dbg(&mmc->class_dev, "card is present\n");
1560 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1561 dev_dbg(&mmc->class_dev, "card is not present\n");
1563 spin_unlock_bh(&host->lock);
1570 * Dts Should caps emmc controller with poll-hw-reset
/*
 * .hw_reset host op for eMMC: (1) abort any in-flight transfer with a
 * raw CMD12, (2) drain the DTO interrupt, (3) reset IDMAC, DMA and FIFO
 * in that exact order, (4) pulse PWREN/RST_n with JEDEC-compliant timing
 * (tRstW >= 1us, tRSCA >= 200us, tRSTH >= 1us).
 * Fix: the FIFO-reset failure warning previously named SDMMC_CTRL_DMA_RESET
 * (copy-paste from the DMA-reset branch); it now names the bit actually
 * being checked, SDMMC_CTRL_FIFO_RESET.
 * NOTE(review): interleaved lines (braces/loops) are elided in this dump.
 */
1572 static void dw_mci_hw_reset(struct mmc_host *mmc)
1574 struct dw_mci_slot *slot = mmc_priv(mmc);
1575 struct dw_mci *host = slot->host;
1580 unsigned long timeout;
1583 /* (1) CMD12 to end any transfer in process */
1584 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1585 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1587 if(host->mmc->hold_reg_flag)
1588 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1589 mci_writel(host, CMDARG, 0);
1591 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll until the controller clears CMD_START (command accepted). */
1593 timeout = jiffies + msecs_to_jiffies(500);
1595 ret = time_before(jiffies, timeout);
1596 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1601 MMC_DBG_ERR_FUNC(host->mmc,
1602 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1603 __func__, mmc_hostname(host->mmc));
1605 /* (2) wait DTO, even if no response is sent back by card */
1607 timeout = jiffies + msecs_to_jiffies(5);
1609 ret = time_before(jiffies, timeout);
1610 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1611 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1617 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1619 /* Software reset - BMOD[0] for IDMA only */
1620 regs = mci_readl(host, BMOD);
1621 regs |= SDMMC_IDMAC_SWRESET;
1622 mci_writel(host, BMOD, regs);
1623 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1624 regs = mci_readl(host, BMOD);
1625 if(regs & SDMMC_IDMAC_SWRESET)
1626 MMC_DBG_WARN_FUNC(host->mmc,
1627 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1628 __func__, mmc_hostname(host->mmc));
1630 /* DMA reset - CTRL[2] */
1631 regs = mci_readl(host, CTRL);
1632 regs |= SDMMC_CTRL_DMA_RESET;
1633 mci_writel(host, CTRL, regs);
1634 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1635 regs = mci_readl(host, CTRL);
1636 if(regs & SDMMC_CTRL_DMA_RESET)
1637 MMC_DBG_WARN_FUNC(host->mmc,
1638 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1639 __func__, mmc_hostname(host->mmc));
1641 /* FIFO reset - CTRL[1] */
1642 regs = mci_readl(host, CTRL);
1643 regs |= SDMMC_CTRL_FIFO_RESET;
1644 mci_writel(host, CTRL, regs);
1645 mdelay(1); /* no timing limited, 1ms is random value */
1646 regs = mci_readl(host, CTRL);
1647 if(regs & SDMMC_CTRL_FIFO_RESET)
1648 MMC_DBG_WARN_FUNC(host->mmc,
1649 "%s dw_mci_hw_reset: SDMMC_CTRL_FIFO_RESET failed!!! [%s]\n",
1650 __func__, mmc_hostname(host->mmc));
1653 According to eMMC spec
1654 tRstW >= 1us ; RST_n pulse width
1655 tRSCA >= 200us ; RST_n to Command time
1656 tRSTH >= 1us ; RST_n high period
/* Drive power and RST_n low, hold, then release and wait tRSCA. */
1658 mci_writel(slot->host, PWREN, 0x0);
1659 mci_writel(slot->host, RST_N, 0x0);
1661 udelay(10); /* 10us for bad quality eMMc. */
1663 mci_writel(slot->host, PWREN, 0x1);
1664 mci_writel(slot->host, RST_N, 0x1);
1666 usleep_range(500, 1000); /* at least 500(> 200us) */
1670 * Disable lower power mode.
1672 * Low power mode will stop the card clock when idle. According to the
1673 * description of the CLKENA register we should disable low power mode
1674 * for SDIO cards if we need SDIO interrupts to work.
1676 * This function is fast if low power mode is already disabled.
1678 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1680 struct dw_mci *host = slot->host;
1682 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1684 clk_en_a = mci_readl(host, CLKENA);
/* Only touch the register (and issue UPD_CLK) when the bit is set. */
1686 if (clk_en_a & clken_low_pwr) {
1687 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
/* CLKENA changes take effect only after an update-clock command. */
1688 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1689 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * .enable_sdio_irq host op: mask/unmask the per-slot SDIO interrupt bit
 * in INTMASK. The SDIO bit position depends on the IP version (moved by
 * 8 from version 2.40a onward).
 */
1693 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1695 struct dw_mci_slot *slot = mmc_priv(mmc);
1696 struct dw_mci *host = slot->host;
1697 //unsigned long flags;
1701 //spin_lock_irqsave(&host->lock, flags);
1703 /* Enable/disable Slot Specific SDIO interrupt */
1704 int_mask = mci_readl(host, INTMASK);
1706 if (host->verid < DW_MMC_240A)
1707 sdio_int = SDMMC_INT_SDIO(slot->id);
1709 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1713 * Turn off low power mode if it was enabled. This is a bit of
1714 * a heavy operation and we disable / enable IRQs a lot, so
1715 * we'll leave low power mode disabled and it will get
1716 * re-enabled again in dw_mci_setup_bus().
1718 dw_mci_disable_low_power(slot);
1720 mci_writel(host, INTMASK,
1721 (int_mask | sdio_int));
1723 mci_writel(host, INTMASK,
1724 (int_mask & ~sdio_int));
1727 //spin_unlock_irqrestore(&host->lock, flags);
1730 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* Supported IO-domain voltages, in millivolts. */
1732 IO_DOMAIN_12 = 1200,
1733 IO_DOMAIN_18 = 1800,
1734 IO_DOMAIN_33 = 3300,
/*
 * Program the SoC GRF so the SD IO domain matches @voltage (one of the
 * IO_DOMAIN_* values above). Only RK3288 is handled; other chips log an
 * error. NOTE(review): the voltage-validation branches are elided here.
 */
1736 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1746 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1747 __FUNCTION__, mmc_hostname(host->mmc));
1750 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1751 __FUNCTION__, mmc_hostname(host->mmc));
/* RK3288: bit 7 selects the SDCARD IO voltage; bit 23 is the write mask. */
1755 if(cpu_is_rk3288()){
1756 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1757 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1761 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1762 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * Perform the actual signal-voltage switch: set the vmmc regulator,
 * switch the SoC IO domain, update UHS_REG's 1.8V enable bit, and poll
 * (after a ~5 ms settle) that the bit reads back as expected.
 * Only supported on controller IP >= 2.40a.
 * NOTE(review): interleaved lines (braces/returns) are elided in this dump.
 */
1766 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1767 struct mmc_ios *ios)
1770 unsigned int value,uhs_reg;
1773 * Signal Voltage Switching is only applicable for Host Controllers
1776 if (host->verid < DW_MMC_240A)
1779 uhs_reg = mci_readl(host, UHS_REG);
1780 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1781 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1783 switch (ios->signal_voltage) {
1784 case MMC_SIGNAL_VOLTAGE_330:
1785 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1787 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1788 /* regulator_put(host->vmmc); //to be done in remove function. */
1790 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1791 __func__, regulator_get_voltage(host->vmmc), ret);
1793 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1794 " failed\n", mmc_hostname(host->mmc));
1797 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1799 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1800 __FUNCTION__, mmc_hostname(host->mmc));
1802 /* set High-power mode */
1803 value = mci_readl(host, CLKENA);
1804 value &= ~SDMMC_CLKEN_LOW_PWR;
1805 mci_writel(host,CLKENA , value);
/* Clear the 1.8V enable bit for 3.3V signalling. */
1807 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1808 mci_writel(host,UHS_REG , uhs_reg);
1811 usleep_range(5000, 5500);
1813 /* 3.3V regulator output should be stable within 5 ms */
1814 uhs_reg = mci_readl(host, UHS_REG);
1815 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1818 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1819 mmc_hostname(host->mmc));
1822 case MMC_SIGNAL_VOLTAGE_180:
1824 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1825 /* regulator_put(host->vmmc);//to be done in remove function. */
1827 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1828 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1830 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1831 " failed\n", mmc_hostname(host->mmc));
1834 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1838 * Enable 1.8V Signal Enable in the Host Control2
1841 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1844 usleep_range(5000, 5500);
1845 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1846 __FUNCTION__,mmc_hostname(host->mmc));
1848 /* 1.8V regulator output should be stable within 5 ms */
1849 uhs_reg = mci_readl(host, UHS_REG);
1850 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1853 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1854 mmc_hostname(host->mmc));
/* 1.2V: regulator only; no UHS_REG bit to program here. */
1857 case MMC_SIGNAL_VOLTAGE_120:
1859 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1861 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1862 " failed\n", mmc_hostname(host->mmc));
1868 /* No signal voltage switch required */
/*
 * .start_signal_voltage_switch host op: thin wrapper that bails out on
 * pre-2.40a IP and otherwise delegates to
 * dw_mci_do_start_signal_voltage_switch().
 */
1874 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1875 struct mmc_ios *ios)
1877 struct dw_mci_slot *slot = mmc_priv(mmc);
1878 struct dw_mci *host = slot->host;
1881 if (host->verid < DW_MMC_240A)
1884 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * .execute_tuning host op: pick the tuning block pattern matching the
 * bus width and opcode (CMD21 HS200 / CMD19 SD), fill in the Rockchip
 * clock con_id for this controller instance, then delegate to the
 * SoC-specific tuning routine.
 */
1890 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1892 struct dw_mci_slot *slot = mmc_priv(mmc);
1893 struct dw_mci *host = slot->host;
1894 const struct dw_mci_drv_data *drv_data = host->drv_data;
1895 struct dw_mci_tuning_data tuning_data;
1898 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1899 if(cpu_is_rk3036() || cpu_is_rk312x())
1902 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1903 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1904 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1905 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1906 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1907 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1908 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1912 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1913 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1914 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1917 "Undefined command(%d) for tuning\n", opcode);
1922 /* Recommend sample phase and delayline
1923 Fixme: Mix-use these three controllers will cause
1926 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1927 tuning_data.con_id = 3;
1928 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1929 tuning_data.con_id = 1;
1931 tuning_data.con_id = 0;
1933 /* 0: driver, from host->devices
1934 1: sample, from devices->host
1936 tuning_data.tuning_type = 1;
1938 if (drv_data && drv_data->execute_tuning)
1939 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/* MMC core callback table for this host driver. */
1944 static const struct mmc_host_ops dw_mci_ops = {
1945 .request = dw_mci_request,
1946 .pre_req = dw_mci_pre_req,
1947 .post_req = dw_mci_post_req,
1948 .set_ios = dw_mci_set_ios,
1949 .get_ro = dw_mci_get_ro,
1950 .get_cd = dw_mci_get_cd,
1951 .set_sdio_status = dw_mci_set_sdio_status,
1952 .hw_reset = dw_mci_hw_reset,
1953 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1954 .execute_tuning = dw_mci_execute_tuning,
/* Voltage-switch ops only exist when UHS voltage switching is built in. */
1955 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1956 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
1957 .card_busy = dw_mci_card_busy,
/*
 * Enable or disable the host's interrupt line, tracking the current
 * state in host->irq_state so repeated calls with the same flag are
 * no-ops (enable_irq/disable_irq nest otherwise).
 */
1962 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
1964 unsigned long flags;
1969 local_irq_save(flags);
/* Only act on a state change to keep the enable/disable depth balanced. */
1970 if(host->irq_state != irqflag)
1972 host->irq_state = irqflag;
1975 enable_irq(host->irq);
1979 disable_irq(host->irq);
1982 local_irq_restore(flags);
/*
 * Post-data-phase cleanup: for write transfers (except CMD19 bus test),
 * translate latched data-status error bits into host->data->error, then
 * wait for the controller to leave the busy state.
 */
1986 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
1987 __releases(&host->lock)
1988 __acquires(&host->lock)
1990 if(DW_MCI_SEND_STATUS == host->dir_status){
1992 if( MMC_BUS_TEST_W != host->cmd->opcode){
1993 if(host->data_status & SDMMC_INT_DCRC)
1994 host->data->error = -EILSEQ;
1995 else if(host->data_status & SDMMC_INT_EBE)
1996 host->data->error = -ETIMEDOUT;
1998 dw_mci_wait_unbusy(host);
2001 dw_mci_wait_unbusy(host);
/*
 * Finish @mrq: stop the DTO timer, run data-end cleanup, clear the
 * slot's current request, start the next queued slot (or go idle), and
 * finally call mmc_request_done() with host->lock temporarily dropped.
 */
2006 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
2007 __releases(&host->lock)
2008 __acquires(&host->lock)
2010 struct dw_mci_slot *slot;
2011 struct mmc_host *prev_mmc = host->cur_slot->mmc;
2013 WARN_ON(host->cmd || host->data);
2015 del_timer_sync(&host->dto_timer);
2016 dw_mci_deal_data_end(host, mrq);
2019 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
2020 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
2022 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
2023 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
2025 host->cur_slot->mrq = NULL;
/* Another slot queued a request while we were busy: start it now. */
2027 if (!list_empty(&host->queue)) {
2028 slot = list_entry(host->queue.next,
2029 struct dw_mci_slot, queue_node);
2030 list_del(&slot->queue_node);
2031 dev_vdbg(host->dev, "list not empty: %s is next\n",
2032 mmc_hostname(slot->mmc));
2033 host->state = STATE_SENDING_CMD;
2034 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
2035 dw_mci_start_request(host, slot);
2037 dev_vdbg(host->dev, "list empty\n");
2038 host->state = STATE_IDLE;
/* Drop the lock around the core callback to avoid re-entry deadlock. */
2041 spin_unlock(&host->lock);
2042 mmc_request_done(prev_mmc, mrq);
2043 spin_lock(&host->lock);
/*
 * Harvest the command phase: copy the (possibly long) response registers
 * into cmd->resp[] and map latched status bits (RTO/RCRC/RESP_ERR) onto
 * cmd->error, managing the DTO timer along the way.
 * NOTE(review): interleaved lines (braces/else/retry logic) are elided
 * in this dump.
 */
2046 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
2048 u32 status = host->cmd_status;
2050 host->cmd_status = 0;
2052 /* Read the response from the card (up to 16 bytes) */
2053 if (cmd->flags & MMC_RSP_PRESENT) {
2054 if (cmd->flags & MMC_RSP_136) {
/* 136-bit response: RESP0..3 hold it least-significant-word first. */
2055 cmd->resp[3] = mci_readl(host, RESP0);
2056 cmd->resp[2] = mci_readl(host, RESP1);
2057 cmd->resp[1] = mci_readl(host, RESP2);
2058 cmd->resp[0] = mci_readl(host, RESP3);
2060 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
2061 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2063 cmd->resp[0] = mci_readl(host, RESP0);
2067 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2068 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
/* Response timeout: SDIO path handled separately above the timeout set. */
2072 if (status & SDMMC_INT_RTO)
2074 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2077 cmd->error = -ETIMEDOUT;
2078 del_timer_sync(&host->dto_timer);
2079 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2080 del_timer_sync(&host->dto_timer);
2081 cmd->error = -EILSEQ;
2082 }else if (status & SDMMC_INT_RESP_ERR){
2083 del_timer_sync(&host->dto_timer);
2088 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2089 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* Response-timeout retry accounting (except for CMD13 status polls). */
2092 del_timer_sync(&host->dto_timer);
2093 if(MMC_SEND_STATUS != cmd->opcode)
2094 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2095 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2096 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2100 /* newer ip versions need a delay between retries */
2101 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine, driven by pending_events set in the IRQ
 * handler. Loops over states (SENDING_CMD -> SENDING_DATA -> DATA_BUSY
 * -> SENDING_STOP / DATA_ERROR) until no further transition is made,
 * then stores the final state back into host->state.
 * NOTE(review): interleaved lines (braces/else/labels) are elided in
 * this dump; statement order is load-bearing — do not restructure
 * without the full original file.
 */
2107 static void dw_mci_tasklet_func(unsigned long priv)
2109 struct dw_mci *host = (struct dw_mci *)priv;
2110 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2111 struct mmc_data *data;
2112 struct mmc_command *cmd;
2113 enum dw_mci_state state;
2114 enum dw_mci_state prev_state;
2115 u32 status, cmd_flags;
2116 unsigned long timeout = 0;
2119 spin_lock(&host->lock);
2121 state = host->state;
2131 case STATE_SENDING_CMD:
2132 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2133 &host->pending_events))
2138 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2139 dw_mci_command_complete(host, cmd);
/* SBC (CMD23) finished cleanly: now issue the real data command. */
2140 if (cmd == host->mrq->sbc && !cmd->error) {
2141 prev_state = state = STATE_SENDING_CMD;
2142 __dw_mci_start_request(host, host->cur_slot,
2147 if (cmd->data && cmd->error) {
2148 del_timer_sync(&host->dto_timer); /* delete the timer for INT_DTO */
2149 dw_mci_stop_dma(host);
2152 send_stop_cmd(host, data);
2153 state = STATE_SENDING_STOP;
2159 send_stop_abort(host, data);
2160 state = STATE_SENDING_STOP;
2163 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2166 if (!host->mrq->data || cmd->error) {
2167 dw_mci_request_end(host, host->mrq);
2171 prev_state = state = STATE_SENDING_DATA;
2174 case STATE_SENDING_DATA:
2175 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2176 dw_mci_stop_dma(host);
2179 send_stop_cmd(host, data);
2181 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2182 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2183 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
/* Hand-rolled CMD12 via direct register writes (recovery path). */
2185 mci_writel(host, CMDARG, 0);
2187 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2188 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2190 if(host->mmc->hold_reg_flag)
2191 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2193 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll up to 500 ms for the controller to accept the command. */
2195 timeout = jiffies + msecs_to_jiffies(500);
2198 ret = time_before(jiffies, timeout);
2199 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2203 MMC_DBG_ERR_FUNC(host->mmc,
2204 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2205 __func__, mmc_hostname(host->mmc));
2208 send_stop_abort(host, data);
2210 state = STATE_DATA_ERROR;
2214 MMC_DBG_CMD_FUNC(host->mmc,
2215 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2216 prev_state,state, mmc_hostname(host->mmc));
2218 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2219 &host->pending_events))
2221 MMC_DBG_INFO_FUNC(host->mmc,
2222 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2223 prev_state,state,mmc_hostname(host->mmc));
2225 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2226 prev_state = state = STATE_DATA_BUSY;
2229 case STATE_DATA_BUSY:
2230 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2231 &host->pending_events))
2234 dw_mci_deal_data_end(host, host->mrq);
2235 del_timer_sync(&host->dto_timer); //delete the timer for INT_DTO
2236 MMC_DBG_INFO_FUNC(host->mmc,
2237 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2238 prev_state,state,mmc_hostname(host->mmc));
2241 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2242 status = host->data_status;
/* Classify the latched data-phase error bits into data->error. */
2244 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2245 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2246 MMC_DBG_ERR_FUNC(host->mmc,
2247 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2248 prev_state,state, status, mmc_hostname(host->mmc));
2250 if (status & SDMMC_INT_DRTO) {
2251 data->error = -ETIMEDOUT;
2252 } else if (status & SDMMC_INT_DCRC) {
2253 data->error = -EILSEQ;
2254 } else if (status & SDMMC_INT_EBE &&
2255 host->dir_status == DW_MCI_SEND_STATUS){
2257 * No data CRC status was returned.
2258 * The number of bytes transferred will
2259 * be exaggerated in PIO mode.
2261 data->bytes_xfered = 0;
2262 data->error = -ETIMEDOUT;
2271 * After an error, there may be data lingering
2272 * in the FIFO, so reset it - doing so
2273 * generates a block interrupt, hence setting
2274 * the scatter-gather pointer to NULL.
2276 dw_mci_fifo_reset(host);
/* Success path: all requested bytes are considered transferred. */
2278 data->bytes_xfered = data->blocks * data->blksz;
2283 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2284 prev_state,state,mmc_hostname(host->mmc));
2285 dw_mci_request_end(host, host->mrq);
2288 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2289 prev_state,state,mmc_hostname(host->mmc));
/* With SBC in use no CMD12 is needed: finish the request directly. */
2291 if (host->mrq->sbc && !data->error) {
2292 data->stop->error = 0;
2294 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2295 prev_state,state,mmc_hostname(host->mmc));
2297 dw_mci_request_end(host, host->mrq);
2301 prev_state = state = STATE_SENDING_STOP;
2303 send_stop_cmd(host, data);
2305 if (data->stop && !data->error) {
2306 /* stop command for open-ended transfer*/
2308 send_stop_abort(host, data);
2312 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2313 prev_state,state,mmc_hostname(host->mmc));
2315 case STATE_SENDING_STOP:
2316 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2319 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2320 prev_state, state, mmc_hostname(host->mmc));
2322 /* CMD error in data command */
2323 if (host->mrq->cmd->error && host->mrq->data) {
2324 dw_mci_fifo_reset(host);
2330 dw_mci_command_complete(host, host->mrq->stop);
2332 if (host->mrq->stop)
2333 dw_mci_command_complete(host, host->mrq->stop);
2335 host->cmd_status = 0;
2338 dw_mci_request_end(host, host->mrq);
/* Error path parks here until the transfer interrupt has drained. */
2341 case STATE_DATA_ERROR:
2342 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2343 &host->pending_events))
2346 state = STATE_DATA_BUSY;
2349 } while (state != prev_state);
2351 host->state = state;
2353 spin_unlock(&host->lock);
2357 /* push final bytes to part_buf, only use during push */
/* Stash the trailing @cnt bytes (< one FIFO word) for the next push. */
2358 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2360 memcpy((void *)&host->part_buf, buf, cnt);
2361 host->part_buf_count = cnt;
2364 /* append bytes to part_buf, only use during push */
/*
 * Top up part_buf toward a full FIFO word (1 << data_shift bytes).
 * Appends at most the remaining space; returns the byte count consumed.
 */
2365 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2367 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2368 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2369 host->part_buf_count += cnt;
2373 /* pull first bytes from part_buf, only use during pull */
/*
 * Drain up to @cnt bytes previously buffered in part_buf into @buf,
 * advancing part_buf_start; returns the byte count delivered.
 */
2374 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2376 cnt = min(cnt, (int)host->part_buf_count);
2378 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2380 host->part_buf_count -= cnt;
2381 host->part_buf_start += cnt;
2386 /* pull final bytes from the part_buf, assuming it's just been filled */
/* Copy the first @cnt bytes out and mark the rest of the word buffered. */
2387 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2389 memcpy(buf, &host->part_buf, cnt);
2390 host->part_buf_start = cnt;
2391 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO push for a 16-bit FIFO: flush any partial word, bounce misaligned
 * buffers through an aligned scratch array (when unaligned access is not
 * efficient), write whole u16 words, and stash any trailing byte —
 * flushing it only at the very end of the transfer.
 * NOTE(review): interleaved lines (braces/else) are elided in this dump.
 */
2394 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2396 struct mmc_data *data = host->data;
2399 /* try and push anything in the part_buf */
2400 if (unlikely(host->part_buf_count)) {
2401 int len = dw_mci_push_part_bytes(host, buf, cnt);
2404 if (host->part_buf_count == 2) {
2405 mci_writew(host, DATA(host->data_offset),
2407 host->part_buf_count = 0;
2410 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2411 if (unlikely((unsigned long)buf & 0x1)) {
2413 u16 aligned_buf[64];
2414 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2415 int items = len >> 1;
2417 /* memcpy from input buffer into aligned buffer */
2418 memcpy(aligned_buf, buf, len);
2421 /* push data from aligned buffer into fifo */
2422 for (i = 0; i < items; ++i)
2423 mci_writew(host, DATA(host->data_offset),
/* Aligned fast path: stream u16 words straight into the FIFO. */
2430 for (; cnt >= 2; cnt -= 2)
2431 mci_writew(host, DATA(host->data_offset), *pdata++);
2434 /* put anything remaining in the part_buf */
2436 dw_mci_set_part_bytes(host, buf, cnt);
2437 /* Push data if we have reached the expected data length */
2438 if ((data->bytes_xfered + init_cnt) ==
2439 (data->blksz * data->blocks))
2440 mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit FIFO: mirror of dw_mci_push_data16 — bounce
 * misaligned destinations through an aligned scratch array, read whole
 * u16 words, and keep a trailing byte in part_buf16 for the next call.
 * NOTE(review): interleaved lines (braces/else) are elided in this dump.
 */
2445 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2447 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2448 if (unlikely((unsigned long)buf & 0x1)) {
2450 /* pull data from fifo into aligned buffer */
2451 u16 aligned_buf[64];
2452 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2453 int items = len >> 1;
2455 for (i = 0; i < items; ++i)
2456 aligned_buf[i] = mci_readw(host,
2457 DATA(host->data_offset));
2458 /* memcpy from aligned buffer into output buffer */
2459 memcpy(buf, aligned_buf, len);
/* Aligned fast path: stream u16 words straight out of the FIFO. */
2467 for (; cnt >= 2; cnt -= 2)
2468 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Odd trailing byte: read one more word and buffer the remainder. */
2472 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2473 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit FIFO: same structure as dw_mci_push_data16 but
 * with u32 words and 4-byte alignment handling.
 * NOTE(review): interleaved lines (braces/else) are elided in this dump.
 */
2477 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2479 struct mmc_data *data = host->data;
2482 /* try and push anything in the part_buf */
2483 if (unlikely(host->part_buf_count)) {
2484 int len = dw_mci_push_part_bytes(host, buf, cnt);
2487 if (host->part_buf_count == 4) {
2488 mci_writel(host, DATA(host->data_offset),
2490 host->part_buf_count = 0;
2493 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2494 if (unlikely((unsigned long)buf & 0x3)) {
2496 u32 aligned_buf[32];
2497 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2498 int items = len >> 2;
2500 /* memcpy from input buffer into aligned buffer */
2501 memcpy(aligned_buf, buf, len);
2504 /* push data from aligned buffer into fifo */
2505 for (i = 0; i < items; ++i)
2506 mci_writel(host, DATA(host->data_offset),
/* Aligned fast path: stream u32 words straight into the FIFO. */
2513 for (; cnt >= 4; cnt -= 4)
2514 mci_writel(host, DATA(host->data_offset), *pdata++);
2517 /* put anything remaining in the part_buf */
2519 dw_mci_set_part_bytes(host, buf, cnt);
2520 /* Push data if we have reached the expected data length */
2521 if ((data->bytes_xfered + init_cnt) ==
2522 (data->blksz * data->blocks))
2523 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit FIFO: mirror of dw_mci_push_data32; trailing
 * bytes are kept in part_buf32 for the next call.
 * NOTE(review): interleaved lines (braces/else) are elided in this dump.
 */
2528 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2530 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2531 if (unlikely((unsigned long)buf & 0x3)) {
2533 /* pull data from fifo into aligned buffer */
2534 u32 aligned_buf[32];
2535 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2536 int items = len >> 2;
2538 for (i = 0; i < items; ++i)
2539 aligned_buf[i] = mci_readl(host,
2540 DATA(host->data_offset));
2541 /* memcpy from aligned buffer into output buffer */
2542 memcpy(buf, aligned_buf, len);
/* Aligned fast path: stream u32 words straight out of the FIFO. */
2550 for (; cnt >= 4; cnt -= 4)
2551 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Trailing bytes: read one more word and buffer the remainder. */
2555 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2556 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_push_data64() - write @cnt bytes from @buf into the data FIFO
 * using 64-bit register writes; mirror of dw_mci_push_data32() at
 * quad-word width (part_buf flushes at 8 bytes, alignment mask 0x7).
 * NOTE(review): interior lines elided in this excerpt.
 */
2560 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2562 struct mmc_data *data = host->data;
2565 /* try and push anything in the part_buf */
2566 if (unlikely(host->part_buf_count)) {
2567 int len = dw_mci_push_part_bytes(host, buf, cnt);
/* part_buf now holds a full 64-bit word: flush it to the FIFO */
2571 if (host->part_buf_count == 8) {
2572 mci_writeq(host, DATA(host->data_offset),
2574 host->part_buf_count = 0;
2577 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2578 if (unlikely((unsigned long)buf & 0x7)) {
2580 u64 aligned_buf[16];
2581 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2582 int items = len >> 3;
2584 /* memcpy from input buffer into aligned buffer */
2585 memcpy(aligned_buf, buf, len);
2588 /* push data from aligned buffer into fifo */
2589 for (i = 0; i < items; ++i)
2590 mci_writeq(host, DATA(host->data_offset),
/* fast path: source is 64-bit aligned */
2597 for (; cnt >= 8; cnt -= 8)
2598 mci_writeq(host, DATA(host->data_offset), *pdata++);
2601 /* put anything remaining in the part_buf */
2603 dw_mci_set_part_bytes(host, buf, cnt);
2604 /* Push data if we have reached the expected data length */
2605 if ((data->bytes_xfered + init_cnt) ==
2606 (data->blksz * data->blocks))
2607 mci_writeq(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data64() - drain @cnt bytes from the data FIFO into @buf
 * using 64-bit register reads; mirror of dw_mci_pull_data32().
 * NOTE(review): interior lines elided in this excerpt.
 */
2612 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2614 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2615 if (unlikely((unsigned long)buf & 0x7)) {
2617 /* pull data from fifo into aligned buffer */
2618 u64 aligned_buf[16];
2619 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2620 int items = len >> 3;
2622 for (i = 0; i < items; ++i)
2623 aligned_buf[i] = mci_readq(host,
2624 DATA(host->data_offset));
2625 /* memcpy from aligned buffer into output buffer */
2626 memcpy(buf, aligned_buf, len);
/* fast path: destination is 64-bit aligned */
2634 for (; cnt >= 8; cnt -= 8)
2635 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* trailing 1-7 bytes are staged in host->part_buf then copied out */
2639 host->part_buf = mci_readq(host, DATA(host->data_offset));
2640 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_pull_data() - common entry for FIFO reads: first consume any bytes
 * left over in host->part_buf from the previous chunk, then dispatch to the
 * width-specific pull routine selected at probe time (host->pull_data).
 * Returns early when the partial bytes alone satisfy @cnt.
 */
2644 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2648 /* get remaining partial bytes */
2649 len = dw_mci_pull_part_bytes(host, buf, cnt);
2650 if (unlikely(len == cnt))
2655 /* get the rest of the data */
2656 host->pull_data(host, buf, cnt);
/*
 * dw_mci_read_data_pio() - PIO receive path: walk the request's scatterlist
 * via sg_miter and pull whatever the FIFO currently holds into each segment.
 * @dto: true when called from the DATA_OVER interrupt, in which case the
 *       loop also continues while the FIFO still reports a non-zero count.
 * Runs in IRQ context (called from dw_mci_interrupt()).
 */
2659 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2661 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2663 unsigned int offset;
2664 struct mmc_data *data = host->data;
2665 int shift = host->data_shift;
2668 unsigned int remain, fcnt;
/* Rockchip sanity guard: the bus reference count should never be zero here */
2670 if(!host->mmc->bus_refs){
2671 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2675 if (!sg_miter_next(sg_miter))
2678 host->sg = sg_miter->piter.sg;
2679 buf = sg_miter->addr;
2680 remain = sg_miter->length;
/* bytes available = FIFO fill count scaled by bus width + staged bytes */
2684 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2685 << shift) + host->part_buf_count;
2686 len = min(remain, fcnt);
2689 dw_mci_pull_data(host, (void *)(buf + offset), len);
2690 data->bytes_xfered += len;
2695 sg_miter->consumed = offset;
2696 status = mci_readl(host, MINTSTS);
2697 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2698 /* if the RXDR is ready read again */
2699 } while ((status & SDMMC_INT_RXDR) ||
2700 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2703 if (!sg_miter_next(sg_miter))
2705 sg_miter->consumed = 0;
2707 sg_miter_stop(sg_miter);
/* "done" path: stop iteration and mark the transfer finished */
2711 sg_miter_stop(sg_miter);
2715 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_write_data_pio() - PIO transmit path: walk the request's
 * scatterlist via sg_miter and push data into the FIFO while the TXDR
 * (transmit-data-request) interrupt keeps firing.
 * Runs in IRQ context (called from dw_mci_interrupt()).
 */
2718 static void dw_mci_write_data_pio(struct dw_mci *host)
2720 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2722 unsigned int offset;
2723 struct mmc_data *data = host->data;
2724 int shift = host->data_shift;
2727 unsigned int fifo_depth = host->fifo_depth;
2728 unsigned int remain, fcnt;
/* Rockchip sanity guard: the bus reference count should never be zero here */
2730 if(!host->mmc->bus_refs){
2731 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2736 if (!sg_miter_next(sg_miter))
2739 host->sg = sg_miter->piter.sg;
2740 buf = sg_miter->addr;
2741 remain = sg_miter->length;
/* free FIFO space = (depth - fill count) scaled by width, minus staged bytes */
2745 fcnt = ((fifo_depth -
2746 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2747 << shift) - host->part_buf_count;
2748 len = min(remain, fcnt);
2751 host->push_data(host, (void *)(buf + offset), len);
2752 data->bytes_xfered += len;
2757 sg_miter->consumed = offset;
2758 status = mci_readl(host, MINTSTS);
2759 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2760 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2763 if (!sg_miter_next(sg_miter))
2765 sg_miter->consumed = 0;
2767 sg_miter_stop(sg_miter);
/* "done" path: stop iteration and mark the transfer finished */
2771 sg_miter_stop(sg_miter);
2775 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_cmd_interrupt() - record a command-phase interrupt @status, arm a
 * data-over (DTO) watchdog timer scaled by the transfer size, and kick the
 * tasklet to finish command processing.
 * The timeout is ~4 s per 2 MiB of BYTCNT plus a retry-derived slack;
 * the "8s" figure in the original comment appears to be a rough upper
 * bound — TODO(review): confirm intended budget.
 */
2778 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2780 u32 multi, unit = SZ_2M;
/* keep the first reported status; don't overwrite an earlier error */
2782 if (!host->cmd_status)
2783 host->cmd_status = status;
/* CMD12 (stop) carries no data, so no DTO watchdog is needed for it */
2788 if ((MMC_STOP_TRANSMISSION != host->cmd->opcode)) {
2789 multi = (mci_readl(host, BYTCNT) / unit) +
2790 ((mci_readl(host, BYTCNT) % unit) ? 1 :0 ) +
2791 ((host->cmd->retries > 2) ? 2 : host->cmd->retries);
2792 /* Max limit time: 8s for dto */
2793 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(4000 * multi));
2798 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2799 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_interrupt() - top-level IRQ handler.  Reads the masked interrupt
 * status (MINTSTS), acknowledges each condition in RINTSTS, and dispatches:
 * command/data errors -> tasklet; DATA_OVER/RXDR/TXDR -> PIO paths;
 * CMD_DONE/VSI -> dw_mci_cmd_interrupt(); card detect -> card workqueue;
 * per-slot SDIO interrupts -> mmc_signal_sdio_irq(); and, on internal-DMA
 * SoCs, IDMAC completion via IDSTS.
 */
2802 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2804 struct dw_mci *host = dev_id;
2805 u32 pending, sdio_int;
2808 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2811 * DTO fix - version 2.10a and below, and only if internal DMA
/* quirk: synthesize DATA_OVER when the FIFO count says the data is done */
2814 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2816 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2817 pending |= SDMMC_INT_DATA_OVER;
2821 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2822 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2823 host->cmd_status = pending;
2825 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2826 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2828 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2831 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2832 /* if there is an error report DATA_ERROR */
2833 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2834 host->data_status = pending;
2836 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2838 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2839 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2840 tasklet_schedule(&host->tasklet);
2843 if (pending & SDMMC_INT_DATA_OVER) {
2844 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2845 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2846 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2847 if (!host->data_status)
2848 host->data_status = pending;
/* on receive, drain whatever is left in the FIFO before completing */
2850 if (host->dir_status == DW_MCI_RECV_STATUS) {
2851 if (host->sg != NULL)
2852 dw_mci_read_data_pio(host, true);
2854 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2855 tasklet_schedule(&host->tasklet);
2858 if (pending & SDMMC_INT_RXDR) {
2859 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2860 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2861 dw_mci_read_data_pio(host, false);
2864 if (pending & SDMMC_INT_TXDR) {
2865 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2866 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2867 dw_mci_write_data_pio(host);
/* VSI: voltage-switch interrupt, treated like command completion */
2870 if (pending & SDMMC_INT_VSI) {
2871 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2872 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2873 dw_mci_cmd_interrupt(host, pending);
2876 if (pending & SDMMC_INT_CMD_DONE) {
2877 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc))
2878 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2879 dw_mci_cmd_interrupt(host, pending);
/* card insert/remove: hold a wake lock and defer to the card workqueue */
2882 if (pending & SDMMC_INT_CD) {
2883 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2884 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
2885 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2886 queue_work(host->card_workqueue, &host->card_work);
2889 if (pending & SDMMC_INT_HLE) {
2890 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2891 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2895 /* Handle SDIO Interrupts */
2896 for (i = 0; i < host->num_slots; i++) {
2897 struct dw_mci_slot *slot = host->slot[i];
/* IP >= 2.40a moved per-slot SDIO interrupt bits up by 8 */
2899 if (host->verid < DW_MMC_240A)
2900 sdio_int = SDMMC_INT_SDIO(i);
2902 sdio_int = SDMMC_INT_SDIO(i + 8);
2904 if (pending & sdio_int) {
2905 mci_writel(host, RINTSTS, sdio_int);
2906 mmc_signal_sdio_irq(slot->mmc);
2912 #ifdef CONFIG_MMC_DW_IDMAC
2913 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2914 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2915 /* Handle DMA interrupts */
2916 pending = mci_readl(host, IDSTS);
2917 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2918 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2919 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2920 host->dma_ops->complete((void *)host);
/*
 * dw_mci_work_routine_card() - card-detect workqueue handler.  For each
 * slot, loops until the observed card-present state matches the recorded
 * state; on every change it tears down any in-flight request with
 * -ENOMEDIUM, resets the FIFO (and IDMAC on internal-DMA SoCs), and finally
 * tells the MMC core to rescan via mmc_detect_change().
 * NOTE(review): "reqeuest" typo below is inside a runtime debug string and
 * is left untouched here.
 */
2928 static void dw_mci_work_routine_card(struct work_struct *work)
2930 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2933 for (i = 0; i < host->num_slots; i++) {
2934 struct dw_mci_slot *slot = host->slot[i];
2935 struct mmc_host *mmc = slot->mmc;
2936 struct mmc_request *mrq;
2939 present = dw_mci_get_cd(mmc);
2940 while (present != slot->last_detect_state) {
2941 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2942 present ? "inserted" : "removed");
2943 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
2944 present ? "inserted" : "removed.", mmc_hostname(mmc));
/* wake the system so userspace can react to the hotplug event */
2946 rk_send_wakeup_key();//wake up system
2947 spin_lock_bh(&host->lock);
2949 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2950 /* Card change detected */
2951 slot->last_detect_state = present;
2953 /* Clean up queue if present */
/* the active request: fail the phase it is currently in */
2956 if (mrq == host->mrq) {
2960 switch (host->state) {
2963 case STATE_SENDING_CMD:
2964 mrq->cmd->error = -ENOMEDIUM;
2968 case STATE_SENDING_DATA:
2969 mrq->data->error = -ENOMEDIUM;
2970 dw_mci_stop_dma(host);
2972 case STATE_DATA_BUSY:
2973 case STATE_DATA_ERROR:
2974 if (mrq->data->error == -EINPROGRESS)
2975 mrq->data->error = -ENOMEDIUM;
2979 case STATE_SENDING_STOP:
2980 mrq->stop->error = -ENOMEDIUM;
2984 dw_mci_request_end(host, mrq);
/* a queued (not yet started) request: fail every part outright */
2986 list_del(&slot->queue_node);
2987 mrq->cmd->error = -ENOMEDIUM;
2989 mrq->data->error = -ENOMEDIUM;
2991 mrq->stop->error = -ENOMEDIUM;
2993 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
2994 mrq->cmd->opcode, mmc_hostname(mmc));
/* drop the lock while calling back into the MMC core */
2996 spin_unlock(&host->lock);
2997 mmc_request_done(slot->mmc, mrq);
2998 spin_lock(&host->lock);
3002 /* Power down slot */
3004 /* Clear down the FIFO */
3005 dw_mci_fifo_reset(host);
3006 #ifdef CONFIG_MMC_DW_IDMAC
3007 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3008 dw_mci_idmac_reset(host);
3013 spin_unlock_bh(&host->lock);
/* re-sample: the card may have bounced while we held the lock */
3015 present = dw_mci_get_cd(mmc);
3018 mmc_detect_change(slot->mmc,
3019 msecs_to_jiffies(host->pdata->detect_delay_ms));
3024 /* given a slot id, find out the device node representing that slot */
/*
 * Walks the controller's DT children and matches the child whose "reg"
 * property equals @slot.  Returns NULL when @dev has no OF node or no
 * child matches (return statements are outside this excerpt).
 */
3025 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3027 struct device_node *np;
3031 if (!dev || !dev->of_node)
3034 for_each_child_of_node(dev->of_node, np) {
3035 addr = of_get_property(np, "reg", &len);
/* "reg" must exist and hold at least one 32-bit cell */
3036 if (!addr || (len < sizeof(int)))
3038 if (be32_to_cpup(addr) == slot)
/* Map of per-slot DT property names to driver quirk flag bits. */
3044 static struct dw_mci_of_slot_quirks {
3047 } of_slot_quirks[] = {
3049 .quirk = "disable-wp",
3050 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Collect the quirk bits whose DT properties are present on @slot's node.
 * Returns the OR of matching of_slot_quirks[] ids (0 if none).
 */
3054 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3056 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3061 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3062 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3063 quirks |= of_slot_quirks[idx].id;
3068 /* find out bus-width for a given slot */
/*
 * Reads the "bus-width" DT property from the controller node (note: the
 * per-slot lookup is commented out — the host node is used instead) and
 * falls back to a default with a warning when absent.
 */
3069 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3071 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3077 if (of_property_read_u32(np, "bus-width", &bus_wd))
3078 dev_err(dev, "bus-width property not found, assuming width"
3084 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Looks up the optional "pwr-gpios" DT property on the controller node,
 * requests the GPIO and drives it low (power enable asserted).  A missing
 * property is not an error.
 */
3085 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3087 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3093 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3095 /* Having a missing entry is valid; return silently */
3096 if (!gpio_is_valid(gpio))
3099 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3100 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/* active-low enable: driving 0 turns slot power on */
3104 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3110 /* find the write protect gpio for a given slot; or -1 if none specified */
/*
 * Looks up the optional "wp-gpios" DT property on the slot node and
 * requests it.  A missing property is not an error.
 */
3111 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3113 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3119 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3121 /* Having a missing entry is valid; return silently */
3122 if (!gpio_is_valid(gpio))
3125 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3126 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3133 /* find the cd gpio for a given slot */
/*
 * Looks up the optional "cd-gpios" DT property on the controller node and
 * registers it as the card-detect GPIO with the MMC core (debounce 0).
 * A missing property is not an error.
 */
3134 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3135 struct mmc_host *mmc)
3137 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3143 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3145 /* Having a missing entry is valid; return silently */
3146 if (!gpio_is_valid(gpio))
3149 if (mmc_gpio_request_cd(mmc, gpio, 0))
3150 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/*
 * dw_mci_gpio_cd_irqt() - threaded IRQ handler for the GPIO card-detect
 * pin: notifies the MMC core of a (debounced, 200 ms) card change and
 * queues the driver's own card workqueue for slot cleanup.
 */
3153 static irqreturn_t dw_mci_gpio_cd_irqt(int irq, void *dev_id)
3155 struct mmc_host *mmc = dev_id;
3156 struct dw_mci_slot *slot = mmc_priv(mmc);
3157 struct dw_mci *host = slot->host;
3160 if (mmc->ops->card_event)
3161 mmc->ops->card_event(mmc);
3163 mmc_detect_change(mmc, msecs_to_jiffies(200));
3166 queue_work(host->card_workqueue, &host->card_work);
/*
 * dw_mci_of_set_cd_gpio_irq() - wire the card-detect @gpio to a threaded
 * interrupt (both edges, oneshot) handled by dw_mci_gpio_cd_irqt().
 * Silently does nothing for an invalid GPIO; logs errors when the GPIO
 * cannot be converted to an IRQ or the request fails.
 */
3170 static void dw_mci_of_set_cd_gpio_irq(struct device *dev, u32 gpio,
3171 struct mmc_host *mmc)
3173 struct dw_mci_slot *slot = mmc_priv(mmc);
3174 struct dw_mci *host = slot->host;
3178 /* Having a missing entry is valid; return silently */
3179 if (!gpio_is_valid(gpio))
3182 irq = gpio_to_irq(gpio);
3184 ret = devm_request_threaded_irq(&mmc->class_dev, irq,
3185 NULL, dw_mci_gpio_cd_irqt,
3186 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
3190 dev_err(host->dev, "Request cd-gpio %d interrupt error!\n", gpio);
3193 dev_err(host->dev, "Cannot convert gpio %d to irq!\n", gpio);
/*
 * dw_mci_of_free_cd_gpio_irq() - undo dw_mci_of_set_cd_gpio_irq(): release
 * the card-detect IRQ and the GPIO itself.  No-op for an invalid GPIO.
 */
3197 static void dw_mci_of_free_cd_gpio_irq(struct device *dev, u32 gpio,
3198 struct mmc_host *mmc)
3200 if (!gpio_is_valid(gpio))
3203 if (gpio_to_irq(gpio) >= 0) {
3204 devm_free_irq(&mmc->class_dev, gpio_to_irq(gpio), mmc);
3205 devm_gpio_free(&mmc->class_dev, gpio);
3208 #else /* CONFIG_OF */
/* Stubs used when the kernel is built without device-tree support:
 * no quirks, default bus width, no slot node, no WP/CD GPIOs. */
3209 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3213 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3217 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3221 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3225 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3226 struct mmc_host *mmc)
3230 #endif /* CONFIG_OF */
/*
 * dw_mci_init_slot() - allocate and register the mmc_host for slot @id.
 * Sequence visible in this excerpt: alloc host, read DT clock limits and
 * card-type restrictions, set OCR mask and caps (platform data, per-SoC
 * drv_data, DT properties), size request limits (IDMAC vs external DMA vs
 * PIO), acquire power-enable / vmmc / WP / CD resources, mmc_add_host(),
 * then apply pinctrl default state (except for eMMC) and debugfs.
 * Returns 0 on success; error paths free the CD GPIO IRQ.
 */
3232 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3234 struct mmc_host *mmc;
3235 struct dw_mci_slot *slot;
3236 const struct dw_mci_drv_data *drv_data = host->drv_data;
3241 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3245 slot = mmc_priv(mmc);
3249 host->slot[id] = slot;
3252 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3254 mmc->ops = &dw_mci_ops;
/* "clock-freq-min-max" DT property overrides the driver defaults */
3256 if (of_property_read_u32_array(host->dev->of_node,
3257 "clock-freq-min-max", freq, 2)) {
3258 mmc->f_min = DW_MCI_FREQ_MIN;
3259 mmc->f_max = DW_MCI_FREQ_MAX;
3261 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3262 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3264 mmc->f_min = freq[0];
3265 mmc->f_max = freq[1];
3267 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__, __FUNCTION__,
3268 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3271 printk("%s : Rockchip specific MHSC: %s\n", mmc_hostname(mmc), RK_SDMMC_DRIVER_VERSION);
/* Rockchip extension: restrict the slot to one card type via DT */
3273 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3274 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3275 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3276 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3277 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3278 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3280 if (of_find_property(host->dev->of_node, "supports-tSD", NULL))
3281 mmc->restrict_caps |= RESTRICT_CARD_TYPE_TSD;
3284 /* We assume only low-level chip use gpio_cd */
3285 if (cpu_is_rk312x() &&
3287 (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)) {
3288 slot->cd_gpio = of_get_named_gpio(host->dev->of_node, "cd-gpios", 0);
3289 if (gpio_is_valid(slot->cd_gpio)) {
3290 /* Request gpio int for card detection */
3291 dw_mci_of_set_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
3293 slot->cd_gpio = -ENODEV;
3294 dev_err(host->dev, "failed to get your cd-gpios!\n");
3298 if (host->pdata->get_ocr)
3299 mmc->ocr_avail = host->pdata->get_ocr(id);
/* default OCR mask: advertise every voltage range 1.65-3.6 V */
3302 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3303 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3304 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3305 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3309 * Start with slot power disabled, it will be enabled when a card
3312 if (host->pdata->setpower)
3313 host->pdata->setpower(id, 0);
3315 if (host->pdata->caps)
3316 mmc->caps = host->pdata->caps;
3318 if (host->pdata->pm_caps)
3319 mmc->pm_caps = host->pdata->pm_caps;
/* controller index: DT alias "mshc" preferred, platform-device id fallback */
3321 if (host->dev->of_node) {
3322 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3326 ctrl_id = to_platform_device(host->dev)->id;
3328 if (drv_data && drv_data->caps)
3329 mmc->caps |= drv_data->caps[ctrl_id];
3330 if (drv_data && drv_data->hold_reg_flag)
3331 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3333 /* set the compatibility of driver. */
3334 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3335 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3337 if (host->pdata->caps2)
3338 mmc->caps2 = host->pdata->caps2;
/* bus width: platform callback first, then DT, else driver default */
3340 if (host->pdata->get_bus_wd)
3341 bus_width = host->pdata->get_bus_wd(slot->id);
3342 else if (host->dev->of_node)
3343 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3347 switch (bus_width) {
3349 mmc->caps |= MMC_CAP_8_BIT_DATA;
/* fallthrough: 8-bit hosts also advertise 4-bit */
3351 mmc->caps |= MMC_CAP_4_BIT_DATA;
3354 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3355 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3356 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3357 mmc->caps |= MMC_CAP_SDIO_IRQ;
3358 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3359 mmc->caps |= MMC_CAP_HW_RESET;
3360 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3361 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3362 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3363 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3364 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3365 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3366 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3367 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3369 /*Assign pm_caps pass to pm_flags*/
3370 mmc->pm_flags = mmc->pm_caps;
3372 if (host->pdata->blk_settings) {
3373 mmc->max_segs = host->pdata->blk_settings->max_segs;
3374 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3375 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3376 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3377 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3379 /* Useful defaults if platform data is unset. */
3380 #ifdef CONFIG_MMC_DW_IDMAC
3381 mmc->max_segs = host->ring_size;
3382 mmc->max_blk_size = 65536;
3383 mmc->max_blk_count = host->ring_size;
3384 mmc->max_seg_size = 0x1000;
3385 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
/* RK3036/RK312x use an external DMAC; override the IDMAC sizing above */
3386 if(cpu_is_rk3036() || cpu_is_rk312x()){
3387 /* fixup for external dmac setting */
3389 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3390 mmc->max_blk_count = 65535;
3391 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3392 mmc->max_seg_size = mmc->max_req_size;
/* PIO fallback limits (no DMA configured) */
3396 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3397 mmc->max_blk_count = 512;
3398 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3399 mmc->max_seg_size = mmc->max_req_size;
3400 #endif /* CONFIG_MMC_DW_IDMAC */
3404 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3406 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* only SD slots get a vmmc supply here */
3411 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3412 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3416 if (IS_ERR(host->vmmc)) {
3417 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3420 ret = regulator_enable(host->vmmc);
3423 "failed to enable regulator: %d\n", ret);
3430 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
/* SDIO cards are treated as initially absent until probed */
3432 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3433 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3435 ret = mmc_add_host(mmc);
3439 /* Pinctrl set default iomux state to fucntion port.
3440 * Fixme: DON'T TOUCH EMMC SETTING!
3442 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3444 host->pinctrl = devm_pinctrl_get(host->dev);
3445 if(IS_ERR(host->pinctrl)){
3446 printk("%s: Warning : No pinctrl used!\n",mmc_hostname(host->mmc));
3448 host->pins_idle= pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_IDLE);
/* NOTE(review): this checks pins_default but pins_idle was just looked
 * up — looks like a copy/paste slip; cannot verify or fix from this
 * lossy excerpt.  TODO confirm against full source. */
3449 if(IS_ERR(host->pins_default)){
3450 printk("%s: Warning : No IDLE pinctrl matched!\n", mmc_hostname(host->mmc));
3454 if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3455 printk("%s: Warning : Idle pinctrl setting failed!\n", mmc_hostname(host->mmc));
3458 host->pins_default = pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_DEFAULT);
3459 if(IS_ERR(host->pins_default)){
3460 printk("%s: Warning : No default pinctrl matched!\n", mmc_hostname(host->mmc));
3464 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3465 printk("%s: Warning : Default pinctrl setting failed!\n", mmc_hostname(host->mmc));
3471 #if defined(CONFIG_DEBUG_FS)
3472 dw_mci_init_debugfs(slot);
3475 /* Card initially undetected */
3476 slot->last_detect_state = 1;
/* error path: release the CD GPIO interrupt requested earlier */
3481 if (gpio_is_valid(slot->cd_gpio))
3482 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio,host->mmc);
/*
 * dw_mci_cleanup_slot() - tear down slot @id: run the platform exit hook,
 * unregister the mmc_host from the core, clear the host's slot pointer,
 * and free the mmc_host allocation.
 */
3487 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3489 /* Shutdown detect IRQ */
3490 if (slot->host->pdata->exit)
3491 slot->host->pdata->exit(id);
3493 /* Debugfs stuff is cleaned up by mmc core */
3494 mmc_remove_host(slot->mmc);
3495 slot->host->slot[id] = NULL;
3496 mmc_free_host(slot->mmc);
/*
 * dw_mci_init_dma() - allocate the descriptor page (sg_cpu/sg_dma) and
 * select the DMA backend: external DMAC ops on RK3036/RK312x, otherwise
 * the internal IDMAC ops (under CONFIG_MMC_DW_IDMAC).  Falls back to PIO
 * when the selected ops are incomplete or their init fails.
 */
3499 static void dw_mci_init_dma(struct dw_mci *host)
3501 /* Alloc memory for sg translation */
3502 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3503 &host->sg_dma, GFP_KERNEL);
3504 if (!host->sg_cpu) {
3505 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3510 /* Determine which DMA interface to use */
3511 #if defined(CONFIG_MMC_DW_IDMAC)
3512 if(cpu_is_rk3036() || cpu_is_rk312x()){
3513 host->dma_ops = &dw_mci_edmac_ops;
3514 dev_info(host->dev, "Using external DMA controller.\n");
3516 host->dma_ops = &dw_mci_idmac_ops;
3517 dev_info(host->dev, "Using internal DMA controller.\n");
/* all four ops must be present before DMA is considered usable */
3524 if (host->dma_ops->init && host->dma_ops->start &&
3525 host->dma_ops->stop && host->dma_ops->cleanup) {
3526 if (host->dma_ops->init(host)) {
3527 dev_err(host->dev, "%s: Unable to initialize "
3528 "DMA Controller.\n", __func__);
3532 dev_err(host->dev, "DMA initialization not found.\n");
3540 dev_info(host->dev, "Using PIO mode.\n");
/*
 * dw_mci_ctrl_reset() - set @reset bit(s) in CTRL and poll (up to 500 ms)
 * for the hardware to clear them.  Returns whether the reset completed
 * before the timeout (return statements lie outside this excerpt).
 */
3545 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3547 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3550 ctrl = mci_readl(host, CTRL);
3552 mci_writel(host, CTRL, ctrl);
3554 /* wait till resets clear */
3556 ctrl = mci_readl(host, CTRL);
3557 if (!(ctrl & reset))
3559 } while (time_before(jiffies, timeout));
3562 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * dw_mci_fifo_reset() - reset just the data FIFO.  Stops the sg_miter
 * first because the reset raises an interrupt and invalidates the
 * in-progress scatterlist walk.
 */
3568 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3571 * Reseting generates a block interrupt, hence setting
3572 * the scatter-gather pointer to NULL.
3575 sg_miter_stop(&host->sg_miter);
3579 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/*
 * dw_mci_ctrl_all_reset() - full controller reset: FIFO, controller core
 * and DMA reset bits together.
 */
3582 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3584 return dw_mci_ctrl_reset(host,
3585 SDMMC_CTRL_FIFO_RESET |
3587 SDMMC_CTRL_DMA_RESET);
/* Map of controller-level DT property names to host quirk flag bits. */
3592 static struct dw_mci_of_quirks {
3597 .quirk = "broken-cd",
3598 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * dw_mci_parse_dt() - build a dw_mci_board platform-data structure from
 * the controller's device-tree node: slot count, quirks, FIFO depth,
 * card-detect delay, bus clock, per-SoC drv_data->parse_dt hook, and the
 * various capability properties.  Returns the allocated pdata or an
 * ERR_PTR on allocation/parse failure.
 */
3602 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3604 struct dw_mci_board *pdata;
3605 struct device *dev = host->dev;
3606 struct device_node *np = dev->of_node;
3607 const struct dw_mci_drv_data *drv_data = host->drv_data;
3609 u32 clock_frequency;
3611 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3613 dev_err(dev, "could not allocate memory for pdata\n");
3614 return ERR_PTR(-ENOMEM);
3617 /* find out number of slots supported */
3618 if (of_property_read_u32(dev->of_node, "num-slots",
3619 &pdata->num_slots)) {
3620 dev_info(dev, "num-slots property not found, "
3621 "assuming 1 slot is available\n");
3622 pdata->num_slots = 1;
/* translate generic DT quirk properties into quirk flag bits */
3626 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3627 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3628 pdata->quirks |= of_quirks[idx].id;
3631 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3632 dev_info(dev, "fifo-depth property not found, using "
3633 "value of FIFOTH register as default\n")
3635 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3637 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3638 pdata->bus_hz = clock_frequency;
/* per-SoC hook (e.g. Rockchip-specific properties) */
3640 if (drv_data && drv_data->parse_dt) {
3641 ret = drv_data->parse_dt(host);
3643 return ERR_PTR(ret);
3646 if (of_find_property(np, "keep-power-in-suspend", NULL))
3647 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3649 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3650 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3652 if (of_find_property(np, "supports-highspeed", NULL))
3653 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3655 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3656 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3658 if (of_find_property(np, "supports-DDR_MODE", NULL))
3659 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3661 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3662 pdata->caps2 |= MMC_CAP2_HS200;
3664 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3665 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3667 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3668 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3670 if (of_get_property(np, "cd-inverted", NULL))
3671 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3672 if (of_get_property(np, "bootpart-no-access", NULL))
3673 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3678 #else /* CONFIG_OF */
/* Without device-tree support there is no pdata source: always fail. */
3679 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3681 return ERR_PTR(-EINVAL);
3683 #endif /* CONFIG_OF */
/*
 * dw_mci_dealwith_timeout() - recovery path for a data-over watchdog
 * expiry.  For data states it fabricates a CRC/end-bit data error, fully
 * resets the controller (and re-inits IDMAC where applicable), restores
 * FIFOTH/TMOUT/interrupt masks, re-programs the bus for slots with
 * MMC_PM_KEEP_POWER, and reschedules the tasklet to complete the request.
 */
3685 static void dw_mci_dealwith_timeout(struct dw_mci *host)
3689 switch(host->state){
3692 case STATE_SENDING_DATA:
3693 case STATE_DATA_BUSY:
/* fake a data error so the tasklet finishes the request with -EIO-ish status */
3694 host->data_status |= (SDMMC_INT_DCRC|SDMMC_INT_EBE);
3695 mci_writel(host, RINTSTS, SDMMC_INT_DRTO); // clear interrupt
3696 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3697 host->state = STATE_DATA_BUSY;
3698 if (!dw_mci_ctrl_all_reset(host)) {
3703 /* NO requirement to reclaim slave chn using external dmac */
3704 #ifdef CONFIG_MMC_DW_IDMAC
3705 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3706 if (host->use_dma && host->dma_ops->init)
3707 host->dma_ops->init(host);
3711 * Restore the initial value at FIFOTH register
3712 * And Invalidate the prev_blksz with zero
3714 mci_writel(host, FIFOTH, host->fifoth_val);
3715 host->prev_blksz = 0;
3716 mci_writel(host, TMOUT, 0xFFFFFFFF);
3717 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3718 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
3719 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
/* card-detect interrupt only for removable (non-SDIO) slots */
3720 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3721 regs |= SDMMC_INT_CD;
3722 mci_writel(host, INTMASK, regs);
3723 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3724 for (i = 0; i < host->num_slots; i++) {
3725 struct dw_mci_slot *slot = host->slot[i];
3728 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3729 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3730 dw_mci_setup_bus(slot, true);
3733 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3734 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_dto_timeout() - dto_timer callback, armed in
 * dw_mci_cmd_interrupt().  Fires when DATA_OVER never arrived: with the
 * host IRQ masked, records an end-bit error and runs the full timeout
 * recovery in dw_mci_dealwith_timeout().
 */
3740 static void dw_mci_dto_timeout(unsigned long host_data)
3742 struct dw_mci *host = (struct dw_mci *) host_data;
3744 disable_irq(host->irq);
3746 host->data_status = SDMMC_INT_EBE;
3747 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3748 dw_mci_dealwith_timeout(host);
3750 enable_irq(host->irq);
3752 int dw_mci_probe(struct dw_mci *host)
3754 const struct dw_mci_drv_data *drv_data = host->drv_data;
3755 int width, i, ret = 0;
3761 host->pdata = dw_mci_parse_dt(host);
3762 if (IS_ERR(host->pdata)) {
3763 dev_err(host->dev, "platform data not available\n");
3768 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3770 "Platform data must supply select_slot function\n");
3775 * In 2.40a spec, Data offset is changed.
3776 * Need to check the version-id and set data-offset for DATA register.
3778 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3779 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3781 if (host->verid < DW_MMC_240A)
3782 host->data_offset = DATA_OFFSET;
3784 host->data_offset = DATA_240A_OFFSET;
3787 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3788 if (IS_ERR(host->hclk_mmc)) {
3789 dev_err(host->dev, "failed to get hclk_mmc\n");
3790 ret = PTR_ERR(host->hclk_mmc);
3794 clk_prepare_enable(host->hclk_mmc);
3797 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3798 if (IS_ERR(host->clk_mmc)) {
3799 dev_err(host->dev, "failed to get clk mmc_per\n");
3800 ret = PTR_ERR(host->clk_mmc);
3804 host->bus_hz = host->pdata->bus_hz;
3805 if (!host->bus_hz) {
3806 dev_err(host->dev,"Platform data must supply bus speed\n");
3811 if (host->verid < DW_MMC_240A)
3812 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3814 //rockchip: fix divider 2 in clksum before controlller
3815 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3818 dev_err(host->dev, "failed to set clk mmc\n");
3821 clk_prepare_enable(host->clk_mmc);
3823 if (drv_data && drv_data->setup_clock) {
3824 ret = drv_data->setup_clock(host);
3827 "implementation specific clock setup failed\n");
3832 host->quirks = host->pdata->quirks;
3833 host->irq_state = true;
3834 host->set_speed = 0;
3836 host->svi_flags = 0;
3838 spin_lock_init(&host->lock);
3839 INIT_LIST_HEAD(&host->queue);
3842 * Get the host data width - this assumes that HCON has been set with
3843 * the correct values.
3845 i = (mci_readl(host, HCON) >> 7) & 0x7;
3847 host->push_data = dw_mci_push_data16;
3848 host->pull_data = dw_mci_pull_data16;
3850 host->data_shift = 1;
3851 } else if (i == 2) {
3852 host->push_data = dw_mci_push_data64;
3853 host->pull_data = dw_mci_pull_data64;
3855 host->data_shift = 3;
3857 /* Check for a reserved value, and warn if it is */
3859 "HCON reports a reserved host data width!\n"
3860 "Defaulting to 32-bit access.\n");
3861 host->push_data = dw_mci_push_data32;
3862 host->pull_data = dw_mci_pull_data32;
3864 host->data_shift = 2;
3867 /* Reset all blocks */
3868 if (!dw_mci_ctrl_all_reset(host))
3871 host->dma_ops = host->pdata->dma_ops;
3872 dw_mci_init_dma(host);
3874 /* Clear the interrupts for the host controller */
3875 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3876 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3878 /* Put in max timeout */
3879 mci_writel(host, TMOUT, 0xFFFFFFFF);
3882 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3883 * Tx Mark = fifo_size / 2 DMA Size = 8
3885 if (!host->pdata->fifo_depth) {
3887 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3888 * have been overwritten by the bootloader, just like we're
3889 * about to do, so if you know the value for your hardware, you
3890 * should put it in the platform data.
3892 fifo_size = mci_readl(host, FIFOTH);
3893 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3895 fifo_size = host->pdata->fifo_depth;
3897 host->fifo_depth = fifo_size;
3899 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3900 mci_writel(host, FIFOTH, host->fifoth_val);
3902 /* disable clock to CIU */
3903 mci_writel(host, CLKENA, 0);
3904 mci_writel(host, CLKSRC, 0);
3906 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3907 host->card_workqueue = alloc_workqueue("dw-mci-card",
3908 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
3909 if (!host->card_workqueue) {
3913 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
3914 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3915 host->irq_flags, "dw-mci", host);
3919 if (host->pdata->num_slots)
3920 host->num_slots = host->pdata->num_slots;
3922 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
3924 setup_timer(&host->dto_timer, dw_mci_dto_timeout, (unsigned long)host);
3925 /* We need at least one slot to succeed */
3926 for (i = 0; i < host->num_slots; i++) {
3927 ret = dw_mci_init_slot(host, i);
3929 dev_dbg(host->dev, "slot %d init failed\n", i);
3935 * Enable interrupts for command done, data over, data empty, card det,
3936 * receive ready and error such as transmit, receive timeout, crc error
3938 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3939 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
3940 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
3941 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3942 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3943 regs |= SDMMC_INT_CD;
3945 mci_writel(host, INTMASK, regs);
3947 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
3949 dev_info(host->dev, "DW MMC controller at irq %d, "
3950 "%d bit host data width, "
3952 host->irq, width, fifo_size);
3955 dev_info(host->dev, "%d slots initialized\n", init_slots);
3957 dev_dbg(host->dev, "attempted to initialize %d slots, "
3958 "but failed on all\n", host->num_slots);
3963 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
3964 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
3969 destroy_workqueue(host->card_workqueue);
3972 if (host->use_dma && host->dma_ops->exit)
3973 host->dma_ops->exit(host);
3976 regulator_disable(host->vmmc);
3977 regulator_put(host->vmmc);
3981 if (!IS_ERR(host->clk_mmc))
3982 clk_disable_unprepare(host->clk_mmc);
3984 if (!IS_ERR(host->hclk_mmc))
3985 clk_disable_unprepare(host->hclk_mmc);
3989 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - tear down a dw_mmc host controller instance.
 *
 * Reverse of dw_mci_probe: stops the DTO timer, masks and clears all
 * controller interrupts, cleans up every initialized slot, gates the
 * card-interface clock, destroys the card workqueue, releases the DMA
 * engine, frees the card-detect GPIO IRQ, and drops the vmmc regulator
 * and the two controller clocks.
 *
 * NOTE(review): this source view is partially elided (non-contiguous
 * embedded line numbers); comments below describe only the visible code.
 */
3991 void dw_mci_remove(struct dw_mci *host)
3993 struct mmc_host *mmc = host->mmc;
3994 struct dw_mci_slot *slot = mmc_priv(mmc);
/* Make sure no data-over timeout fires while we tear down. */
3997 del_timer_sync(&host->dto_timer);
/* Ack any pending interrupts, then mask everything. */
3999 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4000 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4002 for(i = 0; i < host->num_slots; i++){
4003 dev_dbg(host->dev, "remove slot %d\n", i);
4005 dw_mci_cleanup_slot(host->slot[i], i);
4008 /* disable clock to CIU */
4009 mci_writel(host, CLKENA, 0);
4010 mci_writel(host, CLKSRC, 0);
4012 destroy_workqueue(host->card_workqueue);
/* dma_ops->exit is optional; only call it when DMA was in use. */
4014 if(host->use_dma && host->dma_ops->exit)
4015 host->dma_ops->exit(host);
/* Release the card-detect GPIO IRQ if one was wired up. */
4017 if (gpio_is_valid(slot->cd_gpio))
4018 dw_mci_of_free_cd_gpio_irq(host->dev, slot->cd_gpio, host->mmc);
/* Drop the card power regulator reference. */
4021 regulator_disable(host->vmmc);
4022 regulator_put(host->vmmc);
/* Clocks were obtained via devm_clk_get; only unprepare valid handles. */
4024 if(!IS_ERR(host->clk_mmc))
4025 clk_disable_unprepare(host->clk_mmc);
4027 if(!IS_ERR(host->hclk_mmc))
4028 clk_disable_unprepare(host->hclk_mmc);
4030 EXPORT_SYMBOL(dw_mci_remove);
4034 #ifdef CONFIG_PM_SLEEP
4036 * TODO: we should probably disable the clock to the card in the suspend path.
/*
 * dw_mci_suspend - put the controller into a low-power state.
 *
 * Disables the vmmc regulator and the DMA engine, and — for the SD
 * (removable card) controller only — blocks rescans, cancels pending
 * card-detect work, masks the controller IRQ, switches the pins to
 * their idle pinctrl state, masks/clears all controller interrupts,
 * and re-routes card detect to a GPIO so it can wake the system.
 *
 * NOTE(review): source view is partially elided (non-contiguous
 * embedded line numbers); comments cover only the visible code.
 */
4038 int dw_mci_suspend(struct dw_mci *host)
4041 regulator_disable(host->vmmc);
/* dma_ops->exit is optional; only call it when DMA was in use. */
4043 if(host->use_dma && host->dma_ops->exit)
4044 host->dma_ops->exit(host);
4046 /*only for sdmmc controller*/
4047 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
/* Prevent the core from rescanning the slot while suspended. */
4048 host->mmc->rescan_disable = 1;
/* If detect work was pending, release the wake lock it held. */
4049 if (cancel_delayed_work_sync(&host->mmc->detect))
4050 wake_unlock(&host->mmc->detect_wake_lock);
4052 disable_irq(host->irq);
/* Park the pins in their low-power (idle) pinctrl state. */
4053 if (pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
4054 MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
4055 mmc_hostname(host->mmc));
/* Ack then mask all controller interrupts and disable the controller. */
4057 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4058 mci_writel(host, INTMASK, 0x00);
4059 mci_writel(host, CTRL, 0x00);
4061 /* Soc rk3126 already in gpio_cd mode */
4062 if (!(cpu_is_rk312x() && soc_is_rk3126())) {
/* Switch card-detect to GPIO mode and arm it as a wake source. */
4063 dw_mci_of_get_cd_gpio(host->dev, 0, host->mmc);
4064 enable_irq_wake(host->mmc->slot.cd_irq);
4069 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - restore the controller after system suspend.
 *
 * Undoes dw_mci_suspend: for SD, releases the GPIO card-detect wake
 * source and restores the default pinctrl state; re-enables the vmmc
 * regulator; resets the controller; re-initializes DMA; restores
 * FIFOTH/TMOUT and the interrupt mask; re-enables the IRQ; and for
 * slots with MMC_PM_KEEP_POWER re-applies the saved ios/bus setup.
 *
 * NOTE(review): source view is partially elided (non-contiguous
 * embedded line numbers); comments cover only the visible code.
 */
4071 int dw_mci_resume(struct dw_mci *host)
4073 int i, ret, retry_cnt = 0;
4075 struct dw_mci_slot *slot;
/* SDIO: nothing to restore unless a card is actually present. */
4077 if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO){
4078 slot = mmc_priv(host->mmc);
4080 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
4083 /*only for sdmmc controller*/
4084 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
4085 /* Soc rk3126 already in gpio_cd mode */
4086 if (!(cpu_is_rk312x() && soc_is_rk3126())) {
/* Disarm the GPIO card-detect wake source set up in suspend. */
4087 disable_irq_wake(host->mmc->slot.cd_irq);
4088 mmc_gpio_free_cd(host->mmc);
/* Back to the normal (default) pinctrl state for the interface. */
4090 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
4091 MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
4092 mmc_hostname(host->mmc));
4093 host->mmc->rescan_disable = 0;
/* Per-SoC GRF tweak: clear the relevant SOC_CON bit (write-enable
 * mask in the upper half-word, value in the lower). */
4096 grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
4097 else if(cpu_is_rk3036())
4098 grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
4099 else if(cpu_is_rk312x())
4100 /* RK3036_GRF_SOC_CON0 is compatible with rk312x, tmp setting */
4101 grf_writel(((1 << 8) << 16) | (0 << 8), RK3036_GRF_SOC_CON0);
/* Re-enable card power, disabled in dw_mci_suspend(). */
4104 ret = regulator_enable(host->vmmc);
4107 "failed to enable regulator: %d\n", ret);
4112 if(!dw_mci_ctrl_all_reset(host)){
4117 if(host->use_dma && host->dma_ops->init)
4118 host->dma_ops->init(host);
4121 * Restore the initial value at FIFOTH register
4122 * And Invalidate the prev_blksz with zero
4124 mci_writel(host, FIFOTH, host->fifoth_val);
4125 host->prev_blksz = 0;
4126 /* Put in max timeout */
4127 mci_writel(host, TMOUT, 0xFFFFFFFF);
/* Ack stale interrupts, then rebuild the interrupt mask. */
4129 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4130 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
/* Card-detect interrupt only for non-SDIO controllers. */
4132 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
4133 regs |= SDMMC_INT_CD;
4134 mci_writel(host, INTMASK, regs);
4135 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
4136 /*only for sdmmc controller*/
/* Re-enable the host IRQ that suspend disabled (first pass only). */
4137 if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
4138 enable_irq(host->irq);
4141 for(i = 0; i < host->num_slots; i++){
4142 struct dw_mci_slot *slot = host->slot[i];
/* Slots that kept power through suspend need their ios/bus redone. */
4145 if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
4146 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
4147 dw_mci_setup_bus(slot, true);
4153 EXPORT_SYMBOL(dw_mci_resume);
4154 #endif /* CONFIG_PM_SLEEP */
/*
 * Module entry/exit points. dw_mci_init only announces the driver;
 * actual device binding happens through the platform driver elsewhere.
 * NOTE(review): bodies are partially elided in this source view.
 */
4156 static int __init dw_mci_init(void)
4158 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4162 static void __exit dw_mci_exit(void)
4166 module_init(dw_mci_init);
4167 module_exit(dw_mci_exit);
/*
 * Module metadata.
 * Fix: the last MODULE_AUTHOR string contained a mis-encoded full-width
 * comma (mojibake "£¬" for U+FF0C) and a stray space inside the e-mail
 * angle brackets; replaced with a plain ASCII comma and tidied spacing.
 */
4169 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4170 MODULE_AUTHOR("NXP Semiconductor VietNam");
4171 MODULE_AUTHOR("Imagination Technologies Ltd");
4172 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4173 MODULE_AUTHOR("Rockchip Electronics, Bangwang Xie <xbw@rock-chips.com>");
4174 MODULE_LICENSE("GPL v2");