2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/mmc/host.h>
34 #include <linux/mmc/mmc.h>
35 #include <linux/mmc/sd.h>
36 #include <linux/mmc/sdio.h>
37 #include <linux/mmc/rk_mmc.h>
38 #include <linux/bitops.h>
39 #include <linux/regulator/consumer.h>
40 #include <linux/workqueue.h>
42 #include <linux/of_gpio.h>
43 #include <linux/mmc/slot-gpio.h>
44 #include <linux/clk-private.h>
45 #include <linux/rockchip/cpu.h>
48 #include "rk_sdmmc_dbg.h"
49 #include <linux/regulator/rockchip_io_vol_domain.h>
50 #include "../../clk/rockchip/clk-ops.h"
/*
 * Driver version string, grouped interrupt-error flags, clock frequency
 * limits, per-card-type data timeouts (ms), and -- under
 * CONFIG_MMC_DW_IDMAC -- the internal DMA controller descriptor field
 * layout and its des0/des1 bit definitions.
 * NOTE(review): several macro continuation lines and the
 * "struct idmac_desc {" opener appear to be elided in this copy; recover
 * them from the pristine driver source before building.
 */
52 #define RK_SDMMC_DRIVER_VERSION "Ver 1.12 2014-07-08"
54 /* Common flag combinations */
55 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
56 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
58 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
60 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
61 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
62 #define DW_MCI_SEND_STATUS 1
63 #define DW_MCI_RECV_STATUS 2
64 #define DW_MCI_DMA_THRESHOLD 16
66 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
67 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
69 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
70 #define SDMMC_DATA_TIMEOUT_SD 500
71 #define SDMMC_DATA_TIMEOUT_SDIO 250
72 #define SDMMC_DATA_TIMEOUT_EMMC 2500
74 #define SDMMC_CMD_RTO_MAX_HOLD 200
75 #define SDMMC_WAIT_FOR_UNBUSY 2500
77 #ifdef CONFIG_MMC_DW_IDMAC
78 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
79 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
80 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
84 u32 des0; /* Control Descriptor */
85 #define IDMAC_DES0_DIC BIT(1)
86 #define IDMAC_DES0_LD BIT(2)
87 #define IDMAC_DES0_FD BIT(3)
88 #define IDMAC_DES0_CH BIT(4)
89 #define IDMAC_DES0_ER BIT(5)
90 #define IDMAC_DES0_CES BIT(30)
91 #define IDMAC_DES0_OWN BIT(31)
93 u32 des1; /* Buffer sizes */
94 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
95 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
97 u32 des2; /* buffer 1 physical address */
99 u32 des3; /* buffer 2 physical address */
101 #endif /* CONFIG_MMC_DW_IDMAC */
103 static const u8 tuning_blk_pattern_4bit[] = {
104 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
105 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
106 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
107 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
108 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
109 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
110 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
111 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
114 static const u8 tuning_blk_pattern_8bit[] = {
115 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
116 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
117 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
118 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
119 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
120 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
121 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
122 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
123 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
124 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
125 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
126 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
127 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
128 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
129 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
130 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Forward declarations for reset and low-power helpers defined later in this file. */
133 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
134 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
135 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
136 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
/*
 * Debug helper: walk the dw_mci_regs name/offset table and printk every
 * controller register value for this host.
 * NOTE(review): the loop's regs++ advance, closing braces and return
 * value appear elided in this copy.
 */
138 /*printk the all register of current host*/
140 static int dw_mci_regs_printk(struct dw_mci *host)
142 struct sdmmc_reg *regs = dw_mci_regs;
144 while( regs->name != 0 ){
145 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
148 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
153 #if defined(CONFIG_DEBUG_FS)
154 static int dw_mci_req_show(struct seq_file *s, void *v)
156 struct dw_mci_slot *slot = s->private;
157 struct mmc_request *mrq;
158 struct mmc_command *cmd;
159 struct mmc_command *stop;
160 struct mmc_data *data;
162 /* Make sure we get a consistent snapshot */
163 spin_lock_bh(&slot->host->lock);
173 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
174 cmd->opcode, cmd->arg, cmd->flags,
175 cmd->resp[0], cmd->resp[1], cmd->resp[2],
176 cmd->resp[2], cmd->error);
178 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
179 data->bytes_xfered, data->blocks,
180 data->blksz, data->flags, data->error);
183 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
184 stop->opcode, stop->arg, stop->flags,
185 stop->resp[0], stop->resp[1], stop->resp[2],
186 stop->resp[2], stop->error);
189 spin_unlock_bh(&slot->host->lock);
194 static int dw_mci_req_open(struct inode *inode, struct file *file)
196 return single_open(file, dw_mci_req_show, inode->i_private);
199 static const struct file_operations dw_mci_req_fops = {
200 .owner = THIS_MODULE,
201 .open = dw_mci_req_open,
204 .release = single_release,
207 static int dw_mci_regs_show(struct seq_file *s, void *v)
209 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
210 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
211 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
212 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
213 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
214 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
219 static int dw_mci_regs_open(struct inode *inode, struct file *file)
221 return single_open(file, dw_mci_regs_show, inode->i_private);
224 static const struct file_operations dw_mci_regs_fops = {
225 .owner = THIS_MODULE,
226 .open = dw_mci_regs_open,
229 .release = single_release,
/*
 * Create the per-slot debugfs entries ("regs", "req", "state",
 * "pending_events", "completed_events") under the mmc host's debugfs
 * root directory.
 * NOTE(review): the "struct dentry *root, *node;" declarations, the
 * missing call-argument lines and the error-path checks/goto labels are
 * elided in this copy; each creation failure originally jumped to the
 * dev_err() below.
 */
232 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
234 struct mmc_host *mmc = slot->mmc;
235 struct dw_mci *host = slot->host;
239 root = mmc->debugfs_root;
243 node = debugfs_create_file("regs", S_IRUSR, root, host,
248 node = debugfs_create_file("req", S_IRUSR, root, slot,
253 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
257 node = debugfs_create_x32("pending_events", S_IRUSR, root,
258 (u32 *)&host->pending_events);
262 node = debugfs_create_x32("completed_events", S_IRUSR, root,
263 (u32 *)&host->completed_events);
270 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
272 #endif /* defined(CONFIG_DEBUG_FS) */
274 static void dw_mci_set_timeout(struct dw_mci *host)
276 /* timeout (maximum) */
277 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMDR bit encoding:
 * stop/response/CRC/data-direction flags, plus any per-SoC adjustment
 * from drv_data->prepare_command(). Returns the encoded cmdr value.
 * NOTE(review): the initial "u32 cmdr = cmd->opcode;", the
 * "data = cmd->data;" guard and the final "return cmdr;" are elided in
 * this copy.
 */
280 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
282 struct mmc_data *data;
283 struct dw_mci_slot *slot = mmc_priv(mmc);
284 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
286 cmd->error = -EINPROGRESS;
290 if (cmdr == MMC_STOP_TRANSMISSION)
291 cmdr |= SDMMC_CMD_STOP;
293 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
295 if (cmd->flags & MMC_RSP_PRESENT) {
296 /* We expect a response, so set this bit */
297 cmdr |= SDMMC_CMD_RESP_EXP;
298 if (cmd->flags & MMC_RSP_136)
299 cmdr |= SDMMC_CMD_RESP_LONG;
302 if (cmd->flags & MMC_RSP_CRC)
303 cmdr |= SDMMC_CMD_RESP_CRC;
307 cmdr |= SDMMC_CMD_DAT_EXP;
308 if (data->flags & MMC_DATA_STREAM)
309 cmdr |= SDMMC_CMD_STRM_MODE;
310 if (data->flags & MMC_DATA_WRITE)
311 cmdr |= SDMMC_CMD_DAT_WR;
314 if (drv_data && drv_data->prepare_command)
315 drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build host->stop_abort for the given command: STOP_TRANSMISSION for
 * block read/write opcodes, or an SDIO CCCR I/O-abort (CMD52 writing
 * the function number from cmd->arg) for CMD53. Returns the encoded
 * CMDR bits for that stop/abort command.
 * NOTE(review): the "u32 cmdr = cmd->opcode;", the else branch for
 * other opcodes and the "return cmdr;" are elided in this copy.
 */
321 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
323 struct mmc_command *stop;
329 stop = &host->stop_abort;
331 memset(stop, 0, sizeof(struct mmc_command));
333 if (cmdr == MMC_READ_SINGLE_BLOCK ||
334 cmdr == MMC_READ_MULTIPLE_BLOCK ||
335 cmdr == MMC_WRITE_BLOCK ||
336 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
337 stop->opcode = MMC_STOP_TRANSMISSION;
339 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
340 } else if (cmdr == SD_IO_RW_EXTENDED) {
341 stop->opcode = SD_IO_RW_DIRECT;
342 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
343 ((cmd->arg >> 28) & 0x7);
344 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
349 cmdr = stop->opcode | SDMMC_CMD_STOP |
350 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Launch a command: remember the previous command, write CMDARG, then
 * write CMDR with SDMMC_CMD_START. CMD11 (SD_SWITCH_VOLTAGE) gets
 * special handling: low-power clock gating is disabled first and the
 * command is tagged with SDMMC_CMD_VOLT_SWITCH.
 * NOTE(review): closing braces and some intermediate lines (e.g. the
 * wmb()/debug lines between CMDARG and CMD writes) are elided here.
 */
355 static void dw_mci_start_command(struct dw_mci *host,
356 struct mmc_command *cmd, u32 cmd_flags)
358 struct dw_mci_slot *slot = host->slot[0];
359 /*temporality fix slot[0] due to host->num_slots equal to 1*/
361 host->pre_cmd = host->cmd;
364 "start command: ARGR=0x%08x CMDR=0x%08x\n",
365 cmd->arg, cmd_flags);
367 if(SD_SWITCH_VOLTAGE == cmd->opcode){
368 /*confirm non-low-power mode*/
369 mci_writel(host, CMDARG, 0);
370 dw_mci_disable_low_power(slot);
372 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
373 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
375 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
378 mci_writel(host, CMDARG, cmd->arg);
381 /* fix the value to 1 in some Soc,for example RK3188. */
382 if(host->mmc->hold_reg_flag)
383 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
385 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
389 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
391 dw_mci_start_command(host, data->stop, host->stop_cmdr);
394 /* DMA interface functions */
395 static void dw_mci_stop_dma(struct dw_mci *host)
397 if (host->using_dma) {
398 host->dma_ops->stop(host);
399 host->dma_ops->cleanup(host);
402 /* Data transfer was stopped by the interrupt handler */
403 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
406 static int dw_mci_get_dma_dir(struct mmc_data *data)
408 if (data->flags & MMC_DATA_WRITE)
409 return DMA_TO_DEVICE;
411 return DMA_FROM_DEVICE;
414 #ifdef CONFIG_MMC_DW_IDMAC
415 static void dw_mci_dma_cleanup(struct dw_mci *host)
417 struct mmc_data *data = host->data;
420 if (!data->host_cookie)
421 dma_unmap_sg(host->dev,
424 dw_mci_get_dma_dir(data));
427 static void dw_mci_idmac_reset(struct dw_mci *host)
429 u32 bmod = mci_readl(host, BMOD);
430 /* Software reset of DMA */
431 bmod |= SDMMC_IDMAC_SWRESET;
432 mci_writel(host, BMOD, bmod);
435 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
439 /* Disable and reset the IDMAC interface */
440 temp = mci_readl(host, CTRL);
441 temp &= ~SDMMC_CTRL_USE_IDMAC;
442 temp |= SDMMC_CTRL_DMA_RESET;
443 mci_writel(host, CTRL, temp);
445 /* Stop the IDMAC running */
446 temp = mci_readl(host, BMOD);
447 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
448 temp |= SDMMC_IDMAC_SWRESET;
449 mci_writel(host, BMOD, temp);
/*
 * IDMAC completion callback: clean up the DMA mapping, set
 * EVENT_XFER_COMPLETE and schedule the state-machine tasklet.
 * NOTE(review): the "if (data)" guards that bracket the trace line and
 * the event/tasklet code are elided in this copy -- data may be NULL
 * after a card removal, as the surviving comment below explains.
 */
452 static void dw_mci_idmac_complete_dma(void *arg)
454 struct dw_mci *host = arg;
455 struct mmc_data *data = host->data;
457 dev_vdbg(host->dev, "DMA complete\n");
460 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
461 host->mrq->cmd->opcode,host->mrq->cmd->arg,
462 data->blocks,data->blksz,mmc_hostname(host->mmc));
465 host->dma_ops->cleanup(host);
468 * If the card was removed, data will be NULL. No point in trying to
469 * send the stop command or waiting for NBUSY in this case.
472 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
473 tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from a DMA-mapped scatterlist: one
 * descriptor per sg entry with OWN|DIC|CH set, buffer size in des1 and
 * the buffer address in des2; then FD is set on the first descriptor
 * and LD (with chaining/interrupt-disable cleared) on the last.
 * NOTE(review): the tail of the parameter list ("unsigned int sg_len)"),
 * the "int i;" declaration and the loop's closing brace are elided in
 * this copy. The (i - 1) * sizeof() arithmetic on sg_cpu assumes sg_cpu
 * is a byte-addressed (void *) base -- TODO confirm against dw_mmc.h.
 */
477 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
481 struct idmac_desc *desc = host->sg_cpu;
483 for (i = 0; i < sg_len; i++, desc++) {
484 unsigned int length = sg_dma_len(&data->sg[i]);
485 u32 mem_addr = sg_dma_address(&data->sg[i]);
487 /* Set the OWN bit and disable interrupts for this descriptor */
488 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
491 IDMAC_SET_BUFFER1_SIZE(desc, length);
493 /* Physical address to DMA to/from */
494 desc->des2 = mem_addr;
497 /* Set first descriptor */
499 desc->des0 |= IDMAC_DES0_FD;
501 /* Set last descriptor */
502 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
503 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
504 desc->des0 |= IDMAC_DES0_LD;
/*
 * Kick off an IDMAC transfer: translate the scatterlist into the
 * descriptor ring, route the data path through the IDMAC (CTRL),
 * enable it in BMOD and poke PLDMND so descriptor fetching starts.
 * NOTE(review): the "u32 temp;" declaration, braces and some lines
 * between the CTRL and BMOD accesses are elided in this copy.
 */
509 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
513 dw_mci_translate_sglist(host, host->data, sg_len);
515 /* Select IDMAC interface */
516 temp = mci_readl(host, CTRL);
517 temp |= SDMMC_CTRL_USE_IDMAC;
518 mci_writel(host, CTRL, temp);
522 /* Enable the IDMAC */
523 temp = mci_readl(host, BMOD);
524 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
525 mci_writel(host, BMOD, temp);
527 /* Start it running */
528 mci_writel(host, PLDMND, 1);
531 static int dw_mci_idmac_init(struct dw_mci *host)
533 struct idmac_desc *p;
536 /* Number of descriptors in the ring buffer */
537 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
539 /* Forward link the descriptor list */
540 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
541 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
543 /* Set the last descriptor as the end-of-ring descriptor */
544 p->des3 = host->sg_dma;
545 p->des0 = IDMAC_DES0_ER;
547 dw_mci_idmac_reset(host);
549 /* Mask out interrupts - get Tx & Rx complete only */
550 mci_writel(host, IDSTS, IDMAC_INT_CLR);
551 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
554 /* Set the descriptor base address */
555 mci_writel(host, DBADDR, host->sg_dma);
559 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
560 .init = dw_mci_idmac_init,
561 .start = dw_mci_idmac_start_dma,
562 .stop = dw_mci_idmac_stop_dma,
563 .complete = dw_mci_idmac_complete_dma,
564 .cleanup = dw_mci_dma_cleanup,
568 static void dw_mci_edma_cleanup(struct dw_mci *host)
570 struct mmc_data *data = host->data;
573 if (!data->host_cookie)
574 dma_unmap_sg(host->dev,
575 data->sg, data->sg_len,
576 dw_mci_get_dma_dir(data));
579 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
581 dmaengine_terminate_all(host->dms->ch);
/*
 * External-DMA completion callback (runs from the dmaengine): for reads,
 * sync the scatterlist back for the CPU (cache invalidate), clean up the
 * mapping, then flag EVENT_XFER_COMPLETE and schedule the tasklet.
 * NOTE(review): the "if (data)" guards around the sync and the
 * event/tasklet code are elided in this copy -- data may be NULL after
 * a card removal, as the surviving comment below explains.
 */
584 static void dw_mci_edmac_complete_dma(void *arg)
586 struct dw_mci *host = arg;
587 struct mmc_data *data = host->data;
589 dev_vdbg(host->dev, "DMA complete\n");
592 if(data->flags & MMC_DATA_READ)
593 /* Invalidate cache after read */
594 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
595 data->sg_len, DMA_FROM_DEVICE);
597 host->dma_ops->cleanup(host);
600 * If the card was removed, data will be NULL. No point in trying to
601 * send the stop command or waiting for NBUSY in this case.
604 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
605 tasklet_schedule(&host->tasklet);
609 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
611 struct dma_slave_config slave_config;
612 struct dma_async_tx_descriptor *desc = NULL;
613 struct scatterlist *sgl = host->data->sg;
614 u32 sg_elems = host->data->sg_len;
617 /* Set external dma config: burst size, burst width*/
618 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
619 slave_config.src_addr = slave_config.dst_addr;
620 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
621 slave_config.src_addr_width = slave_config.dst_addr_width;
623 /* Match FIFO dma burst MSIZE with external dma config*/
624 slave_config.dst_maxburst = ((host->fifoth_val) >> 28) && 0x7;
625 slave_config.src_maxburst = slave_config.dst_maxburst;
627 if(host->data->flags & MMC_DATA_WRITE){
628 slave_config.direction = DMA_MEM_TO_DEV;
629 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
631 dev_err(host->dev, "error in dw_mci edma configuration.\n");
635 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
636 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
638 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
641 /* Set dw_mci_edmac_complete_dma as callback */
642 desc->callback = dw_mci_edmac_complete_dma;
643 desc->callback_param = (void *)host;
644 dmaengine_submit(desc);
646 /* Flush cache before write */
647 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
648 sg_elems, DMA_TO_DEVICE);
649 dma_async_issue_pending(host->dms->ch);
652 slave_config.direction = DMA_DEV_TO_MEM;
653 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
655 dev_err(host->dev, "error in dw_mci edma configuration.\n");
658 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
659 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
661 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
664 /* set dw_mci_edmac_complete_dma as callback */
665 desc->callback = dw_mci_edmac_complete_dma;
666 desc->callback_param = (void *)host;
667 dmaengine_submit(desc);
668 dma_async_issue_pending(host->dms->ch);
672 static int dw_mci_edmac_init(struct dw_mci *host)
674 /* 1) request external dma channel, SHOULD decide chn in dts */
675 host->dms = (struct dw_mci_dma_slave *)kmalloc(sizeof(struct dw_mci_dma_slave),GFP_KERNEL);
676 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
678 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
679 host->dms->ch->chan_id);
692 static void dw_mci_edmac_exit(struct dw_mci *host)
694 dma_release_channel(host->dms->ch);
697 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
698 .init = dw_mci_edmac_init,
699 .exit = dw_mci_edmac_exit,
700 .start = dw_mci_edmac_start_dma,
701 .stop = dw_mci_edmac_stop_dma,
702 .complete = dw_mci_edmac_complete_dma,
703 .cleanup = dw_mci_edma_cleanup,
705 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * DMA-map the request's scatterlist. Transfers that are too short
 * (below DW_MCI_DMA_THRESHOLD) or not word-aligned are rejected with a
 * negative return so the caller falls back to PIO. When invoked from
 * pre_req (next != 0) the resulting sg_len is cached in
 * data->host_cookie for reuse by the actual submission.
 * NOTE(review): the "bool next" parameter tail, the -EINVAL returns and
 * the "return sg_len;" are elided in this copy.
 */
707 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
708 struct mmc_data *data,
711 struct scatterlist *sg;
712 unsigned int i, sg_len;
714 if (!next && data->host_cookie)
715 return data->host_cookie;
718 * We don't do DMA on "complex" transfers, i.e. with
719 * non-word-aligned buffers or lengths. Also, we don't bother
720 * with all the DMA setup overhead for short transfers.
722 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
728 for_each_sg(data->sg, sg, data->sg_len, i) {
729 if (sg->offset & 3 || sg->length & 3)
733 sg_len = dma_map_sg(host->dev,
736 dw_mci_get_dma_dir(data));
741 data->host_cookie = sg_len;
746 static void dw_mci_pre_req(struct mmc_host *mmc,
747 struct mmc_request *mrq,
750 struct dw_mci_slot *slot = mmc_priv(mmc);
751 struct mmc_data *data = mrq->data;
753 if (!slot->host->use_dma || !data)
756 if (data->host_cookie) {
757 data->host_cookie = 0;
761 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
762 data->host_cookie = 0;
765 static void dw_mci_post_req(struct mmc_host *mmc,
766 struct mmc_request *mrq,
769 struct dw_mci_slot *slot = mmc_priv(mmc);
770 struct mmc_data *data = mrq->data;
772 if (!slot->host->use_dma || !data)
775 if (data->host_cookie)
776 dma_unmap_sg(slot->host->dev,
779 dw_mci_get_dma_dir(data));
780 data->host_cookie = 0;
/*
 * For IDMAC transfers: choose the largest DMA burst size (MSIZE) that
 * divides both the block depth and the TX-watermark complement, then
 * program FIFOTH with the matching RX/TX watermarks. Falls back to the
 * initial msize/rx_wmark when the block size is not FIFO-width aligned.
 * NOTE(review): the do/while search loop over the mszs[] table and the
 * "goto done"-style early exit are elided in this copy.
 */
783 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
785 #ifdef CONFIG_MMC_DW_IDMAC
786 unsigned int blksz = data->blksz;
787 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
788 u32 fifo_width = 1 << host->data_shift;
789 u32 blksz_depth = blksz / fifo_width, fifoth_val;
790 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
791 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
793 tx_wmark = (host->fifo_depth) / 2;
794 tx_wmark_invers = host->fifo_depth - tx_wmark;
798 * if blksz is not a multiple of the FIFO width
800 if (blksz % fifo_width) {
807 if (!((blksz_depth % mszs[idx]) ||
808 (tx_wmark_invers % mszs[idx]))) {
810 rx_wmark = mszs[idx] - 1;
815 * If idx is '0', it won't be tried
816 * Thus, initial values are uesed
819 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
820 mci_writel(host, FIFOTH, fifoth_val);
/*
 * Program the card-read-threshold (CDTHRCTL): enabled with
 * thld_size = blksz for HS200/SDR104 reads (prevents FIFO underrun at
 * high clock rates), disabled for everything else.
 * NOTE(review): the "u32 thld_size;" declaration, the returns/gotos
 * between the checks and the "thld_size = blksz;" assignment are elided
 * in this copy.
 */
824 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
826 unsigned int blksz = data->blksz;
827 u32 blksz_depth, fifo_depth;
830 WARN_ON(!(data->flags & MMC_DATA_READ));
832 if (host->timing != MMC_TIMING_MMC_HS200 &&
833 host->timing != MMC_TIMING_UHS_SDR104)
836 blksz_depth = blksz / (1 << host->data_shift);
837 fifo_depth = host->fifo_depth;
839 if (blksz_depth > fifo_depth)
843 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
844 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
845 * Currently just choose blksz.
848 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
852 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Hand a data transfer to the DMA engine: map the scatterlist, retune
 * FIFOTH when the block size changed, reset + enable the DMA interface
 * in CTRL, mask the PIO (RXDR/TXDR) interrupts and start dma_ops.
 * Returns non-zero when DMA cannot be used so the caller falls back to
 * PIO.
 * NOTE(review): local declarations ("int sg_len; u32 temp;"), the
 * no-channel / map-failure early returns and the "return 0;" are elided
 * in this copy.
 */
855 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
862 /* If we don't have a channel, we can't do DMA */
866 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
868 host->dma_ops->stop(host);
875 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
876 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
880 * Decide the MSIZE and RX/TX Watermark.
881 * If current block size is same with previous size,
882 * no need to update fifoth.
884 if (host->prev_blksz != data->blksz)
885 dw_mci_adjust_fifoth(host, data);
888 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
890 /* Enable the DMA interface */
891 temp = mci_readl(host, CTRL);
892 temp |= SDMMC_CTRL_DMA_ENABLE;
893 mci_writel(host, CTRL, temp);
895 /* Disable RX/TX IRQs, let DMA handle it */
896 temp = mci_readl(host, INTMASK);
897 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
898 mci_writel(host, INTMASK, temp);
900 host->dma_ops->start(host, sg_len);
/*
 * Dispatch the data phase of a request: try DMA first via
 * dw_mci_submit_data_dma(); on failure fall back to PIO by starting an
 * sg_miter, unmasking RXDR/TXDR and disabling the DMA enable bit.
 * prev_blksz tracks whether FIFOTH must be reprogrammed next time (it
 * is zeroed after a PIO transfer so DMA re-tunes the FIFO).
 * NOTE(review): the "u32 temp;" declaration, direction branch braces
 * and the read-threshold disable on the write path are elided in this
 * copy.
 */
905 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
909 data->error = -EINPROGRESS;
916 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
918 if (data->flags & MMC_DATA_READ) {
919 host->dir_status = DW_MCI_RECV_STATUS;
920 dw_mci_ctrl_rd_thld(host, data);
922 host->dir_status = DW_MCI_SEND_STATUS;
925 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
926 data->blocks, data->blksz, mmc_hostname(host->mmc));
928 if (dw_mci_submit_data_dma(host, data)) {
929 int flags = SG_MITER_ATOMIC;
930 if (host->data->flags & MMC_DATA_READ)
931 flags |= SG_MITER_TO_SG;
933 flags |= SG_MITER_FROM_SG;
935 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
937 host->part_buf_start = 0;
938 host->part_buf_count = 0;
940 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
941 temp = mci_readl(host, INTMASK);
942 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
943 mci_writel(host, INTMASK, temp);
945 temp = mci_readl(host, CTRL);
946 temp &= ~SDMMC_CTRL_DMA_ENABLE;
947 mci_writel(host, CTRL, temp);
950 * Use the initial fifoth_val for PIO mode.
951 * If next issued data may be transfered by DMA mode,
952 * prev_blksz should be invalidated.
954 mci_writel(host, FIFOTH, host->fifoth_val);
955 host->prev_blksz = 0;
958 * Keep the current block size.
959 * It will be used to decide whether to update
960 * fifoth register next time.
962 host->prev_blksz = data->blksz;
/*
 * Synchronously issue a bare controller command (used for clock
 * updates, SDMMC_CMD_UPD_CLK): wait for the controller/card busy bits
 * in STATUS to clear, write CMDARG and CMD with SDMMC_CMD_START, then
 * poll CMD until the START bit self-clears; logs on either timeout.
 * NOTE(review): the unbusy do/while loop structure, the break on
 * START-bit clear and the final return are elided in this copy.
 */
966 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
968 struct dw_mci *host = slot->host;
969 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
970 unsigned int cmd_status = 0;
971 #ifdef SDMMC_WAIT_FOR_UNBUSY
973 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
975 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
977 ret = time_before(jiffies, timeout);
978 cmd_status = mci_readl(host, STATUS);
979 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
983 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
984 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
987 mci_writel(host, CMDARG, arg);
989 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
990 if(cmd & SDMMC_CMD_UPD_CLK)
991 timeout = jiffies + msecs_to_jiffies(50);
993 timeout = jiffies + msecs_to_jiffies(500);
994 while (time_before(jiffies, timeout)) {
995 cmd_status = mci_readl(host, CMD);
996 if (!(cmd_status & SDMMC_CMD_START))
999 dev_err(&slot->mmc->class_dev,
1000 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1001 cmd, arg, cmd_status);
/*
 * Program the card clock and bus width for a slot. When the requested
 * clock differs from current_speed (or force_clkinit is set): compute
 * CLKDIV from bus_hz, gate the clock, issue SDMMC_CMD_UPD_CLK commands
 * around the CLKDIV/CLKENA updates, and optionally enable low-power
 * clock gating when no SDIO interrupt is in use. Contains Rockchip
 * workarounds that re-rate clk_mmc when the computed divider would be
 * 0 (the long BUG/FIXME comment below documents why). Finally writes
 * the slot's bus width into CTYPE.
 * NOTE(review): this copy has many elided lines (declarations of div/
 * tempck/sdio_int/clk_en_a, branch braces, mci_send_cmd first
 * arguments) -- do not attempt to compile as-is; restore from the
 * pristine driver source.
 */
1004 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1006 struct dw_mci *host = slot->host;
1007 unsigned int tempck,clock = slot->clock;
1012 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1013 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1016 mci_writel(host, CLKENA, 0);
1017 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1018 if(host->svi_flags == 0)
1019 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1021 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1023 } else if (clock != host->current_speed || force_clkinit) {
1024 div = host->bus_hz / clock;
1025 if (host->bus_hz % clock && host->bus_hz > clock)
1027 * move the + 1 after the divide to prevent
1028 * over-clocking the card.
1032 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1034 if ((clock << div) != slot->__clk_old || force_clkinit) {
1035 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1036 dev_info(&slot->mmc->class_dev,
1037 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1038 slot->id, host->bus_hz, clock,
1041 host->set_speed = tempck;
1042 host->set_div = div;
1046 mci_writel(host, CLKENA, 0);
1047 mci_writel(host, CLKSRC, 0);
1051 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1053 if(clock <= 400*1000){
1054 MMC_DBG_BOOT_FUNC(host->mmc,
1055 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1056 clock * 2, mmc_hostname(host->mmc));
1057 /* clk_mmc will change parents to 24MHz xtal*/
1058 clk_set_rate(host->clk_mmc, clock * 2);
1061 host->set_div = div;
1065 MMC_DBG_BOOT_FUNC(host->mmc,
1066 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1067 mmc_hostname(host->mmc));
1070 MMC_DBG_ERR_FUNC(host->mmc,
1071 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1072 mmc_hostname(host->mmc));
1074 host->set_div = div;
1075 host->bus_hz = host->set_speed * 2;
1076 MMC_DBG_BOOT_FUNC(host->mmc,
1077 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1078 div, host->bus_hz, mmc_hostname(host->mmc));
1080 /* BUG may be here, come on, Linux BSP engineer looks!
1081 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1082 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1083 some oops happened like that:
1084 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1085 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1086 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1087 mmc0: new high speed DDR MMC card at address 0001
1088 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1090 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1091 mmcblk0: retrying using single block read
1092 mmcblk0: error -110 sending status command, retrying
1094 How to: If eMMC HW version < 4.51, or > 4.51 but no caps2-mmc-hs200 support in dts
1095 Please set dts emmc clk to 100M or 150M, I will workaround it!
1098 if (host->verid < DW_MMC_240A)
1099 clk_set_rate(host->clk_mmc,(host->bus_hz));
1101 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1107 /* set clock to desired speed */
1108 mci_writel(host, CLKDIV, div);
1112 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1114 /* enable clock; only low power if no SDIO */
1115 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1117 if (host->verid < DW_MMC_240A)
1118 sdio_int = SDMMC_INT_SDIO(slot->id);
1120 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1122 if (!(mci_readl(host, INTMASK) & sdio_int))
1123 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1124 mci_writel(host, CLKENA, clk_en_a);
1128 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1129 /* keep the clock with reflecting clock dividor */
1130 slot->__clk_old = clock << div;
1133 host->current_speed = clock;
1135 if(slot->ctype != slot->pre_ctype)
1136 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1138 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1139 mmc_hostname(host->mmc));
1140 slot->pre_ctype = slot->ctype;
1142 /* Set the current slot bus width */
1143 mci_writel(host, CTYPE, (slot->ctype << slot->id));
/*
 * Poll STATUS until both DATA_BUSY and MC_BUSY clear, with a timeout
 * chosen per card type (EMMC > SD > SDIO default, all in ms).
 * NOTE(review): the "do {" opener of the poll loop and the function
 * braces are elided in this copy.
 */
1146 static void dw_mci_wait_unbusy(struct dw_mci *host)
1149 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1150 unsigned long time_loop;
1151 unsigned int status;
1153 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1155 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1156 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1157 else if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1158 timeout = SDMMC_DATA_TIMEOUT_SD;
1160 time_loop = jiffies + msecs_to_jiffies(timeout);
1162 status = mci_readl(host, STATUS);
1163 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1165 } while (time_before(jiffies, time_loop));
1169 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/*
 * mmc_host_ops .card_busy used during the signal-voltage switch
 * sequence: per the surviving comment, 0 means busy and 1 means unbusy.
 * It toggles host->svi_flags across the two phases of the switch.
 * NOTE(review): the busy-bit checks guarding each branch are elided in
 * this copy.
 */
1172 * 0--status is busy.
1173 * 1--status is unbusy.
1175 int dw_mci_card_busy(struct mmc_host *mmc)
1177 struct dw_mci_slot *slot = mmc_priv(mmc);
1178 struct dw_mci *host = slot->host;
1180 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1181 host->svi_flags, mmc_hostname(host->mmc));
1184 if(host->svi_flags == 0){
1186 host->svi_flags = 1;
1187 return host->svi_flags;
1190 host->svi_flags = 0;
1191 return host->svi_flags;
/*
 * Arm the controller for a new request: select the slot, wait until the
 * controller is not busy, clear the per-request event bookkeeping,
 * program timeout/BYTCNT/BLKSIZ and submit the data phase (if any),
 * then start the command (adding SDMMC_CMD_INIT for the very first
 * command to a card). Also precomputes stop_cmdr for mrq->stop.
 * NOTE(review): the "u32 cmdflags;" declaration, "mrq = slot->mrq;",
 * "data = cmd->data;" and several guards/braces are elided in this
 * copy.
 */
1197 static void __dw_mci_start_request(struct dw_mci *host,
1198 struct dw_mci_slot *slot,
1199 struct mmc_command *cmd)
1201 struct mmc_request *mrq;
1202 struct mmc_data *data;
1206 if (host->pdata->select_slot)
1207 host->pdata->select_slot(slot->id);
1209 host->cur_slot = slot;
1212 dw_mci_wait_unbusy(host);
1214 host->pending_events = 0;
1215 host->completed_events = 0;
1216 host->data_status = 0;
1220 dw_mci_set_timeout(host);
1221 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1222 mci_writel(host, BLKSIZ, data->blksz);
1225 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1227 /* this is the first command, send the initialization clock */
1228 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1229 cmdflags |= SDMMC_CMD_INIT;
1232 dw_mci_submit_data(host, data);
1236 dw_mci_start_command(host, cmd, cmdflags);
1239 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
1242 static void dw_mci_start_request(struct dw_mci *host,
1243 struct dw_mci_slot *slot)
1245 struct mmc_request *mrq = slot->mrq;
1246 struct mmc_command *cmd;
1248 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1249 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1251 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1252 __dw_mci_start_request(host, slot, cmd);
1255 /* must be called with host->lock held */
1256 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1257 struct mmc_request *mrq)
1259 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1264 if (host->state == STATE_IDLE) {
1265 host->state = STATE_SENDING_CMD;
1266 dw_mci_start_request(host, slot);
1268 list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops .request entry point. Under host->lock: fail the
 * request immediately with -ENOMEDIUM when no card is present,
 * otherwise hand it to dw_mci_queue_request() (which starts it at once
 * if the state machine is idle).
 * NOTE(review): the early "return;" after mmc_request_done() and the
 * function braces are elided in this copy.
 */
1272 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1274 struct dw_mci_slot *slot = mmc_priv(mmc);
1275 struct dw_mci *host = slot->host;
1280 * The check for card presence and queueing of the request must be
1281 * atomic, otherwise the card could be removed in between and the
1282 * request wouldn't fail until another card was inserted.
1284 spin_lock_bh(&host->lock);
1286 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1287 spin_unlock_bh(&host->lock);
1288 mrq->cmd->error = -ENOMEDIUM;
1289 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1290 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
1292 mmc_request_done(mmc, mrq);
1296 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1297 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1299 dw_mci_queue_request(host, slot, mrq);
1301 spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops .set_ios: apply bus width, timing, clock and power
 * settings from the MMC core to the controller. Optionally waits for
 * the controller/card to go non-busy before reprogramming.
 */
1304 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1306 struct dw_mci_slot *slot = mmc_priv(mmc);
1307 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1308 struct dw_mci *host = slot->host;
1310 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1313 #ifdef SDMMC_WAIT_FOR_UNBUSY
1314 unsigned long time_loop;
/* During a signal-voltage switch (svi_flags == 1) allow a longer busy wait. */
1317 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1318 if(host->svi_flags == 1)
1319 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1321 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1323 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1326 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1327 printk("%d..%s: no card. [%s]\n", \
1328 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until neither the data path nor the card reports busy. */
1333 ret = time_before(jiffies, time_loop);
1334 regs = mci_readl(slot->host, STATUS);
1335 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1341 printk("slot->flags = %lu ", slot->flags);
1342 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1343 if(host->svi_flags != 1)
1346 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1347 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Translate MMC core bus width into the controller's CTYPE encoding. */
1351 switch (ios->bus_width) {
1352 case MMC_BUS_WIDTH_4:
1353 slot->ctype = SDMMC_CTYPE_4BIT;
1355 case MMC_BUS_WIDTH_8:
1356 slot->ctype = SDMMC_CTYPE_8BIT;
1359 /* set default 1 bit mode */
1360 slot->ctype = SDMMC_CTYPE_1BIT;
1361 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR50 timing is signalled via the per-slot DDR bit in UHS_REG[31:16]. */
1364 regs = mci_readl(slot->host, UHS_REG);
1367 if (ios->timing == MMC_TIMING_UHS_DDR50)
1368 regs |= ((0x1 << slot->id) << 16);
1370 regs &= ~((0x1 << slot->id) << 16);
1372 mci_writel(slot->host, UHS_REG, regs);
1373 slot->host->timing = ios->timing;
1376 * Use mirror of ios->clock to prevent race with mmc
1377 * core ios update when finding the minimum.
1379 slot->clock = ios->clock;
/* Give the SoC-specific driver a chance to adjust ios (clock sources etc.). */
1381 if (drv_data && drv_data->set_ios)
1382 drv_data->set_ios(slot->host, ios);
1384 /* Slot specific timing and width adjustment */
1385 dw_mci_setup_bus(slot, false);
/* Power handling: PWREN bit per slot plus optional board setpower hook. */
1389 switch (ios->power_mode) {
1391 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1393 if (slot->host->pdata->setpower)
1394 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1395 regs = mci_readl(slot->host, PWREN);
1396 regs |= (1 << slot->id);
1397 mci_writel(slot->host, PWREN, regs);
1400 /* Power down slot */
1401 if(slot->host->pdata->setpower)
1402 slot->host->pdata->setpower(slot->id, 0);
1403 regs = mci_readl(slot->host, PWREN);
1404 regs &= ~(1 << slot->id);
1405 mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops .get_ro: report write-protect state.
 * Precedence: quirk (never protected) > board get_ro hook > WP GPIO >
 * controller WRTPRT register bit for this slot.
 */
1412 static int dw_mci_get_ro(struct mmc_host *mmc)
1415 struct dw_mci_slot *slot = mmc_priv(mmc);
1416 struct dw_mci_board *brd = slot->host->pdata;
1418 /* Use platform get_ro function, else try on board write protect */
1419 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1421 else if(brd->get_ro)
1422 read_only = brd->get_ro(slot->id);
1423 else if(gpio_is_valid(slot->wp_gpio))
1424 read_only = gpio_get_value(slot->wp_gpio);
/* Fall back to the controller's write-protect status register. */
1427 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1429 dev_dbg(&mmc->class_dev, "card is %s\n",
1430 read_only ? "read-only" : "read-write");
/*
 * Software card-detect for SDIO-only slots: set/clear the PRESENT flag,
 * gate the controller clocks to match, and trigger a rescan.
 * Only valid for hosts restricted to SDIO cards.
 */
1435 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1437 struct dw_mci_slot *slot = mmc_priv(mmc);
1438 struct dw_mci *host = slot->host;
1439 /*struct dw_mci_board *brd = slot->host->pdata;*/
1441 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
/* Flag update is done under the host lock to stay consistent with get_cd(). */
1444 spin_lock_bh(&host->lock);
1447 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1449 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1451 spin_unlock_bh(&host->lock);
/* Enable clocks on insert, disable on removal (idempotent via __clk_is_enabled). */
1453 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1454 if(__clk_is_enabled(host->hclk_mmc) == false)
1455 clk_prepare_enable(host->hclk_mmc);
1456 if(__clk_is_enabled(host->clk_mmc) == false)
1457 clk_prepare_enable(host->clk_mmc);
1459 if(__clk_is_enabled(host->clk_mmc) == true)
1460 clk_disable_unprepare(slot->host->clk_mmc);
1461 if(__clk_is_enabled(host->hclk_mmc) == true)
1462 clk_disable_unprepare(slot->host->hclk_mmc);
/* Debounced rescan so the MMC core notices the (dis)appearance. */
1465 mmc_detect_change(slot->mmc, 20);
/*
 * mmc_host_ops .get_cd: report card presence.
 * Precedence: SDIO soft-detect flag > broken-CD quirk > board get_cd
 * hook > CD GPIO > controller CDETECT register. Also mirrors the result
 * into the slot's PRESENT flag under host->lock.
 */
1471 static int dw_mci_get_cd(struct mmc_host *mmc)
1474 struct dw_mci_slot *slot = mmc_priv(mmc);
1475 struct dw_mci_board *brd = slot->host->pdata;
1476 struct dw_mci *host = slot->host;
1477 int gpio_cd = mmc_gpio_get_cd(mmc);
/* SDIO slots use the software status set by dw_mci_set_sdio_status(). */
1479 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1480 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1482 /* Use platform get_cd function, else try onboard card detect */
1483 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1485 else if (brd->get_cd)
1486 present = !brd->get_cd(slot->id);
1487 else if (!IS_ERR_VALUE(gpio_cd))
1490 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1492 spin_lock_bh(&host->lock);
1494 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1495 dev_dbg(&mmc->class_dev, "card is present\n");
1497 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1498 dev_dbg(&mmc->class_dev, "card is not present\n");
1500 spin_unlock_bh(&host->lock);
1507 * DTS should mark the eMMC controller with the poll-hw-reset capability.
/*
 * mmc_host_ops .hw_reset: hard-reset the eMMC card via RST_n.
 * Sequence: (1) CMD12 to abort any transfer, (2) wait for DTO,
 * (3) reset IDMAC/DMA/FIFO in that exact order, (4) toggle PWREN/RST_n
 * with eMMC-spec compliant timing.
 */
1509 static void dw_mci_hw_reset(struct mmc_host *mmc)
1511 struct dw_mci_slot *slot = mmc_priv(mmc);
1512 struct dw_mci *host = slot->host;
1517 unsigned long timeout;
1520 /* (1) CMD12 to end any transfer in process */
1521 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1522 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1524 if(host->mmc->hold_reg_flag)
1525 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1526 mci_writel(host, CMDARG, 0);
1528 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll up to 500ms for the controller to accept the command. */
1530 timeout = jiffies + msecs_to_jiffies(500);
1532 ret = time_before(jiffies, timeout);
1533 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1538 MMC_DBG_ERR_FUNC(host->mmc,
1539 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1540 __func__, mmc_hostname(host->mmc));
1542 /* (2) wait DTO, even if no response is sent back by card */
1544 timeout = jiffies + msecs_to_jiffies(5);
1546 ret = time_before(jiffies, timeout);
1547 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1548 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1554 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1556 /* Software reset - BMOD[0] for IDMA only */
1557 regs = mci_readl(host, BMOD);
1558 regs |= SDMMC_IDMAC_SWRESET;
1559 mci_writel(host, BMOD, regs);
1560 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1561 regs = mci_readl(host, BMOD);
1562 if(regs & SDMMC_IDMAC_SWRESET)
1563 MMC_DBG_WARN_FUNC(host->mmc,
1564 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1565 __func__, mmc_hostname(host->mmc));
1567 /* DMA reset - CTRL[2] */
1568 regs = mci_readl(host, CTRL);
1569 regs |= SDMMC_CTRL_DMA_RESET;
1570 mci_writel(host, CTRL, regs);
1571 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1572 regs = mci_readl(host, CTRL);
1573 if(regs & SDMMC_CTRL_DMA_RESET)
1574 MMC_DBG_WARN_FUNC(host->mmc,
1575 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1576 __func__, mmc_hostname(host->mmc));
1578 /* FIFO reset - CTRL[1] */
1579 regs = mci_readl(host, CTRL);
1580 regs |= SDMMC_CTRL_FIFO_RESET;
1581 mci_writel(host, CTRL, regs);
1582 mdelay(1); /* no timing limited, 1ms is random value */
1583 regs = mci_readl(host, CTRL);
1584 if(regs & SDMMC_CTRL_FIFO_RESET)
1585 MMC_DBG_WARN_FUNC(host->mmc,
/* Fixed copy-paste error: this branch checks FIFO reset, not DMA reset. */
1586 "%s dw_mci_hw_reset: SDMMC_CTRL_FIFO_RESET failed!!! [%s]\n",
1587 __func__, mmc_hostname(host->mmc));
1590 According to eMMC spec
1591 tRstW >= 1us ; RST_n pulse width
1592 tRSCA >= 200us ; RST_n to Command time
1593 tRSTH >= 1us ; RST_n high period
1595 mci_writel(slot->host, PWREN, 0x0);
1596 mci_writel(slot->host, RST_N, 0x0);
1598 udelay(10); /* 10us for bad quality eMMc. */
1600 mci_writel(slot->host, PWREN, 0x1);
1601 mci_writel(slot->host, RST_N, 0x1);
1603 usleep_range(500, 1000); /* at least 500(> 200us) */
1607 * Disable lower power mode.
1609 * Low power mode will stop the card clock when idle. According to the
1610 * description of the CLKENA register we should disable low power mode
1611 * for SDIO cards if we need SDIO interrupts to work.
1613 * This function is fast if low power mode is already disabled.
1615 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1617 struct dw_mci *host = slot->host;
1619 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1621 clk_en_a = mci_readl(host, CLKENA);
/* Only touch the hardware if the bit is actually set for this slot;
 * the clock update command makes the new CLKENA value take effect. */
1623 if (clk_en_a & clken_low_pwr) {
1624 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1625 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1626 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * mmc_host_ops .enable_sdio_irq: mask/unmask the per-slot SDIO
 * interrupt in INTMASK. The bit position moved in IP version 2.40a.
 */
1630 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1632 struct dw_mci_slot *slot = mmc_priv(mmc);
1633 struct dw_mci *host = slot->host;
1637 /* Enable/disable Slot Specific SDIO interrupt */
1638 int_mask = mci_readl(host, INTMASK);
/* Pre-2.40a IP keeps SDIO interrupts at the old bit offset; newer IP
 * shifts them up by 8. */
1640 if (host->verid < DW_MMC_240A)
1641 sdio_int = SDMMC_INT_SDIO(slot->id);
1643 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1647 * Turn off low power mode if it was enabled. This is a bit of
1648 * a heavy operation and we disable / enable IRQs a lot, so
1649 * we'll leave low power mode disabled and it will get
1650 * re-enabled again in dw_mci_setup_bus().
1652 dw_mci_disable_low_power(slot);
1654 mci_writel(host, INTMASK,
1655 (int_mask | sdio_int));
1657 mci_writel(host, INTMASK,
1658 (int_mask & ~sdio_int));
1662 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* Supported IO-domain voltages in millivolts. */
1664 IO_DOMAIN_12 = 1200,
1665 IO_DOMAIN_18 = 1800,
1666 IO_DOMAIN_33 = 3300,
/*
 * Program the SoC GRF so the controller's IO pads match the selected
 * signalling voltage. Currently only RK3288 SD-card pads are handled;
 * other chips/card types log an error.
 */
1668 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1678 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1679 __FUNCTION__, mmc_hostname(host->mmc));
1682 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1683 __FUNCTION__, mmc_hostname(host->mmc));
/* RK3288: VSEL bit 7 selects the SD IO domain; bit 23 is the write-enable. */
1687 if(cpu_is_rk3288()){
1688 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1689 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1693 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1694 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * Perform the actual signal-voltage switch (3.3V / 1.8V / 1.2V):
 * reprogram the vmmc regulator, the SoC IO-domain, and the
 * controller's UHS_REG voltage bit, then verify stability.
 * Only supported on IP version >= 2.40a.
 */
1698 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1699 struct mmc_ios *ios)
1702 unsigned int value,uhs_reg;
1705 * Signal Voltage Switching is only applicable for Host Controllers
1708 if (host->verid < DW_MMC_240A)
1711 uhs_reg = mci_readl(host, UHS_REG);
1712 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1713 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1715 switch (ios->signal_voltage) {
1716 case MMC_SIGNAL_VOLTAGE_330:
1717 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1719 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1720 /* regulator_put(host->vmmc); //to be done in remove function. */
1722 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1723 __func__, regulator_get_voltage(host->vmmc), ret);
1725 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1726 " failed\n", mmc_hostname(host->mmc));
/* Retarget the SoC IO pads to the 3.3V domain. */
1729 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1731 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1732 __FUNCTION__, mmc_hostname(host->mmc));
1734 /* set High-power mode */
1735 value = mci_readl(host, CLKENA);
1736 value &= ~SDMMC_CLKEN_LOW_PWR;
1737 mci_writel(host,CLKENA , value);
/* Clear the 1.8V bit in UHS_REG to select 3.3V signalling. */
1739 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1740 mci_writel(host,UHS_REG , uhs_reg);
1743 usleep_range(5000, 5500);
1745 /* 3.3V regulator output should be stable within 5 ms */
1746 uhs_reg = mci_readl(host, UHS_REG);
1747 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1750 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1751 mmc_hostname(host->mmc));
1754 case MMC_SIGNAL_VOLTAGE_180:
1756 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1757 /* regulator_put(host->vmmc);//to be done in remove function. */
1759 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1760 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1762 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1763 " failed\n", mmc_hostname(host->mmc));
/* Retarget the SoC IO pads to the 1.8V domain. */
1766 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1770 * Enable 1.8V Signal Enable in the Host Control2
1773 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1776 usleep_range(5000, 5500);
1777 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1778 __FUNCTION__,mmc_hostname(host->mmc));
1780 /* 1.8V regulator output should be stable within 5 ms */
1781 uhs_reg = mci_readl(host, UHS_REG);
1782 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1785 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1786 mmc_hostname(host->mmc));
1789 case MMC_SIGNAL_VOLTAGE_120:
1791 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1793 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1794 " failed\n", mmc_hostname(host->mmc));
1800 /* No signal voltage switch required */
/*
 * mmc_host_ops .start_signal_voltage_switch: thin wrapper that
 * delegates to dw_mci_do_start_signal_voltage_switch() on IP >= 2.40a.
 */
1806 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1807 struct mmc_ios *ios)
1809 struct dw_mci_slot *slot = mmc_priv(mmc);
1810 struct dw_mci *host = slot->host;
/* Older IP cannot switch signalling voltage at all. */
1813 if (host->verid < DW_MMC_240A)
1816 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * mmc_host_ops .execute_tuning: pick the tuning block pattern for the
 * requested opcode/bus-width and delegate to the SoC-specific tuning
 * implementation (sample-phase search).
 */
1822 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1824 struct dw_mci_slot *slot = mmc_priv(mmc);
1825 struct dw_mci *host = slot->host;
1826 const struct dw_mci_drv_data *drv_data = host->drv_data;
1827 struct dw_mci_tuning_data tuning_data;
1830 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning */
1831 if(cpu_is_rk3036() || cpu_is_rk312x())
/* CMD21 (HS200) uses an 8- or 4-bit pattern; CMD19 (SD) always 4-bit. */
1834 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1835 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1836 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1837 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1838 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1839 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1840 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1844 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1845 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1846 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1849 "Undefined command(%d) for tuning\n", opcode);
1854 /* Recommend sample phase and delayline
1855 Fixme: Mix-use these three controllers will cause
/* con_id selects the clock controller instance: 3=eMMC, 1=SDIO, 0=SD. */
1858 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1859 tuning_data.con_id = 3;
1860 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1861 tuning_data.con_id = 1;
1863 tuning_data.con_id = 0;
1865 /* 0: driver, from host->devices
1866 1: sample, from devices->host
1868 tuning_data.tuning_type = 1;
1870 if (drv_data && drv_data->execute_tuning)
1871 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/* Host controller operations registered with the MMC core. */
1876 static const struct mmc_host_ops dw_mci_ops = {
1877 .request = dw_mci_request,
1878 .pre_req = dw_mci_pre_req,
1879 .post_req = dw_mci_post_req,
1880 .set_ios = dw_mci_set_ios,
1881 .get_ro = dw_mci_get_ro,
1882 .get_cd = dw_mci_get_cd,
1883 .set_sdio_status = dw_mci_set_sdio_status,
1884 .hw_reset = dw_mci_hw_reset,
1885 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1886 .execute_tuning = dw_mci_execute_tuning,
/* Voltage-switch callbacks only exist when the Rockchip switch support is built in. */
1887 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1888 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
1889 .card_busy = dw_mci_card_busy,
/*
 * Enable or disable the host's interrupt line, tracking the current
 * state in host->irq_state so repeated calls with the same flag are
 * no-ops (avoids unbalanced enable_irq/disable_irq).
 */
1894 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
1896 unsigned long flags;
1901 local_irq_save(flags);
1902 if(host->irq_state != irqflag)
1904 host->irq_state = irqflag;
1907 enable_irq(host->irq);
1911 disable_irq(host->irq);
1914 local_irq_restore(flags);
/*
 * Post-data housekeeping: for write transfers, translate any latched
 * data-status error bits into data->error (CRC -> -EILSEQ, end-bit ->
 * -ETIMEDOUT), then wait for the card to leave the busy state.
 * MMC_BUS_TEST_W is exempt because bus-test deliberately produces CRC errors.
 */
1918 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
1919 __releases(&host->lock)
1920 __acquires(&host->lock)
1922 if(DW_MCI_SEND_STATUS == host->dir_status){
1924 if( MMC_BUS_TEST_W != host->cmd->opcode){
1925 if(host->data_status & SDMMC_INT_DCRC)
1926 host->data->error = -EILSEQ;
1927 else if(host->data_status & SDMMC_INT_EBE)
1928 host->data->error = -ETIMEDOUT;
1930 dw_mci_wait_unbusy(host);
1933 dw_mci_wait_unbusy(host);
/*
 * Finish the current request: stop the DTO timer, settle the data
 * phase, report completion to the MMC core (with the lock dropped),
 * and start the next queued slot's request if any.
 * Called with host->lock held; temporarily releases it around
 * mmc_request_done() as the sparse annotations indicate.
 */
1938 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1939 __releases(&host->lock)
1940 __acquires(&host->lock)
1942 struct dw_mci_slot *slot;
1943 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1945 WARN_ON(host->cmd || host->data);
1947 del_timer_sync(&host->dto_timer);
1948 dw_mci_deal_data_end(host, mrq);
1951 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
1952 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
1954 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
1955 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
1957 host->cur_slot->mrq = NULL;
/* Dispatch the next queued request, or go idle if the queue is empty. */
1959 if (!list_empty(&host->queue)) {
1960 slot = list_entry(host->queue.next,
1961 struct dw_mci_slot, queue_node);
1962 list_del(&slot->queue_node);
1963 dev_vdbg(host->dev, "list not empty: %s is next\n",
1964 mmc_hostname(slot->mmc));
1965 host->state = STATE_SENDING_CMD;
1966 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
1967 dw_mci_start_request(host, slot);
1969 dev_vdbg(host->dev, "list empty\n");
1970 host->state = STATE_IDLE;
/* Drop the lock while notifying the core; it may immediately re-enter. */
1973 spin_unlock(&host->lock);
1974 mmc_request_done(prev_mmc, mrq);
1975 spin_lock(&host->lock);
/*
 * Consume the command-done status: copy the (possibly 136-bit) card
 * response into cmd->resp[] and translate error bits into errno values
 * (RTO -> -ETIMEDOUT, RCRC -> -EILSEQ, RESP_ERR -> error). The DTO
 * timer is cancelled on every error path since no data will follow.
 */
1978 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1980 u32 status = host->cmd_status;
1982 host->cmd_status = 0;
1984 /* Read the response from the card (up to 16 bytes) */
1985 if (cmd->flags & MMC_RSP_PRESENT) {
1986 if (cmd->flags & MMC_RSP_136) {
/* Long response: RESP0..RESP3 hold the 136-bit response LSW first. */
1987 cmd->resp[3] = mci_readl(host, RESP0);
1988 cmd->resp[2] = mci_readl(host, RESP1);
1989 cmd->resp[1] = mci_readl(host, RESP2);
1990 cmd->resp[0] = mci_readl(host, RESP3);
1992 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
1993 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
1995 cmd->resp[0] = mci_readl(host, RESP0);
1999 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2000 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
2004 if (status & SDMMC_INT_RTO)
2006 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2009 cmd->error = -ETIMEDOUT;
2010 del_timer_sync(&host->dto_timer);
2011 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2012 del_timer_sync(&host->dto_timer);
2013 cmd->error = -EILSEQ;
2014 }else if (status & SDMMC_INT_RESP_ERR){
2015 del_timer_sync(&host->dto_timer);
2020 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2021 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2024 del_timer_sync(&host->dto_timer);
/* Suppress repeated response-timeout spam except for CMD13 polling. */
2025 if(MMC_SEND_STATUS != cmd->opcode)
2026 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2027 MMC_DBG_ERR_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2028 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2032 /* newer ip versions need a delay between retries */
2033 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine driving a request through its phases:
 * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP, with
 * DATA_ERROR as the recovery path. Runs under host->lock, looping
 * until the state stops changing; events are handed over from the
 * interrupt handler via host->pending_events.
 */
2039 static void dw_mci_tasklet_func(unsigned long priv)
2041 struct dw_mci *host = (struct dw_mci *)priv;
2042 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2043 struct mmc_data *data;
2044 struct mmc_command *cmd;
2045 enum dw_mci_state state;
2046 enum dw_mci_state prev_state;
2047 u32 status, cmd_flags;
2048 unsigned long timeout = 0;
2051 spin_lock(&host->lock);
2053 state = host->state;
2063 case STATE_SENDING_CMD:
2064 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2065 &host->pending_events))
2070 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2071 dw_mci_command_complete(host, cmd);
/* SBC (CMD23) done: immediately chain the real data command. */
2072 if (cmd == host->mrq->sbc && !cmd->error) {
2073 prev_state = state = STATE_SENDING_CMD;
2074 __dw_mci_start_request(host, host->cur_slot,
/* Command failed before/with data: abort DMA and issue a stop. */
2079 if (cmd->data && cmd->error) {
2080 del_timer_sync(&host->dto_timer); /* delete the timer for INT_DTO */
2081 dw_mci_stop_dma(host);
2084 send_stop_cmd(host, data);
2085 state = STATE_SENDING_STOP;
2091 send_stop_abort(host, data);
2092 state = STATE_SENDING_STOP;
2095 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
/* Pure command (no data) or failed command: the request is done. */
2098 if (!host->mrq->data || cmd->error) {
2099 dw_mci_request_end(host, host->mrq);
2103 prev_state = state = STATE_SENDING_DATA;
2106 case STATE_SENDING_DATA:
2107 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2108 dw_mci_stop_dma(host);
2111 send_stop_cmd(host, data);
2113 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2114 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2115 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
/* Hand-craft a CMD12 through the CMD register for the recovery path. */
2117 mci_writel(host, CMDARG, 0);
2119 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2120 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2122 if(host->mmc->hold_reg_flag)
2123 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2125 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Wait up to 500ms for the controller to accept the recovery CMD12. */
2127 timeout = jiffies + msecs_to_jiffies(500);
2130 ret = time_before(jiffies, timeout);
2131 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2135 MMC_DBG_ERR_FUNC(host->mmc,
2136 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2137 __func__, mmc_hostname(host->mmc));
2140 send_stop_abort(host, data);
2142 state = STATE_DATA_ERROR;
2146 MMC_DBG_CMD_FUNC(host->mmc,
2147 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2148 prev_state,state, mmc_hostname(host->mmc));
2150 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2151 &host->pending_events))
2153 MMC_DBG_INFO_FUNC(host->mmc,
2154 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2155 prev_state,state,mmc_hostname(host->mmc));
2157 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2158 prev_state = state = STATE_DATA_BUSY;
2161 case STATE_DATA_BUSY:
2162 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2163 &host->pending_events))
2166 dw_mci_deal_data_end(host, host->mrq);
2167 del_timer_sync(&host->dto_timer); //delete the timer for INT_DTO
2168 MMC_DBG_INFO_FUNC(host->mmc,
2169 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2170 prev_state,state,mmc_hostname(host->mmc));
2173 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2174 status = host->data_status;
/* Map latched data error bits into errno values on data->error. */
2176 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2177 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2178 MMC_DBG_ERR_FUNC(host->mmc,
2179 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2180 prev_state,state, status, mmc_hostname(host->mmc));
2182 if (status & SDMMC_INT_DRTO) {
2183 data->error = -ETIMEDOUT;
2184 } else if (status & SDMMC_INT_DCRC) {
2185 data->error = -EILSEQ;
2186 } else if (status & SDMMC_INT_EBE &&
2187 host->dir_status == DW_MCI_SEND_STATUS){
2189 * No data CRC status was returned.
2190 * The number of bytes transferred will
2191 * be exaggerated in PIO mode.
2193 data->bytes_xfered = 0;
2194 data->error = -ETIMEDOUT;
2203 * After an error, there may be data lingering
2204 * in the FIFO, so reset it - doing so
2205 * generates a block interrupt, hence setting
2206 * the scatter-gather pointer to NULL.
2208 dw_mci_fifo_reset(host);
/* Success: report the full transfer length to the core. */
2210 data->bytes_xfered = data->blocks * data->blksz;
2215 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2216 prev_state,state,mmc_hostname(host->mmc));
2217 dw_mci_request_end(host, host->mrq);
2220 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2221 prev_state,state,mmc_hostname(host->mmc));
/* With SBC the card stops itself; no CMD12 needed on success. */
2223 if (host->mrq->sbc && !data->error) {
2224 data->stop->error = 0;
2226 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2227 prev_state,state,mmc_hostname(host->mmc));
2229 dw_mci_request_end(host, host->mrq);
2233 prev_state = state = STATE_SENDING_STOP;
2235 send_stop_cmd(host, data);
2237 if (data->stop && !data->error) {
2238 /* stop command for open-ended transfer*/
2240 send_stop_abort(host, data);
2244 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2245 prev_state,state,mmc_hostname(host->mmc));
2247 case STATE_SENDING_STOP:
2248 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2251 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2252 prev_state, state, mmc_hostname(host->mmc));
2254 /* CMD error in data command */
2255 if (host->mrq->cmd->error && host->mrq->data) {
2256 dw_mci_fifo_reset(host);
2262 dw_mci_command_complete(host, host->mrq->stop);
2264 if (host->mrq->stop)
2265 dw_mci_command_complete(host, host->mrq->stop);
2267 host->cmd_status = 0;
2270 dw_mci_request_end(host, host->mrq);
2273 case STATE_DATA_ERROR:
2274 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2275 &host->pending_events))
2278 state = STATE_DATA_BUSY;
/* Re-run the loop until a pass makes no state transition. */
2281 } while (state != prev_state);
2283 host->state = state;
2285 spin_unlock(&host->lock);
2289 /* push final bytes to part_buf, only use during push */
/* Stash cnt trailing bytes (< one FIFO word) for the next push call. */
2290 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2292 memcpy((void *)&host->part_buf, buf, cnt);
2293 host->part_buf_count = cnt;
2296 /* append bytes to part_buf, only use during push */
/* Top up part_buf toward a full FIFO word; returns bytes consumed. */
2297 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
/* Cap at the space left in one FIFO word (1 << data_shift bytes). */
2299 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2300 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2301 host->part_buf_count += cnt;
2305 /* pull first bytes from part_buf, only use during pull */
/* Drain up to cnt leftover bytes from part_buf into buf; returns count. */
2306 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2308 cnt = min(cnt, (int)host->part_buf_count);
2310 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2312 host->part_buf_count -= cnt;
2313 host->part_buf_start += cnt;
2318 /* pull final bytes from the part_buf, assuming it's just been filled */
/* Copy the first cnt bytes of a freshly-read FIFO word; remember the rest. */
2319 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2321 memcpy(buf, &host->part_buf, cnt);
2322 host->part_buf_start = cnt;
2323 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO push for a 16-bit FIFO: flush any partial word, write aligned
 * 16-bit words (bouncing through a stack buffer if buf is misaligned),
 * and hold back a trailing odd byte unless this is the end of the transfer.
 */
2326 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2328 struct mmc_data *data = host->data;
2331 /* try and push anything in the part_buf */
2332 if (unlikely(host->part_buf_count)) {
2333 int len = dw_mci_push_part_bytes(host, buf, cnt);
2336 if (host->part_buf_count == 2) {
2337 mci_writew(host, DATA(host->data_offset),
2339 host->part_buf_count = 0;
/* Misaligned source: stage through an aligned bounce buffer. */
2342 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2343 if (unlikely((unsigned long)buf & 0x1)) {
2345 u16 aligned_buf[64];
2346 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2347 int items = len >> 1;
2349 /* memcpy from input buffer into aligned buffer */
2350 memcpy(aligned_buf, buf, len);
2353 /* push data from aligned buffer into fifo */
2354 for (i = 0; i < items; ++i)
2355 mci_writew(host, DATA(host->data_offset),
/* Aligned fast path: stream 16-bit words straight to the FIFO. */
2362 for (; cnt >= 2; cnt -= 2)
2363 mci_writew(host, DATA(host->data_offset), *pdata++);
2366 /* put anything remaining in the part_buf */
2368 dw_mci_set_part_bytes(host, buf, cnt);
2369 /* Push data if we have reached the expected data length */
2370 if ((data->bytes_xfered + init_cnt) ==
2371 (data->blksz * data->blocks))
2372 mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit FIFO: read 16-bit words (via an aligned bounce
 * buffer when buf is misaligned), then keep any trailing odd byte in
 * part_buf for the next call.
 */
2377 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2379 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2380 if (unlikely((unsigned long)buf & 0x1)) {
2382 /* pull data from fifo into aligned buffer */
2383 u16 aligned_buf[64];
2384 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2385 int items = len >> 1;
2387 for (i = 0; i < items; ++i)
2388 aligned_buf[i] = mci_readw(host,
2389 DATA(host->data_offset));
2390 /* memcpy from aligned buffer into output buffer */
2391 memcpy(buf, aligned_buf, len);
/* Aligned fast path: stream 16-bit words straight from the FIFO. */
2399 for (; cnt >= 2; cnt -= 2)
2400 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Final odd byte: read a full word and save the remainder in part_buf. */
2404 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2405 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit FIFO: flush any partial word, write aligned
 * 32-bit words (bouncing through a stack buffer if buf is misaligned),
 * and hold back trailing bytes unless this is the end of the transfer.
 */
2409 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2411 struct mmc_data *data = host->data;
2414 /* try and push anything in the part_buf */
2415 if (unlikely(host->part_buf_count)) {
2416 int len = dw_mci_push_part_bytes(host, buf, cnt);
2419 if (host->part_buf_count == 4) {
2420 mci_writel(host, DATA(host->data_offset),
2422 host->part_buf_count = 0;
/* Misaligned source: stage through an aligned bounce buffer. */
2425 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2426 if (unlikely((unsigned long)buf & 0x3)) {
2428 u32 aligned_buf[32];
2429 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2430 int items = len >> 2;
2432 /* memcpy from input buffer into aligned buffer */
2433 memcpy(aligned_buf, buf, len);
2436 /* push data from aligned buffer into fifo */
2437 for (i = 0; i < items; ++i)
2438 mci_writel(host, DATA(host->data_offset),
/* Aligned fast path: stream 32-bit words straight to the FIFO. */
2445 for (; cnt >= 4; cnt -= 4)
2446 mci_writel(host, DATA(host->data_offset), *pdata++);
2449 /* put anything remaining in the part_buf */
2451 dw_mci_set_part_bytes(host, buf, cnt);
2452 /* Push data if we have reached the expected data length */
2453 if ((data->bytes_xfered + init_cnt) ==
2454 (data->blksz * data->blocks))
2455 mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit FIFO: read 32-bit words (via an aligned bounce
 * buffer when buf is misaligned), then keep trailing bytes in part_buf
 * for the next call.
 */
2460 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2462 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2463 if (unlikely((unsigned long)buf & 0x3)) {
2465 /* pull data from fifo into aligned buffer */
2466 u32 aligned_buf[32];
2467 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2468 int items = len >> 2;
2470 for (i = 0; i < items; ++i)
2471 aligned_buf[i] = mci_readl(host,
2472 DATA(host->data_offset));
2473 /* memcpy from aligned buffer into output buffer */
2474 memcpy(buf, aligned_buf, len);
/* Aligned fast path: stream 32-bit words straight from the FIFO. */
2482 for (; cnt >= 4; cnt -= 4)
2483 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Final partial word: read it whole and save the remainder in part_buf. */
2487 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2488 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 64-bit FIFO: flush any partial word, write aligned
 * 64-bit words (bouncing through a stack buffer if buf is misaligned),
 * and hold back trailing bytes unless this is the end of the transfer.
 */
2492 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2494 struct mmc_data *data = host->data;
2497 /* try and push anything in the part_buf */
2498 if (unlikely(host->part_buf_count)) {
2499 int len = dw_mci_push_part_bytes(host, buf, cnt);
2503 if (host->part_buf_count == 8) {
2504 mci_writeq(host, DATA(host->data_offset),
2506 host->part_buf_count = 0;
/* Misaligned source: stage through an aligned bounce buffer. */
2509 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2510 if (unlikely((unsigned long)buf & 0x7)) {
2512 u64 aligned_buf[16];
2513 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2514 int items = len >> 3;
2516 /* memcpy from input buffer into aligned buffer */
2517 memcpy(aligned_buf, buf, len);
2520 /* push data from aligned buffer into fifo */
2521 for (i = 0; i < items; ++i)
2522 mci_writeq(host, DATA(host->data_offset),
/* Aligned fast path: stream 64-bit words straight to the FIFO. */
2529 for (; cnt >= 8; cnt -= 8)
2530 mci_writeq(host, DATA(host->data_offset), *pdata++);
2533 /* put anything remaining in the part_buf */
2535 dw_mci_set_part_bytes(host, buf, cnt);
2536 /* Push data if we have reached the expected data length */
2537 if ((data->bytes_xfered + init_cnt) ==
2538 (data->blksz * data->blocks))
2539 mci_writeq(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data64 - drain the controller FIFO into @buf using 64-bit
 * reads of the DATA register (mirror of dw_mci_pull_data32).
 */
2544 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2546 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* Destination not 64-bit aligned: bounce through an aligned local buffer. */
2547 if (unlikely((unsigned long)buf & 0x7)) {
2549 /* pull data from fifo into aligned buffer */
2550 u64 aligned_buf[16];
/* Whole 8-byte words only, capped at the 128-byte bounce buffer. */
2551 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2552 int items = len >> 3;
2554 for (i = 0; i < items; ++i)
2555 aligned_buf[i] = mci_readq(host,
2556 DATA(host->data_offset));
2557 /* memcpy from aligned buffer into output buffer */
2558 memcpy(buf, aligned_buf, len);
/* Aligned fast path: read whole 64-bit words straight into @buf. */
2566 for (; cnt >= 8; cnt -= 8)
2567 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* Stash a trailing partial word; copy out the last 1-7 bytes. */
2571 host->part_buf = mci_readq(host, DATA(host->data_offset));
2572 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_pull_data - top-level FIFO read helper: first consume any bytes
 * left over in host->part_buf, then delegate the remainder to the
 * bus-width-specific handler installed in host->pull_data.
 */
2576 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2580 /* get remaining partial bytes */
2581 len = dw_mci_pull_part_bytes(host, buf, cnt);
/* Partial bytes alone satisfied the request - nothing left to pull. */
2582 if (unlikely(len == cnt))
2587 /* get the rest of the data */
2588 host->pull_data(host, buf, cnt);
/*
 * dw_mci_read_data_pio - PIO receive path: walk the request's scatterlist
 * with an sg_mapping_iter and pull FCNT-worth of bytes from the FIFO per
 * iteration, re-reading while RXDR stays pending (or, on @dto, while the
 * FIFO still reports a byte count).
 */
2591 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2593 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2595 unsigned int offset;
2596 struct mmc_data *data = host->data;
2597 int shift = host->data_shift;
2600 unsigned int remain, fcnt;
/* Rockchip debug aid: flag a PIO read arriving with no bus reference. */
2602 if(!host->mmc->bus_refs){
2603 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2607 if (!sg_miter_next(sg_miter))
2610 host->sg = sg_miter->piter.sg;
2611 buf = sg_miter->addr;
2612 remain = sg_miter->length;
/* Bytes currently readable: FIFO count scaled by bus width, plus any
 * partial bytes already buffered in part_buf. */
2616 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2617 << shift) + host->part_buf_count;
2618 len = min(remain, fcnt);
2621 dw_mci_pull_data(host, (void *)(buf + offset), len);
2622 data->bytes_xfered += len;
2627 sg_miter->consumed = offset;
/* Ack RXDR and loop again if more receive data became ready. */
2628 status = mci_readl(host, MINTSTS);
2629 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2630 /* if the RXDR is ready read again */
2631 } while ((status & SDMMC_INT_RXDR) ||
2632 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2635 if (!sg_miter_next(sg_miter))
2637 sg_miter->consumed = 0;
2639 sg_miter_stop(sg_miter);
2643 sg_miter_stop(sg_miter);
/* Transfer fully drained: let the tasklet see XFER_COMPLETE. */
2647 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_write_data_pio - PIO transmit path: walk the scatterlist and
 * push as many bytes as the FIFO has free space for, repeating while the
 * TXDR (transmit-data-request) interrupt stays pending.
 */
2650 static void dw_mci_write_data_pio(struct dw_mci *host)
2652 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2654 unsigned int offset;
2655 struct mmc_data *data = host->data;
2656 int shift = host->data_shift;
2659 unsigned int fifo_depth = host->fifo_depth;
2660 unsigned int remain, fcnt;
/* Rockchip debug aid: flag a PIO write arriving with no bus reference. */
2662 if(!host->mmc->bus_refs){
2663 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2668 if (!sg_miter_next(sg_miter))
2671 host->sg = sg_miter->piter.sg;
2672 buf = sg_miter->addr;
2673 remain = sg_miter->length;
/* Free FIFO space in bytes (depth minus current fill, scaled by bus
 * width), less bytes already queued in part_buf. */
2677 fcnt = ((fifo_depth -
2678 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2679 << shift) - host->part_buf_count;
2680 len = min(remain, fcnt);
2683 host->push_data(host, (void *)(buf + offset), len);
2684 data->bytes_xfered += len;
2689 sg_miter->consumed = offset;
/* Ack TXDR and loop again if the FIFO is asking for more data. */
2690 status = mci_readl(host, MINTSTS);
2691 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2692 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2695 if (!sg_miter_next(sg_miter))
2697 sg_miter->consumed = 0;
2699 sg_miter_stop(sg_miter);
2703 sg_miter_stop(sg_miter);
/* All data handed to the FIFO: signal XFER_COMPLETE to the tasklet. */
2707 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_cmd_interrupt - record command-done status and kick the tasklet.
 *
 * Rockchip extension: for commands other than CMD12 (STOP_TRANSMISSION)
 * a data-over watchdog timer (host->dto_timer) is armed, scaled by the
 * programmed byte count and the command's retry budget, so a transfer
 * that never raises DTO is eventually recovered by dw_mci_dto_timeout().
 */
2710 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
/* Keep the first reported status; don't overwrite an earlier error. */
2714 if (!host->cmd_status)
2715 host->cmd_status = status;
2720 if((MMC_STOP_TRANSMISSION != host->cmd->opcode))
/* Round BYTCNT up to whole 'unit' chunks to size the timeout.
 * NOTE(review): 'unit' is defined on an elided line - can't confirm
 * its value from this view. */
2723 multi = mci_readl(host, BYTCNT)/unit;
2724 multi += ((mci_readl(host, BYTCNT) % unit) ? 1 :0 );
2725 multi = (multi > 0) ? multi : 1;
2726 multi += (host->cmd->retries > 2)? 2 : host->cmd->retries;
2727 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(4500 * multi));//max wait 8s larger
2732 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2733 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_interrupt - top-half IRQ handler. Reads the masked interrupt
 * status (MINTSTS), acks each pending source in RINTSTS, records status
 * for the tasklet, and services PIO/SDIO/card-detect/DMA events.
 */
2736 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2738 struct dw_mci *host = dev_id;
2739 u32 pending, sdio_int;
2742 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2745 * DTO fix - version 2.10a and below, and only if internal DMA
/* Quirk: synthesize DATA_OVER when the IP fails to raise it itself. */
2748 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2750 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2751 pending |= SDMMC_INT_DATA_OVER;
/* Command-path errors: latch status for the tasklet. */
2755 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2756 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2757 host->cmd_status = pending;
2759 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2760 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2762 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
/* Data-path errors (CRC, timeout, start-bit...): defer to tasklet. */
2765 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2766 /* if there is an error report DATA_ERROR */
2767 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2768 host->data_status = pending;
2770 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2772 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2773 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2774 tasklet_schedule(&host->tasklet);
/* Data transfer over: cancel the Rockchip DTO watchdog, drain any
 * remaining PIO receive data, then complete via the tasklet. */
2777 if (pending & SDMMC_INT_DATA_OVER) {
2778 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2779 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2780 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2781 if (!host->data_status)
2782 host->data_status = pending;
2784 if (host->dir_status == DW_MCI_RECV_STATUS) {
2785 if (host->sg != NULL)
2786 dw_mci_read_data_pio(host, true);
2788 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2789 tasklet_schedule(&host->tasklet);
/* FIFO watermark interrupts drive the PIO read/write loops. */
2792 if (pending & SDMMC_INT_RXDR) {
2793 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2794 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2795 dw_mci_read_data_pio(host, false);
2798 if (pending & SDMMC_INT_TXDR) {
2799 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2800 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2801 dw_mci_write_data_pio(host);
/* Voltage-switch interrupt (UHS signaling) is treated like cmd-done. */
2804 if (pending & SDMMC_INT_VSI) {
2805 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2806 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2807 dw_mci_cmd_interrupt(host, pending);
2810 if (pending & SDMMC_INT_CMD_DONE) {
2811 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2812 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2813 dw_mci_cmd_interrupt(host, pending);
/* Card insert/remove: hold a wakelock and defer to the workqueue. */
2816 if (pending & SDMMC_INT_CD) {
2817 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2818 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
2819 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2820 queue_work(host->card_workqueue, &host->card_work);
/* Hardware-locked write error: ack and log only. */
2823 if (pending & SDMMC_INT_HLE) {
2824 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2825 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2829 /* Handle SDIO Interrupts */
2830 for (i = 0; i < host->num_slots; i++) {
2831 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in IP version 2.40a. */
2833 if (host->verid < DW_MMC_240A)
2834 sdio_int = SDMMC_INT_SDIO(i);
2836 sdio_int = SDMMC_INT_SDIO(i + 8);
2838 if (pending & sdio_int) {
2839 mci_writel(host, RINTSTS, sdio_int);
2840 mmc_signal_sdio_irq(slot->mmc);
2846 #ifdef CONFIG_MMC_DW_IDMAC
2847 /* External DMA Soc platform NOT need to ack interrupt IDSTS */
2848 if(!(cpu_is_rk3036() || cpu_is_rk312x())){
2849 /* Handle DMA interrupts */
2850 pending = mci_readl(host, IDSTS);
2851 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2852 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2853 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2854 host->dma_ops->complete((void *)host);
/*
 * dw_mci_work_routine_card - card-detect workqueue handler. For every
 * slot whose presence changed, fail any in-flight or queued request with
 * -ENOMEDIUM, reset the controller FIFO/IDMAC, and notify the MMC core
 * via mmc_detect_change().
 */
2862 static void dw_mci_work_routine_card(struct work_struct *work)
2864 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2867 for (i = 0; i < host->num_slots; i++) {
2868 struct dw_mci_slot *slot = host->slot[i];
2869 struct mmc_host *mmc = slot->mmc;
2870 struct mmc_request *mrq;
/* Loop until the detect state observed matches the recorded one -
 * the card may bounce while we are processing. */
2873 present = dw_mci_get_cd(mmc);
2874 while (present != slot->last_detect_state) {
2875 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2876 present ? "inserted" : "removed");
2877 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
2878 present ? "inserted" : "removed.", mmc_hostname(mmc));
2880 rk_send_wakeup_key();//wake up system
2881 spin_lock_bh(&host->lock);
2883 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2884 /* Card change detected */
2885 slot->last_detect_state = present;
2887 /* Clean up queue if present */
/* The active request: abort it according to the state machine's
 * current phase so each sub-command gets -ENOMEDIUM. */
2890 if (mrq == host->mrq) {
2894 switch (host->state) {
2897 case STATE_SENDING_CMD:
2898 mrq->cmd->error = -ENOMEDIUM;
2902 case STATE_SENDING_DATA:
2903 mrq->data->error = -ENOMEDIUM;
2904 dw_mci_stop_dma(host);
2906 case STATE_DATA_BUSY:
2907 case STATE_DATA_ERROR:
2908 if (mrq->data->error == -EINPROGRESS)
2909 mrq->data->error = -ENOMEDIUM;
2913 case STATE_SENDING_STOP:
2914 mrq->stop->error = -ENOMEDIUM;
2918 dw_mci_request_end(host, mrq);
/* A merely queued request: dequeue and fail all of its parts. */
2920 list_del(&slot->queue_node);
2921 mrq->cmd->error = -ENOMEDIUM;
2923 mrq->data->error = -ENOMEDIUM;
2925 mrq->stop->error = -ENOMEDIUM;
2927 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
2928 mrq->cmd->opcode, mmc_hostname(mmc));
/* Drop the lock around the completion callback. */
2930 spin_unlock(&host->lock);
2931 mmc_request_done(slot->mmc, mrq);
2932 spin_lock(&host->lock);
2936 /* Power down slot */
2938 /* Clear down the FIFO */
2939 dw_mci_fifo_reset(host);
2940 #ifdef CONFIG_MMC_DW_IDMAC
/* rk3036/rk312x use an external DMAC - no IDMAC to reset there. */
2941 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
2942 dw_mci_idmac_reset(host);
2947 spin_unlock_bh(&host->lock);
/* Re-sample in case the card state flipped again meanwhile. */
2949 present = dw_mci_get_cd(mmc);
2952 mmc_detect_change(slot->mmc,
2953 msecs_to_jiffies(host->pdata->detect_delay_ms));
2958 /* given a slot id, find out the device node representing that slot */
/* Matches a child node whose "reg" property equals @slot. */
2959 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2961 struct device_node *np;
2965 if (!dev || !dev->of_node)
2968 for_each_child_of_node(dev->of_node, np) {
2969 addr = of_get_property(np, "reg", &len);
/* Skip children without a sane (at least one cell) "reg" value. */
2970 if (!addr || (len < sizeof(int)))
2972 if (be32_to_cpup(addr) == slot)
/* Table mapping per-slot DT property names to DW_MCI slot quirk bits. */
2978 static struct dw_mci_of_slot_quirks {
2981 } of_slot_quirks[] = {
2983 .quirk = "disable-wp",
2984 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/* Collect the quirk bits whose DT properties are present on this slot. */
2988 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2990 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2995 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2996 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2997 quirks |= of_slot_quirks[idx].id;
3002 /* find out bus-width for a given slot */
3003 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
/* Rockchip change: read "bus-width" from the controller node itself
 * rather than a per-slot child node. */
3005 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3011 if (of_property_read_u32(np, "bus-width", &bus_wd))
3012 dev_err(dev, "bus-width property not found, assuming width"
3018 /* find the pwr-en gpio for a given slot; or -1 if none specified */
3019 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3021 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3027 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3029 /* Having a missing entry is valid; return silently */
3030 if (!gpio_is_valid(gpio))
3033 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3034 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/* Drive low initially; active level presumably enables power -
 * NOTE(review): polarity not visible here, confirm against board DT. */
3038 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3044 /* find the write protect gpio for a given slot; or -1 if none specified */
3045 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3047 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3053 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3055 /* Having a missing entry is valid; return silently */
3056 if (!gpio_is_valid(gpio))
3059 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3060 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3067 /* find the cd gpio for a given slot */
/* Registers the card-detect GPIO with the MMC core slot-gpio helper. */
3068 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3069 struct mmc_host *mmc)
3071 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3077 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3079 /* Having a missing entry is valid; return silently */
3080 if (!gpio_is_valid(gpio))
3083 if (mmc_gpio_request_cd(mmc, gpio, 0))
3084 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3086 #else /* CONFIG_OF */
/* No-op fallbacks used when the kernel is built without devicetree. */
3087 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3091 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3095 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3099 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3103 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3104 struct mmc_host *mmc)
3108 #endif /* CONFIG_OF */
/*
 * dw_mci_init_slot - allocate and register the mmc_host for slot @id:
 * reads DT/platform capabilities, configures frequency limits, OCR mask,
 * block-size limits, power/WP GPIOs and the vmmc regulator, then calls
 * mmc_add_host() and applies the default/idle pinctrl state.
 */
3110 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3112 struct mmc_host *mmc;
3113 struct dw_mci_slot *slot;
3114 const struct dw_mci_drv_data *drv_data = host->drv_data;
3119 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3123 slot = mmc_priv(mmc);
3127 host->slot[id] = slot;
3130 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3132 mmc->ops = &dw_mci_ops;
/* Frequency bounds: DT "clock-freq-min-max" overrides the defaults. */
3134 if (of_property_read_u32_array(host->dev->of_node,
3135 "clock-freq-min-max", freq, 2)) {
3136 mmc->f_min = DW_MCI_FREQ_MIN;
3137 mmc->f_max = DW_MCI_FREQ_MAX;
3139 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
3140 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3142 mmc->f_min = freq[0];
3143 mmc->f_max = freq[1];
3145 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
3146 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3149 if(strstr("mmc0",mmc_hostname(mmc)))
3150 printk("Line%d..%s: The rk_sdmmc %s",__LINE__, __FUNCTION__,RK_SDMMC_DRIVER_VERSION);
/* Rockchip DT flags restrict each controller to one card type. */
3152 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3153 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3154 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3155 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3156 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3157 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3159 if (host->pdata->get_ocr)
3160 mmc->ocr_avail = host->pdata->get_ocr(id);
/* Default OCR: advertise the full 1.65-3.6V range. */
3163 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3164 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3165 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3166 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3170 * Start with slot power disabled, it will be enabled when a card
3173 if (host->pdata->setpower)
3174 host->pdata->setpower(id, 0);
3176 if (host->pdata->caps)
3177 mmc->caps = host->pdata->caps;
3179 if (host->pdata->pm_caps)
3180 mmc->pm_caps = host->pdata->pm_caps;
/* Controller index: "mshc" DT alias, else the platform-device id. */
3182 if (host->dev->of_node) {
3183 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3187 ctrl_id = to_platform_device(host->dev)->id;
3189 if (drv_data && drv_data->caps)
3190 mmc->caps |= drv_data->caps[ctrl_id];
3191 if (drv_data && drv_data->hold_reg_flag)
3192 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3194 /* set the compatibility of driver. */
3195 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3196 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3198 if (host->pdata->caps2)
3199 mmc->caps2 = host->pdata->caps2;
/* Bus width: platform callback first, then DT, then default. */
3201 if (host->pdata->get_bus_wd)
3202 bus_width = host->pdata->get_bus_wd(slot->id);
3203 else if (host->dev->of_node)
3204 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3208 switch (bus_width) {
3210 mmc->caps |= MMC_CAP_8_BIT_DATA;
3212 mmc->caps |= MMC_CAP_4_BIT_DATA;
/* Optional capability / power-management DT properties. */
3215 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3216 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3217 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3218 mmc->caps |= MMC_CAP_SDIO_IRQ;
3219 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3220 mmc->caps |= MMC_CAP_HW_RESET;
3221 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3222 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3223 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3224 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3225 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3226 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3227 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3228 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3230 /*Assign pm_caps pass to pm_flags*/
3231 mmc->pm_flags = mmc->pm_caps;
/* Block-layer limits: platform data wins, else DMA-mode defaults. */
3233 if (host->pdata->blk_settings) {
3234 mmc->max_segs = host->pdata->blk_settings->max_segs;
3235 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3236 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3237 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3238 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3240 /* Useful defaults if platform data is unset. */
3241 #ifdef CONFIG_MMC_DW_IDMAC
3242 mmc->max_segs = host->ring_size;
3243 mmc->max_blk_size = 65536;
3244 mmc->max_blk_count = host->ring_size;
3245 mmc->max_seg_size = 0x1000;
3246 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3247 if(cpu_is_rk3036() || cpu_is_rk312x()){
3248 /* fixup for external dmac setting */
3250 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3251 mmc->max_blk_count = 512;
3252 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3253 mmc->max_seg_size = mmc->max_req_size;
/* PIO-mode limits (no IDMAC configured). */
3257 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3258 mmc->max_blk_count = 512;
3259 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3260 mmc->max_seg_size = mmc->max_req_size;
3261 #endif /* CONFIG_MMC_DW_IDMAC */
3265 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3267 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* Only the SD controller gets a vmmc supply here. */
3272 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3273 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3277 if (IS_ERR(host->vmmc)) {
3278 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3281 ret = regulator_enable(host->vmmc);
3284 "failed to enable regulator: %d\n", ret);
3291 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3293 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3294 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3296 ret = mmc_add_host(mmc);
3300 /* Pinctrl set default iomux state to fucntion port.
3301 * Fixme: DON'T TOUCH EMMC SETTING!
3303 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3305 host->pinctrl = devm_pinctrl_get(host->dev);
3306 if(IS_ERR(host->pinctrl)){
3307 printk("%s: Warning : No pinctrl used!\n",mmc_hostname(host->mmc));
3309 host->pins_idle= pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_IDLE);
/* NOTE(review): checks pins_default after looking up pins_idle -
 * looks like a copy/paste slip in the original; left untouched. */
3310 if(IS_ERR(host->pins_default)){
3311 printk("%s: Warning : No IDLE pinctrl matched!\n", mmc_hostname(host->mmc));
3315 if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3316 printk("%s: Warning : Idle pinctrl setting failed!\n", mmc_hostname(host->mmc));
3319 host->pins_default = pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_DEFAULT);
3320 if(IS_ERR(host->pins_default)){
3321 printk("%s: Warning : No default pinctrl matched!\n", mmc_hostname(host->mmc));
3325 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3326 printk("%s: Warning : Default pinctrl setting failed!\n", mmc_hostname(host->mmc));
3332 #if defined(CONFIG_DEBUG_FS)
3333 dw_mci_init_debugfs(slot);
3336 /* Card initially undetected */
3337 slot->last_detect_state = 1;
/*
 * dw_mci_cleanup_slot - undo dw_mci_init_slot(): run the platform exit
 * hook, unregister and free the mmc_host, and clear the slot pointer.
 */
3346 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3348 /* Shutdown detect IRQ */
3349 if (slot->host->pdata->exit)
3350 slot->host->pdata->exit(id);
3352 /* Debugfs stuff is cleaned up by mmc core */
3353 mmc_remove_host(slot->mmc);
3354 slot->host->slot[id] = NULL;
3355 mmc_free_host(slot->mmc);
/*
 * dw_mci_init_dma - allocate the descriptor page and select/initialize
 * the DMA backend: external edmac on rk3036/rk312x, internal IDMAC
 * otherwise; falls back to PIO if DMA setup is incomplete.
 */
3358 static void dw_mci_init_dma(struct dw_mci *host)
3360 /* Alloc memory for sg translation */
3361 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3362 &host->sg_dma, GFP_KERNEL);
3363 if (!host->sg_cpu) {
3364 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3369 /* Determine which DMA interface to use */
3370 #if defined(CONFIG_MMC_DW_IDMAC)
3371 if(cpu_is_rk3036() || cpu_is_rk312x()){
3372 host->dma_ops = &dw_mci_edmac_ops;
3373 dev_info(host->dev, "Using external DMA controller.\n");
3375 host->dma_ops = &dw_mci_idmac_ops;
3376 dev_info(host->dev, "Using internal DMA controller.\n");
/* The backend must provide the full ops set or DMA is unusable. */
3383 if (host->dma_ops->init && host->dma_ops->start &&
3384 host->dma_ops->stop && host->dma_ops->cleanup) {
3385 if (host->dma_ops->init(host)) {
3386 dev_err(host->dev, "%s: Unable to initialize "
3387 "DMA Controller.\n", __func__);
3391 dev_err(host->dev, "DMA initialization not found.\n");
3399 dev_info(host->dev, "Using PIO mode.\n");
/*
 * dw_mci_ctrl_reset - assert the given reset bit(s) in CTRL and poll up
 * to 500 ms for the hardware to clear them. Returns false on timeout.
 */
3404 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3406 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3409 ctrl = mci_readl(host, CTRL);
3411 mci_writel(host, CTRL, ctrl);
3413 /* wait till resets clear */
3415 ctrl = mci_readl(host, CTRL);
3416 if (!(ctrl & reset))
3418 } while (time_before(jiffies, timeout));
3421 "Timeout resetting block (ctrl reset %#x)\n",
/* Reset only the data FIFO; stop the sg iterator first because the
 * reset raises an interrupt that expects host->sg to be quiescent. */
3427 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3430 * Reseting generates a block interrupt, hence setting
3431 * the scatter-gather pointer to NULL.
3434 sg_miter_stop(&host->sg_miter);
3438 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full controller reset: FIFO, DMA and (elided here) controller bits. */
3441 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3443 return dw_mci_ctrl_reset(host,
3444 SDMMC_CTRL_FIFO_RESET |
3446 SDMMC_CTRL_DMA_RESET);
/* Table mapping controller-level DT property names to host quirk bits. */
3451 static struct dw_mci_of_quirks {
3456 .quirk = "broken-cd",
3457 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * dw_mci_parse_dt - build a dw_mci_board from the controller's DT node:
 * slot count, quirk flags, FIFO depth, detect delay, bus clock, and the
 * Rockchip-specific capability properties. Returns ERR_PTR on failure.
 */
3461 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3463 struct dw_mci_board *pdata;
3464 struct device *dev = host->dev;
3465 struct device_node *np = dev->of_node;
3466 const struct dw_mci_drv_data *drv_data = host->drv_data;
3468 u32 clock_frequency;
3470 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3472 dev_err(dev, "could not allocate memory for pdata\n");
3473 return ERR_PTR(-ENOMEM);
3476 /* find out number of slots supported */
3477 if (of_property_read_u32(dev->of_node, "num-slots",
3478 &pdata->num_slots)) {
3479 dev_info(dev, "num-slots property not found, "
3480 "assuming 1 slot is available\n");
3481 pdata->num_slots = 1;
/* Translate present DT quirk properties into quirk bits. */
3485 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3486 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3487 pdata->quirks |= of_quirks[idx].id;
3490 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3491 dev_info(dev, "fifo-depth property not found, using "
3492 "value of FIFOTH register as default\n")
3494 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3496 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3497 pdata->bus_hz = clock_frequency;
/* Give the SoC-specific driver a chance to parse its own properties. */
3499 if (drv_data && drv_data->parse_dt) {
3500 ret = drv_data->parse_dt(host);
3502 return ERR_PTR(ret);
3505 if (of_find_property(np, "keep-power-in-suspend", NULL))
3506 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3508 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3509 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3511 if (of_find_property(np, "supports-highspeed", NULL))
3512 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3514 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3515 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3517 if (of_find_property(np, "supports-DDR_MODE", NULL))
3518 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3520 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3521 pdata->caps2 |= MMC_CAP2_HS200;
3523 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3524 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3526 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3527 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3529 if (of_get_property(np, "cd-inverted", NULL))
3530 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3531 if (of_get_property(np, "bootpart-no-access", NULL))
3532 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3537 #else /* CONFIG_OF */
/* Without devicetree there is nothing to parse; force an error. */
3538 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3540 return ERR_PTR(-EINVAL);
3542 #endif /* CONFIG_OF */
/*
 * dw_mci_dealwith_timeout - recovery path for a data transfer that never
 * completed (DTO watchdog fired): fake a data-CRC error, reset the whole
 * controller, reprogram FIFO/interrupt state, restore bus settings on
 * MMC_PM_KEEP_POWER slots, and reschedule the state-machine tasklet.
 */
3544 static void dw_mci_dealwith_timeout(struct dw_mci *host)
3548 switch(host->state){
3551 case STATE_SENDING_DATA:
3552 case STATE_DATA_BUSY:
/* Report the stall upward as a CRC/end-bit error. */
3553 host->data_status |= (SDMMC_INT_DCRC|SDMMC_INT_EBE);
3554 mci_writel(host, RINTSTS, SDMMC_INT_DRTO); // clear interrupt
3555 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3556 host->state = STATE_DATA_BUSY;
3557 if (!dw_mci_ctrl_all_reset(host)) {
3562 /* NO requirement to reclaim slave chn using external dmac */
3563 #ifdef CONFIG_MMC_DW_IDMAC
3564 if(!(cpu_is_rk3036() || cpu_is_rk312x()))
3565 if (host->use_dma && host->dma_ops->init)
3566 host->dma_ops->init(host);
3570 * Restore the initial value at FIFOTH register
3571 * And Invalidate the prev_blksz with zero
3573 mci_writel(host, FIFOTH, host->fifoth_val);
3574 host->prev_blksz = 0;
3575 mci_writel(host, TMOUT, 0xFFFFFFFF);
/* Re-arm the interrupt mask exactly as probe did. */
3576 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3577 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
3578 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
3579 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3580 regs |= SDMMC_INT_CD;
3581 mci_writel(host, INTMASK, regs);
3582 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3583 for (i = 0; i < host->num_slots; i++) {
3584 struct dw_mci_slot *slot = host->slot[i];
/* Slots that keep power across suspend need ios/bus re-setup. */
3587 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3588 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3589 dw_mci_setup_bus(slot, true);
3592 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3593 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_dto_timeout - dto_timer callback: the expected data-over
 * interrupt never arrived. With the IRQ line disabled, mark an end-bit
 * error and run the full timeout-recovery sequence.
 */
3599 static void dw_mci_dto_timeout(unsigned long host_data)
3601 struct dw_mci *host = (struct dw_mci *) host_data;
3603 disable_irq(host->irq);
3605 host->data_status = SDMMC_INT_EBE;
3606 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3607 dw_mci_dealwith_timeout(host);
3609 enable_irq(host->irq);
/*
 * dw_mci_probe - bring up one DW MMC controller: parse DT, enable the
 * AHB and card clocks, detect host data width and FIFO depth, reset the
 * block, set up DMA, the tasklet, the card-detect workqueue and the IRQ,
 * initialize every slot, and finally unmask the controller interrupts.
 * On failure, tears down in reverse order via the (elided) error labels.
 */
3611 int dw_mci_probe(struct dw_mci *host)
3613 const struct dw_mci_drv_data *drv_data = host->drv_data;
3614 int width, i, ret = 0;
3620 host->pdata = dw_mci_parse_dt(host);
3621 if (IS_ERR(host->pdata)) {
3622 dev_err(host->dev, "platform data not available\n");
3627 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3629 "Platform data must supply select_slot function\n");
3634 * In 2.40a spec, Data offset is changed.
3635 * Need to check the version-id and set data-offset for DATA register.
3637 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3638 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3640 if (host->verid < DW_MMC_240A)
3641 host->data_offset = DATA_OFFSET;
3643 host->data_offset = DATA_240A_OFFSET;
/* Bus (AHB) clock must be running before any register access. */
3646 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3647 if (IS_ERR(host->hclk_mmc)) {
3648 dev_err(host->dev, "failed to get hclk_mmc\n");
3649 ret = PTR_ERR(host->hclk_mmc);
3653 clk_prepare_enable(host->hclk_mmc);
3656 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3657 if (IS_ERR(host->clk_mmc)) {
3658 dev_err(host->dev, "failed to get clk mmc_per\n");
3659 ret = PTR_ERR(host->clk_mmc);
3663 host->bus_hz = host->pdata->bus_hz;
3664 if (!host->bus_hz) {
3665 dev_err(host->dev,"Platform data must supply bus speed\n");
3670 if (host->verid < DW_MMC_240A)
3671 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3673 //rockchip: fix divider 2 in clksum before controlller
3674 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3677 dev_err(host->dev, "failed to set clk mmc\n");
3680 clk_prepare_enable(host->clk_mmc);
3682 if (drv_data && drv_data->setup_clock) {
3683 ret = drv_data->setup_clock(host);
3686 "implementation specific clock setup failed\n");
3691 host->quirks = host->pdata->quirks;
3692 host->irq_state = true;
3693 host->set_speed = 0;
3695 host->svi_flags = 0;
3697 spin_lock_init(&host->lock);
3698 INIT_LIST_HEAD(&host->queue);
3701 * Get the host data width - this assumes that HCON has been set with
3702 * the correct values.
3704 i = (mci_readl(host, HCON) >> 7) & 0x7;
/* HCON data-width field selects the 16/32/64-bit FIFO accessors. */
3706 host->push_data = dw_mci_push_data16;
3707 host->pull_data = dw_mci_pull_data16;
3709 host->data_shift = 1;
3710 } else if (i == 2) {
3711 host->push_data = dw_mci_push_data64;
3712 host->pull_data = dw_mci_pull_data64;
3714 host->data_shift = 3;
3716 /* Check for a reserved value, and warn if it is */
3718 "HCON reports a reserved host data width!\n"
3719 "Defaulting to 32-bit access.\n");
3720 host->push_data = dw_mci_push_data32;
3721 host->pull_data = dw_mci_pull_data32;
3723 host->data_shift = 2;
3726 /* Reset all blocks */
3727 if (!dw_mci_ctrl_all_reset(host))
3730 host->dma_ops = host->pdata->dma_ops;
3731 dw_mci_init_dma(host);
3733 /* Clear the interrupts for the host controller */
3734 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3735 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3737 /* Put in max timeout */
3738 mci_writel(host, TMOUT, 0xFFFFFFFF);
3741 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3742 * Tx Mark = fifo_size / 2 DMA Size = 8
3744 if (!host->pdata->fifo_depth) {
3746 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3747 * have been overwritten by the bootloader, just like we're
3748 * about to do, so if you know the value for your hardware, you
3749 * should put it in the platform data.
3751 fifo_size = mci_readl(host, FIFOTH);
3752 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3754 fifo_size = host->pdata->fifo_depth;
3756 host->fifo_depth = fifo_size;
3758 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3759 mci_writel(host, FIFOTH, host->fifoth_val);
3761 /* disable clock to CIU */
3762 mci_writel(host, CLKENA, 0);
3763 mci_writel(host, CLKSRC, 0);
3765 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3766 host->card_workqueue = alloc_workqueue("dw-mci-card",
3767 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
3768 if (!host->card_workqueue) {
3772 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
3773 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3774 host->irq_flags, "dw-mci", host);
/* Slot count: platform data overrides the HCON-reported value. */
3778 if (host->pdata->num_slots)
3779 host->num_slots = host->pdata->num_slots;
3781 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
3783 setup_timer(&host->dto_timer, dw_mci_dto_timeout, (unsigned long)host);
3784 /* We need at least one slot to succeed */
3785 for (i = 0; i < host->num_slots; i++) {
3786 ret = dw_mci_init_slot(host, i);
3788 dev_dbg(host->dev, "slot %d init failed\n", i);
3794 * Enable interrupts for command done, data over, data empty, card det,
3795 * receive ready and error such as transmit, receive timeout, crc error
3797 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3798 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
3799 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect IRQ only for removable (non-SDIO, non-eMMC) hosts. */
3800 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3801 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3802 regs |= SDMMC_INT_CD;
3804 mci_writel(host, INTMASK, regs);
3806 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
3808 dev_info(host->dev, "DW MMC controller at irq %d, "
3809 "%d bit host data width, "
3811 host->irq, width, fifo_size);
3814 dev_info(host->dev, "%d slots initialized\n", init_slots);
3816 dev_dbg(host->dev, "attempted to initialize %d slots, "
3817 "but failed on all\n", host->num_slots);
3822 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
3823 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* Error unwind (labels elided in this view): workqueue, DMA, regulator,
 * then clocks, in reverse acquisition order. */
3828 destroy_workqueue(host->card_workqueue);
3831 if (host->use_dma && host->dma_ops->exit)
3832 host->dma_ops->exit(host);
3835 regulator_disable(host->vmmc);
3836 regulator_put(host->vmmc);
3840 if (!IS_ERR(host->clk_mmc))
3841 clk_disable_unprepare(host->clk_mmc);
3843 if (!IS_ERR(host->hclk_mmc))
3844 clk_disable_unprepare(host->hclk_mmc);
3848 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - tear down a DW MMC host controller instance.
 *
 * Reverse of dw_mci_probe: stop the data-over timeout timer, mask and
 * clear all controller interrupts, clean up every initialized slot,
 * gate the clock to the CIU, and release the card workqueue, DMA,
 * regulator and clock resources acquired at probe time.
 *
 * NOTE(review): this excerpt has lines elided (e.g. the declaration of
 * `i` and several closing braces are not visible); comments below only
 * describe the statements that are visible here.
 */
3850 void dw_mci_remove(struct dw_mci *host)
/* Make sure the DTO timeout timer cannot fire during/after teardown. */
3853 	del_timer_sync(&host->dto_timer);
/* Ack any pending raw interrupt status, then mask everything. */
3855 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3856 	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
/* Unregister each mmc slot that was set up in probe. */
3858 	for(i = 0; i < host->num_slots; i++){
3859 		dev_dbg(host->dev, "remove slot %d\n", i);
3861 			dw_mci_cleanup_slot(host->slot[i], i);
3864 	/* disable clock to CIU */
3865 	mci_writel(host, CLKENA, 0);
3866 	mci_writel(host, CLKSRC, 0);
/* Release the card-detect workqueue allocated in probe. */
3868 	destroy_workqueue(host->card_workqueue);
/* Let the DMA backend (IDMAC or external) free its resources. */
3870 	if(host->use_dma && host->dma_ops->exit)
3871 		host->dma_ops->exit(host);
/* Drop the vmmc supply reference taken at probe. */
3874 		regulator_disable(host->vmmc);
3875 		regulator_put(host->vmmc);
/* Ungate the card and bus clocks only if they were valid handles. */
3877 	if(!IS_ERR(host->clk_mmc))
3878 		clk_disable_unprepare(host->clk_mmc);
3880 	if(!IS_ERR(host->hclk_mmc))
3881 		clk_disable_unprepare(host->hclk_mmc);
3883 EXPORT_SYMBOL(dw_mci_remove);
3887 #ifdef CONFIG_PM_SLEEP
3889 * TODO: we should probably disable the clock to the card in the suspend path.
/*
 * dw_mci_suspend - prepare the host for system suspend.
 *
 * Disables the vmmc supply and the DMA backend, and — for the SD/MMC
 * (removable-card) controller only — freezes card rescan, quiesces the
 * controller interrupts, switches the pins to their idle pinctrl state
 * and arms the card-detect GPIO as a wakeup source so insertion/removal
 * can wake the system.
 *
 * NOTE(review): lines are elided in this excerpt (return statement and
 * some closing braces are not visible); comments cover only what is shown.
 */
3891 int dw_mci_suspend(struct dw_mci *host)
/* Cut card power; restored in dw_mci_resume via regulator_enable(). */
3894 		regulator_disable(host->vmmc);
/* Tear down DMA state; it is re-initialized on resume. */
3896 	if(host->use_dma && host->dma_ops->exit)
3897 		host->dma_ops->exit(host);
3899 	/*only for sdmmc controller*/
3900 	if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD){
/* Block the core's periodic rescan while suspended. */
3901 		host->mmc->rescan_disable = 1;
/* If a detect work was pending, cancel it and release its wakelock. */
3902 		if(cancel_delayed_work_sync(&host->mmc->detect))
3903 			wake_unlock(&host->mmc->detect_wake_lock);
3905 		disable_irq(host->irq);
/* Park the MMC pins in their low-power "idle" pinctrl state. */
3906 		if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3907 			MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
3908 						mmc_hostname(host->mmc));
/* Re-acquire the card-detect GPIO so it can act as a wake source. */
3909 		dw_mci_of_get_cd_gpio(host->dev,0,host->mmc);
/* Ack pending status, mask all interrupts, disable the controller. */
3910 		mci_writel(host, RINTSTS, 0xFFFFFFFF);
3911 		mci_writel(host, INTMASK, 0x00);
3912 		mci_writel(host, CTRL, 0x00);
/* Allow the card-detect interrupt to wake the system from suspend. */
3913 		enable_irq_wake(host->mmc->slot.cd_irq);
3917 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - restore the host after system suspend.
 *
 * Mirror of dw_mci_suspend: for the SD controller it disarms the
 * card-detect wake source, restores the default pinctrl state and
 * re-enables rescan; then (for all controllers) it re-enables the vmmc
 * supply, resets the controller, re-initializes DMA, restores the FIFO
 * threshold and interrupt masks, and re-applies ios/bus settings to any
 * slot that kept power across suspend (MMC_PM_KEEP_POWER).
 *
 * NOTE(review): this excerpt has lines elided (some if/else bodies,
 * braces and the return path are missing); comments below describe only
 * the visible statements.
 */
3919 int dw_mci_resume(struct dw_mci *host)
3921 	int i, ret, retry_cnt = 0;
3923 	struct dw_mci_slot *slot;
/* For SDIO (non-removable function card): nothing to restore if the
 * card was not marked present at suspend time. */
3925 	if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO){
3926 		slot = mmc_priv(host->mmc);
3928 		if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
3931 	/*only for sdmmc controller*/
3932 	if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
/* Undo the suspend-time wake arming and CD-GPIO claim. */
3933 		disable_irq_wake(host->mmc->slot.cd_irq);
3934 		mmc_gpio_free_cd(host->mmc);
/* Put the MMC pins back into their functional "default" state. */
3935 		if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3936 			MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
3937 						mmc_hostname(host->mmc));
/* Allow the core to rescan for card insertion/removal again. */
3938 		host->mmc->rescan_disable = 0;
/* Per-SoC GRF tweak: write-enabled clear of a SOC_CON0 routing bit
 * (bit 12 on RK3288, bit 11 on RK3036; upper halfword is write mask). */
3941 			grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
3943 		else if(cpu_is_rk3036())
3944 			grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
3945 		else if(cpu_is_rk3126())
/* Restore card power that dw_mci_suspend() removed. */
3952 		ret = regulator_enable(host->vmmc);
3955 				"failed to enable regulator: %d\n", ret);
/* Full controller reset; failure path is elided in this excerpt. */
3960 	if(!dw_mci_ctrl_all_reset(host)){
/* Re-create DMA state torn down in suspend. */
3965 	if(host->use_dma && host->dma_ops->init)
3966 		host->dma_ops->init(host);
3969 	 * Restore the initial value at FIFOTH register
3970 	 * And Invalidate the prev_blksz with zero
3972 	mci_writel(host, FIFOTH, host->fifoth_val);
3973 	host->prev_blksz = 0;
3974 	/* Put in max timeout */
3975 	mci_writel(host, TMOUT, 0xFFFFFFFF);
/* Ack stale status, then unmask the working interrupt set; card-detect
 * is only unmasked for non-SDIO controllers. */
3977 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3978 	regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
3980 	if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3981 		regs |= SDMMC_INT_CD;
3982 	mci_writel(host, INTMASK, regs);
3983 	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3984 	/*only for sdmmc controller*/
/* Re-enable the host IRQ disabled at suspend, first pass only. */
3985 	if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
3986 		enable_irq(host->irq);
/* Re-apply ios and bus clocking for slots that kept power. */
3989 	for(i = 0; i < host->num_slots; i++){
3990 		struct dw_mci_slot *slot = host->slot[i];
3993 		if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
3994 			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3995 			dw_mci_setup_bus(slot, true);
4001 EXPORT_SYMBOL(dw_mci_resume);
4002 #endif /* CONFIG_PM_SLEEP */
/*
 * Module init/exit. Registration of the platform driver itself happens
 * elsewhere (per-board glue); init only announces the driver.
 *
 * NOTE(review): bodies are partially elided in this excerpt (no visible
 * `return 0;` in dw_mci_init and no visible body for dw_mci_exit).
 */
4004 static int __init dw_mci_init(void)
4006 	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
4010 static void __exit dw_mci_exit(void)
4014 module_init(dw_mci_init);
4015 module_exit(dw_mci_exit);
/*
 * Module metadata.
 *
 * Fix: the last MODULE_AUTHOR string contained a mis-encoded fullwidth
 * comma (GBK byte sequence rendered as "£¬") and stray whitespace inside
 * the email address brackets; replaced with plain ASCII punctuation so
 * the modinfo output is valid 7-bit text.
 */
4017 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4018 MODULE_AUTHOR("NXP Semiconductor VietNam");
4019 MODULE_AUTHOR("Imagination Technologies Ltd");
4020 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
4021 MODULE_AUTHOR("Rockchip Electronics, Bangwang Xie <xbw@rock-chips.com>");
4022 MODULE_LICENSE("GPL v2");