2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/dmaengine.h>
22 #include <linux/err.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/ioport.h>
26 #include <linux/module.h>
27 #include <linux/platform_device.h>
28 #include <linux/seq_file.h>
29 #include <linux/slab.h>
30 #include <linux/stat.h>
31 #include <linux/delay.h>
32 #include <linux/irq.h>
33 #include <linux/mmc/host.h>
34 #include <linux/mmc/mmc.h>
35 #include <linux/mmc/sd.h>
36 #include <linux/mmc/sdio.h>
37 #include <linux/mmc/rk_mmc.h>
38 #include <linux/bitops.h>
39 #include <linux/regulator/consumer.h>
40 #include <linux/workqueue.h>
42 #include <linux/of_gpio.h>
43 #include <linux/mmc/slot-gpio.h>
44 #include <linux/clk-private.h>
45 #include <linux/rockchip/cpu.h>
48 #include "rk_sdmmc_dbg.h"
49 #include <linux/regulator/rockchip_io_vol_domain.h>
50 #include "../../clk/rockchip/clk-ops.h"
/* Driver version tag (printed in boot/debug logs). */
52 #define RK_SDMMC_DRIVER_VERSION "Ver 1.12 2014-07-08"
54 /* Common flag combinations */
/* NOTE(review): this listing elides lines (e.g. the tails of the error-flag
 * macros, lines 57/59 of the original); fragments are kept verbatim. */
55 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
56 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
58 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
60 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
61 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
/* Values stored in host->dir_status to tag the transfer direction. */
62 #define DW_MCI_SEND_STATUS 1
63 #define DW_MCI_RECV_STATUS 2
/* Transfers smaller than this many bytes fall back to PIO (see
 * dw_mci_pre_dma_transfer). */
64 #define DW_MCI_DMA_THRESHOLD 16
66 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
67 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
69 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
/* Per-card-type data timeouts, in milliseconds (used by dw_mci_wait_unbusy). */
70 #define SDMMC_DATA_TIMEOUT_SD 500
71 #define SDMMC_DATA_TIMEOUT_SDIO 250
72 #define SDMMC_DATA_TIMEOUT_EMMC 2500
74 #define SDMMC_CMD_RTO_MAX_HOLD 200
/* Max time (ms) to poll the STATUS busy bits before giving up (mci_send_cmd). */
75 #define SDMMC_WAIT_FOR_UNBUSY 2500
77 #ifdef CONFIG_MMC_DW_IDMAC
/* Union of all IDMAC interrupt-status bits, used to ack IDSTS.
 * NOTE(review): the tail of this macro is elided in this listing. */
78 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
79 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
80 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
/* Hardware descriptor for the internal DMA controller (IDMAC).
 * NOTE(review): the "struct idmac_desc {" opener and closing brace are
 * elided in this listing; des0..des3 below are its fields. */
84 u32 des0; /* Control Descriptor */
85 #define IDMAC_DES0_DIC BIT(1)
86 #define IDMAC_DES0_LD BIT(2)
87 #define IDMAC_DES0_FD BIT(3)
88 #define IDMAC_DES0_CH BIT(4)
89 #define IDMAC_DES0_ER BIT(5)
90 #define IDMAC_DES0_CES BIT(30)
91 #define IDMAC_DES0_OWN BIT(31)
93 u32 des1; /* Buffer sizes */
/* Write the 13-bit buffer-1 size field of des1, preserving the
 * buffer-2 size bits (mask 0x03ffe000). */
94 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
95 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
97 u32 des2; /* buffer 1 physical address */
99 u32 des3; /* buffer 2 physical address */
101 #endif /* CONFIG_MMC_DW_IDMAC */
/* Standard tuning block pattern for a 4-bit bus (sent by the card during
 * sample-phase tuning). NOTE(review): the closing "};" is elided here. */
103 static const u8 tuning_blk_pattern_4bit[] = {
104 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
105 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
106 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
107 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
108 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
109 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
110 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
111 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard tuning block pattern for an 8-bit bus.
 * NOTE(review): the closing "};" is elided in this listing. */
114 static const u8 tuning_blk_pattern_8bit[] = {
115 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
116 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
117 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
118 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
119 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
120 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
121 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
122 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
123 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
124 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
125 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
126 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
127 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
128 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
129 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
130 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Forward declarations for reset / low-power helpers defined later in
 * the full file. */
133 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
134 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
135 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
136 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
138 /*printk the all register of current host*/
/* Debug helper: walk the dw_mci_regs name/offset table and dump each
 * register's current value.
 * NOTE(review): the table definition, the loop's advance (regs++), and the
 * return statement are elided in this listing. */
140 static int dw_mci_regs_printk(struct dw_mci *host)
142 struct sdmmc_reg *regs = dw_mci_regs;
144 while( regs->name != 0 ){
145 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
148 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
153 #if defined(CONFIG_DEBUG_FS)
154 static int dw_mci_req_show(struct seq_file *s, void *v)
156 struct dw_mci_slot *slot = s->private;
157 struct mmc_request *mrq;
158 struct mmc_command *cmd;
159 struct mmc_command *stop;
160 struct mmc_data *data;
162 /* Make sure we get a consistent snapshot */
163 spin_lock_bh(&slot->host->lock);
173 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
174 cmd->opcode, cmd->arg, cmd->flags,
175 cmd->resp[0], cmd->resp[1], cmd->resp[2],
176 cmd->resp[2], cmd->error);
178 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
179 data->bytes_xfered, data->blocks,
180 data->blksz, data->flags, data->error);
183 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
184 stop->opcode, stop->arg, stop->flags,
185 stop->resp[0], stop->resp[1], stop->resp[2],
186 stop->resp[2], stop->error);
189 spin_unlock_bh(&slot->host->lock);
/* debugfs open hook for the "req" file (seq_file single_open). */
194 static int dw_mci_req_open(struct inode *inode, struct file *file)
196 return single_open(file, dw_mci_req_show, inode->i_private);
199 static const struct file_operations dw_mci_req_fops = {
200 .owner = THIS_MODULE,
201 .open = dw_mci_req_open,
204 .release = single_release,
/* debugfs "regs" file.
 * NOTE(review): as written this prints the SDMMC_* register *offset*
 * constants, not mci_readl() values — mirrors upstream dw_mmc of this
 * era; confirm intent before "fixing". */
207 static int dw_mci_regs_show(struct seq_file *s, void *v)
209 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
210 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
211 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
212 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
213 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
214 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open hook for the "regs" file. */
219 static int dw_mci_regs_open(struct inode *inode, struct file *file)
221 return single_open(file, dw_mci_regs_show, inode->i_private);
224 static const struct file_operations dw_mci_regs_fops = {
225 .owner = THIS_MODULE,
226 .open = dw_mci_regs_open,
229 .release = single_release,
/* Create the per-slot debugfs tree under the mmc host's debugfs root.
 * NOTE(review): the failure-check branches between node creations are
 * elided in this listing; on any failure it jumps to the dev_err below. */
232 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
234 struct mmc_host *mmc = slot->mmc;
235 struct dw_mci *host = slot->host;
239 root = mmc->debugfs_root;
243 node = debugfs_create_file("regs", S_IRUSR, root, host,
248 node = debugfs_create_file("req", S_IRUSR, root, slot,
253 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
257 node = debugfs_create_x32("pending_events", S_IRUSR, root,
258 (u32 *)&host->pending_events);
262 node = debugfs_create_x32("completed_events", S_IRUSR, root,
263 (u32 *)&host->completed_events);
270 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
272 #endif /* defined(CONFIG_DEBUG_FS) */
274 static void dw_mci_set_timeout(struct dw_mci *host)
276 /* timeout (maximum) */
277 mci_writel(host, TMOUT, 0xffffffff);
/* Translate an mmc_command into the controller's CMD-register bit
 * encoding (response/CRC/data/stream/write flags), then give the SoC
 * glue (drv_data->prepare_command) a chance to adjust it.
 * NOTE(review): the cmdr initialisation from cmd->opcode, the branch
 * that sets PRV_DAT_WAIT, the data = cmd->data assignment and the
 * return statement are elided in this listing. */
280 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
282 struct mmc_data *data;
283 struct dw_mci_slot *slot = mmc_priv(mmc);
284 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
286 cmd->error = -EINPROGRESS;
290 if (cmdr == MMC_STOP_TRANSMISSION)
291 cmdr |= SDMMC_CMD_STOP;
293 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
295 if (cmd->flags & MMC_RSP_PRESENT) {
296 /* We expect a response, so set this bit */
297 cmdr |= SDMMC_CMD_RESP_EXP;
298 if (cmd->flags & MMC_RSP_136)
299 cmdr |= SDMMC_CMD_RESP_LONG;
302 if (cmd->flags & MMC_RSP_CRC)
303 cmdr |= SDMMC_CMD_RESP_CRC;
307 cmdr |= SDMMC_CMD_DAT_EXP;
308 if (data->flags & MMC_DATA_STREAM)
309 cmdr |= SDMMC_CMD_STRM_MODE;
310 if (data->flags & MMC_DATA_WRITE)
311 cmdr |= SDMMC_CMD_DAT_WR;
314 if (drv_data && drv_data->prepare_command)
315 drv_data->prepare_command(slot->host, &cmdr);
/* Build a synthetic stop/abort command in host->stop_abort for the given
 * data command: CMD12 for block read/write, CMD52 (I/O abort) for
 * SD_IO_RW_EXTENDED. Returns the CMD-register encoding for it.
 * NOTE(review): the cmdr = cmd->opcode initialisation and the return
 * statement are elided in this listing. */
321 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
323 struct mmc_command *stop;
329 stop = &host->stop_abort;
331 memset(stop, 0, sizeof(struct mmc_command));
333 if (cmdr == MMC_READ_SINGLE_BLOCK ||
334 cmdr == MMC_READ_MULTIPLE_BLOCK ||
335 cmdr == MMC_WRITE_BLOCK ||
336 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
337 stop->opcode = MMC_STOP_TRANSMISSION;
339 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
340 } else if (cmdr == SD_IO_RW_EXTENDED) {
341 stop->opcode = SD_IO_RW_DIRECT;
/* CMD52 write to CCCR ABORT register, preserving the function number
 * bits of the original argument. */
342 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
343 ((cmd->arg >> 28) & 0x7);
344 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
349 cmdr = stop->opcode | SDMMC_CMD_STOP |
350 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/* Latch the command argument and fire the command (CMD register with
 * SDMMC_CMD_START). CMD11 (SD_SWITCH_VOLTAGE) additionally disables
 * low-power clock gating and sets the VOLT_SWITCH bit.
 * NOTE(review): wmb()/barrier lines and some braces are elided in this
 * listing. */
355 static void dw_mci_start_command(struct dw_mci *host,
356 struct mmc_command *cmd, u32 cmd_flags)
358 struct dw_mci_slot *slot = host->slot[0];
359 /*temporality fix slot[0] due to host->num_slots equal to 1*/
361 host->pre_cmd = host->cmd;
364 "start command: ARGR=0x%08x CMDR=0x%08x\n",
365 cmd->arg, cmd_flags);
367 if(SD_SWITCH_VOLTAGE == cmd->opcode){
368 /*confirm non-low-power mode*/
369 mci_writel(host, CMDARG, 0);
370 dw_mci_disable_low_power(slot);
372 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
373 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
375 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
378 mci_writel(host, CMDARG, cmd->arg);
381 /* fix the value to 1 in some Soc,for example RK3188. */
382 if(host->mmc->hold_reg_flag)
383 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
385 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
389 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
391 dw_mci_start_command(host, data->stop, host->stop_cmdr);
394 /* DMA interface functions */
395 static void dw_mci_stop_dma(struct dw_mci *host)
397 if (host->using_dma) {
398 host->dma_ops->stop(host);
399 host->dma_ops->cleanup(host);
402 /* Data transfer was stopped by the interrupt handler */
403 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
406 static int dw_mci_get_dma_dir(struct mmc_data *data)
408 if (data->flags & MMC_DATA_WRITE)
409 return DMA_TO_DEVICE;
411 return DMA_FROM_DEVICE;
414 #ifdef CONFIG_MMC_DW_IDMAC
/* Unmap the request's scatterlist unless the upper layer pre-mapped it
 * (data->host_cookie set by dw_mci_pre_req).
 * NOTE(review): the NULL-data guard and the sg/sg_len arguments of
 * dma_unmap_sg are elided in this listing. */
415 static void dw_mci_dma_cleanup(struct dw_mci *host)
417 struct mmc_data *data = host->data;
420 if (!data->host_cookie)
421 dma_unmap_sg(host->dev,
424 dw_mci_get_dma_dir(data));
427 static void dw_mci_idmac_reset(struct dw_mci *host)
429 u32 bmod = mci_readl(host, BMOD);
430 /* Software reset of DMA */
431 bmod |= SDMMC_IDMAC_SWRESET;
432 mci_writel(host, BMOD, bmod);
435 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
439 /* Disable and reset the IDMAC interface */
440 temp = mci_readl(host, CTRL);
441 temp &= ~SDMMC_CTRL_USE_IDMAC;
442 temp |= SDMMC_CTRL_DMA_RESET;
443 mci_writel(host, CTRL, temp);
445 /* Stop the IDMAC running */
446 temp = mci_readl(host, BMOD);
447 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
448 temp |= SDMMC_IDMAC_SWRESET;
449 mci_writel(host, BMOD, temp);
/* IDMAC completion callback: clean up the mapping and, if the request is
 * still alive, flag XFER_COMPLETE and kick the state-machine tasklet.
 * NOTE(review): the guard around the debug print and the NULL-data check
 * referenced by the comment below are elided in this listing. */
452 static void dw_mci_idmac_complete_dma(void *arg)
454 struct dw_mci *host = arg;
455 struct mmc_data *data = host->data;
457 dev_vdbg(host->dev, "DMA complete\n");
460 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
461 host->mrq->cmd->opcode,host->mrq->cmd->arg,
462 data->blocks,data->blksz,mmc_hostname(host->mmc));
465 host->dma_ops->cleanup(host);
468 * If the card was removed, data will be NULL. No point in trying to
469 * send the stop command or waiting for NBUSY in this case.
472 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
473 tasklet_schedule(&host->tasklet);
/* Fill the IDMAC descriptor ring from the request's (already DMA-mapped)
 * scatterlist: one chained descriptor per sg entry, OWN set, interrupts
 * suppressed except on the last descriptor.
 * NOTE(review): the declaration of i and the loop over remaining fields
 * are partially elided in this listing. */
477 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
481 struct idmac_desc *desc = host->sg_cpu;
483 for (i = 0; i < sg_len; i++, desc++) {
484 unsigned int length = sg_dma_len(&data->sg[i]);
485 u32 mem_addr = sg_dma_address(&data->sg[i]);
487 /* Set the OWN bit and disable interrupts for this descriptor */
488 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
491 IDMAC_SET_BUFFER1_SIZE(desc, length);
493 /* Physical address to DMA to/from */
494 desc->des2 = mem_addr;
497 /* Set first descriptor */
499 desc->des0 |= IDMAC_DES0_FD;
501 /* Set last descriptor */
/* Byte-offset arithmetic — assumes host->sg_cpu is void * (GNU
 * void-pointer arithmetic); confirm against struct dw_mci. */
502 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
503 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
504 desc->des0 |= IDMAC_DES0_LD;
/* Program the descriptor ring for this transfer, attach the IDMAC to the
 * controller, enable it, and poke the poll-demand register to start.
 * NOTE(review): the u32 temp declaration and opening brace are elided in
 * this listing. */
509 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
513 dw_mci_translate_sglist(host, host->data, sg_len);
515 /* Select IDMAC interface */
516 temp = mci_readl(host, CTRL);
517 temp |= SDMMC_CTRL_USE_IDMAC;
518 mci_writel(host, CTRL, temp);
522 /* Enable the IDMAC */
523 temp = mci_readl(host, BMOD);
524 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
525 mci_writel(host, BMOD, temp);
527 /* Start it running */
528 mci_writel(host, PLDMND, 1);
/* One-time IDMAC setup: forward-link a page of descriptors into a ring,
 * mark the end-of-ring, reset the IDMAC, unmask NI/RI(/TI) interrupts and
 * program the descriptor base address.
 * NOTE(review): the int i declaration, the tail of the IDINTEN write and
 * the return statement are elided in this listing. */
531 static int dw_mci_idmac_init(struct dw_mci *host)
533 struct idmac_desc *p;
536 /* Number of descriptors in the ring buffer */
537 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
539 /* Forward link the descriptor list */
540 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
541 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
543 /* Set the last descriptor as the end-of-ring descriptor */
544 p->des3 = host->sg_dma;
545 p->des0 = IDMAC_DES0_ER;
547 dw_mci_idmac_reset(host);
549 /* Mask out interrupts - get Tx & Rx complete only */
550 mci_writel(host, IDSTS, IDMAC_INT_CLR);
551 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
554 /* Set the descriptor base address */
555 mci_writel(host, DBADDR, host->sg_dma);
/* DMA-ops vtable for the internal DMA controller backend.
 * NOTE(review): the closing "};" is elided in this listing. */
559 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
560 .init = dw_mci_idmac_init,
561 .start = dw_mci_idmac_start_dma,
562 .stop = dw_mci_idmac_stop_dma,
563 .complete = dw_mci_idmac_complete_dma,
564 .cleanup = dw_mci_dma_cleanup,
566 #endif /* CONFIG_MMC_DW_IDMAC */
568 #ifdef CONFIG_MMC_DW_EDMAC
/* External-DMA variant of dma_cleanup: unmap the scatterlist unless it
 * was pre-mapped by dw_mci_pre_req (host_cookie set).
 * NOTE(review): the NULL-data guard is elided in this listing. */
569 static void dw_mci_edma_cleanup(struct dw_mci *host)
571 struct mmc_data *data = host->data;
574 if (!data->host_cookie)
575 dma_unmap_sg(host->dev,
576 data->sg, data->sg_len,
577 dw_mci_get_dma_dir(data));
580 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
582 dmaengine_terminate_all(host->dms->ch);
/* Completion callback for the external DMA channel: sync caches after a
 * read, clean up the mapping, then flag XFER_COMPLETE and schedule the
 * state-machine tasklet.
 * NOTE(review): braces/guards around the read-sync and the NULL-data
 * check referenced by the comment below are elided in this listing. */
585 static void dw_mci_edmac_complete_dma(void *arg)
587 struct dw_mci *host = arg;
588 struct mmc_data *data = host->data;
590 dev_vdbg(host->dev, "DMA complete\n");
593 if(data->flags & MMC_DATA_READ)
594 /* Invalidate cache after read */
595 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
596 data->sg_len, DMA_FROM_DEVICE);
598 host->dma_ops->cleanup(host);
601 * If the card was removed, data will be NULL. No point in trying to
602 * send the stop command or waiting for NBUSY in this case.
605 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
606 tasklet_schedule(&host->tasklet);
610 static void dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
612 struct dma_slave_config slave_config;
613 struct dma_async_tx_descriptor *desc = NULL;
614 struct scatterlist *sgl = host->data->sg;
615 u32 sg_elems = host->data->sg_len;
618 /* Set external dma config: burst size, burst width*/
619 slave_config.dst_addr = (dma_addr_t)(host->phy_regs + host->data_offset);
620 slave_config.src_addr = slave_config.dst_addr;
621 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
622 slave_config.src_addr_width = slave_config.dst_addr_width;
624 /* Match FIFO dma burst MSIZE with external dma config*/
625 slave_config.dst_maxburst = ((host->fifoth_val) >> 28) && 0x7;
626 slave_config.src_maxburst = slave_config.dst_maxburst;
628 if(host->data->flags & MMC_DATA_WRITE){
629 slave_config.direction = DMA_MEM_TO_DEV;
630 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
632 dev_err(host->dev, "error in dw_mci edma configuration.\n");
636 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
637 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
639 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
642 /* Set dw_mci_edmac_complete_dma as callback */
643 desc->callback = dw_mci_edmac_complete_dma;
644 desc->callback_param = (void *)host;
645 dmaengine_submit(desc);
647 /* Flush cache before write */
648 dma_sync_sg_for_device(mmc_dev(host->mmc), sgl,
649 sg_elems, DMA_TO_DEVICE);
650 dma_async_issue_pending(host->dms->ch);
653 slave_config.direction = DMA_DEV_TO_MEM;
654 ret = dmaengine_slave_config(host->dms->ch, &slave_config);
656 dev_err(host->dev, "error in dw_mci edma configuration.\n");
659 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl, sg_len,
660 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
662 dev_err(host->dev, "We cannot prepare for the dw_mci slave edma!\n");
665 /* set dw_mci_edmac_complete_dma as callback */
666 desc->callback = dw_mci_edmac_complete_dma;
667 desc->callback_param = (void *)host;
668 dmaengine_submit(desc);
669 dma_async_issue_pending(host->dms->ch);
/* Allocate the external-DMA bookkeeping and request the "dw_mci" slave
 * channel named in the device tree.
 * NOTE(review): the kmalloc result is used unchecked, and on channel
 * request failure the dev_err below would dereference a NULL
 * host->dms->ch for chan_id — worth hardening. The failure branch and
 * return statement are elided in this listing. */
673 static int dw_mci_edmac_init(struct dw_mci *host)
675 MMC_DBG_BOOT_FUNC(host->mmc,"dw_mci_edmac_init: Soc is 0x%x [%s]\n",
676 (unsigned int)(rockchip_soc_id & ROCKCHIP_CPU_MASK), mmc_hostname(host->mmc));
678 /* 1) request external dma channel, SHOULD decide chn in dts */
679 host->dms = (struct dw_mci_dma_slave *)kmalloc(sizeof(struct dw_mci_dma_slave),GFP_KERNEL);
680 host->dms->ch = dma_request_slave_channel(host->dev, "dw_mci");
682 dev_err(host->dev, "Failed to get external DMA channel: channel id = %d\n",
683 host->dms->ch->chan_id);
/* Release the external DMA channel acquired in dw_mci_edmac_init. */
696 static void dw_mci_edmac_exit(struct dw_mci *host)
698 dma_release_channel(host->dms->ch);
/* DMA-ops vtable for the external (generic dmaengine) backend.
 * NOTE(review): the closing "};" is elided in this listing. */
701 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
702 .init = dw_mci_edmac_init,
703 .exit = dw_mci_edmac_exit,
704 .start = dw_mci_edmac_start_dma,
705 .stop = dw_mci_edmac_stop_dma,
706 .complete = dw_mci_edmac_complete_dma,
707 .cleanup = dw_mci_edma_cleanup,
/* DMA-map a request's scatterlist, refusing "complex" transfers (short,
 * or with non-word-aligned buffers/lengths) so they fall back to PIO.
 * When called from pre_req (next != 0) the mapping is cached in
 * data->host_cookie for reuse.
 * NOTE(review): the error-return branches (-EINVAL on unaligned/short,
 * the dma_map_sg argument list and final return) are elided in this
 * listing. */
710 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
711 struct mmc_data *data,
714 struct scatterlist *sg;
715 unsigned int i, sg_len;
717 if (!next && data->host_cookie)
718 return data->host_cookie;
721 * We don't do DMA on "complex" transfers, i.e. with
722 * non-word-aligned buffers or lengths. Also, we don't bother
723 * with all the DMA setup overhead for short transfers.
725 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
731 for_each_sg(data->sg, sg, data->sg_len, i) {
732 if (sg->offset & 3 || sg->length & 3)
736 sg_len = dma_map_sg(host->dev,
739 dw_mci_get_dma_dir(data));
744 data->host_cookie = sg_len;
/* mmc_host_ops.pre_req: eagerly DMA-map the next request's data so the
 * mapping cost overlaps the current transfer.
 * NOTE(review): the early-return after resetting a stale host_cookie is
 * elided in this listing. */
749 static void dw_mci_pre_req(struct mmc_host *mmc,
750 struct mmc_request *mrq,
753 struct dw_mci_slot *slot = mmc_priv(mmc);
754 struct mmc_data *data = mrq->data;
756 if (!slot->host->use_dma || !data)
759 if (data->host_cookie) {
760 data->host_cookie = 0;
764 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
765 data->host_cookie = 0;
/* mmc_host_ops.post_req: undo the mapping made by dw_mci_pre_req once
 * the request has completed.
 * NOTE(review): the sg/sg_len arguments of dma_unmap_sg are elided in
 * this listing. */
768 static void dw_mci_post_req(struct mmc_host *mmc,
769 struct mmc_request *mrq,
772 struct dw_mci_slot *slot = mmc_priv(mmc);
773 struct mmc_data *data = mrq->data;
775 if (!slot->host->use_dma || !data)
778 if (data->host_cookie)
779 dma_unmap_sg(slot->host->dev,
782 dw_mci_get_dma_dir(data));
783 data->host_cookie = 0;
/* Pick the largest DMA burst size (MSIZE) and RX/TX watermarks that
 * divide both the block depth and the TX watermark complement, then
 * program FIFOTH. Only meaningful for the DMA backends.
 * NOTE(review): the search loop (do/while over idx), the early-goto for
 * non-multiple blksz and the closing of the #if block are elided in this
 * listing. */
786 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
788 #if defined(CONFIG_MMC_DW_IDMAC) || defined(CONFIG_MMC_DW_EDMAC)
789 unsigned int blksz = data->blksz;
790 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
791 u32 fifo_width = 1 << host->data_shift;
792 u32 blksz_depth = blksz / fifo_width, fifoth_val;
793 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
794 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
796 tx_wmark = (host->fifo_depth) / 2;
797 tx_wmark_invers = host->fifo_depth - tx_wmark;
801 * if blksz is not a multiple of the FIFO width
803 if (blksz % fifo_width) {
810 if (!((blksz_depth % mszs[idx]) ||
811 (tx_wmark_invers % mszs[idx]))) {
813 rx_wmark = mszs[idx] - 1;
818 * If idx is '0', it won't be tried
819 * Thus, initial values are uesed
822 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
823 mci_writel(host, FIFOTH, fifoth_val);
/* Program the card-read-threshold (CDTHRCTL) for HS200/SDR104 timings;
 * otherwise (or when the block doesn't fit the FIFO) disable it.
 * NOTE(review): the thld_size declaration/assignment, the goto-disable
 * path and returns are elided in this listing. */
827 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
829 unsigned int blksz = data->blksz;
830 u32 blksz_depth, fifo_depth;
833 WARN_ON(!(data->flags & MMC_DATA_READ));
835 if (host->timing != MMC_TIMING_MMC_HS200 &&
836 host->timing != MMC_TIMING_UHS_SDR104)
839 blksz_depth = blksz / (1 << host->data_shift);
840 fifo_depth = host->fifo_depth;
842 if (blksz_depth > fifo_depth)
846 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
847 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
848 * Currently just choose blksz.
851 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
855 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/* Try to hand the data phase to the DMA backend. Returns non-zero to
 * make the caller fall back to PIO (no channel, or mapping refused).
 * NOTE(review): local declarations (sg_len, temp), the no-channel early
 * return, and the final return are elided in this listing. */
858 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
865 /* If we don't have a channel, we can't do DMA */
869 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
871 host->dma_ops->stop(host);
878 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
879 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
883 * Decide the MSIZE and RX/TX Watermark.
884 * If current block size is same with previous size,
885 * no need to update fifoth.
887 if (host->prev_blksz != data->blksz)
888 dw_mci_adjust_fifoth(host, data);
891 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
893 /* Enable the DMA interface */
894 temp = mci_readl(host, CTRL);
895 temp |= SDMMC_CTRL_DMA_ENABLE;
896 mci_writel(host, CTRL, temp);
898 /* Disable RX/TX IRQs, let DMA handle it */
899 temp = mci_readl(host, INTMASK);
900 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
901 mci_writel(host, INTMASK, temp);
903 host->dma_ops->start(host, sg_len);
/* Set up the data phase: record direction, program the read threshold
 * for reads, then try DMA; on DMA refusal fall back to PIO (sg_miter,
 * RX/TX IRQs enabled, DMA interface disabled, FIFOTH restored).
 * NOTE(review): the u32 temp declaration, host->data/sg assignments and
 * some braces are elided in this listing. */
908 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
912 data->error = -EINPROGRESS;
919 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
921 if (data->flags & MMC_DATA_READ) {
922 host->dir_status = DW_MCI_RECV_STATUS;
923 dw_mci_ctrl_rd_thld(host, data);
925 host->dir_status = DW_MCI_SEND_STATUS;
928 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
929 data->blocks, data->blksz, mmc_hostname(host->mmc));
931 if (dw_mci_submit_data_dma(host, data)) {
932 int flags = SG_MITER_ATOMIC;
933 if (host->data->flags & MMC_DATA_READ)
934 flags |= SG_MITER_TO_SG;
936 flags |= SG_MITER_FROM_SG;
938 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
940 host->part_buf_start = 0;
941 host->part_buf_count = 0;
943 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
944 temp = mci_readl(host, INTMASK);
945 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
946 mci_writel(host, INTMASK, temp);
948 temp = mci_readl(host, CTRL);
949 temp &= ~SDMMC_CTRL_DMA_ENABLE;
950 mci_writel(host, CTRL, temp);
953 * Use the initial fifoth_val for PIO mode.
954 * If next issued data may be transfered by DMA mode,
955 * prev_blksz should be invalidated.
957 mci_writel(host, FIFOTH, host->fifoth_val);
958 host->prev_blksz = 0;
961 * Keep the current block size.
962 * It will be used to decide whether to update
963 * fifoth register next time.
965 host->prev_blksz = data->blksz;
/* Synchronously issue an internal controller command (e.g. clock-update):
 * poll STATUS until the card/controller is unbusy, write CMDARG/CMD with
 * SDMMC_CMD_START, then busy-wait for the START bit to self-clear.
 * NOTE(review): loop braces, the ret declaration and the success-return
 * path are elided in this listing; on timeout it falls through to the
 * dev_err below. */
969 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
971 struct dw_mci *host = slot->host;
972 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
973 unsigned int cmd_status = 0;
974 #ifdef SDMMC_WAIT_FOR_UNBUSY
976 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
978 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
980 ret = time_before(jiffies, timeout);
981 cmd_status = mci_readl(host, STATUS);
982 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
986 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
987 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
990 mci_writel(host, CMDARG, arg);
992 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
993 if(cmd & SDMMC_CMD_UPD_CLK)
994 timeout = jiffies + msecs_to_jiffies(50);
996 timeout = jiffies + msecs_to_jiffies(500);
997 while (time_before(jiffies, timeout)) {
998 cmd_status = mci_readl(host, CMD);
999 if (!(cmd_status & SDMMC_CMD_START))
1002 dev_err(&slot->mmc->class_dev,
1003 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
1004 cmd, arg, cmd_status);
/* (Re)program the card clock and bus width for this slot: gate the
 * clock, compute CLKDIV from bus_hz/clock, apply Rockchip clk_mmc
 * workarounds for init-frequency and div>1 cases, re-enable the clock
 * (with low-power gating unless SDIO IRQs are unmasked) and write CTYPE.
 * NOTE(review): this listing elides many control lines (declarations of
 * div/clk_en_a/sdio_int, if/else braces, several mci_send_cmd first
 * arguments); the visible fragments are kept verbatim. The in-code BUG
 * comment at original line 1083 documents a known div==0 mis-setup for
 * HS-DDR eMMC — left as the author wrote it. */
1007 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1009 struct dw_mci *host = slot->host;
1010 unsigned int tempck,clock = slot->clock;
1015 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
1016 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
1019 mci_writel(host, CLKENA, 0);
1020 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1021 if(host->svi_flags == 0)
1022 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1024 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1026 } else if (clock != host->current_speed || force_clkinit) {
1027 div = host->bus_hz / clock;
1028 if (host->bus_hz % clock && host->bus_hz > clock)
1030 * move the + 1 after the divide to prevent
1031 * over-clocking the card.
1035 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1037 if ((clock << div) != slot->__clk_old || force_clkinit) {
1038 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
1039 dev_info(&slot->mmc->class_dev,
1040 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1041 slot->id, host->bus_hz, clock,
1044 host->set_speed = tempck;
1045 host->set_div = div;
1049 mci_writel(host, CLKENA, 0);
1050 mci_writel(host, CLKSRC, 0);
1054 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1056 if(clock <= 400*1000){
1057 MMC_DBG_BOOT_FUNC(host->mmc,
1058 "dw_mci_setup_bus: argue clk_mmc workaround out %dHz for init[%s]",
1059 clock * 2, mmc_hostname(host->mmc));
1060 /* clk_mmc will change parents to 24MHz xtal*/
1061 clk_set_rate(host->clk_mmc, clock * 2);
1064 host->set_div = div;
1068 MMC_DBG_BOOT_FUNC(host->mmc,
1069 "dw_mci_setup_bus: argue clk_mmc workaround out normal clock [%s]",
1070 mmc_hostname(host->mmc));
1073 MMC_DBG_ERR_FUNC(host->mmc,
1074 "dw_mci_setup_bus: div SHOULD NOT LARGER THAN ONE! [%s]",
1075 mmc_hostname(host->mmc));
1077 host->set_div = div;
1078 host->bus_hz = host->set_speed * 2;
1079 MMC_DBG_BOOT_FUNC(host->mmc,
1080 "dw_mci_setup_bus: workaround div = %d, host->bus_hz = %d [%s]",
1081 div, host->bus_hz, mmc_hostname(host->mmc));
1083 /* BUG may be here, come on, Linux BSP engineer looks!
1084 FIXME: HS-DDR eMMC, div SHOULD be ONE, but we here cannot fetch eMMC bus mode!!!!!!!!
1085 WRONG dts set clk = 50M, and calc div be zero. Controller denied this setting!
1086 some oops happened like that:
1087 mmc_host mmc0: Bus speed (slot 0) = 50000000Hz (slot req 50000000Hz, actual 50000000HZ div = 0)
1088 rk_sdmmc: BOOT dw_mci_setup_bus: argue clk_mmc workaround out normal clock [mmc0]
1089 rk_sdmmc: BOOT Bus speed=50000000Hz,Bus width=8bits.[mmc0]
1090 mmc0: new high speed DDR MMC card at address 0001
1091 mmcblk0: mmc0:0001 M8G1GC 7.28 GiB
1093 mmcblk0: error -84 transferring data, sector 606208, nr 32, cmd response 0x900, card status 0xb00
1094 mmcblk0: retrying using single block read
1095 mmcblk0: error -110 sending status command, retrying
1097 How to: If eMMC HW version < 4.51, or > 4.51 but no caps2-mmc-hs200 support in dts
1098 Please set dts emmc clk to 100M or 150M, I will workaround it!
1101 if (host->verid < DW_MMC_240A)
1102 clk_set_rate(host->clk_mmc,(host->bus_hz));
1104 clk_set_rate(host->clk_mmc,(host->bus_hz) * 2);
1110 /* set clock to desired speed */
1111 mci_writel(host, CLKDIV, div);
1115 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1117 /* enable clock; only low power if no SDIO */
1118 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1120 if (host->verid < DW_MMC_240A)
1121 sdio_int = SDMMC_INT_SDIO(slot->id);
1123 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1125 if (!(mci_readl(host, INTMASK) & sdio_int))
1126 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1127 mci_writel(host, CLKENA, clk_en_a);
1131 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
1132 /* keep the clock with reflecting clock dividor */
1133 slot->__clk_old = clock << div;
1136 host->current_speed = clock;
1138 if(slot->ctype != slot->pre_ctype)
1139 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
1141 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
1142 mmc_hostname(host->mmc));
1143 slot->pre_ctype = slot->ctype;
1145 /* Set the current slot bus width */
1146 mci_writel(host, CTYPE, (slot->ctype << slot->id));
/* Poll the STATUS register until the data/command path is no longer
 * busy, bounded by a per-card-type timeout (EMMC/SD/SDIO, in ms).
 * NOTE(review): the do{ opener and closing brace are elided in this
 * listing; on timeout the function simply returns. */
1149 static void dw_mci_wait_unbusy(struct dw_mci *host)
1152 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
1153 unsigned long time_loop;
1154 unsigned int status;
1156 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
1158 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1159 timeout = SDMMC_DATA_TIMEOUT_EMMC;
1160 else if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1161 timeout = SDMMC_DATA_TIMEOUT_SD;
1163 time_loop = jiffies + msecs_to_jiffies(timeout);
1165 status = mci_readl(host, STATUS);
1166 if (!(status & (SDMMC_STAUTS_DATA_BUSY | SDMMC_STAUTS_MC_BUSY)))
1168 } while (time_before(jiffies, time_loop));
1172 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1175 * 0--status is busy.
1176 * 1--status is unbusy.
/* mmc_host_ops.card_busy for the signal-voltage-switch sequence: the
 * first call reports "busy" and arms svi_flags; the second reports
 * "unbusy" and clears it, sequencing the two CMD11 polls.
 * NOTE(review): braces and the busy-bit check between the two branches
 * are elided in this listing — confirm against the full file. */
1178 int dw_mci_card_busy(struct mmc_host *mmc)
1180 struct dw_mci_slot *slot = mmc_priv(mmc);
1181 struct dw_mci *host = slot->host;
1183 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_card_busy: svi_flags = %d [%s]", \
1184 host->svi_flags, mmc_hostname(host->mmc));
1187 if(host->svi_flags == 0){
1189 host->svi_flags = 1;
1190 return host->svi_flags;
1193 host->svi_flags = 0;
1194 return host->svi_flags;
/* Core request dispatch (host lock held): select the slot, wait for the
 * controller to go unbusy, reset per-request event state, program the
 * data phase (timeout, byte/block counts, submit_data), then fire the
 * (s)first command. Also caches the stop command encoding.
 * NOTE(review): local declarations (cmdflags), mrq/data assignments and
 * several guards are elided in this listing. */
1200 static void __dw_mci_start_request(struct dw_mci *host,
1201 struct dw_mci_slot *slot,
1202 struct mmc_command *cmd)
1204 struct mmc_request *mrq;
1205 struct mmc_data *data;
1209 if (host->pdata->select_slot)
1210 host->pdata->select_slot(slot->id);
1212 host->cur_slot = slot;
1215 dw_mci_wait_unbusy(host);
1217 host->pending_events = 0;
1218 host->completed_events = 0;
1219 host->data_status = 0;
1223 dw_mci_set_timeout(host);
1224 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1225 mci_writel(host, BLKSIZ, data->blksz);
1228 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1230 /* this is the first command, send the initialization clock */
1231 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1232 cmdflags |= SDMMC_CMD_INIT;
1235 dw_mci_submit_data(host, data);
1239 dw_mci_start_command(host, cmd, cmdflags);
1242 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/* Start the slot's queued request, preferring the set-block-count
 * command (sbc, CMD23) when present. */
1245 static void dw_mci_start_request(struct dw_mci *host,
1246 struct dw_mci_slot *slot)
1248 struct mmc_request *mrq = slot->mrq;
1249 struct mmc_command *cmd;
1251 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1252 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1254 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1255 __dw_mci_start_request(host, slot, cmd);
1258 /* must be called with host->lock held */
/* Either start the request immediately (host idle) or park the slot on
 * the host queue for the state machine to pick up later.
 * NOTE(review): the slot->mrq assignment line is elided in this listing. */
1259 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1260 struct mmc_request *mrq)
1262 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1267 if (host->state == STATE_IDLE) {
1268 host->state = STATE_SENDING_CMD;
1269 dw_mci_start_request(host, slot);
1271 list_add_tail(&slot->queue_node, &host->queue);
/*
 * dw_mci_request - mmc_host_ops.request entry point.
 * Fails fast with -ENOMEDIUM when no card is present; otherwise hands
 * the request to dw_mci_queue_request() under host->lock.
 */
1275 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1277 struct dw_mci_slot *slot = mmc_priv(mmc);
1278 struct dw_mci *host = slot->host;
1283 * The check for card presence and queueing of the request must be
1284 * atomic, otherwise the card could be removed in between and the
1285 * request wouldn't fail until another card was inserted.
1287 spin_lock_bh(&host->lock);
1289 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1290 spin_unlock_bh(&host->lock);
1291 mrq->cmd->error = -ENOMEDIUM;
1292 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1293 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
/* Complete immediately; lock already dropped above. */
1295 mmc_request_done(mmc, mrq);
1299 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1300 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1302 dw_mci_queue_request(host, slot, mrq);
1304 spin_unlock_bh(&host->lock);
/*
 * dw_mci_set_ios - mmc_host_ops.set_ios: apply bus width, timing, clock
 * and power state requested by the MMC core.
 * NOTE(review): this excerpt elides lines (closing braces, break
 * statements, #else/#endif); comments below describe only visible code.
 */
1307 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1309 struct dw_mci_slot *slot = mmc_priv(mmc);
1310 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1311 struct dw_mci *host = slot->host;
1313 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1316 #ifdef SDMMC_WAIT_FOR_UNBUSY
1317 unsigned long time_loop;
/* Pick the unbusy-wait deadline; longer when a voltage switch is active. */
1320 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1321 if(host->svi_flags == 1)
1322 time_loop = jiffies + msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD);
1324 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1326 time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1329 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1330 printk("%d..%s: no card. [%s]\n", \
1331 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* Poll STATUS until both data and MC busy bits clear or the deadline hits. */
1336 ret = time_before(jiffies, time_loop);
1337 regs = mci_readl(slot->host, STATUS);
1338 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1344 printk("slot->flags = %lu ", slot->flags);
1345 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1346 if(host->svi_flags != 1)
1349 printk("%d..%s: wait for unbusy timeout....... STATUS = 0x%x [%s]\n", \
1350 __LINE__, __FUNCTION__, regs, mmc_hostname(mmc));
/* Translate the core's bus-width request to controller CTYPE encoding. */
1354 switch (ios->bus_width) {
1355 case MMC_BUS_WIDTH_4:
1356 slot->ctype = SDMMC_CTYPE_4BIT;
1358 case MMC_BUS_WIDTH_8:
1359 slot->ctype = SDMMC_CTYPE_8BIT;
1362 /* set default 1 bit mode */
1363 slot->ctype = SDMMC_CTYPE_1BIT;
1364 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR50 timing is signalled per-slot in UHS_REG bit (16 + slot id). */
1367 regs = mci_readl(slot->host, UHS_REG);
1370 if (ios->timing == MMC_TIMING_UHS_DDR50)
1371 regs |= ((0x1 << slot->id) << 16);
1373 regs &= ~((0x1 << slot->id) << 16);
1375 mci_writel(slot->host, UHS_REG, regs);
1376 slot->host->timing = ios->timing;
1379 * Use mirror of ios->clock to prevent race with mmc
1380 * core ios update when finding the minimum.
1382 slot->clock = ios->clock;
/* Platform-specific (Rockchip) ios hook, then common bus setup. */
1384 if (drv_data && drv_data->set_ios)
1385 drv_data->set_ios(slot->host, ios);
1387 /* Slot specific timing and width adjustment */
1388 dw_mci_setup_bus(slot, false);
/* Power up/down the slot via the board hook and the PWREN register. */
1392 switch (ios->power_mode) {
1394 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1396 if (slot->host->pdata->setpower)
1397 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1398 regs = mci_readl(slot->host, PWREN);
1399 regs |= (1 << slot->id);
1400 mci_writel(slot->host, PWREN, regs);
1403 /* Power down slot */
1404 if(slot->host->pdata->setpower)
1405 slot->host->pdata->setpower(slot->id, 0);
1406 regs = mci_readl(slot->host, PWREN);
1407 regs &= ~(1 << slot->id);
1408 mci_writel(slot->host, PWREN, regs);
/*
 * dw_mci_get_ro - mmc_host_ops.get_ro: report write-protect state.
 * Source priority: slot quirk (never protected) > platform get_ro hook >
 * write-protect GPIO > controller WRTPRT register bit for this slot.
 */
1415 static int dw_mci_get_ro(struct mmc_host *mmc)
1418 struct dw_mci_slot *slot = mmc_priv(mmc);
1419 struct dw_mci_board *brd = slot->host->pdata;
1421 /* Use platform get_ro function, else try on board write protect */
1422 if(slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1424 else if(brd->get_ro)
1425 read_only = brd->get_ro(slot->id);
1426 else if(gpio_is_valid(slot->wp_gpio))
1427 read_only = gpio_get_value(slot->wp_gpio);
/* Fallback (elided else): read the controller's WRTPRT register. */
1430 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1432 dev_dbg(&mmc->class_dev, "card is %s\n",
1433 read_only ? "read-only" : "read-write");
/*
 * dw_mci_set_sdio_status - force the SDIO card present/absent state
 * (used for out-of-band SDIO power control, e.g. WiFi modules).
 * Updates the present flag under host->lock, gates the controller
 * clocks accordingly, then schedules card re-detection.
 */
1438 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1440 struct dw_mci_slot *slot = mmc_priv(mmc);
1441 struct dw_mci *host = slot->host;
1442 /*struct dw_mci_board *brd = slot->host->pdata;*/
/* Only meaningful for hosts restricted to SDIO cards. */
1444 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1447 spin_lock_bh(&host->lock);
1450 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1452 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1454 spin_unlock_bh(&host->lock);
/* Present: make sure both AHB and card clocks run; absent: gate them. */
1456 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1457 if(__clk_is_enabled(host->hclk_mmc) == false)
1458 clk_prepare_enable(host->hclk_mmc);
1459 if(__clk_is_enabled(host->clk_mmc) == false)
1460 clk_prepare_enable(host->clk_mmc);
1462 if(__clk_is_enabled(host->clk_mmc) == true)
1463 clk_disable_unprepare(slot->host->clk_mmc);
1464 if(__clk_is_enabled(host->hclk_mmc) == true)
1465 clk_disable_unprepare(slot->host->hclk_mmc);
/* Let the MMC core rescan the slot shortly (20ms debounce). */
1468 mmc_detect_change(slot->mmc, 20);
/*
 * dw_mci_get_cd - mmc_host_ops.get_cd: report card presence.
 * Priority: SDIO forced state (slot flag) > broken-CD quirk >
 * platform get_cd hook > CD GPIO > controller CDETECT register.
 * Also mirrors the result into DW_MMC_CARD_PRESENT under host->lock.
 */
1474 static int dw_mci_get_cd(struct mmc_host *mmc)
1477 struct dw_mci_slot *slot = mmc_priv(mmc);
1478 struct dw_mci_board *brd = slot->host->pdata;
1479 struct dw_mci *host = slot->host;
1480 int gpio_cd = mmc_gpio_get_cd(mmc);
/* SDIO presence is controlled by dw_mci_set_sdio_status(), not hardware. */
1482 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1483 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1485 /* Use platform get_cd function, else try onboard card detect */
1486 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1488 else if (brd->get_cd)
1489 present = !brd->get_cd(slot->id);
1490 else if (!IS_ERR_VALUE(gpio_cd))
1493 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1495 spin_lock_bh(&host->lock);
1497 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1498 dev_dbg(&mmc->class_dev, "card is present\n");
1500 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1501 dev_dbg(&mmc->class_dev, "card is not present\n");
1503 spin_unlock_bh(&host->lock);
1510 * Dts Should caps emmc controller with poll-hw-reset
/*
 * dw_mci_hw_reset - mmc_host_ops.hw_reset for eMMC.
 * Sequence: (1) CMD12 to abort any in-flight transfer, (2) wait for the
 * data-over interrupt, (3) reset IDMAC/DMA/FIFO in that exact order,
 * then toggle PWREN/RST_N with eMMC-spec-compliant delays (tRstW/tRSCA).
 * FIX(review): the FIFO-reset failure warning previously printed
 * "SDMMC_CTRL_DMA_RESET failed" (copy-paste from the DMA path); it now
 * names SDMMC_CTRL_FIFO_RESET. No other behavior changed.
 */
1512 static void dw_mci_hw_reset(struct mmc_host *mmc)
1514 struct dw_mci_slot *slot = mmc_priv(mmc);
1515 struct dw_mci *host = slot->host;
1520 unsigned long timeout;
1523 /* (1) CMD12 to end any transfer in process */
1524 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
1525 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
1527 if(host->mmc->hold_reg_flag)
1528 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1529 mci_writel(host, CMDARG, 0);
1531 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll up to 500ms for the controller to accept the command. */
1533 timeout = jiffies + msecs_to_jiffies(500);
1535 ret = time_before(jiffies, timeout);
1536 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
1541 MMC_DBG_ERR_FUNC(host->mmc,
1542 "%s dw_mci_hw_reset: STOP_TRANSMISSION failed!!! [%s]\n",
1543 __func__, mmc_hostname(host->mmc));
1545 /* (2) wait DTO, even if no response is sent back by card */
1547 timeout = jiffies + msecs_to_jiffies(5);
1549 ret = time_before(jiffies, timeout);
1550 if(!(mci_readl(host, MINTSTS) & SDMMC_INT_DATA_OVER)){
1551 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
1557 /* (3) Reset following: DONNOT CHANGE RESET ORDER!*/
1559 /* Software reset - BMOD[0] for IDMA only */
1560 regs = mci_readl(host, BMOD);
1561 regs |= SDMMC_IDMAC_SWRESET;
1562 mci_writel(host, BMOD, regs);
1563 udelay(1); /* Auto cleared after 1 cycle, 1us is enough for hclk_mmc */
1564 regs = mci_readl(host, BMOD);
/* Bit should self-clear; still set means the IDMAC reset did not finish. */
1565 if(regs & SDMMC_IDMAC_SWRESET)
1566 MMC_DBG_WARN_FUNC(host->mmc,
1567 "%s dw_mci_hw_reset: SDMMC_IDMAC_SWRESET failed!!! [%s]\n",
1568 __func__, mmc_hostname(host->mmc));
1570 /* DMA reset - CTRL[2] */
1571 regs = mci_readl(host, CTRL);
1572 regs |= SDMMC_CTRL_DMA_RESET;
1573 mci_writel(host, CTRL, regs);
1574 udelay(1); /* Auto cleared after 2 AHB clocks, 1us is enough plus mci_readl access */
1575 regs = mci_readl(host, CTRL);
1576 if(regs & SDMMC_CTRL_DMA_RESET)
1577 MMC_DBG_WARN_FUNC(host->mmc,
1578 "%s dw_mci_hw_reset: SDMMC_CTRL_DMA_RESET failed!!! [%s]\n",
1579 __func__, mmc_hostname(host->mmc));
1581 /* FIFO reset - CTRL[1] */
1582 regs = mci_readl(host, CTRL);
1583 regs |= SDMMC_CTRL_FIFO_RESET;
1584 mci_writel(host, CTRL, regs);
1585 mdelay(1); /* no timing limited, 1ms is random value */
1586 regs = mci_readl(host, CTRL);
1587 if(regs & SDMMC_CTRL_FIFO_RESET)
1588 MMC_DBG_WARN_FUNC(host->mmc,
1589 "%s dw_mci_hw_reset: SDMMC_CTRL_FIFO_RESET failed!!! [%s]\n",
1590 __func__, mmc_hostname(host->mmc));
1593 According to eMMC spec
1594 tRstW >= 1us ; RST_n pulse width
1595 tRSCA >= 200us ; RST_n to Command time
1596 tRSTH >= 1us ; RST_n high period
1598 mci_writel(slot->host, PWREN, 0x0);
1599 mci_writel(slot->host, RST_N, 0x0);
1601 udelay(10); /* 10us for bad quality eMMc. */
1603 mci_writel(slot->host, PWREN, 0x1);
1604 mci_writel(slot->host, RST_N, 0x1);
1606 usleep_range(500, 1000); /* at least 500(> 200us) */
1610 * Disable lower power mode.
1612 * Low power mode will stop the card clock when idle. According to the
1613 * description of the CLKENA register we should disable low power mode
1614 * for SDIO cards if we need SDIO interrupts to work.
1616 * This function is fast if low power mode is already disabled.
/* Clears this slot's CLKEN_LOW_PWR bit in CLKENA, then issues an
 * update-clock command so the new clock setting takes effect. */
1618 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1620 struct dw_mci *host = slot->host;
1622 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1624 clk_en_a = mci_readl(host, CLKENA);
1626 if (clk_en_a & clken_low_pwr) {
1627 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1628 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1629 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * dw_mci_enable_sdio_irq - mmc_host_ops.enable_sdio_irq.
 * Masks/unmasks the per-slot SDIO interrupt in INTMASK; the bit position
 * moved by 8 starting with controller IP version 2.40a.
 */
1633 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1635 struct dw_mci_slot *slot = mmc_priv(mmc);
1636 struct dw_mci *host = slot->host;
1640 /* Enable/disable Slot Specific SDIO interrupt */
1641 int_mask = mci_readl(host, INTMASK);
1643 if (host->verid < DW_MMC_240A)
1644 sdio_int = SDMMC_INT_SDIO(slot->id);
1646 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1650 * Turn off low power mode if it was enabled. This is a bit of
1651 * a heavy operation and we disable / enable IRQs a lot, so
1652 * we'll leave low power mode disabled and it will get
1653 * re-enabled again in dw_mci_setup_bus().
1655 dw_mci_disable_low_power(slot);
/* enb true: unmask; elided else: mask the SDIO interrupt again. */
1657 mci_writel(host, INTMASK,
1658 (int_mask | sdio_int));
1660 mci_writel(host, INTMASK,
1661 (int_mask & ~sdio_int));
1665 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
/* IO domain voltages in millivolts, matched against the requested level. */
1667 IO_DOMAIN_12 = 1200,
1668 IO_DOMAIN_18 = 1800,
1669 IO_DOMAIN_33 = 3300,
/*
 * dw_mci_do_grf_io_domain_switch - program the SoC GRF (general register
 * file) so the SD I/O pad domain matches @voltage.
 * NOTE(review): validation branches are partially elided; only the
 * RK3288 SD-card path is visible (bit 7 selects the level, bit 23 is
 * presumably the write-enable mask — confirm against the RK3288 TRM).
 */
1671 static void dw_mci_do_grf_io_domain_switch(struct dw_mci *host, u32 voltage)
1681 MMC_DBG_ERR_FUNC(host->mmc,"%s : Not support io domain voltage [%s]\n",
1682 __FUNCTION__, mmc_hostname(host->mmc));
1685 MMC_DBG_ERR_FUNC(host->mmc,"%s : Err io domain voltage [%s]\n",
1686 __FUNCTION__, mmc_hostname(host->mmc));
1690 if(cpu_is_rk3288()){
1691 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
1692 grf_writel((voltage << 7) | (1 << 23), RK3288_GRF_IO_VSEL);
1696 MMC_DBG_ERR_FUNC(host->mmc,"%s : unknown chip [%s]\n",
1697 __FUNCTION__, mmc_hostname(host->mmc));
/*
 * dw_mci_do_start_signal_voltage_switch - switch the signalling voltage
 * (3.3V / 1.8V / 1.2V) via the vmmc regulator, the SoC IO domain, and
 * the controller's UHS_REG VOLT bit; waits ~5ms for the rail to settle
 * and verifies the UHS_REG readback.
 * Only applicable for controller IP >= 2.40a.
 */
1701 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1702 struct mmc_ios *ios)
1705 unsigned int value,uhs_reg;
1708 * Signal Voltage Switching is only applicable for Host Controllers
1711 if (host->verid < DW_MMC_240A)
1714 uhs_reg = mci_readl(host, UHS_REG);
1715 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1716 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1718 switch (ios->signal_voltage) {
1719 case MMC_SIGNAL_VOLTAGE_330:
1720 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1722 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1723 /* regulator_put(host->vmmc); //to be done in remove function. */
1725 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1726 __func__, regulator_get_voltage(host->vmmc), ret);
1728 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 3.3V signalling voltage "
1729 " failed\n", mmc_hostname(host->mmc));
/* Also retune the SoC I/O pad domain to 3.3V. */
1732 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_33);
1734 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__,
1735 __FUNCTION__, mmc_hostname(host->mmc));
1737 /* set High-power mode */
1738 value = mci_readl(host, CLKENA);
1739 value &= ~SDMMC_CLKEN_LOW_PWR;
1740 mci_writel(host,CLKENA , value);
/* Clear the 1.8V enable bit -> controller signals at 3.3V. */
1742 uhs_reg &= ~SDMMC_UHS_VOLT_REG_18;
1743 mci_writel(host,UHS_REG , uhs_reg);
1746 usleep_range(5000, 5500);
1748 /* 3.3V regulator output should be stable within 5 ms */
1749 uhs_reg = mci_readl(host, UHS_REG);
1750 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1753 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 3.3V regulator output did not became stable\n",
1754 mmc_hostname(host->mmc));
1757 case MMC_SIGNAL_VOLTAGE_180:
1759 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1760 /* regulator_put(host->vmmc);//to be done in remove function. */
1762 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1763 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1765 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.8V signalling voltage "
1766 " failed\n", mmc_hostname(host->mmc));
1769 dw_mci_do_grf_io_domain_switch(host, IO_DOMAIN_18);
1773 * Enable 1.8V Signal Enable in the Host Control2
1776 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1779 usleep_range(5000, 5500);
1780 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__,
1781 __FUNCTION__,mmc_hostname(host->mmc));
1783 /* 1.8V regulator output should be stable within 5 ms */
1784 uhs_reg = mci_readl(host, UHS_REG);
1785 if(uhs_reg & SDMMC_UHS_VOLT_REG_18)
1788 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: 1.8V regulator output did not became stable\n",
1789 mmc_hostname(host->mmc));
1792 case MMC_SIGNAL_VOLTAGE_120:
1794 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1796 MMC_DBG_SW_VOL_FUNC(host->mmc, "%s: Switching to 1.2V signalling voltage "
1797 " failed\n", mmc_hostname(host->mmc));
1803 /* No signal voltage switch required */
/*
 * dw_mci_start_signal_voltage_switch - mmc_host_ops wrapper: no-op for
 * pre-2.40a IP, otherwise delegate to the worker above.
 */
1809 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1810 struct mmc_ios *ios)
1812 struct dw_mci_slot *slot = mmc_priv(mmc);
1813 struct dw_mci *host = slot->host;
1816 if (host->verid < DW_MMC_240A)
1819 err = dw_mci_do_start_signal_voltage_switch(host, ios);
/*
 * dw_mci_execute_tuning - mmc_host_ops.execute_tuning.
 * Selects the tuning block pattern by opcode/bus width, fills in the
 * Rockchip-specific tuning parameters (clock con_id, sample-phase
 * tuning), and delegates to the drv_data->execute_tuning hook.
 */
1825 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1827 struct dw_mci_slot *slot = mmc_priv(mmc);
1828 struct dw_mci *host = slot->host;
1829 const struct dw_mci_drv_data *drv_data = host->drv_data;
1830 struct dw_mci_tuning_data tuning_data;
1833 /* Fixme: 3036/3126 doesn't support 1.8 io domain, no sense exe tuning
1834 if(cpu_is_3036() || cpu_is_3126())
/* HS200 uses a width-dependent pattern; legacy tuning is always 4-bit. */
1840 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1841 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1842 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1843 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1844 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1845 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1846 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1850 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1851 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1852 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1855 "Undefined command(%d) for tuning\n", opcode);
1860 /* Recommend sample phase and delayline
1861 Fixme: Mix-use these three controllers will cause
/* Clock connection id per card type: eMMC=3, SDIO=1, SD=0. */
1864 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1865 tuning_data.con_id = 3;
1866 else if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1867 tuning_data.con_id = 1;
1869 tuning_data.con_id = 0;
1871 /* 0: driver, from host->devices
1872 1: sample, from devices->host
1874 tuning_data.tuning_type = 1;
1876 if (drv_data && drv_data->execute_tuning)
1877 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/* mmc_host_ops table wiring the callbacks above into the MMC core;
 * voltage-switch ops are only present with the Rockchip switch config. */
1882 static const struct mmc_host_ops dw_mci_ops = {
1883 .request = dw_mci_request,
1884 .pre_req = dw_mci_pre_req,
1885 .post_req = dw_mci_post_req,
1886 .set_ios = dw_mci_set_ios,
1887 .get_ro = dw_mci_get_ro,
1888 .get_cd = dw_mci_get_cd,
1889 .set_sdio_status = dw_mci_set_sdio_status,
1890 .hw_reset = dw_mci_hw_reset,
1891 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1892 .execute_tuning = dw_mci_execute_tuning,
1893 #ifdef CONFIG_MMC_DW_ROCKCHIP_SWITCH_VOLTAGE
1894 .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
1895 .card_busy = dw_mci_card_busy,
/*
 * dw_mci_enable_irq - enable/disable the controller IRQ line, tracking
 * the current state in host->irq_state so the enable/disable depth
 * stays balanced. Runs with local interrupts saved.
 */
1900 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
1902 unsigned long flags;
1907 local_irq_save(flags);
/* Only act on an actual state change to keep enable/disable balanced. */
1908 if(host->irq_state != irqflag)
1910 host->irq_state = irqflag;
1913 enable_irq(host->irq);
1917 disable_irq(host->irq);
1920 local_irq_restore(flags);
/*
 * dw_mci_deal_data_end - post-transfer fixup at the end of a data request.
 * For write (SEND) direction, maps raw DCRC/EBE interrupt status into
 * data->error (except for the bus-test command CMD19/MMC_BUS_TEST_W,
 * where errors are expected), then waits for the card to go unbusy.
 */
1924 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
1925 __releases(&host->lock)
1926 __acquires(&host->lock)
1928 if(DW_MCI_SEND_STATUS == host->dir_status){
1930 if( MMC_BUS_TEST_W != host->cmd->opcode){
1931 if(host->data_status & SDMMC_INT_DCRC)
1932 host->data->error = -EILSEQ;
1933 else if(host->data_status & SDMMC_INT_EBE)
1934 host->data->error = -ETIMEDOUT;
1936 dw_mci_wait_unbusy(host);
1939 dw_mci_wait_unbusy(host);
/*
 * dw_mci_request_end - finish @mrq and dispatch the next queued slot.
 * Called with host->lock held (dropped only around mmc_request_done(),
 * per the __releases/__acquires annotations).
 */
1944 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1945 __releases(&host->lock)
1946 __acquires(&host->lock)
1948 struct dw_mci_slot *slot;
1949 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1951 WARN_ON(host->cmd || host->data);
/* Stop the software DTO watchdog and run the data-end fixups. */
1953 del_timer_sync(&host->dto_timer);
1954 dw_mci_deal_data_end(host, mrq);
1957 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
1958 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
1960 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
1961 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
1963 host->cur_slot->mrq = NULL;
/* Start the next queued request, or fall back to idle. */
1965 if (!list_empty(&host->queue)) {
1966 slot = list_entry(host->queue.next,
1967 struct dw_mci_slot, queue_node);
1968 list_del(&slot->queue_node);
1969 dev_vdbg(host->dev, "list not empty: %s is next\n",
1970 mmc_hostname(slot->mmc));
1971 host->state = STATE_SENDING_CMD;
1972 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
1973 dw_mci_start_request(host, slot);
1975 dev_vdbg(host->dev, "list empty\n");
1976 host->state = STATE_IDLE;
/* Drop the lock while notifying the core; it may re-enter ->request. */
1979 spin_unlock(&host->lock);
1980 mmc_request_done(prev_mmc, mrq);
1981 spin_lock(&host->lock);
/*
 * dw_mci_command_complete - read back the command response and translate
 * the saved interrupt status into cmd->error (RTO -> -ETIMEDOUT,
 * RCRC -> -EILSEQ when the command expects CRC, RESP_ERR -> error).
 * NOTE(review): several branches are elided in this excerpt.
 */
1984 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1986 u32 status = host->cmd_status;
1988 host->cmd_status = 0;
1990 /* Read the response from the card (up to 16 bytes) */
1991 if (cmd->flags & MMC_RSP_PRESENT) {
1992 if (cmd->flags & MMC_RSP_136) {
/* Long (R2) response: RESP0..3 hold the 128-bit value, reversed. */
1993 cmd->resp[3] = mci_readl(host, RESP0);
1994 cmd->resp[2] = mci_readl(host, RESP1);
1995 cmd->resp[1] = mci_readl(host, RESP2);
1996 cmd->resp[0] = mci_readl(host, RESP3);
1998 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
1999 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
2001 cmd->resp[0] = mci_readl(host, RESP0);
2005 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
2006 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
/* Response timeout; SDIO hosts take an elided special-case branch. */
2010 if (status & SDMMC_INT_RTO)
2012 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2015 cmd->error = -ETIMEDOUT;
2016 del_timer_sync(&host->dto_timer);
2017 }else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC)){
2018 del_timer_sync(&host->dto_timer);
2019 cmd->error = -EILSEQ;
2020 }else if (status & SDMMC_INT_RESP_ERR){
2021 del_timer_sync(&host->dto_timer);
2026 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",
2027 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* Repeated timeouts (cmd_rto counter) escalate to an error print;
 * MMC_SEND_STATUS polling timeouts are exempt. */
2030 del_timer_sync(&host->dto_timer);
2031 if(MMC_SEND_STATUS != cmd->opcode)
2032 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
2033 MMC_DBG_ERR_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
2034 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
2038 /* newer ip versions need a delay between retries */
2039 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * dw_mci_tasklet_func - bottom-half state machine driving a request
 * through SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP /
 * DATA_ERROR, consuming the event bits set by the interrupt handler.
 * Loops until the state stops changing, all under host->lock.
 * NOTE(review): many lines (case labels, breaks, goto targets) are
 * elided in this excerpt; comments describe only the visible flow.
 */
2045 static void dw_mci_tasklet_func(unsigned long priv)
2047 struct dw_mci *host = (struct dw_mci *)priv;
2048 struct dw_mci_slot *slot = mmc_priv(host->mmc);
2049 struct mmc_data *data;
2050 struct mmc_command *cmd;
2051 enum dw_mci_state state;
2052 enum dw_mci_state prev_state;
2053 u32 status, cmd_flags;
2054 unsigned long timeout = 0;
2057 spin_lock(&host->lock);
2059 state = host->state;
2069 case STATE_SENDING_CMD:
/* Nothing to do until the ISR reports the command finished. */
2070 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2071 &host->pending_events))
2076 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2077 dw_mci_command_complete(host, cmd);
/* SBC (CMD23) done: restart with the real data command. */
2078 if (cmd == host->mrq->sbc && !cmd->error) {
2079 prev_state = state = STATE_SENDING_CMD;
2080 __dw_mci_start_request(host, host->cur_slot,
/* Command error with data pending: abort DMA and send stop. */
2085 if (cmd->data && cmd->error) {
2086 del_timer_sync(&host->dto_timer); /* delete the timer for INT_DTO */
2087 dw_mci_stop_dma(host);
2090 send_stop_cmd(host, data);
2091 state = STATE_SENDING_STOP;
2097 send_stop_abort(host, data);
2098 state = STATE_SENDING_STOP;
2101 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
/* No data phase (or failed command): the request is finished. */
2104 if (!host->mrq->data || cmd->error) {
2105 dw_mci_request_end(host, host->mrq);
2109 prev_state = state = STATE_SENDING_DATA;
2112 case STATE_SENDING_DATA:
2113 if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
2114 dw_mci_stop_dma(host);
2117 send_stop_cmd(host, data);
2119 /*single block read/write, send stop cmd manually to prevent host controller halt*/
2120 MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
2121 __func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));
/* Hand-craft a CMD12 directly in the CMD register (same recipe as
 * dw_mci_hw_reset) and poll up to 500ms for it to be accepted. */
2123 mci_writel(host, CMDARG, 0);
2125 cmd_flags = SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC
2126 | SDMMC_CMD_RESP_EXP | MMC_STOP_TRANSMISSION;
2128 if(host->mmc->hold_reg_flag)
2129 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
2131 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
2133 timeout = jiffies + msecs_to_jiffies(500);
2136 ret = time_before(jiffies, timeout);
2137 if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
2141 MMC_DBG_ERR_FUNC(host->mmc,
2142 "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
2143 __func__, mmc_hostname(host->mmc));
2146 send_stop_abort(host, data);
2148 state = STATE_DATA_ERROR;
2152 MMC_DBG_CMD_FUNC(host->mmc,
2153 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
2154 prev_state,state, mmc_hostname(host->mmc));
2156 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2157 &host->pending_events))
2159 MMC_DBG_INFO_FUNC(host->mmc,
2160 "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
2161 prev_state,state,mmc_hostname(host->mmc));
2163 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2164 prev_state = state = STATE_DATA_BUSY;
2167 case STATE_DATA_BUSY:
2168 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
2169 &host->pending_events))
2172 dw_mci_deal_data_end(host, host->mrq);
2173 del_timer_sync(&host->dto_timer); //delete the timer for INT_DTO
2174 MMC_DBG_INFO_FUNC(host->mmc,
2175 "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
2176 prev_state,state,mmc_hostname(host->mmc));
2179 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2180 status = host->data_status;
/* Map raw data-error interrupt bits into data->error. */
2182 if (status & DW_MCI_DATA_ERROR_FLAGS) {
2183 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
2184 MMC_DBG_ERR_FUNC(host->mmc,
2185 "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
2186 prev_state,state, status, mmc_hostname(host->mmc));
2188 if (status & SDMMC_INT_DRTO) {
2189 data->error = -ETIMEDOUT;
2190 } else if (status & SDMMC_INT_DCRC) {
2191 data->error = -EILSEQ;
2192 } else if (status & SDMMC_INT_EBE &&
2193 host->dir_status == DW_MCI_SEND_STATUS){
2195 * No data CRC status was returned.
2196 * The number of bytes transferred will
2197 * be exaggerated in PIO mode.
2199 data->bytes_xfered = 0;
2200 data->error = -ETIMEDOUT;
2209 * After an error, there may be data lingering
2210 * in the FIFO, so reset it - doing so
2211 * generates a block interrupt, hence setting
2212 * the scatter-gather pointer to NULL.
2214 dw_mci_fifo_reset(host);
/* Success path (elided else): all bytes transferred. */
2216 data->bytes_xfered = data->blocks * data->blksz;
2221 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
2222 prev_state,state,mmc_hostname(host->mmc));
2223 dw_mci_request_end(host, host->mrq);
2226 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
2227 prev_state,state,mmc_hostname(host->mmc));
/* SBC-managed transfers need no explicit stop command. */
2229 if (host->mrq->sbc && !data->error) {
2230 data->stop->error = 0;
2232 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
2233 prev_state,state,mmc_hostname(host->mmc));
2235 dw_mci_request_end(host, host->mrq);
2239 prev_state = state = STATE_SENDING_STOP;
2241 send_stop_cmd(host, data);
2243 if (data->stop && !data->error) {
2244 /* stop command for open-ended transfer*/
2246 send_stop_abort(host, data);
2250 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
2251 prev_state,state,mmc_hostname(host->mmc));
2253 case STATE_SENDING_STOP:
2254 if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2257 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
2258 prev_state, state, mmc_hostname(host->mmc));
2260 /* CMD error in data command */
2261 if (host->mrq->cmd->error && host->mrq->data) {
2262 dw_mci_fifo_reset(host);
2268 dw_mci_command_complete(host, host->mrq->stop);
2270 if (host->mrq->stop)
2271 dw_mci_command_complete(host, host->mrq->stop);
2273 host->cmd_status = 0;
2276 dw_mci_request_end(host, host->mrq);
2279 case STATE_DATA_ERROR:
/* Wait for the transfer to drain before handling the error as busy-end. */
2280 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2281 &host->pending_events))
2284 state = STATE_DATA_BUSY;
2287 } while (state != prev_state);
2289 host->state = state;
2291 spin_unlock(&host->lock);
2295 /* push final bytes to part_buf, only use during push */
/* Stash @cnt leftover bytes (less than one FIFO word) in host->part_buf
 * so the next push can complete the word. */
2296 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2298 memcpy((void *)&host->part_buf, buf, cnt);
2299 host->part_buf_count = cnt;
2302 /* append bytes to part_buf, only use during push */
/* Top up host->part_buf toward a full FIFO word (1 << data_shift bytes);
 * returns the number of bytes actually consumed from @buf. */
2303 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2305 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2306 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2307 host->part_buf_count += cnt;
2311 /* pull first bytes from part_buf, only use during pull */
/* Drain up to @cnt buffered bytes from host->part_buf into @buf,
 * advancing part_buf_start; returns the number of bytes copied. */
2312 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2314 cnt = min(cnt, (int)host->part_buf_count);
2316 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2318 host->part_buf_count -= cnt;
2319 host->part_buf_start += cnt;
2324 /* pull final bytes from the part_buf, assuming it's just been filled */
/* Copy the first @cnt bytes of a freshly-read FIFO word to @buf and
 * record the remainder (word size minus @cnt) as buffered. */
2325 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2327 memcpy(buf, &host->part_buf, cnt);
2328 host->part_buf_start = cnt;
2329 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * dw_mci_push_data16 - PIO write path for a 16-bit FIFO.
 * Completes any partial word left in part_buf, handles misaligned
 * buffers via a bounce buffer when the arch lacks efficient unaligned
 * access, streams whole u16 words, then stashes the trailing odd byte
 * (flushing it if this is the final chunk of the transfer).
 */
2332 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2334 struct mmc_data *data = host->data;
2337 /* try and push anything in the part_buf */
2338 if (unlikely(host->part_buf_count)) {
2339 int len = dw_mci_push_part_bytes(host, buf, cnt);
2342 if (host->part_buf_count == 2) {
2343 mci_writew(host, DATA(host->data_offset),
2345 host->part_buf_count = 0;
2348 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2349 if (unlikely((unsigned long)buf & 0x1)) {
2351 u16 aligned_buf[64];
2352 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2353 int items = len >> 1;
2355 /* memcpy from input buffer into aligned buffer */
2356 memcpy(aligned_buf, buf, len);
2359 /* push data from aligned buffer into fifo */
2360 for (i = 0; i < items; ++i)
2361 mci_writew(host, DATA(host->data_offset),
/* Aligned fast path: write u16 words straight from the buffer. */
2368 for (; cnt >= 2; cnt -= 2)
2369 mci_writew(host, DATA(host->data_offset), *pdata++);
2372 /* put anything remaining in the part_buf */
2374 dw_mci_set_part_bytes(host, buf, cnt);
2375 /* Push data if we have reached the expected data length */
2376 if ((data->bytes_xfered + init_cnt) ==
2377 (data->blksz * data->blocks))
2378 mci_writew(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data16 - PIO read path for a 16-bit FIFO: bounce-buffer
 * reads for misaligned destinations, direct u16 reads otherwise, and a
 * final partial word saved into part_buf16 for the next call.
 */
2383 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2385 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2386 if (unlikely((unsigned long)buf & 0x1)) {
2388 /* pull data from fifo into aligned buffer */
2389 u16 aligned_buf[64];
2390 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2391 int items = len >> 1;
2393 for (i = 0; i < items; ++i)
2394 aligned_buf[i] = mci_readw(host,
2395 DATA(host->data_offset));
2396 /* memcpy from aligned buffer into output buffer */
2397 memcpy(buf, aligned_buf, len);
/* Aligned fast path. */
2405 for (; cnt >= 2; cnt -= 2)
2406 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Odd trailing byte: read one more word, keep the remainder buffered. */
2410 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2411 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_push_data32 - PIO write path for a 32-bit FIFO; same structure
 * as dw_mci_push_data16 with a 4-byte word size.
 */
2415 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2417 struct mmc_data *data = host->data;
2420 /* try and push anything in the part_buf */
2421 if (unlikely(host->part_buf_count)) {
2422 int len = dw_mci_push_part_bytes(host, buf, cnt);
2425 if (host->part_buf_count == 4) {
2426 mci_writel(host, DATA(host->data_offset),
2428 host->part_buf_count = 0;
2431 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2432 if (unlikely((unsigned long)buf & 0x3)) {
2434 u32 aligned_buf[32];
2435 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2436 int items = len >> 2;
2438 /* memcpy from input buffer into aligned buffer */
2439 memcpy(aligned_buf, buf, len);
2442 /* push data from aligned buffer into fifo */
2443 for (i = 0; i < items; ++i)
2444 mci_writel(host, DATA(host->data_offset),
/* Aligned fast path: write u32 words straight from the buffer. */
2451 for (; cnt >= 4; cnt -= 4)
2452 mci_writel(host, DATA(host->data_offset), *pdata++);
2455 /* put anything remaining in the part_buf */
2457 dw_mci_set_part_bytes(host, buf, cnt);
2458 /* Push data if we have reached the expected data length */
2459 if ((data->bytes_xfered + init_cnt) ==
2460 (data->blksz * data->blocks))
2461 mci_writel(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data32 - PIO read path for a 32-bit FIFO; mirrors
 * dw_mci_pull_data16 with a 4-byte word size and part_buf32.
 */
2466 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2468 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2469 if (unlikely((unsigned long)buf & 0x3)) {
2471 /* pull data from fifo into aligned buffer */
2472 u32 aligned_buf[32];
2473 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2474 int items = len >> 2;
2476 for (i = 0; i < items; ++i)
2477 aligned_buf[i] = mci_readl(host,
2478 DATA(host->data_offset));
2479 /* memcpy from aligned buffer into output buffer */
2480 memcpy(buf, aligned_buf, len);
/* Aligned fast path. */
2488 for (; cnt >= 4; cnt -= 4)
2489 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Trailing partial word: buffer the remainder for the next call. */
2493 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2494 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_push_data64 - PIO write path for a 64-bit FIFO; same structure
 * as the 16/32-bit variants with an 8-byte word size.
 */
2498 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2500 struct mmc_data *data = host->data;
2503 /* try and push anything in the part_buf */
2504 if (unlikely(host->part_buf_count)) {
2505 int len = dw_mci_push_part_bytes(host, buf, cnt);
2509 if (host->part_buf_count == 8) {
2510 mci_writeq(host, DATA(host->data_offset),
2512 host->part_buf_count = 0;
2515 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2516 if (unlikely((unsigned long)buf & 0x7)) {
2518 u64 aligned_buf[16];
2519 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2520 int items = len >> 3;
2522 /* memcpy from input buffer into aligned buffer */
2523 memcpy(aligned_buf, buf, len);
2526 /* push data from aligned buffer into fifo */
2527 for (i = 0; i < items; ++i)
2528 mci_writeq(host, DATA(host->data_offset),
/* Aligned fast path: write u64 words straight from the buffer. */
2535 for (; cnt >= 8; cnt -= 8)
2536 mci_writeq(host, DATA(host->data_offset), *pdata++);
2539 /* put anything remaining in the part_buf */
2541 dw_mci_set_part_bytes(host, buf, cnt);
2542 /* Push data if we have reached the expected data length */
2543 if ((data->bytes_xfered + init_cnt) ==
2544 (data->blksz * data->blocks))
2545 mci_writeq(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data64() - drain the FIFO into @buf using 64-bit reads;
 * mirror of dw_mci_pull_data32() for an 8-byte-wide data port.
 * NOTE(review): elided listing — braces and the pdata declaration are not
 * visible here.
 */
2550 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2552 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* Destination not 8-byte aligned: bounce through an aligned buffer. */
2553 if (unlikely((unsigned long)buf & 0x7)) {
2555 /* pull data from fifo into aligned buffer */
2556 u64 aligned_buf[16];
2557 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2558 int items = len >> 3;
2560 for (i = 0; i < items; ++i)
2561 aligned_buf[i] = mci_readq(host,
2562 DATA(host->data_offset));
2563 /* memcpy from aligned buffer into output buffer */
2564 memcpy(buf, aligned_buf, len);
/* Aligned fast path: read whole 64-bit words directly. */
2572 for (; cnt >= 8; cnt -= 8)
2573 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* Trailing 1-7 bytes: stage one more word in part_buf, copy from it. */
2577 host->part_buf = mci_readq(host, DATA(host->data_offset));
2578 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_pull_data() - common entry for PIO reads: first consume any bytes
 * left over in the partial buffer, then delegate the bulk read to the
 * width-specific host->pull_data() implementation (16/32/64-bit).
 */
2582 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2586 /* get remaining partial bytes */
2587 len = dw_mci_pull_part_bytes(host, buf, cnt);
/* Partial buffer alone satisfied the request — done (elided early return). */
2588 if (unlikely(len == cnt))
2593 /* get the rest of the data */
2594 host->pull_data(host, buf, cnt);
/*
 * dw_mci_read_data_pio() - PIO receive path: walk the request's scatterlist
 * via the sg mapping iterator and pull however many bytes the FIFO reports
 * available, repeating while the RXDR interrupt stays asserted (and, when
 * @dto is set, while the FIFO count is non-zero).
 * NOTE(review): elided listing — loop/brace structure and several
 * declarations (buf, len, status) are not visible.
 */
2597 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2599 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2601 unsigned int offset;
2602 struct mmc_data *data = host->data;
2603 int shift = host->data_shift;
2606 unsigned int remain, fcnt;
/* Guard against running with no bus reference held (debug aid). */
2608 if(!host->mmc->bus_refs){
2609 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2613 if (!sg_miter_next(sg_miter))
/* Current scatterlist segment mapped by the iterator. */
2616 host->sg = sg_miter->piter.sg;
2617 buf = sg_miter->addr;
2618 remain = sg_miter->length;
/* Bytes currently readable = FIFO count scaled by port width, plus any
 * partial-buffer bytes already staged. */
2622 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2623 << shift) + host->part_buf_count;
2624 len = min(remain, fcnt);
2627 dw_mci_pull_data(host, (void *)(buf + offset), len);
2628 data->bytes_xfered += len;
2633 sg_miter->consumed = offset;
/* Sample masked status, then ack RXDR before re-testing the condition. */
2634 status = mci_readl(host, MINTSTS);
2635 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2636 /* if the RXDR is ready read again */
2637 } while ((status & SDMMC_INT_RXDR) ||
2638 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2641 if (!sg_miter_next(sg_miter))
2643 sg_miter->consumed = 0;
2645 sg_miter_stop(sg_miter);
2649 sg_miter_stop(sg_miter);
/* All data moved — flag transfer completion for the tasklet. */
2653 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_write_data_pio() - PIO transmit path: walk the scatterlist and
 * push as many bytes as the FIFO has free space for, repeating while the
 * TXDR interrupt stays asserted.
 * NOTE(review): elided listing — loop/brace structure and several
 * declarations (buf, len, status) are not visible.
 */
2656 static void dw_mci_write_data_pio(struct dw_mci *host)
2658 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2660 unsigned int offset;
2661 struct mmc_data *data = host->data;
2662 int shift = host->data_shift;
2665 unsigned int fifo_depth = host->fifo_depth;
2666 unsigned int remain, fcnt;
/* Guard against running with no bus reference held (debug aid). */
2668 if(!host->mmc->bus_refs){
2669 printk("Note: %s host->mmc->bus_refs is 0!!!\n", __func__);
2674 if (!sg_miter_next(sg_miter))
2677 host->sg = sg_miter->piter.sg;
2678 buf = sg_miter->addr;
2679 remain = sg_miter->length;
/* Free FIFO space = (depth - current count) scaled by port width, minus
 * bytes already staged in the partial buffer. */
2683 fcnt = ((fifo_depth -
2684 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2685 << shift) - host->part_buf_count;
2686 len = min(remain, fcnt);
2689 host->push_data(host, (void *)(buf + offset), len);
2690 data->bytes_xfered += len;
2695 sg_miter->consumed = offset;
/* Sample masked status, then ack TXDR before re-testing the condition. */
2696 status = mci_readl(host, MINTSTS);
2697 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2698 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2701 if (!sg_miter_next(sg_miter))
2703 sg_miter->consumed = 0;
2705 sg_miter_stop(sg_miter);
2709 sg_miter_stop(sg_miter);
/* All data moved — flag transfer completion for the tasklet. */
2713 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_cmd_interrupt() - record command-done status and kick the state
 * machine. Also (Rockchip addition) re-arms the data-over watchdog timer
 * with a budget scaled by the request's byte count, except for
 * MMC_STOP_TRANSMISSION.
 * NOTE(review): 'unit' and 'multi' declarations are in elided lines; the
 * per-unit timeout base visible here is 4500 ms.
 */
2716 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
/* Keep the first status value; don't overwrite an earlier error. */
2720 if (!host->cmd_status)
2721 host->cmd_status = status;
2726 if((MMC_STOP_TRANSMISSION != host->cmd->opcode))
/* multi = ceil(BYTCNT / unit), at least 1, plus up to 2 extra for retries. */
2729 multi = mci_readl(host, BYTCNT)/unit;
2730 multi += ((mci_readl(host, BYTCNT) % unit) ? 1 :0 );
2731 multi = (multi > 0) ? multi : 1;
2732 multi += (host->cmd->retries > 2)? 2 : host->cmd->retries;
2733 mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(4500 * multi));//max wait 8s larger
2738 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2739 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_interrupt() - top-half IRQ handler. Reads the masked interrupt
 * status (MINTSTS), acks each handled source in RINTSTS, and defers real
 * work to the tasklet / card workqueue. Handles, in order: command errors,
 * data errors, data-over, RXDR/TXDR PIO service, voltage-switch (VSI),
 * command-done, card-detect, HLE, per-slot SDIO IRQs, and (when IDMAC is
 * enabled) internal-DMA completion.
 * NOTE(review): elided listing — 'i' declaration, some braces and the
 * final return are not visible.
 */
2742 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2744 struct dw_mci *host = dev_id;
2745 u32 pending, sdio_int;
2748 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2751 * DTO fix - version 2.10a and below, and only if internal DMA
/* Quirk: synthesize DATA_OVER when the FIFO count (STATUS[29:17]) shows
 * pending data on affected controller versions. */
2754 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2756 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2757 pending |= SDMMC_INT_DATA_OVER;
2761 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2762 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2763 host->cmd_status = pending;
2765 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2766 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2768 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2771 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2772 /* if there is an error report DATA_ERROR */
2773 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2774 host->data_status = pending;
2776 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2778 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2779 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2780 tasklet_schedule(&host->tasklet);
2783 if (pending & SDMMC_INT_DATA_OVER) {
2784 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
/* Transfer finished — the DTO watchdog is no longer needed. */
2785 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2786 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2787 if (!host->data_status)
2788 host->data_status = pending;
/* On a PIO read, flush whatever remains in the FIFO before completing. */
2790 if (host->dir_status == DW_MCI_RECV_STATUS) {
2791 if (host->sg != NULL)
2792 dw_mci_read_data_pio(host, true);
2794 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2795 tasklet_schedule(&host->tasklet);
2798 if (pending & SDMMC_INT_RXDR) {
2799 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2800 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2801 dw_mci_read_data_pio(host, false);
2804 if (pending & SDMMC_INT_TXDR) {
2805 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2806 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2807 dw_mci_write_data_pio(host);
/* Voltage-switch interrupt is treated like a command completion. */
2810 if (pending & SDMMC_INT_VSI) {
2811 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2812 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2813 dw_mci_cmd_interrupt(host, pending);
2816 if (pending & SDMMC_INT_CMD_DONE) {
2817 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, CMD = 0x%x, INT-pending=0x%x. [%s]",mci_readl(host, CMD),pending,mmc_hostname(host->mmc));
2818 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2819 dw_mci_cmd_interrupt(host, pending);
/* Card insert/remove: hold a wakelock briefly and defer to the workqueue. */
2822 if (pending & SDMMC_INT_CD) {
2823 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2824 MMC_DBG_INFO_FUNC(host->mmc, "SDMMC_INT_CD, INT-pending=0x%x. [%s]", pending, mmc_hostname(host->mmc));
2825 wake_lock_timeout(&host->mmc->detect_wake_lock, 5 * HZ);
2826 queue_work(host->card_workqueue, &host->card_work);
/* Hardware-locked write error: ack and log only. */
2829 if (pending & SDMMC_INT_HLE) {
2830 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2831 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2835 /* Handle SDIO Interrupts */
2836 for (i = 0; i < host->num_slots; i++) {
2837 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved by 8 in IP version 2.40a and later. */
2839 if (host->verid < DW_MMC_240A)
2840 sdio_int = SDMMC_INT_SDIO(i);
2842 sdio_int = SDMMC_INT_SDIO(i + 8);
2844 if (pending & sdio_int) {
2845 mci_writel(host, RINTSTS, sdio_int);
2846 mmc_signal_sdio_irq(slot->mmc);
2852 #ifdef CONFIG_MMC_DW_IDMAC
2853 /* Handle DMA interrupts */
2854 pending = mci_readl(host, IDSTS);
2855 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2856 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2857 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2858 host->dma_ops->complete((void *)host);
/*
 * dw_mci_work_routine_card() - card-detect work item. For each slot,
 * compares the current card-present state with the last recorded one and,
 * on a change: wakes the system, fails the in-flight request (and any
 * queued ones) with -ENOMEDIUM according to the current state-machine
 * state, resets the FIFO (and IDMAC when configured), then tells the MMC
 * core to rescan after the configured detect delay.
 * NOTE(review): elided listing — several braces, 'mrq' assignment and loop
 * structure are not visible; comments below only describe visible lines.
 */
2865 static void dw_mci_work_routine_card(struct work_struct *work)
2867 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2870 for (i = 0; i < host->num_slots; i++) {
2871 struct dw_mci_slot *slot = host->slot[i];
2872 struct mmc_host *mmc = slot->mmc;
2873 struct mmc_request *mrq;
2876 present = dw_mci_get_cd(mmc);
/* Re-check in a loop: state may bounce while we are cleaning up. */
2877 while (present != slot->last_detect_state) {
2878 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2879 present ? "inserted" : "removed");
2880 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
2881 present ? "inserted" : "removed.", mmc_hostname(mmc));
2883 rk_send_wakeup_key();//wake up system
2884 spin_lock_bh(&host->lock);
2886 del_timer(&host->dto_timer); /* delete the timer for INT_DTO */
2887 /* Card change detected */
2888 slot->last_detect_state = present;
2890 /* Clean up queue if present */
/* The active request: fail it at whatever stage the FSM reached. */
2893 if (mrq == host->mrq) {
2897 switch (host->state) {
2900 case STATE_SENDING_CMD:
2901 mrq->cmd->error = -ENOMEDIUM;
2905 case STATE_SENDING_DATA:
2906 mrq->data->error = -ENOMEDIUM;
2907 dw_mci_stop_dma(host);
2909 case STATE_DATA_BUSY:
2910 case STATE_DATA_ERROR:
2911 if (mrq->data->error == -EINPROGRESS)
2912 mrq->data->error = -ENOMEDIUM;
2916 case STATE_SENDING_STOP:
2917 mrq->stop->error = -ENOMEDIUM;
2921 dw_mci_request_end(host, mrq);
/* A merely queued request: unlink it and fail all of its stages. */
2923 list_del(&slot->queue_node);
2924 mrq->cmd->error = -ENOMEDIUM;
2926 mrq->data->error = -ENOMEDIUM;
2928 mrq->stop->error = -ENOMEDIUM;
2930 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",
2931 mrq->cmd->opcode, mmc_hostname(mmc));
/* Drop the lock around the core callback, which may sleep/re-enter. */
2933 spin_unlock(&host->lock);
2934 mmc_request_done(slot->mmc, mrq);
2935 spin_lock(&host->lock);
2939 /* Power down slot */
2941 /* Clear down the FIFO */
2942 dw_mci_fifo_reset(host);
2943 #ifdef CONFIG_MMC_DW_IDMAC
2944 dw_mci_idmac_reset(host);
2949 spin_unlock_bh(&host->lock);
2951 present = dw_mci_get_cd(mmc);
2954 mmc_detect_change(slot->mmc,
2955 msecs_to_jiffies(host->pdata->detect_delay_ms));
2960 /* given a slot id, find out the device node representing that slot */
/* Walks the controller's DT children and matches each child's "reg"
 * property against @slot. Returns the matching node (elided), or — per the
 * guard below — bails out when there is no device / of_node. */
2961 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2963 struct device_node *np;
2967 if (!dev || !dev->of_node)
2970 for_each_child_of_node(dev->of_node, np) {
2971 addr = of_get_property(np, "reg", &len);
/* Skip children without a usable (at least int-sized) "reg" value. */
2972 if (!addr || (len < sizeof(int)))
2974 if (be32_to_cpup(addr) == slot)
/* DT property name -> per-slot quirk flag mapping, scanned by
 * dw_mci_of_get_slot_quirks(). Only one entry is visible here. */
2980 static struct dw_mci_of_slot_quirks {
2983 } of_slot_quirks[] = {
2985 .quirk = "disable-wp",
2986 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/* Accumulate quirk flags for @slot: for every entry in of_slot_quirks[]
 * whose property name exists on the slot's DT node, OR in its id.
 * Returns the accumulated quirks value (return line elided). */
2990 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2992 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2997 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2998 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2999 quirks |= of_slot_quirks[idx].id;
3004 /* find out bus-width for a given slot */
/* Reads the "bus-width" DT property from the controller node itself (the
 * per-slot lookup is commented out). On a missing property it logs an
 * error and falls back to a default width (elided). */
3005 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3007 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3013 if (of_property_read_u32(np, "bus-width", &bus_wd))
3014 dev_err(dev, "bus-width property not found, assuming width"
3020 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/* Looks up "pwr-gpios" on the controller node, requests it via devm, and
 * drives it low (active power-enable per the trailing comment). Missing
 * entries are silently ignored; request failure only warns. */
3021 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
3023 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3029 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
3031 /* Having a missing entry is valid; return silently */
3032 if (!gpio_is_valid(gpio))
3035 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
3036 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3040 gpio_direction_output(gpio, 0);//set 0 to pwr-en
3046 /* find the write protect gpio for a given slot; or -1 if none specified */
/* Looks up "wp-gpios" on the slot's DT node and requests it via devm.
 * Missing entries are silently ignored; request failure only warns. */
3047 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3049 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3055 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3057 /* Having a missing entry is valid; return silently */
3058 if (!gpio_is_valid(gpio))
3061 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3062 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3069 /* find the cd gpio for a given slot */
/* Looks up "cd-gpios" on the controller node and registers it as the MMC
 * core's card-detect GPIO (debounce 0). Missing entries are silently
 * ignored; request failure only warns. */
3070 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3071 struct mmc_host *mmc)
3073 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
3079 gpio = of_get_named_gpio(np, "cd-gpios", 0);
3081 /* Having a missing entry is valid; return silently */
3082 if (!gpio_is_valid(gpio))
3085 if (mmc_gpio_request_cd(mmc, gpio, 0))
3086 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3088 #else /* CONFIG_OF */
/* No-op fallbacks when the kernel is built without device-tree support;
 * the (elided) bodies presumably return neutral defaults — TODO confirm. */
3089 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3093 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3097 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3101 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3105 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
3106 struct mmc_host *mmc)
3110 #endif /* CONFIG_OF */
/*
 * dw_mci_init_slot() - allocate and register one mmc_host for slot @id:
 * clock limits from DT, restrict_caps from "supports-*" properties, OCR
 * mask, caps/caps2/pm_caps from pdata + drv_data + DT flags, block-layer
 * limits, regulators, WP/power GPIOs, mmc_add_host(), then (non-eMMC)
 * pinctrl default/idle state selection and debugfs.
 * NOTE(review): elided listing — error-handling labels, several
 * declarations (freq, ctrl_id, bus_width, ret) and the return paths are
 * not visible; comments below only describe visible lines.
 */
3112 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3114 struct mmc_host *mmc;
3115 struct dw_mci_slot *slot;
3116 const struct dw_mci_drv_data *drv_data = host->drv_data;
3121 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
3125 slot = mmc_priv(mmc);
3129 host->slot[id] = slot;
3132 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3134 mmc->ops = &dw_mci_ops;
/* "clock-freq-min-max" DT property overrides the driver defaults. */
3136 if (of_property_read_u32_array(host->dev->of_node,
3137 "clock-freq-min-max", freq, 2)) {
3138 mmc->f_min = DW_MCI_FREQ_MIN;
3139 mmc->f_max = DW_MCI_FREQ_MAX;
3141 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
3142 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3144 mmc->f_min = freq[0];
3145 mmc->f_max = freq[1];
3147 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
3148 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
3151 if(strstr("mmc0",mmc_hostname(mmc)))
3152 printk("Line%d..%s: The rk_sdmmc %s",__LINE__, __FUNCTION__,RK_SDMMC_DRIVER_VERSION);
/* Rockchip extension: pin the slot to one card type via DT flags. */
3154 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
3155 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
3156 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
3157 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
3158 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
3159 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
3161 if (host->pdata->get_ocr)
3162 mmc->ocr_avail = host->pdata->get_ocr(id);
/* Default OCR: advertise every voltage window from 1.65V up to 3.6V. */
3165 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
3166 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
3167 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
3168 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
3172 * Start with slot power disabled, it will be enabled when a card
3175 if (host->pdata->setpower)
3176 host->pdata->setpower(id, 0);
3178 if (host->pdata->caps)
3179 mmc->caps = host->pdata->caps;
3181 if (host->pdata->pm_caps)
3182 mmc->pm_caps = host->pdata->pm_caps;
/* Controller index: "mshc" DT alias, else platform-device id. */
3184 if (host->dev->of_node) {
3185 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3189 ctrl_id = to_platform_device(host->dev)->id;
3191 if (drv_data && drv_data->caps)
3192 mmc->caps |= drv_data->caps[ctrl_id];
3193 if (drv_data && drv_data->hold_reg_flag)
3194 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
3196 /* set the compatibility of driver. */
3197 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50
3198 | MMC_CAP_UHS_SDR104 | MMC_CAP_ERASE ;
3200 if (host->pdata->caps2)
3201 mmc->caps2 = host->pdata->caps2;
/* Bus width: pdata callback wins, then DT, then (elided) a default. */
3203 if (host->pdata->get_bus_wd)
3204 bus_width = host->pdata->get_bus_wd(slot->id);
3205 else if (host->dev->of_node)
3206 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
3210 switch (bus_width) {
3212 mmc->caps |= MMC_CAP_8_BIT_DATA;
3214 mmc->caps |= MMC_CAP_4_BIT_DATA;
/* Optional DT capability / power-management flags. */
3217 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
3218 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
3219 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
3220 mmc->caps |= MMC_CAP_SDIO_IRQ;
3221 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
3222 mmc->caps |= MMC_CAP_HW_RESET;
3223 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
3224 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
3225 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
3226 mmc->pm_caps |= MMC_PM_KEEP_POWER;
3227 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
3228 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
3229 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
3230 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3232 /*Assign pm_caps pass to pm_flags*/
3233 mmc->pm_flags = mmc->pm_caps;
3235 if (host->pdata->blk_settings) {
3236 mmc->max_segs = host->pdata->blk_settings->max_segs;
3237 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
3238 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
3239 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
3240 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
3242 /* Useful defaults if platform data is unset. */
3243 #ifdef CONFIG_MMC_DW_IDMAC
3244 mmc->max_segs = host->ring_size;
3245 mmc->max_blk_size = 65536;
3246 mmc->max_blk_count = host->ring_size;
3247 mmc->max_seg_size = 0x1000;
3248 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
3251 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
3252 mmc->max_blk_count = 512;
3253 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
3254 mmc->max_seg_size = mmc->max_req_size;
3255 #endif /* CONFIG_MMC_DW_IDMAC */
3258 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
3260 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))
/* vmmc regulator is only requested for SD-restricted slots. */
3265 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
3266 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
3270 if (IS_ERR(host->vmmc)) {
3271 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
3274 ret = regulator_enable(host->vmmc);
3277 "failed to enable regulator: %d\n", ret);
3284 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
3286 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3287 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3289 ret = mmc_add_host(mmc);
3293 /* Pinctrl set default iomux state to fucntion port.
3294 * Fixme: DON'T TOUCH EMMC SETTING!
3296 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3298 host->pinctrl = devm_pinctrl_get(host->dev);
3299 if(IS_ERR(host->pinctrl)){
3300 printk("%s: Warning : No pinctrl used!\n",mmc_hostname(host->mmc));
3302 host->pins_idle= pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_IDLE);
/* NOTE(review): this checks pins_default while looking up pins_idle —
 * looks like it should test host->pins_idle; confirm against upstream. */
3303 if(IS_ERR(host->pins_default)){
3304 printk("%s: Warning : No IDLE pinctrl matched!\n", mmc_hostname(host->mmc));
3308 if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3309 printk("%s: Warning : Idle pinctrl setting failed!\n", mmc_hostname(host->mmc));
3312 host->pins_default = pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_DEFAULT);
3313 if(IS_ERR(host->pins_default)){
3314 printk("%s: Warning : No default pinctrl matched!\n", mmc_hostname(host->mmc));
3318 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3319 printk("%s: Warning : Default pinctrl setting failed!\n", mmc_hostname(host->mmc));
3325 #if defined(CONFIG_DEBUG_FS)
3326 dw_mci_init_debugfs(slot);
3329 /* Card initially undetected */
3330 slot->last_detect_state = 1;
/*
 * dw_mci_cleanup_slot() - tear down one slot: run the platform exit hook,
 * unregister the mmc_host from the core, clear the host's slot pointer,
 * and free the host structure.
 */
3339 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
3341 /* Shutdown detect IRQ */
3342 if (slot->host->pdata->exit)
3343 slot->host->pdata->exit(id);
3345 /* Debugfs stuff is cleaned up by mmc core */
3346 mmc_remove_host(slot->mmc);
3347 slot->host->slot[id] = NULL;
3348 mmc_free_host(slot->mmc);
/*
 * dw_mci_init_dma() - allocate the descriptor/sg translation page and bind
 * the DMA backend: internal IDMAC or external EDMA depending on Kconfig,
 * with a PIO fallback when no usable backend initializes.
 * NOTE(review): elided listing — the goto labels / no_dma path are not
 * visible.
 */
3351 static void dw_mci_init_dma(struct dw_mci *host)
3353 /* Alloc memory for sg translation */
/* devm-managed coherent page shared with the DMA engine. */
3354 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
3355 &host->sg_dma, GFP_KERNEL);
3356 if (!host->sg_cpu) {
3357 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3362 /* Determine which DMA interface to use */
3363 #if defined(CONFIG_MMC_DW_IDMAC)
3364 host->dma_ops = &dw_mci_idmac_ops;
3365 dev_info(host->dev, "Using internal DMA controller.\n")
3366 #elif defined(CONFIG_MMC_DW_EDMAC)
3367 host->dma_ops = &dw_mci_edmac_ops;
3368 dev_info(host->dev, "Using external DMA controller.\n");
/* Backend must provide the full ops set before we commit to DMA mode. */
3374 if (host->dma_ops->init && host->dma_ops->start &&
3375 host->dma_ops->stop && host->dma_ops->cleanup) {
3376 if (host->dma_ops->init(host)) {
3377 dev_err(host->dev, "%s: Unable to initialize "
3378 "DMA Controller.\n", __func__);
3382 dev_err(host->dev, "DMA initialization not found.\n");
3390 dev_info(host->dev, "Using PIO mode.\n");
/*
 * dw_mci_ctrl_reset() - set the given reset bit(s) in CTRL and poll until
 * the hardware clears them, bounded by a 500 ms timeout. Returns true on
 * success (elided), logs and returns false on timeout.
 */
3395 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3397 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3400 ctrl = mci_readl(host, CTRL);
3402 mci_writel(host, CTRL, ctrl);
3404 /* wait till resets clear */
3406 ctrl = mci_readl(host, CTRL);
3407 if (!(ctrl & reset))
3409 } while (time_before(jiffies, timeout));
3412 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * dw_mci_fifo_reset() - stop the sg mapping iterator (a reset raises a
 * block interrupt, so the scatter-gather pointer must be quiesced first)
 * and then reset only the controller FIFO.
 */
3418 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3421 * Reseting generates a block interrupt, hence setting
3422 * the scatter-gather pointer to NULL.
3425 sg_miter_stop(&host->sg_miter);
3429 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full reset: FIFO + controller (+ DMA) reset bits in one CTRL cycle. */
3432 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3434 return dw_mci_ctrl_reset(host,
3435 SDMMC_CTRL_FIFO_RESET |
3437 SDMMC_CTRL_DMA_RESET);
/* DT property name -> host quirk flag mapping used by dw_mci_parse_dt().
 * Only the "broken-cd" entry is visible here. */
3442 static struct dw_mci_of_quirks {
3447 .quirk = "broken-cd",
3448 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * dw_mci_parse_dt() - build a dw_mci_board from the controller's DT node:
 * num-slots, quirk flags, fifo-depth, card-detect-delay, clock-frequency,
 * driver-specific parse_dt hook, and a set of caps/caps2/pm_caps property
 * translations. Returns the pdata, or ERR_PTR on allocation / hook error.
 */
3452 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3454 struct dw_mci_board *pdata;
3455 struct device *dev = host->dev;
3456 struct device_node *np = dev->of_node;
3457 const struct dw_mci_drv_data *drv_data = host->drv_data;
3459 u32 clock_frequency;
3461 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3463 dev_err(dev, "could not allocate memory for pdata\n");
3464 return ERR_PTR(-ENOMEM);
3467 /* find out number of slots supported */
3468 if (of_property_read_u32(dev->of_node, "num-slots",
3469 &pdata->num_slots)) {
3470 dev_info(dev, "num-slots property not found, "
3471 "assuming 1 slot is available\n");
3472 pdata->num_slots = 1;
/* Translate DT quirk property names into pdata->quirks flags. */
3476 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3477 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3478 pdata->quirks |= of_quirks[idx].id;
3481 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3482 dev_info(dev, "fifo-depth property not found, using "
3483 "value of FIFOTH register as default\n");
3485 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3487 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3488 pdata->bus_hz = clock_frequency;
/* Give the SoC-specific driver a chance to parse its own properties. */
3490 if (drv_data && drv_data->parse_dt) {
3491 ret = drv_data->parse_dt(host);
3493 return ERR_PTR(ret);
3496 if (of_find_property(np, "keep-power-in-suspend", NULL))
3497 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3499 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3500 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3502 if (of_find_property(np, "supports-highspeed", NULL))
3503 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3505 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3506 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3508 if (of_find_property(np, "supports-DDR_MODE", NULL))
3509 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3511 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3512 pdata->caps2 |= MMC_CAP2_HS200;
3514 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3515 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3517 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3518 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3520 if (of_get_property(np, "cd-inverted", NULL))
3521 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3522 if (of_get_property(np, "bootpart-no-access", NULL))
3523 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3528 #else /* CONFIG_OF */
/* Without device-tree support there is no pdata source: always fail. */
3529 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3531 return ERR_PTR(-EINVAL);
3533 #endif /* CONFIG_OF */
/*
 * dw_mci_dealwith_timeout() - recovery path for a data-over timeout: mark
 * the data phase as failed (CRC + end-bit error bits), complete it, fully
 * reset the controller, re-init the IDMAC if used, restore FIFOTH/TMOUT,
 * re-enable interrupts, re-apply ios/bus setup for keep-power slots, then
 * schedule the tasklet to run the state machine.
 * NOTE(review): elided listing — 'regs'/'i' declarations and several
 * braces are not visible; the switch only shows the data-phase cases.
 */
3535 static void dw_mci_dealwith_timeout(struct dw_mci *host)
3539 switch(host->state){
3542 case STATE_SENDING_DATA:
3543 case STATE_DATA_BUSY:
/* Force a data error so the tasklet completes the request as failed. */
3544 host->data_status |= (SDMMC_INT_DCRC|SDMMC_INT_EBE);
3545 mci_writel(host, RINTSTS, SDMMC_INT_DRTO); // clear interrupt
3546 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3547 host->state = STATE_DATA_BUSY;
3548 if (!dw_mci_ctrl_all_reset(host)) {
3553 /* NO requirement to reclaim slave chn using external dmac */
3554 #ifdef CONFIG_MMC_DW_IDMAC
3555 if (host->use_dma && host->dma_ops->init)
3556 host->dma_ops->init(host);
3560 * Restore the initial value at FIFOTH register
3561 * And Invalidate the prev_blksz with zero
3563 mci_writel(host, FIFOTH, host->fifoth_val);
3564 host->prev_blksz = 0;
3565 mci_writel(host, TMOUT, 0xFFFFFFFF);
3566 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3567 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR
3568 | SDMMC_INT_RXDR | SDMMC_INT_VSI | DW_MCI_ERROR_FLAGS;
/* Card-detect interrupt only for non-SDIO-restricted hosts. */
3569 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3570 regs |= SDMMC_INT_CD;
3571 mci_writel(host, INTMASK, regs);
3572 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3573 for (i = 0; i < host->num_slots; i++) {
3574 struct dw_mci_slot *slot = host->slot[i];
3577 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3578 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3579 dw_mci_setup_bus(slot, true);
3582 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3583 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_dto_timeout() - dto_timer expiry callback: with the host IRQ
 * masked, fake an end-bit error as the data status, clear all raw
 * interrupts, and run the full timeout recovery.
 */
3589 static void dw_mci_dto_timeout(unsigned long host_data)
3591 struct dw_mci *host = (struct dw_mci *) host_data;
3593 disable_irq(host->irq);
3595 host->data_status = SDMMC_INT_EBE;
3596 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3597 dw_mci_dealwith_timeout(host);
3599 enable_irq(host->irq);
/*
 * dw_mci_probe() - bring up one controller instance: parse DT pdata, read
 * the IP version (selects the DATA register offset), acquire/enable the
 * hclk_mmc and clk_mmc clocks, pick the data-port width from HCON, reset
 * the controller, init DMA, program FIFOTH, register the IRQ and the
 * card-detect workqueue, create the slots, then unmask interrupts.
 * NOTE(review): elided listing — error labels, 'regs', 'fifo_size',
 * 'init_slots' declarations and several braces/returns are not visible.
 */
3601 int dw_mci_probe(struct dw_mci *host)
3603 const struct dw_mci_drv_data *drv_data = host->drv_data;
3604 int width, i, ret = 0;
3610 host->pdata = dw_mci_parse_dt(host);
3611 if (IS_ERR(host->pdata)) {
3612 dev_err(host->dev, "platform data not available\n");
3617 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3619 "Platform data must supply select_slot function\n");
3624 * In 2.40a spec, Data offset is changed.
3625 * Need to check the version-id and set data-offset for DATA register.
3627 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3628 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3630 if (host->verid < DW_MMC_240A)
3631 host->data_offset = DATA_OFFSET;
3633 host->data_offset = DATA_240A_OFFSET;
/* Bus (AHB) clock for the controller's register interface. */
3636 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3637 if (IS_ERR(host->hclk_mmc)) {
3638 dev_err(host->dev, "failed to get hclk_mmc\n");
3639 ret = PTR_ERR(host->hclk_mmc);
3643 clk_prepare_enable(host->hclk_mmc);
/* Card (CIU) clock. */
3646 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3647 if (IS_ERR(host->clk_mmc)) {
3648 dev_err(host->dev, "failed to get clk mmc_per\n");
3649 ret = PTR_ERR(host->clk_mmc);
3653 host->bus_hz = host->pdata->bus_hz;
3654 if (!host->bus_hz) {
3655 dev_err(host->dev,"Platform data must supply bus speed\n");
3660 if (host->verid < DW_MMC_240A)
3661 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3663 //rockchip: fix divider 2 in clksum before controlller
3664 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3667 dev_err(host->dev, "failed to set clk mmc\n");
3670 clk_prepare_enable(host->clk_mmc);
3672 if (drv_data && drv_data->setup_clock) {
3673 ret = drv_data->setup_clock(host);
3676 "implementation specific clock setup failed\n");
3681 host->quirks = host->pdata->quirks;
3682 host->irq_state = true;
3683 host->set_speed = 0;
3685 host->svi_flags = 0;
3687 spin_lock_init(&host->lock);
3688 INIT_LIST_HEAD(&host->queue);
3691 * Get the host data width - this assumes that HCON has been set with
3692 * the correct values.
/* HCON[9:7] encodes the data-port width: 0=16-bit, 2=64-bit, else 32. */
3694 i = (mci_readl(host, HCON) >> 7) & 0x7;
3696 host->push_data = dw_mci_push_data16;
3697 host->pull_data = dw_mci_pull_data16;
3699 host->data_shift = 1;
3700 } else if (i == 2) {
3701 host->push_data = dw_mci_push_data64;
3702 host->pull_data = dw_mci_pull_data64;
3704 host->data_shift = 3;
3706 /* Check for a reserved value, and warn if it is */
3708 "HCON reports a reserved host data width!\n"
3709 "Defaulting to 32-bit access.\n");
3710 host->push_data = dw_mci_push_data32;
3711 host->pull_data = dw_mci_pull_data32;
3713 host->data_shift = 2;
3716 /* Reset all blocks */
3717 if (!dw_mci_ctrl_all_reset(host))
3720 host->dma_ops = host->pdata->dma_ops;
3721 dw_mci_init_dma(host);
3723 /* Clear the interrupts for the host controller */
3724 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3725 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3727 /* Put in max timeout */
3728 mci_writel(host, TMOUT, 0xFFFFFFFF);
3731 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3732 * Tx Mark = fifo_size / 2 DMA Size = 8
3734 if (!host->pdata->fifo_depth) {
3736 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3737 * have been overwritten by the bootloader, just like we're
3738 * about to do, so if you know the value for your hardware, you
3739 * should put it in the platform data.
3741 fifo_size = mci_readl(host, FIFOTH);
3742 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3744 fifo_size = host->pdata->fifo_depth;
3746 host->fifo_depth = fifo_size;
3748 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3749 mci_writel(host, FIFOTH, host->fifoth_val);
3751 /* disable clock to CIU */
3752 mci_writel(host, CLKENA, 0);
3753 mci_writel(host, CLKSRC, 0);
3755 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3756 host->card_workqueue = alloc_workqueue("dw-mci-card",
3757 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
3758 if (!host->card_workqueue) {
3762 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
3763 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3764 host->irq_flags, "dw-mci", host);
/* Slot count: pdata override, else HCON[5:1]+1 from hardware. */
3768 if (host->pdata->num_slots)
3769 host->num_slots = host->pdata->num_slots;
3771 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
/* Arm the DTO watchdog used by dw_mci_cmd_interrupt()/dto_timeout(). */
3773 setup_timer(&host->dto_timer, dw_mci_dto_timeout, (unsigned long)host);
3774 /* We need at least one slot to succeed */
3775 for (i = 0; i < host->num_slots; i++) {
3776 ret = dw_mci_init_slot(host, i);
3778 dev_dbg(host->dev, "slot %d init failed\n", i);
3784 * Enable interrupts for command done, data over, data empty, card det,
3785 * receive ready and error such as transmit, receive timeout, crc error
3787 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3788 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
3789 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect only for removable-card hosts (not SDIO / eMMC). */
3790 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3791 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3792 regs |= SDMMC_INT_CD;
3794 mci_writel(host, INTMASK, regs);
3796 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
3798 dev_info(host->dev, "DW MMC controller at irq %d, "
3799 "%d bit host data width, "
3801 host->irq, width, fifo_size);
3804 dev_info(host->dev, "%d slots initialized\n", init_slots);
3806 dev_dbg(host->dev, "attempted to initialize %d slots, "
3807 "but failed on all\n", host->num_slots);
3812 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
3813 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* --- error unwind (labels elided): workqueue, DMA, regulator, clocks --- */
3818 destroy_workqueue(host->card_workqueue);
3821 if (host->use_dma && host->dma_ops->exit)
3822 host->dma_ops->exit(host);
3825 regulator_disable(host->vmmc);
3826 regulator_put(host->vmmc);
3830 if (!IS_ERR(host->clk_mmc))
3831 clk_disable_unprepare(host->clk_mmc);
3833 if (!IS_ERR(host->hclk_mmc))
3834 clk_disable_unprepare(host->hclk_mmc);
3838 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove() - tear down a controller: stop the DTO timer, mask and
 * clear all interrupts, clean up every slot, gate the CIU clock, destroy
 * the card workqueue, exit the DMA backend, and release regulator/clocks.
 */
3840 void dw_mci_remove(struct dw_mci *host)
3843 del_timer_sync(&host->dto_timer);
3845 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3846 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3848 for(i = 0; i < host->num_slots; i++){
3849 dev_dbg(host->dev, "remove slot %d\n", i);
3851 dw_mci_cleanup_slot(host->slot[i], i);
3854 /* disable clock to CIU */
3855 mci_writel(host, CLKENA, 0);
3856 mci_writel(host, CLKSRC, 0);
3858 destroy_workqueue(host->card_workqueue);
3860 if(host->use_dma && host->dma_ops->exit)
3861 host->dma_ops->exit(host);
3864 regulator_disable(host->vmmc);
3865 regulator_put(host->vmmc);
3867 if(!IS_ERR(host->clk_mmc))
3868 clk_disable_unprepare(host->clk_mmc);
3870 if(!IS_ERR(host->hclk_mmc))
3871 clk_disable_unprepare(host->hclk_mmc);
3873 EXPORT_SYMBOL(dw_mci_remove);
3877 #ifdef CONFIG_PM_SLEEP
3879 * TODO: we should probably disable the clock to the card in the suspend path.
/*
 * dw_mci_suspend - put the controller into a low-power state.
 *
 * Drops the card regulator, shuts down the DMA engine, and — for the
 * SD-card controller only — disables rescans, moves the pins to their
 * idle pinctrl state, masks the controller and arms the card-detect GPIO
 * as a wakeup source so insert/remove events can wake the system.
 * Returns 0 (return statement is in an elided line of this excerpt).
 */
3881 int dw_mci_suspend(struct dw_mci *host)
/* vmmc is presumably null/IS_ERR-guarded in an elided line — confirm. */
3884 	regulator_disable(host->vmmc);
3886 	if(host->use_dma && host->dma_ops->exit)
3887 		host->dma_ops->exit(host);
3889 	/*only for sdmmc controller*/
3890 	if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD){
/* Block mmc core rescans while suspended, and release the wake lock if a
 * detect work item was still pending. */
3891 		host->mmc->rescan_disable = 1;
3892 		if(cancel_delayed_work_sync(&host->mmc->detect))
3893 			wake_unlock(&host->mmc->detect_wake_lock);
3895 		disable_irq(host->irq);
/* Park the MMC pins in their idle state to minimise leakage. */
3896 		if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3897 			MMC_DBG_ERR_FUNC(host->mmc, "Idle pinctrl setting failed! [%s]",
3898 				mmc_hostname(host->mmc));
/* Re-request the card-detect GPIO so its IRQ exists for wakeup below. */
3899 		dw_mci_of_get_cd_gpio(host->dev,0,host->mmc);
/* Clear status, mask all interrupts and disable the controller core. */
3900 		mci_writel(host, RINTSTS, 0xFFFFFFFF);
3901 		mci_writel(host, INTMASK, 0x00);
3902 		mci_writel(host, CTRL, 0x00);
/* Card-detect IRQ remains armed as the wakeup source. */
3903 		enable_irq_wake(host->mmc->slot.cd_irq);
3907 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - restore the controller after dw_mci_suspend().
 *
 * Undoes the suspend-time wakeup/pinctrl/rescan settings (SD controller
 * only), re-enables the regulator and DMA engine, resets the controller,
 * reprograms FIFO/timeout/interrupt registers, and restores the bus for
 * slots that kept power. Error paths and a retry loop (retry_cnt) exist
 * in lines elided from this excerpt.
 */
3909 int dw_mci_resume(struct dw_mci *host)
3911 	int i, ret, retry_cnt = 0;
3913 	struct dw_mci_slot *slot;
/* SDIO: presumably returns early when no card is present — the branch body
 * is in an elided line; confirm. */
3915 	if (host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO){
3916 		slot = mmc_priv(host->mmc);
3918 		if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags))
3921 	/*only for sdmmc controller*/
3922 	if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
/* Reverse the suspend-time wakeup arming and pin parking. */
3923 		disable_irq_wake(host->mmc->slot.cd_irq);
3924 		mmc_gpio_free_cd(host->mmc);
3925 		if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3926 			MMC_DBG_ERR_FUNC(host->mmc, "Default pinctrl setting failed! [%s]",
3927 				mmc_hostname(host->mmc));
3928 		host->mmc->rescan_disable = 0;
/* SoC-specific GRF register pokes (write-mask in the high 16 bits clears
 * one control bit per SoC) — NOTE(review): exact bit semantics need the
 * respective TRMs; the rk3288 branch condition is in an elided line. */
3931 			grf_writel(((1 << 12) << 16) | (0 << 12), RK3288_GRF_SOC_CON0);
3933 		else if(cpu_is_rk3036())
3934 			grf_writel(((1 << 11) << 16) | (0 << 11), RK3036_GRF_SOC_CON0);
3935 		else if(cpu_is_rk3126())
/* Re-enable the card regulator dropped in suspend (guard elided). */
3942 	ret = regulator_enable(host->vmmc);
3945 			"failed to enable regulator: %d\n", ret);
/* Full controller reset; failure handling is in elided lines. */
3950 	if(!dw_mci_ctrl_all_reset(host)){
3955 	if(host->use_dma && host->dma_ops->init)
3956 		host->dma_ops->init(host);
3959 	 * Restore the initial value at FIFOTH register
3960 	 * And Invalidate the prev_blksz with zero
3962 	mci_writel(host, FIFOTH, host->fifoth_val);
3963 	host->prev_blksz = 0;
3964 	/* Put in max timeout */
3965 	mci_writel(host, TMOUT, 0xFFFFFFFF);
/* Rebuild the interrupt mask; card-detect only for non-SDIO hosts. */
3967 	mci_writel(host, RINTSTS, 0xFFFFFFFF);
3968 	regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
3970 	if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3971 		regs |= SDMMC_INT_CD;
3972 	mci_writel(host, INTMASK, regs);
3973 	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3974 	/*only for sdmmc controller*/
/* Only unmask the IRQ on the first pass through (retry_cnt == 0). */
3975 	if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)&& (!retry_cnt)){
3976 		enable_irq(host->irq);
/* Restore ios and re-run bus setup for slots that kept power across
 * suspend (MMC_PM_KEEP_POWER). */
3979 	for(i = 0; i < host->num_slots; i++){
3980 		struct dw_mci_slot *slot = host->slot[i];
3983 		if(slot->mmc->pm_flags & MMC_PM_KEEP_POWER){
3984 			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3985 			dw_mci_setup_bus(slot, true);
3991 EXPORT_SYMBOL(dw_mci_resume);
3992 #endif /* CONFIG_PM_SLEEP */
/* Module init: just announces the driver on load (platform drivers for the
 * individual SoC glue layers register themselves elsewhere). */
3994 static int __init dw_mci_init(void)
3996 	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
/* Module exit: nothing visible to undo in this excerpt (body elided). */
4000 static void __exit dw_mci_exit(void)
4004 module_init(dw_mci_init);
4005 module_exit(dw_mci_exit);
4007 MODULE_DESCRIPTION("Rockchip specific DW Multimedia Card Interface driver");
4008 MODULE_AUTHOR("NXP Semiconductor VietNam");
4009 MODULE_AUTHOR("Imagination Technologies Ltd");
4010 MODULE_AUTHOR("Shawn Lin <lintao@rock-chips.com>");
/* Fixed mojibake: the original string carried a GBK full-width comma
 * mis-decoded as "£¬" plus stray spaces inside the e-mail angle brackets,
 * which showed up garbled in `modinfo` output. */
4011 MODULE_AUTHOR("Rockchip Electronics, Bangwang Xie <xbw@rock-chips.com>");
4012 MODULE_LICENSE("GPL v2");