2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/err.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/ioport.h>
25 #include <linux/module.h>
26 #include <linux/platform_device.h>
27 #include <linux/seq_file.h>
28 #include <linux/slab.h>
29 #include <linux/stat.h>
30 #include <linux/delay.h>
31 #include <linux/irq.h>
32 #include <linux/mmc/host.h>
33 #include <linux/mmc/mmc.h>
34 #include <linux/mmc/sd.h>
35 #include <linux/mmc/sdio.h>
36 #include <linux/mmc/rk_mmc.h>
37 #include <linux/bitops.h>
38 #include <linux/regulator/consumer.h>
39 #include <linux/workqueue.h>
41 #include <linux/of_gpio.h>
42 #include <linux/mmc/slot-gpio.h>
45 #include "rk_sdmmc_of.h"
46 #include <linux/regulator/rockchip_io_vol_domain.h>
48 #define RK_SDMMC_DRIVER_VERSION "Ver 1.00. The last modify date is 2014-05-05"
50 /* Common flag combinations */
51 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
52 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
54 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
56 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
57 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
58 #define DW_MCI_SEND_STATUS 1
59 #define DW_MCI_RECV_STATUS 2
60 #define DW_MCI_DMA_THRESHOLD 16
62 #define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
63 #define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
65 #define SDMMC_DATA_TIMEOUT_SD 500 /*max is 250ms showed in Spec; Maybe adapt the value for the sick card.*/
66 #define SDMMC_DATA_TIMEOUT_SDIO 250
67 #define SDMMC_DATA_TIMEOUT_EMMC 2500
69 #define SDMMC_CMD_RTO_MAX_HOLD 200
70 #define SDMMC_WAIT_FOR_UNBUSY 2500
72 #ifdef CONFIG_MMC_DW_IDMAC
73 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
74 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
75 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
79 u32 des0; /* Control Descriptor */
80 #define IDMAC_DES0_DIC BIT(1)
81 #define IDMAC_DES0_LD BIT(2)
82 #define IDMAC_DES0_FD BIT(3)
83 #define IDMAC_DES0_CH BIT(4)
84 #define IDMAC_DES0_ER BIT(5)
85 #define IDMAC_DES0_CES BIT(30)
86 #define IDMAC_DES0_OWN BIT(31)
88 u32 des1; /* Buffer sizes */
89 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
90 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
92 u32 des2; /* buffer 1 physical address */
94 u32 des3; /* buffer 2 physical address */
96 #endif /* CONFIG_MMC_DW_IDMAC */
/* Standard SD tuning block pattern for a 4-bit bus (sent in response to CMD19). */
98 static const u8 tuning_blk_pattern_4bit[] = {
99 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
100 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
101 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
102 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
103 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
104 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
105 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
106 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard tuning block pattern for an 8-bit bus (eMMC HS200 tuning, CMD21). */
109 static const u8 tuning_blk_pattern_8bit[] = {
110 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
111 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
112 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
113 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
114 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
115 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
116 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
117 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
118 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
119 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
120 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
121 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
122 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
123 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
124 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
125 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
128 static inline bool dw_mci_fifo_reset(struct dw_mci *host);
129 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
130 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
131 static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
133 /* Dump all registers of the current host to the kernel log (debug helper). */
134 static int dw_mci_regs_printk(struct dw_mci *host)
136 struct sdmmc_reg *regs = dw_mci_regs;
/* Walk the name-terminated register table, printing name, offset and value. */
138 while( regs->name != 0 ){
139 printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
142 printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
147 #if defined(CONFIG_DEBUG_FS)
/* debugfs "req" file: dump the slot's in-flight mmc_request (cmd/data/stop). */
148 static int dw_mci_req_show(struct seq_file *s, void *v)
150 struct dw_mci_slot *slot = s->private;
151 struct mmc_request *mrq;
152 struct mmc_command *cmd;
153 struct mmc_command *stop;
154 struct mmc_data *data;
156 /* Make sure we get a consistent snapshot */
157 spin_lock_bh(&slot->host->lock);
167 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
168 cmd->opcode, cmd->arg, cmd->flags,
169 cmd->resp[0], cmd->resp[1], cmd->resp[2],
/* NOTE(review): resp[2] is printed twice here; the 4th field should almost
 * certainly be cmd->resp[3] (the format has four %x for rsp). Confirm against
 * the mainline dw_mmc driver and fix — cannot be changed in this pass. */
170 cmd->resp[2], cmd->error);
172 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
173 data->bytes_xfered, data->blocks,
174 data->blksz, data->flags, data->error);
177 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
178 stop->opcode, stop->arg, stop->flags,
179 stop->resp[0], stop->resp[1], stop->resp[2],
/* NOTE(review): same duplicated resp[2] for the stop command — should be resp[3]. */
180 stop->resp[2], stop->error);
183 spin_unlock_bh(&slot->host->lock);
/* debugfs open hook: bind dw_mci_req_show via single_open. */
188 static int dw_mci_req_open(struct inode *inode, struct file *file)
190 return single_open(file, dw_mci_req_show, inode->i_private);
/* File operations for the per-slot debugfs "req" file. */
193 static const struct file_operations dw_mci_req_fops = {
194 .owner = THIS_MODULE,
195 .open = dw_mci_req_open,
198 .release = single_release,
/* debugfs "regs" file: print the register offsets of key controller registers.
 * NOTE(review): these print the SDMMC_* offset constants, not live register
 * contents — confirm whether reads were intended here. */
201 static int dw_mci_regs_show(struct seq_file *s, void *v)
203 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
204 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
205 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
206 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
207 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
208 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open hook: bind dw_mci_regs_show via single_open. */
213 static int dw_mci_regs_open(struct inode *inode, struct file *file)
215 return single_open(file, dw_mci_regs_show, inode->i_private);
/* File operations for the debugfs "regs" file. */
218 static const struct file_operations dw_mci_regs_fops = {
219 .owner = THIS_MODULE,
220 .open = dw_mci_regs_open,
223 .release = single_release,
/* Create the per-slot debugfs entries (regs, req, state, pending/completed
 * events) under the mmc host's debugfs root. Failure is non-fatal. */
226 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
228 struct mmc_host *mmc = slot->mmc;
229 struct dw_mci *host = slot->host;
233 root = mmc->debugfs_root;
237 node = debugfs_create_file("regs", S_IRUSR, root, host,
242 node = debugfs_create_file("req", S_IRUSR, root, slot,
247 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
251 node = debugfs_create_x32("pending_events", S_IRUSR, root,
252 (u32 *)&host->pending_events);
256 node = debugfs_create_x32("completed_events", S_IRUSR, root,
257 (u32 *)&host->completed_events);
264 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
266 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the controller's data/response timeout counter to its maximum. */
268 static void dw_mci_set_timeout(struct dw_mci *host)
270 /* timeout (maximum) */
271 mci_writel(host, TMOUT, 0xffffffff);
/* Translate an mmc_command into the controller's CMD register flags:
 * stop/wait-prev-data bits, response-expected/long/CRC bits, and data
 * direction/stream bits, then let the platform driver adjust via
 * drv_data->prepare_command. Returns the assembled CMDR value. */
274 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
276 struct mmc_data *data;
277 struct dw_mci_slot *slot = mmc_priv(mmc);
278 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
/* Mark the command in-flight; the ISR/tasklet fills in the real status. */
280 cmd->error = -EINPROGRESS;
284 if (cmdr == MMC_STOP_TRANSMISSION)
285 cmdr |= SDMMC_CMD_STOP;
287 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
289 if (cmd->flags & MMC_RSP_PRESENT) {
290 /* We expect a response, so set this bit */
291 cmdr |= SDMMC_CMD_RESP_EXP;
292 if (cmd->flags & MMC_RSP_136)
293 cmdr |= SDMMC_CMD_RESP_LONG;
296 if (cmd->flags & MMC_RSP_CRC)
297 cmdr |= SDMMC_CMD_RESP_CRC;
301 cmdr |= SDMMC_CMD_DAT_EXP;
302 if (data->flags & MMC_DATA_STREAM)
303 cmdr |= SDMMC_CMD_STRM_MODE;
304 if (data->flags & MMC_DATA_WRITE)
305 cmdr |= SDMMC_CMD_DAT_WR;
/* Give the SoC-specific driver a chance to tweak the command flags. */
308 if (drv_data && drv_data->prepare_command)
309 drv_data->prepare_command(slot->host, &cmdr);
/* Build an abort command for a failed data transfer: CMD12 (STOP_TRANSMISSION)
 * for block read/write, or CMD52 write to the CCCR ABORT register for SDIO
 * extended transfers. Fills host->stop_abort and returns its CMDR flags. */
314 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
316 struct mmc_command *stop;
322 stop = &host->stop_abort;
324 memset(stop, 0, sizeof(struct mmc_command));
326 if (cmdr == MMC_READ_SINGLE_BLOCK ||
327 cmdr == MMC_READ_MULTIPLE_BLOCK ||
328 cmdr == MMC_WRITE_BLOCK ||
329 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
330 stop->opcode = MMC_STOP_TRANSMISSION;
332 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
333 } else if (cmdr == SD_IO_RW_EXTENDED) {
/* SDIO: write 1 to the CCCR ABORT register for the function being aborted. */
334 stop->opcode = SD_IO_RW_DIRECT;
335 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
336 ((cmd->arg >> 28) & 0x7);
337 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
342 cmdr = stop->opcode | SDMMC_CMD_STOP |
343 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/* Write CMDARG/CMD to launch a command on the controller. For CMD11 (voltage
 * switch) low-power clock gating is disabled first and the VOLT_SWITCH bit is
 * set. Afterwards the function polls (up to 5s) for the data/MC busy bits to
 * clear before logging the submission. */
348 static void dw_mci_start_command(struct dw_mci *host,
349 struct mmc_command *cmd, u32 cmd_flags)
351 unsigned long time_loop = jiffies + msecs_to_jiffies(5000);
355 host->pre_cmd = host->cmd;
358 "start command: ARGR=0x%08x CMDR=0x%08x\n",
359 cmd->arg, cmd_flags);
361 if(SD_SWITCH_VOLTAGE == cmd->opcode){
362 /*confirm non-low-power mode*/
363 struct dw_mci_slot *slot = host->slot[0];//temporality fix slot[0] due to host->num_slots equal to 1;
364 mci_writel(host, CMDARG, 0);
365 dw_mci_disable_low_power(slot);
366 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
367 __LINE__, __FUNCTION__,mmc_hostname(host->mmc));
369 cmd_flags |= SDMMC_CMD_VOLT_SWITCH;
372 mci_writel(host, CMDARG, cmd->arg);
374 if(host->mmc->hold_reg_flag)
375 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;//fix the value to 1 in some Soc,for example RK3188.
/* Setting SDMMC_CMD_START hands the command to the controller. */
377 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Busy-poll until the controller reports not-busy, the card disappears,
 * or the 5 s deadline expires. */
381 while ((time_before(jiffies, time_loop))&&(test_bit(DW_MMC_CARD_PRESENT, &host->cur_slot->flags))){
382 status = mci_readl(host, STATUS);
383 if (!(status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY))){
384 ret = 1 ;//card is unbusy
389 MMC_DBG_ERR_FUNC(host->mmc,"Line%d..%s start cmd=%d(arg=0x%x), cmd_reg=0x%x, unbusy=%d,card-present=%d. [%s]",
390 __LINE__, __FUNCTION__,cmd->opcode, cmd->arg,cmd_flags,
391 ret,test_bit(DW_MMC_CARD_PRESENT, &host->cur_slot->flags), mmc_hostname(host->mmc));
/* Issue the pre-prepared stop command for a data transfer. */
395 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
397 dw_mci_start_command(host, data->stop, host->stop_cmdr);
400 /* DMA interface functions */
/* Abort an in-progress DMA transfer and mark the data phase complete. */
401 static void dw_mci_stop_dma(struct dw_mci *host)
403 if (host->using_dma) {
404 host->dma_ops->stop(host);
405 host->dma_ops->cleanup(host);
408 /* Data transfer was stopped by the interrupt handler */
409 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Map the mmc_data direction flag to the DMA API direction constant. */
412 static int dw_mci_get_dma_dir(struct mmc_data *data)
414 if (data->flags & MMC_DATA_WRITE)
415 return DMA_TO_DEVICE;
417 return DMA_FROM_DEVICE;
420 #ifdef CONFIG_MMC_DW_IDMAC
/* Unmap the scatterlist of the current data transfer, unless the upper
 * layer pre-mapped it (host_cookie set) and will unmap it in post_req. */
421 static void dw_mci_dma_cleanup(struct dw_mci *host)
423 struct mmc_data *data = host->data;
426 if (!data->host_cookie)
427 dma_unmap_sg(host->dev,
430 dw_mci_get_dma_dir(data));
/* Software-reset the internal DMA controller via the BMOD register. */
433 static void dw_mci_idmac_reset(struct dw_mci *host)
435 u32 bmod = mci_readl(host, BMOD);
436 /* Software reset of DMA */
437 bmod |= SDMMC_IDMAC_SWRESET;
438 mci_writel(host, BMOD, bmod);
/* Halt the IDMAC: detach it from the data path (CTRL) and stop/reset the
 * descriptor engine (BMOD). */
441 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
445 /* Disable and reset the IDMAC interface */
446 temp = mci_readl(host, CTRL);
447 temp &= ~SDMMC_CTRL_USE_IDMAC;
448 temp |= SDMMC_CTRL_DMA_RESET;
449 mci_writel(host, CTRL, temp);
451 /* Stop the IDMAC running */
452 temp = mci_readl(host, BMOD);
453 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
454 temp |= SDMMC_IDMAC_SWRESET;
455 mci_writel(host, BMOD, temp);
/* IDMAC completion handler: unmap buffers, flag XFER_COMPLETE and kick the
 * state-machine tasklet. */
458 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
460 struct mmc_data *data = host->data;
462 dev_vdbg(host->dev, "DMA complete\n");
465 MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
466 host->mrq->cmd->opcode,host->mrq->cmd->arg,data->blocks,data->blksz,mmc_hostname(host->mmc));
469 host->dma_ops->cleanup(host);
472 * If the card was removed, data will be NULL. No point in trying to
473 * send the stop command or waiting for NBUSY in this case.
476 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
477 tasklet_schedule(&host->tasklet);
/* Fill the IDMAC descriptor ring from the mapped scatterlist: one chained
 * descriptor per sg entry, then mark the first (FD) and last (LD, with
 * interrupt-on-completion enabled) descriptors. */
481 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
485 struct idmac_desc *desc = host->sg_cpu;
487 for (i = 0; i < sg_len; i++, desc++) {
488 unsigned int length = sg_dma_len(&data->sg[i]);
489 u32 mem_addr = sg_dma_address(&data->sg[i]);
491 /* Set the OWN bit and disable interrupts for this descriptor */
492 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
495 IDMAC_SET_BUFFER1_SIZE(desc, length);
497 /* Physical address to DMA to/from */
498 desc->des2 = mem_addr;
501 /* Set first descriptor */
503 desc->des0 |= IDMAC_DES0_FD;
505 /* Set last descriptor */
506 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
507 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
508 desc->des0 |= IDMAC_DES0_LD;
/* Program the descriptor ring for the current transfer, route the data path
 * through the IDMAC, enable it, and poke the poll-demand register to start. */
513 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
517 dw_mci_translate_sglist(host, host->data, sg_len);
519 /* Select IDMAC interface */
520 temp = mci_readl(host, CTRL);
521 temp |= SDMMC_CTRL_USE_IDMAC;
522 mci_writel(host, CTRL, temp);
526 /* Enable the IDMAC */
527 temp = mci_readl(host, BMOD);
528 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
529 mci_writel(host, BMOD, temp);
531 /* Start it running */
532 mci_writel(host, PLDMND, 1);
/* One-time IDMAC setup: build the circular forward-linked descriptor ring in
 * host->sg_cpu, reset the engine, configure its interrupt mask, and program
 * the descriptor base address. */
535 static int dw_mci_idmac_init(struct dw_mci *host)
537 struct idmac_desc *p;
540 /* Number of descriptors in the ring buffer */
541 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
543 /* Forward link the descriptor list */
544 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
545 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
547 /* Set the last descriptor as the end-of-ring descriptor */
548 p->des3 = host->sg_dma;
549 p->des0 = IDMAC_DES0_ER;
551 dw_mci_idmac_reset(host);
553 /* Mask out interrupts - get Tx & Rx complete only */
554 mci_writel(host, IDSTS, IDMAC_INT_CLR);
555 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
558 /* Set the descriptor base address */
559 mci_writel(host, DBADDR, host->sg_dma);
/* DMA operations vtable for the internal DMA controller (IDMAC). */
563 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
564 .init = dw_mci_idmac_init,
565 .start = dw_mci_idmac_start_dma,
566 .stop = dw_mci_idmac_stop_dma,
567 .complete = dw_mci_idmac_complete_dma,
568 .cleanup = dw_mci_dma_cleanup,
570 #endif /* CONFIG_MMC_DW_IDMAC */
/* Decide whether a data transfer can use DMA and, if so, map its scatterlist.
 * Rejects short transfers (< DW_MCI_DMA_THRESHOLD bytes) and buffers that are
 * not word-aligned in offset or length. Returns the mapped sg count; when
 * called from pre_req (next != 0) the count is cached in host_cookie. */
572 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
573 struct mmc_data *data,
576 struct scatterlist *sg;
577 unsigned int i, sg_len;
/* Already mapped by a previous pre_req — reuse the cached mapping. */
579 if (!next && data->host_cookie)
580 return data->host_cookie;
583 * We don't do DMA on "complex" transfers, i.e. with
584 * non-word-aligned buffers or lengths. Also, we don't bother
585 * with all the DMA setup overhead for short transfers.
587 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
593 for_each_sg(data->sg, sg, data->sg_len, i) {
594 if (sg->offset & 3 || sg->length & 3)
598 sg_len = dma_map_sg(host->dev,
601 dw_mci_get_dma_dir(data));
606 data->host_cookie = sg_len;
/* mmc_host_ops.pre_req: map the next request's buffers ahead of time so the
 * DMA setup overlaps with the current transfer. host_cookie records success. */
611 static void dw_mci_pre_req(struct mmc_host *mmc,
612 struct mmc_request *mrq,
615 struct dw_mci_slot *slot = mmc_priv(mmc);
616 struct mmc_data *data = mrq->data;
618 if (!slot->host->use_dma || !data)
/* A stale cookie would alias a previous mapping — clear it first. */
621 if (data->host_cookie) {
622 data->host_cookie = 0;
626 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
627 data->host_cookie = 0;
/* mmc_host_ops.post_req: undo the mapping created by pre_req (if any) and
 * clear the cookie so the buffers are not unmapped twice. */
630 static void dw_mci_post_req(struct mmc_host *mmc,
631 struct mmc_request *mrq,
634 struct dw_mci_slot *slot = mmc_priv(mmc);
635 struct mmc_data *data = mrq->data;
637 if (!slot->host->use_dma || !data)
640 if (data->host_cookie)
641 dma_unmap_sg(slot->host->dev,
644 dw_mci_get_dma_dir(data));
645 data->host_cookie = 0;
/* Pick the largest DMA burst size (MSIZE) and RX/TX watermarks that evenly
 * divide the block size for this transfer, then program FIFOTH. Only used
 * with the internal DMA controller. */
648 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
650 #ifdef CONFIG_MMC_DW_IDMAC
651 unsigned int blksz = data->blksz;
/* Candidate burst sizes, largest tried first (idx starts at the end). */
652 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
653 u32 fifo_width = 1 << host->data_shift;
654 u32 blksz_depth = blksz / fifo_width, fifoth_val;
655 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
656 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
658 tx_wmark = (host->fifo_depth) / 2;
659 tx_wmark_invers = host->fifo_depth - tx_wmark;
663 * if blksz is not a multiple of the FIFO width
665 if (blksz % fifo_width) {
/* Burst size must divide both blksz (in FIFO words) and the TX headroom. */
672 if (!((blksz_depth % mszs[idx]) ||
673 (tx_wmark_invers % mszs[idx]))) {
675 rx_wmark = mszs[idx] - 1;
680 * If idx is '0', it won't be tried
681 * Thus, initial values are uesed
684 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
685 mci_writel(host, FIFOTH, fifoth_val);
/* Configure the card-read-threshold (CDTHRCTL) for high-speed read timings
 * (HS200 / SDR104): enable it with the block size as the threshold when the
 * block fits the FIFO; otherwise disable it. */
689 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
691 unsigned int blksz = data->blksz;
692 u32 blksz_depth, fifo_depth;
695 WARN_ON(!(data->flags & MMC_DATA_READ));
/* The threshold is only needed for the fast read timings. */
697 if (host->timing != MMC_TIMING_MMC_HS200 &&
698 host->timing != MMC_TIMING_UHS_SDR104)
701 blksz_depth = blksz / (1 << host->data_shift);
702 fifo_depth = host->fifo_depth;
704 if (blksz_depth > fifo_depth)
708 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
709 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
710 * Currently just choose blksz.
713 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
717 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/* Try to hand the data transfer to the DMA engine: map the scatterlist,
 * retune FIFOTH if the block size changed, reset + enable the DMA data path,
 * mask the PIO RX/TX IRQs, and start the DMA. Non-zero return means the
 * caller must fall back to PIO. */
720 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
727 /* If we don't have a channel, we can't do DMA */
731 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
733 host->dma_ops->stop(host);
740 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
741 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
745 * Decide the MSIZE and RX/TX Watermark.
746 * If current block size is same with previous size,
747 * no need to update fifoth.
749 if (host->prev_blksz != data->blksz)
750 dw_mci_adjust_fifoth(host, data);
753 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28
755 /* Enable the DMA interface */
756 temp = mci_readl(host, CTRL);
757 temp |= SDMMC_CTRL_DMA_ENABLE;
758 mci_writel(host, CTRL, temp);
760 /* Disable RX/TX IRQs, let DMA handle it */
761 temp = mci_readl(host, INTMASK);
762 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
763 mci_writel(host, INTMASK, temp);
765 host->dma_ops->start(host, sg_len);
/* Set up the data phase of a request. Prefers DMA; on fallback, configures
 * PIO mode: sg_miter iteration, RX/TX data-request interrupts, DMA disabled,
 * and the initial FIFOTH value (invalidating prev_blksz so a later DMA
 * transfer re-tunes the FIFO). */
770 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
774 data->error = -EINPROGRESS;
781 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28
783 if (data->flags & MMC_DATA_READ) {
784 host->dir_status = DW_MCI_RECV_STATUS;
785 dw_mci_ctrl_rd_thld(host, data);
787 host->dir_status = DW_MCI_SEND_STATUS;
790 MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
791 data->blocks, data->blksz, mmc_hostname(host->mmc));
/* DMA refused the transfer — fall back to PIO via the sg mapping iterator. */
793 if (dw_mci_submit_data_dma(host, data)) {
794 int flags = SG_MITER_ATOMIC;
795 if (host->data->flags & MMC_DATA_READ)
796 flags |= SG_MITER_TO_SG;
798 flags |= SG_MITER_FROM_SG;
800 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
802 host->part_buf_start = 0;
803 host->part_buf_count = 0;
/* Clear stale data-request status, then unmask RX/TX IRQs for PIO. */
805 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
806 temp = mci_readl(host, INTMASK);
807 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
808 mci_writel(host, INTMASK, temp);
810 temp = mci_readl(host, CTRL);
811 temp &= ~SDMMC_CTRL_DMA_ENABLE;
812 mci_writel(host, CTRL, temp);
815 * Use the initial fifoth_val for PIO mode.
816 * If next issued data may be transfered by DMA mode,
817 * prev_blksz should be invalidated.
819 mci_writel(host, FIFOTH, host->fifoth_val);
820 host->prev_blksz = 0;
823 * Keep the current block size.
824 * It will be used to decide whether to update
825 * fifoth register next time.
827 host->prev_blksz = data->blksz;
/* Synchronously issue a controller-internal command (e.g. clock update):
 * wait for the card to go un-busy, write CMDARG/CMD, then poll until the
 * controller clears the START bit or the timeout expires. */
831 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
833 struct dw_mci *host = slot->host;
834 unsigned long timeout = jiffies + msecs_to_jiffies(500);//msecs_to_jiffies(5000);
835 unsigned int cmd_status = 0;
836 #ifdef SDMMC_WAIT_FOR_UNBUSY
837 timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
839 if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
/* NOTE(review): assignment inside the loop condition — intended, but should
 * be parenthesized as ((ret = time_before(...))) to silence -Wparentheses. */
840 while (ret=time_before(jiffies, timeout)) {
841 cmd_status = mci_readl(host, STATUS);
842 if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
846 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
847 __LINE__, __FUNCTION__, mmc_hostname(host->mmc));
850 mci_writel(host, CMDARG, arg);
852 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
/* The controller clears SDMMC_CMD_START once it has taken the command. */
854 while (time_before(jiffies, timeout)) {
855 cmd_status = mci_readl(host, CMD);
856 if (!(cmd_status & SDMMC_CMD_START))
859 dev_err(&slot->mmc->class_dev,
860 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
861 cmd, arg, cmd_status);
/* Apply the slot's clock and bus-width settings to the controller. The clock
 * divider is derived from bus_hz / requested clock (rounded up so the card is
 * never over-clocked); each divider change follows the documented sequence:
 * disable clock -> UPD_CLK command -> program CLKDIV -> UPD_CLK -> enable
 * clock (with low-power gating unless SDIO IRQs are in use) -> UPD_CLK. */
864 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
866 struct dw_mci *host = slot->host;
867 unsigned int tempck,clock = slot->clock;
872 MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
873 __LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));
876 mci_writel(host, CLKENA, 0);
878 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
879 } else if (clock != host->current_speed || force_clkinit) {
880 div = host->bus_hz / clock;
881 if (host->bus_hz % clock && host->bus_hz > clock)
883 * move the + 1 after the divide to prevent
884 * over-clocking the card.
888 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
/* __clk_old caches (clock << div) so unchanged settings skip the sequence. */
890 if ((clock << div) != slot->__clk_old || force_clkinit) {
891 tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
892 dev_info(&slot->mmc->class_dev,
893 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
894 slot->id, host->bus_hz, clock,
897 host->set_speed = tempck;
/* disable clock before touching the divider */
902 mci_writel(host, CLKENA, 0);
903 mci_writel(host, CLKSRC, 0);
907 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
909 /* set clock to desired speed */
910 mci_writel(host, CLKDIV, div);
914 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
916 /* enable clock; only low power if no SDIO */
917 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
919 if (host->verid < DW_MMC_240A)
920 sdio_int = SDMMC_INT_SDIO(slot->id);
922 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
924 if (!(mci_readl(host, INTMASK) & sdio_int))
925 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
926 mci_writel(host, CLKENA, clk_en_a);
930 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
932 /* keep the clock with reflecting clock dividor */
933 slot->__clk_old = clock << div;
936 host->current_speed = clock;
938 if(slot->ctype != slot->pre_ctype)
939 MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
941 (slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
942 mmc_hostname(host->mmc));
943 slot->pre_ctype = slot->ctype;
945 /* Set the current slot bus width */
946 mci_writel(host, CTYPE, (slot->ctype << slot->id));
/* Poll the STATUS register until the data/MC busy bits clear or the
 * card-type-specific timeout (eMMC > SD > SDIO default) elapses. */
950 static void dw_mci_wait_unbusy(struct dw_mci *host)
953 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
954 unsigned long time_loop;
957 MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));
959 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
960 timeout = SDMMC_DATA_TIMEOUT_EMMC;
961 else if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
962 timeout = SDMMC_DATA_TIMEOUT_SD;
964 time_loop = jiffies + msecs_to_jiffies(timeout);
966 status = mci_readl(host, STATUS);
967 if (!(status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
969 //MMC_DBG_INFO_FUNC("dw_mci_wait_unbusy, waiting for......");
970 } while (time_before(jiffies, time_loop));
976 * 1--status is unbusy.
/* mmc_host_ops.card_busy-style helper: poll STATUS until the busy bits clear.
 * Returns 1 when the card goes un-busy within the timeout. */
978 int dw_mci_card_busy(struct mmc_host *mmc)
980 struct dw_mci_slot *slot = mmc_priv(mmc);
981 struct dw_mci *host = slot->host;
982 unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
983 unsigned long time_loop;
987 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
988 timeout = SDMMC_DATA_TIMEOUT_EMMC;
989 else if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
990 timeout = SDMMC_DATA_TIMEOUT_SD;
/* NOTE(review): leftover debug override — this unconditionally replaces the
 * per-card-type timeout chosen above with 250 s; remove before production. */
991 timeout = 250*1000;//test
992 time_loop = jiffies + msecs_to_jiffies(timeout);
994 MMC_DBG_INFO_FUNC(host->mmc, "line%d: dw_mci_wait_unbusy,timeloop=%lu, status=0x%x ",
995 __LINE__, time_loop, mci_readl(host, STATUS));
997 status = mci_readl(host, STATUS);
998 if (!(status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY))){
999 ret = 1;//card is unbusy.
1002 //MMC_DBG_INFO_FUNC("dw_mci_wait_unbusy, waiting for......");
1003 } while (time_before(jiffies, time_loop));
1004 MMC_DBG_INFO_FUNC(host->mmc, "line%d: dw_mci_wait_unbusy,ret=%d, status=0x%x ",
1005 __LINE__,ret,mci_readl(host, STATUS));
/* Begin executing one command of a request on the hardware: select the slot,
 * wait for un-busy, reset per-request bookkeeping, program data size
 * registers if there is a data phase, then issue the command. Also caches
 * the CMDR flags for the request's stop command. */
1010 static void __dw_mci_start_request(struct dw_mci *host,
1011 struct dw_mci_slot *slot,
1012 struct mmc_command *cmd)
1014 struct mmc_request *mrq;
1015 struct mmc_data *data;
1019 if (host->pdata->select_slot)
1020 host->pdata->select_slot(slot->id);
1022 host->cur_slot = slot;
1024 #if 0 //add by xbw,at 2014-03-12
1025 /*clean FIFO if it is a new request*/
1026 if(!(mrq->cmd->opcode & SDMMC_CMD_STOP)) {
1027 MMC_DBG_INFO_FUNC("%d..%s: reset the ctrl.", __LINE__, __FUNCTION__);
1028 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
1029 SDMMC_CTRL_DMA_RESET));
1032 dw_mci_wait_unbusy(host);
/* Fresh request: clear event/status state from the previous one. */
1034 host->pending_events = 0;
1035 host->completed_events = 0;
1036 host->data_status = 0;
1040 dw_mci_set_timeout(host);
1041 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1042 mci_writel(host, BLKSIZ, data->blksz);
1045 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1047 /* this is the first command, send the initialization clock */
1048 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1049 cmdflags |= SDMMC_CMD_INIT;
1052 dw_mci_submit_data(host, data);
1056 dw_mci_start_command(host, cmd, cmdflags);
1059 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/* Start the slot's queued request, preferring the set-block-count (sbc)
 * command when present, otherwise the main command. */
1062 static void dw_mci_start_request(struct dw_mci *host,
1063 struct dw_mci_slot *slot)
1065 struct mmc_request *mrq = slot->mrq;
1066 struct mmc_command *cmd;
1068 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1069 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1071 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1072 __dw_mci_start_request(host, slot, cmd);
1075 /* must be called with host->lock held */
/* Dispatch immediately when the host is idle; otherwise append the slot to
 * the host queue for later processing by the state machine. */
1076 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1077 struct mmc_request *mrq)
1079 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1084 if (host->state == STATE_IDLE) {
1085 host->state = STATE_SENDING_CMD;
1086 dw_mci_start_request(host, slot);
1088 list_add_tail(&slot->queue_node, &host->queue);
/* mmc_host_ops.request: entry point from the MMC core. Fails fast with
 * -ENOMEDIUM when no card is present; otherwise queues the request under the
 * host lock so presence-check and enqueue are atomic. */
1092 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1094 struct dw_mci_slot *slot = mmc_priv(mmc);
1095 struct dw_mci *host = slot->host;
1100 * The check for card presence and queueing of the request must be
1101 * atomic, otherwise the card could be removed in between and the
1102 * request wouldn't fail until another card was inserted.
1104 spin_lock_bh(&host->lock);
1106 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1107 spin_unlock_bh(&host->lock);
1108 mrq->cmd->error = -ENOMEDIUM;
1109 MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
1110 __LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));
1112 mmc_request_done(mmc, mrq);
1115 MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
1116 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1118 dw_mci_queue_request(host, slot, mrq);
1120 spin_unlock_bh(&host->lock);
/* mmc_host_ops.set_ios: apply bus width, timing (DDR bit in UHS_REG), clock
 * and power state. Waits for the controller to go un-busy before changing
 * settings, then reconfigures the bus via dw_mci_setup_bus. */
1123 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1125 struct dw_mci_slot *slot = mmc_priv(mmc);
1126 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1128 #ifdef SDMMC_WAIT_FOR_UNBUSY
1129 unsigned long time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);
1132 if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
1133 printk("%d..%s: no card. [%s]\n", \
1134 __LINE__, __FUNCTION__, mmc_hostname(mmc));
/* NOTE(review): assignment inside the loop condition — intended, but should
 * be parenthesized as ((ret = time_before(...))) to silence -Wparentheses. */
1138 while (ret=time_before(jiffies, time_loop)) {
1139 regs = mci_readl(slot->host, STATUS);
1140 if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
1145 printk("slot->flags=%d ", slot->flags);
1147 printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
1148 __LINE__, __FUNCTION__, mmc_hostname(mmc));
1151 switch (ios->bus_width) {
1152 case MMC_BUS_WIDTH_4:
1153 slot->ctype = SDMMC_CTYPE_4BIT;
1155 case MMC_BUS_WIDTH_8:
1156 slot->ctype = SDMMC_CTYPE_8BIT;
1159 /* set default 1 bit mode */
1160 slot->ctype = SDMMC_CTYPE_1BIT;
1161 slot->pre_ctype = SDMMC_CTYPE_1BIT;
/* DDR mode: per-slot DDR enable bits live in UHS_REG bits [16+id]. */
1164 regs = mci_readl(slot->host, UHS_REG);
1167 if (ios->timing == MMC_TIMING_UHS_DDR50)
1168 regs |= ((0x1 << slot->id) << 16);
1170 regs &= ~((0x1 << slot->id) << 16);
1172 mci_writel(slot->host, UHS_REG, regs);
1173 slot->host->timing = ios->timing;
1176 * Use mirror of ios->clock to prevent race with mmc
1177 * core ios update when finding the minimum.
1179 slot->clock = ios->clock;
1181 if (drv_data && drv_data->set_ios)
1182 drv_data->set_ios(slot->host, ios);
1184 /* Slot specific timing and width adjustment */
1185 dw_mci_setup_bus(slot, false);
1188 switch (ios->power_mode) {
1190 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1192 if (slot->host->pdata->setpower)
1193 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
1194 regs = mci_readl(slot->host, PWREN);
1195 regs |= (1 << slot->id);
1196 mci_writel(slot->host, PWREN, regs);
1199 /* Power down slot */
1200 if (slot->host->pdata->setpower)
1201 slot->host->pdata->setpower(slot->id, 0);
1202 regs = mci_readl(slot->host, PWREN);
1203 regs &= ~(1 << slot->id);
1204 mci_writel(slot->host, PWREN, regs);
/* mmc_host_ops.get_ro: report write-protect state — via slot quirk, platform
 * callback, WP GPIO, or the controller's WRTPRT register, in that order. */
1211 static int dw_mci_get_ro(struct mmc_host *mmc)
1214 struct dw_mci_slot *slot = mmc_priv(mmc);
1215 struct dw_mci_board *brd = slot->host->pdata;
1217 /* Use platform get_ro function, else try on board write protect */
1218 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
1220 else if (brd->get_ro)
1221 read_only = brd->get_ro(slot->id);
1222 else if (gpio_is_valid(slot->wp_gpio))
1223 read_only = gpio_get_value(slot->wp_gpio);
1226 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1228 dev_dbg(&mmc->class_dev, "card is %s\n",
1229 read_only ? "read-only" : "read-write");
/* Software card-detect for SDIO-only slots: set/clear the PRESENT flag under
 * the host lock and kick a rescan via mmc_detect_change. */
1234 static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
1236 struct dw_mci_slot *slot = mmc_priv(mmc);
1237 struct dw_mci_board *brd = slot->host->pdata;
1238 struct dw_mci *host = slot->host;
1239 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
1242 spin_lock_bh(&host->lock);
1244 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1246 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1248 spin_unlock_bh(&host->lock);
/* 20 ms debounce before the MMC core rescans the slot. */
1250 mmc_detect_change(slot->mmc, 20);
/* mmc_host_ops.get_cd: card-detect via (in priority order) the SDIO software
 * flag, broken-CD quirk, platform callback, CD GPIO, or the controller's
 * CDETECT register; mirrors the result into the slot's PRESENT flag. */
1257 static int dw_mci_get_cd(struct mmc_host *mmc)
1260 struct dw_mci_slot *slot = mmc_priv(mmc);
1261 struct dw_mci_board *brd = slot->host->pdata;
1262 struct dw_mci *host = slot->host;
1263 int gpio_cd = mmc_gpio_get_cd(mmc);
1265 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1266 return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1268 /* Use platform get_cd function, else try onboard card detect */
1269 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1271 else if (brd->get_cd)
1272 present = !brd->get_cd(slot->id);
1273 else if (!IS_ERR_VALUE(gpio_cd))
1276 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1279 spin_lock_bh(&host->lock);
1281 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1282 dev_dbg(&mmc->class_dev, "card is present\n");
1284 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1285 dev_dbg(&mmc->class_dev, "card is not present\n");
1287 spin_unlock_bh(&host->lock);
/*
 * dw_mci_hw_reset() - pulse the eMMC RST_n line via the controller's
 * RST_n register, honouring the eMMC spec timing quoted below
 * (tRstW, tRSCA, tRSTH).  Note the polarity here: 1 is written first,
 * then 0, with delays between and after.
 */
1292 static void dw_mci_hw_reset(struct mmc_host *mmc)
1294 struct dw_mci_slot *slot = mmc_priv(mmc);
1297 * According to eMMC spec
1298 * tRstW >= 1us ; RST_n pulse width
1299 * tRSCA >= 200us ; RST_n to Command time
1300 * tRSTH >= 1us ; RST_n high period
1303 mci_writel(slot->host, RST_n, 0x1);
1305 udelay(10); /* 10us pulse width; margin for poor-quality eMMC parts */
1307 mci_writel(slot->host, RST_n, 0x0);
1309 usleep_range(300, 1000); /* wait at least 300us (> 200us tRSCA) before commands */
1314 * Disable lower power mode.
1316 * Low power mode will stop the card clock when idle. According to the
1317 * description of the CLKENA register we should disable low power mode
1318 * for SDIO cards if we need SDIO interrupts to work.
1320 * This function is fast if low power mode is already disabled.
1322 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1324 struct dw_mci *host = slot->host;
/* Per-slot low-power enable bit within CLKENA. */
1326 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1328 clk_en_a = mci_readl(host, CLKENA);
1330 if (clk_en_a & clken_low_pwr) {
1331 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
/* CLKENA changes only take effect after an update-clock command. */
1332 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1333 SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * dw_mci_enable_sdio_irq() - mask/unmask the per-slot SDIO interrupt in
 * INTMASK.  The SDIO interrupt bit position moved in IP version 2.40a,
 * hence the verid check.  When enabling, low-power clock gating is first
 * turned off so the card clock keeps running and the card can signal
 * an interrupt.
 * NOTE(review): the if/else selecting enable vs. disable is among the
 * lines missing from this sampled listing; `enb` presumably selects it.
 */
1337 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1339 struct dw_mci_slot *slot = mmc_priv(mmc);
1340 struct dw_mci *host = slot->host;
1344 /* Enable/disable Slot Specific SDIO interrupt */
1345 int_mask = mci_readl(host, INTMASK);
1347 if (host->verid < DW_MMC_240A)
1348 sdio_int = SDMMC_INT_SDIO(slot->id);
1350 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1354 * Turn off low power mode if it was enabled. This is a bit of
1355 * a heavy operation and we disable / enable IRQs a lot, so
1356 * we'll leave low power mode disabled and it will get
1357 * re-enabled again in dw_mci_setup_bus().
1359 dw_mci_disable_low_power(slot);
1361 mci_writel(host, INTMASK,
1362 (int_mask | sdio_int));
1364 mci_writel(host, INTMASK,
1365 (int_mask & ~sdio_int));
/*
 * dw_mci_do_start_signal_voltage_switch() - switch the I/O signalling
 * voltage (3.3V / 1.8V / 1.2V) by driving the Rockchip IO-domain
 * regulator and the controller's UHS_REG voltage bit, then verifying
 * the register state after a ~5ms settle delay.  Only supported on
 * IP >= 2.40a (verid check below).
 * NOTE(review): this sampled listing drops many lines (returns, some
 * if/else bodies, the closing of each case); do not assume the visible
 * statements are consecutive.
 */
1370 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1371 struct mmc_ios *ios)
1374 unsigned int value,uhs_reg;
1377 * Signal Voltage Switching is only applicable for Host Controllers
1380 if (host->verid < DW_MMC_240A)
1383 uhs_reg = mci_readl(host, UHS_REG);
1384 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1385 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1387 switch (ios->signal_voltage) {
1388 case MMC_SIGNAL_VOLTAGE_330:
1389 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1391 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1392 /* regulator_put(host->vmmc) is deferred to the remove path. */
1394 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1395 __func__, regulator_get_voltage(host->vmmc), ret);
1397 pr_warning("%s: Switching to 3.3V signalling voltage "
1398 " failed\n", mmc_hostname(host->mmc));
1402 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1404 /* Set high-power mode: clear low-power clock gating in CLKENA. */
1405 value = mci_readl(host, CLKENA);
1406 mci_writel(host,CLKENA , value& ~SDMMC_CLKEN_LOW_PWR);
/* Clear the 1.8V enable bit -> controller signals at 3.3V. */
1408 mci_writel(host,UHS_REG , uhs_reg & ~SDMMC_UHS_VOLT_REG_18);
1411 usleep_range(5000, 5500);
1413 /* 3.3V regulator output should be stable within 5 ms */
1414 uhs_reg = mci_readl(host, UHS_REG);
1415 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1418 pr_warning("%s: 3.3V regulator output did not became stable\n",
1419 mmc_hostname(host->mmc));
1422 case MMC_SIGNAL_VOLTAGE_180:
1424 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1425 /* regulator_put(host->vmmc) is deferred to the remove path. */
1427 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1428 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1430 pr_warning("%s: Switching to 1.8V signalling voltage "
1431 " failed\n", mmc_hostname(host->mmc));
1437 * Enable 1.8V Signal Enable in the Host Control2
1440 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1443 usleep_range(5000, 5500);
1444 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__, __FUNCTION__,mmc_hostname(host->mmc));
1446 /* 1.8V regulator output should be stable within 5 ms */
1447 uhs_reg = mci_readl(host, UHS_REG);
1448 if( uhs_reg & SDMMC_UHS_VOLT_REG_18){
1453 pr_warning("%s: 1.8V regulator output did not became stable\n",
1454 mmc_hostname(host->mmc));
1457 case MMC_SIGNAL_VOLTAGE_120:
1459 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1461 pr_warning("%s: Switching to 1.2V signalling voltage "
1462 " failed\n", mmc_hostname(host->mmc));
1468 /* No signal voltage switch required */
/*
 * dw_mci_start_signal_voltage_switch() - mmc_host_ops wrapper around
 * dw_mci_do_start_signal_voltage_switch().  Bails out early for IP
 * versions older than 2.40a (body of that branch not visible in this
 * listing).  The commented-out runtime-PM calls mirror the sdhci driver.
 */
1474 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1475 struct mmc_ios *ios)
1477 struct dw_mci_slot *slot = mmc_priv(mmc);
1478 struct dw_mci *host = slot->host;
1481 if (host->verid < DW_MMC_240A)
1483 //sdhci_runtime_pm_get(host);
1484 err = dw_mci_do_start_signal_voltage_switch(host, ios);
1485 //sdhci_runtime_pm_put(host);
/*
 * dw_mci_execute_tuning() - select the tuning block pattern matching the
 * opcode/bus width (CMD21 HS200 with 8- or 4-bit pattern, CMD19 with the
 * 4-bit pattern), fill in the platform tuning_data (controller id and
 * tuning type), and delegate to the SoC drv_data->execute_tuning hook.
 */
1489 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1491 struct dw_mci_slot *slot = mmc_priv(mmc);
1492 struct dw_mci *host = slot->host;
1493 const struct dw_mci_drv_data *drv_data = host->drv_data;
1494 struct dw_mci_tuning_data tuning_data;
1497 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1498 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1499 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1500 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1501 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1502 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1503 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1507 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1508 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1509 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1512 "Undefined command(%d) for tuning\n", opcode);
/*
 * Hard-coded controller-id mapping, marked temporary by the original
 * author: eMMC -> 3, SDIO -> 1, otherwise 0.
 */
1516 /////////////////////////////////////////////////
1517 //temporary settings, to be revisited
1518 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1519 tuning_data.con_id = 3;
1520 else if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1521 tuning_data.con_id = 1;
1523 tuning_data.con_id = 0;
1524 tuning_data.tuning_type = 1; /* 0 = drive phase, 1 = sample phase */
1525 /////////////////////////////////////////////////
1527 if (drv_data && drv_data->execute_tuning)
1528 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/*
 * mmc_host_ops table for this controller.  Voltage switch and card_busy
 * callbacks exist above but are intentionally left unhooked here.
 */
1532 static const struct mmc_host_ops dw_mci_ops = {
1533 .request = dw_mci_request,
1534 .pre_req = dw_mci_pre_req,
1535 .post_req = dw_mci_post_req,
1536 .set_ios = dw_mci_set_ios,
1537 .get_ro = dw_mci_get_ro,
1538 .get_cd = dw_mci_get_cd,
1539 .set_sdio_status = dw_mci_set_sdio_status,
1540 .hw_reset = dw_mci_hw_reset,
1541 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1542 .execute_tuning = dw_mci_execute_tuning,
1543 //.start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
1544 //.card_busy = dw_mci_card_busy,
/*
 * dw_mci_enable_irq() - enable or disable the host IRQ line, tracking
 * the current state in host->irq_state so repeated calls with the same
 * flag are no-ops.  Runs with local interrupts saved/restored.
 */
1547 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
1549 unsigned long flags;
1554 local_irq_save(flags);
1555 if(host->irq_state != irqflag)
1557 host->irq_state = irqflag;
1560 enable_irq(host->irq);
1564 disable_irq(host->irq);
1567 local_irq_restore(flags);
/*
 * dw_mci_deal_data_end() - post-transfer cleanup: for write transfers
 * (dir_status == DW_MCI_SEND_STATUS) that are not CMD14 bus tests,
 * translate latched data_status error bits (DCRC -> -EILSEQ,
 * EBE -> -ETIMEDOUT) and wait for the card/controller to go un-busy.
 * Annotated as dropping and re-taking host->lock (__releases/__acquires).
 * NOTE(review): the listing omits lines here; the branch structure around
 * the two dw_mci_wait_unbusy() calls is not fully visible.
 */
1570 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
1571 __releases(&host->lock)
1572 __acquires(&host->lock)
1574 if(DW_MCI_SEND_STATUS == host->dir_status){
1576 if( MMC_BUS_TEST_W != host->cmd->opcode){
1577 if(host->data_status & SDMMC_INT_DCRC)
1578 host->data->error = -EILSEQ;
1579 else if(host->data_status & SDMMC_INT_EBE)
1580 host->data->error = -ETIMEDOUT;
1582 dw_mci_wait_unbusy(host);
1585 dw_mci_wait_unbusy(host);
/*
 * dw_mci_request_end() - finish the current request: run data-end
 * cleanup, clear cur_slot->mrq, start the next queued slot's request
 * (or go idle), then call mmc_request_done() with host->lock dropped.
 * Caller holds host->lock (see __releases/__acquires annotations).
 */
1591 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1592 __releases(&host->lock)
1593 __acquires(&host->lock)
1595 struct dw_mci_slot *slot;
/* Cache the mmc host now: cur_slot is about to be reassigned/cleared. */
1596 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1598 WARN_ON(host->cmd || host->data);
1600 dw_mci_deal_data_end(host, mrq);
1603 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
1604 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
1606 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
1607 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
1609 host->cur_slot->mrq = NULL;
1611 if (!list_empty(&host->queue)) {
1612 slot = list_entry(host->queue.next,
1613 struct dw_mci_slot, queue_node);
1614 list_del(&slot->queue_node);
1615 dev_vdbg(host->dev, "list not empty: %s is next\n",
1616 mmc_hostname(slot->mmc));
1617 host->state = STATE_SENDING_CMD;
1618 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
1619 dw_mci_start_request(host, slot);
1621 dev_vdbg(host->dev, "list empty\n");
1622 host->state = STATE_IDLE;
/* Complete to the MMC core without holding our lock (it may re-enter). */
1625 spin_unlock(&host->lock);
1626 mmc_request_done(prev_mmc, mrq);
1627 spin_lock(&host->lock);
/*
 * dw_mci_command_complete() - read the command response registers and
 * translate latched status bits into cmd->error.  Long (136-bit)
 * responses are read RESP0..RESP3 into resp[3..0] (reverse order, as the
 * controller stores the low word in RESP0).  RTO -> -ETIMEDOUT (with an
 * SDIO special case whose body is not visible here), RCRC -> -EILSEQ
 * when the command expected a CRC, RESP_ERR -> (branch body missing).
 * NOTE(review): several lines are missing from this sampled listing,
 * including the cmd_rto accounting around SDMMC_CMD_RTO_MAX_HOLD.
 */
1630 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1632 u32 status = host->cmd_status;
1634 host->cmd_status = 0;
1636 /* Read the response from the card (up to 16 bytes) */
1637 if (cmd->flags & MMC_RSP_PRESENT) {
1638 if (cmd->flags & MMC_RSP_136) {
1639 cmd->resp[3] = mci_readl(host, RESP0);
1640 cmd->resp[2] = mci_readl(host, RESP1);
1641 cmd->resp[1] = mci_readl(host, RESP2);
1642 cmd->resp[0] = mci_readl(host, RESP3);
1644 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
1645 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
1647 cmd->resp[0] = mci_readl(host, RESP0);
1651 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
1652 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
1656 if (status & SDMMC_INT_RTO)
1658 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1661 cmd->error = -ETIMEDOUT;
1663 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1664 cmd->error = -EILSEQ;
1665 else if (status & SDMMC_INT_RESP_ERR)
1669 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",cmd->opcode, cmd->error,mmc_hostname(host->mmc));
/* CMD13 timeouts are excluded from the repeated-RTO error reporting. */
1672 if(MMC_SEND_STATUS != cmd->opcode)
1673 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
1674 MMC_DBG_ERR_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
1675 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
1679 /* newer ip versions need a delay between retries */
1680 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * dw_mci_tasklet_func() - the request state machine, run as a tasklet
 * under host->lock.  It loops (the do/while tail is at the bottom)
 * through states: SENDING_CMD -> SENDING_DATA -> DATA_BUSY ->
 * SENDING_STOP / DATA_ERROR, driven by bits in host->pending_events.
 * NOTE(review): this sampled listing is missing many physical lines
 * (the `do {`, `switch`, break/goto statements, and several branch
 * bodies), so the visible statements are NOT consecutive; do not
 * reason about fall-through from adjacency alone.
 */
1686 static void dw_mci_tasklet_func(unsigned long priv)
1688 struct dw_mci *host = (struct dw_mci *)priv;
1689 struct dw_mci_slot *slot = mmc_priv(host->mmc);
1690 struct mmc_data *data;
1691 struct mmc_command *cmd;
1692 enum dw_mci_state state;
1693 enum dw_mci_state prev_state;
1694 u32 status, ctrl, cmd_flags;
1696 spin_lock(&host->lock);
1698 state = host->state;
/* Command phase: wait for CMD_COMPLETE, handle sbc chaining and errors. */
1708 case STATE_SENDING_CMD:
1709 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1710 &host->pending_events))
1715 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1716 dw_mci_command_complete(host, cmd);
1717 if (cmd == host->mrq->sbc && !cmd->error) {
1718 prev_state = state = STATE_SENDING_CMD;
1719 __dw_mci_start_request(host, host->cur_slot,
1724 if (cmd->data && cmd->error) {
1725 dw_mci_stop_dma(host);
1728 send_stop_cmd(host, data);
1729 state = STATE_SENDING_STOP;
1735 send_stop_abort(host, data);
1736 state = STATE_SENDING_STOP;
1742 if (!host->mrq->data || cmd->error) {
1743 dw_mci_request_end(host, host->mrq);
1747 prev_state = state = STATE_SENDING_DATA;
/* Data phase: on DATA_ERROR, stop DMA and recover with a manual CMD12. */
1750 case STATE_SENDING_DATA:
1751 if (test_and_clear_bit(EVENT_DATA_ERROR,
1752 &host->pending_events)) {
1753 dw_mci_stop_dma(host);
1756 send_stop_cmd(host, data);
1758 /*single block read/write, send stop cmd manually to prevent host controller halt*/
1759 printk("%s status 1 0x%08x [%s]\n",
1760 __func__,mci_readl(host, STATUS),mmc_hostname(host->mmc));
1761 mci_writel(host, CMDARG, 0);
/* Hand-built STOP_TRANSMISSION: opcode 12 + stop/resp/crc flags. */
1763 cmd_flags = SDMMC_CMD_STOP |SDMMC_CMD_RESP_CRC|SDMMC_CMD_RESP_EXP|MMC_STOP_TRANSMISSION;
1764 if(host->mmc->hold_reg_flag)
1765 cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
1766 // mci_writel(host, CMD, SDMMC_CMD_USE_HOLD_REG |SDMMC_CMD_STOP |SDMMC_CMD_RESP_CRC|SDMMC_CMD_RESP_EXP| SDMMC_CMD_START|0x0c);
1767 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Poll (<=500ms) for the controller to clear CMD_START. */
1769 unsigned long timeout = jiffies + msecs_to_jiffies(500);
/* NOTE(review): assignment inside the condition is intentional-looking
 * but fragile ("while (ret = time_before(...))"); would normally be
 * parenthesized or restructured — cannot safely rewrite here. */
1771 while(ret=time_before(jiffies, timeout)){
1772 if(!(mci_readl(host, CMD)&SDMMC_CMD_START))
1776 printk("%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
1777 __func__,mmc_hostname(host->mmc));
1780 send_stop_abort(host, data);
1782 state = STATE_DATA_ERROR;
1785 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
1786 prev_state,state, mmc_hostname(host->mmc));
1788 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1789 &host->pending_events))
1791 MMC_DBG_INFO_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
1792 prev_state,state,mmc_hostname(host->mmc));
1794 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1795 prev_state = state = STATE_DATA_BUSY;
/* Busy phase: wait for DATA_COMPLETE, decode data error bits. */
1798 case STATE_DATA_BUSY:
1799 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1800 &host->pending_events))
1803 dw_mci_deal_data_end(host, host->mrq);
1804 MMC_DBG_INFO_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
1805 prev_state,state,mmc_hostname(host->mmc));
1808 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1809 status = host->data_status;
1811 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1812 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
1813 MMC_DBG_ERR_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
1814 prev_state,state, status, mmc_hostname(host->mmc));
/* DRTO -> timeout, DCRC -> bad CRC, write-direction EBE -> timeout. */
1816 if (status & SDMMC_INT_DRTO) {
1817 data->error = -ETIMEDOUT;
1818 } else if (status & SDMMC_INT_DCRC) {
1819 data->error = -EILSEQ;
1820 } else if (status & SDMMC_INT_EBE &&
1822 DW_MCI_SEND_STATUS) {
1824 * No data CRC status was returned.
1825 * The number of bytes transferred will
1826 * be exaggerated in PIO mode.
1828 data->bytes_xfered = 0;
1829 data->error = -ETIMEDOUT;
1838 * After an error, there may be data lingering
1839 * in the FIFO, so reset it - doing so
1840 * generates a block interrupt, hence setting
1841 * the scatter-gather pointer to NULL.
1843 dw_mci_fifo_reset(host);
1845 data->bytes_xfered = data->blocks * data->blksz;
1850 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
1851 prev_state,state,mmc_hostname(host->mmc));
1852 dw_mci_request_end(host, host->mrq);
1855 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
1856 prev_state,state,mmc_hostname(host->mmc));
/* With a set-block-count (sbc) no CMD12 is needed on success. */
1858 if (host->mrq->sbc && !data->error) {
1859 data->stop->error = 0;
1861 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
1862 prev_state,state,mmc_hostname(host->mmc));
1864 dw_mci_request_end(host, host->mrq);
1868 prev_state = state = STATE_SENDING_STOP;
1870 send_stop_cmd(host, data);
1872 if (data->stop && !data->error) {
1873 /* stop command for open-ended transfer*/
1875 send_stop_abort(host, data);
1879 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
1880 prev_state,state,mmc_hostname(host->mmc));
/* Stop phase: complete the stop command, clear stale cmd_status. */
1882 case STATE_SENDING_STOP:
1883 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1884 &host->pending_events))
1886 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
1887 prev_state,state,mmc_hostname(host->mmc));
1889 /* CMD error in data command */
1890 if (host->mrq->cmd->error && host->mrq->data) {
1891 dw_mci_fifo_reset(host);
1897 dw_mci_command_complete(host, host->mrq->stop);
1899 if (host->mrq->stop)
1900 dw_mci_command_complete(host, host->mrq->stop);
1902 host->cmd_status = 0;
1905 dw_mci_request_end(host, host->mrq);
/* Error phase: once XFER_COMPLETE arrives, fall back to DATA_BUSY. */
1908 case STATE_DATA_ERROR:
1909 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1910 &host->pending_events))
1913 state = STATE_DATA_BUSY;
1916 } while (state != prev_state);
1918 host->state = state;
1920 spin_unlock(&host->lock);
/*
 * part_buf helpers: host->part_buf is a FIFO-width staging buffer for
 * bytes that don't fill a whole FIFO word during PIO.  The first two
 * are used while pushing (writes), the last two while pulling (reads).
 * (1 << host->data_shift) is the FIFO word size in bytes.
 */
1924 /* push final bytes to part_buf, only use during push */
1925 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1927 memcpy((void *)&host->part_buf, buf, cnt);
1928 host->part_buf_count = cnt;
1931 /* append bytes to part_buf, only use during push */
1932 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
/* Clamp to the free space left in the partial word. */
1934 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1935 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1936 host->part_buf_count += cnt;
1940 /* pull first bytes from part_buf, only use during pull */
1941 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1943 cnt = min(cnt, (int)host->part_buf_count);
1945 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1947 host->part_buf_count -= cnt;
1948 host->part_buf_start += cnt;
1953 /* pull final bytes from the part_buf, assuming it's just been filled */
1954 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1956 memcpy(buf, &host->part_buf, cnt);
1957 host->part_buf_start = cnt;
1958 host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * dw_mci_push_data16() - PIO write path for a 16-bit FIFO: drain any
 * staged partial word, handle unaligned buffers via a bounce buffer
 * when the arch lacks efficient unaligned access, stream whole u16
 * words, then stage a trailing odd byte (flushing it if this is the
 * last chunk of the transfer).
 */
1961 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1963 struct mmc_data *data = host->data;
1966 /* try and push anything in the part_buf */
1967 if (unlikely(host->part_buf_count)) {
1968 int len = dw_mci_push_part_bytes(host, buf, cnt);
1971 if (host->part_buf_count == 2) {
1972 mci_writew(host, DATA(host->data_offset),
1974 host->part_buf_count = 0;
1977 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1978 if (unlikely((unsigned long)buf & 0x1)) {
1980 u16 aligned_buf[64];
1981 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1982 int items = len >> 1;
1984 /* memcpy from input buffer into aligned buffer */
1985 memcpy(aligned_buf, buf, len);
1988 /* push data from aligned buffer into fifo */
1989 for (i = 0; i < items; ++i)
1990 mci_writew(host, DATA(host->data_offset),
1997 for (; cnt >= 2; cnt -= 2)
1998 mci_writew(host, DATA(host->data_offset), *pdata++);
2001 /* put anything remaining in the part_buf */
2003 dw_mci_set_part_bytes(host, buf, cnt);
2004 /* Push data if we have reached the expected data length */
2005 if ((data->bytes_xfered + init_cnt) ==
2006 (data->blksz * data->blocks))
2007 mci_writew(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data16() - PIO read path for a 16-bit FIFO: bounce-buffer
 * when unaligned (no efficient unaligned access), stream whole u16
 * words, then stash a trailing odd byte into part_buf16 for the next call.
 */
2012 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2014 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2015 if (unlikely((unsigned long)buf & 0x1)) {
2017 /* pull data from fifo into aligned buffer */
2018 u16 aligned_buf[64];
2019 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2020 int items = len >> 1;
2022 for (i = 0; i < items; ++i)
2023 aligned_buf[i] = mci_readw(host,
2024 DATA(host->data_offset));
2025 /* memcpy from aligned buffer into output buffer */
2026 memcpy(buf, aligned_buf, len);
2034 for (; cnt >= 2; cnt -= 2)
2035 *pdata++ = mci_readw(host, DATA(host->data_offset));
/* Odd trailing byte: read one more FIFO word and stage the remainder. */
2039 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
2040 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_push_data32() - PIO write path for a 32-bit FIFO.  Same
 * structure as the 16-bit variant: drain staged partial word, bounce
 * unaligned buffers, stream whole u32 words, stage the tail (flushing
 * when the transfer's expected length has been reached).
 */
2044 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2046 struct mmc_data *data = host->data;
2049 /* try and push anything in the part_buf */
2050 if (unlikely(host->part_buf_count)) {
2051 int len = dw_mci_push_part_bytes(host, buf, cnt);
2054 if (host->part_buf_count == 4) {
2055 mci_writel(host, DATA(host->data_offset),
2057 host->part_buf_count = 0;
2060 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2061 if (unlikely((unsigned long)buf & 0x3)) {
2063 u32 aligned_buf[32];
2064 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2065 int items = len >> 2;
2067 /* memcpy from input buffer into aligned buffer */
2068 memcpy(aligned_buf, buf, len);
2071 /* push data from aligned buffer into fifo */
2072 for (i = 0; i < items; ++i)
2073 mci_writel(host, DATA(host->data_offset),
2080 for (; cnt >= 4; cnt -= 4)
2081 mci_writel(host, DATA(host->data_offset), *pdata++);
2084 /* put anything remaining in the part_buf */
2086 dw_mci_set_part_bytes(host, buf, cnt);
2087 /* Push data if we have reached the expected data length */
2088 if ((data->bytes_xfered + init_cnt) ==
2089 (data->blksz * data->blocks))
2090 mci_writel(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data32() - PIO read path for a 32-bit FIFO; mirrors
 * dw_mci_pull_data16() with word size 4 and part_buf32 for the tail.
 */
2095 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2097 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2098 if (unlikely((unsigned long)buf & 0x3)) {
2100 /* pull data from fifo into aligned buffer */
2101 u32 aligned_buf[32];
2102 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2103 int items = len >> 2;
2105 for (i = 0; i < items; ++i)
2106 aligned_buf[i] = mci_readl(host,
2107 DATA(host->data_offset));
2108 /* memcpy from aligned buffer into output buffer */
2109 memcpy(buf, aligned_buf, len);
2117 for (; cnt >= 4; cnt -= 4)
2118 *pdata++ = mci_readl(host, DATA(host->data_offset));
/* Trailing 1-3 bytes: read one more FIFO word and stage the remainder. */
2122 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2123 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_push_data64() - PIO write path for a 64-bit FIFO; same
 * structure as the 16/32-bit variants with word size 8.
 */
2127 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2129 struct mmc_data *data = host->data;
2132 /* try and push anything in the part_buf */
2133 if (unlikely(host->part_buf_count)) {
2134 int len = dw_mci_push_part_bytes(host, buf, cnt);
2138 if (host->part_buf_count == 8) {
2139 mci_writeq(host, DATA(host->data_offset),
2141 host->part_buf_count = 0;
2144 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2145 if (unlikely((unsigned long)buf & 0x7)) {
2147 u64 aligned_buf[16];
2148 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2149 int items = len >> 3;
2151 /* memcpy from input buffer into aligned buffer */
2152 memcpy(aligned_buf, buf, len);
2155 /* push data from aligned buffer into fifo */
2156 for (i = 0; i < items; ++i)
2157 mci_writeq(host, DATA(host->data_offset),
2164 for (; cnt >= 8; cnt -= 8)
2165 mci_writeq(host, DATA(host->data_offset), *pdata++);
2168 /* put anything remaining in the part_buf */
2170 dw_mci_set_part_bytes(host, buf, cnt);
2171 /* Push data if we have reached the expected data length */
2172 if ((data->bytes_xfered + init_cnt) ==
2173 (data->blksz * data->blocks))
2174 mci_writeq(host, DATA(host->data_offset),
/*
 * dw_mci_pull_data64() - PIO read path for a 64-bit FIFO; mirrors the
 * 16/32-bit pull variants, staging the tail directly in host->part_buf.
 */
2179 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2181 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2182 if (unlikely((unsigned long)buf & 0x7)) {
2184 /* pull data from fifo into aligned buffer */
2185 u64 aligned_buf[16];
2186 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2187 int items = len >> 3;
2189 for (i = 0; i < items; ++i)
2190 aligned_buf[i] = mci_readq(host,
2191 DATA(host->data_offset));
2192 /* memcpy from aligned buffer into output buffer */
2193 memcpy(buf, aligned_buf, len);
2201 for (; cnt >= 8; cnt -= 8)
2202 *pdata++ = mci_readq(host, DATA(host->data_offset));
/* Trailing 1-7 bytes: read one more FIFO word and stage the remainder. */
2206 host->part_buf = mci_readq(host, DATA(host->data_offset));
2207 dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * dw_mci_pull_data() - width-independent entry for PIO reads: first
 * satisfy the request from staged part_buf bytes, then dispatch the
 * remainder to the FIFO-width-specific host->pull_data handler.
 */
2211 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2215 /* get remaining partial bytes */
2216 len = dw_mci_pull_part_bytes(host, buf, cnt);
2217 if (unlikely(len == cnt))
2222 /* get the rest of the data */
2223 host->pull_data(host, buf, cnt);
/*
 * dw_mci_read_data_pio() - drain the RX FIFO into the request's
 * scatterlist using the sg_miter API, looping while the RXDR interrupt
 * stays asserted (or, on the final DTO pass, while the FIFO still holds
 * data).  Sets EVENT_XFER_COMPLETE when the scatterlist is exhausted.
 * @dto: true when called from the DATA_OVER interrupt path.
 */
2226 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2228 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2230 unsigned int offset;
2231 struct mmc_data *data = host->data;
2232 int shift = host->data_shift;
2235 unsigned int remain, fcnt;
/* NOTE(review): format string has no conversion for the extra
 * bus_refs argument — printk format/argument mismatch. */
2237 if(!host->mmc->bus_refs){
2238 printk("Note: %s host->mmc->bus_refs is 0!!!\n",__func__,host->mmc->bus_refs);
2242 if (!sg_miter_next(sg_miter))
2245 host->sg = sg_miter->piter.sg;
2246 buf = sg_miter->addr;
2247 remain = sg_miter->length;
/* Bytes available = FIFO fill count scaled to bytes + staged bytes. */
2251 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2252 << shift) + host->part_buf_count;
2253 len = min(remain, fcnt);
2256 dw_mci_pull_data(host, (void *)(buf + offset), len);
2257 data->bytes_xfered += len;
2262 sg_miter->consumed = offset;
2263 status = mci_readl(host, MINTSTS);
2264 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2265 /* if the RXDR is ready read again */
2266 } while ((status & SDMMC_INT_RXDR) ||
2267 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2270 if (!sg_miter_next(sg_miter))
2272 sg_miter->consumed = 0;
2274 sg_miter_stop(sg_miter);
2278 sg_miter_stop(sg_miter);
2282 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_write_data_pio() - fill the TX FIFO from the request's
 * scatterlist via sg_miter, looping while the TXDR interrupt stays
 * asserted.  Free FIFO space = (depth - fill count) in bytes minus any
 * staged partial word.  Sets EVENT_XFER_COMPLETE when done.
 */
2285 static void dw_mci_write_data_pio(struct dw_mci *host)
2287 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2289 unsigned int offset;
2290 struct mmc_data *data = host->data;
2291 int shift = host->data_shift;
2294 unsigned int fifo_depth = host->fifo_depth;
2295 unsigned int remain, fcnt;
/* NOTE(review): same printk format/argument mismatch as the read path. */
2297 if(!host->mmc->bus_refs){
2298 printk("Note: %s host->mmc->bus_refs is 0!!!\n",__func__,host->mmc->bus_refs);
2303 if (!sg_miter_next(sg_miter))
2306 host->sg = sg_miter->piter.sg;
2307 buf = sg_miter->addr;
2308 remain = sg_miter->length;
2312 fcnt = ((fifo_depth -
2313 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2314 << shift) - host->part_buf_count;
2315 len = min(remain, fcnt);
2318 host->push_data(host, (void *)(buf + offset), len);
2319 data->bytes_xfered += len;
2324 sg_miter->consumed = offset;
2325 status = mci_readl(host, MINTSTS);
2326 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2327 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2330 if (!sg_miter_next(sg_miter))
2332 sg_miter->consumed = 0;
2334 sg_miter_stop(sg_miter);
2338 sg_miter_stop(sg_miter);
2342 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * dw_mci_cmd_interrupt() - latch command-done status (first writer
 * wins; don't overwrite an earlier error status), mark CMD_COMPLETE
 * and kick the state-machine tasklet.
 */
2345 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2347 if (!host->cmd_status)
2348 host->cmd_status = status;
2352 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2353 tasklet_schedule(&host->tasklet);
/*
 * dw_mci_interrupt() - top-half IRQ handler.  Reads the masked status
 * (MINTSTS), acknowledges each condition by writing the bit back to
 * RINTSTS, latches cmd/data status for the tasklet, services PIO
 * RX/TX thresholds inline, dispatches per-slot SDIO interrupts, and
 * (with internal DMA) handles IDMAC completion via IDSTS.
 * NOTE(review): this sampled listing drops lines (loop/return scaffolding
 * and some branch bodies); statements shown are not all consecutive.
 */
2356 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2358 struct dw_mci *host = dev_id;
2359 u32 pending, sdio_int;
2362 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2363 //if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
2364 // printk("%s pending: 0x%08x\n",__func__,pending);
2366 * DTO fix - version 2.10a and below, and only if internal DMA
2369 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2371 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2372 pending |= SDMMC_INT_DATA_OVER;
2376 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2377 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2378 host->cmd_status = pending;
2380 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2381 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2383 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2386 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2387 /* if there is an error report DATA_ERROR */
2388 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2389 host->data_status = pending;
2391 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2393 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2394 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2395 tasklet_schedule(&host->tasklet);
2398 if (pending & SDMMC_INT_DATA_OVER) {
2399 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2400 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2401 if (!host->data_status)
2402 host->data_status = pending;
/* Final drain of any residue left in the RX FIFO at transfer end. */
2404 if (host->dir_status == DW_MCI_RECV_STATUS) {
2405 if (host->sg != NULL)
2406 dw_mci_read_data_pio(host, true);
2408 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2409 tasklet_schedule(&host->tasklet);
2412 if (pending & SDMMC_INT_RXDR) {
2413 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2414 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2415 dw_mci_read_data_pio(host, false);
2418 if (pending & SDMMC_INT_TXDR) {
2419 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2420 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2421 dw_mci_write_data_pio(host);
2424 if (pending & SDMMC_INT_VSI) {
2425 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2426 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2427 dw_mci_cmd_interrupt(host, pending);
2430 if (pending & SDMMC_INT_CMD_DONE) {
2431 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2432 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2433 dw_mci_cmd_interrupt(host, pending);
2436 if (pending & SDMMC_INT_CD) {
2437 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2438 rk_send_wakeup_key(); /* card-detect also wakes the system */
2439 queue_work(host->card_workqueue, &host->card_work);
2442 if (pending & SDMMC_INT_HLE) {
2443 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
2444 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));
2448 /* Handle SDIO Interrupts */
2449 for (i = 0; i < host->num_slots; i++) {
2450 struct dw_mci_slot *slot = host->slot[i];
/* SDIO interrupt bit position moved in IP version 2.40a. */
2452 if (host->verid < DW_MMC_240A)
2453 sdio_int = SDMMC_INT_SDIO(i);
2455 sdio_int = SDMMC_INT_SDIO(i + 8);
2457 if (pending & sdio_int) {
2458 mci_writel(host, RINTSTS, sdio_int);
2459 mmc_signal_sdio_irq(slot->mmc);
2465 #ifdef CONFIG_MMC_DW_IDMAC
2466 /* Handle DMA interrupts */
2467 pending = mci_readl(host, IDSTS);
2468 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2469 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2470 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2471 host->dma_ops->complete(host);
/*
 * dw_mci_work_routine_card() - card-detect workqueue handler.  For each
 * slot, loop until the detected presence matches last_detect_state;
 * on a change, under host->lock tear down any in-flight or queued
 * request with -ENOMEDIUM (state-dependent for the active one), reset
 * the FIFO (and IDMAC when configured), then notify the MMC core via
 * mmc_detect_change() with the platform debounce delay.
 * NOTE(review): missing lines include the `mrq = slot->mrq` setup, the
 * switch scaffolding, and several break/else lines — branch shapes
 * cannot be fully confirmed from this listing.
 */
2478 static void dw_mci_work_routine_card(struct work_struct *work)
2480 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2483 for (i = 0; i < host->num_slots; i++) {
2484 struct dw_mci_slot *slot = host->slot[i];
2485 struct mmc_host *mmc = slot->mmc;
2486 struct mmc_request *mrq;
2490 present = dw_mci_get_cd(mmc);
2491 while (present != slot->last_detect_state) {
2492 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2493 present ? "inserted" : "removed");
2494 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
2495 present ? "inserted" : "removed.", mmc_hostname(mmc));
2497 spin_lock_bh(&host->lock);
2499 /* Card change detected */
2500 slot->last_detect_state = present;
2502 /* Clean up queue if present */
2505 if (mrq == host->mrq) {
/* Active request: fail it according to how far it has progressed. */
2509 switch (host->state) {
2512 case STATE_SENDING_CMD:
2513 mrq->cmd->error = -ENOMEDIUM;
2517 case STATE_SENDING_DATA:
2518 mrq->data->error = -ENOMEDIUM;
2519 dw_mci_stop_dma(host);
2521 case STATE_DATA_BUSY:
2522 case STATE_DATA_ERROR:
2523 if (mrq->data->error == -EINPROGRESS)
2524 mrq->data->error = -ENOMEDIUM;
2528 case STATE_SENDING_STOP:
2529 mrq->stop->error = -ENOMEDIUM;
2533 dw_mci_request_end(host, mrq);
/* Queued-but-not-started request: fail all parts outright. */
2535 list_del(&slot->queue_node);
2536 mrq->cmd->error = -ENOMEDIUM;
2538 mrq->data->error = -ENOMEDIUM;
2540 mrq->stop->error = -ENOMEDIUM;
2542 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",mrq->cmd->opcode, mmc_hostname(mmc));
2544 spin_unlock(&host->lock);
2545 mmc_request_done(slot->mmc, mrq);
2546 spin_lock(&host->lock);
2550 /* Power down slot */
2552 /* Clear down the FIFO */
2553 dw_mci_fifo_reset(host);
2554 #ifdef CONFIG_MMC_DW_IDMAC
2555 dw_mci_idmac_reset(host);
2560 spin_unlock_bh(&host->lock);
/* Re-sample; the loop exits only once the state is stable. */
2562 present = dw_mci_get_cd(mmc);
2565 mmc_detect_change(slot->mmc,
2566 msecs_to_jiffies(host->pdata->detect_delay_ms));
2571 /* given a slot id, find out the device node representing that slot */
/*
 * Walks the children of @dev's DT node and compares each child's "reg"
 * property (a big-endian cell) against @slot. Returns the matching child
 * node, or NULL when @dev has no OF node or nothing matches (the return
 * statements are outside this sampled view — TODO confirm).
 */
2572 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2574 struct device_node *np;
/* Nothing to search if the device was not instantiated from DT. */
2578 if (!dev || !dev->of_node)
2581 for_each_child_of_node(dev->of_node, np) {
2582 addr = of_get_property(np, "reg", &len);
/* Skip children without a usable "reg" cell. */
2583 if (!addr || (len < sizeof(int)))
2585 if (be32_to_cpup(addr) == slot)
/* Table mapping per-slot DT property names to driver quirk flag bits. */
2591 static struct dw_mci_of_slot_quirks {
2594 } of_slot_quirks[] = {
2596 .quirk = "disable-wp",
2597 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/*
 * Collect the quirk bitmask for @slot by testing each known quirk property
 * on the slot's DT node. Returns the OR of all matching quirk ids.
 */
2601 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2603 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2608 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
/* Presence of the property alone enables the quirk; no value is read. */
2609 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2610 quirks |= of_slot_quirks[idx].id;
2615 /* find out bus-width for a given slot */
/*
 * Reads the "bus-width" property from the controller's own DT node (the
 * Rockchip fork does not use per-slot child nodes here — note the
 * commented-out dw_mci_of_find_slot_node call). Falls back to a default
 * width, logged below, when the property is absent.
 */
2616 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2618 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
2624 if (of_property_read_u32(np, "bus-width", &bus_wd))
2625 dev_err(dev, "bus-width property not found, assuming width"
2631 /* find the pwr-en gpio for a given slot; or -1 if none specified */
/*
 * Looks up the "pwr-gpios" entry on the controller node, requests it as a
 * managed (devm) GPIO and drives it low, which powers the slot on per the
 * comment at the gpio_direction_output call below.
 */
2632 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
2634 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
2640 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
2642 /* Having a missing entry is valid; return silently */
2643 if (!gpio_is_valid(gpio))
2646 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
2647 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/* Active-low power enable: driving 0 asserts pwr-en. */
2651 gpio_direction_output(gpio, 0);//set 0 to pwr-en
2657 /* find the write protect gpio for a given slot; or -1 if none specified */
/*
 * Looks up "wp-gpios" on the slot's DT child node and requests it as a
 * managed GPIO. A missing entry is not an error (see comment below).
 */
2658 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2660 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2666 gpio = of_get_named_gpio(np, "wp-gpios", 0);
2668 /* Having a missing entry is valid; return silently */
2669 if (!gpio_is_valid(gpio))
2672 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2673 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2680 /* find the cd gpio for a given slot */
/*
 * Registers the "cd-gpios" pin as an mmc-core card-detect GPIO via
 * mmc_gpio_request_cd(); the core then handles the CD interrupt/debounce.
 * Debounce argument is 0 — no extra software debounce requested.
 */
2681 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2682 struct mmc_host *mmc)
2684 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
2690 gpio = of_get_named_gpio(np, "cd-gpios", 0);
2692 /* Having a missing entry is valid; return silently */
2693 if (!gpio_is_valid(gpio))
2696 if (mmc_gpio_request_cd(mmc, gpio, 0))
2697 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2699 #else /* CONFIG_OF */
/* Stubs for !CONFIG_OF builds: no device tree, so every lookup is a no-op. */
2700 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2704 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2708 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2712 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2716 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2717 struct mmc_host *mmc)
2721 #endif /* CONFIG_OF */
/*
 * Allocate and register the mmc_host for slot @id: read DT capabilities,
 * set OCR mask and block-layer limits, acquire pwr-en/wp GPIOs and the
 * vmmc regulator (SD only), register with the MMC core, and apply the
 * default pinctrl state (except for eMMC).
 *
 * Fix in this revision: the IS_ERR() check after looking up the IDLE
 * pinctrl state previously tested host->pins_default (copy-paste from the
 * DEFAULT lookup below) — it now tests host->pins_idle, the state that was
 * just assigned.
 */
2723 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2725 struct mmc_host *mmc;
2726 struct dw_mci_slot *slot;
2727 const struct dw_mci_drv_data *drv_data = host->drv_data;
2732 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2736 slot = mmc_priv(mmc);
2740 host->slot[id] = slot;
2743 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2745 mmc->ops = &dw_mci_ops;
/* Clock range: DT "clock-freq-min-max" overrides the driver defaults. */
2747 if (of_property_read_u32_array(host->dev->of_node,
2748 "clock-freq-min-max", freq, 2)) {
2749 mmc->f_min = DW_MCI_FREQ_MIN;
2750 mmc->f_max = DW_MCI_FREQ_MAX;
2752 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
2753 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
2755 mmc->f_min = freq[0];
2756 mmc->f_max = freq[1];
2758 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
2759 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
2762 if(strstr("mmc0",mmc_hostname(mmc)))
2763 printk("Line%d..%s: The rk_sdmmc %s",__LINE__, __FUNCTION__,RK_SDMMC_DRIVER_VERSION);
/* Restrict the card types this controller instance will drive, from DT. */
2765 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
2766 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
2767 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
2768 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
2769 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
2770 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
2772 if (host->pdata->get_ocr)
2773 mmc->ocr_avail = host->pdata->get_ocr(id);
/* Default OCR: advertise the full 1.65-3.6V range. */
2776 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
2777 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
2778 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
2779 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
2783 * Start with slot power disabled, it will be enabled when a card
2786 if (host->pdata->setpower)
2787 host->pdata->setpower(id, 0);
2789 if (host->pdata->caps)
2790 mmc->caps = host->pdata->caps;
2792 if (host->pdata->pm_caps)
2793 mmc->pm_caps = host->pdata->pm_caps;
/* Controller index: "mshc" DT alias, else the platform device id. */
2795 if (host->dev->of_node) {
2796 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2800 ctrl_id = to_platform_device(host->dev)->id;
2802 if (drv_data && drv_data->caps)
2803 mmc->caps |= drv_data->caps[ctrl_id];
2804 if (drv_data && drv_data->hold_reg_flag)
2805 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
2807 /* Unconditionally advertise UHS SDR modes and erase support. */
2808 mmc->caps |= MMC_CAP_UHS_SDR12|MMC_CAP_UHS_SDR25|MMC_CAP_UHS_SDR50|MMC_CAP_UHS_SDR104|MMC_CAP_ERASE;
2810 if (host->pdata->caps2)
2811 mmc->caps2 = host->pdata->caps2;
/* Bus width: platform callback wins, then DT, then a default. */
2813 if (host->pdata->get_bus_wd)
2814 bus_width = host->pdata->get_bus_wd(slot->id);
2815 else if (host->dev->of_node)
2816 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2820 switch (bus_width) {
2822 mmc->caps |= MMC_CAP_8_BIT_DATA;
2824 mmc->caps |= MMC_CAP_4_BIT_DATA;
2826 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
2827 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
2828 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
2829 mmc->caps |= MMC_CAP_SDIO_IRQ;
2830 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
2831 mmc->caps |= MMC_CAP_HW_RESET;
2832 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
2833 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
2834 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
2835 mmc->pm_caps |= MMC_PM_KEEP_POWER;
2836 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
2837 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
2838 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
2839 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2841 /*Assign pm_caps pass to pm_flags*/
2842 mmc->pm_flags = mmc->pm_caps;
/* Block layer limits: platform data if provided, else sane defaults. */
2844 if (host->pdata->blk_settings) {
2845 mmc->max_segs = host->pdata->blk_settings->max_segs;
2846 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2847 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2848 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2849 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2851 /* Useful defaults if platform data is unset. */
2852 #ifdef CONFIG_MMC_DW_IDMAC
2853 mmc->max_segs = host->ring_size;
2854 mmc->max_blk_size = 65536;
2855 mmc->max_blk_count = host->ring_size;
2856 mmc->max_seg_size = 0x1000;
2857 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2860 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2861 mmc->max_blk_count = 512;
2862 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2863 mmc->max_seg_size = mmc->max_req_size;
2864 #endif /* CONFIG_MMC_DW_IDMAC */
2867 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
2869 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))//(gpio_is_valid(slot->pwr_en_gpio))
/* Only the SD slot gets a vmmc regulator; other controllers skip it. */
2874 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
2876 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
2883 if (IS_ERR(host->vmmc)) {
2884 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
2887 ret = regulator_enable(host->vmmc);
2890 "failed to enable regulator: %d\n", ret);
2896 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2898 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2899 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2901 ret = mmc_add_host(mmc);
2905 /* Pinctrl set default iomux state to function port.
2906 * Fixme: DON'T TOUCH EMMC SETTING!
2908 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
2910 host->pinctrl = devm_pinctrl_get(host->dev);
2911 if(IS_ERR(host->pinctrl))
2912 printk("%s: Warning : No pinctrl used!\n",mmc_hostname(host->mmc));
2915 host->pins_idle= pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_IDLE);
/* BUGFIX: was IS_ERR(host->pins_default) — check the state just assigned. */
2916 if(IS_ERR(host->pins_idle))
2917 printk("%s: Warning : No IDLE pinctrl matched!\n", mmc_hostname(host->mmc));
2920 if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
2921 printk("%s: Warning : Idle pinctrl setting failed!\n", mmc_hostname(host->mmc));
2924 host->pins_default = pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_DEFAULT);
2925 if(IS_ERR(host->pins_default))
2926 printk("%s: Warning : No default pinctrl matched!\n", mmc_hostname(host->mmc));
2929 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
2930 printk("%s: Warning : Default pinctrl setting failed!\n", mmc_hostname(host->mmc));
2936 #if defined(CONFIG_DEBUG_FS)
2937 dw_mci_init_debugfs(slot);
2940 /* Card initially undetected */
2941 slot->last_detect_state = 1;
/*
 * Tear down slot @id: run the platform exit hook, unregister the mmc_host
 * from the core, clear the host's slot pointer and free the mmc_host.
 */
2950 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2952 /* Shutdown detect IRQ */
2953 if (slot->host->pdata->exit)
2954 slot->host->pdata->exit(id);
2956 /* Debugfs stuff is cleaned up by mmc core */
2957 mmc_remove_host(slot->mmc);
2958 slot->host->slot[id] = NULL;
2959 mmc_free_host(slot->mmc);
/*
 * Set up DMA for the controller: allocate one page of coherent memory for
 * the scatter/gather descriptor area, pick the DMA backend (internal IDMAC
 * when CONFIG_MMC_DW_IDMAC) and initialize it. Falls back to PIO mode if
 * no complete dma_ops vector is available or its init fails.
 */
2962 static void dw_mci_init_dma(struct dw_mci *host)
2964 /* Alloc memory for sg translation */
2965 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2966 &host->sg_dma, GFP_KERNEL);
2967 if (!host->sg_cpu) {
2968 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2973 /* Determine which DMA interface to use */
2974 #ifdef CONFIG_MMC_DW_IDMAC
2975 host->dma_ops = &dw_mci_idmac_ops;
2976 dev_info(host->dev, "Using internal DMA controller.\n")
2982 if (host->dma_ops->init && host->dma_ops->start &&
2983 host->dma_ops->stop && host->dma_ops->cleanup) {
2984 if (host->dma_ops->init(host)) {
2985 dev_err(host->dev, "%s: Unable to initialize "
2986 "DMA Controller.\n", __func__);
2990 dev_err(host->dev, "DMA initialization not found.\n");
2998 dev_info(host->dev, "Using PIO mode.\n");
/*
 * Assert the given @reset bits in the CTRL register and poll (up to 500ms)
 * for the hardware to clear them. Returns true on success, false and logs
 * an error on timeout.
 */
3003 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3005 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3008 ctrl = mci_readl(host, CTRL);
3010 mci_writel(host, CTRL, ctrl);
3012 /* wait till resets clear */
3014 ctrl = mci_readl(host, CTRL);
3015 if (!(ctrl & reset))
3017 } while (time_before(jiffies, timeout));
3020 "Timeout resetting block (ctrl reset %#x)\n",
/* Reset the data FIFO, stopping any in-progress sg_miter iteration first. */
3026 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3029 * Resetting generates a block interrupt, hence setting
3030 * the scatter-gather pointer to NULL.
3033 sg_miter_stop(&host->sg_miter);
3037 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full controller reset: FIFO, controller and DMA reset bits together. */
3040 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3042 return dw_mci_ctrl_reset(host,
3043 SDMMC_CTRL_FIFO_RESET |
3045 SDMMC_CTRL_DMA_RESET);
/* Table mapping controller-level DT property names to host quirk bits. */
3049 static struct dw_mci_of_quirks {
3054 .quirk = "broken-cd",
3055 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board (platform data) from the controller's DT node:
 * slot count, quirks, FIFO depth, card-detect delay, bus clock, PM caps
 * and speed-mode capabilities. Also runs the variant's parse_dt hook.
 * Returns the allocated pdata or an ERR_PTR on failure.
 */
3059 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3061 struct dw_mci_board *pdata;
3062 struct device *dev = host->dev;
3063 struct device_node *np = dev->of_node;
3064 const struct dw_mci_drv_data *drv_data = host->drv_data;
3066 u32 clock_frequency;
3068 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3070 dev_err(dev, "could not allocate memory for pdata\n");
3071 return ERR_PTR(-ENOMEM);
3074 /* find out number of slots supported */
3075 if (of_property_read_u32(dev->of_node, "num-slots",
3076 &pdata->num_slots)) {
3077 dev_info(dev, "num-slots property not found, "
3078 "assuming 1 slot is available\n");
3079 pdata->num_slots = 1;
/* Accumulate quirk bits for every matching DT property. */
3083 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3084 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3085 pdata->quirks |= of_quirks[idx].id;
3088 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3089 dev_info(dev, "fifo-depth property not found, using "
3090 "value of FIFOTH register as default\n");
3092 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3094 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3095 pdata->bus_hz = clock_frequency;
/* Give the SoC-specific variant a chance to parse its own properties. */
3097 if (drv_data && drv_data->parse_dt) {
3098 ret = drv_data->parse_dt(host);
3100 return ERR_PTR(ret);
3103 if (of_find_property(np, "keep-power-in-suspend", NULL))
3104 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3106 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3107 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3109 if (of_find_property(np, "supports-highspeed", NULL))
3110 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3112 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3113 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3115 if (of_find_property(np, "supports-DDR_MODE", NULL))
3116 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3118 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3119 pdata->caps2 |= MMC_CAP2_HS200;
3121 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3122 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3124 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3125 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3127 if (of_get_property(np, "cd-inverted", NULL))
3128 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3129 if (of_get_property(np, "bootpart-no-access", NULL))
3130 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3135 #else /* CONFIG_OF */
/* Without DT support there is no way to build pdata — always fail. */
3136 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3138 return ERR_PTR(-EINVAL);
3140 #endif /* CONFIG_OF */
/*
 * Main controller probe: parse DT/platform data, detect the IP version
 * (which selects the DATA register offset and a bus clock doubling),
 * acquire and enable clocks, size the FIFO, set up DMA, request the IRQ,
 * initialize each slot and finally unmask the interrupt sources.
 * Returns 0 when at least one slot initialized; on failure unwinds the
 * workqueue, DMA, regulator and clocks via the error labels (labels fall
 * outside this sampled view).
 */
3142 int dw_mci_probe(struct dw_mci *host)
3144 const struct dw_mci_drv_data *drv_data = host->drv_data;
3145 int width, i, ret = 0;
3151 host->pdata = dw_mci_parse_dt(host);
3152 if (IS_ERR(host->pdata)) {
3153 dev_err(host->dev, "platform data not available\n");
3158 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3160 "Platform data must supply select_slot function\n");
3165 * In 2.40a spec, Data offset is changed.
3166 * Need to check the version-id and set data-offset for DATA register.
3168 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3169 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3171 if (host->verid < DW_MMC_240A)
3172 host->data_offset = DATA_OFFSET;
3174 host->data_offset = DATA_240A_OFFSET;
/* Bus (AHB) clock first, then the card clock. */
3177 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3178 if (IS_ERR(host->hclk_mmc)) {
3179 dev_err(host->dev, "failed to get hclk_mmc\n");
3180 ret = PTR_ERR(host->hclk_mmc);
3183 clk_prepare_enable(host->hclk_mmc);
3186 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3187 if (IS_ERR(host->clk_mmc)) {
3188 dev_err(host->dev, "failed to get clk mmc_per\n");
3189 ret = PTR_ERR(host->clk_mmc);
3193 if (host->verid < DW_MMC_240A)
3194 host->bus_hz = host->pdata->bus_hz;
3196 host->bus_hz = host->pdata->bus_hz*2;// *2 due to fix divider 2 in controller.
3197 if (!host->bus_hz) {
3198 dev_err(host->dev,"Platform data must supply bus speed\n");
3203 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3205 dev_err(host->dev, "failed to set clk mmc\n");
3208 clk_prepare_enable(host->clk_mmc);
3210 if (drv_data && drv_data->setup_clock) {
3211 ret = drv_data->setup_clock(host);
3214 "implementation specific clock setup failed\n");
3219 host->quirks = host->pdata->quirks;
3220 host->irq_state = true;
3221 host->set_speed = 0;
3224 spin_lock_init(&host->lock);
3225 INIT_LIST_HEAD(&host->queue);
3228 * Get the host data width - this assumes that HCON has been set with
3229 * the correct values.
3231 i = (mci_readl(host, HCON) >> 7) & 0x7;
3233 host->push_data = dw_mci_push_data16;
3234 host->pull_data = dw_mci_pull_data16;
3236 host->data_shift = 1;
3237 } else if (i == 2) {
3238 host->push_data = dw_mci_push_data64;
3239 host->pull_data = dw_mci_pull_data64;
3241 host->data_shift = 3;
3243 /* Check for a reserved value, and warn if it is */
3245 "HCON reports a reserved host data width!\n"
3246 "Defaulting to 32-bit access.\n");
3247 host->push_data = dw_mci_push_data32;
3248 host->pull_data = dw_mci_pull_data32;
3250 host->data_shift = 2;
3253 /* Reset all blocks */
3254 if (!dw_mci_ctrl_all_reset(host))
3257 host->dma_ops = host->pdata->dma_ops;
3258 dw_mci_init_dma(host);
3260 /* Clear the interrupts for the host controller */
3261 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3262 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3264 /* Put in max timeout */
3265 mci_writel(host, TMOUT, 0xFFFFFFFF);
3268 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3269 * Tx Mark = fifo_size / 2 DMA Size = 8
3271 if (!host->pdata->fifo_depth) {
3273 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3274 * have been overwritten by the bootloader, just like we're
3275 * about to do, so if you know the value for your hardware, you
3276 * should put it in the platform data.
3278 fifo_size = mci_readl(host, FIFOTH);
3279 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3281 fifo_size = host->pdata->fifo_depth;
3283 host->fifo_depth = fifo_size;
3285 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3286 mci_writel(host, FIFOTH, host->fifoth_val);
3288 /* disable clock to CIU */
3289 mci_writel(host, CLKENA, 0);
3290 mci_writel(host, CLKSRC, 0);
/* NOTE(review): WQ_NON_REENTRANT is a no-op/removed on modern kernels. */
3292 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3293 host->card_workqueue = alloc_workqueue("dw-mci-card",
3294 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
3295 if (!host->card_workqueue) {
3299 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
3300 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3301 host->irq_flags, "dw-mci", host);
/* Slot count: platform data, or decoded from HCON bits [5:1]. */
3305 if (host->pdata->num_slots)
3306 host->num_slots = host->pdata->num_slots;
3308 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
3310 /* We need at least one slot to succeed */
3311 for (i = 0; i < host->num_slots; i++) {
3312 ret = dw_mci_init_slot(host, i);
3314 dev_dbg(host->dev, "slot %d init failed\n", i);
3320 * Enable interrupts for command done, data over, data empty, card det,
3321 * receive ready and error such as transmit, receive timeout, crc error
3323 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3324 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
3325 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect interrupt only for removable (non-SDIO, non-eMMC) hosts. */
3326 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3327 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3328 regs |= SDMMC_INT_CD;
3330 mci_writel(host, INTMASK, regs);
3332 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
3334 dev_info(host->dev, "DW MMC controller at irq %d, "
3335 "%d bit host data width, "
3337 host->irq, width, fifo_size);
3340 dev_info(host->dev, "%d slots initialized\n", init_slots);
3342 dev_dbg(host->dev, "attempted to initialize %d slots, "
3343 "but failed on all\n", host->num_slots);
3348 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
3349 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
3354 destroy_workqueue(host->card_workqueue);
3357 if (host->use_dma && host->dma_ops->exit)
3358 host->dma_ops->exit(host);
3361 regulator_disable(host->vmmc);
3362 regulator_put(host->vmmc);
3366 if (!IS_ERR(host->clk_mmc))
3367 clk_disable_unprepare(host->clk_mmc);
3369 if (!IS_ERR(host->hclk_mmc))
3370 clk_disable_unprepare(host->hclk_mmc);
3374 EXPORT_SYMBOL(dw_mci_probe);
/*
 * Unwind dw_mci_probe(): mask and ack all interrupts, tear down every
 * slot, gate the card clock, destroy the card-detect workqueue, shut down
 * DMA, release the vmmc regulator and disable both clocks.
 */
3376 void dw_mci_remove(struct dw_mci *host)
3380 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3381 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3383 for (i = 0; i < host->num_slots; i++) {
3384 dev_dbg(host->dev, "remove slot %d\n", i);
3386 dw_mci_cleanup_slot(host->slot[i], i);
3389 /* disable clock to CIU */
3390 mci_writel(host, CLKENA, 0);
3391 mci_writel(host, CLKSRC, 0);
3393 destroy_workqueue(host->card_workqueue);
3395 if (host->use_dma && host->dma_ops->exit)
3396 host->dma_ops->exit(host);
3399 regulator_disable(host->vmmc);
3400 regulator_put(host->vmmc);
3403 if (!IS_ERR(host->clk_mmc))
3404 clk_disable_unprepare(host->clk_mmc);
3406 if (!IS_ERR(host->hclk_mmc))
3407 clk_disable_unprepare(host->hclk_mmc);
3409 EXPORT_SYMBOL(dw_mci_remove);
3413 #ifdef CONFIG_PM_SLEEP
3415 * TODO: we should probably disable the clock to the card in the suspend path.
/*
 * System suspend: drop the vmmc regulator and, for the SD controller only,
 * disable the host IRQ, switch pins to the IDLE pinctrl state, re-register
 * the card-detect GPIO and arm its IRQ as a wakeup source so card
 * insertion/removal can wake the system.
 */
3417 int dw_mci_suspend(struct dw_mci *host)
3421 regulator_disable(host->vmmc);
3423 /*only for sdmmc controller*/
3424 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3425 disable_irq(host->irq);
3427 if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3428 printk("%s: Warning : Idle pinctrl setting failed!\n", mmc_hostname(host->mmc));
3429 dw_mci_of_get_cd_gpio(host->dev,0,host->mmc);
3431 enable_irq_wake(host->mmc->slot.cd_irq);
3435 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * System resume: re-enable vmmc, reset the controller, re-init DMA,
 * restore FIFO thresholds and the interrupt mask, replay ios/bus setup on
 * slots that kept power, and for the SD controller undo the suspend-time
 * wakeup/pinctrl/card-detect arrangements before re-enabling the IRQ.
 */
3437 int dw_mci_resume(struct dw_mci *host)
3443 ret = regulator_enable(host->vmmc);
3446 "failed to enable regulator: %d\n", ret);
3451 if (!dw_mci_ctrl_all_reset(host)) {
3456 if (host->use_dma && host->dma_ops->init)
3457 host->dma_ops->init(host);
3460 * Restore the initial value at FIFOTH register
3461 * And Invalidate the prev_blksz with zero
3463 mci_writel(host, FIFOTH, host->fifoth_val);
3464 host->prev_blksz = 0;
3465 /* Put in max timeout */
3466 mci_writel(host, TMOUT, 0xFFFFFFFF);
3468 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3469 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
/* NOTE(review): probe also excludes eMMC from SDMMC_INT_CD; this path
 * checks only SDIO — confirm whether eMMC should be excluded here too. */
3471 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3472 regs |= SDMMC_INT_CD;
3473 mci_writel(host, INTMASK, regs);
3474 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3476 for (i = 0; i < host->num_slots; i++) {
3477 struct dw_mci_slot *slot = host->slot[i];
/* Slots that kept power need their ios and bus re-applied by hand. */
3480 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3481 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3482 dw_mci_setup_bus(slot, true);
3486 /*only for sdmmc controller*/
3487 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3488 disable_irq_wake(host->mmc->slot.cd_irq);
3490 mmc_gpio_free_cd(host->mmc);
3491 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3492 printk("%s: Warning : Default pinctrl setting failed!\n", mmc_hostname(host->mmc));
3494 enable_irq(host->irq);
3500 EXPORT_SYMBOL(dw_mci_resume);
3501 #endif /* CONFIG_PM_SLEEP */
/* Module init/exit: the core library only announces itself; the actual
 * device binding happens via the platform/pltfm glue drivers. */
3503 static int __init dw_mci_init(void)
3505 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
3509 static void __exit dw_mci_exit(void)
3513 module_init(dw_mci_init);
3514 module_exit(dw_mci_exit);
3516 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3518 MODULE_AUTHOR("NXP Semiconductor VietNam");
3519 MODULE_AUTHOR("Imagination Technologies Ltd");
/* Fixed a mis-encoded GBK full-width comma ("£¬") in the author string. */
3520 MODULE_AUTHOR("Rockchip Electronics, Bangwang Xie <xbw@rock-chips.com>");
3522 MODULE_LICENSE("GPL v2");