/*
 * Synopsys DesignWare Multimedia Card Interface driver
 * (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/err.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/ioport.h>
25 #include <linux/module.h>
26 #include <linux/platform_device.h>
27 #include <linux/seq_file.h>
28 #include <linux/slab.h>
29 #include <linux/stat.h>
30 #include <linux/delay.h>
31 #include <linux/irq.h>
32 #include <linux/mmc/host.h>
33 #include <linux/mmc/mmc.h>
34 #include <linux/mmc/sd.h>
35 #include <linux/mmc/sdio.h>
36 #include <linux/mmc/rk_mmc.h>
37 #include <linux/bitops.h>
38 #include <linux/regulator/consumer.h>
39 #include <linux/workqueue.h>
41 #include <linux/of_gpio.h>
42 #include <linux/mmc/slot-gpio.h>
45 #include "rk_sdmmc_of.h"
46 #include <linux/regulator/rockchip_io_vol_domain.h>
/* Driver version string for this Rockchip rk_sdmmc variant. */
#define RK_SDMMC_DRIVER_VERSION "Ver 1.00. The last modify date is 2014-05-05"

/* Common flag combinations */
/*
 * NOTE(review): some continuation lines of the three interrupt-mask
 * macros below (data-phase, command-phase and combined error bits) are
 * missing from this chunk.
 */
#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
	/*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
	DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
/* Transfer-direction markers stored in host->dir_status */
#define DW_MCI_SEND_STATUS 1
#define DW_MCI_RECV_STATUS 2
/* Transfers smaller than this many bytes are not worth doing via DMA */
#define DW_MCI_DMA_THRESHOLD 16
/* Host clock limits in Hz (originally 200 MHz / 400 kHz, capped here) */
#define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
#define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */
/*
 * Data-busy wait timeouts, in ms, selected per card type in
 * dw_mci_wait_unbusy().
 * Fixed: removed a stray trailing semicolon from SDMMC_DATA_TIMEOUT_SD
 * that spliced ";" into every expansion, breaking any expression use
 * of the macro.
 */
#define SDMMC_DATA_TIMEOUT_SDIO 250
#define SDMMC_DATA_TIMEOUT_SD 5000 /* max is 250ms refer to Spec; Maybe adapt the value to the sick card. */
#define SDMMC_DATA_TIMEOUT_EMMC 2500
/* Maximum hold time (ms) tolerated for a command response timeout */
#define SDMMC_CMD_RTO_MAX_HOLD 200
//#define SDMMC_WAIT_FOR_UNBUSY 2500
#ifdef CONFIG_MMC_DW_IDMAC
/*
 * Mask written to IDSTS to acknowledge all internal-DMA interrupt
 * sources.
 * NOTE(review): the final continuation line of this mask and the
 * enclosing "struct idmac_desc {" header are missing from this chunk.
 */
#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
	SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
	SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
	u32 des0; /* Control Descriptor */
/* des0 control/status bits of one IDMAC hardware descriptor */
#define IDMAC_DES0_DIC BIT(1)
#define IDMAC_DES0_LD BIT(2)
#define IDMAC_DES0_FD BIT(3)
#define IDMAC_DES0_CH BIT(4)
#define IDMAC_DES0_ER BIT(5)
#define IDMAC_DES0_CES BIT(30)
#define IDMAC_DES0_OWN BIT(31)
	u32 des1; /* Buffer sizes */
/* Store a buffer-1 size (13 bits) into des1, preserving the other field */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
	u32 des2; /* buffer 1 physical address */
	u32 des3; /* buffer 2 physical address */
#endif /* CONFIG_MMC_DW_IDMAC */
98 static const u8 tuning_blk_pattern_4bit[] = {
99 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
100 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
101 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
102 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
103 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
104 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
105 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
106 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
109 static const u8 tuning_blk_pattern_8bit[] = {
110 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
111 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
112 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
113 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
114 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
115 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
116 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
117 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
118 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
119 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
120 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
121 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
122 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
123 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
124 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
125 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Forward declarations: reset helpers and low-power control used below. */
static inline bool dw_mci_fifo_reset(struct dw_mci *host);
static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
/*
 * Dump every register of @host via printk (debug aid). Walks the
 * dw_mci_regs name/offset table until its NULL-name sentinel.
 * NOTE(review): the function braces, the table-advance statement and
 * the return are missing from this chunk.
 */
static int dw_mci_regs_printk(struct dw_mci *host)
	struct sdmmc_reg *regs = dw_mci_regs;

	while( regs->name != 0 ){
		printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
	printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
#if defined(CONFIG_DEBUG_FS)
/*
 * debugfs "req" file: print a snapshot of the slot's in-flight request
 * (command, data and stop phases), taken under the host lock.
 * NOTE(review): several interior lines (mrq/cmd/data/stop assignments,
 * if-guards and the seq_printf call sites) are missing from this chunk.
 */
static int dw_mci_req_show(struct seq_file *s, void *v)
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data *data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
		   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
		   cmd->opcode, cmd->arg, cmd->flags,
		   cmd->resp[0], cmd->resp[1], cmd->resp[2],
		   /* NOTE(review): resp[2] is printed twice; the fourth
		    * field was presumably meant to be resp[3] — confirm. */
		   cmd->resp[2], cmd->error);
	seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
		   data->bytes_xfered, data->blocks,
		   data->blksz, data->flags, data->error);
		   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
		   stop->opcode, stop->arg, stop->flags,
		   stop->resp[0], stop->resp[1], stop->resp[2],
		   /* NOTE(review): same resp[2]/resp[3] duplication here. */
		   stop->resp[2], stop->error);

	spin_unlock_bh(&slot->host->lock);
188 static int dw_mci_req_open(struct inode *inode, struct file *file)
190 return single_open(file, dw_mci_req_show, inode->i_private);
/*
 * File operations for the per-slot debugfs "req" entry (seq_file based).
 * NOTE(review): the .read/.llseek members and the closing "};" are not
 * visible in this chunk; upstream uses seq_read/seq_lseek here.
 */
static const struct file_operations dw_mci_req_fops = {
	.owner = THIS_MODULE,
	.open = dw_mci_req_open,
	.release = single_release,
/*
 * debugfs "regs" file: print the SDMMC_* register offset constants.
 * NOTE(review): the function braces and trailing "return 0;" are
 * missing from this chunk.
 */
static int dw_mci_regs_show(struct seq_file *s, void *v)
	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
213 static int dw_mci_regs_open(struct inode *inode, struct file *file)
215 return single_open(file, dw_mci_regs_show, inode->i_private);
/*
 * File operations for the debugfs "regs" entry (seq_file based).
 * NOTE(review): the .read/.llseek members and the closing "};" are not
 * visible in this chunk; upstream uses seq_read/seq_lseek here.
 */
static const struct file_operations dw_mci_regs_fops = {
	.owner = THIS_MODULE,
	.open = dw_mci_regs_open,
	.release = single_release,
/*
 * Create this slot's debugfs entries under the mmc host's debugfs root:
 * "regs" and "req" dump files plus raw state/pending/completed words.
 * NOTE(review): the local declarations (root/node), the NULL check on
 * each created node and the error label are missing from this chunk.
 */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;

	root = mmc->debugfs_root;
	node = debugfs_create_file("regs", S_IRUSR, root, host,
	node = debugfs_create_file("req", S_IRUSR, root, slot,
	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
#endif /* defined(CONFIG_DEBUG_FS) */
268 static void dw_mci_set_timeout(struct dw_mci *host)
270 /* timeout (maximum) */
271 mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the SDMMC CMD register flag word,
 * letting SoC-specific drv_data tweak the result.
 * NOTE(review): the initial "cmdr = cmd->opcode" assignment, some
 * guard/brace lines, the data branch header and the return are missing
 * from this chunk.
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
	struct mmc_data *data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;

	cmd->error = -EINPROGRESS;

	if (cmdr == MMC_STOP_TRANSMISSION)
		cmdr |= SDMMC_CMD_STOP;
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;

	/* Let SoC-specific driver data adjust the flags */
	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build the stop/abort command matching @cmd into host->stop_abort:
 * CMD12 for block read/write opcodes, or an SDIO CCCR-abort write for
 * CMD53; returns the corresponding CMD register flag word.
 * NOTE(review): the "cmdr = cmd->opcode" line, some closing braces and
 * the return statement are missing from this chunk.
 */
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
	struct mmc_command *stop;

	stop = &host->stop_abort;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		/* Write to the CCCR I/O-abort register, same function number */
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Program CMDARG/CMD and start @cmd, then poll (up to a 5 s jiffies
 * deadline) until STATUS clears the busy bits. CMD11 (voltage switch)
 * first forces low-power mode off and sets SDMMC_CMD_VOLT_SWITCH.
 * NOTE(review): the opening brace, declarations of status/ret and some
 * loop braces are missing from this chunk.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
	unsigned long time_loop = jiffies + msecs_to_jiffies(5000);

	host->pre_cmd = host->cmd;
		"start command: ARGR=0x%08x CMDR=0x%08x\n",
		cmd->arg, cmd_flags);

	if(SD_SWITCH_VOLTAGE == cmd->opcode){
		/*confirm non-low-power mode*/
		struct dw_mci_slot *slot = host->slot[0];//temporality fix slot[0] due to host->num_slots equal to 1;
		mci_writel(host, CMDARG, 0);
		dw_mci_disable_low_power(slot);
		MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
			__LINE__, __FUNCTION__,mmc_hostname(host->mmc));
		cmd_flags |= SDMMC_CMD_VOLT_SWITCH;

	mci_writel(host, CMDARG, cmd->arg);
	if(host->mmc->hold_reg_flag)
		cmd_flags |= SDMMC_CMD_USE_HOLD_REG;//fix the value to 1 in some Soc,for example RK3188.

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

	/* Busy-wait while the card is present and still reports busy */
	while ((time_before(jiffies, time_loop))&&(test_bit(DW_MMC_CARD_PRESENT, &host->cur_slot->flags))){
		status = mci_readl(host, STATUS);
		if (!(status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY))){
			ret = 1 ;//card is unbusy

	MMC_DBG_ERR_FUNC(host->mmc,"Line%d..%s start cmd=%d(arg=0x%x), cmd_reg=0x%x, unbusy=%d,card-present=%d. [%s]",
		__LINE__, __FUNCTION__,cmd->opcode, cmd->arg,cmd_flags,
		ret,test_bit(DW_MMC_CARD_PRESENT, &host->cur_slot->flags), mmc_hostname(host->mmc));
393 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
395 dw_mci_start_command(host, data->stop, host->stop_cmdr);
398 /* DMA interface functions */
399 static void dw_mci_stop_dma(struct dw_mci *host)
401 if (host->using_dma) {
402 host->dma_ops->stop(host);
403 host->dma_ops->cleanup(host);
406 /* Data transfer was stopped by the interrupt handler */
407 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
410 static int dw_mci_get_dma_dir(struct mmc_data *data)
412 if (data->flags & MMC_DATA_WRITE)
413 return DMA_TO_DEVICE;
415 return DMA_FROM_DEVICE;
#ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the transfer's scatterlist unless the pre_req/post_req fast
 * path owns the mapping (data->host_cookie set).
 * NOTE(review): the "if (data)" guard and the sg/sg_len arguments of
 * dma_unmap_sg() are missing from this chunk.
 */
static void dw_mci_dma_cleanup(struct dw_mci *host)
	struct mmc_data *data = host->data;

	if (!data->host_cookie)
		dma_unmap_sg(host->dev,
			     dw_mci_get_dma_dir(data));
431 static void dw_mci_idmac_reset(struct dw_mci *host)
433 u32 bmod = mci_readl(host, BMOD);
434 /* Software reset of DMA */
435 bmod |= SDMMC_IDMAC_SWRESET;
436 mci_writel(host, BMOD, bmod);
/*
 * Stop the internal DMA controller: detach it from the data path and
 * reset the DMA interface, then halt and soft-reset the IDMAC itself.
 * NOTE(review): the opening brace and the "u32 temp;" declaration are
 * missing from this chunk.
 */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
/*
 * IDMAC transfer-done handler: clean up the mapping, flag
 * EVENT_XFER_COMPLETE and schedule the tasklet.
 * NOTE(review): the opening brace and some interior guard lines are
 * missing from this chunk.
 */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
		host->mrq->cmd->opcode,host->mrq->cmd->arg,data->blocks,data->blksz,mmc_hostname(host->mmc));

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from @data's DMA-mapped scatterlist:
 * one chained descriptor per sg entry, then mark the first and last
 * descriptors (FD/LD) and re-enable the completion interrupt on the
 * last one.
 * NOTE(review): the trailing "sg_len" parameter line, the opening
 * brace, the "int i;" declaration and some braces are missing from
 * this chunk.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;

	/* Set first descriptor */
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	/* NOTE(review): byte-offset arithmetic — sg_cpu is presumably a
	 * void/char pointer; confirm against the host structure. */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;
/*
 * Build the descriptor list for @sg_len entries, route the controller
 * through the IDMAC, enable it and poke the poll-demand register to
 * start the transfer.
 * NOTE(review): the opening brace and the "u32 temp;" declaration are
 * missing from this chunk.
 */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: build the forward-linked descriptor ring inside
 * the host's DMA-coherent page, close the ring with an end-of-ring
 * descriptor, reset the IDMAC, unmask the completion interrupts and
 * program the descriptor base address.
 * NOTE(review): the opening brace, the "int i;" declaration, the tail
 * of the IDINTEN write and the return are missing from this chunk.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
	struct idmac_desc *p;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
561 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
562 .init = dw_mci_idmac_init,
563 .start = dw_mci_idmac_start_dma,
564 .stop = dw_mci_idmac_stop_dma,
565 .complete = dw_mci_idmac_complete_dma,
566 .cleanup = dw_mci_dma_cleanup,
568 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * Map @data's scatterlist for DMA, rejecting "complex" (misaligned)
 * transfers and short ones below DW_MCI_DMA_THRESHOLD. The mapped count
 * is cached in data->host_cookie for the pre_req fast path.
 * NOTE(review): the trailing "int next" parameter line, several early
 * returns, dma_map_sg()'s sg/sg_len arguments and the final return are
 * missing from this chunk.
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)

	sg_len = dma_map_sg(host->dev,
			    dw_mci_get_dma_dir(data));

	data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: pre-map the next request's data for DMA so the
 * mapping cost overlaps the current transfer; clears the cookie on
 * failure so the submit path falls back cleanly.
 * NOTE(review): the final parameter line, early-return targets and a
 * closing brace are missing from this chunk.
 */
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)

	/* A stale cookie means a previous mapping was never consumed */
	if (data->host_cookie) {
		data->host_cookie = 0;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: tear down a pre_req DMA mapping once the
 * request has finished and clear the cookie.
 * NOTE(review): the final parameter line and dma_unmap_sg()'s sg/sg_len
 * arguments are missing from this chunk.
 */
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
/*
 * Recompute FIFOTH (DMA burst size and RX/TX watermarks) for @data's
 * block size: pick the largest multiple-transaction size that divides
 * both the block depth and the TX watermark complement.
 * NOTE(review): the opening brace, the search-loop frame around the
 * msize selection and the closing lines are missing from this chunk.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {

	if (!((blksz_depth % mszs[idx]) ||
	      (tx_wmark_invers % mszs[idx]))) {
		rx_wmark = mszs[idx] - 1;

	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
/*
 * Program the card read threshold (CDTHRCTL): enabled with
 * thld_size = blksz only for HS200/SDR104 timing when a whole block
 * fits in the FIFO, otherwise disabled.
 * NOTE(review): the opening brace, the "thld_size" assignment and the
 * disable/return labels are missing from this chunk.
 */
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));

	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to set @data up for DMA: map the sg list, retune FIFOTH when the
 * block size changed, reset and enable the DMA interface, mask the
 * RX/TX PIO interrupts and start the DMA backend. A non-zero result
 * tells the caller to fall back to PIO.
 * NOTE(review): the opening brace, local declarations (sg_len/temp),
 * the use_dma guard, error returns and the debug-print frame are
 * missing from this chunk.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
	/* If we don't have a channel, we can't do DMA */

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
		host->dma_ops->stop(host);

		"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		(unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET); //dange to fifo-reset; noted by xbw,at 2014-03-28

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);
/*
 * Prepare the controller for @data: reset DMA, set the direction and
 * read threshold, then use the DMA path or fall back to PIO (sg_miter
 * plus RX/TX interrupts). prev_blksz records whether FIFOTH must be
 * recomputed for the next transfer.
 * NOTE(review): the opening brace, "u32 temp;", WARN_ON lines, several
 * else branches and closing braces are missing from this chunk.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
	data->error = -EINPROGRESS;

	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);// | SDMMC_CTRL_FIFO_RESET);//dange to fifo-reset; noted by xbw,at 2014-03-28

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
		host->dir_status = DW_MCI_SEND_STATUS;

	MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
		data->blocks, data->blksz, mmc_hostname(host->mmc));

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback path */
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);

		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transfered by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;

		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
/*
 * Issue a register-update command (e.g. a clock update) directly:
 * optionally wait for the card to go non-busy, write CMDARG/CMD, then
 * poll until the controller clears the CMD start bit or the deadline
 * passes.
 * NOTE(review): the opening brace, an "int ret" declaration, #endif
 * for the SDMMC_WAIT_FOR_UNBUSY block and several braces are missing
 * from this chunk; "ret=" inside the while condition is an assignment,
 * not a comparison.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	unsigned int cmd_status = 0;
#ifdef SDMMC_WAIT_FOR_UNBUSY
	timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);

	if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		while (ret=time_before(jiffies, timeout)) {
			cmd_status = mci_readl(host, STATUS);
			if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))

		printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
			__LINE__, __FUNCTION__, mmc_hostname(host->mmc));

	mci_writel(host, CMDARG, arg);

	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))

	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
/*
 * Program the card clock (divider, enable, optional low-power gating)
 * and the bus width for @slot, issuing CLK-update commands between the
 * clock register writes as the controller requires.
 * NOTE(review): many interior lines (declarations of div/clk_en_a/
 * sdio_int, the mci_send_cmd() call openings, if/else braces) are
 * missing from this chunk.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
	struct dw_mci *host = slot->host;
	unsigned int tempck,clock = slot->clock;

	MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
		__LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));

	/* Clock-off branch: gate the card clock */
	mci_writel(host, CLKENA, 0);
		SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit) {
			tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
			dev_info(&slot->mmc->class_dev,
				"Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				slot->id, host->bus_hz, clock,

			host->set_speed = tempck;

		/* disable clock before programming the divider */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

			SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

			SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;

		if (host->verid < DW_MMC_240A)
			sdio_int = SDMMC_INT_SDIO(slot->id);
			sdio_int = SDMMC_INT_SDIO((slot->id) + 8);

		if (!(mci_readl(host, INTMASK) & sdio_int))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

			SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* keep the clock with reflecting clock dividor */
		slot->__clk_old = clock << div;

	host->current_speed = clock;

	if(slot->ctype != slot->pre_ctype)
		MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
			(slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
			mmc_hostname(host->mmc));
	slot->pre_ctype = slot->ctype;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
/*
 * Poll STATUS until the card/controller leaves the busy state or the
 * per-card-type timeout expires. Return value:
 * 1 -- status is unbusy.
 * NOTE(review): the do-loop opener, the status/ret declarations and
 * the final return are missing from this chunk.
 */
int dw_mci_wait_unbusy(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
	unsigned long time_loop;

	if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
		timeout = SDMMC_DATA_TIMEOUT_EMMC;
	else if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
		timeout = SDMMC_DATA_TIMEOUT_SD;
	/* NOTE(review): leftover debug override — forces a 250 s timeout
	 * regardless of card type; should probably be removed. */
	timeout = 250*1000;//test
	time_loop = jiffies + msecs_to_jiffies(timeout);

	MMC_DBG_INFO_FUNC(host->mmc, "line%d: dw_mci_wait_unbusy,timeloop=%lu, status=0x%x ",
		__LINE__, time_loop, mci_readl(host, STATUS));
		status = mci_readl(host, STATUS);
		if (!(status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY))){
			ret = 1;//card is unbusy.
		//MMC_DBG_INFO_FUNC("dw_mci_wait_unbusy, waiting for......");
	} while (time_before(jiffies, time_loop));
	MMC_DBG_INFO_FUNC(host->mmc, "line%d: dw_mci_wait_unbusy,ret=%d, status=0x%x ",
		__LINE__,ret,mci_readl(host, STATUS));
/*
 * Core request start: select the slot, wait for non-busy, reset the
 * event bookkeeping, program the data byte/block registers when data is
 * present, then issue @cmd (adding the init clock on first use) and
 * precompute the stop command's flag word.
 * NOTE(review): several lines ("u32 cmdflags", the mrq/data
 * assignments, if-guards and braces, and the #endif of the disabled
 * block) are missing from this chunk.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
	struct mmc_request *mrq;
	struct mmc_data *data;

	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	host->cur_slot = slot;

#if 0 //add by xbw,at 2014-03-12
	/*clean FIFO if it is a new request*/
	if(!(mrq->cmd->opcode & SDMMC_CMD_STOP)) {
		MMC_DBG_INFO_FUNC("%d..%s: reset the ctrl.", __LINE__, __FUNCTION__);
		mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
			SDMMC_CTRL_DMA_RESET));

	dw_mci_wait_unbusy(host->mmc);

	host->pending_events = 0;
	host->completed_events = 0;
	host->data_status = 0;

	dw_mci_set_timeout(host);
	mci_writel(host, BYTCNT, data->blksz*data->blocks);
	mci_writel(host, BLKSIZ, data->blksz);

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	dw_mci_submit_data(host, data);

	dw_mci_start_command(host, cmd, cmdflags);

	host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
1037 static void dw_mci_start_request(struct dw_mci *host,
1038 struct dw_mci_slot *slot)
1040 struct mmc_request *mrq = slot->mrq;
1041 struct mmc_command *cmd;
1043 MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
1044 mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));
1046 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1047 __dw_mci_start_request(host, slot, cmd);
/* must be called with host->lock held */
/*
 * Queue @mrq on @slot: start it immediately when the state machine is
 * idle, otherwise append the slot to host->queue for later dispatch.
 * NOTE(review): the opening brace, the slot->mrq assignment, the
 * dev_vdbg() tail and the else/closing braces are missing from this
 * chunk.
 */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
		list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops.request entry point: fail fast with -ENOMEDIUM when no
 * card is present, otherwise queue the request — both done atomically
 * under the host lock.
 * NOTE(review): the opening brace, a WARN_ON, the early return after
 * mmc_request_done() and the closing brace are missing from this
 * chunk.
 */
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
			__LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));

		mmc_request_done(mmc, mrq);

	MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
		mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.set_ios: apply bus-width, timing (DDR bit in UHS_REG),
 * clock and power-state changes, optionally waiting for the controller
 * to go non-busy first.
 * NOTE(review): several case labels, "break" statements, local
 * declarations (regs/ret) and braces are missing from this chunk.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;

#ifdef SDMMC_WAIT_FOR_UNBUSY
	unsigned long time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);

	if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
		printk("%d..%s: no card. [%s]\n", \
			__LINE__, __FUNCTION__, mmc_hostname(mmc));

	while (ret=time_before(jiffies, time_loop)) {
		regs = mci_readl(slot->host, STATUS);
		if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))

		printk("slot->flags=%d ", slot->flags);
	printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
		__LINE__, __FUNCTION__, mmc_hostname(mmc));

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
		slot->pre_ctype = SDMMC_CTYPE_1BIT;

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR50 timing sets this slot's DDR bit in UHS_REG */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= ((0x1 << slot->id) << 16);
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);

		/* Power down slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, 0);
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops.get_ro: report write-protect state, preferring the
 * platform hook, then the WP GPIO, falling back to the controller's
 * WRTPRT register bit.
 * NOTE(review): the opening brace, "int read_only", the quirk branch
 * body, the final else line and the return are missing from this
 * chunk.
 */
static int dw_mci_get_ro(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
	else if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else if (gpio_is_valid(slot->wp_gpio))
		read_only = gpio_get_value(slot->wp_gpio);
		mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");
/*
 * Externally driven SDIO card-present toggle: update the slot's
 * PRESENT flag under the host lock and trigger a rescan via
 * mmc_detect_change(). Only applies to SDIO-restricted hosts.
 * NOTE(review): the opening brace, the "if (val)" guard, else lines
 * and the return are missing from this chunk.
 */
static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))

	spin_lock_bh(&host->lock);
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	spin_unlock_bh(&host->lock);

	mmc_detect_change(slot->mmc, 20);
/*
 * mmc_host_ops.get_cd: determine card presence from (in order) the
 * SDIO status flag, the broken-CD quirk, the platform hook, the CD
 * GPIO, then the CDETECT register; cache the result in the slot's
 * PRESENT bit under the host lock.
 * NOTE(review): "int present", some branch bodies, the else lines and
 * the return are missing from this chunk.
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
		return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else if (!IS_ERR_VALUE(gpio_cd))
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))

	spin_lock_bh(&host->lock);
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.hw_reset: pulse the eMMC RST_n line with timing per the
 * eMMC spec (see the in-body comment).
 * NOTE(review): the opening brace and the closing lines are missing
 * from this chunk; the spec comment below lost its opening delimiter
 * in extraction and is restored here.
 */
static void dw_mci_hw_reset(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);

	/*
	 * According to eMMC spec
	 * tRstW >= 1us ; RST_n pulse width
	 * tRSCA >= 200us ; RST_n to Command time
	 * tRSTH >= 1us ; RST_n high period
	 */
	mci_writel(slot->host, RST_n, 0x1);

	udelay(10); //10us for bad quality eMMc.

	mci_writel(slot->host, RST_n, 0x0);

	usleep_range(300, 1000); //at least 300 (> 200us)
1289 * Disable lower power mode.
1291 * Low power mode will stop the card clock when idle. According to the
1292 * description of the CLKENA register we should disable low power mode
1293 * for SDIO cards if we need SDIO interrupts to work.
1295 * This function is fast if low power mode is already disabled.
1297 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1299 struct dw_mci *host = slot->host;
1301 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1303 clk_en_a = mci_readl(host, CLKENA);
1305 if (clk_en_a & clken_low_pwr) {
1306 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1307 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1308 SDMMC_CMD_PRV_DAT_WAIT, 0);
1312 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1314 struct dw_mci_slot *slot = mmc_priv(mmc);
1315 struct dw_mci *host = slot->host;
1319 /* Enable/disable Slot Specific SDIO interrupt */
1320 int_mask = mci_readl(host, INTMASK);
1322 if (host->verid < DW_MMC_240A)
1323 sdio_int = SDMMC_INT_SDIO(slot->id);
1325 sdio_int = SDMMC_INT_SDIO((slot->id) + 8);
1329 * Turn off low power mode if it was enabled. This is a bit of
1330 * a heavy operation and we disable / enable IRQs a lot, so
1331 * we'll leave low power mode disabled and it will get
1332 * re-enabled again in dw_mci_setup_bus().
1334 dw_mci_disable_low_power(slot);
1336 mci_writel(host, INTMASK,
1337 (int_mask | sdio_int));
1339 mci_writel(host, INTMASK,
1340 (int_mask & ~sdio_int));
1345 static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
1346 struct mmc_ios *ios)
1349 unsigned int value,uhs_reg;
1352 * Signal Voltage Switching is only applicable for Host Controllers
1355 if (host->verid < DW_MMC_240A)
1358 uhs_reg = mci_readl(host, UHS_REG);
1359 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
1360 __LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));
1362 switch (ios->signal_voltage) {
1363 case MMC_SIGNAL_VOLTAGE_330:
1364 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
1366 ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
1367 //regulator_put(host->vmmc); //to be done in remove function.
1369 MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
1370 __func__, regulator_get_voltage(host->vmmc), ret);
1372 pr_warning("%s: Switching to 3.3V signalling voltage "
1373 " failed\n", mmc_hostname(host->mmc));
1377 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__, __FUNCTION__, mmc_hostname(host->mmc));
1379 //set High-power mode
1380 value = mci_readl(host, CLKENA);
1381 mci_writel(host,CLKENA , value& ~SDMMC_CLKEN_LOW_PWR);
1383 mci_writel(host,UHS_REG , uhs_reg & ~SDMMC_UHS_VOLT_REG_18);
1386 usleep_range(5000, 5500);
1388 /* 3.3V regulator output should be stable within 5 ms */
1389 uhs_reg = mci_readl(host, UHS_REG);
1390 if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
1393 pr_warning("%s: 3.3V regulator output did not became stable\n",
1394 mmc_hostname(host->mmc));
1397 case MMC_SIGNAL_VOLTAGE_180:
1399 ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
1400 // regulator_put(host->vmmc);//to be done in remove function.
1402 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
1403 __LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
1405 pr_warning("%s: Switching to 1.8V signalling voltage "
1406 " failed\n", mmc_hostname(host->mmc));
1412 * Enable 1.8V Signal Enable in the Host Control2
1415 mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
1418 usleep_range(5000, 5500);
1419 MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__, __FUNCTION__,mmc_hostname(host->mmc));
1421 /* 1.8V regulator output should be stable within 5 ms */
1422 uhs_reg = mci_readl(host, UHS_REG);
1423 if( uhs_reg & SDMMC_UHS_VOLT_REG_18){
1428 pr_warning("%s: 1.8V regulator output did not became stable\n",
1429 mmc_hostname(host->mmc));
1432 case MMC_SIGNAL_VOLTAGE_120:
1434 ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
1436 pr_warning("%s: Switching to 1.2V signalling voltage "
1437 " failed\n", mmc_hostname(host->mmc));
1443 /* No signal voltage switch required */
1449 static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
1450 struct mmc_ios *ios)
1452 struct dw_mci_slot *slot = mmc_priv(mmc);
1453 struct dw_mci *host = slot->host;
1456 if (host->verid < DW_MMC_240A)
1458 //sdhci_runtime_pm_get(host);
1459 err = dw_mci_do_start_signal_voltage_switch(host, ios);
1460 //sdhci_runtime_pm_put(host);
1464 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1466 struct dw_mci_slot *slot = mmc_priv(mmc);
1467 struct dw_mci *host = slot->host;
1468 const struct dw_mci_drv_data *drv_data = host->drv_data;
1469 struct dw_mci_tuning_data tuning_data;
1472 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1473 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1474 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1475 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1476 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1477 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1478 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1482 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1483 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1484 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1487 "Undefined command(%d) for tuning\n", opcode);
1491 /////////////////////////////////////////////////
1492 //temporary settings,!!!!!!!!!!!!!!!
1493 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
1494 tuning_data.con_id = 3;
1495 else if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1496 tuning_data.con_id = 1;
1498 tuning_data.con_id = 0;
1499 tuning_data.tuning_type = 1; //0--drv, 1--sample
1500 /////////////////////////////////////////////////
1502 if (drv_data && drv_data->execute_tuning)
1503 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1507 static const struct mmc_host_ops dw_mci_ops = {
1508 .request = dw_mci_request,
1509 .pre_req = dw_mci_pre_req,
1510 .post_req = dw_mci_post_req,
1511 .set_ios = dw_mci_set_ios,
1512 .get_ro = dw_mci_get_ro,
1513 .get_cd = dw_mci_get_cd,
1514 .set_sdio_status = dw_mci_set_sdio_status,
1515 .hw_reset = dw_mci_hw_reset,
1516 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1517 .execute_tuning = dw_mci_execute_tuning,
1518 //.start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
1519 .card_busy = dw_mci_wait_unbusy,
1522 static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
1524 unsigned long flags;
1529 local_irq_save(flags);
1530 if(host->irq_state != irqflag)
1532 host->irq_state = irqflag;
1535 enable_irq(host->irq);
1539 disable_irq(host->irq);
1542 local_irq_restore(flags);
1545 static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
1546 __releases(&host->lock)
1547 __acquires(&host->lock)
1549 if(DW_MCI_SEND_STATUS == host->dir_status){
1551 if( MMC_BUS_TEST_W != host->cmd->opcode){
1552 if(host->data_status & SDMMC_INT_DCRC)
1553 host->data->error = -EILSEQ;
1554 else if(host->data_status & SDMMC_INT_EBE)
1555 host->data->error = -ETIMEDOUT;
1557 dw_mci_wait_unbusy(host->mmc);
1560 dw_mci_wait_unbusy(host->mmc);
1566 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1567 __releases(&host->lock)
1568 __acquires(&host->lock)
1570 struct dw_mci_slot *slot;
1571 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1573 WARN_ON(host->cmd || host->data);
1575 dw_mci_deal_data_end(host, mrq);
1578 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
1579 mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
1581 MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
1582 mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));
1584 host->cur_slot->mrq = NULL;
1586 if (!list_empty(&host->queue)) {
1587 slot = list_entry(host->queue.next,
1588 struct dw_mci_slot, queue_node);
1589 list_del(&slot->queue_node);
1590 dev_vdbg(host->dev, "list not empty: %s is next\n",
1591 mmc_hostname(slot->mmc));
1592 host->state = STATE_SENDING_CMD;
1593 MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
1594 dw_mci_start_request(host, slot);
1596 dev_vdbg(host->dev, "list empty\n");
1597 host->state = STATE_IDLE;
1600 spin_unlock(&host->lock);
1601 mmc_request_done(prev_mmc, mrq);
1602 spin_lock(&host->lock);
1605 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1607 u32 status = host->cmd_status;
1609 host->cmd_status = 0;
1611 /* Read the response from the card (up to 16 bytes) */
1612 if (cmd->flags & MMC_RSP_PRESENT) {
1613 if (cmd->flags & MMC_RSP_136) {
1614 cmd->resp[3] = mci_readl(host, RESP0);
1615 cmd->resp[2] = mci_readl(host, RESP1);
1616 cmd->resp[1] = mci_readl(host, RESP2);
1617 cmd->resp[0] = mci_readl(host, RESP3);
1619 MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
1620 __LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
1622 cmd->resp[0] = mci_readl(host, RESP0);
1626 MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
1627 __LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));
1631 if (status & SDMMC_INT_RTO)
1633 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
1636 cmd->error = -ETIMEDOUT;
1638 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1639 cmd->error = -EILSEQ;
1640 else if (status & SDMMC_INT_RESP_ERR)
1644 MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",cmd->opcode, cmd->error,mmc_hostname(host->mmc));
1647 if(MMC_SEND_STATUS != cmd->opcode)
1648 if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
1649 MMC_DBG_ERR_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
1650 cmd->opcode, cmd->error,mmc_hostname(host->mmc));
1654 /* newer ip versions need a delay between retries */
1655 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
1661 static void dw_mci_tasklet_func(unsigned long priv)
1663 struct dw_mci *host = (struct dw_mci *)priv;
1664 struct dw_mci_slot *slot = mmc_priv(host->mmc);
1665 struct mmc_data *data;
1666 struct mmc_command *cmd;
1667 enum dw_mci_state state;
1668 enum dw_mci_state prev_state;
1671 spin_lock(&host->lock);
1673 state = host->state;
1683 case STATE_SENDING_CMD:
1684 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1685 &host->pending_events))
1690 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1691 dw_mci_command_complete(host, cmd);
1692 if (cmd == host->mrq->sbc && !cmd->error) {
1693 prev_state = state = STATE_SENDING_CMD;
1694 __dw_mci_start_request(host, host->cur_slot,
1699 if (cmd->data && cmd->error) {
1700 dw_mci_stop_dma(host);
1703 send_stop_cmd(host, data);
1704 state = STATE_SENDING_STOP;
1710 send_stop_abort(host, data);
1711 state = STATE_SENDING_STOP;
1717 if (!host->mrq->data || cmd->error) {
1718 dw_mci_request_end(host, host->mrq);
1722 prev_state = state = STATE_SENDING_DATA;
1725 case STATE_SENDING_DATA:
1726 if (test_and_clear_bit(EVENT_DATA_ERROR,
1727 &host->pending_events)) {
1728 dw_mci_stop_dma(host);
1731 send_stop_cmd(host, data);
1733 send_stop_abort(host, data);
1735 state = STATE_DATA_ERROR;
1738 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
1739 prev_state,state, mmc_hostname(host->mmc));
1741 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1742 &host->pending_events))
1744 MMC_DBG_INFO_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
1745 prev_state,state,mmc_hostname(host->mmc));
1747 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1748 prev_state = state = STATE_DATA_BUSY;
1751 case STATE_DATA_BUSY:
1752 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1753 &host->pending_events))
1756 dw_mci_deal_data_end(host, host->mrq);
1757 MMC_DBG_INFO_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
1758 prev_state,state,mmc_hostname(host->mmc));
1761 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1762 status = host->data_status;
1764 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1765 if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
1766 MMC_DBG_ERR_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
1767 prev_state,state, status, mmc_hostname(host->mmc));
1769 if (status & SDMMC_INT_DRTO) {
1770 data->error = -ETIMEDOUT;
1771 } else if (status & SDMMC_INT_DCRC) {
1772 data->error = -EILSEQ;
1773 } else if (status & SDMMC_INT_EBE &&
1775 DW_MCI_SEND_STATUS) {
1777 * No data CRC status was returned.
1778 * The number of bytes transferred will
1779 * be exaggerated in PIO mode.
1781 data->bytes_xfered = 0;
1782 data->error = -ETIMEDOUT;
1791 * After an error, there may be data lingering
1792 * in the FIFO, so reset it - doing so
1793 * generates a block interrupt, hence setting
1794 * the scatter-gather pointer to NULL.
1796 dw_mci_fifo_reset(host);
1798 data->bytes_xfered = data->blocks * data->blksz;
1803 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
1804 prev_state,state,mmc_hostname(host->mmc));
1805 dw_mci_request_end(host, host->mrq);
1808 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
1809 prev_state,state,mmc_hostname(host->mmc));
1811 if (host->mrq->sbc && !data->error) {
1812 data->stop->error = 0;
1814 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
1815 prev_state,state,mmc_hostname(host->mmc));
1817 dw_mci_request_end(host, host->mrq);
1821 prev_state = state = STATE_SENDING_STOP;
1823 send_stop_cmd(host, data);
1825 if (data->stop && !data->error) {
1826 /* stop command for open-ended transfer*/
1828 send_stop_abort(host, data);
1832 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
1833 prev_state,state,mmc_hostname(host->mmc));
1835 case STATE_SENDING_STOP:
1836 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1837 &host->pending_events))
1839 MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
1840 prev_state,state,mmc_hostname(host->mmc));
1842 /* CMD error in data command */
1843 if (host->mrq->cmd->error && host->mrq->data) {
1844 dw_mci_fifo_reset(host);
1850 dw_mci_command_complete(host, host->mrq->stop);
1852 if (host->mrq->stop)
1853 dw_mci_command_complete(host, host->mrq->stop);
1855 host->cmd_status = 0;
1858 dw_mci_request_end(host, host->mrq);
1861 case STATE_DATA_ERROR:
1862 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1863 &host->pending_events))
1866 state = STATE_DATA_BUSY;
1869 } while (state != prev_state);
1871 host->state = state;
1873 spin_unlock(&host->lock);
1877 /* push final bytes to part_buf, only use during push */
1878 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1880 memcpy((void *)&host->part_buf, buf, cnt);
1881 host->part_buf_count = cnt;
1884 /* append bytes to part_buf, only use during push */
1885 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1887 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1888 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1889 host->part_buf_count += cnt;
1893 /* pull first bytes from part_buf, only use during pull */
1894 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1896 cnt = min(cnt, (int)host->part_buf_count);
1898 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1900 host->part_buf_count -= cnt;
1901 host->part_buf_start += cnt;
1906 /* pull final bytes from the part_buf, assuming it's just been filled */
1907 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1909 memcpy(buf, &host->part_buf, cnt);
1910 host->part_buf_start = cnt;
1911 host->part_buf_count = (1 << host->data_shift) - cnt;
1914 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1916 struct mmc_data *data = host->data;
1919 /* try and push anything in the part_buf */
1920 if (unlikely(host->part_buf_count)) {
1921 int len = dw_mci_push_part_bytes(host, buf, cnt);
1924 if (host->part_buf_count == 2) {
1925 mci_writew(host, DATA(host->data_offset),
1927 host->part_buf_count = 0;
1930 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1931 if (unlikely((unsigned long)buf & 0x1)) {
1933 u16 aligned_buf[64];
1934 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1935 int items = len >> 1;
1937 /* memcpy from input buffer into aligned buffer */
1938 memcpy(aligned_buf, buf, len);
1941 /* push data from aligned buffer into fifo */
1942 for (i = 0; i < items; ++i)
1943 mci_writew(host, DATA(host->data_offset),
1950 for (; cnt >= 2; cnt -= 2)
1951 mci_writew(host, DATA(host->data_offset), *pdata++);
1954 /* put anything remaining in the part_buf */
1956 dw_mci_set_part_bytes(host, buf, cnt);
1957 /* Push data if we have reached the expected data length */
1958 if ((data->bytes_xfered + init_cnt) ==
1959 (data->blksz * data->blocks))
1960 mci_writew(host, DATA(host->data_offset),
1965 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
1967 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1968 if (unlikely((unsigned long)buf & 0x1)) {
1970 /* pull data from fifo into aligned buffer */
1971 u16 aligned_buf[64];
1972 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1973 int items = len >> 1;
1975 for (i = 0; i < items; ++i)
1976 aligned_buf[i] = mci_readw(host,
1977 DATA(host->data_offset));
1978 /* memcpy from aligned buffer into output buffer */
1979 memcpy(buf, aligned_buf, len);
1987 for (; cnt >= 2; cnt -= 2)
1988 *pdata++ = mci_readw(host, DATA(host->data_offset));
1992 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1993 dw_mci_pull_final_bytes(host, buf, cnt);
1997 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
1999 struct mmc_data *data = host->data;
2002 /* try and push anything in the part_buf */
2003 if (unlikely(host->part_buf_count)) {
2004 int len = dw_mci_push_part_bytes(host, buf, cnt);
2007 if (host->part_buf_count == 4) {
2008 mci_writel(host, DATA(host->data_offset),
2010 host->part_buf_count = 0;
2013 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2014 if (unlikely((unsigned long)buf & 0x3)) {
2016 u32 aligned_buf[32];
2017 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2018 int items = len >> 2;
2020 /* memcpy from input buffer into aligned buffer */
2021 memcpy(aligned_buf, buf, len);
2024 /* push data from aligned buffer into fifo */
2025 for (i = 0; i < items; ++i)
2026 mci_writel(host, DATA(host->data_offset),
2033 for (; cnt >= 4; cnt -= 4)
2034 mci_writel(host, DATA(host->data_offset), *pdata++);
2037 /* put anything remaining in the part_buf */
2039 dw_mci_set_part_bytes(host, buf, cnt);
2040 /* Push data if we have reached the expected data length */
2041 if ((data->bytes_xfered + init_cnt) ==
2042 (data->blksz * data->blocks))
2043 mci_writel(host, DATA(host->data_offset),
2048 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2050 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2051 if (unlikely((unsigned long)buf & 0x3)) {
2053 /* pull data from fifo into aligned buffer */
2054 u32 aligned_buf[32];
2055 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2056 int items = len >> 2;
2058 for (i = 0; i < items; ++i)
2059 aligned_buf[i] = mci_readl(host,
2060 DATA(host->data_offset));
2061 /* memcpy from aligned buffer into output buffer */
2062 memcpy(buf, aligned_buf, len);
2070 for (; cnt >= 4; cnt -= 4)
2071 *pdata++ = mci_readl(host, DATA(host->data_offset));
2075 host->part_buf32 = mci_readl(host, DATA(host->data_offset));
2076 dw_mci_pull_final_bytes(host, buf, cnt);
2080 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2082 struct mmc_data *data = host->data;
2085 /* try and push anything in the part_buf */
2086 if (unlikely(host->part_buf_count)) {
2087 int len = dw_mci_push_part_bytes(host, buf, cnt);
2091 if (host->part_buf_count == 8) {
2092 mci_writeq(host, DATA(host->data_offset),
2094 host->part_buf_count = 0;
2097 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2098 if (unlikely((unsigned long)buf & 0x7)) {
2100 u64 aligned_buf[16];
2101 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2102 int items = len >> 3;
2104 /* memcpy from input buffer into aligned buffer */
2105 memcpy(aligned_buf, buf, len);
2108 /* push data from aligned buffer into fifo */
2109 for (i = 0; i < items; ++i)
2110 mci_writeq(host, DATA(host->data_offset),
2117 for (; cnt >= 8; cnt -= 8)
2118 mci_writeq(host, DATA(host->data_offset), *pdata++);
2121 /* put anything remaining in the part_buf */
2123 dw_mci_set_part_bytes(host, buf, cnt);
2124 /* Push data if we have reached the expected data length */
2125 if ((data->bytes_xfered + init_cnt) ==
2126 (data->blksz * data->blocks))
2127 mci_writeq(host, DATA(host->data_offset),
2132 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2134 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2135 if (unlikely((unsigned long)buf & 0x7)) {
2137 /* pull data from fifo into aligned buffer */
2138 u64 aligned_buf[16];
2139 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2140 int items = len >> 3;
2142 for (i = 0; i < items; ++i)
2143 aligned_buf[i] = mci_readq(host,
2144 DATA(host->data_offset));
2145 /* memcpy from aligned buffer into output buffer */
2146 memcpy(buf, aligned_buf, len);
2154 for (; cnt >= 8; cnt -= 8)
2155 *pdata++ = mci_readq(host, DATA(host->data_offset));
2159 host->part_buf = mci_readq(host, DATA(host->data_offset));
2160 dw_mci_pull_final_bytes(host, buf, cnt);
2164 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2168 /* get remaining partial bytes */
2169 len = dw_mci_pull_part_bytes(host, buf, cnt);
2170 if (unlikely(len == cnt))
2175 /* get the rest of the data */
2176 host->pull_data(host, buf, cnt);
2179 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2181 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2183 unsigned int offset;
2184 struct mmc_data *data = host->data;
2185 int shift = host->data_shift;
2188 unsigned int remain, fcnt;
2191 if (!sg_miter_next(sg_miter))
2194 host->sg = sg_miter->piter.sg;
2195 buf = sg_miter->addr;
2196 remain = sg_miter->length;
2200 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2201 << shift) + host->part_buf_count;
2202 len = min(remain, fcnt);
2205 dw_mci_pull_data(host, (void *)(buf + offset), len);
2206 data->bytes_xfered += len;
2211 sg_miter->consumed = offset;
2212 status = mci_readl(host, MINTSTS);
2213 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2214 /* if the RXDR is ready read again */
2215 } while ((status & SDMMC_INT_RXDR) ||
2216 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2219 if (!sg_miter_next(sg_miter))
2221 sg_miter->consumed = 0;
2223 sg_miter_stop(sg_miter);
2227 sg_miter_stop(sg_miter);
2230 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2233 static void dw_mci_write_data_pio(struct dw_mci *host)
2235 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2237 unsigned int offset;
2238 struct mmc_data *data = host->data;
2239 int shift = host->data_shift;
2242 unsigned int fifo_depth = host->fifo_depth;
2243 unsigned int remain, fcnt;
2246 if (!sg_miter_next(sg_miter))
2249 host->sg = sg_miter->piter.sg;
2250 buf = sg_miter->addr;
2251 remain = sg_miter->length;
2255 fcnt = ((fifo_depth -
2256 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2257 << shift) - host->part_buf_count;
2258 len = min(remain, fcnt);
2261 host->push_data(host, (void *)(buf + offset), len);
2262 data->bytes_xfered += len;
2267 sg_miter->consumed = offset;
2268 status = mci_readl(host, MINTSTS);
2269 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2270 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2273 if (!sg_miter_next(sg_miter))
2275 sg_miter->consumed = 0;
2277 sg_miter_stop(sg_miter);
2281 sg_miter_stop(sg_miter);
2284 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2287 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2289 if (!host->cmd_status)
2290 host->cmd_status = status;
2294 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2295 tasklet_schedule(&host->tasklet);
2298 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2300 struct dw_mci *host = dev_id;
2301 u32 pending, sdio_int;
2304 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2307 * DTO fix - version 2.10a and below, and only if internal DMA
2310 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
2312 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
2313 pending |= SDMMC_INT_DATA_OVER;
2317 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2318 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2319 host->cmd_status = pending;
2321 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
2322 __LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
2324 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2327 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2328 /* if there is an error report DATA_ERROR */
2329 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2330 host->data_status = pending;
2332 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2334 MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
2335 __LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
2336 tasklet_schedule(&host->tasklet);
2339 if (pending & SDMMC_INT_DATA_OVER) {
2340 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2341 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2342 if (!host->data_status)
2343 host->data_status = pending;
2345 if (host->dir_status == DW_MCI_RECV_STATUS) {
2346 if (host->sg != NULL)
2347 dw_mci_read_data_pio(host, true);
2349 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2350 tasklet_schedule(&host->tasklet);
2353 if (pending & SDMMC_INT_RXDR) {
2354 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2355 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2356 dw_mci_read_data_pio(host, false);
2359 if (pending & SDMMC_INT_TXDR) {
2360 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2361 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2362 dw_mci_write_data_pio(host);
2365 if (pending & SDMMC_INT_VSI) {
2366 MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2367 mci_writel(host, RINTSTS, SDMMC_INT_VSI);
2368 dw_mci_cmd_interrupt(host, pending);
2371 if (pending & SDMMC_INT_CMD_DONE) {
2372 MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
2373 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2374 dw_mci_cmd_interrupt(host, pending);
2377 if (pending & SDMMC_INT_CD) {
2378 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2379 queue_work(host->card_workqueue, &host->card_work);
2382 /* Handle SDIO Interrupts */
2383 for (i = 0; i < host->num_slots; i++) {
2384 struct dw_mci_slot *slot = host->slot[i];
2386 if (host->verid < DW_MMC_240A)
2387 sdio_int = SDMMC_INT_SDIO(i);
2389 sdio_int = SDMMC_INT_SDIO(i + 8);
2391 if (pending & sdio_int) {
2392 mci_writel(host, RINTSTS, sdio_int);
2393 mmc_signal_sdio_irq(slot->mmc);
2399 #ifdef CONFIG_MMC_DW_IDMAC
2400 /* Handle DMA interrupts */
2401 pending = mci_readl(host, IDSTS);
2402 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2403 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
2404 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2405 host->dma_ops->complete(host);
2412 static void dw_mci_work_routine_card(struct work_struct *work)
2414 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
2417 for (i = 0; i < host->num_slots; i++) {
2418 struct dw_mci_slot *slot = host->slot[i];
2419 struct mmc_host *mmc = slot->mmc;
2420 struct mmc_request *mrq;
2424 present = dw_mci_get_cd(mmc);
2425 while (present != slot->last_detect_state) {
2426 dev_dbg(&slot->mmc->class_dev, "card %s\n",
2427 present ? "inserted" : "removed");
2428 MMC_DBG_BOOT_FUNC(mmc, " The card is %s. ===!!!!!!==[%s]\n",
2429 present ? "inserted" : "removed.", mmc_hostname(mmc));
2431 spin_lock_bh(&host->lock);
2433 /* Card change detected */
2434 slot->last_detect_state = present;
2436 /* Clean up queue if present */
2439 if (mrq == host->mrq) {
2443 switch (host->state) {
2446 case STATE_SENDING_CMD:
2447 mrq->cmd->error = -ENOMEDIUM;
2451 case STATE_SENDING_DATA:
2452 mrq->data->error = -ENOMEDIUM;
2453 dw_mci_stop_dma(host);
2455 case STATE_DATA_BUSY:
2456 case STATE_DATA_ERROR:
2457 if (mrq->data->error == -EINPROGRESS)
2458 mrq->data->error = -ENOMEDIUM;
2462 case STATE_SENDING_STOP:
2463 mrq->stop->error = -ENOMEDIUM;
2467 dw_mci_request_end(host, mrq);
2469 list_del(&slot->queue_node);
2470 mrq->cmd->error = -ENOMEDIUM;
2472 mrq->data->error = -ENOMEDIUM;
2474 mrq->stop->error = -ENOMEDIUM;
2476 MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",mrq->cmd->opcode, mmc_hostname(mmc));
2478 spin_unlock(&host->lock);
2479 mmc_request_done(slot->mmc, mrq);
2480 spin_lock(&host->lock);
2484 /* Power down slot */
2486 /* Clear down the FIFO */
2487 dw_mci_fifo_reset(host);
2488 #ifdef CONFIG_MMC_DW_IDMAC
2489 dw_mci_idmac_reset(host);
2494 spin_unlock_bh(&host->lock);
2496 present = dw_mci_get_cd(mmc);
2499 mmc_detect_change(slot->mmc,
2500 msecs_to_jiffies(host->pdata->detect_delay_ms));
2505 /* given a slot id, find out the device node representing that slot */
2506 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2508 struct device_node *np;
2512 if (!dev || !dev->of_node)
2515 for_each_child_of_node(dev->of_node, np) {
2516 addr = of_get_property(np, "reg", &len);
2517 if (!addr || (len < sizeof(int)))
2519 if (be32_to_cpup(addr) == slot)
2525 static struct dw_mci_of_slot_quirks {
2528 } of_slot_quirks[] = {
2530 .quirk = "disable-wp",
2531 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
2535 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2537 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2542 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2543 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2544 quirks |= of_slot_quirks[idx].id;
2549 /* find out bus-width for a given slot */
2550 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2552 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
2558 if (of_property_read_u32(np, "bus-width", &bus_wd))
2559 dev_err(dev, "bus-width property not found, assuming width"
2565 /* find the pwr-en gpio for a given slot; or -1 if none specified */
2566 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
2568 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
2574 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
2576 /* Having a missing entry is valid; return silently */
2577 if (!gpio_is_valid(gpio))
2580 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
2581 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2585 gpio_direction_output(gpio, 0);//set 0 to pwr-en
2591 /* find the write protect gpio for a given slot; or -1 if none specified */
2592 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2594 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2600 gpio = of_get_named_gpio(np, "wp-gpios", 0);
2602 /* Having a missing entry is valid; return silently */
2603 if (!gpio_is_valid(gpio))
2606 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2607 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2614 /* find the cd gpio for a given slot */
/*
 * Register the optional "cd-gpios" card-detect line with the MMC slot-gpio
 * core.  mmc_gpio_request_cd() takes over debounce/IRQ handling; failure is
 * only warned about because card detect may also be polled or broken-cd.
 */
2615 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2616 struct mmc_host *mmc)
2618 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2624 gpio = of_get_named_gpio(np, "cd-gpios", 0);
2626 /* Having a missing entry is valid; return silently */
2627 if (!gpio_is_valid(gpio))
2630 if (mmc_gpio_request_cd(mmc, gpio, 0))
2631 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2633 #else /* CONFIG_OF */
/*
 * !CONFIG_OF stubs: no-op fallbacks so the generic code links without DT.
 * NOTE(review): no stub for dw_mci_of_get_pwr_en_gpio() is visible in this
 * extract — if it truly is absent, a !CONFIG_OF build would fail at the call
 * site in dw_mci_init_slot(); confirm against the full file.
 */
2634 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2638 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2642 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2646 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2650 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2651 struct mmc_host *mmc)
2655 #endif /* CONFIG_OF */
/*
 * dw_mci_init_slot - allocate, configure and register one mmc_host for @id.
 *
 * Allocates the mmc_host + dw_mci_slot pair, fills capabilities from platform
 * data / device tree / drv_data, sets transfer limits, claims the power-en,
 * write-protect and card-detect GPIOs, then calls mmc_add_host().
 * Returns 0 on success; error paths are mostly in lines missing from this
 * extract.
 */
2657 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2659 struct mmc_host *mmc;
2660 struct dw_mci_slot *slot;
2661 const struct dw_mci_drv_data *drv_data = host->drv_data;
2666 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2670 slot = mmc_priv(mmc);
2674 host->slot[id] = slot;
2677 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2679 mmc->ops = &dw_mci_ops;
/*
 * f_min/f_max come from the optional "clock-freq-min-max" DT pair; otherwise
 * the driver-wide DW_MCI_FREQ_MIN/MAX defaults apply.
 * NOTE(review): raw printk() without a KERN_ level — dev_info() would match
 * the rest of the file.
 */
2681 if (of_property_read_u32_array(host->dev->of_node,
2682 "clock-freq-min-max", freq, 2)) {
2683 mmc->f_min = DW_MCI_FREQ_MIN;
2684 mmc->f_max = DW_MCI_FREQ_MAX;
2686 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
2687 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
2689 mmc->f_min = freq[0];
2690 mmc->f_max = freq[1];
2692 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
2693 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
/*
 * BUG(review): strstr() arguments are reversed — strstr(haystack, needle)
 * searches for the second string inside the first, so this searches for the
 * hostname inside the literal "mmc0".  It happens to be true for "mmc0" but
 * also for "mmc", "mc0", etc.  Should be
 * strstr(mmc_hostname(mmc), "mmc0") or a strcmp().  The version banner is
 * also missing a trailing '\n' and a KERN_ level.
 */
2696 if(strstr("mmc0",mmc_hostname(mmc)))
2697 printk("Line%d..%s: The rk_sdmmc %s",__LINE__, __FUNCTION__,RK_SDMMC_DRIVER_VERSION);
/* Rockchip extension: restrict the slot to one card type via DT flags. */
2699 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
2700 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
2701 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
2702 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
2703 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
2704 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
/* OCR mask: pdata callback wins; fallback advertises every VDD window. */
2706 if (host->pdata->get_ocr)
2707 mmc->ocr_avail = host->pdata->get_ocr(id);
2710 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
2711 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
2712 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
2713 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
2717 * Start with slot power disabled, it will be enabled when a card
2720 if (host->pdata->setpower)
2721 host->pdata->setpower(id, 0);
2723 if (host->pdata->caps)
2724 mmc->caps = host->pdata->caps;
2726 if (host->pdata->pm_caps)
2727 mmc->pm_caps = host->pdata->pm_caps;
/* ctrl_id indexes the drv_data caps tables: DT alias first, pdev id else. */
2729 if (host->dev->of_node) {
2730 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2734 ctrl_id = to_platform_device(host->dev)->id;
2736 if (drv_data && drv_data->caps)
2737 mmc->caps |= drv_data->caps[ctrl_id];
2738 if (drv_data && drv_data->hold_reg_flag)
2739 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
2741 //set the compatibility of driver.
/*
 * NOTE(review): all UHS speed modes plus ERASE are forced on unconditionally
 * here, regardless of DT/pdata — confirm every board variant can actually
 * run SDR104 signalling.
 */
2742 mmc->caps |= MMC_CAP_UHS_SDR12|MMC_CAP_UHS_SDR25|MMC_CAP_UHS_SDR50|MMC_CAP_UHS_SDR104|MMC_CAP_ERASE;
2744 if (host->pdata->caps2)
2745 mmc->caps2 = host->pdata->caps2;
2747 if (host->pdata->get_bus_wd)
2748 bus_width = host->pdata->get_bus_wd(slot->id);
2749 else if (host->dev->of_node)
2750 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2754 switch (bus_width) {
2756 mmc->caps |= MMC_CAP_8_BIT_DATA;
/* 8-bit case presumably falls through so it also gets 4-bit — confirm. */
2758 mmc->caps |= MMC_CAP_4_BIT_DATA;
/* Optional DT capability/power-management flags, one property per bit. */
2760 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
2761 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
2762 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
2763 mmc->caps |= MMC_CAP_SDIO_IRQ;
2764 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
2765 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
2766 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
2767 mmc->pm_caps |= MMC_PM_KEEP_POWER;
2768 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
2769 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
2770 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
2771 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2773 /*Assign pm_caps pass to pm_flags*/
2774 mmc->pm_flags = mmc->pm_caps;
/* Transfer limits: pdata override, else IDMAC/PIO-specific defaults. */
2776 if (host->pdata->blk_settings) {
2777 mmc->max_segs = host->pdata->blk_settings->max_segs;
2778 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2779 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2780 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2781 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2783 /* Useful defaults if platform data is unset. */
2784 #ifdef CONFIG_MMC_DW_IDMAC
2785 mmc->max_segs = host->ring_size;
2786 mmc->max_blk_size = 65536;
2787 mmc->max_blk_count = host->ring_size;
2788 mmc->max_seg_size = 0x1000;
2789 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2792 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2793 mmc->max_blk_count = 512;
2794 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2795 mmc->max_seg_size = mmc->max_req_size;
2796 #endif /* CONFIG_MMC_DW_IDMAC */
2799 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
/*
 * NOTE(review): the original gpio_is_valid() condition was replaced by a
 * card-type check (see trailing comment) — non-SD slots take this branch;
 * the bodies are in missing lines.
 */
2801 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))//(gpio_is_valid(slot->pwr_en_gpio))
/* Only the SD slot gets a vmmc supply; absence is tolerated (info only). */
2806 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
2808 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
2815 if (IS_ERR(host->vmmc)) {
2816 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
2819 ret = regulator_enable(host->vmmc);
2822 "failed to enable regulator: %d\n", ret);
2828 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2829 dw_mci_of_get_cd_gpio(host->dev, slot->id, mmc);
/* SDIO cards are assumed absent until detected out-of-band. */
2831 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2832 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2834 ret = mmc_add_host(mmc);
2838 /* Pinctrl set default iomux state to fucntion port.
2839 * Fixme: DON'T TOUCH EMMC SETTING!
2841 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
2843 host->pinctrl = devm_pinctrl_get(host->dev);
/*
 * NOTE(review): on IS_ERR(pinctrl / pins_default) only a warning is printed,
 * yet the handles appear to be used below — the guarding else/return lines
 * are missing here; confirm an error pointer can't reach
 * pinctrl_lookup_state()/pinctrl_select_state().
 */
2844 if(IS_ERR(host->pinctrl))
2845 printk("%s: Warning : No pinctrl used!\n",mmc_hostname(host->mmc));
2848 host->pins_default = pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_DEFAULT);
2849 if(IS_ERR(host->pins_default))
2850 printk("%s: Warning : No default pinctrl matched!\n", mmc_hostname(host->mmc));
2853 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
2854 printk("%s: Warning : Default pinctrl setting failed!\n", mmc_hostname(host->mmc));
2860 #if defined(CONFIG_DEBUG_FS)
2861 dw_mci_init_debugfs(slot);
2864 /* Card initially undetected */
2865 slot->last_detect_state = 1;
/*
 * dw_mci_cleanup_slot - unregister and free one slot's mmc_host.
 * Mirror of dw_mci_init_slot(): pdata exit hook, mmc_remove_host(), clear
 * the host's slot pointer, then free.  Order matters — the slot pointer is
 * cleared before mmc_free_host() so the IRQ/workqueue paths can't see a
 * freed slot.
 */
2874 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2876 /* Shutdown detect IRQ */
2877 if (slot->host->pdata->exit)
2878 slot->host->pdata->exit(id);
2880 /* Debugfs stuff is cleaned up by mmc core */
2881 mmc_remove_host(slot->mmc);
2882 slot->host->slot[id] = NULL;
2883 mmc_free_host(slot->mmc);
/*
 * dw_mci_init_dma - set up the descriptor buffer and pick a DMA backend.
 * Allocates one page of coherent memory for sg translation (devm-managed),
 * selects the internal IDMAC when CONFIG_MMC_DW_IDMAC is set, and validates
 * that the chosen ops table is complete before calling its init().  Any
 * failure falls back to PIO mode (fallthrough lines are missing from this
 * extract).
 */
2886 static void dw_mci_init_dma(struct dw_mci *host)
2888 /* Alloc memory for sg translation */
2889 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2890 &host->sg_dma, GFP_KERNEL);
2891 if (!host->sg_cpu) {
2892 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2897 /* Determine which DMA interface to use */
2898 #ifdef CONFIG_MMC_DW_IDMAC
2899 host->dma_ops = &dw_mci_idmac_ops;
2900 dev_info(host->dev, "Using internal DMA controller.\n");
/* All four ops must be present; a partial ops table means no DMA. */
2906 if (host->dma_ops->init && host->dma_ops->start &&
2907 host->dma_ops->stop && host->dma_ops->cleanup) {
2908 if (host->dma_ops->init(host)) {
2909 dev_err(host->dev, "%s: Unable to initialize "
2910 "DMA Controller.\n", __func__);
2914 dev_err(host->dev, "DMA initialization not found.\n");
2922 dev_info(host->dev, "Using PIO mode.\n");
/*
 * dw_mci_ctrl_reset - pulse @reset bit(s) in CTRL and poll for completion.
 * The controller self-clears the reset bits; we busy-poll with a 500 ms
 * jiffies deadline.  Returns true on success, false (with an error log) on
 * timeout — the return lines are in gaps of this extract.
 */
2927 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
2929 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2932 ctrl = mci_readl(host, CTRL);
2934 mci_writel(host, CTRL, ctrl);
2936 /* wait till resets clear */
2938 ctrl = mci_readl(host, CTRL);
2939 if (!(ctrl & reset))
2941 } while (time_before(jiffies, timeout));
2944 "Timeout resetting block (ctrl reset %#x)\n",
/*
 * dw_mci_fifo_reset - reset only the data FIFO.
 * Stops the sg_miter first because (per the original comment) the reset
 * raises a block interrupt whose handler must not find a live scatter-gather
 * iterator.
 */
2950 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
2953 * Reseting generates a block interrupt, hence setting
2954 * the scatter-gather pointer to NULL.
2957 sg_miter_stop(&host->sg_miter);
2961 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/*
 * dw_mci_ctrl_all_reset - full controller reset: FIFO, DMA and (on the
 * missing middle line, presumably) the controller core itself in one CTRL
 * write.  Used at probe and resume.
 */
2964 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
2966 return dw_mci_ctrl_reset(host,
2967 SDMMC_CTRL_FIFO_RESET |
2969 SDMMC_CTRL_DMA_RESET);
/*
 * Host-level (not per-slot) DT quirk table, consumed by dw_mci_parse_dt().
 * Struct members are in lines missing from this extract.
 */
2973 static struct dw_mci_of_quirks {
2978 .quirk = "broken-cd",
2979 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * dw_mci_parse_dt - build a dw_mci_board (pdata) from the device tree.
 * Allocates a zeroed pdata (devm), then fills: slot count, quirk bits,
 * FIFO depth, card-detect delay, bus clock, and capability flags from both
 * standard bindings and Rockchip-specific properties
 * ("supports-UHS_SDR104", "supports-DDR_MODE", "bootpart-no-access").
 * Also delegates to drv_data->parse_dt() for SoC-specific properties.
 * Returns the pdata or an ERR_PTR.
 */
2983 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
2985 struct dw_mci_board *pdata;
2986 struct device *dev = host->dev;
2987 struct device_node *np = dev->of_node;
2988 const struct dw_mci_drv_data *drv_data = host->drv_data;
2990 u32 clock_frequency;
2992 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2994 dev_err(dev, "could not allocate memory for pdata\n");
2995 return ERR_PTR(-ENOMEM);
2998 /* find out number of slots supported */
2999 if (of_property_read_u32(dev->of_node, "num-slots",
3000 &pdata->num_slots)) {
3001 dev_info(dev, "num-slots property not found, "
3002 "assuming 1 slot is available\n");
3003 pdata->num_slots = 1;
/* Translate each present quirk property into its quirk bit. */
3007 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3008 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3009 pdata->quirks |= of_quirks[idx].id;
/* fifo_depth == 0 means "read it from FIFOTH at probe" (see dw_mci_probe). */
3012 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3013 dev_info(dev, "fifo-depth property not found, using "
3014 "value of FIFOTH register as default\n");
3016 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3018 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3019 pdata->bus_hz = clock_frequency;
/* SoC-specific DT hook; an error here aborts the whole parse. */
3021 if (drv_data && drv_data->parse_dt) {
3022 ret = drv_data->parse_dt(host);
3024 return ERR_PTR(ret);
3027 if (of_find_property(np, "keep-power-in-suspend", NULL))
3028 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3030 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3031 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3033 if (of_find_property(np, "supports-highspeed", NULL))
3034 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
/* Non-standard Rockchip bindings (note the unusual capitalization). */
3036 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3037 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3039 if (of_find_property(np, "supports-DDR_MODE", NULL))
3040 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3042 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3043 pdata->caps2 |= MMC_CAP2_HS200;
3045 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3046 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3048 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3049 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3051 if (of_get_property(np, "cd-inverted", NULL))
3052 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3053 if (of_get_property(np, "bootpart-no-access", NULL))
3054 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3059 #else /* CONFIG_OF */
/* Without OF support there is no DT pdata source; callers must supply
 * platform data, so this stub reports "invalid". */
3060 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3062 return ERR_PTR(-EINVAL);
3064 #endif /* CONFIG_OF */
/*
 * dw_mci_probe - core controller bring-up, shared by all platform glue.
 * Order: pdata (DT) → version register → clocks → host data width → full
 * block reset → DMA → interrupt masks/FIFO thresholds → tasklet, workqueue,
 * IRQ → per-slot init → unmask interrupts.  Error labels (in missing lines)
 * unwind in reverse.  Returns 0 if at least one slot initialized.
 */
3066 int dw_mci_probe(struct dw_mci *host)
3068 const struct dw_mci_drv_data *drv_data = host->drv_data;
3069 int width, i, ret = 0;
3075 host->pdata = dw_mci_parse_dt(host);
3076 if (IS_ERR(host->pdata)) {
3077 dev_err(host->dev, "platform data not available\n");
3082 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3084 "Platform data must supply select_slot function\n");
3089 * In 2.40a spec, Data offset is changed.
3090 * Need to check the version-id and set data-offset for DATA register.
3092 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3093 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3095 if (host->verid < DW_MMC_240A)
3096 host->data_offset = DATA_OFFSET;
3098 host->data_offset = DATA_240A_OFFSET;
/*
 * Rockchip-specific clock pair: bus (AHB) clock + CIU functional clock.
 * NOTE(review): clk_prepare_enable() return values are ignored here and for
 * clk_mmc below — a failed enable would go unnoticed.
 */
3101 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3102 if (IS_ERR(host->hclk_mmc)) {
3103 dev_err(host->dev, "failed to get hclk_mmc\n");
3104 ret = PTR_ERR(host->hclk_mmc);
3107 clk_prepare_enable(host->hclk_mmc);
3110 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3111 if (IS_ERR(host->clk_mmc)) {
3112 dev_err(host->dev, "failed to get clk mmc_per\n");
3113 ret = PTR_ERR(host->clk_mmc);
/* >= 2.40a parts have a fixed /2 divider, so the source runs at 2x bus_hz. */
3117 if (host->verid < DW_MMC_240A)
3118 host->bus_hz = host->pdata->bus_hz;
3120 host->bus_hz = host->pdata->bus_hz*2;// *2 due to fix divider 2 in controller.
3121 if (!host->bus_hz) {
3122 dev_err(host->dev,"Platform data must supply bus speed\n");
3127 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3129 dev_err(host->dev, "failed to set clk mmc\n");
3132 clk_prepare_enable(host->clk_mmc);
3134 if (drv_data && drv_data->setup_clock) {
3135 ret = drv_data->setup_clock(host);
3138 "implementation specific clock setup failed\n");
3143 host->quirks = host->pdata->quirks;
3144 host->irq_state = true;
3145 host->set_speed = 0;
3148 spin_lock_init(&host->lock);
3149 INIT_LIST_HEAD(&host->queue);
3152 * Get the host data width - this assumes that HCON has been set with
3153 * the correct values.
/* HCON[9:7] encodes data width: 0 -> 16-bit, 2 -> 64-bit, else 32-bit. */
3155 i = (mci_readl(host, HCON) >> 7) & 0x7;
3157 host->push_data = dw_mci_push_data16;
3158 host->pull_data = dw_mci_pull_data16;
3160 host->data_shift = 1;
3161 } else if (i == 2) {
3162 host->push_data = dw_mci_push_data64;
3163 host->pull_data = dw_mci_pull_data64;
3165 host->data_shift = 3;
3167 /* Check for a reserved value, and warn if it is */
3169 "HCON reports a reserved host data width!\n"
3170 "Defaulting to 32-bit access.\n");
3171 host->push_data = dw_mci_push_data32;
3172 host->pull_data = dw_mci_pull_data32;
3174 host->data_shift = 2;
3177 /* Reset all blocks */
3178 if (!dw_mci_ctrl_all_reset(host))
3181 host->dma_ops = host->pdata->dma_ops;
3182 dw_mci_init_dma(host);
3184 /* Clear the interrupts for the host controller */
3185 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3186 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3188 /* Put in max timeout */
3189 mci_writel(host, TMOUT, 0xFFFFFFFF);
3192 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3193 * Tx Mark = fifo_size / 2 DMA Size = 8
3195 if (!host->pdata->fifo_depth) {
3197 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3198 * have been overwritten by the bootloader, just like we're
3199 * about to do, so if you know the value for your hardware, you
3200 * should put it in the platform data.
3202 fifo_size = mci_readl(host, FIFOTH);
3203 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3205 fifo_size = host->pdata->fifo_depth;
3207 host->fifo_depth = fifo_size;
3209 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3210 mci_writel(host, FIFOTH, host->fifoth_val);
3212 /* disable clock to CIU */
3213 mci_writel(host, CLKENA, 0);
3214 mci_writel(host, CLKSRC, 0);
/*
 * NOTE(review): WQ_NON_REENTRANT is a no-op on modern kernels (all
 * workqueues are non-reentrant) and the flag was removed upstream.
 */
3216 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3217 host->card_workqueue = alloc_workqueue("dw-mci-card",
3218 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
3219 if (!host->card_workqueue) {
3223 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
3224 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3225 host->irq_flags, "dw-mci", host);
/* Slot count: pdata wins, else HCON[5:1] ("NUM_CARD" field) + 1. */
3229 if (host->pdata->num_slots)
3230 host->num_slots = host->pdata->num_slots;
3232 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
3234 /* We need at least one slot to succeed */
3235 for (i = 0; i < host->num_slots; i++) {
3236 ret = dw_mci_init_slot(host, i);
3238 dev_dbg(host->dev, "slot %d init failed\n", i);
3244 * Enable interrupts for command done, data over, data empty, card det,
3245 * receive ready and error such as transmit, receive timeout, crc error
3247 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3248 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
3249 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect IRQ only for removable (non-SDIO, non-eMMC) slots. */
3250 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3251 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3252 regs |= SDMMC_INT_CD;
3254 mci_writel(host, INTMASK, regs);
3256 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
3258 dev_info(host->dev, "DW MMC controller at irq %d, "
3259 "%d bit host data width, "
3261 host->irq, width, fifo_size);
3264 dev_info(host->dev, "%d slots initialized\n", init_slots);
3266 dev_dbg(host->dev, "attempted to initialize %d slots, "
3267 "but failed on all\n", host->num_slots);
3272 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
3273 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* --- error unwind (labels are in lines missing from this extract) --- */
3278 destroy_workqueue(host->card_workqueue);
3281 if (host->use_dma && host->dma_ops->exit)
3282 host->dma_ops->exit(host);
/*
 * NOTE(review): host->vmmc comes from devm_regulator_get() in
 * dw_mci_init_slot(); calling regulator_put() on a devm-managed regulator
 * causes a second release when the device is torn down.
 */
3285 regulator_disable(host->vmmc);
3286 regulator_put(host->vmmc);
3290 if (!IS_ERR(host->clk_mmc))
3291 clk_disable_unprepare(host->clk_mmc);
3293 if (!IS_ERR(host->hclk_mmc))
3294 clk_disable_unprepare(host->hclk_mmc);
3298 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - tear down everything dw_mci_probe() set up, in reverse:
 * mask/clear interrupts, remove each slot, gate the CIU clock, destroy the
 * card workqueue, shut down DMA, release regulator and clocks.
 */
3300 void dw_mci_remove(struct dw_mci *host)
3304 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3305 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3307 for (i = 0; i < host->num_slots; i++) {
3308 dev_dbg(host->dev, "remove slot %d\n", i);
3310 dw_mci_cleanup_slot(host->slot[i], i);
3313 /* disable clock to CIU */
3314 mci_writel(host, CLKENA, 0);
3315 mci_writel(host, CLKSRC, 0);
3317 destroy_workqueue(host->card_workqueue);
3319 if (host->use_dma && host->dma_ops->exit)
3320 host->dma_ops->exit(host);
/*
 * NOTE(review): same hazard as the probe error path — host->vmmc is
 * devm-managed, so the explicit regulator_put() here risks a double release
 * at device teardown.
 */
3323 regulator_disable(host->vmmc);
3324 regulator_put(host->vmmc);
3327 if (!IS_ERR(host->clk_mmc))
3328 clk_disable_unprepare(host->clk_mmc);
3330 if (!IS_ERR(host->hclk_mmc))
3331 clk_disable_unprepare(host->hclk_mmc);
3337 #ifdef CONFIG_PM_SLEEP
3339 * TODO: we should probably disable the clock to the card in the suspend path.
/*
 * dw_mci_suspend - system-sleep hook: cut the vmmc supply.
 * NOTE(review): the guard around regulator_disable() (presumably
 * `if (host->vmmc)`) is in a line missing from this extract — confirm a
 * regulator-less host cannot reach this call.
 */
3341 int dw_mci_suspend(struct dw_mci *host)
3345 regulator_disable(host->vmmc);
3349 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - system-wake hook: re-enable vmmc, redo the full controller
 * reset, re-init DMA, restore FIFOTH/TMOUT, re-arm the interrupt mask, then
 * restore bus state for any slot that kept power across suspend.
 * NOTE(review): unlike probe, the SDMMC_INT_CD gate here checks only
 * RESTRICT_CARD_TYPE_SDIO, not eMMC — confirm the asymmetry is intended.
 */
3351 int dw_mci_resume(struct dw_mci *host)
3357 ret = regulator_enable(host->vmmc);
3360 "failed to enable regulator: %d\n", ret);
3365 if (!dw_mci_ctrl_all_reset(host)) {
3370 if (host->use_dma && host->dma_ops->init)
3371 host->dma_ops->init(host);
3374 * Restore the initial value at FIFOTH register
3375 * And Invalidate the prev_blksz with zero
3377 mci_writel(host, FIFOTH, host->fifoth_val);
3378 host->prev_blksz = 0;
3379 /* Put in max timeout */
3380 mci_writel(host, TMOUT, 0xFFFFFFFF);
3382 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3383 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
3385 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3386 regs |= SDMMC_INT_CD;
3387 mci_writel(host, INTMASK, regs);
3388 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
/* Re-apply ios and bus setup for slots that kept power during suspend. */
3390 for (i = 0; i < host->num_slots; i++) {
3391 struct dw_mci_slot *slot = host->slot[i];
3394 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3395 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3396 dw_mci_setup_bus(slot, true);
3401 EXPORT_SYMBOL(dw_mci_resume);
3402 #endif /* CONFIG_PM_SLEEP */
/*
 * Module init/exit.  This core library registers nothing itself — platform
 * glue drivers call dw_mci_probe()/dw_mci_remove() — so init only prints a
 * banner and exit (body in a missing line) has nothing to undo.
 */
3404 static int __init dw_mci_init(void)
3406 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
3410 static void __exit dw_mci_exit(void)
3414 module_init(dw_mci_init);
3415 module_exit(dw_mci_exit);
3417 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3419 MODULE_AUTHOR("NXP Semiconductor VietNam");
3420 MODULE_AUTHOR("Imagination Technologies Ltd");
3421 MODULE_AUTHOR("Rockchip Electronics£¬Bangwang Xie < xbw@rock-chips.com> ");
3423 MODULE_LICENSE("GPL v2");