/*
 * Synopsys DesignWare Multimedia Card Interface driver
 * (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * Copyright (C) 2014 Fuzhou Rockchip Electronics Co.Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
16 #include <linux/blkdev.h>
17 #include <linux/clk.h>
18 #include <linux/debugfs.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/err.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/ioport.h>
25 #include <linux/module.h>
26 #include <linux/platform_device.h>
27 #include <linux/seq_file.h>
28 #include <linux/slab.h>
29 #include <linux/stat.h>
30 #include <linux/delay.h>
31 #include <linux/irq.h>
32 #include <linux/mmc/host.h>
33 #include <linux/mmc/mmc.h>
34 #include <linux/mmc/sd.h>
35 #include <linux/mmc/sdio.h>
36 #include <linux/mmc/rk_mmc.h>
37 #include <linux/bitops.h>
38 #include <linux/regulator/consumer.h>
39 #include <linux/workqueue.h>
41 #include <linux/of_gpio.h>
42 #include <linux/mmc/slot-gpio.h>
43 #include <linux/clk-private.h>
46 #include "rk_sdmmc_of.h"
47 #include <linux/regulator/rockchip_io_vol_domain.h>
49 #define RK_SDMMC_DRIVER_VERSION "Ver 1.00. The last modify date is 2014-05-05"
51 /* Common flag combinations */
52 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
53 /*SDMMC_INT_HTO | */SDMMC_INT_SBE | \
55 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
57 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
58 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
59 #define DW_MCI_SEND_STATUS 1
60 #define DW_MCI_RECV_STATUS 2
61 #define DW_MCI_DMA_THRESHOLD 16
/* Host clock limits in Hz (silicon supports 200 MHz; capped at 50 MHz here). */
#define DW_MCI_FREQ_MAX 50000000//200000000 /* unit: HZ */
#define DW_MCI_FREQ_MIN 300000//400000 /* unit: HZ */

/*
 * Busy-wait timeouts in milliseconds, chosen per card type.
 * Fix: removed the stray trailing ';' from SDMMC_DATA_TIMEOUT_SD -- an
 * object-like macro must not carry a semicolon, otherwise any use inside
 * an expression (e.g. msecs_to_jiffies(SDMMC_DATA_TIMEOUT_SD)) fails to
 * compile.
 */
#define SDMMC_DATA_TIMEOUT_SD	500 /* max is 250ms showed in Spec; Maybe adapt the value for the sick card. */
#define SDMMC_DATA_TIMEOUT_SDIO	250
#define SDMMC_DATA_TIMEOUT_EMMC	2500

#define SDMMC_CMD_RTO_MAX_HOLD	200
#define SDMMC_WAIT_FOR_UNBUSY	2500
73 #ifdef CONFIG_MMC_DW_IDMAC
74 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
75 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
76 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
80 u32 des0; /* Control Descriptor */
81 #define IDMAC_DES0_DIC BIT(1)
82 #define IDMAC_DES0_LD BIT(2)
83 #define IDMAC_DES0_FD BIT(3)
84 #define IDMAC_DES0_CH BIT(4)
85 #define IDMAC_DES0_ER BIT(5)
86 #define IDMAC_DES0_CES BIT(30)
87 #define IDMAC_DES0_OWN BIT(31)
89 u32 des1; /* Buffer sizes */
90 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
91 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
93 u32 des2; /* buffer 1 physical address */
95 u32 des3; /* buffer 2 physical address */
97 #endif /* CONFIG_MMC_DW_IDMAC */
/* Standard tuning block pattern sent on a 4-bit bus (CMD19 tuning). */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard tuning block pattern sent on an 8-bit bus (CMD21 tuning). */
static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Forward declarations: controller reset helpers and low-power control. */
static inline bool dw_mci_fifo_reset(struct dw_mci *host);
static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host);
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
static void dw_mci_disable_low_power(struct dw_mci_slot *slot);
/* Dump every register of the given host to the kernel log (debug aid). */
static int dw_mci_regs_printk(struct dw_mci *host)
	struct sdmmc_reg *regs = dw_mci_regs;

	/* dw_mci_regs is presumably a name/offset table terminated by a
	 * NULL name -- defined elsewhere; confirm against its declaration. */
	while( regs->name != 0 ){
		printk("%s: (0x%04x) = 0x%08x\n", regs->name, regs->addr, mci_readreg(host,regs->addr));
	printk("=======printk %s-register end =========\n", mmc_hostname(host->mmc));
148 #if defined(CONFIG_DEBUG_FS)
/*
 * debugfs "req" file: print the in-flight mmc_request (command, data,
 * stop command) for the slot, under the host lock for a consistent view.
 */
static int dw_mci_req_show(struct seq_file *s, void *v)
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data *data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);

		/* NOTE(review): the format expects four response words but
		 * resp[2] is printed twice -- the last one is most likely
		 * meant to be resp[3]. Same copy/paste issue in the stop
		 * command printout below. */
		"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
		cmd->opcode, cmd->arg, cmd->flags,
		cmd->resp[0], cmd->resp[1], cmd->resp[2],
		cmd->resp[2], cmd->error);
		seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
			   data->bytes_xfered, data->blocks,
			   data->blksz, data->flags, data->error);
		"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
		stop->opcode, stop->arg, stop->flags,
		stop->resp[0], stop->resp[1], stop->resp[2],
		stop->resp[2], stop->error);

	spin_unlock_bh(&slot->host->lock);
/* debugfs open hook: bind the "req" file to dw_mci_req_show via seq_file. */
static int dw_mci_req_open(struct inode *inode, struct file *file)
	return single_open(file, dw_mci_req_show, inode->i_private);
/* file_operations for the debugfs "req" node (seq_file based). */
static const struct file_operations dw_mci_req_fops = {
	.owner = THIS_MODULE,
	.open = dw_mci_req_open,
	.release = single_release,
/*
 * debugfs "regs" file.
 * NOTE(review): these seq_printf calls emit the SDMMC_* macro values,
 * i.e. the register OFFSET constants, not live register contents (there
 * is no mci_readl here). If live values are wanted this should read the
 * registers -- confirm intent.
 */
static int dw_mci_regs_show(struct seq_file *s, void *v)
	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* debugfs open hook: bind the "regs" file to dw_mci_regs_show via seq_file. */
static int dw_mci_regs_open(struct inode *inode, struct file *file)
	return single_open(file, dw_mci_regs_show, inode->i_private);
/* file_operations for the debugfs "regs" node (seq_file based). */
static const struct file_operations dw_mci_regs_fops = {
	.owner = THIS_MODULE,
	.open = dw_mci_regs_open,
	.release = single_release,
/*
 * Create the per-slot debugfs entries under the mmc host's debugfs root:
 * "regs", "req", plus raw views of the host state machine and event masks.
 * On any creation failure, logs a single error (see dev_err below).
 */
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
	struct mmc_host *mmc = slot->mmc;
	struct dw_mci *host = slot->host;

	root = mmc->debugfs_root;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
	node = debugfs_create_file("req", S_IRUSR, root, slot,
	/* Exposes host->state as a u32 -- assumes the enum fits in 32 bits. */
	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
267 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the controller's TMOUT register to its maximum value. */
static void dw_mci_set_timeout(struct dw_mci *host)
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
/*
 * Translate an mmc_command into the controller's CMD register bit layout
 * (response expected/long/CRC, data transfer direction, stream mode),
 * then give the platform driver a chance to adjust it.
 * Returns the assembled CMD register value.
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
	struct mmc_data *data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;

	/* Marked in-progress until the ISR/tasklet fills in the real result. */
	cmd->error = -EINPROGRESS;

	if (cmdr == MMC_STOP_TRANSMISSION)
		cmdr |= SDMMC_CMD_STOP;
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;

	/* SoC-specific tweaks (e.g. hold-register usage) via drv_data hook. */
	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);
/*
 * Build a stop/abort command matching the given data command: CMD12
 * (STOP_TRANSMISSION) for block reads/writes, or an SDIO CCCR abort write
 * (CMD52 to the ABORT register, preserving the function number from
 * cmd->arg bits 28..30) for CMD53. Returns the CMD register encoding.
 */
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
	struct mmc_command *stop;

	stop = &host->stop_abort;

	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
		/* R/W=1, RAW=0, address=CCCR ABORT, data=function number. */
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
/*
 * Write argument and command registers and kick off the command.
 * For CMD11 (voltage switch) low-power clock gating is disabled first and
 * the VOLT_SWITCH bit is set. Afterwards the card is polled (up to 5s,
 * while it remains present) until the busy bits clear; a timeout is only
 * logged, not propagated.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
	unsigned long time_loop = jiffies + msecs_to_jiffies(5000);

	host->pre_cmd = host->cmd;

		"start command: ARGR=0x%08x CMDR=0x%08x\n",
		cmd->arg, cmd_flags);

	if(SD_SWITCH_VOLTAGE == cmd->opcode){
		/* confirm non-low-power mode */
		struct dw_mci_slot *slot = host->slot[0];/* temporarily hard-coded to slot[0] since host->num_slots == 1 */
		mci_writel(host, CMDARG, 0);
		dw_mci_disable_low_power(slot);
		MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s before start cmd=11,[%s]",
			__LINE__, __FUNCTION__,mmc_hostname(host->mmc));

		cmd_flags |= SDMMC_CMD_VOLT_SWITCH;

	mci_writel(host, CMDARG, cmd->arg);

	if(host->mmc->hold_reg_flag)
		cmd_flags |= SDMMC_CMD_USE_HOLD_REG;/* the value is fixed to 1 on some SoCs, e.g. RK3188 */

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

	while ((time_before(jiffies, time_loop))&&(test_bit(DW_MMC_CARD_PRESENT, &host->cur_slot->flags))){
		status = mci_readl(host, STATUS);
		if (!(status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY))){
			ret = 1 ;/* card is unbusy */

	MMC_DBG_ERR_FUNC(host->mmc,"Line%d..%s start cmd=%d(arg=0x%x), cmd_reg=0x%x, unbusy=%d,card-present=%d. [%s]",
		__LINE__, __FUNCTION__,cmd->opcode, cmd->arg,cmd_flags,
		ret,test_bit(DW_MMC_CARD_PRESENT, &host->cur_slot->flags), mmc_hostname(host->mmc));
/* Issue the request's explicit stop command using the pre-computed CMD bits. */
static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
	dw_mci_start_command(host, data->stop, host->stop_cmdr);
/* DMA interface functions */

/*
 * Abort a DMA transfer in progress (stop + cleanup via the dma_ops),
 * then flag the transfer as complete so the state machine can move on.
 */
static void dw_mci_stop_dma(struct dw_mci *host)
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
413 static int dw_mci_get_dma_dir(struct mmc_data *data)
415 if (data->flags & MMC_DATA_WRITE)
416 return DMA_TO_DEVICE;
418 return DMA_FROM_DEVICE;
421 #ifdef CONFIG_MMC_DW_IDMAC
/*
 * Unmap the scatterlist of the current data transfer, but only if it was
 * not pre-mapped by dw_mci_pre_req() (host_cookie set by the pre-request
 * path means dw_mci_post_req() owns the unmap instead).
 */
static void dw_mci_dma_cleanup(struct dw_mci *host)
	struct mmc_data *data = host->data;

	if (!data->host_cookie)
		dma_unmap_sg(host->dev,
			     dw_mci_get_dma_dir(data));
/* Request a software reset of the internal DMA controller via BMOD. */
static void dw_mci_idmac_reset(struct dw_mci *host)
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
/*
 * Stop the internal DMA controller: detach it from the host controller
 * (clear USE_IDMAC, request a DMA reset in CTRL), then disable the IDMAC
 * engine and soft-reset it through BMOD.
 */
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
/*
 * IDMAC completion handler: clean up the mapping, mark the transfer
 * complete and schedule the tasklet to advance the request state machine.
 */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	MMC_DBG_CMD_FUNC(host->mmc," DMA complete cmd=%d(arg=0x%x), blocks=%d,blksz=%d[%s]", \
		host->mrq->cmd->opcode,host->mrq->cmd->arg,data->blocks,data->blksz,mmc_hostname(host->mmc));

	host->dma_ops->cleanup(host);

	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.

		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
/*
 * Fill the IDMAC descriptor ring from the DMA-mapped scatterlist:
 * one chained descriptor per sg entry, interrupts disabled per descriptor,
 * then mark the first (FD) and last (LD, interrupt-enabled) descriptors.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;

	/* Set first descriptor */
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	/* NOTE(review): byte arithmetic with an explicit sizeof multiply --
	 * this is only correct if host->sg_cpu is a void * (byte pointer);
	 * confirm against struct dw_mci's declaration. */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;
/*
 * Start an IDMAC transfer: build the descriptor ring for the mapped
 * scatterlist, route the controller through the IDMAC, enable the engine
 * and poke the poll-demand register to begin fetching descriptors.
 */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
/*
 * One-time IDMAC setup: forward-link a page-sized descriptor ring,
 * close the ring with an end-of-ring descriptor, soft-reset the engine,
 * clear/mask interrupts down to TX/RX complete, and program the
 * descriptor list base address.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
	struct idmac_desc *p;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
/* IDMAC implementation of the generic dw_mci DMA operations. */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
571 #endif /* CONFIG_MMC_DW_IDMAC */
/*
 * DMA-map the request's scatterlist if it qualifies for DMA.
 * Rejects short transfers (below DW_MCI_DMA_THRESHOLD bytes) and any sg
 * entry that is not word-aligned in offset and length. When called from
 * the completion path (next == 0) a cookie set by pre_req is reused.
 * Returns the mapped sg count, or a negative value to force PIO.
 */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.

	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)

	sg_len = dma_map_sg(host->dev,
			    dw_mci_get_dma_dir(data));

		data->host_cookie = sg_len;
/*
 * mmc_host_ops.pre_req: map the next request's sg list ahead of time so
 * the actual submission path can skip the mapping cost. A stale cookie
 * is cleared first; on mapping failure the cookie stays 0 (PIO fallback).
 */
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)

	if (data->host_cookie) {
		data->host_cookie = 0;

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
/*
 * mmc_host_ops.post_req: undo the mapping done by dw_mci_pre_req() once
 * the request has finished, and clear the cookie.
 */
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
/*
 * Choose FIFOTH watermarks and DMA burst size (MSIZE) for the current
 * block size: pick the largest burst from mszs[] that evenly divides
 * both the block depth and the TX watermark complement, then program
 * FIFOTH. Only meaningful for IDMAC transfers.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	 * if blksz is not a multiple of the FIFO width

	if (blksz % fifo_width) {

		if (!((blksz_depth % mszs[idx]) ||
		      (tx_wmark_invers % mszs[idx]))) {
			rx_wmark = mszs[idx] - 1;

	 * If idx is '0', it won't be tried
	 * Thus, initial values are uesed

	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
/*
 * Configure the card read threshold (CDTHRCTL) for high-speed read
 * timings (HS200 / SDR104); disabled for other timings or when the block
 * would not fit the FIFO.
 */
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)

	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.

	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));

	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/*
 * Try to submit the data transfer via DMA: map the sg list, retune
 * FIFOTH if the block size changed, reset the DMA path, enable the DMA
 * interface, mask the PIO (RX/TX drain) interrupts and start the engine.
 * Returns non-zero to make the caller fall back to PIO.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
	/* If we don't have a channel, we can't do DMA */

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
		host->dma_ops->stop(host);

		"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		(unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,

	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.

	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);/* | SDMMC_CTRL_FIFO_RESET -- FIFO reset here is dangerous; noted by xbw, 2014-03-28 */

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);
/*
 * Prepare the host for a data transfer: set the direction, program the
 * read threshold for reads, then attempt DMA submission. On DMA refusal,
 * fall back to PIO: start the sg_miter, enable the RX/TX drain
 * interrupts, disable the DMA interface and restore the default FIFOTH.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
	/* Marked in-progress until completion fills in the real result. */
	data->error = -EINPROGRESS;

	dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);/* | SDMMC_CTRL_FIFO_RESET -- FIFO reset here is dangerous; noted by xbw, 2014-03-28 */

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
		host->dir_status = DW_MCI_SEND_STATUS;

	MMC_DBG_INFO_FUNC(host->mmc," dw_mci_submit_data,blocks=%d,blksz=%d [%s]",\
		data->blocks, data->blksz, mmc_hostname(host->mmc));

	if (dw_mci_submit_data_dma(host, data)) {
		/* PIO fallback path. */
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);

		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transfered by DMA mode,
		 * prev_blksz should be invalidated.

		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;

		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.

		host->prev_blksz = data->blksz;
/*
 * Synchronously issue an internal controller command (e.g. clock update):
 * wait for the card to be unbusy (when present), write CMDARG/CMD with
 * the START bit, then poll until the controller clears START. A timeout
 * is only logged.
 */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);/* was msecs_to_jiffies(5000) */
	unsigned int cmd_status = 0;
#ifdef SDMMC_WAIT_FOR_UNBUSY
	timeout = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);

	if(test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		/* NOTE(review): assignment inside the loop condition
		 * ('ret=' rather than '=='); appears intentional -- ret
		 * records whether we timed out -- but deserves parentheses
		 * or a comment to silence -Wparentheses. */
		while (ret=time_before(jiffies, timeout)) {
			cmd_status = mci_readl(host, STATUS);
			if (!(cmd_status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))

			printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
				__LINE__, __FUNCTION__, mmc_hostname(host->mmc));

	mci_writel(host, CMDARG, arg);

	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))

	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
/*
 * Program the card clock and bus width for a slot.
 * Clock changes follow the controller's required sequence: disable CLKENA,
 * issue an UPD_CLK internal command, write the new CLKDIV, re-enable the
 * clock (with low-power gating unless SDIO interrupts are in use), and
 * issue UPD_CLK again. Finally the slot's bus width is written to CTYPE.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
	struct dw_mci *host = slot->host;
	unsigned int tempck,clock = slot->clock;

	MMC_DBG_INFO_FUNC(host->mmc,"%d..%s: clock=%d, current_speed=%d, bus_hz=%d,forc=%d[%s]\n",
		__LINE__, __FUNCTION__, clock, host->current_speed,host->bus_hz,force_clkinit,mmc_hostname(host->mmc));

		mci_writel(host, CLKENA, 0);
			SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.

		/* CLKDIV divides by 2*div, so halve (rounding up). */
		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		if ((clock << div) != slot->__clk_old || force_clkinit) {
			tempck = div ? ((host->bus_hz / div) >> 1) :host->bus_hz;
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,

			host->set_speed = tempck;

			/* disable clock */
			mci_writel(host, CLKENA, 0);
			mci_writel(host, CLKSRC, 0);

				SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

			/* Sanity check: eMMC above 100 MHz needs a divided clock. */
			if((host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
				&& host->bus_hz > 100000000){
				printk("rk_sdmmc: emmc : div larger than 1, illegal clk in dts ![%s]\n ",
					mmc_hostname(host->mmc));
				printk("eMMC ERROR, emergancy halt!!!!!!!!!\n");
				printk("Please refer to your eMMC datasheet to determine speed mode!\n");
				printk("================================rk3288====================================");
				printk("DDR 8-bits mode: clk in dts should be 100MHz!\n");
				printk("DDR 4-bits mode: clk in dts should be <=100MHz(recommand 50 or 100Mhz)!\n");
				printk("SDR mode: clk in dts should <= 100MHz(recommand 50 or 100Mhz)!\n");
				printk("HS200 mode: clk in dts should <= 150MHz!\n");
				printk("==========================================================================");

			/* set clock to desired speed */
			mci_writel(host, CLKDIV, div);

				SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

			/* enable clock; only low power if no SDIO */
			clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;

			if (host->verid < DW_MMC_240A)
				sdio_int = SDMMC_INT_SDIO(slot->id);
				sdio_int = SDMMC_INT_SDIO((slot->id) + 8);

			if (!(mci_readl(host, INTMASK) & sdio_int))
				clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
			mci_writel(host, CLKENA, clk_en_a);

				SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

			/* keep the clock with reflecting clock dividor */
			slot->__clk_old = clock << div;

	host->current_speed = clock;

	if(slot->ctype != slot->pre_ctype)
		MMC_DBG_BOOT_FUNC(host->mmc, "Bus speed=%dHz,Bus width=%s.[%s]",
			(slot->ctype == SDMMC_CTYPE_4BIT)?"4bits":"8bits",
			mmc_hostname(host->mmc));
	slot->pre_ctype = slot->ctype;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
/*
 * Poll STATUS until the data/command busy bits clear or a per-card-type
 * timeout (ms) expires. Best-effort: the timeout is not reported.
 */
static void dw_mci_wait_unbusy(struct dw_mci *host)
	unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
	unsigned long time_loop;

	MMC_DBG_INFO_FUNC(host->mmc, "dw_mci_wait_unbusy, status=0x%x ", mci_readl(host, STATUS));

	if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
		timeout = SDMMC_DATA_TIMEOUT_EMMC;
	else if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
		timeout = SDMMC_DATA_TIMEOUT_SD;

	time_loop = jiffies + msecs_to_jiffies(timeout);
		status = mci_readl(host, STATUS);
		if (!(status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))
		//MMC_DBG_INFO_FUNC("dw_mci_wait_unbusy, waiting for......");
	} while (time_before(jiffies, time_loop));
/*
 * Poll STATUS until the card leaves the busy state or a timeout expires.
 * Return: 1 -- status is unbusy; otherwise the card was still busy when
 * the timeout elapsed.
 */
int dw_mci_card_busy(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	unsigned int timeout= SDMMC_DATA_TIMEOUT_SDIO;
	unsigned long time_loop;
	unsigned int status;

	if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
		timeout = SDMMC_DATA_TIMEOUT_EMMC;
	else if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
		timeout = SDMMC_DATA_TIMEOUT_SD;
	/* NOTE(review): debug leftover -- unconditionally overrides the
	 * per-card timeouts chosen above with 250 seconds; confirm intent
	 * before shipping. */
	timeout = 250*1000;//test
	time_loop = jiffies + msecs_to_jiffies(timeout);

	MMC_DBG_INFO_FUNC(host->mmc, "line%d: dw_mci_wait_unbusy,timeloop=%lu, status=0x%x ",
		__LINE__, time_loop, mci_readl(host, STATUS));
		status = mci_readl(host, STATUS);
		if (!(status & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY))){
			ret = 1;/* card is unbusy */
		//MMC_DBG_INFO_FUNC("dw_mci_wait_unbusy, waiting for......");
	} while (time_before(jiffies, time_loop));
	MMC_DBG_INFO_FUNC(host->mmc, "line%d: dw_mci_wait_unbusy,ret=%d, status=0x%x ",
		__LINE__,ret,mci_readl(host, STATUS));
/*
 * Start one command of a request on the given slot: select the slot,
 * wait for the card to go idle, reset per-request bookkeeping, program
 * byte/block counts for data, prepare the CMD register bits (adding the
 * INIT sequence on the very first command), submit data, and fire the
 * command. Also pre-computes the stop command bits if the request has one.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
	struct mmc_request *mrq;
	struct mmc_data *data;

	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	host->cur_slot = slot;

#if 0 /* dead code kept for reference; added by xbw, 2014-03-12 */
	/*clean FIFO if it is a new request*/
	if(!(mrq->cmd->opcode & SDMMC_CMD_STOP)) {
		MMC_DBG_INFO_FUNC("%d..%s: reset the ctrl.", __LINE__, __FUNCTION__);
		mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
					SDMMC_CTRL_DMA_RESET));

	dw_mci_wait_unbusy(host);

	host->pending_events = 0;
	host->completed_events = 0;
	host->data_status = 0;

		dw_mci_set_timeout(host);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

		dw_mci_submit_data(host, data);

	dw_mci_start_command(host, cmd, cmdflags);

		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/*
 * Kick off the slot's queued request, preferring the SET_BLOCK_COUNT
 * pre-command (sbc) when present, otherwise the main command.
 */
static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	MMC_DBG_INFO_FUNC(host->mmc, " Begin to start the new request. cmd=%d(arg=0x%x)[%s]", \
		mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
/* must be called with host->lock held */
/*
 * Queue a request on the slot: start it immediately if the host state
 * machine is idle, otherwise append the slot to the host's wait queue.
 */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
		list_add_tail(&slot->queue_node, &host->queue);
/*
 * mmc_host_ops.request entry point: fail fast with -ENOMEDIUM if no card
 * is present, otherwise hand the request to the queueing logic. Presence
 * check and queueing happen under one lock to avoid racing removal.
 */
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.

	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		MMC_DBG_CMD_FUNC(host->mmc, "%d..%s: no card,so reqeuest done, cmd=%d [%s]",\
			__LINE__, __FUNCTION__, mrq->cmd->opcode, mmc_hostname(host->mmc));

		mmc_request_done(mmc, mrq);

	MMC_DBG_CMD_FUNC(host->mmc, "======>\n pull a new request from MMC-frame to dw_mci_queue. cmd=%d(arg=0x%x)[%s]", \
		mrq->cmd->opcode, mrq->cmd->arg, mmc_hostname(host->mmc));

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
/*
 * mmc_host_ops.set_ios: apply bus width, UHS/DDR timing, clock and power
 * state. Waits (bounded) for the controller to be unbusy first, then
 * programs UHS_REG, mirrors ios->clock into the slot, lets the platform
 * hook adjust, reprograms the bus, and finally handles PWREN for
 * power-up/down.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
#ifdef SDMMC_WAIT_FOR_UNBUSY
	unsigned long time_loop = jiffies + msecs_to_jiffies(SDMMC_WAIT_FOR_UNBUSY);

	if(!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)){
		printk("%d..%s: no card. [%s]\n", \
			__LINE__, __FUNCTION__, mmc_hostname(mmc));

	/* NOTE(review): assignment in condition ('ret=') -- ret records
	 * whether the wait timed out; add parentheses or a comment. */
	while (ret=time_before(jiffies, time_loop)) {
		regs = mci_readl(slot->host, STATUS);
		if (!(regs & (SDMMC_STAUTS_DATA_BUSY|SDMMC_STAUTS_MC_BUSY)))

		/* NOTE(review): slot->flags is a bitmap (unsigned long);
		 * printing with %d is a format mismatch -- should be %lu. */
		printk("slot->flags=%d ", slot->flags);
		printk("%d..%s: wait for unbusy timeout.......[%s]\n", \
			__LINE__, __FUNCTION__, mmc_hostname(mmc));

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
		slot->pre_ctype = SDMMC_CTYPE_1BIT;

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode is selected per slot via UHS_REG bit 16+id. */
	if (ios->timing == MMC_TIMING_UHS_DDR50)
		regs |= ((0x1 << slot->id) << 16);
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	switch (ios->power_mode) {
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);

		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);

		/* Power down slot */
		if (slot->host->pdata->setpower)
			slot->host->pdata->setpower(slot->id, 0);
		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
/*
 * mmc_host_ops.get_ro: report write-protect state. Order of precedence:
 * quirk override, platform get_ro callback, WP GPIO, then the
 * controller's WRTPRT register. Returns 1 for read-only, 0 otherwise.
 */
static int dw_mci_get_ro(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;

	/* Use platform get_ro function, else try on board write protect */
	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
	else if (brd->get_ro)
		read_only = brd->get_ro(slot->id);
	else if (gpio_is_valid(slot->wp_gpio))
		read_only = gpio_get_value(slot->wp_gpio);
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");
/*
 * Software-controlled SDIO "card present" toggle: set/clear the PRESENT
 * flag, gate the host clocks accordingly (enable on insert, disable on
 * remove), then trigger card re-detection. SDIO-restricted hosts only.
 */
static int dw_mci_set_sdio_status(struct mmc_host *mmc, int val)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))

	spin_lock_bh(&host->lock);

		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		/* Only (re)enable clocks that are currently off. */
		if(__clk_is_enabled(host->hclk_mmc) == false)
			clk_prepare_enable(host->hclk_mmc);
		if(__clk_is_enabled(host->clk_mmc) == false)
			clk_prepare_enable(host->clk_mmc);

		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		if(__clk_is_enabled(host->clk_mmc) == true)
			clk_disable_unprepare(slot->host->clk_mmc);
		if(__clk_is_enabled(host->hclk_mmc) == true)
			clk_disable_unprepare(slot->host->hclk_mmc);

	spin_unlock_bh(&host->lock);
	/* Debounce of 20ms before rescanning the card. */
	mmc_detect_change(slot->mmc, 20);
/*
 * Report card presence for the MMC core.
 *
 * SDIO slots are purely software-controlled: return the PRESENT flag set
 * by dw_mci_set_sdio_status(). Otherwise precedence is: broken-CD quirk
 * (always present) -> board get_cd() callback -> CD GPIO -> controller
 * CDETECT register. The slot's PRESENT flag is updated under host->lock
 * to match the detected state.
 */
static int dw_mci_get_cd(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* SDIO presence is virtual; see dw_mci_set_sdio_status() */
	if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
		return test_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
	else if (brd->get_cd)
		present = !brd->get_cd(slot->id);
	else if (!IS_ERR_VALUE(gpio_cd))
		/* CDETECT bit is active-low: 0 means a card is inserted */
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))

	spin_lock_bh(&host->lock);
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	spin_unlock_bh(&host->lock);
/*
 * Pulse the eMMC hardware reset line (RST_n) for this slot.
 * Timing margins below deliberately exceed the spec minima.
 */
static void dw_mci_hw_reset(struct mmc_host *mmc)
	struct dw_mci_slot *slot = mmc_priv(mmc);

	/*
	 * According to eMMC spec:
	 * tRstW >= 1us   ; RST_n pulse width
	 * tRSCA >= 200us ; RST_n to Command time
	 * tRSTH >= 1us   ; RST_n high period
	 */
	mci_writel(slot->host, RST_n, 0x1);
	udelay(10); /* 10us pulse, generous margin for marginal eMMC parts */
	mci_writel(slot->host, RST_n, 0x0);
	usleep_range(300, 1000); /* at least 300us (> the 200us tRSCA minimum) */
/*
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled (it only
 * issues the clock-update command when the bit actually has to change).
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
	struct dw_mci *host = slot->host;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);
	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		/* latch the new CLKENA value into the card clock domain */
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
/*
 * Enable or disable the per-slot SDIO interrupt in INTMASK.
 * The SDIO interrupt bit position moved in IP version 2.40a: older
 * versions use bit (16 + slot id), newer ones bit (16 + 8 + slot id).
 */
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (host->verid < DW_MMC_240A)
		sdio_int = SDMMC_INT_SDIO(slot->id);
		sdio_int = SDMMC_INT_SDIO((slot->id) + 8);

		/*
		 * Turn off low power mode if it was enabled. This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | sdio_int));
		mci_writel(host, INTMASK,
			   (int_mask & ~sdio_int));
/*
 * Perform the actual signal-voltage switch (3.3V / 1.8V / 1.2V).
 *
 * For each target voltage the routine: programs the IO-domain regulator
 * via io_domain_regulator_set_voltage(), updates the controller's
 * UHS_REG 1.8V-enable bit, waits for the rail to settle (spec says the
 * regulator output must be stable within 5ms), then re-reads UHS_REG to
 * confirm the switch took effect, warning if it did not.
 * Only supported on controller IP >= 2.40a.
 */
static int dw_mci_do_start_signal_voltage_switch(struct dw_mci *host,
						 struct mmc_ios *ios)
	unsigned int value,uhs_reg;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * of IP version 2.40a and later.
	 */
	if (host->verid < DW_MMC_240A)

	uhs_reg = mci_readl(host, UHS_REG);
	MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: vol=%d.[%s]\n",
		__LINE__, __FUNCTION__,ios->signal_voltage, mmc_hostname(host->mmc));

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ret = io_domain_regulator_set_voltage(host->vmmc, 3300000, 3300000);
		/* regulator_put(host->vmmc) is done in the remove path, not here */
		MMC_DBG_SW_VOL_FUNC(host->mmc,"%s =%dmV set 3.3end, ret=%d \n",
			__func__, regulator_get_voltage(host->vmmc), ret);
		pr_warning("%s: Switching to 3.3V signalling voltage "
			" failed\n", mmc_hostname(host->mmc));
		MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: [%s]\n",__LINE__, __FUNCTION__, mmc_hostname(host->mmc));

		/* leave low-power clock gating off (high-power mode) */
		value = mci_readl(host, CLKENA);
		mci_writel(host,CLKENA , value& ~SDMMC_CLKEN_LOW_PWR);
		mci_writel(host,UHS_REG , uhs_reg & ~SDMMC_UHS_VOLT_REG_18);
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		uhs_reg = mci_readl(host, UHS_REG);
		if( !(uhs_reg & SDMMC_UHS_VOLT_REG_18))
		pr_warning("%s: 3.3V regulator output did not became stable\n",
			mmc_hostname(host->mmc));

	case MMC_SIGNAL_VOLTAGE_180:
		ret = io_domain_regulator_set_voltage(host->vmmc,1800000, 1800000);
		/* regulator_put(host->vmmc) is done in the remove path, not here */
		MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s =%dmV set 1.8end, ret=%d . \n",
			__LINE__, __func__, regulator_get_voltage(host->vmmc), ret);
		pr_warning("%s: Switching to 1.8V signalling voltage "
			" failed\n", mmc_hostname(host->mmc));

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 */
		mci_writel(host,UHS_REG , uhs_reg | SDMMC_UHS_VOLT_REG_18);
		usleep_range(5000, 5500);
		MMC_DBG_SW_VOL_FUNC(host->mmc,"%d..%s: .[%s]\n",__LINE__, __FUNCTION__,mmc_hostname(host->mmc));

		/* 1.8V regulator output should be stable within 5 ms */
		uhs_reg = mci_readl(host, UHS_REG);
		if( uhs_reg & SDMMC_UHS_VOLT_REG_18){
		pr_warning("%s: 1.8V regulator output did not became stable\n",
			mmc_hostname(host->mmc));

	case MMC_SIGNAL_VOLTAGE_120:
		ret = io_domain_regulator_set_voltage(host->vmmc, 1200000, 1200000);
		pr_warning("%s: Switching to 1.2V signalling voltage "
			" failed\n", mmc_hostname(host->mmc));

		/* No signal voltage switch required */
/*
 * mmc_host_ops entry point for signal-voltage switching; thin wrapper
 * around dw_mci_do_start_signal_voltage_switch(). Bails out early on
 * controller IP older than 2.40a, which lacks voltage-switch support.
 * (Runtime-PM bracketing is intentionally left disabled here.)
 */
static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
					      struct mmc_ios *ios)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	if (host->verid < DW_MMC_240A)
	/* sdhci_runtime_pm_get(host); -- runtime PM bracketing disabled */
	err = dw_mci_do_start_signal_voltage_switch(host, ios);
	/* sdhci_runtime_pm_put(host); */
/*
 * mmc_host_ops tuning entry point (CMD19/CMD21).
 *
 * Selects the 4-bit or 8-bit standard tuning block pattern based on the
 * opcode and current bus width, fills in the variant-specific
 * dw_mci_tuning_data (clock connection id and tuning type), and
 * delegates the actual sampling-phase sweep to the drv_data
 * execute_tuning() callback.
 */
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct dw_mci_tuning_data tuning_data;

	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
			"Undefined command(%d) for tuning\n", opcode);

	/*
	 * NOTE(review): temporary hard-coded mapping of card type to clock
	 * connection id (eMMC=3, SDIO=1, other=0); should come from DT.
	 */
	if (mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC)
		tuning_data.con_id = 3;
	else if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
		tuning_data.con_id = 1;
		tuning_data.con_id = 0;
	tuning_data.tuning_type = 1; /* 0 -> drive phase, 1 -> sample phase */

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/* Host operations handed to the MMC core for every slot. */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.set_sdio_status	= dw_mci_set_sdio_status,
	.hw_reset		= dw_mci_hw_reset,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	/* voltage switch and card_busy hooks intentionally not wired up yet */
	/* .start_signal_voltage_switch = dw_mci_start_signal_voltage_switch, */
	/* .card_busy = dw_mci_card_busy, */
/*
 * Enable or disable the host's IRQ line, tracking the current state in
 * host->irq_state so repeated calls with the same flag are no-ops
 * (keeps the enable_irq()/disable_irq() nesting depth balanced).
 * Runs with local interrupts disabled while flipping the state.
 */
static void dw_mci_enable_irq(struct dw_mci *host, bool irqflag)
	unsigned long flags;

	local_irq_save(flags);
	if(host->irq_state != irqflag)
		host->irq_state = irqflag;
			enable_irq(host->irq);
			disable_irq(host->irq);
	local_irq_restore(flags);
/*
 * Post-transfer cleanup for write transfers: classify late data errors
 * (CRC -> -EILSEQ, end-bit -> -ETIMEDOUT) and wait for the controller's
 * data path to go idle before the request is completed.
 * CMD14 (BUS_TEST_W) is exempt from error classification since bus-test
 * patterns are expected to produce CRC errors.
 */
static void dw_mci_deal_data_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
	if(DW_MCI_SEND_STATUS == host->dir_status){
		if( MMC_BUS_TEST_W != host->cmd->opcode){
			if(host->data_status & SDMMC_INT_DCRC)
				host->data->error = -EILSEQ;
			else if(host->data_status & SDMMC_INT_EBE)
				host->data->error = -ETIMEDOUT;
			dw_mci_wait_unbusy(host);
		dw_mci_wait_unbusy(host);
/*
 * Complete the current request and start the next queued one, if any.
 *
 * Called with host->lock held; temporarily drops it around
 * mmc_request_done() so the core's completion handler cannot deadlock
 * against us (hence the __releases/__acquires annotations).
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
	struct dw_mci_slot *slot;
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	/* settle the data path before declaring the request done */
	dw_mci_deal_data_end(host, mrq);
	MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, cmderr=%d, host->state=%d [%s]",\
		mrq->cmd->opcode,mrq->cmd->error, host->state,mmc_hostname(host->mmc));
	MMC_DBG_CMD_FUNC(host->mmc, " reqeust end--reqeuest done, cmd=%d, dataerr=%d, host->state=%d [%s]",\
		mrq->cmd->opcode,mrq->data->error, host->state, mmc_hostname(host->mmc));

	host->cur_slot->mrq = NULL;
	/* dispatch the next waiting slot, or fall back to idle */
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		MMC_DBG_CMD_FUNC(host->mmc, " list is not empty. run the request in list. [%s]", mmc_hostname(host->mmc));
		dw_mci_start_request(host, slot);
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;

	/* drop the lock while the core runs its completion callback */
	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
/*
 * Read back the command response and translate raw interrupt status into
 * a cmd->error code.
 *
 * R2 (136-bit) responses arrive in RESP0..RESP3 with RESP3 holding the
 * most significant word, hence the reversed resp[] indexing. Errors:
 * response timeout -> -ETIMEDOUT (with extra accounting for SDIO and a
 * cmd_rto retry cap), response CRC -> -EILSEQ, response error -> -EIO.
 */
static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* R2: RESP3 is the most significant word */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
			MMC_DBG_INFO_FUNC(host->mmc,"Line%d: command complete cmd=%d,resp[3]=0x%x, resp[2]=0x%x,resp[1]=0x%x,resp[0]=0x%x.[%s]", \
				__LINE__,cmd->opcode,cmd->resp[3], cmd->resp[2], cmd->resp[1], cmd->resp[0], mmc_hostname(host->mmc));
			cmd->resp[0] = mci_readl(host, RESP0);
			MMC_DBG_INFO_FUNC(host->mmc, "Line%d: command complete cmd=%d,resp[0]=0x%x. [%s]",\
				__LINE__,cmd->opcode, cmd->resp[0], mmc_hostname(host->mmc));

	if (status & SDMMC_INT_RTO)
		if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)

	MMC_DBG_CMD_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",cmd->opcode, cmd->error,mmc_hostname(host->mmc));

	/* CMD13 polling is exempt from the response-timeout retry cap */
	if(MMC_SEND_STATUS != cmd->opcode)
		if(host->cmd_rto >= SDMMC_CMD_RTO_MAX_HOLD){
			MMC_DBG_ERR_FUNC(host->mmc, " command complete, cmd=%d,cmdError=%d [%s]",\
				cmd->opcode, cmd->error,mmc_hostname(host->mmc));

		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/*
 * Bottom-half state machine driving a request to completion.
 *
 * Runs under host->lock and loops until the state stops changing,
 * consuming events set by the interrupt handler:
 *   SENDING_CMD  -> handle command completion (incl. sbc pre-command),
 *                   issue stop/abort on error, or move to SENDING_DATA
 *   SENDING_DATA -> on data error, attempt recovery: for single-block
 *                   transfers manually issue CMD12 via the CMD register
 *                   and poll (<=500ms) for the controller to accept it;
 *                   otherwise wait for transfer completion
 *   DATA_BUSY    -> classify data errors (DRTO/DCRC/EBE), reset the
 *                   FIFO after errors, account bytes_xfered, then either
 *                   finish the request or send the stop command
 *   SENDING_STOP -> complete the stop command and finish the request
 *   DATA_ERROR   -> wait for transfer completion, then re-enter DATA_BUSY
 */
static void dw_mci_tasklet_func(unsigned long priv)
	struct dw_mci *host = (struct dw_mci *)priv;
	struct dw_mci_slot *slot = mmc_priv(host->mmc);
	struct mmc_data	*data;
	struct mmc_command *cmd;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	u32 status, ctrl, cmd_flags;
	unsigned long timeout = 0;

	spin_lock(&host->lock);

	state = host->state;

	case STATE_SENDING_CMD:
		if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
					&host->pending_events))

		set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
		dw_mci_command_complete(host, cmd);
		/* sbc (CMD23) done: now launch the actual data command */
		if (cmd == host->mrq->sbc && !cmd->error) {
			prev_state = state = STATE_SENDING_CMD;
			__dw_mci_start_request(host, host->cur_slot,

		if (cmd->data && cmd->error) {
			dw_mci_stop_dma(host);
				send_stop_cmd(host, data);
				state = STATE_SENDING_STOP;
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;

		if (!host->mrq->data || cmd->error) {
			dw_mci_request_end(host, host->mrq);

		prev_state = state = STATE_SENDING_DATA;

	case STATE_SENDING_DATA:
		if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
			dw_mci_stop_dma(host);
				send_stop_cmd(host, data);
				/* single block read/write: send stop cmd manually to prevent host controller halt */
				MMC_DBG_INFO_FUNC(host->mmc, "%s status 1 0x%08x [%s]\n",
					__func__, mci_readl(host, STATUS), mmc_hostname(host->mmc));

				mci_writel(host, CMDARG, 0);
				cmd_flags = SDMMC_CMD_STOP |SDMMC_CMD_RESP_CRC|SDMMC_CMD_RESP_EXP|MMC_STOP_TRANSMISSION;
				if(host->mmc->hold_reg_flag)
					cmd_flags |= SDMMC_CMD_USE_HOLD_REG;
				mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);

				/* poll up to 500ms for the controller to accept the manual CMD12 */
				timeout = jiffies + msecs_to_jiffies(500);
				/* NOTE(review): assignment-in-condition is intentional here, but
				 * `while (ret = ...)` deserves parentheses per kernel style */
				while(ret = time_before(jiffies, timeout)){
					if(!(mci_readl(host, CMD) & SDMMC_CMD_START))
				MMC_DBG_ERR_FUNC(host->mmc, "%s EVENT_DATA_ERROR recovery failed!!! [%s]\n",
					__func__, mmc_hostname(host->mmc));
				send_stop_abort(host, data);
			state = STATE_DATA_ERROR;

		MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_XFER_COMPLETE.[%s]",\
			prev_state,state, mmc_hostname(host->mmc));
		if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
					&host->pending_events))
		MMC_DBG_INFO_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_SENDING_DATA, wait for EVENT_DATA_COMPLETE. [%s]",\
			prev_state,state,mmc_hostname(host->mmc));
		set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
		prev_state = state = STATE_DATA_BUSY;

	case STATE_DATA_BUSY:
		if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
					&host->pending_events))

		dw_mci_deal_data_end(host, host->mrq);
		MMC_DBG_INFO_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: STATE_DATA_BUSY, after EVENT_DATA_COMPLETE. [%s]", \
			prev_state,state,mmc_hostname(host->mmc));

		set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
		status = host->data_status;

		if (status & DW_MCI_DATA_ERROR_FLAGS) {
			if((SDMMC_CTYPE_1BIT != slot->ctype)&&(MMC_SEND_EXT_CSD == host->mrq->cmd->opcode))
			MMC_DBG_ERR_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: DW_MCI_DATA_ERROR_FLAGS,datastatus=0x%x [%s]",\
				prev_state,state, status, mmc_hostname(host->mmc));

			if (status & SDMMC_INT_DRTO) {
				data->error = -ETIMEDOUT;
			} else if (status & SDMMC_INT_DCRC) {
				data->error = -EILSEQ;
			} else if (status & SDMMC_INT_EBE &&
				   DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred will
				 * be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;

			/*
			 * After an error, there may be data lingering
			 * in the FIFO, so reset it - doing so
			 * generates a block interrupt, hence setting
			 * the scatter-gather pointer to NULL.
			 */
			dw_mci_fifo_reset(host);
			data->bytes_xfered = data->blocks * data->blksz;

			MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: no stop and no dataerr, exit. [%s]", \
				prev_state,state,mmc_hostname(host->mmc));
			dw_mci_request_end(host, host->mrq);
		MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to stop . [%s]", \
			prev_state,state,mmc_hostname(host->mmc));

		/* with sbc (CMD23) the card auto-stops; no CMD12 needed */
		if (host->mrq->sbc && !data->error) {
			data->stop->error = 0;
			MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: have stop and sbc, exit. [%s]", \
				prev_state,state,mmc_hostname(host->mmc));
			dw_mci_request_end(host, host->mrq);

		prev_state = state = STATE_SENDING_STOP;
			send_stop_cmd(host, data);
		if (data->stop && !data->error) {
			/* stop command for open-ended transfer*/
			send_stop_abort(host, data);
		MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to STATE_SENDING_STOP . [%s]", \
			prev_state,state,mmc_hostname(host->mmc));

	case STATE_SENDING_STOP:
		if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
					&host->pending_events))
		MMC_DBG_CMD_FUNC(host->mmc, "Pre-state[%d]-->NowState[%d]: begin to send cmd12 . [%s]", \
			prev_state,state,mmc_hostname(host->mmc));

		/* CMD error in data command */
		if (host->mrq->cmd->error && host->mrq->data) {
			dw_mci_fifo_reset(host);

			dw_mci_command_complete(host, host->mrq->stop);
		if (host->mrq->stop)
			dw_mci_command_complete(host, host->mrq->stop);
			host->cmd_status = 0;

		dw_mci_request_end(host, host->mrq);

	case STATE_DATA_ERROR:
		if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
					&host->pending_events))
		state = STATE_DATA_BUSY;

	} while (state != prev_state);

	host->state = state;
	spin_unlock(&host->lock);
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
	/* stash the sub-FIFO-width tail until enough bytes accumulate */
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
/* append bytes to part_buf, only use during push; returns bytes consumed */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
	/* cap at whatever is left before part_buf reaches one FIFO word */
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
/* pull first bytes from part_buf, only use during pull; returns bytes copied */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
	cnt = min(cnt, (int)host->part_buf_count);
	memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
	/* advance the consume cursor within part_buf */
	host->part_buf_count -= cnt;
	host->part_buf_start += cnt;
/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
	memcpy(buf, &host->part_buf, cnt);
	/* remainder of the freshly-read FIFO word stays buffered */
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
/*
 * PIO push for a 16-bit FIFO: drain any buffered partial word first,
 * then write whole 16-bit words (via an aligned bounce buffer when the
 * source is odd-aligned and the arch lacks efficient unaligned access),
 * and finally stash the odd trailing byte in part_buf — flushing it
 * immediately if this completes the expected transfer length.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
	struct mmc_data *data = host->data;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
			host->part_buf_count = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		u16 aligned_buf[64];
		int len = min(cnt & -2, (int)sizeof(aligned_buf));
		int items = len >> 1;
		/* memcpy from input buffer into aligned buffer */
		memcpy(aligned_buf, buf, len);
		/* push data from aligned buffer into fifo */
		for (i = 0; i < items; ++i)
			mci_writew(host, DATA(host->data_offset),
	for (; cnt >= 2; cnt -= 2)
		mci_writew(host, DATA(host->data_offset), *pdata++);
	/* put anything remaining in the part_buf */
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit FIFO: read whole 16-bit words (bouncing through
 * an aligned buffer for odd-aligned destinations on arches without
 * efficient unaligned access), then read one final word and keep its
 * unconsumed byte in part_buf for the next call.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		/* pull data from fifo into aligned buffer */
		u16 aligned_buf[64];
		int len = min(cnt & -2, (int)sizeof(aligned_buf));
		int items = len >> 1;
		for (i = 0; i < items; ++i)
			aligned_buf[i] = mci_readw(host,
						   DATA(host->data_offset));
		/* memcpy from aligned buffer into output buffer */
		memcpy(buf, aligned_buf, len);
	for (; cnt >= 2; cnt -= 2)
		*pdata++ = mci_readw(host, DATA(host->data_offset));
	/* trailing byte: read a full word, buffer the leftover */
	host->part_buf16 = mci_readw(host, DATA(host->data_offset));
	dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit FIFO. Same structure as dw_mci_push_data16:
 * drain part_buf, write aligned 32-bit words (bounce buffer for
 * misaligned sources when needed), buffer the 1-3 trailing bytes, and
 * flush them if the transfer is now complete.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
	struct mmc_data *data = host->data;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
			host->part_buf_count = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		u32 aligned_buf[32];
		int len = min(cnt & -4, (int)sizeof(aligned_buf));
		int items = len >> 2;
		/* memcpy from input buffer into aligned buffer */
		memcpy(aligned_buf, buf, len);
		/* push data from aligned buffer into fifo */
		for (i = 0; i < items; ++i)
			mci_writel(host, DATA(host->data_offset),
	for (; cnt >= 4; cnt -= 4)
		mci_writel(host, DATA(host->data_offset), *pdata++);
	/* put anything remaining in the part_buf */
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit FIFO. Mirrors dw_mci_pull_data16 with 4-byte
 * words: aligned bulk reads (bounced when the destination is
 * misaligned), then a final word whose unconsumed bytes are kept in
 * part_buf.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		/* pull data from fifo into aligned buffer */
		u32 aligned_buf[32];
		int len = min(cnt & -4, (int)sizeof(aligned_buf));
		int items = len >> 2;
		for (i = 0; i < items; ++i)
			aligned_buf[i] = mci_readl(host,
						   DATA(host->data_offset));
		/* memcpy from aligned buffer into output buffer */
		memcpy(buf, aligned_buf, len);
	for (; cnt >= 4; cnt -= 4)
		*pdata++ = mci_readl(host, DATA(host->data_offset));
	/* trailing bytes: read a full word, buffer the leftover */
	host->part_buf32 = mci_readl(host, DATA(host->data_offset));
	dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 64-bit FIFO. Same structure as the 16/32-bit variants
 * with 8-byte words: drain part_buf, bulk-write aligned words (bounce
 * buffer for misaligned sources), buffer the trailing 1-7 bytes and
 * flush them once the expected transfer length is reached.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
	struct mmc_data *data = host->data;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
			host->part_buf_count = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		u64 aligned_buf[16];
		int len = min(cnt & -8, (int)sizeof(aligned_buf));
		int items = len >> 3;
		/* memcpy from input buffer into aligned buffer */
		memcpy(aligned_buf, buf, len);
		/* push data from aligned buffer into fifo */
		for (i = 0; i < items; ++i)
			mci_writeq(host, DATA(host->data_offset),
	for (; cnt >= 8; cnt -= 8)
		mci_writeq(host, DATA(host->data_offset), *pdata++);
	/* put anything remaining in the part_buf */
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
/*
 * PIO pull for a 64-bit FIFO. Mirrors the 16/32-bit pull variants with
 * 8-byte words; the final partially-consumed word lands in part_buf.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		/* pull data from fifo into aligned buffer */
		u64 aligned_buf[16];
		int len = min(cnt & -8, (int)sizeof(aligned_buf));
		int items = len >> 3;
		for (i = 0; i < items; ++i)
			aligned_buf[i] = mci_readq(host,
						   DATA(host->data_offset));
		/* memcpy from aligned buffer into output buffer */
		memcpy(buf, aligned_buf, len);
	for (; cnt >= 8; cnt -= 8)
		*pdata++ = mci_readq(host, DATA(host->data_offset));
	/* trailing bytes: read a full word, buffer the leftover */
	host->part_buf = mci_readq(host, DATA(host->data_offset));
	dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * Width-independent pull entry point: satisfy the request from buffered
 * partial-word bytes first, then delegate to the width-specific
 * pull_data implementation selected at probe time.
 */
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
2255 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2257 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2259 unsigned int offset;
2260 struct mmc_data *data = host->data;
2261 int shift = host->data_shift;
2264 unsigned int remain, fcnt;
2266 if(!host->mmc->bus_refs){
2267 printk("Note: %s host->mmc->bus_refs is 0!!!\n",__func__,host->mmc->bus_refs);
2271 if (!sg_miter_next(sg_miter))
2274 host->sg = sg_miter->piter.sg;
2275 buf = sg_miter->addr;
2276 remain = sg_miter->length;
2280 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2281 << shift) + host->part_buf_count;
2282 len = min(remain, fcnt);
2285 dw_mci_pull_data(host, (void *)(buf + offset), len);
2286 data->bytes_xfered += len;
2291 sg_miter->consumed = offset;
2292 status = mci_readl(host, MINTSTS);
2293 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2294 /* if the RXDR is ready read again */
2295 } while ((status & SDMMC_INT_RXDR) ||
2296 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2299 if (!sg_miter_next(sg_miter))
2301 sg_miter->consumed = 0;
2303 sg_miter_stop(sg_miter);
2307 sg_miter_stop(sg_miter);
2311 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2314 static void dw_mci_write_data_pio(struct dw_mci *host)
2316 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2318 unsigned int offset;
2319 struct mmc_data *data = host->data;
2320 int shift = host->data_shift;
2323 unsigned int fifo_depth = host->fifo_depth;
2324 unsigned int remain, fcnt;
2326 if(!host->mmc->bus_refs){
2327 printk("Note: %s host->mmc->bus_refs is 0!!!\n",__func__,host->mmc->bus_refs);
2332 if (!sg_miter_next(sg_miter))
2335 host->sg = sg_miter->piter.sg;
2336 buf = sg_miter->addr;
2337 remain = sg_miter->length;
2341 fcnt = ((fifo_depth -
2342 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2343 << shift) - host->part_buf_count;
2344 len = min(remain, fcnt);
2347 host->push_data(host, (void *)(buf + offset), len);
2348 data->bytes_xfered += len;
2353 sg_miter->consumed = offset;
2354 status = mci_readl(host, MINTSTS);
2355 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2356 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2359 if (!sg_miter_next(sg_miter))
2361 sg_miter->consumed = 0;
2363 sg_miter_stop(sg_miter);
2367 sg_miter_stop(sg_miter);
2371 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Latch command-done status from the IRQ handler (keeping the first
 * status seen if one is already pending) and kick the tasklet to run
 * the request state machine.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
	if (!host->cmd_status)
		host->cmd_status = status;

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
/*
 * Top-half interrupt handler.
 *
 * Reads the masked interrupt status (MINTSTS), acknowledges each
 * handled source in RINTSTS, records cmd/data status for the tasklet,
 * services PIO RX/TX data requests inline, dispatches per-slot SDIO
 * interrupts to the MMC core, queues card-detect work, and (with IDMAC)
 * completes DMA descriptor interrupts. Heavy lifting is deferred to
 * dw_mci_tasklet_func() via tasklet_schedule().
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
	struct dw_mci *host = dev_id;
	u32 pending, sdio_int;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured: fake a DATA_OVER when the FIFO already drained.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;

	if (pending & DW_MCI_CMD_ERROR_FLAGS) {
		mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
		host->cmd_status = pending;
		MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s cmd_status INT=0x%x,[%s]",
			__LINE__, __FUNCTION__,host->cmd_status,mmc_hostname(host->mmc));
		set_bit(EVENT_CMD_COMPLETE, &host->pending_events);

	if (pending & DW_MCI_DATA_ERROR_FLAGS) {
		/* if there is an error report DATA_ERROR */
		mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
		host->data_status = pending;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		MMC_DBG_INFO_FUNC(host->mmc,"Line%d..%s data_status INT=0x%x,[%s]",
			__LINE__, __FUNCTION__,host->data_status,mmc_hostname(host->mmc));
		tasklet_schedule(&host->tasklet);

	if (pending & SDMMC_INT_DATA_OVER) {
		mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
		MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_DATA_OVER, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
		if (!host->data_status)
			host->data_status = pending;
		/* for PIO reads, drain whatever is left in the FIFO now */
		if (host->dir_status == DW_MCI_RECV_STATUS) {
			if (host->sg != NULL)
				dw_mci_read_data_pio(host, true);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);

	if (pending & SDMMC_INT_RXDR) {
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
		if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
			dw_mci_read_data_pio(host, false);

	if (pending & SDMMC_INT_TXDR) {
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
		if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
			dw_mci_write_data_pio(host);

	/* voltage-switch interrupt completes like a command */
	if (pending & SDMMC_INT_VSI) {
		MMC_DBG_SW_VOL_FUNC(host->mmc, "SDMMC_INT_VSI, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
		mci_writel(host, RINTSTS, SDMMC_INT_VSI);
		dw_mci_cmd_interrupt(host, pending);

	if (pending & SDMMC_INT_CMD_DONE) {
		MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_CMD_DONE, INT-pending=0x%x. [%s]",pending,mmc_hostname(host->mmc));
		mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
		dw_mci_cmd_interrupt(host, pending);

	if (pending & SDMMC_INT_CD) {
		mci_writel(host, RINTSTS, SDMMC_INT_CD);
		/* card insertion/removal handled in process context */
		queue_work(host->card_workqueue, &host->card_work);

	if (pending & SDMMC_INT_HLE) {
		mci_writel(host, RINTSTS, SDMMC_INT_HLE);
		MMC_DBG_CMD_FUNC(host->mmc, "SDMMC_INT_HLE INT-pending=0x%x. [%s]\n",pending,mmc_hostname(host->mmc));

	/* Handle SDIO Interrupts */
	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		/* SDIO interrupt bit moved up by 8 as of IP 2.40a */
		if (host->verid < DW_MMC_240A)
			sdio_int = SDMMC_INT_SDIO(i);
			sdio_int = SDMMC_INT_SDIO(i + 8);

		if (pending & sdio_int) {
			mci_writel(host, RINTSTS, sdio_int);
			mmc_signal_sdio_irq(slot->mmc);

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
/*
 * Card-detect workqueue handler.
 *
 * For each slot whose presence changed: wake the system (removable
 * media can be a wakeup source), record the new state, then tear down
 * any in-flight or queued request with -ENOMEDIUM — completing the
 * active request according to the state machine's current phase — and
 * reset the FIFO (and IDMAC, if configured) before notifying the MMC
 * core via mmc_detect_change(). The while-loop re-reads presence so a
 * bounce during processing is not missed.
 */
static void dw_mci_work_routine_card(struct work_struct *work)
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;

		present = dw_mci_get_cd(mmc);
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");
			MMC_DBG_BOOT_FUNC(mmc, "  The card is %s.  ===!!!!!!==[%s]\n",
				present ? "inserted" : "removed.", mmc_hostname(mmc));

			rk_send_wakeup_key(); /* card change may wake the system */
			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Clean up queue if present */
				if (mrq == host->mrq) {
					/* active request: fail it according to its phase */
					switch (host->state) {
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
					case STATE_SENDING_STOP:
						mrq->stop->error = -ENOMEDIUM;

					dw_mci_request_end(host, mrq);
					/* queued-but-not-started request: fail it outright */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
						mrq->data->error = -ENOMEDIUM;
						mrq->stop->error = -ENOMEDIUM;
					MMC_DBG_CMD_FUNC(host->mmc, "dw_mci_work--reqeuest done, cmd=%d [%s]",mrq->cmd->opcode, mmc_hostname(mmc));

					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);

			/* Power down slot */
				/* Clear down the FIFO */
				dw_mci_fifo_reset(host);
#ifdef CONFIG_MMC_DW_IDMAC
				dw_mci_idmac_reset(host);

			spin_unlock_bh(&host->lock);
			/* re-check in case presence bounced while we worked */
			present = dw_mci_get_cd(mmc);

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
2600 /* given a slot id, find out the device node representing that slot */
2601 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2603 struct device_node *np;
2607 if (!dev || !dev->of_node)
/* Match the child node whose "reg" property equals the slot id. */
2610 for_each_child_of_node(dev->of_node, np) {
2611 addr = of_get_property(np, "reg", &len);
2612 if (!addr || (len < sizeof(int)))
2614 if (be32_to_cpup(addr) == slot)
/* Per-slot DT quirk table: device-tree property name -> quirk flag. */
2620 static struct dw_mci_of_slot_quirks {
2623 } of_slot_quirks[] = {
2625 .quirk = "disable-wp",
2626 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/* Collect quirk flags for one slot by probing each known quirk
 * property on the slot's device-tree node. */
2630 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2632 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2637 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
2638 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
2639 quirks |= of_slot_quirks[idx].id;
2644 /* find out bus-width for a given slot */
2645 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
/* NOTE(review): reads "bus-width" from the controller node itself;
 * the per-slot child-node lookup has been commented out upstream. */
2647 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
2653 if (of_property_read_u32(np, "bus-width", &bus_wd))
2654 dev_err(dev, "bus-width property not found, assuming width"
2660 /* find the pwr-en gpio for a given slot; or -1 if none specified */
2661 static int dw_mci_of_get_pwr_en_gpio(struct device *dev, u8 slot)
2663 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
2669 gpio = of_get_named_gpio(np, "pwr-gpios", 0);
2671 /* Having a missing entry is valid; return silently */
2672 if (!gpio_is_valid(gpio))
2675 if (devm_gpio_request(dev, gpio, "dw-mci-pwr_en")) {
2676 dev_warn(dev, "gpio [%d] request failed\n", gpio);
/* Drive low initially — per the original comment, 0 enables power. */
2680 gpio_direction_output(gpio, 0);//set 0 to pwr-en
2686 /* find the write protect gpio for a given slot; or -1 if none specified */
2687 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2689 static struct device_node *np is looked up per slot below. */
2709 /* find the cd gpio for a given slot */
2710 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2711 struct mmc_host *mmc)
2713 struct device_node *np = dev->of_node;//dw_mci_of_find_slot_node(dev, slot);
2719 gpio = of_get_named_gpio(np, "cd-gpios", 0);
2721 /* Having a missing entry is valid; return silently */
2722 if (!gpio_is_valid(gpio))
/* Register with the MMC core's slot-gpio helper (debounce = 0). */
2725 if (mmc_gpio_request_cd(mmc, gpio, 0))
2726 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2728 #else /* CONFIG_OF */
/* Stub implementations used when the kernel is built without
 * device-tree support; bodies (elided here) return neutral values. */
2729 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
2733 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
2737 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
2741 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2745 static void dw_mci_of_get_cd_gpio(struct device *dev, u8 slot,
2746 struct mmc_host *mmc)
2750 #endif /* CONFIG_OF */
/*
 * dw_mci_init_slot - allocate and register one mmc_host slot.
 *
 * Allocates the mmc_host, reads per-slot/controller DT properties
 * (frequency range, card-type restrictions, bus width, caps),
 * configures OCR mask, power/WP GPIOs and the vmmc regulator, then
 * registers the host with the MMC core and applies pinctrl states.
 *
 * FIX 1: strstr() takes (haystack, needle); the original
 *        strstr("mmc0", mmc_hostname(mmc)) searched for the hostname
 *        inside the literal "mmc0" — argument order corrected so the
 *        version banner prints only for the mmc0 host.
 * FIX 2: after looking up host->pins_idle, the original checked
 *        IS_ERR(host->pins_default) (not yet assigned at that point);
 *        corrected to check host->pins_idle.
 *
 * NOTE(review): this chunk is sampled — some interior lines are
 * elided; only the two fixes above change visible tokens.
 */
2752 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2754 struct mmc_host *mmc;
2755 struct dw_mci_slot *slot;
2756 const struct dw_mci_drv_data *drv_data = host->drv_data;
2761 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2765 slot = mmc_priv(mmc);
2769 host->slot[id] = slot;
2772 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2774 mmc->ops = &dw_mci_ops;
/* Clock range: DT "clock-freq-min-max" overrides the driver defaults. */
2776 if (of_property_read_u32_array(host->dev->of_node,
2777 "clock-freq-min-max", freq, 2)) {
2778 mmc->f_min = DW_MCI_FREQ_MIN;
2779 mmc->f_max = DW_MCI_FREQ_MAX;
2781 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
2782 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
2784 mmc->f_min = freq[0];
2785 mmc->f_max = freq[1];
2787 printk("%d..%s: fmin=%d, fmax=%d [%s]\n", __LINE__,__FUNCTION__,
2788 mmc->f_min, mmc->f_max, mmc_hostname(mmc));
/* FIX 1: correct strstr() argument order (haystack, needle). */
2791 if(strstr(mmc_hostname(mmc), "mmc0"))
2792 printk("Line%d..%s: The rk_sdmmc %s",__LINE__, __FUNCTION__,RK_SDMMC_DRIVER_VERSION);
/* Card-type restriction flags from the controller's DT node. */
2794 if (of_find_property(host->dev->of_node, "supports-sd", NULL))
2795 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SD;
2796 if (of_find_property(host->dev->of_node, "supports-sdio", NULL))
2797 mmc->restrict_caps |= RESTRICT_CARD_TYPE_SDIO;
2798 if (of_find_property(host->dev->of_node, "supports-emmc", NULL))
2799 mmc->restrict_caps |= RESTRICT_CARD_TYPE_EMMC;
2801 if (host->pdata->get_ocr)
2802 mmc->ocr_avail = host->pdata->get_ocr(id);
/* Default OCR: advertise the full 1.65V-3.6V range. */
2805 mmc->ocr_avail = MMC_VDD_27_28| MMC_VDD_28_29| MMC_VDD_29_30| MMC_VDD_30_31
2806 | MMC_VDD_31_32| MMC_VDD_32_33| MMC_VDD_33_34| MMC_VDD_34_35| MMC_VDD_35_36
2807 | MMC_VDD_26_27| MMC_VDD_25_26| MMC_VDD_24_25| MMC_VDD_23_24
2808 | MMC_VDD_22_23| MMC_VDD_21_22| MMC_VDD_20_21| MMC_VDD_165_195;
2812 * Start with slot power disabled, it will be enabled when a card
2815 if (host->pdata->setpower)
2816 host->pdata->setpower(id, 0);
2818 if (host->pdata->caps)
2819 mmc->caps = host->pdata->caps;
2821 if (host->pdata->pm_caps)
2822 mmc->pm_caps = host->pdata->pm_caps;
/* Controller index: DT "mshc" alias, else platform-device id. */
2824 if (host->dev->of_node) {
2825 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2829 ctrl_id = to_platform_device(host->dev)->id;
2831 if (drv_data && drv_data->caps)
2832 mmc->caps |= drv_data->caps[ctrl_id];
2833 if (drv_data && drv_data->hold_reg_flag)
2834 mmc->hold_reg_flag |= drv_data->hold_reg_flag[ctrl_id];
2836 //set the compatibility of driver.
2837 mmc->caps |= MMC_CAP_UHS_SDR12|MMC_CAP_UHS_SDR25|MMC_CAP_UHS_SDR50|MMC_CAP_UHS_SDR104|MMC_CAP_ERASE;
2839 if (host->pdata->caps2)
2840 mmc->caps2 = host->pdata->caps2;
/* Bus width: platform callback first, then DT, default elided here. */
2842 if (host->pdata->get_bus_wd)
2843 bus_width = host->pdata->get_bus_wd(slot->id);
2844 else if (host->dev->of_node)
2845 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2849 switch (bus_width) {
2851 mmc->caps |= MMC_CAP_8_BIT_DATA;
2853 mmc->caps |= MMC_CAP_4_BIT_DATA;
2855 if (of_find_property(host->dev->of_node, "cap-power-off-card", NULL))
2856 mmc->caps |= MMC_CAP_POWER_OFF_CARD;
2857 if (of_find_property(host->dev->of_node, "cap-sdio-irq", NULL))
2858 mmc->caps |= MMC_CAP_SDIO_IRQ;
2859 if (of_find_property(host->dev->of_node, "poll-hw-reset", NULL))
2860 mmc->caps |= MMC_CAP_HW_RESET;
2861 if (of_find_property(host->dev->of_node, "full-pwr-cycle", NULL))
2862 mmc->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
2863 if (of_find_property(host->dev->of_node, "keep-power-in-suspend", NULL))
2864 mmc->pm_caps |= MMC_PM_KEEP_POWER;
2865 if (of_find_property(host->dev->of_node, "ignore-pm-notify", NULL))
2866 mmc->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
2867 if (of_find_property(host->dev->of_node, "enable-sdio-wakeup", NULL))
2868 mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
2870 /*Assign pm_caps pass to pm_flags*/
2871 mmc->pm_flags = mmc->pm_caps;
2873 if (host->pdata->blk_settings) {
2874 mmc->max_segs = host->pdata->blk_settings->max_segs;
2875 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2876 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2877 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2878 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2880 /* Useful defaults if platform data is unset. */
2881 #ifdef CONFIG_MMC_DW_IDMAC
2882 mmc->max_segs = host->ring_size;
2883 mmc->max_blk_size = 65536;
2884 mmc->max_blk_count = host->ring_size;
2885 mmc->max_seg_size = 0x1000;
2886 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2889 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2890 mmc->max_blk_count = 512;
2891 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2892 mmc->max_seg_size = mmc->max_req_size;
2893 #endif /* CONFIG_MMC_DW_IDMAC */
2896 slot->pwr_en_gpio = dw_mci_of_get_pwr_en_gpio(host->dev, slot->id);
2898 if (!(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD))//(gpio_is_valid(slot->pwr_en_gpio))
/* Only the SD-card controller uses the vmmc regulator here. */
2903 if(mmc->restrict_caps & RESTRICT_CARD_TYPE_SD)
2905 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
2912 if (IS_ERR(host->vmmc)) {
2913 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
2916 ret = regulator_enable(host->vmmc);
2919 "failed to enable regulator: %d\n", ret);
2925 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2927 if (mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
2928 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2930 ret = mmc_add_host(mmc);
2934 /* Pinctrl set default iomux state to fucntion port.
2935 * Fixme: DON'T TOUCH EMMC SETTING!
2937 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
2939 host->pinctrl = devm_pinctrl_get(host->dev);
2940 if(IS_ERR(host->pinctrl))
2941 printk("%s: Warning : No pinctrl used!\n",mmc_hostname(host->mmc));
2944 host->pins_idle= pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_IDLE);
/* FIX 2: check the state we just looked up (pins_idle), not
 * pins_default, which is only assigned further below. */
2945 if(IS_ERR(host->pins_idle))
2946 printk("%s: Warning : No IDLE pinctrl matched!\n", mmc_hostname(host->mmc));
2949 if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
2950 printk("%s: Warning : Idle pinctrl setting failed!\n", mmc_hostname(host->mmc));
2953 host->pins_default = pinctrl_lookup_state(host->pinctrl,PINCTRL_STATE_DEFAULT);
2954 if(IS_ERR(host->pins_default))
2955 printk("%s: Warning : No default pinctrl matched!\n", mmc_hostname(host->mmc));
2958 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
2959 printk("%s: Warning : Default pinctrl setting failed!\n", mmc_hostname(host->mmc));
2965 #if defined(CONFIG_DEBUG_FS)
2966 dw_mci_init_debugfs(slot);
2969 /* Card initially undetected */
2970 slot->last_detect_state = 1;
/*
 * dw_mci_cleanup_slot - tear down one slot: run the platform exit
 * hook, unregister the mmc_host from the MMC core, and free it.
 */
2979 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2981 /* Shutdown detect IRQ */
2982 if (slot->host->pdata->exit)
2983 slot->host->pdata->exit(id);
2985 /* Debugfs stuff is cleaned up by mmc core */
2986 mmc_remove_host(slot->mmc);
/* Clear the back-pointer before freeing so stale lookups see NULL. */
2987 slot->host->slot[id] = NULL;
2988 mmc_free_host(slot->mmc);
/*
 * dw_mci_init_dma - allocate the scatter-gather translation buffer and
 * pick a DMA interface (internal IDMAC when configured), falling back
 * to PIO mode when DMA setup is unavailable or its init fails.
 */
2991 static void dw_mci_init_dma(struct dw_mci *host)
2993 /* Alloc memory for sg translation */
2994 host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2995 &host->sg_dma, GFP_KERNEL);
2996 if (!host->sg_cpu) {
2997 dev_err(host->dev, "%s: could not alloc DMA memory\n",
3002 /* Determine which DMA interface to use */
3003 #ifdef CONFIG_MMC_DW_IDMAC
3004 host->dma_ops = &dw_mci_idmac_ops;
3005 dev_info(host->dev, "Using internal DMA controller.\n");
/* A usable dma_ops must provide init/start/stop/cleanup callbacks. */
3011 if (host->dma_ops->init && host->dma_ops->start &&
3012 host->dma_ops->stop && host->dma_ops->cleanup) {
3013 if (host->dma_ops->init(host)) {
3014 dev_err(host->dev, "%s: Unable to initialize "
3015 "DMA Controller.\n", __func__);
3019 dev_err(host->dev, "DMA initialization not found.\n");
3027 dev_info(host->dev, "Using PIO mode.\n");
/*
 * dw_mci_ctrl_reset - set the requested reset bits in the CTRL
 * register and poll (up to 500 ms) until the controller self-clears
 * them; warns on timeout. Returns success/failure (return elided).
 */
3032 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
3034 unsigned long timeout = jiffies + msecs_to_jiffies(500);
3037 ctrl = mci_readl(host, CTRL);
3039 mci_writel(host, CTRL, ctrl);
3041 /* wait till resets clear */
3043 ctrl = mci_readl(host, CTRL);
3044 if (!(ctrl & reset))
3046 } while (time_before(jiffies, timeout));
3049 "Timeout resetting block (ctrl reset %#x)\n",
/* Reset only the data FIFO. The SG miter is stopped first because,
 * per the original comment, the reset raises a block interrupt. */
3055 static inline bool dw_mci_fifo_reset(struct dw_mci *host)
3058 * Reseting generates a block interrupt, hence setting
3059 * the scatter-gather pointer to NULL.
3062 sg_miter_stop(&host->sg_miter);
3066 return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
/* Full reset: FIFO and DMA blocks (a third flag on the elided line
 * 3073 — presumably SDMMC_CTRL_RESET — is also OR'd in). */
3069 static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
3071 return dw_mci_ctrl_reset(host,
3072 SDMMC_CTRL_FIFO_RESET |
3074 SDMMC_CTRL_DMA_RESET);
/* Host-level DT quirk table: property name -> host quirk flag. */
3078 static struct dw_mci_of_quirks {
3083 .quirk = "broken-cd",
3084 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * dw_mci_parse_dt - build a dw_mci_board platform-data structure from
 * the controller's device-tree node: slot count, quirks, FIFO depth,
 * card-detect delay, bus clock frequency, capability flags, and any
 * implementation-specific properties via drv_data->parse_dt().
 * Returns the populated pdata, or ERR_PTR on allocation/parse failure.
 */
3088 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3090 struct dw_mci_board *pdata;
3091 struct device *dev = host->dev;
3092 struct device_node *np = dev->of_node;
3093 const struct dw_mci_drv_data *drv_data = host->drv_data;
3095 u32 clock_frequency;
3097 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3099 dev_err(dev, "could not allocate memory for pdata\n");
3100 return ERR_PTR(-ENOMEM);
3103 /* find out number of slots supported */
3104 if (of_property_read_u32(dev->of_node, "num-slots",
3105 &pdata->num_slots)) {
3106 dev_info(dev, "num-slots property not found, "
3107 "assuming 1 slot is available\n");
3108 pdata->num_slots = 1;
/* Apply every host quirk whose DT property is present. */
3112 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
3113 if (of_get_property(np, of_quirks[idx].quirk, NULL))
3114 pdata->quirks |= of_quirks[idx].id;
3117 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
3118 dev_info(dev, "fifo-depth property not found, using "
3119 "value of FIFOTH register as default\n");
3121 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3123 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
3124 pdata->bus_hz = clock_frequency;
/* Let the SoC-specific driver parse its own extra properties. */
3126 if (drv_data && drv_data->parse_dt) {
3127 ret = drv_data->parse_dt(host);
3129 return ERR_PTR(ret);
3132 if (of_find_property(np, "keep-power-in-suspend", NULL))
3133 pdata->pm_caps |= MMC_PM_KEEP_POWER;
3135 if (of_find_property(np, "enable-sdio-wakeup", NULL))
3136 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
3138 if (of_find_property(np, "supports-highspeed", NULL))
3139 pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
3141 if (of_find_property(np, "supports-UHS_SDR104", NULL))
3142 pdata->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
3144 if (of_find_property(np, "supports-DDR_MODE", NULL))
3145 pdata->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_1_2V_DDR;
3147 if (of_find_property(np, "caps2-mmc-hs200", NULL))
3148 pdata->caps2 |= MMC_CAP2_HS200;
3150 if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
3151 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
3153 if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
3154 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
3156 if (of_get_property(np, "cd-inverted", NULL))
3157 pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
3158 if (of_get_property(np, "bootpart-no-access", NULL))
3159 pdata->caps2 |= MMC_CAP2_BOOTPART_NOACC;
3164 #else /* CONFIG_OF */
/* Without device tree there is no source for pdata here. */
3165 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3167 return ERR_PTR(-EINVAL);
3169 #endif /* CONFIG_OF */
/*
 * dw_mci_probe - bring up the controller.
 *
 * Order of operations visible here: parse DT into pdata; read the IP
 * version to pick the DATA register offset; acquire and enable the
 * AHB (hclk_mmc) and card (clk_mmc) clocks; detect host data width
 * from HCON; reset all blocks; set up DMA; program FIFO thresholds;
 * register the IRQ handler and card workqueue; initialize each slot;
 * finally unmask the interrupt set and enable controller interrupts.
 * Error paths (labels elided in this chunk) unwind the workqueue,
 * DMA, regulator, and clocks.
 *
 * NOTE(review): chunk is sampled — some interior lines are elided.
 */
3171 int dw_mci_probe(struct dw_mci *host)
3173 const struct dw_mci_drv_data *drv_data = host->drv_data;
3174 int width, i, ret = 0;
3180 host->pdata = dw_mci_parse_dt(host);
3181 if (IS_ERR(host->pdata)) {
3182 dev_err(host->dev, "platform data not available\n");
3187 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
3189 "Platform data must supply select_slot function\n");
3194 * In 2.40a spec, Data offset is changed.
3195 * Need to check the version-id and set data-offset for DATA register.
3197 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3198 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3200 if (host->verid < DW_MMC_240A)
3201 host->data_offset = DATA_OFFSET;
3203 host->data_offset = DATA_240A_OFFSET;
3206 host->hclk_mmc= devm_clk_get(host->dev, "hclk_mmc");
3207 if (IS_ERR(host->hclk_mmc)) {
3208 dev_err(host->dev, "failed to get hclk_mmc\n");
3209 ret = PTR_ERR(host->hclk_mmc);
3213 clk_prepare_enable(host->hclk_mmc);
3216 host->clk_mmc = devm_clk_get(host->dev, "clk_mmc");
3217 if (IS_ERR(host->clk_mmc)) {
3218 dev_err(host->dev, "failed to get clk mmc_per\n");
3219 ret = PTR_ERR(host->clk_mmc);
3223 host->bus_hz = host->pdata->bus_hz;
3224 if (!host->bus_hz) {
3225 dev_err(host->dev,"Platform data must supply bus speed\n");
/* Newer IP (>= 2.40a) has a fixed /2 divider upstream of the
 * controller, so the source clock is requested at twice bus_hz. */
3230 if (host->verid < DW_MMC_240A)
3231 ret = clk_set_rate(host->clk_mmc, host->bus_hz);
3233 //rockchip: fix divider 2 in clksum before controlller
3234 ret = clk_set_rate(host->clk_mmc, host->bus_hz * 2);
3237 dev_err(host->dev, "failed to set clk mmc\n");
3240 clk_prepare_enable(host->clk_mmc);
3242 if (drv_data && drv_data->setup_clock) {
3243 ret = drv_data->setup_clock(host);
3246 "implementation specific clock setup failed\n");
3251 host->quirks = host->pdata->quirks;
3252 host->irq_state = true;
3253 host->set_speed = 0;
3256 spin_lock_init(&host->lock);
3257 INIT_LIST_HEAD(&host->queue);
3260 * Get the host data width - this assumes that HCON has been set with
3261 * the correct values.
3263 i = (mci_readl(host, HCON) >> 7) & 0x7;
3265 host->push_data = dw_mci_push_data16;
3266 host->pull_data = dw_mci_pull_data16;
3268 host->data_shift = 1;
3269 } else if (i == 2) {
3270 host->push_data = dw_mci_push_data64;
3271 host->pull_data = dw_mci_pull_data64;
3273 host->data_shift = 3;
3275 /* Check for a reserved value, and warn if it is */
3277 "HCON reports a reserved host data width!\n"
3278 "Defaulting to 32-bit access.\n");
3279 host->push_data = dw_mci_push_data32;
3280 host->pull_data = dw_mci_pull_data32;
3282 host->data_shift = 2;
3285 /* Reset all blocks */
3286 if (!dw_mci_ctrl_all_reset(host))
3289 host->dma_ops = host->pdata->dma_ops;
3290 dw_mci_init_dma(host);
3292 /* Clear the interrupts for the host controller */
3293 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3294 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3296 /* Put in max timeout */
3297 mci_writel(host, TMOUT, 0xFFFFFFFF);
3300 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
3301 * Tx Mark = fifo_size / 2 DMA Size = 8
3303 if (!host->pdata->fifo_depth) {
3305 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3306 * have been overwritten by the bootloader, just like we're
3307 * about to do, so if you know the value for your hardware, you
3308 * should put it in the platform data.
3310 fifo_size = mci_readl(host, FIFOTH);
3311 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3313 fifo_size = host->pdata->fifo_depth;
3315 host->fifo_depth = fifo_size;
3317 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3318 mci_writel(host, FIFOTH, host->fifoth_val);
3320 /* disable clock to CIU */
3321 mci_writel(host, CLKENA, 0);
3322 mci_writel(host, CLKSRC, 0);
3324 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3325 host->card_workqueue = alloc_workqueue("dw-mci-card",
3326 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
3327 if (!host->card_workqueue) {
3331 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
3332 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3333 host->irq_flags, "dw-mci", host);
/* Slot count: pdata value when given, else read from HCON. */
3337 if (host->pdata->num_slots)
3338 host->num_slots = host->pdata->num_slots;
3340 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
3342 /* We need at least one slot to succeed */
3343 for (i = 0; i < host->num_slots; i++) {
3344 ret = dw_mci_init_slot(host, i);
3346 dev_dbg(host->dev, "slot %d init failed\n", i);
3352 * Enable interrupts for command done, data over, data empty, card det,
3353 * receive ready and error such as transmit, receive timeout, crc error
3355 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3356 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_VSI |
3357 SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS;
/* Card-detect interrupt only for removable (non-SDIO/eMMC) hosts. */
3358 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO)
3359 && !(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_EMMC))
3360 regs |= SDMMC_INT_CD;
3362 mci_writel(host, INTMASK, regs);
3364 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
3366 dev_info(host->dev, "DW MMC controller at irq %d, "
3367 "%d bit host data width, "
3369 host->irq, width, fifo_size);
3372 dev_info(host->dev, "%d slots initialized\n", init_slots);
3374 dev_dbg(host->dev, "attempted to initialize %d slots, "
3375 "but failed on all\n", host->num_slots);
3380 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
3381 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
/* Error unwind (labels elided): workqueue, DMA, regulator, clocks. */
3386 destroy_workqueue(host->card_workqueue);
3389 if (host->use_dma && host->dma_ops->exit)
3390 host->dma_ops->exit(host);
3393 regulator_disable(host->vmmc);
3394 regulator_put(host->vmmc);
3398 if (!IS_ERR(host->clk_mmc))
3399 clk_disable_unprepare(host->clk_mmc);
3401 if (!IS_ERR(host->hclk_mmc))
3402 clk_disable_unprepare(host->hclk_mmc);
3406 EXPORT_SYMBOL(dw_mci_probe);
/*
 * dw_mci_remove - tear down the controller: mask and clear all
 * interrupts, clean up every slot, gate the CIU clock, destroy the
 * card workqueue, shut down DMA, and release regulator and clocks
 * (mirrors the probe error path).
 */
3408 void dw_mci_remove(struct dw_mci *host)
3412 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3413 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3415 for (i = 0; i < host->num_slots; i++) {
3416 dev_dbg(host->dev, "remove slot %d\n", i);
3418 dw_mci_cleanup_slot(host->slot[i], i);
3421 /* disable clock to CIU */
3422 mci_writel(host, CLKENA, 0);
3423 mci_writel(host, CLKSRC, 0);
3425 destroy_workqueue(host->card_workqueue);
3427 if (host->use_dma && host->dma_ops->exit)
3428 host->dma_ops->exit(host);
3431 regulator_disable(host->vmmc);
3432 regulator_put(host->vmmc);
3435 if (!IS_ERR(host->clk_mmc))
3436 clk_disable_unprepare(host->clk_mmc);
3438 if (!IS_ERR(host->hclk_mmc))
3439 clk_disable_unprepare(host->hclk_mmc);
3441 EXPORT_SYMBOL(dw_mci_remove);
3445 #ifdef CONFIG_PM_SLEEP
3447 * TODO: we should probably disable the clock to the card in the suspend path.
/*
 * dw_mci_suspend - disable vmmc and, for the SD-card controller only,
 * mask the host IRQ, switch pins to the idle state, and arm the
 * card-detect GPIO IRQ as a system wakeup source.
 */
3449 int dw_mci_suspend(struct dw_mci *host)
3453 regulator_disable(host->vmmc);
3455 /*only for sdmmc controller*/
3456 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3457 disable_irq(host->irq);
3459 if(pinctrl_select_state(host->pinctrl, host->pins_idle) < 0)
3460 printk("%s: Warning : Idle pinctrl setting failed!\n", mmc_hostname(host->mmc));
/* Re-request the cd-gpio so its IRQ (slot.cd_irq) exists for wakeup;
 * resume frees it again via mmc_gpio_free_cd(). */
3461 dw_mci_of_get_cd_gpio(host->dev,0,host->mmc);
3463 enable_irq_wake(host->mmc->slot.cd_irq);
3467 EXPORT_SYMBOL(dw_mci_suspend);
/*
 * dw_mci_resume - undo dw_mci_suspend: re-enable vmmc, reset the
 * controller, re-init DMA, restore FIFO thresholds and interrupt
 * masks, re-apply per-slot ios for keep-power slots, and for the
 * SD-card controller disarm the cd-gpio wakeup, restore default pins,
 * and unmask the host IRQ.
 */
3469 int dw_mci_resume(struct dw_mci *host)
3475 ret = regulator_enable(host->vmmc);
3478 "failed to enable regulator: %d\n", ret);
3483 if (!dw_mci_ctrl_all_reset(host)) {
3488 if (host->use_dma && host->dma_ops->init)
3489 host->dma_ops->init(host);
3492 * Restore the initial value at FIFOTH register
3493 * And Invalidate the prev_blksz with zero
3495 mci_writel(host, FIFOTH, host->fifoth_val);
3496 host->prev_blksz = 0;
3497 /* Put in max timeout */
3498 mci_writel(host, TMOUT, 0xFFFFFFFF);
3500 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3501 regs = SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR | SDMMC_INT_VSI |
3503 if(!(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SDIO))
3504 regs |= SDMMC_INT_CD;
3505 mci_writel(host, INTMASK, regs);
3506 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
/* Restore bus configuration for slots that kept power in suspend. */
3508 for (i = 0; i < host->num_slots; i++) {
3509 struct dw_mci_slot *slot = host->slot[i];
3512 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
3513 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3514 dw_mci_setup_bus(slot, true);
3518 /*only for sdmmc controller*/
3519 if(host->mmc->restrict_caps & RESTRICT_CARD_TYPE_SD) {
3520 disable_irq_wake(host->mmc->slot.cd_irq);
/* Release the cd-gpio that suspend re-requested for wakeup. */
3522 mmc_gpio_free_cd(host->mmc);
3523 if(pinctrl_select_state(host->pinctrl, host->pins_default) < 0)
3524 printk("%s: Warning : Default pinctrl setting failed!\n", mmc_hostname(host->mmc));
3526 enable_irq(host->irq);
3532 EXPORT_SYMBOL(dw_mci_resume);
3533 #endif /* CONFIG_PM_SLEEP */
/* Module init/exit: init only announces the driver; the actual
 * platform drivers register via dw_mci_probe()/dw_mci_remove(). */
3535 static int __init dw_mci_init(void)
3537 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
3541 static void __exit dw_mci_exit(void)
3545 module_init(dw_mci_init);
3546 module_exit(dw_mci_exit);
3548 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3550 MODULE_AUTHOR("NXP Semiconductor VietNam");
3551 MODULE_AUTHOR("Imagination Technologies Ltd");
/* Fixed mojibake: "£¬" was a mis-encoded full-width comma; normalized
 * the author string to plain ASCII. */
3552 MODULE_AUTHOR("Rockchip Electronics, Bangwang Xie <xbw@rock-chips.com>");
3554 MODULE_LICENSE("GPL v2");