/*
 * Synopsys DesignWare Multimedia Card Interface driver
 *  (Based on NXP driver for lpc 31xx)
 *
 * Copyright (C) 2009 NXP Semiconductors
 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>

#include "dw_mmc.h"
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */
#ifdef CONFIG_MMC_DW_IDMAC
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)
#define IDMAC_DES0_LD	BIT(2)
#define IDMAC_DES0_FD	BIT(3)
#define IDMAC_DES0_CH	BIT(4)
#define IDMAC_DES0_ER	BIT(5)
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)

	u32		des1;	/* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
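
/*
 * Note on the layout above: this driver always uses the chained
 * descriptor mode (IDMAC_DES0_CH), in which des3 carries the physical
 * address of the next descriptor in the ring rather than a second data
 * buffer; dw_mci_idmac_init() below links the ring through des3.
 */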
#endif /* CONFIG_MMC_DW_IDMAC */
static bool dw_mci_reset(struct dw_mci *host);
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
	struct dw_mci_slot *slot = s->private;
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_command *stop;
	struct mmc_data	*data;

	/* Make sure we get a consistent snapshot */
	spin_lock_bh(&slot->host->lock);
	mrq = slot->mrq;

	if (mrq) {
		cmd = mrq->cmd;
		data = mrq->data;
		stop = mrq->stop;

		if (cmd)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   cmd->opcode, cmd->arg, cmd->flags,
				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
				   cmd->resp[3], cmd->error);
		if (data)
			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
				   data->bytes_xfered, data->blocks,
				   data->blksz, data->flags, data->error);
		if (stop)
			seq_printf(s,
				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
				   stop->opcode, stop->arg, stop->flags,
				   stop->resp[0], stop->resp[1], stop->resp[2],
				   stop->resp[3], stop->error);
	}

	spin_unlock_bh(&slot->host->lock);

	return 0;
}
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int dw_mci_regs_show(struct seq_file *s, void *v)
{
	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);

	return 0;
}

static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
	struct mmc_host	*mmc = slot->mmc;
	struct dw_mci *host = slot->host;
	struct dentry *root;
	struct dentry *node;

	root = mmc->debugfs_root;
	if (!root)
		return;

	node = debugfs_create_file("regs", S_IRUSR, root, host,
				   &dw_mci_regs_fops);
	if (!node)
		goto err;

	node = debugfs_create_file("req", S_IRUSR, root, slot,
				   &dw_mci_req_fops);
	if (!node)
		goto err;

	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
	if (!node)
		goto err;

	node = debugfs_create_x32("pending_events", S_IRUSR, root,
				  (u32 *)&host->pending_events);
	if (!node)
		goto err;

	node = debugfs_create_x32("completed_events", S_IRUSR, root,
				  (u32 *)&host->completed_events);
	if (!node)
		goto err;

	return;

err:
	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg);
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;

	cmd->error = -EINPROGRESS;
	cmdr = cmd->opcode;

	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->opcode == SD_SWITCH_VOLTAGE) {
		u32 clk_en_a;

		/* Special bit makes CMD11 not die */
		cmdr |= SDMMC_CMD_VOLT_SWITCH;

		/* Change state to continue to handle CMD11 weirdness */
		WARN_ON(slot->host->state != STATE_SENDING_CMD);
		slot->host->state = STATE_SENDING_CMD11;

		/*
		 * We need to disable low power mode (automatic clock stop)
		 * while doing voltage switch so we don't confuse the card,
		 * since stopping the clock is a specific part of the UHS
		 * voltage change dance.
		 *
		 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
		 * unconditionally turned back on in dw_mci_setup_bus() if it's
		 * ever called with a non-zero clock. That shouldn't happen
		 * until the voltage change is all done.
		 */
		clk_en_a = mci_readl(host, CLKENA);
		clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
		mci_writel(host, CLKENA, clk_en_a);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}
static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
{
	struct mmc_command *stop;
	u32 cmdr;

	if (!cmd->data)
		return 0;

	stop = &host->stop_abort;
	cmdr = cmd->opcode;
	memset(stop, 0, sizeof(struct mmc_command));

	if (cmdr == MMC_READ_SINGLE_BLOCK ||
	    cmdr == MMC_READ_MULTIPLE_BLOCK ||
	    cmdr == MMC_WRITE_BLOCK ||
	    cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
		stop->opcode = MMC_STOP_TRANSMISSION;
		stop->arg = 0;
		stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	} else if (cmdr == SD_IO_RW_EXTENDED) {
		stop->opcode = SD_IO_RW_DIRECT;
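		/*
		 * Build a CMD52 write to the CCCR ABORT register: bit 31
		 * selects a write, bits 30:28 address function 0 (the CIA),
		 * bits 25:9 carry the register address, and the low byte
		 * holds the number of the function being aborted, recovered
		 * from bits 30:28 of the original CMD53 argument.
		 */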
		stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
			     ((cmd->arg >> 28) & 0x7);
		stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
	} else {
		return 0;
	}

	cmdr = stop->opcode | SDMMC_CMD_STOP |
		SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;

	return cmdr;
}
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
{
	struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort;
	dw_mci_start_command(host, stop, host->stop_cmdr);
}
/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
	if (host->using_dma) {
		host->dma_ops->stop(host);
		host->dma_ops->cleanup(host);
	}

	/* Data transfer was stopped by the interrupt handler */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static int dw_mci_get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}
#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	if (data)
		if (!data->host_cookie)
			dma_unmap_sg(host->dev,
				     data->sg,
				     data->sg_len,
				     dw_mci_get_dma_dir(data));
}
static void dw_mci_idmac_reset(struct dw_mci *host)
{
	u32 bmod = mci_readl(host, BMOD);
	/* Software reset of DMA */
	bmod |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, bmod);
}
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
	u32 temp;

	/* Disable and reset the IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp &= ~SDMMC_CTRL_USE_IDMAC;
	temp |= SDMMC_CTRL_DMA_RESET;
	mci_writel(host, CTRL, temp);

	/* Stop the IDMAC running */
	temp = mci_readl(host, BMOD);
	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
	temp |= SDMMC_IDMAC_SWRESET;
	mci_writel(host, BMOD, temp);
}
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;
}
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* drain writebuffer */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}
static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;

	/* Number of descriptors in the ring buffer */
	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
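
	/*
	 * Each descriptor's buffer-size field is 13 bits wide (see
	 * IDMAC_SET_BUFFER1_SIZE above), and dw_mci_init_slot() caps
	 * max_seg_size at 0x1000, so a single descriptor always covers a
	 * full scatterlist segment.
	 */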
	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));

	/* Set the last descriptor as the end-of-ring descriptor */
	p->des3 = host->sg_dma;
	p->des0 = IDMAC_DES0_ER;

	dw_mci_idmac_reset(host);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDSTS, IDMAC_INT_CLR);
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

	/* Set the descriptor base address */
	mci_writel(host, DBADDR, host->sg_dma);
	return 0;
}
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
				   struct mmc_data *data,
				   bool next)
{
	struct scatterlist *sg;
	unsigned int i, sg_len;

	if (!next && data->host_cookie)
		return data->host_cookie;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-word-aligned buffers or lengths. Also, we don't bother
	 * with all the DMA setup overhead for short transfers.
	 */
	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
		return -EINVAL;

	if (data->blksz & 3)
		return -EINVAL;

	for_each_sg(data->sg, sg, data->sg_len, i) {
		if (sg->offset & 3 || sg->length & 3)
			return -EINVAL;
	}

	sg_len = dma_map_sg(host->dev,
			    data->sg,
			    data->sg_len,
			    dw_mci_get_dma_dir(data));
	if (sg_len == 0)
		return -EINVAL;

	if (next)
		data->host_cookie = sg_len;

	return sg_len;
}
static void dw_mci_pre_req(struct mmc_host *mmc,
			   struct mmc_request *mrq,
			   bool is_first_req)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
		data->host_cookie = 0;
}
static void dw_mci_post_req(struct mmc_host *mmc,
			    struct mmc_request *mrq,
			    int err)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!slot->host->use_dma || !data)
		return;

	if (data->host_cookie)
		dma_unmap_sg(slot->host->dev,
			     data->sg,
			     data->sg_len,
			     dw_mci_get_dma_dir(data));
	data->host_cookie = 0;
}
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;
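
	/*
	 * MSIZE selects the DMA burst length in FIFO words.  The loop below
	 * picks the largest burst size that evenly divides both the block
	 * depth and the free TX space at the watermark, so a burst never
	 * overruns or underruns the FIFO.
	 */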
	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);

	/*
	 * If idx is '0', it won't be tried.
	 * Thus, initial values are used.
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
{
	unsigned int blksz = data->blksz;
	u32 blksz_depth, fifo_depth;
	u16 thld_size;

	WARN_ON(!(data->flags & MMC_DATA_READ));

	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		goto disable;

	blksz_depth = blksz / (1 << host->data_shift);
	fifo_depth = host->fifo_depth;

	if (blksz_depth > fifo_depth)
		goto disable;

	/*
	 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
	 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
	 * Currently just choose blksz.
	 */
	thld_size = blksz;
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
	return;

disable:
	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
}
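
/*
 * Background for the function above: the card read threshold (CDTHRCTL)
 * makes the controller start a block read only when at least a block's
 * worth of FIFO space is free, so the card clock never has to be stopped
 * mid-block at HS200/SDR104 rates where clock stretching is not allowed.
 */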
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/*
	 * Decide the MSIZE and RX/TX Watermark.
	 * If current block size is same with previous size,
	 * no need to update fifoth.
	 */
	if (host->prev_blksz != data->blksz)
		dw_mci_adjust_fifoth(host, data);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ) {
		host->dir_status = DW_MCI_RECV_STATUS;
		dw_mci_ctrl_rd_thld(host, data);
	} else {
		host->dir_status = DW_MCI_SEND_STATUS;
	}

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);

		/*
		 * Use the initial fifoth_val for PIO mode.
		 * If next issued data may be transferred by DMA mode,
		 * prev_blksz should be invalidated.
		 */
		mci_writel(host, FIFOTH, host->fifoth_val);
		host->prev_blksz = 0;
	} else {
		/*
		 * Keep the current block size.
		 * It will be used to decide whether to update
		 * fifoth register next time.
		 */
		host->prev_blksz = data->blksz;
	}
}
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	unsigned int cmd_status = 0;

	mci_writel(host, CMDARG, arg);
	wmb();
	mci_writel(host, CMD, SDMMC_CMD_START | cmd);

	while (time_before(jiffies, timeout)) {
		cmd_status = mci_readl(host, CMD);
		if (!(cmd_status & SDMMC_CMD_START))
			return;
	}
	dev_err(&slot->mmc->class_dev,
		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
		cmd, arg, cmd_status);
}
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;
	u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;

	/* We must continue to set bit 28 in CMD until the change is complete */
	if (host->state == STATE_WAITING_CMD11_DONE)
		sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
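
		/*
		 * Worked example: with bus_hz = 50 MHz and a requested
		 * clock of 400 kHz, 50000000 / 400000 = 125 exactly, so
		 * div = DIV_ROUND_UP(125, 2) = 63 and the card runs at
		 * 50 MHz / (2 * 63) ~= 397 kHz, never faster than asked.
		 */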
		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot, sdmmc_cmd_bits, 0);

		/* keep the clock with the reflected clock divisor */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data	*data;
	u32 cmdflags;

	mrq = slot->mrq;

	host->cur_slot = slot;
	host->mrq = mrq;

	host->pending_events = 0;
	host->completed_events = 0;
	host->cmd_status = 0;
	host->data_status = 0;
	host->dir_status = 0;

	data = cmd->data;
	if (data) {
		mci_writel(host, TMOUT, 0xFFFFFFFF);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else
		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
static void dw_mci_start_request(struct dw_mci *host,
				 struct dw_mci_slot *slot)
{
	struct mmc_request *mrq = slot->mrq;
	struct mmc_command *cmd;

	cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
	__dw_mci_start_request(host, slot, cmd);
}
/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
				 struct mmc_request *mrq)
{
	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
		 host->state);

	slot->mrq = mrq;

	if (host->state == STATE_WAITING_CMD11_DONE) {
		dev_warn(&slot->mmc->class_dev,
			 "Voltage change didn't complete\n");
		/*
		 * this case isn't expected to happen, so we can
		 * either crash here or just try to continue on
		 * in the closest possible state
		 */
		host->state = STATE_IDLE;
	}

	if (host->state == STATE_IDLE) {
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		list_add_tail(&slot->queue_node, &host->queue);
	}
}
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;

	WARN_ON(slot->mrq);

	/*
	 * The check for card presence and queueing of the request must be
	 * atomic, otherwise the card could be removed in between and the
	 * request wouldn't fail until another card was inserted.
	 */
	spin_lock_bh(&host->lock);

	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
		spin_unlock_bh(&host->lock);
		mrq->cmd->error = -ENOMEDIUM;
		mmc_request_done(mmc, mrq);
		return;
	}

	dw_mci_queue_request(host, slot, mrq);

	spin_unlock_bh(&host->lock);
}
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	int ret;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode set */
	if (ios->timing == MMC_TIMING_MMC_DDR52)
		regs |= ((0x1 << slot->id) << 16);
	else
		regs &= ~((0x1 << slot->id) << 16);

	mci_writel(slot->host, UHS_REG, regs);
	slot->host->timing = ios->timing;

	/*
	 * Use mirror of ios->clock to prevent race with mmc
	 * core ios update when finding the minimum.
	 */
	slot->clock = ios->clock;

	if (drv_data && drv_data->set_ios)
		drv_data->set_ios(slot->host, ios);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);

	if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
		slot->host->state = STATE_IDLE;

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc)) {
			ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					ios->vdd);
			if (ret) {
				dev_err(slot->host->dev,
					"failed to enable vmmc regulator\n");
				/* return, if failed turn on vmmc */
				return;
			}
		}
		if (!IS_ERR(mmc->supply.vqmmc) && !slot->host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret < 0)
				dev_err(slot->host->dev,
					"failed to enable vqmmc regulator\n");
			else
				slot->host->vqmmc_enabled = true;
		}
		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		regs = mci_readl(slot->host, PWREN);
		regs |= (1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			slot->host->vqmmc_enabled = false;
		}

		regs = mci_readl(slot->host, PWREN);
		regs &= ~(1 << slot->id);
		mci_writel(slot->host, PWREN, regs);
		break;
	default:
		break;
	}
}
static int dw_mci_card_busy(struct mmc_host *mmc)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	u32 status;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = mci_readl(slot->host, STATUS);

	return !!(status & SDMMC_STATUS_BUSY);
}
static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 uhs;
	u32 v18 = SDMMC_UHS_18V << slot->id;
	int min_uv, max_uv;
	int ret;

	/*
	 * Program the voltage. Note that some instances of dw_mmc may use
	 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
	 * does no harm but you need to set the regulator directly. Try both.
	 */
	uhs = mci_readl(host, UHS_REG);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
		min_uv = 2700000;
		max_uv = 3600000;
		uhs &= ~v18;
	} else {
		min_uv = 1700000;
		max_uv = 1950000;
		uhs |= v18;
	}
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);

		if (ret) {
			dev_err(&mmc->class_dev,
				"Regulator set error %d: %d - %d\n",
				ret, min_uv, max_uv);
			return ret;
		}
	}
	mci_writel(host, UHS_REG, uhs);

	return 0;
}
static int dw_mci_get_ro(struct mmc_host *mmc)
{
	int read_only;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* Use platform get_ro function, else try on board write protect */
	if ((slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) ||
	    (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
		read_only = 0;
	else if (!IS_ERR_VALUE(gpio_ro))
		read_only = gpio_ro;
	else
		read_only =
			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;

	dev_dbg(&mmc->class_dev, "card is %s\n",
		read_only ? "read-only" : "read-write");

	return read_only;
}
static int dw_mci_get_cd(struct mmc_host *mmc)
{
	int present;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci_board *brd = slot->host->pdata;
	struct dw_mci *host = slot->host;
	int gpio_cd = mmc_gpio_get_cd(mmc);

	/* Use platform get_cd function, else try onboard card detect */
	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
		present = 1;
	else if (!IS_ERR_VALUE(gpio_cd))
		present = gpio_cd;
	else
		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
			== 0 ? 1 : 0;

	spin_lock_bh(&host->lock);
	if (present) {
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is present\n");
	} else {
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		dev_dbg(&mmc->class_dev, "card is not present\n");
	}
	spin_unlock_bh(&host->lock);

	return present;
}
/*
 * Disable low power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 clk_en_a;
	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

	clk_en_a = mci_readl(host, CLKENA);

	if (clk_en_a & clken_low_pwr) {
		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
			     SDMMC_CMD_PRV_DAT_WAIT, 0);
	}
}
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	u32 int_mask;

	/* Enable/disable Slot Specific SDIO interrupt */
	int_mask = mci_readl(host, INTMASK);
	if (enb) {
		/*
		 * Turn off low power mode if it was enabled.  This is a bit of
		 * a heavy operation and we disable / enable IRQs a lot, so
		 * we'll leave low power mode disabled and it will get
		 * re-enabled again in dw_mci_setup_bus().
		 */
		dw_mci_disable_low_power(slot);

		mci_writel(host, INTMASK,
			   (int_mask | SDMMC_INT_SDIO(slot->id)));
	} else {
		mci_writel(host, INTMASK,
			   (int_mask & ~SDMMC_INT_SDIO(slot->id)));
	}
}
static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	struct dw_mci *host = slot->host;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	struct dw_mci_tuning_data tuning_data;
	int err = -ENOSYS;

	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
			tuning_data.blk_pattern = tuning_blk_pattern_8bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
			tuning_data.blk_pattern = tuning_blk_pattern_4bit;
			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
		} else {
			return -EINVAL;
		}
	} else if (opcode == MMC_SEND_TUNING_BLOCK) {
		tuning_data.blk_pattern = tuning_blk_pattern_4bit;
		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
	} else {
		dev_err(host->dev,
			"Undefined command(%d) for tuning\n", opcode);
		return -EINVAL;
	}

	if (drv_data && drv_data->execute_tuning)
		err = drv_data->execute_tuning(slot, opcode, &tuning_data);
	return err;
}
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.card_busy		= dw_mci_card_busy,
	.start_signal_voltage_switch = dw_mci_switch_voltage,
};
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct dw_mci_slot *slot;
	struct mmc_host	*prev_mmc = host->cur_slot->mmc;

	WARN_ON(host->cmd || host->data);

	host->cur_slot->mrq = NULL;
	host->mrq = NULL;
	if (!list_empty(&host->queue)) {
		slot = list_entry(host->queue.next,
				  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");

		if (host->state == STATE_SENDING_CMD11)
			host->state = STATE_WAITING_CMD11_DONE;
		else
			host->state = STATE_IDLE;
	}

	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode.
				 */
				data->bytes_xfered = 0;
				data->error = -ETIMEDOUT;
			} else if (host->dir_status ==
					DW_MCI_RECV_STATUS) {
				data->error = -EIO;
			}
		} else {
			/* SDMMC_INT_SBE is included */
			data->error = -EIO;
		}

		dev_dbg(host->dev, "data error, status 0x%08x\n", status);

		/*
		 * After an error, there may be data lingering
		 * in the FIFO
		 */
		dw_mci_reset(host);
	} else {
		data->bytes_xfered = data->blocks * data->blksz;
		data->error = 0;
	}

	return data->error;
}
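
/*
 * Request state machine, driven from the interrupt handler via
 * pending_events:
 *
 *   IDLE -> SENDING_CMD[11] -> SENDING_DATA -> DATA_BUSY ->
 *   SENDING_STOP -> IDLE (or WAITING_CMD11_DONE after CMD11),
 *
 * with DATA_ERROR entered when DW_MCI_DATA_ERROR_FLAGS fire before the
 * transfer completes.
 */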
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	struct mmc_data	*data;
	struct mmc_command *cmd;
	struct mmc_request *mrq;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	int err;

	spin_lock(&host->lock);

	state = host->state;
	data = host->data;
	mrq = host->mrq;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
		case STATE_WAITING_CMD11_DONE:
			break;

		case STATE_SENDING_CMD11:
		case STATE_SENDING_CMD:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			cmd = host->cmd;
			host->cmd = NULL;
			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
			err = dw_mci_command_complete(host, cmd);
			if (cmd == mrq->sbc && !err) {
				prev_state = state = STATE_SENDING_CMD;
				__dw_mci_start_request(host, host->cur_slot,
						       mrq->cmd);
				goto unlock;
			}

			if (cmd->data && err) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_SENDING_STOP;
				break;
			}

			if (!cmd->data || err) {
				dw_mci_request_end(host, mrq);
				goto unlock;
			}

			prev_state = state = STATE_SENDING_DATA;
			/* fall through */

		case STATE_SENDING_DATA:
			/*
			 * We could get a data error and never a transfer
			 * complete so we'd better check for it here.
			 *
			 * Note that we don't really care if we also got a
			 * transfer complete; stopping the DMA and sending an
			 * abort won't hurt.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);

			/*
			 * Handle an EVENT_DATA_ERROR that might have shown up
			 * before the transfer completed.  This might not have
			 * been caught by the check above because the interrupt
			 * could have gone off between the previous check and
			 * the check for transfer complete.
			 *
			 * Technically this ought not be needed assuming we
			 * get a DATA_COMPLETE eventually (we'll notice the
			 * error and end the request), but it shouldn't hurt.
			 *
			 * This has the advantage of sending the stop command.
			 */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					       &host->pending_events)) {
				dw_mci_stop_dma(host);
				send_stop_abort(host, data);
				state = STATE_DATA_ERROR;
				break;
			}
			prev_state = state = STATE_DATA_BUSY;

			/* fall through */

		case STATE_DATA_BUSY:
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			host->data = NULL;
			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			err = dw_mci_data_complete(host, data);

			if (!err) {
				if (!data->stop || mrq->sbc) {
					if (mrq->sbc && data->stop)
						data->stop->error = 0;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}

				/* stop command for open-ended transfer*/
				if (data->stop)
					send_stop_abort(host, data);
			} else {
				/*
				 * If we don't have a command complete now we'll
				 * never get one since we just reset everything;
				 * better end the request.
				 *
				 * If we do have a command complete we'll fall
				 * through to the SENDING_STOP command and
				 * everything will be peachy keen.
				 */
				if (!test_bit(EVENT_CMD_COMPLETE,
					      &host->pending_events)) {
					host->cmd = NULL;
					dw_mci_request_end(host, mrq);
					goto unlock;
				}
			}

			/*
			 * If err has non-zero,
			 * stop-abort command has been already issued.
			 */
			prev_state = state = STATE_SENDING_STOP;

			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* CMD error in data command */
			if (mrq->cmd->error && mrq->data)
				dw_mci_reset(host);

			host->cmd = NULL;
			host->data = NULL;

			if (mrq->stop)
				dw_mci_command_complete(host, mrq->stop);
			else
				host->cmd_status = 0;

			dw_mci_request_end(host, mrq);
			goto unlock;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			state = STATE_DATA_BUSY;
			break;
		}
	} while (state != prev_state);

	host->state = state;
unlock:
	spin_unlock(&host->lock);
}
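
/*
 * PIO helpers.  The data FIFO is always accessed in full host words
 * (16, 32 or 64 bits wide, per host->data_shift), but scatterlist
 * buffers may start or end at odd byte counts; part_buf carries the
 * leftover bytes between calls so FIFO accesses stay whole-word.
 */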
/* push final bytes to part_buf, only use during push */
static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy((void *)&host->part_buf, buf, cnt);
	host->part_buf_count = cnt;
}

/* append bytes to part_buf, only use during push */
static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
	memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
	host->part_buf_count += cnt;
	return cnt;
}

/* pull first bytes from part_buf, only use during pull */
static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
	cnt = min(cnt, (int)host->part_buf_count);
	if (cnt) {
		memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
		       cnt);
		host->part_buf_count -= cnt;
		host->part_buf_start += cnt;
	}
	return cnt;
}

/* pull final bytes from the part_buf, assuming it's just been filled */
static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
	memcpy(buf, &host->part_buf, cnt);
	host->part_buf_start = cnt;
	host->part_buf_count = (1 << host->data_shift) - cnt;
}
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
				   host->part_buf16);
	}
}
1626 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1627 if (unlikely((unsigned long)buf & 0x1)) {
1629 /* pull data from fifo into aligned buffer */
1630 u16 aligned_buf[64];
1631 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1632 int items = len >> 1;
1634 for (i = 0; i < items; ++i)
1635 aligned_buf[i] = mci_readw(host,
1636 DATA(host->data_offset));
1637 /* memcpy from aligned buffer into output buffer */
1638 memcpy(buf, aligned_buf, len);
1646 for (; cnt >= 2; cnt -= 2)
1647 *pdata++ = mci_readw(host, DATA(host->data_offset));
1651 host->part_buf16 = mci_readw(host, DATA(host->data_offset));
1652 dw_mci_pull_final_bytes(host, buf, cnt);
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
				   host->part_buf32);
	}
}
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
					   aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
				   host->part_buf);
	}
}
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			/* pull data from fifo into aligned buffer */
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readq(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			*pdata++ = mci_readq(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		host->part_buf = mci_readq(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
{
	int len;

	/* get remaining partial bytes */
	len = dw_mci_pull_part_bytes(host, buf, cnt);
	if (unlikely(len == cnt))
		return;
	buf += len;
	cnt -= len;

	/* get the rest of the data */
	host->pull_data(host, buf, cnt);
}
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data	*data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	if (!host->cmd_status)
		host->cmd_status = status;

	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		/* Check volt switch first, since it can look like an error */
		if ((host->state == STATE_SENDING_CMD11) &&
		    (pending & SDMMC_INT_VOLT_SWITCH)) {
			mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
			pending &= ~SDMMC_INT_VOLT_SWITCH;
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}
	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
static void dw_mci_work_routine_card(struct work_struct *work)
{
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;

		present = dw_mci_get_cd(mmc);
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
					case STATE_WAITING_CMD11_DONE:
						break;
					case STATE_SENDING_CMD11:
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						/* fall through */
					case STATE_SENDING_STOP:
						if (mrq->stop)
							mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0)
				dw_mci_reset(host);

			spin_unlock_bh(&host->lock);

			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
#ifdef CONFIG_OF
/* given a slot id, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	struct device_node *np;
	const __be32 *addr;
	int len;

	if (!dev || !dev->of_node)
		return NULL;

	for_each_child_of_node(dev->of_node, np) {
		addr = of_get_property(np, "reg", &len);
		if (!addr || (len < sizeof(int)))
			continue;
		if (be32_to_cpup(addr) == slot)
			return np;
	}
	return NULL;
}
static struct dw_mci_of_slot_quirks {
	char *quirk;
	int id;
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};

static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
	int quirks = 0;
	int idx;

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) {
			dev_warn(dev, "Slot quirk %s is deprecated\n",
					of_slot_quirks[idx].quirk);
			quirks |= of_slot_quirks[idx].id;
		}

	return quirks;
}
#else /* CONFIG_OF */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	return 0;
}
#endif /* CONFIG_OF */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int ctrl_id, ret;
	u32 freq[2];

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
	if (!mmc)
		return -ENOMEM;

	slot = mmc_priv(mmc);
	slot->id = id;
	slot->mmc = mmc;
	slot->host = host;
	host->slot[id] = slot;

	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);

	mmc->ops = &dw_mci_ops;
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
	} else {
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];
	}

	/*if there are external regulators, get them*/
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto err_host_allocated;

	if (!mmc->ocr_avail)
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		if (ctrl_id < 0)
			ctrl_id = 0;
	} else {
		ctrl_id = to_platform_device(host->dev)->id;
	}
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_host_allocated;

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	} else {
		/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_blk_count = host->ring_size;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
		mmc->max_segs = 64;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */
	}

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
	else
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	ret = mmc_add_host(mmc);
	if (ret)
		goto err_host_allocated;

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);
#endif

	/* Card initially undetected */
	slot->last_detect_state = 0;

	return 0;

err_host_allocated:
	mmc_free_host(mmc);
	return ret;
}
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
static void dw_mci_init_dma(struct dw_mci *host)
{
	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					   &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",
			__func__);
		goto no_dma;
	}

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");
#endif

	if (!host->dma_ops)
		goto no_dma;

	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize "
				"DMA Controller.\n", __func__);
			goto no_dma;
		}
	} else {
		dev_err(host->dev, "DMA initialization not found.\n");
		goto no_dma;
	}

	host->use_dma = 1;
	return;

no_dma:
	dev_info(host->dev, "Using PIO mode.\n");
	host->use_dma = 0;
	return;
}
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(500);
	u32 ctrl;

	ctrl = mci_readl(host, CTRL);
	ctrl |= reset;
	mci_writel(host, CTRL, ctrl);

	/* wait till resets clear */
	do {
		ctrl = mci_readl(host, CTRL);
		if (!(ctrl & reset))
			return true;
	} while (time_before(jiffies, timeout));

	dev_err(host->dev,
		"Timeout resetting block (ctrl reset %#x)\n",
		ctrl & reset);

	return false;
}
static bool dw_mci_reset(struct dw_mci *host)
{
	u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
	bool ret = false;

	/*
	 * Resetting generates a block interrupt, hence setting
	 * the scatter-gather pointer to NULL.
	 */
	if (host->sg) {
		sg_miter_stop(&host->sg_miter);
		host->sg = NULL;
	}

	if (host->use_dma)
		flags |= SDMMC_CTRL_DMA_RESET;

	if (dw_mci_ctrl_reset(host, flags)) {
		/*
		 * In all cases we clear the RAWINTS register to clear any
		 * interrupts.
		 */
		mci_writel(host, RINTSTS, 0xFFFFFFFF);

		/* if using dma we wait for dma_req to clear */
		if (host->use_dma) {
			unsigned long timeout = jiffies + msecs_to_jiffies(500);
			u32 status;
			do {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_STATUS_DMA_REQ))
					break;
				cpu_relax();
			} while (time_before(jiffies, timeout));

			if (status & SDMMC_STATUS_DMA_REQ) {
				dev_err(host->dev,
					"%s: Timeout waiting for dma_req to "
					"clear during reset\n", __func__);
				goto ciu_out;
			}

			/* when using DMA next we reset the fifo again */
			if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
				goto ciu_out;
		}
	} else {
		/* if the controller reset bit did clear, then set clock regs */
		if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
			dev_err(host->dev, "%s: fifo/dma reset bits didn't "
				"clear but ciu was reset, doing clock update\n",
				__func__);
			goto ciu_out;
		}
	}

#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
	/* It is also recommended that we reset and reprogram idmac */
	dw_mci_idmac_reset(host);
#endif

	ret = true;

ciu_out:
	/* After a CTRL reset we need to have CIU set clock registers */
	mci_send_cmd(host->cur_slot, SDMMC_CMD_UPD_CLK, 0);

	return ret;
}
#ifdef CONFIG_OF
static struct dw_mci_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	}, {
		.quirk	= "disable-wp",
		.id	= DW_MCI_QUIRK_NO_WRITE_PROTECT,
	},
};
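
/*
 * Illustrative device tree fragment (not exhaustive) matching the
 * properties parsed below; the node name, unit address and compatible
 * string are examples only and depend on the platform binding:
 *
 *	mshc0: mmc@12200000 {
 *		compatible = "snps,dw-mshc";
 *		num-slots = <1>;
 *		fifo-depth = <0x80>;
 *		card-detect-delay = <200>;
 *		clock-frequency = <400000000>;
 *		broken-cd;
 *	};
 */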
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				 &pdata->num_slots)) {
		dev_info(dev, "num-slots property not found, "
				"assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev, "fifo-depth property not found, using "
				"value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	return pdata;
}

#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	return ERR_PTR(-EINVAL);
}
#endif /* CONFIG_OF */
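
/*
 * Main probe entry point. The bus glue (dw_mmc-pltfm and the SoC
 * variants layered on it) is expected to have filled in host->regs,
 * host->irq and host->dev before calling in here.
 */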
int dw_mci_probe(struct dw_mci *host)
{
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;
	u32 fifo_size;
	int init_slots = 0;

	if (!host->pdata) {
		host->pdata = dw_mci_parse_dt(host);
		if (IS_ERR(host->pdata)) {
			dev_err(host->dev, "platform data not available\n");
			return -EINVAL;
		}
	}

	if (host->pdata->num_slots > 1) {
		dev_err(host->dev,
			"Platform data must supply num_slots.\n");
		return -ENODEV;
	}
	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
	} else {
		ret = clk_prepare_enable(host->biu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable biu clock\n");
			return ret;
		}
	}

	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
	} else {
		ret = clk_prepare_enable(host->ciu_clk);
		if (ret) {
			dev_err(host->dev, "failed to enable ciu clock\n");
			goto err_clk_biu;
		}

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
			if (ret)
				dev_warn(host->dev,
					 "Unable to set bus rate to %uHz\n",
					 host->pdata->bus_hz);
		}
		host->bus_hz = clk_get_rate(host->ciu_clk);
	}

	if (!host->bus_hz) {
		dev_err(host->dev,
			"Platform data must supply bus speed\n");
		ret = -ENODEV;
		goto err_clk_ciu;
	}
	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific init failed\n");
			goto err_clk_ciu;
		}
	}

	if (drv_data && drv_data->setup_clock) {
		ret = drv_data->setup_clock(host);
		if (ret) {
			dev_err(host->dev,
				"implementation specific clock setup failed\n");
			goto err_clk_ciu;
		}
	}

	host->quirks = host->pdata->quirks;

	spin_lock_init(&host->lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
	if (!i) {
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		width = 16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		width = 64;
		host->data_shift = 3;
	} else {
		/* Check for a reserved value, and warn if it is */
		WARN((i != 1),
		     "HCON reports a reserved host data width!\n"
		     "Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		width = 32;
		host->data_shift = 2;
	}
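
	/*
	 * data_shift is log2 of the FIFO access size in bytes, so a
	 * 512-byte block takes 512 >> data_shift FIFO accesses, e.g.
	 * 128 32-bit accesses when data_shift is 2.
	 */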
	/* Reset all blocks */
	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
		return -ENODEV;

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);
	/*
	 * FIFO threshold settings:
	 *   RxMark = fifo_size / 2 - 1, TxMark = fifo_size / 2, DMA size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
	} else {
		fifo_size = host->pdata->fifo_depth;
	}
	host->fifo_depth = fifo_size;
	host->fifoth_val =
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);
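
	/*
	 * Worked example, assuming a 32-entry FIFO: MSIZE = 0x2 (bursts of
	 * 8 transfers), RX_WMark = 15, TX_WMark = 16, so fifoth_val =
	 * SDMMC_SET_FIFOTH(0x2, 15, 16) = 0x200f0010.
	 */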
	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * The 2.40a spec changed the offset of the DATA register, so check
	 * the version ID and set the data offset accordingly.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->data_offset = DATA_OFFSET;
	else
		host->data_offset = DATA_240A_OFFSET;
	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	host->card_workqueue = alloc_workqueue("dw-mci-card",
			WQ_MEM_RECLAIM, 1);
	if (!host->card_workqueue) {
		ret = -ENOMEM;
		goto err_dmaunmap;
	}
	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);
	if (ret)
		goto err_workqueue;

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
	else
		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
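
	/*
	 * The fallback above reads HCON[5:1], which encodes the number of
	 * card slots the IP was synthesized with, minus one.
	 */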
	/*
	 * Enable interrupts for command done, data over, data empty, card
	 * detect, receive ready, and errors such as transmit/receive
	 * timeout and CRC error.
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	/* Enable mci interrupt */
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	dev_info(host->dev,
		 "DW MMC controller at irq %d, %d bit host data width, %u deep fifo\n",
		 host->irq, width, fifo_size);
	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
		if (ret)
			dev_dbg(host->dev, "slot %d init failed\n", i);
		else
			init_slots++;
	}

	if (init_slots) {
		dev_info(host->dev, "%d slots initialized\n", init_slots);
	} else {
		dev_dbg(host->dev,
			"attempted to initialize %d slots, but failed on all\n",
			host->num_slots);
		goto err_workqueue;
	}

	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");

	return 0;
err_workqueue:
	destroy_workqueue(host->card_workqueue);

err_dmaunmap:
	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

err_clk_ciu:
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

err_clk_biu:
	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);

	return ret;
}
EXPORT_SYMBOL(dw_mci_probe);
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	destroy_workqueue(host->card_workqueue);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
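
/*
 * The suspend/resume pair below is not wired up here; the bus glue is
 * expected to call it from its dev_pm_ops (e.g. dw_mmc-pltfm wraps these
 * in dw_mci_pltfm_pmops).
 */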
#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
int dw_mci_suspend(struct dw_mci *host)
{
	return 0;
}
EXPORT_SYMBOL(dw_mci_suspend);
int dw_mci_resume(struct dw_mci *host)
{
	int i;

	if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
		return -ENODEV;

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value of the FIFOTH register,
	 * and invalidate prev_blksz by zeroing it.
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];

		if (!slot)
			continue;
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}
	}

	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}

static void __exit dw_mci_exit(void)
{
}

module_init(dw_mci_init);
module_exit(dw_mci_exit);
MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");