/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */
static struct workqueue_struct *workqueue;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume. Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
bool mmc_assume_removable;
#else
bool mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}
/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_release(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}
/**
 *	mmc_start_bkops - start BKOPS for supported cards
 *	@card: MMC card to start BKOPS
 *	@from_exception: A flag to indicate if this function was
 *			 called due to an exception raised by the card
 *
 *	Start background operations whenever requested.
 *	When the urgent BKOPS bit is set in an R1 command response
 *	then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	BUG_ON(!card);

	if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_BKOPS_MAX_TIMEOUT;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		goto out;
	}

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher) BKOPS is executed
	 * synchronously; otherwise the operation runs in the background.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
out:
	mmc_release_host(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		complete(&mrq->completion);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);
	return 0;
}
static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	}
}
/**
 *	mmc_pre_req - Prepare for a new request
 *	@host: MMC host to prepare command
 *	@mrq: MMC request to prepare for
 *	@is_first_req: true if there is no previous started request
 *		       that may run in parallel to this call, otherwise false
 *
 *	mmc_pre_req() is called prior to mmc_start_req() to let the
 *	host prepare for the new request. Preparation of a request may be
 *	performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
		 bool is_first_req)
{
	if (host->ops->pre_req) {
		mmc_host_clk_hold(host);
		host->ops->pre_req(host, mrq, is_first_req);
		mmc_host_clk_release(host);
	}
}
/**
 *	mmc_post_req - Post process a completed request
 *	@host: MMC host to post process command
 *	@mrq: MMC request to post process for
 *	@err: Error, if non-zero, clean up any resources made in pre_req
 *
 *	Let the host post process a completed request. Post processing of
 *	a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req) {
		mmc_host_clk_hold(host);
		host->ops->post_req(host, mrq, err);
		mmc_host_clk_release(host);
	}
}
/**
 *	mmc_start_req - start a non-blocking request
 *	@host: MMC host to start command
 *	@areq: async request to start
 *	@error: out parameter returns 0 for success, otherwise non zero
 *
 *	Start a new MMC custom command request for a host.
 *	If there is an ongoing async request, wait for completion
 *	of that request and start the new one and return.
 *	Does not wait for the new request to complete.
 *
 *	Returns the completed request, NULL in case of none completed.
 *	Wait for an ongoing request (previously started) to complete and
 *	return the completed request. If there is no ongoing request, NULL
 *	is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		mmc_wait_for_req_done(host, host->areq->mrq);
		err = host->areq->err_check(host->card, host->areq);
		/*
		 * Check BKOPS urgency for each R1 response
		 */
		if (host->card && mmc_card_mmc(host->card) &&
		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT))
			mmc_start_bkops(host->card, true);
	}

	if (!err && areq)
		start_err = __mmc_start_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	 /* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
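
/*
 * Example (illustrative sketch, not part of this file): keeping one
 * request in flight while the next one is prepared, roughly as the
 * block driver does. The example_* and next_rqc names are hypothetical.
 *
 *	struct mmc_async_req *done;
 *	int err;
 *
 *	done = mmc_start_req(card->host, &next_rqc->areq, &err);
 *	if (err)
 *		return example_recover(card, err);
 *	if (done)
 *		example_complete(done);
 *
 * Passing a NULL areq just waits for and returns the ongoing request,
 * which is how a caller drains the pipeline before e.g. an ioctl.
 */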
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
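
/*
 * Example (illustrative sketch, not part of this file): a synchronous
 * single-block read built on mmc_wait_for_req(). The helper name is
 * hypothetical; the caller must have claimed the host first.
 *
 *	static int example_read_block(struct mmc_card *card, void *buf,
 *				      unsigned int blk_addr)
 *	{
 *		struct mmc_request mrq = {NULL};
 *		struct mmc_command cmd = {0};
 *		struct mmc_data data = {0};
 *		struct scatterlist sg;
 *
 *		cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *		cmd.arg = blk_addr;
 *		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *		data.blksz = 512;
 *		data.blocks = 1;
 *		data.flags = MMC_DATA_READ;
 *		data.sg = &sg;
 *		data.sg_len = 1;
 *		sg_init_one(&sg, buf, 512);
 *
 *		mrq.cmd = &cmd;
 *		mrq.data = &data;
 *
 *		mmc_set_data_timeout(&data, card);
 *		mmc_wait_for_req(card->host, &mrq);
 *
 *		return cmd.error ? cmd.error : data.error;
 *	}
 */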
/**
 *	mmc_interrupt_hpi - Issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt, and poll the card status
 *	until it is out of the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	BUG_ON(!card);

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);
/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete. Return any error that occurred while the command
 *	was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
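
/*
 * Example (illustrative sketch, not part of this file): issuing
 * SEND_STATUS (CMD13) through mmc_wait_for_cmd(), much like the
 * helpers in mmc_ops.c do. The function name is hypothetical.
 *
 *	static int example_send_status(struct mmc_card *card, u32 *status)
 *	{
 *		struct mmc_command cmd = {0};
 *		int err;
 *
 *		cmd.opcode = MMC_SEND_STATUS;
 *		if (!mmc_host_is_spi(card->host))
 *			cmd.arg = card->rca << 16;
 *		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 *
 *		err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *		if (err)
 *			return err;
 *
 *		*status = cmd.resp[0];
 *		return 0;
 *	}
 */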
/**
 *	mmc_stop_bkops - stop ongoing BKOPS
 *	@card: MMC card to check BKOPS
 *
 *	Send HPI command to stop ongoing background operations to
 *	allow rapid servicing of foreground operations, e.g. read/
 *	writes. Wait until the card comes out of the programming state
 *	to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	BUG_ON(!card);
	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL, we can't issue an HPI.
	 * The card should complete the BKOPS on its own.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);
int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	/*
	 * In future work, we should consider storing the entire ext_csd.
	 */
	ext_csd = kmalloc(512, GFP_KERNEL);
	if (!ext_csd) {
		pr_err("%s: could not allocate buffer to receive the ext_csd.\n",
		       mmc_hostname(card->host));
		return -ENOMEM;
	}

	mmc_claim_host(card->host);
	err = mmc_send_ext_csd(card, ext_csd);
	mmc_release_host(card->host);
	if (err)
		goto out;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
out:
	kfree(ext_csd);
	return err;
}
EXPORT_SYMBOL(mmc_read_bkops_status);
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says: "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 300ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 300000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns =  100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
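
/*
 * Worked example (assumed CSD values, for illustration only): an SD card
 * reporting tacc_ns = 1500000 and tacc_clks = 0 gets, for a read,
 * mult = 100, so timeout_ns = 150000000 (150 ms). That exceeds the
 * 100 ms read limit_us above, so the timeout is clamped to 100 ms.
 */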
/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
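
/*
 * Example (illustrative): with the current 32-bit alignment rule,
 *
 *	mmc_align_data_size(card, 13);	returns 16
 *	mmc_align_data_size(card, 16);	returns 16 (unmodified)
 *
 * so an SDIO function driver can pad its buffer size before building
 * a single scatter-gather entry.
 */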
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non-NULL and
 *	dereferences to a non-zero value then this will return prematurely with
 *	that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (host->ops->enable && !stop && host->claim_cnt == 1)
		host->ops->enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);
/**
 *	mmc_try_claim_host - try exclusively to claim a host
 *	@host: mmc host to claim
 *
 *	Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
		host->ops->enable(host);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);
/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	if (host->ops->disable && host->claim_cnt == 1)
		host->ops->disable(host);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}

EXPORT_SYMBOL(mmc_release_host);
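
/*
 * Example (illustrative sketch): the claim/release pattern that callers
 * of the command helpers above follow; example_do_io() is hypothetical.
 *
 *	mmc_claim_host(card->host);
 *	err = example_do_io(card);
 *	mmc_release_host(card->host);
 *
 * Claims nest for the same thread (claim_cnt), so a helper that claims
 * the host may be called from a context that already holds the claim.
 */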
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}
/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}
#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		BUG_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		__mmc_set_clock(host, host->clk_old);
	}
}

void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif
/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}
/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
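
/*
 * Example (illustrative): a board whose slot is supplied with
 * 3.2V-3.4V would set
 *
 *	host->ocr_avail = mmc_vddrange_to_ocrmask(3200, 3400);
 *
 * which evaluates to MMC_VDD_31_32 | MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35, covering all boundary voltages of the range.
 */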
#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int vdd_uV;
		int vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		int tmp;
		int voltage;

		/*
		 * REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/*
		 * If we're using a fixed/static regulator, don't call
		 * regulator_set_voltage; it would fail.
		 */
		voltage = regulator_get_voltage(supply);

		if (regulator_count_voltages(supply) == 1)
			min_uV = max_uV = voltage;

		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	struct regulator *supply;
	int ret;

	supply = devm_regulator_get(dev, "vmmc");
	mmc->supply.vmmc = supply;
	mmc->supply.vqmmc = devm_regulator_get(dev, "vqmmc");

	if (IS_ERR(supply))
		return PTR_ERR(supply);

	ret = mmc_regulator_get_ocrmask(supply);
	if (ret > 0)
		mmc->ocr_avail = ret;
	else
		dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret);

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);

#endif /* CONFIG_REGULATOR */
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		mmc_host_clk_hold(host);
		host->ios.vdd = bit;
		mmc_set_ios(host);
		mmc_host_clk_release(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
{
	struct mmc_command cmd = {0};
	int err = 0;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
		cmd.opcode = SD_SWITCH_VOLTAGE;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			return err;

		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
			return -EIO;
	}

	host->ios.signal_voltage = signal_voltage;

	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}

	return err;
}
/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_host_clk_hold(host);

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/* Set signal voltage to 3.3V */
	mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}
void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_host_clk_hold(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}
/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}
/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
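
/*
 * Example (illustrative sketch, not part of this file): a host driver's
 * card-detect interrupt handler. The example_* names are hypothetical;
 * the delay debounces the mechanical switch before the rescan runs.
 *
 *	static irqreturn_t example_cd_irq(int irq, void *dev_id)
 *	{
 *		struct example_host *h = dev_id;
 *
 *		mmc_detect_change(h->mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */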
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become hugely over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
				          unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also a power of 2, but it does
	 * not seem to insist on that in the JEDEC standard, so we fall back to
	 * division in that case.  SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
out:
	return err;
}
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
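
/*
 * Example (illustrative sketch, not part of this file): discarding a
 * sector range the way the block layer's discard path does, checking
 * capability and alignment first. example_discard() is hypothetical.
 *
 *	static int example_discard(struct mmc_card *card,
 *				   unsigned int from, unsigned int nr)
 *	{
 *		unsigned int arg = MMC_ERASE_ARG;
 *		int err;
 *
 *		if (!mmc_can_erase(card))
 *			return -EOPNOTSUPP;
 *		if (mmc_can_trim(card))
 *			arg = MMC_TRIM_ARG;
 *		else if (!mmc_erase_group_aligned(card, from, nr))
 *			return -EINVAL;
 *
 *		mmc_claim_host(card->host);
 *		err = mmc_erase(card, from, nr, arg);
 *		mmc_release_host(card->host);
 *		return err;
 *	}
 */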
int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_discard_to)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change.  In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_discard_to);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	mmc_host_clk_hold(host);
	host->ops->hw_reset(host);
	mmc_host_clk_release(host);
}
int mmc_can_reset(struct mmc_card *card)
{
	u8 rst_n_function;

	if (!mmc_card_mmc(card))
		return 0;
	rst_n_function = card->ext_csd.rst_n_function;
	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_can_reset);
static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
	struct mmc_card *card = host->card;

	if (!host->bus_ops->power_restore)
		return -EOPNOTSUPP;

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return -EOPNOTSUPP;

	if (!card)
		return -EINVAL;

	if (!mmc_can_reset(card))
		return -EOPNOTSUPP;

	mmc_host_clk_hold(host);
	mmc_set_clock(host, host->f_init);

	host->ops->hw_reset(host);

	/* If the reset has happened, then a status command will fail */
	if (check) {
		struct mmc_command cmd = {0};
		int err;

		cmd.opcode = MMC_SEND_STATUS;
		if (!mmc_host_is_spi(card->host))
			cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (!err) {
			mmc_host_clk_release(host);
			return -ENOSYS;
		}
	}

	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	mmc_host_clk_release(host);

	return host->bus_ops->power_restore(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);

int mmc_hw_reset_check(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset sends CMD52 to reset card.  Since we do not know
	 * if the card is being re-initialized, just send it.  CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}
int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
		return 0;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);
	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) &&
	    !(host->caps2 & MMC_CAP2_DETECT_ON_ERR))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			mmc_detect_change(host, 0);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	/* If there is a non-removable card registered, only scan once */
	if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
		mmc_claim_host(host);
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	mmc_claim_host(host);
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	mmc_power_up(host);
	mmc_detect_change(host, 0);
}
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}
int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);

int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);
/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
		return err;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	unsigned int timeout;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
	    mmc_card_is_removable(host))
		return err;

	mmc_claim_host(host);
	if (card && mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0)) {
		enable = !!enable;

		if (card->ext_csd.cache_ctrl ^ enable) {
			timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_CACHE_CTRL, enable, timeout);
			if (err)
				pr_err("%s: cache %s error %d\n",
				       mmc_hostname(card->host),
				       enable ? "on" : "off",
				       err);
			else
				card->ext_csd.cache_ctrl = enable;
		}
	}
	mmc_release_host(host);

	return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);
#ifdef CONFIG_PM

/**
 *	mmc_suspend_host - suspend a host
 *	@host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();

	err = mmc_cache_ctrl(host, 0);
	if (err)
		goto out;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend) {
			if (mmc_card_doing_bkops(host->card)) {
				err = mmc_stop_bkops(host->card);
				if (err)
					goto out;
			}
			err = host->bus_ops->suspend(host);
		}

		if (err == -ENOSYS || !host->bus_ops->resume) {
			/*
			 * We simply "remove" the card in this case.
			 * It will be redetected on resume.  (Calling
			 * bus_ops->remove() with a claimed host can
			 * deadlock.)
			 */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_power_off(host);
			mmc_release_host(host);
			host->pm_flags = 0;
			err = 0;
		}
	}
	mmc_bus_put(host);

	if (!err && !mmc_card_keep_power(host))
		mmc_power_off(host);

out:
	return err;
}

EXPORT_SYMBOL(mmc_suspend_host);
/**
 *	mmc_resume_host - resume a previously suspended host
 *	@host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell runtime PM core we just powered up the card,
			 * since it still believes the card is powered off.
			 * Note that currently runtime PM is only enabled
			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			pr_warning("%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			err = 0;
		}
	}
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);
/*
 * Do the card removal on suspend if card is assumed removable.
 * Do that in the pm notifier while userspace isn't yet frozen,
 * so we will be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		if (host->card && mmc_card_mmc(host->card) &&
		    mmc_card_doing_bkops(host->card)) {
			err = mmc_stop_bkops(host->card);
			if (err) {
				pr_err("%s: didn't stop bkops\n",
					mmc_hostname(host));
				return err;
			}
			mmc_card_clr_doing_bkops(host->card);
		}

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);
		break;
	}

	return 0;
}
#endif /* CONFIG_PM */
static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}
static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}
subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");