2 * linux/drivers/mmc/core/core.c
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <linux/leds.h>
22 #include <linux/scatterlist.h>
23 #include <linux/log2.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/suspend.h>
27 #include <linux/fault-inject.h>
28 #include <linux/random.h>
29 #include <linux/slab.h>
30 #include <linux/wakelock.h>
32 #include <trace/events/mmc.h>
34 #include <linux/mmc/card.h>
35 #include <linux/mmc/host.h>
36 #include <linux/mmc/mmc.h>
37 #include <linux/mmc/sd.h>
48 /* If the device is not responding */
49 #define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
52 * Background operations can take a long time, depending on the housekeeping
53 * operations the card has to perform.
55 #define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
57 static struct workqueue_struct *workqueue;
58 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
61 * Enabling software CRCs on the data blocks can be a significant (30%)
62 * performance cost, and for other reasons may not always be desired.
63 * So we allow it to be disabled.
66 module_param(use_spi_crc, bool, 0);
69 * We normally treat cards as removed during suspend if they are not
70 * known to be on a non-removable bus, to avoid the risk of writing
71 * back data to a different card after resume. Allow this to be
72 * overridden if necessary.
74 #ifdef CONFIG_MMC_UNSAFE_RESUME
75 bool mmc_assume_removable;
77 bool mmc_assume_removable = 1;
79 EXPORT_SYMBOL(mmc_assume_removable);
80 module_param_named(removable, mmc_assume_removable, bool, 0644);
83 "MMC/SD cards are removable and may be removed during suspend");
86 * Internal function. Schedule delayed work in the MMC work queue.
88 static int mmc_schedule_delayed_work(struct delayed_work *work,
91 return queue_delayed_work(workqueue, work, delay);
95 * Internal function. Flush all scheduled work from the MMC work queue.
97 static void mmc_flush_scheduled_work(void)
99 flush_workqueue(workqueue);
102 #ifdef CONFIG_FAIL_MMC_REQUEST
105 * Internal function. Inject random data errors.
106 * If mmc_data is NULL no errors are injected.
108 static void mmc_should_fail_request(struct mmc_host *host,
109 struct mmc_request *mrq)
111 struct mmc_command *cmd = mrq->cmd;
112 struct mmc_data *data = mrq->data;
113 static const int data_errors[] = {
122 if (cmd->error || data->error ||
123 !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
126 data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
127 data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
130 #else /* CONFIG_FAIL_MMC_REQUEST */
132 static inline void mmc_should_fail_request(struct mmc_host *host,
133 struct mmc_request *mrq)
137 #endif /* CONFIG_FAIL_MMC_REQUEST */
140 * mmc_request_done - finish processing an MMC request
141 * @host: MMC host which completed request
142 * @mrq: MMC request which completed
144 * MMC drivers should call this function when they have completed
145 * their processing of a request.
147 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
149 struct mmc_command *cmd = mrq->cmd;
150 int err = cmd->error;
152 if (err && cmd->retries && mmc_host_is_spi(host)) {
153 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
157 if (err && cmd->retries && !mmc_card_removed(host->card)) {
159 * Request starter must handle retries - see
160 * mmc_wait_for_req_done().
165 mmc_should_fail_request(host, mrq);
167 led_trigger_event(host->led, LED_OFF);
169 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
170 mmc_hostname(host), cmd->opcode, err,
171 cmd->resp[0], cmd->resp[1],
172 cmd->resp[2], cmd->resp[3]);
175 pr_debug("%s: %d bytes transferred: %d\n",
177 mrq->data->bytes_xfered, mrq->data->error);
178 trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
182 pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
183 mmc_hostname(host), mrq->stop->opcode,
185 mrq->stop->resp[0], mrq->stop->resp[1],
186 mrq->stop->resp[2], mrq->stop->resp[3]);
192 mmc_host_clk_release(host);
196 EXPORT_SYMBOL(mmc_request_done);
199 mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
201 #ifdef CONFIG_MMC_DEBUG
203 struct scatterlist *sg;
207 pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
208 mmc_hostname(host), mrq->sbc->opcode,
209 mrq->sbc->arg, mrq->sbc->flags);
212 pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
213 mmc_hostname(host), mrq->cmd->opcode,
214 mrq->cmd->arg, mrq->cmd->flags);
217 pr_debug("%s: blksz %d blocks %d flags %08x "
218 "tsac %d ms nsac %d\n",
219 mmc_hostname(host), mrq->data->blksz,
220 mrq->data->blocks, mrq->data->flags,
221 mrq->data->timeout_ns / 1000000,
222 mrq->data->timeout_clks);
226 pr_debug("%s: CMD%u arg %08x flags %08x\n",
227 mmc_hostname(host), mrq->stop->opcode,
228 mrq->stop->arg, mrq->stop->flags);
231 WARN_ON(!host->claimed);
236 BUG_ON(mrq->data->blksz > host->max_blk_size);
237 BUG_ON(mrq->data->blocks > host->max_blk_count);
238 BUG_ON(mrq->data->blocks * mrq->data->blksz >
241 #ifdef CONFIG_MMC_DEBUG
243 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
245 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
248 mrq->cmd->data = mrq->data;
249 mrq->data->error = 0;
250 mrq->data->mrq = mrq;
252 mrq->data->stop = mrq->stop;
253 mrq->stop->error = 0;
254 mrq->stop->mrq = mrq;
257 mmc_host_clk_hold(host);
258 led_trigger_event(host->led, LED_FULL);
259 host->ops->request(host, mrq);
263 * mmc_start_bkops - start BKOPS for supported cards
264 * @card: MMC card to start BKOPS
265 * @from_exception: A flag to indicate if this function was
266 * called due to an exception raised by the card
268 * Start background operations whenever requested.
269 * When the urgent BKOPS bit is set in a R1 command response
270 * then background operations should be started immediately.
272 void mmc_start_bkops(struct mmc_card *card, bool from_exception)
276 bool use_busy_signal;
280 if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card))
283 err = mmc_read_bkops_status(card);
285 pr_err("%s: Failed to read bkops status: %d\n",
286 mmc_hostname(card->host), err);
290 if (!card->ext_csd.raw_bkops_status)
293 if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
297 mmc_claim_host(card->host);
298 if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
299 timeout = MMC_BKOPS_MAX_TIMEOUT;
300 use_busy_signal = true;
303 use_busy_signal = false;
306 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
307 EXT_CSD_BKOPS_START, 1, timeout, use_busy_signal);
309 pr_warn("%s: Error %d starting bkops\n",
310 mmc_hostname(card->host), err);
315 * For urgent BKOPS status (LEVEL_2 and higher) the operation
316 * is executed synchronously; otherwise the operation is
317 * left in progress in the background
319 if (!use_busy_signal)
320 mmc_card_set_doing_bkops(card);
322 mmc_release_host(card->host);
324 EXPORT_SYMBOL(mmc_start_bkops);
327 * mmc_wait_data_done() - done callback for data request
328 * @mrq: done data request
330 * Wakes up mmc context, passed as a callback to host controller driver
332 static void mmc_wait_data_done(struct mmc_request *mrq)
334 mrq->host->context_info.is_done_rcv = true;
335 wake_up_interruptible(&mrq->host->context_info.wait);
338 static void mmc_wait_done(struct mmc_request *mrq)
340 complete(&mrq->completion);
344 * __mmc_start_data_req() - starts a data request
345 * @host: MMC host to start the request
346 * @mrq: data request to start
348 * Sets the done callback to be called when request is completed by the card.
349 * Starts data mmc request execution
351 static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
353 mrq->done = mmc_wait_data_done;
355 if (mmc_card_removed(host->card)) {
356 mrq->cmd->error = -ENOMEDIUM;
357 mmc_wait_data_done(mrq);
360 mmc_start_request(host, mrq);
365 static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
367 init_completion(&mrq->completion);
368 mrq->done = mmc_wait_done;
369 if (mmc_card_removed(host->card)) {
370 mrq->cmd->error = -ENOMEDIUM;
371 complete(&mrq->completion);
374 mmc_start_request(host, mrq);
379 * mmc_wait_for_data_req_done() - wait for request completed
380 * @host: MMC host to prepare the command.
381 * @mrq: MMC request to wait for
383 * Blocks the MMC context until the host controller acknowledges the end of
384 * data request execution or a new request notification arrives from the block layer.
385 * Handles command retries.
387 * Returns enum mmc_blk_status after checking errors.
389 static int mmc_wait_for_data_req_done(struct mmc_host *host,
390 struct mmc_request *mrq,
391 struct mmc_async_req *next_req)
393 struct mmc_command *cmd;
394 struct mmc_context_info *context_info = &host->context_info;
399 wait_event_interruptible(context_info->wait,
400 (context_info->is_done_rcv ||
401 context_info->is_new_req));
402 spin_lock_irqsave(&context_info->lock, flags);
403 context_info->is_waiting_last_req = false;
404 spin_unlock_irqrestore(&context_info->lock, flags);
405 if (context_info->is_done_rcv) {
406 context_info->is_done_rcv = false;
407 context_info->is_new_req = false;
409 if (!cmd->error || !cmd->retries ||
410 mmc_card_removed(host->card)) {
411 err = host->areq->err_check(host->card,
413 break; /* return err */
415 pr_info("%s: req failed (CMD%u): %d, retrying...\n",
417 cmd->opcode, cmd->error);
420 host->ops->request(host, mrq);
421 continue; /* wait for done/new event again */
423 } else if (context_info->is_new_req) {
424 context_info->is_new_req = false;
426 err = MMC_BLK_NEW_REQUEST;
427 break; /* return err */
434 static void mmc_wait_for_req_done(struct mmc_host *host,
435 struct mmc_request *mrq)
437 struct mmc_command *cmd;
440 wait_for_completion(&mrq->completion);
443 if (!cmd->error || !cmd->retries ||
444 mmc_card_removed(host->card))
447 pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
448 mmc_hostname(host), cmd->opcode, cmd->error);
451 host->ops->request(host, mrq);
456 * mmc_pre_req - Prepare for a new request
457 * @host: MMC host to prepare command
458 * @mrq: MMC request to prepare for
459 * @is_first_req: true if there is no previously started request
460 * that may run in parallel to this call, otherwise false
462 * mmc_pre_req() is called prior to mmc_start_req() to let
463 * host prepare for the new request. Preparation of a request may be
464 * performed while another request is running on the host.
466 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
469 if (host->ops->pre_req) {
470 mmc_host_clk_hold(host);
471 host->ops->pre_req(host, mrq, is_first_req);
472 mmc_host_clk_release(host);
477 * mmc_post_req - Post process a completed request
478 * @host: MMC host to post process command
479 * @mrq: MMC request to post process for
480 * @err: Error; if non-zero, clean up any resources allocated in pre_req
482 * Let the host post process a completed request. Post processing of
483 * a request may be performed while another request is running.
485 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
488 if (host->ops->post_req) {
489 mmc_host_clk_hold(host);
490 host->ops->post_req(host, mrq, err);
491 mmc_host_clk_release(host);
496 * mmc_start_req - start a non-blocking request
497 * @host: MMC host to start command
498 * @areq: async request to start
499 * @error: out parameter; returns 0 for success, otherwise non-zero
501 * Start a new MMC custom command request for a host.
502 * If there is an ongoing async request, wait for completion
503 * of that request, then start the new one and return.
504 * Does not wait for the new request to complete.
506 * Returns the completed request, or NULL if none completed.
507 * Waits for an ongoing request (previously started) to complete and
508 * returns the completed request. If there is no ongoing request, NULL
509 * is returned without waiting. NULL is not an error condition.
511 struct mmc_async_req *mmc_start_req(struct mmc_host *host,
512 struct mmc_async_req *areq, int *error)
516 struct mmc_async_req *data = host->areq;
518 /* Prepare a new request */
520 mmc_pre_req(host, areq->mrq, !host->areq);
523 err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
524 if (err == MMC_BLK_NEW_REQUEST) {
528 * The previous request was not completed, nothing to return
534 * Check BKOPS urgency for each R1 response
536 if (host->card && mmc_card_mmc(host->card) &&
537 ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
538 (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
539 (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT))
540 mmc_start_bkops(host->card, true);
544 trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
547 start_err = __mmc_start_data_req(host, areq->mrq);
551 mmc_post_req(host, host->areq->mrq, 0);
553 /* Cancel a prepared request if it was not started. */
554 if ((err || start_err) && areq)
555 mmc_post_req(host, areq->mrq, -EINVAL);
566 EXPORT_SYMBOL(mmc_start_req);
569 * mmc_wait_for_req - start a request and wait for completion
570 * @host: MMC host to start command
571 * @mrq: MMC request to start
573 * Start a new MMC custom command request for a host, and wait
574 * for the command to complete. Does not attempt to parse the response.
577 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
579 __mmc_start_req(host, mrq);
580 mmc_wait_for_req_done(host, mrq);
582 EXPORT_SYMBOL(mmc_wait_for_req);
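/*
 * Illustrative sketch, not part of the original file: one way a caller that
 * has already claimed the host could issue a single command through
 * mmc_wait_for_req(). The helper name, opcode and argument are placeholders.
 */
static int example_send_simple_cmd(struct mmc_host *host, u32 opcode, u32 arg)
{
	struct mmc_command cmd = {0};
	struct mmc_request mrq = {NULL};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	mrq.cmd = &cmd;

	/* Blocks until the host controller reports the request as done. */
	mmc_wait_for_req(host, &mrq);

	return cmd.error;
}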
585 * mmc_interrupt_hpi - issue a High Priority Interrupt
586 * @card: the MMC card associated with the HPI transfer
588 * Issues a High Priority Interrupt and polls the card status
589 * until it is out of the programming state.
591 int mmc_interrupt_hpi(struct mmc_card *card)
595 unsigned long prg_wait;
599 if (!card->ext_csd.hpi_en) {
600 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
604 mmc_claim_host(card->host);
605 err = mmc_send_status(card, &status);
607 pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
611 switch (R1_CURRENT_STATE(status)) {
617 * In idle and transfer states, HPI is not needed and the caller
618 * can issue the next intended command immediately
624 /* In all other states, it's illegal to issue HPI */
625 pr_debug("%s: HPI cannot be sent. Card state=%d\n",
626 mmc_hostname(card->host), R1_CURRENT_STATE(status));
631 err = mmc_send_hpi_cmd(card, &status);
635 prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
637 err = mmc_send_status(card, &status);
639 if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
641 if (time_after(jiffies, prg_wait))
646 mmc_release_host(card->host);
649 EXPORT_SYMBOL(mmc_interrupt_hpi);
652 * mmc_wait_for_cmd - start a command and wait for completion
653 * @host: MMC host to start command
654 * @cmd: MMC command to start
655 * @retries: maximum number of retries
657 * Start a new MMC command for a host, and wait for the command
658 * to complete. Return any error that occurred while the command
659 * was executing. Do not attempt to parse the response.
661 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
663 struct mmc_request mrq = {NULL};
665 WARN_ON(!host->claimed);
667 memset(cmd->resp, 0, sizeof(cmd->resp));
668 cmd->retries = retries;
673 mmc_wait_for_req(host, &mrq);
678 EXPORT_SYMBOL(mmc_wait_for_cmd);
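/*
 * Illustrative sketch, not part of the original file: reading the card status
 * with mmc_wait_for_cmd(). This mirrors what mmc_send_status() does elsewhere
 * in the core; the helper name here is hypothetical.
 */
static int example_read_card_status(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 3);	/* up to 3 retries */
	if (err)
		return err;

	*status = cmd.resp[0];
	return 0;
}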
681 * mmc_stop_bkops - stop ongoing BKOPS
682 * @card: MMC card to check BKOPS
684 * Send HPI command to stop ongoing background operations to
685 * allow rapid servicing of foreground operations, e.g. read/
686 * writes. Wait until the card comes out of the programming state
687 * to avoid errors in servicing read/write requests.
689 int mmc_stop_bkops(struct mmc_card *card)
694 err = mmc_interrupt_hpi(card);
697 * If err is -EINVAL, we can't issue an HPI; the card should then
698 * complete the BKOPS on its own.
700 if (!err || (err == -EINVAL)) {
701 mmc_card_clr_doing_bkops(card);
707 EXPORT_SYMBOL(mmc_stop_bkops);
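/*
 * Illustrative sketch, not part of the original file: how a consumer might
 * interrupt background operations before issuing urgent foreground I/O.
 * The helper name is hypothetical; claiming is handled inside the HPI path.
 */
static int example_interrupt_bkops_for_io(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_doing_bkops(card))
		err = mmc_stop_bkops(card);

	return err;
}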
709 int mmc_read_bkops_status(struct mmc_card *card)
715 * In future work, we should consider storing the entire ext_csd.
717 ext_csd = kmalloc(512, GFP_KERNEL);
719 pr_err("%s: could not allocate buffer to receive the ext_csd.\n",
720 mmc_hostname(card->host));
724 mmc_claim_host(card->host);
725 err = mmc_send_ext_csd(card, ext_csd);
726 mmc_release_host(card->host);
730 card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
731 card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
736 EXPORT_SYMBOL(mmc_read_bkops_status);
739 * mmc_set_data_timeout - set the timeout for a data command
740 * @data: data phase for command
741 * @card: the MMC card associated with the data transfer
743 * Computes the data timeout parameters according to the
744 * correct algorithm given the card type.
746 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
751 * SDIO cards only define an upper 1 s limit on access.
753 if (mmc_card_sdio(card)) {
754 data->timeout_ns = 1000000000;
755 data->timeout_clks = 0;
760 * SD cards use a 100 multiplier rather than 10
762 mult = mmc_card_sd(card) ? 100 : 10;
765 * Scale up the multiplier (and therefore the timeout) by
766 * the r2w factor for writes.
768 if (data->flags & MMC_DATA_WRITE)
769 mult <<= card->csd.r2w_factor;
771 data->timeout_ns = card->csd.tacc_ns * mult;
772 data->timeout_clks = card->csd.tacc_clks * mult;
775 * SD cards also have an upper limit on the timeout.
777 if (mmc_card_sd(card)) {
778 unsigned int timeout_us, limit_us;
780 timeout_us = data->timeout_ns / 1000;
781 if (mmc_host_clk_rate(card->host))
782 timeout_us += data->timeout_clks * 1000 /
783 (mmc_host_clk_rate(card->host) / 1000);
785 if (data->flags & MMC_DATA_WRITE)
787 * The MMC spec says: "It is strongly recommended
788 * for hosts to implement more than 500ms
789 * timeout value even if the card indicates
790 * the 250ms maximum busy length." Even the
791 * previous value of 300ms is known to be
792 * insufficient for some cards.
799 * SDHC cards always use these fixed values.
801 if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
802 data->timeout_ns = limit_us * 1000;
803 data->timeout_clks = 0;
808 * Some cards require longer data read timeout than indicated in CSD.
809 * Address this by setting the read timeout to a "reasonably high"
810 * value. For the cards tested, 300ms has proven enough. If necessary,
811 * this value can be increased if other problematic cards require this.
813 if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
814 data->timeout_ns = 300000000;
815 data->timeout_clks = 0;
819 * Some cards need very high timeouts if driven in SPI mode.
820 * The worst observed timeout was 900ms after writing a
821 * continuous stream of data until the internal logic overflowed.
824 if (mmc_host_is_spi(card->host)) {
825 if (data->flags & MMC_DATA_WRITE) {
826 if (data->timeout_ns < 1000000000)
827 data->timeout_ns = 1000000000; /* 1s */
829 if (data->timeout_ns < 100000000)
830 data->timeout_ns = 100000000; /* 100ms */
834 EXPORT_SYMBOL(mmc_set_data_timeout);
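/*
 * Illustrative sketch, not part of the original file: setting up a
 * single-block read and letting mmc_set_data_timeout() derive the data
 * timeout from the card's CSD before the request is issued. The helper name
 * is hypothetical and error handling is reduced to the bare minimum.
 */
static int example_read_one_block(struct mmc_card *card, u32 blk_addr, void *buf)
{
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;

	cmd.opcode = MMC_READ_SINGLE_BLOCK;
	/* Block addressing for high-capacity cards, byte addressing otherwise. */
	cmd.arg = mmc_card_blockaddr(card) ? blk_addr : blk_addr * 512;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 512;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	sg_init_one(&sg, buf, 512);
	data.sg = &sg;
	data.sg_len = 1;

	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	mmc_wait_for_req(card->host, &mrq);

	return cmd.error ? cmd.error : data.error;
}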
837 * mmc_align_data_size - pads a transfer size to a more optimal value
838 * @card: the MMC card associated with the data transfer
839 * @sz: original transfer size
841 * Pads the original data size with a number of extra bytes in
842 * order to avoid controller bugs and/or performance hits
843 * (e.g. some controllers revert to PIO for certain sizes).
845 * Returns the improved size, which might be unmodified.
847 * Note that this function is only relevant when issuing a
848 * single scatter gather entry.
850 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
853 * FIXME: We don't have a system for the controller to tell
854 * the core about its problems yet, so for now we just 32-bit align the size.
857 sz = ((sz + 3) / 4) * 4;
861 EXPORT_SYMBOL(mmc_align_data_size);
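/*
 * Illustrative sketch, not part of the original file: a caller building a
 * single scatter-gather entry can pad its buffer size with
 * mmc_align_data_size() before allocating, e.g. 13 bytes becomes 16.
 * The helper name is hypothetical.
 */
static void *example_alloc_aligned(struct mmc_card *card, unsigned int len,
				   unsigned int *padded_len)
{
	*padded_len = mmc_align_data_size(card, len);
	return kmalloc(*padded_len, GFP_KERNEL);
}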
864 * __mmc_claim_host - exclusively claim a host
865 * @host: mmc host to claim
866 * @abort: whether or not the operation should be aborted
868 * Claim a host for a set of operations. If @abort is non-NULL and
869 * dereferences to a non-zero value, then this will return prematurely with
870 * that non-zero value without acquiring the lock. Returns zero
871 * with the lock held otherwise.
873 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
875 DECLARE_WAITQUEUE(wait, current);
881 add_wait_queue(&host->wq, &wait);
882 spin_lock_irqsave(&host->lock, flags);
884 set_current_state(TASK_UNINTERRUPTIBLE);
885 stop = abort ? atomic_read(abort) : 0;
886 if (stop || !host->claimed || host->claimer == current)
888 spin_unlock_irqrestore(&host->lock, flags);
890 spin_lock_irqsave(&host->lock, flags);
892 set_current_state(TASK_RUNNING);
895 host->claimer = current;
896 host->claim_cnt += 1;
899 spin_unlock_irqrestore(&host->lock, flags);
900 remove_wait_queue(&host->wq, &wait);
901 if (host->ops->enable && !stop && host->claim_cnt == 1)
902 host->ops->enable(host);
906 EXPORT_SYMBOL(__mmc_claim_host);
909 * mmc_try_claim_host - try exclusively to claim a host
910 * @host: mmc host to claim
912 * Returns %1 if the host is claimed, %0 otherwise.
914 int mmc_try_claim_host(struct mmc_host *host)
916 int claimed_host = 0;
919 spin_lock_irqsave(&host->lock, flags);
920 if (!host->claimed || host->claimer == current) {
922 host->claimer = current;
923 host->claim_cnt += 1;
926 spin_unlock_irqrestore(&host->lock, flags);
927 if (host->ops->enable && claimed_host && host->claim_cnt == 1)
928 host->ops->enable(host);
931 EXPORT_SYMBOL(mmc_try_claim_host);
934 * mmc_release_host - release a host
935 * @host: mmc host to release
937 * Release an MMC host, allowing others to claim the host
938 * for their operations.
940 void mmc_release_host(struct mmc_host *host)
944 WARN_ON(!host->claimed);
946 if (host->ops->disable && host->claim_cnt == 1)
947 host->ops->disable(host);
949 spin_lock_irqsave(&host->lock, flags);
950 if (--host->claim_cnt) {
951 /* Release for nested claim */
952 spin_unlock_irqrestore(&host->lock, flags);
955 host->claimer = NULL;
956 spin_unlock_irqrestore(&host->lock, flags);
960 EXPORT_SYMBOL(mmc_release_host);
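/*
 * Illustrative sketch, not part of the original file: the usual
 * claim/operate/release pattern around a card access. mmc_claim_host() is
 * the non-abortable wrapper around __mmc_claim_host(). The helper name is
 * hypothetical.
 */
static int example_do_claimed_op(struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);
	err = mmc_set_blocklen(card, 512);	/* any claimed-host operation */
	mmc_release_host(card->host);

	return err;
}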
963 * Internal function that does the actual ios call to the host driver,
964 * optionally printing some debug output.
966 static inline void mmc_set_ios(struct mmc_host *host)
968 struct mmc_ios *ios = &host->ios;
970 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
971 "width %u timing %u\n",
972 mmc_hostname(host), ios->clock, ios->bus_mode,
973 ios->power_mode, ios->chip_select, ios->vdd,
974 ios->bus_width, ios->timing);
977 mmc_set_ungated(host);
978 host->ops->set_ios(host, ios);
982 * Control chip select pin on a host.
984 void mmc_set_chip_select(struct mmc_host *host, int mode)
986 mmc_host_clk_hold(host);
987 host->ios.chip_select = mode;
989 mmc_host_clk_release(host);
993 * Sets the host clock to the highest possible frequency that does not exceed "hz".
996 static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
998 WARN_ON(hz < host->f_min);
1000 if (hz > host->f_max)
1003 host->ios.clock = hz;
1007 void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1009 mmc_host_clk_hold(host);
1010 __mmc_set_clock(host, hz);
1011 mmc_host_clk_release(host);
1014 #ifdef CONFIG_MMC_CLKGATE
1016 * This gates the clock by setting it to 0 Hz.
1018 void mmc_gate_clock(struct mmc_host *host)
1020 unsigned long flags;
1022 spin_lock_irqsave(&host->clk_lock, flags);
1023 host->clk_old = host->ios.clock;
1024 host->ios.clock = 0;
1025 host->clk_gated = true;
1026 spin_unlock_irqrestore(&host->clk_lock, flags);
1031 * This restores the clock from gating by using the cached clock value.
1034 void mmc_ungate_clock(struct mmc_host *host)
1037 * We should previously have gated the clock, so the clock shall
1038 * be 0 here! The clock may however be 0 during initialization,
1039 * when some request operations are performed before setting
1040 * the frequency. When ungate is requested in that situation
1041 * we just ignore the call.
1043 if (host->clk_old) {
1044 BUG_ON(host->ios.clock);
1045 /* This call will also set host->clk_gated to false */
1046 __mmc_set_clock(host, host->clk_old);
1050 void mmc_set_ungated(struct mmc_host *host)
1052 unsigned long flags;
1055 * We've been given a new frequency while the clock is gated,
1056 * so make sure we regard this as ungating it.
1058 spin_lock_irqsave(&host->clk_lock, flags);
1059 host->clk_gated = false;
1060 spin_unlock_irqrestore(&host->clk_lock, flags);
1064 void mmc_set_ungated(struct mmc_host *host)
1070 * Change the bus mode (open drain/push-pull) of a host.
1072 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1074 mmc_host_clk_hold(host);
1075 host->ios.bus_mode = mode;
1077 mmc_host_clk_release(host);
1081 * Change data bus width of a host.
1083 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1085 mmc_host_clk_hold(host);
1086 host->ios.bus_width = width;
1088 mmc_host_clk_release(host);
1092 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1093 * @vdd: voltage (mV)
1094 * @low_bits: prefer low bits in boundary cases
1096 * This function returns the OCR bit number according to the provided @vdd
1097 * value. If conversion is not possible, a negative errno value is returned.
1099 * Depending on the @low_bits flag the function prefers low or high OCR bits
1100 * on boundary voltages. For example,
1101 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1102 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1104 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
1106 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1108 const int max_bit = ilog2(MMC_VDD_35_36);
1111 if (vdd < 1650 || vdd > 3600)
1114 if (vdd >= 1650 && vdd <= 1950)
1115 return ilog2(MMC_VDD_165_195);
1120 /* Base 2000 mV, step 100 mV, bit's base 8. */
1121 bit = (vdd - 2000) / 100 + 8;
1128 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1129 * @vdd_min: minimum voltage value (mV)
1130 * @vdd_max: maximum voltage value (mV)
1132 * This function returns the OCR mask bits according to the provided @vdd_min
1133 * and @vdd_max values. If conversion is not possible the function returns 0.
1135 * Notes wrt boundary cases:
1136 * This function sets the OCR bits for all boundary voltages, for example
1137 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1138 * MMC_VDD_34_35 mask.
1140 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1144 if (vdd_max < vdd_min)
1147 /* Prefer high bits for the boundary vdd_max values. */
1148 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1152 /* Prefer low bits for the boundary vdd_min values. */
1153 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1157 /* Fill the mask, from max bit to min bit. */
1158 while (vdd_max >= vdd_min)
1159 mask |= 1 << vdd_max--;
1163 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
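/*
 * Illustrative sketch, not part of the original file: a host driver supplied
 * by a fixed 3.3V-3.4V rail could derive its OCR mask like this during probe
 * (yielding MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35). The helper name
 * is hypothetical.
 */
static void example_set_fixed_ocr(struct mmc_host *host)
{
	host->ocr_avail = mmc_vddrange_to_ocrmask(3300, 3400);
}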
1165 #ifdef CONFIG_REGULATOR
1168 * mmc_regulator_get_ocrmask - return mask of supported voltages
1169 * @supply: regulator to use
1171 * This returns either a negative errno, or a mask of voltages that
1172 * can be provided to MMC/SD/SDIO devices using the specified voltage
1173 * regulator. This would normally be called before registering the MMC host.
1176 int mmc_regulator_get_ocrmask(struct regulator *supply)
1182 count = regulator_count_voltages(supply);
1186 for (i = 0; i < count; i++) {
1190 vdd_uV = regulator_list_voltage(supply, i);
1194 vdd_mV = vdd_uV / 1000;
1195 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1200 EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1203 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1204 * @mmc: the host to regulate
1205 * @supply: regulator to use
1206 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1208 * Returns zero on success, else negative errno.
1210 * MMC host drivers may use this to enable or disable a regulator using
1211 * a particular supply voltage. This would normally be called from the set_ios() method.
1214 int mmc_regulator_set_ocr(struct mmc_host *mmc,
1215 struct regulator *supply,
1216 unsigned short vdd_bit)
1226 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1227 * bits this regulator doesn't quite support ... don't
1228 * be too picky, most cards and regulators are OK with
1229 * a 0.1V range goof (it's a small error percentage).
1231 tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1233 min_uV = 1650 * 1000;
1234 max_uV = 1950 * 1000;
1236 min_uV = 1900 * 1000 + tmp * 100 * 1000;
1237 max_uV = min_uV + 100 * 1000;
1241 * If we're using a fixed/static regulator, don't call
1242 * regulator_set_voltage; it would fail.
1244 voltage = regulator_get_voltage(supply);
1246 if (!regulator_can_change_voltage(supply))
1247 min_uV = max_uV = voltage;
1251 else if (voltage < min_uV || voltage > max_uV)
1252 result = regulator_set_voltage(supply, min_uV, max_uV);
1256 if (result == 0 && !mmc->regulator_enabled) {
1257 result = regulator_enable(supply);
1259 mmc->regulator_enabled = true;
1261 } else if (mmc->regulator_enabled) {
1262 result = regulator_disable(supply);
1264 mmc->regulator_enabled = false;
1268 dev_err(mmc_dev(mmc),
1269 "could not set regulator OCR (%d)\n", result);
1272 EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
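/*
 * Illustrative sketch, not part of the original file: a host driver's
 * set_ios() callback could forward the requested VDD bit to its vmmc
 * regulator like this. The callback name is hypothetical.
 */
static void example_host_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

	/* ... clock, bus width and timing handling would follow here ... */
}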
1274 int mmc_regulator_get_supply(struct mmc_host *mmc)
1276 struct device *dev = mmc_dev(mmc);
1277 struct regulator *supply;
1280 supply = devm_regulator_get(dev, "vmmc");
1281 mmc->supply.vmmc = supply;
1282 mmc->supply.vqmmc = devm_regulator_get(dev, "vqmmc");
1285 return PTR_ERR(supply);
1287 ret = mmc_regulator_get_ocrmask(supply);
1289 mmc->ocr_avail = ret;
1291 dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret);
1295 EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
1297 #endif /* CONFIG_REGULATOR */
1300 * Mask off any voltages we don't support and select
1301 * the lowest voltage
1303 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1307 ocr &= host->ocr_avail;
1315 mmc_host_clk_hold(host);
1316 host->ios.vdd = bit;
1318 mmc_host_clk_release(host);
1320 pr_warning("%s: host doesn't support card's voltages\n",
1321 mmc_hostname(host));
1328 int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1331 int old_signal_voltage = host->ios.signal_voltage;
1333 host->ios.signal_voltage = signal_voltage;
1334 if (host->ops->start_signal_voltage_switch) {
1335 mmc_host_clk_hold(host);
1336 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1337 mmc_host_clk_release(host);
1341 host->ios.signal_voltage = old_signal_voltage;
1347 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1349 struct mmc_command cmd = {0};
1356 * Send CMD11 only if the request is to switch the card to 1.8V signalling.
1359 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1360 return __mmc_set_signal_voltage(host, signal_voltage);
1363 * If we cannot switch voltages, return failure so the caller
1364 * can continue without UHS mode
1366 if (!host->ops->start_signal_voltage_switch)
1368 if (!host->ops->card_busy)
1369 pr_warning("%s: cannot verify signal voltage switch\n",
1370 mmc_hostname(host));
1372 cmd.opcode = SD_SWITCH_VOLTAGE;
1374 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1376 err = mmc_wait_for_cmd(host, &cmd, 0);
1380 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
1383 mmc_host_clk_hold(host);
1385 * The card should drive cmd and dat[0:3] low immediately
1386 * after the response of cmd11, but wait 1 ms to be sure
1389 if (host->ops->card_busy && !host->ops->card_busy(host)) {
1394 * During a signal voltage level switch, the clock must be gated
1395 * for 5 ms according to the SD spec
1397 clock = host->ios.clock;
1398 host->ios.clock = 0;
1401 if (__mmc_set_signal_voltage(host, signal_voltage)) {
1403 * Voltages may not have been switched, but we've already
1404 * sent CMD11, so a power cycle is required anyway
1410 /* Keep clock gated for at least 5 ms */
1412 host->ios.clock = clock;
1415 /* Wait for at least 1 ms according to spec */
1419 * Failure to switch is indicated by the card holding dat[0:3] low.
1422 if (host->ops->card_busy && host->ops->card_busy(host))
1427 pr_debug("%s: Signal voltage switch failed, "
1428 "power cycling card\n", mmc_hostname(host));
1429 mmc_power_cycle(host);
1432 mmc_host_clk_release(host);
1438 * Select timing parameters for host.
1440 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1442 mmc_host_clk_hold(host);
1443 host->ios.timing = timing;
1445 mmc_host_clk_release(host);
1449 * Select appropriate driver type for host.
1451 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1453 mmc_host_clk_hold(host);
1454 host->ios.drv_type = drv_type;
1456 mmc_host_clk_release(host);
1460 * Apply power to the MMC stack. This is a two-stage process.
1461 * First, we enable power to the card without the clock running.
1462 * We then wait a bit for the power to stabilise. Finally,
1463 * enable the bus drivers and clock to the card.
1465 * We must _NOT_ enable the clock prior to power stabilising.
1467 * If a host does all the power sequencing itself, ignore the
1468 * initial MMC_POWER_UP stage.
1470 static void mmc_power_up(struct mmc_host *host)
1474 if (host->ios.power_mode == MMC_POWER_ON)
1477 mmc_host_clk_hold(host);
1479 /* If ocr is set, we use it */
1481 bit = ffs(host->ocr) - 1;
1483 bit = fls(host->ocr_avail) - 1;
1485 host->ios.vdd = bit;
1486 if (mmc_host_is_spi(host))
1487 host->ios.chip_select = MMC_CS_HIGH;
1489 host->ios.chip_select = MMC_CS_DONTCARE;
1490 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1491 host->ios.power_mode = MMC_POWER_UP;
1492 host->ios.bus_width = MMC_BUS_WIDTH_1;
1493 host->ios.timing = MMC_TIMING_LEGACY;
1496 /* Set signal voltage to 3.3V */
1497 __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
1500 * This delay should be sufficient to allow the power supply
1501 * to reach the minimum voltage.
1505 host->ios.clock = host->f_init;
1507 host->ios.power_mode = MMC_POWER_ON;
1511 * This delay must be at least 74 clock cycles, or 1 ms, or the
1512 * time required to reach a stable voltage.
1516 mmc_host_clk_release(host);
1519 void mmc_power_off(struct mmc_host *host)
1521 if (host->ios.power_mode == MMC_POWER_OFF)
1524 mmc_host_clk_hold(host);
1526 host->ios.clock = 0;
1531 * Reset ocr mask to be the highest possible voltage supported for
1532 * this mmc host. This value will be used at next power up.
1534 host->ocr = 1 << (fls(host->ocr_avail) - 1);
1536 if (!mmc_host_is_spi(host)) {
1537 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
1538 host->ios.chip_select = MMC_CS_DONTCARE;
1540 host->ios.power_mode = MMC_POWER_OFF;
1541 host->ios.bus_width = MMC_BUS_WIDTH_1;
1542 host->ios.timing = MMC_TIMING_LEGACY;
1546 * Some configurations, such as the 802.11 SDIO card in the OLPC
1547 * XO-1.5, require a short delay after poweroff before the card
1548 * can be successfully turned on again.
1552 mmc_host_clk_release(host);
1555 void mmc_power_cycle(struct mmc_host *host)
1557 mmc_power_off(host);
1558 /* Wait at least 1 ms according to SD spec */
1564 * Cleanup when the last reference to the bus operator is dropped.
1566 static void __mmc_release_bus(struct mmc_host *host)
1569 BUG_ON(host->bus_refs);
1570 BUG_ON(!host->bus_dead);
1572 host->bus_ops = NULL;
1576 * Increase reference count of bus operator
1578 static inline void mmc_bus_get(struct mmc_host *host)
1580 unsigned long flags;
1582 spin_lock_irqsave(&host->lock, flags);
1584 spin_unlock_irqrestore(&host->lock, flags);
1588 * Decrease reference count of bus operator and free it if
1589 * it is the last reference.
1591 static inline void mmc_bus_put(struct mmc_host *host)
1593 unsigned long flags;
1595 spin_lock_irqsave(&host->lock, flags);
1597 if ((host->bus_refs == 0) && host->bus_ops)
1598 __mmc_release_bus(host);
1599 spin_unlock_irqrestore(&host->lock, flags);
1602 int mmc_resume_bus(struct mmc_host *host)
1604 unsigned long flags;
1606 if (!mmc_bus_needs_resume(host))
1609 pr_info("%s: Starting deferred resume\n", mmc_hostname(host));
1610 spin_lock_irqsave(&host->lock, flags);
1611 host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
1612 host->rescan_disable = 0;
1613 spin_unlock_irqrestore(&host->lock, flags);
1616 if (host->bus_ops && !host->bus_dead) {
1618 BUG_ON(!host->bus_ops->resume);
1619 host->bus_ops->resume(host);
1622 if (host->bus_ops->detect && !host->bus_dead)
1623 host->bus_ops->detect(host);
1626 pr_info("%s: Deferred resume completed\n", mmc_hostname(host));
1630 EXPORT_SYMBOL(mmc_resume_bus);
1633 * Assign an MMC bus handler to a host. Only one bus handler may control a
1634 * host at any given time.
1636 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1638 unsigned long flags;
1643 WARN_ON(!host->claimed);
1645 spin_lock_irqsave(&host->lock, flags);
1647 BUG_ON(host->bus_ops);
1648 BUG_ON(host->bus_refs);
1650 host->bus_ops = ops;
1654 spin_unlock_irqrestore(&host->lock, flags);
1658 * Remove the current bus handler from a host.
1660 void mmc_detach_bus(struct mmc_host *host)
1662 unsigned long flags;
1666 WARN_ON(!host->claimed);
1667 WARN_ON(!host->bus_ops);
1669 spin_lock_irqsave(&host->lock, flags);
1673 spin_unlock_irqrestore(&host->lock, flags);
1679 * mmc_detect_change - process change of state on an MMC socket
1680 * @host: host which changed state.
1681 * @delay: optional delay to wait before detection (jiffies)
1683 * MMC drivers should call this when they detect a card has been
1684 * inserted or removed. The MMC layer will confirm that any
1685 * present card is still functional, and initialize any newly inserted card.
1688 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1690 #ifdef CONFIG_MMC_DEBUG
1691 unsigned long flags;
1692 spin_lock_irqsave(&host->lock, flags);
1693 WARN_ON(host->removed);
1694 spin_unlock_irqrestore(&host->lock, flags);
1696 host->detect_change = 1;
1698 wake_lock(&host->detect_wake_lock);
1699 mmc_schedule_delayed_work(&host->detect, delay);
1702 EXPORT_SYMBOL(mmc_detect_change);
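/*
 * Illustrative sketch, not part of the original file: a host driver's
 * card-detect interrupt handler would typically debounce by passing a small
 * delay to mmc_detect_change(). The handler name and dev_id wiring are
 * hypothetical.
 */
static irqreturn_t example_cd_irq(int irq, void *dev_id)
{
	struct mmc_host *host = dev_id;

	/* Let the inserted/removed card settle for ~200 ms before rescanning. */
	mmc_detect_change(host, msecs_to_jiffies(200));

	return IRQ_HANDLED;
}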
1704 void mmc_init_erase(struct mmc_card *card)
1708 if (is_power_of_2(card->erase_size))
1709 card->erase_shift = ffs(card->erase_size) - 1;
1711 card->erase_shift = 0;
1714 * It is possible to erase an arbitrarily large area of an SD or MMC
1715 * card. That is not desirable because it can take a long time
1716 * (minutes) potentially delaying more important I/O, and also the
1717 * timeout calculations become increasingly over-estimated.
1718 * Consequently, 'pref_erase' is defined as a guide to limit erases
1719 * to that size and alignment.
1721 * For SD cards that define Allocation Unit size, limit erases to one
1722 * Allocation Unit at a time. For MMC cards that define High Capacity
1723 * Erase Size, whether it is switched on or not, limit to that size.
1724 * Otherwise just have a stab at a good value. For modern cards it
1725 * will end up being 4MiB. Note that if the value is too small, it
1726 * can end up taking longer to erase.
1728 if (mmc_card_sd(card) && card->ssr.au) {
1729 card->pref_erase = card->ssr.au;
1730 card->erase_shift = ffs(card->ssr.au) - 1;
1731 } else if (card->ext_csd.hc_erase_size) {
1732 card->pref_erase = card->ext_csd.hc_erase_size;
1734 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1736 card->pref_erase = 512 * 1024 / 512;
1738 card->pref_erase = 1024 * 1024 / 512;
1740 card->pref_erase = 2 * 1024 * 1024 / 512;
1742 card->pref_erase = 4 * 1024 * 1024 / 512;
1743 if (card->pref_erase < card->erase_size)
1744 card->pref_erase = card->erase_size;
1746 sz = card->pref_erase % card->erase_size;
1748 card->pref_erase += card->erase_size - sz;
1753 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1754 unsigned int arg, unsigned int qty)
1756 unsigned int erase_timeout;
1758 if (arg == MMC_DISCARD_ARG ||
1759 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1760 erase_timeout = card->ext_csd.trim_timeout;
1761 } else if (card->ext_csd.erase_group_def & 1) {
1762 /* High Capacity Erase Group Size uses HC timeouts */
1763 if (arg == MMC_TRIM_ARG)
1764 erase_timeout = card->ext_csd.trim_timeout;
1766 erase_timeout = card->ext_csd.hc_erase_timeout;
1768 /* CSD Erase Group Size uses write timeout */
1769 unsigned int mult = (10 << card->csd.r2w_factor);
1770 unsigned int timeout_clks = card->csd.tacc_clks * mult;
1771 unsigned int timeout_us;
1773 /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
1774 if (card->csd.tacc_ns < 1000000)
1775 timeout_us = (card->csd.tacc_ns * mult) / 1000;
1777 timeout_us = (card->csd.tacc_ns / 1000) * mult;
1780 * ios.clock is only a target. The real clock rate might be
1781 * less but not that much less, so fudge it by multiplying by 2.
1784 timeout_us += (timeout_clks * 1000) /
1785 (mmc_host_clk_rate(card->host) / 1000);
1787 erase_timeout = timeout_us / 1000;
1790 * Theoretically, the calculation could underflow so round up
1791 * to 1ms in that case.
1797 /* Multiplier for secure operations */
1798 if (arg & MMC_SECURE_ARGS) {
1799 if (arg == MMC_SECURE_ERASE_ARG)
1800 erase_timeout *= card->ext_csd.sec_erase_mult;
1802 erase_timeout *= card->ext_csd.sec_trim_mult;
1805 erase_timeout *= qty;
1808 * Ensure at least a 1 second timeout for SPI as per
1809 * 'mmc_set_data_timeout()'
1811 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1812 erase_timeout = 1000;
1814 return erase_timeout;
1817 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1821 unsigned int erase_timeout;
1823 if (card->ssr.erase_timeout) {
1824 /* Erase timeout specified in SD Status Register (SSR) */
1825 erase_timeout = card->ssr.erase_timeout * qty +
1826 card->ssr.erase_offset;
1829 * Erase timeout not specified in SD Status Register (SSR) so
1830 * use 250ms per write block.
1832 erase_timeout = 250 * qty;
1835 /* Must not be less than 1 second */
1836 if (erase_timeout < 1000)
1837 erase_timeout = 1000;
1839 return erase_timeout;
1842 static unsigned int mmc_erase_timeout(struct mmc_card *card,
1846 if (mmc_card_sd(card))
1847 return mmc_sd_erase_timeout(card, arg, qty);
1849 return mmc_mmc_erase_timeout(card, arg, qty);
1852 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1853 unsigned int to, unsigned int arg)
1855 struct mmc_command cmd = {0};
1856 unsigned int qty = 0;
1857 unsigned long timeout;
1858 unsigned int fr, nr;
1863 trace_mmc_blk_erase_start(arg, fr, nr);
1866 * qty is used to calculate the erase timeout which depends on how many
1867 * erase groups (or allocation units in SD terminology) are affected.
1868 * We count erasing part of an erase group as one erase group.
1869 * For SD, the allocation units are always a power of 2. For MMC, the
1870 * erase group size is almost certainly also power of 2, but it does not
1871 * seem to insist on that in the JEDEC standard, so we fall back to
1872 * division in that case. SD may not specify an allocation unit size,
1873 * in which case the timeout is based on the number of write blocks.
1875 * Note that the timeout for secure trim 2 will only be correct if the
1876 * number of erase groups specified is the same as the total of all
1877 * preceding secure trim 1 commands. Since the power may have been
1878 * lost since the secure trim 1 commands occurred, it is generally
1879 * impossible to calculate the secure trim 2 timeout correctly.
1881 if (card->erase_shift)
1882 qty += ((to >> card->erase_shift) -
1883 (from >> card->erase_shift)) + 1;
1884 else if (mmc_card_sd(card))
1885 qty += to - from + 1;
1887 qty += ((to / card->erase_size) -
1888 (from / card->erase_size)) + 1;
1890 if (!mmc_card_blockaddr(card)) {
1895 if (mmc_card_sd(card))
1896 cmd.opcode = SD_ERASE_WR_BLK_START;
1898 cmd.opcode = MMC_ERASE_GROUP_START;
1900 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1901 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1903 pr_err("mmc_erase: group start error %d, "
1904 "status %#x\n", err, cmd.resp[0]);
1909 memset(&cmd, 0, sizeof(struct mmc_command));
1910 if (mmc_card_sd(card))
1911 cmd.opcode = SD_ERASE_WR_BLK_END;
1913 cmd.opcode = MMC_ERASE_GROUP_END;
1915 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1916 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1918 pr_err("mmc_erase: group end error %d, status %#x\n",
1924 memset(&cmd, 0, sizeof(struct mmc_command));
1925 cmd.opcode = MMC_ERASE;
1927 cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
1928 cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
1929 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1931 pr_err("mmc_erase: erase error %d, status %#x\n",
1937 if (mmc_host_is_spi(card->host))
1940 timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
1942 memset(&cmd, 0, sizeof(struct mmc_command));
1943 cmd.opcode = MMC_SEND_STATUS;
1944 cmd.arg = card->rca << 16;
1945 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1946 /* Do not retry else we can't see errors */
1947 err = mmc_wait_for_cmd(card->host, &cmd, 0);
1948 if (err || (cmd.resp[0] & 0xFDF92000)) {
1949 pr_err("error %d requesting status %#x\n",
1955 /* Timeout if the device never becomes ready for data and
1956 * never leaves the program state.
1958 if (time_after(jiffies, timeout)) {
1959 pr_err("%s: Card stuck in programming state! %s\n",
1960 mmc_hostname(card->host), __func__);
1965 } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
1966 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
1969 trace_mmc_blk_erase_end(arg, fr, nr);
1974 * mmc_erase - erase sectors.
1975 * @card: card to erase
1976 * @from: first sector to erase
1977 * @nr: number of sectors to erase
1978 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
1980 * Caller must claim host before calling this function.
1982 int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
1985 unsigned int rem, to = from + nr;
1987 if (!(card->host->caps & MMC_CAP_ERASE) ||
1988 !(card->csd.cmdclass & CCC_ERASE))
1991 if (!card->erase_size)
1994 if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
1997 if ((arg & MMC_SECURE_ARGS) &&
1998 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
2001 if ((arg & MMC_TRIM_ARGS) &&
2002 !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
2005 if (arg == MMC_SECURE_ERASE_ARG) {
2006 if (from % card->erase_size || nr % card->erase_size)
2010 if (arg == MMC_ERASE_ARG) {
2011 rem = from % card->erase_size;
2013 rem = card->erase_size - rem;
2020 rem = nr % card->erase_size;
2033 /* 'from' and 'to' are inclusive */
2036 return mmc_do_erase(card, from, to, arg);
2038 EXPORT_SYMBOL(mmc_erase);
2040 int mmc_can_erase(struct mmc_card *card)
2042 if ((card->host->caps & MMC_CAP_ERASE) &&
2043 (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
2047 EXPORT_SYMBOL(mmc_can_erase);
2049 int mmc_can_trim(struct mmc_card *card)
2051 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
2055 EXPORT_SYMBOL(mmc_can_trim);
2057 int mmc_can_discard(struct mmc_card *card)
2060 * As there's no way to detect the discard support bit at v4.5
2061 * use the s/w feature support field.
2063 if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
2067 EXPORT_SYMBOL(mmc_can_discard);
2069 int mmc_can_sanitize(struct mmc_card *card)
2071 if (!mmc_can_trim(card) && !mmc_can_erase(card))
2073 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
2077 EXPORT_SYMBOL(mmc_can_sanitize);
2079 int mmc_can_secure_erase_trim(struct mmc_card *card)
2081 if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
2085 EXPORT_SYMBOL(mmc_can_secure_erase_trim);
2087 int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
2090 if (!card->erase_size)
2092 if (from % card->erase_size || nr % card->erase_size)
2096 EXPORT_SYMBOL(mmc_erase_group_aligned);
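/*
 * Illustrative sketch, not part of the original file: a consumer would
 * normally verify erase support and alignment before calling mmc_erase().
 * The host must already be claimed by the caller; the helper name is
 * hypothetical.
 */
static int example_erase_range(struct mmc_card *card, unsigned int from,
			       unsigned int nr)
{
	if (!mmc_can_erase(card))
		return -EOPNOTSUPP;

	if (!mmc_erase_group_aligned(card, from, nr))
		return -EINVAL;

	return mmc_erase(card, from, nr, MMC_ERASE_ARG);
}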
2098 static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
2101 struct mmc_host *host = card->host;
2102 unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
2103 unsigned int last_timeout = 0;
2105 if (card->erase_shift)
2106 max_qty = UINT_MAX >> card->erase_shift;
2107 else if (mmc_card_sd(card))
2110 max_qty = UINT_MAX / card->erase_size;
2112 /* Find the largest qty with an OK timeout */
2115 for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
2116 timeout = mmc_erase_timeout(card, arg, qty + x);
2117 if (timeout > host->max_discard_to)
2119 if (timeout < last_timeout)
2121 last_timeout = timeout;
2133 /* Convert qty to sectors */
2134 if (card->erase_shift)
2135 max_discard = --qty << card->erase_shift;
2136 else if (mmc_card_sd(card))
2139 max_discard = --qty * card->erase_size;
2144 unsigned int mmc_calc_max_discard(struct mmc_card *card)
2146 struct mmc_host *host = card->host;
2147 unsigned int max_discard, max_trim;
2149 if (!host->max_discard_to)
2153 * Without erase_group_def set, MMC erase timeout depends on clock
2154 * frequency, which can change. In that case, the best choice is
2155 * just the preferred erase size.
2157 if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
2158 return card->pref_erase;
2160 max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
2161 if (mmc_can_trim(card)) {
2162 max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
2163 if (max_trim < max_discard)
2164 max_discard = max_trim;
2165 } else if (max_discard < card->erase_size) {
2168 pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
2169 mmc_hostname(host), max_discard, host->max_discard_to);
2172 EXPORT_SYMBOL(mmc_calc_max_discard);
2174 int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
2176 struct mmc_command cmd = {0};
2178 if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
2181 cmd.opcode = MMC_SET_BLOCKLEN;
2183 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2184 return mmc_wait_for_cmd(card->host, &cmd, 5);
2186 EXPORT_SYMBOL(mmc_set_blocklen);
2188 int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
2191 struct mmc_command cmd = {0};
2193 cmd.opcode = MMC_SET_BLOCK_COUNT;
2194 cmd.arg = blockcount & 0x0000FFFF;
2197 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
2198 return mmc_wait_for_cmd(card->host, &cmd, 5);
2200 EXPORT_SYMBOL(mmc_set_blockcount);
2202 static void mmc_hw_reset_for_init(struct mmc_host *host)
2204 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2206 mmc_host_clk_hold(host);
2207 host->ops->hw_reset(host);
2208 mmc_host_clk_release(host);
2211 int mmc_can_reset(struct mmc_card *card)
2215 if (!mmc_card_mmc(card))
2217 rst_n_function = card->ext_csd.rst_n_function;
2218 if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
2222 EXPORT_SYMBOL(mmc_can_reset);
2224 static int mmc_do_hw_reset(struct mmc_host *host, int check)
2226 struct mmc_card *card = host->card;
2228 if (!host->bus_ops->power_restore)
2231 if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
2237 if (!mmc_can_reset(card))
2240 mmc_host_clk_hold(host);
2241 mmc_set_clock(host, host->f_init);
2243 host->ops->hw_reset(host);
2245 /* If the reset has happened, then a status command will fail */
2247 struct mmc_command cmd = {0};
2250 cmd.opcode = MMC_SEND_STATUS;
2251 if (!mmc_host_is_spi(card->host))
2252 cmd.arg = card->rca << 16;
2253 cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
2254 err = mmc_wait_for_cmd(card->host, &cmd, 0);
2256 mmc_host_clk_release(host);
2261 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
2262 if (mmc_host_is_spi(host)) {
2263 host->ios.chip_select = MMC_CS_HIGH;
2264 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
2266 host->ios.chip_select = MMC_CS_DONTCARE;
2267 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
2269 host->ios.bus_width = MMC_BUS_WIDTH_1;
2270 host->ios.timing = MMC_TIMING_LEGACY;
2273 mmc_host_clk_release(host);
2275 return host->bus_ops->power_restore(host);
2278 int mmc_hw_reset(struct mmc_host *host)
2280 return mmc_do_hw_reset(host, 0);
2282 EXPORT_SYMBOL(mmc_hw_reset);
2284 int mmc_hw_reset_check(struct mmc_host *host)
2286 return mmc_do_hw_reset(host, 1);
2288 EXPORT_SYMBOL(mmc_hw_reset_check);
2290 static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
2292 host->f_init = freq;
2294 #ifdef CONFIG_MMC_DEBUG
2295 pr_info("%s: %s: trying to init card at %u Hz\n",
2296 mmc_hostname(host), __func__, host->f_init);
2301 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
2302 * do a hardware reset if possible.
2304 mmc_hw_reset_for_init(host);
2307 * sdio_reset sends CMD52 to reset card. Since we do not know
2308 * if the card is being re-initialized, just send it. CMD52
2309 * should be ignored by SD/eMMC cards.
2314 mmc_send_if_cond(host, host->ocr_avail);
2316 /* Order's important: probe SDIO, then SD, then MMC */
2317 if (!mmc_attach_sdio(host))
2319 if (!mmc_attach_sd(host))
2321 if (!mmc_attach_mmc(host))
2324 mmc_power_off(host);
2328 int _mmc_detect_card_removed(struct mmc_host *host)
2332 if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
2335 if (!host->card || mmc_card_removed(host->card))
2338 ret = host->bus_ops->alive(host);
2341 * Card detect status and alive check may be out of sync if card is
2342 * removed slowly, when card detect switch changes while card/slot
2343 * pads are still contacted in hardware (refer to "SD Card Mechanical
2344 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
2345 * detect work 200ms later for this case.
2347 if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
2348 mmc_detect_change(host, msecs_to_jiffies(200));
2349 pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
2353 mmc_card_set_removed(host->card);
2354 pr_debug("%s: card remove detected\n", mmc_hostname(host));
2360 int mmc_detect_card_removed(struct mmc_host *host)
2362 struct mmc_card *card = host->card;
2365 WARN_ON(!host->claimed);
2370 ret = mmc_card_removed(card);
2372 * The card will be considered unchanged unless we have been asked to
2373 * detect a change or host requires polling to provide card detection.
2375 if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) &&
2376 !(host->caps2 & MMC_CAP2_DETECT_ON_ERR))
2379 host->detect_change = 0;
2381 ret = _mmc_detect_card_removed(host);
2382 if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) {
2384 * Schedule a detect work as soon as possible to let a
2385 * rescan handle the card removal.
2387 cancel_delayed_work(&host->detect);
2388 mmc_detect_change(host, 0);
2394 EXPORT_SYMBOL(mmc_detect_card_removed);
2396 void mmc_rescan(struct work_struct *work)
2398 struct mmc_host *host =
2399 container_of(work, struct mmc_host, detect.work);
2401 bool extend_wakelock = false;
2403 if (host->rescan_disable)
2406 /* If there is a non-removable card registered, only scan once */
2407 if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
2409 host->rescan_entered = 1;
2414 * if there is a _removable_ card registered, check whether it is still present
2417 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
2418 && !(host->caps & MMC_CAP_NONREMOVABLE))
2419 host->bus_ops->detect(host);
2421 host->detect_change = 0;
2423 /* If the card was removed the bus will be marked
2424 * as dead - extend the wakelock so userspace can respond */
2427 extend_wakelock = true;
2430 * Let mmc_bus_put() free the bus/bus_ops if we've found that
2431 * the card is no longer present.
2436 /* if there still is a card present, stop here */
2437 if (host->bus_ops != NULL) {
2443 * Only we can add a new handler, so it's safe to
2444 * release the lock here.
2448 if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
2449 mmc_claim_host(host);
2450 mmc_power_off(host);
2451 mmc_release_host(host);
2455 mmc_claim_host(host);
2456 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
2457 if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) {
2458 extend_wakelock = true;
2461 if (freqs[i] <= host->f_min)
2464 mmc_release_host(host);
2467 if (extend_wakelock)
2468 wake_lock_timeout(&host->detect_wake_lock, HZ / 2);
2470 wake_unlock(&host->detect_wake_lock);
2471 if (host->caps & MMC_CAP_NEEDS_POLL) {
2472 wake_lock(&host->detect_wake_lock);
2473 mmc_schedule_delayed_work(&host->detect, HZ);
2477 void mmc_start_host(struct mmc_host *host)
2479 host->f_init = max(freqs[0], host->f_min);
2480 host->rescan_disable = 0;
2481 if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
2482 mmc_power_off(host);
2485 mmc_detect_change(host, 0);
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	host->rescan_disable = 1;
	if (cancel_delayed_work_sync(&host->detect))
		wake_unlock(&host->detect_wake_lock);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}
int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif
	mmc_bus_get(host);
	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);
	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);
int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif
	mmc_bus_get(host);
	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);
	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
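
/*
 * Illustrative sketch (not part of this file): a platform that wants to cut
 * power to an eMMC/SD slot while idle could bracket the idle period with
 * these helpers; "mmc" is a hypothetical struct mmc_host pointer.
 *
 *	err = mmc_power_save_host(mmc);
 *	// ... slot power rail may now be gated off ...
 *	err = mmc_power_restore_host(mmc);
 */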
int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);
int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);
int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);
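
/*
 * Illustrative sketch (not part of this file): suspend code that wants to
 * put an eMMC device into its low-power sleep state, and wake it again on
 * resume, could use these helpers roughly as follows ("host" is the
 * mmc_host being suspended):
 *
 *	if (mmc_card_can_sleep(host))
 *		err = mmc_card_sleep(host);
 *	// ... later, on resume ...
 *	err = mmc_card_awake(host);
 */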
/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
		return err;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
					mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
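
/*
 * Illustrative sketch (not part of this file): a card user that must make
 * sure cached writes reach non-volatile storage (e.g. before a shutdown)
 * could flush with the host claimed; "card" is a hypothetical
 * struct mmc_card pointer.
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_flush_cache(card);
 *	mmc_release_host(card->host);
 */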
/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 * This function should be called with host claimed.
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	unsigned int timeout;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
			mmc_card_is_removable(host))
		return err;

	if (card && mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0)) {
		enable = !!enable;

		if (card->ext_csd.cache_ctrl ^ enable) {
			timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_CACHE_CTRL, enable, timeout);
			if (err)
				pr_err("%s: cache %s error %d\n",
						mmc_hostname(card->host),
						enable ? "on" : "off",
						err);
			else
				card->ext_csd.cache_ctrl = enable;
		}
	}

	return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);
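
/*
 * Illustrative sketch (not part of this file): after card init, the cache
 * can be switched on (or off again before power-down) with the host claimed,
 * as the comment above requires; "host" is a hypothetical mmc_host pointer.
 *
 *	mmc_claim_host(host);
 *	err = mmc_cache_ctrl(host, 1);	// 1 = enable, 0 = disable (flushes)
 *	mmc_release_host(host);
 */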
/**
 *	mmc_suspend_host - suspend a host
 *	@host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	if (mmc_bus_needs_resume(host))
		return 0;

	if (cancel_delayed_work(&host->detect))
		wake_unlock(&host->detect_wake_lock);
	mmc_flush_scheduled_work();

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (host->bus_ops->suspend) {
			if (mmc_card_doing_bkops(host->card)) {
				err = mmc_stop_bkops(host->card);
				if (err)
					goto out;
			}
			err = host->bus_ops->suspend(host);
		}

		if (err == -ENOSYS || !host->bus_ops->resume) {
			/*
			 * We simply "remove" the card in this case.
			 * It will be redetected on resume.  (Calling
			 * bus_ops->remove() with a claimed host can
			 * deadlock.)
			 */
			if (host->bus_ops->remove)
				host->bus_ops->remove(host);
			mmc_claim_host(host);
			mmc_detach_bus(host);
			mmc_power_off(host);
			mmc_release_host(host);
			host->pm_flags = 0;
			err = 0;
		}
	}
	mmc_bus_put(host);

	if (!err && !mmc_card_keep_power(host))
		mmc_power_off(host);

out:
	return err;
}
EXPORT_SYMBOL(mmc_suspend_host);
/**
 *	mmc_resume_host - resume a previously suspended host
 *	@host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (mmc_bus_manual_resume(host)) {
		host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
		mmc_bus_put(host);
		return 0;
	}

	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell runtime PM core we just powered up the card,
			 * since it still believes the card is powered off.
			 * Note that currently runtime PM is only enabled
			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			pr_warning("%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			err = 0;
		}
	}
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);
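
/*
 * Illustrative sketch (not part of this file): a host controller driver's
 * system PM callbacks typically funnel into these helpers.  The "foo_" names
 * and the drvdata layout are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		return mmc_suspend_host(mmc);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct mmc_host *mmc = dev_get_drvdata(dev);
 *
 *		return mmc_resume_host(mmc);
 *	}
 */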
/* Do the card removal on suspend if card is assumed removable
 * Do that in pm notifier while userspace isn't yet frozen, so we will be able
 * to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
					unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		if (host->card && mmc_card_mmc(host->card) &&
		    mmc_card_doing_bkops(host->card)) {
			err = mmc_stop_bkops(host->card);
			if (err) {
				pr_err("%s: didn't stop bkops\n",
					mmc_hostname(host));
				return err;
			}
			mmc_card_clr_doing_bkops(host->card);
		}

		spin_lock_irqsave(&host->lock, flags);
		if (mmc_bus_needs_resume(host)) {
			spin_unlock_irqrestore(&host->lock, flags);
			break;
		}
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		if (cancel_delayed_work_sync(&host->detect))
			wake_unlock(&host->detect_wake_lock);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		if (mmc_bus_manual_resume(host)) {
			spin_unlock_irqrestore(&host->lock, flags);
			break;
		}
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);
		break;
	}

	return 0;
}
/**
 * mmc_init_context_info() - init synchronization context
 * @host: mmc host
 *
 * Init struct context_info needed to implement asynchronous
 * request mechanism, used by mmc core, host driver and mmc requests
 * supplier.
 */
void mmc_init_context_info(struct mmc_host *host)
{
	spin_lock_init(&host->context_info.lock);
	host->context_info.is_new_req = false;
	host->context_info.is_done_rcv = false;
	host->context_info.is_waiting_last_req = false;
	init_waitqueue_head(&host->context_info.wait);
}
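
/*
 * Illustrative sketch (not part of this file): the context_info fields carry
 * the handshake between a host driver's completion path and the core's
 * asynchronous request machinery.  Roughly, the completing side sets
 * is_done_rcv and wakes the waitqueue, while the core waits on it.  This is
 * only a simplified sketch, not the exact core implementation.
 *
 *	// completion side (host driver context)
 *	host->context_info.is_done_rcv = true;
 *	wake_up_interruptible(&host->context_info.wait);
 *
 *	// waiting side (core context)
 *	wait_event_interruptible(host->context_info.wait,
 *		host->context_info.is_done_rcv ||
 *		host->context_info.is_new_req);
 */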
#ifdef CONFIG_MMC_EMBEDDED_SDIO
void mmc_set_embedded_sdio_data(struct mmc_host *host,
				struct sdio_cis *cis,
				struct sdio_cccr *cccr,
				struct sdio_embedded_func *funcs,
				int num_funcs)
{
	host->embedded_sdio_data.cis = cis;
	host->embedded_sdio_data.cccr = cccr;
	host->embedded_sdio_data.funcs = funcs;
	host->embedded_sdio_data.num_funcs = num_funcs;
}

EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
#endif
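
/*
 * Illustrative sketch (not part of this file): board code for an embedded
 * SDIO device (e.g. a soldered-down WLAN chip) could hand pre-built CIS/CCCR
 * descriptors to the core before the host is scanned.  "wlan_cis",
 * "wlan_cccr" and "wlan_funcs" are hypothetical board-defined data.
 *
 *	mmc_set_embedded_sdio_data(host, &wlan_cis, &wlan_cccr,
 *				   wlan_funcs, ARRAY_SIZE(wlan_funcs));
 */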
static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");