/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
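
/*
 * A card can honour reliable writes if it is an MMC/eMMC device that
 * either advertises the enhanced reliable write mode in WR_REL_PARAM
 * or reports a legacy reliable write sector count.
 */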
#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) &&	\
	(((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||	\
	 ((card)->ext_csd.rel_sectors)))
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	name_idx;

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with mmc_set_drvdata, and keeps
	 * track of the currently selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
};

static DEFINE_MUTEX(open_lock);
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
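
/*
 * With MODULE_PARAM_PREFIX this is set as e.g. "mmcblk.perdev_minors=16"
 * on the kernel command line; raising it allows more partitions per card
 * at the cost of a smaller max_devices (256 / perdev_minors).
 */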
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}
static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devmaj = MAJOR(disk_devt(disk));
	int devidx = MINOR(disk_devt(disk)) / perdev_minors;

	/* Before add_disk() the disk has no devt; fall back to first_minor. */
	if (!devmaj)
		devidx = disk->first_minor / perdev_minors;
	return devidx;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}
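
/*
 * The force_ro attribute reports whether the disk is read-only beyond
 * what the card itself mandates (hence the XOR with md->read_only in
 * force_ro_show below); force_ro_store ORs the user's value with
 * md->read_only, so a write-protected card can never be made writable.
 */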
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}
static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
	return 0;
}
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* Cards have no real CHS geometry; fabricate 4 heads, 16 sectors. */
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
};
struct mmc_blk_request {
	struct mmc_request	mrq;
	struct mmc_command	cmd;
	struct mmc_command	stop;
	struct mmc_data		data;
};
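
/*
 * Select the eMMC partition (user area or a boot partition) a request
 * should go to by rewriting the PARTITION_ACCESS bits of the PART_CONFIG
 * EXT_CSD byte with CMD6 (mmc_switch). The main mmc_blk_data caches the
 * currently selected partition in part_curr so the switch is skipped
 * when it would be a no-op.
 */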
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = mmc_get_drvdata(card);
	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		card->ext_csd.part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (ret)
			return ret;
	}

	main_md->part_curr = md->part_type;
	return 0;
}
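
/*
 * SD cards report how many blocks of the last write completed
 * successfully via ACMD22 (SEND_NUM_WR_BLOCKS): CMD55 first to enter
 * application command mode, then the 4-byte big-endian count is read
 * as a data transfer. (u32)-1 is returned on any failure.
 */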
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	unsigned int timeout_us;
	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	/* Scale the CSD access time by 100 for a safety margin. */
	data.timeout_ns = card->csd.tacc_ns * 100;
	data.timeout_clks = card->csd.tacc_clks * 100;

	timeout_us = data.timeout_ns / 1000;
	timeout_us += data.timeout_clks * 1000 /
		(card->host->ios.clock / 1000);

	/* Cap the timeout at 100 ms. */
	if (timeout_us > 100000) {
		data.timeout_ns = 100000000;
		data.timeout_clks = 0;
	}

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}
static u32 get_card_status(struct mmc_card *card, struct request *req)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		printk(KERN_ERR "%s: error %d sending status command\n",
		       req->rq_disk->disk_name, err);
	return cmd.resp[0];
}
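
/*
 * REQ_DISCARD requests are mapped to TRIM when the card supports it
 * (write-block granularity) and otherwise to ERASE, which operates on
 * whole erase groups.
 */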
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;

	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0;

	if (!mmc_can_secure_erase_trim(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
	if (!err && arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out;
		}
		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
	}
out:
	spin_lock_irq(&md->lock);
	__blk_end_request(req, err, blk_rq_bytes(req));
	spin_unlock_irq(&md->lock);

	return err ? 0 : 1;
}
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;

	/*
	 * No-op, only service this because we need REQ_FUA for reliable
	 * writes.
	 */
	spin_lock_irq(&md->lock);
	__blk_end_request_all(req, 0);
	spin_unlock_irq(&md->lock);

	return 1;
}
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
				   struct mmc_card *card,
				   struct request *req)
{
	int err;
	struct mmc_command set_count = {0};

	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}

	/* Bit 31 of the CMD23 argument marks the transfer as reliable. */
	set_count.opcode = MMC_SET_BLOCK_COUNT;
	set_count.arg = brq->data.blocks | (1 << 31);
	set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &set_count, 0);
	if (err)
		printk(KERN_ERR "%s: error %d SET_BLOCK_COUNT\n",
		       req->rq_disk->disk_name, err);
	return err;
}
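
/*
 * Main read/write path. Each loop iteration below builds one
 * mmc_request covering as much of the block request as host and card
 * allow, issues it, and completes the transferred part with
 * __blk_end_request(). Failed multi-block reads are retried one sector
 * at a time, and writes are followed by CMD13 polling until the card
 * leaves the programming state.
 */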
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1, disable_multi = 0;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * REQ_META accesses, and are supported only on MMCs.
	 */
	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
			  (req->cmd_flags & REQ_META)) &&
			 (rq_data_dir(req) == WRITE) &&
			 REL_WRITES_SUPPORTED(card);

	do {
		struct mmc_command cmd = {0};
		u32 readcmd, writecmd, status = 0;
		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		brq.cmd.arg = blk_rq_pos(req);
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 512;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		brq.data.blocks = blk_rq_sectors(req);
		/*
		 * The block layer doesn't support all sector count
		 * restrictions, so we need to be prepared for too big
		 * requests.
		 */
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		/*
		 * After a read error, we redo the request one sector at a time
		 * in order to accurately determine which sectors can be read
		 * successfully.
		 */
		if (disable_multi && brq.data.blocks > 1)
			brq.data.blocks = 1;
		if (brq.data.blocks > 1 || do_rel_wr) {
			/* SPI multiblock writes terminate using a special
			 * token, not a STOP_TRANSMISSION request. Reliable
			 * writes use SET_BLOCK_COUNT and do not use a
			 * STOP_TRANSMISSION request either.
			 */
			if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
			    rq_data_dir(req) == READ)
				brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}
		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}

		if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
			goto cmd_err;
		mmc_set_data_timeout(&brq.data, card);

		brq.data.sg = mq->sg;
		brq.data.sg_len = mmc_queue_map_sg(mq);

		/*
		 * Adjust the sg list so it is the same size as the
		 * request.
		 */
		if (brq.data.blocks != blk_rq_sectors(req)) {
			int i, data_size = brq.data.blocks << 9;
			struct scatterlist *sg;

			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
				data_size -= sg->length;
				if (data_size <= 0) {
					/* Trim the last entry and stop here. */
					sg->length += data_size;
					i++;
					break;
				}
			}
			brq.data.sg_len = i;
		}

		mmc_queue_bounce_pre(mq);

		mmc_wait_for_req(card->host, &brq.mrq);

		mmc_queue_bounce_post(mq);
		/*
		 * Check for errors here, but don't jump to cmd_err
		 * until later as we need to wait for the card to leave
		 * programming mode even when things go wrong.
		 */
		if (brq.cmd.error || brq.data.error || brq.stop.error) {
			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
				/* Redo read one sector at a time */
				printk(KERN_WARNING "%s: retrying using single "
				       "block read\n", req->rq_disk->disk_name);
				disable_multi = 1;
				continue;
			}
			status = get_card_status(card, req);
		}

		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write "
			       "command, response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.cmd.error,
			       brq.cmd.resp[0], status);
		}

		if (brq.data.error) {
			if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
				/* 'Stop' response contains card status */
				status = brq.mrq.stop->resp[0];
			printk(KERN_ERR "%s: error %d transferring data,"
			       " sector %u, nr %u, card status %#x\n",
			       req->rq_disk->disk_name, brq.data.error,
			       (unsigned)blk_rq_pos(req),
			       (unsigned)blk_rq_sectors(req), status);
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command, "
			       "response %#x, card status %#x\n",
			       req->rq_disk->disk_name, brq.stop.error,
			       brq.stop.resp[0], status);
		}
		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
				/*
				 * Some cards mishandle the status bits,
				 * so make sure to check both the busy
				 * indication and the card state.
				 */
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
				 (R1_CURRENT_STATE(cmd.resp[0]) == 7)); /* 7: programming */

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}
		if (brq.cmd.error || brq.stop.error || brq.data.error) {
			if (rq_data_dir(req) == READ) {
				/*
				 * After an error, we redo I/O one sector at a
				 * time, so we only reach here after trying to
				 * read a single sector.
				 */
				spin_lock_irq(&md->lock);
				ret = __blk_end_request(req, -EIO, brq.data.blksz);
				spin_unlock_irq(&md->lock);
				continue;
			}
			goto cmd_err;
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	} while (ret);

	return 1;

 cmd_err:
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			spin_lock_irq(&md->lock);
			ret = __blk_end_request(req, 0, blocks << 9);
			spin_unlock_irq(&md->lock);
		}
	} else {
		spin_lock_irq(&md->lock);
		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}
	spin_lock_irq(&md->lock);
	while (ret)
		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
	spin_unlock_irq(&md->lock);

	return 0;
}
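
/*
 * Entry point from the queue thread: claim the host, make sure the
 * right card partition is selected, then dispatch to the discard,
 * secure discard, flush or read/write handler.
 */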
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;

	mmc_claim_host(card->host);
	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		ret = 0;
		goto out;
	}

	if (req->cmd_flags & REQ_DISCARD) {
		if (req->cmd_flags & REQ_SECURE)
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (req->cmd_flags & REQ_FLUSH) {
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		ret = mmc_blk_issue_rw_rq(mq, req);
	}

out:
	mmc_release_host(card->host);
	return ret;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, max_devices);
	if (devidx >= max_devices)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * !subname implies we are creating main mmc_blk_data that will be
	 * associated with mmc_card with mmc_set_drvdata. Due to device
	 * partitions, devidx will not coincide with a per-physical card
	 * index anymore so we keep track of a name index.
	 */
	if (!subname) {
		md->name_idx = find_first_zero_bit(name_use, max_devices);
		__set_bit(md->name_idx, name_use);
	} else
		md->name_idx = ((struct mmc_blk_data *)
				dev_to_disk(parent)->private_data)->name_idx;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	if (REL_WRITES_SUPPORTED(card))
		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */
	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%d%s", md->name_idx, subname ? subname : "");

	blk_queue_logical_block_size(md->queue.queue, 512);
	set_capacity(md->disk, size);
	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	return ERR_PTR(ret);
}
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;
	struct mmc_blk_data *md;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = card->csd.capacity << (card->csd.read_blkbits - 9);
	}

	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
	return md;
}
static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);

	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	printk(KERN_INFO "%s: %s %s partition %u %s\n",
	       part_md->disk->disk_name, mmc_card_id(card),
	       mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}
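
/*
 * MMC cards that implement boot partitions (eMMC 4.3 and later) expose
 * them as read-only "boot0"/"boot1" sub-devices of boot_size bytes
 * each; boot_size is in bytes, hence the >> 9 to sectors below.
 */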
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	if (card->ext_csd.boot_size) {
		ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
					 card->ext_csd.boot_size >> 9,
					 true,
					 "boot0");
		if (ret)
			return ret;
		ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
					 card->ext_csd.boot_size >> 9,
					 true,
					 "boot1");
		if (ret)
			return ret;
	}

	return ret;
}
static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
	int err;

	mmc_claim_host(card->host);
	err = mmc_set_blocklen(card, 512);
	mmc_release_host(card->host);

	if (err) {
		printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
		       md->disk->disk_name, err);
		return -EINVAL;
	}

	return 0;
}
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	if (md) {
		if (md->disk->flags & GENHD_FL_UP) {
			device_remove_file(disk_to_dev(md->disk), &md->force_ro);

			/* Stop new requests from getting into the queue */
			del_gendisk(md->disk);
		}

		/* Then flush out any already in there */
		mmc_cleanup_queue(&md->queue);
		mmc_blk_put(md);
	}
}
static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;

	__clear_bit(md->name_idx, name_use);
	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}
static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;

	add_disk(md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		del_gendisk(md->disk);

	return ret;
}
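
/*
 * The SEM* CID names below identify SanDisk iNAND parts, which require
 * the erase/trim type to be programmed into EXT_CSD byte 113 before
 * CMD38 is issued; see the MMC_QUIRK_INAND_CMD38 handling in the
 * discard paths above.
 */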
static const struct mmc_fixup blk_fixups[] =
{
	MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
	END_FIXUP
};
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	int err;
	char cap_str[10];

	/* Check that the card supports the command class(es) we need. */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	err = mmc_blk_set_blksize(md, card);
	if (err)
		goto out;

	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	printk(KERN_INFO "%s: %s %s %s %s\n",
	       md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
	       cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	mmc_set_drvdata(card, md);
	mmc_fixup_device(card, blk_fixups);

	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}
	return 0;

 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return err;
}
static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	mmc_set_drvdata(card, NULL);
}
#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}
static int mmc_blk_resume(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = mmc_get_drvdata(card);

	if (md) {
		mmc_blk_set_blksize(md, card);

		/*
		 * Resume involves the card going into idle state,
		 * so the current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#else
#define mmc_blk_suspend	NULL
#define mmc_blk_resume	NULL
#endif
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.suspend	= mmc_blk_suspend,
	.resume		= mmc_blk_resume,
};
static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = 256 / perdev_minors;

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}
module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");