mmc: block: init force_ro sysfs attribute
drivers/mmc/card/block.c
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88

#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) &&     \
    (((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||   \
     ((card)->ext_csd.rel_sectors)))

static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so the number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
static DECLARE_BITMAP(name_use, 256);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
        spinlock_t      lock;
        struct gendisk  *disk;
        struct mmc_queue queue;
        struct list_head part;

        unsigned int    usage;
        unsigned int    read_only;
        unsigned int    part_type;
        unsigned int    name_idx;

        /*
         * Only set in the main mmc_blk_data associated with the
         * mmc_card via mmc_set_drvdata; keeps track of the currently
         * selected device partition.
         */
        unsigned int    part_curr;
        struct device_attribute force_ro;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");

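/*
 * Grab a reference to the per-disk driver data; returns NULL if the
 * device is already being torn down (usage count has hit zero).
 */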
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
        struct mmc_blk_data *md;

        mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
        mutex_unlock(&open_lock);

        return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
        int devmaj = MAJOR(disk_devt(disk));
        int devidx = MINOR(disk_devt(disk)) / perdev_minors;

        if (!devmaj)
                devidx = disk->first_minor / perdev_minors;
        return devidx;
}

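/*
 * Drop a reference; the final put cleans up the request queue, frees
 * the device index for reuse and releases the gendisk.
 */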
static void mmc_blk_put(struct mmc_blk_data *md)
{
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                int devidx = mmc_get_devidx(md->disk);
                blk_cleanup_queue(md->queue.queue);

                __clear_bit(devidx, dev_use);

                put_disk(md->disk);
                kfree(md);
        }
        mutex_unlock(&open_lock);
}

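/*
 * The "force_ro" attribute lets userspace force a disk read-only on top
 * of the card's own write-protect state.  The show method XORs the
 * current gendisk state with md->read_only, so it reads back as 1 only
 * when read-only has been forced, e.g. (device name depends on the
 * system):
 *
 *   echo 1 > /sys/block/mmcblk0boot0/force_ro
 */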
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

        ret = snprintf(buf, PAGE_SIZE, "%d\n",
                       get_disk_ro(dev_to_disk(dev)) ^
                       md->read_only);
        mmc_blk_put(md);
        return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
{
        int ret;
        char *end;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
        unsigned long set = simple_strtoul(buf, &end, 0);
        if (end == buf) {
                ret = -EINVAL;
                goto out;
        }

        set_disk_ro(dev_to_disk(dev), set || md->read_only);
        ret = count;
out:
        mmc_blk_put(md);
        return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
        int ret = -ENXIO;

        mutex_lock(&block_mutex);
        if (md) {
                if (md->usage == 2)
                        check_disk_change(bdev);
                ret = 0;

                if ((mode & FMODE_WRITE) && md->read_only) {
                        mmc_blk_put(md);
                        ret = -EROFS;
                }
        }
        mutex_unlock(&block_mutex);

        return ret;
}

static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
        struct mmc_blk_data *md = disk->private_data;

        mutex_lock(&block_mutex);
        mmc_blk_put(md);
        mutex_unlock(&block_mutex);
        return 0;
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
        geo->heads = 4;
        geo->sectors = 16;
        return 0;
}

static const struct block_device_operations mmc_bdops = {
        .open                   = mmc_blk_open,
        .release                = mmc_blk_release,
        .getgeo                 = mmc_blk_getgeo,
        .owner                  = THIS_MODULE,
};

struct mmc_blk_request {
        struct mmc_request      mrq;
        struct mmc_command      cmd;
        struct mmc_command      stop;
        struct mmc_data         data;
};

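/*
 * Select the physical partition backing @md (e.g. a boot partition) by
 * rewriting the access bits of EXT_CSD PART_CONFIG.  A no-op when that
 * partition is already the current one.
 */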
static inline int mmc_blk_part_switch(struct mmc_card *card,
                                      struct mmc_blk_data *md)
{
        int ret;
        struct mmc_blk_data *main_md = mmc_get_drvdata(card);
        if (main_md->part_curr == md->part_type)
                return 0;

        if (mmc_card_mmc(card)) {
                card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
                card->ext_csd.part_config |= md->part_type;

                ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
                                 card->ext_csd.part_time);
                if (ret)
                        return ret;
        }

        main_md->part_curr = md->part_type;
        return 0;
}

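/*
 * Issue ACMD22 (SD_APP_SEND_NUM_WR_BLKS) to ask an SD card how many
 * blocks of a failed write actually reached the medium, so the request
 * can still be partially completed.  Returns (u32)-1 on any error.
 */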
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
        int err;
        u32 result;
        __be32 *blocks;

        struct mmc_request mrq = {0};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        unsigned int timeout_us;

        struct scatterlist sg;

        cmd.opcode = MMC_APP_CMD;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                return (u32)-1;
        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
                return (u32)-1;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.timeout_ns = card->csd.tacc_ns * 100;
        data.timeout_clks = card->csd.tacc_clks * 100;

        timeout_us = data.timeout_ns / 1000;
        timeout_us += data.timeout_clks * 1000 /
                (card->host->ios.clock / 1000);

        if (timeout_us > 100000) {
                data.timeout_ns = 100000000;
                data.timeout_clks = 0;
        }

        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        mrq.cmd = &cmd;
        mrq.data = &data;

        blocks = kmalloc(4, GFP_KERNEL);
        if (!blocks)
                return (u32)-1;

        sg_init_one(&sg, blocks, 4);

        mmc_wait_for_req(card->host, &mrq);

        result = ntohl(*blocks);
        kfree(blocks);

        if (cmd.error || data.error)
                result = (u32)-1;

        return result;
}

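/* CMD13: read back the card's status register, used for error reporting. */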
static u32 get_card_status(struct mmc_card *card, struct request *req)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                printk(KERN_ERR "%s: error %d sending status command\n",
                       req->rq_disk->disk_name, err);
        return cmd.resp[0];
}

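/*
 * Service a REQ_DISCARD request with TRIM when the card supports it,
 * falling back to a regular erase otherwise.
 */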
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0;

        if (!mmc_can_erase(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card))
                arg = MMC_TRIM_ARG;
        else
                arg = MMC_ERASE_ARG;

        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 INAND_CMD38_ARG_EXT_CSD,
                                 arg == MMC_TRIM_ARG ?
                                 INAND_CMD38_ARG_TRIM :
                                 INAND_CMD38_ARG_ERASE,
                                 0);
                if (err)
                        goto out;
        }
        err = mmc_erase(card, from, nr, arg);
out:
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        return err ? 0 : 1;
}

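/*
 * Secure discard: when the range is not aligned to whole erase groups,
 * a secure TRIM is used, which is a two-phase operation (TRIM1 marks
 * the range, TRIM2 purges it); otherwise a single secure erase
 * suffices.
 */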
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                                       struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0;

        if (!mmc_can_secure_erase_trim(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
                arg = MMC_SECURE_TRIM1_ARG;
        else
                arg = MMC_SECURE_ERASE_ARG;

        if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 INAND_CMD38_ARG_EXT_CSD,
                                 arg == MMC_SECURE_TRIM1_ARG ?
                                 INAND_CMD38_ARG_SECTRIM1 :
                                 INAND_CMD38_ARG_SECERASE,
                                 0);
                if (err)
                        goto out;
        }
        err = mmc_erase(card, from, nr, arg);
        if (!err && arg == MMC_SECURE_TRIM1_ARG) {
                if (card->quirks & MMC_QUIRK_INAND_CMD38) {
                        err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                         INAND_CMD38_ARG_EXT_CSD,
                                         INAND_CMD38_ARG_SECTRIM2,
                                         0);
                        if (err)
                                goto out;
                }
                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
        }
out:
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        return err ? 0 : 1;
}

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;

        /*
         * No-op, only service this because we need REQ_FUA for reliable
         * writes.
         */
        spin_lock_irq(&md->lock);
        __blk_end_request_all(req, 0);
        spin_unlock_irq(&md->lock);

        return 1;
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
                                   struct mmc_card *card,
                                   struct request *req)
{
        int err;
        struct mmc_command set_count = {0};

        if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
                /* Legacy mode imposes restrictions on transfers. */
                if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
                        brq->data.blocks = 1;

                if (brq->data.blocks > card->ext_csd.rel_sectors)
                        brq->data.blocks = card->ext_csd.rel_sectors;
                else if (brq->data.blocks < card->ext_csd.rel_sectors)
                        brq->data.blocks = 1;
        }

        set_count.opcode = MMC_SET_BLOCK_COUNT;
        set_count.arg = brq->data.blocks | (1 << 31);
        set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &set_count, 0);
        if (err)
                printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT\n",
                       req->rq_disk->disk_name, err);
        return err;
}

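/*
 * The main read/write path: build an mmc_blk_request for (part of) the
 * block layer request, submit it, then complete as many sectors as
 * were actually transferred, looping until the whole request is done.
 */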
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        struct mmc_blk_request brq;
        int ret = 1, disable_multi = 0;

        /*
         * Reliable writes are used to implement Forced Unit Access and
         * REQ_META accesses, and are supported only on MMCs.
         */
        bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
                          (req->cmd_flags & REQ_META)) &&
                (rq_data_dir(req) == WRITE) &&
                REL_WRITES_SUPPORTED(card);

        do {
                struct mmc_command cmd = {0};
                u32 readcmd, writecmd, status = 0;

                memset(&brq, 0, sizeof(struct mmc_blk_request));
                brq.mrq.cmd = &brq.cmd;
                brq.mrq.data = &brq.data;

                brq.cmd.arg = blk_rq_pos(req);
                if (!mmc_card_blockaddr(card))
                        brq.cmd.arg <<= 9;
                brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
                brq.data.blksz = 512;
                brq.stop.opcode = MMC_STOP_TRANSMISSION;
                brq.stop.arg = 0;
                brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
                brq.data.blocks = blk_rq_sectors(req);

                /*
                 * The block layer doesn't support all sector count
                 * restrictions, so we need to be prepared for too big
                 * requests.
                 */
                if (brq.data.blocks > card->host->max_blk_count)
                        brq.data.blocks = card->host->max_blk_count;

                /*
                 * After a read error, we redo the request one sector at a time
                 * in order to accurately determine which sectors can be read
                 * successfully.
                 */
                if (disable_multi && brq.data.blocks > 1)
                        brq.data.blocks = 1;

                if (brq.data.blocks > 1 || do_rel_wr) {
                        /* SPI multiblock writes terminate using a special
                         * token, not a STOP_TRANSMISSION request. Reliable
                         * writes use SET_BLOCK_COUNT and do not use a
                         * STOP_TRANSMISSION request either.
                         */
                        if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
                            rq_data_dir(req) == READ)
                                brq.mrq.stop = &brq.stop;
                        readcmd = MMC_READ_MULTIPLE_BLOCK;
                        writecmd = MMC_WRITE_MULTIPLE_BLOCK;
                } else {
                        brq.mrq.stop = NULL;
                        readcmd = MMC_READ_SINGLE_BLOCK;
                        writecmd = MMC_WRITE_BLOCK;
                }
                if (rq_data_dir(req) == READ) {
                        brq.cmd.opcode = readcmd;
                        brq.data.flags |= MMC_DATA_READ;
                } else {
                        brq.cmd.opcode = writecmd;
                        brq.data.flags |= MMC_DATA_WRITE;
                }

                if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
                        goto cmd_err;

                mmc_set_data_timeout(&brq.data, card);

                brq.data.sg = mq->sg;
                brq.data.sg_len = mmc_queue_map_sg(mq);

                /*
                 * Adjust the sg list so it is the same size as the
                 * request.
                 */
                if (brq.data.blocks != blk_rq_sectors(req)) {
                        int i, data_size = brq.data.blocks << 9;
                        struct scatterlist *sg;

                        for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
                                data_size -= sg->length;
                                if (data_size <= 0) {
                                        sg->length += data_size;
                                        i++;
                                        break;
                                }
                        }
                        brq.data.sg_len = i;
                }

                mmc_queue_bounce_pre(mq);

                mmc_wait_for_req(card->host, &brq.mrq);

                mmc_queue_bounce_post(mq);

                /*
                 * Check for errors here, but don't jump to cmd_err
                 * until later as we need to wait for the card to leave
                 * programming mode even when things go wrong.
                 */
                if (brq.cmd.error || brq.data.error || brq.stop.error) {
                        if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
                                /* Redo read one sector at a time */
                                printk(KERN_WARNING "%s: retrying using single "
                                       "block read\n", req->rq_disk->disk_name);
                                disable_multi = 1;
                                continue;
                        }
                        status = get_card_status(card, req);
                }

                if (brq.cmd.error) {
                        printk(KERN_ERR "%s: error %d sending read/write "
                               "command, response %#x, card status %#x\n",
                               req->rq_disk->disk_name, brq.cmd.error,
                               brq.cmd.resp[0], status);
                }

                if (brq.data.error) {
                        if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
                                /* 'Stop' response contains card status */
                                status = brq.mrq.stop->resp[0];
                        printk(KERN_ERR "%s: error %d transferring data,"
                               " sector %u, nr %u, card status %#x\n",
                               req->rq_disk->disk_name, brq.data.error,
                               (unsigned)blk_rq_pos(req),
                               (unsigned)blk_rq_sectors(req), status);
                }

                if (brq.stop.error) {
                        printk(KERN_ERR "%s: error %d sending stop command, "
                               "response %#x, card status %#x\n",
                               req->rq_disk->disk_name, brq.stop.error,
                               brq.stop.resp[0], status);
                }

                if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
                        do {
                                int err;

                                cmd.opcode = MMC_SEND_STATUS;
                                cmd.arg = card->rca << 16;
                                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
                                err = mmc_wait_for_cmd(card->host, &cmd, 5);
                                if (err) {
                                        printk(KERN_ERR "%s: error %d requesting status\n",
                                               req->rq_disk->disk_name, err);
                                        goto cmd_err;
                                }
                                /*
                                 * Some cards mishandle the status bits,
                                 * so make sure to check both the busy
                                 * indication and the card state.
                                 */
                        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                                (R1_CURRENT_STATE(cmd.resp[0]) == 7)); /* 7 = prg */

#if 0
                        if (cmd.resp[0] & ~0x00000900)
                                printk(KERN_ERR "%s: status = %08x\n",
                                       req->rq_disk->disk_name, cmd.resp[0]);
                        if (mmc_decode_status(cmd.resp))
                                goto cmd_err;
#endif
                }

                if (brq.cmd.error || brq.stop.error || brq.data.error) {
                        if (rq_data_dir(req) == READ) {
                                /*
                                 * After an error, we redo I/O one sector at a
                                 * time, so we only reach here after trying to
                                 * read a single sector.
                                 */
                                spin_lock_irq(&md->lock);
                                ret = __blk_end_request(req, -EIO, brq.data.blksz);
                                spin_unlock_irq(&md->lock);
                                continue;
                        }
                        goto cmd_err;
                }

                /*
                 * A block was successfully transferred.
                 */
                spin_lock_irq(&md->lock);
                ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        } while (ret);

        return 1;

 cmd_err:
        /*
         * If this is an SD card and we're writing, we can first
         * mark the known good sectors as ok.
         *
         * If the card is not SD, we can still ok written sectors
         * as reported by the controller (which might be less than
         * the real number of written sectors, but never more).
         */
        if (mmc_card_sd(card)) {
                u32 blocks;

                blocks = mmc_sd_num_wr_blocks(card);
                if (blocks != (u32)-1) {
                        spin_lock_irq(&md->lock);
                        ret = __blk_end_request(req, 0, blocks << 9);
                        spin_unlock_irq(&md->lock);
                }
        } else {
                spin_lock_irq(&md->lock);
                ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        }

        spin_lock_irq(&md->lock);
        while (ret)
                ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
        spin_unlock_irq(&md->lock);

        return 0;
}

static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
        int ret;
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;

        mmc_claim_host(card->host);
        ret = mmc_blk_part_switch(card, md);
        if (ret) {
                ret = 0;
                goto out;
        }

        if (req->cmd_flags & REQ_DISCARD) {
                if (req->cmd_flags & REQ_SECURE)
                        ret = mmc_blk_issue_secdiscard_rq(mq, req);
                else
                        ret = mmc_blk_issue_discard_rq(mq, req);
        } else if (req->cmd_flags & REQ_FLUSH) {
                ret = mmc_blk_issue_flush(mq, req);
        } else {
                ret = mmc_blk_issue_rw_rq(mq, req);
        }

out:
        mmc_release_host(card->host);
        return ret;
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
        return mmc_card_readonly(card) ||
               !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

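/*
 * Allocate and initialise the per-disk data, gendisk and request queue
 * for one device or one of its physical partitions.
 */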
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                                              struct device *parent,
                                              sector_t size,
                                              bool default_ro,
                                              const char *subname)
{
        struct mmc_blk_data *md;
        int devidx, ret;

        devidx = find_first_zero_bit(dev_use, max_devices);
        if (devidx >= max_devices)
                return ERR_PTR(-ENOSPC);
        __set_bit(devidx, dev_use);

        md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
        if (!md) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * !subname implies we are creating main mmc_blk_data that will be
         * associated with mmc_card with mmc_set_drvdata. Due to device
         * partitions, devidx will not coincide with a per-physical card
         * index anymore so we keep track of a name index.
         */
        if (!subname) {
                md->name_idx = find_first_zero_bit(name_use, max_devices);
                __set_bit(md->name_idx, name_use);
        } else
                md->name_idx = ((struct mmc_blk_data *)
                                dev_to_disk(parent)->private_data)->name_idx;

        /*
         * Set the read-only status based on the supported commands
         * and the write protect switch.
         */
        md->read_only = mmc_blk_readonly(card);

        md->disk = alloc_disk(perdev_minors);
        if (md->disk == NULL) {
                ret = -ENOMEM;
                goto err_kfree;
        }

        spin_lock_init(&md->lock);
        INIT_LIST_HEAD(&md->part);
        md->usage = 1;

        ret = mmc_init_queue(&md->queue, card, &md->lock);
        if (ret)
                goto err_putdisk;

        md->queue.issue_fn = mmc_blk_issue_rq;
        md->queue.data = md;

        md->disk->major = MMC_BLOCK_MAJOR;
        md->disk->first_minor = devidx * perdev_minors;
        md->disk->fops = &mmc_bdops;
        md->disk->private_data = md;
        md->disk->queue = md->queue.queue;
        md->disk->driverfs_dev = parent;
        set_disk_ro(md->disk, md->read_only || default_ro);
        if (REL_WRITES_SUPPORTED(card))
                blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);

        /*
         * As discussed on lkml, GENHD_FL_REMOVABLE should:
         *
         * - be set for removable media with permanent block devices
         * - be unset for removable block devices with permanent media
         *
         * Since MMC block devices clearly fall under the second
         * case, we do not set GENHD_FL_REMOVABLE.  Userspace
         * should use the block device creation/destruction hotplug
         * messages to tell when the card is present.
         */

        snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
                 "mmcblk%d%s", md->name_idx, subname ? subname : "");

        blk_queue_logical_block_size(md->queue.queue, 512);
        set_capacity(md->disk, size);
        return md;

 err_putdisk:
        put_disk(md->disk);
 err_kfree:
        kfree(md);
 out:
        return ERR_PTR(ret);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
        sector_t size;
        struct mmc_blk_data *md;

        if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
                /*
                 * The EXT_CSD sector count is in units of 512-byte
                 * sectors.
                 */
                size = card->ext_csd.sectors;
        } else {
                /*
                 * The CSD capacity field is in units of read_blkbits.
                 * set_capacity takes units of 512 bytes.
                 */
                size = card->csd.capacity << (card->csd.read_blkbits - 9);
        }

        md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
        return md;
}

static int mmc_blk_alloc_part(struct mmc_card *card,
                              struct mmc_blk_data *md,
                              unsigned int part_type,
                              sector_t size,
                              bool default_ro,
                              const char *subname)
{
        char cap_str[10];
        struct mmc_blk_data *part_md;

        part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
                                    subname);
        if (IS_ERR(part_md))
                return PTR_ERR(part_md);
        part_md->part_type = part_type;
        list_add(&part_md->part, &md->part);

        string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        printk(KERN_INFO "%s: %s %s partition %u %s\n",
               part_md->disk->disk_name, mmc_card_id(card),
               mmc_card_name(card), part_md->part_type, cap_str);
        return 0;
}

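/*
 * eMMC boot partitions are exposed as separate, default read-only
 * block devices ("boot0"/"boot1") hanging off the main disk.
 */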
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
        int ret = 0;

        if (!mmc_card_mmc(card))
                return 0;

        if (card->ext_csd.boot_size) {
                ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
                                         card->ext_csd.boot_size >> 9,
                                         true,
                                         "boot0");
                if (ret)
                        return ret;
                ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
                                         card->ext_csd.boot_size >> 9,
                                         true,
                                         "boot1");
                if (ret)
                        return ret;
        }

        return ret;
}

static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
        int err;

        mmc_claim_host(card->host);
        err = mmc_set_blocklen(card, 512);
        mmc_release_host(card->host);

        if (err) {
                printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
                        md->disk->disk_name, err);
                return -EINVAL;
        }

        return 0;
}

static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
        if (md) {
                if (md->disk->flags & GENHD_FL_UP) {
                        device_remove_file(disk_to_dev(md->disk), &md->force_ro);

                        /* Stop new requests from getting into the queue */
                        del_gendisk(md->disk);
                }

                /* Then flush out any already in there */
                mmc_cleanup_queue(&md->queue);
                mmc_blk_put(md);
        }
}

static void mmc_blk_remove_parts(struct mmc_card *card,
                                 struct mmc_blk_data *md)
{
        struct list_head *pos, *q;
        struct mmc_blk_data *part_md;

        __clear_bit(md->name_idx, name_use);
        list_for_each_safe(pos, q, &md->part) {
                part_md = list_entry(pos, struct mmc_blk_data, part);
                list_del(pos);
                mmc_blk_remove_req(part_md);
        }
}

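/*
 * Register the gendisk, then set up and create its "force_ro" sysfs
 * attribute.  sysfs_attr_init() initialises the attribute's lockdep
 * key, which is needed for dynamically allocated attributes.
 */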
static int mmc_add_disk(struct mmc_blk_data *md)
{
        int ret;

        add_disk(md->disk);
        md->force_ro.show = force_ro_show;
        md->force_ro.store = force_ro_store;
        sysfs_attr_init(&md->force_ro.attr);
        md->force_ro.attr.name = "force_ro";
        md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
        ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
        if (ret)
                del_gendisk(md->disk);

        return ret;
}

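/* iNAND cards require a write to EXT_CSD[113] before CMD38 erase/trim. */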
static const struct mmc_fixup blk_fixups[] =
{
        MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
        END_FIXUP
};

static int mmc_blk_probe(struct mmc_card *card)
{
        struct mmc_blk_data *md, *part_md;
        int err;
        char cap_str[10];

        /*
         * Check that the card supports the command class(es) we need.
         */
        if (!(card->csd.cmdclass & CCC_BLOCK_READ))
                return -ENODEV;

        md = mmc_blk_alloc(card);
        if (IS_ERR(md))
                return PTR_ERR(md);

        err = mmc_blk_set_blksize(md, card);
        if (err)
                goto out;

        string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        printk(KERN_INFO "%s: %s %s %s %s\n",
                md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
                cap_str, md->read_only ? "(ro)" : "");

        err = mmc_blk_alloc_parts(card, md);
        if (err)
                goto out;

        mmc_set_drvdata(card, md);
        mmc_fixup_device(card, blk_fixups);

        err = mmc_add_disk(md);
        if (err)
                goto out;

        list_for_each_entry(part_md, &md->part, part) {
                err = mmc_add_disk(part_md);
                if (err)
                        goto out;
        }
        return 0;

 out:
        mmc_blk_remove_parts(card, md);
        mmc_blk_remove_req(md);
        return err;
}

static void mmc_blk_remove(struct mmc_card *card)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        mmc_blk_remove_parts(card, md);
        mmc_blk_remove_req(md);
        mmc_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_queue_suspend(&md->queue);
                list_for_each_entry(part_md, &md->part, part) {
                        mmc_queue_suspend(&part_md->queue);
                }
        }
        return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_blk_set_blksize(md, card);

                /*
                 * Resume involves the card going into idle state,
                 * so current partition is always the main one.
                 */
                md->part_curr = md->part_type;
                mmc_queue_resume(&md->queue);
                list_for_each_entry(part_md, &md->part, part) {
                        mmc_queue_resume(&part_md->queue);
                }
        }
        return 0;
}
#else
#define mmc_blk_suspend NULL
#define mmc_blk_resume  NULL
#endif

static struct mmc_driver mmc_driver = {
        .drv            = {
                .name   = "mmcblk",
        },
        .probe          = mmc_blk_probe,
        .remove         = mmc_blk_remove,
        .suspend        = mmc_blk_suspend,
        .resume         = mmc_blk_resume,
};

static int __init mmc_blk_init(void)
{
        int res;

        if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
                pr_info("mmcblk: using %d minors per device\n", perdev_minors);

        max_devices = 256 / perdev_minors;

        res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
        if (res)
                goto out;

        res = mmc_register_driver(&mmc_driver);
        if (res)
                goto out2;

        return 0;
 out2:
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
        return res;
}

static void __exit mmc_blk_exit(void)
{
        mmc_unregister_driver(&mmc_driver);
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");