X-Git-Url: http://plrg.eecs.uci.edu/git/?p=firefly-linux-kernel-4.4.55.git;a=blobdiff_plain;f=drivers%2Fmmc%2Fcard%2Fblock.c;h=139c4e37735a93eb6f1f43b41d23b796d7e7e983;hp=21aa3244029b277b2287307d16929e78b5383bf9;hb=67ed0175f08f82855f9a7b38f0ab0839d176a682;hpb=c455cc2a8f795d3bbafe8f076482fa3f23b9d77d diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 21aa3244029b..139c4e37735a 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -36,7 +36,6 @@ #include #include -#define CREATE_TRACE_POINTS #include #include @@ -89,7 +88,6 @@ static int max_devices; /* TODO: Replace these with struct ida */ static DECLARE_BITMAP(dev_use, MAX_DEVICES); -static DECLARE_BITMAP(name_use, MAX_DEVICES); /* * There is one mmc_blk_data per slot. @@ -108,7 +106,6 @@ struct mmc_blk_data { unsigned int usage; unsigned int read_only; unsigned int part_type; - unsigned int name_idx; unsigned int reset_done; #define MMC_BLK_READ BIT(0) #define MMC_BLK_WRITE BIT(1) @@ -289,6 +286,250 @@ out: return ret; } +#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED + +static int max_read_speed, max_write_speed, cache_size = 4; + +module_param(max_read_speed, int, S_IRUSR | S_IRGRP); +MODULE_PARM_DESC(max_read_speed, "maximum KB/s read speed 0=off"); +module_param(max_write_speed, int, S_IRUSR | S_IRGRP); +MODULE_PARM_DESC(max_write_speed, "maximum KB/s write speed 0=off"); +module_param(cache_size, int, S_IRUSR | S_IRGRP); +MODULE_PARM_DESC(cache_size, "MB high speed memory or SLC cache"); + +/* + * helper macros and expectations: + * size - unsigned long number of bytes + * jiffies - unsigned long HZ timestamp difference + * speed - unsigned KB/s transfer rate + */ +#define size_and_speed_to_jiffies(size, speed) \ + ((size) * HZ / (speed) / 1024UL) +#define jiffies_and_speed_to_size(jiffies, speed) \ + (((speed) * (jiffies) * 1024UL) / HZ) +#define jiffies_and_size_to_speed(jiffies, size) \ + ((size) * HZ / (jiffies) / 1024UL) + +/* Limits to report warning */ +/* jiffies_and_size_to_speed(10*HZ, queue_max_hw_sectors(q) * 512UL) ~ 25 */ +#define MIN_SPEED(q) 250 /* 10 times faster than a floppy disk */ +#define MAX_SPEED(q) jiffies_and_size_to_speed(1, queue_max_sectors(q) * 512UL) + +#define speed_valid(speed) ((speed) > 0) + +static const char off[] = "off\n"; + +static int max_speed_show(int speed, char *buf) +{ + if (speed) + return scnprintf(buf, PAGE_SIZE, "%uKB/s\n", speed); + else + return scnprintf(buf, PAGE_SIZE, off); +} + +static int max_speed_store(const char *buf, struct request_queue *q) +{ + unsigned int limit, set = 0; + + if (!strncasecmp(off, buf, sizeof(off) - 2)) + return set; + if (kstrtouint(buf, 0, &set) || (set > INT_MAX)) + return -EINVAL; + if (set == 0) + return set; + limit = MAX_SPEED(q); + if (set > limit) + pr_warn("max speed %u ineffective above %u\n", set, limit); + limit = MIN_SPEED(q); + if (set < limit) + pr_warn("max speed %u painful below %u\n", set, limit); + return set; +} + +static ssize_t max_write_speed_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + int ret = max_speed_show(atomic_read(&md->queue.max_write_speed), buf); + + mmc_blk_put(md); + return ret; +} + +static ssize_t max_write_speed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + int set = max_speed_store(buf, md->queue.queue); + + if (set < 0) { + mmc_blk_put(md); + return set; + } + + 
atomic_set(&md->queue.max_write_speed, set); + mmc_blk_put(md); + return count; +} + +static const DEVICE_ATTR(max_write_speed, S_IRUGO | S_IWUSR, + max_write_speed_show, max_write_speed_store); + +static ssize_t max_read_speed_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + int ret = max_speed_show(atomic_read(&md->queue.max_read_speed), buf); + + mmc_blk_put(md); + return ret; +} + +static ssize_t max_read_speed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + int set = max_speed_store(buf, md->queue.queue); + + if (set < 0) { + mmc_blk_put(md); + return set; + } + + atomic_set(&md->queue.max_read_speed, set); + mmc_blk_put(md); + return count; +} + +static const DEVICE_ATTR(max_read_speed, S_IRUGO | S_IWUSR, + max_read_speed_show, max_read_speed_store); + +static ssize_t cache_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + struct mmc_queue *mq = &md->queue; + int cache_size = atomic_read(&mq->cache_size); + int ret; + + if (!cache_size) + ret = scnprintf(buf, PAGE_SIZE, off); + else { + int speed = atomic_read(&mq->max_write_speed); + + if (!speed_valid(speed)) + ret = scnprintf(buf, PAGE_SIZE, "%uMB\n", cache_size); + else { /* We accept race between cache_jiffies and cache_used */ + unsigned long size = jiffies_and_speed_to_size( + jiffies - mq->cache_jiffies, speed); + long used = atomic_long_read(&mq->cache_used); + + if (size >= used) + size = 0; + else + size = (used - size) * 100 / cache_size + / 1024UL / 1024UL; + + ret = scnprintf(buf, PAGE_SIZE, "%uMB %lu%% used\n", + cache_size, size); + } + } + + mmc_blk_put(md); + return ret; +} + +static ssize_t cache_size_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mmc_blk_data *md; + unsigned int set = 0; + + if (strncasecmp(off, buf, sizeof(off) - 2) + && (kstrtouint(buf, 0, &set) || (set > INT_MAX))) + return -EINVAL; + + md = mmc_blk_get(dev_to_disk(dev)); + atomic_set(&md->queue.cache_size, set); + mmc_blk_put(md); + return count; +} + +static const DEVICE_ATTR(cache_size, S_IRUGO | S_IWUSR, + cache_size_show, cache_size_store); + +/* correct for write-back */ +static long mmc_blk_cache_used(struct mmc_queue *mq, unsigned long waitfor) +{ + long used = 0; + int speed = atomic_read(&mq->max_write_speed); + + if (speed_valid(speed)) { + unsigned long size = jiffies_and_speed_to_size( + waitfor - mq->cache_jiffies, speed); + used = atomic_long_read(&mq->cache_used); + + if (size >= used) + used = 0; + else + used -= size; + } + + atomic_long_set(&mq->cache_used, used); + mq->cache_jiffies = waitfor; + + return used; +} + +static void mmc_blk_simulate_delay( + struct mmc_queue *mq, + struct request *req, + unsigned long waitfor) +{ + int max_speed; + + if (!req) + return; + + max_speed = (rq_data_dir(req) == READ) + ? 
atomic_read(&mq->max_read_speed) + : atomic_read(&mq->max_write_speed); + if (speed_valid(max_speed)) { + unsigned long bytes = blk_rq_bytes(req); + + if (rq_data_dir(req) != READ) { + int cache_size = atomic_read(&mq->cache_size); + + if (cache_size) { + unsigned long size = cache_size * 1024L * 1024L; + long used = mmc_blk_cache_used(mq, waitfor); + + used += bytes; + atomic_long_set(&mq->cache_used, used); + bytes = 0; + if (used > size) + bytes = used - size; + } + } + waitfor += size_and_speed_to_jiffies(bytes, max_speed); + if (time_is_after_jiffies(waitfor)) { + long msecs = jiffies_to_msecs(waitfor - jiffies); + + if (likely(msecs > 0)) + msleep(msecs); + } + } +} + +#else + +#define mmc_blk_simulate_delay(mq, req, waitfor) + +#endif + static int mmc_blk_open(struct block_device *bdev, fmode_t mode) { struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); @@ -594,6 +835,14 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, struct mmc_card *card; int err = 0, ioc_err = 0; + /* + * The caller must have CAP_SYS_RAWIO, and must be calling this on the + * whole block device, not on a partition. This prevents overspray + * between sibling partitions. + */ + if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) + return -EPERM; + idata = mmc_blk_ioctl_copy_from_user(ic_ptr); if (IS_ERR(idata)) return PTR_ERR(idata); @@ -636,6 +885,14 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, int i, err = 0, ioc_err = 0; __u64 num_of_cmds; + /* + * The caller must have CAP_SYS_RAWIO, and must be calling this on the + * whole block device, not on a partition. This prevents overspray + * between sibling partitions. + */ + if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) + return -EPERM; + if (copy_from_user(&num_of_cmds, &user->num_of_cmds, sizeof(num_of_cmds))) return -EFAULT; @@ -691,14 +948,6 @@ cmd_err: static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { - /* - * The caller must have CAP_SYS_RAWIO, and must be calling this on the - * whole block device, not on a partition. This prevents overspray - * between sibling partitions. - */ - if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) - return -EPERM; - switch (cmd) { case MMC_IOC_CMD: return mmc_blk_ioctl_cmd(bdev, @@ -1257,6 +1506,23 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) if (ret) ret = -EIO; +#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED + else if (atomic_read(&mq->cache_size)) { + long used = mmc_blk_cache_used(mq, jiffies); + + if (used) { + int speed = atomic_read(&mq->max_write_speed); + + if (speed_valid(speed)) { + unsigned long msecs = jiffies_to_msecs( + size_and_speed_to_jiffies( + used, speed)); + if (msecs) + msleep(msecs); + } + } + } +#endif blk_end_request_all(req, ret); return ret ? 0 : 1; @@ -1749,7 +2015,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, struct mmc_blk_data *md = mq->data; struct mmc_packed *packed = mqrq->packed; bool do_rel_wr, do_data_tag; - u32 *packed_cmd_hdr; + __le32 *packed_cmd_hdr; u8 hdr_blocks; u8 i = 1; @@ -1761,8 +2027,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, packed_cmd_hdr = packed->cmd_hdr; memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr)); - packed_cmd_hdr[0] = (packed->nr_entries << 16) | - (PACKED_CMD_WR << 8) | PACKED_CMD_VER; + packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) | + (PACKED_CMD_WR << 8) | PACKED_CMD_VER); hdr_blocks = mmc_large_sector(card) ? 
8 : 1; /* @@ -1773,17 +2039,16 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, do_data_tag = (card->ext_csd.data_tag_unit_size) && (prq->cmd_flags & REQ_META) && (rq_data_dir(prq) == WRITE) && - ((brq->data.blocks * brq->data.blksz) >= - card->ext_csd.data_tag_unit_size); + blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size; /* Argument of CMD23 */ - packed_cmd_hdr[(i * 2)] = + packed_cmd_hdr[(i * 2)] = cpu_to_le32( (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) | (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) | - blk_rq_sectors(prq); + blk_rq_sectors(prq)); /* Argument of CMD18 or CMD25 */ - packed_cmd_hdr[((i * 2)) + 1] = + packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32( mmc_card_blockaddr(card) ? - blk_rq_pos(prq) : blk_rq_pos(prq) << 9; + blk_rq_pos(prq) : blk_rq_pos(prq) << 9); packed->blocks += blk_rq_sectors(prq); i++; } @@ -1936,6 +2201,9 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) struct mmc_async_req *areq; const u8 packed_nr = 2; u8 reqs = 0; +#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED + unsigned long waitfor = jiffies; +#endif if (!rqc && !mq->mqrq_prev->req) return 0; @@ -1986,6 +2254,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) */ mmc_blk_reset_success(md, type); + mmc_blk_simulate_delay(mq, rqc, waitfor); + if (mmc_packed_cmd(mq_rq->cmd_type)) { ret = mmc_blk_end_packed_req(mq_rq); break; @@ -2201,19 +2471,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, goto out; } - /* - * !subname implies we are creating main mmc_blk_data that will be - * associated with mmc_card with dev_set_drvdata. Due to device - * partitions, devidx will not coincide with a per-physical card - * index anymore so we keep track of a name index. - */ - if (!subname) { - md->name_idx = find_first_zero_bit(name_use, max_devices); - __set_bit(md->name_idx, name_use); - } else - md->name_idx = ((struct mmc_blk_data *) - dev_to_disk(parent)->private_data)->name_idx; - md->area_type = area_type; /* @@ -2263,7 +2520,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, */ snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), - "mmcblk%u%s", md->name_idx, subname ? subname : ""); + "mmcblk%u%s", card->host->index, subname ? 
subname : ""); if (mmc_card_mmc(card)) blk_queue_logical_block_size(md->queue.queue, @@ -2274,7 +2531,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, set_capacity(md->disk, size); if (mmc_host_cmd23(card->host)) { - if (mmc_card_mmc(card) || + if ((mmc_card_mmc(card) && + card->csd.mmca_vsn >= CSD_SPEC_VER_3) || (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT)) md->flags |= MMC_BLK_CMD23; @@ -2404,6 +2662,14 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) card->ext_csd.boot_ro_lockable) device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock); +#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED + device_remove_file(disk_to_dev(md->disk), + &dev_attr_max_write_speed); + device_remove_file(disk_to_dev(md->disk), + &dev_attr_max_read_speed); + device_remove_file(disk_to_dev(md->disk), + &dev_attr_cache_size); +#endif del_gendisk(md->disk); } @@ -2417,7 +2683,6 @@ static void mmc_blk_remove_parts(struct mmc_card *card, struct list_head *pos, *q; struct mmc_blk_data *part_md; - __clear_bit(md->name_idx, name_use); list_for_each_safe(pos, q, &md->part) { part_md = list_entry(pos, struct mmc_blk_data, part); list_del(pos); @@ -2439,6 +2704,24 @@ static int mmc_add_disk(struct mmc_blk_data *md) ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); if (ret) goto force_ro_fail; +#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED + atomic_set(&md->queue.max_write_speed, max_write_speed); + ret = device_create_file(disk_to_dev(md->disk), + &dev_attr_max_write_speed); + if (ret) + goto max_write_speed_fail; + atomic_set(&md->queue.max_read_speed, max_read_speed); + ret = device_create_file(disk_to_dev(md->disk), + &dev_attr_max_read_speed); + if (ret) + goto max_read_speed_fail; + atomic_set(&md->queue.cache_size, cache_size); + atomic_long_set(&md->queue.cache_used, 0); + md->queue.cache_jiffies = jiffies; + ret = device_create_file(disk_to_dev(md->disk), &dev_attr_cache_size); + if (ret) + goto cache_size_fail; +#endif if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && card->ext_csd.boot_ro_lockable) { @@ -2463,6 +2746,14 @@ static int mmc_add_disk(struct mmc_blk_data *md) return ret; power_ro_lock_fail: +#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED + device_remove_file(disk_to_dev(md->disk), &dev_attr_cache_size); +cache_size_fail: + device_remove_file(disk_to_dev(md->disk), &dev_attr_max_read_speed); +max_read_speed_fail: + device_remove_file(disk_to_dev(md->disk), &dev_attr_max_write_speed); +max_write_speed_fail: +#endif device_remove_file(disk_to_dev(md->disk), &md->force_ro); force_ro_fail: del_gendisk(md->disk); @@ -2509,11 +2800,12 @@ static const struct mmc_fixup blk_fixups[] = MMC_QUIRK_BLK_NO_CMD23), /* - * Some Micron MMC cards needs longer data read timeout than - * indicated in CSD. + * Some MMC cards need longer data read timeout than indicated in CSD. 
*/ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME), + MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_LONG_READ_TIME), /* * On these Samsung MoviNAND parts, performing secure erase or @@ -2549,6 +2841,7 @@ static const struct mmc_fixup blk_fixups[] = END_FIXUP }; +extern struct mmc_card *this_card; static int mmc_blk_probe(struct mmc_card *card) { struct mmc_blk_data *md, *part_md; @@ -2577,6 +2870,15 @@ static int mmc_blk_probe(struct mmc_card *card) dev_set_drvdata(&card->dev, md); +#if defined(CONFIG_MMC_DW_ROCKCHIP) || defined(CONFIG_MMC_SDHCI_OF_ARASAN) + if (card->host->restrict_caps & RESTRICT_CARD_TYPE_EMMC) { + this_card = card; + md->disk->is_rk_disk = true; + } else { + md->disk->is_rk_disk = false; + } +#endif + if (mmc_add_disk(md)) goto out; @@ -2609,6 +2911,11 @@ static void mmc_blk_remove(struct mmc_card *card) { struct mmc_blk_data *md = dev_get_drvdata(&card->dev); +#if defined(CONFIG_MMC_DW_ROCKCHIP) || defined(CONFIG_MMC_SDHCI_OF_ARASAN) + if (card->host->restrict_caps & RESTRICT_CARD_TYPE_EMMC) + this_card = NULL; +#endif + mmc_blk_remove_parts(card, md); pm_runtime_get_sync(&card->dev); mmc_claim_host(card->host);
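
Note on the CONFIG_MMC_SIMULATE_MAX_SPEED hunks above: they add three per-disk sysfs attributes (max_read_speed, max_write_speed, cache_size) via device_create_file() on the gendisk, alongside matching module parameters. The speed values are KB/s caps ("off" or 0 disables throttling) and cache_size is the simulated high-speed cache in MB. The sketch below is illustrative only and is not part of the patch: it assumes the kernel was built with this option, that the card enumerates as mmcblk0, that the caller has root, and that 2048 KB/s is just an example cap.

	/*
	 * Illustrative only: cap the simulated write speed at 2048 KB/s and
	 * read the setting back.  Path, device name and value are assumptions;
	 * writing requires root (the attributes are S_IRUGO | S_IWUSR).
	 */
	#include <stdio.h>

	int main(void)
	{
		const char *attr = "/sys/block/mmcblk0/max_write_speed";
		char buf[64] = "";
		FILE *f;

		f = fopen(attr, "w");
		if (!f) {
			perror(attr);
			return 1;
		}
		fputs("2048\n", f);	/* cap in KB/s; "0" or "off" disables */
		fclose(f);

		f = fopen(attr, "r");
		if (!f) {
			perror(attr);
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", attr, buf);	/* e.g. "2048KB/s" or "off" */
		fclose(f);
		return 0;
	}

Reading cache_size back from the same directory reports either "off" or the configured size plus an estimated percentage used, per cache_size_show() in the hunks above.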