#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
+#include <linux/delay.h>
+#include <linux/capability.h>
+#include <linux/compat.h>
+#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
-#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) && \
- (((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || \
- ((card)->ext_csd.rel_sectors)))
-
static DEFINE_MUTEX(block_mutex);
/*
 * We've only got one major, so number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
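+/* Name indices are tracked separately from dev_use: partitions share their parent card's name index */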
+static DECLARE_BITMAP(name_use, 256);
/*
* There is one mmc_blk_data per slot.
struct mmc_queue queue;
struct list_head part;
+ unsigned int flags;
+#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
+#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
+
unsigned int usage;
unsigned int read_only;
unsigned int part_type;
+ unsigned int name_idx;
/*
* Only set in main mmc_blk_data associated
static inline int mmc_get_devidx(struct gendisk *disk)
{
- int devmaj = MAJOR(disk_devt(disk));
- int devidx = MINOR(disk_devt(disk)) / perdev_minors;
-
- if (!devmaj)
- devidx = disk->first_minor / perdev_minors;
+ int devidx = disk->first_minor / perdev_minors;
return devidx;
}
return 0;
}
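+/*
+ * Kernel-side copy of the user's mmc_ioc_cmd plus a bounce buffer for
+ * the data phase of the passthrough command.
+ */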
+struct mmc_blk_ioc_data {
+ struct mmc_ioc_cmd ic;
+ unsigned char *buf;
+ u64 buf_bytes;
+};
+
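+/*
+ * Marshal the ioctl argument and its data buffer into kernel memory.
+ * Returns ERR_PTR() on failure; on success the caller owns both idata
+ * and idata->buf and must kfree() them.
+ */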
+static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+ struct mmc_ioc_cmd __user *user)
+{
+ struct mmc_blk_ioc_data *idata;
+ int err;
+
+ idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+ if (!idata) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
+ err = -EFAULT;
+ goto idata_err;
+ }
+
+ idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
+ if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
+ err = -EOVERFLOW;
+ goto idata_err;
+ }
+
+ idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
+ if (!idata->buf) {
+ err = -ENOMEM;
+ goto idata_err;
+ }
+
+ if (copy_from_user(idata->buf, (void __user *)(unsigned long)
+ idata->ic.data_ptr, idata->buf_bytes)) {
+ err = -EFAULT;
+ goto copy_err;
+ }
+
+ return idata;
+
+copy_err:
+ kfree(idata->buf);
+idata_err:
+ kfree(idata);
+out:
+ return ERR_PTR(err);
+}
+
+static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+ struct mmc_ioc_cmd __user *ic_ptr)
+{
+ struct mmc_blk_ioc_data *idata;
+ struct mmc_blk_data *md;
+ struct mmc_card *card;
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct mmc_request mrq = {0};
+ struct scatterlist sg;
+ int err;
+
+ /*
+ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+ * whole block device, not on a partition. This prevents overspray
+ * between sibling partitions.
+ */
+ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+ return -EPERM;
+
+ idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+ if (IS_ERR(idata))
+ return PTR_ERR(idata);
+
+ cmd.opcode = idata->ic.opcode;
+ cmd.arg = idata->ic.arg;
+ cmd.flags = idata->ic.flags;
+
+ data.sg = &sg;
+ data.sg_len = 1;
+ data.blksz = idata->ic.blksz;
+ data.blocks = idata->ic.blocks;
+
+ sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+
+ if (idata->ic.write_flag)
+ data.flags = MMC_DATA_WRITE;
+ else
+ data.flags = MMC_DATA_READ;
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ md = mmc_blk_get(bdev->bd_disk);
+ if (!md) {
+ err = -EINVAL;
+ goto cmd_done;
+ }
+
+ card = md->queue.card;
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ goto cmd_done;
+ }
+
+ mmc_claim_host(card->host);
+
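+ /* An ACMD must be preceded by CMD55 (APP_CMD); mmc_app_cmd() sends it. */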
+ if (idata->ic.is_acmd) {
+ err = mmc_app_cmd(card->host, card);
+ if (err)
+ goto cmd_rel_host;
+ }
+
+ /* data.flags must already be set before doing this. */
+ mmc_set_data_timeout(&data, card);
+ /* Allow overriding the timeout_ns for empirical tuning. */
+ if (idata->ic.data_timeout_ns)
+ data.timeout_ns = idata->ic.data_timeout_ns;
+
+ if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+ /*
+ * Pretend this is a data transfer and rely on the host driver
+ * to compute timeout. When all host drivers support
+ * cmd.cmd_timeout for R1B, this can be changed to:
+ *
+ * mrq.data = NULL;
+ * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+ */
+ data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+ }
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error) {
+ dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+ __func__, cmd.error);
+ err = cmd.error;
+ goto cmd_rel_host;
+ }
+ if (data.error) {
+ dev_err(mmc_dev(card->host), "%s: data error %d\n",
+ __func__, data.error);
+ err = data.error;
+ goto cmd_rel_host;
+ }
+
+ /*
+ * According to the SD specs, some commands require a delay after
+ * issuing the command.
+ */
+ if (idata->ic.postsleep_min_us)
+ usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
+
+ if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
+ err = -EFAULT;
+ goto cmd_rel_host;
+ }
+
+ if (!idata->ic.write_flag) {
+ if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
+ idata->buf, idata->buf_bytes)) {
+ err = -EFAULT;
+ goto cmd_rel_host;
+ }
+ }
+
+cmd_rel_host:
+ mmc_release_host(card->host);
+
+cmd_done:
+ mmc_blk_put(md);
+ kfree(idata->buf);
+ kfree(idata);
+ return err;
+}
+
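+/*
+ * Illustrative userspace sketch (hypothetical values; field names per
+ * linux/mmc/ioctl.h): to read the 512-byte EXT_CSD of an eMMC, roughly
+ * as mmc-utils does, set ic.opcode = 8 (MMC_SEND_EXT_CSD), ic.flags to
+ * the R1/ADTC encoding, ic.blksz = 512, ic.blocks = 1 and ic.data_ptr =
+ * (__u64)(unsigned long)buf, then call ioctl(fd, MMC_IOC_CMD, &ic) with
+ * fd open on the whole device and CAP_SYS_RAWIO held.
+ */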
+static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = -EINVAL;
+ if (cmd == MMC_IOC_CMD)
+ ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
static const struct block_device_operations mmc_bdops = {
.open = mmc_blk_open,
.release = mmc_blk_release,
.getgeo = mmc_blk_getgeo,
.owner = THIS_MODULE,
+ .ioctl = mmc_blk_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mmc_blk_compat_ioctl,
+#endif
};
struct mmc_blk_request {
struct mmc_request mrq;
+ struct mmc_command sbc;
struct mmc_command cmd;
struct mmc_command stop;
struct mmc_data data;
u32 result;
__be32 *blocks;
- struct mmc_request mrq;
- struct mmc_command cmd;
- struct mmc_data data;
+ struct mmc_request mrq = {0};
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
unsigned int timeout_us;
struct scatterlist sg;
- memset(&cmd, 0, sizeof(struct mmc_command));
-
cmd.opcode = MMC_APP_CMD;
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- memset(&data, 0, sizeof(struct mmc_data));
-
data.timeout_ns = card->csd.tacc_ns * 100;
data.timeout_clks = card->csd.tacc_clks * 100;
data.sg = &sg;
data.sg_len = 1;
- memset(&mrq, 0, sizeof(struct mmc_request));
-
mrq.cmd = &cmd;
mrq.data = &data;
static u32 get_card_status(struct mmc_card *card, struct request *req)
{
- struct mmc_command cmd;
+ struct mmc_command cmd = {0};
int err;
- memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = MMC_SEND_STATUS;
if (!mmc_host_is_spi(card->host))
cmd.arg = card->rca << 16;
* reliable write can handle, thus finish the request in
* partial completions.
*/
-static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
- struct mmc_card *card,
- struct request *req)
+static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
+ struct mmc_card *card,
+ struct request *req)
{
- int err;
- struct mmc_command set_count;
-
if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
/* Legacy mode imposes restrictions on transfers. */
if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
brq->data.blocks = 1;

if (brq->data.blocks > card->ext_csd.rel_sectors)
brq->data.blocks = card->ext_csd.rel_sectors;
else if (brq->data.blocks < card->ext_csd.rel_sectors)
brq->data.blocks = 1;
}
-
- memset(&set_count, 0, sizeof(struct mmc_command));
- set_count.opcode = MMC_SET_BLOCK_COUNT;
- set_count.arg = brq->data.blocks | (1 << 31);
- set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &set_count, 0);
- if (err)
- printk(KERN_ERR "%s: error %d SET_BLOCK_COUNT\n",
- req->rq_disk->disk_name, err);
- return err;
}
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
(req->cmd_flags & REQ_META)) &&
(rq_data_dir(req) == WRITE) &&
- REL_WRITES_SUPPORTED(card);
+ (md->flags & MMC_BLK_REL_WR);
do {
- struct mmc_command cmd;
+ struct mmc_command cmd = {0};
u32 readcmd, writecmd, status = 0;
memset(&brq, 0, sizeof(struct mmc_blk_request));
if (brq.data.blocks > 1 || do_rel_wr) {
/* SPI multiblock writes terminate using a special
- * token, not a STOP_TRANSMISSION request. Reliable
- * writes use SET_BLOCK_COUNT and do not use a
- * STOP_TRANSMISSION request either.
+ * token, not a STOP_TRANSMISSION request.
*/
- if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
+ if (!mmc_host_is_spi(card->host) ||
rq_data_dir(req) == READ)
brq.mrq.stop = &brq.stop;
readcmd = MMC_READ_MULTIPLE_BLOCK;
brq.data.flags |= MMC_DATA_WRITE;
}
- if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
- goto cmd_err;
+ if (do_rel_wr)
+ mmc_apply_rel_rw(&brq, card, req);
+
+ /*
+ * Pre-defined multi-block transfers are preferable to
+ * open-ended ones (and necessary for reliable writes).
+ * However, it is not sufficient to just send CMD23,
+ * and avoid the final CMD12, as on an error condition
+ * CMD12 (stop) needs to be sent anyway. This, coupled
+ * with Auto-CMD23 enhancements provided by some
+ * hosts, means that the complexity of dealing
+ * with this is best left to the host. If CMD23 is
+ * supported by card and host, we'll fill sbc in and let
+ * the host deal with handling it correctly. This means
+ * that for hosts that don't expose MMC_CAP_CMD23, no
+ * change of behavior will be observed.
+ *
+ * N.B.: Some MMC cards experience performance degradation.
+ * We'll avoid using CMD23-bounded multiblock writes for
+ * these, while retaining features like reliable writes.
+ */
+
+ if ((md->flags & MMC_BLK_CMD23) &&
+ mmc_op_multi(brq.cmd.opcode) &&
+ (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+ brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq.sbc.arg = brq.data.blocks |
+ (do_rel_wr ? (1 << 31) : 0);
+ brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ brq.mrq.sbc = &brq.sbc;
+ }
mmc_set_data_timeout(&brq.data, card);
* until later as we need to wait for the card to leave
* programming mode even when things go wrong.
*/
- if (brq.cmd.error || brq.data.error || brq.stop.error) {
+ if (brq.sbc.error || brq.cmd.error ||
+ brq.data.error || brq.stop.error) {
if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
/* Redo read one sector at a time */
printk(KERN_WARNING "%s: retrying using single "
continue;
}
status = get_card_status(card, req);
+ } else if (disable_multi == 1) {
+ disable_multi = 0;
+ }
+
+ if (brq.sbc.error) {
+ printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
+ "command, response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq.sbc.error,
+ brq.sbc.resp[0], status);
}
if (brq.cmd.error) {
return 0;
}
+static int
+mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card);
+
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
int ret;
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
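+ /*
+ * With deferred resume the bus may still be suspended when the first
+ * request arrives: wake it and restore the block size before issuing.
+ */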
+ if (mmc_bus_needs_resume(card->host)) {
+ mmc_resume_bus(card->host);
+ mmc_blk_set_blksize(md, card);
+ }
+#endif
+
mmc_claim_host(card->host);
ret = mmc_blk_part_switch(card, md);
if (ret) {
goto out;
}
+ /*
+ * !subname implies we are creating the main mmc_blk_data that will
+ * be associated with the mmc_card via mmc_set_drvdata. Due to device
+ * partitions, devidx will no longer coincide with a per-physical-card
+ * index, so we keep track of a separate name index.
+ */
+ if (!subname) {
+ md->name_idx = find_first_zero_bit(name_use, max_devices);
+ __set_bit(md->name_idx, name_use);
+ } else
+ md->name_idx = ((struct mmc_blk_data *)
+ dev_to_disk(parent)->private_data)->name_idx;
+
/*
* Set the read-only status based on the supported commands
* and the write protect switch.
INIT_LIST_HEAD(&md->part);
md->usage = 1;
- ret = mmc_init_queue(&md->queue, card, &md->lock);
+ ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
if (ret)
goto err_putdisk;
md->disk->queue = md->queue.queue;
md->disk->driverfs_dev = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
- if (REL_WRITES_SUPPORTED(card))
- blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+ md->disk->flags = GENHD_FL_EXT_DEVT;
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
* messages to tell when the card is present.
*/
- if (subname)
- snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
- "mmcblk%d%s",
- mmc_get_devidx(dev_to_disk(parent)), subname);
- else
- snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
- "mmcblk%d", devidx);
+ snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
+ "mmcblk%d%s", md->name_idx, subname ? subname : "");
blk_queue_logical_block_size(md->queue.queue, 512);
set_capacity(md->disk, size);
+
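+ /* CMD23 needs support from both host and card (SCR bit for SD). */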
+ if (mmc_host_cmd23(card->host)) {
+ if (mmc_card_mmc(card) ||
+ (mmc_card_sd(card) &&
+ card->scr.cmds & SD_SCR_CMD23_SUPPORT))
+ md->flags |= MMC_BLK_CMD23;
+ }
+
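+ /*
+ * Reliable writes are implemented on top of CMD23, so REQ_FLUSH and
+ * REQ_FUA are only advertised when CMD23 is actually usable.
+ */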
+ if (mmc_card_mmc(card) &&
+ md->flags & MMC_BLK_CMD23 &&
+ ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+ card->ext_csd.rel_sectors)) {
+ md->flags |= MMC_BLK_REL_WR;
+ blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+ }
+
return md;
err_putdisk:
struct list_head *pos, *q;
struct mmc_blk_data *part_md;
+ __clear_bit(md->name_idx, name_use);
list_for_each_safe(pos, q, &md->part) {
part_md = list_entry(pos, struct mmc_blk_data, part);
list_del(pos);
add_disk(md->disk);
md->force_ro.show = force_ro_show;
md->force_ro.store = force_ro_store;
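+ /* Dynamically allocated attribute: annotate for lockdep before use. */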
+ sysfs_attr_init(&md->force_ro.attr);
md->force_ro.attr.name = "force_ro";
md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+
+ /*
+ * Some MMC cards experience performance degradation with CMD23
+ * instead of CMD12-bounded multiblock transfers. For now we'll
+ * blacklist what's bad...
+ * - Certain Toshiba cards.
+ *
+ * N.B. This doesn't affect SD cards.
+ */
+ MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
END_FIXUP
};
mmc_set_drvdata(card, md);
mmc_fixup_device(card, blk_fixups);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 1);
+#endif
if (mmc_add_disk(md))
goto out;
struct mmc_blk_data *md = mmc_get_drvdata(card);
mmc_blk_remove_parts(card, md);
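+ /* Switch back to the main area before tearing the device down. */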
+ mmc_claim_host(card->host);
+ mmc_blk_part_switch(card, md);
+ mmc_release_host(card->host);
mmc_blk_remove_req(md);
mmc_set_drvdata(card, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 0);
+#endif
}
#ifdef CONFIG_PM
struct mmc_blk_data *md = mmc_get_drvdata(card);
if (md) {
+#ifndef CONFIG_MMC_BLOCK_DEFERRED_RESUME
mmc_blk_set_blksize(md, card);
+#endif
/*
* Resume involves the card going into idle state,