2 * Gadget Function Driver for MTP
4 * Copyright (C) 2010 Google, Inc.
5 * Author: Mike Lockwood <lockwood@android.com>
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
19 /* #define VERBOSE_DEBUG */
21 #include <linux/module.h>
22 #include <linux/init.h>
23 #include <linux/poll.h>
24 #include <linux/delay.h>
25 #include <linux/wait.h>
26 #include <linux/err.h>
27 #include <linux/interrupt.h>
29 #include <linux/types.h>
30 #include <linux/file.h>
31 #include <linux/device.h>
32 #include <linux/miscdevice.h>
34 #include <linux/usb.h>
35 #include <linux/usb_usual.h>
36 #include <linux/usb/ch9.h>
37 #include <linux/usb/f_mtp.h>
38 #include <linux/configfs.h>
39 #include <linux/usb/composite.h>
/* Size of each bulk transfer buffer (one usb_request's worth of payload). */
43 #define MTP_BULK_BUFFER_SIZE 16384
/* Size of the interrupt-endpoint event buffer; also wMaxPacketSize of the
 * interrupt IN endpoint and the upper bound enforced in mtp_send_event().
 */
44 #define INTR_BUFFER_SIZE 28
/* Maximum configfs instance-name length accepted by mtp_set_inst_name(). */
45 #define MAX_INST_NAME_LEN 40
/* Index into mtp_string_defs[] for the interface name string. */
48 #define INTERFACE_STRING_INDEX 0
50 /* values for mtp_dev.state */
51 #define STATE_OFFLINE 0 /* initial state, disconnected */
52 #define STATE_READY 1 /* ready for userspace calls */
53 #define STATE_BUSY 2 /* processing userspace calls */
54 #define STATE_CANCELED 3 /* transaction canceled by host */
55 #define STATE_ERROR 4 /* error from completion routine */
57 /* number of tx and rx requests to allocate */
/* NOTE(review): TX_REQ_MAX / RX_REQ_MAX are referenced below but their
 * #define lines are not visible in this excerpt — confirm in full source.
 */
60 #define INTR_REQ_MAX 5
62 /* ID for Microsoft MTP OS String */
63 #define MTP_OS_STRING_ID 0xEE
65 /* MTP class requests */
66 #define MTP_REQ_CANCEL 0x64
67 #define MTP_REQ_GET_EXT_EVENT_DATA 0x65
68 #define MTP_REQ_RESET 0x66
69 #define MTP_REQ_GET_DEVICE_STATUS 0x67
71 /* constants for device status */
/* MTP response codes returned in MTP_REQ_GET_DEVICE_STATUS replies. */
72 #define MTP_RESPONSE_OK 0x2001
73 #define MTP_RESPONSE_DEVICE_BUSY 0x2019
74 #define DRIVER_NAME "mtp"
/* Misc-device node name: "mtp_usb" (appears as /dev/mtp_usb). */
76 static const char mtp_shortname[] = DRIVER_NAME "_usb";
/* Per-function driver state.
 * NOTE(review): this is a sampled excerpt — the "struct mtp_dev {" opening
 * line and several members (ep_in, state, lock, rx_done, open_excl,
 * ioctl_excl, xfer_result, ...) referenced elsewhere in the file are not
 * visible here; confirm against the full source.
 */
79 struct usb_function function;
80 struct usb_composite_dev *cdev;
/* bulk OUT and interrupt IN endpoints (bulk IN member not visible here) */
84 struct usb_ep *ep_out;
85 struct usb_ep *ep_intr;
89 /* synchronize access to our device file */
91 /* to enforce only one ioctl at a time */
/* free request pools: bulk-IN requests and interrupt requests */
94 struct list_head tx_idle;
95 struct list_head intr_idle;
/* wait queues for blocking readers/writers/event senders */
97 wait_queue_head_t read_wq;
98 wait_queue_head_t write_wq;
99 wait_queue_head_t intr_wq;
100 struct usb_request *rx_req[RX_REQ_MAX];
103 /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
104 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
106 struct workqueue_struct *wq;
107 struct work_struct send_file_work;
108 struct work_struct receive_file_work;
/* parameters handed from mtp_ioctl() to the file-transfer work items */
109 struct file *xfer_file;
110 loff_t xfer_file_offset;
111 int64_t xfer_file_length;
112 unsigned xfer_send_header;
113 uint16_t xfer_command;
114 uint32_t xfer_transaction_id;
/* Interface descriptor used when the function is bound as MTP:
 * vendor-specific class so libmtp matches on the "MTP" interface string.
 */
118 static struct usb_interface_descriptor mtp_interface_desc = {
119 .bLength = USB_DT_INTERFACE_SIZE,
120 .bDescriptorType = USB_DT_INTERFACE,
121 .bInterfaceNumber = 0,
123 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
124 .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
125 .bInterfaceProtocol = 0,
/* Interface descriptor used when bound as PTP: standard still-image class. */
128 static struct usb_interface_descriptor ptp_interface_desc = {
129 .bLength = USB_DT_INTERFACE_SIZE,
130 .bDescriptorType = USB_DT_INTERFACE,
131 .bInterfaceNumber = 0,
133 .bInterfaceClass = USB_CLASS_STILL_IMAGE,
134 .bInterfaceSubClass = 1,
135 .bInterfaceProtocol = 1,
/* SuperSpeed bulk endpoints: 1024-byte max packet plus companion
 * descriptors (bMaxBurst filled in at bind time).
 */
138 static struct usb_endpoint_descriptor mtp_ss_in_desc = {
139 .bLength = USB_DT_ENDPOINT_SIZE,
140 .bDescriptorType = USB_DT_ENDPOINT,
141 .bEndpointAddress = USB_DIR_IN,
142 .bmAttributes = USB_ENDPOINT_XFER_BULK,
143 .wMaxPacketSize = __constant_cpu_to_le16(1024),
146 static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = {
147 .bLength = sizeof(mtp_ss_in_comp_desc),
148 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
149 /* .bMaxBurst = DYNAMIC, */
152 static struct usb_endpoint_descriptor mtp_ss_out_desc = {
153 .bLength = USB_DT_ENDPOINT_SIZE,
154 .bDescriptorType = USB_DT_ENDPOINT,
155 .bEndpointAddress = USB_DIR_OUT,
156 .bmAttributes = USB_ENDPOINT_XFER_BULK,
157 .wMaxPacketSize = __constant_cpu_to_le16(1024),
160 static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = {
161 .bLength = sizeof(mtp_ss_out_comp_desc),
162 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
163 /* .bMaxBurst = DYNAMIC, */
/* High-speed bulk endpoints: 512-byte max packet. */
166 static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
167 .bLength = USB_DT_ENDPOINT_SIZE,
168 .bDescriptorType = USB_DT_ENDPOINT,
169 .bEndpointAddress = USB_DIR_IN,
170 .bmAttributes = USB_ENDPOINT_XFER_BULK,
171 .wMaxPacketSize = __constant_cpu_to_le16(512),
174 static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
175 .bLength = USB_DT_ENDPOINT_SIZE,
176 .bDescriptorType = USB_DT_ENDPOINT,
177 .bEndpointAddress = USB_DIR_OUT,
178 .bmAttributes = USB_ENDPOINT_XFER_BULK,
179 .wMaxPacketSize = __constant_cpu_to_le16(512),
/* Full-speed bulk endpoints: wMaxPacketSize left for autoconfig. */
182 static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
183 .bLength = USB_DT_ENDPOINT_SIZE,
184 .bDescriptorType = USB_DT_ENDPOINT,
185 .bEndpointAddress = USB_DIR_IN,
186 .bmAttributes = USB_ENDPOINT_XFER_BULK,
189 static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
190 .bLength = USB_DT_ENDPOINT_SIZE,
191 .bDescriptorType = USB_DT_ENDPOINT,
192 .bEndpointAddress = USB_DIR_OUT,
193 .bmAttributes = USB_ENDPOINT_XFER_BULK,
/* Interrupt IN endpoint for asynchronous MTP events, shared by all speeds. */
196 static struct usb_endpoint_descriptor mtp_intr_desc = {
197 .bLength = USB_DT_ENDPOINT_SIZE,
198 .bDescriptorType = USB_DT_ENDPOINT,
199 .bEndpointAddress = USB_DIR_IN,
200 .bmAttributes = USB_ENDPOINT_XFER_INT,
201 .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
205 static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = {
206 .bLength = sizeof(mtp_intr_ss_comp_desc),
207 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
208 .wBytesPerInterval = cpu_to_le16(INTR_BUFFER_SIZE),
/* Per-speed descriptor lists for the MTP interface variant. */
211 static struct usb_descriptor_header *fs_mtp_descs[] = {
212 (struct usb_descriptor_header *) &mtp_interface_desc,
213 (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
214 (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
215 (struct usb_descriptor_header *) &mtp_intr_desc,
219 static struct usb_descriptor_header *hs_mtp_descs[] = {
220 (struct usb_descriptor_header *) &mtp_interface_desc,
221 (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
222 (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
223 (struct usb_descriptor_header *) &mtp_intr_desc,
227 static struct usb_descriptor_header *ss_mtp_descs[] = {
228 (struct usb_descriptor_header *) &mtp_interface_desc,
229 (struct usb_descriptor_header *) &mtp_ss_in_desc,
230 (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
231 (struct usb_descriptor_header *) &mtp_ss_out_desc,
232 (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
233 (struct usb_descriptor_header *) &mtp_intr_desc,
234 (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
/* Per-speed descriptor lists for the PTP interface variant; endpoints are
 * shared with MTP, only the interface descriptor differs.
 */
238 static struct usb_descriptor_header *fs_ptp_descs[] = {
239 (struct usb_descriptor_header *) &ptp_interface_desc,
240 (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
241 (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
242 (struct usb_descriptor_header *) &mtp_intr_desc,
246 static struct usb_descriptor_header *hs_ptp_descs[] = {
247 (struct usb_descriptor_header *) &ptp_interface_desc,
248 (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
249 (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
250 (struct usb_descriptor_header *) &mtp_intr_desc,
254 static struct usb_descriptor_header *ss_ptp_descs[] = {
255 (struct usb_descriptor_header *) &ptp_interface_desc,
256 (struct usb_descriptor_header *) &mtp_ss_in_desc,
257 (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
258 (struct usb_descriptor_header *) &mtp_ss_out_desc,
259 (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
260 (struct usb_descriptor_header *) &mtp_intr_desc,
261 (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
265 static struct usb_string mtp_string_defs[] = {
266 /* Naming interface "MTP" so libmtp will recognize us */
267 [INTERFACE_STRING_INDEX].s = "MTP",
268 { }, /* end of list */
271 static struct usb_gadget_strings mtp_string_table = {
272 .language = 0x0409, /* en-US */
273 .strings = mtp_string_defs,
276 static struct usb_gadget_strings *mtp_strings[] = {
281 /* Microsoft MTP OS String */
/* Returned for GET_DESCRIPTOR(string, index 0xEE); Windows uses it to
 * discover the vendor code for the extended OS feature descriptor.
 */
282 static u8 mtp_os_string[] = {
283 18, /* sizeof(mtp_os_string) */
285 /* Signature field: "MSFT100" */
286 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
293 /* Microsoft Extended Configuration Descriptor Header Section */
/* NOTE(review): header members (dwLength, bcdVersion, wIndex, ...) are not
 * visible in this excerpt — see the initializer below for the field set.
 */
294 struct mtp_ext_config_desc_header {
302 /* Microsoft Extended Configuration Descriptor Function Section */
303 struct mtp_ext_config_desc_function {
304 __u8 bFirstInterfaceNumber;
305 __u8 bInterfaceCount;
306 __u8 compatibleID[8];
307 __u8 subCompatibleID[8];
311 /* MTP Extended Configuration Descriptor */
313 struct mtp_ext_config_desc_header header;
314 struct mtp_ext_config_desc_function function;
315 } mtp_ext_config_desc = {
317 .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
318 .bcdVersion = __constant_cpu_to_le16(0x0100),
319 .wIndex = __constant_cpu_to_le16(4),
323 .bFirstInterfaceNumber = 0,
324 .bInterfaceCount = 1,
/* compatibleID "MTP" tells Windows to bind its MTP class driver */
325 .compatibleID = { 'M', 'T', 'P' },
/* Reply body for the MTP_REQ_GET_DEVICE_STATUS class request. */
329 struct mtp_device_status {
/* Header prepended to bulk data when xfer_send_header is set. */
334 struct mtp_data_header {
335 /* length of packet, including this header */
337 /* container type (2 for data packet) */
339 /* MTP command code */
341 /* MTP transaction ID */
342 __le32 transaction_id;
/* configfs instance wrapper pairing the function instance with its
 * Microsoft OS descriptor storage.
 */
345 struct mtp_instance {
346 struct usb_function_instance func_inst;
349 char mtp_ext_compat_id[16];
350 struct usb_os_desc mtp_os_desc;
353 /* temporary variable used between mtp_open() and mtp_gadget_bind() */
354 static struct mtp_dev *_mtp_dev;
/* Map a usb_function back to its containing mtp_dev. */
356 static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
358 return container_of(f, struct mtp_dev, function);
/* Allocate a usb_request plus a kmalloc'd transfer buffer of buffer_size
 * bytes; frees the request again if the buffer allocation fails.
 */
361 static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
363 struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
368 /* now allocate buffers for the requests */
369 req->buf = kmalloc(buffer_size, GFP_KERNEL);
371 usb_ep_free_request(ep, req);
/* Free a request allocated by mtp_request_new() (buffer kfree not visible
 * in this excerpt but implied by the allocation above).
 */
378 static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
382 usb_ep_free_request(ep, req);
/* Atomic try-lock: succeeds only for the first caller (counter 0 -> 1). */
386 static inline int mtp_lock(atomic_t *excl)
388 if (atomic_inc_return(excl) == 1) {
396 static inline void mtp_unlock(atomic_t *excl)
401 /* add a request to the tail of a list */
402 static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
403 struct usb_request *req)
407 spin_lock_irqsave(&dev->lock, flags);
408 list_add_tail(&req->list, head);
409 spin_unlock_irqrestore(&dev->lock, flags);
412 /* remove a request from the head of a list */
/* Returns NULL when the list is empty (empty branch not fully visible). */
413 static struct usb_request
414 *mtp_req_get(struct mtp_dev *dev, struct list_head *head)
417 struct usb_request *req;
419 spin_lock_irqsave(&dev->lock, flags);
420 if (list_empty(head)) {
423 req = list_first_entry(head, struct usb_request, list);
424 list_del(&req->list);
426 spin_unlock_irqrestore(&dev->lock, flags);
/* Bulk-IN completion: on error latch STATE_ERROR, recycle the request into
 * tx_idle, and wake any blocked writer.
 */
430 static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
432 struct mtp_dev *dev = _mtp_dev;
434 if (req->status != 0)
435 dev->state = STATE_ERROR;
437 mtp_req_put(dev, &dev->tx_idle, req);
439 wake_up(&dev->write_wq);
/* Bulk-OUT completion: wake the blocked reader (rx_done set in lines not
 * visible here — confirm in full source).
 */
442 static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
444 struct mtp_dev *dev = _mtp_dev;
447 if (req->status != 0)
448 dev->state = STATE_ERROR;
450 wake_up(&dev->read_wq);
/* Interrupt completion: recycle request and wake event senders. */
453 static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
455 struct mtp_dev *dev = _mtp_dev;
457 if (req->status != 0)
458 dev->state = STATE_ERROR;
460 mtp_req_put(dev, &dev->intr_idle, req);
462 wake_up(&dev->intr_wq);
/* Claim the three endpoints (bulk IN, bulk OUT, interrupt IN) from the
 * gadget via usb_ep_autoconfig() and pre-allocate the request pools.
 * Called from mtp_function_bind(); returns negative errno on failure
 * (error-return lines not visible in this excerpt).
 */
465 static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
466 struct usb_endpoint_descriptor *in_desc,
467 struct usb_endpoint_descriptor *out_desc,
468 struct usb_endpoint_descriptor *intr_desc)
470 struct usb_composite_dev *cdev = dev->cdev;
471 struct usb_request *req;
475 DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
477 ep = usb_ep_autoconfig(cdev->gadget, in_desc);
479 DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
482 DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
483 ep->driver_data = dev; /* claim the endpoint */
486 ep = usb_ep_autoconfig(cdev->gadget, out_desc);
488 DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
491 DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
492 ep->driver_data = dev; /* claim the endpoint */
495 ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
497 DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
500 DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
501 ep->driver_data = dev; /* claim the endpoint */
504 /* now allocate requests for our endpoints */
/* TX requests go on the tx_idle free list ... */
505 for (i = 0; i < TX_REQ_MAX; i++) {
506 req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
509 req->complete = mtp_complete_in;
510 mtp_req_put(dev, &dev->tx_idle, req);
/* ... RX requests are indexed directly (used round-robin in receive path) */
512 for (i = 0; i < RX_REQ_MAX; i++) {
513 req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
516 req->complete = mtp_complete_out;
517 dev->rx_req[i] = req;
519 for (i = 0; i < INTR_REQ_MAX; i++) {
520 req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
523 req->complete = mtp_complete_intr;
524 mtp_req_put(dev, &dev->intr_idle, req);
/* allocation-failure path */
530 pr_err("mtp_bind() could not allocate requests\n");
/* read(2) handler for /dev/mtp_usb: block until online, queue one bulk-OUT
 * request, wait for completion, then copy up to `count` bytes to userspace.
 * Returns bytes read, -ECANCELED when the host canceled the transaction, or
 * a negative errno (several error paths are not visible in this excerpt).
 */
534 static ssize_t mtp_read(struct file *fp, char __user *buf,
535 size_t count, loff_t *pos)
537 struct mtp_dev *dev = fp->private_data;
538 struct usb_composite_dev *cdev = dev->cdev;
539 struct usb_request *req;
544 DBG(cdev, "mtp_read(%zu)\n", count);
/* reads are limited to one buffer's worth */
546 if (count > MTP_BULK_BUFFER_SIZE)
549 /* we will block until we're online */
550 DBG(cdev, "mtp_read: waiting for online state\n");
551 ret = wait_event_interruptible(dev->read_wq,
552 dev->state != STATE_OFFLINE);
557 spin_lock_irq(&dev->lock);
558 if (dev->state == STATE_CANCELED) {
559 /* report cancelation to userspace */
560 dev->state = STATE_READY;
561 spin_unlock_irq(&dev->lock);
564 dev->state = STATE_BUSY;
565 spin_unlock_irq(&dev->lock);
568 /* queue a request */
/* plain read always uses rx_req[0]; the multi-buffer rotation is only in
 * receive_file_work()
 */
569 req = dev->rx_req[0];
572 ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
577 DBG(cdev, "rx %p queue\n", req);
580 /* wait for a request to complete */
581 ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
/* interrupted: take the request back off the endpoint */
584 usb_ep_dequeue(dev->ep_out, req);
587 if (dev->state == STATE_BUSY) {
588 /* If we got a 0-len packet, throw it back and try again. */
589 if (req->actual == 0)
592 DBG(cdev, "rx %p %d\n", req, req->actual);
/* clamp to the caller's buffer size */
593 xfer = (req->actual < count) ? req->actual : count;
595 if (copy_to_user(buf, req->buf, xfer))
601 spin_lock_irq(&dev->lock);
602 if (dev->state == STATE_CANCELED)
604 else if (dev->state != STATE_OFFLINE)
605 dev->state = STATE_READY;
606 spin_unlock_irq(&dev->lock);
608 DBG(cdev, "mtp_read returning %zd\n", r);
/* write(2) handler: split userspace data into MTP_BULK_BUFFER_SIZE chunks
 * and queue them on the bulk-IN endpoint, blocking on tx_idle for a free
 * request. Appends a zero-length packet when the total is an exact multiple
 * of the endpoint's max packet size so the host sees end-of-transfer.
 * Returns bytes written, -ECANCELED on host cancel, or negative errno.
 */
612 static ssize_t mtp_write(struct file *fp, const char __user *buf,
613 size_t count, loff_t *pos)
615 struct mtp_dev *dev = fp->private_data;
616 struct usb_composite_dev *cdev = dev->cdev;
617 struct usb_request *req = 0;
623 DBG(cdev, "mtp_write(%zu)\n", count);
625 spin_lock_irq(&dev->lock);
626 if (dev->state == STATE_CANCELED) {
627 /* report cancelation to userspace */
628 dev->state = STATE_READY;
629 spin_unlock_irq(&dev->lock);
632 if (dev->state == STATE_OFFLINE) {
633 spin_unlock_irq(&dev->lock);
636 dev->state = STATE_BUSY;
637 spin_unlock_irq(&dev->lock);
639 /* we need to send a zero length packet to signal the end of transfer
640 * if the transfer size is aligned to a packet boundary.
/* maxpacket is a power of two, so this tests count % maxpacket == 0 */
642 if ((count & (dev->ep_in->maxpacket - 1)) == 0)
645 while (count > 0 || sendZLP) {
646 /* so we exit after sending ZLP */
/* bail out if a cancel/disconnect changed state mid-transfer */
650 if (dev->state != STATE_BUSY) {
651 DBG(cdev, "mtp_write dev->error\n");
656 /* get an idle tx request to use */
658 ret = wait_event_interruptible(dev->write_wq,
659 ((req = mtp_req_get(dev, &dev->tx_idle))
660 || dev->state != STATE_BUSY));
666 if (count > MTP_BULK_BUFFER_SIZE)
667 xfer = MTP_BULK_BUFFER_SIZE;
670 if (xfer && copy_from_user(req->buf, buf, xfer)) {
676 ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
678 DBG(cdev, "mtp_write: xfer error %d\n", ret);
686 /* zero this so we don't try to free it on error exit */
/* req is now owned by the endpoint until mtp_complete_in() recycles it */
691 mtp_req_put(dev, &dev->tx_idle, req);
693 spin_lock_irq(&dev->lock);
694 if (dev->state == STATE_CANCELED)
696 else if (dev->state != STATE_OFFLINE)
697 dev->state = STATE_READY;
698 spin_unlock_irq(&dev->lock);
700 DBG(cdev, "mtp_write returning %zd\n", r);
704 /* read from a local file and write to USB */
/* Work item for MTP_SEND_FILE / MTP_SEND_FILE_WITH_HEADER: streams
 * xfer_file over the bulk-IN endpoint in buffer-sized chunks, optionally
 * prepending a struct mtp_data_header to the first chunk. Runs in process
 * context so vfs_read() can use the kernel buffers directly. The result is
 * stored in dev->xfer_result for mtp_ioctl() to read after flushing the
 * workqueue.
 */
705 static void send_file_work(struct work_struct *data)
707 struct mtp_dev *dev = container_of(data, struct mtp_dev,
709 struct usb_composite_dev *cdev = dev->cdev;
710 struct usb_request *req = 0;
711 struct mtp_data_header *header;
715 int xfer, ret, hdr_size;
719 /* read our parameters */
721 filp = dev->xfer_file;
722 offset = dev->xfer_file_offset;
723 count = dev->xfer_file_length;
725 DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
727 if (dev->xfer_send_header) {
728 hdr_size = sizeof(struct mtp_data_header);
734 /* we need to send a zero length packet to signal the end of transfer
735 * if the transfer size is aligned to a packet boundary.
737 if ((count & (dev->ep_in->maxpacket - 1)) == 0)
740 while (count > 0 || sendZLP) {
741 /* so we exit after sending ZLP */
745 /* get an idle tx request to use */
747 ret = wait_event_interruptible(dev->write_wq,
748 (req = mtp_req_get(dev, &dev->tx_idle))
749 || dev->state != STATE_BUSY);
750 if (dev->state == STATE_CANCELED) {
759 if (count > MTP_BULK_BUFFER_SIZE)
760 xfer = MTP_BULK_BUFFER_SIZE;
765 /* prepend MTP data header */
/* note: header->length is the full remaining count; for files larger than
 * 32 bits the on-the-wire value is truncated to __le32 by the cast
 */
766 header = (struct mtp_data_header *)req->buf;
767 header->length = __cpu_to_le32(count);
768 header->type = __cpu_to_le16(2); /* data packet */
769 header->command = __cpu_to_le16(dev->xfer_command);
770 header->transaction_id =
771 __cpu_to_le32(dev->xfer_transaction_id);
/* file payload goes after the (possibly zero-sized) header */
774 ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
780 xfer = ret + hdr_size;
784 ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
786 DBG(cdev, "send_file_work: xfer error %d\n", ret);
787 dev->state = STATE_ERROR;
794 /* zero this so we don't try to free it on error exit */
799 mtp_req_put(dev, &dev->tx_idle, req);
801 DBG(cdev, "send_file_work returning %d\n", r);
802 /* write the result */
803 dev->xfer_result = r;
807 /* read from USB and write to a local file */
/* Work item for MTP_RECEIVE_FILE: double-buffers between the rx_req[] pool
 * (rotated via cur_buf) and vfs_write(), so one request can be filling on
 * the wire while the previous one is written to disk. A count of 0xFFFFFFFF
 * means "read until short/zero-length packet". Result is published in
 * dev->xfer_result for mtp_ioctl().
 */
808 static void receive_file_work(struct work_struct *data)
810 struct mtp_dev *dev = container_of(data, struct mtp_dev,
812 struct usb_composite_dev *cdev = dev->cdev;
813 struct usb_request *read_req = NULL, *write_req = NULL;
817 int ret, cur_buf = 0;
820 /* read our parameters */
822 filp = dev->xfer_file;
823 offset = dev->xfer_file_offset;
824 count = dev->xfer_file_length;
826 DBG(cdev, "receive_file_work(%lld)\n", count);
/* loop while USB data remains or a filled buffer still needs writing */
828 while (count > 0 || write_req) {
830 /* queue a request */
831 read_req = dev->rx_req[cur_buf];
832 cur_buf = (cur_buf + 1) % RX_REQ_MAX;
834 read_req->length = (count > MTP_BULK_BUFFER_SIZE
835 ? MTP_BULK_BUFFER_SIZE : count);
837 ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
840 dev->state = STATE_ERROR;
/* flush the previously-completed buffer to the file */
846 DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
847 ret = vfs_write(filp, write_req->buf, write_req->actual,
849 DBG(cdev, "vfs_write %d\n", ret);
850 if (ret != write_req->actual) {
852 dev->state = STATE_ERROR;
859 /* wait for our last read to complete */
860 ret = wait_event_interruptible(dev->read_wq,
861 dev->rx_done || dev->state != STATE_BUSY);
862 if (dev->state == STATE_CANCELED) {
865 usb_ep_dequeue(dev->ep_out, read_req);
868 /* if xfer_file_length is 0xFFFFFFFF, then we read until
869 * we get a zero length packet
871 if (count != 0xFFFFFFFF)
872 count -= read_req->actual;
873 if (read_req->actual < read_req->length) {
875 * short packet is used to signal EOF for
878 DBG(cdev, "got short packet\n");
/* hand the completed buffer to the write side of the pipeline */
882 write_req = read_req;
887 DBG(cdev, "receive_file_work returning %d\n", r);
888 /* write the result */
889 dev->xfer_result = r;
/* Send an asynchronous MTP event to the host on the interrupt endpoint.
 * Copies event->data (at most INTR_BUFFER_SIZE bytes) from userspace into
 * an idle interrupt request, waiting up to 1s for one to become free.
 * Returns 0 on success or negative errno.
 */
893 static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
895 struct usb_request *req = NULL;
897 int length = event->length;
899 DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);
/* reject oversized (or, defensively, negative) lengths */
901 if (length < 0 || length > INTR_BUFFER_SIZE)
903 if (dev->state == STATE_OFFLINE)
/* wait for an idle interrupt request; times out after one second */
906 ret = wait_event_interruptible_timeout(dev->intr_wq,
907 (req = mtp_req_get(dev, &dev->intr_idle)),
908 msecs_to_jiffies(1000));
912 if (copy_from_user(req->buf, (void __user *)event->data, length)) {
/* return the request to the pool on failure */
913 mtp_req_put(dev, &dev->intr_idle, req);
916 req->length = length;
917 ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
919 mtp_req_put(dev, &dev->intr_idle, req);
/* ioctl handler for /dev/mtp_usb. File-transfer ioctls hand their
 * parameters to a work item and block (via flush_workqueue) until it
 * finishes; MTP_SEND_EVENT(_32) queues an interrupt transfer directly.
 * ioctl_excl serializes: only one ioctl may run at a time.
 */
924 static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
926 struct mtp_dev *dev = fp->private_data;
927 struct file *filp = NULL;
930 if (mtp_lock(&dev->ioctl_excl))
935 case MTP_RECEIVE_FILE:
936 case MTP_SEND_FILE_WITH_HEADER:
938 struct mtp_file_range mfr;
939 struct work_struct *work;
941 spin_lock_irq(&dev->lock);
942 if (dev->state == STATE_CANCELED) {
943 /* report cancelation to userspace */
944 dev->state = STATE_READY;
945 spin_unlock_irq(&dev->lock);
949 if (dev->state == STATE_OFFLINE) {
950 spin_unlock_irq(&dev->lock);
954 dev->state = STATE_BUSY;
955 spin_unlock_irq(&dev->lock);
957 if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
961 /* hold a reference to the file while we are working with it */
968 /* write the parameters */
969 dev->xfer_file = filp;
970 dev->xfer_file_offset = mfr.offset;
971 dev->xfer_file_length = mfr.length;
/* choose which work item runs and whether to prepend a data header */
974 if (code == MTP_SEND_FILE_WITH_HEADER) {
975 work = &dev->send_file_work;
976 dev->xfer_send_header = 1;
977 dev->xfer_command = mfr.command;
978 dev->xfer_transaction_id = mfr.transaction_id;
979 } else if (code == MTP_SEND_FILE) {
980 work = &dev->send_file_work;
981 dev->xfer_send_header = 0;
983 work = &dev->receive_file_work;
986 /* We do the file transfer on a work queue so it will run
987 * in kernel context, which is necessary for vfs_read and
988 * vfs_write to use our buffers in the kernel address space.
990 queue_work(dev->wq, work);
991 /* wait for operation to complete */
992 flush_workqueue(dev->wq);
995 /* read the result */
997 ret = dev->xfer_result;
1000 case MTP_SEND_EVENT:
1002 struct mtp_event event;
1003 /* return here so we don't change dev->state below,
1004 * which would interfere with bulk transfer state.
1006 if (copy_from_user(&event, (void __user *)value, sizeof(event)))
1009 ret = mtp_send_event(dev, &event);
1012 #ifdef CONFIG_COMPAT
/* 32-bit userspace variant: widen the packed 32-bit event struct */
1013 case MTP_SEND_EVENT_32:
1015 struct mtp_event_32 event_32;
1016 struct mtp_event event;
1017 /* return here so we don't change dev->state below,
1018 * which would interfere with bulk transfer state.
1020 if (copy_from_user(&event_32, (void __user *)value,
1024 event.length = event_32.length;
1025 event.data = (void *)(unsigned long)event_32.data;
1026 ret = mtp_send_event(dev, &event);
/* common exit: fold CANCELED back to READY unless we went offline */
1034 spin_lock_irq(&dev->lock);
1035 if (dev->state == STATE_CANCELED)
1037 else if (dev->state != STATE_OFFLINE)
1038 dev->state = STATE_READY;
1039 spin_unlock_irq(&dev->lock);
1041 mtp_unlock(&dev->ioctl_excl);
1042 DBG(dev->cdev, "ioctl returning %d\n", ret);
/* open(2): single-open enforced via open_excl; clears stale error state. */
1046 static int mtp_open(struct inode *ip, struct file *fp)
1048 printk(KERN_INFO "mtp_open\n");
1049 if (mtp_lock(&_mtp_dev->open_excl))
1052 /* clear any error condition */
1053 if (_mtp_dev->state != STATE_OFFLINE)
1054 _mtp_dev->state = STATE_READY;
1056 fp->private_data = _mtp_dev;
1060 static int mtp_release(struct inode *ip, struct file *fp)
1062 printk(KERN_INFO "mtp_release\n");
1064 mtp_unlock(&_mtp_dev->open_excl);
1068 /* file operations for /dev/mtp_usb */
1069 static const struct file_operations mtp_fops = {
1070 .owner = THIS_MODULE,
1073 .unlocked_ioctl = mtp_ioctl,
1074 #ifdef CONFIG_COMPAT
/* same handler: mtp_ioctl dispatches MTP_SEND_EVENT_32 for compat callers */
1075 .compat_ioctl = mtp_ioctl,
1078 .release = mtp_release,
/* misc device registered as /dev/mtp_usb in __mtp_setup() */
1081 static struct miscdevice mtp_device = {
1082 .minor = MISC_DYNAMIC_MINOR,
1083 .name = mtp_shortname,
/* ep0 control-request handler. Services three request families:
 *  - GET_DESCRIPTOR for string 0xEE -> Microsoft OS string
 *  - vendor request 1 (wIndex 4/5)  -> Microsoft extended config descriptor
 *  - MTP class requests (CANCEL, GET_DEVICE_STATUS)
 * Returns the data-stage length queued on ep0, or -EOPNOTSUPP when the
 * request is not ours.
 */
1087 static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
1088 const struct usb_ctrlrequest *ctrl)
1090 struct mtp_dev *dev = _mtp_dev;
1091 int value = -EOPNOTSUPP;
1092 u16 w_index = le16_to_cpu(ctrl->wIndex);
1093 u16 w_value = le16_to_cpu(ctrl->wValue);
1094 u16 w_length = le16_to_cpu(ctrl->wLength);
1095 unsigned long flags;
1097 VDBG(cdev, "mtp_ctrlrequest "
1098 "%02x.%02x v%04x i%04x l%u\n",
1099 ctrl->bRequestType, ctrl->bRequest,
1100 w_value, w_index, w_length);
1102 /* Handle MTP OS string */
1103 if (ctrl->bRequestType ==
1104 (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
1105 && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
1106 && (w_value >> 8) == USB_DT_STRING
1107 && (w_value & 0xFF) == MTP_OS_STRING_ID) {
/* clamp reply to the host-requested length */
1108 value = (w_length < sizeof(mtp_os_string)
1109 ? w_length : sizeof(mtp_os_string));
1110 memcpy(cdev->req->buf, mtp_os_string, value);
1111 } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
1112 /* Handle MTP OS descriptor */
1113 DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
1114 ctrl->bRequest, w_index, w_value, w_length);
/* wIndex 4 = extended compat ID; 5 also accepted (Windows quirk) */
1116 if (ctrl->bRequest == 1
1117 && (ctrl->bRequestType & USB_DIR_IN)
1118 && (w_index == 4 || w_index == 5)) {
1119 value = (w_length < sizeof(mtp_ext_config_desc) ?
1120 w_length : sizeof(mtp_ext_config_desc));
1121 memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
1123 } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
1124 DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
1125 ctrl->bRequest, w_index, w_value, w_length);
1127 if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
1129 DBG(cdev, "MTP_REQ_CANCEL\n");
1131 spin_lock_irqsave(&dev->lock, flags);
1132 if (dev->state == STATE_BUSY) {
/* mark canceled and kick both blocked directions */
1133 dev->state = STATE_CANCELED;
1134 wake_up(&dev->read_wq);
1135 wake_up(&dev->write_wq);
1137 spin_unlock_irqrestore(&dev->lock, flags);
1139 /* We need to queue a request to read the remaining
1140 * bytes, but we don't actually need to look at
1144 } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
1145 && w_index == 0 && w_value == 0) {
1146 struct mtp_device_status *status = cdev->req->buf;
1149 __constant_cpu_to_le16(sizeof(*status));
1151 DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
1152 spin_lock_irqsave(&dev->lock, flags);
1153 /* device status is "busy" until we report
1154 * the cancelation to userspace
1156 if (dev->state == STATE_CANCELED)
1158 __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
1161 __cpu_to_le16(MTP_RESPONSE_OK);
1162 spin_unlock_irqrestore(&dev->lock, flags);
1163 value = sizeof(*status);
1167 /* respond with data transfer or status phase? */
/* zero-flag handles a data stage shorter than the host asked for */
1171 cdev->req->zero = value < w_length;
1172 cdev->req->length = value;
1173 rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
1175 ERROR(cdev, "%s: response queue error\n", __func__);
/* Composite bind callback: allocates the interface ID and string ID, wires
 * up the Microsoft OS descriptor table when the gadget uses OS strings,
 * claims endpoints, and copies the autoconfig'd full-speed endpoint
 * addresses into the high-/super-speed descriptor variants.
 */
1181 mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
1183 struct usb_composite_dev *cdev = c->cdev;
1184 struct mtp_dev *dev = func_to_mtp(f);
1187 struct mtp_instance *fi_mtp;
1190 DBG(cdev, "mtp_function_bind dev: %p\n", dev);
1192 /* allocate interface ID(s) */
1193 id = usb_interface_id(c, f);
1196 mtp_interface_desc.bInterfaceNumber = id;
/* allocate the "MTP" interface string once; it is shared across binds */
1198 if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
1199 ret = usb_string_id(c->cdev);
1202 mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
1203 mtp_interface_desc.iInterface = ret;
1206 fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
1208 if (cdev->use_os_string) {
1209 f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
1211 if (!f->os_desc_table)
1214 f->os_desc_table[0].os_desc = &fi_mtp->mtp_os_desc;
1217 /* allocate endpoints */
1218 ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
1219 &mtp_fullspeed_out_desc, &mtp_intr_desc);
1223 /* support high speed hardware */
1224 if (gadget_is_dualspeed(c->cdev->gadget)) {
1225 mtp_highspeed_in_desc.bEndpointAddress =
1226 mtp_fullspeed_in_desc.bEndpointAddress;
1227 mtp_highspeed_out_desc.bEndpointAddress =
1228 mtp_fullspeed_out_desc.bEndpointAddress;
1230 /* support super speed hardware */
1231 if (gadget_is_superspeed(c->cdev->gadget)) {
1234 /* Calculate bMaxBurst, we know packet size is 1024 */
1235 max_burst = min_t(unsigned, MTP_BULK_BUFFER_SIZE / 1024, 15);
1236 mtp_ss_in_desc.bEndpointAddress =
1237 mtp_fullspeed_in_desc.bEndpointAddress;
1238 mtp_ss_in_comp_desc.bMaxBurst = max_burst;
1239 mtp_ss_out_desc.bEndpointAddress =
1240 mtp_fullspeed_out_desc.bEndpointAddress;
1241 mtp_ss_out_comp_desc.bMaxBurst = max_burst;
1244 DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
1245 gadget_is_superspeed(c->cdev->gadget) ? "super" :
1246 (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
1247 f->name, dev->ep_in->name, dev->ep_out->name);
/* Unbind: free every pre-allocated request, reset the shared string ID so a
 * later bind reallocates it, mark the device offline, and release the OS
 * descriptor table allocated in bind.
 */
1252 mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
1254 struct mtp_dev *dev = func_to_mtp(f);
1255 struct usb_request *req;
1258 mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
1259 while ((req = mtp_req_get(dev, &dev->tx_idle)))
1260 mtp_request_free(req, dev->ep_in);
1261 for (i = 0; i < RX_REQ_MAX; i++)
1262 mtp_request_free(dev->rx_req[i], dev->ep_out);
1263 while ((req = mtp_req_get(dev, &dev->intr_idle)))
1264 mtp_request_free(req, dev->ep_intr);
1265 dev->state = STATE_OFFLINE;
1266 kfree(f->os_desc_table);
/* set_alt: (re)configure and enable all three endpoints for the current
 * connection speed; on partial failure the already-enabled endpoints are
 * disabled again before returning the error.
 */
1270 static int mtp_function_set_alt(struct usb_function *f,
1271 unsigned intf, unsigned alt)
1273 struct mtp_dev *dev = func_to_mtp(f);
1274 struct usb_composite_dev *cdev = f->config->cdev;
1277 DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
1279 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
1283 ret = usb_ep_enable(dev->ep_in);
1287 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
1291 ret = usb_ep_enable(dev->ep_out);
1293 usb_ep_disable(dev->ep_in);
1297 ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
1301 ret = usb_ep_enable(dev->ep_intr);
1303 usb_ep_disable(dev->ep_out);
1304 usb_ep_disable(dev->ep_in);
1307 dev->state = STATE_READY;
1309 /* readers may be blocked waiting for us to go online */
1310 wake_up(&dev->read_wq);
/* disable: called on disconnect/config change — go offline, shut down the
 * endpoints, and unblock any reader stuck waiting for data.
 */
1314 static void mtp_function_disable(struct usb_function *f)
1316 struct mtp_dev *dev = func_to_mtp(f);
1317 struct usb_composite_dev *cdev = dev->cdev;
1319 DBG(cdev, "mtp_function_disable\n");
1320 dev->state = STATE_OFFLINE;
1321 usb_ep_disable(dev->ep_in);
1322 usb_ep_disable(dev->ep_out);
1323 usb_ep_disable(dev->ep_intr);
1325 /* readers may be blocked waiting for us to go online */
1326 wake_up(&dev->read_wq);
1328 VDBG(cdev, "%s disabled\n", dev->function.name);
/* Allocate and initialize the singleton mtp_dev: locks, wait queues, free
 * lists, the single-threaded transfer workqueue, and the /dev/mtp_usb misc
 * device. On failure, tears down in reverse order (unwind labels not fully
 * visible in this excerpt).
 */
1331 static int __mtp_setup(struct mtp_instance *fi_mtp)
1333 struct mtp_dev *dev;
1336 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1344 spin_lock_init(&dev->lock);
1345 init_waitqueue_head(&dev->read_wq);
1346 init_waitqueue_head(&dev->write_wq);
1347 init_waitqueue_head(&dev->intr_wq);
1348 atomic_set(&dev->open_excl, 0);
1349 atomic_set(&dev->ioctl_excl, 0);
1350 INIT_LIST_HEAD(&dev->tx_idle);
1351 INIT_LIST_HEAD(&dev->intr_idle);
/* single-threaded so send/receive work items cannot run concurrently */
1353 dev->wq = create_singlethread_workqueue("f_mtp");
1358 INIT_WORK(&dev->send_file_work, send_file_work);
1359 INIT_WORK(&dev->receive_file_work, receive_file_work);
1363 ret = misc_register(&mtp_device);
1370 destroy_workqueue(dev->wq);
1374 printk(KERN_ERR "mtp gadget driver failed to initialize\n");
/* configfs entry point; thin wrapper kept for symmetry with other drivers */
1378 static int mtp_setup_configfs(struct mtp_instance *fi_mtp)
1380 return __mtp_setup(fi_mtp);
/* Undo __mtp_setup(): deregister the misc device and destroy the workqueue. */
1384 static void mtp_cleanup(void)
1386 struct mtp_dev *dev = _mtp_dev;
1391 misc_deregister(&mtp_device);
1392 destroy_workqueue(dev->wq);
/* configfs item -> mtp_instance (via the func_inst group). */
1397 static struct mtp_instance *to_mtp_instance(struct config_item *item)
1399 return container_of(to_config_group(item), struct mtp_instance,
/* Drop the function-instance reference when the configfs item is released. */
1403 static void mtp_attr_release(struct config_item *item)
1405 struct mtp_instance *fi_mtp = to_mtp_instance(item);
1407 usb_put_function_instance(&fi_mtp->func_inst);
1410 static struct configfs_item_operations mtp_item_ops = {
1411 .release = mtp_attr_release,
1414 static struct config_item_type mtp_func_type = {
1415 .ct_item_ops = &mtp_item_ops,
1416 .ct_owner = THIS_MODULE,
/* usb_function_instance -> mtp_instance. */
1420 static struct mtp_instance *to_fi_mtp(struct usb_function_instance *fi)
1422 return container_of(fi, struct mtp_instance, func_inst);
/* Duplicate the configfs instance name (bounded by MAX_INST_NAME_LEN) into
 * the instance; returns -ENAMETOOLONG for oversized names.
 */
1425 static int mtp_set_inst_name(struct usb_function_instance *fi, const char *name)
1427 struct mtp_instance *fi_mtp;
1431 name_len = strlen(name) + 1;
1432 if (name_len > MAX_INST_NAME_LEN)
1433 return -ENAMETOOLONG;
1435 ptr = kstrndup(name, name_len, GFP_KERNEL);
1439 fi_mtp = to_fi_mtp(fi);
/* Free the instance: name, OS-descriptor default groups, and (in lines not
 * visible here) the instance itself / device cleanup.
 */
1445 static void mtp_free_inst(struct usb_function_instance *fi)
1447 fi_mtp = to_fi_mtp(fi);
1449 fi_mtp = to_fi_mtp(fi);
1450 kfree(fi_mtp->name);
1452 kfree(fi_mtp->mtp_os_desc.group.default_groups);
/* Allocate a function instance shared by the MTP and PTP bindings: sets up
 * the OS-descriptor (compat ID) storage, registers /dev/mtp_usb via
 * mtp_setup_configfs(), and names the configfs group. mtp_config selects
 * MTP vs PTP behavior (PTP-specific handling not visible in this excerpt).
 */
1456 struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
1458 struct mtp_instance *fi_mtp;
1460 struct usb_os_desc *descs[1];
1463 fi_mtp = kzalloc(sizeof(*fi_mtp), GFP_KERNEL);
1465 return ERR_PTR(-ENOMEM);
1466 fi_mtp->func_inst.set_inst_name = mtp_set_inst_name;
1467 fi_mtp->func_inst.free_func_inst = mtp_free_inst;
/* OS descriptor advertises the "MTP" compat ID stored in the instance */
1469 fi_mtp->mtp_os_desc.ext_compat_id = fi_mtp->mtp_ext_compat_id;
1470 INIT_LIST_HEAD(&fi_mtp->mtp_os_desc.ext_prop);
1471 descs[0] = &fi_mtp->mtp_os_desc;
1473 usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,
1474 descs, names, THIS_MODULE);
1477 ret = mtp_setup_configfs(fi_mtp);
1480 pr_err("Error setting MTP\n");
1481 return ERR_PTR(ret);
1484 fi_mtp->dev = _mtp_dev;
1486 config_group_init_type_name(&fi_mtp->func_inst.group,
1487 "", &mtp_func_type);
1489 return &fi_mtp->func_inst;
1491 EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
/* USB_FUNCTION_INIT hook for the "mtp" function name. */
1493 static struct usb_function_instance *mtp_alloc_inst(void)
1495 return alloc_inst_mtp_ptp(true);
/* configfs setup callback: forward ep0 requests to the shared handler. */
1498 static int mtp_ctrlreq_configfs(struct usb_function *f,
1499 const struct usb_ctrlrequest *ctrl)
1501 return mtp_ctrlrequest(f->config->cdev, ctrl);
1504 static void mtp_free(struct usb_function *f)
1506 /*NO-OP: no function specific resource allocation in mtp_alloc*/
/* Allocate the usb_function for either MTP or PTP. Requires that the MTP
 * instance (and its mtp_dev) already exists, since PTP reuses it.
 */
1509 struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
1512 struct mtp_instance *fi_mtp = to_fi_mtp(fi);
1513 struct mtp_dev *dev;
1516 * PTP piggybacks on MTP function so make sure we have
1517 * created MTP function before we associate this PTP
1518 * function with a gadget configuration.
1520 if (fi_mtp->dev == NULL) {
1521 pr_err("Error: Create MTP function before linking"
1522 " PTP function with a gadget configuration\n");
1523 pr_err("\t1: Delete existing PTP function if any\n");
1524 pr_err("\t2: Create MTP function\n");
1525 pr_err("\t3: Create and symlink PTP function"
1526 " with a gadget configuration\n");
1527 return ERR_PTR(-EINVAL); /* Invalid Configuration */
1531 dev->function.name = DRIVER_NAME;
1532 dev->function.strings = mtp_strings;
/* descriptor set depends on whether this binding is MTP or PTP */
1534 dev->function.fs_descriptors = fs_mtp_descs;
1535 dev->function.hs_descriptors = hs_mtp_descs;
1536 dev->function.ss_descriptors = ss_mtp_descs;
1538 dev->function.fs_descriptors = fs_ptp_descs;
1539 dev->function.hs_descriptors = hs_ptp_descs;
1540 dev->function.ss_descriptors = ss_ptp_descs;
1542 dev->function.bind = mtp_function_bind;
1543 dev->function.unbind = mtp_function_unbind;
1544 dev->function.set_alt = mtp_function_set_alt;
1545 dev->function.disable = mtp_function_disable;
1546 dev->function.setup = mtp_ctrlreq_configfs;
1547 dev->function.free_func = mtp_free;
1549 return &dev->function;
1551 EXPORT_SYMBOL_GPL(function_alloc_mtp_ptp);
1553 static struct usb_function *mtp_alloc(struct usb_function_instance *fi)
1555 return function_alloc_mtp_ptp(fi, true);
/* Register the "mtp" usb_function with the composite framework. */
1558 DECLARE_USB_FUNCTION_INIT(mtp, mtp_alloc_inst, mtp_alloc);
1559 MODULE_LICENSE("GPL");