temp revert usb gadget change
1 /*
2  * drivers/usb/gadget/f_mass_storage.c
3  *
4  * Function Driver for USB Mass Storage
5  *
6  * Copyright (C) 2008 Google, Inc.
7  * Author: Mike Lockwood <lockwood@android.com>
8  *
9  * Based heavily on the file_storage gadget driver in
10  * drivers/usb/gadget/file_storage.c and licensed under the same terms:
11  *
12  * Copyright (C) 2003-2007 Alan Stern
13  * All rights reserved.
14  *
15  * Redistribution and use in source and binary forms, with or without
16  * modification, are permitted provided that the following conditions
17  * are met:
18  * 1. Redistributions of source code must retain the above copyright
19  *    notice, this list of conditions, and the following disclaimer,
20  *    without modification.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. The names of the above-listed copyright holders may not be used
25  *    to endorse or promote products derived from this software without
26  *    specific prior written permission.
27  *
28  * ALTERNATIVELY, this software may be distributed under the terms of the
29  * GNU General Public License ("GPL") as published by the Free Software
30  * Foundation, either version 2 of that License or (at your option) any
31  * later version.
32  *
33  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
34  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
35  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
36  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
37  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
38  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
39  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
40  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
41  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
42  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
43  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
44  */
45
46 /* #define DEBUG */
47 /* #define VERBOSE_DEBUG */
48 /* #define DUMP_MSGS */
49
50
51 #include <linux/blkdev.h>
52 #include <linux/completion.h>
53 #include <linux/dcache.h>
54 #include <linux/delay.h>
55 #include <linux/device.h>
56 #include <linux/fcntl.h>
57 #include <linux/file.h>
58 #include <linux/fs.h>
59 #include <linux/kref.h>
60 #include <linux/kthread.h>
61 #include <linux/limits.h>
62 #include <linux/rwsem.h>
63 #include <linux/slab.h>
64 #include <linux/spinlock.h>
65 #include <linux/string.h>
66 #include <linux/switch.h>
67 #include <linux/freezer.h>
68 #include <linux/utsname.h>
69 #include <linux/wakelock.h>
70 #include <linux/platform_device.h>
71
72 #include <linux/usb.h>
73 #include <linux/usb_usual.h>
74 #include <linux/usb/ch9.h>
75 #include <linux/usb/android_composite.h>
76
77 #include "gadget_chips.h"
78
79
80 #define BULK_BUFFER_SIZE           4096
81
82 /* flush after every 4 meg of writes to avoid excessive block level caching */
83 #define MAX_UNFLUSHED_BYTES (4 * 1024 * 1024)
84
85 /*-------------------------------------------------------------------------*/
86
87 #define DRIVER_NAME             "usb_mass_storage"
88 #define MAX_LUNS                8
89
90 static const char shortname[] = DRIVER_NAME;
91
92 #ifdef DEBUG
93 #define LDBG(lun, fmt, args...) \
94         dev_dbg(&(lun)->dev , fmt , ## args)
95 #define MDBG(fmt,args...) \
96         printk(KERN_DEBUG DRIVER_NAME ": " fmt , ## args)
97 #else
98 #define LDBG(lun, fmt, args...) \
99         do { } while (0)
100 #define MDBG(fmt,args...) \
101         do { } while (0)
102 #undef VERBOSE_DEBUG
103 #undef DUMP_MSGS
104 #endif /* DEBUG */
105
106 #ifdef VERBOSE_DEBUG
107 #define VLDBG   LDBG
108 #else
109 #define VLDBG(lun, fmt, args...) \
110         do { } while (0)
111 #endif /* VERBOSE_DEBUG */
112
113 #define LERROR(lun, fmt, args...) \
114         dev_err(&(lun)->dev , fmt , ## args)
115 #define LWARN(lun, fmt, args...) \
116         dev_warn(&(lun)->dev , fmt , ## args)
117 #define LINFO(lun, fmt, args...) \
118         dev_info(&(lun)->dev , fmt , ## args)
119
120 #define MINFO(fmt,args...) \
121         printk(KERN_INFO DRIVER_NAME ": " fmt , ## args)
122
123 #undef DBG
124 #undef VDBG
125 #undef ERROR
126 #undef WARNING
127 #undef INFO
128 #define DBG(d, fmt, args...) \
129         dev_dbg(&(d)->cdev->gadget->dev , fmt , ## args)
130 #define VDBG(d, fmt, args...) \
131         dev_vdbg(&(d)->cdev->gadget->dev , fmt , ## args)
132 #define ERROR(d, fmt, args...) \
133         dev_err(&(d)->cdev->gadget->dev , fmt , ## args)
134 #define WARNING(d, fmt, args...) \
135         dev_warn(&(d)->cdev->gadget->dev , fmt , ## args)
136 #define INFO(d, fmt, args...) \
137         dev_info(&(d)->cdev->gadget->dev , fmt , ## args)
138
139
140 /*-------------------------------------------------------------------------*/
141
142 /* Bulk-only data structures */
143
144 /* Command Block Wrapper */
145 struct bulk_cb_wrap {
146         __le32  Signature;              /* Contains 'USBC' */
147         u32     Tag;                    /* Unique per command id */
148         __le32  DataTransferLength;     /* Size of the data */
149         u8      Flags;                  /* Direction in bit 7 */
150         u8      Lun;                    /* LUN (normally 0) */
151         u8      Length;                 /* Of the CDB, <= MAX_COMMAND_SIZE */
152         u8      CDB[16];                /* Command Data Block */
153 };
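/*
 * Illustrative example (values chosen for illustration, not taken from a
 * real trace): a CBW for a READ(10) of 8 blocks (4096 bytes) at LBA 0 on
 * LUN 0 would carry Signature 0x43425355 ("USBC"), a host-chosen Tag that
 * is echoed back in the CSW, DataTransferLength 4096, Flags 0x80 (IN),
 * Lun 0, Length 10, and CDB bytes 28 00 00 00 00 00 00 00 08 00 with the
 * remaining CDB bytes zero.
 */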
154
155 #define USB_BULK_CB_WRAP_LEN    31
156 #define USB_BULK_CB_SIG         0x43425355      /* Spells out USBC */
157 #define USB_BULK_IN_FLAG        0x80
158
159 /* Command Status Wrapper */
160 struct bulk_cs_wrap {
161         __le32  Signature;              /* Should = 'USBS' */
162         u32     Tag;                    /* Same as original command */
163         __le32  Residue;                /* Amount not transferred */
164         u8      Status;                 /* See below */
165 };
166
167 #define USB_BULK_CS_WRAP_LEN    13
168 #define USB_BULK_CS_SIG         0x53425355      /* Spells out 'USBS' */
169 #define USB_STATUS_PASS         0
170 #define USB_STATUS_FAIL         1
171 #define USB_STATUS_PHASE_ERROR  2
172
173 /* Bulk-only class specific requests */
174 #define USB_BULK_RESET_REQUEST          0xff
175 #define USB_BULK_GET_MAX_LUN_REQUEST    0xfe
176
177 /* Length of a SCSI Command Data Block */
178 #define MAX_COMMAND_SIZE        16
179
180 /* SCSI commands that we recognize */
181 #define SC_FORMAT_UNIT                  0x04
182 #define SC_INQUIRY                      0x12
183 #define SC_MODE_SELECT_6                0x15
184 #define SC_MODE_SELECT_10               0x55
185 #define SC_MODE_SENSE_6                 0x1a
186 #define SC_MODE_SENSE_10                0x5a
187 #define SC_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e
188 #define SC_READ_6                       0x08
189 #define SC_READ_10                      0x28
190 #define SC_READ_12                      0xa8
191 #define SC_READ_CAPACITY                0x25
192 #define SC_READ_FORMAT_CAPACITIES       0x23
193 #define SC_RELEASE                      0x17
194 #define SC_REQUEST_SENSE                0x03
195 #define SC_RESERVE                      0x16
196 #define SC_SEND_DIAGNOSTIC              0x1d
197 #define SC_START_STOP_UNIT              0x1b
198 #define SC_SYNCHRONIZE_CACHE            0x35
199 #define SC_TEST_UNIT_READY              0x00
200 #define SC_VERIFY                       0x2f
201 #define SC_WRITE_6                      0x0a
202 #define SC_WRITE_10                     0x2a
203 #define SC_WRITE_12                     0xaa
204
205 /* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
206 #define SS_NO_SENSE                             0
207 #define SS_COMMUNICATION_FAILURE                0x040800
208 #define SS_INVALID_COMMAND                      0x052000
209 #define SS_INVALID_FIELD_IN_CDB                 0x052400
210 #define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE   0x052100
211 #define SS_LOGICAL_UNIT_NOT_SUPPORTED           0x052500
212 #define SS_MEDIUM_NOT_PRESENT                   0x023a00
213 #define SS_MEDIUM_REMOVAL_PREVENTED             0x055302
214 #define SS_NOT_READY_TO_READY_TRANSITION        0x062800
215 #define SS_RESET_OCCURRED                       0x062900
216 #define SS_SAVING_PARAMETERS_NOT_SUPPORTED      0x053900
217 #define SS_UNRECOVERED_READ_ERROR               0x031100
218 #define SS_WRITE_ERROR                          0x030c02
219 #define SS_WRITE_PROTECTED                      0x072700
220
221 #define SK(x)           ((u8) ((x) >> 16))      /* Sense Key byte, etc. */
222 #define ASC(x)          ((u8) ((x) >> 8))
223 #define ASCQ(x)         ((u8) (x))
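/*
 * Example: SS_MEDIUM_NOT_PRESENT (0x023a00) unpacks via the macros above
 * as sense key 0x02 (NOT READY), ASC 0x3a (MEDIUM NOT PRESENT), ASCQ 0x00.
 */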
224
225
226 /*-------------------------------------------------------------------------*/
227
228 struct lun {
229         struct file     *filp;
230         loff_t          file_length;
231         loff_t          num_sectors;
232         unsigned int unflushed_bytes;
233
234         unsigned int    ro : 1;
235         unsigned int    prevent_medium_removal : 1;
236         unsigned int    registered : 1;
237         unsigned int    info_valid : 1;
238
239         u32             sense_data;
240         u32             sense_data_info;
241         u32             unit_attention_data;
242
243         struct device   dev;
244 };
245
246 #define backing_file_is_open(curlun)    ((curlun)->filp != NULL)
247
248
249 static struct lun *dev_to_lun(struct device *dev)
250 {
251         return container_of(dev, struct lun, dev);
252 }
253
254 /* Big enough to hold our biggest descriptor */
255 #define EP0_BUFSIZE     256
256
257 /* Number of buffers we will use.  2 is enough for double-buffering */
258 #define NUM_BUFFERS     2
259
260 enum fsg_buffer_state {
261         BUF_STATE_EMPTY = 0,
262         BUF_STATE_FULL,
263         BUF_STATE_BUSY
264 };
265
266 struct fsg_buffhd {
267         void                            *buf;
268         enum fsg_buffer_state           state;
269         struct fsg_buffhd               *next;
270
271         /* The NetChip 2280 is faster, and handles some protocol faults
272          * better, if we don't submit any short bulk-out read requests.
273          * So we will record the intended request length here. */
274         unsigned int                    bulk_out_intended_length;
275
276         struct usb_request              *inreq;
277         int                             inreq_busy;
278         struct usb_request              *outreq;
279         int                             outreq_busy;
280 };
281
282 enum fsg_state {
283         /* This one isn't used anywhere */
284         FSG_STATE_COMMAND_PHASE = -10,
285
286         FSG_STATE_DATA_PHASE,
287         FSG_STATE_STATUS_PHASE,
288
289         FSG_STATE_IDLE = 0,
290         FSG_STATE_ABORT_BULK_OUT,
291         FSG_STATE_RESET,
292         FSG_STATE_CONFIG_CHANGE,
293         FSG_STATE_EXIT,
294         FSG_STATE_TERMINATED
295 };
296
297 enum data_direction {
298         DATA_DIR_UNKNOWN = 0,
299         DATA_DIR_FROM_HOST,
300         DATA_DIR_TO_HOST,
301         DATA_DIR_NONE
302 };
303
304 struct fsg_dev {
305         struct usb_function function;
306         struct usb_composite_dev *cdev;
307
308         /* optional "usb_mass_storage" platform device */
309         struct platform_device *pdev;
310
311         /* lock protects: state and all the req_busy's */
312         spinlock_t              lock;
313
314         /* filesem protects: backing files in use */
315         struct rw_semaphore     filesem;
316
317         /* reference counting: wait until all LUNs are released */
318         struct kref             ref;
319
320         unsigned int            bulk_out_maxpacket;
321         enum fsg_state          state;          /* For exception handling */
322
323         u8                      config, new_config;
324
325         unsigned int            running : 1;
326         unsigned int            bulk_in_enabled : 1;
327         unsigned int            bulk_out_enabled : 1;
328         unsigned int            phase_error : 1;
329         unsigned int            short_packet_received : 1;
330         unsigned int            bad_lun_okay : 1;
331
332         unsigned long           atomic_bitflags;
333 #define REGISTERED              0
334 #define CLEAR_BULK_HALTS        1
335 #define SUSPENDED               2
336
337         struct usb_ep           *bulk_in;
338         struct usb_ep           *bulk_out;
339
340         struct fsg_buffhd       *next_buffhd_to_fill;
341         struct fsg_buffhd       *next_buffhd_to_drain;
342         struct fsg_buffhd       buffhds[NUM_BUFFERS];
343
344         int                     thread_wakeup_needed;
345         struct completion       thread_notifier;
346         struct task_struct      *thread_task;
347
348         int                     cmnd_size;
349         u8                      cmnd[MAX_COMMAND_SIZE];
350         enum data_direction     data_dir;
351         u32                     data_size;
352         u32                     data_size_from_cmnd;
353         u32                     tag;
354         unsigned int            lun;
355         u32                     residue;
356         u32                     usb_amount_left;
357
358         unsigned int            nluns;
359         struct lun              *luns;
360         struct lun              *curlun;
361
362         u32                             buf_size;
363         const char              *vendor;
364         const char              *product;
365         int                             release;
366
367         struct switch_dev sdev;
368
369         struct wake_lock wake_lock;
370 };
371
372 static inline struct fsg_dev *func_to_dev(struct usb_function *f)
373 {
374         return container_of(f, struct fsg_dev, function);
375 }
376
377 static int exception_in_progress(struct fsg_dev *fsg)
378 {
379         return (fsg->state > FSG_STATE_IDLE);
380 }
381
382 /* Make bulk-out requests divisible by the maxpacket size */
383 static void set_bulk_out_req_length(struct fsg_dev *fsg,
384                 struct fsg_buffhd *bh, unsigned int length)
385 {
386         unsigned int    rem;
387
388         bh->bulk_out_intended_length = length;
389         rem = length % fsg->bulk_out_maxpacket;
390         if (rem > 0)
391                 length += fsg->bulk_out_maxpacket - rem;
392         bh->outreq->length = length;
393 }
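/*
 * Example: with bulk_out_maxpacket == 512, a request for 613 bytes is
 * padded up to 1024 so the bulk-out transfer is always a whole number of
 * packets; bulk_out_intended_length still records the original 613.
 */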
394
395 static struct fsg_dev                   *the_fsg;
396
397 static void     close_backing_file(struct fsg_dev *fsg, struct lun *curlun);
398 static void     close_all_backing_files(struct fsg_dev *fsg);
399 static int fsync_sub(struct lun *curlun);
400
401 /*-------------------------------------------------------------------------*/
402
403 #ifdef DUMP_MSGS
404
405 static void dump_msg(struct fsg_dev *fsg, const char *label,
406                 const u8 *buf, unsigned int length)
407 {
408         if (length < 512) {
409                 DBG(fsg, "%s, length %u:\n", label, length);
410                 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
411                                 16, 1, buf, length, 0);
412         }
413 }
414
415 static void dump_cdb(struct fsg_dev *fsg)
416 {}
417
418 #else
419
420 static void dump_msg(struct fsg_dev *fsg, const char *label,
421                 const u8 *buf, unsigned int length)
422 {}
423
424 #ifdef VERBOSE_DEBUG
425
426 static void dump_cdb(struct fsg_dev *fsg)
427 {
428         print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE,
429                         16, 1, fsg->cmnd, fsg->cmnd_size, 0);
430 }
431
432 #else
433
434 static void dump_cdb(struct fsg_dev *fsg)
435 {}
436
437 #endif /* VERBOSE_DEBUG */
438 #endif /* DUMP_MSGS */
439
440
441 /*-------------------------------------------------------------------------*/
442
443 /* Routines for unaligned data access */
444
445 static u16 get_be16(u8 *buf)
446 {
447         return ((u16) buf[0] << 8) | ((u16) buf[1]);
448 }
449
450 static u32 get_be32(u8 *buf)
451 {
452         return ((u32) buf[0] << 24) | ((u32) buf[1] << 16) |
453                         ((u32) buf[2] << 8) | ((u32) buf[3]);
454 }
455
456 static void put_be16(u8 *buf, u16 val)
457 {
458         buf[0] = val >> 8;
459         buf[1] = val;
460 }
461
462 static void put_be32(u8 *buf, u32 val)
463 {
464         buf[0] = val >> 24;
465         buf[1] = val >> 16;
466         buf[2] = val >> 8;
467         buf[3] = val & 0xff;
468 }
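/*
 * Example: put_be32(buf, 0x00012000) stores the bytes 00 01 20 00, and
 * get_be32() on that buffer returns 0x00012000 again -- SCSI CDB and
 * response fields are big-endian, regardless of CPU byte order.
 */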
469
470 /*-------------------------------------------------------------------------*/
471
472 /*
473  * DESCRIPTORS ... most are static, but strings and (full) configuration
474  * descriptors are built on demand.  Also the (static) config and interface
475  * descriptors are adjusted during fsg_bind().
476  */
477
478 /* There is only one interface. */
479
480 static struct usb_interface_descriptor
481 intf_desc = {
482         .bLength =              sizeof intf_desc,
483         .bDescriptorType =      USB_DT_INTERFACE,
484
485         .bNumEndpoints =        2,              /* Adjusted during fsg_bind() */
486         .bInterfaceClass =      USB_CLASS_MASS_STORAGE,
487         .bInterfaceSubClass =   US_SC_SCSI,
488         .bInterfaceProtocol =   US_PR_BULK,
489 };
490
491 /* Two full-speed endpoint descriptors:
492  * bulk-in and bulk-out. */
493
494 static struct usb_endpoint_descriptor
495 fs_bulk_in_desc = {
496         .bLength =              USB_DT_ENDPOINT_SIZE,
497         .bDescriptorType =      USB_DT_ENDPOINT,
498
499         .bEndpointAddress =     USB_DIR_IN,
500         .bmAttributes =         USB_ENDPOINT_XFER_BULK,
501         /* wMaxPacketSize set by autoconfiguration */
502 };
503
504 static struct usb_endpoint_descriptor
505 fs_bulk_out_desc = {
506         .bLength =              USB_DT_ENDPOINT_SIZE,
507         .bDescriptorType =      USB_DT_ENDPOINT,
508
509         .bEndpointAddress =     USB_DIR_OUT,
510         .bmAttributes =         USB_ENDPOINT_XFER_BULK,
511         /* wMaxPacketSize set by autoconfiguration */
512 };
513
514 static struct usb_descriptor_header *fs_function[] = {
515         (struct usb_descriptor_header *) &intf_desc,
516         (struct usb_descriptor_header *) &fs_bulk_in_desc,
517         (struct usb_descriptor_header *) &fs_bulk_out_desc,
518         NULL,
519 };
520 #define FS_FUNCTION_PRE_EP_ENTRIES      2
521
522
523 static struct usb_endpoint_descriptor
524 hs_bulk_in_desc = {
525         .bLength =              USB_DT_ENDPOINT_SIZE,
526         .bDescriptorType =      USB_DT_ENDPOINT,
527
528         /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
529         .bmAttributes =         USB_ENDPOINT_XFER_BULK,
530         .wMaxPacketSize =       __constant_cpu_to_le16(512),
531 };
532
533 static struct usb_endpoint_descriptor
534 hs_bulk_out_desc = {
535         .bLength =              USB_DT_ENDPOINT_SIZE,
536         .bDescriptorType =      USB_DT_ENDPOINT,
537
538         /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
539         .bmAttributes =         USB_ENDPOINT_XFER_BULK,
540         .wMaxPacketSize =       __constant_cpu_to_le16(512),
541         .bInterval =            1,      /* NAK every 1 uframe */
542 };
543
544
545 static struct usb_descriptor_header *hs_function[] = {
546         (struct usb_descriptor_header *) &intf_desc,
547         (struct usb_descriptor_header *) &hs_bulk_in_desc,
548         (struct usb_descriptor_header *) &hs_bulk_out_desc,
549         NULL,
550 };
551
552 /* Maxpacket and other transfer characteristics vary by speed. */
553 static struct usb_endpoint_descriptor *
554 ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
555                 struct usb_endpoint_descriptor *hs)
556 {
557         if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
558                 return hs;
559         return fs;
560 }
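/*
 * Note: full-speed bulk endpoints use a wMaxPacketSize of at most 64
 * (filled in by endpoint autoconfiguration), while high-speed bulk
 * endpoints are fixed at 512, which is why the hs_* descriptors above
 * hard-code 512.
 */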
561
562 /*-------------------------------------------------------------------------*/
563
564 /* These routines may be called in process context or in_irq */
565
566 /* Caller must hold fsg->lock */
567 static void wakeup_thread(struct fsg_dev *fsg)
568 {
569         /* Tell the main thread that something has happened */
570         fsg->thread_wakeup_needed = 1;
571         if (fsg->thread_task)
572                 wake_up_process(fsg->thread_task);
573 }
574
575
576 static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
577 {
578         unsigned long           flags;
579
580         DBG(fsg, "raise_exception %d\n", (int)new_state);
581         /* Do nothing if a higher-priority exception is already in progress.
582          * If a lower-or-equal priority exception is in progress, preempt it
583          * and notify the main thread by sending it a signal. */
584         spin_lock_irqsave(&fsg->lock, flags);
585         if (fsg->state <= new_state) {
586                 fsg->state = new_state;
587                 if (fsg->thread_task)
588                         send_sig_info(SIGUSR1, SEND_SIG_FORCED,
589                                         fsg->thread_task);
590         }
591         spin_unlock_irqrestore(&fsg->lock, flags);
592 }
593
594
595 /*-------------------------------------------------------------------------*/
596
597 /* Bulk endpoint completion handlers.
598  * These always run in_irq. */
599
600 static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
601 {
602         struct fsg_dev          *fsg = ep->driver_data;
603         struct fsg_buffhd       *bh = req->context;
604         unsigned long           flags;
605
606         if (req->status || req->actual != req->length)
607                 DBG(fsg, "%s --> %d, %u/%u\n", __func__,
608                                 req->status, req->actual, req->length);
609
610         /* Hold the lock while we update the request and buffer states */
611         smp_wmb();
612         spin_lock_irqsave(&fsg->lock, flags);
613         bh->inreq_busy = 0;
614         bh->state = BUF_STATE_EMPTY;
615         wakeup_thread(fsg);
616         spin_unlock_irqrestore(&fsg->lock, flags);
617 }
618
619 static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
620 {
621         struct fsg_dev          *fsg = ep->driver_data;
622         struct fsg_buffhd       *bh = req->context;
623         unsigned long           flags;
624
625         dump_msg(fsg, "bulk-out", req->buf, req->actual);
626         if (req->status || req->actual != bh->bulk_out_intended_length)
627                 DBG(fsg, "%s --> %d, %u/%u\n", __func__,
628                                 req->status, req->actual,
629                                 bh->bulk_out_intended_length);
630
631         /* Hold the lock while we update the request and buffer states */
632         smp_wmb();
633         spin_lock_irqsave(&fsg->lock, flags);
634         bh->outreq_busy = 0;
635         bh->state = BUF_STATE_FULL;
636         wakeup_thread(fsg);
637         spin_unlock_irqrestore(&fsg->lock, flags);
638 }
639
640 static int fsg_function_setup(struct usb_function *f,
641                                         const struct usb_ctrlrequest *ctrl)
642 {
643         struct fsg_dev  *fsg = func_to_dev(f);
644         struct usb_composite_dev *cdev = fsg->cdev;
645         int                     value = -EOPNOTSUPP;
646         u16                     w_index = le16_to_cpu(ctrl->wIndex);
647         u16                     w_value = le16_to_cpu(ctrl->wValue);
648         u16                     w_length = le16_to_cpu(ctrl->wLength);
649
650         DBG(fsg, "fsg_function_setup\n");
651         /* Handle Bulk-only class-specific requests */
652         if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
653                 DBG(fsg, "USB_TYPE_CLASS\n");
654                 switch (ctrl->bRequest) {
655                 case USB_BULK_RESET_REQUEST:
656                         if (ctrl->bRequestType != (USB_DIR_OUT |
657                                         USB_TYPE_CLASS | USB_RECIP_INTERFACE))
658                                 break;
659                         if (w_index != 0 || w_value != 0) {
660                                 value = -EDOM;
661                                 break;
662                         }
663
664                         /* Raise an exception to stop the current operation
665                          * and reinitialize our state. */
666                         DBG(fsg, "bulk reset request\n");
667                         raise_exception(fsg, FSG_STATE_RESET);
668                         value = 0;
669                         break;
670
671                 case USB_BULK_GET_MAX_LUN_REQUEST:
672                         if (ctrl->bRequestType != (USB_DIR_IN |
673                                         USB_TYPE_CLASS | USB_RECIP_INTERFACE))
674                                 break;
675                         if (w_index != 0 || w_value != 0) {
676                                 value = -EDOM;
677                                 break;
678                         }
679                         VDBG(fsg, "get max LUN\n");
680                         *(u8 *)cdev->req->buf = fsg->nluns - 1;
681                         value = 1;
682                         break;
683                 }
684         }
685
686         /* respond with data transfer or status phase? */
687         if (value >= 0) {
688                 int rc;
689                 cdev->req->zero = value < w_length;
690                 cdev->req->length = value;
691                 rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
692                 if (rc < 0)
693                         printk(KERN_ERR "%s setup response queue error\n", __func__);
694         }
695
696         if (value == -EOPNOTSUPP)
697                 VDBG(fsg,
698                         "unknown class-specific control req "
699                         "%02x.%02x v%04x i%04x l%u\n",
700                         ctrl->bRequestType, ctrl->bRequest,
701                         le16_to_cpu(ctrl->wValue), w_index, w_length);
702         return value;
703 }
704
705 /*-------------------------------------------------------------------------*/
706
707 /* All the following routines run in process context */
708
709
710 /* Use this for bulk or interrupt transfers, not ep0 */
711 static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
712                 struct usb_request *req, int *pbusy,
713                 enum fsg_buffer_state *state)
714 {
715         int     rc;
716         unsigned long           flags;
717
718         DBG(fsg, "start_transfer req: %p, req->buf: %p\n", req, req->buf);
719         if (ep == fsg->bulk_in)
720                 dump_msg(fsg, "bulk-in", req->buf, req->length);
721
722         spin_lock_irqsave(&fsg->lock, flags);
723         *pbusy = 1;
724         *state = BUF_STATE_BUSY;
725         spin_unlock_irqrestore(&fsg->lock, flags);
726         rc = usb_ep_queue(ep, req, GFP_KERNEL);
727         if (rc != 0) {
728                 *pbusy = 0;
729                 *state = BUF_STATE_EMPTY;
730
731                 /* We can't do much more than wait for a reset */
732
733                 /* Note: currently the net2280 driver fails zero-length
734                  * submissions if DMA is enabled. */
735                 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
736                                                 req->length == 0))
737                         WARNING(fsg, "error in submission: %s --> %d\n",
738                                 (ep == fsg->bulk_in ? "bulk-in" : "bulk-out"),
739                                 rc);
740         }
741 }
742
743
744 static int sleep_thread(struct fsg_dev *fsg)
745 {
746         int     rc = 0;
747
748         /* Wait until a signal arrives or we are woken up */
749         for (;;) {
750                 try_to_freeze();
751                 set_current_state(TASK_INTERRUPTIBLE);
752                 if (signal_pending(current)) {
753                         rc = -EINTR;
754                         break;
755                 }
756                 if (fsg->thread_wakeup_needed)
757                         break;
758                 schedule();
759         }
760         __set_current_state(TASK_RUNNING);
761         fsg->thread_wakeup_needed = 0;
762         return rc;
763 }
764
765
766 /*-------------------------------------------------------------------------*/
767
768 static int do_read(struct fsg_dev *fsg)
769 {
770         struct lun              *curlun = fsg->curlun;
771         u32                     lba;
772         struct fsg_buffhd       *bh;
773         int                     rc;
774         u32                     amount_left;
775         loff_t                  file_offset, file_offset_tmp;
776         unsigned int            amount;
777         unsigned int            partial_page;
778         ssize_t                 nread;
779
780         /* Get the starting Logical Block Address and check that it's
781          * not too big */
782         if (fsg->cmnd[0] == SC_READ_6)
783                 lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
784         else {
785                 lba = get_be32(&fsg->cmnd[2]);
786
787                 /* We allow DPO (Disable Page Out = don't save data in the
788                  * cache) and FUA (Force Unit Access = don't read from the
789                  * cache), but we don't implement them. */
790                 if ((fsg->cmnd[1] & ~0x18) != 0) {
791                         curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
792                         return -EINVAL;
793                 }
794         }
795         if (lba >= curlun->num_sectors) {
796                 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
797                 return -EINVAL;
798         }
799         file_offset = ((loff_t) lba) << 9;
800
801         /* Carry out the file reads */
802         amount_left = fsg->data_size_from_cmnd;
803         if (unlikely(amount_left == 0))
804                 return -EIO;            /* No default reply */
805
806         for (;;) {
807
808                 /* Figure out how much we need to read:
809                  * Try to read the remaining amount.
810                  * But don't read more than the buffer size.
811                  * And don't try to read past the end of the file.
812                  * Finally, if we're not at a page boundary, don't read past
813                  *      the next page.
814                  * If this means reading 0 then we were asked to read past
815                  *      the end of file. */
816                 amount = min((unsigned int) amount_left,
817                                 (unsigned int)fsg->buf_size);
818                 amount = min((loff_t) amount,
819                                 curlun->file_length - file_offset);
820                 partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
821                 if (partial_page > 0)
822                         amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
823                                         partial_page);
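                /* Example (assuming PAGE_CACHE_SIZE == 4096): a read with
                 * 16384 bytes left but file_offset == 3072 is limited to
                 * 1024 bytes here, so each transfer stays within the
                 * current page of the backing file's page cache. */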
824
825                 /* Wait for the next buffer to become available */
826                 bh = fsg->next_buffhd_to_fill;
827                 while (bh->state != BUF_STATE_EMPTY) {
828                         rc = sleep_thread(fsg);
829                         if (rc)
830                                 return rc;
831                 }
832
833                 /* If we were asked to read past the end of file,
834                  * end with an empty buffer. */
835                 if (amount == 0) {
836                         curlun->sense_data =
837                                         SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
838                         curlun->sense_data_info = file_offset >> 9;
839                         curlun->info_valid = 1;
840                         bh->inreq->length = 0;
841                         bh->state = BUF_STATE_FULL;
842                         break;
843                 }
844
845                 /* Perform the read */
846                 file_offset_tmp = file_offset;
847                 nread = vfs_read(curlun->filp,
848                                 (char __user *) bh->buf,
849                                 amount, &file_offset_tmp);
850                 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
851                                 (unsigned long long) file_offset,
852                                 (int) nread);
853                 if (signal_pending(current))
854                         return -EINTR;
855
856                 if (nread < 0) {
857                         LDBG(curlun, "error in file read: %d\n",
858                                         (int) nread);
859                         nread = 0;
860                 } else if (nread < amount) {
861                         LDBG(curlun, "partial file read: %d/%u\n",
862                                         (int) nread, amount);
863                         nread -= (nread & 511); /* Round down to a block */
864                 }
865                 file_offset  += nread;
866                 amount_left  -= nread;
867                 fsg->residue -= nread;
868                 bh->inreq->length = nread;
869                 bh->state = BUF_STATE_FULL;
870
871                 /* If an error occurred, report it and its position */
872                 if (nread < amount) {
873                         curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
874                         curlun->sense_data_info = file_offset >> 9;
875                         curlun->info_valid = 1;
876                         break;
877                 }
878
879                 if (amount_left == 0)
880                         break;          /* No more left to read */
881
882                 /* Send this buffer and go read some more */
883                 start_transfer(fsg, fsg->bulk_in, bh->inreq,
884                                 &bh->inreq_busy, &bh->state);
885                 fsg->next_buffhd_to_fill = bh->next;
886         }
887
888         return -EIO;            /* No default reply */
889 }
890
891
892 /*-------------------------------------------------------------------------*/
893
894 static int do_write(struct fsg_dev *fsg)
895 {
896         struct lun              *curlun = fsg->curlun;
897         u32                     lba;
898         struct fsg_buffhd       *bh;
899         int                     get_some_more;
900         u32                     amount_left_to_req, amount_left_to_write;
901         loff_t                  usb_offset, file_offset, file_offset_tmp;
902         unsigned int            amount;
903         unsigned int            partial_page;
904         ssize_t                 nwritten;
905         int                     rc;
906
907         if (curlun->ro) {
908                 curlun->sense_data = SS_WRITE_PROTECTED;
909                 return -EINVAL;
910         }
911         curlun->filp->f_flags &= ~O_SYNC;       /* Default is not to wait */
912
913         /* Get the starting Logical Block Address and check that it's
914          * not too big */
915         if (fsg->cmnd[0] == SC_WRITE_6)
916                 lba = (fsg->cmnd[1] << 16) | get_be16(&fsg->cmnd[2]);
917         else {
918                 lba = get_be32(&fsg->cmnd[2]);
919
920                 /* We allow DPO (Disable Page Out = don't save data in the
921                  * cache) and FUA (Force Unit Access = write directly to the
922                  * medium).  We don't implement DPO; we implement FUA by
923                  * performing synchronous output. */
924                 if ((fsg->cmnd[1] & ~0x18) != 0) {
925                         curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
926                         return -EINVAL;
927                 }
928                 if (fsg->cmnd[1] & 0x08)        /* FUA */
929                         curlun->filp->f_flags |= O_SYNC;
930         }
931         if (lba >= curlun->num_sectors) {
932                 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
933                 return -EINVAL;
934         }
935
936         /* Carry out the file writes */
937         get_some_more = 1;
938         file_offset = usb_offset = ((loff_t) lba) << 9;
939         amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
940
941         while (amount_left_to_write > 0) {
942
943                 /* Queue a request for more data from the host */
944                 bh = fsg->next_buffhd_to_fill;
945                 if (bh->state == BUF_STATE_EMPTY && get_some_more) {
946
947                         /* Figure out how much we want to get:
948                          * Try to get the remaining amount.
949                          * But don't get more than the buffer size.
950                          * And don't try to go past the end of the file.
951                          * If we're not at a page boundary,
952                          *      don't go past the next page.
953                          * If this means getting 0, then we were asked
954                          *      to write past the end of file.
955                          * Finally, round down to a block boundary. */
956                         amount = min(amount_left_to_req, (u32)fsg->buf_size);
957                         amount = min((loff_t) amount, curlun->file_length -
958                                         usb_offset);
959                         partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
960                         if (partial_page > 0)
961                                 amount = min(amount,
962         (unsigned int) PAGE_CACHE_SIZE - partial_page);
963
964                         if (amount == 0) {
965                                 get_some_more = 0;
966                                 curlun->sense_data =
967                                         SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
968                                 curlun->sense_data_info = usb_offset >> 9;
969                                 curlun->info_valid = 1;
970                                 continue;
971                         }
972                         amount -= (amount & 511);
973                         if (amount == 0) {
974
975                                 /* Why were we asked to transfer a
976                                  * partial block? */
977                                 get_some_more = 0;
978                                 continue;
979                         }
980
981                         /* Get the next buffer */
982                         usb_offset += amount;
983                         fsg->usb_amount_left -= amount;
984                         amount_left_to_req -= amount;
985                         if (amount_left_to_req == 0)
986                                 get_some_more = 0;
987
988                         /* amount is always divisible by 512, hence by
989                          * the bulk-out maxpacket size */
990                         bh->outreq->length = bh->bulk_out_intended_length =
991                                         amount;
992                         start_transfer(fsg, fsg->bulk_out, bh->outreq,
993                                         &bh->outreq_busy, &bh->state);
994                         fsg->next_buffhd_to_fill = bh->next;
995                         continue;
996                 }
997
998                 /* Write the received data to the backing file */
999                 bh = fsg->next_buffhd_to_drain;
1000                 if (bh->state == BUF_STATE_EMPTY && !get_some_more)
1001                         break;                  /* We stopped early */
1002                 if (bh->state == BUF_STATE_FULL) {
1003                         smp_rmb();
1004                         fsg->next_buffhd_to_drain = bh->next;
1005                         bh->state = BUF_STATE_EMPTY;
1006
1007                         /* Did something go wrong with the transfer? */
1008                         if (bh->outreq->status != 0) {
1009                                 curlun->sense_data = SS_COMMUNICATION_FAILURE;
1010                                 curlun->sense_data_info = file_offset >> 9;
1011                                 curlun->info_valid = 1;
1012                                 break;
1013                         }
1014
1015                         amount = bh->outreq->actual;
1016                         if (curlun->file_length - file_offset < amount) {
1017                                 LERROR(curlun,
1018         "write %u @ %llu beyond end %llu\n",
1019         amount, (unsigned long long) file_offset,
1020         (unsigned long long) curlun->file_length);
1021                                 amount = curlun->file_length - file_offset;
1022                         }
1023
1024                         /* Perform the write */
1025                         file_offset_tmp = file_offset;
1026                         nwritten = vfs_write(curlun->filp,
1027                                         (char __user *) bh->buf,
1028                                         amount, &file_offset_tmp);
1029                         VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
1030                                         (unsigned long long) file_offset,
1031                                         (int) nwritten);
1032                         if (signal_pending(current))
1033                                 return -EINTR;          /* Interrupted! */
1034
1035                         if (nwritten < 0) {
1036                                 LDBG(curlun, "error in file write: %d\n",
1037                                                 (int) nwritten);
1038                                 nwritten = 0;
1039                         } else if (nwritten < amount) {
1040                                 LDBG(curlun, "partial file write: %d/%u\n",
1041                                                 (int) nwritten, amount);
1042                                 nwritten -= (nwritten & 511);
1043                                                 /* Round down to a block */
1044                         }
1045                         file_offset += nwritten;
1046                         amount_left_to_write -= nwritten;
1047                         fsg->residue -= nwritten;
1048
1049 #ifdef MAX_UNFLUSHED_BYTES
1050                         curlun->unflushed_bytes += nwritten;
1051                         if (curlun->unflushed_bytes >= MAX_UNFLUSHED_BYTES) {
1052                                 fsync_sub(curlun);
1053                                 curlun->unflushed_bytes = 0;
1054                         }
1055 #endif
1056                         /* If an error occurred, report it and its position */
1057                         if (nwritten < amount) {
1058                                 curlun->sense_data = SS_WRITE_ERROR;
1059                                 curlun->sense_data_info = file_offset >> 9;
1060                                 curlun->info_valid = 1;
1061                                 break;
1062                         }
1063
1064                         /* Did the host decide to stop early? */
1065                         if (bh->outreq->actual != bh->outreq->length) {
1066                                 fsg->short_packet_received = 1;
1067                                 break;
1068                         }
1069                         continue;
1070                 }
1071
1072                 /* Wait for something to happen */
1073                 rc = sleep_thread(fsg);
1074                 if (rc)
1075                         return rc;
1076         }
1077
1078         return -EIO;            /* No default reply */
1079 }
1080
1081
1082 /*-------------------------------------------------------------------------*/
1083
1084 /* Sync the file data, don't bother with the metadata.
1085  * The caller must own fsg->filesem.
1086  * This code was copied from fs/buffer.c:sys_fdatasync(). */
1087 static int fsync_sub(struct lun *curlun)
1088 {
1089         struct file     *filp = curlun->filp;
1090         struct inode    *inode;
1091         int             rc, err;
1092
1093         if (curlun->ro || !filp)
1094                 return 0;
1095         if (!filp->f_op->fsync)
1096                 return -EINVAL;
1097
1098         inode = filp->f_path.dentry->d_inode;
1099         mutex_lock(&inode->i_mutex);
1100         rc = filemap_fdatawrite(inode->i_mapping);
1101         err = filp->f_op->fsync(filp, filp->f_path.dentry, 1);
1102         if (!rc)
1103                 rc = err;
1104         err = filemap_fdatawait(inode->i_mapping);
1105         if (!rc)
1106                 rc = err;
1107         mutex_unlock(&inode->i_mutex);
1108         VLDBG(curlun, "fdatasync -> %d\n", rc);
1109         return rc;
1110 }
1111
1112 static void fsync_all(struct fsg_dev *fsg)
1113 {
1114         int     i;
1115
1116         for (i = 0; i < fsg->nluns; ++i)
1117                 fsync_sub(&fsg->luns[i]);
1118 }
1119
1120 static int do_synchronize_cache(struct fsg_dev *fsg)
1121 {
1122         struct lun      *curlun = fsg->curlun;
1123         int             rc;
1124
1125         /* We ignore the requested LBA and write out all of the file's
1126          * dirty data buffers. */
1127         rc = fsync_sub(curlun);
1128         if (rc)
1129                 curlun->sense_data = SS_WRITE_ERROR;
1130         return 0;
1131 }
1132
1133
1134 /*-------------------------------------------------------------------------*/
1135
1136 static void invalidate_sub(struct lun *curlun)
1137 {
1138         struct file     *filp = curlun->filp;
1139         struct inode    *inode = filp->f_path.dentry->d_inode;
1140         unsigned long   rc;
1141
1142         rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
1143         VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
1144 }
1145
1146 static int do_verify(struct fsg_dev *fsg)
1147 {
1148         struct lun              *curlun = fsg->curlun;
1149         u32                     lba;
1150         u32                     verification_length;
1151         struct fsg_buffhd       *bh = fsg->next_buffhd_to_fill;
1152         loff_t                  file_offset, file_offset_tmp;
1153         u32                     amount_left;
1154         unsigned int            amount;
1155         ssize_t                 nread;
1156
1157         /* Get the starting Logical Block Address and check that it's
1158          * not too big */
1159         lba = get_be32(&fsg->cmnd[2]);
1160         if (lba >= curlun->num_sectors) {
1161                 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1162                 return -EINVAL;
1163         }
1164
1165         /* We allow DPO (Disable Page Out = don't save data in the
1166          * cache) but we don't implement it. */
1167         if ((fsg->cmnd[1] & ~0x10) != 0) {
1168                 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1169                 return -EINVAL;
1170         }
1171
1172         verification_length = get_be16(&fsg->cmnd[7]);
1173         if (unlikely(verification_length == 0))
1174                 return -EIO;            /* No default reply */
1175
1176         /* Prepare to carry out the file verify */
1177         amount_left = verification_length << 9;
1178         file_offset = ((loff_t) lba) << 9;
1179
1180         /* Write out all the dirty buffers before invalidating them */
1181         fsync_sub(curlun);
1182         if (signal_pending(current))
1183                 return -EINTR;
1184
1185         invalidate_sub(curlun);
1186         if (signal_pending(current))
1187                 return -EINTR;
1188
1189         /* Just try to read the requested blocks */
1190         while (amount_left > 0) {
1191
1192                 /* Figure out how much we need to read:
1193                  * Try to read the remaining amount, but not more than
1194                  * the buffer size.
1195                  * And don't try to read past the end of the file.
1196                  * If this means reading 0 then we were asked to read
1197                  * past the end of file. */
1198                 amount = min((unsigned int) amount_left,
1199                                 (unsigned int)fsg->buf_size);
1200                 amount = min((loff_t) amount,
1201                                 curlun->file_length - file_offset);
1202                 if (amount == 0) {
1203                         curlun->sense_data =
1204                                         SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1205                         curlun->sense_data_info = file_offset >> 9;
1206                         curlun->info_valid = 1;
1207                         break;
1208                 }
1209
1210                 /* Perform the read */
1211                 file_offset_tmp = file_offset;
1212                 nread = vfs_read(curlun->filp,
1213                                 (char __user *) bh->buf,
1214                                 amount, &file_offset_tmp);
1215                 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1216                                 (unsigned long long) file_offset,
1217                                 (int) nread);
1218                 if (signal_pending(current))
1219                         return -EINTR;
1220
1221                 if (nread < 0) {
1222                         LDBG(curlun, "error in file verify: %d\n",
1223                                         (int) nread);
1224                         nread = 0;
1225                 } else if (nread < amount) {
1226                         LDBG(curlun, "partial file verify: %d/%u\n",
1227                                         (int) nread, amount);
1228                         nread -= (nread & 511); /* Round down to a sector */
1229                 }
1230                 if (nread == 0) {
1231                         curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1232                         curlun->sense_data_info = file_offset >> 9;
1233                         curlun->info_valid = 1;
1234                         break;
1235                 }
1236                 file_offset += nread;
1237                 amount_left -= nread;
1238         }
1239         return 0;
1240 }
1241
1242
1243 /*-------------------------------------------------------------------------*/
1244
1245 static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1246 {
1247         u8      *buf = (u8 *) bh->buf;
1248
1249         if (!fsg->curlun) {             /* Unsupported LUNs are okay */
1250                 fsg->bad_lun_okay = 1;
1251                 memset(buf, 0, 36);
1252                 buf[0] = 0x7f;          /* Unsupported, no device-type */
1253                 return 36;
1254         }
1255
1256         memset(buf, 0, 8);      /* Direct-access device; removable bit set below */
1257
1258         buf[1] = 0x80;  /* set removable bit */
1259         buf[2] = 2;             /* ANSI SCSI level 2 */
1260         buf[3] = 2;             /* SCSI-2 INQUIRY data format */
1261         buf[4] = 31;            /* Additional length */
1262                                 /* No special options */
1263         sprintf(buf + 8, "%-8s%-16s%04x", fsg->vendor,
1264                         fsg->product, fsg->release);
1265         return 36;
1266 }
1267
1268
1269 static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1270 {
1271         struct lun      *curlun = fsg->curlun;
1272         u8              *buf = (u8 *) bh->buf;
1273         u32             sd, sdinfo;
1274         int             valid;
1275
1276         /*
1277          * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1278          *
1279          * If a REQUEST SENSE command is received from an initiator
1280          * with a pending unit attention condition (before the target
1281          * generates the contingent allegiance condition), then the
1282          * target shall either:
1283          *   a) report any pending sense data and preserve the unit
1284          *      attention condition on the logical unit, or,
1285          *   b) report the unit attention condition, may discard any
1286          *      pending sense data, and clear the unit attention
1287          *      condition on the logical unit for that initiator.
1288          *
1289          * FSG normally uses option a); enable this code to use option b).
1290          */
1291 #if 0
1292         if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
1293                 curlun->sense_data = curlun->unit_attention_data;
1294                 curlun->unit_attention_data = SS_NO_SENSE;
1295         }
1296 #endif
1297
1298         if (!curlun) {          /* Unsupported LUNs are okay */
1299                 fsg->bad_lun_okay = 1;
1300                 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1301                 sdinfo = 0;
1302                 valid = 0;
1303         } else {
1304                 sd = curlun->sense_data;
1305                 sdinfo = curlun->sense_data_info;
1306                 valid = curlun->info_valid << 7;
1307                 curlun->sense_data = SS_NO_SENSE;
1308                 curlun->sense_data_info = 0;
1309                 curlun->info_valid = 0;
1310         }
1311
1312         memset(buf, 0, 18);
1313         buf[0] = valid | 0x70;                  /* Valid, current error */
1314         buf[2] = SK(sd);
1315         put_be32(&buf[3], sdinfo);              /* Sense information */
1316         buf[7] = 18 - 8;                        /* Additional sense length */
1317         buf[12] = ASC(sd);
1318         buf[13] = ASCQ(sd);
1319         return 18;
1320 }
1321
1322
1323 static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1324 {
1325         struct lun      *curlun = fsg->curlun;
1326         u32             lba = get_be32(&fsg->cmnd[2]);
1327         int             pmi = fsg->cmnd[8];
1328         u8              *buf = (u8 *) bh->buf;
1329
1330         /* Check the PMI and LBA fields */
1331         if (pmi > 1 || (pmi == 0 && lba != 0)) {
1332                 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1333                 return -EINVAL;
1334         }
1335
1336         put_be32(&buf[0], curlun->num_sectors - 1);     /* Max logical block */
1337         put_be32(&buf[4], 512);                         /* Block length */
1338         return 8;
1339 }
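/*
 * Example: for a 1 GiB backing file, num_sectors is 2097152, so the
 * READ CAPACITY response reports a last LBA of 2097151 and a block
 * length of 512 bytes.
 */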
1340
1341
1342 static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1343 {
1344         struct lun      *curlun = fsg->curlun;
1345         int             mscmnd = fsg->cmnd[0];
1346         u8              *buf = (u8 *) bh->buf;
1347         u8              *buf0 = buf;
1348         int             pc, page_code;
1349         int             changeable_values, all_pages;
1350         int             valid_page = 0;
1351         int             len, limit;
1352
1353         if ((fsg->cmnd[1] & ~0x08) != 0) {              /* Mask away DBD */
1354                 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1355                 return -EINVAL;
1356         }
1357         pc = fsg->cmnd[2] >> 6;
1358         page_code = fsg->cmnd[2] & 0x3f;
1359         if (pc == 3) {
1360                 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
1361                 return -EINVAL;
1362         }
1363         changeable_values = (pc == 1);
1364         all_pages = (page_code == 0x3f);
1365
1366         /* Write the mode parameter header.  Fixed values are: default
1367          * medium type, no cache control (DPOFUA), and no block descriptors.
1368          * The only variable value is the WriteProtect bit.  We will fill in
1369          * the mode data length later. */
1370         memset(buf, 0, 8);
1371         if (mscmnd == SC_MODE_SENSE_6) {
1372                 buf[2] = (curlun->ro ? 0x80 : 0x00);            /* WP, DPOFUA */
1373                 buf += 4;
1374                 limit = 255;
1375         } else {                        /* SC_MODE_SENSE_10 */
1376                 buf[3] = (curlun->ro ? 0x80 : 0x00);            /* WP, DPOFUA */
1377                 buf += 8;
1378                 limit = 65535;
1379         }
1380
1381         /* No block descriptors */
1382
1383         /* Disabled to work around USB reset problems with a Vista host.
1384          */
1385 #if 0
1386         /* The mode pages, in numerical order.  The only page we support
1387          * is the Caching page. */
1388         if (page_code == 0x08 || all_pages) {
1389                 valid_page = 1;
1390                 buf[0] = 0x08;          /* Page code */
1391                 buf[1] = 10;            /* Page length */
1392                 memset(buf+2, 0, 10);   /* None of the fields are changeable */
1393
1394                 if (!changeable_values) {
1395                         buf[2] = 0x04;  /* Write cache enable, */
1396                                         /* Read cache not disabled */
1397                                         /* No cache retention priorities */
1398                         put_be16(&buf[4], 0xffff);  /* Don't disable prefetch */
1399                                         /* Minimum prefetch = 0 */
1400                         put_be16(&buf[8], 0xffff);  /* Maximum prefetch */
1401                         /* Maximum prefetch ceiling */
1402                         put_be16(&buf[10], 0xffff);
1403                 }
1404                 buf += 12;
1405         }
1406 #else
1407         valid_page = 1;
1408 #endif
1409
1410         /* Check that a valid page was requested and the mode data length
1411          * isn't too long. */
1412         len = buf - buf0;
1413         if (!valid_page || len > limit) {
1414                 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1415                 return -EINVAL;
1416         }
1417
1418         /*  Store the mode data length */
1419         if (mscmnd == SC_MODE_SENSE_6)
1420                 buf0[0] = len - 1;
1421         else
1422                 put_be16(buf0, len - 2);
1423         return len;
1424 }
1425
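/* Editor's note -- illustrative sketch, not part of this driver.  With the
 * Caching page disabled above, do_mode_sense() returns only the mode
 * parameter header: 4 bytes for MODE SENSE(6), with the mode data length in
 * byte 0 (len - 1 == 3), or 8 bytes for MODE SENSE(10), with the length in
 * bytes 0-1 (len - 2 == 6).  The WriteProtect bit sits in byte 2 or byte 3
 * respectively; the check below is hypothetical host-side code. */
#if 0
#include <stdint.h>

static int medium_is_write_protected(const uint8_t *hdr, int mode_sense_10)
{
        /* DEVICE-SPECIFIC PARAMETER byte: bit 7 is the WP flag */
        return mode_sense_10 ? (hdr[3] & 0x80) != 0 : (hdr[2] & 0x80) != 0;
}
#endif
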
1426 static int do_start_stop(struct fsg_dev *fsg)
1427 {
1428         struct lun      *curlun = fsg->curlun;
1429         int             loej, start;
1430
1431         /* int immed = fsg->cmnd[1] & 0x01; */
1432         loej = fsg->cmnd[4] & 0x02;
1433         start = fsg->cmnd[4] & 0x01;
1434
1435         if (loej) {
1436                 /* eject request from the host */
1437                 if (backing_file_is_open(curlun)) {
1438                         close_backing_file(fsg, curlun);
1439                         curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
1440                 }
1441         }
1442
1443         return 0;
1444 }
1445
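/* Editor's note -- illustrative sketch, not part of this driver.  A host
 * requests an eject with START STOP UNIT, setting LoEj=1 and Start=0 in CDB
 * byte 4; do_start_stop() above then closes the backing file.  Example CDB: */
#if 0
static const unsigned char eject_cdb[6] = {
        0x1b,                   /* START STOP UNIT (SC_START_STOP_UNIT) */
        0x00,                   /* IMMED = 0 */
        0x00, 0x00,
        0x02,                   /* LoEj = 1, Start = 0 */
        0x00,
};
#endif
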
1446 static int do_prevent_allow(struct fsg_dev *fsg)
1447 {
1448         struct lun      *curlun = fsg->curlun;
1449         int             prevent;
1450
1451         prevent = fsg->cmnd[4] & 0x01;
1452         if ((fsg->cmnd[4] & ~0x01) != 0) {              /* Mask away Prevent */
1453                 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1454                 return -EINVAL;
1455         }
1456
1457         if (curlun->prevent_medium_removal && !prevent)
1458                 fsync_sub(curlun);
1459         curlun->prevent_medium_removal = prevent;
1460         return 0;
1461 }
1462
1463
1464 static int do_read_format_capacities(struct fsg_dev *fsg,
1465                         struct fsg_buffhd *bh)
1466 {
1467         struct lun      *curlun = fsg->curlun;
1468         u8              *buf = (u8 *) bh->buf;
1469
1470         buf[0] = buf[1] = buf[2] = 0;
1471         buf[3] = 8;     /* Only the Current/Maximum Capacity Descriptor */
1472         buf += 4;
1473
1474         put_be32(&buf[0], curlun->num_sectors); /* Number of blocks */
1475         put_be32(&buf[4], 512);                         /* Block length */
1476         buf[4] = 0x02;                                  /* Current capacity */
1477         return 12;
1478 }
1479
1480
1481 static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1482 {
1483         struct lun      *curlun = fsg->curlun;
1484
1485         /* We don't support MODE SELECT */
1486         curlun->sense_data = SS_INVALID_COMMAND;
1487         return -EINVAL;
1488 }
1489
1490
1491 /*-------------------------------------------------------------------------*/
1492 #if 0
1493 static int write_zero(struct fsg_dev *fsg)
1494 {
1495         struct fsg_buffhd       *bh;
1496         int                     rc;
1497
1498         DBG(fsg, "write_zero\n");
1499         /* Wait for the next buffer to become available */
1500         bh = fsg->next_buffhd_to_fill;
1501         while (bh->state != BUF_STATE_EMPTY) {
1502                 rc = sleep_thread(fsg);
1503                 if (rc)
1504                         return rc;
1505         }
1506
1507         bh->inreq->length = 0;
1508         start_transfer(fsg, fsg->bulk_in, bh->inreq,
1509                         &bh->inreq_busy, &bh->state);
1510
1511         fsg->next_buffhd_to_fill = bh->next;
1512         return 0;
1513 }
1514 #endif
1515
1516 static int throw_away_data(struct fsg_dev *fsg)
1517 {
1518         struct fsg_buffhd       *bh;
1519         u32                     amount;
1520         int                     rc;
1521
1522         DBG(fsg, "throw_away_data\n");
1523         while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
1524                         fsg->usb_amount_left > 0) {
1525
1526                 /* Throw away the data in a filled buffer */
1527                 if (bh->state == BUF_STATE_FULL) {
1528                         smp_rmb();
1529                         bh->state = BUF_STATE_EMPTY;
1530                         fsg->next_buffhd_to_drain = bh->next;
1531
1532                         /* A short packet or an error ends everything */
1533                         if (bh->outreq->actual != bh->outreq->length ||
1534                                         bh->outreq->status != 0) {
1535                                 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1536                                 return -EINTR;
1537                         }
1538                         continue;
1539                 }
1540
1541                 /* Try to submit another request if we need one */
1542                 bh = fsg->next_buffhd_to_fill;
1543                 if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
1544                         amount = min(fsg->usb_amount_left, (u32) fsg->buf_size);
1545
1546                         /* amount is always divisible by 512, hence by
1547                          * the bulk-out maxpacket size */
1548                         bh->outreq->length = bh->bulk_out_intended_length =
1549                                         amount;
1550                         start_transfer(fsg, fsg->bulk_out, bh->outreq,
1551                                         &bh->outreq_busy, &bh->state);
1552                         fsg->next_buffhd_to_fill = bh->next;
1553                         fsg->usb_amount_left -= amount;
1554                         continue;
1555                 }
1556
1557                 /* Otherwise wait for something to happen */
1558                 rc = sleep_thread(fsg);
1559                 if (rc)
1560                         return rc;
1561         }
1562         return 0;
1563 }
1564
1565
1566 static int finish_reply(struct fsg_dev *fsg)
1567 {
1568         struct fsg_buffhd       *bh = fsg->next_buffhd_to_fill;
1569         int                     rc = 0;
1570
1571         switch (fsg->data_dir) {
1572         case DATA_DIR_NONE:
1573                 break;                  /* Nothing to send */
1574
1575         case DATA_DIR_UNKNOWN:
1576                 rc = -EINVAL;
1577                 break;
1578
1579         /* All but the last buffer of data must have already been sent */
1580         case DATA_DIR_TO_HOST:
1581                 if (fsg->data_size == 0)
1582                         ;               /* Nothing to send */
1583
1584                 /* If there's no residue, simply send the last buffer */
1585                 else if (fsg->residue == 0) {
1586                         start_transfer(fsg, fsg->bulk_in, bh->inreq,
1587                                         &bh->inreq_busy, &bh->state);
1588                         fsg->next_buffhd_to_fill = bh->next;
1589                 } else {
1590                         start_transfer(fsg, fsg->bulk_in, bh->inreq,
1591                                         &bh->inreq_busy, &bh->state);
1592                         fsg->next_buffhd_to_fill = bh->next;
1593 #if 0
1594                         /* this is unnecessary, and was causing problems with MacOS */
1595                         if (bh->inreq->length > 0)
1596                                 write_zero(fsg);
1597 #endif
1598                 }
1599                 break;
1600
1601         /* We have processed all we want from the data the host has sent.
1602          * There may still be outstanding bulk-out requests. */
1603         case DATA_DIR_FROM_HOST:
1604                 if (fsg->residue == 0)
1605                         ;               /* Nothing to receive */
1606
1607                 /* Did the host stop sending unexpectedly early? */
1608                 else if (fsg->short_packet_received) {
1609                         raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1610                         rc = -EINTR;
1611                 }
1612
1613                 /* We haven't processed all the incoming data.  Even though
1614                  * we may be allowed to stall, doing so would cause a race.
1615                  * The controller may already have ACK'ed all the remaining
1616                  * bulk-out packets, in which case the host wouldn't see a
1617                  * STALL.  Not realizing the endpoint was halted, it wouldn't
1618                  * clear the halt -- leading to problems later on. */
1619 #if 0
1620                 fsg_set_halt(fsg, fsg->bulk_out);
1621                 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1622                 rc = -EINTR;
1623 #endif
1624
1625                 /* We can't stall.  Read in the excess data and throw it
1626                  * all away. */
1627                 else
1628                         rc = throw_away_data(fsg);
1629                 break;
1630         }
1631         return rc;
1632 }
1633
1634
1635 static int send_status(struct fsg_dev *fsg)
1636 {
1637         struct lun              *curlun = fsg->curlun;
1638         struct fsg_buffhd       *bh;
1639         int                     rc;
1640         u8                      status = USB_STATUS_PASS;
1641         u32                     sd, sdinfo = 0;
1642         struct bulk_cs_wrap     *csw;
1643
1644         DBG(fsg, "send_status\n");
1645         /* Wait for the next buffer to become available */
1646         bh = fsg->next_buffhd_to_fill;
1647         while (bh->state != BUF_STATE_EMPTY) {
1648                 rc = sleep_thread(fsg);
1649                 if (rc)
1650                         return rc;
1651         }
1652
1653         if (curlun) {
1654                 sd = curlun->sense_data;
1655                 sdinfo = curlun->sense_data_info;
1656         } else if (fsg->bad_lun_okay)
1657                 sd = SS_NO_SENSE;
1658         else
1659                 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1660
1661         if (fsg->phase_error) {
1662                 DBG(fsg, "sending phase-error status\n");
1663                 status = USB_STATUS_PHASE_ERROR;
1664                 sd = SS_INVALID_COMMAND;
1665         } else if (sd != SS_NO_SENSE) {
1666                 DBG(fsg, "sending command-failure status\n");
1667                 status = USB_STATUS_FAIL;
1668                 VDBG(fsg, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1669                                 "  info x%x\n",
1670                                 SK(sd), ASC(sd), ASCQ(sd), sdinfo);
1671         }
1672
1673         csw = bh->buf;
1674
1675         /* Store and send the Bulk-only CSW */
1676         csw->Signature = __constant_cpu_to_le32(USB_BULK_CS_SIG);
1677         csw->Tag = fsg->tag;
1678         csw->Residue = cpu_to_le32(fsg->residue);
1679         csw->Status = status;
1680
1681         bh->inreq->length = USB_BULK_CS_WRAP_LEN;
1682         start_transfer(fsg, fsg->bulk_in, bh->inreq,
1683                         &bh->inreq_busy, &bh->state);
1684
1685         fsg->next_buffhd_to_fill = bh->next;
1686         return 0;
1687 }
1688
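/* Editor's note -- illustrative sketch, not part of this driver.  The CSW
 * queued by send_status() above is the 13-byte Bulk-Only status wrapper.
 * A host-side mirror of struct bulk_cs_wrap might look like this (names
 * here are hypothetical; multi-byte fields are little-endian): */
#if 0
#include <stdint.h>

struct csw {
        uint32_t signature;     /* 'USBS' == 0x53425355 */
        uint32_t tag;           /* echoes the tag of the matching CBW */
        uint32_t residue;       /* bytes requested minus bytes transferred */
        uint8_t  status;        /* 0 = pass, 1 = fail, 2 = phase error */
} __attribute__((packed));      /* 13 bytes on the wire */
#endif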
1689
1690 /*-------------------------------------------------------------------------*/
1691
1692 /* Check whether the command is properly formed and whether its data size
1693  * and direction agree with the values we already have. */
1694 static int check_command(struct fsg_dev *fsg, int cmnd_size,
1695                 enum data_direction data_dir, unsigned int mask,
1696                 int needs_medium, const char *name)
1697 {
1698         int                     i;
1699         int                     lun = fsg->cmnd[1] >> 5;
1700         static const char       dirletter[4] = {'u', 'o', 'i', 'n'};
1701         char                    hdlen[20];
1702         struct lun              *curlun;
1703
1704         hdlen[0] = 0;
1705         if (fsg->data_dir != DATA_DIR_UNKNOWN)
1706                 sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
1707                                 fsg->data_size);
1708         VDBG(fsg, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
1709                         name, cmnd_size, dirletter[(int) data_dir],
1710                         fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
1711
1712         /* We can't reply at all until we know the correct data direction
1713          * and size. */
1714         if (fsg->data_size_from_cmnd == 0)
1715                 data_dir = DATA_DIR_NONE;
1716         if (fsg->data_dir == DATA_DIR_UNKNOWN) {        /* CB or CBI */
1717                 fsg->data_dir = data_dir;
1718                 fsg->data_size = fsg->data_size_from_cmnd;
1719
1720         } else {                                        /* Bulk-only */
1721                 if (fsg->data_size < fsg->data_size_from_cmnd) {
1722
1723                         /* Host data size < Device data size is a phase error.
1724                          * Carry out the command, but only transfer as much
1725                          * as we are allowed. */
1726                         DBG(fsg, "phase error 1\n");
1727                         fsg->data_size_from_cmnd = fsg->data_size;
1728                         fsg->phase_error = 1;
1729                 }
1730         }
1731         fsg->residue = fsg->usb_amount_left = fsg->data_size;
1732
1733         /* Conflicting data directions are a phase error */
1734         if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
1735                 fsg->phase_error = 1;
1736                 DBG(fsg, "phase error 2\n");
1737                 return -EINVAL;
1738         }
1739
1740         /* Verify the length of the command itself */
1741         if (cmnd_size != fsg->cmnd_size) {
1742
1743                 /* Special case workaround: MS-Windows issues REQUEST_SENSE
1744                  * and INQUIRY commands with cbw->Length == 12 (it should be 6). */
1745                 if ((fsg->cmnd[0] == SC_REQUEST_SENSE && fsg->cmnd_size == 12)
1746                  || (fsg->cmnd[0] == SC_INQUIRY && fsg->cmnd_size == 12))
1747                         cmnd_size = fsg->cmnd_size;
1748                 else {
1749                         fsg->phase_error = 1;
1750                         return -EINVAL;
1751                 }
1752         }
1753
1754         /* Check that the LUN values are consistent */
1755         if (fsg->lun != lun)
1756                 DBG(fsg, "using LUN %d from CBW, "
1757                                 "not LUN %d from CDB\n",
1758                                 fsg->lun, lun);
1759
1760         /* Check the LUN */
1761         if (fsg->lun >= 0 && fsg->lun < fsg->nluns) {
1762                 fsg->curlun = curlun = &fsg->luns[fsg->lun];
1763                 if (fsg->cmnd[0] != SC_REQUEST_SENSE) {
1764                         curlun->sense_data = SS_NO_SENSE;
1765                         curlun->sense_data_info = 0;
1766                         curlun->info_valid = 0;
1767                 }
1768         } else {
1769                 fsg->curlun = curlun = NULL;
1770                 fsg->bad_lun_okay = 0;
1771
1772                 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
1773                  * to use unsupported LUNs; all others may not. */
1774                 if (fsg->cmnd[0] != SC_INQUIRY &&
1775                                 fsg->cmnd[0] != SC_REQUEST_SENSE) {
1776                         DBG(fsg, "unsupported LUN %d\n", fsg->lun);
1777                         return -EINVAL;
1778                 }
1779         }
1780
1781         /* If a unit attention condition exists, only INQUIRY and
1782          * REQUEST SENSE commands are allowed; anything else must fail. */
1783         if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
1784                         fsg->cmnd[0] != SC_INQUIRY &&
1785                         fsg->cmnd[0] != SC_REQUEST_SENSE) {
1786                 curlun->sense_data = curlun->unit_attention_data;
1787                 curlun->unit_attention_data = SS_NO_SENSE;
1788                 return -EINVAL;
1789         }
1790
1791         /* Check that only command bytes listed in the mask are non-zero */
1792         fsg->cmnd[1] &= 0x1f;                   /* Mask away the LUN */
1793         for (i = 1; i < cmnd_size; ++i) {
1794                 if (fsg->cmnd[i] && !(mask & (1 << i))) {
1795                         if (curlun)
1796                                 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1797                         DBG(fsg, "SS_INVALID_FIELD_IN_CDB\n");
1798                         return -EINVAL;
1799                 }
1800         }
1801
1802         /* If the medium isn't mounted and the command needs to access
1803          * it, return an error. */
1804         if (curlun && !backing_file_is_open(curlun) && needs_medium) {
1805                 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
1806                 DBG(fsg, "SS_MEDIUM_NOT_PRESENT\n");
1807                 return -EINVAL;
1808         }
1809
1810         return 0;
1811 }
1812
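/* Editor's note -- illustrative worked example, not part of this driver.
 * The "mask" argument of check_command() marks which CDB bytes (besides the
 * opcode) may be non-zero.  For READ(10) the caller passes
 * (1<<1) | (0xf<<2) | (3<<7), so byte 1 (flags), bytes 2-5 (LBA) and bytes
 * 7-8 (transfer length) are allowed; any other non-zero byte fails with
 * SS_INVALID_FIELD_IN_CDB.  A CDB that passes the check (hypothetical,
 * host-side): */
#if 0
static const unsigned char read10_cdb[10] = {
        0x28,                           /* READ(10) opcode */
        0x00,                           /* byte 1: flags (allowed by 1<<1) */
        0x00, 0x00, 0x00, 0x00,         /* bytes 2-5: LBA (allowed by 0xf<<2) */
        0x00,                           /* byte 6: must be zero */
        0x00, 0x08,                     /* bytes 7-8: length (allowed by 3<<7) */
        0x00,                           /* byte 9: control, must be zero */
};
#endif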
1813
1814 static int do_scsi_command(struct fsg_dev *fsg)
1815 {
1816         struct fsg_buffhd       *bh;
1817         int                     rc;
1818         int                     reply = -EINVAL;
1819         int                     i;
1820         static char             unknown[16];
1821
1822         dump_cdb(fsg);
1823
1824         /* Wait for the next buffer to become available for data or status */
1825         bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
1826         while (bh->state != BUF_STATE_EMPTY) {
1827                 rc = sleep_thread(fsg);
1828                 if (rc)
1829                         return rc;
1830         }
1831         fsg->phase_error = 0;
1832         fsg->short_packet_received = 0;
1833
1834         down_read(&fsg->filesem);       /* We're using the backing file */
1835         switch (fsg->cmnd[0]) {
1836
1837         case SC_INQUIRY:
1838                 fsg->data_size_from_cmnd = fsg->cmnd[4];
1839                 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1840                                 (1<<4), 0,
1841                                 "INQUIRY")) == 0)
1842                         reply = do_inquiry(fsg, bh);
1843                 break;
1844
1845         case SC_MODE_SELECT_6:
1846                 fsg->data_size_from_cmnd = fsg->cmnd[4];
1847                 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
1848                                 (1<<1) | (1<<4), 0,
1849                                 "MODE SELECT(6)")) == 0)
1850                         reply = do_mode_select(fsg, bh);
1851                 break;
1852
1853         case SC_MODE_SELECT_10:
1854                 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
1855                 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
1856                                 (1<<1) | (3<<7), 0,
1857                                 "MODE SELECT(10)")) == 0)
1858                         reply = do_mode_select(fsg, bh);
1859                 break;
1860
1861         case SC_MODE_SENSE_6:
1862                 fsg->data_size_from_cmnd = fsg->cmnd[4];
1863                 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1864                                 (1<<1) | (1<<2) | (1<<4), 0,
1865                                 "MODE SENSE(6)")) == 0)
1866                         reply = do_mode_sense(fsg, bh);
1867                 break;
1868
1869         case SC_MODE_SENSE_10:
1870                 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
1871                 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1872                                 (1<<1) | (1<<2) | (3<<7), 0,
1873                                 "MODE SENSE(10)")) == 0)
1874                         reply = do_mode_sense(fsg, bh);
1875                 break;
1876
1877         case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
1878                 fsg->data_size_from_cmnd = 0;
1879                 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
1880                                 (1<<4), 0,
1881                                 "PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
1882                         reply = do_prevent_allow(fsg);
1883                 break;
1884
1885         case SC_READ_6:
1886                 i = fsg->cmnd[4];
1887                 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1888                 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1889                                 (7<<1) | (1<<4), 1,
1890                                 "READ(6)")) == 0)
1891                         reply = do_read(fsg);
1892                 break;
1893
1894         case SC_READ_10:
1895                 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
1896                 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1897                                 (1<<1) | (0xf<<2) | (3<<7), 1,
1898                                 "READ(10)")) == 0)
1899                         reply = do_read(fsg);
1900                 break;
1901
1902         case SC_READ_12:
1903                 fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
1904                 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
1905                                 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1906                                 "READ(12)")) == 0)
1907                         reply = do_read(fsg);
1908                 break;
1909
1910         case SC_READ_CAPACITY:
1911                 fsg->data_size_from_cmnd = 8;
1912                 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1913                                 (0xf<<2) | (1<<8), 1,
1914                                 "READ CAPACITY")) == 0)
1915                         reply = do_read_capacity(fsg, bh);
1916                 break;
1917
1918         case SC_READ_FORMAT_CAPACITIES:
1919                 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]);
1920                 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1921                                 (3<<7), 1,
1922                                 "READ FORMAT CAPACITIES")) == 0)
1923                         reply = do_read_format_capacities(fsg, bh);
1924                 break;
1925
1926         case SC_REQUEST_SENSE:
1927                 fsg->data_size_from_cmnd = fsg->cmnd[4];
1928                 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1929                                 (1<<4), 0,
1930                                 "REQUEST SENSE")) == 0)
1931                         reply = do_request_sense(fsg, bh);
1932                 break;
1933
1934         case SC_START_STOP_UNIT:
1935                 fsg->data_size_from_cmnd = 0;
1936                 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
1937                                 (1<<1) | (1<<4), 0,
1938                                 "START-STOP UNIT")) == 0)
1939                         reply = do_start_stop(fsg);
1940                 break;
1941
1942         case SC_SYNCHRONIZE_CACHE:
1943                 fsg->data_size_from_cmnd = 0;
1944                 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
1945                                 (0xf<<2) | (3<<7), 1,
1946                                 "SYNCHRONIZE CACHE")) == 0)
1947                         reply = do_synchronize_cache(fsg);
1948                 break;
1949
1950         case SC_TEST_UNIT_READY:
1951                 fsg->data_size_from_cmnd = 0;
1952                 reply = check_command(fsg, 6, DATA_DIR_NONE,
1953                                 0, 1,
1954                                 "TEST UNIT READY");
1955                 break;
1956
1957         /* Although optional, this command is used by MS-Windows.  We
1958          * support a minimal version: BytChk must be 0. */
1959         case SC_VERIFY:
1960                 fsg->data_size_from_cmnd = 0;
1961                 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
1962                                 (1<<1) | (0xf<<2) | (3<<7), 1,
1963                                 "VERIFY")) == 0)
1964                         reply = do_verify(fsg);
1965                 break;
1966
1967         case SC_WRITE_6:
1968                 i = fsg->cmnd[4];
1969                 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1970                 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
1971                                 (7<<1) | (1<<4), 1,
1972                                 "WRITE(6)")) == 0)
1973                         reply = do_write(fsg);
1974                 break;
1975
1976         case SC_WRITE_10:
1977                 fsg->data_size_from_cmnd = get_be16(&fsg->cmnd[7]) << 9;
1978                 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
1979                                 (1<<1) | (0xf<<2) | (3<<7), 1,
1980                                 "WRITE(10)")) == 0)
1981                         reply = do_write(fsg);
1982                 break;
1983
1984         case SC_WRITE_12:
1985                 fsg->data_size_from_cmnd = get_be32(&fsg->cmnd[6]) << 9;
1986                 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
1987                                 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1988                                 "WRITE(12)")) == 0)
1989                         reply = do_write(fsg);
1990                 break;
1991
1992         /* Some mandatory commands that we recognize but don't implement.
1993          * They don't mean much in this setting.  It's left as an exercise
1994          * for anyone interested to implement RESERVE and RELEASE in terms
1995          * of Posix locks. */
1996         case SC_FORMAT_UNIT:
1997         case SC_RELEASE:
1998         case SC_RESERVE:
1999         case SC_SEND_DIAGNOSTIC:
2000                 /* Fall through */
2001
2002         default:
2003                 fsg->data_size_from_cmnd = 0;
2004                 sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
2005                 if ((reply = check_command(fsg, fsg->cmnd_size,
2006                                 DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
2007                         fsg->curlun->sense_data = SS_INVALID_COMMAND;
2008                         reply = -EINVAL;
2009                 }
2010                 break;
2011         }
2012         up_read(&fsg->filesem);
2013
2014         VDBG(fsg, "reply: %d, fsg->data_size_from_cmnd: %d\n",
2015                         reply, fsg->data_size_from_cmnd);
2016         if (reply == -EINTR || signal_pending(current))
2017                 return -EINTR;
2018
2019         /* Set up the single reply buffer for finish_reply() */
2020         if (reply == -EINVAL)
2021                 reply = 0;              /* Error reply length */
2022         if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
2023                 reply = min((u32) reply, fsg->data_size_from_cmnd);
2024                 bh->inreq->length = reply;
2025                 bh->state = BUF_STATE_FULL;
2026                 fsg->residue -= reply;
2027         }                               /* Otherwise it's already set */
2028
2029         return 0;
2030 }
2031
2032
2033 /*-------------------------------------------------------------------------*/
2034
2035 static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2036 {
2037         struct usb_request      *req = bh->outreq;
2038         struct bulk_cb_wrap     *cbw = req->buf;
2039
2040         /* Was this a real packet? */
2041         if (req->status)
2042                 return -EINVAL;
2043
2044         /* Is the CBW valid? */
2045         if (req->actual != USB_BULK_CB_WRAP_LEN ||
2046                         cbw->Signature != __constant_cpu_to_le32(
2047                                 USB_BULK_CB_SIG)) {
2048                 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2049                                 req->actual,
2050                                 le32_to_cpu(cbw->Signature));
2051                 return -EINVAL;
2052         }
2053
2054         /* Is the CBW meaningful? */
2055         if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2056                         cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2057                 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2058                                 "cmdlen %u\n",
2059                                 cbw->Lun, cbw->Flags, cbw->Length);
2060                 return -EINVAL;
2061         }
2062
2063         /* Save the command for later */
2064         fsg->cmnd_size = cbw->Length;
2065         memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
2066         if (cbw->Flags & USB_BULK_IN_FLAG)
2067                 fsg->data_dir = DATA_DIR_TO_HOST;
2068         else
2069                 fsg->data_dir = DATA_DIR_FROM_HOST;
2070         fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
2071         if (fsg->data_size == 0)
2072                 fsg->data_dir = DATA_DIR_NONE;
2073         fsg->lun = cbw->Lun;
2074         fsg->tag = cbw->Tag;
2075         return 0;
2076 }
2077
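/* Editor's note -- illustrative sketch, not part of this driver.  The CBW
 * validated by received_cbw() above is the 31-byte Bulk-Only command
 * wrapper.  A host builds it roughly like this mirror of struct bulk_cb_wrap
 * (names are hypothetical; multi-byte fields are little-endian): */
#if 0
#include <stdint.h>
#include <string.h>

struct cbw {
        uint32_t signature;             /* 'USBC' == 0x43425355 */
        uint32_t tag;                   /* echoed back in the CSW */
        uint32_t data_transfer_length;  /* bytes expected in the data phase */
        uint8_t  flags;                 /* bit 7 set: device-to-host (IN) */
        uint8_t  lun;                   /* logical unit, 0..MAX_LUNS-1 */
        uint8_t  cdb_length;            /* 1..16 significant CDB bytes */
        uint8_t  cdb[16];
} __attribute__((packed));              /* 31 bytes on the wire */

static void fill_cbw(struct cbw *c, uint32_t tag, uint32_t len, int dir_in,
                uint8_t lun, const uint8_t *cdb, uint8_t cdb_len)
{
        memset(c, 0, sizeof(*c));
        c->signature = 0x43425355;      /* assumes a little-endian host */
        c->tag = tag;
        c->data_transfer_length = len;
        c->flags = dir_in ? 0x80 : 0x00;
        c->lun = lun;
        c->cdb_length = cdb_len;
        memcpy(c->cdb, cdb, cdb_len);
}
#endif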
2078
2079 static int get_next_command(struct fsg_dev *fsg)
2080 {
2081         struct fsg_buffhd       *bh;
2082         int                     rc = 0;
2083
2084         /* Wait for the next buffer to become available */
2085         bh = fsg->next_buffhd_to_fill;
2086         while (bh->state != BUF_STATE_EMPTY) {
2087                 rc = sleep_thread(fsg);
2088                 if (rc) {
2089                         usb_ep_dequeue(fsg->bulk_out, bh->outreq);
2090                         bh->outreq_busy = 0;
2091                         bh->state = BUF_STATE_EMPTY;
2092                         return rc;
2093                 }
2094         }
2095
2096         /* Queue a request to read a Bulk-only CBW */
2097         set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
2098         start_transfer(fsg, fsg->bulk_out, bh->outreq,
2099                         &bh->outreq_busy, &bh->state);
2100
2101         /* We will drain the buffer in software, which means we
2102          * can reuse it for the next filling.  No need to advance
2103          * next_buffhd_to_fill. */
2104
2105         /* Wait for the CBW to arrive */
2106         while (bh->state != BUF_STATE_FULL) {
2107                 rc = sleep_thread(fsg);
2108                 if (rc) {
2109                         usb_ep_dequeue(fsg->bulk_out, bh->outreq);
2110                         bh->outreq_busy = 0;
2111                         bh->state = BUF_STATE_EMPTY;
2112                         return rc;
2113                 }
2114         }
2115         smp_rmb();
2116         rc = received_cbw(fsg, bh);
2117         bh->state = BUF_STATE_EMPTY;
2118
2119         return rc;
2120 }
2121
2122
2123 /*-------------------------------------------------------------------------*/
2124
2125 static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
2126                 const struct usb_endpoint_descriptor *d)
2127 {
2128         int     rc;
2129
2130         DBG(fsg, "usb_ep_enable %s\n", ep->name);
2131         ep->driver_data = fsg;
2132         rc = usb_ep_enable(ep, d);
2133         if (rc)
2134                 ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
2135         return rc;
2136 }
2137
2138 static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
2139                 struct usb_request **preq)
2140 {
2141         *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2142         if (*preq)
2143                 return 0;
2144         ERROR(fsg, "can't allocate request for %s\n", ep->name);
2145         return -ENOMEM;
2146 }
2147
2148 /*
2149  * Reset interface setting and re-init endpoint state (toggle etc).
2150  * Call with altsetting < 0 to disable the interface.  The only other
2151  * available altsetting is 0, which enables the interface.
2152  */
2153 static int do_set_interface(struct fsg_dev *fsg, int altsetting)
2154 {
2155         struct usb_composite_dev *cdev = fsg->cdev;
2156         int     rc = 0;
2157         int     i;
2158         const struct usb_endpoint_descriptor    *d;
2159
2160         if (fsg->running)
2161                 DBG(fsg, "reset interface\n");
2162 reset:
2163          /* Disable the endpoints */
2164         if (fsg->bulk_in_enabled) {
2165                 DBG(fsg, "usb_ep_disable %s\n", fsg->bulk_in->name);
2166                 usb_ep_disable(fsg->bulk_in);
2167                 fsg->bulk_in_enabled = 0;
2168         }
2169         if (fsg->bulk_out_enabled) {
2170                 DBG(fsg, "usb_ep_disable %s\n", fsg->bulk_out->name);
2171                 usb_ep_disable(fsg->bulk_out);
2172                 fsg->bulk_out_enabled = 0;
2173         }
2174
2175         /* Deallocate the requests */
2176         for (i = 0; i < NUM_BUFFERS; ++i) {
2177                 struct fsg_buffhd *bh = &fsg->buffhds[i];
2178                 if (bh->inreq) {
2179                         usb_ep_free_request(fsg->bulk_in, bh->inreq);
2180                         bh->inreq = NULL;
2181                 }
2182                 if (bh->outreq) {
2183                         usb_ep_free_request(fsg->bulk_out, bh->outreq);
2184                         bh->outreq = NULL;
2185                 }
2186         }
2187
2188
2189         fsg->running = 0;
2190         if (altsetting < 0 || rc != 0)
2191                 return rc;
2192
2193         DBG(fsg, "set interface %d\n", altsetting);
2194
2195         /* Enable the endpoints */
2196         d = ep_desc(cdev->gadget, &fs_bulk_in_desc, &hs_bulk_in_desc);
2197         if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
2198                 goto reset;
2199         fsg->bulk_in_enabled = 1;
2200
2201         d = ep_desc(cdev->gadget, &fs_bulk_out_desc, &hs_bulk_out_desc);
2202         if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
2203                 goto reset;
2204         fsg->bulk_out_enabled = 1;
2205         fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
2206
2207         /* Allocate the requests */
2208         for (i = 0; i < NUM_BUFFERS; ++i) {
2209                 struct fsg_buffhd       *bh = &fsg->buffhds[i];
2210
2211                 rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq);
2212                 if (rc != 0)
2213                         goto reset;
2214                 rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq);
2215                 if (rc != 0)
2216                         goto reset;
2217                 bh->inreq->buf = bh->outreq->buf = bh->buf;
2218                 bh->inreq->context = bh->outreq->context = bh;
2219                 bh->inreq->complete = bulk_in_complete;
2220                 bh->outreq->complete = bulk_out_complete;
2221         }
2222
2223         fsg->running = 1;
2224         for (i = 0; i < fsg->nluns; ++i)
2225                 fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2226
2227         return rc;
2228 }
2229
2230 static void adjust_wake_lock(struct fsg_dev *fsg)
2231 {
2232         int ums_active = 0;
2233         int i;
2234         unsigned long           flags;
2235
2236         spin_lock_irqsave(&fsg->lock, flags);
2237
2238         if (fsg->config) {
2239                 for (i = 0; i < fsg->nluns; ++i) {
2240                         if (backing_file_is_open(&fsg->luns[i]))
2241                                 ums_active = 1;
2242                 }
2243         }
2244
2245         if (ums_active)
2246                 wake_lock(&fsg->wake_lock);
2247         else
2248                 wake_unlock(&fsg->wake_lock);
2249
2250         spin_unlock_irqrestore(&fsg->lock, flags);
2251 }
2252
2253 /*
2254  * Change our operational configuration.  This code must agree with the code
2255  * that returns config descriptors, and with interface altsetting code.
2256  *
2257  * It's also responsible for power management interactions.  Some
2258  * configurations might not work with our current power sources.
2259  * For now we just assume the gadget is always self-powered.
2260  */
2261 static int do_set_config(struct fsg_dev *fsg, u8 new_config)
2262 {
2263         int     rc = 0;
2264
2265         /* Disable the single interface */
2266         if (fsg->config != 0) {
2267                 DBG(fsg, "reset config\n");
2268                 fsg->config = 0;
2269                 rc = do_set_interface(fsg, -1);
2270         }
2271
2272         /* Enable the interface */
2273         if (new_config != 0) {
2274                 fsg->config = new_config;
2275                 if ((rc = do_set_interface(fsg, 0)) != 0)
2276                         fsg->config = 0;        /* Reset on errors */
2277         }
2278
2279         switch_set_state(&fsg->sdev, new_config);
2280         adjust_wake_lock(fsg);
2281         return rc;
2282 }
2283
2284
2285 /*-------------------------------------------------------------------------*/
2286
2287 static void handle_exception(struct fsg_dev *fsg)
2288 {
2289         siginfo_t               info;
2290         int                     sig;
2291         int                     i;
2292         int                     num_active;
2293         struct fsg_buffhd       *bh;
2294         enum fsg_state          old_state;
2295         u8                      new_config;
2296         struct lun              *curlun;
2297         int                     rc;
2298         unsigned long           flags;
2299
2300         DBG(fsg, "handle_exception state: %d\n", (int)fsg->state);
2301         /* Clear the existing signals.  Anything but SIGUSR1 is converted
2302          * into a high-priority EXIT exception. */
2303         for (;;) {
2304                 sig = dequeue_signal_lock(current, &current->blocked, &info);
2305                 if (!sig)
2306                         break;
2307                 if (sig != SIGUSR1) {
2308                         if (fsg->state < FSG_STATE_EXIT)
2309                                 DBG(fsg, "Main thread exiting on signal\n");
2310                         raise_exception(fsg, FSG_STATE_EXIT);
2311                 }
2312         }
2313
2314         /* Cancel all the pending transfers */
2315         for (i = 0; i < NUM_BUFFERS; ++i) {
2316                 bh = &fsg->buffhds[i];
2317                 if (bh->inreq_busy)
2318                         usb_ep_dequeue(fsg->bulk_in, bh->inreq);
2319                 if (bh->outreq_busy)
2320                         usb_ep_dequeue(fsg->bulk_out, bh->outreq);
2321         }
2322
2323         /* Wait until everything is idle */
2324         for (;;) {
2325                 num_active = 0;
2326                 for (i = 0; i < NUM_BUFFERS; ++i) {
2327                         bh = &fsg->buffhds[i];
2328                         num_active += bh->outreq_busy;
2329                 }
2330                 if (num_active == 0)
2331                         break;
2332                 if (sleep_thread(fsg))
2333                         return;
2334         }
2335
2338         /*
2339          * Do NOT flush the FIFO after set_interface();
2340          * otherwise some data may be lost.
2341          */
2340         if ((fsg->state != FSG_STATE_CONFIG_CHANGE) ||
2341                 (fsg->new_config != 1))   {
2342                 /* Clear out the controller's fifos */
2343                 if (fsg->bulk_in_enabled)
2344                         usb_ep_fifo_flush(fsg->bulk_in);
2345                 if (fsg->bulk_out_enabled)
2346                         usb_ep_fifo_flush(fsg->bulk_out);
2347         }
2348         /* Reset the I/O buffer states and pointers, the SCSI
2349          * state, and the exception.  Then invoke the handler. */
2350         spin_lock_irqsave(&fsg->lock, flags);
2351
2352         for (i = 0; i < NUM_BUFFERS; ++i) {
2353                 bh = &fsg->buffhds[i];
2354                 bh->state = BUF_STATE_EMPTY;
2355         }
2356         fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
2357                         &fsg->buffhds[0];
2358
2359         new_config = fsg->new_config;
2360         old_state = fsg->state;
2361
2362         if (old_state == FSG_STATE_ABORT_BULK_OUT)
2363                 fsg->state = FSG_STATE_STATUS_PHASE;
2364         else {
2365                 for (i = 0; i < fsg->nluns; ++i) {
2366                         curlun = &fsg->luns[i];
2367                         curlun->prevent_medium_removal = 0;
2368                         curlun->sense_data = curlun->unit_attention_data =
2369                                         SS_NO_SENSE;
2370                         curlun->sense_data_info = 0;
2371                         curlun->info_valid = 0;
2372                 }
2373                 fsg->state = FSG_STATE_IDLE;
2374         }
2375         spin_unlock_irqrestore(&fsg->lock, flags);
2376
2377         /* Carry out any extra actions required for the exception */
2378         switch (old_state) {
2379         default:
2380                 break;
2381
2382         case FSG_STATE_ABORT_BULK_OUT:
2383                 DBG(fsg, "FSG_STATE_ABORT_BULK_OUT\n");
2384                 spin_lock_irqsave(&fsg->lock, flags);
2385                 if (fsg->state == FSG_STATE_STATUS_PHASE)
2386                         fsg->state = FSG_STATE_IDLE;
2387                 spin_unlock_irqrestore(&fsg->lock, flags);
2388                 break;
2389
2390         case FSG_STATE_RESET:
2391                 /* really not much to do here */
2392                 break;
2393
2394         case FSG_STATE_CONFIG_CHANGE:
2395                 rc = do_set_config(fsg, new_config);
2396                 if (new_config == 0) {
2397                         /* We're using the backing file */
2398                         down_read(&fsg->filesem);
2399                         fsync_all(fsg);
2400                         up_read(&fsg->filesem);
2401                 }
2402                 break;
2403
2404         case FSG_STATE_EXIT:
2405         case FSG_STATE_TERMINATED:
2406                 do_set_config(fsg, 0);                  /* Free resources */
2407                 spin_lock_irqsave(&fsg->lock, flags);
2408                 fsg->state = FSG_STATE_TERMINATED;      /* Stop the thread */
2409                 spin_unlock_irqrestore(&fsg->lock, flags);
2410                 break;
2411         }
2412 }
2413
2414
2415 /*-------------------------------------------------------------------------*/
2416
2417 static int fsg_main_thread(void *fsg_)
2418 {
2419         struct fsg_dev          *fsg = fsg_;
2420         unsigned long           flags;
2421
2422         /* Allow the thread to be killed by a signal, but set the signal mask
2423          * to block everything but INT, TERM, KILL, and USR1. */
2424         allow_signal(SIGINT);
2425         allow_signal(SIGTERM);
2426         allow_signal(SIGKILL);
2427         allow_signal(SIGUSR1);
2428
2429         /* Allow the thread to be frozen */
2430         set_freezable();
2431
2432         /* Arrange for userspace references to be interpreted as kernel
2433          * pointers.  That way we can pass a kernel pointer to a routine
2434          * that expects a __user pointer and it will work okay. */
2435         set_fs(get_ds());
2436
2437         /* The main loop */
2438         while (fsg->state != FSG_STATE_TERMINATED) {
2439                 if (exception_in_progress(fsg) || signal_pending(current)) {
2440                         handle_exception(fsg);
2441                         continue;
2442                 }
2443
2444                 if (!fsg->running) {
2445                         sleep_thread(fsg);
2446                         continue;
2447                 }
2448
2449                 if (get_next_command(fsg))
2450                         continue;
2451
2452                 spin_lock_irqsave(&fsg->lock, flags);
2453                 if (!exception_in_progress(fsg))
2454                         fsg->state = FSG_STATE_DATA_PHASE;
2455                 spin_unlock_irqrestore(&fsg->lock, flags);
2456
2457                 if (do_scsi_command(fsg) || finish_reply(fsg))
2458                         continue;
2459
2460                 spin_lock_irqsave(&fsg->lock, flags);
2461                 if (!exception_in_progress(fsg))
2462                         fsg->state = FSG_STATE_STATUS_PHASE;
2463                 spin_unlock_irqrestore(&fsg->lock, flags);
2464
2465                 if (send_status(fsg))
2466                         continue;
2467
2468                 spin_lock_irqsave(&fsg->lock, flags);
2469                 if (!exception_in_progress(fsg))
2470                         fsg->state = FSG_STATE_IDLE;
2471                 spin_unlock_irqrestore(&fsg->lock, flags);
2472         }
2473
2474         spin_lock_irqsave(&fsg->lock, flags);
2475         fsg->thread_task = NULL;
2476         spin_unlock_irqrestore(&fsg->lock, flags);
2477
2478         /* In case we are exiting because of a signal, make sure the
2479          * backing files get closed. */
2480         if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
2481                 close_all_backing_files(fsg);
2482
2483         /* Let the unbind and cleanup routines know the thread has exited */
2484         complete_and_exit(&fsg->thread_notifier, 0);
2485 }
2486
2487
2488 /*-------------------------------------------------------------------------*/
2489
2490 /* If the next two routines are called while the gadget is registered,
2491  * the caller must own fsg->filesem for writing. */
2492
2493 static int open_backing_file(struct fsg_dev *fsg, struct lun *curlun,
2494         const char *filename)
2495 {
2496         int                             ro;
2497         struct file                     *filp = NULL;
2498         int                             rc = -EINVAL;
2499         struct inode                    *inode = NULL;
2500         loff_t                          size;
2501         loff_t                          num_sectors;
2502
2503         /* R/W if we can, R/O if we must */
2504         ro = curlun->ro;
2505         if (!ro) {
2506                 filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
2507                 if (-EROFS == PTR_ERR(filp))
2508                         ro = 1;
2509         }
2510         if (ro)
2511                 filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
2512         if (IS_ERR(filp)) {
2513                 LINFO(curlun, "unable to open backing file: %s\n", filename);
2514                 return PTR_ERR(filp);
2515         }
2516
2517         if (!(filp->f_mode & FMODE_WRITE))
2518                 ro = 1;
2519
2520         if (filp->f_path.dentry)
2521                 inode = filp->f_path.dentry->d_inode;
2522         if (inode && S_ISBLK(inode->i_mode)) {
2523                 if (bdev_read_only(inode->i_bdev))
2524                         ro = 1;
2525         } else if (!inode || !S_ISREG(inode->i_mode)) {
2526                 LINFO(curlun, "invalid file type: %s\n", filename);
2527                 goto out;
2528         }
2529
2530         /* If we can't read the file, it's no good.
2531          * If we can't write the file, use it read-only. */
2532         if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
2533                 LINFO(curlun, "file not readable: %s\n", filename);
2534                 goto out;
2535         }
2536         if (!(filp->f_op->write || filp->f_op->aio_write))
2537                 ro = 1;
2538
2539         size = i_size_read(inode->i_mapping->host);
2540         if (size < 0) {
2541                 LINFO(curlun, "unable to find file size: %s\n", filename);
2542                 rc = (int) size;
2543                 goto out;
2544         }
2545         num_sectors = size >> 9;        /* File size in 512-byte sectors */
2546         if (num_sectors == 0) {
2547                 LINFO(curlun, "file too small: %s\n", filename);
2548                 rc = -ETOOSMALL;
2549                 goto out;
2550         }
2551
2552         get_file(filp);
2553         curlun->ro = ro;
2554         curlun->filp = filp;
2555         curlun->file_length = size;
2556         curlun->unflushed_bytes = 0;
2557         curlun->num_sectors = num_sectors;
2558         LDBG(curlun, "open backing file: %s size: %lld num_sectors: %lld\n",
2559                         filename, size, num_sectors);
2560         rc = 0;
2561         adjust_wake_lock(fsg);
2562
2563 out:
2564         filp_close(filp, current->files);
2565         return rc;
2566 }
2567
2568
2569 static void close_backing_file(struct fsg_dev *fsg, struct lun *curlun)
2570 {
2571         if (curlun->filp) {
2572                 int rc;
2573
2574                 /*
2575                  * XXX: San: Ugly hack here added to ensure that
2576                  * our pages get synced to disk.
2577                  * Also drop caches here just to be extra-safe
2578                  */
2579                 rc = vfs_fsync(curlun->filp, curlun->filp->f_path.dentry, 1);
2580                 if (rc < 0)
2581                         printk(KERN_ERR "ums: Error syncing data (%d)\n", rc);
2582                 /* drop_pagecache and drop_slab are no longer available */
2583                 /* drop_pagecache(); */
2584                 /* drop_slab(); */
2585
2586                 LDBG(curlun, "close backing file\n");
2587                 fput(curlun->filp);
2588                 curlun->filp = NULL;
2589                 adjust_wake_lock(fsg);
2590         }
2591 }
2592
2593 static void close_all_backing_files(struct fsg_dev *fsg)
2594 {
2595         int     i;
2596
2597         for (i = 0; i < fsg->nluns; ++i)
2598                 close_backing_file(fsg, &fsg->luns[i]);
2599 }
2600
2601 static ssize_t show_file(struct device *dev, struct device_attribute *attr,
2602                 char *buf)
2603 {
2604         struct lun      *curlun = dev_to_lun(dev);
2605         struct fsg_dev  *fsg = dev_get_drvdata(dev);
2606         char            *p;
2607         ssize_t         rc;
2608
2609         down_read(&fsg->filesem);
2610         if (backing_file_is_open(curlun)) {     /* Get the complete pathname */
2611                 p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
2612                 if (IS_ERR(p))
2613                         rc = PTR_ERR(p);
2614                 else {
2615                         rc = strlen(p);
2616                         memmove(buf, p, rc);
2617                         buf[rc] = '\n';         /* Add a newline */
2618                         buf[++rc] = 0;
2619                 }
2620         } else {                                /* No file, return 0 bytes */
2621                 *buf = 0;
2622                 rc = 0;
2623         }
2624         up_read(&fsg->filesem);
2625         return rc;
2626 }
2627
2628 static ssize_t store_file(struct device *dev, struct device_attribute *attr,
2629                 const char *buf, size_t count)
2630 {
2631         struct lun      *curlun = dev_to_lun(dev);
2632         struct fsg_dev  *fsg = dev_get_drvdata(dev);
2633         int             rc = 0;
2634
2635         DBG(fsg, "store_file: \"%s\"\n", buf);
2636 #if 0
2637         /* disabled because we need to allow closing the backing file if the medium was removed */
2638         if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) {
2639                 LDBG(curlun, "eject attempt prevented\n");
2640                 return -EBUSY;                          /* "Door is locked" */
2641         }
2642 #endif
2643
2644         /* Remove a trailing newline */
2645         if (count > 0 && buf[count-1] == '\n')
2646                 ((char *) buf)[count-1] = 0;
2647
2648         /* Eject current medium */
2649         down_write(&fsg->filesem);
2650         if (backing_file_is_open(curlun)) {
2651                 close_backing_file(fsg, curlun);
2652                 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
2653         }
2654
2655         /* Load new medium */
2656         if (count > 0 && buf[0]) {
2657                 rc = open_backing_file(fsg, curlun, buf);
2658                 if (rc == 0)
2659                         curlun->unit_attention_data =
2660                                         SS_NOT_READY_TO_READY_TRANSITION;
2661         }
2662         up_write(&fsg->filesem);
2663         return (rc < 0 ? rc : count);
2664 }
2665
2666
2667 static DEVICE_ATTR(file, 0444, show_file, store_file);
2668
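/* Editor's note -- illustrative sketch, not part of this driver.  Userspace
 * changes the exported medium through the per-LUN "file" attribute handled
 * by show_file()/store_file() above: writing a path inserts that backing
 * file, writing an empty string ejects it.  The sysfs path below depends on
 * the parent platform device and is an assumption. */
#if 0
#include <stdio.h>

static int set_backing_file(const char *path_or_empty)
{
        /* hypothetical path; the real one depends on the parent device */
        FILE *f = fopen("/sys/devices/platform/usb_mass_storage/lun0/file", "w");

        if (!f)
                return -1;
        fprintf(f, "%s\n", path_or_empty);      /* store_file() strips '\n' */
        return fclose(f);
}
#endif
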
2669 /*-------------------------------------------------------------------------*/
2670
2671 static void fsg_release(struct kref *ref)
2672 {
2673         struct fsg_dev  *fsg = container_of(ref, struct fsg_dev, ref);
2674
2675         kfree(fsg->luns);
2676         kfree(fsg);
2677 }
2678
2679 static void lun_release(struct device *dev)
2680 {
2681         struct fsg_dev  *fsg = dev_get_drvdata(dev);
2682
2683         kref_put(&fsg->ref, fsg_release);
2684 }
2685
2686
2687 /*-------------------------------------------------------------------------*/
2688
2689 static int __init fsg_alloc(void)
2690 {
2691         struct fsg_dev          *fsg;
2692
2693         fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
2694         if (!fsg)
2695                 return -ENOMEM;
2696         spin_lock_init(&fsg->lock);
2697         init_rwsem(&fsg->filesem);
2698         kref_init(&fsg->ref);
2699         init_completion(&fsg->thread_notifier);
2700
2701         the_fsg = fsg;
2702         return 0;
2703 }
2704
2705 static ssize_t print_switch_name(struct switch_dev *sdev, char *buf)
2706 {
2707         return sprintf(buf, "%s\n", DRIVER_NAME);
2708 }
2709
2710 static ssize_t print_switch_state(struct switch_dev *sdev, char *buf)
2711 {
2712         struct fsg_dev  *fsg = container_of(sdev, struct fsg_dev, sdev);
2713         return sprintf(buf, "%s\n", (fsg->config ? "online" : "offline"));
2714 }
2715
2716 static void
2717 fsg_function_unbind(struct usb_configuration *c, struct usb_function *f)
2718 {
2719         struct fsg_dev  *fsg = func_to_dev(f);
2720         int                     i;
2721         struct lun              *curlun;
2722
2723         DBG(fsg, "fsg_function_unbind\n");
2724         clear_bit(REGISTERED, &fsg->atomic_bitflags);
2725
2726         /* Unregister the sysfs attribute files and the LUNs */
2727         for (i = 0; i < fsg->nluns; ++i) {
2728                 curlun = &fsg->luns[i];
2729                 if (curlun->registered) {
2730                         device_remove_file(&curlun->dev, &dev_attr_file);
2731                         device_unregister(&curlun->dev);
2732                         curlun->registered = 0;
2733                 }
2734         }
2735
2736         /* If the thread isn't already dead, tell it to exit now */
2737         if (fsg->state != FSG_STATE_TERMINATED) {
2738                 raise_exception(fsg, FSG_STATE_EXIT);
2739                 wait_for_completion(&fsg->thread_notifier);
2740
2741                 /* The cleanup routine waits for this completion also */
2742                 complete(&fsg->thread_notifier);
2743         }
2744
2745         /* Free the data buffers */
2746         for (i = 0; i < NUM_BUFFERS; ++i)
2747                 kfree(fsg->buffhds[i].buf);
2748         switch_dev_unregister(&fsg->sdev);
2749 }
2750
2751 static int
2752 fsg_function_bind(struct usb_configuration *c, struct usb_function *f)
2753 {
2754         struct usb_composite_dev *cdev = c->cdev;
2755         struct fsg_dev  *fsg = func_to_dev(f);
2756         int                     rc;
2757         int                     i;
2758         int                     id;
2759         struct lun              *curlun;
2760         struct usb_ep           *ep;
2761         char                    *pathbuf, *p;
2762
2763         fsg->cdev = cdev;
2764         DBG(fsg, "fsg_function_bind\n");
2765
2766         dev_attr_file.attr.mode = 0644;
2767
2768         /* Find out how many LUNs there should be */
2769         i = fsg->nluns;
2770         if (i == 0)
2771                 i = 1;
2772         if (i > MAX_LUNS) {
2773                 ERROR(fsg, "invalid number of LUNs: %d\n", i);
2774                 rc = -EINVAL;
2775                 goto out;
2776         }
2777
2778         /* Create the LUNs and register the LUN devices in sysfs;
2779          * backing files are attached later via each LUN's "file" attribute. */
2780         fsg->luns = kzalloc(i * sizeof(struct lun), GFP_KERNEL);
2781         if (!fsg->luns) {
2782                 rc = -ENOMEM;
2783                 goto out;
2784         }
2785         fsg->nluns = i;
2786
2787         for (i = 0; i < fsg->nluns; ++i) {
2788                 curlun = &fsg->luns[i];
2789                 curlun->ro = 0;
2790                 curlun->dev.release = lun_release;
2791                 /* use "usb_mass_storage" platform device as parent if available */
2792                 if (fsg->pdev)
2793                         curlun->dev.parent = &fsg->pdev->dev;
2794                 else
2795                         curlun->dev.parent = &cdev->gadget->dev;
2796                 dev_set_drvdata(&curlun->dev, fsg);
2797                 dev_set_name(&curlun->dev, "lun%d", i);
2798
2799                 rc = device_register(&curlun->dev);
2800                 if (rc != 0) {
2801                         INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
2802                         goto out;
2803                 }
2804                 rc = device_create_file(&curlun->dev, &dev_attr_file);
2805                 if (rc != 0) {
2806                         ERROR(fsg, "device_create_file failed: %d\n", rc);
2807                         device_unregister(&curlun->dev);
2808                         goto out;
2809                 }
2810                 curlun->registered = 1;
2811                 kref_get(&fsg->ref);
2812         }
2813
2814         /* allocate interface ID(s) */
2815         id = usb_interface_id(c, f);
2816         if (id < 0) {
2817                 rc = id;
                goto out;       /* unwind the LUN devices registered above */
        }
2818         intf_desc.bInterfaceNumber = id;
2819
2820         ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_in_desc);
2821         if (!ep)
2822                 goto autoconf_fail;
2823         ep->driver_data = fsg;          /* claim the endpoint */
2824         fsg->bulk_in = ep;
2825
2826         ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_out_desc);
2827         if (!ep)
2828                 goto autoconf_fail;
2829         ep->driver_data = fsg;          /* claim the endpoint */
2830         fsg->bulk_out = ep;
2831
2832         rc = -ENOMEM;
2833
2834         if (gadget_is_dualspeed(cdev->gadget)) {
2835                 /* Assume endpoint addresses are the same for both speeds */
2836                 hs_bulk_in_desc.bEndpointAddress =
2837                                 fs_bulk_in_desc.bEndpointAddress;
2838                 hs_bulk_out_desc.bEndpointAddress =
2839                                 fs_bulk_out_desc.bEndpointAddress;
2840
2841                 f->hs_descriptors = hs_function;
2842         }
2843
2844         /* Allocate the data buffers */
2845         for (i = 0; i < NUM_BUFFERS; ++i) {
2846                 struct fsg_buffhd       *bh = &fsg->buffhds[i];
2847
2848                 /* Allocate for the bulk-in endpoint.  We assume that
2849                  * the buffer will also work with the bulk-out
2850                  * endpoint. */
2851                 bh->buf = kmalloc(fsg->buf_size, GFP_KERNEL);
2852                 if (!bh->buf)
2853                         goto out;
2854                 bh->next = bh + 1;
2855         }
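        /* Link the last buffer head back to the first, forming the
         * circular list used by the worker thread. */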
2856         fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0];
2857
2858         fsg->thread_task = kthread_create(fsg_main_thread, fsg,
2859                         "%s", shortname);
2860         if (IS_ERR(fsg->thread_task)) {
2861                 rc = PTR_ERR(fsg->thread_task);
2862                 ERROR(fsg, "kthread_create failed: %d\n", rc);
2863                 goto out;
2864         }
2865
2866         INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);
2867
2868         pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
2869         for (i = 0; i < fsg->nluns; ++i) {
2870                 curlun = &fsg->luns[i];
2871                 if (backing_file_is_open(curlun)) {
2872                         p = NULL;
2873                         if (pathbuf) {
2874                                 p = d_path(&curlun->filp->f_path,
2875                                            pathbuf, PATH_MAX);
2876                                 if (IS_ERR(p))
2877                                         p = NULL;
2878                         }
2879                         LINFO(curlun, "ro=%d, file: %s\n",
2880                                         curlun->ro, (p ? p : "(error)"));
2881                 }
2882         }
2883         kfree(pathbuf);
2884
2885         set_bit(REGISTERED, &fsg->atomic_bitflags);
2886
2887         /* Tell the thread to start working */
2888         wake_up_process(fsg->thread_task);
2889         return 0;
2890
2891 autoconf_fail:
2892         ERROR(fsg, "unable to autoconfigure all endpoints\n");
2893         rc = -ENOTSUPP;
2894
2895 out:
2896         DBG(fsg, "fsg_function_bind failed: %d\n", rc);
2897         fsg->state = FSG_STATE_TERMINATED;      /* The thread is dead */
2898         fsg_function_unbind(c, f);
2899         close_all_backing_files(fsg);
2900         return rc;
2901 }
2902
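/* SET_INTERFACE from the host: note the new configuration state and let
 * the worker thread handle the change via a CONFIG_CHANGE exception. */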
2903 static int fsg_function_set_alt(struct usb_function *f,
2904                 unsigned intf, unsigned alt)
2905 {
2906         struct fsg_dev  *fsg = func_to_dev(f);
2907         DBG(fsg, "fsg_function_set_alt intf: %d alt: %d\n", intf, alt);
2908         fsg->new_config = 1;
2909         raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
2910         return 0;
2911 }
2912
2913 static void fsg_function_disable(struct usb_function *f)
2914 {
2915         struct fsg_dev  *fsg = func_to_dev(f);
2916         DBG(fsg, "fsg_function_disable\n");
2917         fsg->new_config = 0;
2918         raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
2919 }
2920
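/* Platform driver probe: pick up board-specific vendor/product/release
 * information and the LUN count from the "usb_mass_storage" platform
 * data, if any is provided. */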
2921 static int fsg_probe(struct platform_device *pdev)
2922 {
2923         struct usb_mass_storage_platform_data *pdata = pdev->dev.platform_data;
2924         struct fsg_dev *fsg = the_fsg;
2925
2926         fsg->pdev = pdev;
2927         printk(KERN_INFO "fsg_probe pdata: %p\n", pdata);
2928
2929         if (pdata) {
2930                 if (pdata->vendor)
2931                         fsg->vendor = pdata->vendor;
2932
2933                 if (pdata->product)
2934                         fsg->product = pdata->product;
2935
2936                 if (pdata->release)
2937                         fsg->release = pdata->release;
2938                 fsg->nluns = pdata->nluns;
2939         }
2940
2941         return 0;
2942 }
2943
2944 static struct platform_driver fsg_platform_driver = {
2945         .driver = { .name = DRIVER_NAME, },
2946         .probe = fsg_probe,
2947 };
2948
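/* Create the mass storage function and hook it into the configuration:
 * allocate the fsg_dev, register the switch device and platform driver,
 * set up the wake lock, and add the function to the composite device. */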
2949 int mass_storage_bind_config(struct usb_configuration *c)
2950 {
2951         int             rc;
2952         struct fsg_dev  *fsg;
2953
2954         printk(KERN_INFO "mass_storage_bind_config\n");
2955         rc = fsg_alloc();
2956         if (rc)
2957                 return rc;
2958         fsg = the_fsg;
2959
2965         the_fsg->buf_size = BULK_BUFFER_SIZE;
2966         the_fsg->sdev.name = DRIVER_NAME;
2967         the_fsg->sdev.print_name = print_switch_name;
2968         the_fsg->sdev.print_state = print_switch_state;
2969         rc = switch_dev_register(&the_fsg->sdev);
2970         if (rc < 0)
2971                 goto err_switch_dev_register;
2972
2973         rc = platform_driver_register(&fsg_platform_driver);
2974         if (rc != 0)
2975                 goto err_platform_driver_register;
2976
2977         wake_lock_init(&the_fsg->wake_lock, WAKE_LOCK_SUSPEND,
2978                            "usb_mass_storage");
2979
2980         fsg->cdev = c->cdev;
2981         fsg->function.name = shortname;
2982         fsg->function.descriptors = fs_function;
2983         fsg->function.bind = fsg_function_bind;
2984         fsg->function.unbind = fsg_function_unbind;
2985         fsg->function.setup = fsg_function_setup;
2986         fsg->function.set_alt = fsg_function_set_alt;
2987         fsg->function.disable = fsg_function_disable;
2988
2989         rc = usb_add_function(c, &fsg->function);
2990         if (rc != 0)
2991                 goto err_usb_add_function;
2992
2994         return 0;
2995
2996 err_usb_add_function:
2997         wake_lock_destroy(&the_fsg->wake_lock);
2998         platform_driver_unregister(&fsg_platform_driver);
2999 err_platform_driver_register:
3000         switch_dev_unregister(&the_fsg->sdev);
3001 err_switch_dev_register:
3002         kref_put(&the_fsg->ref, fsg_release);
3003
3004         return rc;
3005 }
3006
3007 static struct android_usb_function mass_storage_function = {
3008         .name = "usb_mass_storage",
3009         .bind_config = mass_storage_bind_config,
3010 };
3011
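/* Module init: register the function with the Android composite gadget
 * framework; mass_storage_bind_config() runs when the gadget
 * configuration is set up. */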
3012 static int __init init(void)
3013 {
3014         printk(KERN_INFO "f_mass_storage init\n");
3015         android_register_function(&mass_storage_function);
3016         return 0;
3017 }
3018 module_init(init);
3019