/*
 * f_mass_storage.c -- Mass Storage USB Composite Function
 *
 * Copyright (C) 2003-2008 Alan Stern
 * Copyright (C) 2009 Samsung Electronics
 * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


/*
 * The Mass Storage Function acts as a USB Mass Storage device,
 * appearing to the host as a disk drive or as a CD-ROM drive.  In
 * addition to providing an example of a genuinely useful composite
 * function for a USB device, it also illustrates a technique of
 * double-buffering for increased throughput.
 *
 * The function supports multiple logical units (LUNs).  Backing storage
 * for each LUN is provided by a regular file or a block device.
 * Access for each LUN can be limited to read-only.  Moreover, the
 * function can indicate that a LUN is removable and/or a CD-ROM.  (The
 * latter implies read-only access.)
 *
 * MSF is configured by specifying a fsg_config structure.  It has the
 * following fields:
 *
 *	nluns		Number of LUNs the function has (anywhere from 1
 *			to FSG_MAX_LUNS, which is 8).
 *	luns		An array of LUN configuration values.  This
 *			should be filled for each LUN that the
 *			function will include (i.e. for "nluns"
 *			LUNs).  Each element of the array has
 *			the following fields:
 *	->filename	The path to the backing file for the LUN.
 *			Required if the LUN is not marked as
 *			removable.
 *	->ro		Flag specifying that access to the LUN shall be
 *			read-only.  This is implied if CD-ROM
 *			emulation is enabled, as well as when
 *			it was impossible to open "filename"
 *			in R/W mode.
 *	->removable	Flag specifying that the LUN shall be indicated as
 *			being removable.
 *	->cdrom		Flag specifying that the LUN shall be reported as
 *			being a CD-ROM.
 *
 *	lun_name_format	A printf-like format for names of the LUN
 *			devices.  This determines how the
 *			directory in sysfs will be named.
 *			Unless you are using several MSFs in
 *			a single gadget (as opposed to a single
 *			MSF in many configurations) you may
 *			leave it as NULL (in which case
 *			"lun%d" will be used).  In the format
 *			you can use "%d" to index LUNs for
 *			MSFs with more than one LUN.  (Beware
 *			that there is only one integer given
 *			as an argument for the format and
 *			specifying an invalid format may cause
 *			unspecified behaviour.)
 *	thread_name	Name of the kernel thread process used by the
 *			MSF.  You can safely set it to NULL
 *			(in which case the default "file-storage"
 *			will be used).
 *
 *	vendor_name
 *	product_name
 *	release		Information used as a reply to the INQUIRY
 *			request.  To use the defaults, set them to NULL,
 *			NULL and 0xffff respectively.  The first
 *			field should be 8 and the second 16
 *			characters or less.
 *
 *	can_stall	Set to permit the function to halt bulk endpoints.
 *			Disabled on some USB devices known not
 *			to work correctly.  You should set it
 *			to true.
 *
 * If "removable" is not set for a LUN then a backing file must be
 * specified.  If it is set, then a NULL filename means the LUN's medium
 * is not loaded (an empty string as "filename" in the fsg_config
 * structure causes an error).  The CD-ROM emulation includes a single
 * data track and no audio tracks; hence there need be only one
 * backing file per LUN.  Note also that the CD-ROM block length is
 * set to 512 rather than the more common value 2048.  (A sketch of a
 * typical fsg_config initialisation follows this comment.)
 *
 *
 * MSF includes support for module parameters.  If the gadget using it
 * decides to use them, the following module parameters will be
 * available:
 *
 *	file=filename[,filename...]
 *			Names of the files or block devices used for
 *			backing storage.
 *	ro=b[,b...]	Default false, boolean for read-only access.
 *	removable=b[,b...]
 *			Default true, boolean for removable media.
 *	cdrom=b[,b...]	Default false, boolean for whether to emulate
 *			a CD-ROM drive.
 *	luns=N		Default N = number of filenames, number of
 *			LUNs to support.
 *	stall		Default determined according to the type of
 *			USB device controller (usually true),
 *			boolean to permit the driver to halt
 *			bulk endpoints.
 *
 * The module parameters may be prefixed with some string.  You need
 * to consult the gadget's documentation or source to verify whether it
 * is using those module parameters and, if it does, what the prefixes
 * are (look for the FSG_MODULE_PARAMETERS() macro usage; what's inside
 * it is the prefix).
 *
 *
 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
 * needed.  The memory requirement amounts to two 16K buffers, size
 * configurable by a parameter.  Support is included for both
 * full-speed and high-speed operation.
 *
 * Note that the driver is slightly non-portable in that it assumes a
 * single memory/DMA buffer will be useable for bulk-in, bulk-out, and
 * interrupt-in endpoints.  With most device controllers this isn't an
 * issue, but there may be some with hardware restrictions that prevent
 * a buffer from being used by more than one endpoint.
 *
 *
 * The pathnames of the backing files and the ro settings are
 * available in the attribute files "file" and "ro" in the lun<n> (or,
 * to be more precise, in a directory whose name comes from the
 * "lun_name_format" option!) subdirectory of the gadget's sysfs
 * directory.  If the "removable" option is set, writing to these
 * files will simulate ejecting/loading the medium (writing an empty
 * line means eject) and adjusting a write-enable tab.  Changes to the
 * ro setting are not allowed when the medium is loaded or if CD-ROM
 * emulation is being used.
 *
 *
 * This function is heavily based on "File-backed Storage Gadget" by
 * Alan Stern, which in turn is heavily based on "Gadget Zero" by David
 * Brownell.  The driver's SCSI command interface was based on the
 * "Information technology - Small Computer System Interface - 2"
 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
 * was based on the "Universal Serial Bus Mass Storage Class UFI
 * Command Specification" document, Revision 1.0, December 14, 1998,
 * available at
 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
 */


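/*
 * Editorial sketch (compiled out): roughly how a hypothetical gadget
 * might fill in the fsg_config structure described above for a single
 * removable, read-write LUN backed by a regular file.  The backing
 * path, the inquiry strings and the callback are invented examples,
 * and the call that actually hands the structure over to MSF is
 * omitted.
 */
#if 0
static int example_thread_exits(struct fsg_common *common)
{
	/* Returning a value lower than zero would make MSF force-eject
	 * all the LUNs it operates on. */
	return 0;
}

static void example_fill_config(struct fsg_config *cfg)
{
	memset(cfg, 0, sizeof *cfg);

	cfg->nluns = 1;
	cfg->luns[0].filename  = "/root/backing.img";	/* hypothetical */
	cfg->luns[0].ro        = 0;
	cfg->luns[0].removable = 1;
	cfg->luns[0].cdrom     = 0;

	cfg->lun_name_format = NULL;	/* default: "lun%d" */
	cfg->thread_name     = NULL;	/* default: "file-storage" */

	cfg->vendor_name  = "Example";		/*  8 characters or less */
	cfg->product_name = "Example Disk";	/* 16 characters or less */
	cfg->release      = 0xffff;		/* use the default */

	cfg->can_stall    = 1;
	cfg->thread_exits = example_thread_exits;
	cfg->private_data = NULL;
}
#endif
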
/*
 * Driver Design
 *
 * The MSF is fairly straightforward.  There is a main kernel
 * thread that handles most of the work.  Interrupt routines field
 * callbacks from the controller driver: bulk- and interrupt-request
 * completion notifications, endpoint-0 events, and disconnect events.
 * Completion events are passed to the main thread by wakeup calls.  Many
 * ep0 requests are handled at interrupt time, but SetInterface,
 * SetConfiguration, and device reset requests are forwarded to the
 * thread in the form of "exceptions" using SIGUSR1 signals (since they
 * should interrupt any ongoing file I/O operations).
 *
 * The thread's main routine implements the standard command/data/status
 * parts of a SCSI interaction.  It and its subroutines are full of tests
 * for pending signals/exceptions -- all this polling is necessary since
 * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
 * indication that the driver really wants to be running in userspace.)
 * An important point is that so long as the thread is alive it keeps an
 * open reference to the backing file.  This will prevent unmounting
 * the backing file's underlying filesystem and could cause problems
 * during system shutdown, for example.  To prevent such problems, the
 * thread catches INT, TERM, and KILL signals and converts them into
 * an EXIT exception.
 *
 * In normal operation the main thread is started during the gadget's
 * fsg_bind() callback and stopped during fsg_unbind().  But it can
 * also exit when it receives a signal, and there's no point leaving
 * the gadget running when the thread is dead.  As of this moment, MSF
 * provides no way to deregister the gadget when the thread dies -- maybe
 * a callback function is needed.
 *
 * To provide maximum throughput, the driver uses a circular pipeline of
 * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
 * arbitrarily long; in practice the benefits don't justify having more
 * than 2 stages (i.e., double buffering).  But it helps to think of the
 * pipeline as being a long one.  Each buffer head contains a bulk-in and
 * a bulk-out request pointer (since the buffer can be used for both
 * output and input -- directions always are given from the host's
 * point of view) as well as a pointer to the buffer and various state
 * variables.
 *
 * Use of the pipeline follows a simple protocol.  There is a variable
 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
 * At any time that buffer head may still be in use from an earlier
 * request, so each buffer head has a state variable indicating whether
 * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
 * buffer head to be EMPTY, filling the buffer either by file I/O or by
 * USB I/O (during which the buffer head is BUSY), and marking the buffer
 * head FULL when the I/O is complete.  Then the buffer will be emptied
 * (again possibly by USB I/O, during which it is marked BUSY) and
 * finally marked EMPTY again (possibly by a completion routine).  (A
 * sketch of this cycle follows this comment.)
 *
 * A module parameter tells the driver to avoid stalling the bulk
 * endpoints wherever the transport specification allows.  This is
 * necessary for some UDCs like the SuperH, which cannot reliably clear a
 * halt on a bulk endpoint.  However, under certain circumstances the
 * Bulk-only specification requires a stall.  In such cases the driver
 * will halt the endpoint and set a flag indicating that it should clear
 * the halt in software during the next device reset.  Hopefully this
 * will permit everything to work correctly.  Furthermore, although the
 * specification allows the bulk-out endpoint to halt when the host sends
 * too much data, implementing this would cause an unavoidable race.
 * The driver will always use the "no-stall" approach for OUT transfers.
 *
 * One subtle point concerns sending status-stage responses for ep0
 * requests.  Some of these requests, such as device reset, can involve
 * interrupting an ongoing file I/O operation, which might take an
 * arbitrarily long time.  During that delay the host might give up on
 * the original ep0 request and issue a new one.  When that happens the
 * driver should not notify the host about completion of the original
 * request, as the host will no longer be waiting for it.  So the driver
 * assigns to each ep0 request a unique tag, and it keeps track of the
 * tag value of the request associated with a long-running exception
 * (device-reset, interface-change, or configuration-change).  When the
 * exception handler is finished, the status-stage response is submitted
 * only if the current ep0 request tag is equal to the exception request
 * tag.  Thus only the most recently received ep0 request will get a
 * status-stage response.
 *
 * Warning: This driver source file is too long.  It ought to be split up
 * into a header file plus about 3 separate .c files, to handle the details
 * of the Gadget, USB Mass Storage, and SCSI protocols.
 */


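/*
 * Editorial sketch (compiled out) of the producer side of the pipeline
 * protocol described above, modelled on do_read() below: wait for the
 * next buffer head to become EMPTY, fill it (while file or USB I/O is
 * in flight it would be BUSY), mark it FULL and advance to the next
 * buffer head.  fill_one_buffer() and the "fill bh->buf" step are
 * illustrative placeholders, not part of the driver.
 */
#if 0
static int fill_one_buffer(struct fsg_common *common, unsigned int nbytes)
{
	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
	int			rc;

	/* Wait until the consumer has drained this buffer head */
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	/* ... fill bh->buf with up to nbytes bytes here ... */

	/* Hand the buffer over and move on to the next buffer head */
	bh->inreq->length = nbytes;
	bh->state = BUF_STATE_FULL;
	common->next_buffhd_to_fill = bh->next;
	return 0;
}
#endif
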
/* #define VERBOSE_DEBUG */
/* #define DUMP_MSGS */


#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/dcache.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/limits.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/freezer.h>
#include <linux/utsname.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "gadget_chips.h"



/*------------------------------------------------------------------------*/

#define FSG_DRIVER_DESC		"Mass Storage Function"
#define FSG_DRIVER_VERSION	"2009/09/11"

static const char fsg_string_interface[] = "Mass Storage";


#define FSG_NO_INTR_EP 1
#define FSG_NO_DEVICE_STRINGS    1
#define FSG_NO_OTG               1
#define FSG_NO_INTR_EP 1

#include "storage_common.c"


/*-------------------------------------------------------------------------*/

struct fsg_dev;


/* Data shared by all the FSG instances. */
struct fsg_common {
	struct usb_gadget	*gadget;
	struct fsg_dev		*fsg;
	struct fsg_dev		*prev_fsg;

	/* filesem protects: backing files in use */
	struct rw_semaphore	filesem;

	/* lock protects: state, all the req_busy's */
	spinlock_t		lock;

	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
	struct usb_request	*ep0req;	/* Copy of cdev->req */
	unsigned int		ep0_req_tag;
	const char		*ep0req_name;

	struct fsg_buffhd	*next_buffhd_to_fill;
	struct fsg_buffhd	*next_buffhd_to_drain;
	struct fsg_buffhd	buffhds[FSG_NUM_BUFFERS];

	int			cmnd_size;
	u8			cmnd[MAX_COMMAND_SIZE];

	unsigned int		nluns;
	unsigned int		lun;
	struct fsg_lun		*luns;
	struct fsg_lun		*curlun;

	unsigned int		bulk_out_maxpacket;
	enum fsg_state		state;		/* For exception handling */
	unsigned int		exception_req_tag;

	u8			config, new_config;
	enum data_direction	data_dir;
	u32			data_size;
	u32			data_size_from_cmnd;
	u32			tag;
	u32			residue;
	u32			usb_amount_left;

	unsigned int		can_stall:1;
	unsigned int		free_storage_on_release:1;
	unsigned int		phase_error:1;
	unsigned int		short_packet_received:1;
	unsigned int		bad_lun_okay:1;
	unsigned int		running:1;

	int			thread_wakeup_needed;
	struct completion	thread_notifier;
	struct task_struct	*thread_task;

	/* Callback function to call when thread exits. */
	int			(*thread_exits)(struct fsg_common *common);
	/* Gadget's private data. */
	void			*private_data;

	/* Vendor (8 chars), product (16 chars), release (4
	 * hexadecimal digits) and NUL byte */
	char inquiry_string[8 + 16 + 4 + 1];

	struct kref		ref;
};


struct fsg_config {
	unsigned nluns;
	struct fsg_lun_config {
		const char *filename;
		char ro;
		char removable;
		char cdrom;
	} luns[FSG_MAX_LUNS];

	const char		*lun_name_format;
	const char		*thread_name;

	/* Callback function to call when thread exits.  If no
	 * callback is set or it returns a value lower than zero, MSF
	 * will force-eject all LUNs it operates on (including those
	 * marked as non-removable or with prevent_medium_removal flag
	 * set). */
	int			(*thread_exits)(struct fsg_common *common);
	/* Gadget's private data. */
	void			*private_data;

	const char *vendor_name;		/*  8 characters or less */
	const char *product_name;		/* 16 characters or less */
	u16 release;

	char			can_stall;
};


struct fsg_dev {
	struct usb_function	function;
	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
	struct fsg_common	*common;

	u16			interface_number;

	unsigned int		bulk_in_enabled:1;
	unsigned int		bulk_out_enabled:1;

	unsigned long		atomic_bitflags;
#define IGNORE_BULK_OUT		0

	struct usb_ep		*bulk_in;
	struct usb_ep		*bulk_out;
};


static inline int __fsg_is_set(struct fsg_common *common,
			       const char *func, unsigned line)
{
	if (common->fsg)
		return 1;
	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
	return 0;
}

#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))


static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
{
	return container_of(f, struct fsg_dev, function);
}


typedef void (*fsg_routine_t)(struct fsg_dev *);

static int exception_in_progress(struct fsg_common *common)
{
	return common->state > FSG_STATE_IDLE;
}

/* Make bulk-out requests be divisible by the maxpacket size */
static void set_bulk_out_req_length(struct fsg_common *common,
		struct fsg_buffhd *bh, unsigned int length)
{
	unsigned int	rem;

	bh->bulk_out_intended_length = length;
	rem = length % common->bulk_out_maxpacket;
	if (rem > 0)
		length += common->bulk_out_maxpacket - rem;
	bh->outreq->length = length;
}

/*-------------------------------------------------------------------------*/

static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
	const char	*name;

	if (ep == fsg->bulk_in)
		name = "bulk-in";
	else if (ep == fsg->bulk_out)
		name = "bulk-out";
	else
		name = ep->name;
	DBG(fsg, "%s set halt\n", name);
	return usb_ep_set_halt(ep);
}


/*-------------------------------------------------------------------------*/

/* These routines may be called in process context or in_irq */

/* Caller must hold common->lock */
static void wakeup_thread(struct fsg_common *common)
{
	/* Tell the main thread that something has happened */
	common->thread_wakeup_needed = 1;
	if (common->thread_task)
		wake_up_process(common->thread_task);
}


static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
{
	unsigned long		flags;

	/* Do nothing if a higher-priority exception is already in progress.
	 * If a lower-or-equal priority exception is in progress, preempt it
	 * and notify the main thread by sending it a signal. */
	spin_lock_irqsave(&common->lock, flags);
	if (common->state <= new_state) {
		common->exception_req_tag = common->ep0_req_tag;
		common->state = new_state;
		if (common->thread_task)
			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
				      common->thread_task);
	}
	spin_unlock_irqrestore(&common->lock, flags);
}


/*-------------------------------------------------------------------------*/

static int ep0_queue(struct fsg_common *common)
{
	int	rc;

	rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
	common->ep0->driver_data = common;
	if (rc != 0 && rc != -ESHUTDOWN) {
		/* We can't do much more than wait for a reset */
		WARNING(common, "error in submission: %s --> %d\n",
			common->ep0->name, rc);
	}
	return rc;
}

/*-------------------------------------------------------------------------*/

/* Bulk and interrupt endpoint completion handlers.
 * These always run in_irq. */

static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common	*common = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	if (req->status || req->actual != req->length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	smp_wmb();
	spin_lock(&common->lock);
	bh->inreq_busy = 0;
	bh->state = BUF_STATE_EMPTY;
	wakeup_thread(common);
	spin_unlock(&common->lock);
}

static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_common	*common = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	dump_msg(common, "bulk-out", req->buf, req->actual);
	if (req->status || req->actual != bh->bulk_out_intended_length)
		DBG(common, "%s --> %d, %u/%u\n", __func__,
		    req->status, req->actual,
		    bh->bulk_out_intended_length);
	if (req->status == -ECONNRESET)		/* Request was cancelled */
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	smp_wmb();
	spin_lock(&common->lock);
	bh->outreq_busy = 0;
	bh->state = BUF_STATE_FULL;
	wakeup_thread(common);
	spin_unlock(&common->lock);
}


/*-------------------------------------------------------------------------*/

/* Ep0 class-specific handlers.  These always run in_irq. */

static int fsg_setup(struct usb_function *f,
		const struct usb_ctrlrequest *ctrl)
{
	struct fsg_dev		*fsg = fsg_from_func(f);
	struct usb_request	*req = fsg->common->ep0req;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u16			w_length = le16_to_cpu(ctrl->wLength);

	if (!fsg->common->config)
		return -EOPNOTSUPP;

	switch (ctrl->bRequest) {

	case USB_BULK_RESET_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0)
			return -EDOM;

		/* Raise an exception to stop the current operation
		 * and reinitialize our state. */
		DBG(fsg, "bulk reset request\n");
		raise_exception(fsg->common, FSG_STATE_RESET);
		return DELAYED_STATUS;

	case USB_BULK_GET_MAX_LUN_REQUEST:
		if (ctrl->bRequestType !=
		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
			break;
		if (w_index != fsg->interface_number || w_value != 0)
			return -EDOM;
		VDBG(fsg, "get max LUN\n");
		*(u8 *) req->buf = fsg->common->nluns - 1;

		/* Respond with data/status */
		req->length = min((u16)1, w_length);
		fsg->common->ep0req_name =
			ctrl->bRequestType & USB_DIR_IN ? "ep0-in" : "ep0-out";
		return ep0_queue(fsg->common);
	}

	VDBG(fsg,
	     "unknown class-specific control req "
	     "%02x.%02x v%04x i%04x l%u\n",
	     ctrl->bRequestType, ctrl->bRequest,
	     le16_to_cpu(ctrl->wValue), w_index, w_length);
	return -EOPNOTSUPP;
}


/*-------------------------------------------------------------------------*/

/* All the following routines run in process context */


/* Use this for bulk or interrupt transfers, not ep0 */
static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
		struct usb_request *req, int *pbusy,
		enum fsg_buffer_state *state)
{
	int	rc;

	if (ep == fsg->bulk_in)
		dump_msg(fsg, "bulk-in", req->buf, req->length);

	spin_lock_irq(&fsg->common->lock);
	*pbusy = 1;
	*state = BUF_STATE_BUSY;
	spin_unlock_irq(&fsg->common->lock);
	rc = usb_ep_queue(ep, req, GFP_KERNEL);
	if (rc != 0) {
		*pbusy = 0;
		*state = BUF_STATE_EMPTY;

		/* We can't do much more than wait for a reset */

		/* Note: currently the net2280 driver fails zero-length
		 * submissions if DMA is enabled. */
		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
					  req->length == 0))
			WARNING(fsg, "error in submission: %s --> %d\n",
				ep->name, rc);
	}
}

#define START_TRANSFER_OR(common, ep_name, req, pbusy, state)		\
	if (fsg_is_set(common))						\
		start_transfer((common)->fsg, (common)->fsg->ep_name,	\
			       req, pbusy, state);			\
	else

#define START_TRANSFER(common, ep_name, req, pbusy, state)		\
	START_TRANSFER_OR(common, ep_name, req, pbusy, state) (void)0

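/*
 * Editorial note: START_TRANSFER_OR deliberately expands to an
 * "if ... else" that ends with a dangling "else", so the statement the
 * caller writes immediately after the macro becomes the fallback that
 * runs when common->fsg is NULL.  The pattern used throughout this file
 * therefore looks like:
 *
 *	START_TRANSFER_OR(common, bulk_in, bh->inreq,
 *			  &bh->inreq_busy, &bh->state)
 *		return -EIO;
 *
 * while START_TRANSFER() supplies a harmless "(void)0" as that fallback.
 */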


static int sleep_thread(struct fsg_common *common)
{
	int	rc = 0;

	/* Wait until a signal arrives or we are woken up */
	for (;;) {
		try_to_freeze();
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
		if (common->thread_wakeup_needed)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	common->thread_wakeup_needed = 0;
	return rc;
}


/*-------------------------------------------------------------------------*/

static int do_read(struct fsg_common *common)
{
	struct fsg_lun		*curlun = common->curlun;
	u32			lba;
	struct fsg_buffhd	*bh;
	int			rc;
	u32			amount_left;
	loff_t			file_offset, file_offset_tmp;
	unsigned int		amount;
	unsigned int		partial_page;
	ssize_t			nread;

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	if (common->cmnd[0] == SC_READ_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/* We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = don't read from the
		 * cache), but we don't implement them. */
		if ((common->cmnd[1] & ~0x18) != 0) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}
	file_offset = ((loff_t) lba) << 9;

	/* Carry out the file reads */
	amount_left = common->data_size_from_cmnd;
	if (unlikely(amount_left == 0))
		return -EIO;		/* No default reply */

	for (;;) {

		/* Figure out how much we need to read:
		 * Try to read the remaining amount.
		 * But don't read more than the buffer size.
		 * And don't try to read past the end of the file.
		 * Finally, if we're not at a page boundary, don't read past
		 *	the next page.
		 * If this means reading 0 then we were asked to read past
		 *	the end of file. */
		amount = min(amount_left, FSG_BUFLEN);
		amount = min((loff_t) amount,
			     curlun->file_length - file_offset);
		partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
		if (partial_page > 0)
			amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
					     partial_page);

		/* Wait for the next buffer to become available */
		bh = common->next_buffhd_to_fill;
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(common);
			if (rc)
				return rc;
		}

		/* If we were asked to read past the end of file,
		 * end with an empty buffer. */
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info = file_offset >> 9;
			curlun->info_valid = 1;
			bh->inreq->length = 0;
			bh->state = BUF_STATE_FULL;
			break;
		}

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = vfs_read(curlun->filp,
				 (char __user *) bh->buf,
				 amount, &file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long) file_offset,
		      (int) nread);
		if (signal_pending(current))
			return -EINTR;

		if (nread < 0) {
			LDBG(curlun, "error in file read: %d\n",
			     (int) nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file read: %d/%u\n",
			     (int) nread, amount);
			nread -= (nread & 511);	/* Round down to a block */
		}
		file_offset += nread;
		amount_left -= nread;
		common->residue -= nread;
		bh->inreq->length = nread;
		bh->state = BUF_STATE_FULL;

		/* If an error occurred, report it and its position */
		if (nread < amount) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info = file_offset >> 9;
			curlun->info_valid = 1;
			break;
		}

		if (amount_left == 0)
			break;		/* No more left to read */

		/* Send this buffer and go read some more */
		bh->inreq->zero = 0;
		START_TRANSFER_OR(common, bulk_in, bh->inreq,
				  &bh->inreq_busy, &bh->state)
			/* Don't know what to do if
			 * common->fsg is NULL */
			return -EIO;
		common->next_buffhd_to_fill = bh->next;
	}

	return -EIO;		/* No default reply */
}


/*-------------------------------------------------------------------------*/

static int do_write(struct fsg_common *common)
{
	struct fsg_lun		*curlun = common->curlun;
	u32			lba;
	struct fsg_buffhd	*bh;
	int			get_some_more;
	u32			amount_left_to_req, amount_left_to_write;
	loff_t			usb_offset, file_offset, file_offset_tmp;
	unsigned int		amount;
	unsigned int		partial_page;
	ssize_t			nwritten;
	int			rc;

	if (curlun->ro) {
		curlun->sense_data = SS_WRITE_PROTECTED;
		return -EINVAL;
	}
	spin_lock(&curlun->filp->f_lock);
	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
	spin_unlock(&curlun->filp->f_lock);

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	if (common->cmnd[0] == SC_WRITE_6)
		lba = get_unaligned_be24(&common->cmnd[1]);
	else {
		lba = get_unaligned_be32(&common->cmnd[2]);

		/* We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = write directly to the
		 * medium).  We don't implement DPO; we implement FUA by
		 * performing synchronous output. */
		if (common->cmnd[1] & ~0x18) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
		if (common->cmnd[1] & 0x08) {	/* FUA */
			spin_lock(&curlun->filp->f_lock);
			curlun->filp->f_flags |= O_SYNC;
			spin_unlock(&curlun->filp->f_lock);
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* Carry out the file writes */
	get_some_more = 1;
	file_offset = usb_offset = ((loff_t) lba) << 9;
	amount_left_to_req = common->data_size_from_cmnd;
	amount_left_to_write = common->data_size_from_cmnd;

	while (amount_left_to_write > 0) {

		/* Queue a request for more data from the host */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && get_some_more) {

			/* Figure out how much we want to get:
			 * Try to get the remaining amount.
			 * But don't get more than the buffer size.
			 * And don't try to go past the end of the file.
			 * If we're not at a page boundary,
			 *	don't go past the next page.
			 * If this means getting 0, then we were asked
			 *	to write past the end of file.
			 * Finally, round down to a block boundary. */
			amount = min(amount_left_to_req, FSG_BUFLEN);
			amount = min((loff_t) amount, curlun->file_length -
					usb_offset);
			partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
			if (partial_page > 0)
				amount = min(amount,
					     (unsigned int) PAGE_CACHE_SIZE -
							partial_page);

			if (amount == 0) {
				get_some_more = 0;
				curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
				curlun->sense_data_info = usb_offset >> 9;
				curlun->info_valid = 1;
				continue;
			}
			amount -= (amount & 511);
			if (amount == 0) {

				/* Why were we asked to transfer a
				 * partial block? */
				get_some_more = 0;
				continue;
			}

			/* Get the next buffer */
			usb_offset += amount;
			common->usb_amount_left -= amount;
			amount_left_to_req -= amount;
			if (amount_left_to_req == 0)
				get_some_more = 0;

			/* amount is always divisible by 512, hence by
			 * the bulk-out maxpacket size */
			bh->outreq->length = amount;
			bh->bulk_out_intended_length = amount;
			bh->outreq->short_not_ok = 1;
			START_TRANSFER_OR(common, bulk_out, bh->outreq,
					  &bh->outreq_busy, &bh->state)
				/* Don't know what to do if
				 * common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			continue;
		}

		/* Write the received data to the backing file */
		bh = common->next_buffhd_to_drain;
		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
			break;			/* We stopped early */
		if (bh->state == BUF_STATE_FULL) {
			smp_rmb();
			common->next_buffhd_to_drain = bh->next;
			bh->state = BUF_STATE_EMPTY;

			/* Did something go wrong with the transfer? */
			if (bh->outreq->status != 0) {
				curlun->sense_data = SS_COMMUNICATION_FAILURE;
				curlun->sense_data_info = file_offset >> 9;
				curlun->info_valid = 1;
				break;
			}

			amount = bh->outreq->actual;
			if (curlun->file_length - file_offset < amount) {
				LERROR(curlun,
				       "write %u @ %llu beyond end %llu\n",
				       amount,
				       (unsigned long long) file_offset,
				       (unsigned long long) curlun->file_length);
				amount = curlun->file_length - file_offset;
			}

			/* Perform the write */
			file_offset_tmp = file_offset;
			nwritten = vfs_write(curlun->filp,
					(char __user *) bh->buf,
					amount, &file_offset_tmp);
			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
					(unsigned long long) file_offset,
					(int) nwritten);
			if (signal_pending(current))
				return -EINTR;		/* Interrupted! */

			if (nwritten < 0) {
				LDBG(curlun, "error in file write: %d\n",
						(int) nwritten);
				nwritten = 0;
			} else if (nwritten < amount) {
				LDBG(curlun, "partial file write: %d/%u\n",
						(int) nwritten, amount);
				nwritten -= (nwritten & 511);
				/* Round down to a block */
			}
			file_offset += nwritten;
			amount_left_to_write -= nwritten;
			common->residue -= nwritten;

			/* If an error occurred, report it and its position */
			if (nwritten < amount) {
				curlun->sense_data = SS_WRITE_ERROR;
				curlun->sense_data_info = file_offset >> 9;
				curlun->info_valid = 1;
				break;
			}

			/* Did the host decide to stop early? */
			if (bh->outreq->actual != bh->outreq->length) {
				common->short_packet_received = 1;
				break;
			}
			continue;
		}

		/* Wait for something to happen */
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}

	return -EIO;		/* No default reply */
}


/*-------------------------------------------------------------------------*/

static int do_synchronize_cache(struct fsg_common *common)
{
	struct fsg_lun	*curlun = common->curlun;
	int		rc;

	/* We ignore the requested LBA and write out all of the file's
	 * dirty data buffers. */
	rc = fsg_lun_fsync_sub(curlun);
	if (rc)
		curlun->sense_data = SS_WRITE_ERROR;
	return 0;
}


/*-------------------------------------------------------------------------*/

static void invalidate_sub(struct fsg_lun *curlun)
{
	struct file	*filp = curlun->filp;
	struct inode	*inode = filp->f_path.dentry->d_inode;
	unsigned long	rc;

	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
}

static int do_verify(struct fsg_common *common)
{
	struct fsg_lun		*curlun = common->curlun;
	u32			lba;
	u32			verification_length;
	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
	loff_t			file_offset, file_offset_tmp;
	u32			amount_left;
	unsigned int		amount;
	ssize_t			nread;

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	lba = get_unaligned_be32(&common->cmnd[2]);
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* We allow DPO (Disable Page Out = don't save data in the
	 * cache) but we don't implement it. */
	if (common->cmnd[1] & ~0x10) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	verification_length = get_unaligned_be16(&common->cmnd[7]);
	if (unlikely(verification_length == 0))
		return -EIO;		/* No default reply */

	/* Prepare to carry out the file verify */
	amount_left = verification_length << 9;
	file_offset = ((loff_t) lba) << 9;

	/* Write out all the dirty buffers before invalidating them */
	fsg_lun_fsync_sub(curlun);
	if (signal_pending(current))
		return -EINTR;

	invalidate_sub(curlun);
	if (signal_pending(current))
		return -EINTR;

	/* Just try to read the requested blocks */
	while (amount_left > 0) {

		/* Figure out how much we need to read:
		 * Try to read the remaining amount, but not more than
		 * the buffer size.
		 * And don't try to read past the end of the file.
		 * If this means reading 0 then we were asked to read
		 * past the end of file. */
		amount = min(amount_left, FSG_BUFLEN);
		amount = min((loff_t) amount,
			     curlun->file_length - file_offset);
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info = file_offset >> 9;
			curlun->info_valid = 1;
			break;
		}

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = vfs_read(curlun->filp,
				 (char __user *) bh->buf,
				 amount, &file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
		      (unsigned long long) file_offset,
		      (int) nread);
		if (signal_pending(current))
			return -EINTR;

		if (nread < 0) {
			LDBG(curlun, "error in file verify: %d\n",
			     (int) nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file verify: %d/%u\n",
			     (int) nread, amount);
			nread -= (nread & 511);	/* Round down to a sector */
		}
		if (nread == 0) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info = file_offset >> 9;
			curlun->info_valid = 1;
			break;
		}
		file_offset += nread;
		amount_left -= nread;
	}
	return 0;
}


/*-------------------------------------------------------------------------*/

static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = common->curlun;
	u8		*buf = (u8 *) bh->buf;

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		memset(buf, 0, 36);
		buf[0] = 0x7f;		/* Unsupported, no device-type */
		buf[4] = 31;		/* Additional length */
		return 36;
	}

	buf[0] = curlun->cdrom ? TYPE_CDROM : TYPE_DISK;
	buf[1] = curlun->removable ? 0x80 : 0;
	buf[2] = 2;		/* ANSI SCSI level 2 */
	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
	buf[4] = 31;		/* Additional length */
	buf[5] = 0;		/* No special options */
	buf[6] = 0;
	buf[7] = 0;
	memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string);
	return 36;
}


static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = common->curlun;
	u8		*buf = (u8 *) bh->buf;
	u32		sd, sdinfo;
	int		valid;

	/*
	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
	 *
	 * If a REQUEST SENSE command is received from an initiator
	 * with a pending unit attention condition (before the target
	 * generates the contingent allegiance condition), then the
	 * target shall either:
	 *   a) report any pending sense data and preserve the unit
	 *	attention condition on the logical unit, or,
	 *   b) report the unit attention condition, may discard any
	 *	pending sense data, and clear the unit attention
	 *	condition on the logical unit for that initiator.
	 *
	 * FSG normally uses option a); enable this code to use option b).
	 */
#if 0
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
	}
#endif

	if (!curlun) {		/* Unsupported LUNs are okay */
		common->bad_lun_okay = 1;
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
		sdinfo = 0;
		valid = 0;
	} else {
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
		valid = curlun->info_valid << 7;
		curlun->sense_data = SS_NO_SENSE;
		curlun->sense_data_info = 0;
		curlun->info_valid = 0;
	}

	memset(buf, 0, 18);
	buf[0] = valid | 0x70;			/* Valid, current error */
	buf[2] = SK(sd);
	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
	buf[7] = 18 - 8;			/* Additional sense length */
	buf[12] = ASC(sd);
	buf[13] = ASCQ(sd);
	return 18;
}


static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = common->curlun;
	u32		lba = get_unaligned_be32(&common->cmnd[2]);
	int		pmi = common->cmnd[8];
	u8		*buf = (u8 *) bh->buf;

	/* Check the PMI and LBA fields */
	if (pmi > 1 || (pmi == 0 && lba != 0)) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
						/* Max logical block */
	put_unaligned_be32(512, &buf[4]);	/* Block length */
	return 8;
}


static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = common->curlun;
	int		msf = common->cmnd[1] & 0x02;
	u32		lba = get_unaligned_be32(&common->cmnd[2]);
	u8		*buf = (u8 *) bh->buf;

	if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	memset(buf, 0, 8);
	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
	store_cdrom_address(&buf[4], msf, lba);
	return 8;
}


static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = common->curlun;
	int		msf = common->cmnd[1] & 0x02;
	int		start_track = common->cmnd[6];
	u8		*buf = (u8 *) bh->buf;

	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
			start_track > 1) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	memset(buf, 0, 20);
	buf[1] = (20-2);		/* TOC data length */
	buf[2] = 1;			/* First track number */
	buf[3] = 1;			/* Last track number */
	buf[5] = 0x16;			/* Data track, copying allowed */
	buf[6] = 0x01;			/* Only track is number 1 */
	store_cdrom_address(&buf[8], msf, 0);

	buf[13] = 0x16;			/* Lead-out track is data */
	buf[14] = 0xAA;			/* Lead-out track number */
	store_cdrom_address(&buf[16], msf, curlun->num_sectors);
	return 20;
}


static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = common->curlun;
	int		mscmnd = common->cmnd[0];
	u8		*buf = (u8 *) bh->buf;
	u8		*buf0 = buf;
	int		pc, page_code;
	int		changeable_values, all_pages;
	int		valid_page = 0;
	int		len, limit;

	if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	pc = common->cmnd[2] >> 6;
	page_code = common->cmnd[2] & 0x3f;
	if (pc == 3) {
		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
		return -EINVAL;
	}
	changeable_values = (pc == 1);
	all_pages = (page_code == 0x3f);

	/* Write the mode parameter header.  Fixed values are: default
	 * medium type, no cache control (DPOFUA), and no block descriptors.
	 * The only variable value is the WriteProtect bit.  We will fill in
	 * the mode data length later. */
	memset(buf, 0, 8);
	if (mscmnd == SC_MODE_SENSE_6) {
		buf[2] = (curlun->ro ? 0x80 : 0x00);	/* WP, DPOFUA */
		buf += 4;
		limit = 255;
	} else {			/* SC_MODE_SENSE_10 */
		buf[3] = (curlun->ro ? 0x80 : 0x00);	/* WP, DPOFUA */
		buf += 8;
		limit = 65535;		/* Should really be FSG_BUFLEN */
	}

	/* No block descriptors */

	/* The mode pages, in numerical order.  The only page we support
	 * is the Caching page. */
	if (page_code == 0x08 || all_pages) {
		valid_page = 1;
		buf[0] = 0x08;		/* Page code */
		buf[1] = 10;		/* Page length */
		memset(buf+2, 0, 10);	/* None of the fields are changeable */

		if (!changeable_values) {
			buf[2] = 0x04;	/* Write cache enable, */
					/* Read cache not disabled */
					/* No cache retention priorities */
			put_unaligned_be16(0xffff, &buf[4]);
					/* Don't disable prefetch */
					/* Minimum prefetch = 0 */
			put_unaligned_be16(0xffff, &buf[8]);
					/* Maximum prefetch */
			put_unaligned_be16(0xffff, &buf[10]);
					/* Maximum prefetch ceiling */
		}
		buf += 12;
	}

	/* Check that a valid page was requested and the mode data length
	 * isn't too long. */
	len = buf - buf0;
	if (!valid_page || len > limit) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/* Store the mode data length */
	if (mscmnd == SC_MODE_SENSE_6)
		buf0[0] = len - 1;
	else
		put_unaligned_be16(len - 2, buf0);
	return len;
}


static int do_start_stop(struct fsg_common *common)
{
	if (!common->curlun) {
		return -EINVAL;
	} else if (!common->curlun->removable) {
		common->curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	}
	return 0;
}


static int do_prevent_allow(struct fsg_common *common)
{
	struct fsg_lun	*curlun = common->curlun;
	int		prevent;

	if (!common->curlun) {
		return -EINVAL;
	} else if (!common->curlun->removable) {
		common->curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	}

	prevent = common->cmnd[4] & 0x01;
	if ((common->cmnd[4] & ~0x01) != 0) {	/* Mask away Prevent */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	if (curlun->prevent_medium_removal && !prevent)
		fsg_lun_fsync_sub(curlun);
	curlun->prevent_medium_removal = prevent;
	return 0;
}


static int do_read_format_capacities(struct fsg_common *common,
			struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = common->curlun;
	u8		*buf = (u8 *) bh->buf;

	buf[0] = buf[1] = buf[2] = 0;
	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
	buf += 4;

	put_unaligned_be32(curlun->num_sectors, &buf[0]);
						/* Number of blocks */
	put_unaligned_be32(512, &buf[4]);	/* Block length */
	buf[4] = 0x02;				/* Current capacity */
	return 12;
}


static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = common->curlun;

	/* We don't support MODE SELECT */
	if (curlun)
		curlun->sense_data = SS_INVALID_COMMAND;
	return -EINVAL;
}


/*-------------------------------------------------------------------------*/

static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int	rc;

	rc = fsg_set_halt(fsg, fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint halt\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
			rc = 0;
			break;
		}

		/* Wait for a short time and then try again */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		rc = usb_ep_set_halt(fsg->bulk_in);
	}
	return rc;
}

static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int	rc;

	DBG(fsg, "bulk-in set wedge\n");
	rc = usb_ep_set_wedge(fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
			rc = 0;
			break;
		}

		/* Wait for a short time and then try again */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		rc = usb_ep_set_wedge(fsg->bulk_in);
	}
	return rc;
}

static int pad_with_zeros(struct fsg_dev *fsg)
{
	struct fsg_buffhd	*bh = fsg->common->next_buffhd_to_fill;
	u32			nkeep = bh->inreq->length;
	u32			nsend;
	int			rc;

	bh->state = BUF_STATE_EMPTY;		/* For the first iteration */
	fsg->common->usb_amount_left = nkeep + fsg->common->residue;
	while (fsg->common->usb_amount_left > 0) {

		/* Wait for the next buffer to be free */
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(fsg->common);
			if (rc)
				return rc;
		}

		nsend = min(fsg->common->usb_amount_left, FSG_BUFLEN);
		memset(bh->buf + nkeep, 0, nsend - nkeep);
		bh->inreq->length = nsend;
		bh->inreq->zero = 0;
		start_transfer(fsg, fsg->bulk_in, bh->inreq,
			       &bh->inreq_busy, &bh->state);
		bh = fsg->common->next_buffhd_to_fill = bh->next;
		fsg->common->usb_amount_left -= nsend;
		nkeep = 0;
	}
	return 0;
}

static int throw_away_data(struct fsg_common *common)
{
	struct fsg_buffhd	*bh;
	u32			amount;
	int			rc;

	for (bh = common->next_buffhd_to_drain;
	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
	     bh = common->next_buffhd_to_drain) {

		/* Throw away the data in a filled buffer */
		if (bh->state == BUF_STATE_FULL) {
			smp_rmb();
			bh->state = BUF_STATE_EMPTY;
			common->next_buffhd_to_drain = bh->next;

			/* A short packet or an error ends everything */
			if (bh->outreq->actual != bh->outreq->length ||
			    bh->outreq->status != 0) {
				raise_exception(common,
						FSG_STATE_ABORT_BULK_OUT);
				return -EINTR;
			}
			continue;
		}

		/* Try to submit another request if we need one */
		bh = common->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY
		 && common->usb_amount_left > 0) {
			amount = min(common->usb_amount_left, FSG_BUFLEN);

			/* amount is always divisible by 512, hence by
			 * the bulk-out maxpacket size */
			bh->outreq->length = amount;
			bh->bulk_out_intended_length = amount;
			bh->outreq->short_not_ok = 1;
			START_TRANSFER_OR(common, bulk_out, bh->outreq,
					  &bh->outreq_busy, &bh->state)
				/* Don't know what to do if
				 * common->fsg is NULL */
				return -EIO;
			common->next_buffhd_to_fill = bh->next;
			common->usb_amount_left -= amount;
			continue;
		}

		/* Otherwise wait for something to happen */
		rc = sleep_thread(common);
		if (rc)
			return rc;
	}
	return 0;
}


8ea864cf 1585static int finish_reply(struct fsg_common *common)
d5e2b67a 1586{
8ea864cf 1587 struct fsg_buffhd *bh = common->next_buffhd_to_fill;
d5e2b67a
MN
1588 int rc = 0;
1589
8ea864cf 1590 switch (common->data_dir) {
d5e2b67a 1591 case DATA_DIR_NONE:
d26a6aa0 1592 break; /* Nothing to send */
d5e2b67a
MN
1593
1594 /* If we don't know whether the host wants to read or write,
1595 * this must be CB or CBI with an unknown command. We mustn't
1596 * try to send or receive any data. So stall both bulk pipes
1597 * if we can and wait for a reset. */
1598 case DATA_DIR_UNKNOWN:
8ea864cf
MN
1599 if (!common->can_stall) {
1600 /* Nothing */
1601 } else if (fsg_is_set(common)) {
1602 fsg_set_halt(common->fsg, common->fsg->bulk_out);
1603 rc = halt_bulk_in_endpoint(common->fsg);
1604 } else {
1605 /* Don't know what to do if common->fsg is NULL */
1606 rc = -EIO;
d5e2b67a
MN
1607 }
1608 break;
1609
1610 /* All but the last buffer of data must have already been sent */
1611 case DATA_DIR_TO_HOST:
8ea864cf 1612 if (common->data_size == 0) {
93bcf12e 1613 /* Nothing to send */
d5e2b67a
MN
1614
1615 /* If there's no residue, simply send the last buffer */
8ea864cf 1616 } else if (common->residue == 0) {
d5e2b67a 1617 bh->inreq->zero = 0;
8ea864cf
MN
1618 START_TRANSFER_OR(common, bulk_in, bh->inreq,
1619 &bh->inreq_busy, &bh->state)
1620 return -EIO;
1621 common->next_buffhd_to_fill = bh->next;
d5e2b67a
MN
1622
1623 /* For Bulk-only, if we're allowed to stall then send the
1624 * short packet and halt the bulk-in endpoint. If we can't
1625 * stall, pad out the remaining data with 0's. */
8ea864cf 1626 } else if (common->can_stall) {
93bcf12e 1627 bh->inreq->zero = 1;
8ea864cf
MN
1628 START_TRANSFER_OR(common, bulk_in, bh->inreq,
1629 &bh->inreq_busy, &bh->state)
1630 /* Don't know what to do if
1631 * common->fsg is NULL */
1632 rc = -EIO;
1633 common->next_buffhd_to_fill = bh->next;
1634 if (common->fsg)
1635 rc = halt_bulk_in_endpoint(common->fsg);
1636 } else if (fsg_is_set(common)) {
1637 rc = pad_with_zeros(common->fsg);
93bcf12e 1638 } else {
8ea864cf
MN
1639 /* Don't know what to do if common->fsg is NULL */
1640 rc = -EIO;
d5e2b67a
MN
1641 }
1642 break;
1643
1644 /* We have processed all we want from the data the host has sent.
1645 * There may still be outstanding bulk-out requests. */
1646 case DATA_DIR_FROM_HOST:
8ea864cf 1647 if (common->residue == 0) {
d26a6aa0 1648 /* Nothing to receive */
d5e2b67a
MN
1649
1650 /* Did the host stop sending unexpectedly early? */
8ea864cf
MN
1651 } else if (common->short_packet_received) {
1652 raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
d5e2b67a 1653 rc = -EINTR;
d5e2b67a
MN
1654
1655 /* We haven't processed all the incoming data. Even though
1656 * we may be allowed to stall, doing so would cause a race.
1657 * The controller may already have ACK'ed all the remaining
1658 * bulk-out packets, in which case the host wouldn't see a
1659 * STALL. Not realizing the endpoint was halted, it wouldn't
1660 * clear the halt -- leading to problems later on. */
1661 #if 0
8ea864cf
MN
1662 } else if (common->can_stall) {
1663 if (fsg_is_set(common))
1664 fsg_set_halt(common->fsg,
1665 common->fsg->bulk_out);
1666 raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
d5e2b67a 1667 rc = -EINTR;
d5e2b67a
MN
1668 #endif
1669
1670 /* We can't stall. Read in the excess data and throw it
1671 * all away. */
d26a6aa0 1672 } else {
8ea864cf 1673 rc = throw_away_data(common);
d26a6aa0 1674 }
d5e2b67a
MN
1675 break;
1676 }
1677 return rc;
1678}
1679
1680
8ea864cf 1681static int send_status(struct fsg_common *common)
d5e2b67a 1682{
8ea864cf 1683 struct fsg_lun *curlun = common->curlun;
d5e2b67a 1684 struct fsg_buffhd *bh;
93bcf12e 1685 struct bulk_cs_wrap *csw;
d5e2b67a
MN
1686 int rc;
1687 u8 status = USB_STATUS_PASS;
1688 u32 sd, sdinfo = 0;
1689
1690 /* Wait for the next buffer to become available */
8ea864cf 1691 bh = common->next_buffhd_to_fill;
d5e2b67a 1692 while (bh->state != BUF_STATE_EMPTY) {
8ea864cf 1693 rc = sleep_thread(common);
d5e2b67a
MN
1694 if (rc)
1695 return rc;
1696 }
1697
1698 if (curlun) {
1699 sd = curlun->sense_data;
1700 sdinfo = curlun->sense_data_info;
8ea864cf 1701 } else if (common->bad_lun_okay)
d5e2b67a
MN
1702 sd = SS_NO_SENSE;
1703 else
1704 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1705
8ea864cf
MN
1706 if (common->phase_error) {
1707 DBG(common, "sending phase-error status\n");
d5e2b67a
MN
1708 status = USB_STATUS_PHASE_ERROR;
1709 sd = SS_INVALID_COMMAND;
1710 } else if (sd != SS_NO_SENSE) {
8ea864cf 1711 DBG(common, "sending command-failure status\n");
d5e2b67a 1712 status = USB_STATUS_FAIL;
8ea864cf 1713 VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
d5e2b67a
MN
1714 " info x%x\n",
1715 SK(sd), ASC(sd), ASCQ(sd), sdinfo);
1716 }
1717
93bcf12e 1718 /* Store and send the Bulk-only CSW */
d26a6aa0 1719 csw = (void *)bh->buf;
d5e2b67a 1720
93bcf12e 1721 csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
8ea864cf
MN
1722 csw->Tag = common->tag;
1723 csw->Residue = cpu_to_le32(common->residue);
93bcf12e 1724 csw->Status = status;
d5e2b67a 1725
93bcf12e
MN
1726 bh->inreq->length = USB_BULK_CS_WRAP_LEN;
1727 bh->inreq->zero = 0;
8ea864cf
MN
1728 START_TRANSFER_OR(common, bulk_in, bh->inreq,
1729 &bh->inreq_busy, &bh->state)
1730 /* Don't know what to do if common->fsg is NULL */
1731 return -EIO;
d5e2b67a 1732
8ea864cf 1733 common->next_buffhd_to_fill = bh->next;
d5e2b67a
MN
1734 return 0;
1735}
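/*
 * For reference, the 13-byte Command Status Wrapper that send_status()
 * fills in above has this layout on the wire (per the USB Mass Storage
 * Bulk-Only Transport spec; the field names below are the spec's and
 * need not match the members of struct bulk_cs_wrap):
 *
 *	offset 0,  4 bytes: dCSWSignature   = 0x53425355 ('USBS')
 *	offset 4,  4 bytes: dCSWTag         -- echoes the CBW's tag
 *	offset 8,  4 bytes: dCSWDataResidue -- bytes requested minus bytes handled
 *	offset 12, 1 byte : bCSWStatus      -- 0 pass, 1 fail, 2 phase error
 */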
1736
1737
1738/*-------------------------------------------------------------------------*/
1739
1740/* Check whether the command is properly formed and whether its data size
1741 * and direction agree with the values we already have. */
8ea864cf 1742static int check_command(struct fsg_common *common, int cmnd_size,
d5e2b67a
MN
1743 enum data_direction data_dir, unsigned int mask,
1744 int needs_medium, const char *name)
1745{
1746 int i;
8ea864cf 1747 int lun = common->cmnd[1] >> 5;
d5e2b67a
MN
1748 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
1749 char hdlen[20];
1750 struct fsg_lun *curlun;
1751
d5e2b67a 1752 hdlen[0] = 0;
8ea864cf
MN
1753 if (common->data_dir != DATA_DIR_UNKNOWN)
1754 sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
1755 common->data_size);
1756 VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
d26a6aa0 1757 name, cmnd_size, dirletter[(int) data_dir],
8ea864cf 1758 common->data_size_from_cmnd, common->cmnd_size, hdlen);
d5e2b67a
MN
1759
1760 /* We can't reply at all until we know the correct data direction
1761 * and size. */
8ea864cf 1762 if (common->data_size_from_cmnd == 0)
d5e2b67a 1763 data_dir = DATA_DIR_NONE;
8ea864cf
MN
1764 if (common->data_size < common->data_size_from_cmnd) {
1765 /* Host data size < Device data size is a phase error.
1766 * Carry out the command, but only transfer as much as
1767 * we are allowed. */
1768 common->data_size_from_cmnd = common->data_size;
1769 common->phase_error = 1;
d5e2b67a 1770 }
8ea864cf
MN
1771 common->residue = common->data_size;
1772 common->usb_amount_left = common->data_size;
d5e2b67a
MN
1773
1774 /* Conflicting data directions is a phase error */
8ea864cf
MN
1775 if (common->data_dir != data_dir
1776 && common->data_size_from_cmnd > 0) {
1777 common->phase_error = 1;
d5e2b67a
MN
1778 return -EINVAL;
1779 }
1780
1781 /* Verify the length of the command itself */
8ea864cf 1782 if (cmnd_size != common->cmnd_size) {
d5e2b67a
MN
1783
1784 /* Special case workaround: There are plenty of buggy SCSI
1785 * implementations. Many have issues with cbw->Length
1786 * field passing a wrong command size. For those cases we
1787 * always try to work around the problem by using the length
1788 * sent by the host side provided it is at least as large
1789 * as the correct command length.
1790 * Examples of such cases would be MS-Windows, which issues
1791 * REQUEST SENSE with cbw->Length == 12 where it should
1792 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
1793 * REQUEST SENSE with cbw->Length == 10 where it should
1794 * be 6 as well.
1795 */
8ea864cf
MN
1796 if (cmnd_size <= common->cmnd_size) {
1797 DBG(common, "%s is buggy! Expected length %d "
a41ae418 1798 "but we got %d\n", name,
8ea864cf
MN
1799 cmnd_size, common->cmnd_size);
1800 cmnd_size = common->cmnd_size;
d5e2b67a 1801 } else {
8ea864cf 1802 common->phase_error = 1;
d5e2b67a
MN
1803 return -EINVAL;
1804 }
1805 }
1806
1807 /* Check that the LUN values are consistent */
8ea864cf
MN
1808 if (common->lun != lun)
1809 DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
1810 common->lun, lun);
d5e2b67a
MN
1811
1812 /* Check the LUN */
8ea864cf
MN
1813 if (common->lun >= 0 && common->lun < common->nluns) {
1814 curlun = &common->luns[common->lun];
1815 common->curlun = curlun;
1816 if (common->cmnd[0] != SC_REQUEST_SENSE) {
d5e2b67a
MN
1817 curlun->sense_data = SS_NO_SENSE;
1818 curlun->sense_data_info = 0;
1819 curlun->info_valid = 0;
1820 }
1821 } else {
8ea864cf
MN
1822 common->curlun = NULL;
1823 curlun = NULL;
1824 common->bad_lun_okay = 0;
d5e2b67a
MN
1825
1826 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
1827 * to use unsupported LUNs; all others may not. */
8ea864cf
MN
1828 if (common->cmnd[0] != SC_INQUIRY &&
1829 common->cmnd[0] != SC_REQUEST_SENSE) {
1830 DBG(common, "unsupported LUN %d\n", common->lun);
d5e2b67a
MN
1831 return -EINVAL;
1832 }
1833 }
1834
1835 /* If a unit attention condition exists, only INQUIRY and
1836 * REQUEST SENSE commands are allowed; anything else must fail. */
1837 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
8ea864cf
MN
1838 common->cmnd[0] != SC_INQUIRY &&
1839 common->cmnd[0] != SC_REQUEST_SENSE) {
d5e2b67a
MN
1840 curlun->sense_data = curlun->unit_attention_data;
1841 curlun->unit_attention_data = SS_NO_SENSE;
1842 return -EINVAL;
1843 }
1844
1845 /* Check that only command bytes listed in the mask are non-zero */
8ea864cf 1846 common->cmnd[1] &= 0x1f; /* Mask away the LUN */
d5e2b67a 1847 for (i = 1; i < cmnd_size; ++i) {
8ea864cf 1848 if (common->cmnd[i] && !(mask & (1 << i))) {
d5e2b67a
MN
1849 if (curlun)
1850 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1851 return -EINVAL;
1852 }
1853 }
1854
1855 /* If the medium isn't mounted and the command needs to access
1856 * it, return an error. */
1857 if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
1858 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
1859 return -EINVAL;
1860 }
1861
1862 return 0;
1863}
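/*
 * Note on check_command()'s return convention: 0 means the command may be
 * carried out; -EINVAL means the caller should skip the handler and let
 * the status phase report the failure, using whatever sense data (or
 * phase error) was recorded here.
 */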
1864
1865
8ea864cf 1866static int do_scsi_command(struct fsg_common *common)
d5e2b67a
MN
1867{
1868 struct fsg_buffhd *bh;
1869 int rc;
1870 int reply = -EINVAL;
1871 int i;
1872 static char unknown[16];
1873
8ea864cf 1874 dump_cdb(common);
d5e2b67a
MN
1875
1876 /* Wait for the next buffer to become available for data or status */
8ea864cf
MN
1877 bh = common->next_buffhd_to_fill;
1878 common->next_buffhd_to_drain = bh;
d5e2b67a 1879 while (bh->state != BUF_STATE_EMPTY) {
8ea864cf 1880 rc = sleep_thread(common);
d5e2b67a
MN
1881 if (rc)
1882 return rc;
1883 }
8ea864cf
MN
1884 common->phase_error = 0;
1885 common->short_packet_received = 0;
d5e2b67a 1886
8ea864cf
MN
1887 down_read(&common->filesem); /* We're using the backing file */
1888 switch (common->cmnd[0]) {
d5e2b67a
MN
1889
1890 case SC_INQUIRY:
8ea864cf
MN
1891 common->data_size_from_cmnd = common->cmnd[4];
1892 reply = check_command(common, 6, DATA_DIR_TO_HOST,
d26a6aa0
MN
1893 (1<<4), 0,
1894 "INQUIRY");
1895 if (reply == 0)
8ea864cf 1896 reply = do_inquiry(common, bh);
d5e2b67a
MN
1897 break;
1898
1899 case SC_MODE_SELECT_6:
8ea864cf
MN
1900 common->data_size_from_cmnd = common->cmnd[4];
1901 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
d26a6aa0
MN
1902 (1<<1) | (1<<4), 0,
1903 "MODE SELECT(6)");
1904 if (reply == 0)
8ea864cf 1905 reply = do_mode_select(common, bh);
d5e2b67a
MN
1906 break;
1907
1908 case SC_MODE_SELECT_10:
8ea864cf
MN
1909 common->data_size_from_cmnd =
1910 get_unaligned_be16(&common->cmnd[7]);
1911 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
d26a6aa0
MN
1912 (1<<1) | (3<<7), 0,
1913 "MODE SELECT(10)");
1914 if (reply == 0)
8ea864cf 1915 reply = do_mode_select(common, bh);
d5e2b67a
MN
1916 break;
1917
1918 case SC_MODE_SENSE_6:
8ea864cf
MN
1919 common->data_size_from_cmnd = common->cmnd[4];
1920 reply = check_command(common, 6, DATA_DIR_TO_HOST,
d26a6aa0
MN
1921 (1<<1) | (1<<2) | (1<<4), 0,
1922 "MODE SENSE(6)");
1923 if (reply == 0)
8ea864cf 1924 reply = do_mode_sense(common, bh);
d5e2b67a
MN
1925 break;
1926
1927 case SC_MODE_SENSE_10:
8ea864cf
MN
1928 common->data_size_from_cmnd =
1929 get_unaligned_be16(&common->cmnd[7]);
1930 reply = check_command(common, 10, DATA_DIR_TO_HOST,
d26a6aa0
MN
1931 (1<<1) | (1<<2) | (3<<7), 0,
1932 "MODE SENSE(10)");
1933 if (reply == 0)
8ea864cf 1934 reply = do_mode_sense(common, bh);
d5e2b67a
MN
1935 break;
1936
1937 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
8ea864cf
MN
1938 common->data_size_from_cmnd = 0;
1939 reply = check_command(common, 6, DATA_DIR_NONE,
d26a6aa0
MN
1940 (1<<4), 0,
1941 "PREVENT-ALLOW MEDIUM REMOVAL");
1942 if (reply == 0)
8ea864cf 1943 reply = do_prevent_allow(common);
d5e2b67a
MN
1944 break;
1945
1946 case SC_READ_6:
8ea864cf
MN
1947 i = common->cmnd[4];
1948 common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1949 reply = check_command(common, 6, DATA_DIR_TO_HOST,
d26a6aa0
MN
1950 (7<<1) | (1<<4), 1,
1951 "READ(6)");
1952 if (reply == 0)
8ea864cf 1953 reply = do_read(common);
d5e2b67a
MN
1954 break;
1955
1956 case SC_READ_10:
8ea864cf
MN
1957 common->data_size_from_cmnd =
1958 get_unaligned_be16(&common->cmnd[7]) << 9;
1959 reply = check_command(common, 10, DATA_DIR_TO_HOST,
d26a6aa0
MN
1960 (1<<1) | (0xf<<2) | (3<<7), 1,
1961 "READ(10)");
1962 if (reply == 0)
8ea864cf 1963 reply = do_read(common);
d5e2b67a
MN
1964 break;
1965
1966 case SC_READ_12:
8ea864cf
MN
1967 common->data_size_from_cmnd =
1968 get_unaligned_be32(&common->cmnd[6]) << 9;
1969 reply = check_command(common, 12, DATA_DIR_TO_HOST,
d26a6aa0
MN
1970 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1971 "READ(12)");
1972 if (reply == 0)
8ea864cf 1973 reply = do_read(common);
d5e2b67a
MN
1974 break;
1975
1976 case SC_READ_CAPACITY:
8ea864cf
MN
1977 common->data_size_from_cmnd = 8;
1978 reply = check_command(common, 10, DATA_DIR_TO_HOST,
d26a6aa0
MN
1979 (0xf<<2) | (1<<8), 1,
1980 "READ CAPACITY");
1981 if (reply == 0)
8ea864cf 1982 reply = do_read_capacity(common, bh);
d5e2b67a
MN
1983 break;
1984
1985 case SC_READ_HEADER:
8ea864cf 1986 if (!common->curlun || !common->curlun->cdrom)
d5e2b67a 1987 goto unknown_cmnd;
8ea864cf
MN
1988 common->data_size_from_cmnd =
1989 get_unaligned_be16(&common->cmnd[7]);
1990 reply = check_command(common, 10, DATA_DIR_TO_HOST,
d26a6aa0
MN
1991 (3<<7) | (0x1f<<1), 1,
1992 "READ HEADER");
1993 if (reply == 0)
8ea864cf 1994 reply = do_read_header(common, bh);
d5e2b67a
MN
1995 break;
1996
1997 case SC_READ_TOC:
8ea864cf 1998 if (!common->curlun || !common->curlun->cdrom)
d5e2b67a 1999 goto unknown_cmnd;
8ea864cf
MN
2000 common->data_size_from_cmnd =
2001 get_unaligned_be16(&common->cmnd[7]);
2002 reply = check_command(common, 10, DATA_DIR_TO_HOST,
d26a6aa0
MN
2003 (7<<6) | (1<<1), 1,
2004 "READ TOC");
2005 if (reply == 0)
8ea864cf 2006 reply = do_read_toc(common, bh);
d5e2b67a
MN
2007 break;
2008
2009 case SC_READ_FORMAT_CAPACITIES:
8ea864cf
MN
2010 common->data_size_from_cmnd =
2011 get_unaligned_be16(&common->cmnd[7]);
2012 reply = check_command(common, 10, DATA_DIR_TO_HOST,
d26a6aa0
MN
2013 (3<<7), 1,
2014 "READ FORMAT CAPACITIES");
2015 if (reply == 0)
8ea864cf 2016 reply = do_read_format_capacities(common, bh);
d5e2b67a
MN
2017 break;
2018
2019 case SC_REQUEST_SENSE:
8ea864cf
MN
2020 common->data_size_from_cmnd = common->cmnd[4];
2021 reply = check_command(common, 6, DATA_DIR_TO_HOST,
d26a6aa0
MN
2022 (1<<4), 0,
2023 "REQUEST SENSE");
2024 if (reply == 0)
8ea864cf 2025 reply = do_request_sense(common, bh);
d5e2b67a
MN
2026 break;
2027
2028 case SC_START_STOP_UNIT:
8ea864cf
MN
2029 common->data_size_from_cmnd = 0;
2030 reply = check_command(common, 6, DATA_DIR_NONE,
d26a6aa0
MN
2031 (1<<1) | (1<<4), 0,
2032 "START-STOP UNIT");
2033 if (reply == 0)
8ea864cf 2034 reply = do_start_stop(common);
d5e2b67a
MN
2035 break;
2036
2037 case SC_SYNCHRONIZE_CACHE:
8ea864cf
MN
2038 common->data_size_from_cmnd = 0;
2039 reply = check_command(common, 10, DATA_DIR_NONE,
d26a6aa0
MN
2040 (0xf<<2) | (3<<7), 1,
2041 "SYNCHRONIZE CACHE");
2042 if (reply == 0)
8ea864cf 2043 reply = do_synchronize_cache(common);
d5e2b67a
MN
2044 break;
2045
2046 case SC_TEST_UNIT_READY:
8ea864cf
MN
2047 common->data_size_from_cmnd = 0;
2048 reply = check_command(common, 6, DATA_DIR_NONE,
d5e2b67a
MN
2049 0, 1,
2050 "TEST UNIT READY");
2051 break;
2052
2053 /* Although optional, this command is used by MS-Windows. We
2054 * support a minimal version: BytChk must be 0. */
2055 case SC_VERIFY:
8ea864cf
MN
2056 common->data_size_from_cmnd = 0;
2057 reply = check_command(common, 10, DATA_DIR_NONE,
d26a6aa0
MN
2058 (1<<1) | (0xf<<2) | (3<<7), 1,
2059 "VERIFY");
2060 if (reply == 0)
8ea864cf 2061 reply = do_verify(common);
d5e2b67a
MN
2062 break;
2063
2064 case SC_WRITE_6:
8ea864cf
MN
2065 i = common->cmnd[4];
2066 common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2067 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
d26a6aa0
MN
2068 (7<<1) | (1<<4), 1,
2069 "WRITE(6)");
2070 if (reply == 0)
8ea864cf 2071 reply = do_write(common);
d5e2b67a
MN
2072 break;
2073
2074 case SC_WRITE_10:
8ea864cf
MN
2075 common->data_size_from_cmnd =
2076 get_unaligned_be16(&common->cmnd[7]) << 9;
2077 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
d26a6aa0
MN
2078 (1<<1) | (0xf<<2) | (3<<7), 1,
2079 "WRITE(10)");
2080 if (reply == 0)
8ea864cf 2081 reply = do_write(common);
d5e2b67a
MN
2082 break;
2083
2084 case SC_WRITE_12:
8ea864cf
MN
2085 common->data_size_from_cmnd =
2086 get_unaligned_be32(&common->cmnd[6]) << 9;
2087 reply = check_command(common, 12, DATA_DIR_FROM_HOST,
d26a6aa0
MN
2088 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2089 "WRITE(12)");
2090 if (reply == 0)
8ea864cf 2091 reply = do_write(common);
d5e2b67a
MN
2092 break;
2093
2094 /* Some mandatory commands that we recognize but don't implement.
2095 * They don't mean much in this setting. It's left as an exercise
2096 * for anyone interested to implement RESERVE and RELEASE in terms
2097 * of POSIX locks. */
2098 case SC_FORMAT_UNIT:
2099 case SC_RELEASE:
2100 case SC_RESERVE:
2101 case SC_SEND_DIAGNOSTIC:
d26a6aa0 2102 /* Fall through */
d5e2b67a
MN
2103
2104 default:
d26a6aa0 2105 unknown_cmnd:
8ea864cf
MN
2106 common->data_size_from_cmnd = 0;
2107 sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
2108 reply = check_command(common, common->cmnd_size,
d26a6aa0
MN
2109 DATA_DIR_UNKNOWN, 0xff, 0, unknown);
2110 if (reply == 0) {
8ea864cf 2111 common->curlun->sense_data = SS_INVALID_COMMAND;
d5e2b67a
MN
2112 reply = -EINVAL;
2113 }
2114 break;
2115 }
8ea864cf 2116 up_read(&common->filesem);
d5e2b67a
MN
2117
2118 if (reply == -EINTR || signal_pending(current))
2119 return -EINTR;
2120
2121 /* Set up the single reply buffer for finish_reply() */
2122 if (reply == -EINVAL)
d26a6aa0 2123 reply = 0; /* Error reply length */
8ea864cf
MN
2124 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
2125 reply = min((u32) reply, common->data_size_from_cmnd);
d5e2b67a
MN
2126 bh->inreq->length = reply;
2127 bh->state = BUF_STATE_FULL;
8ea864cf 2128 common->residue -= reply;
d26a6aa0 2129 } /* Otherwise it's already set */
d5e2b67a
MN
2130
2131 return 0;
2132}
2133
2134
2135/*-------------------------------------------------------------------------*/
2136
2137static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2138{
8ea864cf 2139 struct usb_request *req = bh->outreq;
d5e2b67a 2140 struct fsg_bulk_cb_wrap *cbw = req->buf;
8ea864cf 2141 struct fsg_common *common = fsg->common;
d5e2b67a
MN
2142
2143 /* Was this a real packet? Should it be ignored? */
2144 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2145 return -EINVAL;
2146
2147 /* Is the CBW valid? */
2148 if (req->actual != USB_BULK_CB_WRAP_LEN ||
2149 cbw->Signature != cpu_to_le32(
2150 USB_BULK_CB_SIG)) {
2151 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2152 req->actual,
2153 le32_to_cpu(cbw->Signature));
2154
2155 /* The Bulk-only spec says we MUST stall the IN endpoint
2156 * (6.6.1), so it's unavoidable. It also says we must
2157 * retain this state until the next reset, but there's
2158 * no way to tell the controller driver it should ignore
2159 * Clear-Feature(HALT) requests.
2160 *
2161 * We aren't required to halt the OUT endpoint; instead
2162 * we can simply accept and discard any data received
2163 * until the next reset. */
2164 wedge_bulk_in_endpoint(fsg);
2165 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2166 return -EINVAL;
2167 }
2168
2169 /* Is the CBW meaningful? */
2170 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2171 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2172 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2173 "cmdlen %u\n",
2174 cbw->Lun, cbw->Flags, cbw->Length);
2175
2176 /* We can do anything we want here, so let's stall the
2177 * bulk pipes if we are allowed to. */
8ea864cf 2178 if (common->can_stall) {
d5e2b67a
MN
2179 fsg_set_halt(fsg, fsg->bulk_out);
2180 halt_bulk_in_endpoint(fsg);
2181 }
2182 return -EINVAL;
2183 }
2184
2185 /* Save the command for later */
8ea864cf
MN
2186 common->cmnd_size = cbw->Length;
2187 memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
d5e2b67a 2188 if (cbw->Flags & USB_BULK_IN_FLAG)
8ea864cf 2189 common->data_dir = DATA_DIR_TO_HOST;
d5e2b67a 2190 else
8ea864cf
MN
2191 common->data_dir = DATA_DIR_FROM_HOST;
2192 common->data_size = le32_to_cpu(cbw->DataTransferLength);
2193 if (common->data_size == 0)
2194 common->data_dir = DATA_DIR_NONE;
2195 common->lun = cbw->Lun;
2196 common->tag = cbw->Tag;
d5e2b67a
MN
2197 return 0;
2198}
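/*
 * For reference, the 31-byte Command Block Wrapper validated above looks
 * like this on the wire (per the Bulk-Only Transport spec; field names
 * are the spec's, not necessarily those of struct fsg_bulk_cb_wrap):
 *
 *	offset 0,  4 bytes : dCBWSignature          = 0x43425355 ('USBC')
 *	offset 4,  4 bytes : dCBWTag                -- echoed back in the CSW
 *	offset 8,  4 bytes : dCBWDataTransferLength
 *	offset 12, 1 byte  : bmCBWFlags             -- bit 7 set = device-to-host
 *	offset 13, 1 byte  : bCBWLUN                -- bits 3..0
 *	offset 14, 1 byte  : bCBWCBLength           -- 1..16 valid CDB bytes
 *	offset 15, 16 bytes: CBWCB                  -- the SCSI CDB itself
 */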
2199
2200
8ea864cf 2201static int get_next_command(struct fsg_common *common)
d5e2b67a
MN
2202{
2203 struct fsg_buffhd *bh;
2204 int rc = 0;
2205
93bcf12e 2206 /* Wait for the next buffer to become available */
8ea864cf 2207 bh = common->next_buffhd_to_fill;
93bcf12e 2208 while (bh->state != BUF_STATE_EMPTY) {
8ea864cf 2209 rc = sleep_thread(common);
93bcf12e
MN
2210 if (rc)
2211 return rc;
2212 }
d5e2b67a 2213
93bcf12e 2214 /* Queue a request to read a Bulk-only CBW */
8ea864cf 2215 set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN);
93bcf12e 2216 bh->outreq->short_not_ok = 1;
8ea864cf
MN
2217 START_TRANSFER_OR(common, bulk_out, bh->outreq,
2218 &bh->outreq_busy, &bh->state)
2219 /* Don't know what to do if common->fsg is NULL */
2220 return -EIO;
d5e2b67a 2221
93bcf12e
MN
2222 /* We will drain the buffer in software, which means we
2223 * can reuse it for the next filling. No need to advance
2224 * next_buffhd_to_fill. */
d5e2b67a 2225
93bcf12e
MN
2226 /* Wait for the CBW to arrive */
2227 while (bh->state != BUF_STATE_FULL) {
8ea864cf 2228 rc = sleep_thread(common);
93bcf12e
MN
2229 if (rc)
2230 return rc;
d5e2b67a 2231 }
93bcf12e 2232 smp_rmb();
8ea864cf 2233 rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
93bcf12e
MN
2234 bh->state = BUF_STATE_EMPTY;
2235
d5e2b67a
MN
2236 return rc;
2237}
2238
2239
2240/*-------------------------------------------------------------------------*/
2241
8ea864cf 2242static int enable_endpoint(struct fsg_common *common, struct usb_ep *ep,
d5e2b67a
MN
2243 const struct usb_endpoint_descriptor *d)
2244{
2245 int rc;
2246
8ea864cf 2247 ep->driver_data = common;
d5e2b67a
MN
2248 rc = usb_ep_enable(ep, d);
2249 if (rc)
8ea864cf 2250 ERROR(common, "can't enable %s, result %d\n", ep->name, rc);
d5e2b67a
MN
2251 return rc;
2252}
2253
8ea864cf 2254static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
d5e2b67a
MN
2255 struct usb_request **preq)
2256{
2257 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2258 if (*preq)
2259 return 0;
8ea864cf 2260 ERROR(common, "can't allocate request for %s\n", ep->name);
d5e2b67a
MN
2261 return -ENOMEM;
2262}
2263
2264/*
2265 * Reset interface setting and re-init endpoint state (toggle etc).
2266 * Call with altsetting < 0 to disable the interface. The only other
2267 * available altsetting is 0, which enables the interface.
2268 */
8ea864cf 2269static int do_set_interface(struct fsg_common *common, int altsetting)
d5e2b67a
MN
2270{
2271 int rc = 0;
2272 int i;
2273 const struct usb_endpoint_descriptor *d;
2274
8ea864cf
MN
2275 if (common->running)
2276 DBG(common, "reset interface\n");
d5e2b67a
MN
2277
2278 reset:
2279 /* Deallocate the requests */
8ea864cf
MN
2280 if (common->prev_fsg) {
2281 struct fsg_dev *fsg = common->prev_fsg;
2282
2283 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2284 struct fsg_buffhd *bh = &common->buffhds[i];
d5e2b67a 2285
8ea864cf
MN
2286 if (bh->inreq) {
2287 usb_ep_free_request(fsg->bulk_in, bh->inreq);
2288 bh->inreq = NULL;
2289 }
2290 if (bh->outreq) {
2291 usb_ep_free_request(fsg->bulk_out, bh->outreq);
2292 bh->outreq = NULL;
2293 }
d5e2b67a 2294 }
8ea864cf
MN
2295
2296 /* Disable the endpoints */
2297 if (fsg->bulk_in_enabled) {
2298 usb_ep_disable(fsg->bulk_in);
2299 fsg->bulk_in_enabled = 0;
2300 }
2301 if (fsg->bulk_out_enabled) {
2302 usb_ep_disable(fsg->bulk_out);
2303 fsg->bulk_out_enabled = 0;
d5e2b67a 2304 }
d5e2b67a 2305
8ea864cf 2306 common->prev_fsg = NULL;
d5e2b67a 2307 }
d5e2b67a 2308
8ea864cf 2309 common->running = 0;
d5e2b67a
MN
2310 if (altsetting < 0 || rc != 0)
2311 return rc;
2312
8ea864cf 2313 DBG(common, "set interface %d\n", altsetting);
d5e2b67a 2314
8ea864cf
MN
2315 if (fsg_is_set(common)) {
2316 struct fsg_dev *fsg = common->fsg;
2317 common->prev_fsg = common->fsg;
2318
2319 /* Enable the endpoints */
2320 d = fsg_ep_desc(common->gadget,
2321 &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
2322 rc = enable_endpoint(common, fsg->bulk_in, d);
2323 if (rc)
d5e2b67a 2324 goto reset;
8ea864cf
MN
2325 fsg->bulk_in_enabled = 1;
2326
2327 d = fsg_ep_desc(common->gadget,
2328 &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
2329 rc = enable_endpoint(common, fsg->bulk_out, d);
2330 if (rc)
d5e2b67a 2331 goto reset;
8ea864cf
MN
2332 fsg->bulk_out_enabled = 1;
2333 common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
2334 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
d5e2b67a 2335
8ea864cf
MN
2336 /* Allocate the requests */
2337 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2338 struct fsg_buffhd *bh = &common->buffhds[i];
2339
2340 rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
2341 if (rc)
2342 goto reset;
2343 rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
2344 if (rc)
2345 goto reset;
2346 bh->inreq->buf = bh->outreq->buf = bh->buf;
2347 bh->inreq->context = bh->outreq->context = bh;
2348 bh->inreq->complete = bulk_in_complete;
2349 bh->outreq->complete = bulk_out_complete;
2350 }
2351
2352 common->running = 1;
2353 for (i = 0; i < common->nluns; ++i)
2354 common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2355 return rc;
2356 } else {
2357 return -EIO;
2358 }
d5e2b67a
MN
2359}
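/*
 * Note the "goto reset" error handling above: each enable/allocation
 * failure jumps back to the reset: label with a non-zero rc, which frees
 * any requests and disables any endpoints recorded via common->prev_fsg,
 * and then returns rc through the "altsetting < 0 || rc != 0" check.  A
 * half-configured interface is therefore always torn down before the
 * error is reported.
 */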
2360
2361
2362/*
2363 * Change our operational configuration. This code must agree with the code
2364 * that returns config descriptors, and with interface altsetting code.
2365 *
2366 * It's also responsible for power management interactions. Some
2367 * configurations might not work with our current power sources.
2368 * For now we just assume the gadget is always self-powered.
2369 */
8ea864cf 2370static int do_set_config(struct fsg_common *common, u8 new_config)
d5e2b67a
MN
2371{
2372 int rc = 0;
2373
2374 /* Disable the single interface */
8ea864cf
MN
2375 if (common->config != 0) {
2376 DBG(common, "reset config\n");
2377 common->config = 0;
2378 rc = do_set_interface(common, -1);
d5e2b67a
MN
2379 }
2380
2381 /* Enable the interface */
2382 if (new_config != 0) {
8ea864cf
MN
2383 common->config = new_config;
2384 rc = do_set_interface(common, 0);
d23b0f08 2385 if (rc != 0)
8ea864cf 2386 common->config = 0; /* Reset on errors */
d5e2b67a
MN
2387 }
2388 return rc;
2389}
2390
2391
d23b0f08
MN
2392/****************************** ALT CONFIGS ******************************/
2393
2394
2395static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2396{
2397 struct fsg_dev *fsg = fsg_from_func(f);
8ea864cf
MN
2398 fsg->common->prev_fsg = fsg->common->fsg;
2399 fsg->common->fsg = fsg;
2400 fsg->common->new_config = 1;
2401 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
d23b0f08
MN
2402 return 0;
2403}
2404
2405static void fsg_disable(struct usb_function *f)
2406{
2407 struct fsg_dev *fsg = fsg_from_func(f);
8ea864cf
MN
2408 fsg->common->prev_fsg = fsg->common->fsg;
2409 fsg->common->fsg = fsg;
2410 fsg->common->new_config = 0;
2411 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
d23b0f08
MN
2412}
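/*
 * fsg_set_alt() and fsg_disable() are called from the composite layer's
 * (potentially atomic) context, so they do not touch the endpoints
 * directly.  They only record the desired state and raise the
 * FSG_STATE_CONFIG_CHANGE exception; the worker thread then performs the
 * actual reconfiguration in handle_exception() via do_set_config().
 */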
2413
2414
d5e2b67a
MN
2415/*-------------------------------------------------------------------------*/
2416
8ea864cf 2417static void handle_exception(struct fsg_common *common)
d5e2b67a
MN
2418{
2419 siginfo_t info;
2420 int sig;
2421 int i;
d5e2b67a
MN
2422 struct fsg_buffhd *bh;
2423 enum fsg_state old_state;
2424 u8 new_config;
2425 struct fsg_lun *curlun;
2426 unsigned int exception_req_tag;
2427 int rc;
2428
2429 /* Clear the existing signals. Anything but SIGUSR1 is converted
2430 * into a high-priority EXIT exception. */
2431 for (;;) {
2432 sig = dequeue_signal_lock(current, &current->blocked, &info);
2433 if (!sig)
2434 break;
2435 if (sig != SIGUSR1) {
8ea864cf
MN
2436 if (common->state < FSG_STATE_EXIT)
2437 DBG(common, "Main thread exiting on signal\n");
2438 raise_exception(common, FSG_STATE_EXIT);
d5e2b67a
MN
2439 }
2440 }
2441
2442 /* Cancel all the pending transfers */
8ea864cf 2443 if (fsg_is_set(common)) {
d5e2b67a 2444 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
8ea864cf
MN
2445 bh = &common->buffhds[i];
2446 if (bh->inreq_busy)
2447 usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
2448 if (bh->outreq_busy)
2449 usb_ep_dequeue(common->fsg->bulk_out,
2450 bh->outreq);
d5e2b67a 2451 }
d5e2b67a 2452
8ea864cf
MN
2453 /* Wait until everything is idle */
2454 for (;;) {
2455 int num_active = 0;
2456 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2457 bh = &common->buffhds[i];
2458 num_active += bh->inreq_busy + bh->outreq_busy;
2459 }
2460 if (num_active == 0)
2461 break;
2462 if (sleep_thread(common))
2463 return;
2464 }
2465
2466 /* Clear out the controller's fifos */
2467 if (common->fsg->bulk_in_enabled)
2468 usb_ep_fifo_flush(common->fsg->bulk_in);
2469 if (common->fsg->bulk_out_enabled)
2470 usb_ep_fifo_flush(common->fsg->bulk_out);
2471 }
d5e2b67a
MN
2472
2473 /* Reset the I/O buffer states and pointers, the SCSI
2474 * state, and the exception. Then invoke the handler. */
8ea864cf 2475 spin_lock_irq(&common->lock);
d5e2b67a
MN
2476
2477 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
8ea864cf 2478 bh = &common->buffhds[i];
d5e2b67a
MN
2479 bh->state = BUF_STATE_EMPTY;
2480 }
8ea864cf
MN
2481 common->next_buffhd_to_fill = &common->buffhds[0];
2482 common->next_buffhd_to_drain = &common->buffhds[0];
2483 exception_req_tag = common->exception_req_tag;
2484 new_config = common->new_config;
2485 old_state = common->state;
d5e2b67a
MN
2486
2487 if (old_state == FSG_STATE_ABORT_BULK_OUT)
8ea864cf 2488 common->state = FSG_STATE_STATUS_PHASE;
d5e2b67a 2489 else {
8ea864cf
MN
2490 for (i = 0; i < common->nluns; ++i) {
2491 curlun = &common->luns[i];
d5e2b67a 2492 curlun->prevent_medium_removal = 0;
d26a6aa0
MN
2493 curlun->sense_data = SS_NO_SENSE;
2494 curlun->unit_attention_data = SS_NO_SENSE;
d5e2b67a
MN
2495 curlun->sense_data_info = 0;
2496 curlun->info_valid = 0;
2497 }
8ea864cf 2498 common->state = FSG_STATE_IDLE;
d5e2b67a 2499 }
8ea864cf 2500 spin_unlock_irq(&common->lock);
d5e2b67a
MN
2501
2502 /* Carry out any extra actions required for the exception */
2503 switch (old_state) {
d5e2b67a 2504 case FSG_STATE_ABORT_BULK_OUT:
8ea864cf
MN
2505 send_status(common);
2506 spin_lock_irq(&common->lock);
2507 if (common->state == FSG_STATE_STATUS_PHASE)
2508 common->state = FSG_STATE_IDLE;
2509 spin_unlock_irq(&common->lock);
d5e2b67a
MN
2510 break;
2511
2512 case FSG_STATE_RESET:
2513 /* In case we were forced against our will to halt a
2514 * bulk endpoint, clear the halt now. (The SuperH UDC
2515 * requires this.) */
8ea864cf
MN
2516 if (!fsg_is_set(common))
2517 break;
2518 if (test_and_clear_bit(IGNORE_BULK_OUT,
2519 &common->fsg->atomic_bitflags))
2520 usb_ep_clear_halt(common->fsg->bulk_in);
d5e2b67a 2521
8ea864cf
MN
2522 if (common->ep0_req_tag == exception_req_tag)
2523 ep0_queue(common); /* Complete the status stage */
d5e2b67a
MN
2524
2525 /* Technically this should go here, but it would only be
2526 * a waste of time. Ditto for the INTERFACE_CHANGE and
2527 * CONFIG_CHANGE cases. */
8ea864cf
MN
2528 /* for (i = 0; i < common->nluns; ++i) */
2529 /* common->luns[i].unit_attention_data = */
d26a6aa0 2530 /* SS_RESET_OCCURRED; */
d5e2b67a
MN
2531 break;
2532
d5e2b67a 2533 case FSG_STATE_CONFIG_CHANGE:
8ea864cf 2534 rc = do_set_config(common, new_config);
d5e2b67a
MN
2535 break;
2536
d5e2b67a
MN
2537 case FSG_STATE_EXIT:
2538 case FSG_STATE_TERMINATED:
8ea864cf
MN
2539 do_set_config(common, 0); /* Free resources */
2540 spin_lock_irq(&common->lock);
2541 common->state = FSG_STATE_TERMINATED; /* Stop the thread */
2542 spin_unlock_irq(&common->lock);
d5e2b67a 2543 break;
d23b0f08
MN
2544
2545 case FSG_STATE_INTERFACE_CHANGE:
2546 case FSG_STATE_DISCONNECT:
2547 case FSG_STATE_COMMAND_PHASE:
2548 case FSG_STATE_DATA_PHASE:
2549 case FSG_STATE_STATUS_PHASE:
2550 case FSG_STATE_IDLE:
2551 break;
d5e2b67a
MN
2552 }
2553}
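/*
 * Exception handling in a nutshell: raise_exception() records the new
 * state and pokes the worker thread with SIGUSR1; handle_exception()
 * then drains pending signals, cancels every outstanding bulk request
 * and waits for the buffers to go idle, resets the buffer and SCSI state
 * under the lock, and finally performs the action specific to the state
 * that was in force when the exception was raised.
 */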
2554
2555
2556/*-------------------------------------------------------------------------*/
2557
8ea864cf 2558static int fsg_main_thread(void *common_)
d5e2b67a 2559{
8ea864cf 2560 struct fsg_common *common = common_;
d5e2b67a
MN
2561
2562 /* Allow the thread to be killed by a signal, but set the signal mask
2563 * to block everything but INT, TERM, KILL, and USR1. */
2564 allow_signal(SIGINT);
2565 allow_signal(SIGTERM);
2566 allow_signal(SIGKILL);
2567 allow_signal(SIGUSR1);
2568
2569 /* Allow the thread to be frozen */
2570 set_freezable();
2571
2572 /* Arrange for userspace references to be interpreted as kernel
2573 * pointers. That way we can pass a kernel pointer to a routine
2574 * that expects a __user pointer and it will work okay. */
2575 set_fs(get_ds());
2576
2577 /* The main loop */
8ea864cf
MN
2578 while (common->state != FSG_STATE_TERMINATED) {
2579 if (exception_in_progress(common) || signal_pending(current)) {
2580 handle_exception(common);
d5e2b67a
MN
2581 continue;
2582 }
2583
8ea864cf
MN
2584 if (!common->running) {
2585 sleep_thread(common);
d5e2b67a
MN
2586 continue;
2587 }
2588
8ea864cf 2589 if (get_next_command(common))
d5e2b67a
MN
2590 continue;
2591
8ea864cf
MN
2592 spin_lock_irq(&common->lock);
2593 if (!exception_in_progress(common))
2594 common->state = FSG_STATE_DATA_PHASE;
2595 spin_unlock_irq(&common->lock);
d5e2b67a 2596
8ea864cf 2597 if (do_scsi_command(common) || finish_reply(common))
d5e2b67a
MN
2598 continue;
2599
8ea864cf
MN
2600 spin_lock_irq(&common->lock);
2601 if (!exception_in_progress(common))
2602 common->state = FSG_STATE_STATUS_PHASE;
2603 spin_unlock_irq(&common->lock);
d5e2b67a 2604
8ea864cf 2605 if (send_status(common))
d5e2b67a
MN
2606 continue;
2607
8ea864cf
MN
2608 spin_lock_irq(&common->lock);
2609 if (!exception_in_progress(common))
2610 common->state = FSG_STATE_IDLE;
2611 spin_unlock_irq(&common->lock);
d23b0f08 2612 }
d5e2b67a 2613
8ea864cf
MN
2614 spin_lock_irq(&common->lock);
2615 common->thread_task = NULL;
2616 spin_unlock_irq(&common->lock);
d5e2b67a 2617
7f1ee826
MN
2618 if (!common->thread_exits || common->thread_exits(common) < 0) {
2619 struct fsg_lun *curlun = common->luns;
2620 unsigned i = common->nluns;
2621
2622 down_write(&common->filesem);
2623 for (; i--; ++curlun) {
2624 if (!fsg_lun_is_open(curlun))
2625 continue;
2626
2627 fsg_lun_close(curlun);
2628 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
2629 }
2630 up_write(&common->filesem);
2631 }
d5e2b67a
MN
2632
2633 /* Let the unbind and cleanup routines know the thread has exited */
8ea864cf 2634 complete_and_exit(&common->thread_notifier, 0);
d5e2b67a
MN
2635}
2636
2637
9c610213 2638/*************************** DEVICE ATTRIBUTES ***************************/
d5e2b67a 2639
d23b0f08
MN
2640/* Write permission is checked per LUN in store_*() functions. */
2641static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
2642static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);
d5e2b67a
MN
2643
2644
9c610213
MN
2645/****************************** FSG COMMON ******************************/
2646
2647static void fsg_common_release(struct kref *ref);
d5e2b67a 2648
9c610213 2649static void fsg_lun_release(struct device *dev)
d5e2b67a 2650{
9c610213 2651 /* Nothing needs to be done */
d5e2b67a
MN
2652}
2653
9c610213 2654static inline void fsg_common_get(struct fsg_common *common)
d5e2b67a 2655{
9c610213 2656 kref_get(&common->ref);
d5e2b67a
MN
2657}
2658
9c610213
MN
2659static inline void fsg_common_put(struct fsg_common *common)
2660{
2661 kref_put(&common->ref, fsg_common_release);
2662}
2663
2664
2665static struct fsg_common *fsg_common_init(struct fsg_common *common,
481e4929
MN
2666 struct usb_composite_dev *cdev,
2667 struct fsg_config *cfg)
9c610213 2668{
d23b0f08 2669 struct usb_gadget *gadget = cdev->gadget;
9c610213
MN
2670 struct fsg_buffhd *bh;
2671 struct fsg_lun *curlun;
481e4929 2672 struct fsg_lun_config *lcfg;
9c610213 2673 int nluns, i, rc;
d23b0f08 2674 char *pathbuf;
9c610213
MN
2675
2676 /* Find out how many LUNs there should be */
481e4929 2677 nluns = cfg->nluns;
9c610213
MN
2678 if (nluns < 1 || nluns > FSG_MAX_LUNS) {
2679 dev_err(&gadget->dev, "invalid number of LUNs: %u\n", nluns);
2680 return ERR_PTR(-EINVAL);
2681 }
2682
2683 /* Allocate? */
2684 if (!common) {
2685 common = kzalloc(sizeof *common, GFP_KERNEL);
2686 if (!common)
2687 return ERR_PTR(-ENOMEM);
2688 common->free_storage_on_release = 1;
2689 } else {
2690 memset(common, 0, sizeof *common);
2691 common->free_storage_on_release = 0;
2692 }
8ea864cf 2693
c85efcb9
MN
2694 common->private_data = cfg->private_data;
2695
9c610213 2696 common->gadget = gadget;
8ea864cf
MN
2697 common->ep0 = gadget->ep0;
2698 common->ep0req = cdev->req;
2699
2700 /* Maybe allocate device-global string IDs, and patch descriptors */
2701 if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
2702 rc = usb_string_id(cdev);
2703 if (rc < 0) {
2704 kfree(common);
2705 return ERR_PTR(rc);
2706 }
2707 fsg_strings[FSG_STRING_INTERFACE].id = rc;
2708 fsg_intf_desc.iInterface = rc;
2709 }
9c610213
MN
2710
2711 /* Create the LUNs, open their backing files, and register the
2712 * LUN devices in sysfs. */
2713 curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
2714 if (!curlun) {
2715 kfree(common);
2716 return ERR_PTR(-ENOMEM);
2717 }
2718 common->luns = curlun;
2719
2720 init_rwsem(&common->filesem);
2721
481e4929
MN
2722 for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) {
2723 curlun->cdrom = !!lcfg->cdrom;
2724 curlun->ro = lcfg->cdrom || lcfg->ro;
2725 curlun->removable = lcfg->removable;
9c610213
MN
2726 curlun->dev.release = fsg_lun_release;
2727 curlun->dev.parent = &gadget->dev;
d23b0f08 2728 /* curlun->dev.driver = &fsg_driver.driver; XXX */
9c610213 2729 dev_set_drvdata(&curlun->dev, &common->filesem);
e8b6f8c5
MN
2730 dev_set_name(&curlun->dev,
2731 cfg->lun_name_format
2732 ? cfg->lun_name_format
2733 : "lun%d",
2734 i);
9c610213
MN
2735
2736 rc = device_register(&curlun->dev);
2737 if (rc) {
2738 INFO(common, "failed to register LUN%d: %d\n", i, rc);
2739 common->nluns = i;
2740 goto error_release;
2741 }
2742
2743 rc = device_create_file(&curlun->dev, &dev_attr_ro);
2744 if (rc)
2745 goto error_luns;
2746 rc = device_create_file(&curlun->dev, &dev_attr_file);
2747 if (rc)
2748 goto error_luns;
2749
481e4929
MN
2750 if (lcfg->filename) {
2751 rc = fsg_lun_open(curlun, lcfg->filename);
9c610213
MN
2752 if (rc)
2753 goto error_luns;
481e4929 2754 } else if (!curlun->removable) {
9c610213
MN
2755 ERROR(common, "no file given for LUN%d\n", i);
2756 rc = -EINVAL;
2757 goto error_luns;
2758 }
2759 }
2760 common->nluns = nluns;
2761
2762
2763 /* Data buffers cyclic list */
9c610213 2764 bh = common->buffhds;
aae86e8a
MN
2765 i = FSG_NUM_BUFFERS;
2766 goto buffhds_first_it;
9c610213
MN
2767 do {
2768 bh->next = bh + 1;
aae86e8a
MN
2769 ++bh;
2770 buffhds_first_it:
2771 bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
2772 if (unlikely(!bh->buf)) {
2773 rc = -ENOMEM;
2774 goto error_release;
2775 }
2776 } while (--i);
9c610213
MN
2777 bh->next = common->buffhds;
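/* The loop above is a deliberate "enter in the middle" construction:
 * the goto skips the very first ->next assignment, so the do-while
 * allocates all FSG_NUM_BUFFERS buffers while linking only N-1 of the
 * ->next pointers, and the assignment just above closes the cycle. */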
2778
2779
481e4929
MN
2780 /* Prepare inquiryString */
2781 if (cfg->release != 0xffff) {
2782 i = cfg->release;
2783 } else {
90f79768 2784 i = usb_gadget_controller_number(gadget);
481e4929
MN
2785 if (i >= 0) {
2786 i = 0x0300 + i;
2787 } else {
9c610213
MN
2788 WARNING(common, "controller '%s' not recognized\n",
2789 gadget->name);
481e4929 2790 i = 0x0399;
9c610213
MN
2791 }
2792 }
481e4929
MN
2793 #define OR(x, y) ((x) ? (x) : (y))
2794 snprintf(common->inquiry_string, sizeof common->inquiry_string,
2795 "%-8s%-16s%04x",
2796 OR(cfg->vendor_name, "Linux "),
2797 /* Assume the product name depends on the first LUN */
2798 OR(cfg->product_name, common->luns->cdrom
2799 ? "File-CD Gadget  "
2800 : "File-Stor Gadget"),
2801 i);
9c610213
MN
2802
2803
2804 /* Some peripheral controllers are known not to be able to
2805 * halt bulk endpoints correctly. If one of them is present,
2806 * disable stalls.
2807 */
481e4929 2808 common->can_stall = cfg->can_stall &&
90f79768 2809 !(gadget_is_at91(common->gadget));
9c610213
MN
2810
2811
8ea864cf 2812 spin_lock_init(&common->lock);
9c610213 2813 kref_init(&common->ref);
8ea864cf
MN
2814
2815
2816 /* Tell the thread to start working */
c85efcb9 2817 common->thread_exits = cfg->thread_exits;
8ea864cf
MN
2818 common->thread_task =
2819 kthread_create(fsg_main_thread, common,
2820 OR(cfg->thread_name, "file-storage"));
2821 if (IS_ERR(common->thread_task)) {
2822 rc = PTR_ERR(common->thread_task);
2823 goto error_release;
2824 }
2825 init_completion(&common->thread_notifier);
e8b6f8c5
MN
2826 #undef OR
2827
d23b0f08
MN
2828
2829 /* Information */
2830 INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
2831 INFO(common, "Number of LUNs=%d\n", common->nluns);
2832
2833 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
2834 for (i = 0, nluns = common->nluns, curlun = common->luns;
2835 i < nluns;
2836 ++curlun, ++i) {
2837 char *p = "(no medium)";
2838 if (fsg_lun_is_open(curlun)) {
2839 p = "(error)";
2840 if (pathbuf) {
2841 p = d_path(&curlun->filp->f_path,
2842 pathbuf, PATH_MAX);
2843 if (IS_ERR(p))
2844 p = "(error)";
2845 }
2846 }
2847 LINFO(curlun, "LUN: %s%s%sfile: %s\n",
2848 curlun->removable ? "removable " : "",
2849 curlun->ro ? "read only " : "",
2850 curlun->cdrom ? "CD-ROM " : "",
2851 p);
2852 }
2853 kfree(pathbuf);
2854
8ea864cf
MN
2855 DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
2856
2857 wake_up_process(common->thread_task);
2858
9c610213
MN
2859 return common;
2860
2861
2862error_luns:
2863 common->nluns = i + 1;
2864error_release:
8ea864cf 2865 common->state = FSG_STATE_TERMINATED; /* The thread is dead */
d26a6aa0
MN
2866 /* Call fsg_common_release() directly; the ref might not be
2867 * initialised. */
9c610213
MN
2868 fsg_common_release(&common->ref);
2869 return ERR_PTR(rc);
2870}
2871
2872
2873static void fsg_common_release(struct kref *ref)
2874{
2875 struct fsg_common *common =
2876 container_of(ref, struct fsg_common, ref);
2877 unsigned i = common->nluns;
2878 struct fsg_lun *lun = common->luns;
aae86e8a 2879 struct fsg_buffhd *bh;
9c610213 2880
8ea864cf
MN
2881 /* If the thread isn't already dead, tell it to exit now */
2882 if (common->state != FSG_STATE_TERMINATED) {
2883 raise_exception(common, FSG_STATE_EXIT);
2884 wait_for_completion(&common->thread_notifier);
2885
2886 /* The cleanup routine waits for this completion also */
2887 complete(&common->thread_notifier);
2888 }
2889
9c610213
MN
2890 /* Beware of the tempting for -> do-while optimization: during
2891 * error recovery nluns may be zero. */
2892
2893 for (; i; --i, ++lun) {
2894 device_remove_file(&lun->dev, &dev_attr_ro);
2895 device_remove_file(&lun->dev, &dev_attr_file);
2896 fsg_lun_close(lun);
2897 device_unregister(&lun->dev);
2898 }
2899
2900 kfree(common->luns);
aae86e8a
MN
2901
2902 i = FSG_NUM_BUFFERS;
2903 bh = common->buffhds;
2904 do {
2905 kfree(bh->buf);
2906 } while (++bh, --i);
2907
9c610213
MN
2908 if (common->free_storage_on_release)
2909 kfree(common);
2910}
2911
2912
2913/*-------------------------------------------------------------------------*/
2914
2915
d23b0f08 2916static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
d5e2b67a 2917{
d23b0f08 2918 struct fsg_dev *fsg = fsg_from_func(f);
d5e2b67a
MN
2919
2920 DBG(fsg, "unbind\n");
9c610213
MN
2921 fsg_common_put(fsg->common);
2922 kfree(fsg);
d5e2b67a
MN
2923}
2924
2925
f88f6691 2926static int __init fsg_bind(struct usb_configuration *c, struct usb_function *f)
d5e2b67a 2927{
d23b0f08
MN
2928 struct fsg_dev *fsg = fsg_from_func(f);
2929 struct usb_gadget *gadget = c->cdev->gadget;
d5e2b67a
MN
2930 int rc;
2931 int i;
d5e2b67a 2932 struct usb_ep *ep;
d5e2b67a
MN
2933
2934 fsg->gadget = gadget;
d5e2b67a 2935
d23b0f08
MN
2936 /* New interface */
2937 i = usb_interface_id(c, f);
2938 if (i < 0)
2939 return i;
2940 fsg_intf_desc.bInterfaceNumber = i;
2941 fsg->interface_number = i;
d5e2b67a 2942
d5e2b67a 2943 /* Find all the endpoints we will use */
d5e2b67a
MN
2944 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
2945 if (!ep)
2946 goto autoconf_fail;
8ea864cf 2947 ep->driver_data = fsg->common; /* claim the endpoint */
d5e2b67a
MN
2948 fsg->bulk_in = ep;
2949
2950 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
2951 if (!ep)
2952 goto autoconf_fail;
8ea864cf 2953 ep->driver_data = fsg->common; /* claim the endpoint */
d5e2b67a
MN
2954 fsg->bulk_out = ep;
2955
d5e2b67a 2956 if (gadget_is_dualspeed(gadget)) {
d5e2b67a
MN
2957 /* Assume endpoint addresses are the same for both speeds */
2958 fsg_hs_bulk_in_desc.bEndpointAddress =
2959 fsg_fs_bulk_in_desc.bEndpointAddress;
2960 fsg_hs_bulk_out_desc.bEndpointAddress =
2961 fsg_fs_bulk_out_desc.bEndpointAddress;
d23b0f08 2962 f->hs_descriptors = fsg_hs_function;
d5e2b67a
MN
2963 }
2964
d5e2b67a
MN
2965 return 0;
2966
2967autoconf_fail:
2968 ERROR(fsg, "unable to autoconfigure all endpoints\n");
2969 rc = -ENOTSUPP;
d5e2b67a
MN
2970 return rc;
2971}
2972
2973
d23b0f08 2974/****************************** ADD FUNCTION ******************************/
d5e2b67a 2975
d23b0f08
MN
2976static struct usb_gadget_strings *fsg_strings_array[] = {
2977 &fsg_stringtab,
2978 NULL,
d5e2b67a
MN
2979};
2980
d23b0f08
MN
2981static int fsg_add(struct usb_composite_dev *cdev,
2982 struct usb_configuration *c,
2983 struct fsg_common *common)
d5e2b67a 2984{
d23b0f08
MN
2985 struct fsg_dev *fsg;
2986 int rc;
2987
2988 fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
2989 if (unlikely(!fsg))
2990 return -ENOMEM;
d5e2b67a 2991
d23b0f08
MN
2992 fsg->function.name = FSG_DRIVER_DESC;
2993 fsg->function.strings = fsg_strings_array;
2994 fsg->function.descriptors = fsg_fs_function;
2995 fsg->function.bind = fsg_bind;
2996 fsg->function.unbind = fsg_unbind;
2997 fsg->function.setup = fsg_setup;
2998 fsg->function.set_alt = fsg_set_alt;
2999 fsg->function.disable = fsg_disable;
3000
3001 fsg->common = common;
3002 /* Our caller holds a reference to the common structure, so we
3003 * don't have to worry about it being freed until we return
3004 * from this function.  So instead of incrementing the counter
3005 * now and decrementing it in error recovery, we increment it
3006 * only when the call to usb_add_function() succeeds. */
3007
3008 rc = usb_add_function(c, &fsg->function);
3009
3010 if (likely(rc == 0))
3011 fsg_common_get(fsg->common);
3012 else
3013 kfree(fsg);
3014
3015 return rc;
d5e2b67a 3016}
481e4929
MN
3017
3018
3019
3020/************************* Module parameters *************************/
3021
3022
3023struct fsg_module_parameters {
3024 char *file[FSG_MAX_LUNS];
3025 int ro[FSG_MAX_LUNS];
3026 int removable[FSG_MAX_LUNS];
3027 int cdrom[FSG_MAX_LUNS];
3028
3029 unsigned int file_count, ro_count, removable_count, cdrom_count;
3030 unsigned int luns; /* nluns */
3031 int stall; /* can_stall */
3032};
3033
3034
3035#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \
3036 module_param_array_named(prefix ## name, params.name, type, \
3037 &prefix ## params.name ## _count, \
3038 S_IRUGO); \
3039 MODULE_PARM_DESC(prefix ## name, desc)
3040
3041#define _FSG_MODULE_PARAM(prefix, params, name, type, desc) \
3042 module_param_named(prefix ## name, params.name, type, \
3043 S_IRUGO); \
3044 MODULE_PARM_DESC(prefix ## name, desc)
3045
3046#define FSG_MODULE_PARAMETERS(prefix, params) \
3047 _FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp, \
3048 "names of backing files or devices"); \
3049 _FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool, \
3050 "true to force read-only"); \
3051 _FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool, \
3052 "true to simulate removable media"); \
3053 _FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool, \
3054 "true to simulate CD-ROM instead of disk"); \
3055 _FSG_MODULE_PARAM(prefix, params, luns, uint, \
3056 "number of LUNs"); \
3057 _FSG_MODULE_PARAM(prefix, params, stall, bool, \
3058 "false to prevent bulk stalls")
3059
3060
3061static void
3062fsg_config_from_params(struct fsg_config *cfg,
3063 const struct fsg_module_parameters *params)
3064{
3065 struct fsg_lun_config *lun;
d26a6aa0 3066 unsigned i;
481e4929
MN
3067
3068 /* Configure LUNs */
d26a6aa0
MN
3069 cfg->nluns =
3070 min(params->luns ?: (params->file_count ?: 1u),
3071 (unsigned)FSG_MAX_LUNS);
3072 for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
481e4929
MN
3073 lun->ro = !!params->ro[i];
3074 lun->cdrom = !!params->cdrom[i];
d26a6aa0 3075 lun->removable = /* Removable by default */
481e4929
MN
3076 params->removable_count <= i || params->removable[i];
3077 lun->filename =
3078 params->file_count > i && params->file[i][0]
3079 ? params->file[i]
3080 : 0;
3081 }
3082
d26a6aa0 3083 /* Let MSF use defaults */
e8b6f8c5
MN
3084 cfg->lun_name_format = 0;
3085 cfg->thread_name = 0;
481e4929
MN
3086 cfg->vendor_name = 0;
3087 cfg->product_name = 0;
3088 cfg->release = 0xffff;
3089
c85efcb9
MN
3090 cfg->thread_exits = 0;
3091 cfg->private_data = 0;
3092
481e4929
MN
3093 /* Finalise */
3094 cfg->can_stall = params->stall;
3095}
3096
3097static inline struct fsg_common *
3098fsg_common_from_params(struct fsg_common *common,
3099 struct usb_composite_dev *cdev,
3100 const struct fsg_module_parameters *params)
3101 __attribute__((unused));
3102static inline struct fsg_common *
3103fsg_common_from_params(struct fsg_common *common,
3104 struct usb_composite_dev *cdev,
3105 const struct fsg_module_parameters *params)
3106{
3107 struct fsg_config cfg;
3108 fsg_config_from_params(&cfg, params);
3109 return fsg_common_init(common, cdev, &cfg);
3110}
3111
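/*
 * Example (not part of this file): a composite gadget could consume the
 * API above roughly as follows.  The function name msg_do_config and the
 * mod_data variable are illustrative placeholders; only the FSG_* macros
 * and fsg_* helpers they call are defined by this driver.
 */
#if 0	/* illustrative sketch only */
static struct fsg_module_parameters mod_data = {
	.stall = 1			/* allow bulk stalls by default */
};
FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);

static int __init msg_do_config(struct usb_configuration *c)
{
	struct fsg_common *common;
	int ret;

	/* Turn the module parameters into an fsg_common */
	common = fsg_common_from_params(NULL, c->cdev, &mod_data);
	if (IS_ERR(common))
		return PTR_ERR(common);

	/* Add the function; fsg_add() takes its own reference on success,
	 * so the caller's reference can be dropped either way. */
	ret = fsg_add(c->cdev, c, common);
	fsg_common_put(common);
	return ret;
}
#endif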