/*
 * bsg.c - block layer implementation of the sg v3 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2. See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
/*
 * TODO
 *	- Should this get merged, block/scsi_ioctl.c will be migrated into
 *	  this file. To keep maintenance down, it's easier to have them
 *	  separated right now.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/bsg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

static char bsg_version[] = "block layer sg (bsg) 0.4";

struct bsg_device {
	request_queue_t *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int minor;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BUS_ID_SIZE];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK		= 1,
	BSG_F_WRITE_PERM	= 2,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
#else
#define dprintk(fmt, args...)
#endif

#define list_entry_bc(entry)	list_entry((entry), struct bsg_command, list)

/*
 * just for testing
 */
#define BSG_MAJOR	(240)

static DEFINE_MUTEX(bsg_mutex);
static int bsg_device_nr, bsg_minor_idx;

#define BSG_LIST_SIZE	(8)
#define bsg_list_idx(minor)	((minor) & (BSG_LIST_SIZE - 1))
static struct hlist_head bsg_device_list[BSG_LIST_SIZE];

static struct class *bsg_class;
static LIST_HEAD(bsg_class_list);

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	int err;
	struct sg_io_v4 hdr;
	struct sg_io_v4 __user *uhdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};

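/*
 * Life cycle of a bsg_command, as implemented below: write(2) allocates
 * one in __bsg_alloc_command(), maps the user's sg_io_v4 to a request
 * and queues it on bd->busy_list; bsg_rq_end_io() moves it to
 * bd->done_list when the request completes; read(2) reaps it, copies
 * the result back to userspace and releases it via bsg_free_command().
 */
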
static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_alloc(bsg_cmd_cachep, GFP_USER);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		goto out;
	}

	memset(bc, 0, sizeof(*bc));
	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline void
bsg_del_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
	bd->done_cmds--;
	list_del(&bc->list);
}

static inline void
bsg_add_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
	bd->done_cmds++;
	list_add_tail(&bc->list, &bd->done_list);
	wake_up(&bd->wq_done);
}

static inline int bsg_io_schedule(struct bsg_device *bd, int state)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, state);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}

/*
 * get a new free command, blocking if needed and specified
 */
static struct bsg_command *bsg_get_command(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = __bsg_alloc_command(bd);
		if (bc)
			break;

		ret = bsg_io_schedule(bd, TASK_INTERRUPTIBLE);
		if (ret) {
			bc = ERR_PTR(ret);
			break;
		}

	} while (1);

	return bc;
}

static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
				struct sg_io_v4 *hdr, int has_write_perm)
{
	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */

	if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;
	if (blk_verify_command(rq->cmd, has_write_perm))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	return 0;
}
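
/*
 * Note on units: sg_io_v4.timeout is given in milliseconds, hence the
 * (hdr->timeout * HZ) / 1000 conversion to jiffies above. A zero
 * timeout falls back to the per-queue SG timeout, and finally to
 * BLK_DEFAULT_SG_TIMEOUT.
 */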

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
{
	if (hdr->guard != 'Q')
		return -EINVAL;
	if (hdr->request_len > BLK_MAX_CDB)
		return -EINVAL;
	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
	    hdr->din_xfer_len > (q->max_sectors << 9))
		return -EIO;

	/* not supported currently */
	if (hdr->protocol || hdr->subprotocol)
		return -EINVAL;

	/*
	 * looks sane, if no data then it should be fine from our POV
	 */
	if (!hdr->dout_xfer_len && !hdr->din_xfer_len)
		return 0;

	/* not supported currently */
	if (hdr->dout_xfer_len && hdr->din_xfer_len)
		return -EINVAL;

	*rw = hdr->dout_xfer_len ? WRITE : READ;

	return 0;
}
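
/*
 * Example (userspace sketch, not part of this driver): a minimal
 * sg_io_v4 that passes the checks above -- a 6-byte INQUIRY with a
 * din (read) transfer only. Field names are from <linux/bsg.h>; the
 * buffer sizes are illustrative.
 *
 *	struct sg_io_v4 hdr;
 *	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };  // INQUIRY
 *	unsigned char buf[96], sense[32];
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.guard = 'Q';			// mandatory magic
 *	hdr.protocol = hdr.subprotocol = 0;	// only 0/0 is accepted
 *	hdr.request = (__u64) (unsigned long) cdb;
 *	hdr.request_len = sizeof(cdb);		// must be <= BLK_MAX_CDB
 *	hdr.din_xferp = (__u64) (unsigned long) buf;
 *	hdr.din_xfer_len = sizeof(buf);		// dout_xfer_len stays 0
 *	hdr.response = (__u64) (unsigned long) sense;
 *	hdr.max_response_len = sizeof(sense);
 */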

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
	request_queue_t *q = bd->queue;
	struct request *rq;
	int ret, rw = 0; /* shut up gcc */
	unsigned int dxfer_len;
	void __user *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
							&bd->flags));
	if (ret) {
		blk_put_request(rq);
		return ERR_PTR(ret);
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
		if (ret) {
			dprintk("failed map at %d\n", ret);
			blk_put_request(rq);
			rq = ERR_PTR(ret);
		}
	}

	return rq;
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_del(&bc->list);
	bsg_add_done_cmd(bd, bc);
	spin_unlock_irqrestore(&bd->lock, flags);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
			    struct bsg_command *bc, struct request *rq)
{
	rq->sense = bc->sense;
	rq->sense_len = 0;

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}

static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_entry_bc(bd->done_list.next);
		bsg_del_done_cmd(bd, bc);
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *__bsg_get_done_cmd(struct bsg_device *bd, int state)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		ret = bsg_io_schedule(bd, state);
		if (ret) {
			bc = ERR_PTR(ret);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

static struct bsg_command *
bsg_get_done_cmd(struct bsg_device *bd, const struct iovec *iov)
{
	return __bsg_get_done_cmd(bd, TASK_INTERRUPTIBLE);
}

static struct bsg_command *
bsg_get_done_cmd_nosignals(struct bsg_device *bd)
{
	return __bsg_get_done_cmd(bd, TASK_UNINTERRUPTIBLE);
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio)
{
	int ret = 0;

	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->din_resid = rq->data_len;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min((unsigned int) hdr->max_response_len,
			      rq->sense_len);

		ret = copy_to_user((void __user *)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	blk_rq_unmap_user(bio);
	blk_put_request(rq);

	return ret;
}
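
/*
 * Userspace can spot a failed command by testing hdr.info after the
 * completed sg_io_v4 has been read back (sketch):
 *
 *	if (hdr.info & SG_INFO_CHECK)
 *		// inspect hdr.device_status, hdr.transport_status,
 *		// hdr.driver_status and the sense data at hdr.response
 */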

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd, TASK_UNINTERRUPTIBLE);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it. The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		bc = bsg_get_done_cmd_nosignals(bd);

		/*
		 * we _must_ complete before restarting, because
		 * bsg_release can't handle this failing.
		 */
		if (PTR_ERR(bc) == -ERESTARTSYS)
			continue;
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

typedef struct bsg_command *(*bsg_command_callback)(struct bsg_device *bd, const struct iovec *iov);

static ssize_t
__bsg_read(char __user *buf, size_t count, bsg_command_callback get_bc,
	   struct bsg_device *bd, const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = get_bc(bd, iov);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_rq_end_io() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio);

		if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
{
	if (file->f_mode & FMODE_WRITE)
		set_bit(BSG_F_WRITE_PERM, &bd->flags);
	else
		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}

static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %zu bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bytes_read = 0;
	ret = __bsg_read(buf, count, bsg_get_done_cmd,
			 bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	return bytes_read;
}

static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
			   size_t count, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		request_queue_t *q = bd->queue;

		bc = bsg_get_command(bd);
		if (!bc)
			break;
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		bc->uhdr = (struct sg_io_v4 __user *) buf;
		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_read;
	int ret;

	dprintk("%s: write %zu bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bsg_set_write_perm(bd, file);

	bytes_read = 0;
	ret = __bsg_write(bd, buf, count, &bytes_read);
	*ppos = bytes_read;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	dprintk("%s: returning %zd\n", bd->name, bytes_read);
	return bytes_read;
}

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0;

	mutex_lock(&bsg_mutex);

	if (!atomic_dec_and_test(&bd->ref_count))
		goto out;

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	blk_put_queue(bd->queue);
	hlist_del(&bd->dev_list);
	kfree(bd);
out:
	mutex_unlock(&bsg_mutex);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd = NULL;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif

	bd = bsg_alloc_device();
	if (!bd)
		return ERR_PTR(-ENOMEM);

	bd->queue = rq;
	kobject_get(&rq->kobj);
	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	bd->minor = iminor(inode);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, &bsg_device_list[bsg_list_idx(bd->minor)]);

	strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor)
{
	struct hlist_head *list = &bsg_device_list[bsg_list_idx(minor)];
	struct bsg_device *bd = NULL;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each(entry, list) {
		bd = hlist_entry(entry, struct bsg_device, dev_list);
		if (bd->minor == minor) {
			atomic_inc(&bd->ref_count);
			break;
		}

		bd = NULL;
	}

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = __bsg_get_device(iminor(inode));
	struct bsg_class_device *bcd, *__bcd;

	if (bd)
		return bd;

	/*
	 * find the class device
	 */
	bcd = NULL;
	mutex_lock(&bsg_mutex);
	list_for_each_entry(__bcd, &bsg_class_list, list) {
		if (__bcd->minor == iminor(inode)) {
			bcd = __bcd;
			break;
		}
	}
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	return bsg_add_device(inode, bcd->queue, file);
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
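
/*
 * poll(2) semantics: POLLIN | POLLRDNORM when at least one completed
 * command is waiting on done_list (read(2) will not block), POLLOUT
 * while there is still room to queue another command (write(2) will
 * not block).
 */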

static int
bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
	  unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;

	if (!bd)
		return -ENXIO;

	switch (cmd) {
	/*
	 * our own ioctls
	 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio;
		struct sg_io_v4 hdr;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		blk_execute_rq(bd->queue, NULL, rq, 0);
		blk_complete_sgv4_hdr_rq(rq, &hdr, bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return 0;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}

static struct file_operations bsg_fops = {
	.read		= bsg_read,
	.write		= bsg_write,
	.poll		= bsg_poll,
	.open		= bsg_open,
	.release	= bsg_release,
	.ioctl		= bsg_ioctl,
	.owner		= THIS_MODULE,
};
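
/*
 * Typical userspace usage (sketch, not part of this driver; the node
 * name depends on udev rules -- /dev/bsg/0:0:0:0 is one convention):
 *
 *	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);
 *
 *	// queue a command; one sg_io_v4 per write, filled in as shown
 *	// in the earlier example
 *	write(fd, &hdr, sizeof(hdr));
 *
 *	// reap the completion; blocks unless O_NONBLOCK was set
 *	read(fd, &hdr, sizeof(hdr));
 *
 * The SG_IO ioctl above performs the same round trip synchronously.
 */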

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	sysfs_remove_link(&q->kobj, "bsg");
	class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
	bcd->class_dev = NULL;
	list_del_init(&bcd->list);
	bsg_device_nr--;
	mutex_unlock(&bsg_mutex);
}

int bsg_register_queue(struct request_queue *q, char *name)
{
	struct bsg_class_device *bcd, *__bcd;
	dev_t dev;
	int ret = -EMFILE;
	struct class_device *class_dev = NULL;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));
	INIT_LIST_HEAD(&bcd->list);

	mutex_lock(&bsg_mutex);
	if (bsg_device_nr == BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		goto err;
	}

retry:
	list_for_each_entry(__bcd, &bsg_class_list, list) {
		if (__bcd->minor == bsg_minor_idx) {
			bsg_minor_idx++;
			if (bsg_minor_idx == BSG_MAX_DEVS)
				bsg_minor_idx = 0;
			goto retry;
		}
	}

	bcd->minor = bsg_minor_idx++;
	if (bsg_minor_idx == BSG_MAX_DEVS)
		bsg_minor_idx = 0;

	bcd->queue = q;
	dev = MKDEV(BSG_MAJOR, bcd->minor);
	class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev, "%s", name);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto err;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.dentry) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto err;
	}

	list_add_tail(&bcd->list, &bsg_class_list);
	bsg_device_nr++;

	mutex_unlock(&bsg_mutex);
	return 0;
err:
	if (class_dev)
		class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
	mutex_unlock(&bsg_mutex);
	return ret;
}
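
/*
 * Minor numbers are handed out round-robin: bsg_minor_idx remembers
 * where the previous search left off, and the retry loop above skips
 * any minor still present on bsg_class_list. Because at most
 * BSG_MAX_DEVS devices may be registered (checked on entry), the
 * search always terminates.
 */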

static int bsg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
{
	int ret;
	struct scsi_device *sdp = to_scsi_device(cl_dev->dev);
	struct request_queue *rq = sdp->request_queue;

	if (rq->kobj.parent)
		ret = bsg_register_queue(rq, kobject_name(rq->kobj.parent));
	else
		ret = bsg_register_queue(rq, kobject_name(&sdp->sdev_gendev.kobj));
	return ret;
}

static void bsg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
{
	bsg_unregister_queue(to_scsi_device(cl_dev->dev)->request_queue);
}

static struct class_interface bsg_intf = {
	.add	= bsg_add,
	.remove	= bsg_remove,
};

static struct cdev bsg_cdev = {
	.kobj	= {.name = "bsg", },
	.owner	= THIS_MODULE,
};

static int __init bsg_init(void)
{
	int ret, i;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
					   sizeof(struct bsg_command), 0, 0, NULL, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		kmem_cache_destroy(bsg_cmd_cachep);
		return PTR_ERR(bsg_class);
	}

	ret = register_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS, "bsg");
	if (ret) {
		kmem_cache_destroy(bsg_cmd_cachep);
		class_destroy(bsg_class);
		return ret;
	}

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
	if (ret) {
		kmem_cache_destroy(bsg_cmd_cachep);
		class_destroy(bsg_class);
		unregister_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
		return ret;
	}

	ret = scsi_register_interface(&bsg_intf);
	if (ret) {
		printk(KERN_ERR "bsg: failed register scsi interface %d\n", ret);
		kmem_cache_destroy(bsg_cmd_cachep);
		class_destroy(bsg_class);
		cdev_del(&bsg_cdev);
		unregister_chrdev_region(MKDEV(BSG_MAJOR, 0), BSG_MAX_DEVS);
		return ret;
	}

	printk(KERN_INFO "%s loaded\n", bsg_version);
	return 0;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION("Block layer SCSI generic (sg) driver");
MODULE_LICENSE("GPL");

device_initcall(bsg_init);